author		David Woodhouse <David.Woodhouse@intel.com>	2008-09-01 06:32:13 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-09-01 06:32:13 -0400
commit		9d7548d4ca3c52ecb58f098a32b0756cdf8f96ee (patch)
tree		651f7058bbaa2d8b2855286380d614afcf505118 /drivers
parent		31db6e9ea1dbdcf66b8227b4f7035dee1b1dd8c0 (diff)
parent		bef69ea0dcce574a425feb0a5aa4c63dd108b9a6 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers')
476 files changed, 23708 insertions, 6409 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index a280ab3d0833..2735bde73475 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
 obj-$(CONFIG_PARIDE) += block/paride/
 obj-$(CONFIG_TC) += tc/
 obj-$(CONFIG_USB) += usb/
+obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
 obj-$(CONFIG_PCI) += usb/
 obj-$(CONFIG_USB_GADGET) += usb/gadget/
 obj-$(CONFIG_SERIO) += input/serio/
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bb7c51f712bd..7d2edf143f16 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -563,9 +563,6 @@ EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
  */
 static int handle_eject_request(struct dock_station *ds, u32 event)
 {
-	if (!dock_present(ds))
-		return -ENODEV;
-
 	if (dock_in_progress(ds))
 		return -EBUSY;
 
@@ -573,8 +570,16 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
 	 * here we need to generate the undock
 	 * event prior to actually doing the undock
 	 * so that the device struct still exists.
+	 * Also, even send the dock event if the
+	 * device is not present anymore
 	 */
 	dock_event(ds, event, UNDOCK_EVENT);
+
+	if (!dock_present(ds)) {
+		complete_undock(ds);
+		return -ENODEV;
+	}
+
 	hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
 	undock(ds);
 	eject_dock(ds);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5622aee996b2..13593f9f2197 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -110,6 +110,31 @@ static struct acpi_ec {
 	u8 handlers_installed;
 } *boot_ec, *first_ec;
 
+/*
+ * Some Asus system have exchanged ECDT data/command IO addresses.
+ */
+static int print_ecdt_error(const struct dmi_system_id *id)
+{
+	printk(KERN_NOTICE PREFIX "%s detected - "
+		"ECDT has exchanged control/data I/O address\n",
+		id->ident);
+	return 0;
+}
+
+static struct dmi_system_id __cpuinitdata ec_dmi_table[] = {
+	{
+	print_ecdt_error, "Asus L4R", {
+	DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
+	DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
+	{
+	print_ecdt_error, "Asus M6R", {
+	DMI_MATCH(DMI_BIOS_VERSION, "0207"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
+	DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
+	{},
+};
+
 /* --------------------------------------------------------------------------
                              Transaction Management
    -------------------------------------------------------------------------- */
@@ -196,6 +221,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
 				return 0;
 			msleep(1);
 		}
+		if (acpi_ec_check_status(ec,event))
+			return 0;
 	}
 	pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
 		acpi_ec_read_status(ec),
@@ -911,6 +938,15 @@ int __init acpi_ec_ecdt_probe(void)
 		pr_info(PREFIX "EC description table is found, configuring boot EC\n");
 		boot_ec->command_addr = ecdt_ptr->control.address;
 		boot_ec->data_addr = ecdt_ptr->data.address;
+		if (dmi_check_system(ec_dmi_table)) {
+			/*
+			 * If the board falls into ec_dmi_table, it means
+			 * that ECDT table gives the incorrect command/status
+			 * & data I/O address. Just fix it.
+			 */
+			boot_ec->data_addr = ecdt_ptr->control.address;
+			boot_ec->command_addr = ecdt_ptr->data.address;
+		}
 		boot_ec->gpe = ecdt_ptr->gpe;
 		boot_ec->handle = ACPI_ROOT_OBJECT;
 		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 2a32c843cb4a..8892b9824fae 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -479,5 +479,8 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
 
 	acpi_tb_set_table_loaded_flag(table_index, FALSE);
 
+	/* Table unloaded, remove a reference to the ddb_handle object */
+
+	acpi_ut_remove_reference(ddb_handle);
 	return_ACPI_STATUS(AE_OK);
 }
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 549db42f16cf..bd5773878009 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -56,13 +56,14 @@ ACPI_MODULE_NAME("nsnames")
  * Size - Size of the pathname
  * *name_buffer - Where to return the pathname
  *
- * RETURN: Places the pathname into the name_buffer, in external format
+ * RETURN: Status
+ * Places the pathname into the name_buffer, in external format
  * (name segments separated by path separators)
  *
  * DESCRIPTION: Generate a full pathaname
  *
  ******************************************************************************/
-void
+acpi_status
 acpi_ns_build_external_path(struct acpi_namespace_node *node,
 			    acpi_size size, char *name_buffer)
 {
@@ -77,7 +78,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
 	if (index < ACPI_NAME_SIZE) {
 		name_buffer[0] = AML_ROOT_PREFIX;
 		name_buffer[1] = 0;
-		return;
+		return (AE_OK);
 	}
 
 	/* Store terminator byte, then build name backwards */
@@ -105,11 +106,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
 
 	if (index != 0) {
 		ACPI_ERROR((AE_INFO,
-			    "Could not construct pathname; index=%X, size=%X, Path=%s",
+			    "Could not construct external pathname; index=%X, size=%X, Path=%s",
 			    (u32) index, (u32) size, &name_buffer[size]));
+
+		return (AE_BAD_PARAMETER);
 	}
 
-	return;
+	return (AE_OK);
 }
 
 #ifdef ACPI_DEBUG_OUTPUT
@@ -129,6 +132,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
 
 char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
 {
+	acpi_status status;
 	char *name_buffer;
 	acpi_size size;
 
@@ -138,8 +142,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
 
 	size = acpi_ns_get_pathname_length(node);
 	if (!size) {
-		ACPI_ERROR((AE_INFO, "Invalid node failure"));
-		return_PTR(NULL);
+		return (NULL);
 	}
 
 	/* Allocate a buffer to be returned to caller */
@@ -152,7 +155,11 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
 
 	/* Build the path in the allocated buffer */
 
-	acpi_ns_build_external_path(node, size, name_buffer);
+	status = acpi_ns_build_external_path(node, size, name_buffer);
+	if (ACPI_FAILURE(status)) {
+		return (NULL);
+	}
+
 	return_PTR(name_buffer);
 }
 #endif
@@ -186,7 +193,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
 	while (next_node && (next_node != acpi_gbl_root_node)) {
 		if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
 			ACPI_ERROR((AE_INFO,
-				    "Invalid NS Node (%p) while traversing path",
+				    "Invalid Namespace Node (%p) while traversing namespace",
 				    next_node));
 			return 0;
 		}
@@ -234,8 +241,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
 
 	required_size = acpi_ns_get_pathname_length(node);
 	if (!required_size) {
-		ACPI_ERROR((AE_INFO, "Invalid node failure"));
-		return_ACPI_STATUS(AE_ERROR);
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
 
 	/* Validate/Allocate/Clear caller buffer */
@@ -247,7 +253,11 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
 
 	/* Build the path in the caller buffer */
 
-	acpi_ns_build_external_path(node, required_size, buffer->pointer);
+	status =
+	    acpi_ns_build_external_path(node, required_size, buffer->pointer);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
 			  (char *)buffer->pointer, (u32) required_size));
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 89f3b2abfdc7..cf47805a7448 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -849,7 +849,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
 		if (irq < 0)
 			continue;
 
-		if (irq >= ACPI_MAX_IRQS)
+		if (irq >= ARRAY_SIZE(acpi_irq_penalty))
 			continue;
 
 		if (used)
@@ -872,10 +872,12 @@ static int __init acpi_irq_penalty_update(char *str, int used)
  */
 void acpi_penalize_isa_irq(int irq, int active)
 {
-	if (active)
-		acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
-	else
-		acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+	if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+		if (active)
+			acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+		else
+			acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+	}
 }
 
 /*
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e36422a7122c..d3f0a62efcc1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -123,7 +123,7 @@ struct acpi_processor_errata errata __read_mostly;
 static int set_no_mwait(const struct dmi_system_id *id)
 {
 	printk(KERN_NOTICE PREFIX "%s detected - "
-		"disable mwait for CPU C-stetes\n", id->ident);
+		"disabling mwait for CPU C-states\n", id->ident);
 	idle_nomwait = 1;
 	return 0;
 }
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 283c08f5f4d4..cf5b1b7b684f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -41,7 +41,6 @@
 #include <linux/pm_qos_params.h>
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
-#include <linux/cpuidle.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0133af49cf06..80e32093e977 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(performance_mutex);
  * 0 -> cpufreq low level drivers initialized -> consider _PPC values
  * 1 -> ignore _PPC totally -> forced by user through boot param
  */
-static unsigned int ignore_ppc = -1;
+static int ignore_ppc = -1;
 module_param(ignore_ppc, uint, 0644);
 MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
 		 "limited by BIOS, this should help");
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index f61ebc679e66..d9063ea414e3 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -587,6 +587,9 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
 			} else {
 				temp_size_needed +=
 				    acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+				if (!temp_size_needed) {
+					return_ACPI_STATUS(AE_BAD_PARAMETER);
+				}
 			}
 		} else {
 			/*
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index e7bf34a7b1d2..7dcb67e0b215 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -242,10 +242,12 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
 {
 	acpi_status status = AE_OK;
 
-	if (!required_length) {
-		WARN_ON(1);
-		return AE_ERROR;
+	/* Parameter validation */
+
+	if (!buffer || !required_length) {
+		return (AE_BAD_PARAMETER);
 	}
+
 	switch (buffer->length) {
 	case ACPI_NO_BUFFER:
 
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index c5c791a575c9..42609d3a8aa9 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -135,6 +135,10 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
 		obj_pointer = object->package.elements;
 		break;
 
+		/*
+		 * These objects have a possible list of notify handlers.
+		 * Device object also may have a GPE block.
+		 */
 	case ACPI_TYPE_DEVICE:
 
 		if (object->device.gpe_block) {
@@ -142,9 +146,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
 						  gpe_block);
 		}
 
-		/* Walk the handler list for this device */
+		/*lint -fallthrough */
+
+	case ACPI_TYPE_PROCESSOR:
+	case ACPI_TYPE_THERMAL:
+
+		/* Walk the notify handler list for this object */
 
-		handler_desc = object->device.handler;
+		handler_desc = object->common_notify.handler;
 		while (handler_desc) {
 			next_desc = handler_desc->address_space.next;
 			acpi_ut_remove_reference(handler_desc);
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index e25484495e65..916eff399eb3 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -425,6 +425,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
 			       acpi_size * obj_length)
 {
 	acpi_size length;
+	acpi_size size;
 	acpi_status status = AE_OK;
 
 	ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
@@ -484,10 +485,14 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
 			 * Get the actual length of the full pathname to this object.
 			 * The reference will be converted to the pathname to the object
 			 */
-			length +=
-			    ACPI_ROUND_UP_TO_NATIVE_WORD
-			    (acpi_ns_get_pathname_length
-			     (internal_object->reference.node));
+			size =
+			    acpi_ns_get_pathname_length(internal_object->
+							reference.node);
+			if (!size) {
+				return_ACPI_STATUS(AE_BAD_PARAMETER);
+			}
+
+			length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
 			break;
 
 		default:
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index c33b1c6e93b1..cfe2c833474d 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -347,7 +347,7 @@ struct acpi_buffer *out)
 	strcpy(method, "WQ");
 	strncat(method, block->object_id, 2);
 
-	status = acpi_evaluate_object(handle, method, NULL, out);
+	status = acpi_evaluate_object(handle, method, &input, out);
 
 	/*
 	 * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ef3e5522e1a4..c729e6988bbb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -486,6 +486,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -575,9 +577,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
 
 	/* SiS */
-	{ PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */
-	{ PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */
-	{ PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */
+	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
+	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
+	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
 
 	/* Marvell */
 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index c294121fd69e..b1d08a8f5003 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -275,6 +275,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH10) */
 	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 
 	{ } /* terminate list */
 };
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5ba96c5052c8..79e3a8e7a84a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -104,6 +104,7 @@ struct ata_force_param {
 	unsigned long xfer_mask;
 	unsigned int horkage_on;
 	unsigned int horkage_off;
+	unsigned int lflags;
 };
 
 struct ata_force_ent {
@@ -196,22 +197,23 @@ void ata_force_cbl(struct ata_port *ap)
 }
 
 /**
- * ata_force_spd_limit - force SATA spd limit according to libata.force
+ * ata_force_link_limits - force link limits according to libata.force
  * @link: ATA link of interest
  *
- * Force SATA spd limit according to libata.force and whine about
- * it. When only the port part is specified (e.g. 1:), the limit
- * applies to all links connected to both the host link and all
- * fan-out ports connected via PMP. If the device part is
- * specified as 0 (e.g. 1.00:), it specifies the first fan-out
- * link not the host link. Device number 15 always points to the
- * host link whether PMP is attached or not.
+ * Force link flags and SATA spd limit according to libata.force
+ * and whine about it. When only the port part is specified
+ * (e.g. 1:), the limit applies to all links connected to both
+ * the host link and all fan-out ports connected via PMP. If the
+ * device part is specified as 0 (e.g. 1.00:), it specifies the
+ * first fan-out link not the host link. Device number 15 always
+ * points to the host link whether PMP is attached or not.
  *
  * LOCKING:
  * EH context.
  */
-static void ata_force_spd_limit(struct ata_link *link)
+static void ata_force_link_limits(struct ata_link *link)
 {
+	bool did_spd = false;
 	int linkno, i;
 
 	if (ata_is_host_link(link))
@@ -228,13 +230,22 @@ static void ata_force_spd_limit(struct ata_link *link)
 		if (fe->device != -1 && fe->device != linkno)
 			continue;
 
-		if (!fe->param.spd_limit)
-			continue;
+		/* only honor the first spd limit */
+		if (!did_spd && fe->param.spd_limit) {
+			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
+			ata_link_printk(link, KERN_NOTICE,
+					"FORCE: PHY spd limit set to %s\n",
+					fe->param.name);
+			did_spd = true;
+		}
 
-		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
-		ata_link_printk(link, KERN_NOTICE,
-			"FORCE: PHY spd limit set to %s\n", fe->param.name);
-		return;
+		/* let lflags stack */
+		if (fe->param.lflags) {
+			link->flags |= fe->param.lflags;
+			ata_link_printk(link, KERN_NOTICE,
+					"FORCE: link flag 0x%x forced -> 0x%x\n",
+					fe->param.lflags, link->flags);
+		}
 	}
 }
 
@@ -3277,7 +3288,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
 
 		found = 1;
-		if (dev->dma_mode != 0xff)
+		if (ata_dma_enabled(dev))
 			used_dma = 1;
 	}
 	if (!found)
@@ -3302,7 +3313,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 
 	/* step 3: set host DMA timings */
 	ata_link_for_each_dev(dev, link) {
-		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
+		if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
 			continue;
 
 		dev->xfer_mode = dev->dma_mode;
@@ -5188,19 +5199,18 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
  */
 int sata_link_init_spd(struct ata_link *link)
 {
-	u32 scontrol;
 	u8 spd;
 	int rc;
 
-	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
 	if (rc)
 		return rc;
 
-	spd = (scontrol >> 4) & 0xf;
+	spd = (link->saved_scontrol >> 4) & 0xf;
 	if (spd)
 		link->hw_sata_spd_limit &= (1 << spd) - 1;
 
-	ata_force_spd_limit(link);
+	ata_force_link_limits(link);
 
 	link->sata_spd_limit = link->hw_sata_spd_limit;
 
@@ -5783,9 +5793,10 @@ static void ata_port_detach(struct ata_port *ap)
 	ata_port_wait_eh(ap);
 
 	/* EH is now guaranteed to see UNLOADING - EH context belongs
-	 * to us. Disable all existing devices.
+	 * to us. Restore SControl and disable all existing devices.
 	 */
-	ata_port_for_each_link(link, ap) {
+	__ata_port_for_each_link(link, ap) {
+		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol);
 		ata_link_for_each_dev(dev, link)
 			ata_dev_disable(dev);
 	}
@@ -5991,6 +6002,9 @@ static int __init ata_parse_force_one(char **cur,
 		{ "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
 		{ "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
 		{ "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
+		{ "nohrst", .lflags = ATA_LFLAG_NO_HRST },
+		{ "nosrst", .lflags = ATA_LFLAG_NO_SRST },
+		{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
 	};
 	char *start = *cur, *p = *cur;
 	char *id, *val, *endp;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 58bdc538d229..c1db2f234d2e 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2040,7 +2040,7 @@ static void ata_eh_link_report(struct ata_link *link)
 	}
 
 	if (ehc->i.serror)
-		ata_port_printk(ap, KERN_ERR,
+		ata_link_printk(link, KERN_ERR,
 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
@@ -2171,18 +2171,12 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
 }
 
 static int ata_eh_followup_srst_needed(struct ata_link *link,
-				       int rc, int classify,
-				       const unsigned int *classes)
+				       int rc, const unsigned int *classes)
 {
 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
 		return 0;
-	if (rc == -EAGAIN) {
-		if (classify)
-			return 1;
-		rc = 0;
-	}
-	if (rc != 0)
-		return 0;
+	if (rc == -EAGAIN)
+		return 1;
 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
 		return 1;
 	return 0;
@@ -2210,6 +2204,10 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	 */
 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
 		max_tries++;
+	if (link->flags & ATA_LFLAG_NO_HRST)
+		hardreset = NULL;
+	if (link->flags & ATA_LFLAG_NO_SRST)
+		softreset = NULL;
 
 	now = jiffies;
 	deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
@@ -2247,10 +2245,10 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		ehc->i.action &= ~ATA_EH_RESET;
 		if (hardreset) {
 			reset = hardreset;
-			ehc->i.action = ATA_EH_HARDRESET;
+			ehc->i.action |= ATA_EH_HARDRESET;
 		} else if (softreset) {
 			reset = softreset;
-			ehc->i.action = ATA_EH_SOFTRESET;
+			ehc->i.action |= ATA_EH_SOFTRESET;
 		}
 
 		if (prereset) {
@@ -2305,9 +2303,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
 
 		rc = ata_do_reset(link, reset, classes, deadline);
+		if (rc && rc != -EAGAIN)
+			goto fail;
 
 		if (reset == hardreset &&
-		    ata_eh_followup_srst_needed(link, rc, classify, classes)) {
+		    ata_eh_followup_srst_needed(link, rc, classes)) {
 			/* okay, let's do follow-up softreset */
 			reset = softreset;
 
@@ -2322,10 +2322,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 			rc = ata_do_reset(link, reset, classes, deadline);
 		}
-
-		/* -EAGAIN can happen if we skipped followup SRST */
-		if (rc && rc != -EAGAIN)
-			goto fail;
 	} else {
 		if (verbose)
 			ata_link_printk(link, KERN_INFO, "no reset method "
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index fbe605711554..eb919c16a03e 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -181,7 +181,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
 
 	if (adev != acpi->last) {
 		pacpi_set_piomode(ap, adev);
-		if (adev->dma_mode)
+		if (ata_dma_enabled(adev))
 			pacpi_set_dmamode(ap, adev);
 		acpi->last = adev;
 	}
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index d7de7baf58a8..e8a0d99d7356 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -183,7 +183,7 @@ static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
 	u16 tmp16;
 
 	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
-	if (adev->dma_mode >= XFER_UDMA_0)
+	if (ata_using_udma(adev))
 		tmp16 |= (1 << dn);
 	else
 		tmp16 &= ~(1 << dn);
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 744beebaaf49..0c4b271a9d5a 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -149,10 +149,10 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_device *prev = ap->private_data;
 
 	/* See if the DMA settings could be wrong */
-	if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
+	if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
 		/* Maybe, but do the channels match MWDMA/UDMA ? */
-		if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
-		    (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
+		if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+		    (ata_using_udma(prev) && !ata_using_udma(adev)))
 			/* Switch the mode bits */
 			cs5530_set_dmamode(ap, adev);
 	}
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 27843c70eb9d..0221c9a46769 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -606,7 +606,7 @@ static void it821x_display_disk(int n, u8 *buf)
 {
 	unsigned char id[41];
 	int mode = 0;
-	char *mtype;
+	char *mtype = "";
 	char mbuf[8];
 	char *cbl = "(40 wire cable)";
 
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index e678af383d13..df64f2443001 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -198,7 +198,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
 
 	if (adev != ap->private_data) {
 		oldpiix_set_piomode(ap, adev);
-		if (adev->dma_mode)
+		if (ata_dma_enabled(adev))
 			oldpiix_set_dmamode(ap, adev);
 	}
 	return ata_sff_qc_issue(qc);
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index cbab397e3db7..0278fd2b8fb1 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -167,10 +167,10 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_device *prev = ap->private_data;
 
 	/* See if the DMA settings could be wrong */
-	if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
+	if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
 		/* Maybe, but do the channels match MWDMA/UDMA ? */
-		if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
-		    (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
+		if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+		    (ata_using_udma(prev) && !ata_using_udma(adev)))
 			/* Switch the mode bits */
 			sc1200_set_dmamode(ap, adev);
 	}
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 57d951b11f2d..8fdb2ce73210 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -324,62 +324,26 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
- * via_ata_sff_tf_load - send taskfile registers to host controller
+ * via_tf_load - send taskfile registers to host controller
  * @ap: Port to which output is sent
  * @tf: ATA taskfile register set
  *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * Note: This is to fix the internal bug of via chipsets, which
 * will reset the device register after changing the IEN bit on
 * ctl register
 */
-static void via_ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
-
-	if (tf->ctl != ap->last_ctl) {
-		iowrite8(tf->ctl, ioaddr->ctl_addr);
-		iowrite8(tf->device, ioaddr->device_addr);
-		ap->last_ctl = tf->ctl;
-		ata_wait_idle(ap);
-	}
-
-	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		iowrite8(tf->hob_feature, ioaddr->feature_addr);
-		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
-		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
-		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
-		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
-		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
-			tf->hob_feature,
-			tf->hob_nsect,
-			tf->hob_lbal,
-			tf->hob_lbam,
-			tf->hob_lbah);
-	}
+	struct ata_taskfile tmp_tf;
 
-	if (is_addr) {
-		iowrite8(tf->feature, ioaddr->feature_addr);
-		iowrite8(tf->nsect, ioaddr->nsect_addr);
-		iowrite8(tf->lbal, ioaddr->lbal_addr);
-		iowrite8(tf->lbam, ioaddr->lbam_addr);
-		iowrite8(tf->lbah, ioaddr->lbah_addr);
-		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
-			tf->feature,
-			tf->nsect,
-			tf->lbal,
-			tf->lbam,
-			tf->lbah);
+	if (ap->ctl != ap->last_ctl && !(tf->flags & ATA_TFLAG_DEVICE)) {
+		tmp_tf = *tf;
+		tmp_tf.flags |= ATA_TFLAG_DEVICE;
+		tf = &tmp_tf;
 	}
-
-	if (tf->flags & ATA_TFLAG_DEVICE) {
-		iowrite8(tf->device, ioaddr->device_addr);
-		VPRINTK("device 0x%X\n", tf->device);
-	}
-
-	ata_wait_idle(ap);
+	ata_sff_tf_load(ap, tf);
 }
 
 static struct scsi_host_template via_sht = {
@@ -392,13 +356,12 @@ static struct ata_port_operations via_port_ops = {
 	.set_piomode = via_set_piomode,
 	.set_dmamode = via_set_dmamode,
 	.prereset = via_pre_reset,
-	.sff_tf_load = via_ata_tf_load,
+	.sff_tf_load = via_tf_load,
 };
 
 static struct ata_port_operations via_port_ops_noirq = {
 	.inherits = &via_port_ops,
 	.sff_data_xfer = ata_sff_data_xfer_noirq,
-	.sff_tf_load = via_ata_tf_load,
 };
 
 /**
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index ad169ffbc4cb..13c1d2af18ac 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1134,30 +1134,16 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
 	if (ap->nr_active_links == 0)
 		return 0;
 
-	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
-		/*
-		 * The port is operating in host queuing mode (EDMA).
-		 * It can accomodate a new qc if the qc protocol
-		 * is compatible with the current host queue mode.
-		 */
-		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
-			/*
-			 * The host queue (EDMA) is in NCQ mode.
-			 * If the new qc is also an NCQ command,
-			 * then allow the new qc.
-			 */
-			if (qc->tf.protocol == ATA_PROT_NCQ)
-				return 0;
-		} else {
-			/*
-			 * The host queue (EDMA) is in non-NCQ, DMA mode.
-			 * If the new qc is also a non-NCQ, DMA command,
-			 * then allow the new qc.
-			 */
-			if (qc->tf.protocol == ATA_PROT_DMA)
-				return 0;
-		}
-	}
+	/*
+	 * The port is operating in host queuing mode (EDMA) with NCQ
+	 * enabled, allow multiple NCQ commands. EDMA also allows
+	 * queueing multiple DMA commands but libata core currently
+	 * doesn't allow it.
+	 */
+	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
+	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
+		return 0;
+
 	return ATA_DEFER_PORT;
 }
 
@@ -3036,7 +3022,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 		break;
 	case chip_soc:
 		hpriv->ops = &mv_soc_ops;
-		hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0;
+		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
+			MV_HP_ERRATA_60X1C0;
 		break;
 
 	default:
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index 2ebd07f2ef81..5effec6f5458 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -3,7 +3,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/errno.h>
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 5667c2f02c51..cc5e28c8885c 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -295,6 +295,12 @@ int class_for_each_device(struct class *class, struct device *start,
 
 	if (!class)
 		return -EINVAL;
+	if (!class->p) {
+		WARN(1, "%s called for class '%s' before it was initialized",
+		     __func__, class->name);
+		return -EINVAL;
+	}
+
 	mutex_lock(&class->p->class_mutex);
 	list_for_each_entry(dev, &class->p->class_devices, node) {
 		if (start) {
@@ -344,6 +350,11 @@ struct device *class_find_device(struct class *class, struct device *start,
 
 	if (!class)
 		return NULL;
+	if (!class->p) {
+		WARN(1, "%s called for class '%s' before it was initialized",
+		     __func__, class->name);
+		return NULL;
+	}
 
 	mutex_lock(&class->p->class_mutex);
 	list_for_each_entry(dev, &class->p->class_devices, node) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 068aa1c9538c..d021c98605b3 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -53,7 +53,7 @@ static inline int device_is_not_partition(struct device *dev)
 * it is attached to. If it is not attached to a bus either, an empty
 * string will be returned.
 */
-const char *dev_driver_string(struct device *dev)
+const char *dev_driver_string(const struct device *dev)
 {
 	return dev->driver ? dev->driver->name :
 			(dev->bus ? dev->bus->name :
@@ -541,6 +541,7 @@ void device_initialize(struct device *dev)
 	spin_lock_init(&dev->devres_lock);
 	INIT_LIST_HEAD(&dev->devres_head);
 	device_init_wakeup(dev, 0);
+	device_pm_init(dev);
 	set_dev_node(dev, -1);
 }
 
@@ -843,13 +844,19 @@ int device_add(struct device *dev)
 {
 	struct device *parent = NULL;
 	struct class_interface *class_intf;
-	int error;
+	int error = -EINVAL;
 
 	dev = get_device(dev);
-	if (!dev || !strlen(dev->bus_id)) {
-		error = -EINVAL;
-		goto Done;
-	}
+	if (!dev)
+		goto done;
+
+	/* Temporarily support init_name if it is set.
+	 * It will override bus_id for now */
+	if (dev->init_name)
+		dev_set_name(dev, "%s", dev->init_name);
+
+	if (!strlen(dev->bus_id))
+		goto done;
 
 	pr_debug("device: '%s': %s\n", dev->bus_id, __func__);
 
@@ -897,9 +904,10 @@ int device_add(struct device *dev)
 	error = bus_add_device(dev);
 	if (error)
 		goto BusError;
-	error = device_pm_add(dev);
+	error = dpm_sysfs_add(dev);
 	if (error)
-		goto PMError;
+		goto DPMError;
+	device_pm_add(dev);
 	kobject_uevent(&dev->kobj, KOBJ_ADD);
 	bus_attach_device(dev);
 	if (parent)
@@ -917,10 +925,10 @@ int device_add(struct device *dev)
 			class_intf->add_dev(dev, class_intf);
 		mutex_unlock(&dev->class->p->class_mutex);
 	}
-Done:
+done:
 	put_device(dev);
 	return error;
-PMError:
+DPMError:
 	bus_remove_device(dev);
 BusError:
 	if (dev->bus)
@@ -944,7 +952,7 @@ int device_add(struct device *dev)
 	cleanup_device_parent(dev);
 	if (parent)
 		put_device(parent);
-	goto Done;
+	goto done;
 }
 
 /**
@@ -1007,6 +1015,7 @@ void device_del(struct device *dev)
 	struct class_interface *class_intf;
 
 	device_pm_remove(dev);
+	dpm_sysfs_remove(dev);
 	if (parent)
 		klist_del(&dev->knode_parent);
 	if (MAJOR(dev->devt)) {
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 2ef5acf4368b..1e2bda780e48 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -16,9 +16,6 @@
 #include <linux/string.h>
 #include "base.h"
 
-#define to_dev(node) container_of(node, struct device, driver_list)
-
-
 static struct device *next_device(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3250c5257b74..273a944d4040 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -67,20 +67,16 @@ void device_pm_unlock(void)
 * device_pm_add - add a device to the list of active devices
 * @dev: Device to be added to the list
 */
-int device_pm_add(struct device *dev)
+void device_pm_add(struct device *dev)
 {
-	int error;
-
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus",
 		 kobject_name(&dev->kobj));
 	mutex_lock(&dpm_list_mtx);
 	if (dev->parent) {
-		if (dev->parent->power.status >= DPM_SUSPENDING) {
-			dev_warn(dev, "parent %s is sleeping, will not add\n",
+		if (dev->parent->power.status >= DPM_SUSPENDING)
+			dev_warn(dev, "parent %s should not be sleeping\n",
 				 dev->parent->bus_id);
-			WARN_ON(true);
-		}
 	} else if (transition_started) {
 		/*
 		 * We refuse to register parentless devices while a PM
@@ -89,13 +85,9 @@ int device_pm_add(struct device *dev)
 		 */
 		WARN_ON(true);
 	}
-	error = dpm_sysfs_add(dev);
-	if (!error) {
-		dev->power.status = DPM_ON;
-		list_add_tail(&dev->power.entry, &dpm_list);
-	}
+
+	list_add_tail(&dev->power.entry, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
-	return error;
 }
 
 /**
@@ -110,7 +102,6 @@ void device_pm_remove(struct device *dev)
 		 dev->bus ? dev->bus->name : "No Bus",
 		 kobject_name(&dev->kobj));
 	mutex_lock(&dpm_list_mtx);
-	dpm_sysfs_remove(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 }
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index a3252c0e2887..41f51fae042f 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -1,3 +1,8 @@ | |||
1 | static inline void device_pm_init(struct device *dev) | ||
2 | { | ||
3 | dev->power.status = DPM_ON; | ||
4 | } | ||
5 | |||
1 | #ifdef CONFIG_PM_SLEEP | 6 | #ifdef CONFIG_PM_SLEEP |
2 | 7 | ||
3 | /* | 8 | /* |
@@ -11,12 +16,12 @@ static inline struct device *to_device(struct list_head *entry) | |||
11 | return container_of(entry, struct device, power.entry); | 16 | return container_of(entry, struct device, power.entry); |
12 | } | 17 | } |
13 | 18 | ||
14 | extern int device_pm_add(struct device *); | 19 | extern void device_pm_add(struct device *); |
15 | extern void device_pm_remove(struct device *); | 20 | extern void device_pm_remove(struct device *); |
16 | 21 | ||
17 | #else /* CONFIG_PM_SLEEP */ | 22 | #else /* CONFIG_PM_SLEEP */ |
18 | 23 | ||
19 | static inline int device_pm_add(struct device *dev) { return 0; } | 24 | static inline void device_pm_add(struct device *dev) {} |
20 | static inline void device_pm_remove(struct device *dev) {} | 25 | static inline void device_pm_remove(struct device *dev) {} |
21 | 26 | ||
22 | #endif | 27 | #endif |
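Annotation: the power.h hunk above shows the usual compile-out pattern: device_pm_init() is always available, while device_pm_add()/device_pm_remove() shrink to empty static inlines when CONFIG_PM_SLEEP is off, so callers never need #ifdefs. A plain-C sketch of the same pattern, with FEATURE_PM standing in for the config option:

#include <stdio.h>

#define FEATURE_PM 1	/* flip to 0 to compile the no-op stubs instead */

struct device { const char *name; int pm_status; };

static inline void device_pm_init(struct device *dev)
{
	dev->pm_status = 1;	/* mirrors dev->power.status = DPM_ON */
}

#if FEATURE_PM
static void device_pm_add(struct device *dev)
{
	printf("PM: adding info for %s\n", dev->name);
}
#else
static inline void device_pm_add(struct device *dev) { }	/* no-op stub */
#endif

int main(void)
{
	struct device d = { .name = "demo0" };

	device_pm_init(&d);
	device_pm_add(&d);	/* compiles and links either way */
	return 0;
}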
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 24b97b0bef99..d070d492e385 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -571,8 +571,8 @@ out_free: | |||
571 | list_del(&brd->brd_list); | 571 | list_del(&brd->brd_list); |
572 | brd_free(brd); | 572 | brd_free(brd); |
573 | } | 573 | } |
574 | unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); | ||
574 | 575 | ||
575 | unregister_blkdev(RAMDISK_MAJOR, "brd"); | ||
576 | return -ENOMEM; | 576 | return -ENOMEM; |
577 | } | 577 | } |
578 | 578 | ||
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index ad98dda6037d..1778e4a2c672 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -707,15 +707,15 @@ static int __init nbd_init(void) | |||
707 | 707 | ||
708 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); | 708 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); |
709 | 709 | ||
710 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
711 | if (!nbd_dev) | ||
712 | return -ENOMEM; | ||
713 | |||
714 | if (max_part < 0) { | 710 | if (max_part < 0) { |
715 | printk(KERN_CRIT "nbd: max_part must be >= 0\n"); | 711 | printk(KERN_CRIT "nbd: max_part must be >= 0\n"); |
716 | return -EINVAL; | 712 | return -EINVAL; |
717 | } | 713 | } |
718 | 714 | ||
715 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
716 | if (!nbd_dev) | ||
717 | return -ENOMEM; | ||
718 | |||
719 | part_shift = 0; | 719 | part_shift = 0; |
720 | if (max_part > 0) | 720 | if (max_part > 0) |
721 | part_shift = fls(max_part); | 721 | part_shift = fls(max_part); |
@@ -779,6 +779,7 @@ out: | |||
779 | blk_cleanup_queue(nbd_dev[i].disk->queue); | 779 | blk_cleanup_queue(nbd_dev[i].disk->queue); |
780 | put_disk(nbd_dev[i].disk); | 780 | put_disk(nbd_dev[i].disk); |
781 | } | 781 | } |
782 | kfree(nbd_dev); | ||
782 | return err; | 783 | return err; |
783 | } | 784 | } |
784 | 785 | ||
@@ -795,6 +796,7 @@ static void __exit nbd_cleanup(void) | |||
795 | } | 796 | } |
796 | } | 797 | } |
797 | unregister_blkdev(NBD_MAJOR, "nbd"); | 798 | unregister_blkdev(NBD_MAJOR, "nbd"); |
799 | kfree(nbd_dev); | ||
798 | printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR); | 800 | printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR); |
799 | } | 801 | } |
800 | 802 | ||
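Annotation: the nbd hunks above move the max_part check ahead of the kcalloc() so the early -EINVAL return no longer leaks the array, and add kfree(nbd_dev) to both the probe error path and module cleanup. A user-space sketch of the same ordering, with fls() reimplemented as a stand-in for the kernel helper (highest set bit, 1-based):

#include <stdio.h>
#include <stdlib.h>

static int fls(unsigned int x)
{
	int r = 0;

	while (x) { r++; x >>= 1; }
	return r;
}

static int demo_init(int nbds_max, int max_part)
{
	int *devs;
	int part_shift;

	if (max_part < 0) {		/* validate first: nothing to free yet */
		fprintf(stderr, "max_part must be >= 0\n");
		return -1;
	}

	devs = calloc(nbds_max, sizeof(*devs));
	if (!devs)
		return -1;

	part_shift = 0;
	if (max_part > 0)
		part_shift = fls(max_part);	/* e.g. max_part = 15 -> shift 4 */
	printf("part_shift = %d\n", part_shift);

	free(devs);			/* mirrors the added kfree(nbd_dev) */
	return 0;
}

int main(void)
{
	return demo_init(16, 15) ? EXIT_FAILURE : EXIT_SUCCESS;
}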
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 158eed4d5161..29b7a648cc6e 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <linux/types.h> | 49 | #include <linux/types.h> |
50 | #include <linux/kernel.h> | 50 | #include <linux/kernel.h> |
51 | #include <linux/kthread.h> | 51 | #include <linux/kthread.h> |
52 | #include <linux/smp_lock.h> | ||
53 | #include <linux/errno.h> | 52 | #include <linux/errno.h> |
54 | #include <linux/spinlock.h> | 53 | #include <linux/spinlock.h> |
55 | #include <linux/file.h> | 54 | #include <linux/file.h> |
@@ -2798,14 +2797,9 @@ out_mem: | |||
2798 | return ret; | 2797 | return ret; |
2799 | } | 2798 | } |
2800 | 2799 | ||
2801 | static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 2800 | static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) |
2802 | { | 2801 | { |
2803 | struct inode *inode = file->f_path.dentry->d_inode; | 2802 | struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; |
2804 | struct pktcdvd_device *pd; | ||
2805 | long ret; | ||
2806 | |||
2807 | lock_kernel(); | ||
2808 | pd = inode->i_bdev->bd_disk->private_data; | ||
2809 | 2803 | ||
2810 | VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode)); | 2804 | VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode)); |
2811 | 2805 | ||
@@ -2818,8 +2812,7 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
2818 | case CDROM_LAST_WRITTEN: | 2812 | case CDROM_LAST_WRITTEN: |
2819 | case CDROM_SEND_PACKET: | 2813 | case CDROM_SEND_PACKET: |
2820 | case SCSI_IOCTL_SEND_COMMAND: | 2814 | case SCSI_IOCTL_SEND_COMMAND: |
2821 | ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); | 2815 | return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); |
2822 | break; | ||
2823 | 2816 | ||
2824 | case CDROMEJECT: | 2817 | case CDROMEJECT: |
2825 | /* | 2818 | /* |
@@ -2828,15 +2821,14 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
2828 | */ | 2821 | */ |
2829 | if (pd->refcnt == 1) | 2822 | if (pd->refcnt == 1) |
2830 | pkt_lock_door(pd, 0); | 2823 | pkt_lock_door(pd, 0); |
2831 | ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); | 2824 | return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); |
2832 | break; | ||
2833 | 2825 | ||
2834 | default: | 2826 | default: |
2835 | VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); | 2827 | VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); |
2836 | ret = -ENOTTY; | 2828 | return -ENOTTY; |
2837 | } | 2829 | } |
2838 | unlock_kernel(); | 2830 | |
2839 | return ret; | 2831 | return 0; |
2840 | } | 2832 | } |
2841 | 2833 | ||
2842 | static int pkt_media_changed(struct gendisk *disk) | 2834 | static int pkt_media_changed(struct gendisk *disk) |
@@ -2858,7 +2850,7 @@ static struct block_device_operations pktcdvd_ops = { | |||
2858 | .owner = THIS_MODULE, | 2850 | .owner = THIS_MODULE, |
2859 | .open = pkt_open, | 2851 | .open = pkt_open, |
2860 | .release = pkt_close, | 2852 | .release = pkt_close, |
2861 | .unlocked_ioctl = pkt_ioctl, | 2853 | .ioctl = pkt_ioctl, |
2862 | .media_changed = pkt_media_changed, | 2854 | .media_changed = pkt_media_changed, |
2863 | }; | 2855 | }; |
2864 | 2856 | ||
@@ -3023,8 +3015,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd) | |||
3023 | mutex_unlock(&ctl_mutex); | 3015 | mutex_unlock(&ctl_mutex); |
3024 | } | 3016 | } |
3025 | 3017 | ||
3026 | static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, | 3018 | static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) |
3027 | unsigned long arg) | ||
3028 | { | 3019 | { |
3029 | void __user *argp = (void __user *)arg; | 3020 | void __user *argp = (void __user *)arg; |
3030 | struct pkt_ctrl_command ctrl_cmd; | 3021 | struct pkt_ctrl_command ctrl_cmd; |
@@ -3041,22 +3032,16 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, | |||
3041 | case PKT_CTRL_CMD_SETUP: | 3032 | case PKT_CTRL_CMD_SETUP: |
3042 | if (!capable(CAP_SYS_ADMIN)) | 3033 | if (!capable(CAP_SYS_ADMIN)) |
3043 | return -EPERM; | 3034 | return -EPERM; |
3044 | lock_kernel(); | ||
3045 | ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev); | 3035 | ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev); |
3046 | ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev); | 3036 | ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev); |
3047 | unlock_kernel(); | ||
3048 | break; | 3037 | break; |
3049 | case PKT_CTRL_CMD_TEARDOWN: | 3038 | case PKT_CTRL_CMD_TEARDOWN: |
3050 | if (!capable(CAP_SYS_ADMIN)) | 3039 | if (!capable(CAP_SYS_ADMIN)) |
3051 | return -EPERM; | 3040 | return -EPERM; |
3052 | lock_kernel(); | ||
3053 | ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev)); | 3041 | ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev)); |
3054 | unlock_kernel(); | ||
3055 | break; | 3042 | break; |
3056 | case PKT_CTRL_CMD_STATUS: | 3043 | case PKT_CTRL_CMD_STATUS: |
3057 | lock_kernel(); | ||
3058 | pkt_get_status(&ctrl_cmd); | 3044 | pkt_get_status(&ctrl_cmd); |
3059 | unlock_kernel(); | ||
3060 | break; | 3045 | break; |
3061 | default: | 3046 | default: |
3062 | return -ENOTTY; | 3047 | return -ENOTTY; |
@@ -3069,7 +3054,7 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, | |||
3069 | 3054 | ||
3070 | 3055 | ||
3071 | static const struct file_operations pkt_ctl_fops = { | 3056 | static const struct file_operations pkt_ctl_fops = { |
3072 | .unlocked_ioctl = pkt_ctl_ioctl, | 3057 | .ioctl = pkt_ctl_ioctl, |
3073 | .owner = THIS_MODULE, | 3058 | .owner = THIS_MODULE, |
3074 | }; | 3059 | }; |
3075 | 3060 | ||
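Annotation: the pktcdvd hunks above revert both ioctl handlers from the unlocked_ioctl prototype (long fn(struct file *, unsigned int, unsigned long), taking and dropping the BKL itself) back to the classic .ioctl prototype, presumably because the old-style method is invoked with the BKL already held, which is why the explicit lock_kernel()/unlock_kernel() pairs disappear. The dispatch shape stays the same: handle or forward known commands, return -ENOTTY for anything else. A tiny user-space sketch of that shape, with invented command numbers:

#include <errno.h>
#include <stdio.h>

#define CMD_STATUS	1
#define CMD_EJECT	2

static int demo_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case CMD_STATUS:
		return 0;		/* handled locally */
	case CMD_EJECT:
		return 0;		/* would be forwarded, e.g. to blkdev_ioctl() */
	default:
		return -ENOTTY;		/* unknown command */
	}
}

int main(void)
{
	printf("CMD_STATUS -> %d\n", demo_ioctl(CMD_STATUS));
	printf("unknown    -> %d (expect -ENOTTY)\n", demo_ioctl(99));
	return 0;
}

Returning -ENOTTY for the default case lets callers distinguish "this device does not support that ioctl" from a genuine failure of a supported one.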
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index a235ca787465..7cb4029a5375 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig | |||
@@ -3,8 +3,8 @@ menu "Bluetooth device drivers" | |||
3 | depends on BT | 3 | depends on BT |
4 | 4 | ||
5 | config BT_HCIUSB | 5 | config BT_HCIUSB |
6 | tristate "HCI USB driver" | 6 | tristate "HCI USB driver (old version)" |
7 | depends on USB | 7 | depends on USB && BT_HCIBTUSB=n |
8 | help | 8 | help |
9 | Bluetooth HCI USB driver. | 9 | Bluetooth HCI USB driver. |
10 | This driver is required if you want to use Bluetooth devices with | 10 | This driver is required if you want to use Bluetooth devices with |
@@ -23,15 +23,13 @@ config BT_HCIUSB_SCO | |||
23 | Say Y here to compile support for SCO over HCI USB. | 23 | Say Y here to compile support for SCO over HCI USB. |
24 | 24 | ||
25 | config BT_HCIBTUSB | 25 | config BT_HCIBTUSB |
26 | tristate "HCI USB driver (alternate version)" | 26 | tristate "HCI USB driver" |
27 | depends on USB && EXPERIMENTAL && BT_HCIUSB=n | 27 | depends on USB |
28 | help | 28 | help |
29 | Bluetooth HCI USB driver. | 29 | Bluetooth HCI USB driver. |
30 | This driver is required if you want to use Bluetooth devices with | 30 | This driver is required if you want to use Bluetooth devices with |
31 | USB interface. | 31 | USB interface. |
32 | 32 | ||
33 | This driver is still experimental and has no SCO support. | ||
34 | |||
35 | Say Y here to compile support for Bluetooth USB devices into the | 33 | Say Y here to compile support for Bluetooth USB devices into the |
36 | kernel or say M to compile it as module (btusb). | 34 | kernel or say M to compile it as module (btusb). |
37 | 35 | ||
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 593b7c595038..27058477cc8b 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c | |||
@@ -60,7 +60,7 @@ | |||
60 | /* ======================== Module parameters ======================== */ | 60 | /* ======================== Module parameters ======================== */ |
61 | 61 | ||
62 | 62 | ||
63 | MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>, Jose Orlando Pereira <jop@di.uminho.pt>"); | 63 | MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); |
64 | MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card"); | 64 | MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card"); |
65 | MODULE_LICENSE("GPL"); | 65 | MODULE_LICENSE("GPL"); |
66 | MODULE_FIRMWARE("BT3CPCC.bin"); | 66 | MODULE_FIRMWARE("BT3CPCC.bin"); |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 95ae9ba5661e..6a010681ecf3 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * | 2 | * |
3 | * Generic Bluetooth USB driver | 3 | * Generic Bluetooth USB driver |
4 | * | 4 | * |
5 | * Copyright (C) 2005-2007 Marcel Holtmann <marcel@holtmann.org> | 5 | * Copyright (C) 2005-2008 Marcel Holtmann <marcel@holtmann.org> |
6 | * | 6 | * |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
@@ -41,7 +41,7 @@ | |||
41 | #define BT_DBG(D...) | 41 | #define BT_DBG(D...) |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | #define VERSION "0.2" | 44 | #define VERSION "0.3" |
45 | 45 | ||
46 | static int ignore_dga; | 46 | static int ignore_dga; |
47 | static int ignore_csr; | 47 | static int ignore_csr; |
@@ -160,12 +160,16 @@ static struct usb_device_id blacklist_table[] = { | |||
160 | { } /* Terminating entry */ | 160 | { } /* Terminating entry */ |
161 | }; | 161 | }; |
162 | 162 | ||
163 | #define BTUSB_MAX_ISOC_FRAMES 10 | ||
164 | |||
163 | #define BTUSB_INTR_RUNNING 0 | 165 | #define BTUSB_INTR_RUNNING 0 |
164 | #define BTUSB_BULK_RUNNING 1 | 166 | #define BTUSB_BULK_RUNNING 1 |
167 | #define BTUSB_ISOC_RUNNING 2 | ||
165 | 168 | ||
166 | struct btusb_data { | 169 | struct btusb_data { |
167 | struct hci_dev *hdev; | 170 | struct hci_dev *hdev; |
168 | struct usb_device *udev; | 171 | struct usb_device *udev; |
172 | struct usb_interface *isoc; | ||
169 | 173 | ||
170 | spinlock_t lock; | 174 | spinlock_t lock; |
171 | 175 | ||
@@ -176,10 +180,15 @@ struct btusb_data { | |||
176 | struct usb_anchor tx_anchor; | 180 | struct usb_anchor tx_anchor; |
177 | struct usb_anchor intr_anchor; | 181 | struct usb_anchor intr_anchor; |
178 | struct usb_anchor bulk_anchor; | 182 | struct usb_anchor bulk_anchor; |
183 | struct usb_anchor isoc_anchor; | ||
179 | 184 | ||
180 | struct usb_endpoint_descriptor *intr_ep; | 185 | struct usb_endpoint_descriptor *intr_ep; |
181 | struct usb_endpoint_descriptor *bulk_tx_ep; | 186 | struct usb_endpoint_descriptor *bulk_tx_ep; |
182 | struct usb_endpoint_descriptor *bulk_rx_ep; | 187 | struct usb_endpoint_descriptor *bulk_rx_ep; |
188 | struct usb_endpoint_descriptor *isoc_tx_ep; | ||
189 | struct usb_endpoint_descriptor *isoc_rx_ep; | ||
190 | |||
191 | int isoc_altsetting; | ||
183 | }; | 192 | }; |
184 | 193 | ||
185 | static void btusb_intr_complete(struct urb *urb) | 194 | static void btusb_intr_complete(struct urb *urb) |
@@ -195,6 +204,8 @@ static void btusb_intr_complete(struct urb *urb) | |||
195 | return; | 204 | return; |
196 | 205 | ||
197 | if (urb->status == 0) { | 206 | if (urb->status == 0) { |
207 | hdev->stat.byte_rx += urb->actual_length; | ||
208 | |||
198 | if (hci_recv_fragment(hdev, HCI_EVENT_PKT, | 209 | if (hci_recv_fragment(hdev, HCI_EVENT_PKT, |
199 | urb->transfer_buffer, | 210 | urb->transfer_buffer, |
200 | urb->actual_length) < 0) { | 211 | urb->actual_length) < 0) { |
@@ -216,7 +227,7 @@ static void btusb_intr_complete(struct urb *urb) | |||
216 | } | 227 | } |
217 | } | 228 | } |
218 | 229 | ||
219 | static inline int btusb_submit_intr_urb(struct hci_dev *hdev) | 230 | static int btusb_submit_intr_urb(struct hci_dev *hdev) |
220 | { | 231 | { |
221 | struct btusb_data *data = hdev->driver_data; | 232 | struct btusb_data *data = hdev->driver_data; |
222 | struct urb *urb; | 233 | struct urb *urb; |
@@ -226,6 +237,9 @@ static inline int btusb_submit_intr_urb(struct hci_dev *hdev) | |||
226 | 237 | ||
227 | BT_DBG("%s", hdev->name); | 238 | BT_DBG("%s", hdev->name); |
228 | 239 | ||
240 | if (!data->intr_ep) | ||
241 | return -ENODEV; | ||
242 | |||
229 | urb = usb_alloc_urb(0, GFP_ATOMIC); | 243 | urb = usb_alloc_urb(0, GFP_ATOMIC); |
230 | if (!urb) | 244 | if (!urb) |
231 | return -ENOMEM; | 245 | return -ENOMEM; |
@@ -274,6 +288,8 @@ static void btusb_bulk_complete(struct urb *urb) | |||
274 | return; | 288 | return; |
275 | 289 | ||
276 | if (urb->status == 0) { | 290 | if (urb->status == 0) { |
291 | hdev->stat.byte_rx += urb->actual_length; | ||
292 | |||
277 | if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, | 293 | if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, |
278 | urb->transfer_buffer, | 294 | urb->transfer_buffer, |
279 | urb->actual_length) < 0) { | 295 | urb->actual_length) < 0) { |
@@ -295,7 +311,7 @@ static void btusb_bulk_complete(struct urb *urb) | |||
295 | } | 311 | } |
296 | } | 312 | } |
297 | 313 | ||
298 | static inline int btusb_submit_bulk_urb(struct hci_dev *hdev) | 314 | static int btusb_submit_bulk_urb(struct hci_dev *hdev) |
299 | { | 315 | { |
300 | struct btusb_data *data = hdev->driver_data; | 316 | struct btusb_data *data = hdev->driver_data; |
301 | struct urb *urb; | 317 | struct urb *urb; |
@@ -305,6 +321,9 @@ static inline int btusb_submit_bulk_urb(struct hci_dev *hdev) | |||
305 | 321 | ||
306 | BT_DBG("%s", hdev->name); | 322 | BT_DBG("%s", hdev->name); |
307 | 323 | ||
324 | if (!data->bulk_rx_ep) | ||
325 | return -ENODEV; | ||
326 | |||
308 | urb = usb_alloc_urb(0, GFP_KERNEL); | 327 | urb = usb_alloc_urb(0, GFP_KERNEL); |
309 | if (!urb) | 328 | if (!urb) |
310 | return -ENOMEM; | 329 | return -ENOMEM; |
@@ -339,6 +358,127 @@ static inline int btusb_submit_bulk_urb(struct hci_dev *hdev) | |||
339 | return err; | 358 | return err; |
340 | } | 359 | } |
341 | 360 | ||
361 | static void btusb_isoc_complete(struct urb *urb) | ||
362 | { | ||
363 | struct hci_dev *hdev = urb->context; | ||
364 | struct btusb_data *data = hdev->driver_data; | ||
365 | int i, err; | ||
366 | |||
367 | BT_DBG("%s urb %p status %d count %d", hdev->name, | ||
368 | urb, urb->status, urb->actual_length); | ||
369 | |||
370 | if (!test_bit(HCI_RUNNING, &hdev->flags)) | ||
371 | return; | ||
372 | |||
373 | if (urb->status == 0) { | ||
374 | for (i = 0; i < urb->number_of_packets; i++) { | ||
375 | unsigned int offset = urb->iso_frame_desc[i].offset; | ||
376 | unsigned int length = urb->iso_frame_desc[i].actual_length; | ||
377 | |||
378 | if (urb->iso_frame_desc[i].status) | ||
379 | continue; | ||
380 | |||
381 | hdev->stat.byte_rx += length; | ||
382 | |||
383 | if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, | ||
384 | urb->transfer_buffer + offset, | ||
385 | length) < 0) { | ||
386 | BT_ERR("%s corrupted SCO packet", hdev->name); | ||
387 | hdev->stat.err_rx++; | ||
388 | } | ||
389 | } | ||
390 | } | ||
391 | |||
392 | if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) | ||
393 | return; | ||
394 | |||
395 | usb_anchor_urb(urb, &data->isoc_anchor); | ||
396 | |||
397 | err = usb_submit_urb(urb, GFP_ATOMIC); | ||
398 | if (err < 0) { | ||
399 | BT_ERR("%s urb %p failed to resubmit (%d)", | ||
400 | hdev->name, urb, -err); | ||
401 | usb_unanchor_urb(urb); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu) | ||
406 | { | ||
407 | int i, offset = 0; | ||
408 | |||
409 | BT_DBG("len %d mtu %d", len, mtu); | ||
410 | |||
411 | for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; | ||
412 | i++, offset += mtu, len -= mtu) { | ||
413 | urb->iso_frame_desc[i].offset = offset; | ||
414 | urb->iso_frame_desc[i].length = mtu; | ||
415 | } | ||
416 | |||
417 | if (len && i < BTUSB_MAX_ISOC_FRAMES) { | ||
418 | urb->iso_frame_desc[i].offset = offset; | ||
419 | urb->iso_frame_desc[i].length = len; | ||
420 | i++; | ||
421 | } | ||
422 | |||
423 | urb->number_of_packets = i; | ||
424 | } | ||
425 | |||
426 | static int btusb_submit_isoc_urb(struct hci_dev *hdev) | ||
427 | { | ||
428 | struct btusb_data *data = hdev->driver_data; | ||
429 | struct urb *urb; | ||
430 | unsigned char *buf; | ||
431 | unsigned int pipe; | ||
432 | int err, size; | ||
433 | |||
434 | BT_DBG("%s", hdev->name); | ||
435 | |||
436 | if (!data->isoc_rx_ep) | ||
437 | return -ENODEV; | ||
438 | |||
439 | urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_KERNEL); | ||
440 | if (!urb) | ||
441 | return -ENOMEM; | ||
442 | |||
443 | size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * | ||
444 | BTUSB_MAX_ISOC_FRAMES; | ||
445 | |||
446 | buf = kmalloc(size, GFP_KERNEL); | ||
447 | if (!buf) { | ||
448 | usb_free_urb(urb); | ||
449 | return -ENOMEM; | ||
450 | } | ||
451 | |||
452 | pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); | ||
453 | |||
454 | urb->dev = data->udev; | ||
455 | urb->pipe = pipe; | ||
456 | urb->context = hdev; | ||
457 | urb->complete = btusb_isoc_complete; | ||
458 | urb->interval = data->isoc_rx_ep->bInterval; | ||
459 | |||
460 | urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; | ||
461 | urb->transfer_buffer = buf; | ||
462 | urb->transfer_buffer_length = size; | ||
463 | |||
464 | __fill_isoc_descriptor(urb, size, | ||
465 | le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); | ||
466 | |||
467 | usb_anchor_urb(urb, &data->isoc_anchor); | ||
468 | |||
469 | err = usb_submit_urb(urb, GFP_KERNEL); | ||
470 | if (err < 0) { | ||
471 | BT_ERR("%s urb %p submission failed (%d)", | ||
472 | hdev->name, urb, -err); | ||
473 | usb_unanchor_urb(urb); | ||
474 | kfree(buf); | ||
475 | } | ||
476 | |||
477 | usb_free_urb(urb); | ||
478 | |||
479 | return err; | ||
480 | } | ||
481 | |||
342 | static void btusb_tx_complete(struct urb *urb) | 482 | static void btusb_tx_complete(struct urb *urb) |
343 | { | 483 | { |
344 | struct sk_buff *skb = urb->context; | 484 | struct sk_buff *skb = urb->context; |
@@ -392,6 +532,9 @@ static int btusb_close(struct hci_dev *hdev) | |||
392 | if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) | 532 | if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) |
393 | return 0; | 533 | return 0; |
394 | 534 | ||
535 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); | ||
536 | usb_kill_anchored_urbs(&data->intr_anchor); | ||
537 | |||
395 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); | 538 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); |
396 | usb_kill_anchored_urbs(&data->bulk_anchor); | 539 | usb_kill_anchored_urbs(&data->bulk_anchor); |
397 | 540 | ||
@@ -453,6 +596,9 @@ static int btusb_send_frame(struct sk_buff *skb) | |||
453 | break; | 596 | break; |
454 | 597 | ||
455 | case HCI_ACLDATA_PKT: | 598 | case HCI_ACLDATA_PKT: |
599 | if (!data->bulk_tx_ep || hdev->conn_hash.acl_num < 1) | ||
600 | return -ENODEV; | ||
601 | |||
456 | urb = usb_alloc_urb(0, GFP_ATOMIC); | 602 | urb = usb_alloc_urb(0, GFP_ATOMIC); |
457 | if (!urb) | 603 | if (!urb) |
458 | return -ENOMEM; | 604 | return -ENOMEM; |
@@ -467,9 +613,31 @@ static int btusb_send_frame(struct sk_buff *skb) | |||
467 | break; | 613 | break; |
468 | 614 | ||
469 | case HCI_SCODATA_PKT: | 615 | case HCI_SCODATA_PKT: |
616 | if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1) | ||
617 | return -ENODEV; | ||
618 | |||
619 | urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); | ||
620 | if (!urb) | ||
621 | return -ENOMEM; | ||
622 | |||
623 | pipe = usb_sndisocpipe(data->udev, | ||
624 | data->isoc_tx_ep->bEndpointAddress); | ||
625 | |||
626 | urb->dev = data->udev; | ||
627 | urb->pipe = pipe; | ||
628 | urb->context = skb; | ||
629 | urb->complete = btusb_tx_complete; | ||
630 | urb->interval = data->isoc_tx_ep->bInterval; | ||
631 | |||
632 | urb->transfer_flags = URB_ISO_ASAP; | ||
633 | urb->transfer_buffer = skb->data; | ||
634 | urb->transfer_buffer_length = skb->len; | ||
635 | |||
636 | __fill_isoc_descriptor(urb, skb->len, | ||
637 | le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); | ||
638 | |||
470 | hdev->stat.sco_tx++; | 639 | hdev->stat.sco_tx++; |
471 | kfree_skb(skb); | 640 | break; |
472 | return 0; | ||
473 | 641 | ||
474 | default: | 642 | default: |
475 | return -EILSEQ; | 643 | return -EILSEQ; |
@@ -508,22 +676,86 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt) | |||
508 | schedule_work(&data->work); | 676 | schedule_work(&data->work); |
509 | } | 677 | } |
510 | 678 | ||
679 | static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) | ||
680 | { | ||
681 | struct btusb_data *data = hdev->driver_data; | ||
682 | struct usb_interface *intf = data->isoc; | ||
683 | struct usb_endpoint_descriptor *ep_desc; | ||
684 | int i, err; | ||
685 | |||
686 | if (!data->isoc) | ||
687 | return -ENODEV; | ||
688 | |||
689 | err = usb_set_interface(data->udev, 1, altsetting); | ||
690 | if (err < 0) { | ||
691 | BT_ERR("%s setting interface failed (%d)", hdev->name, -err); | ||
692 | return err; | ||
693 | } | ||
694 | |||
695 | data->isoc_altsetting = altsetting; | ||
696 | |||
697 | data->isoc_tx_ep = NULL; | ||
698 | data->isoc_rx_ep = NULL; | ||
699 | |||
700 | for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { | ||
701 | ep_desc = &intf->cur_altsetting->endpoint[i].desc; | ||
702 | |||
703 | if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { | ||
704 | data->isoc_tx_ep = ep_desc; | ||
705 | continue; | ||
706 | } | ||
707 | |||
708 | if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { | ||
709 | data->isoc_rx_ep = ep_desc; | ||
710 | continue; | ||
711 | } | ||
712 | } | ||
713 | |||
714 | if (!data->isoc_tx_ep || !data->isoc_rx_ep) { | ||
715 | BT_ERR("%s invalid SCO descriptors", hdev->name); | ||
716 | return -ENODEV; | ||
717 | } | ||
718 | |||
719 | return 0; | ||
720 | } | ||
721 | |||
511 | static void btusb_work(struct work_struct *work) | 722 | static void btusb_work(struct work_struct *work) |
512 | { | 723 | { |
513 | struct btusb_data *data = container_of(work, struct btusb_data, work); | 724 | struct btusb_data *data = container_of(work, struct btusb_data, work); |
514 | struct hci_dev *hdev = data->hdev; | 725 | struct hci_dev *hdev = data->hdev; |
515 | 726 | ||
516 | if (hdev->conn_hash.acl_num == 0) { | 727 | if (hdev->conn_hash.acl_num > 0) { |
728 | if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) { | ||
729 | if (btusb_submit_bulk_urb(hdev) < 0) | ||
730 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); | ||
731 | else | ||
732 | btusb_submit_bulk_urb(hdev); | ||
733 | } | ||
734 | } else { | ||
517 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); | 735 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); |
518 | usb_kill_anchored_urbs(&data->bulk_anchor); | 736 | usb_kill_anchored_urbs(&data->bulk_anchor); |
519 | return; | ||
520 | } | 737 | } |
521 | 738 | ||
522 | if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) { | 739 | if (hdev->conn_hash.sco_num > 0) { |
523 | if (btusb_submit_bulk_urb(hdev) < 0) | 740 | if (data->isoc_altsetting != 2) { |
524 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); | 741 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); |
525 | else | 742 | usb_kill_anchored_urbs(&data->isoc_anchor); |
526 | btusb_submit_bulk_urb(hdev); | 743 | |
744 | if (__set_isoc_interface(hdev, 2) < 0) | ||
745 | return; | ||
746 | } | ||
747 | |||
748 | if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { | ||
749 | if (btusb_submit_isoc_urb(hdev) < 0) | ||
750 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); | ||
751 | else | ||
752 | btusb_submit_isoc_urb(hdev); | ||
753 | } | ||
754 | } else { | ||
755 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); | ||
756 | usb_kill_anchored_urbs(&data->isoc_anchor); | ||
757 | |||
758 | __set_isoc_interface(hdev, 0); | ||
527 | } | 759 | } |
528 | } | 760 | } |
529 | 761 | ||
@@ -597,6 +829,7 @@ static int btusb_probe(struct usb_interface *intf, | |||
597 | init_usb_anchor(&data->tx_anchor); | 829 | init_usb_anchor(&data->tx_anchor); |
598 | init_usb_anchor(&data->intr_anchor); | 830 | init_usb_anchor(&data->intr_anchor); |
599 | init_usb_anchor(&data->bulk_anchor); | 831 | init_usb_anchor(&data->bulk_anchor); |
832 | init_usb_anchor(&data->isoc_anchor); | ||
600 | 833 | ||
601 | hdev = hci_alloc_dev(); | 834 | hdev = hci_alloc_dev(); |
602 | if (!hdev) { | 835 | if (!hdev) { |
@@ -620,6 +853,9 @@ static int btusb_probe(struct usb_interface *intf, | |||
620 | 853 | ||
621 | hdev->owner = THIS_MODULE; | 854 | hdev->owner = THIS_MODULE; |
622 | 855 | ||
856 | /* interface numbers are hardcoded in the spec */ | ||
857 | data->isoc = usb_ifnum_to_if(data->udev, 1); | ||
858 | |||
623 | if (reset || id->driver_info & BTUSB_RESET) | 859 | if (reset || id->driver_info & BTUSB_RESET) |
624 | set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); | 860 | set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); |
625 | 861 | ||
@@ -628,11 +864,16 @@ static int btusb_probe(struct usb_interface *intf, | |||
628 | set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); | 864 | set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); |
629 | } | 865 | } |
630 | 866 | ||
867 | if (id->driver_info & BTUSB_BROKEN_ISOC) | ||
868 | data->isoc = NULL; | ||
869 | |||
631 | if (id->driver_info & BTUSB_SNIFFER) { | 870 | if (id->driver_info & BTUSB_SNIFFER) { |
632 | struct usb_device *udev = interface_to_usbdev(intf); | 871 | struct usb_device *udev = data->udev; |
633 | 872 | ||
634 | if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) | 873 | if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) |
635 | set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); | 874 | set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); |
875 | |||
876 | data->isoc = NULL; | ||
636 | } | 877 | } |
637 | 878 | ||
638 | if (id->driver_info & BTUSB_BCM92035) { | 879 | if (id->driver_info & BTUSB_BCM92035) { |
@@ -646,6 +887,16 @@ static int btusb_probe(struct usb_interface *intf, | |||
646 | } | 887 | } |
647 | } | 888 | } |
648 | 889 | ||
890 | if (data->isoc) { | ||
891 | err = usb_driver_claim_interface(&btusb_driver, | ||
892 | data->isoc, NULL); | ||
893 | if (err < 0) { | ||
894 | hci_free_dev(hdev); | ||
895 | kfree(data); | ||
896 | return err; | ||
897 | } | ||
898 | } | ||
899 | |||
649 | err = hci_register_dev(hdev); | 900 | err = hci_register_dev(hdev); |
650 | if (err < 0) { | 901 | if (err < 0) { |
651 | hci_free_dev(hdev); | 902 | hci_free_dev(hdev); |
@@ -670,6 +921,9 @@ static void btusb_disconnect(struct usb_interface *intf) | |||
670 | 921 | ||
671 | hdev = data->hdev; | 922 | hdev = data->hdev; |
672 | 923 | ||
924 | if (data->isoc) | ||
925 | usb_driver_release_interface(&btusb_driver, data->isoc); | ||
926 | |||
673 | usb_set_intfdata(intf, NULL); | 927 | usb_set_intfdata(intf, NULL); |
674 | 928 | ||
675 | hci_unregister_dev(hdev); | 929 | hci_unregister_dev(hdev); |
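Annotation: the core of the new SCO support in btusb.c above is __fill_isoc_descriptor(), which carves a transfer into at most BTUSB_MAX_ISOC_FRAMES full-sized isochronous frames plus one shorter tail frame. A self-contained user-space restatement of that arithmetic (toy struct, not the kernel URB):

#include <stdio.h>

#define MAX_FRAMES 10	/* stands in for BTUSB_MAX_ISOC_FRAMES */

struct frame { int offset; int length; };

static int fill_frames(struct frame *f, int len, int mtu)
{
	int i, offset = 0;

	for (i = 0; i < MAX_FRAMES && len >= mtu; i++, offset += mtu, len -= mtu) {
		f[i].offset = offset;
		f[i].length = mtu;
	}

	if (len && i < MAX_FRAMES) {		/* short tail frame */
		f[i].offset = offset;
		f[i].length = len;
		i++;
	}

	return i;				/* becomes urb->number_of_packets */
}

int main(void)
{
	struct frame f[MAX_FRAMES];
	int n = fill_frames(f, 200, 64);	/* 3 full frames + one 8-byte tail */

	for (int i = 0; i < n; i++)
		printf("frame %d: offset %d length %d\n", i, f[i].offset, f[i].length);
	return 0;
}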
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 69df187d74ce..8dfcf77cb717 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c | |||
@@ -577,7 +577,7 @@ module_exit(hci_uart_exit); | |||
577 | module_param(reset, bool, 0644); | 577 | module_param(reset, bool, 0644); |
578 | MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); | 578 | MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); |
579 | 579 | ||
580 | MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); | 580 | MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); |
581 | MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION); | 581 | MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION); |
582 | MODULE_VERSION(VERSION); | 582 | MODULE_VERSION(VERSION); |
583 | MODULE_LICENSE("GPL"); | 583 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c index e397572bf574..3c453924f838 100644 --- a/drivers/bluetooth/hci_usb.c +++ b/drivers/bluetooth/hci_usb.c | |||
@@ -1130,7 +1130,7 @@ module_param(isoc, int, 0644); | |||
1130 | MODULE_PARM_DESC(isoc, "Set isochronous transfers for SCO over HCI support"); | 1130 | MODULE_PARM_DESC(isoc, "Set isochronous transfers for SCO over HCI support"); |
1131 | #endif | 1131 | #endif |
1132 | 1132 | ||
1133 | MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); | 1133 | MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); |
1134 | MODULE_DESCRIPTION("Bluetooth HCI USB driver ver " VERSION); | 1134 | MODULE_DESCRIPTION("Bluetooth HCI USB driver ver " VERSION); |
1135 | MODULE_VERSION(VERSION); | 1135 | MODULE_VERSION(VERSION); |
1136 | MODULE_LICENSE("GPL"); | 1136 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index d97700aa54a9..7320a71b6368 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c | |||
@@ -377,7 +377,7 @@ module_exit(vhci_exit); | |||
377 | module_param(minor, int, 0444); | 377 | module_param(minor, int, 0444); |
378 | MODULE_PARM_DESC(minor, "Miscellaneous minor device number"); | 378 | MODULE_PARM_DESC(minor, "Miscellaneous minor device number"); |
379 | 379 | ||
380 | MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); | 380 | MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); |
381 | MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); | 381 | MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); |
382 | MODULE_VERSION(VERSION); | 382 | MODULE_VERSION(VERSION); |
383 | MODULE_LICENSE("GPL"); | 383 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index d9d1b65d206c..74031de517e6 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -408,7 +408,6 @@ int register_cdrom(struct cdrom_device_info *cdi) | |||
408 | ENSURE(get_last_session, CDC_MULTI_SESSION); | 408 | ENSURE(get_last_session, CDC_MULTI_SESSION); |
409 | ENSURE(get_mcn, CDC_MCN); | 409 | ENSURE(get_mcn, CDC_MCN); |
410 | ENSURE(reset, CDC_RESET); | 410 | ENSURE(reset, CDC_RESET); |
411 | ENSURE(audio_ioctl, CDC_PLAY_AUDIO); | ||
412 | ENSURE(generic_packet, CDC_GENERIC_PACKET); | 411 | ENSURE(generic_packet, CDC_GENERIC_PACKET); |
413 | cdi->mc_flags = 0; | 412 | cdi->mc_flags = 0; |
414 | cdo->n_minors = 0; | 413 | cdo->n_minors = 0; |
@@ -2506,8 +2505,6 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi, | |||
2506 | 2505 | ||
2507 | /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ | 2506 | /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ |
2508 | 2507 | ||
2509 | if (!CDROM_CAN(CDC_PLAY_AUDIO)) | ||
2510 | return -ENOSYS; | ||
2511 | if (copy_from_user(&q, argp, sizeof(q))) | 2508 | if (copy_from_user(&q, argp, sizeof(q))) |
2512 | return -EFAULT; | 2509 | return -EFAULT; |
2513 | 2510 | ||
@@ -2538,8 +2535,6 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi, | |||
2538 | 2535 | ||
2539 | /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ | 2536 | /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ |
2540 | 2537 | ||
2541 | if (!CDROM_CAN(CDC_PLAY_AUDIO)) | ||
2542 | return -ENOSYS; | ||
2543 | if (copy_from_user(&header, argp, sizeof(header))) | 2538 | if (copy_from_user(&header, argp, sizeof(header))) |
2544 | return -EFAULT; | 2539 | return -EFAULT; |
2545 | 2540 | ||
@@ -2562,8 +2557,6 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi, | |||
2562 | 2557 | ||
2563 | /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ | 2558 | /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ |
2564 | 2559 | ||
2565 | if (!CDROM_CAN(CDC_PLAY_AUDIO)) | ||
2566 | return -ENOSYS; | ||
2567 | if (copy_from_user(&entry, argp, sizeof(entry))) | 2560 | if (copy_from_user(&entry, argp, sizeof(entry))) |
2568 | return -EFAULT; | 2561 | return -EFAULT; |
2569 | 2562 | ||
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 1e0455bd6df9..1231d95aa695 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c | |||
@@ -471,6 +471,12 @@ cleanup_sense_final: | |||
471 | return err; | 471 | return err; |
472 | } | 472 | } |
473 | 473 | ||
474 | static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, | ||
475 | void *arg) | ||
476 | { | ||
477 | return -EINVAL; | ||
478 | } | ||
479 | |||
474 | static struct cdrom_device_ops gdrom_ops = { | 480 | static struct cdrom_device_ops gdrom_ops = { |
475 | .open = gdrom_open, | 481 | .open = gdrom_open, |
476 | .release = gdrom_release, | 482 | .release = gdrom_release, |
@@ -478,6 +484,7 @@ static struct cdrom_device_ops gdrom_ops = { | |||
478 | .media_changed = gdrom_mediachanged, | 484 | .media_changed = gdrom_mediachanged, |
479 | .get_last_session = gdrom_get_last_session, | 485 | .get_last_session = gdrom_get_last_session, |
480 | .reset = gdrom_hardreset, | 486 | .reset = gdrom_hardreset, |
487 | .audio_ioctl = gdrom_audio_ioctl, | ||
481 | .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | | 488 | .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | |
482 | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, | 489 | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, |
483 | .n_minors = 1, | 490 | .n_minors = 1, |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 9d0dfe6e0d63..031e0e1a1a3b 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -550,12 +550,19 @@ return_complete: | |||
550 | } | 550 | } |
551 | } | 551 | } |
552 | 552 | ||
553 | static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, | ||
554 | void *arg) | ||
555 | { | ||
556 | return -EINVAL; | ||
557 | } | ||
558 | |||
553 | static struct cdrom_device_ops viocd_dops = { | 559 | static struct cdrom_device_ops viocd_dops = { |
554 | .open = viocd_open, | 560 | .open = viocd_open, |
555 | .release = viocd_release, | 561 | .release = viocd_release, |
556 | .media_changed = viocd_media_changed, | 562 | .media_changed = viocd_media_changed, |
557 | .lock_door = viocd_lock_door, | 563 | .lock_door = viocd_lock_door, |
558 | .generic_packet = viocd_packet, | 564 | .generic_packet = viocd_packet, |
565 | .audio_ioctl = viocd_audio_ioctl, | ||
559 | .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM | 566 | .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM |
560 | }; | 567 | }; |
561 | 568 | ||
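Annotation: the cdrom hunks above drop the core-side guards (the ENSURE() line for audio_ioctl and the CDROM_CAN(CDC_PLAY_AUDIO) checks), and in exchange gdrom and viocd gain audio_ioctl stubs that simply return -EINVAL, so the core can call the op unconditionally. A plain-C sketch of the two guarding styles this trades between, with invented structures:

#include <errno.h>
#include <stdio.h>

struct demo_ops {
	int (*audio_ioctl)(unsigned int cmd);
};

static int stub_audio_ioctl(unsigned int cmd)
{
	(void)cmd;
	return -EINVAL;			/* like the gdrom/viocd stubs */
}

static int core_call(const struct demo_ops *ops, unsigned int cmd)
{
	/* Old style: the core guards the optional callback itself. */
	if (!ops->audio_ioctl)
		return -ENOSYS;
	/* New style relies on every driver providing something callable. */
	return ops->audio_ioctl(cmd);
}

int main(void)
{
	struct demo_ops with_stub = { .audio_ioctl = stub_audio_ioctl };
	struct demo_ops without   = { .audio_ioctl = NULL };

	printf("stub driver -> %d (expect -EINVAL)\n", core_call(&with_stub, 1));
	printf("no callback -> %d (expect -ENOSYS)\n", core_call(&without, 1));
	return 0;
}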
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 81e14bea54bd..4bada0e8b812 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h | |||
@@ -148,6 +148,9 @@ struct agp_bridge_data { | |||
148 | char minor_version; | 148 | char minor_version; |
149 | struct list_head list; | 149 | struct list_head list; |
150 | u32 apbase_config; | 150 | u32 apbase_config; |
151 | /* list of agp_memory mapped to the aperture */ | ||
152 | struct list_head mapped_list; | ||
153 | spinlock_t mapped_lock; | ||
151 | }; | 154 | }; |
152 | 155 | ||
153 | #define KB(x) ((x) * 1024) | 156 | #define KB(x) ((x) * 1024) |
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index 1ffb381130c3..31dcd9142d54 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c | |||
@@ -110,7 +110,8 @@ static int ali_configure(void) | |||
110 | 110 | ||
111 | nlvm_addr+= agp_bridge->gart_bus_addr; | 111 | nlvm_addr+= agp_bridge->gart_bus_addr; |
112 | nlvm_addr|=(agp_bridge->gart_bus_addr>>12); | 112 | nlvm_addr|=(agp_bridge->gart_bus_addr>>12); |
113 | printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr); | 113 | dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n", |
114 | nlvm_addr); | ||
114 | } | 115 | } |
115 | #endif | 116 | #endif |
116 | 117 | ||
@@ -315,8 +316,8 @@ static int __devinit agp_ali_probe(struct pci_dev *pdev, | |||
315 | goto found; | 316 | goto found; |
316 | } | 317 | } |
317 | 318 | ||
318 | printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n", | 319 | dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x]\n", |
319 | pdev->device); | 320 | pdev->vendor, pdev->device); |
320 | return -ENODEV; | 321 | return -ENODEV; |
321 | 322 | ||
322 | 323 | ||
@@ -361,8 +362,7 @@ found: | |||
361 | bridge->driver = &ali_generic_bridge; | 362 | bridge->driver = &ali_generic_bridge; |
362 | } | 363 | } |
363 | 364 | ||
364 | printk(KERN_INFO PFX "Detected ALi %s chipset\n", | 365 | dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name); |
365 | devs[j].chipset_name); | ||
366 | 366 | ||
367 | /* Fill in the mode register */ | 367 | /* Fill in the mode register */ |
368 | pci_read_config_dword(pdev, | 368 | pci_read_config_dword(pdev, |
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 39a0718bc616..e280531843be 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c | |||
@@ -419,8 +419,8 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, | |||
419 | return -ENODEV; | 419 | return -ENODEV; |
420 | 420 | ||
421 | j = ent - agp_amdk7_pci_table; | 421 | j = ent - agp_amdk7_pci_table; |
422 | printk(KERN_INFO PFX "Detected AMD %s chipset\n", | 422 | dev_info(&pdev->dev, "AMD %s chipset\n", |
423 | amd_agp_device_ids[j].chipset_name); | 423 | amd_agp_device_ids[j].chipset_name); |
424 | 424 | ||
425 | bridge = agp_alloc_bridge(); | 425 | bridge = agp_alloc_bridge(); |
426 | if (!bridge) | 426 | if (!bridge) |
@@ -442,7 +442,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, | |||
442 | while (!cap_ptr) { | 442 | while (!cap_ptr) { |
443 | gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); | 443 | gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); |
444 | if (!gfxcard) { | 444 | if (!gfxcard) { |
445 | printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); | 445 | dev_info(&pdev->dev, "no AGP VGA controller\n"); |
446 | return -ENODEV; | 446 | return -ENODEV; |
447 | } | 447 | } |
448 | cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); | 448 | cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); |
@@ -453,7 +453,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, | |||
453 | (if necessary at all). */ | 453 | (if necessary at all). */ |
454 | if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { | 454 | if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { |
455 | agp_bridge->flags |= AGP_ERRATA_1X; | 455 | agp_bridge->flags |= AGP_ERRATA_1X; |
456 | printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n"); | 456 | dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n"); |
457 | } | 457 | } |
458 | pci_dev_put(gfxcard); | 458 | pci_dev_put(gfxcard); |
459 | } | 459 | } |
@@ -469,7 +469,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, | |||
469 | agp_bridge->flags = AGP_ERRATA_FASTWRITES; | 469 | agp_bridge->flags = AGP_ERRATA_FASTWRITES; |
470 | agp_bridge->flags |= AGP_ERRATA_SBA; | 470 | agp_bridge->flags |= AGP_ERRATA_SBA; |
471 | agp_bridge->flags |= AGP_ERRATA_1X; | 471 | agp_bridge->flags |= AGP_ERRATA_1X; |
472 | printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n"); | 472 | dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n"); |
473 | } | 473 | } |
474 | } | 474 | } |
475 | 475 | ||
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 481ffe87c716..7495c522d8e4 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | static struct resource *aperture_resource; | 35 | static struct resource *aperture_resource; |
36 | static int __initdata agp_try_unsupported = 1; | 36 | static int __initdata agp_try_unsupported = 1; |
37 | static int agp_bridges_found; | ||
37 | 38 | ||
38 | static void amd64_tlbflush(struct agp_memory *temp) | 39 | static void amd64_tlbflush(struct agp_memory *temp) |
39 | { | 40 | { |
@@ -293,12 +294,13 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, | |||
293 | * so let double check that order, and lets trust the AMD NB settings | 294 | * so let double check that order, and lets trust the AMD NB settings |
294 | */ | 295 | */ |
295 | if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { | 296 | if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { |
296 | printk(KERN_INFO "Aperture size %u MB is not right, using settings from NB\n", | 297 | dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n", |
297 | 32 << order); | 298 | 32 << order); |
298 | order = nb_order; | 299 | order = nb_order; |
299 | } | 300 | } |
300 | 301 | ||
301 | printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); | 302 | dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n", |
303 | aper, 32 << order); | ||
302 | if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) | 304 | if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) |
303 | return -1; | 305 | return -1; |
304 | 306 | ||
@@ -319,10 +321,10 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) | |||
319 | for (i = 0; i < num_k8_northbridges; i++) { | 321 | for (i = 0; i < num_k8_northbridges; i++) { |
320 | struct pci_dev *dev = k8_northbridges[i]; | 322 | struct pci_dev *dev = k8_northbridges[i]; |
321 | if (fix_northbridge(dev, pdev, cap_ptr) < 0) { | 323 | if (fix_northbridge(dev, pdev, cap_ptr) < 0) { |
322 | printk(KERN_ERR PFX "No usable aperture found.\n"); | 324 | dev_err(&dev->dev, "no usable aperture found\n"); |
323 | #ifdef __x86_64__ | 325 | #ifdef __x86_64__ |
324 | /* should port this to i386 */ | 326 | /* should port this to i386 */ |
325 | printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n"); | 327 | dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n"); |
326 | #endif | 328 | #endif |
327 | return -1; | 329 | return -1; |
328 | } | 330 | } |
@@ -345,14 +347,14 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data | |||
345 | default: revstring="??"; break; | 347 | default: revstring="??"; break; |
346 | } | 348 | } |
347 | 349 | ||
348 | printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring); | 350 | dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring); |
349 | 351 | ||
350 | /* | 352 | /* |
351 | * Work around errata. | 353 | * Work around errata. |
352 | * Chips before B2 stepping incorrectly reporting v3.5 | 354 | * Chips before B2 stepping incorrectly reporting v3.5 |
353 | */ | 355 | */ |
354 | if (pdev->revision < 0x13) { | 356 | if (pdev->revision < 0x13) { |
355 | printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n"); | 357 | dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n"); |
356 | bridge->major_version = 3; | 358 | bridge->major_version = 3; |
357 | bridge->minor_version = 0; | 359 | bridge->minor_version = 0; |
358 | } | 360 | } |
@@ -375,11 +377,11 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) | |||
375 | struct pci_dev *dev1; | 377 | struct pci_dev *dev1; |
376 | int i; | 378 | int i; |
377 | unsigned size = amd64_fetch_size(); | 379 | unsigned size = amd64_fetch_size(); |
378 | printk(KERN_INFO "Setting up ULi AGP.\n"); | 380 | |
381 | dev_info(&pdev->dev, "setting up ULi AGP\n"); | ||
379 | dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); | 382 | dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); |
380 | if (dev1 == NULL) { | 383 | if (dev1 == NULL) { |
381 | printk(KERN_INFO PFX "Detected a ULi chipset, " | 384 | dev_info(&pdev->dev, "can't find ULi secondary device\n"); |
382 | "but could not fine the secondary device.\n"); | ||
383 | return -ENODEV; | 385 | return -ENODEV; |
384 | } | 386 | } |
385 | 387 | ||
@@ -388,7 +390,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) | |||
388 | break; | 390 | break; |
389 | 391 | ||
390 | if (i == ARRAY_SIZE(uli_sizes)) { | 392 | if (i == ARRAY_SIZE(uli_sizes)) { |
391 | printk(KERN_INFO PFX "No ULi size found for %d\n", size); | 393 | dev_info(&pdev->dev, "no ULi size found for %d\n", size); |
392 | return -ENODEV; | 394 | return -ENODEV; |
393 | } | 395 | } |
394 | 396 | ||
@@ -433,13 +435,11 @@ static int nforce3_agp_init(struct pci_dev *pdev) | |||
433 | int i; | 435 | int i; |
434 | unsigned size = amd64_fetch_size(); | 436 | unsigned size = amd64_fetch_size(); |
435 | 437 | ||
436 | printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n"); | 438 | dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); |
437 | 439 | ||
438 | dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); | 440 | dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); |
439 | if (dev1 == NULL) { | 441 | if (dev1 == NULL) { |
440 | printk(KERN_INFO PFX "agpgart: Detected an NVIDIA " | 442 | dev_info(&pdev->dev, "can't find Nforce3 secondary device\n"); |
441 | "nForce3 chipset, but could not find " | ||
442 | "the secondary device.\n"); | ||
443 | return -ENODEV; | 443 | return -ENODEV; |
444 | } | 444 | } |
445 | 445 | ||
@@ -448,7 +448,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) | |||
448 | break; | 448 | break; |
449 | 449 | ||
450 | if (i == ARRAY_SIZE(nforce3_sizes)) { | 450 | if (i == ARRAY_SIZE(nforce3_sizes)) { |
451 | printk(KERN_INFO PFX "No NForce3 size found for %d\n", size); | 451 | dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); |
452 | return -ENODEV; | 452 | return -ENODEV; |
453 | } | 453 | } |
454 | 454 | ||
@@ -462,7 +462,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) | |||
462 | 462 | ||
463 | /* if x86-64 aperture base is beyond 4G, exit here */ | 463 | /* if x86-64 aperture base is beyond 4G, exit here */ |
464 | if ( (apbase & 0x7fff) >> (32 - 25) ) { | 464 | if ( (apbase & 0x7fff) >> (32 - 25) ) { |
465 | printk(KERN_INFO PFX "aperture base > 4G\n"); | 465 | dev_info(&pdev->dev, "aperture base > 4G\n"); |
466 | return -ENODEV; | 466 | return -ENODEV; |
467 | } | 467 | } |
468 | 468 | ||
@@ -489,6 +489,7 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, | |||
489 | { | 489 | { |
490 | struct agp_bridge_data *bridge; | 490 | struct agp_bridge_data *bridge; |
491 | u8 cap_ptr; | 491 | u8 cap_ptr; |
492 | int err; | ||
492 | 493 | ||
493 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | 494 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); |
494 | if (!cap_ptr) | 495 | if (!cap_ptr) |
@@ -504,7 +505,8 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, | |||
504 | pdev->device == PCI_DEVICE_ID_AMD_8151_0) { | 505 | pdev->device == PCI_DEVICE_ID_AMD_8151_0) { |
505 | amd8151_init(pdev, bridge); | 506 | amd8151_init(pdev, bridge); |
506 | } else { | 507 | } else { |
507 | printk(KERN_INFO PFX "Detected AGP bridge %x\n", pdev->devfn); | 508 | dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n", |
509 | pdev->vendor, pdev->device); | ||
508 | } | 510 | } |
509 | 511 | ||
510 | bridge->driver = &amd_8151_driver; | 512 | bridge->driver = &amd_8151_driver; |
@@ -536,7 +538,12 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, | |||
536 | } | 538 | } |
537 | 539 | ||
538 | pci_set_drvdata(pdev, bridge); | 540 | pci_set_drvdata(pdev, bridge); |
539 | return agp_add_bridge(bridge); | 541 | err = agp_add_bridge(bridge); |
542 | if (err < 0) | ||
543 | return err; | ||
544 | |||
545 | agp_bridges_found++; | ||
546 | return 0; | ||
540 | } | 547 | } |
541 | 548 | ||
542 | static void __devexit agp_amd64_remove(struct pci_dev *pdev) | 549 | static void __devexit agp_amd64_remove(struct pci_dev *pdev) |
@@ -713,7 +720,11 @@ int __init agp_amd64_init(void) | |||
713 | 720 | ||
714 | if (agp_off) | 721 | if (agp_off) |
715 | return -EINVAL; | 722 | return -EINVAL; |
716 | if (pci_register_driver(&agp_amd64_pci_driver) < 0) { | 723 | err = pci_register_driver(&agp_amd64_pci_driver); |
724 | if (err < 0) | ||
725 | return err; | ||
726 | |||
727 | if (agp_bridges_found == 0) { | ||
717 | struct pci_dev *dev; | 728 | struct pci_dev *dev; |
718 | if (!agp_try_unsupported && !agp_try_unsupported_boot) { | 729 | if (!agp_try_unsupported && !agp_try_unsupported_boot) { |
719 | printk(KERN_INFO PFX "No supported AGP bridge found.\n"); | 730 | printk(KERN_INFO PFX "No supported AGP bridge found.\n"); |
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 3a4566c0d84f..6ecbcafb34b1 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c | |||
@@ -486,8 +486,8 @@ static int __devinit agp_ati_probe(struct pci_dev *pdev, | |||
486 | goto found; | 486 | goto found; |
487 | } | 487 | } |
488 | 488 | ||
489 | printk(KERN_ERR PFX | 489 | dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x]\n", |
490 | "Unsupported Ati chipset (device id: %04x)\n", pdev->device); | 490 | pdev->vendor, pdev->device); |
491 | return -ENODEV; | 491 | return -ENODEV; |
492 | 492 | ||
493 | found: | 493 | found: |
@@ -500,8 +500,7 @@ found: | |||
500 | 500 | ||
501 | bridge->driver = &ati_generic_bridge; | 501 | bridge->driver = &ati_generic_bridge; |
502 | 502 | ||
503 | printk(KERN_INFO PFX "Detected Ati %s chipset\n", | 503 | dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name); |
504 | devs[j].chipset_name); | ||
505 | 504 | ||
506 | /* Fill in the mode register */ | 505 | /* Fill in the mode register */ |
507 | pci_read_config_dword(pdev, | 506 | pci_read_config_dword(pdev, |
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 1ec87104e68c..3a3cc03d401c 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c | |||
@@ -144,7 +144,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) | |||
144 | void *addr = bridge->driver->agp_alloc_page(bridge); | 144 | void *addr = bridge->driver->agp_alloc_page(bridge); |
145 | 145 | ||
146 | if (!addr) { | 146 | if (!addr) { |
147 | printk(KERN_ERR PFX "unable to get memory for scratch page.\n"); | 147 | dev_err(&bridge->dev->dev, |
148 | "can't get memory for scratch page\n"); | ||
148 | return -ENOMEM; | 149 | return -ENOMEM; |
149 | } | 150 | } |
150 | 151 | ||
@@ -155,13 +156,13 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) | |||
155 | 156 | ||
156 | size_value = bridge->driver->fetch_size(); | 157 | size_value = bridge->driver->fetch_size(); |
157 | if (size_value == 0) { | 158 | if (size_value == 0) { |
158 | printk(KERN_ERR PFX "unable to determine aperture size.\n"); | 159 | dev_err(&bridge->dev->dev, "can't determine aperture size\n"); |
159 | rc = -EINVAL; | 160 | rc = -EINVAL; |
160 | goto err_out; | 161 | goto err_out; |
161 | } | 162 | } |
162 | if (bridge->driver->create_gatt_table(bridge)) { | 163 | if (bridge->driver->create_gatt_table(bridge)) { |
163 | printk(KERN_ERR PFX | 164 | dev_err(&bridge->dev->dev, |
164 | "unable to get memory for graphics translation table.\n"); | 165 | "can't get memory for graphics translation table\n"); |
165 | rc = -ENOMEM; | 166 | rc = -ENOMEM; |
166 | goto err_out; | 167 | goto err_out; |
167 | } | 168 | } |
@@ -169,7 +170,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) | |||
169 | 170 | ||
170 | bridge->key_list = vmalloc(PAGE_SIZE * 4); | 171 | bridge->key_list = vmalloc(PAGE_SIZE * 4); |
171 | if (bridge->key_list == NULL) { | 172 | if (bridge->key_list == NULL) { |
172 | printk(KERN_ERR PFX "error allocating memory for key lists.\n"); | 173 | dev_err(&bridge->dev->dev, |
174 | "can't allocate memory for key lists\n"); | ||
173 | rc = -ENOMEM; | 175 | rc = -ENOMEM; |
174 | goto err_out; | 176 | goto err_out; |
175 | } | 177 | } |
@@ -179,10 +181,12 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) | |||
179 | memset(bridge->key_list, 0, PAGE_SIZE * 4); | 181 | memset(bridge->key_list, 0, PAGE_SIZE * 4); |
180 | 182 | ||
181 | if (bridge->driver->configure()) { | 183 | if (bridge->driver->configure()) { |
182 | printk(KERN_ERR PFX "error configuring host chipset.\n"); | 184 | dev_err(&bridge->dev->dev, "error configuring host chipset\n"); |
183 | rc = -EINVAL; | 185 | rc = -EINVAL; |
184 | goto err_out; | 186 | goto err_out; |
185 | } | 187 | } |
188 | INIT_LIST_HEAD(&bridge->mapped_list); | ||
189 | spin_lock_init(&bridge->mapped_lock); | ||
186 | 190 | ||
187 | return 0; | 191 | return 0; |
188 | 192 | ||
@@ -269,25 +273,27 @@ int agp_add_bridge(struct agp_bridge_data *bridge) | |||
269 | 273 | ||
270 | /* Grab reference on the chipset driver. */ | 274 | /* Grab reference on the chipset driver. */ |
271 | if (!try_module_get(bridge->driver->owner)) { | 275 | if (!try_module_get(bridge->driver->owner)) { |
272 | printk (KERN_INFO PFX "Couldn't lock chipset driver.\n"); | 276 | dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); |
273 | return -EINVAL; | 277 | return -EINVAL; |
274 | } | 278 | } |
275 | 279 | ||
276 | error = agp_backend_initialize(bridge); | 280 | error = agp_backend_initialize(bridge); |
277 | if (error) { | 281 | if (error) { |
278 | printk (KERN_INFO PFX "agp_backend_initialize() failed.\n"); | 282 | dev_info(&bridge->dev->dev, |
283 | "agp_backend_initialize() failed\n"); | ||
279 | goto err_out; | 284 | goto err_out; |
280 | } | 285 | } |
281 | 286 | ||
282 | if (list_empty(&agp_bridges)) { | 287 | if (list_empty(&agp_bridges)) { |
283 | error = agp_frontend_initialize(); | 288 | error = agp_frontend_initialize(); |
284 | if (error) { | 289 | if (error) { |
285 | printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n"); | 290 | dev_info(&bridge->dev->dev, |
291 | "agp_frontend_initialize() failed\n"); | ||
286 | goto frontend_err; | 292 | goto frontend_err; |
287 | } | 293 | } |
288 | 294 | ||
289 | printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n", | 295 | dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n", |
290 | bridge->driver->fetch_size(), bridge->gart_bus_addr); | 296 | bridge->driver->fetch_size(), bridge->gart_bus_addr); |
291 | 297 | ||
292 | } | 298 | } |
293 | 299 | ||
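Nearly every message change in this file and in the AGP files that follow is the same conversion from module-prefixed printk() to the dev_* helpers, which tag each line with the owning PCI device. A before/after sketch of the pattern, using a message from the hunk above:

	/* before: printk with a module prefix, device identity lost or hand-rolled */
	printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
	       bridge->driver->fetch_size(), bridge->gart_bus_addr);

	/* after: dev_info() ties the message to the bridge's struct device */
	dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
		 bridge->driver->fetch_size(), bridge->gart_bus_addr);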
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index eaa1a355bb32..118dbde25dc7 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c | |||
@@ -429,6 +429,10 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start) | |||
429 | 429 | ||
430 | curr->is_bound = true; | 430 | curr->is_bound = true; |
431 | curr->pg_start = pg_start; | 431 | curr->pg_start = pg_start; |
432 | spin_lock(&agp_bridge->mapped_lock); | ||
433 | list_add(&curr->mapped_list, &agp_bridge->mapped_list); | ||
434 | spin_unlock(&agp_bridge->mapped_lock); | ||
435 | |||
432 | return 0; | 436 | return 0; |
433 | } | 437 | } |
434 | EXPORT_SYMBOL(agp_bind_memory); | 438 | EXPORT_SYMBOL(agp_bind_memory); |
@@ -461,10 +465,34 @@ int agp_unbind_memory(struct agp_memory *curr) | |||
461 | 465 | ||
462 | curr->is_bound = false; | 466 | curr->is_bound = false; |
463 | curr->pg_start = 0; | 467 | curr->pg_start = 0; |
468 | spin_lock(&curr->bridge->mapped_lock); | ||
469 | list_del(&curr->mapped_list); | ||
470 | spin_unlock(&curr->bridge->mapped_lock); | ||
464 | return 0; | 471 | return 0; |
465 | } | 472 | } |
466 | EXPORT_SYMBOL(agp_unbind_memory); | 473 | EXPORT_SYMBOL(agp_unbind_memory); |
467 | 474 | ||
475 | /** | ||

476 | * agp_rebind_memory - Rewrite the entire GATT, useful on resume | ||
477 | */ | ||
478 | int agp_rebind_memory(void) | ||
479 | { | ||
480 | struct agp_memory *curr; | ||
481 | int ret_val = 0; | ||
482 | |||
483 | spin_lock(&agp_bridge->mapped_lock); | ||
484 | list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) { | ||
485 | ret_val = curr->bridge->driver->insert_memory(curr, | ||
486 | curr->pg_start, | ||
487 | curr->type); | ||
488 | if (ret_val != 0) | ||
489 | break; | ||
490 | } | ||
491 | spin_unlock(&agp_bridge->mapped_lock); | ||
492 | return ret_val; | ||
493 | } | ||
494 | EXPORT_SYMBOL(agp_rebind_memory); | ||
495 | |||
468 | /* End - Routines for handling swapping of agp_memory into the GATT */ | 496 | /* End - Routines for handling swapping of agp_memory into the GATT */ |
469 | 497 | ||
470 | 498 | ||
@@ -771,8 +799,8 @@ void agp_device_command(u32 bridge_agpstat, bool agp_v3) | |||
771 | if (!agp) | 799 | if (!agp) |
772 | continue; | 800 | continue; |
773 | 801 | ||
774 | printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n", | 802 | dev_info(&device->dev, "putting AGP V%d device into %dx mode\n", |
775 | agp_v3 ? 3 : 2, pci_name(device), mode); | 803 | agp_v3 ? 3 : 2, mode); |
776 | pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); | 804 | pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); |
777 | } | 805 | } |
778 | } | 806 | } |
@@ -800,10 +828,8 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) | |||
800 | 828 | ||
801 | get_agp_version(agp_bridge); | 829 | get_agp_version(agp_bridge); |
802 | 830 | ||
803 | printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n", | 831 | dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", |
804 | agp_bridge->major_version, | 832 | agp_bridge->major_version, agp_bridge->minor_version); |
805 | agp_bridge->minor_version, | ||
806 | pci_name(agp_bridge->dev)); | ||
807 | 833 | ||
808 | pci_read_config_dword(agp_bridge->dev, | 834 | pci_read_config_dword(agp_bridge->dev, |
809 | agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); | 835 | agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); |
@@ -832,8 +858,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) | |||
832 | pci_write_config_dword(bridge->dev, | 858 | pci_write_config_dword(bridge->dev, |
833 | bridge->capndx+AGPCTRL, temp); | 859 | bridge->capndx+AGPCTRL, temp); |
834 | 860 | ||
835 | printk(KERN_INFO PFX "Device is in legacy mode," | 861 | dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n"); |
836 | " falling back to 2.x\n"); | ||
837 | } | 862 | } |
838 | } | 863 | } |
839 | 864 | ||
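The new agp_rebind_memory() walks the mapped_list populated by agp_bind_memory() and re-inserts every bound region into the GATT, which is what makes the aperture usable again after suspend. A hedged sketch of the intended caller (example_agp_resume is an illustrative name; the real user is the agp_intel_resume() hunk further down, which reconfigures the chipset first):

	/* sketch of a resume path using the new helper */
	static int example_agp_resume(struct pci_dev *pdev)
	{
		pci_restore_state(pdev);
		/* ... reprogram the bridge and GATT base registers ... */
		return agp_rebind_memory();	/* rewrite every bound entry */
	}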
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index df702642ab8f..016fdf0623a4 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -32,8 +32,8 @@ | |||
32 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | 32 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 |
33 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | 33 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 |
34 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | 34 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 |
35 | #define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40 | 35 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 |
36 | #define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42 | 36 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 |
37 | #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 | 37 | #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 |
38 | #define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02 | 38 | #define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02 |
39 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | 39 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 |
@@ -55,7 +55,7 @@ | |||
55 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | 55 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ |
56 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | 56 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ |
57 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \ | 57 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \ |
58 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB) | 58 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB) |
59 | 59 | ||
60 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | 60 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ |
61 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | 61 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ |
@@ -161,7 +161,7 @@ static int intel_i810_fetch_size(void) | |||
161 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | 161 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); |
162 | 162 | ||
163 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { | 163 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { |
164 | printk(KERN_WARNING PFX "i810 is disabled\n"); | 164 | dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); |
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { | 167 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { |
@@ -193,7 +193,8 @@ static int intel_i810_configure(void) | |||
193 | 193 | ||
194 | intel_private.registers = ioremap(temp, 128 * 4096); | 194 | intel_private.registers = ioremap(temp, 128 * 4096); |
195 | if (!intel_private.registers) { | 195 | if (!intel_private.registers) { |
196 | printk(KERN_ERR PFX "Unable to remap memory.\n"); | 196 | dev_err(&intel_private.pcidev->dev, |
197 | "can't remap memory\n"); | ||
197 | return -ENOMEM; | 198 | return -ENOMEM; |
198 | } | 199 | } |
199 | } | 200 | } |
@@ -201,7 +202,8 @@ static int intel_i810_configure(void) | |||
201 | if ((readl(intel_private.registers+I810_DRAM_CTL) | 202 | if ((readl(intel_private.registers+I810_DRAM_CTL) |
202 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { | 203 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { |
203 | /* This will need to be dynamically assigned */ | 204 | /* This will need to be dynamically assigned */ |
204 | printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n"); | 205 | dev_info(&intel_private.pcidev->dev, |
206 | "detected 4MB dedicated video ram\n"); | ||
205 | intel_private.num_dcache_entries = 1024; | 207 | intel_private.num_dcache_entries = 1024; |
206 | } | 208 | } |
207 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); | 209 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); |
@@ -500,8 +502,8 @@ static void intel_i830_init_gtt_entries(void) | |||
500 | size = 1024 + 512; | 502 | size = 1024 + 512; |
501 | break; | 503 | break; |
502 | default: | 504 | default: |
503 | printk(KERN_INFO PFX "Unknown page table size, " | 505 | dev_info(&intel_private.pcidev->dev, |
504 | "assuming 512KB\n"); | 506 | "unknown page table size, assuming 512KB\n"); |
505 | size = 512; | 507 | size = 512; |
506 | } | 508 | } |
507 | size += 4; /* add in BIOS popup space */ | 509 | size += 4; /* add in BIOS popup space */ |
@@ -515,8 +517,8 @@ static void intel_i830_init_gtt_entries(void) | |||
515 | size = 2048; | 517 | size = 2048; |
516 | break; | 518 | break; |
517 | default: | 519 | default: |
518 | printk(KERN_INFO PFX "Unknown page table size 0x%x, " | 520 | dev_info(&agp_bridge->dev->dev, |
519 | "assuming 512KB\n", | 521 | "unknown page table size 0x%x, assuming 512KB\n", |
520 | (gmch_ctrl & G33_PGETBL_SIZE_MASK)); | 522 | (gmch_ctrl & G33_PGETBL_SIZE_MASK)); |
521 | size = 512; | 523 | size = 512; |
522 | } | 524 | } |
@@ -627,11 +629,11 @@ static void intel_i830_init_gtt_entries(void) | |||
627 | } | 629 | } |
628 | } | 630 | } |
629 | if (gtt_entries > 0) | 631 | if (gtt_entries > 0) |
630 | printk(KERN_INFO PFX "Detected %dK %s memory.\n", | 632 | dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", |
631 | gtt_entries / KB(1), local ? "local" : "stolen"); | 633 | gtt_entries / KB(1), local ? "local" : "stolen"); |
632 | else | 634 | else |
633 | printk(KERN_INFO PFX | 635 | dev_info(&agp_bridge->dev->dev, |
634 | "No pre-allocated video memory detected.\n"); | 636 | "no pre-allocated video memory detected\n"); |
635 | gtt_entries /= KB(4); | 637 | gtt_entries /= KB(4); |
636 | 638 | ||
637 | intel_private.gtt_entries = gtt_entries; | 639 | intel_private.gtt_entries = gtt_entries; |
@@ -801,10 +803,12 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, | |||
801 | num_entries = A_SIZE_FIX(temp)->num_entries; | 803 | num_entries = A_SIZE_FIX(temp)->num_entries; |
802 | 804 | ||
803 | if (pg_start < intel_private.gtt_entries) { | 805 | if (pg_start < intel_private.gtt_entries) { |
804 | printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", | 806 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, |
805 | pg_start, intel_private.gtt_entries); | 807 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", |
808 | pg_start, intel_private.gtt_entries); | ||
806 | 809 | ||
807 | printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); | 810 | dev_info(&intel_private.pcidev->dev, |
811 | "trying to insert into local/stolen memory\n"); | ||
808 | goto out_err; | 812 | goto out_err; |
809 | } | 813 | } |
810 | 814 | ||
@@ -851,7 +855,8 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, | |||
851 | return 0; | 855 | return 0; |
852 | 856 | ||
853 | if (pg_start < intel_private.gtt_entries) { | 857 | if (pg_start < intel_private.gtt_entries) { |
854 | printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); | 858 | dev_info(&intel_private.pcidev->dev, |
859 | "trying to disable local/stolen memory\n"); | ||
855 | return -EINVAL; | 860 | return -EINVAL; |
856 | } | 861 | } |
857 | 862 | ||
@@ -957,7 +962,7 @@ static void intel_i9xx_setup_flush(void) | |||
957 | if (intel_private.ifp_resource.start) { | 962 | if (intel_private.ifp_resource.start) { |
958 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | 963 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); |
959 | if (!intel_private.i9xx_flush_page) | 964 | if (!intel_private.i9xx_flush_page) |
960 | printk(KERN_INFO "unable to ioremap flush page - no chipset flushing"); | 965 | dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); |
961 | } | 966 | } |
962 | } | 967 | } |
963 | 968 | ||
@@ -1028,10 +1033,12 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | |||
1028 | num_entries = A_SIZE_FIX(temp)->num_entries; | 1033 | num_entries = A_SIZE_FIX(temp)->num_entries; |
1029 | 1034 | ||
1030 | if (pg_start < intel_private.gtt_entries) { | 1035 | if (pg_start < intel_private.gtt_entries) { |
1031 | printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", | 1036 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, |
1032 | pg_start, intel_private.gtt_entries); | 1037 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", |
1038 | pg_start, intel_private.gtt_entries); | ||
1033 | 1039 | ||
1034 | printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); | 1040 | dev_info(&intel_private.pcidev->dev, |
1041 | "trying to insert into local/stolen memory\n"); | ||
1035 | goto out_err; | 1042 | goto out_err; |
1036 | } | 1043 | } |
1037 | 1044 | ||
@@ -1078,7 +1085,8 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, | |||
1078 | return 0; | 1085 | return 0; |
1079 | 1086 | ||
1080 | if (pg_start < intel_private.gtt_entries) { | 1087 | if (pg_start < intel_private.gtt_entries) { |
1081 | printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); | 1088 | dev_info(&intel_private.pcidev->dev, |
1089 | "trying to disable local/stolen memory\n"); | ||
1082 | return -EINVAL; | 1090 | return -EINVAL; |
1083 | } | 1091 | } |
1084 | 1092 | ||
@@ -1182,7 +1190,7 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | |||
1182 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | 1190 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) |
1183 | { | 1191 | { |
1184 | switch (agp_bridge->dev->device) { | 1192 | switch (agp_bridge->dev->device) { |
1185 | case PCI_DEVICE_ID_INTEL_IGD_HB: | 1193 | case PCI_DEVICE_ID_INTEL_GM45_HB: |
1186 | case PCI_DEVICE_ID_INTEL_IGD_E_HB: | 1194 | case PCI_DEVICE_ID_INTEL_IGD_E_HB: |
1187 | case PCI_DEVICE_ID_INTEL_Q45_HB: | 1195 | case PCI_DEVICE_ID_INTEL_Q45_HB: |
1188 | case PCI_DEVICE_ID_INTEL_G45_HB: | 1196 | case PCI_DEVICE_ID_INTEL_G45_HB: |
@@ -1379,7 +1387,7 @@ static int intel_815_configure(void) | |||
1379 | /* the Intel 815 chipset spec. says that bits 29-31 in the | 1387 | /* the Intel 815 chipset spec. says that bits 29-31 in the |
1380 | * ATTBASE register are reserved -> try not to write them */ | 1388 | * ATTBASE register are reserved -> try not to write them */ |
1381 | if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { | 1389 | if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { |
1382 | printk(KERN_EMERG PFX "gatt bus addr too high"); | 1390 | dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high"); |
1383 | return -EINVAL; | 1391 | return -EINVAL; |
1384 | } | 1392 | } |
1385 | 1393 | ||
@@ -2117,8 +2125,8 @@ static const struct intel_driver_description { | |||
2117 | NULL, &intel_g33_driver }, | 2125 | NULL, &intel_g33_driver }, |
2118 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", | 2126 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", |
2119 | NULL, &intel_g33_driver }, | 2127 | NULL, &intel_g33_driver }, |
2120 | { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0, | 2128 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, |
2121 | "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, | 2129 | "Mobile Intel? GM45 Express", NULL, &intel_i965_driver }, |
2122 | { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, | 2130 | { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, |
2123 | "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, | 2131 | "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, |
2124 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, | 2132 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, |
@@ -2163,8 +2171,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2163 | 2171 | ||
2164 | if (intel_agp_chipsets[i].name == NULL) { | 2172 | if (intel_agp_chipsets[i].name == NULL) { |
2165 | if (cap_ptr) | 2173 | if (cap_ptr) |
2166 | printk(KERN_WARNING PFX "Unsupported Intel chipset" | 2174 | dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n", |
2167 | "(device id: %04x)\n", pdev->device); | 2175 | pdev->vendor, pdev->device); |
2168 | agp_put_bridge(bridge); | 2176 | agp_put_bridge(bridge); |
2169 | return -ENODEV; | 2177 | return -ENODEV; |
2170 | } | 2178 | } |
@@ -2172,9 +2180,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2172 | if (bridge->driver == NULL) { | 2180 | if (bridge->driver == NULL) { |
2173 | /* bridge has no AGP and no IGD detected */ | 2181 | /* bridge has no AGP and no IGD detected */ |
2174 | if (cap_ptr) | 2182 | if (cap_ptr) |
2175 | printk(KERN_WARNING PFX "Failed to find bridge device " | 2183 | dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", |
2176 | "(chip_id: %04x)\n", | 2184 | intel_agp_chipsets[i].gmch_chip_id); |
2177 | intel_agp_chipsets[i].gmch_chip_id); | ||
2178 | agp_put_bridge(bridge); | 2185 | agp_put_bridge(bridge); |
2179 | return -ENODEV; | 2186 | return -ENODEV; |
2180 | } | 2187 | } |
@@ -2183,8 +2190,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2183 | bridge->capndx = cap_ptr; | 2190 | bridge->capndx = cap_ptr; |
2184 | bridge->dev_private_data = &intel_private; | 2191 | bridge->dev_private_data = &intel_private; |
2185 | 2192 | ||
2186 | printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n", | 2193 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); |
2187 | intel_agp_chipsets[i].name); | ||
2188 | 2194 | ||
2189 | /* | 2195 | /* |
2190 | * The following fixes the case where the BIOS has "forgotten" to | 2196 | * The following fixes the case where the BIOS has "forgotten" to |
@@ -2194,7 +2200,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2194 | r = &pdev->resource[0]; | 2200 | r = &pdev->resource[0]; |
2195 | if (!r->start && r->end) { | 2201 | if (!r->start && r->end) { |
2196 | if (pci_assign_resource(pdev, 0)) { | 2202 | if (pci_assign_resource(pdev, 0)) { |
2197 | printk(KERN_ERR PFX "could not assign resource 0\n"); | 2203 | dev_err(&pdev->dev, "can't assign resource 0\n"); |
2198 | agp_put_bridge(bridge); | 2204 | agp_put_bridge(bridge); |
2199 | return -ENODEV; | 2205 | return -ENODEV; |
2200 | } | 2206 | } |
@@ -2206,7 +2212,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2206 | * 20030610 - hamish@zot.org | 2212 | * 20030610 - hamish@zot.org |
2207 | */ | 2213 | */ |
2208 | if (pci_enable_device(pdev)) { | 2214 | if (pci_enable_device(pdev)) { |
2209 | printk(KERN_ERR PFX "Unable to Enable PCI device\n"); | 2215 | dev_err(&pdev->dev, "can't enable PCI device\n"); |
2210 | agp_put_bridge(bridge); | 2216 | agp_put_bridge(bridge); |
2211 | return -ENODEV; | 2217 | return -ENODEV; |
2212 | } | 2218 | } |
@@ -2238,6 +2244,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev) | |||
2238 | static int agp_intel_resume(struct pci_dev *pdev) | 2244 | static int agp_intel_resume(struct pci_dev *pdev) |
2239 | { | 2245 | { |
2240 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); | 2246 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); |
2247 | int ret_val; | ||
2241 | 2248 | ||
2242 | pci_restore_state(pdev); | 2249 | pci_restore_state(pdev); |
2243 | 2250 | ||
@@ -2265,6 +2272,10 @@ static int agp_intel_resume(struct pci_dev *pdev) | |||
2265 | else if (bridge->driver == &intel_i965_driver) | 2272 | else if (bridge->driver == &intel_i965_driver) |
2266 | intel_i915_configure(); | 2273 | intel_i915_configure(); |
2267 | 2274 | ||
2275 | ret_val = agp_rebind_memory(); | ||
2276 | if (ret_val != 0) | ||
2277 | return ret_val; | ||
2278 | |||
2268 | return 0; | 2279 | return 0; |
2269 | } | 2280 | } |
2270 | #endif | 2281 | #endif |
@@ -2315,7 +2326,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
2315 | ID(PCI_DEVICE_ID_INTEL_G33_HB), | 2326 | ID(PCI_DEVICE_ID_INTEL_G33_HB), |
2316 | ID(PCI_DEVICE_ID_INTEL_Q35_HB), | 2327 | ID(PCI_DEVICE_ID_INTEL_Q35_HB), |
2317 | ID(PCI_DEVICE_ID_INTEL_Q33_HB), | 2328 | ID(PCI_DEVICE_ID_INTEL_Q33_HB), |
2318 | ID(PCI_DEVICE_ID_INTEL_IGD_HB), | 2329 | ID(PCI_DEVICE_ID_INTEL_GM45_HB), |
2319 | ID(PCI_DEVICE_ID_INTEL_IGD_E_HB), | 2330 | ID(PCI_DEVICE_ID_INTEL_IGD_E_HB), |
2320 | ID(PCI_DEVICE_ID_INTEL_Q45_HB), | 2331 | ID(PCI_DEVICE_ID_INTEL_Q45_HB), |
2321 | ID(PCI_DEVICE_ID_INTEL_G45_HB), | 2332 | ID(PCI_DEVICE_ID_INTEL_G45_HB), |
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c index 3f9ccde62377..c73385cc4b8a 100644 --- a/drivers/char/agp/isoch.c +++ b/drivers/char/agp/isoch.c | |||
@@ -153,7 +153,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, | |||
153 | 153 | ||
154 | /* Check if this configuration has any chance of working */ | 154 | /* Check if this configuration has any chance of working */ |
155 | if (tot_bw > target.maxbw) { | 155 | if (tot_bw > target.maxbw) { |
156 | printk(KERN_ERR PFX "isochronous bandwidth required " | 156 | dev_err(&td->dev, "isochronous bandwidth required " |
157 | "by AGP 3.0 devices exceeds that which is supported by " | 157 | "by AGP 3.0 devices exceeds that which is supported by " |
158 | "the AGP 3.0 bridge!\n"); | 158 | "the AGP 3.0 bridge!\n"); |
159 | ret = -ENODEV; | 159 | ret = -ENODEV; |
@@ -188,7 +188,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, | |||
188 | /* Exit if the minimal ISOCH_N allocation among the masters is more | 188 | /* Exit if the minimal ISOCH_N allocation among the masters is more |
189 | * than the target can handle. */ | 189 | * than the target can handle. */ |
190 | if (tot_n > target.n) { | 190 | if (tot_n > target.n) { |
191 | printk(KERN_ERR PFX "number of isochronous " | 191 | dev_err(&td->dev, "number of isochronous " |
192 | "transactions per period required by AGP 3.0 devices " | 192 | "transactions per period required by AGP 3.0 devices " |
193 | "exceeds that which is supported by the AGP 3.0 " | 193 | "exceeds that which is supported by the AGP 3.0 " |
194 | "bridge!\n"); | 194 | "bridge!\n"); |
@@ -229,7 +229,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, | |||
229 | /* Exit if the minimal RQ needs of the masters exceeds what the target | 229 | /* Exit if the minimal RQ needs of the masters exceeds what the target |
230 | * can provide. */ | 230 | * can provide. */ |
231 | if (tot_rq > rq_isoch) { | 231 | if (tot_rq > rq_isoch) { |
232 | printk(KERN_ERR PFX "number of request queue slots " | 232 | dev_err(&td->dev, "number of request queue slots " |
233 | "required by the isochronous bandwidth requested by " | 233 | "required by the isochronous bandwidth requested by " |
234 | "AGP 3.0 devices exceeds the number provided by the " | 234 | "AGP 3.0 devices exceeds the number provided by the " |
235 | "AGP 3.0 bridge!\n"); | 235 | "AGP 3.0 bridge!\n"); |
@@ -359,8 +359,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) | |||
359 | case 0x0001: /* Unclassified device */ | 359 | case 0x0001: /* Unclassified device */ |
360 | /* Don't know what this is, but log it for investigation. */ | 360 | /* Don't know what this is, but log it for investigation. */ |
361 | if (mcapndx != 0) { | 361 | if (mcapndx != 0) { |
362 | printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n", | 362 | dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n", |
363 | dev->vendor, dev->device); | 363 | pci_name(dev), |
364 | dev->vendor, dev->device); | ||
364 | } | 365 | } |
365 | continue; | 366 | continue; |
366 | 367 | ||
@@ -407,17 +408,18 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) | |||
407 | } | 408 | } |
408 | 409 | ||
409 | if (mcapndx == 0) { | 410 | if (mcapndx == 0) { |
410 | printk(KERN_ERR PFX "woah! Non-AGP device " | 411 | dev_err(&td->dev, "woah! Non-AGP device %s on " |
411 | "found on the secondary bus of an AGP 3.5 bridge!\n"); | 412 | "secondary bus of AGP 3.5 bridge!\n", |
413 | pci_name(dev)); | ||
412 | ret = -ENODEV; | 414 | ret = -ENODEV; |
413 | goto free_and_exit; | 415 | goto free_and_exit; |
414 | } | 416 | } |
415 | 417 | ||
416 | mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; | 418 | mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; |
417 | if (mmajor < 3) { | 419 | if (mmajor < 3) { |
418 | printk(KERN_ERR PFX "woah! AGP 2.0 device " | 420 | dev_err(&td->dev, "woah! AGP 2.0 device %s on " |
419 | "found on the secondary bus of an AGP 3.5 " | 421 | "secondary bus of AGP 3.5 bridge operating " |
420 | "bridge operating with AGP 3.0 electricals!\n"); | 422 | "with AGP 3.0 electricals!\n", pci_name(dev)); |
421 | ret = -ENODEV; | 423 | ret = -ENODEV; |
422 | goto free_and_exit; | 424 | goto free_and_exit; |
423 | } | 425 | } |
@@ -427,10 +429,10 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) | |||
427 | pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); | 429 | pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); |
428 | 430 | ||
429 | if (((mstatus >> 3) & 0x1) == 0) { | 431 | if (((mstatus >> 3) & 0x1) == 0) { |
430 | printk(KERN_ERR PFX "woah! AGP 3.x device " | 432 | dev_err(&td->dev, "woah! AGP 3.x device %s not " |
431 | "not operating in AGP 3.x mode found on the " | 433 | "operating in AGP 3.x mode on secondary bus " |
432 | "secondary bus of an AGP 3.5 bridge operating " | 434 | "of AGP 3.5 bridge operating with AGP 3.0 " |
433 | "with AGP 3.0 electricals!\n"); | 435 | "electricals!\n", pci_name(dev)); |
434 | ret = -ENODEV; | 436 | ret = -ENODEV; |
435 | goto free_and_exit; | 437 | goto free_and_exit; |
436 | } | 438 | } |
@@ -444,9 +446,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) | |||
444 | if (isoch) { | 446 | if (isoch) { |
445 | ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs); | 447 | ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs); |
446 | if (ret) { | 448 | if (ret) { |
447 | printk(KERN_INFO PFX "Something bad happened setting " | 449 | dev_info(&td->dev, "something bad happened setting " |
448 | "up isochronous xfers. Falling back to " | 450 | "up isochronous xfers; falling back to " |
449 | "non-isochronous xfer mode.\n"); | 451 | "non-isochronous xfer mode\n"); |
450 | } else { | 452 | } else { |
451 | goto free_and_exit; | 453 | goto free_and_exit; |
452 | } | 454 | } |
@@ -466,4 +468,3 @@ free_and_exit: | |||
466 | get_out: | 468 | get_out: |
467 | return ret; | 469 | return ret; |
468 | } | 470 | } |
469 | |||
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index b6791846809f..2587ef96a960 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c | |||
@@ -79,10 +79,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) | |||
79 | u32 command; | 79 | u32 command; |
80 | int rate; | 80 | int rate; |
81 | 81 | ||
82 | printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n", | 82 | dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", |
83 | agp_bridge->major_version, | 83 | agp_bridge->major_version, agp_bridge->minor_version); |
84 | agp_bridge->minor_version, | ||
85 | pci_name(agp_bridge->dev)); | ||
86 | 84 | ||
87 | pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command); | 85 | pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command); |
88 | command = agp_collect_device_status(bridge, mode, command); | 86 | command = agp_collect_device_status(bridge, mode, command); |
@@ -94,8 +92,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) | |||
94 | if (!agp) | 92 | if (!agp) |
95 | continue; | 93 | continue; |
96 | 94 | ||
97 | printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n", | 95 | dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n", |
98 | pci_name(device), rate); | 96 | pci_name(device), rate); |
99 | 97 | ||
100 | pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); | 98 | pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); |
101 | 99 | ||
@@ -105,7 +103,7 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) | |||
105 | * cannot be configured | 103 | * cannot be configured |
106 | */ | 104 | */ |
107 | if (device->device == bridge->dev->device) { | 105 | if (device->device == bridge->dev->device) { |
108 | printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n"); | 106 | dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n"); |
109 | msleep(10); | 107 | msleep(10); |
110 | } | 108 | } |
111 | } | 109 | } |
@@ -190,7 +188,8 @@ static int __devinit agp_sis_probe(struct pci_dev *pdev, | |||
190 | return -ENODEV; | 188 | return -ENODEV; |
191 | 189 | ||
192 | 190 | ||
193 | printk(KERN_INFO PFX "Detected SiS chipset - id:%i\n", pdev->device); | 191 | dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n", |
192 | pdev->vendor, pdev->device); | ||
194 | bridge = agp_alloc_bridge(); | 193 | bridge = agp_alloc_bridge(); |
195 | if (!bridge) | 194 | if (!bridge) |
196 | return -ENOMEM; | 195 | return -ENOMEM; |
@@ -242,7 +241,7 @@ static struct pci_device_id agp_sis_pci_table[] = { | |||
242 | .class = (PCI_CLASS_BRIDGE_HOST << 8), | 241 | .class = (PCI_CLASS_BRIDGE_HOST << 8), |
243 | .class_mask = ~0, | 242 | .class_mask = ~0, |
244 | .vendor = PCI_VENDOR_ID_SI, | 243 | .vendor = PCI_VENDOR_ID_SI, |
245 | .device = PCI_DEVICE_ID_SI_5591_AGP, | 244 | .device = PCI_DEVICE_ID_SI_5591, |
246 | .subvendor = PCI_ANY_ID, | 245 | .subvendor = PCI_ANY_ID, |
247 | .subdevice = PCI_ANY_ID, | 246 | .subdevice = PCI_ANY_ID, |
248 | }, | 247 | }, |
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 0e054c134490..2fb27fe4c10c 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c | |||
@@ -241,7 +241,8 @@ static void serverworks_tlbflush(struct agp_memory *temp) | |||
241 | while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { | 241 | while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { |
242 | cpu_relax(); | 242 | cpu_relax(); |
243 | if (time_after(jiffies, timeout)) { | 243 | if (time_after(jiffies, timeout)) { |
244 | printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n"); | 244 | dev_err(&serverworks_private.svrwrks_dev->dev, |
245 | "TLB post flush took more than 3 seconds\n"); | ||
245 | break; | 246 | break; |
246 | } | 247 | } |
247 | } | 248 | } |
@@ -251,7 +252,8 @@ static void serverworks_tlbflush(struct agp_memory *temp) | |||
251 | while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { | 252 | while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { |
252 | cpu_relax(); | 253 | cpu_relax(); |
253 | if (time_after(jiffies, timeout)) { | 254 | if (time_after(jiffies, timeout)) { |
254 | printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n"); | 255 | dev_err(&serverworks_private.svrwrks_dev->dev, |
256 | "TLB Dir flush took more than 3 seconds\n"); | ||
255 | break; | 257 | break; |
256 | } | 258 | } |
257 | } | 259 | } |
@@ -271,7 +273,7 @@ static int serverworks_configure(void) | |||
271 | temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); | 273 | temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); |
272 | serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); | 274 | serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); |
273 | if (!serverworks_private.registers) { | 275 | if (!serverworks_private.registers) { |
274 | printk (KERN_ERR PFX "Unable to ioremap() memory.\n"); | 276 | dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp); |
275 | return -ENOMEM; | 277 | return -ENOMEM; |
276 | } | 278 | } |
277 | 279 | ||
@@ -451,7 +453,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, | |||
451 | 453 | ||
452 | switch (pdev->device) { | 454 | switch (pdev->device) { |
453 | case 0x0006: | 455 | case 0x0006: |
454 | printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n"); | 456 | dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n"); |
455 | return -ENODEV; | 457 | return -ENODEV; |
456 | 458 | ||
457 | case PCI_DEVICE_ID_SERVERWORKS_HE: | 459 | case PCI_DEVICE_ID_SERVERWORKS_HE: |
@@ -461,8 +463,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, | |||
461 | 463 | ||
462 | default: | 464 | default: |
463 | if (cap_ptr) | 465 | if (cap_ptr) |
464 | printk(KERN_ERR PFX "Unsupported Serverworks chipset " | 466 | dev_err(&pdev->dev, "unsupported Serverworks chipset " |
465 | "(device id: %04x)\n", pdev->device); | 467 | "[%04x/%04x]\n", pdev->vendor, pdev->device); |
466 | return -ENODEV; | 468 | return -ENODEV; |
467 | } | 469 | } |
468 | 470 | ||
@@ -470,8 +472,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, | |||
470 | bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, | 472 | bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, |
471 | PCI_DEVFN(0, 1)); | 473 | PCI_DEVFN(0, 1)); |
472 | if (!bridge_dev) { | 474 | if (!bridge_dev) { |
473 | printk(KERN_INFO PFX "Detected a Serverworks chipset " | 475 | dev_info(&pdev->dev, "can't find secondary device\n"); |
474 | "but could not find the secondary device.\n"); | ||
475 | return -ENODEV; | 476 | return -ENODEV; |
476 | } | 477 | } |
477 | 478 | ||
@@ -482,8 +483,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, | |||
482 | if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { | 483 | if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { |
483 | pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); | 484 | pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); |
484 | if (temp2 != 0) { | 485 | if (temp2 != 0) { |
485 | printk(KERN_INFO PFX "Detected 64 bit aperture address, " | 486 | dev_info(&pdev->dev, "64 bit aperture address, " |
486 | "but top bits are not zero. Disabling agp\n"); | 487 | "but top bits are not zero; disabling AGP\n"); |
487 | return -ENODEV; | 488 | return -ENODEV; |
488 | } | 489 | } |
489 | serverworks_private.mm_addr_ofs = 0x18; | 490 | serverworks_private.mm_addr_ofs = 0x18; |
@@ -495,8 +496,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, | |||
495 | pci_read_config_dword(pdev, | 496 | pci_read_config_dword(pdev, |
496 | serverworks_private.mm_addr_ofs + 4, &temp2); | 497 | serverworks_private.mm_addr_ofs + 4, &temp2); |
497 | if (temp2 != 0) { | 498 | if (temp2 != 0) { |
498 | printk(KERN_INFO PFX "Detected 64 bit MMIO address, " | 499 | dev_info(&pdev->dev, "64 bit MMIO address, but top " |
499 | "but top bits are not zero. Disabling agp\n"); | 500 | "bits are not zero; disabling AGP\n"); |
500 | return -ENODEV; | 501 | return -ENODEV; |
501 | } | 502 | } |
502 | } | 503 | } |
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index d2fa3cfca02a..eef72709ec53 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c | |||
@@ -46,8 +46,8 @@ static int uninorth_fetch_size(void) | |||
46 | break; | 46 | break; |
47 | 47 | ||
48 | if (i == agp_bridge->driver->num_aperture_sizes) { | 48 | if (i == agp_bridge->driver->num_aperture_sizes) { |
49 | printk(KERN_ERR PFX "Invalid aperture size, using" | 49 | dev_err(&agp_bridge->dev->dev, "invalid aperture size, " |
50 | " default\n"); | 50 | "using default\n"); |
51 | size = 0; | 51 | size = 0; |
52 | aperture = NULL; | 52 | aperture = NULL; |
53 | } | 53 | } |
@@ -108,8 +108,8 @@ static int uninorth_configure(void) | |||
108 | 108 | ||
109 | current_size = A_SIZE_32(agp_bridge->current_size); | 109 | current_size = A_SIZE_32(agp_bridge->current_size); |
110 | 110 | ||
111 | printk(KERN_INFO PFX "configuring for size idx: %d\n", | 111 | dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n", |
112 | current_size->size_value); | 112 | current_size->size_value); |
113 | 113 | ||
114 | /* aperture size and gatt addr */ | 114 | /* aperture size and gatt addr */ |
115 | pci_write_config_dword(agp_bridge->dev, | 115 | pci_write_config_dword(agp_bridge->dev, |
@@ -197,8 +197,9 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
197 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; | 197 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; |
198 | for (i = 0; i < mem->page_count; ++i) { | 198 | for (i = 0; i < mem->page_count; ++i) { |
199 | if (gp[i]) { | 199 | if (gp[i]) { |
200 | printk("u3_insert_memory: entry 0x%x occupied (%x)\n", | 200 | dev_info(&agp_bridge->dev->dev, |
201 | i, gp[i]); | 201 | "u3_insert_memory: entry 0x%x occupied (%x)\n", |
202 | i, gp[i]); | ||
202 | return -EBUSY; | 203 | return -EBUSY; |
203 | } | 204 | } |
204 | } | 205 | } |
@@ -276,8 +277,8 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode) | |||
276 | &scratch); | 277 | &scratch); |
277 | } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000); | 278 | } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000); |
278 | if ((scratch & PCI_AGP_COMMAND_AGP) == 0) | 279 | if ((scratch & PCI_AGP_COMMAND_AGP) == 0) |
279 | printk(KERN_ERR PFX "failed to write UniNorth AGP" | 280 | dev_err(&bridge->dev->dev, "can't write UniNorth AGP " |
280 | " command register\n"); | 281 | "command register\n"); |
281 | 282 | ||
282 | if (uninorth_rev >= 0x30) { | 283 | if (uninorth_rev >= 0x30) { |
283 | /* This is an AGP V3 */ | 284 | /* This is an AGP V3 */ |
@@ -330,8 +331,8 @@ static int agp_uninorth_suspend(struct pci_dev *pdev) | |||
330 | pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd); | 331 | pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd); |
331 | if (!(cmd & PCI_AGP_COMMAND_AGP)) | 332 | if (!(cmd & PCI_AGP_COMMAND_AGP)) |
332 | continue; | 333 | continue; |
333 | printk("uninorth-agp: disabling AGP on device %s\n", | 334 | dev_info(&pdev->dev, "disabling AGP on device %s\n", |
334 | pci_name(device)); | 335 | pci_name(device)); |
335 | cmd &= ~PCI_AGP_COMMAND_AGP; | 336 | cmd &= ~PCI_AGP_COMMAND_AGP; |
336 | pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd); | 337 | pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd); |
337 | } | 338 | } |
@@ -341,8 +342,7 @@ static int agp_uninorth_suspend(struct pci_dev *pdev) | |||
341 | pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd); | 342 | pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd); |
342 | bridge->dev_private_data = (void *)(long)cmd; | 343 | bridge->dev_private_data = (void *)(long)cmd; |
343 | if (cmd & PCI_AGP_COMMAND_AGP) { | 344 | if (cmd & PCI_AGP_COMMAND_AGP) { |
344 | printk("uninorth-agp: disabling AGP on bridge %s\n", | 345 | dev_info(&pdev->dev, "disabling AGP on bridge\n"); |
345 | pci_name(pdev)); | ||
346 | cmd &= ~PCI_AGP_COMMAND_AGP; | 346 | cmd &= ~PCI_AGP_COMMAND_AGP; |
347 | pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd); | 347 | pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd); |
348 | } | 348 | } |
@@ -591,14 +591,14 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev, | |||
591 | /* probe for known chipsets */ | 591 | /* probe for known chipsets */ |
592 | for (j = 0; devs[j].chipset_name != NULL; ++j) { | 592 | for (j = 0; devs[j].chipset_name != NULL; ++j) { |
593 | if (pdev->device == devs[j].device_id) { | 593 | if (pdev->device == devs[j].device_id) { |
594 | printk(KERN_INFO PFX "Detected Apple %s chipset\n", | 594 | dev_info(&pdev->dev, "Apple %s chipset\n", |
595 | devs[j].chipset_name); | 595 | devs[j].chipset_name); |
596 | goto found; | 596 | goto found; |
597 | } | 597 | } |
598 | } | 598 | } |
599 | 599 | ||
600 | printk(KERN_ERR PFX "Unsupported Apple chipset (device id: %04x).\n", | 600 | dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n", |
601 | pdev->device); | 601 | pdev->vendor, pdev->device); |
602 | return -ENODEV; | 602 | return -ENODEV; |
603 | 603 | ||
604 | found: | 604 | found: |
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 02aac104842d..fd64137b1ab9 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
@@ -322,11 +322,10 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) | |||
322 | 322 | ||
323 | hp->tty = tty; | 323 | hp->tty = tty; |
324 | 324 | ||
325 | if (hp->ops->notifier_add) | ||
326 | rc = hp->ops->notifier_add(hp, hp->data); | ||
327 | |||
328 | spin_unlock_irqrestore(&hp->lock, flags); | 325 | spin_unlock_irqrestore(&hp->lock, flags); |
329 | 326 | ||
327 | if (hp->ops->notifier_add) | ||
328 | rc = hp->ops->notifier_add(hp, hp->data); | ||
330 | 329 | ||
331 | /* | 330 | /* |
332 | * If the notifier fails we return an error. The tty layer | 331 | * If the notifier fails we return an error. The tty layer |
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index f7feae4ebb5e..128202e18fc9 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/io.h> | 31 | #include <asm/io.h> |
32 | #include <asm/msr.h> | 32 | #include <asm/msr.h> |
33 | #include <asm/cpufeature.h> | 33 | #include <asm/cpufeature.h> |
34 | #include <asm/i387.h> | ||
34 | 35 | ||
35 | 36 | ||
36 | #define PFX KBUILD_MODNAME ": " | 37 | #define PFX KBUILD_MODNAME ": " |
@@ -67,16 +68,23 @@ enum { | |||
67 | * Another possible performance boost may come from simply buffering | 68 | * Another possible performance boost may come from simply buffering |
68 | * until we have 4 bytes, thus returning a u32 at a time, | 69 | * until we have 4 bytes, thus returning a u32 at a time, |
69 | * instead of the current u8-at-a-time. | 70 | * instead of the current u8-at-a-time. |
71 | * | ||
72 | * Padlock instructions can generate a spurious DNA fault, so | ||
73 | * we have to call them in the context of irq_ts_save/restore() | ||
70 | */ | 74 | */ |
71 | 75 | ||
72 | static inline u32 xstore(u32 *addr, u32 edx_in) | 76 | static inline u32 xstore(u32 *addr, u32 edx_in) |
73 | { | 77 | { |
74 | u32 eax_out; | 78 | u32 eax_out; |
79 | int ts_state; | ||
80 | |||
81 | ts_state = irq_ts_save(); | ||
75 | 82 | ||
76 | asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" | 83 | asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" |
77 | :"=m"(*addr), "=a"(eax_out) | 84 | :"=m"(*addr), "=a"(eax_out) |
78 | :"D"(addr), "d"(edx_in)); | 85 | :"D"(addr), "d"(edx_in)); |
79 | 86 | ||
87 | irq_ts_restore(ts_state); | ||
80 | return eax_out; | 88 | return eax_out; |
81 | } | 89 | } |
82 | 90 | ||
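The added comment documents the constraint that the new calls enforce: any VIA PadLock instruction that can raise a spurious DNA (device-not-available) fault has to be bracketed by irq_ts_save()/irq_ts_restore(). The bare pattern, stripped of the RNG specifics shown above:

	int ts_state = irq_ts_save();	/* save and clear the TS flag so the fault cannot trigger */
	/* ... issue the PadLock instruction, e.g. the XSTORE opcode above ... */
	irq_ts_restore(ts_state);	/* put CR0.TS back the way it was */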
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index f52931e1c16e..8e8afb6141f9 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2695,15 +2695,13 @@ static __devinit void default_find_bmc(void) | |||
2695 | for (i = 0; ; i++) { | 2695 | for (i = 0; ; i++) { |
2696 | if (!ipmi_defaults[i].port) | 2696 | if (!ipmi_defaults[i].port) |
2697 | break; | 2697 | break; |
2698 | |||
2699 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
2700 | if (!info) | ||
2701 | return; | ||
2702 | |||
2703 | #ifdef CONFIG_PPC_MERGE | 2698 | #ifdef CONFIG_PPC_MERGE |
2704 | if (check_legacy_ioport(ipmi_defaults[i].port)) | 2699 | if (check_legacy_ioport(ipmi_defaults[i].port)) |
2705 | continue; | 2700 | continue; |
2706 | #endif | 2701 | #endif |
2702 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
2703 | if (!info) | ||
2704 | return; | ||
2707 | 2705 | ||
2708 | info->addr_source = NULL; | 2706 | info->addr_source = NULL; |
2709 | 2707 | ||
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c index b1414507997c..3a23e7694d55 100644 --- a/drivers/char/pcmcia/ipwireless/tty.c +++ b/drivers/char/pcmcia/ipwireless/tty.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/tty_driver.h> | 29 | #include <linux/tty_driver.h> |
30 | #include <linux/tty_flip.h> | 30 | #include <linux/tty_flip.h> |
31 | #include <linux/uaccess.h> | 31 | #include <linux/uaccess.h> |
32 | #include <linux/version.h> | ||
33 | 32 | ||
34 | #include "tty.h" | 33 | #include "tty.h" |
35 | #include "network.h" | 34 | #include "network.h" |
diff --git a/drivers/char/random.c b/drivers/char/random.c index e0d0e371909c..1838aa3d24fe 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -1571,6 +1571,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) | |||
1571 | 1571 | ||
1572 | return half_md4_transform(hash, keyptr->secret); | 1572 | return half_md4_transform(hash, keyptr->secret); |
1573 | } | 1573 | } |
1574 | EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); | ||
1574 | 1575 | ||
1575 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1576 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
1576 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, | 1577 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index d9799e2bcfbf..f53d4d00faf0 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c | |||
@@ -78,7 +78,6 @@ | |||
78 | #include <linux/wait.h> | 78 | #include <linux/wait.h> |
79 | #include <linux/bcd.h> | 79 | #include <linux/bcd.h> |
80 | #include <linux/delay.h> | 80 | #include <linux/delay.h> |
81 | #include <linux/smp_lock.h> | ||
82 | #include <linux/uaccess.h> | 81 | #include <linux/uaccess.h> |
83 | 82 | ||
84 | #include <asm/current.h> | 83 | #include <asm/current.h> |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 509c89ac5bd3..08911ed66494 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -47,7 +47,6 @@ | |||
47 | 47 | ||
48 | 48 | ||
49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
50 | #include <linux/version.h> | ||
51 | #include <linux/errno.h> | 50 | #include <linux/errno.h> |
52 | #include <linux/signal.h> | 51 | #include <linux/signal.h> |
53 | #include <linux/sched.h> | 52 | #include <linux/sched.h> |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 0e6866fe0f96..daeb8f766971 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -2496,45 +2496,26 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg) | |||
2496 | } | 2496 | } |
2497 | 2497 | ||
2498 | /** | 2498 | /** |
2499 | * tiocswinsz - implement window size set ioctl | 2499 | * tty_do_resize - resize event |
2500 | * @tty; tty | 2500 | * @tty: tty being resized |
2501 | * @arg: user buffer for result | 2501 | * @real_tty: real tty (not the same as tty if using a pty/tty pair) |
2502 | * @rows: rows (character) | ||
2503 | * @cols: cols (character) | ||
2502 | * | 2504 | * |
2503 | * Copies the user idea of the window size to the kernel. Traditionally | 2505 | * Update the termios variables and send the necessary signals to |
2504 | * this is just advisory information but for the Linux console it | 2506 | * perform a terminal resize correctly |
2505 | * actually has driver level meaning and triggers a VC resize. | ||
2506 | * | ||
2507 | * Locking: | ||
2508 | * Called function use the console_sem is used to ensure we do | ||
2509 | * not try and resize the console twice at once. | ||
2510 | * The tty->termios_mutex is used to ensure we don't double | ||
2511 | * resize and get confused. Lock order - tty->termios_mutex before | ||
2512 | * console sem | ||
2513 | */ | 2507 | */ |
2514 | 2508 | ||
2515 | static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | 2509 | int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, |
2516 | struct winsize __user *arg) | 2510 | struct winsize *ws) |
2517 | { | 2511 | { |
2518 | struct winsize tmp_ws; | ||
2519 | struct pid *pgrp, *rpgrp; | 2512 | struct pid *pgrp, *rpgrp; |
2520 | unsigned long flags; | 2513 | unsigned long flags; |
2521 | 2514 | ||
2522 | if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) | 2515 | /* For a PTY we need to lock the tty side */ |
2523 | return -EFAULT; | 2516 | mutex_lock(&real_tty->termios_mutex); |
2524 | 2517 | if (!memcmp(ws, &tty->winsize, sizeof(*ws))) | |
2525 | mutex_lock(&tty->termios_mutex); | ||
2526 | if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg))) | ||
2527 | goto done; | 2518 | goto done; |
2528 | |||
2529 | #ifdef CONFIG_VT | ||
2530 | if (tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) { | ||
2531 | if (vc_lock_resize(tty->driver_data, tmp_ws.ws_col, | ||
2532 | tmp_ws.ws_row)) { | ||
2533 | mutex_unlock(&tty->termios_mutex); | ||
2534 | return -ENXIO; | ||
2535 | } | ||
2536 | } | ||
2537 | #endif | ||
2538 | /* Get the PID values and reference them so we can | 2519 | /* Get the PID values and reference them so we can |
2539 | avoid holding the tty ctrl lock while sending signals */ | 2520 | avoid holding the tty ctrl lock while sending signals */ |
2540 | spin_lock_irqsave(&tty->ctrl_lock, flags); | 2521 | spin_lock_irqsave(&tty->ctrl_lock, flags); |
@@ -2550,14 +2531,42 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | |||
2550 | put_pid(pgrp); | 2531 | put_pid(pgrp); |
2551 | put_pid(rpgrp); | 2532 | put_pid(rpgrp); |
2552 | 2533 | ||
2553 | tty->winsize = tmp_ws; | 2534 | tty->winsize = *ws; |
2554 | real_tty->winsize = tmp_ws; | 2535 | real_tty->winsize = *ws; |
2555 | done: | 2536 | done: |
2556 | mutex_unlock(&tty->termios_mutex); | 2537 | mutex_unlock(&real_tty->termios_mutex); |
2557 | return 0; | 2538 | return 0; |
2558 | } | 2539 | } |
2559 | 2540 | ||
2560 | /** | 2541 | /** |
2542 | * tiocswinsz - implement window size set ioctl | ||
2543 | * @tty: tty | ||
2544 | * @arg: user buffer for result | ||
2545 | * | ||
2546 | * Copies the user idea of the window size to the kernel. Traditionally | ||
2547 | * this is just advisory information but for the Linux console it | ||
2548 | * actually has driver level meaning and triggers a VC resize. | ||
2549 | * | ||
2550 | * Locking: | ||
2551 | * Driver dependent. The default do_resize method takes the | ||
2552 | * tty termios mutex and ctrl_lock. The console takes its own lock | ||
2553 | * then calls into the default method. | ||
2554 | */ | ||
2555 | |||
2556 | static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | ||
2557 | struct winsize __user *arg) | ||
2558 | { | ||
2559 | struct winsize tmp_ws; | ||
2560 | if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) | ||
2561 | return -EFAULT; | ||
2562 | |||
2563 | if (tty->ops->resize) | ||
2564 | return tty->ops->resize(tty, real_tty, &tmp_ws); | ||
2565 | else | ||
2566 | return tty_do_resize(tty, real_tty, &tmp_ws); | ||
2567 | } | ||
2568 | |||
2569 | /** | ||
2561 | * tioccons - allow admin to move logical console | 2570 | * tioccons - allow admin to move logical console |
2562 | * @file: the file to become console | 2571 | * @file: the file to become console |
2563 | * | 2572 | * |
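tiocswinsz() now copies the user's winsize and then either calls a driver-supplied resize method or falls back to the new generic tty_do_resize(). A sketch of how a driver opts in (example_ops is an illustrative name; the vt.c hunk below adds exactly such a .resize entry to con_ops):

	static const struct tty_operations example_ops = {
		/* ... open/close/write and friends elided ... */
		.resize	= vt_resize,	/* called as ops->resize(tty, real_tty, &ws) */
	};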
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c index ea9fc5d03b99..bf34e4597421 100644 --- a/drivers/char/tty_ioctl.c +++ b/drivers/char/tty_ioctl.c | |||
@@ -937,12 +937,14 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file, | |||
937 | return 0; | 937 | return 0; |
938 | #endif | 938 | #endif |
939 | case TIOCGSOFTCAR: | 939 | case TIOCGSOFTCAR: |
940 | return put_user(C_CLOCAL(tty) ? 1 : 0, | 940 | /* FIXME: for correctness we may need to take the termios |
941 | lock here - review */ | ||
942 | return put_user(C_CLOCAL(real_tty) ? 1 : 0, | ||
941 | (int __user *)arg); | 943 | (int __user *)arg); |
942 | case TIOCSSOFTCAR: | 944 | case TIOCSSOFTCAR: |
943 | if (get_user(arg, (unsigned int __user *) arg)) | 945 | if (get_user(arg, (unsigned int __user *) arg)) |
944 | return -EFAULT; | 946 | return -EFAULT; |
945 | return tty_change_softcar(tty, arg); | 947 | return tty_change_softcar(real_tty, arg); |
946 | default: | 948 | default: |
947 | return -ENOIOCTLCMD; | 949 | return -ENOIOCTLCMD; |
948 | } | 950 | } |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 1bc00c9d860d..60359c360912 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -803,7 +803,25 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, | |||
803 | */ | 803 | */ |
804 | #define VC_RESIZE_MAXCOL (32767) | 804 | #define VC_RESIZE_MAXCOL (32767) |
805 | #define VC_RESIZE_MAXROW (32767) | 805 | #define VC_RESIZE_MAXROW (32767) |
806 | int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) | 806 | |
807 | /** | ||
808 | * vc_do_resize - resizing method for the tty | ||
809 | * @tty: tty being resized | ||
810 | * @real_tty: real tty (different to tty if a pty/tty pair) | ||
811 | * @vc: virtual console private data | ||
812 | * @cols: columns | ||
813 | * @lines: lines | ||
814 | * | ||
815 | * Resize a virtual console, clipping according to the actual constraints. | ||
816 | * If the caller passes a tty structure then update the termios winsize | ||
817 | * information and perform any necessary signal handling. | ||
818 | * | ||
819 | * Caller must hold the console semaphore. Takes the termios mutex and | ||
820 | * ctrl_lock of the tty IFF a tty is passed. | ||
821 | */ | ||
822 | |||
823 | static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, | ||
824 | struct vc_data *vc, unsigned int cols, unsigned int lines) | ||
807 | { | 825 | { |
808 | unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; | 826 | unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; |
809 | unsigned int old_cols, old_rows, old_row_size, old_screen_size; | 827 | unsigned int old_cols, old_rows, old_row_size, old_screen_size; |
@@ -907,24 +925,15 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) | |||
907 | gotoxy(vc, vc->vc_x, vc->vc_y); | 925 | gotoxy(vc, vc->vc_x, vc->vc_y); |
908 | save_cur(vc); | 926 | save_cur(vc); |
909 | 927 | ||
910 | if (vc->vc_tty) { | 928 | if (tty) { |
911 | struct winsize ws, *cws = &vc->vc_tty->winsize; | 929 | /* Rewrite the requested winsize data with the actual |
912 | struct pid *pgrp = NULL; | 930 | resulting sizes */ |
913 | 931 | struct winsize ws; | |
914 | memset(&ws, 0, sizeof(ws)); | 932 | memset(&ws, 0, sizeof(ws)); |
915 | ws.ws_row = vc->vc_rows; | 933 | ws.ws_row = vc->vc_rows; |
916 | ws.ws_col = vc->vc_cols; | 934 | ws.ws_col = vc->vc_cols; |
917 | ws.ws_ypixel = vc->vc_scan_lines; | 935 | ws.ws_ypixel = vc->vc_scan_lines; |
918 | 936 | tty_do_resize(tty, real_tty, &ws); | |
919 | spin_lock_irq(&vc->vc_tty->ctrl_lock); | ||
920 | if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col)) | ||
921 | pgrp = get_pid(vc->vc_tty->pgrp); | ||
922 | spin_unlock_irq(&vc->vc_tty->ctrl_lock); | ||
923 | if (pgrp) { | ||
924 | kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1); | ||
925 | put_pid(pgrp); | ||
926 | } | ||
927 | *cws = ws; | ||
928 | } | 937 | } |
929 | 938 | ||
930 | if (CON_IS_VISIBLE(vc)) | 939 | if (CON_IS_VISIBLE(vc)) |
@@ -932,14 +941,47 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) | |||
932 | return err; | 941 | return err; |
933 | } | 942 | } |
934 | 943 | ||
935 | int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) | 944 | /** |
945 | * vc_resize - resize a VT | ||
946 | * @vc: virtual console | ||
947 | * @cols: columns | ||
948 | * @rows: rows | ||
949 | * | ||
950 | * Resize a virtual console as seen from the console end of things. We | ||
951 | * use the common vc_do_resize method to update the structures. The | ||
952 | * caller must hold the console sem to protect console internals and | ||
953 | * vc->vc_tty | ||
954 | */ | ||
955 | |||
956 | int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows) | ||
957 | { | ||
958 | return vc_do_resize(vc->vc_tty, vc->vc_tty, vc, cols, rows); | ||
959 | } | ||
960 | |||
961 | /** | ||
962 | * vt_resize - resize a VT | ||
963 | * @tty: tty to resize | ||
964 | * @real_tty: tty if a pty/tty pair | ||
965 | * @ws: winsize attributes | ||
966 | * | ||
967 | * Resize a virtual terminal. This is called by the tty layer as we | ||
968 | * register our own handler for resizing. The common helper does all | ||
969 | * the actual work. | ||
970 | * | ||
971 | * Takes the console sem; the called methods then take the tty | ||
972 | * termios_mutex and the tty ctrl_lock in that order. | ||
973 | */ | ||
974 | |||
975 | int vt_resize(struct tty_struct *tty, struct tty_struct *real_tty, | ||
976 | struct winsize *ws) | ||
936 | { | 977 | { |
937 | int rc; | 978 | struct vc_data *vc = tty->driver_data; |
979 | int ret; | ||
938 | 980 | ||
939 | acquire_console_sem(); | 981 | acquire_console_sem(); |
940 | rc = vc_resize(vc, cols, lines); | 982 | ret = vc_do_resize(tty, real_tty, vc, ws->ws_col, ws->ws_row); |
941 | release_console_sem(); | 983 | release_console_sem(); |
942 | return rc; | 984 | return ret; |
943 | } | 985 | } |
944 | 986 | ||
945 | void vc_deallocate(unsigned int currcons) | 987 | void vc_deallocate(unsigned int currcons) |
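With vc_lock_resize gone, callers outside the VT code are now expected to take the console semaphore themselves around vc_resize(). A minimal sketch of that calling pattern (illustrative only; vc, cols and rows are placeholders, mirroring what vt_resize() and the VT_RESIZE ioctl below now do):

	/* caller holds the console sem, which protects console internals and vc->vc_tty */
	acquire_console_sem();
	rc = vc_resize(vc, cols, rows);		/* wraps vc_do_resize(vc->vc_tty, ...) */
	release_console_sem();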
@@ -2907,6 +2949,7 @@ static const struct tty_operations con_ops = { | |||
2907 | .start = con_start, | 2949 | .start = con_start, |
2908 | .throttle = con_throttle, | 2950 | .throttle = con_throttle, |
2909 | .unthrottle = con_unthrottle, | 2951 | .unthrottle = con_unthrottle, |
2952 | .resize = vt_resize, | ||
2910 | }; | 2953 | }; |
2911 | 2954 | ||
2912 | int __init vty_init(void) | 2955 | int __init vty_init(void) |
@@ -4061,7 +4104,6 @@ EXPORT_SYMBOL(default_blu); | |||
4061 | EXPORT_SYMBOL(update_region); | 4104 | EXPORT_SYMBOL(update_region); |
4062 | EXPORT_SYMBOL(redraw_screen); | 4105 | EXPORT_SYMBOL(redraw_screen); |
4063 | EXPORT_SYMBOL(vc_resize); | 4106 | EXPORT_SYMBOL(vc_resize); |
4064 | EXPORT_SYMBOL(vc_lock_resize); | ||
4065 | EXPORT_SYMBOL(fg_console); | 4107 | EXPORT_SYMBOL(fg_console); |
4066 | EXPORT_SYMBOL(console_blank_hook); | 4108 | EXPORT_SYMBOL(console_blank_hook); |
4067 | EXPORT_SYMBOL(console_blanked); | 4109 | EXPORT_SYMBOL(console_blanked); |
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 3211afd9d57e..c904e9ad4a71 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c | |||
@@ -947,14 +947,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
947 | get_user(cc, &vtsizes->v_cols)) | 947 | get_user(cc, &vtsizes->v_cols)) |
948 | ret = -EFAULT; | 948 | ret = -EFAULT; |
949 | else { | 949 | else { |
950 | acquire_console_sem(); | ||
950 | for (i = 0; i < MAX_NR_CONSOLES; i++) { | 951 | for (i = 0; i < MAX_NR_CONSOLES; i++) { |
951 | vc = vc_cons[i].d; | 952 | vc = vc_cons[i].d; |
952 | 953 | ||
953 | if (vc) { | 954 | if (vc) { |
954 | vc->vc_resize_user = 1; | 955 | vc->vc_resize_user = 1; |
955 | vc_lock_resize(vc_cons[i].d, cc, ll); | 956 | vc_resize(vc_cons[i].d, cc, ll); |
956 | } | 957 | } |
957 | } | 958 | } |
959 | release_console_sem(); | ||
958 | } | 960 | } |
959 | break; | 961 | break; |
960 | } | 962 | } |
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h index c5b1840906b2..8b0252bf06e2 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.h +++ b/drivers/char/xilinx_hwicap/buffer_icap.h | |||
@@ -38,7 +38,6 @@ | |||
38 | 38 | ||
39 | #include <linux/types.h> | 39 | #include <linux/types.h> |
40 | #include <linux/cdev.h> | 40 | #include <linux/cdev.h> |
41 | #include <linux/version.h> | ||
42 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
43 | 42 | ||
44 | #include <asm/io.h> | 43 | #include <asm/io.h> |
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h index ffabd3ba2bd8..62bda453c90b 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.h +++ b/drivers/char/xilinx_hwicap/fifo_icap.h | |||
@@ -38,7 +38,6 @@ | |||
38 | 38 | ||
39 | #include <linux/types.h> | 39 | #include <linux/types.h> |
40 | #include <linux/cdev.h> | 40 | #include <linux/cdev.h> |
41 | #include <linux/version.h> | ||
42 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
43 | 42 | ||
44 | #include <asm/io.h> | 43 | #include <asm/io.h> |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 8bfee5fb7223..278c9857bcf5 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -74,7 +74,6 @@ | |||
74 | * currently programmed in the FPGA. | 74 | * currently programmed in the FPGA. |
75 | */ | 75 | */ |
76 | 76 | ||
77 | #include <linux/version.h> | ||
78 | #include <linux/module.h> | 77 | #include <linux/module.h> |
79 | #include <linux/kernel.h> | 78 | #include <linux/kernel.h> |
80 | #include <linux/types.h> | 79 | #include <linux/types.h> |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index 1f9c8b082dbe..24d0d9b938fb 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h | |||
@@ -38,7 +38,6 @@ | |||
38 | 38 | ||
39 | #include <linux/types.h> | 39 | #include <linux/types.h> |
40 | #include <linux/cdev.h> | 40 | #include <linux/cdev.h> |
41 | #include <linux/version.h> | ||
42 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
43 | 42 | ||
44 | #include <asm/io.h> | 43 | #include <asm/io.h> |
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index ba7b9a6b17a1..a4bec3f919aa 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | 67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); |
68 | struct ladder_device_state *last_state; | 68 | struct ladder_device_state *last_state; |
69 | int last_residency, last_idx = ldev->last_state_idx; | 69 | int last_residency, last_idx = ldev->last_state_idx; |
70 | int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); | ||
70 | 71 | ||
71 | if (unlikely(!ldev)) | 72 | if (unlikely(!ldev)) |
72 | return 0; | 73 | return 0; |
73 | 74 | ||
75 | /* Special case when user has set very strict latency requirement */ | ||
76 | if (unlikely(latency_req == 0)) { | ||
77 | ladder_do_selection(ldev, last_idx, 0); | ||
78 | return 0; | ||
79 | } | ||
80 | |||
74 | last_state = &ldev->states[last_idx]; | 81 | last_state = &ldev->states[last_idx]; |
75 | 82 | ||
76 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) | 83 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) |
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
81 | /* consider promotion */ | 88 | /* consider promotion */ |
82 | if (last_idx < dev->state_count - 1 && | 89 | if (last_idx < dev->state_count - 1 && |
83 | last_residency > last_state->threshold.promotion_time && | 90 | last_residency > last_state->threshold.promotion_time && |
84 | dev->states[last_idx + 1].exit_latency <= | 91 | dev->states[last_idx + 1].exit_latency <= latency_req) { |
85 | pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { | ||
86 | last_state->stats.promotion_count++; | 92 | last_state->stats.promotion_count++; |
87 | last_state->stats.demotion_count = 0; | 93 | last_state->stats.demotion_count = 0; |
88 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | 94 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { |
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
92 | } | 98 | } |
93 | 99 | ||
94 | /* consider demotion */ | 100 | /* consider demotion */ |
95 | if (last_idx > 0 && | 101 | if (last_idx > CPUIDLE_DRIVER_STATE_START && |
102 | dev->states[last_idx].exit_latency > latency_req) { | ||
103 | int i; | ||
104 | |||
105 | for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { | ||
106 | if (dev->states[i].exit_latency <= latency_req) | ||
107 | break; | ||
108 | } | ||
109 | ladder_do_selection(ldev, last_idx, i); | ||
110 | return i; | ||
111 | } | ||
112 | |||
113 | if (last_idx > CPUIDLE_DRIVER_STATE_START && | ||
96 | last_residency < last_state->threshold.demotion_time) { | 114 | last_residency < last_state->threshold.demotion_time) { |
97 | last_state->stats.demotion_count++; | 115 | last_state->stats.demotion_count++; |
98 | last_state->stats.promotion_count = 0; | 116 | last_state->stats.promotion_count = 0; |
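The new demotion path scans downward for the deepest state whose exit latency still satisfies the PM QoS constraint. The same selection logic in isolation (a hedged sketch; the helper name is illustrative, and the caller is assumed to guarantee last_idx > start, as the governor does):

	/* pick the deepest state below last_idx whose exit latency fits latency_req */
	static int fit_latency(struct cpuidle_state *states, int last_idx,
			       int start, int latency_req)
	{
		int i;

		for (i = last_idx - 1; i > start; i--)
			if (states[i].exit_latency <= latency_req)
				break;
		return i;	/* falls back to 'start' if nothing deeper fits */
	}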
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
117 | struct ladder_device_state *lstate; | 135 | struct ladder_device_state *lstate; |
118 | struct cpuidle_state *state; | 136 | struct cpuidle_state *state; |
119 | 137 | ||
120 | ldev->last_state_idx = 0; | 138 | ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; |
121 | 139 | ||
122 | for (i = 0; i < dev->state_count; i++) { | 140 | for (i = 0; i < dev->state_count; i++) { |
123 | state = &dev->states[i]; | 141 | state = &dev->states[i]; |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 78d77c5dc35c..8d7cf3f31450 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices); | |||
34 | static int menu_select(struct cpuidle_device *dev) | 34 | static int menu_select(struct cpuidle_device *dev) |
35 | { | 35 | { |
36 | struct menu_device *data = &__get_cpu_var(menu_devices); | 36 | struct menu_device *data = &__get_cpu_var(menu_devices); |
37 | int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); | ||
37 | int i; | 38 | int i; |
38 | 39 | ||
40 | /* Special case when user has set very strict latency requirement */ | ||
41 | if (unlikely(latency_req == 0)) { | ||
42 | data->last_state_idx = 0; | ||
43 | return 0; | ||
44 | } | ||
45 | |||
39 | /* determine the expected residency time */ | 46 | /* determine the expected residency time */ |
40 | data->expected_us = | 47 | data->expected_us = |
41 | (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; | 48 | (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; |
42 | 49 | ||
43 | /* find the deepest idle state that satisfies our constraints */ | 50 | /* find the deepest idle state that satisfies our constraints */ |
44 | for (i = 1; i < dev->state_count; i++) { | 51 | for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) { |
45 | struct cpuidle_state *s = &dev->states[i]; | 52 | struct cpuidle_state *s = &dev->states[i]; |
46 | 53 | ||
47 | if (s->target_residency > data->expected_us) | 54 | if (s->target_residency > data->expected_us) |
48 | break; | 55 | break; |
49 | if (s->target_residency > data->predicted_us) | 56 | if (s->target_residency > data->predicted_us) |
50 | break; | 57 | break; |
51 | if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) | 58 | if (s->exit_latency > latency_req) |
52 | break; | 59 | break; |
53 | } | 60 | } |
54 | 61 | ||
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev) | |||
67 | { | 74 | { |
68 | struct menu_device *data = &__get_cpu_var(menu_devices); | 75 | struct menu_device *data = &__get_cpu_var(menu_devices); |
69 | int last_idx = data->last_state_idx; | 76 | int last_idx = data->last_state_idx; |
70 | unsigned int measured_us = | 77 | unsigned int last_idle_us = cpuidle_get_last_residency(dev); |
71 | cpuidle_get_last_residency(dev) + data->elapsed_us; | ||
72 | struct cpuidle_state *target = &dev->states[last_idx]; | 78 | struct cpuidle_state *target = &dev->states[last_idx]; |
79 | unsigned int measured_us; | ||
73 | 80 | ||
74 | /* | 81 | /* |
75 | * Ugh, this idle state doesn't support residency measurements, so we | 82 | * Ugh, this idle state doesn't support residency measurements, so we |
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev) | |||
77 | * for one full standard timer tick. However, be aware that this | 84 | * for one full standard timer tick. However, be aware that this |
78 | * could potentially result in a suboptimal state transition. | 85 | * could potentially result in a suboptimal state transition. |
79 | */ | 86 | */ |
80 | if (!(target->flags & CPUIDLE_FLAG_TIME_VALID)) | 87 | if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) |
81 | measured_us = USEC_PER_SEC / HZ; | 88 | last_idle_us = USEC_PER_SEC / HZ; |
89 | |||
90 | /* | ||
91 | * measured_us and elapsed_us are the cumulative idle time since the | ||
92 | * last time we were woken out of idle by an interrupt. | ||
93 | */ | ||
94 | if (data->elapsed_us <= data->elapsed_us + last_idle_us) | ||
95 | measured_us = data->elapsed_us + last_idle_us; | ||
96 | else | ||
97 | measured_us = -1; | ||
98 | |||
99 | /* Predict time until next break event */ | ||
100 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
82 | 101 | ||
83 | /* Predict time remaining until next break event */ | 102 | if (last_idle_us + BREAK_FUZZ < |
84 | if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) { | 103 | data->expected_us - target->exit_latency) { |
85 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
86 | data->last_measured_us = measured_us; | 104 | data->last_measured_us = measured_us; |
87 | data->elapsed_us = 0; | 105 | data->elapsed_us = 0; |
88 | } else { | 106 | } else { |
89 | if (data->elapsed_us < data->elapsed_us + measured_us) | 107 | data->elapsed_us = measured_us; |
90 | data->elapsed_us = measured_us; | ||
91 | else | ||
92 | data->elapsed_us = -1; | ||
93 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
94 | } | 108 | } |
95 | } | 109 | } |
96 | 110 | ||
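The idle-time accumulation is now done with an explicit wrap check so the running total saturates instead of overflowing. The same idea reduced to a helper (a sketch, not the governor's code):

	/* saturating add: if a + b wrapped around, clamp to the maximum value */
	static unsigned int sat_add_us(unsigned int a, unsigned int b)
	{
		return (a <= a + b) ? a + b : (unsigned int)-1;
	}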
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 31a0e0b455b6..97b003839fb6 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -21,8 +21,8 @@ static int __init cpuidle_sysfs_setup(char *unused) | |||
21 | } | 21 | } |
22 | __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); | 22 | __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); |
23 | 23 | ||
24 | static ssize_t show_available_governors(struct sys_device *dev, | 24 | static ssize_t show_available_governors(struct sysdev_class *class, |
25 | struct sysdev_attribute *attr, char *buf) | 25 | char *buf) |
26 | { | 26 | { |
27 | ssize_t i = 0; | 27 | ssize_t i = 0; |
28 | struct cpuidle_governor *tmp; | 28 | struct cpuidle_governor *tmp; |
@@ -40,8 +40,8 @@ out: | |||
40 | return i; | 40 | return i; |
41 | } | 41 | } |
42 | 42 | ||
43 | static ssize_t show_current_driver(struct sys_device *dev, | 43 | static ssize_t show_current_driver(struct sysdev_class *class, |
44 | struct sysdev_attribute *attr, char *buf) | 44 | char *buf) |
45 | { | 45 | { |
46 | ssize_t ret; | 46 | ssize_t ret; |
47 | 47 | ||
@@ -55,8 +55,8 @@ static ssize_t show_current_driver(struct sys_device *dev, | |||
55 | return ret; | 55 | return ret; |
56 | } | 56 | } |
57 | 57 | ||
58 | static ssize_t show_current_governor(struct sys_device *dev, | 58 | static ssize_t show_current_governor(struct sysdev_class *class, |
59 | struct sysdev_attribute *attr, char *buf) | 59 | char *buf) |
60 | { | 60 | { |
61 | ssize_t ret; | 61 | ssize_t ret; |
62 | 62 | ||
@@ -70,9 +70,8 @@ static ssize_t show_current_governor(struct sys_device *dev, | |||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | static ssize_t store_current_governor(struct sys_device *dev, | 73 | static ssize_t store_current_governor(struct sysdev_class *class, |
74 | struct sysdev_attribute *attr, | 74 | const char *buf, size_t count) |
75 | const char *buf, size_t count) | ||
76 | { | 75 | { |
77 | char gov_name[CPUIDLE_NAME_LEN]; | 76 | char gov_name[CPUIDLE_NAME_LEN]; |
78 | int ret = -EINVAL; | 77 | int ret = -EINVAL; |
@@ -104,8 +103,9 @@ static ssize_t store_current_governor(struct sys_device *dev, | |||
104 | return count; | 103 | return count; |
105 | } | 104 | } |
106 | 105 | ||
107 | static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL); | 106 | static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL); |
108 | static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL); | 107 | static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor, |
108 | NULL); | ||
109 | 109 | ||
110 | static struct attribute *cpuclass_default_attrs[] = { | 110 | static struct attribute *cpuclass_default_attrs[] = { |
111 | &attr_current_driver.attr, | 111 | &attr_current_driver.attr, |
@@ -113,9 +113,10 @@ static struct attribute *cpuclass_default_attrs[] = { | |||
113 | NULL | 113 | NULL |
114 | }; | 114 | }; |
115 | 115 | ||
116 | static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL); | 116 | static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors, |
117 | static SYSDEV_ATTR(current_governor, 0644, show_current_governor, | 117 | NULL); |
118 | store_current_governor); | 118 | static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor, |
119 | store_current_governor); | ||
119 | 120 | ||
120 | static struct attribute *cpuclass_switch_attrs[] = { | 121 | static struct attribute *cpuclass_switch_attrs[] = { |
121 | &attr_available_governors.attr, | 122 | &attr_available_governors.attr, |
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 54a2a166e566..bf2917d197a0 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <asm/byteorder.h> | 18 | #include <asm/byteorder.h> |
19 | #include <asm/i387.h> | ||
19 | #include "padlock.h" | 20 | #include "padlock.h" |
20 | 21 | ||
21 | /* Control word. */ | 22 | /* Control word. */ |
@@ -141,6 +142,12 @@ static inline void padlock_reset_key(void) | |||
141 | asm volatile ("pushfl; popfl"); | 142 | asm volatile ("pushfl; popfl"); |
142 | } | 143 | } |
143 | 144 | ||
145 | /* | ||
146 | * While the padlock instructions don't use FP/SSE registers, they | ||
147 | * generate a spurious DNA fault when cr0.ts is '1'. These instructions | ||
148 | * should be used only inside the irq_ts_save/restore() context | ||
149 | */ | ||
150 | |||
144 | static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, | 151 | static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, |
145 | void *control_word) | 152 | void *control_word) |
146 | { | 153 | { |
@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, | |||
205 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 212 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
206 | { | 213 | { |
207 | struct aes_ctx *ctx = aes_ctx(tfm); | 214 | struct aes_ctx *ctx = aes_ctx(tfm); |
215 | int ts_state; | ||
208 | padlock_reset_key(); | 216 | padlock_reset_key(); |
217 | |||
218 | ts_state = irq_ts_save(); | ||
209 | aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); | 219 | aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); |
220 | irq_ts_restore(ts_state); | ||
210 | } | 221 | } |
211 | 222 | ||
212 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 223 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
213 | { | 224 | { |
214 | struct aes_ctx *ctx = aes_ctx(tfm); | 225 | struct aes_ctx *ctx = aes_ctx(tfm); |
226 | int ts_state; | ||
215 | padlock_reset_key(); | 227 | padlock_reset_key(); |
228 | |||
229 | ts_state = irq_ts_save(); | ||
216 | aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); | 230 | aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); |
231 | irq_ts_restore(ts_state); | ||
217 | } | 232 | } |
218 | 233 | ||
219 | static struct crypto_alg aes_alg = { | 234 | static struct crypto_alg aes_alg = { |
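Every caller in this file gets the same treatment: bracket the PadLock instruction with irq_ts_save()/irq_ts_restore() so a set CR0.TS cannot raise a spurious device-not-available fault mid-operation. Condensed from the encrypt path above (the surrounding function is elided):

	int ts_state;

	padlock_reset_key();
	ts_state = irq_ts_save();	/* clear CR0.TS around the xcrypt instruction */
	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
	irq_ts_restore(ts_state);	/* restore TS to its previous state */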
@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, | |||
244 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 259 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
245 | struct blkcipher_walk walk; | 260 | struct blkcipher_walk walk; |
246 | int err; | 261 | int err; |
262 | int ts_state; | ||
247 | 263 | ||
248 | padlock_reset_key(); | 264 | padlock_reset_key(); |
249 | 265 | ||
250 | blkcipher_walk_init(&walk, dst, src, nbytes); | 266 | blkcipher_walk_init(&walk, dst, src, nbytes); |
251 | err = blkcipher_walk_virt(desc, &walk); | 267 | err = blkcipher_walk_virt(desc, &walk); |
252 | 268 | ||
269 | ts_state = irq_ts_save(); | ||
253 | while ((nbytes = walk.nbytes)) { | 270 | while ((nbytes = walk.nbytes)) { |
254 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, | 271 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, |
255 | ctx->E, &ctx->cword.encrypt, | 272 | ctx->E, &ctx->cword.encrypt, |
@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, | |||
257 | nbytes &= AES_BLOCK_SIZE - 1; | 274 | nbytes &= AES_BLOCK_SIZE - 1; |
258 | err = blkcipher_walk_done(desc, &walk, nbytes); | 275 | err = blkcipher_walk_done(desc, &walk, nbytes); |
259 | } | 276 | } |
277 | irq_ts_restore(ts_state); | ||
260 | 278 | ||
261 | return err; | 279 | return err; |
262 | } | 280 | } |
@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, | |||
268 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 286 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
269 | struct blkcipher_walk walk; | 287 | struct blkcipher_walk walk; |
270 | int err; | 288 | int err; |
289 | int ts_state; | ||
271 | 290 | ||
272 | padlock_reset_key(); | 291 | padlock_reset_key(); |
273 | 292 | ||
274 | blkcipher_walk_init(&walk, dst, src, nbytes); | 293 | blkcipher_walk_init(&walk, dst, src, nbytes); |
275 | err = blkcipher_walk_virt(desc, &walk); | 294 | err = blkcipher_walk_virt(desc, &walk); |
276 | 295 | ||
296 | ts_state = irq_ts_save(); | ||
277 | while ((nbytes = walk.nbytes)) { | 297 | while ((nbytes = walk.nbytes)) { |
278 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, | 298 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, |
279 | ctx->D, &ctx->cword.decrypt, | 299 | ctx->D, &ctx->cword.decrypt, |
@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, | |||
281 | nbytes &= AES_BLOCK_SIZE - 1; | 301 | nbytes &= AES_BLOCK_SIZE - 1; |
282 | err = blkcipher_walk_done(desc, &walk, nbytes); | 302 | err = blkcipher_walk_done(desc, &walk, nbytes); |
283 | } | 303 | } |
284 | 304 | irq_ts_restore(ts_state); | |
285 | return err; | 305 | return err; |
286 | } | 306 | } |
287 | 307 | ||
@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, | |||
314 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 334 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
315 | struct blkcipher_walk walk; | 335 | struct blkcipher_walk walk; |
316 | int err; | 336 | int err; |
337 | int ts_state; | ||
317 | 338 | ||
318 | padlock_reset_key(); | 339 | padlock_reset_key(); |
319 | 340 | ||
320 | blkcipher_walk_init(&walk, dst, src, nbytes); | 341 | blkcipher_walk_init(&walk, dst, src, nbytes); |
321 | err = blkcipher_walk_virt(desc, &walk); | 342 | err = blkcipher_walk_virt(desc, &walk); |
322 | 343 | ||
344 | ts_state = irq_ts_save(); | ||
323 | while ((nbytes = walk.nbytes)) { | 345 | while ((nbytes = walk.nbytes)) { |
324 | u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, | 346 | u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, |
325 | walk.dst.virt.addr, ctx->E, | 347 | walk.dst.virt.addr, ctx->E, |
@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, | |||
329 | nbytes &= AES_BLOCK_SIZE - 1; | 351 | nbytes &= AES_BLOCK_SIZE - 1; |
330 | err = blkcipher_walk_done(desc, &walk, nbytes); | 352 | err = blkcipher_walk_done(desc, &walk, nbytes); |
331 | } | 353 | } |
354 | irq_ts_restore(ts_state); | ||
332 | 355 | ||
333 | return err; | 356 | return err; |
334 | } | 357 | } |
@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, | |||
340 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 363 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
341 | struct blkcipher_walk walk; | 364 | struct blkcipher_walk walk; |
342 | int err; | 365 | int err; |
366 | int ts_state; | ||
343 | 367 | ||
344 | padlock_reset_key(); | 368 | padlock_reset_key(); |
345 | 369 | ||
346 | blkcipher_walk_init(&walk, dst, src, nbytes); | 370 | blkcipher_walk_init(&walk, dst, src, nbytes); |
347 | err = blkcipher_walk_virt(desc, &walk); | 371 | err = blkcipher_walk_virt(desc, &walk); |
348 | 372 | ||
373 | ts_state = irq_ts_save(); | ||
349 | while ((nbytes = walk.nbytes)) { | 374 | while ((nbytes = walk.nbytes)) { |
350 | padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, | 375 | padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, |
351 | ctx->D, walk.iv, &ctx->cword.decrypt, | 376 | ctx->D, walk.iv, &ctx->cword.decrypt, |
@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, | |||
354 | err = blkcipher_walk_done(desc, &walk, nbytes); | 379 | err = blkcipher_walk_done(desc, &walk, nbytes); |
355 | } | 380 | } |
356 | 381 | ||
382 | irq_ts_restore(ts_state); | ||
357 | return err; | 383 | return err; |
358 | } | 384 | } |
359 | 385 | ||
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 40d5680fa013..a7fbadebf623 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
25 | #include <asm/i387.h> | ||
25 | #include "padlock.h" | 26 | #include "padlock.h" |
26 | 27 | ||
27 | #define SHA1_DEFAULT_FALLBACK "sha1-generic" | 28 | #define SHA1_DEFAULT_FALLBACK "sha1-generic" |
@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count) | |||
102 | * PadLock microcode needs it that big. */ | 103 | * PadLock microcode needs it that big. */ |
103 | char buf[128+16]; | 104 | char buf[128+16]; |
104 | char *result = NEAREST_ALIGNED(buf); | 105 | char *result = NEAREST_ALIGNED(buf); |
106 | int ts_state; | ||
105 | 107 | ||
106 | ((uint32_t *)result)[0] = SHA1_H0; | 108 | ((uint32_t *)result)[0] = SHA1_H0; |
107 | ((uint32_t *)result)[1] = SHA1_H1; | 109 | ((uint32_t *)result)[1] = SHA1_H1; |
@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count) | |||
109 | ((uint32_t *)result)[3] = SHA1_H3; | 111 | ((uint32_t *)result)[3] = SHA1_H3; |
110 | ((uint32_t *)result)[4] = SHA1_H4; | 112 | ((uint32_t *)result)[4] = SHA1_H4; |
111 | 113 | ||
114 | /* prevent taking the spurious DNA fault with padlock. */ | ||
115 | ts_state = irq_ts_save(); | ||
112 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ | 116 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ |
113 | : "+S"(in), "+D"(result) | 117 | : "+S"(in), "+D"(result) |
114 | : "c"(count), "a"(0)); | 118 | : "c"(count), "a"(0)); |
119 | irq_ts_restore(ts_state); | ||
115 | 120 | ||
116 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); | 121 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); |
117 | } | 122 | } |
@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count) | |||
123 | * PadLock microcode needs it that big. */ | 128 | * PadLock microcode needs it that big. */ |
124 | char buf[128+16]; | 129 | char buf[128+16]; |
125 | char *result = NEAREST_ALIGNED(buf); | 130 | char *result = NEAREST_ALIGNED(buf); |
131 | int ts_state; | ||
126 | 132 | ||
127 | ((uint32_t *)result)[0] = SHA256_H0; | 133 | ((uint32_t *)result)[0] = SHA256_H0; |
128 | ((uint32_t *)result)[1] = SHA256_H1; | 134 | ((uint32_t *)result)[1] = SHA256_H1; |
@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count) | |||
133 | ((uint32_t *)result)[6] = SHA256_H6; | 139 | ((uint32_t *)result)[6] = SHA256_H6; |
134 | ((uint32_t *)result)[7] = SHA256_H7; | 140 | ((uint32_t *)result)[7] = SHA256_H7; |
135 | 141 | ||
142 | /* prevent taking the spurious DNA fault with padlock. */ | ||
143 | ts_state = irq_ts_save(); | ||
136 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ | 144 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ |
137 | : "+S"(in), "+D"(result) | 145 | : "+S"(in), "+D"(result) |
138 | : "c"(count), "a"(0)); | 146 | : "c"(count), "a"(0)); |
147 | irq_ts_restore(ts_state); | ||
139 | 148 | ||
140 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); | 149 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); |
141 | } | 150 | } |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 681c15f42083..ee827a7f7c6a 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -96,6 +96,9 @@ struct talitos_private { | |||
96 | unsigned int exec_units; | 96 | unsigned int exec_units; |
97 | unsigned int desc_types; | 97 | unsigned int desc_types; |
98 | 98 | ||
99 | /* SEC Compatibility info */ | ||
100 | unsigned long features; | ||
101 | |||
99 | /* next channel to be assigned next incoming descriptor */ | 102 | /* next channel to be assigned next incoming descriptor */ |
100 | atomic_t last_chan; | 103 | atomic_t last_chan; |
101 | 104 | ||
@@ -133,6 +136,9 @@ struct talitos_private { | |||
133 | struct hwrng rng; | 136 | struct hwrng rng; |
134 | }; | 137 | }; |
135 | 138 | ||
139 | /* .features flag */ | ||
140 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | ||
141 | |||
136 | /* | 142 | /* |
137 | * map virtual single (contiguous) pointer to h/w descriptor pointer | 143 | * map virtual single (contiguous) pointer to h/w descriptor pointer |
138 | */ | 144 | */ |
@@ -785,7 +791,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
785 | /* copy the generated ICV to dst */ | 791 | /* copy the generated ICV to dst */ |
786 | if (edesc->dma_len) { | 792 | if (edesc->dma_len) { |
787 | icvdata = &edesc->link_tbl[edesc->src_nents + | 793 | icvdata = &edesc->link_tbl[edesc->src_nents + |
788 | edesc->dst_nents + 1]; | 794 | edesc->dst_nents + 2]; |
789 | sg = sg_last(areq->dst, edesc->dst_nents); | 795 | sg = sg_last(areq->dst, edesc->dst_nents); |
790 | memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, | 796 | memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, |
791 | icvdata, ctx->authsize); | 797 | icvdata, ctx->authsize); |
@@ -814,7 +820,7 @@ static void ipsec_esp_decrypt_done(struct device *dev, | |||
814 | /* auth check */ | 820 | /* auth check */ |
815 | if (edesc->dma_len) | 821 | if (edesc->dma_len) |
816 | icvdata = &edesc->link_tbl[edesc->src_nents + | 822 | icvdata = &edesc->link_tbl[edesc->src_nents + |
817 | edesc->dst_nents + 1]; | 823 | edesc->dst_nents + 2]; |
818 | else | 824 | else |
819 | icvdata = &edesc->link_tbl[0]; | 825 | icvdata = &edesc->link_tbl[0]; |
820 | 826 | ||
@@ -921,10 +927,30 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
921 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, | 927 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, |
922 | &edesc->link_tbl[0]); | 928 | &edesc->link_tbl[0]); |
923 | if (sg_count > 1) { | 929 | if (sg_count > 1) { |
930 | struct talitos_ptr *link_tbl_ptr = | ||
931 | &edesc->link_tbl[sg_count-1]; | ||
932 | struct scatterlist *sg; | ||
933 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
934 | |||
924 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 935 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
925 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); | 936 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); |
926 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 937 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
927 | edesc->dma_len, DMA_BIDIRECTIONAL); | 938 | edesc->dma_len, DMA_BIDIRECTIONAL); |
939 | /* If necessary for this SEC revision, | ||
940 | * add a link table entry for ICV. | ||
941 | */ | ||
942 | if ((priv->features & | ||
943 | TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) && | ||
944 | (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { | ||
945 | link_tbl_ptr->j_extent = 0; | ||
946 | link_tbl_ptr++; | ||
947 | link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | ||
948 | link_tbl_ptr->len = cpu_to_be16(authsize); | ||
949 | sg = sg_last(areq->src, edesc->src_nents ? : 1); | ||
950 | link_tbl_ptr->ptr = cpu_to_be32( | ||
951 | (char *)sg_dma_address(sg) | ||
952 | + sg->length - authsize); | ||
953 | } | ||
928 | } else { | 954 | } else { |
929 | /* Only one segment now, so no link tbl needed */ | 955 | /* Only one segment now, so no link tbl needed */ |
930 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 956 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); |
@@ -944,12 +970,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
944 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | 970 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); |
945 | } else { | 971 | } else { |
946 | struct talitos_ptr *link_tbl_ptr = | 972 | struct talitos_ptr *link_tbl_ptr = |
947 | &edesc->link_tbl[edesc->src_nents]; | 973 | &edesc->link_tbl[edesc->src_nents + 1]; |
948 | struct scatterlist *sg; | ||
949 | 974 | ||
950 | desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) | 975 | desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) |
951 | edesc->dma_link_tbl + | 976 | edesc->dma_link_tbl + |
952 | edesc->src_nents); | 977 | edesc->src_nents + 1); |
953 | if (areq->src == areq->dst) { | 978 | if (areq->src == areq->dst) { |
954 | memcpy(link_tbl_ptr, &edesc->link_tbl[0], | 979 | memcpy(link_tbl_ptr, &edesc->link_tbl[0], |
955 | edesc->src_nents * sizeof(struct talitos_ptr)); | 980 | edesc->src_nents * sizeof(struct talitos_ptr)); |
@@ -957,14 +982,10 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
957 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | 982 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
958 | link_tbl_ptr); | 983 | link_tbl_ptr); |
959 | } | 984 | } |
985 | /* Add an entry to the link table for ICV data */ | ||
960 | link_tbl_ptr += sg_count - 1; | 986 | link_tbl_ptr += sg_count - 1; |
961 | |||
962 | /* handle case where sg_last contains the ICV exclusively */ | ||
963 | sg = sg_last(areq->dst, edesc->dst_nents); | ||
964 | if (sg->length == ctx->authsize) | ||
965 | link_tbl_ptr--; | ||
966 | |||
967 | link_tbl_ptr->j_extent = 0; | 987 | link_tbl_ptr->j_extent = 0; |
988 | sg_count++; | ||
968 | link_tbl_ptr++; | 989 | link_tbl_ptr++; |
969 | link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | 990 | link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; |
970 | link_tbl_ptr->len = cpu_to_be16(authsize); | 991 | link_tbl_ptr->len = cpu_to_be16(authsize); |
@@ -973,7 +994,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
973 | link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) | 994 | link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) |
974 | edesc->dma_link_tbl + | 995 | edesc->dma_link_tbl + |
975 | edesc->src_nents + | 996 | edesc->src_nents + |
976 | edesc->dst_nents + 1); | 997 | edesc->dst_nents + 2); |
977 | 998 | ||
978 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; | 999 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; |
979 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1000 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
@@ -1040,12 +1061,12 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | |||
1040 | 1061 | ||
1041 | /* | 1062 | /* |
1042 | * allocate space for base edesc plus the link tables, | 1063 | * allocate space for base edesc plus the link tables, |
1043 | * allowing for a separate entry for the generated ICV (+ 1), | 1064 | * allowing for two separate entries for ICV and generated ICV (+ 2), |
1044 | * and the ICV data itself | 1065 | * and the ICV data itself |
1045 | */ | 1066 | */ |
1046 | alloc_len = sizeof(struct ipsec_esp_edesc); | 1067 | alloc_len = sizeof(struct ipsec_esp_edesc); |
1047 | if (src_nents || dst_nents) { | 1068 | if (src_nents || dst_nents) { |
1048 | dma_len = (src_nents + dst_nents + 1) * | 1069 | dma_len = (src_nents + dst_nents + 2) * |
1049 | sizeof(struct talitos_ptr) + ctx->authsize; | 1070 | sizeof(struct talitos_ptr) + ctx->authsize; |
1050 | alloc_len += dma_len; | 1071 | alloc_len += dma_len; |
1051 | } else { | 1072 | } else { |
@@ -1104,7 +1125,7 @@ static int aead_authenc_decrypt(struct aead_request *req) | |||
1104 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | 1125 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ |
1105 | if (edesc->dma_len) | 1126 | if (edesc->dma_len) |
1106 | icvdata = &edesc->link_tbl[edesc->src_nents + | 1127 | icvdata = &edesc->link_tbl[edesc->src_nents + |
1107 | edesc->dst_nents + 1]; | 1128 | edesc->dst_nents + 2]; |
1108 | else | 1129 | else |
1109 | icvdata = &edesc->link_tbl[0]; | 1130 | icvdata = &edesc->link_tbl[0]; |
1110 | 1131 | ||
@@ -1480,6 +1501,9 @@ static int talitos_probe(struct of_device *ofdev, | |||
1480 | goto err_out; | 1501 | goto err_out; |
1481 | } | 1502 | } |
1482 | 1503 | ||
1504 | if (of_device_is_compatible(np, "fsl,sec3.0")) | ||
1505 | priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; | ||
1506 | |||
1483 | priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1507 | priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, |
1484 | GFP_KERNEL); | 1508 | GFP_KERNEL); |
1485 | priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1509 | priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index a4e4494663bf..0328da020a10 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/memory.h> | 27 | #include <linux/memory.h> |
28 | #include <asm/plat-orion/mv_xor.h> | 28 | #include <plat/mv_xor.h> |
29 | #include "mv_xor.h" | 29 | #include "mv_xor.h" |
30 | 30 | ||
31 | static void mv_xor_issue_pending(struct dma_chan *chan); | 31 | static void mv_xor_issue_pending(struct dma_chan *chan); |
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index b27b13c5eb5a..4b55ec607a88 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
35 | #include <linux/sysdev.h> | 35 | #include <linux/sysdev.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | #include <linux/version.h> | ||
38 | 37 | ||
39 | #define EDAC_MC_LABEL_LEN 31 | 38 | #define EDAC_MC_LABEL_LEN 31 |
40 | #define EDAC_DEVICE_NAME_LEN 31 | 39 | #define EDAC_DEVICE_NAME_LEN 31 |
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index fa6d6abefd4d..450902438208 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig | |||
@@ -12,8 +12,8 @@ config FIREWIRE | |||
12 | This is the "Juju" FireWire stack, a new alternative implementation | 12 | This is the "Juju" FireWire stack, a new alternative implementation |
13 | designed for robustness and simplicity. You can build either this | 13 | designed for robustness and simplicity. You can build either this |
14 | stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both. | 14 | stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both. |
15 | Please read http://wiki.linux1394.org/JujuMigration before you | 15 | Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration |
16 | enable the new stack. | 16 | before you enable the new stack. |
17 | 17 | ||
18 | To compile this driver as a module, say M here: the module will be | 18 | To compile this driver as a module, say M here: the module will be |
19 | called firewire-core. | 19 | called firewire-core. |
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 001622eb86f9..3bf8ee120d42 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c | |||
@@ -84,20 +84,23 @@ static struct kobj_type memmap_ktype = { | |||
84 | */ | 84 | */ |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * Firmware memory map entries | 87 | * Firmware memory map entries. No locking is needed because the |
88 | * firmware_map_add() and firmware_map_add_early() functions are called | ||
89 | * in firmware initialisation code in one single thread of execution. | ||
88 | */ | 90 | */ |
89 | static LIST_HEAD(map_entries); | 91 | static LIST_HEAD(map_entries); |
90 | 92 | ||
91 | /** | 93 | /** |
92 | * Common implementation of firmware_map_add() and firmware_map_add_early() | 94 | * firmware_map_add_entry() - Does the real work to add a firmware memmap entry. |
93 | * which expects a pre-allocated struct firmware_map_entry. | ||
94 | * | ||
95 | * @start: Start of the memory range. | 95 | * @start: Start of the memory range. |
96 | * @end: End of the memory range (inclusive). | 96 | * @end: End of the memory range (inclusive). |
97 | * @type: Type of the memory range. | 97 | * @type: Type of the memory range. |
98 | * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised | 98 | * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised |
99 | * entry. | 99 | * entry. |
100 | */ | 100 | * |
101 | * Common implementation of firmware_map_add() and firmware_map_add_early() | ||
102 | * which expects a pre-allocated struct firmware_map_entry. | ||
103 | **/ | ||
101 | static int firmware_map_add_entry(resource_size_t start, resource_size_t end, | 104 | static int firmware_map_add_entry(resource_size_t start, resource_size_t end, |
102 | const char *type, | 105 | const char *type, |
103 | struct firmware_map_entry *entry) | 106 | struct firmware_map_entry *entry) |
@@ -115,33 +118,52 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end, | |||
115 | return 0; | 118 | return 0; |
116 | } | 119 | } |
117 | 120 | ||
118 | /* | 121 | /** |
119 | * See <linux/firmware-map.h> for documentation. | 122 | * firmware_map_add() - Adds a firmware mapping entry. |
120 | */ | 123 | * @start: Start of the memory range. |
124 | * @end: End of the memory range (inclusive). | ||
125 | * @type: Type of the memory range. | ||
126 | * | ||
127 | * This function uses kmalloc() for memory | ||
128 | * allocation. Use firmware_map_add_early() if you want to use the bootmem | ||
129 | * allocator. | ||
130 | * | ||
131 | * This function must be called before late_initcall. | ||
132 | * | ||
133 | * Returns 0 on success, or -ENOMEM if no memory could be allocated. | ||
134 | **/ | ||
121 | int firmware_map_add(resource_size_t start, resource_size_t end, | 135 | int firmware_map_add(resource_size_t start, resource_size_t end, |
122 | const char *type) | 136 | const char *type) |
123 | { | 137 | { |
124 | struct firmware_map_entry *entry; | 138 | struct firmware_map_entry *entry; |
125 | 139 | ||
126 | entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC); | 140 | entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC); |
127 | WARN_ON(!entry); | ||
128 | if (!entry) | 141 | if (!entry) |
129 | return -ENOMEM; | 142 | return -ENOMEM; |
130 | 143 | ||
131 | return firmware_map_add_entry(start, end, type, entry); | 144 | return firmware_map_add_entry(start, end, type, entry); |
132 | } | 145 | } |
133 | 146 | ||
134 | /* | 147 | /** |
135 | * See <linux/firmware-map.h> for documentation. | 148 | * firmware_map_add_early() - Adds a firmware mapping entry. |
136 | */ | 149 | * @start: Start of the memory range. |
150 | * @end: End of the memory range (inclusive). | ||
151 | * @type: Type of the memory range. | ||
152 | * | ||
153 | * Adds a firmware mapping entry. This function uses the bootmem allocator | ||
154 | * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). | ||
155 | * | ||
156 | * This function must be called before late_initcall. | ||
157 | * | ||
158 | * Returns 0 on success, or -ENOMEM if no memory could be allocated. | ||
159 | **/ | ||
137 | int __init firmware_map_add_early(resource_size_t start, resource_size_t end, | 160 | int __init firmware_map_add_early(resource_size_t start, resource_size_t end, |
138 | const char *type) | 161 | const char *type) |
139 | { | 162 | { |
140 | struct firmware_map_entry *entry; | 163 | struct firmware_map_entry *entry; |
141 | 164 | ||
142 | entry = alloc_bootmem_low(sizeof(struct firmware_map_entry)); | 165 | entry = alloc_bootmem_low(sizeof(struct firmware_map_entry)); |
143 | WARN_ON(!entry); | 166 | if (WARN_ON(!entry)) |
144 | if (!entry) | ||
145 | return -ENOMEM; | 167 | return -ENOMEM; |
146 | 168 | ||
147 | return firmware_map_add_entry(start, end, type, entry); | 169 | return firmware_map_add_entry(start, end, type, entry); |
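As the new kernel-doc spells out, both entry points must run before late_initcall; the only difference is the allocator behind them. A hedged usage sketch for firmware initialisation code (the address ranges and type strings are placeholders):

	/* very early, before the slab allocator is up: bootmem-backed entry */
	firmware_map_add_early(0x0, 0x9fbff, "System RAM");

	/* later, but still before late_initcall: kmalloc-backed entry */
	firmware_map_add(0xe0000000, 0xefffffff, "Reserved");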
@@ -183,7 +205,10 @@ static ssize_t memmap_attr_show(struct kobject *kobj, | |||
183 | /* | 205 | /* |
184 | * Initialises stuff and adds the entries in the map_entries list to | 206 | * Initialises stuff and adds the entries in the map_entries list to |
185 | * sysfs. Important is that firmware_map_add() and firmware_map_add_early() | 207 | * sysfs. Important is that firmware_map_add() and firmware_map_add_early() |
186 | * must be called before late_initcall. | 208 | * must be called before late_initcall. That is because this function
209 | * itself runs as a late_initcall(), so entries added via | ||
210 | * firmware_map_add() or firmware_map_add_early() after that point | ||
211 | * are not added to sysfs. | ||
187 | */ | 212 | */ |
188 | static int __init memmap_init(void) | 213 | static int __init memmap_init(void) |
189 | { | 214 | { |
@@ -192,13 +217,13 @@ static int __init memmap_init(void) | |||
192 | struct kset *memmap_kset; | 217 | struct kset *memmap_kset; |
193 | 218 | ||
194 | memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj); | 219 | memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj); |
195 | WARN_ON(!memmap_kset); | 220 | if (WARN_ON(!memmap_kset)) |
196 | if (!memmap_kset) | ||
197 | return -ENOMEM; | 221 | return -ENOMEM; |
198 | 222 | ||
199 | list_for_each_entry(entry, &map_entries, list) { | 223 | list_for_each_entry(entry, &map_entries, list) { |
200 | entry->kobj.kset = memmap_kset; | 224 | entry->kobj.kset = memmap_kset; |
201 | kobject_add(&entry->kobj, NULL, "%d", i++); | 225 | if (kobject_add(&entry->kobj, NULL, "%d", i++)) |
226 | kobject_put(&entry->kobj); | ||
202 | } | 227 | } |
203 | 228 | ||
204 | return 0; | 229 | return 0; |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 089c015c01d1..53f0e5af1cc8 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -400,27 +400,31 @@ static void drm_locked_tasklet_func(unsigned long data) | |||
400 | { | 400 | { |
401 | struct drm_device *dev = (struct drm_device *)data; | 401 | struct drm_device *dev = (struct drm_device *)data; |
402 | unsigned long irqflags; | 402 | unsigned long irqflags; |
403 | 403 | void (*tasklet_func)(struct drm_device *); | |
404 | |||
404 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | 405 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); |
406 | tasklet_func = dev->locked_tasklet_func; | ||
407 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
405 | 408 | ||
406 | if (!dev->locked_tasklet_func || | 409 | if (!tasklet_func || |
407 | !drm_lock_take(&dev->lock, | 410 | !drm_lock_take(&dev->lock, |
408 | DRM_KERNEL_CONTEXT)) { | 411 | DRM_KERNEL_CONTEXT)) { |
409 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
410 | return; | 412 | return; |
411 | } | 413 | } |
412 | 414 | ||
413 | dev->lock.lock_time = jiffies; | 415 | dev->lock.lock_time = jiffies; |
414 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); | 416 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); |
415 | 417 | ||
416 | dev->locked_tasklet_func(dev); | 418 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); |
419 | tasklet_func = dev->locked_tasklet_func; | ||
420 | dev->locked_tasklet_func = NULL; | ||
421 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
422 | |||
423 | if (tasklet_func != NULL) | ||
424 | tasklet_func(dev); | ||
417 | 425 | ||
418 | drm_lock_free(&dev->lock, | 426 | drm_lock_free(&dev->lock, |
419 | DRM_KERNEL_CONTEXT); | 427 | DRM_KERNEL_CONTEXT); |
420 | |||
421 | dev->locked_tasklet_func = NULL; | ||
422 | |||
423 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
424 | } | 428 | } |
425 | 429 | ||
426 | /** | 430 | /** |
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 0998723cde79..a4caf95485d7 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c | |||
@@ -105,14 +105,19 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
105 | ret ? "interrupted" : "has lock"); | 105 | ret ? "interrupted" : "has lock"); |
106 | if (ret) return ret; | 106 | if (ret) return ret; |
107 | 107 | ||
108 | sigemptyset(&dev->sigmask); | 108 | /* Don't block all signals on the master process for now;
109 | sigaddset(&dev->sigmask, SIGSTOP); | 109 | * probably not the correct answer, but it lets us debug the
110 | sigaddset(&dev->sigmask, SIGTSTP); | 110 | * X server's xkb handling for now. */
111 | sigaddset(&dev->sigmask, SIGTTIN); | 111 | if (!file_priv->master) { |
112 | sigaddset(&dev->sigmask, SIGTTOU); | 112 | sigemptyset(&dev->sigmask); |
113 | dev->sigdata.context = lock->context; | 113 | sigaddset(&dev->sigmask, SIGSTOP); |
114 | dev->sigdata.lock = dev->lock.hw_lock; | 114 | sigaddset(&dev->sigmask, SIGTSTP); |
115 | block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); | 115 | sigaddset(&dev->sigmask, SIGTTIN); |
116 | sigaddset(&dev->sigmask, SIGTTOU); | ||
117 | dev->sigdata.context = lock->context; | ||
118 | dev->sigdata.lock = dev->lock.hw_lock; | ||
119 | block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); | ||
120 | } | ||
116 | 121 | ||
117 | if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) | 122 | if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) |
118 | dev->driver->dma_ready(dev); | 123 | dev->driver->dma_ready(dev); |
@@ -150,6 +155,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
150 | { | 155 | { |
151 | struct drm_lock *lock = data; | 156 | struct drm_lock *lock = data; |
152 | unsigned long irqflags; | 157 | unsigned long irqflags; |
158 | void (*tasklet_func)(struct drm_device *); | ||
153 | 159 | ||
154 | if (lock->context == DRM_KERNEL_CONTEXT) { | 160 | if (lock->context == DRM_KERNEL_CONTEXT) { |
155 | DRM_ERROR("Process %d using kernel context %d\n", | 161 | DRM_ERROR("Process %d using kernel context %d\n", |
@@ -158,14 +164,11 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
158 | } | 164 | } |
159 | 165 | ||
160 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | 166 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); |
161 | 167 | tasklet_func = dev->locked_tasklet_func; | |
162 | if (dev->locked_tasklet_func) { | 168 | dev->locked_tasklet_func = NULL; |
163 | dev->locked_tasklet_func(dev); | ||
164 | |||
165 | dev->locked_tasklet_func = NULL; | ||
166 | } | ||
167 | |||
168 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | 169 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); |
170 | if (tasklet_func != NULL) | ||
171 | tasklet_func(dev); | ||
169 | 172 | ||
170 | atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); | 173 | atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); |
171 | 174 | ||
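Both drm_irq.c and drm_lock.c now follow the same discipline: snapshot (and clear) the tasklet pointer under tasklet_lock, drop the lock, then invoke it. Reduced to its essentials (a sketch of the locking pattern, not a drop-in replacement):

	void (*tasklet_func)(struct drm_device *);
	unsigned long irqflags;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
	tasklet_func = dev->locked_tasklet_func;
	dev->locked_tasklet_func = NULL;
	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	if (tasklet_func != NULL)
		tasklet_func(dev);	/* called without the spinlock held */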
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 702df45320f7..4b27d9abb7bc 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
@@ -77,6 +77,9 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, | |||
77 | return -EFAULT; | 77 | return -EFAULT; |
78 | } | 78 | } |
79 | 79 | ||
80 | box.x2--; /* Hardware expects inclusive bottom-right corner */ | ||
81 | box.y2--; | ||
82 | |||
80 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { | 83 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { |
81 | box.x1 = (box.x1) & | 84 | box.x1 = (box.x1) & |
82 | R300_CLIPRECT_MASK; | 85 | R300_CLIPRECT_MASK; |
@@ -95,8 +98,8 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, | |||
95 | R300_CLIPRECT_MASK; | 98 | R300_CLIPRECT_MASK; |
96 | box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & | 99 | box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & |
97 | R300_CLIPRECT_MASK; | 100 | R300_CLIPRECT_MASK; |
98 | |||
99 | } | 101 | } |
102 | |||
100 | OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) | | 103 | OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) | |
101 | (box.y1 << R300_CLIPRECT_Y_SHIFT)); | 104 | (box.y1 << R300_CLIPRECT_Y_SHIFT)); |
102 | OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) | | 105 | OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) | |
@@ -136,6 +139,18 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, | |||
136 | ADVANCE_RING(); | 139 | ADVANCE_RING(); |
137 | } | 140 | } |
138 | 141 | ||
142 | /* flush cache and wait for idle-clean after cliprect change */ | ||
143 | BEGIN_RING(2); | ||
144 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
145 | OUT_RING(R300_RB3D_DC_FLUSH); | ||
146 | ADVANCE_RING(); | ||
147 | BEGIN_RING(2); | ||
148 | OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
149 | OUT_RING(RADEON_WAIT_3D_IDLECLEAN); | ||
150 | ADVANCE_RING(); | ||
151 | /* set flush flag */ | ||
152 | dev_priv->track_flush |= RADEON_FLUSH_EMITED; | ||
153 | |||
139 | return 0; | 154 | return 0; |
140 | } | 155 | } |
141 | 156 | ||
@@ -166,13 +181,13 @@ void r300_init_reg_flags(struct drm_device *dev) | |||
166 | ADD_RANGE(0x21DC, 1); | 181 | ADD_RANGE(0x21DC, 1); |
167 | ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); | 182 | ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); |
168 | ADD_RANGE(R300_VAP_CLIP_X_0, 4); | 183 | ADD_RANGE(R300_VAP_CLIP_X_0, 4); |
169 | ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1); | 184 | ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1); |
170 | ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); | 185 | ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); |
171 | ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); | 186 | ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); |
172 | ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); | 187 | ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); |
173 | ADD_RANGE(R300_GB_ENABLE, 1); | 188 | ADD_RANGE(R300_GB_ENABLE, 1); |
174 | ADD_RANGE(R300_GB_MSPOS0, 5); | 189 | ADD_RANGE(R300_GB_MSPOS0, 5); |
175 | ADD_RANGE(R300_TX_CNTL, 1); | 190 | ADD_RANGE(R300_TX_INVALTAGS, 1); |
176 | ADD_RANGE(R300_TX_ENABLE, 1); | 191 | ADD_RANGE(R300_TX_ENABLE, 1); |
177 | ADD_RANGE(0x4200, 4); | 192 | ADD_RANGE(0x4200, 4); |
178 | ADD_RANGE(0x4214, 1); | 193 | ADD_RANGE(0x4214, 1); |
@@ -388,15 +403,28 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, | |||
388 | if (sz * 16 > cmdbuf->bufsz) | 403 | if (sz * 16 > cmdbuf->bufsz) |
389 | return -EINVAL; | 404 | return -EINVAL; |
390 | 405 | ||
391 | BEGIN_RING(5 + sz * 4); | 406 | /* VAP is very sensitive so we purge cache before we program it |
392 | /* Wait for VAP to come to senses.. */ | 407 | * and we also flush its state before & after */ |
393 | /* there is no need to emit it multiple times, (only once before VAP is programmed, | 408 | BEGIN_RING(6); |
394 | but this optimization is for later */ | 409 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
395 | OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0); | 410 | OUT_RING(R300_RB3D_DC_FLUSH); |
411 | OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
412 | OUT_RING(RADEON_WAIT_3D_IDLECLEAN); | ||
413 | OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0)); | ||
414 | OUT_RING(0); | ||
415 | ADVANCE_RING(); | ||
416 | /* set flush flag */ | ||
417 | dev_priv->track_flush |= RADEON_FLUSH_EMITED; | ||
418 | |||
419 | BEGIN_RING(3 + sz * 4); | ||
396 | OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); | 420 | OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); |
397 | OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); | 421 | OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); |
398 | OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); | 422 | OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); |
423 | ADVANCE_RING(); | ||
399 | 424 | ||
425 | BEGIN_RING(2); | ||
426 | OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0)); | ||
427 | OUT_RING(0); | ||
400 | ADVANCE_RING(); | 428 | ADVANCE_RING(); |
401 | 429 | ||
402 | cmdbuf->buf += sz * 16; | 430 | cmdbuf->buf += sz * 16; |
@@ -424,6 +452,15 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, | |||
424 | OUT_RING_TABLE((int *)cmdbuf->buf, 8); | 452 | OUT_RING_TABLE((int *)cmdbuf->buf, 8); |
425 | ADVANCE_RING(); | 453 | ADVANCE_RING(); |
426 | 454 | ||
455 | BEGIN_RING(4); | ||
456 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
457 | OUT_RING(R300_RB3D_DC_FLUSH); | ||
458 | OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
459 | OUT_RING(RADEON_WAIT_3D_IDLECLEAN); | ||
460 | ADVANCE_RING(); | ||
461 | /* set flush flag */ | ||
462 | dev_priv->track_flush |= RADEON_FLUSH_EMITED; | ||
463 | |||
427 | cmdbuf->buf += 8 * 4; | 464 | cmdbuf->buf += 8 * 4; |
428 | cmdbuf->bufsz -= 8 * 4; | 465 | cmdbuf->bufsz -= 8 * 4; |
429 | 466 | ||
@@ -543,22 +580,23 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, | |||
543 | return 0; | 580 | return 0; |
544 | } | 581 | } |
545 | 582 | ||
546 | static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, | 583 | static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, |
547 | drm_radeon_kcmd_buffer_t *cmdbuf) | 584 | drm_radeon_kcmd_buffer_t *cmdbuf) |
548 | { | 585 | { |
549 | u32 *cmd = (u32 *) cmdbuf->buf; | 586 | u32 *cmd; |
550 | int count, ret; | 587 | int count; |
588 | int expected_count; | ||
551 | RING_LOCALS; | 589 | RING_LOCALS; |
552 | 590 | ||
553 | count=(cmd[0]>>16) & 0x3fff; | 591 | cmd = (u32 *) cmdbuf->buf; |
592 | count = (cmd[0]>>16) & 0x3fff; | ||
593 | expected_count = cmd[1] >> 16; | ||
594 | if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) | ||
595 | expected_count = (expected_count+1)/2; | ||
554 | 596 | ||
555 | if ((cmd[1] & 0x8000ffff) != 0x80000810) { | 597 | if (count && count != expected_count) { |
556 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); | 598 | DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n", |
557 | return -EINVAL; | 599 | count, expected_count); |
558 | } | ||
559 | ret = !radeon_check_offset(dev_priv, cmd[2]); | ||
560 | if (ret) { | ||
561 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); | ||
562 | return -EINVAL; | 600 | return -EINVAL; |
563 | } | 601 | } |
564 | 602 | ||
@@ -570,6 +608,50 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, | |||
570 | cmdbuf->buf += (count+2)*4; | 608 | cmdbuf->buf += (count+2)*4; |
571 | cmdbuf->bufsz -= (count+2)*4; | 609 | cmdbuf->bufsz -= (count+2)*4; |
572 | 610 | ||
611 | if (!count) { | ||
612 | drm_r300_cmd_header_t header; | ||
613 | |||
614 | if (cmdbuf->bufsz < 4*4 + sizeof(header)) { | ||
615 | DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); | ||
616 | return -EINVAL; | ||
617 | } | ||
618 | |||
619 | header.u = *(unsigned int *)cmdbuf->buf; | ||
620 | |||
621 | cmdbuf->buf += sizeof(header); | ||
622 | cmdbuf->bufsz -= sizeof(header); | ||
623 | cmd = (u32 *) cmdbuf->buf; | ||
624 | |||
625 | if (header.header.cmd_type != R300_CMD_PACKET3 || | ||
626 | header.packet3.packet != R300_CMD_PACKET3_RAW || | ||
627 | cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { | ||
628 | DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); | ||
629 | return -EINVAL; | ||
630 | } | ||
631 | |||
632 | if ((cmd[1] & 0x8000ffff) != 0x80000810) { | ||
633 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); | ||
634 | return -EINVAL; | ||
635 | } | ||
636 | if (!radeon_check_offset(dev_priv, cmd[2])) { | ||
637 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); | ||
638 | return -EINVAL; | ||
639 | } | ||
640 | if (cmd[3] != expected_count) { | ||
641 | DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", | ||
642 | cmd[3], expected_count); | ||
643 | return -EINVAL; | ||
644 | } | ||
645 | |||
646 | BEGIN_RING(4); | ||
647 | OUT_RING(cmd[0]); | ||
648 | OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3); | ||
649 | ADVANCE_RING(); | ||
650 | |||
651 | cmdbuf->buf += 4*4; | ||
652 | cmdbuf->bufsz -= 4*4; | ||
653 | } | ||
654 | |||
573 | return 0; | 655 | return 0; |
574 | } | 656 | } |
575 | 657 | ||
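(A worked example of the size check added above; a standalone sketch, not kernel code, with only the rounding rule taken from the hunk. 3D_DRAW_INDX_2 packs two 16-bit indices per 32-bit dword, one 32-bit index per dword otherwise, so the INDX_BUFFER packet that follows has to carry the matching dword count.)

	/* Standalone sketch of the expected_count math in r300_emit_draw_indx_2. */
	static unsigned int expected_indx_dwords(unsigned int num_indices, int indices_are_32bit)
	{
		if (indices_are_32bit)
			return num_indices;		/* one index per dword */
		return (num_indices + 1) / 2;		/* two 16-bit indices per dword, rounded up */
	}

	/* Example: 5 indices at 16 bits -> (5 + 1) / 2 = 3 dwords, which is the
	 * value the following INDX_BUFFER packet's cmd[3] must carry, otherwise
	 * the command stream is rejected with -EINVAL. */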
@@ -613,11 +695,22 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, | |||
613 | case RADEON_CNTL_BITBLT_MULTI: | 695 | case RADEON_CNTL_BITBLT_MULTI: |
614 | return r300_emit_bitblt_multi(dev_priv, cmdbuf); | 696 | return r300_emit_bitblt_multi(dev_priv, cmdbuf); |
615 | 697 | ||
616 | case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */ | 698 | case RADEON_CP_INDX_BUFFER: |
617 | return r300_emit_indx_buffer(dev_priv, cmdbuf); | 699 | DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n"); |
618 | case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */ | 700 | return -EINVAL; |
619 | case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */ | 701 | case RADEON_CP_3D_DRAW_IMMD_2: |
620 | case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */ | 702 | /* triggers drawing using in-packet vertex data */ |
703 | case RADEON_CP_3D_DRAW_VBUF_2: | ||
704 | /* triggers drawing of vertex buffers setup elsewhere */ | ||
705 | dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED | | ||
706 | RADEON_PURGE_EMITED); | ||
707 | break; | ||
708 | case RADEON_CP_3D_DRAW_INDX_2: | ||
709 | /* triggers drawing using indices to vertex buffer */ | ||
710 | /* whenever we send vertices we clear flush & purge */ | ||
711 | dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED | | ||
712 | RADEON_PURGE_EMITED); | ||
713 | return r300_emit_draw_indx_2(dev_priv, cmdbuf); | ||
621 | case RADEON_WAIT_FOR_IDLE: | 714 | case RADEON_WAIT_FOR_IDLE: |
622 | case RADEON_CP_NOP: | 715 | case RADEON_CP_NOP: |
623 | /* these packets are safe */ | 716 | /* these packets are safe */ |
@@ -713,17 +806,53 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, | |||
713 | */ | 806 | */ |
714 | static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) | 807 | static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) |
715 | { | 808 | { |
809 | uint32_t cache_z, cache_3d, cache_2d; | ||
716 | RING_LOCALS; | 810 | RING_LOCALS; |
717 | 811 | ||
718 | BEGIN_RING(6); | 812 | cache_z = R300_ZC_FLUSH; |
719 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 813 | cache_2d = R300_RB2D_DC_FLUSH; |
720 | OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A); | 814 | cache_3d = R300_RB3D_DC_FLUSH; |
815 | if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) { | ||
816 | /* we can purge, primitives were drawn since last purge */ | ||
817 | cache_z |= R300_ZC_FREE; | ||
818 | cache_2d |= R300_RB2D_DC_FREE; | ||
819 | cache_3d |= R300_RB3D_DC_FREE; | ||
820 | } | ||
821 | |||
822 | /* flush & purge zbuffer */ | ||
823 | BEGIN_RING(2); | ||
721 | OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); | 824 | OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); |
722 | OUT_RING(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE| | 825 | OUT_RING(cache_z); |
723 | R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE); | 826 | ADVANCE_RING(); |
724 | OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0)); | 827 | /* flush & purge 3d */ |
725 | OUT_RING(0x0); | 828 | BEGIN_RING(2); |
829 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
830 | OUT_RING(cache_3d); | ||
831 | ADVANCE_RING(); | ||
832 | /* flush & purge texture */ | ||
833 | BEGIN_RING(2); | ||
834 | OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0)); | ||
835 | OUT_RING(0); | ||
836 | ADVANCE_RING(); | ||
837 | /* FIXME: is this one really needed ? */ | ||
838 | BEGIN_RING(2); | ||
839 | OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0)); | ||
840 | OUT_RING(0); | ||
841 | ADVANCE_RING(); | ||
842 | BEGIN_RING(2); | ||
843 | OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
844 | OUT_RING(RADEON_WAIT_3D_IDLECLEAN); | ||
845 | ADVANCE_RING(); | ||
846 | /* flush & purge 2d through E2 as RB2D will trigger lockup */ | ||
847 | BEGIN_RING(4); | ||
848 | OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0)); | ||
849 | OUT_RING(cache_2d); | ||
850 | OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
851 | OUT_RING(RADEON_WAIT_2D_IDLECLEAN | | ||
852 | RADEON_WAIT_HOST_IDLECLEAN); | ||
726 | ADVANCE_RING(); | 853 | ADVANCE_RING(); |
854 | /* set flush & purge flags */ | ||
855 | dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; | ||
727 | } | 856 | } |
728 | 857 | ||
729 | /** | 858 | /** |
@@ -905,8 +1034,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
905 | 1034 | ||
906 | DRM_DEBUG("\n"); | 1035 | DRM_DEBUG("\n"); |
907 | 1036 | ||
908 | /* See the comment above r300_emit_begin3d for why this call must be here, | 1037 | /* pacify */ |
909 | * and what the cleanup gotos are for. */ | ||
910 | r300_pacify(dev_priv); | 1038 | r300_pacify(dev_priv); |
911 | 1039 | ||
912 | if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) { | 1040 | if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) { |
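(The hunks above all use the same ring-emission pattern; the sketch below only restates it for readers unfamiliar with the macros. The assumption that CP_PACKET0()'s second argument is the register count minus one is inferred from the CP_PACKET0_TABLE(..., sz * 4 - 1) use above, not stated in this patch.)

	/* Illustrative only: how a single register write reaches the CP ring. */
	RING_LOCALS;				/* declared once in each emitting function */

	BEGIN_RING(2);				/* reserve two dwords in the ring */
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	/* type-0 header: one register */
	OUT_RING(R300_RB3D_DC_FLUSH);		/* value written to that register */
	ADVANCE_RING();				/* commit the reserved dwords */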
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index a6802f26afc4..ee6f811599a3 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
@@ -317,7 +317,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
317 | * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and | 317 | * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and |
318 | * avoids bugs caused by still running shaders reading bad data from memory. | 318 | * avoids bugs caused by still running shaders reading bad data from memory. |
319 | */ | 319 | */ |
320 | #define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */ | 320 | #define R300_VAP_PVS_STATE_FLUSH_REG 0x2284 |
321 | 321 | ||
322 | /* Absolutely no clue what this register is about. */ | 322 | /* Absolutely no clue what this register is about. */ |
323 | #define R300_VAP_UNKNOWN_2288 0x2288 | 323 | #define R300_VAP_UNKNOWN_2288 0x2288 |
@@ -513,7 +513,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
513 | /* gap */ | 513 | /* gap */ |
514 | 514 | ||
515 | /* Zero to flush caches. */ | 515 | /* Zero to flush caches. */ |
516 | #define R300_TX_CNTL 0x4100 | 516 | #define R300_TX_INVALTAGS 0x4100 |
517 | #define R300_TX_FLUSH 0x0 | 517 | #define R300_TX_FLUSH 0x0 |
518 | 518 | ||
519 | /* The upper enable bits are guessed, based on fglrx reported limits. */ | 519 | /* The upper enable bits are guessed, based on fglrx reported limits. */ |
@@ -1362,6 +1362,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
1362 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ | 1362 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ |
1363 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ | 1363 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ |
1364 | 1364 | ||
1365 | #define R300_RB3D_AARESOLVE_CTL 0x4E88 | ||
1365 | /* gap */ | 1366 | /* gap */ |
1366 | 1367 | ||
1367 | /* Guess by Vladimir. | 1368 | /* Guess by Vladimir. |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index f0de81a5689d..3331f88dcfb6 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #define RADEON_FIFO_DEBUG 0 | 40 | #define RADEON_FIFO_DEBUG 0 |
41 | 41 | ||
42 | static int radeon_do_cleanup_cp(struct drm_device * dev); | 42 | static int radeon_do_cleanup_cp(struct drm_device * dev); |
43 | static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); | ||
43 | 44 | ||
44 | static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | 45 | static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) |
45 | { | 46 | { |
@@ -198,23 +199,8 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) | |||
198 | DRM_UDELAY(1); | 199 | DRM_UDELAY(1); |
199 | } | 200 | } |
200 | } else { | 201 | } else { |
201 | /* 3D */ | 202 | /* don't flush or purge the cache here, it causes a lockup */ |
202 | tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT); | 203 | return 0; |
203 | tmp |= RADEON_RB3D_DC_FLUSH_ALL; | ||
204 | RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp); | ||
205 | |||
206 | /* 2D */ | ||
207 | tmp = RADEON_READ(R300_DSTCACHE_CTLSTAT); | ||
208 | tmp |= RADEON_RB3D_DC_FLUSH_ALL; | ||
209 | RADEON_WRITE(R300_DSTCACHE_CTLSTAT, tmp); | ||
210 | |||
211 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
212 | if (!(RADEON_READ(R300_DSTCACHE_CTLSTAT) | ||
213 | & RADEON_RB3D_DC_BUSY)) { | ||
214 | return 0; | ||
215 | } | ||
216 | DRM_UDELAY(1); | ||
217 | } | ||
218 | } | 204 | } |
219 | 205 | ||
220 | #if RADEON_FIFO_DEBUG | 206 | #if RADEON_FIFO_DEBUG |
@@ -237,6 +223,9 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) | |||
237 | return 0; | 223 | return 0; |
238 | DRM_UDELAY(1); | 224 | DRM_UDELAY(1); |
239 | } | 225 | } |
226 | DRM_INFO("wait for fifo failed, status: 0x%08X 0x%08X\n", | ||
227 | RADEON_READ(RADEON_RBBM_STATUS), | ||
228 | RADEON_READ(R300_VAP_CNTL_STATUS)); | ||
240 | 229 | ||
241 | #if RADEON_FIFO_DEBUG | 230 | #if RADEON_FIFO_DEBUG |
242 | DRM_ERROR("failed!\n"); | 231 | DRM_ERROR("failed!\n"); |
@@ -263,6 +252,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) | |||
263 | } | 252 | } |
264 | DRM_UDELAY(1); | 253 | DRM_UDELAY(1); |
265 | } | 254 | } |
255 | DRM_INFO("wait idle failed, status: 0x%08X 0x%08X\n", | ||
256 | RADEON_READ(RADEON_RBBM_STATUS), | ||
257 | RADEON_READ(R300_VAP_CNTL_STATUS)); | ||
266 | 258 | ||
267 | #if RADEON_FIFO_DEBUG | 259 | #if RADEON_FIFO_DEBUG |
268 | DRM_ERROR("failed!\n"); | 260 | DRM_ERROR("failed!\n"); |
@@ -443,14 +435,20 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) | |||
443 | 435 | ||
444 | dev_priv->cp_running = 1; | 436 | dev_priv->cp_running = 1; |
445 | 437 | ||
446 | BEGIN_RING(6); | 438 | BEGIN_RING(8); |
447 | 439 | /* ISYNC can only be written through the CP on r5xx; write it here */ |
440 | OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); | ||
441 | OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | | ||
442 | RADEON_ISYNC_ANY3D_IDLE2D | | ||
443 | RADEON_ISYNC_WAIT_IDLEGUI | | ||
444 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | ||
448 | RADEON_PURGE_CACHE(); | 445 | RADEON_PURGE_CACHE(); |
449 | RADEON_PURGE_ZCACHE(); | 446 | RADEON_PURGE_ZCACHE(); |
450 | RADEON_WAIT_UNTIL_IDLE(); | 447 | RADEON_WAIT_UNTIL_IDLE(); |
451 | |||
452 | ADVANCE_RING(); | 448 | ADVANCE_RING(); |
453 | COMMIT_RING(); | 449 | COMMIT_RING(); |
450 | |||
451 | dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; | ||
454 | } | 452 | } |
455 | 453 | ||
456 | /* Reset the Command Processor. This will not flush any pending | 454 | /* Reset the Command Processor. This will not flush any pending |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 3f0eca957aa7..099381693175 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -220,6 +220,9 @@ struct radeon_virt_surface { | |||
220 | struct drm_file *file_priv; | 220 | struct drm_file *file_priv; |
221 | }; | 221 | }; |
222 | 222 | ||
223 | #define RADEON_FLUSH_EMITED (1 << 0) | ||
224 | #define RADEON_PURGE_EMITED (1 << 1) | ||
225 | |||
223 | typedef struct drm_radeon_private { | 226 | typedef struct drm_radeon_private { |
224 | drm_radeon_ring_buffer_t ring; | 227 | drm_radeon_ring_buffer_t ring; |
225 | drm_radeon_sarea_t *sarea_priv; | 228 | drm_radeon_sarea_t *sarea_priv; |
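(These two values are used as independent bits of the new track_flush field, which is why they have to be bit shifts rather than comparisons; the sketch below is pieced together from the r300_cmdbuf.c hunks above and is illustrative only.)

	/* After a flush/purge sequence has been emitted: */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;

	/* Whenever a draw packet (IMMD_2 / VBUF_2 / INDX_2) is sent: */
	dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED);

	/* In r300_pacify(): */
	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
		/* primitives were drawn since the last purge, so the *_FREE
		 * bits are added and the caches are purged as well as flushed */
	}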
@@ -311,6 +314,7 @@ typedef struct drm_radeon_private { | |||
311 | unsigned long fb_aper_offset; | 314 | unsigned long fb_aper_offset; |
312 | 315 | ||
313 | int num_gb_pipes; | 316 | int num_gb_pipes; |
317 | int track_flush; | ||
314 | } drm_radeon_private_t; | 318 | } drm_radeon_private_t; |
315 | 319 | ||
316 | typedef struct drm_radeon_buf_priv { | 320 | typedef struct drm_radeon_buf_priv { |
@@ -693,7 +697,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev, | |||
693 | #define R300_ZB_ZCACHE_CTLSTAT 0x4f18 | 697 | #define R300_ZB_ZCACHE_CTLSTAT 0x4f18 |
694 | # define R300_ZC_FLUSH (1 << 0) | 698 | # define R300_ZC_FLUSH (1 << 0) |
695 | # define R300_ZC_FREE (1 << 1) | 699 | # define R300_ZC_FREE (1 << 1) |
696 | # define R300_ZC_FLUSH_ALL 0x3 | ||
697 | # define R300_ZC_BUSY (1 << 31) | 700 | # define R300_ZC_BUSY (1 << 31) |
698 | #define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c | 701 | #define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c |
699 | # define RADEON_RB3D_DC_FLUSH (3 << 0) | 702 | # define RADEON_RB3D_DC_FLUSH (3 << 0) |
@@ -701,6 +704,8 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev, | |||
701 | # define RADEON_RB3D_DC_FLUSH_ALL 0xf | 704 | # define RADEON_RB3D_DC_FLUSH_ALL 0xf |
702 | # define RADEON_RB3D_DC_BUSY (1 << 31) | 705 | # define RADEON_RB3D_DC_BUSY (1 << 31) |
703 | #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c | 706 | #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c |
707 | # define R300_RB3D_DC_FLUSH (2 << 0) | ||
708 | # define R300_RB3D_DC_FREE (2 << 2) | ||
704 | # define R300_RB3D_DC_FINISH (1 << 4) | 709 | # define R300_RB3D_DC_FINISH (1 << 4) |
705 | #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c | 710 | #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c |
706 | # define RADEON_Z_TEST_MASK (7 << 4) | 711 | # define RADEON_Z_TEST_MASK (7 << 4) |
@@ -1246,17 +1251,17 @@ do { \ | |||
1246 | OUT_RING(RADEON_RB3D_DC_FLUSH); \ | 1251 | OUT_RING(RADEON_RB3D_DC_FLUSH); \ |
1247 | } else { \ | 1252 | } else { \ |
1248 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ | 1253 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ |
1249 | OUT_RING(RADEON_RB3D_DC_FLUSH); \ | 1254 | OUT_RING(R300_RB3D_DC_FLUSH); \ |
1250 | } \ | 1255 | } \ |
1251 | } while (0) | 1256 | } while (0) |
1252 | 1257 | ||
1253 | #define RADEON_PURGE_CACHE() do { \ | 1258 | #define RADEON_PURGE_CACHE() do { \ |
1254 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ | 1259 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ |
1255 | OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ | 1260 | OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ |
1256 | OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \ | 1261 | OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \ |
1257 | } else { \ | 1262 | } else { \ |
1258 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ | 1263 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ |
1259 | OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \ | 1264 | OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); \ |
1260 | } \ | 1265 | } \ |
1261 | } while (0) | 1266 | } while (0) |
1262 | 1267 | ||
@@ -1273,10 +1278,10 @@ do { \ | |||
1273 | #define RADEON_PURGE_ZCACHE() do { \ | 1278 | #define RADEON_PURGE_ZCACHE() do { \ |
1274 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ | 1279 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ |
1275 | OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ | 1280 | OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ |
1276 | OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL); \ | 1281 | OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \ |
1277 | } else { \ | 1282 | } else { \ |
1278 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ | 1283 | OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ |
1279 | OUT_RING(R300_ZC_FLUSH_ALL); \ | 1284 | OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \ |
1280 | } \ | 1285 | } \ |
1281 | } while (0) | 1286 | } while (0) |
1282 | 1287 | ||
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 61e78a4369b9..b15f88249639 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -654,12 +654,12 @@ static const struct hid_blacklist { | |||
654 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, | 654 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, |
655 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, | 655 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, |
656 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, | 656 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, |
657 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN }, | 657 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, |
658 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, | 658 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE}, |
659 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN }, | 659 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE}, |
660 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN }, | 660 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE}, |
661 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, | 661 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE }, |
662 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN }, | 662 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, |
663 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, | 663 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, |
664 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, | 664 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, |
665 | 665 | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index bf4ebfb86fa5..d402e8d813ce 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -77,6 +77,22 @@ config SENSORS_AD7418 | |||
77 | This driver can also be built as a module. If so, the module | 77 | This driver can also be built as a module. If so, the module |
78 | will be called ad7418. | 78 | will be called ad7418. |
79 | 79 | ||
80 | config SENSORS_ADCXX | ||
81 | tristate "National Semiconductor ADCxxxSxxx" | ||
82 | depends on SPI_MASTER && EXPERIMENTAL | ||
83 | help | ||
84 | If you say yes here you get support for the National Semiconductor | ||
85 | ADC<bb><c>S<sss> chip family, where | ||
86 | * bb is the resolution in number of bits (8, 10, 12) | ||
87 | * c is the number of channels (1, 2, 4, 8) | ||
88 | * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500 | ||
89 | kSPS and 101 for 1 MSPS) | ||
90 | |||
91 | Examples: ADC081S101, ADC124S501, ... | ||
92 | |||
93 | This driver can also be built as a module. If so, the module | ||
94 | will be called adcxx. | ||
95 | |||
80 | config SENSORS_ADM1021 | 96 | config SENSORS_ADM1021 |
81 | tristate "Analog Devices ADM1021 and compatibles" | 97 | tristate "Analog Devices ADM1021 and compatibles" |
82 | depends on I2C | 98 | depends on I2C |
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 7943e5cefb06..950134ab8426 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile | |||
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o | |||
17 | obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o | 17 | obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o |
18 | obj-$(CONFIG_SENSORS_AD7414) += ad7414.o | 18 | obj-$(CONFIG_SENSORS_AD7414) += ad7414.o |
19 | obj-$(CONFIG_SENSORS_AD7418) += ad7418.o | 19 | obj-$(CONFIG_SENSORS_AD7418) += ad7418.o |
20 | obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o | ||
20 | obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o | 21 | obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o |
21 | obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o | 22 | obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o |
22 | obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o | 23 | obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o |
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c index f00f497b9ca9..d568c65c1370 100644 --- a/drivers/hwmon/abituguru3.c +++ b/drivers/hwmon/abituguru3.c | |||
@@ -1,5 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | abituguru3.c Copyright (c) 2006 Hans de Goede <j.w.r.degoede@hhs.nl> | 2 | abituguru3.c |
3 | |||
4 | Copyright (c) 2006-2008 Hans de Goede <j.w.r.degoede@hhs.nl> | ||
5 | Copyright (c) 2008 Alistair John Strachan <alistair@devzero.co.uk> | ||
3 | 6 | ||
4 | This program is free software; you can redistribute it and/or modify | 7 | This program is free software; you can redistribute it and/or modify |
5 | it under the terms of the GNU General Public License as published by | 8 | it under the terms of the GNU General Public License as published by |
@@ -116,7 +119,7 @@ struct abituguru3_sensor_info { | |||
116 | 119 | ||
117 | struct abituguru3_motherboard_info { | 120 | struct abituguru3_motherboard_info { |
118 | u16 id; | 121 | u16 id; |
119 | const char *name; | 122 | const char *dmi_name; |
120 | /* + 1 -> end of sensors indicated by a sensor with name == NULL */ | 123 | /* + 1 -> end of sensors indicated by a sensor with name == NULL */ |
121 | struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1]; | 124 | struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1]; |
122 | }; | 125 | }; |
@@ -161,7 +164,7 @@ struct abituguru3_data { | |||
161 | 164 | ||
162 | /* Constants */ | 165 | /* Constants */ |
163 | static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | 166 | static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { |
164 | { 0x000C, "unknown", { | 167 | { 0x000C, NULL /* Unknown, need DMI string */, { |
165 | { "CPU Core", 0, 0, 10, 1, 0 }, | 168 | { "CPU Core", 0, 0, 10, 1, 0 }, |
166 | { "DDR", 1, 0, 10, 1, 0 }, | 169 | { "DDR", 1, 0, 10, 1, 0 }, |
167 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 170 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -183,7 +186,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
183 | { "AUX1 Fan", 35, 2, 60, 1, 0 }, | 186 | { "AUX1 Fan", 35, 2, 60, 1, 0 }, |
184 | { NULL, 0, 0, 0, 0, 0 } } | 187 | { NULL, 0, 0, 0, 0, 0 } } |
185 | }, | 188 | }, |
186 | { 0x000D, "Abit AW8", { | 189 | { 0x000D, NULL /* Abit AW8, need DMI string */, { |
187 | { "CPU Core", 0, 0, 10, 1, 0 }, | 190 | { "CPU Core", 0, 0, 10, 1, 0 }, |
188 | { "DDR", 1, 0, 10, 1, 0 }, | 191 | { "DDR", 1, 0, 10, 1, 0 }, |
189 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 192 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -212,7 +215,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
212 | { "AUX5 Fan", 39, 2, 60, 1, 0 }, | 215 | { "AUX5 Fan", 39, 2, 60, 1, 0 }, |
213 | { NULL, 0, 0, 0, 0, 0 } } | 216 | { NULL, 0, 0, 0, 0, 0 } } |
214 | }, | 217 | }, |
215 | { 0x000E, "AL-8", { | 218 | { 0x000E, NULL /* AL-8, need DMI string */, { |
216 | { "CPU Core", 0, 0, 10, 1, 0 }, | 219 | { "CPU Core", 0, 0, 10, 1, 0 }, |
217 | { "DDR", 1, 0, 10, 1, 0 }, | 220 | { "DDR", 1, 0, 10, 1, 0 }, |
218 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 221 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -233,7 +236,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
233 | { "SYS Fan", 34, 2, 60, 1, 0 }, | 236 | { "SYS Fan", 34, 2, 60, 1, 0 }, |
234 | { NULL, 0, 0, 0, 0, 0 } } | 237 | { NULL, 0, 0, 0, 0, 0 } } |
235 | }, | 238 | }, |
236 | { 0x000F, "unknown", { | 239 | { 0x000F, NULL /* Unknown, need DMI string */, { |
237 | { "CPU Core", 0, 0, 10, 1, 0 }, | 240 | { "CPU Core", 0, 0, 10, 1, 0 }, |
238 | { "DDR", 1, 0, 10, 1, 0 }, | 241 | { "DDR", 1, 0, 10, 1, 0 }, |
239 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 242 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -254,7 +257,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
254 | { "SYS Fan", 34, 2, 60, 1, 0 }, | 257 | { "SYS Fan", 34, 2, 60, 1, 0 }, |
255 | { NULL, 0, 0, 0, 0, 0 } } | 258 | { NULL, 0, 0, 0, 0, 0 } } |
256 | }, | 259 | }, |
257 | { 0x0010, "Abit NI8 SLI GR", { | 260 | { 0x0010, NULL /* Abit NI8 SLI GR, need DMI string */, { |
258 | { "CPU Core", 0, 0, 10, 1, 0 }, | 261 | { "CPU Core", 0, 0, 10, 1, 0 }, |
259 | { "DDR", 1, 0, 10, 1, 0 }, | 262 | { "DDR", 1, 0, 10, 1, 0 }, |
260 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 263 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -276,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
276 | { "OTES1 Fan", 36, 2, 60, 1, 0 }, | 279 | { "OTES1 Fan", 36, 2, 60, 1, 0 }, |
277 | { NULL, 0, 0, 0, 0, 0 } } | 280 | { NULL, 0, 0, 0, 0, 0 } } |
278 | }, | 281 | }, |
279 | { 0x0011, "Abit AT8 32X", { | 282 | { 0x0011, NULL /* Abit AT8 32X, need DMI string */, { |
280 | { "CPU Core", 0, 0, 10, 1, 0 }, | 283 | { "CPU Core", 0, 0, 10, 1, 0 }, |
281 | { "DDR", 1, 0, 20, 1, 0 }, | 284 | { "DDR", 1, 0, 20, 1, 0 }, |
282 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 285 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -302,7 +305,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
302 | { "AUX2 Fan", 36, 2, 60, 1, 0 }, | 305 | { "AUX2 Fan", 36, 2, 60, 1, 0 }, |
303 | { NULL, 0, 0, 0, 0, 0 } } | 306 | { NULL, 0, 0, 0, 0, 0 } } |
304 | }, | 307 | }, |
305 | { 0x0012, "Abit AN8 32X", { | 308 | { 0x0012, NULL /* Abit AN8 32X, need DMI string */, { |
306 | { "CPU Core", 0, 0, 10, 1, 0 }, | 309 | { "CPU Core", 0, 0, 10, 1, 0 }, |
307 | { "DDR", 1, 0, 20, 1, 0 }, | 310 | { "DDR", 1, 0, 20, 1, 0 }, |
308 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 311 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -324,7 +327,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
324 | { "AUX1 Fan", 36, 2, 60, 1, 0 }, | 327 | { "AUX1 Fan", 36, 2, 60, 1, 0 }, |
325 | { NULL, 0, 0, 0, 0, 0 } } | 328 | { NULL, 0, 0, 0, 0, 0 } } |
326 | }, | 329 | }, |
327 | { 0x0013, "Abit AW8D", { | 330 | { 0x0013, NULL /* Abit AW8D, need DMI string */, { |
328 | { "CPU Core", 0, 0, 10, 1, 0 }, | 331 | { "CPU Core", 0, 0, 10, 1, 0 }, |
329 | { "DDR", 1, 0, 10, 1, 0 }, | 332 | { "DDR", 1, 0, 10, 1, 0 }, |
330 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 333 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -353,7 +356,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
353 | { "AUX5 Fan", 39, 2, 60, 1, 0 }, | 356 | { "AUX5 Fan", 39, 2, 60, 1, 0 }, |
354 | { NULL, 0, 0, 0, 0, 0 } } | 357 | { NULL, 0, 0, 0, 0, 0 } } |
355 | }, | 358 | }, |
356 | { 0x0014, "Abit AB9 Pro", { | 359 | { 0x0014, NULL /* Abit AB9 Pro, need DMI string */, { |
357 | { "CPU Core", 0, 0, 10, 1, 0 }, | 360 | { "CPU Core", 0, 0, 10, 1, 0 }, |
358 | { "DDR", 1, 0, 10, 1, 0 }, | 361 | { "DDR", 1, 0, 10, 1, 0 }, |
359 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 362 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -374,7 +377,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
374 | { "SYS Fan", 34, 2, 60, 1, 0 }, | 377 | { "SYS Fan", 34, 2, 60, 1, 0 }, |
375 | { NULL, 0, 0, 0, 0, 0 } } | 378 | { NULL, 0, 0, 0, 0, 0 } } |
376 | }, | 379 | }, |
377 | { 0x0015, "unknown", { | 380 | { 0x0015, NULL /* Unknown, need DMI string */, { |
378 | { "CPU Core", 0, 0, 10, 1, 0 }, | 381 | { "CPU Core", 0, 0, 10, 1, 0 }, |
379 | { "DDR", 1, 0, 20, 1, 0 }, | 382 | { "DDR", 1, 0, 20, 1, 0 }, |
380 | { "DDR VTT", 2, 0, 10, 1, 0 }, | 383 | { "DDR VTT", 2, 0, 10, 1, 0 }, |
@@ -398,7 +401,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
398 | { "AUX3 Fan", 36, 2, 60, 1, 0 }, | 401 | { "AUX3 Fan", 36, 2, 60, 1, 0 }, |
399 | { NULL, 0, 0, 0, 0, 0 } } | 402 | { NULL, 0, 0, 0, 0, 0 } } |
400 | }, | 403 | }, |
401 | { 0x0016, "AW9D-MAX", { | 404 | { 0x0016, NULL /* AW9D-MAX, need DMI string */, { |
402 | { "CPU Core", 0, 0, 10, 1, 0 }, | 405 | { "CPU Core", 0, 0, 10, 1, 0 }, |
403 | { "DDR2", 1, 0, 20, 1, 0 }, | 406 | { "DDR2", 1, 0, 20, 1, 0 }, |
404 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, | 407 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, |
@@ -426,7 +429,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
426 | { "OTES1 Fan", 38, 2, 60, 1, 0 }, | 429 | { "OTES1 Fan", 38, 2, 60, 1, 0 }, |
427 | { NULL, 0, 0, 0, 0, 0 } } | 430 | { NULL, 0, 0, 0, 0, 0 } } |
428 | }, | 431 | }, |
429 | { 0x0017, "unknown", { | 432 | { 0x0017, NULL /* Unknown, need DMI string */, { |
430 | { "CPU Core", 0, 0, 10, 1, 0 }, | 433 | { "CPU Core", 0, 0, 10, 1, 0 }, |
431 | { "DDR2", 1, 0, 20, 1, 0 }, | 434 | { "DDR2", 1, 0, 20, 1, 0 }, |
432 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, | 435 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, |
@@ -451,7 +454,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
451 | { "AUX3 FAN", 37, 2, 60, 1, 0 }, | 454 | { "AUX3 FAN", 37, 2, 60, 1, 0 }, |
452 | { NULL, 0, 0, 0, 0, 0 } } | 455 | { NULL, 0, 0, 0, 0, 0 } } |
453 | }, | 456 | }, |
454 | { 0x0018, "unknown", { | 457 | { 0x0018, NULL /* Unknown, need DMI string */, { |
455 | { "CPU Core", 0, 0, 10, 1, 0 }, | 458 | { "CPU Core", 0, 0, 10, 1, 0 }, |
456 | { "DDR2", 1, 0, 20, 1, 0 }, | 459 | { "DDR2", 1, 0, 20, 1, 0 }, |
457 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, | 460 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, |
@@ -478,7 +481,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
478 | { "AUX3 Fan", 36, 2, 60, 1, 0 }, | 481 | { "AUX3 Fan", 36, 2, 60, 1, 0 }, |
479 | { NULL, 0, 0, 0, 0, 0 } } | 482 | { NULL, 0, 0, 0, 0, 0 } } |
480 | }, | 483 | }, |
481 | { 0x0019, "unknown", { | 484 | { 0x0019, NULL /* Unknown, need DMI string */, { |
482 | { "CPU Core", 7, 0, 10, 1, 0 }, | 485 | { "CPU Core", 7, 0, 10, 1, 0 }, |
483 | { "DDR2", 13, 0, 20, 1, 0 }, | 486 | { "DDR2", 13, 0, 20, 1, 0 }, |
484 | { "DDR2 VTT", 14, 0, 10, 1, 0 }, | 487 | { "DDR2 VTT", 14, 0, 10, 1, 0 }, |
@@ -505,7 +508,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
505 | { "AUX3 FAN", 36, 2, 60, 1, 0 }, | 508 | { "AUX3 FAN", 36, 2, 60, 1, 0 }, |
506 | { NULL, 0, 0, 0, 0, 0 } } | 509 | { NULL, 0, 0, 0, 0, 0 } } |
507 | }, | 510 | }, |
508 | { 0x001A, "Abit IP35 Pro", { | 511 | { 0x001A, "IP35 Pro(Intel P35-ICH9R)", { |
509 | { "CPU Core", 0, 0, 10, 1, 0 }, | 512 | { "CPU Core", 0, 0, 10, 1, 0 }, |
510 | { "DDR2", 1, 0, 20, 1, 0 }, | 513 | { "DDR2", 1, 0, 20, 1, 0 }, |
511 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, | 514 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, |
@@ -533,7 +536,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
533 | { "AUX4 Fan", 37, 2, 60, 1, 0 }, | 536 | { "AUX4 Fan", 37, 2, 60, 1, 0 }, |
534 | { NULL, 0, 0, 0, 0, 0 } } | 537 | { NULL, 0, 0, 0, 0, 0 } } |
535 | }, | 538 | }, |
536 | { 0x001B, "unknown", { | 539 | { 0x001B, NULL /* Unknown, need DMI string */, { |
537 | { "CPU Core", 0, 0, 10, 1, 0 }, | 540 | { "CPU Core", 0, 0, 10, 1, 0 }, |
538 | { "DDR3", 1, 0, 20, 1, 0 }, | 541 | { "DDR3", 1, 0, 20, 1, 0 }, |
539 | { "DDR3 VTT", 2, 0, 10, 1, 0 }, | 542 | { "DDR3 VTT", 2, 0, 10, 1, 0 }, |
@@ -560,7 +563,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { | |||
560 | { "AUX3 Fan", 36, 2, 60, 1, 0 }, | 563 | { "AUX3 Fan", 36, 2, 60, 1, 0 }, |
561 | { NULL, 0, 0, 0, 0, 0 } } | 564 | { NULL, 0, 0, 0, 0, 0 } } |
562 | }, | 565 | }, |
563 | { 0x001C, "unknown", { | 566 | { 0x001C, NULL /* Unknown, need DMI string */, { |
564 | { "CPU Core", 0, 0, 10, 1, 0 }, | 567 | { "CPU Core", 0, 0, 10, 1, 0 }, |
565 | { "DDR2", 1, 0, 20, 1, 0 }, | 568 | { "DDR2", 1, 0, 20, 1, 0 }, |
566 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, | 569 | { "DDR2 VTT", 2, 0, 10, 1, 0 }, |
@@ -935,9 +938,18 @@ static int __devinit abituguru3_probe(struct platform_device *pdev) | |||
935 | goto abituguru3_probe_error; | 938 | goto abituguru3_probe_error; |
936 | } | 939 | } |
937 | data->sensors = abituguru3_motherboards[i].sensors; | 940 | data->sensors = abituguru3_motherboards[i].sensors; |
941 | |||
938 | printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard " | 942 | printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard " |
939 | "ID: %04X (%s)\n", (unsigned int)id, | 943 | "ID: %04X\n", (unsigned int)id); |
940 | abituguru3_motherboards[i].name); | 944 | |
945 | #ifdef CONFIG_DMI | ||
946 | if (!abituguru3_motherboards[i].dmi_name) { | ||
947 | printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was " | ||
948 | "not detected using DMI. Please send the output of " | ||
949 | "\"dmidecode\" to the abituguru3 maintainer " | ||
950 | "(see MAINTAINERS)\n"); | ||
951 | } | ||
952 | #endif | ||
941 | 953 | ||
942 | /* Fill the sysfs attr array */ | 954 | /* Fill the sysfs attr array */ |
943 | sysfs_attr_i = 0; | 955 | sysfs_attr_i = 0; |
@@ -1109,6 +1121,46 @@ static struct platform_driver abituguru3_driver = { | |||
1109 | .resume = abituguru3_resume | 1121 | .resume = abituguru3_resume |
1110 | }; | 1122 | }; |
1111 | 1123 | ||
1124 | #ifdef CONFIG_DMI | ||
1125 | |||
1126 | static int __init abituguru3_dmi_detect(void) | ||
1127 | { | ||
1128 | const char *board_vendor, *board_name; | ||
1129 | int i, err = (force) ? 1 : -ENODEV; | ||
1130 | |||
1131 | board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); | ||
1132 | if (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/")) | ||
1133 | return err; | ||
1134 | |||
1135 | board_name = dmi_get_system_info(DMI_BOARD_NAME); | ||
1136 | if (!board_name) | ||
1137 | return err; | ||
1138 | |||
1139 | for (i = 0; abituguru3_motherboards[i].id; i++) { | ||
1140 | const char *dmi_name = abituguru3_motherboards[i].dmi_name; | ||
1141 | if (dmi_name && !strcmp(dmi_name, board_name)) | ||
1142 | break; | ||
1143 | } | ||
1144 | |||
1145 | if (!abituguru3_motherboards[i].id) | ||
1146 | return 1; | ||
1147 | |||
1148 | return 0; | ||
1149 | } | ||
1150 | |||
1151 | #else /* !CONFIG_DMI */ | ||
1152 | |||
1153 | static inline int abituguru3_dmi_detect(void) | ||
1154 | { | ||
1155 | return -ENODEV; | ||
1156 | } | ||
1157 | |||
1158 | #endif /* CONFIG_DMI */ | ||
1159 | |||
1160 | /* FIXME: Manual detection should die eventually; we need to collect stable | ||
1161 | * DMI model names first before we can rely entirely on CONFIG_DMI. | ||
1162 | */ | ||
1163 | |||
1112 | static int __init abituguru3_detect(void) | 1164 | static int __init abituguru3_detect(void) |
1113 | { | 1165 | { |
1114 | /* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or | 1166 | /* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or |
@@ -1119,7 +1171,7 @@ static int __init abituguru3_detect(void) | |||
1119 | if (((data_val == 0x00) || (data_val == 0x08)) && | 1171 | if (((data_val == 0x00) || (data_val == 0x08)) && |
1120 | ((cmd_val == 0xAC) || (cmd_val == 0x05) || | 1172 | ((cmd_val == 0xAC) || (cmd_val == 0x05) || |
1121 | (cmd_val == 0x55))) | 1173 | (cmd_val == 0x55))) |
1122 | return ABIT_UGURU3_BASE; | 1174 | return 0; |
1123 | 1175 | ||
1124 | ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = " | 1176 | ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = " |
1125 | "0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val); | 1177 | "0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val); |
@@ -1127,7 +1179,7 @@ static int __init abituguru3_detect(void) | |||
1127 | if (force) { | 1179 | if (force) { |
1128 | printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is " | 1180 | printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is " |
1129 | "present because of \"force\" parameter\n"); | 1181 | "present because of \"force\" parameter\n"); |
1130 | return ABIT_UGURU3_BASE; | 1182 | return 0; |
1131 | } | 1183 | } |
1132 | 1184 | ||
1133 | /* No uGuru3 found */ | 1185 | /* No uGuru3 found */ |
@@ -1138,27 +1190,29 @@ static struct platform_device *abituguru3_pdev; | |||
1138 | 1190 | ||
1139 | static int __init abituguru3_init(void) | 1191 | static int __init abituguru3_init(void) |
1140 | { | 1192 | { |
1141 | int address, err; | ||
1142 | struct resource res = { .flags = IORESOURCE_IO }; | 1193 | struct resource res = { .flags = IORESOURCE_IO }; |
1143 | 1194 | int err; | |
1144 | #ifdef CONFIG_DMI | 1195 | |
1145 | const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); | 1196 | /* Attempt DMI detection first */ |
1146 | 1197 | err = abituguru3_dmi_detect(); | |
1147 | /* safety check, refuse to load on non Abit motherboards */ | 1198 | if (err < 0) |
1148 | if (!force && (!board_vendor || | 1199 | return err; |
1149 | strcmp(board_vendor, "http://www.abit.com.tw/"))) | 1200 | |
1150 | return -ENODEV; | 1201 | /* Fall back to manual detection if there was no exact |
1151 | #endif | 1202 | * board name match, or force was specified. |
1152 | 1203 | */ | |
1153 | address = abituguru3_detect(); | 1204 | if (err > 0) { |
1154 | if (address < 0) | 1205 | err = abituguru3_detect(); |
1155 | return address; | 1206 | if (err) |
1207 | return err; | ||
1208 | } | ||
1156 | 1209 | ||
1157 | err = platform_driver_register(&abituguru3_driver); | 1210 | err = platform_driver_register(&abituguru3_driver); |
1158 | if (err) | 1211 | if (err) |
1159 | goto exit; | 1212 | goto exit; |
1160 | 1213 | ||
1161 | abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME, address); | 1214 | abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME, |
1215 | ABIT_UGURU3_BASE); | ||
1162 | if (!abituguru3_pdev) { | 1216 | if (!abituguru3_pdev) { |
1163 | printk(KERN_ERR ABIT_UGURU3_NAME | 1217 | printk(KERN_ERR ABIT_UGURU3_NAME |
1164 | ": Device allocation failed\n"); | 1218 | ": Device allocation failed\n"); |
@@ -1166,8 +1220,8 @@ static int __init abituguru3_init(void) | |||
1166 | goto exit_driver_unregister; | 1220 | goto exit_driver_unregister; |
1167 | } | 1221 | } |
1168 | 1222 | ||
1169 | res.start = address; | 1223 | res.start = ABIT_UGURU3_BASE; |
1170 | res.end = address + ABIT_UGURU3_REGION_LENGTH - 1; | 1224 | res.end = ABIT_UGURU3_BASE + ABIT_UGURU3_REGION_LENGTH - 1; |
1171 | res.name = ABIT_UGURU3_NAME; | 1225 | res.name = ABIT_UGURU3_NAME; |
1172 | 1226 | ||
1173 | err = platform_device_add_resources(abituguru3_pdev, &res, 1); | 1227 | err = platform_device_add_resources(abituguru3_pdev, &res, 1); |
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c new file mode 100644 index 000000000000..242294db3db6 --- /dev/null +++ b/drivers/hwmon/adcxx.c | |||
@@ -0,0 +1,329 @@ | |||
1 | /* | ||
2 | * adcxx.c | ||
3 | * | ||
4 | * The adcxx4s is an A/D converter family from National Semiconductor (NS). | ||
5 | * | ||
6 | * Copyright (c) 2008 Marc Pignat <marc.pignat@hevs.ch> | ||
7 | * | ||
8 | * The adcxx4s communicates with a host processor via an SPI/Microwire Bus | ||
9 | * interface. This driver supports the whole family of devices with name | ||
10 | * ADC<bb><c>S<sss>, where | ||
11 | * * bb is the resolution in number of bits (8, 10, 12) | ||
12 | * * c is the number of channels (1, 2, 4, 8) | ||
13 | * * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500 kSPS | ||
14 | * and 101 for 1 MSPS) | ||
15 | * | ||
16 | * Complete datasheets are available at National's website here: | ||
17 | * http://www.national.com/ds/DC/ADC<bb><c>S<sss>.pdf | ||
18 | * | ||
19 | * Handling of the 8, 10 and 12 bit converters is the same; the | ||
20 | * unavailable bits are 0 :) | ||
21 | * | ||
22 | * This program is free software; you can redistribute it and/or modify | ||
23 | * it under the terms of the GNU General Public License as published by | ||
24 | * the Free Software Foundation; either version 2 of the License, or | ||
25 | * (at your option) any later version. | ||
26 | * | ||
27 | * This program is distributed in the hope that it will be useful, | ||
28 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
29 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
30 | * GNU General Public License for more details. | ||
31 | * | ||
32 | * You should have received a copy of the GNU General Public License | ||
33 | * along with this program; if not, write to the Free Software | ||
34 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
35 | */ | ||
36 | |||
37 | #include <linux/init.h> | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/kernel.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <linux/err.h> | ||
42 | #include <linux/sysfs.h> | ||
43 | #include <linux/hwmon.h> | ||
44 | #include <linux/hwmon-sysfs.h> | ||
45 | #include <linux/mutex.h> | ||
46 | #include <linux/spi/spi.h> | ||
47 | |||
48 | #define DRVNAME "adcxx" | ||
49 | |||
50 | struct adcxx { | ||
51 | struct device *hwmon_dev; | ||
52 | struct mutex lock; | ||
53 | u32 channels; | ||
54 | u32 reference; /* in millivolts */ | ||
55 | }; | ||
56 | |||
57 | /* sysfs hook function */ | ||
58 | static ssize_t adcxx_read(struct device *dev, | ||
59 | struct device_attribute *devattr, char *buf) | ||
60 | { | ||
61 | struct spi_device *spi = to_spi_device(dev); | ||
62 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
63 | struct adcxx *adc = dev_get_drvdata(&spi->dev); | ||
64 | u8 tx_buf[2] = { attr->index << 3 }; /* other bits are don't care */ | ||
65 | u8 rx_buf[2]; | ||
66 | int status; | ||
67 | int value; | ||
68 | |||
69 | if (mutex_lock_interruptible(&adc->lock)) | ||
70 | return -ERESTARTSYS; | ||
71 | |||
72 | status = spi_write_then_read(spi, tx_buf, sizeof(tx_buf), | ||
73 | rx_buf, sizeof(rx_buf)); | ||
74 | if (status < 0) { | ||
75 | dev_warn(dev, "spi_write_then_read failed with status %d\n", | ||
76 | status); | ||
77 | goto out; | ||
78 | } | ||
79 | |||
80 | value = (rx_buf[0] << 8) + rx_buf[1]; | ||
81 | dev_dbg(dev, "raw value = 0x%x\n", value); | ||
82 | |||
83 | value = value * adc->reference >> 12; | ||
84 | status = sprintf(buf, "%d\n", value); | ||
85 | out: | ||
86 | mutex_unlock(&adc->lock); | ||
87 | return status; | ||
88 | } | ||
89 | |||
90 | static ssize_t adcxx_show_min(struct device *dev, | ||
91 | struct device_attribute *devattr, char *buf) | ||
92 | { | ||
93 | /* The minimum reference is 0 for this chip family */ | ||
94 | return sprintf(buf, "0\n"); | ||
95 | } | ||
96 | |||
97 | static ssize_t adcxx_show_max(struct device *dev, | ||
98 | struct device_attribute *devattr, char *buf) | ||
99 | { | ||
100 | struct spi_device *spi = to_spi_device(dev); | ||
101 | struct adcxx *adc = dev_get_drvdata(&spi->dev); | ||
102 | u32 reference; | ||
103 | |||
104 | if (mutex_lock_interruptible(&adc->lock)) | ||
105 | return -ERESTARTSYS; | ||
106 | |||
107 | reference = adc->reference; | ||
108 | |||
109 | mutex_unlock(&adc->lock); | ||
110 | |||
111 | return sprintf(buf, "%d\n", reference); | ||
112 | } | ||
113 | |||
114 | static ssize_t adcxx_set_max(struct device *dev, | ||
115 | struct device_attribute *devattr, const char *buf, size_t count) | ||
116 | { | ||
117 | struct spi_device *spi = to_spi_device(dev); | ||
118 | struct adcxx *adc = dev_get_drvdata(&spi->dev); | ||
119 | unsigned long value; | ||
120 | |||
121 | if (strict_strtoul(buf, 10, &value)) | ||
122 | return -EINVAL; | ||
123 | |||
124 | if (mutex_lock_interruptible(&adc->lock)) | ||
125 | return -ERESTARTSYS; | ||
126 | |||
127 | adc->reference = value; | ||
128 | |||
129 | mutex_unlock(&adc->lock); | ||
130 | |||
131 | return count; | ||
132 | } | ||
133 | |||
134 | static ssize_t adcxx_show_name(struct device *dev, struct device_attribute | ||
135 | *devattr, char *buf) | ||
136 | { | ||
137 | struct spi_device *spi = to_spi_device(dev); | ||
138 | struct adcxx *adc = dev_get_drvdata(&spi->dev); | ||
139 | |||
140 | return sprintf(buf, "adcxx%ds\n", adc->channels); | ||
141 | } | ||
142 | |||
143 | static struct sensor_device_attribute ad_input[] = { | ||
144 | SENSOR_ATTR(name, S_IRUGO, adcxx_show_name, NULL, 0), | ||
145 | SENSOR_ATTR(in_min, S_IRUGO, adcxx_show_min, NULL, 0), | ||
146 | SENSOR_ATTR(in_max, S_IWUSR | S_IRUGO, adcxx_show_max, | ||
147 | adcxx_set_max, 0), | ||
148 | SENSOR_ATTR(in0_input, S_IRUGO, adcxx_read, NULL, 0), | ||
149 | SENSOR_ATTR(in1_input, S_IRUGO, adcxx_read, NULL, 1), | ||
150 | SENSOR_ATTR(in2_input, S_IRUGO, adcxx_read, NULL, 2), | ||
151 | SENSOR_ATTR(in3_input, S_IRUGO, adcxx_read, NULL, 3), | ||
152 | SENSOR_ATTR(in4_input, S_IRUGO, adcxx_read, NULL, 4), | ||
153 | SENSOR_ATTR(in5_input, S_IRUGO, adcxx_read, NULL, 5), | ||
154 | SENSOR_ATTR(in6_input, S_IRUGO, adcxx_read, NULL, 6), | ||
155 | SENSOR_ATTR(in7_input, S_IRUGO, adcxx_read, NULL, 7), | ||
156 | }; | ||
157 | |||
158 | /*----------------------------------------------------------------------*/ | ||
159 | |||
160 | static int __devinit adcxx_probe(struct spi_device *spi, int channels) | ||
161 | { | ||
162 | struct adcxx *adc; | ||
163 | int status; | ||
164 | int i; | ||
165 | |||
166 | adc = kzalloc(sizeof *adc, GFP_KERNEL); | ||
167 | if (!adc) | ||
168 | return -ENOMEM; | ||
169 | |||
170 | /* set a default value for the reference */ | ||
171 | adc->reference = 3300; | ||
172 | adc->channels = channels; | ||
173 | mutex_init(&adc->lock); | ||
174 | |||
175 | mutex_lock(&adc->lock); | ||
176 | |||
177 | dev_set_drvdata(&spi->dev, adc); | ||
178 | |||
179 | for (i = 0; i < 3 + adc->channels; i++) { | ||
180 | status = device_create_file(&spi->dev, &ad_input[i].dev_attr); | ||
181 | if (status) { | ||
182 | dev_err(&spi->dev, "device_create_file failed.\n"); | ||
183 | goto out_err; | ||
184 | } | ||
185 | } | ||
186 | |||
187 | adc->hwmon_dev = hwmon_device_register(&spi->dev); | ||
188 | if (IS_ERR(adc->hwmon_dev)) { | ||
189 | dev_err(&spi->dev, "hwmon_device_register failed.\n"); | ||
190 | status = PTR_ERR(adc->hwmon_dev); | ||
191 | goto out_err; | ||
192 | } | ||
193 | |||
194 | mutex_unlock(&adc->lock); | ||
195 | return 0; | ||
196 | |||
197 | out_err: | ||
198 | for (i--; i >= 0; i--) | ||
199 | device_remove_file(&spi->dev, &ad_input[i].dev_attr); | ||
200 | |||
201 | dev_set_drvdata(&spi->dev, NULL); | ||
202 | mutex_unlock(&adc->lock); | ||
203 | kfree(adc); | ||
204 | return status; | ||
205 | } | ||
206 | |||
207 | static int __devinit adcxx1s_probe(struct spi_device *spi) | ||
208 | { | ||
209 | return adcxx_probe(spi, 1); | ||
210 | } | ||
211 | |||
212 | static int __devinit adcxx2s_probe(struct spi_device *spi) | ||
213 | { | ||
214 | return adcxx_probe(spi, 2); | ||
215 | } | ||
216 | |||
217 | static int __devinit adcxx4s_probe(struct spi_device *spi) | ||
218 | { | ||
219 | return adcxx_probe(spi, 4); | ||
220 | } | ||
221 | |||
222 | static int __devinit adcxx8s_probe(struct spi_device *spi) | ||
223 | { | ||
224 | return adcxx_probe(spi, 8); | ||
225 | } | ||
226 | |||
227 | static int __devexit adcxx_remove(struct spi_device *spi) | ||
228 | { | ||
229 | struct adcxx *adc = dev_get_drvdata(&spi->dev); | ||
230 | int i; | ||
231 | |||
232 | mutex_lock(&adc->lock); | ||
233 | hwmon_device_unregister(adc->hwmon_dev); | ||
234 | for (i = 0; i < 3 + adc->channels; i++) | ||
235 | device_remove_file(&spi->dev, &ad_input[i].dev_attr); | ||
236 | |||
237 | dev_set_drvdata(&spi->dev, NULL); | ||
238 | mutex_unlock(&adc->lock); | ||
239 | kfree(adc); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static struct spi_driver adcxx1s_driver = { | ||
245 | .driver = { | ||
246 | .name = "adcxx1s", | ||
247 | .owner = THIS_MODULE, | ||
248 | }, | ||
249 | .probe = adcxx1s_probe, | ||
250 | .remove = __devexit_p(adcxx_remove), | ||
251 | }; | ||
252 | |||
253 | static struct spi_driver adcxx2s_driver = { | ||
254 | .driver = { | ||
255 | .name = "adcxx2s", | ||
256 | .owner = THIS_MODULE, | ||
257 | }, | ||
258 | .probe = adcxx2s_probe, | ||
259 | .remove = __devexit_p(adcxx_remove), | ||
260 | }; | ||
261 | |||
262 | static struct spi_driver adcxx4s_driver = { | ||
263 | .driver = { | ||
264 | .name = "adcxx4s", | ||
265 | .owner = THIS_MODULE, | ||
266 | }, | ||
267 | .probe = adcxx4s_probe, | ||
268 | .remove = __devexit_p(adcxx_remove), | ||
269 | }; | ||
270 | |||
271 | static struct spi_driver adcxx8s_driver = { | ||
272 | .driver = { | ||
273 | .name = "adcxx8s", | ||
274 | .owner = THIS_MODULE, | ||
275 | }, | ||
276 | .probe = adcxx8s_probe, | ||
277 | .remove = __devexit_p(adcxx_remove), | ||
278 | }; | ||
279 | |||
280 | static int __init init_adcxx(void) | ||
281 | { | ||
282 | int status; | ||
283 | status = spi_register_driver(&adcxx1s_driver); | ||
284 | if (status) | ||
285 | goto reg_1_failed; | ||
286 | |||
287 | status = spi_register_driver(&adcxx2s_driver); | ||
288 | if (status) | ||
289 | goto reg_2_failed; | ||
290 | |||
291 | status = spi_register_driver(&adcxx4s_driver); | ||
292 | if (status) | ||
293 | goto reg_4_failed; | ||
294 | |||
295 | status = spi_register_driver(&adcxx8s_driver); | ||
296 | if (status) | ||
297 | goto reg_8_failed; | ||
298 | |||
299 | return status; | ||
300 | |||
301 | reg_8_failed: | ||
302 | spi_unregister_driver(&adcxx4s_driver); | ||
303 | reg_4_failed: | ||
304 | spi_unregister_driver(&adcxx2s_driver); | ||
305 | reg_2_failed: | ||
306 | spi_unregister_driver(&adcxx1s_driver); | ||
307 | reg_1_failed: | ||
308 | return status; | ||
309 | } | ||
310 | |||
311 | static void __exit exit_adcxx(void) | ||
312 | { | ||
313 | spi_unregister_driver(&adcxx1s_driver); | ||
314 | spi_unregister_driver(&adcxx2s_driver); | ||
315 | spi_unregister_driver(&adcxx4s_driver); | ||
316 | spi_unregister_driver(&adcxx8s_driver); | ||
317 | } | ||
318 | |||
319 | module_init(init_adcxx); | ||
320 | module_exit(exit_adcxx); | ||
321 | |||
322 | MODULE_AUTHOR("Marc Pignat"); | ||
323 | MODULE_DESCRIPTION("National Semiconductor adcxx8sxxx Linux driver"); | ||
324 | MODULE_LICENSE("GPL"); | ||
325 | |||
326 | MODULE_ALIAS("adcxx1s"); | ||
327 | MODULE_ALIAS("adcxx2s"); | ||
328 | MODULE_ALIAS("adcxx4s"); | ||
329 | MODULE_ALIAS("adcxx8s"); | ||
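(The new driver registers one spi_driver per channel count and binds purely by modalias name, so a board file has to declare a matching SPI device. A minimal, hypothetical board snippet is sketched below; the bus number, chip select and clock rate are made-up values, only the "adcxx8s" name comes from the driver above.)

	/* Hypothetical board-file snippet binding an 8-channel part to adcxx8s. */
	#include <linux/spi/spi.h>

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "adcxx8s",	/* matches adcxx8s_driver.driver.name */
			.max_speed_hz	= 1000000,	/* assumed, depends on wiring and part */
			.bus_num	= 0,		/* assumed */
			.chip_select	= 1,		/* assumed */
		},
	};

	/* in the board init code: */
	spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));

Once bound, readings are reported in millivolts through the value * adc->reference >> 12 scaling in adcxx_read(): with the default 3300 mV reference, a mid-scale 12-bit sample of 0x800 (2048) reads back as 2048 * 3300 / 4096 = 1650.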
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index aacc0c4b809c..b06b8e090a27 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
@@ -98,6 +98,12 @@ static const char* temperature_sensors_sets[][36] = { | |||
98 | "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", | 98 | "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", |
99 | "TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", | 99 | "TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", |
100 | "TM9S", "TN0H", "TS0C", NULL }, | 100 | "TM9S", "TN0H", "TS0C", NULL }, |
101 | /* Set 5: iMac */ | ||
102 | { "TC0D", "TA0P", "TG0P", "TG0D", "TG0H", "TH0P", "Tm0P", "TO0P", | ||
103 | "Tp0C", NULL }, | ||
104 | /* Set 6: Macbook3 set */ | ||
105 | { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TTF0", "TW0P", "Th0H", | ||
106 | "Th0S", "Th1H", NULL }, | ||
101 | }; | 107 | }; |
102 | 108 | ||
103 | /* List of keys used to read/write fan speeds */ | 109 | /* List of keys used to read/write fan speeds */ |
@@ -1223,6 +1229,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { | |||
1223 | { .accelerometer = 0, .light = 0, .temperature_set = 3 }, | 1229 | { .accelerometer = 0, .light = 0, .temperature_set = 3 }, |
1224 | /* MacPro: temperature set 4 */ | 1230 | /* MacPro: temperature set 4 */ |
1225 | { .accelerometer = 0, .light = 0, .temperature_set = 4 }, | 1231 | { .accelerometer = 0, .light = 0, .temperature_set = 4 }, |
1232 | /* iMac: temperature set 5 */ | ||
1233 | { .accelerometer = 0, .light = 0, .temperature_set = 5 }, | ||
1234 | /* MacBook3: accelerometer and temperature set 6 */ | ||
1235 | { .accelerometer = 1, .light = 0, .temperature_set = 6 }, | ||
1226 | }; | 1236 | }; |
1227 | 1237 | ||
1228 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". | 1238 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". |
@@ -1232,10 +1242,14 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
1232 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | 1242 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), |
1233 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, | 1243 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, |
1234 | (void*)&applesmc_dmi_data[0]}, | 1244 | (void*)&applesmc_dmi_data[0]}, |
1235 | { applesmc_dmi_match, "Apple MacBook", { | 1245 | { applesmc_dmi_match, "Apple MacBook (v2)", { |
1236 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | 1246 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), |
1237 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") }, | 1247 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") }, |
1238 | (void*)&applesmc_dmi_data[1]}, | 1248 | (void*)&applesmc_dmi_data[1]}, |
1249 | { applesmc_dmi_match, "Apple MacBook (v3)", { | ||
1250 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | ||
1251 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") }, | ||
1252 | (void*)&applesmc_dmi_data[6]}, | ||
1239 | { applesmc_dmi_match, "Apple MacBook", { | 1253 | { applesmc_dmi_match, "Apple MacBook", { |
1240 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | 1254 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), |
1241 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") }, | 1255 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") }, |
@@ -1248,6 +1262,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
1248 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | 1262 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), |
1249 | DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, | 1263 | DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, |
1250 | (void*)&applesmc_dmi_data[4]}, | 1264 | (void*)&applesmc_dmi_data[4]}, |
1265 | { applesmc_dmi_match, "Apple iMac", { | ||
1266 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | ||
1267 | DMI_MATCH(DMI_PRODUCT_NAME,"iMac") }, | ||
1268 | (void*)&applesmc_dmi_data[5]}, | ||
1251 | { .ident = NULL } | 1269 | { .ident = NULL } |
1252 | }; | 1270 | }; |
1253 | 1271 | ||
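Editorial note: the new iMac and MacBook3 whitelist entries point at applesmc_dmi_data[5] and [6], so a DMI product-name match selects which temperature set (and whether an accelerometer is present) the driver exposes. A simplified userspace model of that lookup, with invented data and the same "more specific names first" ordering the driver's comment warns about:

#include <stdio.h>
#include <string.h>

struct dmi_match_data { int accelerometer; int light; int temperature_set; };

struct whitelist_entry {
	const char *product_prefix;
	const struct dmi_match_data *data;
};

static const struct dmi_match_data dmi_data[] = {
	[5] = { .accelerometer = 0, .light = 0, .temperature_set = 5 }, /* iMac */
	[6] = { .accelerometer = 1, .light = 0, .temperature_set = 6 }, /* MacBook3 */
};

static const struct whitelist_entry whitelist[] = {
	{ "MacBook3", &dmi_data[6] },	/* must precede the bare "MacBook" entry */
	{ "iMac",     &dmi_data[5] },
	{ NULL, NULL }
};

static const struct dmi_match_data *match(const char *product)
{
	const struct whitelist_entry *e;

	for (e = whitelist; e->product_prefix; e++)
		if (!strncmp(product, e->product_prefix, strlen(e->product_prefix)))
			return e->data;
	return NULL;
}

int main(void)
{
	const struct dmi_match_data *d = match("MacBook3,1");

	if (d)
		printf("temperature set %d, accelerometer %d\n",
		       d->temperature_set, d->accelerometer);
	return 0;
}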
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 70239acecc8e..93c17223b527 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -413,10 +413,11 @@ static int __init coretemp_init(void) | |||
413 | for_each_online_cpu(i) { | 413 | for_each_online_cpu(i) { |
414 | struct cpuinfo_x86 *c = &cpu_data(i); | 414 | struct cpuinfo_x86 *c = &cpu_data(i); |
415 | 415 | ||
416 | /* check if family 6, models 0xe, 0xf, 0x16, 0x17 */ | 416 | /* check if family 6, models 0xe, 0xf, 0x16, 0x17, 0x1A */ |
417 | if ((c->cpuid_level < 0) || (c->x86 != 0x6) || | 417 | if ((c->cpuid_level < 0) || (c->x86 != 0x6) || |
418 | !((c->x86_model == 0xe) || (c->x86_model == 0xf) || | 418 | !((c->x86_model == 0xe) || (c->x86_model == 0xf) || |
419 | (c->x86_model == 0x16) || (c->x86_model == 0x17))) { | 419 | (c->x86_model == 0x16) || (c->x86_model == 0x17) || |
420 | (c->x86_model == 0x1A))) { | ||
420 | 421 | ||
421 | /* supported CPU not found, but report the unknown | 422 | /* supported CPU not found, but report the unknown |
422 | family 6 CPU */ | 423 | family 6 CPU */ |
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index 7b0a32c4dcfb..c54eff92be4a 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c | |||
@@ -37,13 +37,21 @@ | |||
37 | * For VRD 10.0 and up, "VRD x.y Design Guide", | 37 | * For VRD 10.0 and up, "VRD x.y Design Guide", |
38 | * available at http://developer.intel.com/. | 38 | * available at http://developer.intel.com/. |
39 | * | 39 | * |
40 | * AMD NPT 0Fh (Athlon64 & Opteron), AMD Publication 32559, | 40 | * AMD Athlon 64 and AMD Opteron Processors, AMD Publication 26094, |
41 | * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/26094.PDF | ||
42 | * Table 74. VID Code Voltages | ||
43 | * This corresponds to an arbitrary VRM code of 24 in the functions below. | ||
44 | * These CPU models (K8 revision <= E) have 5 VID pins. See also: | ||
45 | * Revision Guide for AMD Athlon 64 and AMD Opteron Processors, AMD Publication 25759, | ||
46 | * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/25759.pdf | ||
47 | * | ||
48 | * AMD NPT Family 0Fh Processors, AMD Publication 32559, | ||
41 | * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf | 49 | * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf |
42 | * Table 71. VID Code Voltages | 50 | * Table 71. VID Code Voltages |
43 | * AMD Opteron processors don't follow the Intel specifications. | 51 | * This corresponds to an arbitrary VRM code of 25 in the functions below. |
44 | * I'm going to "make up" 2.4 as the spec number for the Opterons. | 52 | * These CPU models (K8 revision >= F) have 6 VID pins. See also: |
45 | * No good reason just a mnemonic for the 24x Opteron processor | 53 | * Revision Guide for AMD NPT Family 0Fh Processors, AMD Publication 33610, |
46 | * series. | 54 | * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf |
47 | * | 55 | * |
48 | * The 17 specification is in fact Intel Mobile Voltage Positioning - | 56 | * The 17 specification is in fact Intel Mobile Voltage Positioning - |
49 | * (IMVP-II). You can find more information in the datasheet of Max1718 | 57 | * (IMVP-II). You can find more information in the datasheet of Max1718 |
@@ -95,7 +103,12 @@ int vid_from_reg(int val, u8 vrm) | |||
95 | return 0; | 103 | return 0; |
96 | return((1600000 - (val - 2) * 6250 + 500) / 1000); | 104 | return((1600000 - (val - 2) * 6250 + 500) / 1000); |
97 | 105 | ||
98 | case 24: /* AMD NPT 0Fh (Athlon64 & Opteron) */ | 106 | case 24: /* Athlon64 & Opteron */ |
107 | val &= 0x1f; | ||
108 | if (val == 0x1f) | ||
109 | return 0; | ||
110 | /* fall through */ | ||
111 | case 25: /* AMD NPT 0Fh */ | ||
99 | val &= 0x3f; | 112 | val &= 0x3f; |
100 | return (val < 32) ? 1550 - 25 * val | 113 | return (val < 32) ? 1550 - 25 * val |
101 | : 775 - (25 * (val - 31)) / 2; | 114 | : 775 - (25 * (val - 31)) / 2; |
@@ -157,11 +170,16 @@ struct vrm_model { | |||
157 | 170 | ||
158 | #ifdef CONFIG_X86 | 171 | #ifdef CONFIG_X86 |
159 | 172 | ||
160 | /* the stepping parameter is highest acceptable stepping for current line */ | 173 | /* |
174 | * The stepping parameter is highest acceptable stepping for current line. | ||
175 | * The model match must be exact for 4-bit values. For model values 0x10 | ||
176 | * and above (extended model), all models below the parameter will match. | ||
177 | */ | ||
161 | 178 | ||
162 | static struct vrm_model vrm_models[] = { | 179 | static struct vrm_model vrm_models[] = { |
163 | {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */ | 180 | {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */ |
164 | {X86_VENDOR_AMD, 0xF, ANY, ANY, 24}, /* Athlon 64, Opteron and above VRM 24 */ | 181 | {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */ |
182 | {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */ | ||
165 | {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */ | 183 | {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */ |
166 | {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */ | 184 | {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */ |
167 | {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */ | 185 | {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */ |
@@ -189,6 +207,8 @@ static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor) | |||
189 | if (vrm_models[i].vendor==vendor) | 207 | if (vrm_models[i].vendor==vendor) |
190 | if ((vrm_models[i].eff_family==eff_family) | 208 | if ((vrm_models[i].eff_family==eff_family) |
191 | && ((vrm_models[i].eff_model==eff_model) || | 209 | && ((vrm_models[i].eff_model==eff_model) || |
210 | (vrm_models[i].eff_model >= 0x10 && | ||
211 | eff_model <= vrm_models[i].eff_model) || | ||
192 | (vrm_models[i].eff_model==ANY)) && | 212 | (vrm_models[i].eff_model==ANY)) && |
193 | (eff_stepping <= vrm_models[i].eff_stepping)) | 213 | (eff_stepping <= vrm_models[i].eff_stepping)) |
194 | return vrm_models[i].vrm_type; | 214 | return vrm_models[i].vrm_type; |
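Editorial note: the hunk above splits the old single AMD K8 entry into VRM code 24 (5 VID pins, K8 revision <= E) and 25 (6 VID pins, NPT family 0Fh), where an all-ones 5-bit code means "no core voltage". A userspace sketch of the resulting decode path; values in main() are just examples:

#include <stdio.h>

/* Returns millivolts, 0 means the core is off, -1 for an unknown VRM code. */
static int vid_to_mv(int val, int vrm)
{
	switch (vrm) {
	case 24:			/* Athlon64/Opteron, 5 VID pins */
		val &= 0x1f;
		if (val == 0x1f)	/* all ones: no core voltage */
			return 0;
		/* fall through */
	case 25:			/* NPT family 0Fh, 6 VID pins */
		val &= 0x3f;
		return (val < 32) ? 1550 - 25 * val
				  : 775 - (25 * (val - 31)) / 2;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d mV\n", vid_to_mv(0x02, 24));	/* 1500 mV */
	printf("%d mV\n", vid_to_mv(0x22, 25));	/* 738 mV (integer math) */
	return 0;
}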
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index f9e2ed621f7b..2ede9388096b 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c | |||
@@ -81,6 +81,8 @@ static unsigned long amb_reg_temp(unsigned int amb) | |||
81 | #define MAX_AMBS_PER_CHANNEL 16 | 81 | #define MAX_AMBS_PER_CHANNEL 16 |
82 | #define MAX_AMBS (MAX_MEM_CHANNELS * \ | 82 | #define MAX_AMBS (MAX_MEM_CHANNELS * \ |
83 | MAX_AMBS_PER_CHANNEL) | 83 | MAX_AMBS_PER_CHANNEL) |
84 | #define CHANNEL_SHIFT 4 | ||
85 | #define DIMM_MASK 0xF | ||
84 | /* | 86 | /* |
85 | * Ugly hack: For some reason the highest bit is set if there | 87 | * Ugly hack: For some reason the highest bit is set if there |
86 | * are _any_ DIMMs in the channel. Attempting to read from | 88 | * are _any_ DIMMs in the channel. Attempting to read from |
@@ -89,7 +91,7 @@ static unsigned long amb_reg_temp(unsigned int amb) | |||
89 | * might prevent us from seeing the 16th DIMM in the channel. | 91 | * might prevent us from seeing the 16th DIMM in the channel. |
90 | */ | 92 | */ |
91 | #define REAL_MAX_AMBS_PER_CHANNEL 15 | 93 | #define REAL_MAX_AMBS_PER_CHANNEL 15 |
92 | #define KNOBS_PER_AMB 5 | 94 | #define KNOBS_PER_AMB 6 |
93 | 95 | ||
94 | static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit) | 96 | static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit) |
95 | { | 97 | { |
@@ -238,6 +240,16 @@ static ssize_t show_amb_temp(struct device *dev, | |||
238 | 500 * amb_read_byte(data, amb_reg_temp(attr->index))); | 240 | 500 * amb_read_byte(data, amb_reg_temp(attr->index))); |
239 | } | 241 | } |
240 | 242 | ||
243 | static ssize_t show_label(struct device *dev, | ||
244 | struct device_attribute *devattr, | ||
245 | char *buf) | ||
246 | { | ||
247 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
248 | |||
249 | return sprintf(buf, "Ch. %d DIMM %d\n", attr->index >> CHANNEL_SHIFT, | ||
250 | attr->index & DIMM_MASK); | ||
251 | } | ||
252 | |||
241 | static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev) | 253 | static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev) |
242 | { | 254 | { |
243 | int i, j, k, d = 0; | 255 | int i, j, k, d = 0; |
@@ -268,6 +280,20 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev) | |||
268 | continue; | 280 | continue; |
269 | d++; | 281 | d++; |
270 | 282 | ||
283 | /* sysfs label */ | ||
284 | iattr = data->attrs + data->num_attrs; | ||
285 | snprintf(iattr->name, AMB_SYSFS_NAME_LEN, | ||
286 | "temp%d_label", d); | ||
287 | iattr->s_attr.dev_attr.attr.name = iattr->name; | ||
288 | iattr->s_attr.dev_attr.attr.mode = S_IRUGO; | ||
289 | iattr->s_attr.dev_attr.show = show_label; | ||
290 | iattr->s_attr.index = k; | ||
291 | res = device_create_file(&pdev->dev, | ||
292 | &iattr->s_attr.dev_attr); | ||
293 | if (res) | ||
294 | goto exit_remove; | ||
295 | data->num_attrs++; | ||
296 | |||
271 | /* Temperature sysfs knob */ | 297 | /* Temperature sysfs knob */ |
272 | iattr = data->attrs + data->num_attrs; | 298 | iattr = data->attrs + data->num_attrs; |
273 | snprintf(iattr->name, AMB_SYSFS_NAME_LEN, | 299 | snprintf(iattr->name, AMB_SYSFS_NAME_LEN, |
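Editorial note: the new temp*_label attributes decode the sensor index with CHANNEL_SHIFT and DIMM_MASK, which suggests the attribute index packs the memory channel above the DIMM number. A tiny sketch of that encoding under that assumption (the packing helper is hypothetical, not from the driver):

#include <stdio.h>

#define CHANNEL_SHIFT 4
#define DIMM_MASK 0xF

static int make_index(int channel, int dimm)
{
	return (channel << CHANNEL_SHIFT) | (dimm & DIMM_MASK);
}

int main(void)
{
	int idx = make_index(2, 7);

	/* This is what show_label() prints for the matching temp input. */
	printf("Ch. %d DIMM %d\n", idx >> CHANNEL_SHIFT, idx & DIMM_MASK);
	return 0;
}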
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index c9416e657487..0f70dc204105 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * A hwmon driver for the IBM Active Energy Manager temperature/power sensors | 2 | * A hwmon driver for the IBM System Director Active Energy Manager (AEM) |
3 | * and capping functionality. | 3 | * temperature/power/energy sensors and capping functionality. |
4 | * Copyright (C) 2008 IBM | 4 | * Copyright (C) 2008 IBM |
5 | * | 5 | * |
6 | * Author: Darrick J. Wong <djwong@us.ibm.com> | 6 | * Author: Darrick J. Wong <djwong@us.ibm.com> |
@@ -463,12 +463,18 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, | |||
463 | } | 463 | } |
464 | 464 | ||
465 | /* Update AEM energy registers */ | 465 | /* Update AEM energy registers */ |
466 | static void update_aem_energy_one(struct aem_data *data, int which) | ||
467 | { | ||
468 | aem_read_sensor(data, AEM_ENERGY_ELEMENT, which, | ||
469 | &data->energy[which], 8); | ||
470 | } | ||
471 | |||
466 | static void update_aem_energy(struct aem_data *data) | 472 | static void update_aem_energy(struct aem_data *data) |
467 | { | 473 | { |
468 | aem_read_sensor(data, AEM_ENERGY_ELEMENT, 0, &data->energy[0], 8); | 474 | update_aem_energy_one(data, 0); |
469 | if (data->ver_major < 2) | 475 | if (data->ver_major < 2) |
470 | return; | 476 | return; |
471 | aem_read_sensor(data, AEM_ENERGY_ELEMENT, 1, &data->energy[1], 8); | 477 | update_aem_energy_one(data, 1); |
472 | } | 478 | } |
473 | 479 | ||
474 | /* Update all AEM1 sensors */ | 480 | /* Update all AEM1 sensors */ |
@@ -676,7 +682,8 @@ static int aem_find_aem2(struct aem_ipmi_data *data, | |||
676 | return -ETIMEDOUT; | 682 | return -ETIMEDOUT; |
677 | 683 | ||
678 | if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) || | 684 | if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) || |
679 | memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id))) | 685 | memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)) || |
686 | fi_resp->num_instances <= instance_num) | ||
680 | return -ENOENT; | 687 | return -ENOENT; |
681 | 688 | ||
682 | return 0; | 689 | return 0; |
@@ -849,7 +856,7 @@ static ssize_t aem_show_power(struct device *dev, | |||
849 | struct timespec b, a; | 856 | struct timespec b, a; |
850 | 857 | ||
851 | mutex_lock(&data->lock); | 858 | mutex_lock(&data->lock); |
852 | update_aem_energy(data); | 859 | update_aem_energy_one(data, attr->index); |
853 | getnstimeofday(&b); | 860 | getnstimeofday(&b); |
854 | before = data->energy[attr->index]; | 861 | before = data->energy[attr->index]; |
855 | 862 | ||
@@ -861,7 +868,7 @@ static ssize_t aem_show_power(struct device *dev, | |||
861 | return 0; | 868 | return 0; |
862 | } | 869 | } |
863 | 870 | ||
864 | update_aem_energy(data); | 871 | update_aem_energy_one(data, attr->index); |
865 | getnstimeofday(&a); | 872 | getnstimeofday(&a); |
866 | after = data->energy[attr->index]; | 873 | after = data->energy[attr->index]; |
867 | mutex_unlock(&data->lock); | 874 | mutex_unlock(&data->lock); |
@@ -880,7 +887,9 @@ static ssize_t aem_show_energy(struct device *dev, | |||
880 | { | 887 | { |
881 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | 888 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); |
882 | struct aem_data *a = dev_get_drvdata(dev); | 889 | struct aem_data *a = dev_get_drvdata(dev); |
883 | a->update(a); | 890 | mutex_lock(&a->lock); |
891 | update_aem_energy_one(a, attr->index); | ||
892 | mutex_unlock(&a->lock); | ||
884 | 893 | ||
885 | return sprintf(buf, "%llu\n", | 894 | return sprintf(buf, "%llu\n", |
886 | (unsigned long long)a->energy[attr->index] * 1000); | 895 | (unsigned long long)a->energy[attr->index] * 1000); |
@@ -1104,7 +1113,7 @@ static void __exit aem_exit(void) | |||
1104 | } | 1113 | } |
1105 | 1114 | ||
1106 | MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); | 1115 | MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); |
1107 | MODULE_DESCRIPTION("IBM Active Energy Manager power/temp sensor driver"); | 1116 | MODULE_DESCRIPTION("IBM AEM power/temp/energy sensor driver"); |
1108 | MODULE_LICENSE("GPL"); | 1117 | MODULE_LICENSE("GPL"); |
1109 | 1118 | ||
1110 | module_init(aem_init); | 1119 | module_init(aem_init); |
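Editorial note: aem_show_power derives a power reading from two energy-counter samples taken a short interval apart, and the change above makes it sample only the requested counter (update_aem_energy_one) under the lock. A sketch of the delta-over-delta idea; units and numbers are illustrative, the driver works in its own raw IPMI units:

#include <stdio.h>

static double average_power(double energy_before_j, double energy_after_j,
			    double elapsed_s)
{
	/* energy consumed divided by elapsed time = average power */
	return (energy_after_j - energy_before_j) / elapsed_s;
}

int main(void)
{
	printf("%.1f W\n", average_power(1000.0, 1150.0, 3.0));	/* 50.0 W */
	return 0;
}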
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index daa7d121483b..de21142d106c 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c | |||
@@ -1055,9 +1055,10 @@ static int w83791d_probe(struct i2c_client *client, | |||
1055 | { | 1055 | { |
1056 | struct w83791d_data *data; | 1056 | struct w83791d_data *data; |
1057 | struct device *dev = &client->dev; | 1057 | struct device *dev = &client->dev; |
1058 | int i, val1, err; | 1058 | int i, err; |
1059 | 1059 | ||
1060 | #ifdef DEBUG | 1060 | #ifdef DEBUG |
1061 | int val1; | ||
1061 | val1 = w83791d_read(client, W83791D_REG_DID_VID4); | 1062 | val1 = w83791d_read(client, W83791D_REG_DID_VID4); |
1062 | dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n", | 1063 | dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n", |
1063 | (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1); | 1064 | (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1); |
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index c1adcdbf7979..9efb02137254 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
@@ -14,7 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/version.h> | ||
18 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
19 | #include <linux/err.h> | 18 | #include <linux/err.h> |
20 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c index 18355ae2155d..4655b794ebe3 100644 --- a/drivers/i2c/chips/isp1301_omap.c +++ b/drivers/i2c/chips/isp1301_omap.c | |||
@@ -1593,7 +1593,7 @@ fail1: | |||
1593 | if (machine_is_omap_h2()) { | 1593 | if (machine_is_omap_h2()) { |
1594 | /* full speed signaling by default */ | 1594 | /* full speed signaling by default */ |
1595 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, | 1595 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, |
1596 | MC1_SPEED_REG); | 1596 | MC1_SPEED); |
1597 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, | 1597 | isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, |
1598 | MC2_SPD_SUSP_CTRL); | 1598 | MC2_SPD_SUSP_CTRL); |
1599 | 1599 | ||
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 550853f79ae8..b346a687ab59 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -108,6 +108,9 @@ static int i2c_device_probe(struct device *dev) | |||
108 | if (!driver->probe || !driver->id_table) | 108 | if (!driver->probe || !driver->id_table) |
109 | return -ENODEV; | 109 | return -ENODEV; |
110 | client->driver = driver; | 110 | client->driver = driver; |
111 | if (!device_can_wakeup(&client->dev)) | ||
112 | device_init_wakeup(&client->dev, | ||
113 | client->flags & I2C_CLIENT_WAKE); | ||
111 | dev_dbg(dev, "probe\n"); | 114 | dev_dbg(dev, "probe\n"); |
112 | 115 | ||
113 | status = driver->probe(client, i2c_match_id(driver->id_table, client)); | 116 | status = driver->probe(client, i2c_match_id(driver->id_table, client)); |
@@ -262,9 +265,8 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) | |||
262 | client->adapter = adap; | 265 | client->adapter = adap; |
263 | 266 | ||
264 | client->dev.platform_data = info->platform_data; | 267 | client->dev.platform_data = info->platform_data; |
265 | device_init_wakeup(&client->dev, info->flags & I2C_CLIENT_WAKE); | ||
266 | 268 | ||
267 | client->flags = info->flags & ~I2C_CLIENT_WAKE; | 269 | client->flags = info->flags; |
268 | client->addr = info->addr; | 270 | client->addr = info->addr; |
269 | client->irq = info->irq; | 271 | client->irq = info->irq; |
270 | 272 | ||
@@ -1188,8 +1190,8 @@ int i2c_probe(struct i2c_adapter *adapter, | |||
1188 | && address_data->normal_i2c[0] == I2C_CLIENT_END) | 1190 | && address_data->normal_i2c[0] == I2C_CLIENT_END) |
1189 | return 0; | 1191 | return 0; |
1190 | 1192 | ||
1191 | dev_warn(&adapter->dev, "SMBus Quick command not supported, " | 1193 | dev_dbg(&adapter->dev, "SMBus Quick command not supported, " |
1192 | "can't probe for chips\n"); | 1194 | "can't probe for chips\n"); |
1193 | return -EOPNOTSUPP; | 1195 | return -EOPNOTSUPP; |
1194 | } | 1196 | } |
1195 | 1197 | ||
@@ -1350,6 +1352,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) | |||
1350 | } | 1352 | } |
1351 | } | 1353 | } |
1352 | 1354 | ||
1355 | /* Stop here if the classes do not match */ | ||
1356 | if (!(adapter->class & driver->class)) | ||
1357 | goto exit_free; | ||
1358 | |||
1353 | /* Stop here if we can't use SMBUS_QUICK */ | 1359 | /* Stop here if we can't use SMBUS_QUICK */ |
1354 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) { | 1360 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) { |
1355 | if (address_data->probe[0] == I2C_CLIENT_END | 1361 | if (address_data->probe[0] == I2C_CLIENT_END |
@@ -1362,10 +1368,6 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) | |||
1362 | goto exit_free; | 1368 | goto exit_free; |
1363 | } | 1369 | } |
1364 | 1370 | ||
1365 | /* Stop here if the classes do not match */ | ||
1366 | if (!(adapter->class & driver->class)) | ||
1367 | goto exit_free; | ||
1368 | |||
1369 | /* Probe entries are done second, and are not affected by ignore | 1371 | /* Probe entries are done second, and are not affected by ignore |
1370 | entries either */ | 1372 | entries either */ |
1371 | for (i = 0; address_data->probe[i] != I2C_CLIENT_END; i += 2) { | 1373 | for (i = 0; address_data->probe[i] != I2C_CLIENT_END; i += 2) { |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 89a112d513ad..49a8c589e346 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -1272,9 +1272,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | |||
1272 | */ | 1272 | */ |
1273 | static void msf_from_bcd(struct atapi_msf *msf) | 1273 | static void msf_from_bcd(struct atapi_msf *msf) |
1274 | { | 1274 | { |
1275 | msf->minute = BCD2BIN(msf->minute); | 1275 | msf->minute = bcd2bin(msf->minute); |
1276 | msf->second = BCD2BIN(msf->second); | 1276 | msf->second = bcd2bin(msf->second); |
1277 | msf->frame = BCD2BIN(msf->frame); | 1277 | msf->frame = bcd2bin(msf->frame); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) | 1280 | int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) |
@@ -1415,8 +1415,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1415 | return stat; | 1415 | return stat; |
1416 | 1416 | ||
1417 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { | 1417 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { |
1418 | toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); | 1418 | toc->hdr.first_track = bcd2bin(toc->hdr.first_track); |
1419 | toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); | 1419 | toc->hdr.last_track = bcd2bin(toc->hdr.last_track); |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | ntracks = toc->hdr.last_track - toc->hdr.first_track + 1; | 1422 | ntracks = toc->hdr.last_track - toc->hdr.first_track + 1; |
@@ -1456,8 +1456,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1456 | return stat; | 1456 | return stat; |
1457 | 1457 | ||
1458 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { | 1458 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { |
1459 | toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); | 1459 | toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT); |
1460 | toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); | 1460 | toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT); |
1461 | } else { | 1461 | } else { |
1462 | toc->hdr.first_track = CDROM_LEADOUT; | 1462 | toc->hdr.first_track = CDROM_LEADOUT; |
1463 | toc->hdr.last_track = CDROM_LEADOUT; | 1463 | toc->hdr.last_track = CDROM_LEADOUT; |
@@ -1470,14 +1470,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1470 | toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); | 1470 | toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); |
1471 | 1471 | ||
1472 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { | 1472 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { |
1473 | toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); | 1473 | toc->hdr.first_track = bcd2bin(toc->hdr.first_track); |
1474 | toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); | 1474 | toc->hdr.last_track = bcd2bin(toc->hdr.last_track); |
1475 | } | 1475 | } |
1476 | 1476 | ||
1477 | for (i = 0; i <= ntracks; i++) { | 1477 | for (i = 0; i <= ntracks; i++) { |
1478 | if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { | 1478 | if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { |
1479 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) | 1479 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) |
1480 | toc->ent[i].track = BCD2BIN(toc->ent[i].track); | 1480 | toc->ent[i].track = bcd2bin(toc->ent[i].track); |
1481 | msf_from_bcd(&toc->ent[i].addr.msf); | 1481 | msf_from_bcd(&toc->ent[i].addr.msf); |
1482 | } | 1482 | } |
1483 | toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute, | 1483 | toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute, |
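Editorial note: this hunk only swaps the old BCD2BIN/BIN2BCD macros for the bcd2bin()/bin2bcd() helpers; both encode one decimal digit per nibble. A minimal userspace equivalent of the conversions, for reference:

#include <stdio.h>

static unsigned bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned val)
{
	return (unsigned char)(((val / 10) << 4) + val % 10);
}

int main(void)
{
	printf("%u\n", bcd2bin(0x59));		/* 59 */
	printf("0x%02x\n", bin2bcd(59));	/* 0x59 */
	return 0;
}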
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c index 40644b6f1c00..3187215e8f89 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/pci/aec62xx.c | |||
@@ -307,7 +307,7 @@ static struct pci_driver driver = { | |||
307 | .name = "AEC62xx_IDE", | 307 | .name = "AEC62xx_IDE", |
308 | .id_table = aec62xx_pci_tbl, | 308 | .id_table = aec62xx_pci_tbl, |
309 | .probe = aec62xx_init_one, | 309 | .probe = aec62xx_init_one, |
310 | .remove = aec62xx_remove, | 310 | .remove = __devexit_p(aec62xx_remove), |
311 | }; | 311 | }; |
312 | 312 | ||
313 | static int __init aec62xx_ide_init(void) | 313 | static int __init aec62xx_ide_init(void) |
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c index bfae2f882f48..e6d8ee88d56d 100644 --- a/drivers/ide/pci/cy82c693.c +++ b/drivers/ide/pci/cy82c693.c | |||
@@ -447,7 +447,7 @@ static struct pci_driver driver = { | |||
447 | .name = "Cypress_IDE", | 447 | .name = "Cypress_IDE", |
448 | .id_table = cy82c693_pci_tbl, | 448 | .id_table = cy82c693_pci_tbl, |
449 | .probe = cy82c693_init_one, | 449 | .probe = cy82c693_init_one, |
450 | .remove = cy82c693_remove, | 450 | .remove = __devexit_p(cy82c693_remove), |
451 | }; | 451 | }; |
452 | 452 | ||
453 | static int __init cy82c693_ide_init(void) | 453 | static int __init cy82c693_ide_init(void) |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index 748793a413ab..eb107eef0dbc 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1620,7 +1620,7 @@ static struct pci_driver driver = { | |||
1620 | .name = "HPT366_IDE", | 1620 | .name = "HPT366_IDE", |
1621 | .id_table = hpt366_pci_tbl, | 1621 | .id_table = hpt366_pci_tbl, |
1622 | .probe = hpt366_init_one, | 1622 | .probe = hpt366_init_one, |
1623 | .remove = hpt366_remove, | 1623 | .remove = __devexit_p(hpt366_remove), |
1624 | }; | 1624 | }; |
1625 | 1625 | ||
1626 | static int __init hpt366_ide_init(void) | 1626 | static int __init hpt366_ide_init(void) |
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index b6dc723de702..4a1508a707cc 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c | |||
@@ -686,7 +686,7 @@ static struct pci_driver driver = { | |||
686 | .name = "ITE821x IDE", | 686 | .name = "ITE821x IDE", |
687 | .id_table = it821x_pci_tbl, | 687 | .id_table = it821x_pci_tbl, |
688 | .probe = it821x_init_one, | 688 | .probe = it821x_init_one, |
689 | .remove = it821x_remove, | 689 | .remove = __devexit_p(it821x_remove), |
690 | }; | 690 | }; |
691 | 691 | ||
692 | static int __init it821x_ide_init(void) | 692 | static int __init it821x_ide_init(void) |
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c index 0f609b72f470..d477da6b5858 100644 --- a/drivers/ide/pci/pdc202xx_new.c +++ b/drivers/ide/pci/pdc202xx_new.c | |||
@@ -566,7 +566,7 @@ static struct pci_driver driver = { | |||
566 | .name = "Promise_IDE", | 566 | .name = "Promise_IDE", |
567 | .id_table = pdc202new_pci_tbl, | 567 | .id_table = pdc202new_pci_tbl, |
568 | .probe = pdc202new_init_one, | 568 | .probe = pdc202new_init_one, |
569 | .remove = pdc202new_remove, | 569 | .remove = __devexit_p(pdc202new_remove), |
570 | }; | 570 | }; |
571 | 571 | ||
572 | static int __init pdc202new_ide_init(void) | 572 | static int __init pdc202new_ide_init(void) |
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index 6cde48bba6f8..44cccd1e086a 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c | |||
@@ -954,7 +954,7 @@ static struct pci_driver driver = { | |||
954 | .name = "SCC IDE", | 954 | .name = "SCC IDE", |
955 | .id_table = scc_pci_tbl, | 955 | .id_table = scc_pci_tbl, |
956 | .probe = scc_init_one, | 956 | .probe = scc_init_one, |
957 | .remove = scc_remove, | 957 | .remove = __devexit_p(scc_remove), |
958 | }; | 958 | }; |
959 | 959 | ||
960 | static int scc_ide_init(void) | 960 | static int scc_ide_init(void) |
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index 42eef19a18f1..681306c9d79b 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c | |||
@@ -621,9 +621,9 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) | |||
621 | if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, | 621 | if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, |
622 | DRV_NAME)) { | 622 | DRV_NAME)) { |
623 | printk(KERN_ERR | 623 | printk(KERN_ERR |
624 | "%s : %s -- ERROR, Addresses " | 624 | "%s %s: -- ERROR, Addresses " |
625 | "0x%p to 0x%p ALREADY in use\n", | 625 | "0x%p to 0x%p ALREADY in use\n", |
626 | __func__, DRV_NAME, (void *) cmd_phys_base, | 626 | DRV_NAME, pci_name(dev), (void *)cmd_phys_base, |
627 | (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); | 627 | (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); |
628 | return -ENOMEM; | 628 | return -ENOMEM; |
629 | } | 629 | } |
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index 445ce6fbea33..db2b88a369ab 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c | |||
@@ -832,7 +832,7 @@ static struct pci_driver driver = { | |||
832 | .name = "SiI_IDE", | 832 | .name = "SiI_IDE", |
833 | .id_table = siimage_pci_tbl, | 833 | .id_table = siimage_pci_tbl, |
834 | .probe = siimage_init_one, | 834 | .probe = siimage_init_one, |
835 | .remove = siimage_remove, | 835 | .remove = __devexit_p(siimage_remove), |
836 | }; | 836 | }; |
837 | 837 | ||
838 | static int __init siimage_ide_init(void) | 838 | static int __init siimage_ide_init(void) |
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index e5a4b42b4e33..5efe21d6ef97 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c | |||
@@ -610,7 +610,7 @@ static struct pci_driver driver = { | |||
610 | .name = "SIS_IDE", | 610 | .name = "SIS_IDE", |
611 | .id_table = sis5513_pci_tbl, | 611 | .id_table = sis5513_pci_tbl, |
612 | .probe = sis5513_init_one, | 612 | .probe = sis5513_init_one, |
613 | .remove = sis5513_remove, | 613 | .remove = __devexit_p(sis5513_remove), |
614 | }; | 614 | }; |
615 | 615 | ||
616 | static int __init sis5513_ide_init(void) | 616 | static int __init sis5513_ide_init(void) |
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c index 7fc88c375e5d..927277c54ec9 100644 --- a/drivers/ide/pci/tc86c001.c +++ b/drivers/ide/pci/tc86c001.c | |||
@@ -249,7 +249,7 @@ static struct pci_driver driver = { | |||
249 | .name = "TC86C001", | 249 | .name = "TC86C001", |
250 | .id_table = tc86c001_pci_tbl, | 250 | .id_table = tc86c001_pci_tbl, |
251 | .probe = tc86c001_init_one, | 251 | .probe = tc86c001_init_one, |
252 | .remove = tc86c001_remove, | 252 | .remove = __devexit_p(tc86c001_remove), |
253 | }; | 253 | }; |
254 | 254 | ||
255 | static int __init tc86c001_ide_init(void) | 255 | static int __init tc86c001_ide_init(void) |
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index a6b2cc83f293..94fb9ab3223f 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c | |||
@@ -491,7 +491,7 @@ static struct pci_driver driver = { | |||
491 | .name = "VIA_IDE", | 491 | .name = "VIA_IDE", |
492 | .id_table = via_pci_tbl, | 492 | .id_table = via_pci_tbl, |
493 | .probe = via_init_one, | 493 | .probe = via_init_one, |
494 | .remove = via_remove, | 494 | .remove = __devexit_p(via_remove), |
495 | }; | 495 | }; |
496 | 496 | ||
497 | static int __init via_ide_init(void) | 497 | static int __init via_ide_init(void) |
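Editorial note: the series of one-line changes above wraps each .remove pointer in __devexit_p(). The point is that when device-removal support is compiled out, __devexit functions are discarded, so the macro must degrade to NULL rather than reference a dropped symbol. A simplified, compilable model of that pattern (HOTPLUG and my_devexit_p are stand-ins, not the real kernel names):

#include <stdio.h>
#include <stddef.h>

#ifdef HOTPLUG
#define my_devexit_p(fn) (fn)
static void driver_remove(void) { puts("remove"); }
#else
#define my_devexit_p(fn) NULL		/* removal path compiled out */
#endif

struct driver { void (*remove)(void); };

static struct driver drv = {
	.remove = my_devexit_p(driver_remove),
};

int main(void)
{
	if (drv.remove)
		drv.remove();
	else
		puts("no remove callback compiled in");
	return 0;
}

Either configuration builds: with HOTPLUG the callback is present and called, without it the initializer is simply NULL.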
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 994a21e5a0aa..16240a789650 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c | |||
@@ -844,7 +844,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr | |||
844 | ne->host = host; | 844 | ne->host = host; |
845 | ne->nodeid = nodeid; | 845 | ne->nodeid = nodeid; |
846 | ne->generation = generation; | 846 | ne->generation = generation; |
847 | ne->needs_probe = 1; | 847 | ne->needs_probe = true; |
848 | 848 | ||
849 | ne->guid = guid; | 849 | ne->guid = guid; |
850 | ne->guid_vendor_id = (guid >> 40) & 0xffffff; | 850 | ne->guid_vendor_id = (guid >> 40) & 0xffffff; |
@@ -1144,7 +1144,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent | |||
1144 | struct csr1212_keyval *kv, *vendor_name_kv = NULL; | 1144 | struct csr1212_keyval *kv, *vendor_name_kv = NULL; |
1145 | u8 last_key_id = 0; | 1145 | u8 last_key_id = 0; |
1146 | 1146 | ||
1147 | ne->needs_probe = 0; | 1147 | ne->needs_probe = false; |
1148 | 1148 | ||
1149 | csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) { | 1149 | csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) { |
1150 | switch (kv->key.id) { | 1150 | switch (kv->key.id) { |
@@ -1295,7 +1295,7 @@ static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr, | |||
1295 | nodemgr_update_bus_options(ne); | 1295 | nodemgr_update_bus_options(ne); |
1296 | 1296 | ||
1297 | /* Mark the node as new, so it gets re-probed */ | 1297 | /* Mark the node as new, so it gets re-probed */ |
1298 | ne->needs_probe = 1; | 1298 | ne->needs_probe = true; |
1299 | } else { | 1299 | } else { |
1300 | /* old cache is valid, so update its generation */ | 1300 | /* old cache is valid, so update its generation */ |
1301 | struct nodemgr_csr_info *ci = ne->csr->private; | 1301 | struct nodemgr_csr_info *ci = ne->csr->private; |
@@ -1566,57 +1566,60 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge | |||
1566 | struct probe_param { | 1566 | struct probe_param { |
1567 | struct host_info *hi; | 1567 | struct host_info *hi; |
1568 | int generation; | 1568 | int generation; |
1569 | bool probe_now; | ||
1569 | }; | 1570 | }; |
1570 | 1571 | ||
1571 | static int __nodemgr_node_probe(struct device *dev, void *data) | 1572 | static int node_probe(struct device *dev, void *data) |
1572 | { | 1573 | { |
1573 | struct probe_param *param = (struct probe_param *)data; | 1574 | struct probe_param *p = data; |
1574 | struct node_entry *ne; | 1575 | struct node_entry *ne; |
1575 | 1576 | ||
1577 | if (p->generation != get_hpsb_generation(p->hi->host)) | ||
1578 | return -EAGAIN; | ||
1579 | |||
1576 | ne = container_of(dev, struct node_entry, node_dev); | 1580 | ne = container_of(dev, struct node_entry, node_dev); |
1577 | if (!ne->needs_probe) | 1581 | if (ne->needs_probe == p->probe_now) |
1578 | nodemgr_probe_ne(param->hi, ne, param->generation); | 1582 | nodemgr_probe_ne(p->hi, ne, p->generation); |
1579 | if (ne->needs_probe) | ||
1580 | nodemgr_probe_ne(param->hi, ne, param->generation); | ||
1581 | return 0; | 1583 | return 0; |
1582 | } | 1584 | } |
1583 | 1585 | ||
1584 | static void nodemgr_node_probe(struct host_info *hi, int generation) | 1586 | static void nodemgr_node_probe(struct host_info *hi, int generation) |
1585 | { | 1587 | { |
1586 | struct hpsb_host *host = hi->host; | 1588 | struct probe_param p; |
1587 | struct probe_param param; | ||
1588 | 1589 | ||
1589 | param.hi = hi; | 1590 | p.hi = hi; |
1590 | param.generation = generation; | 1591 | p.generation = generation; |
1591 | /* Do some processing of the nodes we've probed. This pulls them | 1592 | /* |
1593 | * Do some processing of the nodes we've probed. This pulls them | ||
1592 | * into the sysfs layer if needed, and can result in processing of | 1594 | * into the sysfs layer if needed, and can result in processing of |
1593 | * unit-directories, or just updating the node and it's | 1595 | * unit-directories, or just updating the node and it's |
1594 | * unit-directories. | 1596 | * unit-directories. |
1595 | * | 1597 | * |
1596 | * Run updates before probes. Usually, updates are time-critical | 1598 | * Run updates before probes. Usually, updates are time-critical |
1597 | * while probes are time-consuming. (Well, those probes need some | 1599 | * while probes are time-consuming. |
1598 | * improvement...) */ | ||
1599 | |||
1600 | class_for_each_device(&nodemgr_ne_class, NULL, ¶m, | ||
1601 | __nodemgr_node_probe); | ||
1602 | |||
1603 | /* If we had a bus reset while we were scanning the bus, it is | ||
1604 | * possible that we did not probe all nodes. In that case, we | ||
1605 | * skip the clean up for now, since we could remove nodes that | ||
1606 | * were still on the bus. Another bus scan is pending which will | ||
1607 | * do the clean up eventually. | ||
1608 | * | 1600 | * |
1601 | * Meanwhile, another bus reset may have happened. In this case we | ||
1602 | * skip everything here and let the next bus scan handle it. | ||
1603 | * Otherwise we may prematurely remove nodes which are still there. | ||
1604 | */ | ||
1605 | p.probe_now = false; | ||
1606 | if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0) | ||
1607 | return; | ||
1608 | |||
1609 | p.probe_now = true; | ||
1610 | if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0) | ||
1611 | return; | ||
1612 | /* | ||
1609 | * Now let's tell the bus to rescan our devices. This may seem | 1613 | * Now let's tell the bus to rescan our devices. This may seem |
1610 | * like overhead, but the driver-model core will only scan a | 1614 | * like overhead, but the driver-model core will only scan a |
1611 | * device for a driver when either the device is added, or when a | 1615 | * device for a driver when either the device is added, or when a |
1612 | * new driver is added. A bus reset is a good reason to rescan | 1616 | * new driver is added. A bus reset is a good reason to rescan |
1613 | * devices that were there before. For example, an sbp2 device | 1617 | * devices that were there before. For example, an sbp2 device |
1614 | * may become available for login, if the host that held it was | 1618 | * may become available for login, if the host that held it was |
1615 | * just removed. */ | 1619 | * just removed. |
1616 | 1620 | */ | |
1617 | if (generation == get_hpsb_generation(host)) | 1621 | if (bus_rescan_devices(&ieee1394_bus_type) != 0) |
1618 | if (bus_rescan_devices(&ieee1394_bus_type)) | 1622 | HPSB_DEBUG("bus_rescan_devices had an error"); |
1619 | HPSB_DEBUG("bus_rescan_devices had an error"); | ||
1620 | } | 1623 | } |
1621 | 1624 | ||
1622 | static int nodemgr_send_resume_packet(struct hpsb_host *host) | 1625 | static int nodemgr_send_resume_packet(struct hpsb_host *host) |
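Editorial note: the rewritten nodemgr_node_probe walks the node class twice with a probe_now flag, so cheap updates run before time-consuming probes, and either pass bails out if another bus reset bumped the generation in the meantime. A self-contained sketch of that two-pass scheme; the data layout is invented:

#include <stdbool.h>
#include <stdio.h>

struct node { const char *name; bool needs_probe; };

static int current_generation = 7;

static int node_probe(struct node *n, int generation, bool probe_now)
{
	if (generation != current_generation)
		return -1;		/* stale scan; let the next scan handle it */
	if (n->needs_probe == probe_now)
		printf("%s %s\n", probe_now ? "probing" : "updating", n->name);
	return 0;
}

static int for_each_node(struct node *nodes, int count, int generation, bool probe_now)
{
	for (int i = 0; i < count; i++)
		if (node_probe(&nodes[i], generation, probe_now))
			return -1;
	return 0;
}

int main(void)
{
	struct node nodes[] = {
		{ "node-0", false },	/* known node: update only */
		{ "node-1", true },	/* new node: needs a full probe */
	};
	int generation = 7;

	if (for_each_node(nodes, 2, generation, false))
		return 0;		/* update pass aborted, skip probes too */
	for_each_node(nodes, 2, generation, true);
	return 0;
}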
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h index 919e92e2a955..6eb26465a84c 100644 --- a/drivers/ieee1394/nodemgr.h +++ b/drivers/ieee1394/nodemgr.h | |||
@@ -97,7 +97,7 @@ struct node_entry { | |||
97 | struct hpsb_host *host; /* Host this node is attached to */ | 97 | struct hpsb_host *host; /* Host this node is attached to */ |
98 | nodeid_t nodeid; /* NodeID */ | 98 | nodeid_t nodeid; /* NodeID */ |
99 | struct bus_options busopt; /* Bus Options */ | 99 | struct bus_options busopt; /* Bus Options */ |
100 | int needs_probe; | 100 | bool needs_probe; |
101 | unsigned int generation; /* Synced with hpsb generation */ | 101 | unsigned int generation; /* Synced with hpsb generation */ |
102 | 102 | ||
103 | /* The following is read from the config rom */ | 103 | /* The following is read from the config rom */ |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 9cbf3154d243..1d6ad3435537 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -731,15 +731,26 @@ static int sbp2_update(struct unit_directory *ud) | |||
731 | { | 731 | { |
732 | struct sbp2_lu *lu = ud->device.driver_data; | 732 | struct sbp2_lu *lu = ud->device.driver_data; |
733 | 733 | ||
734 | if (sbp2_reconnect_device(lu)) { | 734 | if (sbp2_reconnect_device(lu) != 0) { |
735 | /* Reconnect has failed. Perhaps we didn't reconnect fast | 735 | /* |
736 | * enough. Try a regular login, but first log out just in | 736 | * Reconnect failed. If another bus reset happened, |
737 | * case of any weirdness. */ | 737 | * let nodemgr proceed and call sbp2_update again later |
738 | * (or sbp2_remove if this node went away). | ||
739 | */ | ||
740 | if (!hpsb_node_entry_valid(lu->ne)) | ||
741 | return 0; | ||
742 | /* | ||
743 | * Or the target rejected the reconnect because we weren't | ||
744 | * fast enough. Try a regular login, but first log out | ||
745 | * just in case of any weirdness. | ||
746 | */ | ||
738 | sbp2_logout_device(lu); | 747 | sbp2_logout_device(lu); |
739 | 748 | ||
740 | if (sbp2_login_device(lu)) { | 749 | if (sbp2_login_device(lu) != 0) { |
741 | /* Login failed too, just fail, and the backend | 750 | if (!hpsb_node_entry_valid(lu->ne)) |
742 | * will call our sbp2_remove for us */ | 751 | return 0; |
752 | |||
753 | /* Maybe another initiator won the login. */ | ||
743 | SBP2_ERR("Failed to reconnect to sbp2 device!"); | 754 | SBP2_ERR("Failed to reconnect to sbp2 device!"); |
744 | return -EBUSY; | 755 | return -EBUSY; |
745 | } | 756 | } |
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 0b0618edd645..1ab919f836a8 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -156,6 +156,14 @@ struct ehca_mod_qp_parm { | |||
156 | 156 | ||
157 | #define EHCA_MOD_QP_PARM_MAX 4 | 157 | #define EHCA_MOD_QP_PARM_MAX 4 |
158 | 158 | ||
159 | #define QMAP_IDX_MASK 0xFFFFULL | ||
160 | |||
161 | /* struct for tracking if cqes have been reported to the application */ | ||
162 | struct ehca_qmap_entry { | ||
163 | u16 app_wr_id; | ||
164 | u16 reported; | ||
165 | }; | ||
166 | |||
159 | struct ehca_qp { | 167 | struct ehca_qp { |
160 | union { | 168 | union { |
161 | struct ib_qp ib_qp; | 169 | struct ib_qp ib_qp; |
@@ -165,6 +173,7 @@ struct ehca_qp { | |||
165 | enum ehca_ext_qp_type ext_type; | 173 | enum ehca_ext_qp_type ext_type; |
166 | enum ib_qp_state state; | 174 | enum ib_qp_state state; |
167 | struct ipz_queue ipz_squeue; | 175 | struct ipz_queue ipz_squeue; |
176 | struct ehca_qmap_entry *sq_map; | ||
168 | struct ipz_queue ipz_rqueue; | 177 | struct ipz_queue ipz_rqueue; |
169 | struct h_galpas galpas; | 178 | struct h_galpas galpas; |
170 | u32 qkey; | 179 | u32 qkey; |
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h index 818803057ebf..5d28e3e98a20 100644 --- a/drivers/infiniband/hw/ehca/ehca_qes.h +++ b/drivers/infiniband/hw/ehca/ehca_qes.h | |||
@@ -213,6 +213,7 @@ struct ehca_wqe { | |||
213 | #define WC_STATUS_ERROR_BIT 0x80000000 | 213 | #define WC_STATUS_ERROR_BIT 0x80000000 |
214 | #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 | 214 | #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 |
215 | #define WC_STATUS_PURGE_BIT 0x10 | 215 | #define WC_STATUS_PURGE_BIT 0x10 |
216 | #define WC_SEND_RECEIVE_BIT 0x80 | ||
216 | 217 | ||
217 | struct ehca_cqe { | 218 | struct ehca_cqe { |
218 | u64 work_request_id; | 219 | u64 work_request_id; |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index ea13efddf175..b6bcee036734 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -412,6 +412,7 @@ static struct ehca_qp *internal_create_qp( | |||
412 | struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, | 412 | struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, |
413 | ib_device); | 413 | ib_device); |
414 | struct ib_ucontext *context = NULL; | 414 | struct ib_ucontext *context = NULL; |
415 | u32 nr_qes; | ||
415 | u64 h_ret; | 416 | u64 h_ret; |
416 | int is_llqp = 0, has_srq = 0; | 417 | int is_llqp = 0, has_srq = 0; |
417 | int qp_type, max_send_sge, max_recv_sge, ret; | 418 | int qp_type, max_send_sge, max_recv_sge, ret; |
@@ -715,6 +716,15 @@ static struct ehca_qp *internal_create_qp( | |||
715 | "and pages ret=%i", ret); | 716 | "and pages ret=%i", ret); |
716 | goto create_qp_exit2; | 717 | goto create_qp_exit2; |
717 | } | 718 | } |
719 | nr_qes = my_qp->ipz_squeue.queue_length / | ||
720 | my_qp->ipz_squeue.qe_size; | ||
721 | my_qp->sq_map = vmalloc(nr_qes * | ||
722 | sizeof(struct ehca_qmap_entry)); | ||
723 | if (!my_qp->sq_map) { | ||
724 | ehca_err(pd->device, "Couldn't allocate squeue " | ||
725 | "map ret=%i", ret); | ||
726 | goto create_qp_exit3; | ||
727 | } | ||
718 | } | 728 | } |
719 | 729 | ||
720 | if (HAS_RQ(my_qp)) { | 730 | if (HAS_RQ(my_qp)) { |
@@ -724,7 +734,7 @@ static struct ehca_qp *internal_create_qp( | |||
724 | if (ret) { | 734 | if (ret) { |
725 | ehca_err(pd->device, "Couldn't initialize rqueue " | 735 | ehca_err(pd->device, "Couldn't initialize rqueue " |
726 | "and pages ret=%i", ret); | 736 | "and pages ret=%i", ret); |
727 | goto create_qp_exit3; | 737 | goto create_qp_exit4; |
728 | } | 738 | } |
729 | } | 739 | } |
730 | 740 | ||
@@ -770,7 +780,7 @@ static struct ehca_qp *internal_create_qp( | |||
770 | if (!my_qp->mod_qp_parm) { | 780 | if (!my_qp->mod_qp_parm) { |
771 | ehca_err(pd->device, | 781 | ehca_err(pd->device, |
772 | "Could not alloc mod_qp_parm"); | 782 | "Could not alloc mod_qp_parm"); |
773 | goto create_qp_exit4; | 783 | goto create_qp_exit5; |
774 | } | 784 | } |
775 | } | 785 | } |
776 | } | 786 | } |
@@ -780,7 +790,7 @@ static struct ehca_qp *internal_create_qp( | |||
780 | h_ret = ehca_define_sqp(shca, my_qp, init_attr); | 790 | h_ret = ehca_define_sqp(shca, my_qp, init_attr); |
781 | if (h_ret != H_SUCCESS) { | 791 | if (h_ret != H_SUCCESS) { |
782 | ret = ehca2ib_return_code(h_ret); | 792 | ret = ehca2ib_return_code(h_ret); |
783 | goto create_qp_exit5; | 793 | goto create_qp_exit6; |
784 | } | 794 | } |
785 | } | 795 | } |
786 | 796 | ||
@@ -789,7 +799,7 @@ static struct ehca_qp *internal_create_qp( | |||
789 | if (ret) { | 799 | if (ret) { |
790 | ehca_err(pd->device, | 800 | ehca_err(pd->device, |
791 | "Couldn't assign qp to send_cq ret=%i", ret); | 801 | "Couldn't assign qp to send_cq ret=%i", ret); |
792 | goto create_qp_exit5; | 802 | goto create_qp_exit6; |
793 | } | 803 | } |
794 | } | 804 | } |
795 | 805 | ||
@@ -815,22 +825,26 @@ static struct ehca_qp *internal_create_qp( | |||
815 | if (ib_copy_to_udata(udata, &resp, sizeof resp)) { | 825 | if (ib_copy_to_udata(udata, &resp, sizeof resp)) { |
816 | ehca_err(pd->device, "Copy to udata failed"); | 826 | ehca_err(pd->device, "Copy to udata failed"); |
817 | ret = -EINVAL; | 827 | ret = -EINVAL; |
818 | goto create_qp_exit6; | 828 | goto create_qp_exit7; |
819 | } | 829 | } |
820 | } | 830 | } |
821 | 831 | ||
822 | return my_qp; | 832 | return my_qp; |
823 | 833 | ||
824 | create_qp_exit6: | 834 | create_qp_exit7: |
825 | ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); | 835 | ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); |
826 | 836 | ||
827 | create_qp_exit5: | 837 | create_qp_exit6: |
828 | kfree(my_qp->mod_qp_parm); | 838 | kfree(my_qp->mod_qp_parm); |
829 | 839 | ||
830 | create_qp_exit4: | 840 | create_qp_exit5: |
831 | if (HAS_RQ(my_qp)) | 841 | if (HAS_RQ(my_qp)) |
832 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); | 842 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); |
833 | 843 | ||
844 | create_qp_exit4: | ||
845 | if (HAS_SQ(my_qp)) | ||
846 | vfree(my_qp->sq_map); | ||
847 | |||
834 | create_qp_exit3: | 848 | create_qp_exit3: |
835 | if (HAS_SQ(my_qp)) | 849 | if (HAS_SQ(my_qp)) |
836 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); | 850 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); |
@@ -1534,8 +1548,6 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
1534 | if (attr_mask & IB_QP_QKEY) | 1548 | if (attr_mask & IB_QP_QKEY) |
1535 | my_qp->qkey = attr->qkey; | 1549 | my_qp->qkey = attr->qkey; |
1536 | 1550 | ||
1537 | my_qp->state = qp_new_state; | ||
1538 | |||
1539 | modify_qp_exit2: | 1551 | modify_qp_exit2: |
1540 | if (squeue_locked) { /* this means: sqe -> rts */ | 1552 | if (squeue_locked) { /* this means: sqe -> rts */ |
1541 | spin_unlock_irqrestore(&my_qp->spinlock_s, flags); | 1553 | spin_unlock_irqrestore(&my_qp->spinlock_s, flags); |
@@ -1551,6 +1563,8 @@ modify_qp_exit1: | |||
1551 | int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, | 1563 | int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, |
1552 | struct ib_udata *udata) | 1564 | struct ib_udata *udata) |
1553 | { | 1565 | { |
1566 | int ret = 0; | ||
1567 | |||
1554 | struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, | 1568 | struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, |
1555 | ib_device); | 1569 | ib_device); |
1556 | struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); | 1570 | struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); |
@@ -1597,12 +1611,18 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, | |||
1597 | attr->qp_state, my_qp->init_attr.port_num, | 1611 | attr->qp_state, my_qp->init_attr.port_num, |
1598 | ibqp->qp_type); | 1612 | ibqp->qp_type); |
1599 | spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); | 1613 | spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); |
1600 | return 0; | 1614 | goto out; |
1601 | } | 1615 | } |
1602 | spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); | 1616 | spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); |
1603 | } | 1617 | } |
1604 | 1618 | ||
1605 | return internal_modify_qp(ibqp, attr, attr_mask, 0); | 1619 | ret = internal_modify_qp(ibqp, attr, attr_mask, 0); |
1620 | |||
1621 | out: | ||
1622 | if ((ret == 0) && (attr_mask & IB_QP_STATE)) | ||
1623 | my_qp->state = attr->qp_state; | ||
1624 | |||
1625 | return ret; | ||
1606 | } | 1626 | } |
1607 | 1627 | ||
1608 | void ehca_recover_sqp(struct ib_qp *sqp) | 1628 | void ehca_recover_sqp(struct ib_qp *sqp) |
@@ -1973,8 +1993,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, | |||
1973 | 1993 | ||
1974 | if (HAS_RQ(my_qp)) | 1994 | if (HAS_RQ(my_qp)) |
1975 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); | 1995 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); |
1976 | if (HAS_SQ(my_qp)) | 1996 | if (HAS_SQ(my_qp)) { |
1977 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); | 1997 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); |
1998 | vfree(my_qp->sq_map); | ||
1999 | } | ||
1978 | kmem_cache_free(qp_cache, my_qp); | 2000 | kmem_cache_free(qp_cache, my_qp); |
1979 | atomic_dec(&shca->num_qps); | 2001 | atomic_dec(&shca->num_qps); |
1980 | return 0; | 2002 | return 0; |
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 898c8b5c38dd..4426d82fe798 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
@@ -139,6 +139,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr) | |||
139 | static inline int ehca_write_swqe(struct ehca_qp *qp, | 139 | static inline int ehca_write_swqe(struct ehca_qp *qp, |
140 | struct ehca_wqe *wqe_p, | 140 | struct ehca_wqe *wqe_p, |
141 | const struct ib_send_wr *send_wr, | 141 | const struct ib_send_wr *send_wr, |
142 | u32 sq_map_idx, | ||
142 | int hidden) | 143 | int hidden) |
143 | { | 144 | { |
144 | u32 idx; | 145 | u32 idx; |
@@ -157,7 +158,11 @@ static inline int ehca_write_swqe(struct ehca_qp *qp, | |||
157 | /* clear wqe header until sglist */ | 158 | /* clear wqe header until sglist */ |
158 | memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); | 159 | memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); |
159 | 160 | ||
160 | wqe_p->work_request_id = send_wr->wr_id; | 161 | wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK; |
162 | wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK; | ||
163 | |||
164 | qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK; | ||
165 | qp->sq_map[sq_map_idx].reported = 0; | ||
161 | 166 | ||
162 | switch (send_wr->opcode) { | 167 | switch (send_wr->opcode) { |
163 | case IB_WR_SEND: | 168 | case IB_WR_SEND: |
@@ -381,6 +386,7 @@ static inline int post_one_send(struct ehca_qp *my_qp, | |||
381 | { | 386 | { |
382 | struct ehca_wqe *wqe_p; | 387 | struct ehca_wqe *wqe_p; |
383 | int ret; | 388 | int ret; |
389 | u32 sq_map_idx; | ||
384 | u64 start_offset = my_qp->ipz_squeue.current_q_offset; | 390 | u64 start_offset = my_qp->ipz_squeue.current_q_offset; |
385 | 391 | ||
386 | /* get pointer next to free WQE */ | 392 | /* get pointer next to free WQE */ |
@@ -393,8 +399,15 @@ static inline int post_one_send(struct ehca_qp *my_qp, | |||
393 | "qp_num=%x", my_qp->ib_qp.qp_num); | 399 | "qp_num=%x", my_qp->ib_qp.qp_num); |
394 | return -ENOMEM; | 400 | return -ENOMEM; |
395 | } | 401 | } |
402 | |||
403 | /* | ||
404 | * Get the index of the WQE in the send queue. The same index is used | ||
405 | * for writing into the sq_map. | ||
406 | */ | ||
407 | sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size; | ||
408 | |||
396 | /* write a SEND WQE into the QUEUE */ | 409 | /* write a SEND WQE into the QUEUE */ |
397 | ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden); | 410 | ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden); |
398 | /* | 411 | /* |
399 | * if something failed, | 412 | * if something failed, |
400 | * reset the free entry pointer to the start value | 413 | * reset the free entry pointer to the start value |
@@ -589,7 +602,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) | |||
589 | struct ehca_qp *my_qp; | 602 | struct ehca_qp *my_qp; |
590 | int cqe_count = 0, is_error; | 603 | int cqe_count = 0, is_error; |
591 | 604 | ||
592 | poll_cq_one_read_cqe: | 605 | repoll: |
593 | cqe = (struct ehca_cqe *) | 606 | cqe = (struct ehca_cqe *) |
594 | ipz_qeit_get_inc_valid(&my_cq->ipz_queue); | 607 | ipz_qeit_get_inc_valid(&my_cq->ipz_queue); |
595 | if (!cqe) { | 608 | if (!cqe) { |
@@ -617,7 +630,7 @@ poll_cq_one_read_cqe: | |||
617 | ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x", | 630 | ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x", |
618 | my_cq->cq_number, cqe->local_qp_number); | 631 | my_cq->cq_number, cqe->local_qp_number); |
619 | /* ignore this purged cqe */ | 632 | /* ignore this purged cqe */ |
620 | goto poll_cq_one_read_cqe; | 633 | goto repoll; |
621 | } | 634 | } |
622 | spin_lock_irqsave(&qp->spinlock_s, flags); | 635 | spin_lock_irqsave(&qp->spinlock_s, flags); |
623 | purgeflag = qp->sqerr_purgeflag; | 636 | purgeflag = qp->sqerr_purgeflag; |
@@ -636,7 +649,7 @@ poll_cq_one_read_cqe: | |||
636 | * that caused sqe and turn off purge flag | 649 | * that caused sqe and turn off purge flag |
637 | */ | 650 | */ |
638 | qp->sqerr_purgeflag = 0; | 651 | qp->sqerr_purgeflag = 0; |
639 | goto poll_cq_one_read_cqe; | 652 | goto repoll; |
640 | } | 653 | } |
641 | } | 654 | } |
642 | 655 | ||
@@ -654,8 +667,34 @@ poll_cq_one_read_cqe: | |||
654 | my_cq, my_cq->cq_number); | 667 | my_cq, my_cq->cq_number); |
655 | } | 668 | } |
656 | 669 | ||
657 | /* we got a completion! */ | 670 | read_lock(&ehca_qp_idr_lock); |
658 | wc->wr_id = cqe->work_request_id; | 671 | my_qp = idr_find(&ehca_qp_idr, cqe->qp_token); |
672 | read_unlock(&ehca_qp_idr_lock); | ||
673 | if (!my_qp) | ||
674 | goto repoll; | ||
675 | wc->qp = &my_qp->ib_qp; | ||
676 | |||
677 | if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) { | ||
678 | struct ehca_qmap_entry *qmap_entry; | ||
679 | /* | ||
680 | * We got a send completion and need to restore the original | ||
681 | * wr_id. | ||
682 | */ | ||
683 | qmap_entry = &my_qp->sq_map[cqe->work_request_id & | ||
684 | QMAP_IDX_MASK]; | ||
685 | |||
686 | if (qmap_entry->reported) { | ||
687 | ehca_warn(cq->device, "Double cqe on qp_num=%#x", | ||
688 | my_qp->real_qp_num); | ||
689 | /* found a double cqe, discard it and read next one */ | ||
690 | goto repoll; | ||
691 | } | ||
692 | wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK; | ||
693 | wc->wr_id |= qmap_entry->app_wr_id; | ||
694 | qmap_entry->reported = 1; | ||
695 | } else | ||
696 | /* We got a receive completion. */ | ||
697 | wc->wr_id = cqe->work_request_id; | ||
659 | 698 | ||
660 | /* eval ib_wc_opcode */ | 699 | /* eval ib_wc_opcode */ |
661 | wc->opcode = ib_wc_opcode[cqe->optype]-1; | 700 | wc->opcode = ib_wc_opcode[cqe->optype]-1; |
@@ -667,7 +706,7 @@ poll_cq_one_read_cqe: | |||
667 | ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", | 706 | ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", |
668 | my_cq, my_cq->cq_number); | 707 | my_cq, my_cq->cq_number); |
669 | /* update also queue adder to throw away this entry!!! */ | 708 | /* update also queue adder to throw away this entry!!! */ |
670 | goto poll_cq_one_exit0; | 709 | goto repoll; |
671 | } | 710 | } |
672 | 711 | ||
673 | /* eval ib_wc_status */ | 712 | /* eval ib_wc_status */ |
@@ -678,11 +717,6 @@ poll_cq_one_read_cqe: | |||
678 | } else | 717 | } else |
679 | wc->status = IB_WC_SUCCESS; | 718 | wc->status = IB_WC_SUCCESS; |
680 | 719 | ||
681 | read_lock(&ehca_qp_idr_lock); | ||
682 | my_qp = idr_find(&ehca_qp_idr, cqe->qp_token); | ||
683 | wc->qp = &my_qp->ib_qp; | ||
684 | read_unlock(&ehca_qp_idr_lock); | ||
685 | |||
686 | wc->byte_len = cqe->nr_bytes_transferred; | 720 | wc->byte_len = cqe->nr_bytes_transferred; |
687 | wc->pkey_index = cqe->pkey_index; | 721 | wc->pkey_index = cqe->pkey_index; |
688 | wc->slid = cqe->rlid; | 722 | wc->slid = cqe->rlid; |
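The ehca_reqs.c hunks above restore the caller's work-request id from a per-QP send-queue map: ehca_write_swqe() now takes an sq_map_idx, the low bits of the id handed to the hardware carry that index, and ehca_poll_cq_one() uses the map entry to rebuild the original wr_id and to drop duplicate CQEs via the "reported" flag. The following is a minimal user-space sketch of that packing scheme; QMAP_BITS, the structure layout and all names are assumptions for illustration, not the ehca definitions.

#include <stdint.h>
#include <stdio.h>

#define QMAP_BITS      9                                 /* assumed map size */
#define QMAP_ENTRIES   (1u << QMAP_BITS)
#define QMAP_IDX_MASK  ((uint64_t)QMAP_ENTRIES - 1)

struct qmap_entry {
	uint64_t app_wr_id;   /* low bits of the caller's wr_id */
	int reported;         /* completion already handed back? */
};

/* On post: save the caller's low bits and give the hardware an id whose
 * low bits are the map index. */
static uint64_t pack_wr_id(struct qmap_entry *map, uint64_t app_wr_id,
			   uint64_t idx)
{
	map[idx].app_wr_id = app_wr_id & QMAP_IDX_MASK;
	map[idx].reported  = 0;
	return (app_wr_id & ~QMAP_IDX_MASK) | idx;
}

/* On poll: restore the caller's id; a second completion for the same
 * map entry is flagged as a duplicate. */
static int unpack_wr_id(struct qmap_entry *map, uint64_t hw_wr_id,
			uint64_t *app_wr_id)
{
	struct qmap_entry *e = &map[hw_wr_id & QMAP_IDX_MASK];

	if (e->reported)
		return -1;                       /* double CQE, discard */
	e->reported = 1;
	*app_wr_id  = (hw_wr_id & ~QMAP_IDX_MASK) | e->app_wr_id;
	return 0;
}

int main(void)
{
	static struct qmap_entry map[QMAP_ENTRIES];
	uint64_t hw = pack_wr_id(map, 0xabcdef0123456789ULL, 42);
	uint64_t app;

	if (unpack_wr_id(map, hw, &app) == 0)
		printf("restored wr_id: 0x%llx\n", (unsigned long long)app);
	return 0;
}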
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h index ec950bf8c479..21f7d06f14ad 100644 --- a/drivers/infiniband/hw/ehca/ehca_tools.h +++ b/drivers/infiniband/hw/ehca/ehca_tools.h | |||
@@ -54,7 +54,6 @@ | |||
54 | #include <linux/module.h> | 54 | #include <linux/module.h> |
55 | #include <linux/moduleparam.h> | 55 | #include <linux/moduleparam.h> |
56 | #include <linux/vmalloc.h> | 56 | #include <linux/vmalloc.h> |
57 | #include <linux/version.h> | ||
58 | #include <linux/notifier.h> | 57 | #include <linux/notifier.h> |
59 | #include <linux/cpu.h> | 58 | #include <linux/cpu.h> |
60 | #include <linux/device.h> | 59 | #include <linux/device.h> |
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 23faba9d21eb..8bb5170b4e41 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/version.h> | ||
35 | #include <linux/module.h> | 34 | #include <linux/module.h> |
36 | #include <linux/fs.h> | 35 | #include <linux/fs.h> |
37 | #include <linux/mount.h> | 36 | #include <linux/mount.h> |
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c index d90f5e9a54fa..9839e20119bc 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c | |||
@@ -1720,7 +1720,7 @@ static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, | |||
1720 | "not 2KB aligned!\n", pa); | 1720 | "not 2KB aligned!\n", pa); |
1721 | return; | 1721 | return; |
1722 | } | 1722 | } |
1723 | if (pa >= (1UL << IBA7220_TID_SZ_SHIFT)) { | 1723 | if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { |
1724 | ipath_dev_err(dd, | 1724 | ipath_dev_err(dd, |
1725 | "BUG: Physical page address 0x%lx " | 1725 | "BUG: Physical page address 0x%lx " |
1726 | "larger than supported\n", pa); | 1726 | "larger than supported\n", pa); |
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 36aa242c487c..729446f56aab 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -267,6 +267,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
267 | u16 lrh0; | 267 | u16 lrh0; |
268 | u16 lid; | 268 | u16 lid; |
269 | int ret = 0; | 269 | int ret = 0; |
270 | int next_cur; | ||
270 | 271 | ||
271 | spin_lock_irqsave(&qp->s_lock, flags); | 272 | spin_lock_irqsave(&qp->s_lock, flags); |
272 | 273 | ||
@@ -290,8 +291,9 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
290 | goto bail; | 291 | goto bail; |
291 | 292 | ||
292 | wqe = get_swqe_ptr(qp, qp->s_cur); | 293 | wqe = get_swqe_ptr(qp, qp->s_cur); |
293 | if (++qp->s_cur >= qp->s_size) | 294 | next_cur = qp->s_cur + 1; |
294 | qp->s_cur = 0; | 295 | if (next_cur >= qp->s_size) |
296 | next_cur = 0; | ||
295 | 297 | ||
296 | /* Construct the header. */ | 298 | /* Construct the header. */ |
297 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; | 299 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; |
@@ -315,6 +317,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
315 | qp->s_flags |= IPATH_S_WAIT_DMA; | 317 | qp->s_flags |= IPATH_S_WAIT_DMA; |
316 | goto bail; | 318 | goto bail; |
317 | } | 319 | } |
320 | qp->s_cur = next_cur; | ||
318 | spin_unlock_irqrestore(&qp->s_lock, flags); | 321 | spin_unlock_irqrestore(&qp->s_lock, flags); |
319 | ipath_ud_loopback(qp, wqe); | 322 | ipath_ud_loopback(qp, wqe); |
320 | spin_lock_irqsave(&qp->s_lock, flags); | 323 | spin_lock_irqsave(&qp->s_lock, flags); |
@@ -323,6 +326,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
323 | } | 326 | } |
324 | } | 327 | } |
325 | 328 | ||
329 | qp->s_cur = next_cur; | ||
326 | extra_bytes = -wqe->length & 3; | 330 | extra_bytes = -wqe->length & 3; |
327 | nwords = (wqe->length + extra_bytes) >> 2; | 331 | nwords = (wqe->length + extra_bytes) >> 2; |
328 | 332 | ||
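The ipath_ud.c change defers updating qp->s_cur: the next ring index is computed up front but only written back once the WQE is actually consumed, so the early-return paths (for example, waiting for DMA) no longer leave the queue pointer advanced past an unprocessed entry. Below is a small, self-contained sketch of that "compute next, commit late" pattern; the names are illustrative rather than the ipath structures.

#include <stdio.h>
#include <stddef.h>

struct ring {
	size_t cur;    /* next entry to consume */
	size_t size;   /* number of slots */
};

/* Returns 0 if the entry was consumed, -1 if processing bailed out.
 * The ring index is only committed on success. */
static int consume_one(struct ring *r, int (*process)(size_t idx))
{
	size_t next_cur = r->cur + 1;

	if (next_cur >= r->size)
		next_cur = 0;

	if (process(r->cur))
		return -1;        /* bail: r->cur still names this entry */

	r->cur = next_cur;        /* commit only after the work succeeded */
	return 0;
}

static int ok(size_t idx)   { (void)idx; return 0; }
static int busy(size_t idx) { (void)idx; return 1; }

int main(void)
{
	struct ring r = { .cur = 3, .size = 4 };

	consume_one(&r, busy);                 /* fails, cur stays at 3 */
	printf("after bail:    cur = %zu\n", r.cur);
	consume_one(&r, ok);                   /* succeeds, wraps to 0 */
	printf("after consume: cur = %zu\n", r.cur);
	return 0;
}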
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index a4cdb465cd1d..87f5c5a87b98 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -204,6 +204,8 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, | |||
204 | if (err) | 204 | if (err) |
205 | goto err_mr; | 205 | goto err_mr; |
206 | 206 | ||
207 | mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; | ||
208 | |||
207 | return &mr->ibmr; | 209 | return &mr->ibmr; |
208 | 210 | ||
209 | err_mr: | 211 | err_mr: |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 39bd897b40c6..8eb7ae96974d 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <linux/dma-mapping.h> | 43 | #include <linux/dma-mapping.h> |
44 | #include <linux/workqueue.h> | 44 | #include <linux/workqueue.h> |
45 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
46 | #include <linux/version.h> | ||
47 | #include <asm/io.h> | 46 | #include <asm/io.h> |
48 | #include <linux/crc32c.h> | 47 | #include <linux/crc32c.h> |
49 | 48 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 7ebc400a4b3d..341ffedafed6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -202,7 +202,7 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev, | |||
202 | dev_kfree_skb_any(rx_ring[i].skb); | 202 | dev_kfree_skb_any(rx_ring[i].skb); |
203 | } | 203 | } |
204 | 204 | ||
205 | kfree(rx_ring); | 205 | vfree(rx_ring); |
206 | } | 206 | } |
207 | 207 | ||
208 | static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv) | 208 | static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv) |
@@ -352,9 +352,14 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i | |||
352 | int ret; | 352 | int ret; |
353 | int i; | 353 | int i; |
354 | 354 | ||
355 | rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL); | 355 | rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring); |
356 | if (!rx->rx_ring) | 356 | if (!rx->rx_ring) { |
357 | printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n", | ||
358 | priv->ca->name, ipoib_recvq_size); | ||
357 | return -ENOMEM; | 359 | return -ENOMEM; |
360 | } | ||
361 | |||
362 | memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring); | ||
358 | 363 | ||
359 | t = kmalloc(sizeof *t, GFP_KERNEL); | 364 | t = kmalloc(sizeof *t, GFP_KERNEL); |
360 | if (!t) { | 365 | if (!t) { |
@@ -1494,14 +1499,16 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge) | |||
1494 | return; | 1499 | return; |
1495 | } | 1500 | } |
1496 | 1501 | ||
1497 | priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, | 1502 | priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring); |
1498 | GFP_KERNEL); | ||
1499 | if (!priv->cm.srq_ring) { | 1503 | if (!priv->cm.srq_ring) { |
1500 | printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n", | 1504 | printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n", |
1501 | priv->ca->name, ipoib_recvq_size); | 1505 | priv->ca->name, ipoib_recvq_size); |
1502 | ib_destroy_srq(priv->cm.srq); | 1506 | ib_destroy_srq(priv->cm.srq); |
1503 | priv->cm.srq = NULL; | 1507 | priv->cm.srq = NULL; |
1508 | return; | ||
1504 | } | 1509 | } |
1510 | |||
1511 | memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring); | ||
1505 | } | 1512 | } |
1506 | 1513 | ||
1507 | int ipoib_cm_dev_init(struct net_device *dev) | 1514 | int ipoib_cm_dev_init(struct net_device *dev) |
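The ipoib_cm.c hunks move the CM receive rings from kcalloc()/kzalloc() to vmalloc(): with large ipoib_recvq_size values the ring no longer needs physically contiguous pages, but the memory must now be zeroed by hand and released with vfree(), as the matching change in ipoib_cm_free_rx_ring shows. A hedged kernel-style sketch of that allocation pattern follows; struct ring_entry and the helper names are illustrative, not the ipoib types.

#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct ring_entry {
	struct sk_buff *skb;
	u64 mapping;
};

/* Large per-entry rings: virtually contiguous memory is enough, so take
 * it from vmalloc space and zero it explicitly (vmalloc does not). */
static struct ring_entry *alloc_rx_ring(int nentries)
{
	struct ring_entry *ring;

	ring = vmalloc(nentries * sizeof(*ring));
	if (!ring)
		return NULL;          /* caller logs a warning and fails */

	memset(ring, 0, nentries * sizeof(*ring));
	return ring;
}

static void free_rx_ring(struct ring_entry *ring)
{
	vfree(ring);                  /* vmalloc memory goes back via vfree */
}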
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index f51201b17bfd..7e9e218738fa 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -156,14 +156,8 @@ static int ipoib_stop(struct net_device *dev) | |||
156 | 156 | ||
157 | netif_stop_queue(dev); | 157 | netif_stop_queue(dev); |
158 | 158 | ||
159 | /* | 159 | ipoib_ib_dev_down(dev, 0); |
160 | * Now flush workqueue to make sure a scheduled task doesn't | 160 | ipoib_ib_dev_stop(dev, 0); |
161 | * bring our internal state back up. | ||
162 | */ | ||
163 | flush_workqueue(ipoib_workqueue); | ||
164 | |||
165 | ipoib_ib_dev_down(dev, 1); | ||
166 | ipoib_ib_dev_stop(dev, 1); | ||
167 | 161 | ||
168 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | 162 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
169 | struct ipoib_dev_priv *cpriv; | 163 | struct ipoib_dev_priv *cpriv; |
@@ -1314,7 +1308,7 @@ sysfs_failed: | |||
1314 | 1308 | ||
1315 | register_failed: | 1309 | register_failed: |
1316 | ib_unregister_event_handler(&priv->event_handler); | 1310 | ib_unregister_event_handler(&priv->event_handler); |
1317 | flush_scheduled_work(); | 1311 | flush_workqueue(ipoib_workqueue); |
1318 | 1312 | ||
1319 | event_failed: | 1313 | event_failed: |
1320 | ipoib_dev_cleanup(priv->dev); | 1314 | ipoib_dev_cleanup(priv->dev); |
@@ -1373,7 +1367,12 @@ static void ipoib_remove_one(struct ib_device *device) | |||
1373 | 1367 | ||
1374 | list_for_each_entry_safe(priv, tmp, dev_list, list) { | 1368 | list_for_each_entry_safe(priv, tmp, dev_list, list) { |
1375 | ib_unregister_event_handler(&priv->event_handler); | 1369 | ib_unregister_event_handler(&priv->event_handler); |
1376 | flush_scheduled_work(); | 1370 | |
1371 | rtnl_lock(); | ||
1372 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); | ||
1373 | rtnl_unlock(); | ||
1374 | |||
1375 | flush_workqueue(ipoib_workqueue); | ||
1377 | 1376 | ||
1378 | unregister_netdev(priv->dev); | 1377 | unregister_netdev(priv->dev); |
1379 | ipoib_dev_cleanup(priv->dev); | 1378 | ipoib_dev_cleanup(priv->dev); |
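In ipoib_main.c the removal path stops relying on flush_scheduled_work(): the interface is first brought down under the RTNL lock via dev_change_flags(), and only then is the driver's private ipoib_workqueue flushed, so nothing it schedules can race with the teardown. A hedged kernel-style sketch of that ordering, with an assumed private workqueue pointer and illustrative function name:

#include <linux/rtnetlink.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

/* Teardown ordering used above: take the interface down first (under
 * RTNL, since dev_change_flags() requires it), then drain the driver's
 * own workqueue before unregistering the netdev. */
static void example_remove_one(struct net_device *dev,
			       struct workqueue_struct *driver_wq)
{
	rtnl_lock();
	dev_change_flags(dev, dev->flags & ~IFF_UP);
	rtnl_unlock();

	flush_workqueue(driver_wq);     /* no driver work left in flight */

	unregister_netdev(dev);
}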
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 8950e9546f4e..ac33c8f3ea85 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -392,8 +392,16 @@ static int ipoib_mcast_join_complete(int status, | |||
392 | &priv->mcast_task, 0); | 392 | &priv->mcast_task, 0); |
393 | mutex_unlock(&mcast_mutex); | 393 | mutex_unlock(&mcast_mutex); |
394 | 394 | ||
395 | if (mcast == priv->broadcast) | 395 | if (mcast == priv->broadcast) { |
396 | /* | ||
397 | * Take RTNL lock here to avoid racing with | ||
398 | * ipoib_stop() and turning the carrier back | ||
399 | * on while a device is being removed. | ||
400 | */ | ||
401 | rtnl_lock(); | ||
396 | netif_carrier_on(dev); | 402 | netif_carrier_on(dev); |
403 | rtnl_unlock(); | ||
404 | } | ||
397 | 405 | ||
398 | return 0; | 406 | return 0; |
399 | } | 407 | } |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 63462ecca147..26ff6214a81f 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
36 | #include <linux/version.h> | ||
37 | 36 | ||
38 | #include "iscsi_iser.h" | 37 | #include "iscsi_iser.h" |
39 | 38 | ||
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 2d65411f6763..3524bef62be6 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
@@ -647,6 +647,47 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p) | |||
647 | return copy_to_user(p, str, len) ? -EFAULT : len; | 647 | return copy_to_user(p, str, len) ? -EFAULT : len; |
648 | } | 648 | } |
649 | 649 | ||
650 | #define OLD_KEY_MAX 0x1ff | ||
651 | static int handle_eviocgbit(struct input_dev *dev, unsigned int cmd, void __user *p, int compat_mode) | ||
652 | { | ||
653 | static unsigned long keymax_warn_time; | ||
654 | unsigned long *bits; | ||
655 | int len; | ||
656 | |||
657 | switch (_IOC_NR(cmd) & EV_MAX) { | ||
658 | |||
659 | case 0: bits = dev->evbit; len = EV_MAX; break; | ||
660 | case EV_KEY: bits = dev->keybit; len = KEY_MAX; break; | ||
661 | case EV_REL: bits = dev->relbit; len = REL_MAX; break; | ||
662 | case EV_ABS: bits = dev->absbit; len = ABS_MAX; break; | ||
663 | case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break; | ||
664 | case EV_LED: bits = dev->ledbit; len = LED_MAX; break; | ||
665 | case EV_SND: bits = dev->sndbit; len = SND_MAX; break; | ||
666 | case EV_FF: bits = dev->ffbit; len = FF_MAX; break; | ||
667 | case EV_SW: bits = dev->swbit; len = SW_MAX; break; | ||
668 | default: return -EINVAL; | ||
669 | } | ||
670 | |||
671 | /* | ||
672 | * Work around bugs in userspace programs that like to do | ||
673 | * EVIOCGBIT(EV_KEY, KEY_MAX) and not realize that 'len' | ||
674 | * should be in bytes, not in bits. | ||
675 | */ | ||
676 | if ((_IOC_NR(cmd) & EV_MAX) == EV_KEY && _IOC_SIZE(cmd) == OLD_KEY_MAX) { | ||
677 | len = OLD_KEY_MAX; | ||
678 | if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000)) | ||
679 | printk(KERN_WARNING | ||
680 | "evdev.c(EVIOCGBIT): Suspicious buffer size %u, " | ||
681 | "limiting output to %zu bytes. See " | ||
682 | "http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n", | ||
683 | OLD_KEY_MAX, | ||
684 | BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long)); | ||
685 | } | ||
686 | |||
687 | return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode); | ||
688 | } | ||
689 | #undef OLD_KEY_MAX | ||
690 | |||
650 | static long evdev_do_ioctl(struct file *file, unsigned int cmd, | 691 | static long evdev_do_ioctl(struct file *file, unsigned int cmd, |
651 | void __user *p, int compat_mode) | 692 | void __user *p, int compat_mode) |
652 | { | 693 | { |
@@ -733,26 +774,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, | |||
733 | 774 | ||
734 | if (_IOC_DIR(cmd) == _IOC_READ) { | 775 | if (_IOC_DIR(cmd) == _IOC_READ) { |
735 | 776 | ||
736 | if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0))) { | 777 | if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0))) |
737 | 778 | return handle_eviocgbit(dev, cmd, p, compat_mode); | |
738 | unsigned long *bits; | ||
739 | int len; | ||
740 | |||
741 | switch (_IOC_NR(cmd) & EV_MAX) { | ||
742 | |||
743 | case 0: bits = dev->evbit; len = EV_MAX; break; | ||
744 | case EV_KEY: bits = dev->keybit; len = KEY_MAX; break; | ||
745 | case EV_REL: bits = dev->relbit; len = REL_MAX; break; | ||
746 | case EV_ABS: bits = dev->absbit; len = ABS_MAX; break; | ||
747 | case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break; | ||
748 | case EV_LED: bits = dev->ledbit; len = LED_MAX; break; | ||
749 | case EV_SND: bits = dev->sndbit; len = SND_MAX; break; | ||
750 | case EV_FF: bits = dev->ffbit; len = FF_MAX; break; | ||
751 | case EV_SW: bits = dev->swbit; len = SW_MAX; break; | ||
752 | default: return -EINVAL; | ||
753 | } | ||
754 | return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode); | ||
755 | } | ||
756 | 779 | ||
757 | if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) | 780 | if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) |
758 | return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd), | 781 | return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd), |
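The new handle_eviocgbit() helper factors the EVIOCGBIT dispatch out of evdev_do_ioctl() and adds a workaround: some userspace callers pass KEY_MAX (0x1ff at the time) as the ioctl size, treating it as a bit count, while the argument is actually a buffer size in bytes, so the kernel clamps the copy and prints a rate-limited warning. A user-space sketch of the correct call, sized in bytes, is shown below; the device path is illustrative.

#include <linux/input.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char keybits[(KEY_MAX + 7) / 8];        /* bytes, not bits */
	int fd = open("/dev/input/event0", O_RDONLY);    /* illustrative path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(keybits, 0, sizeof(keybits));
	if (ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(keybits)), keybits) < 0) {
		perror("EVIOCGBIT");
		return 1;
	}

	/* bit n set => key code n exists on this device */
	printf("first byte of the key bitmap: 0x%02x\n", keybits[0]);
	return 0;
}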
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 87d3e7eabffd..6791be81eb29 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
@@ -127,6 +127,7 @@ static const struct xpad_device { | |||
127 | { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 }, | 127 | { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 }, |
128 | { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, | 128 | { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, |
129 | { 0x0c12, 0x8802, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, | 129 | { 0x0c12, 0x8802, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, |
130 | { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", MAP_DPAD_TO_AXES, XTYPE_XBOX }, | ||
130 | { 0x0c12, 0x8810, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, | 131 | { 0x0c12, 0x8810, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, |
131 | { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", MAP_DPAD_TO_AXES, XTYPE_XBOX }, | 132 | { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", MAP_DPAD_TO_AXES, XTYPE_XBOX }, |
132 | { 0x0e4c, 0x1097, "Radica Gamester Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, | 133 | { 0x0e4c, 0x1097, "Radica Gamester Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, |
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c index 54ed8e2e1c02..e348cfccc17a 100644 --- a/drivers/input/keyboard/bf54x-keys.c +++ b/drivers/input/keyboard/bf54x-keys.c | |||
@@ -29,7 +29,6 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/version.h> | ||
33 | 32 | ||
34 | #include <linux/init.h> | 33 | #include <linux/init.h> |
35 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
@@ -44,7 +43,7 @@ | |||
44 | #include <linux/input.h> | 43 | #include <linux/input.h> |
45 | 44 | ||
46 | #include <asm/portmux.h> | 45 | #include <asm/portmux.h> |
47 | #include <asm/mach/bf54x_keys.h> | 46 | #include <mach/bf54x_keys.h> |
48 | 47 | ||
49 | #define DRV_NAME "bf54x-keys" | 48 | #define DRV_NAME "bf54x-keys" |
50 | #define TIME_SCALE 100 /* 100 ns */ | 49 | #define TIME_SCALE 100 /* 100 ns */ |
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index be58730e636a..ec96b369dd7a 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/version.h> | ||
13 | 12 | ||
14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
15 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
@@ -118,6 +117,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev) | |||
118 | unsigned int type = button->type ?: EV_KEY; | 117 | unsigned int type = button->type ?: EV_KEY; |
119 | 118 | ||
120 | bdata->input = input; | 119 | bdata->input = input; |
120 | bdata->button = button; | ||
121 | setup_timer(&bdata->timer, | 121 | setup_timer(&bdata->timer, |
122 | gpio_check_button, (unsigned long)bdata); | 122 | gpio_check_button, (unsigned long)bdata); |
123 | 123 | ||
@@ -256,7 +256,7 @@ static int gpio_keys_resume(struct platform_device *pdev) | |||
256 | #define gpio_keys_resume NULL | 256 | #define gpio_keys_resume NULL |
257 | #endif | 257 | #endif |
258 | 258 | ||
259 | struct platform_driver gpio_keys_device_driver = { | 259 | static struct platform_driver gpio_keys_device_driver = { |
260 | .probe = gpio_keys_probe, | 260 | .probe = gpio_keys_probe, |
261 | .remove = __devexit_p(gpio_keys_remove), | 261 | .remove = __devexit_p(gpio_keys_remove), |
262 | .suspend = gpio_keys_suspend, | 262 | .suspend = gpio_keys_suspend, |
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c index 6a1f48b76e32..2adf9cb265da 100644 --- a/drivers/input/misc/cobalt_btns.c +++ b/drivers/input/misc/cobalt_btns.c | |||
@@ -148,6 +148,9 @@ static int __devexit cobalt_buttons_remove(struct platform_device *pdev) | |||
148 | return 0; | 148 | return 0; |
149 | } | 149 | } |
150 | 150 | ||
151 | MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); | ||
152 | MODULE_DESCRIPTION("Cobalt button interface driver"); | ||
153 | MODULE_LICENSE("GPL"); | ||
151 | /* work with hotplug and coldplug */ | 154 | /* work with hotplug and coldplug */ |
152 | MODULE_ALIAS("platform:Cobalt buttons"); | 155 | MODULE_ALIAS("platform:Cobalt buttons"); |
153 | 156 | ||
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index 7bbea097cda2..f996546fc443 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig | |||
@@ -130,6 +130,29 @@ config MOUSE_APPLETOUCH | |||
130 | To compile this driver as a module, choose M here: the | 130 | To compile this driver as a module, choose M here: the |
131 | module will be called appletouch. | 131 | module will be called appletouch. |
132 | 132 | ||
133 | config MOUSE_BCM5974 | ||
134 | tristate "Apple USB BCM5974 Multitouch trackpad support" | ||
135 | depends on USB_ARCH_HAS_HCD | ||
136 | select USB | ||
137 | help | ||
138 | Say Y here if you have an Apple USB BCM5974 Multitouch | ||
139 | trackpad. | ||
140 | |||
141 | The BCM5974 is the multitouch trackpad found in the Macbook | ||
142 | Air (JAN2008) and Macbook Pro Penryn (FEB2008) laptops. | ||
143 | |||
144 | It is also found in the iPhone (2007) and iPod Touch (2008). | ||
145 | |||
146 | This driver provides multitouch functionality together with | ||
147 | the synaptics X11 driver. | ||
148 | |||
149 | The interface is currently identical to the appletouch interface; | ||
150 | for further information, see | ||
151 | <file:Documentation/input/appletouch.txt>. | ||
152 | |||
153 | To compile this driver as a module, choose M here: the | ||
154 | module will be called bcm5974. | ||
155 | |||
133 | config MOUSE_INPORT | 156 | config MOUSE_INPORT |
134 | tristate "InPort/MS/ATIXL busmouse" | 157 | tristate "InPort/MS/ATIXL busmouse" |
135 | depends on ISA | 158 | depends on ISA |
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile index 9e6e36330820..d4d202516090 100644 --- a/drivers/input/mouse/Makefile +++ b/drivers/input/mouse/Makefile | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o | 7 | obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o |
8 | obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o | 8 | obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o |
9 | obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o | ||
9 | obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o | 10 | obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o |
10 | obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o | 11 | obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o |
11 | obj-$(CONFIG_MOUSE_INPORT) += inport.o | 12 | obj-$(CONFIG_MOUSE_INPORT) += inport.o |
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c new file mode 100644 index 000000000000..2ec921bf3c60 --- /dev/null +++ b/drivers/input/mouse/bcm5974.c | |||
@@ -0,0 +1,681 @@ | |||
1 | /* | ||
2 | * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver | ||
3 | * | ||
4 | * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) | ||
5 | * | ||
6 | * The USB initialization and package decoding was made by | ||
7 | * Scott Shawcroft as part of the touchd user-space driver project: | ||
8 | * Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com) | ||
9 | * | ||
10 | * The BCM5974 driver is based on the appletouch driver: | ||
11 | * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) | ||
12 | * Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net) | ||
13 | * Copyright (C) 2005 Stelian Pop (stelian@popies.net) | ||
14 | * Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de) | ||
15 | * Copyright (C) 2005 Peter Osterlund (petero2@telia.com) | ||
16 | * Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch) | ||
17 | * Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch) | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or modify | ||
20 | * it under the terms of the GNU General Public License as published by | ||
21 | * the Free Software Foundation; either version 2 of the License, or | ||
22 | * (at your option) any later version. | ||
23 | * | ||
24 | * This program is distributed in the hope that it will be useful, | ||
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
27 | * GNU General Public License for more details. | ||
28 | * | ||
29 | * You should have received a copy of the GNU General Public License | ||
30 | * along with this program; if not, write to the Free Software | ||
31 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/errno.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/usb/input.h> | ||
41 | #include <linux/hid.h> | ||
42 | #include <linux/mutex.h> | ||
43 | |||
44 | #define USB_VENDOR_ID_APPLE 0x05ac | ||
45 | |||
46 | /* MacbookAir, aka wellspring */ | ||
47 | #define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223 | ||
48 | #define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224 | ||
49 | #define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225 | ||
50 | /* MacbookProPenryn, aka wellspring2 */ | ||
51 | #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 | ||
52 | #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 | ||
53 | #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 | ||
54 | |||
55 | #define BCM5974_DEVICE(prod) { \ | ||
56 | .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ | ||
57 | USB_DEVICE_ID_MATCH_INT_CLASS | \ | ||
58 | USB_DEVICE_ID_MATCH_INT_PROTOCOL), \ | ||
59 | .idVendor = USB_VENDOR_ID_APPLE, \ | ||
60 | .idProduct = (prod), \ | ||
61 | .bInterfaceClass = USB_INTERFACE_CLASS_HID, \ | ||
62 | .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \ | ||
63 | } | ||
64 | |||
65 | /* table of devices that work with this driver */ | ||
66 | static const struct usb_device_id bcm5974_table [] = { | ||
67 | /* MacbookAir1.1 */ | ||
68 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), | ||
69 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO), | ||
70 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS), | ||
71 | /* MacbookProPenryn */ | ||
72 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI), | ||
73 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO), | ||
74 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS), | ||
75 | /* Terminating entry */ | ||
76 | {} | ||
77 | }; | ||
78 | MODULE_DEVICE_TABLE(usb, bcm5974_table); | ||
79 | |||
80 | MODULE_AUTHOR("Henrik Rydberg"); | ||
81 | MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver"); | ||
82 | MODULE_LICENSE("GPL"); | ||
83 | |||
84 | #define dprintk(level, format, a...)\ | ||
85 | { if (debug >= level) printk(KERN_DEBUG format, ##a); } | ||
86 | |||
87 | static int debug = 1; | ||
88 | module_param(debug, int, 0644); | ||
89 | MODULE_PARM_DESC(debug, "Activate debugging output"); | ||
90 | |||
91 | /* button data structure */ | ||
92 | struct bt_data { | ||
93 | u8 unknown1; /* constant */ | ||
94 | u8 button; /* left button */ | ||
95 | u8 rel_x; /* relative x coordinate */ | ||
96 | u8 rel_y; /* relative y coordinate */ | ||
97 | }; | ||
98 | |||
99 | /* trackpad header structure */ | ||
100 | struct tp_header { | ||
101 | u8 unknown1[16]; /* constants, timers, etc */ | ||
102 | u8 fingers; /* number of fingers on trackpad */ | ||
103 | u8 unknown2[9]; /* constants, timers, etc */ | ||
104 | }; | ||
105 | |||
106 | /* trackpad finger structure */ | ||
107 | struct tp_finger { | ||
108 | __le16 origin; /* left/right origin? */ | ||
109 | __le16 abs_x; /* absolute x coordinate */ | ||
110 | __le16 abs_y; /* absolute y coordinate */ | ||
111 | __le16 rel_x; /* relative x coordinate */ | ||
112 | __le16 rel_y; /* relative y coordinate */ | ||
113 | __le16 size_major; /* finger size, major axis? */ | ||
114 | __le16 size_minor; /* finger size, minor axis? */ | ||
115 | __le16 orientation; /* 16384 when point, else 15 bit angle */ | ||
116 | __le16 force_major; /* trackpad force, major axis? */ | ||
117 | __le16 force_minor; /* trackpad force, minor axis? */ | ||
118 | __le16 unused[3]; /* zeros */ | ||
119 | __le16 multi; /* one finger: varies, more fingers: constant */ | ||
120 | }; | ||
121 | |||
122 | /* trackpad data structure, empirically at least ten fingers */ | ||
123 | struct tp_data { | ||
124 | struct tp_header header; | ||
125 | struct tp_finger finger[16]; | ||
126 | }; | ||
127 | |||
128 | /* device-specific parameters */ | ||
129 | struct bcm5974_param { | ||
130 | int dim; /* logical dimension */ | ||
131 | int fuzz; /* logical noise value */ | ||
132 | int devmin; /* device minimum reading */ | ||
133 | int devmax; /* device maximum reading */ | ||
134 | }; | ||
135 | |||
136 | /* device-specific configuration */ | ||
137 | struct bcm5974_config { | ||
138 | int ansi, iso, jis; /* the product id of this device */ | ||
139 | int bt_ep; /* the endpoint of the button interface */ | ||
140 | int bt_datalen; /* data length of the button interface */ | ||
141 | int tp_ep; /* the endpoint of the trackpad interface */ | ||
142 | int tp_datalen; /* data length of the trackpad interface */ | ||
143 | struct bcm5974_param p; /* finger pressure limits */ | ||
144 | struct bcm5974_param w; /* finger width limits */ | ||
145 | struct bcm5974_param x; /* horizontal limits */ | ||
146 | struct bcm5974_param y; /* vertical limits */ | ||
147 | }; | ||
148 | |||
149 | /* logical device structure */ | ||
150 | struct bcm5974 { | ||
151 | char phys[64]; | ||
152 | struct usb_device *udev; /* usb device */ | ||
153 | struct usb_interface *intf; /* our interface */ | ||
154 | struct input_dev *input; /* input dev */ | ||
155 | struct bcm5974_config cfg; /* device configuration */ | ||
156 | struct mutex pm_mutex; /* serialize access to open/suspend */ | ||
157 | int opened; /* 1: opened, 0: closed */ | ||
158 | struct urb *bt_urb; /* button usb request block */ | ||
159 | struct bt_data *bt_data; /* button transferred data */ | ||
160 | struct urb *tp_urb; /* trackpad usb request block */ | ||
161 | struct tp_data *tp_data; /* trackpad transferred data */ | ||
162 | }; | ||
163 | |||
164 | /* logical dimensions */ | ||
165 | #define DIM_PRESSURE 256 /* maximum finger pressure */ | ||
166 | #define DIM_WIDTH 16 /* maximum finger width */ | ||
167 | #define DIM_X 1280 /* maximum trackpad x value */ | ||
168 | #define DIM_Y 800 /* maximum trackpad y value */ | ||
169 | |||
170 | /* logical signal quality */ | ||
171 | #define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ | ||
172 | #define SN_WIDTH 100 /* width signal-to-noise ratio */ | ||
173 | #define SN_COORD 250 /* coordinate signal-to-noise ratio */ | ||
174 | |||
175 | /* device constants */ | ||
176 | static const struct bcm5974_config bcm5974_config_table[] = { | ||
177 | { | ||
178 | USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, | ||
179 | USB_DEVICE_ID_APPLE_WELLSPRING_ISO, | ||
180 | USB_DEVICE_ID_APPLE_WELLSPRING_JIS, | ||
181 | 0x84, sizeof(struct bt_data), | ||
182 | 0x81, sizeof(struct tp_data), | ||
183 | { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, | ||
184 | { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, | ||
185 | { DIM_X, DIM_X / SN_COORD, -4824, 5342 }, | ||
186 | { DIM_Y, DIM_Y / SN_COORD, -172, 5820 } | ||
187 | }, | ||
188 | { | ||
189 | USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, | ||
190 | USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, | ||
191 | USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, | ||
192 | 0x84, sizeof(struct bt_data), | ||
193 | 0x81, sizeof(struct tp_data), | ||
194 | { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, | ||
195 | { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, | ||
196 | { DIM_X, DIM_X / SN_COORD, -4824, 4824 }, | ||
197 | { DIM_Y, DIM_Y / SN_COORD, -172, 4290 } | ||
198 | }, | ||
199 | {} | ||
200 | }; | ||
201 | |||
202 | /* return the device-specific configuration by device */ | ||
203 | static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev) | ||
204 | { | ||
205 | u16 id = le16_to_cpu(udev->descriptor.idProduct); | ||
206 | const struct bcm5974_config *cfg; | ||
207 | |||
208 | for (cfg = bcm5974_config_table; cfg->ansi; ++cfg) | ||
209 | if (cfg->ansi == id || cfg->iso == id || cfg->jis == id) | ||
210 | return cfg; | ||
211 | |||
212 | return bcm5974_config_table; | ||
213 | } | ||
214 | |||
215 | /* convert 16-bit little endian to signed integer */ | ||
216 | static inline int raw2int(__le16 x) | ||
217 | { | ||
218 | return (signed short)le16_to_cpu(x); | ||
219 | } | ||
220 | |||
221 | /* scale device data to logical dimensions (asserts devmin < devmax) */ | ||
222 | static inline int int2scale(const struct bcm5974_param *p, int x) | ||
223 | { | ||
224 | return x * p->dim / (p->devmax - p->devmin); | ||
225 | } | ||
226 | |||
227 | /* all logical value ranges are [0,dim). */ | ||
228 | static inline int int2bound(const struct bcm5974_param *p, int x) | ||
229 | { | ||
230 | int s = int2scale(p, x); | ||
231 | |||
232 | return clamp_val(s, 0, p->dim - 1); | ||
233 | } | ||
234 | |||
235 | /* setup which logical events to report */ | ||
236 | static void setup_events_to_report(struct input_dev *input_dev, | ||
237 | const struct bcm5974_config *cfg) | ||
238 | { | ||
239 | __set_bit(EV_ABS, input_dev->evbit); | ||
240 | |||
241 | input_set_abs_params(input_dev, ABS_PRESSURE, | ||
242 | 0, cfg->p.dim, cfg->p.fuzz, 0); | ||
243 | input_set_abs_params(input_dev, ABS_TOOL_WIDTH, | ||
244 | 0, cfg->w.dim, cfg->w.fuzz, 0); | ||
245 | input_set_abs_params(input_dev, ABS_X, | ||
246 | 0, cfg->x.dim, cfg->x.fuzz, 0); | ||
247 | input_set_abs_params(input_dev, ABS_Y, | ||
248 | 0, cfg->y.dim, cfg->y.fuzz, 0); | ||
249 | |||
250 | __set_bit(EV_KEY, input_dev->evbit); | ||
251 | __set_bit(BTN_TOOL_FINGER, input_dev->keybit); | ||
252 | __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); | ||
253 | __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); | ||
254 | __set_bit(BTN_LEFT, input_dev->keybit); | ||
255 | } | ||
256 | |||
257 | /* report button data as logical button state */ | ||
258 | static int report_bt_state(struct bcm5974 *dev, int size) | ||
259 | { | ||
260 | if (size != sizeof(struct bt_data)) | ||
261 | return -EIO; | ||
262 | |||
263 | input_report_key(dev->input, BTN_LEFT, dev->bt_data->button); | ||
264 | input_sync(dev->input); | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | /* report trackpad data as logical trackpad state */ | ||
270 | static int report_tp_state(struct bcm5974 *dev, int size) | ||
271 | { | ||
272 | const struct bcm5974_config *c = &dev->cfg; | ||
273 | const struct tp_finger *f = dev->tp_data->finger; | ||
274 | struct input_dev *input = dev->input; | ||
275 | const int fingers = (size - 26) / 28; | ||
276 | int p = 0, w, x, y, n = 0; | ||
277 | |||
278 | if (size < 26 || (size - 26) % 28 != 0) | ||
279 | return -EIO; | ||
280 | |||
281 | if (fingers) { | ||
282 | p = raw2int(f->force_major); | ||
283 | w = raw2int(f->size_major); | ||
284 | x = raw2int(f->abs_x); | ||
285 | y = raw2int(f->abs_y); | ||
286 | n = p > 0 ? fingers : 0; | ||
287 | |||
288 | dprintk(9, | ||
289 | "bcm5974: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n", | ||
290 | p, w, x, y, n); | ||
291 | |||
292 | input_report_abs(input, ABS_TOOL_WIDTH, int2bound(&c->w, w)); | ||
293 | input_report_abs(input, ABS_X, int2bound(&c->x, x - c->x.devmin)); | ||
294 | input_report_abs(input, ABS_Y, int2bound(&c->y, c->y.devmax - y)); | ||
295 | } | ||
296 | |||
297 | input_report_abs(input, ABS_PRESSURE, int2bound(&c->p, p)); | ||
298 | |||
299 | input_report_key(input, BTN_TOOL_FINGER, n == 1); | ||
300 | input_report_key(input, BTN_TOOL_DOUBLETAP, n == 2); | ||
301 | input_report_key(input, BTN_TOOL_TRIPLETAP, n > 2); | ||
302 | |||
303 | input_sync(input); | ||
304 | |||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | /* Wellspring initialization constants */ | ||
309 | #define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 | ||
310 | #define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 | ||
311 | #define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 | ||
312 | #define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 | ||
313 | #define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 | ||
314 | |||
315 | static int bcm5974_wellspring_mode(struct bcm5974 *dev) | ||
316 | { | ||
317 | char *data = kmalloc(8, GFP_KERNEL); | ||
318 | int retval = 0, size; | ||
319 | |||
320 | if (!data) { | ||
321 | err("bcm5974: out of memory"); | ||
322 | retval = -ENOMEM; | ||
323 | goto out; | ||
324 | } | ||
325 | |||
326 | /* read configuration */ | ||
327 | size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | ||
328 | BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, | ||
329 | USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
330 | BCM5974_WELLSPRING_MODE_REQUEST_VALUE, | ||
331 | BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); | ||
332 | |||
333 | if (size != 8) { | ||
334 | err("bcm5974: could not read from device"); | ||
335 | retval = -EIO; | ||
336 | goto out; | ||
337 | } | ||
338 | |||
339 | /* apply the mode switch */ | ||
340 | data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE; | ||
341 | |||
342 | /* write configuration */ | ||
343 | size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | ||
344 | BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, | ||
345 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
346 | BCM5974_WELLSPRING_MODE_REQUEST_VALUE, | ||
347 | BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); | ||
348 | |||
349 | if (size != 8) { | ||
350 | err("bcm5974: could not write to device"); | ||
351 | retval = -EIO; | ||
352 | goto out; | ||
353 | } | ||
354 | |||
355 | dprintk(2, "bcm5974: switched to wellspring mode.\n"); | ||
356 | |||
357 | out: | ||
358 | kfree(data); | ||
359 | return retval; | ||
360 | } | ||
361 | |||
362 | static void bcm5974_irq_button(struct urb *urb) | ||
363 | { | ||
364 | struct bcm5974 *dev = urb->context; | ||
365 | int error; | ||
366 | |||
367 | switch (urb->status) { | ||
368 | case 0: | ||
369 | break; | ||
370 | case -EOVERFLOW: | ||
371 | case -ECONNRESET: | ||
372 | case -ENOENT: | ||
373 | case -ESHUTDOWN: | ||
374 | dbg("bcm5974: button urb shutting down: %d", urb->status); | ||
375 | return; | ||
376 | default: | ||
377 | dbg("bcm5974: button urb status: %d", urb->status); | ||
378 | goto exit; | ||
379 | } | ||
380 | |||
381 | if (report_bt_state(dev, dev->bt_urb->actual_length)) | ||
382 | dprintk(1, "bcm5974: bad button package, length: %d\n", | ||
383 | dev->bt_urb->actual_length); | ||
384 | |||
385 | exit: | ||
386 | error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC); | ||
387 | if (error) | ||
388 | err("bcm5974: button urb failed: %d", error); | ||
389 | } | ||
390 | |||
391 | static void bcm5974_irq_trackpad(struct urb *urb) | ||
392 | { | ||
393 | struct bcm5974 *dev = urb->context; | ||
394 | int error; | ||
395 | |||
396 | switch (urb->status) { | ||
397 | case 0: | ||
398 | break; | ||
399 | case -EOVERFLOW: | ||
400 | case -ECONNRESET: | ||
401 | case -ENOENT: | ||
402 | case -ESHUTDOWN: | ||
403 | dbg("bcm5974: trackpad urb shutting down: %d", urb->status); | ||
404 | return; | ||
405 | default: | ||
406 | dbg("bcm5974: trackpad urb status: %d", urb->status); | ||
407 | goto exit; | ||
408 | } | ||
409 | |||
410 | /* control response ignored */ | ||
411 | if (dev->tp_urb->actual_length == 2) | ||
412 | goto exit; | ||
413 | |||
414 | if (report_tp_state(dev, dev->tp_urb->actual_length)) | ||
415 | dprintk(1, "bcm5974: bad trackpad package, length: %d\n", | ||
416 | dev->tp_urb->actual_length); | ||
417 | |||
418 | exit: | ||
419 | error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC); | ||
420 | if (error) | ||
421 | err("bcm5974: trackpad urb failed: %d", error); | ||
422 | } | ||
423 | |||
424 | /* | ||
425 | * The Wellspring trackpad, like many recent Apple trackpads, shares | ||
426 | * the USB device with the keyboard. Since keyboards are usually | ||
427 | * handled by the HID system, the device ends up being handled by two | ||
428 | * modules. Setting up the device therefore becomes slightly | ||
429 | * complicated. To enable multitouch features, a mode switch is | ||
430 | * required, which is usually applied via the control interface of the | ||
431 | * device. It can be argued where this switch should take place. In | ||
432 | * some drivers, like appletouch, the switch is made during | ||
433 | * probe. However, the hid module may also alter the state of the | ||
434 | * device, resulting in trackpad malfunction under certain | ||
435 | * circumstances. To get around this problem, there is at least one | ||
436 | * example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to | ||
437 | * receive a reset_resume request rather than the normal resume. | ||
438 | * Since the implementation of reset_resume is equal to mode switch | ||
439 | * plus start_traffic, it seems easier to always do the switch when | ||
440 | * starting traffic on the device. | ||
441 | */ | ||
442 | static int bcm5974_start_traffic(struct bcm5974 *dev) | ||
443 | { | ||
444 | if (bcm5974_wellspring_mode(dev)) { | ||
445 | dprintk(1, "bcm5974: mode switch failed\n"); | ||
446 | goto error; | ||
447 | } | ||
448 | |||
449 | if (usb_submit_urb(dev->bt_urb, GFP_KERNEL)) | ||
450 | goto error; | ||
451 | |||
452 | if (usb_submit_urb(dev->tp_urb, GFP_KERNEL)) | ||
453 | goto err_kill_bt; | ||
454 | |||
455 | return 0; | ||
456 | |||
457 | err_kill_bt: | ||
458 | usb_kill_urb(dev->bt_urb); | ||
459 | error: | ||
460 | return -EIO; | ||
461 | } | ||
462 | |||
463 | static void bcm5974_pause_traffic(struct bcm5974 *dev) | ||
464 | { | ||
465 | usb_kill_urb(dev->tp_urb); | ||
466 | usb_kill_urb(dev->bt_urb); | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * The code below implements open/close and manual suspend/resume. | ||
471 | * All functions may be called in random order. | ||
472 | * | ||
473 | * Opening a suspended device fails with EACCES - permission denied. | ||
474 | * | ||
475 | * Failing a resume leaves the device resumed but closed. | ||
476 | */ | ||
477 | static int bcm5974_open(struct input_dev *input) | ||
478 | { | ||
479 | struct bcm5974 *dev = input_get_drvdata(input); | ||
480 | int error; | ||
481 | |||
482 | error = usb_autopm_get_interface(dev->intf); | ||
483 | if (error) | ||
484 | return error; | ||
485 | |||
486 | mutex_lock(&dev->pm_mutex); | ||
487 | |||
488 | error = bcm5974_start_traffic(dev); | ||
489 | if (!error) | ||
490 | dev->opened = 1; | ||
491 | |||
492 | mutex_unlock(&dev->pm_mutex); | ||
493 | |||
494 | if (error) | ||
495 | usb_autopm_put_interface(dev->intf); | ||
496 | |||
497 | return error; | ||
498 | } | ||
499 | |||
500 | static void bcm5974_close(struct input_dev *input) | ||
501 | { | ||
502 | struct bcm5974 *dev = input_get_drvdata(input); | ||
503 | |||
504 | mutex_lock(&dev->pm_mutex); | ||
505 | |||
506 | bcm5974_pause_traffic(dev); | ||
507 | dev->opened = 0; | ||
508 | |||
509 | mutex_unlock(&dev->pm_mutex); | ||
510 | |||
511 | usb_autopm_put_interface(dev->intf); | ||
512 | } | ||
513 | |||
514 | static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message) | ||
515 | { | ||
516 | struct bcm5974 *dev = usb_get_intfdata(iface); | ||
517 | |||
518 | mutex_lock(&dev->pm_mutex); | ||
519 | |||
520 | if (dev->opened) | ||
521 | bcm5974_pause_traffic(dev); | ||
522 | |||
523 | mutex_unlock(&dev->pm_mutex); | ||
524 | |||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | static int bcm5974_resume(struct usb_interface *iface) | ||
529 | { | ||
530 | struct bcm5974 *dev = usb_get_intfdata(iface); | ||
531 | int error = 0; | ||
532 | |||
533 | mutex_lock(&dev->pm_mutex); | ||
534 | |||
535 | if (dev->opened) | ||
536 | error = bcm5974_start_traffic(dev); | ||
537 | |||
538 | mutex_unlock(&dev->pm_mutex); | ||
539 | |||
540 | return error; | ||
541 | } | ||
542 | |||
543 | static int bcm5974_probe(struct usb_interface *iface, | ||
544 | const struct usb_device_id *id) | ||
545 | { | ||
546 | struct usb_device *udev = interface_to_usbdev(iface); | ||
547 | const struct bcm5974_config *cfg; | ||
548 | struct bcm5974 *dev; | ||
549 | struct input_dev *input_dev; | ||
550 | int error = -ENOMEM; | ||
551 | |||
552 | /* find the product index */ | ||
553 | cfg = bcm5974_get_config(udev); | ||
554 | |||
555 | /* allocate memory for our device state and initialize it */ | ||
556 | dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL); | ||
557 | input_dev = input_allocate_device(); | ||
558 | if (!dev || !input_dev) { | ||
559 | err("bcm5974: out of memory"); | ||
560 | goto err_free_devs; | ||
561 | } | ||
562 | |||
563 | dev->udev = udev; | ||
564 | dev->intf = iface; | ||
565 | dev->input = input_dev; | ||
566 | dev->cfg = *cfg; | ||
567 | mutex_init(&dev->pm_mutex); | ||
568 | |||
569 | /* setup urbs */ | ||
570 | dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
571 | if (!dev->bt_urb) | ||
572 | goto err_free_devs; | ||
573 | |||
574 | dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
575 | if (!dev->tp_urb) | ||
576 | goto err_free_bt_urb; | ||
577 | |||
578 | dev->bt_data = usb_buffer_alloc(dev->udev, | ||
579 | dev->cfg.bt_datalen, GFP_KERNEL, | ||
580 | &dev->bt_urb->transfer_dma); | ||
581 | if (!dev->bt_data) | ||
582 | goto err_free_urb; | ||
583 | |||
584 | dev->tp_data = usb_buffer_alloc(dev->udev, | ||
585 | dev->cfg.tp_datalen, GFP_KERNEL, | ||
586 | &dev->tp_urb->transfer_dma); | ||
587 | if (!dev->tp_data) | ||
588 | goto err_free_bt_buffer; | ||
589 | |||
590 | usb_fill_int_urb(dev->bt_urb, udev, | ||
591 | usb_rcvintpipe(udev, cfg->bt_ep), | ||
592 | dev->bt_data, dev->cfg.bt_datalen, | ||
593 | bcm5974_irq_button, dev, 1); | ||
594 | |||
595 | usb_fill_int_urb(dev->tp_urb, udev, | ||
596 | usb_rcvintpipe(udev, cfg->tp_ep), | ||
597 | dev->tp_data, dev->cfg.tp_datalen, | ||
598 | bcm5974_irq_trackpad, dev, 1); | ||
599 | |||
600 | /* create bcm5974 device */ | ||
601 | usb_make_path(udev, dev->phys, sizeof(dev->phys)); | ||
602 | strlcat(dev->phys, "/input0", sizeof(dev->phys)); | ||
603 | |||
604 | input_dev->name = "bcm5974"; | ||
605 | input_dev->phys = dev->phys; | ||
606 | usb_to_input_id(dev->udev, &input_dev->id); | ||
607 | input_dev->dev.parent = &iface->dev; | ||
608 | |||
609 | input_set_drvdata(input_dev, dev); | ||
610 | |||
611 | input_dev->open = bcm5974_open; | ||
612 | input_dev->close = bcm5974_close; | ||
613 | |||
614 | setup_events_to_report(input_dev, cfg); | ||
615 | |||
616 | error = input_register_device(dev->input); | ||
617 | if (error) | ||
618 | goto err_free_buffer; | ||
619 | |||
620 | /* save our data pointer in this interface device */ | ||
621 | usb_set_intfdata(iface, dev); | ||
622 | |||
623 | return 0; | ||
624 | |||
625 | err_free_buffer: | ||
626 | usb_buffer_free(dev->udev, dev->cfg.tp_datalen, | ||
627 | dev->tp_data, dev->tp_urb->transfer_dma); | ||
628 | err_free_bt_buffer: | ||
629 | usb_buffer_free(dev->udev, dev->cfg.bt_datalen, | ||
630 | dev->bt_data, dev->bt_urb->transfer_dma); | ||
631 | err_free_urb: | ||
632 | usb_free_urb(dev->tp_urb); | ||
633 | err_free_bt_urb: | ||
634 | usb_free_urb(dev->bt_urb); | ||
635 | err_free_devs: | ||
636 | usb_set_intfdata(iface, NULL); | ||
637 | input_free_device(input_dev); | ||
638 | kfree(dev); | ||
639 | return error; | ||
640 | } | ||
641 | |||
642 | static void bcm5974_disconnect(struct usb_interface *iface) | ||
643 | { | ||
644 | struct bcm5974 *dev = usb_get_intfdata(iface); | ||
645 | |||
646 | usb_set_intfdata(iface, NULL); | ||
647 | |||
648 | input_unregister_device(dev->input); | ||
649 | usb_buffer_free(dev->udev, dev->cfg.tp_datalen, | ||
650 | dev->tp_data, dev->tp_urb->transfer_dma); | ||
651 | usb_buffer_free(dev->udev, dev->cfg.bt_datalen, | ||
652 | dev->bt_data, dev->bt_urb->transfer_dma); | ||
653 | usb_free_urb(dev->tp_urb); | ||
654 | usb_free_urb(dev->bt_urb); | ||
655 | kfree(dev); | ||
656 | } | ||
657 | |||
658 | static struct usb_driver bcm5974_driver = { | ||
659 | .name = "bcm5974", | ||
660 | .probe = bcm5974_probe, | ||
661 | .disconnect = bcm5974_disconnect, | ||
662 | .suspend = bcm5974_suspend, | ||
663 | .resume = bcm5974_resume, | ||
664 | .reset_resume = bcm5974_resume, | ||
665 | .id_table = bcm5974_table, | ||
666 | .supports_autosuspend = 1, | ||
667 | }; | ||
668 | |||
669 | static int __init bcm5974_init(void) | ||
670 | { | ||
671 | return usb_register(&bcm5974_driver); | ||
672 | } | ||
673 | |||
674 | static void __exit bcm5974_exit(void) | ||
675 | { | ||
676 | usb_deregister(&bcm5974_driver); | ||
677 | } | ||
678 | |||
679 | module_init(bcm5974_init); | ||
680 | module_exit(bcm5974_exit); | ||
681 | |||
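The new bcm5974 driver normalizes raw trackpad readings before reporting them: int2scale() maps a device reading from the [devmin, devmax] range onto a logical range of size dim, and int2bound() clamps the result to [0, dim). The standalone sketch below replays that arithmetic in user space with the Wellspring X-axis constants from the config table; the struct and helper names mirror the driver only for readability, and the raw reading is made up.

#include <stdio.h>

struct param {
	int dim;      /* logical dimension */
	int fuzz;     /* logical noise value */
	int devmin;   /* device minimum reading */
	int devmax;   /* device maximum reading */
};

static int int2scale(const struct param *p, int x)
{
	return x * p->dim / (p->devmax - p->devmin);
}

static int int2bound(const struct param *p, int x)
{
	int s = int2scale(p, x);

	if (s < 0)
		return 0;
	if (s > p->dim - 1)
		return p->dim - 1;
	return s;
}

int main(void)
{
	/* DIM_X, DIM_X / SN_COORD, and the Wellspring device limits */
	struct param x = { 1280, 1280 / 250, -4824, 5342 };
	int raw = 259;    /* example raw abs_x reading */

	/* the driver reports int2bound(&x, raw - devmin) for ABS_X */
	printf("logical x = %d\n", int2bound(&x, raw - x.devmin));
	return 0;
}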
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c index 339290184871..72cf5e33790e 100644 --- a/drivers/input/mouse/gpio_mouse.c +++ b/drivers/input/mouse/gpio_mouse.c | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/version.h> | ||
13 | #include <linux/module.h> | 12 | #include <linux/module.h> |
14 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
15 | #include <linux/input-polldev.h> | 14 | #include <linux/input-polldev.h> |
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index 66bafe308b0c..692a79ec2a22 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h | |||
@@ -1,10 +1,11 @@ | |||
1 | #ifndef _I8042_SPARCIO_H | 1 | #ifndef _I8042_SPARCIO_H |
2 | #define _I8042_SPARCIO_H | 2 | #define _I8042_SPARCIO_H |
3 | 3 | ||
4 | #include <linux/of_device.h> | ||
5 | |||
4 | #include <asm/io.h> | 6 | #include <asm/io.h> |
5 | #include <asm/oplib.h> | 7 | #include <asm/oplib.h> |
6 | #include <asm/prom.h> | 8 | #include <asm/prom.h> |
7 | #include <asm/of_device.h> | ||
8 | 9 | ||
9 | static int i8042_kbd_irq = -1; | 10 | static int i8042_kbd_irq = -1; |
10 | static int i8042_aux_irq = -1; | 11 | static int i8042_aux_irq = -1; |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index fe732a574ec2..3282b741e246 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -394,6 +394,13 @@ static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = { | |||
394 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), | 394 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), |
395 | }, | 395 | }, |
396 | }, | 396 | }, |
397 | { | ||
398 | .ident = "Acer TravelMate 4280", | ||
399 | .matches = { | ||
400 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
401 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), | ||
402 | }, | ||
403 | }, | ||
397 | { } | 404 | { } |
398 | }; | 405 | }; |
399 | 406 | ||
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c index 0ed044d5e685..765007899d9a 100644 --- a/drivers/input/serio/xilinx_ps2.c +++ b/drivers/input/serio/xilinx_ps2.c | |||
@@ -269,8 +269,8 @@ static int xps2_setup(struct device *dev, struct resource *regs_res, | |||
269 | * we have the PS2 in a good state */ | 269 | * we have the PS2 in a good state */ |
270 | out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); | 270 | out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); |
271 | 271 | ||
272 | dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%08X, irq=%d\n", | 272 | dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%p, irq=%d\n", |
273 | drvdata->phys_addr, (u32)drvdata->base_address, drvdata->irq); | 273 | drvdata->phys_addr, drvdata->base_address, drvdata->irq); |
274 | 274 | ||
275 | serio = &drvdata->serio; | 275 | serio = &drvdata->serio; |
276 | serio->id.type = SERIO_8042; | 276 | serio->id.type = SERIO_8042; |
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index b9b7a98bc5a5..7df0228e836e 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c | |||
@@ -64,7 +64,6 @@ Scott Hill shill@gtcocalcomp.com | |||
64 | #include <asm/byteorder.h> | 64 | #include <asm/byteorder.h> |
65 | 65 | ||
66 | 66 | ||
67 | #include <linux/version.h> | ||
68 | #include <linux/usb/input.h> | 67 | #include <linux/usb/input.h> |
69 | 68 | ||
70 | /* Version with a Major number of 2 is for kernel inclusion only. */ | 69 | /* Version with a Major number of 2 is for kernel inclusion only. */ |
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 6e60a97a234c..25287e80e236 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -249,29 +249,26 @@ config TOUCHSCREEN_WM97XX | |||
249 | config TOUCHSCREEN_WM9705 | 249 | config TOUCHSCREEN_WM9705 |
250 | bool "WM9705 Touchscreen interface support" | 250 | bool "WM9705 Touchscreen interface support" |
251 | depends on TOUCHSCREEN_WM97XX | 251 | depends on TOUCHSCREEN_WM97XX |
252 | default y | ||
252 | help | 253 | help |
253 | Say Y here if you have a Wolfson Microelectronics WM9705 | 254 | Say Y here to enable support for the Wolfson Microelectronics |
254 | touchscreen controller connected to your system. | 255 | WM9705 touchscreen controller. |
255 | |||
256 | If unsure, say N. | ||
257 | 256 | ||
258 | config TOUCHSCREEN_WM9712 | 257 | config TOUCHSCREEN_WM9712 |
259 | bool "WM9712 Touchscreen interface support" | 258 | bool "WM9712 Touchscreen interface support" |
260 | depends on TOUCHSCREEN_WM97XX | 259 | depends on TOUCHSCREEN_WM97XX |
260 | default y | ||
261 | help | 261 | help |
262 | Say Y here if you have a Wolfson Microelectronics WM9712 | 262 | Say Y here to enable support for the Wolfson Microelectronics |
263 | touchscreen controller connected to your system. | 263 | WM9712 touchscreen controller. |
264 | |||
265 | If unsure, say N. | ||
266 | 264 | ||
267 | config TOUCHSCREEN_WM9713 | 265 | config TOUCHSCREEN_WM9713 |
268 | bool "WM9713 Touchscreen interface support" | 266 | bool "WM9713 Touchscreen interface support" |
269 | depends on TOUCHSCREEN_WM97XX | 267 | depends on TOUCHSCREEN_WM97XX |
268 | default y | ||
270 | help | 269 | help |
271 | Say Y here if you have a Wolfson Microelectronics WM9713 touchscreen | 270 | Say Y here to enable support for the Wolfson Microelectronics |
272 | controller connected to your system. | 271 | WM9713 touchscreen controller. |
273 | |||
274 | If unsure, say N. | ||
275 | 272 | ||
276 | config TOUCHSCREEN_WM97XX_MAINSTONE | 273 | config TOUCHSCREEN_WM97XX_MAINSTONE |
277 | tristate "WM97xx Mainstone accelerated touch" | 274 | tristate "WM97xx Mainstone accelerated touch" |
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c index 283f93a0cee2..37a555f37306 100644 --- a/drivers/input/touchscreen/mainstone-wm97xx.c +++ b/drivers/input/touchscreen/mainstone-wm97xx.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/version.h> | ||
29 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
30 | #include <linux/init.h> | 29 | #include <linux/init.h> |
31 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c index c1cd99d58981..504ca11749a1 100644 --- a/drivers/input/touchscreen/migor_ts.c +++ b/drivers/input/touchscreen/migor_ts.c | |||
@@ -173,7 +173,7 @@ static int migor_ts_probe(struct i2c_client *client, | |||
173 | input_set_abs_params(input, ABS_X, 95, 955, 0, 0); | 173 | input_set_abs_params(input, ABS_X, 95, 955, 0, 0); |
174 | input_set_abs_params(input, ABS_Y, 85, 935, 0, 0); | 174 | input_set_abs_params(input, ABS_Y, 85, 935, 0, 0); |
175 | 175 | ||
176 | input->name = client->driver_name; | 176 | input->name = client->name; |
177 | input->id.bustype = BUS_I2C; | 177 | input->id.bustype = BUS_I2C; |
178 | input->dev.parent = &client->dev; | 178 | input->dev.parent = &client->dev; |
179 | 179 | ||
@@ -192,7 +192,7 @@ static int migor_ts_probe(struct i2c_client *client, | |||
192 | goto err1; | 192 | goto err1; |
193 | 193 | ||
194 | error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW, | 194 | error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW, |
195 | client->driver_name, priv); | 195 | client->name, priv); |
196 | if (error) { | 196 | if (error) { |
197 | dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); | 197 | dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); |
198 | goto err2; | 198 | goto err2; |
@@ -224,12 +224,19 @@ static int migor_ts_remove(struct i2c_client *client) | |||
224 | return 0; | 224 | return 0; |
225 | } | 225 | } |
226 | 226 | ||
227 | static const struct i2c_device_id migor_ts_id[] = { | ||
228 | { "migor_ts", 0 }, | ||
229 | { } | ||
230 | }; | ||
231 | MODULE_DEVICE_TABLE(i2c, migor_ts); | ||
232 | |||
227 | static struct i2c_driver migor_ts_driver = { | 233 | static struct i2c_driver migor_ts_driver = { |
228 | .driver = { | 234 | .driver = { |
229 | .name = "migor_ts", | 235 | .name = "migor_ts", |
230 | }, | 236 | }, |
231 | .probe = migor_ts_probe, | 237 | .probe = migor_ts_probe, |
232 | .remove = migor_ts_remove, | 238 | .remove = migor_ts_remove, |
239 | .id_table = migor_ts_id, | ||
233 | }; | 240 | }; |
234 | 241 | ||
235 | static int __init migor_ts_init(void) | 242 | static int __init migor_ts_init(void) |
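The migor_ts hunk above wires an i2c_device_id table into the driver so the I2C core can match the device by name and modpost can emit a module alias for autoloading. Below is a minimal, hedged sketch of the same pattern with purely illustrative names (note that MODULE_DEVICE_TABLE conventionally receives the id array itself, e.g. migor_ts_id):

    #include <linux/i2c.h>
    #include <linux/module.h>

    static int example_ts_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
    {
            /* set up the input device, request the IRQ, ... */
            return 0;
    }

    static int example_ts_remove(struct i2c_client *client)
    {
            /* free the IRQ, unregister the input device, ... */
            return 0;
    }

    static const struct i2c_device_id example_ts_id[] = {
            { "example_ts", 0 },
            { }                       /* terminating entry */
    };
    /* The second argument names the array, so modpost can generate the
     * "i2c:example_ts" alias used for autoloading. */
    MODULE_DEVICE_TABLE(i2c, example_ts_id);

    static struct i2c_driver example_ts_driver = {
            .driver   = { .name = "example_ts" },
            .probe    = example_ts_probe,
            .remove   = example_ts_remove,
            .id_table = example_ts_id,
    };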
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c index 978e1a13ffc7..372efbc694ff 100644 --- a/drivers/input/touchscreen/wm9705.c +++ b/drivers/input/touchscreen/wm9705.c | |||
@@ -17,7 +17,6 @@ | |||
17 | 17 | ||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/version.h> | ||
21 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
22 | #include <linux/input.h> | 21 | #include <linux/input.h> |
23 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c index 4c5d85a249ae..c8bb1e7335fc 100644 --- a/drivers/input/touchscreen/wm9712.c +++ b/drivers/input/touchscreen/wm9712.c | |||
@@ -17,7 +17,6 @@ | |||
17 | 17 | ||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/version.h> | ||
21 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
22 | #include <linux/input.h> | 21 | #include <linux/input.h> |
23 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c index 838458792ea0..781ee83547e6 100644 --- a/drivers/input/touchscreen/wm9713.c +++ b/drivers/input/touchscreen/wm9713.c | |||
@@ -17,7 +17,6 @@ | |||
17 | 17 | ||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/version.h> | ||
21 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
22 | #include <linux/input.h> | 21 | #include <linux/input.h> |
23 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index cdc24ad314e0..d589ab0e3adc 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c | |||
@@ -37,7 +37,6 @@ | |||
37 | 37 | ||
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include <linux/moduleparam.h> | 39 | #include <linux/moduleparam.h> |
40 | #include <linux/version.h> | ||
41 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
42 | #include <linux/init.h> | 41 | #include <linux/init.h> |
43 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 37344aaee22f..a661bbdae3d6 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
@@ -98,6 +98,10 @@ static u32 lg_get_features(struct virtio_device *vdev) | |||
98 | return features; | 98 | return features; |
99 | } | 99 | } |
100 | 100 | ||
101 | /* The virtio core takes the features the Host offers, and copies the | ||
102 | * ones supported by the driver into the vdev->features array. Once | ||
103 | * that's all sorted out, this routine is called so we can tell the | ||
104 | * Host which features we understand and accept. */ | ||
101 | static void lg_finalize_features(struct virtio_device *vdev) | 105 | static void lg_finalize_features(struct virtio_device *vdev) |
102 | { | 106 | { |
103 | unsigned int i, bits; | 107 | unsigned int i, bits; |
@@ -108,6 +112,10 @@ static void lg_finalize_features(struct virtio_device *vdev) | |||
108 | /* Give virtio_ring a chance to accept features. */ | 112 | /* Give virtio_ring a chance to accept features. */ |
109 | vring_transport_features(vdev); | 113 | vring_transport_features(vdev); |
110 | 114 | ||
115 | /* The vdev->feature array is a Linux bitmask: this isn't the | ||
116 | * same as the simple array of bits used by lguest devices | ||
117 | * for features. So we do this slow, manual conversion which is | ||
118 | * completely general. */ | ||
111 | memset(out_features, 0, desc->feature_len); | 119 | memset(out_features, 0, desc->feature_len); |
112 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; | 120 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; |
113 | for (i = 0; i < bits; i++) { | 121 | for (i = 0; i < bits; i++) { |
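The comments added above describe turning Linux's long-based feature bitmask into lguest's plain byte array of bits. A self-contained sketch of that conversion (plain C, illustrative names; the caller is assumed to guarantee the bitmask covers out_len * 8 bits, as the min_t() above does):

    #include <string.h>

    /* Copy a Linux-style bitmask (array of unsigned long, bit i set if
     * feature i is accepted) into a byte array where bit (i % 8) of
     * byte (i / 8) represents feature i. */
    static void copy_features(const unsigned long *features,
                              unsigned char *out, unsigned int out_len)
    {
            unsigned int i, bits = out_len * 8;

            memset(out, 0, out_len);
            for (i = 0; i < bits; i++) {
                    unsigned int word = i / (8 * sizeof(unsigned long));
                    unsigned int bit  = i % (8 * sizeof(unsigned long));

                    if (features[word] & (1UL << bit))
                            out[i / 8] |= 1 << (i % 8);
            }
    }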
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index d93500f24fbb..81d0c6053447 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c | |||
@@ -108,9 +108,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) | |||
108 | } | 108 | } |
109 | /*:*/ | 109 | /*:*/ |
110 | 110 | ||
111 | /*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages. We | 111 | /*M:014 get_pfn is slow: we could probably try to grab batches of pages here as |
112 | * could probably try to grab batches of pages here as an optimization | 112 | * an optimization (ie. pre-faulting). :*/ |
113 | * (ie. pre-faulting). :*/ | ||
114 | 113 | ||
115 | /*H:350 This routine takes a page number given by the Guest and converts it to | 114 | /*H:350 This routine takes a page number given by the Guest and converts it to |
116 | * an actual, physical page number. It can fail for several reasons: the | 115 | * an actual, physical page number. It can fail for several reasons: the |
@@ -123,19 +122,13 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) | |||
123 | static unsigned long get_pfn(unsigned long virtpfn, int write) | 122 | static unsigned long get_pfn(unsigned long virtpfn, int write) |
124 | { | 123 | { |
125 | struct page *page; | 124 | struct page *page; |
126 | /* This value indicates failure. */ | ||
127 | unsigned long ret = -1UL; | ||
128 | 125 | ||
129 | /* get_user_pages() is a complex interface: it gets the "struct | 126 | /* gup me one page at this address please! */ |
130 | * vm_area_struct" and "struct page" assocated with a range of pages. | 127 | if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1) |
131 | * It also needs the task's mmap_sem held, and is not very quick. | 128 | return page_to_pfn(page); |
132 | * It returns the number of pages it got. */ | 129 | |
133 | down_read(&current->mm->mmap_sem); | 130 | /* This value indicates failure. */ |
134 | if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT, | 131 | return -1UL; |
135 | 1, write, 1, &page, NULL) == 1) | ||
136 | ret = page_to_pfn(page); | ||
137 | up_read(&current->mm->mmap_sem); | ||
138 | return ret; | ||
139 | } | 132 | } |
140 | 133 | ||
141 | /*H:340 Converting a Guest page table entry to a shadow (ie. real) page table | 134 | /*H:340 Converting a Guest page table entry to a shadow (ie. real) page table |
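The rewrite above drops the mmap_sem/get_user_pages() dance in favour of get_user_pages_fast(), which pins user pages without the caller taking mmap_sem. A hedged sketch of the same pattern (the function name is illustrative, not from the patch):

    #include <linux/mm.h>

    /* Pin one user page and return its physical frame number, or -1UL on
     * failure; the caller must eventually put_page() the page, as
     * release_pte() does below. */
    static unsigned long pin_one_pfn(unsigned long vaddr, int write)
    {
            struct page *page;

            if (get_user_pages_fast(vaddr & PAGE_MASK, 1, write, &page) == 1)
                    return page_to_pfn(page);

            return -1UL;
    }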
@@ -174,7 +167,7 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) | |||
174 | /*H:460 And to complete the chain, release_pte() looks like this: */ | 167 | /*H:460 And to complete the chain, release_pte() looks like this: */ |
175 | static void release_pte(pte_t pte) | 168 | static void release_pte(pte_t pte) |
176 | { | 169 | { |
177 | /* Remember that get_user_pages() took a reference to the page, in | 170 | /* Remember that get_user_pages_fast() took a reference to the page, in |
178 | * get_pfn()? We have to put it back now. */ | 171 | * get_pfn()? We have to put it back now. */ |
179 | if (pte_flags(pte) & _PAGE_PRESENT) | 172 | if (pte_flags(pte) & _PAGE_PRESENT) |
180 | put_page(pfn_to_page(pte_pfn(pte))); | 173 | put_page(pfn_to_page(pte_pfn(pte))); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index c7aae66c6f9b..8cfadc5bd2ba 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -2393,6 +2393,8 @@ static void analyze_sbs(mddev_t * mddev) | |||
2393 | 2393 | ||
2394 | } | 2394 | } |
2395 | 2395 | ||
2396 | static void md_safemode_timeout(unsigned long data); | ||
2397 | |||
2396 | static ssize_t | 2398 | static ssize_t |
2397 | safe_delay_show(mddev_t *mddev, char *page) | 2399 | safe_delay_show(mddev_t *mddev, char *page) |
2398 | { | 2400 | { |
@@ -2432,9 +2434,12 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) | |||
2432 | if (msec == 0) | 2434 | if (msec == 0) |
2433 | mddev->safemode_delay = 0; | 2435 | mddev->safemode_delay = 0; |
2434 | else { | 2436 | else { |
2437 | unsigned long old_delay = mddev->safemode_delay; | ||
2435 | mddev->safemode_delay = (msec*HZ)/1000; | 2438 | mddev->safemode_delay = (msec*HZ)/1000; |
2436 | if (mddev->safemode_delay == 0) | 2439 | if (mddev->safemode_delay == 0) |
2437 | mddev->safemode_delay = 1; | 2440 | mddev->safemode_delay = 1; |
2441 | if (mddev->safemode_delay < old_delay) | ||
2442 | md_safemode_timeout((unsigned long)mddev); | ||
2438 | } | 2443 | } |
2439 | return len; | 2444 | return len; |
2440 | } | 2445 | } |
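The hunk above keeps the safemode delay in jiffies, never letting a non-zero request round down to zero ticks, and when the delay shrinks it calls md_safemode_timeout() immediately so the shorter value takes effect without waiting out the old timer. A small sketch of the conversion, assuming only the usual HZ/jiffies definitions:

    #include <linux/jiffies.h>

    /* Convert a safemode delay in milliseconds to timer ticks; a non-zero
     * request always yields at least one tick. */
    static unsigned long safemode_msec_to_ticks(unsigned long msec)
    {
            unsigned long ticks = (msec * HZ) / 1000;

            if (msec && !ticks)
                    ticks = 1;
            return ticks;
    }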
@@ -4634,6 +4639,11 @@ static int update_size(mddev_t *mddev, sector_t num_sectors) | |||
4634 | */ | 4639 | */ |
4635 | if (mddev->sync_thread) | 4640 | if (mddev->sync_thread) |
4636 | return -EBUSY; | 4641 | return -EBUSY; |
4642 | if (mddev->bitmap) | ||
4643 | /* Sorry, cannot grow a bitmap yet, just remove it, | ||
4644 | * grow, and re-add. | ||
4645 | */ | ||
4646 | return -EBUSY; | ||
4637 | rdev_for_each(rdev, tmp, mddev) { | 4647 | rdev_for_each(rdev, tmp, mddev) { |
4638 | sector_t avail; | 4648 | sector_t avail; |
4639 | avail = rdev->size * 2; | 4649 | avail = rdev->size * 2; |
@@ -5993,7 +6003,7 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
5993 | } | 6003 | } |
5994 | } | 6004 | } |
5995 | 6005 | ||
5996 | if (mddev->degraded) { | 6006 | if (mddev->degraded && ! mddev->ro) { |
5997 | rdev_for_each(rdev, rtmp, mddev) { | 6007 | rdev_for_each(rdev, rtmp, mddev) { |
5998 | if (rdev->raid_disk >= 0 && | 6008 | if (rdev->raid_disk >= 0 && |
5999 | !test_bit(In_sync, &rdev->flags) && | 6009 | !test_bit(In_sync, &rdev->flags) && |
@@ -6067,6 +6077,8 @@ void md_check_recovery(mddev_t *mddev) | |||
6067 | flush_signals(current); | 6077 | flush_signals(current); |
6068 | } | 6078 | } |
6069 | 6079 | ||
6080 | if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) | ||
6081 | return; | ||
6070 | if ( ! ( | 6082 | if ( ! ( |
6071 | (mddev->flags && !mddev->external) || | 6083 | (mddev->flags && !mddev->external) || |
6072 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || | 6084 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || |
@@ -6080,6 +6092,15 @@ void md_check_recovery(mddev_t *mddev) | |||
6080 | if (mddev_trylock(mddev)) { | 6092 | if (mddev_trylock(mddev)) { |
6081 | int spares = 0; | 6093 | int spares = 0; |
6082 | 6094 | ||
6095 | if (mddev->ro) { | ||
6096 | /* Only thing we do on a ro array is remove | ||
6097 | * failed devices. | ||
6098 | */ | ||
6099 | remove_and_add_spares(mddev); | ||
6100 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | ||
6101 | goto unlock; | ||
6102 | } | ||
6103 | |||
6083 | if (!mddev->external) { | 6104 | if (!mddev->external) { |
6084 | int did_change = 0; | 6105 | int did_change = 0; |
6085 | spin_lock_irq(&mddev->write_lock); | 6106 | spin_lock_irq(&mddev->write_lock); |
@@ -6117,7 +6138,8 @@ void md_check_recovery(mddev_t *mddev) | |||
6117 | /* resync has finished, collect result */ | 6138 | /* resync has finished, collect result */ |
6118 | md_unregister_thread(mddev->sync_thread); | 6139 | md_unregister_thread(mddev->sync_thread); |
6119 | mddev->sync_thread = NULL; | 6140 | mddev->sync_thread = NULL; |
6120 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 6141 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
6142 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | ||
6121 | /* success...*/ | 6143 | /* success...*/ |
6122 | /* activate any spares */ | 6144 | /* activate any spares */ |
6123 | if (mddev->pers->spare_active(mddev)) | 6145 | if (mddev->pers->spare_active(mddev)) |
@@ -6169,6 +6191,7 @@ void md_check_recovery(mddev_t *mddev) | |||
6169 | } else if ((spares = remove_and_add_spares(mddev))) { | 6191 | } else if ((spares = remove_and_add_spares(mddev))) { |
6170 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 6192 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
6171 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); | 6193 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
6194 | clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | ||
6172 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); | 6195 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); |
6173 | } else if (mddev->recovery_cp < MaxSector) { | 6196 | } else if (mddev->recovery_cp < MaxSector) { |
6174 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 6197 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
@@ -6232,7 +6255,11 @@ static int md_notify_reboot(struct notifier_block *this, | |||
6232 | 6255 | ||
6233 | for_each_mddev(mddev, tmp) | 6256 | for_each_mddev(mddev, tmp) |
6234 | if (mddev_trylock(mddev)) { | 6257 | if (mddev_trylock(mddev)) { |
6235 | do_md_stop (mddev, 1, 0); | 6258 | /* Force a switch to readonly even if the |
6259 | * array appears to still be in use. Hence | ||
6260 | * the '100'. | ||
6261 | */ | ||
6262 | do_md_stop (mddev, 1, 100); | ||
6236 | mddev_unlock(mddev); | 6263 | mddev_unlock(mddev); |
6237 | } | 6264 | } |
6238 | /* | 6265 | /* |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d41bebb6da0f..e34cd0e62473 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -76,11 +76,13 @@ static void r10bio_pool_free(void *r10_bio, void *data) | |||
76 | kfree(r10_bio); | 76 | kfree(r10_bio); |
77 | } | 77 | } |
78 | 78 | ||
79 | /* Maximum size of each resync request */ | ||
79 | #define RESYNC_BLOCK_SIZE (64*1024) | 80 | #define RESYNC_BLOCK_SIZE (64*1024) |
80 | //#define RESYNC_BLOCK_SIZE PAGE_SIZE | ||
81 | #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) | ||
82 | #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) | 81 | #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) |
83 | #define RESYNC_WINDOW (2048*1024) | 82 | /* amount of memory to reserve for resync requests */ |
83 | #define RESYNC_WINDOW (1024*1024) | ||
84 | /* maximum number of concurrent requests, memory permitting */ | ||
85 | #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) | ||
84 | 86 | ||
85 | /* | 87 | /* |
86 | * When performing a resync, we need to read and compare, so | 88 | * When performing a resync, we need to read and compare, so |
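Concretely, with the new definitions each resync request covers RESYNC_BLOCK_SIZE = 64 KiB (RESYNC_PAGES = 16 pages on a 4 KiB-page system), RESYNC_WINDOW reserves 1 MiB of buffer memory, and RESYNC_DEPTH = 32 MiB / 64 KiB = 512 requests may be outstanding at once, memory permitting; the old hard-coded RESYNC_DEPTH of 32 next to raise_barrier() is removed below.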
@@ -690,7 +692,6 @@ static int flush_pending_writes(conf_t *conf) | |||
690 | * there is no normal IO happening. It must arrange to call | 692 | * there is no normal IO happening. It must arrange to call |
691 | * lower_barrier when the particular background IO completes. | 693 | * lower_barrier when the particular background IO completes. |
692 | */ | 694 | */ |
693 | #define RESYNC_DEPTH 32 | ||
694 | 695 | ||
695 | static void raise_barrier(conf_t *conf, int force) | 696 | static void raise_barrier(conf_t *conf, int force) |
696 | { | 697 | { |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 40e939675657..224de022e7c5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2568,10 +2568,10 @@ static bool handle_stripe5(struct stripe_head *sh) | |||
2568 | if (dev->written) | 2568 | if (dev->written) |
2569 | s.written++; | 2569 | s.written++; |
2570 | rdev = rcu_dereference(conf->disks[i].rdev); | 2570 | rdev = rcu_dereference(conf->disks[i].rdev); |
2571 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | 2571 | if (blocked_rdev == NULL && |
2572 | rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | ||
2572 | blocked_rdev = rdev; | 2573 | blocked_rdev = rdev; |
2573 | atomic_inc(&rdev->nr_pending); | 2574 | atomic_inc(&rdev->nr_pending); |
2574 | break; | ||
2575 | } | 2575 | } |
2576 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { | 2576 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { |
2577 | /* The ReadError flag will just be confusing now */ | 2577 | /* The ReadError flag will just be confusing now */ |
@@ -2588,8 +2588,14 @@ static bool handle_stripe5(struct stripe_head *sh) | |||
2588 | rcu_read_unlock(); | 2588 | rcu_read_unlock(); |
2589 | 2589 | ||
2590 | if (unlikely(blocked_rdev)) { | 2590 | if (unlikely(blocked_rdev)) { |
2591 | set_bit(STRIPE_HANDLE, &sh->state); | 2591 | if (s.syncing || s.expanding || s.expanded || |
2592 | goto unlock; | 2592 | s.to_write || s.written) { |
2593 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2594 | goto unlock; | ||
2595 | } | ||
2596 | /* There is nothing for the blocked_rdev to block */ | ||
2597 | rdev_dec_pending(blocked_rdev, conf->mddev); | ||
2598 | blocked_rdev = NULL; | ||
2593 | } | 2599 | } |
2594 | 2600 | ||
2595 | if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { | 2601 | if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { |
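The change above only parks the stripe on a Blocked rdev when there is work that blocking could actually affect; otherwise the nr_pending reference taken earlier is dropped again and handling continues. A hedged sketch of that predicate, with a minimal struct standing in for the driver's stripe_head_state (the same test appears in both handle_stripe5() and handle_stripe6()):

    /* Returns non-zero when a Blocked rdev must defer stripe handling. */
    struct stripe_work {
            int syncing, expanding, expanded;
            int to_write, written;
    };

    static int blocked_rdev_must_wait(const struct stripe_work *s)
    {
            return s->syncing || s->expanding || s->expanded ||
                   s->to_write || s->written;
    }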
@@ -2832,10 +2838,10 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
2832 | if (dev->written) | 2838 | if (dev->written) |
2833 | s.written++; | 2839 | s.written++; |
2834 | rdev = rcu_dereference(conf->disks[i].rdev); | 2840 | rdev = rcu_dereference(conf->disks[i].rdev); |
2835 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | 2841 | if (blocked_rdev == NULL && |
2842 | rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | ||
2836 | blocked_rdev = rdev; | 2843 | blocked_rdev = rdev; |
2837 | atomic_inc(&rdev->nr_pending); | 2844 | atomic_inc(&rdev->nr_pending); |
2838 | break; | ||
2839 | } | 2845 | } |
2840 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { | 2846 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { |
2841 | /* The ReadError flag will just be confusing now */ | 2847 | /* The ReadError flag will just be confusing now */ |
@@ -2853,9 +2859,16 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
2853 | rcu_read_unlock(); | 2859 | rcu_read_unlock(); |
2854 | 2860 | ||
2855 | if (unlikely(blocked_rdev)) { | 2861 | if (unlikely(blocked_rdev)) { |
2856 | set_bit(STRIPE_HANDLE, &sh->state); | 2862 | if (s.syncing || s.expanding || s.expanded || |
2857 | goto unlock; | 2863 | s.to_write || s.written) { |
2864 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2865 | goto unlock; | ||
2866 | } | ||
2867 | /* There is nothing for the blocked_rdev to block */ | ||
2868 | rdev_dec_pending(blocked_rdev, conf->mddev); | ||
2869 | blocked_rdev = NULL; | ||
2858 | } | 2870 | } |
2871 | |||
2859 | pr_debug("locked=%d uptodate=%d to_read=%d" | 2872 | pr_debug("locked=%d uptodate=%d to_read=%d" |
2860 | " to_write=%d failed=%d failed_num=%d,%d\n", | 2873 | " to_write=%d failed=%d failed_num=%d,%d\n", |
2861 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, | 2874 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, |
@@ -4446,6 +4459,9 @@ static int raid5_check_reshape(mddev_t *mddev) | |||
4446 | return -EINVAL; /* Cannot shrink array or change level yet */ | 4459 | return -EINVAL; /* Cannot shrink array or change level yet */ |
4447 | if (mddev->delta_disks == 0) | 4460 | if (mddev->delta_disks == 0) |
4448 | return 0; /* nothing to do */ | 4461 | return 0; /* nothing to do */ |
4462 | if (mddev->bitmap) | ||
4463 | /* Cannot grow a bitmap yet */ | ||
4464 | return -EBUSY; | ||
4449 | 4465 | ||
4450 | /* Can only proceed if there are plenty of stripe_heads. | 4466 | /* Can only proceed if there are plenty of stripe_heads. |
4451 | * We need a minimum of one full stripe, and for sensible progress | 4467 | * We need a minimum of one full stripe, and for sensible progress |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index c6408a62d95e..bc2a807f210d 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -16,7 +16,6 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/version.h> | ||
20 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
21 | #include <linux/irq.h> | 20 | #include <linux/irq.h> |
22 | #include <linux/gpio.h> | 21 | #include <linux/gpio.h> |
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c index e7a3fe508dff..c6c77a505ec1 100644 --- a/drivers/misc/acer-wmi.c +++ b/drivers/misc/acer-wmi.c | |||
@@ -192,6 +192,9 @@ static struct quirk_entry *quirks; | |||
192 | 192 | ||
193 | static void set_quirks(void) | 193 | static void set_quirks(void) |
194 | { | 194 | { |
195 | if (!interface) | ||
196 | return; | ||
197 | |||
195 | if (quirks->mailled) | 198 | if (quirks->mailled) |
196 | interface->capability |= ACER_CAP_MAILLED; | 199 | interface->capability |= ACER_CAP_MAILLED; |
197 | 200 | ||
@@ -803,11 +806,30 @@ static acpi_status get_u32(u32 *value, u32 cap) | |||
803 | 806 | ||
804 | static acpi_status set_u32(u32 value, u32 cap) | 807 | static acpi_status set_u32(u32 value, u32 cap) |
805 | { | 808 | { |
809 | acpi_status status; | ||
810 | |||
806 | if (interface->capability & cap) { | 811 | if (interface->capability & cap) { |
807 | switch (interface->type) { | 812 | switch (interface->type) { |
808 | case ACER_AMW0: | 813 | case ACER_AMW0: |
809 | return AMW0_set_u32(value, cap, interface); | 814 | return AMW0_set_u32(value, cap, interface); |
810 | case ACER_AMW0_V2: | 815 | case ACER_AMW0_V2: |
816 | if (cap == ACER_CAP_MAILLED) | ||
817 | return AMW0_set_u32(value, cap, interface); | ||
818 | |||
819 | /* | ||
820 | * On some models, some WMID methods don't toggle | ||
821 | * properly. For those cases, we want to run the AMW0 | ||
822 | * method afterwards to be certain we've really toggled | ||
823 | * the device state. | ||
824 | */ | ||
825 | if (cap == ACER_CAP_WIRELESS || | ||
826 | cap == ACER_CAP_BLUETOOTH) { | ||
827 | status = WMID_set_u32(value, cap, interface); | ||
828 | if (ACPI_FAILURE(status)) | ||
829 | return status; | ||
830 | |||
831 | return AMW0_set_u32(value, cap, interface); | ||
832 | } | ||
811 | case ACER_WMID: | 833 | case ACER_WMID: |
812 | return WMID_set_u32(value, cap, interface); | 834 | return WMID_set_u32(value, cap, interface); |
813 | default: | 835 | default: |
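For the ACER_AMW0_V2 case added above, mail-LED writes still go through AMW0, while wireless and bluetooth first use the WMID method and then also run the AMW0 method so quirky models really end up in the requested state. A hedged sketch of that flow, assuming it sits in acer-wmi.c next to the driver's own helpers:

    static acpi_status set_rf_state_v2(u32 value, u32 cap)
    {
            acpi_status status = WMID_set_u32(value, cap, interface);

            if (ACPI_FAILURE(status))
                    return status;

            /* Follow up with the AMW0 method to be certain the device
             * state actually toggled. */
            return AMW0_set_u32(value, cap, interface);
    }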
@@ -1218,6 +1240,8 @@ static int __init acer_wmi_init(void) | |||
1218 | return -ENODEV; | 1240 | return -ENODEV; |
1219 | } | 1241 | } |
1220 | 1242 | ||
1243 | set_quirks(); | ||
1244 | |||
1221 | if (platform_driver_register(&acer_platform_driver)) { | 1245 | if (platform_driver_register(&acer_platform_driver)) { |
1222 | printk(ACER_ERR "Unable to register platform driver.\n"); | 1246 | printk(ACER_ERR "Unable to register platform driver.\n"); |
1223 | goto error_platform_register; | 1247 | goto error_platform_register; |
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c index 9e8d79e7e9f4..facdb9893c84 100644 --- a/drivers/misc/eeepc-laptop.c +++ b/drivers/misc/eeepc-laptop.c | |||
@@ -553,9 +553,9 @@ static void eeepc_hwmon_exit(void) | |||
553 | hwmon = eeepc_hwmon_device; | 553 | hwmon = eeepc_hwmon_device; |
554 | if (!hwmon) | 554 | if (!hwmon) |
555 | return ; | 555 | return ; |
556 | hwmon_device_unregister(hwmon); | ||
557 | sysfs_remove_group(&hwmon->kobj, | 556 | sysfs_remove_group(&hwmon->kobj, |
558 | &hwmon_attribute_group); | 557 | &hwmon_attribute_group); |
558 | hwmon_device_unregister(hwmon); | ||
559 | eeepc_hwmon_device = NULL; | 559 | eeepc_hwmon_device = NULL; |
560 | } | 560 | } |
561 | 561 | ||
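The reorder above follows the usual rule that teardown mirrors setup in reverse: the sysfs attribute group is removed while its parent hwmon device still exists, and only then is the device unregistered. A small hedged sketch of the paired setup/teardown order (names are illustrative):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/hwmon.h>
    #include <linux/sysfs.h>

    static struct device *example_hwmon;

    static int example_hwmon_init(struct device *parent,
                                  const struct attribute_group *grp)
    {
            example_hwmon = hwmon_device_register(parent);
            if (IS_ERR(example_hwmon))
                    return PTR_ERR(example_hwmon);
            return sysfs_create_group(&example_hwmon->kobj, grp);
    }

    static void example_hwmon_exit(const struct attribute_group *grp)
    {
            /* reverse order: remove the attributes first, then the device */
            sysfs_remove_group(&example_hwmon->kobj, grp);
            hwmon_device_unregister(example_hwmon);
            example_hwmon = NULL;
    }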
diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom_93cx6.c index ea55654e5948..15b1780025c8 100644 --- a/drivers/misc/eeprom_93cx6.c +++ b/drivers/misc/eeprom_93cx6.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/version.h> | ||
30 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
31 | #include <linux/eeprom_93cx6.h> | 30 | #include <linux/eeprom_93cx6.h> |
32 | 31 | ||
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 4251018f70ff..a78f70deeb59 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h | |||
@@ -279,7 +279,7 @@ struct gru_stats_s { | |||
279 | #if defined CONFIG_IA64 | 279 | #if defined CONFIG_IA64 |
280 | #define VADDR_HI_BIT 64 | 280 | #define VADDR_HI_BIT 64 |
281 | #define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) | 281 | #define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) |
282 | #elif defined __x86_64 | 282 | #elif defined CONFIG_X86_64 |
283 | #define VADDR_HI_BIT 48 | 283 | #define VADDR_HI_BIT 48 |
284 | #define GRUREGION(addr) (0) /* ZZZ could do better */ | 284 | #define GRUREGION(addr) (0) /* ZZZ could do better */ |
285 | #else | 285 | #else |
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 7c994e1ae276..ae16d845d746 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c | |||
@@ -595,8 +595,9 @@ static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id) | |||
595 | return IRQ_HANDLED; | 595 | return IRQ_HANDLED; |
596 | } | 596 | } |
597 | 597 | ||
598 | void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id, | 598 | static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, |
599 | int size, enum s3c2410_dma_buffresult result) | 599 | void *buf_id, int size, |
600 | enum s3c2410_dma_buffresult result) | ||
600 | { | 601 | { |
601 | struct s3cmci_host *host = buf_id; | 602 | struct s3cmci_host *host = buf_id; |
602 | unsigned long iflags; | 603 | unsigned long iflags; |
@@ -740,8 +741,8 @@ request_done: | |||
740 | mmc_request_done(host->mmc, mrq); | 741 | mmc_request_done(host->mmc, mrq); |
741 | } | 742 | } |
742 | 743 | ||
743 | 744 | static void s3cmci_dma_setup(struct s3cmci_host *host, | |
744 | void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source) | 745 | enum s3c2410_dmasrc source) |
745 | { | 746 | { |
746 | static enum s3c2410_dmasrc last_source = -1; | 747 | static enum s3c2410_dmasrc last_source = -1; |
747 | static int setup_ok; | 748 | static int setup_ok; |
@@ -1003,8 +1004,9 @@ static void s3cmci_send_request(struct mmc_host *mmc) | |||
1003 | enable_irq(host->irq); | 1004 | enable_irq(host->irq); |
1004 | } | 1005 | } |
1005 | 1006 | ||
1006 | static int s3cmci_card_present(struct s3cmci_host *host) | 1007 | static int s3cmci_card_present(struct mmc_host *mmc) |
1007 | { | 1008 | { |
1009 | struct s3cmci_host *host = mmc_priv(mmc); | ||
1008 | struct s3c24xx_mci_pdata *pdata = host->pdata; | 1010 | struct s3c24xx_mci_pdata *pdata = host->pdata; |
1009 | int ret; | 1011 | int ret; |
1010 | 1012 | ||
@@ -1023,7 +1025,7 @@ static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1023 | host->cmd_is_stop = 0; | 1025 | host->cmd_is_stop = 0; |
1024 | host->mrq = mrq; | 1026 | host->mrq = mrq; |
1025 | 1027 | ||
1026 | if (s3cmci_card_present(host) == 0) { | 1028 | if (s3cmci_card_present(mmc) == 0) { |
1027 | dbg(host, dbg_err, "%s: no medium present\n", __func__); | 1029 | dbg(host, dbg_err, "%s: no medium present\n", __func__); |
1028 | host->mrq->cmd->error = -ENOMEDIUM; | 1030 | host->mrq->cmd->error = -ENOMEDIUM; |
1029 | mmc_request_done(mmc, mrq); | 1031 | mmc_request_done(mmc, mrq); |
@@ -1138,6 +1140,7 @@ static struct mmc_host_ops s3cmci_ops = { | |||
1138 | .request = s3cmci_request, | 1140 | .request = s3cmci_request, |
1139 | .set_ios = s3cmci_set_ios, | 1141 | .set_ios = s3cmci_set_ios, |
1140 | .get_ro = s3cmci_get_ro, | 1142 | .get_ro = s3cmci_get_ro, |
1143 | .get_cd = s3cmci_card_present, | ||
1141 | }; | 1144 | }; |
1142 | 1145 | ||
1143 | static struct s3c24xx_mci_pdata s3cmci_def_pdata = { | 1146 | static struct s3c24xx_mci_pdata s3cmci_def_pdata = { |
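With the signature change above, the card-detect helper can double as the standard .get_cd host operation: the MMC core hands it the mmc_host, and the driver recovers its private state with mmc_priv(). A hedged sketch with illustrative names (not the s3cmci platform-data layout):

    #include <linux/gpio.h>
    #include <linux/mmc/host.h>

    struct example_host {
            int detect_gpio;        /* active-low card-detect line */
    };

    static int example_card_present(struct mmc_host *mmc)
    {
            struct example_host *host = mmc_priv(mmc);

            /* return 1 when a card is present, 0 when the slot is empty */
            return gpio_get_value(host->detect_gpio) ? 0 : 1;
    }

    static struct mmc_host_ops example_ops = {
            .get_cd = example_card_present,
    };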
@@ -1206,7 +1209,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) | |||
1206 | } | 1209 | } |
1207 | 1210 | ||
1208 | host->base = ioremap(host->mem->start, RESSIZE(host->mem)); | 1211 | host->base = ioremap(host->mem->start, RESSIZE(host->mem)); |
1209 | if (host->base == 0) { | 1212 | if (!host->base) { |
1210 | dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); | 1213 | dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); |
1211 | ret = -EINVAL; | 1214 | ret = -EINVAL; |
1212 | goto probe_free_mem_region; | 1215 | goto probe_free_mem_region; |
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index f99e9f721629..1df44d966bdb 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <linux/ioport.h> | 30 | #include <linux/ioport.h> |
31 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
32 | #include <linux/version.h> | ||
33 | 32 | ||
34 | #include <pcmcia/cs_types.h> | 33 | #include <pcmcia/cs_types.h> |
35 | #include <pcmcia/cs.h> | 34 | #include <pcmcia/cs.h> |
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 64002488c6ee..917cf8d3ae95 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <asm/io.h> | 19 | #include <asm/io.h> |
20 | #include <asm/sizes.h> | 20 | #include <asm/sizes.h> |
21 | #include <mach/hardware.h> | 21 | #include <mach/hardware.h> |
22 | #include <asm/plat-orion/orion_nand.h> | 22 | #include <plat/orion_nand.h> |
23 | 23 | ||
24 | #ifdef CONFIG_MTD_CMDLINE_PARTS | 24 | #ifdef CONFIG_MTD_CMDLINE_PARTS |
25 | static const char *part_probes[] = { "cmdlinepart", NULL }; | 25 | static const char *part_probes[] = { "cmdlinepart", NULL }; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 4b4cb2bf4f11..4a11296a9514 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -822,14 +822,14 @@ config ULTRA32 | |||
822 | will be called smc-ultra32. | 822 | will be called smc-ultra32. |
823 | 823 | ||
824 | config BFIN_MAC | 824 | config BFIN_MAC |
825 | tristate "Blackfin 527/536/537 on-chip mac support" | 825 | tristate "Blackfin on-chip MAC support" |
826 | depends on NET_ETHERNET && (BF527 || BF537 || BF536) | 826 | depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537) |
827 | select CRC32 | 827 | select CRC32 |
828 | select MII | 828 | select MII |
829 | select PHYLIB | 829 | select PHYLIB |
830 | select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE | 830 | select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE |
831 | help | 831 | help |
832 | This is the driver for blackfin on-chip mac device. Say Y if you want it | 832 | This is the driver for Blackfin on-chip mac device. Say Y if you want it |
833 | compiled into the kernel. This driver is also available as a module | 833 | compiled into the kernel. This driver is also available as a module |
834 | ( = code which can be inserted in and removed from the running kernel | 834 | ( = code which can be inserted in and removed from the running kernel |
835 | whenever you want). The module will be called bfin_mac. | 835 | whenever you want). The module will be called bfin_mac. |
@@ -1172,7 +1172,7 @@ config ETH16I | |||
1172 | 1172 | ||
1173 | config NE2000 | 1173 | config NE2000 |
1174 | tristate "NE2000/NE1000 support" | 1174 | tristate "NE2000/NE1000 support" |
1175 | depends on NET_ISA || (Q40 && m) || M32R || TOSHIBA_RBTX4927 || TOSHIBA_RBTX4938 | 1175 | depends on NET_ISA || (Q40 && m) || M32R || MACH_TX49XX |
1176 | select CRC32 | 1176 | select CRC32 |
1177 | ---help--- | 1177 | ---help--- |
1178 | If you have a network (Ethernet) card of this type, say Y and read | 1178 | If you have a network (Ethernet) card of this type, say Y and read |
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index e4483de84e7f..66de80b64b92 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c | |||
@@ -52,7 +52,6 @@ | |||
52 | 52 | ||
53 | #include <linux/module.h> | 53 | #include <linux/module.h> |
54 | #include <linux/moduleparam.h> | 54 | #include <linux/moduleparam.h> |
55 | #include <linux/version.h> | ||
56 | #include <linux/types.h> | 55 | #include <linux/types.h> |
57 | #include <linux/errno.h> | 56 | #include <linux/errno.h> |
58 | #include <linux/ioport.h> | 57 | #include <linux/ioport.h> |
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 020771bfb603..e2d702b8b2e4 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c | |||
@@ -551,7 +551,7 @@ static int eth_poll(struct napi_struct *napi, int budget) | |||
551 | if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { | 551 | if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { |
552 | phys = dma_map_single(&dev->dev, skb->data, | 552 | phys = dma_map_single(&dev->dev, skb->data, |
553 | RX_BUFF_SIZE, DMA_FROM_DEVICE); | 553 | RX_BUFF_SIZE, DMA_FROM_DEVICE); |
554 | if (dma_mapping_error(phys)) { | 554 | if (dma_mapping_error(&dev->dev, phys)) { |
555 | dev_kfree_skb(skb); | 555 | dev_kfree_skb(skb); |
556 | skb = NULL; | 556 | skb = NULL; |
557 | } | 557 | } |
@@ -698,7 +698,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
698 | #endif | 698 | #endif |
699 | 699 | ||
700 | phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); | 700 | phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); |
701 | if (dma_mapping_error(phys)) { | 701 | if (dma_mapping_error(&dev->dev, phys)) { |
702 | #ifdef __ARMEB__ | 702 | #ifdef __ARMEB__ |
703 | dev_kfree_skb(skb); | 703 | dev_kfree_skb(skb); |
704 | #else | 704 | #else |
@@ -883,7 +883,7 @@ static int init_queues(struct port *port) | |||
883 | desc->buf_len = MAX_MRU; | 883 | desc->buf_len = MAX_MRU; |
884 | desc->data = dma_map_single(&port->netdev->dev, data, | 884 | desc->data = dma_map_single(&port->netdev->dev, data, |
885 | RX_BUFF_SIZE, DMA_FROM_DEVICE); | 885 | RX_BUFF_SIZE, DMA_FROM_DEVICE); |
886 | if (dma_mapping_error(desc->data)) { | 886 | if (dma_mapping_error(&port->netdev->dev, desc->data)) { |
887 | free_buffer(buff); | 887 | free_buffer(buff); |
888 | return -EIO; | 888 | return -EIO; |
889 | } | 889 | } |
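The ixp4xx_eth changes above track an API change: dma_mapping_error() now takes the device as its first argument so the check can use per-device DMA ops. A hedged sketch of the updated pattern (names illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Map an rx buffer and report failure the way the driver above does:
     * the skb is freed and the caller sees a NULL buffer. */
    static dma_addr_t map_rx_skb(struct net_device *dev,
                                 struct sk_buff **pskb, size_t len)
    {
            dma_addr_t phys = dma_map_single(&dev->dev, (*pskb)->data, len,
                                             DMA_FROM_DEVICE);

            if (dma_mapping_error(&dev->dev, phys)) {
                    dev_kfree_skb(*pskb);
                    *pskb = NULL;
                    return 0;       /* illustrative "no mapping" value */
            }
            return phys;
    }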
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c index cdc3b85b10b9..619c6583e1aa 100644 --- a/drivers/net/atl1e/atl1e_ethtool.c +++ b/drivers/net/atl1e/atl1e_ethtool.c | |||
@@ -355,7 +355,7 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
355 | struct atl1e_adapter *adapter = netdev_priv(netdev); | 355 | struct atl1e_adapter *adapter = netdev_priv(netdev); |
356 | 356 | ||
357 | if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | | 357 | if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | |
358 | WAKE_MCAST | WAKE_BCAST | WAKE_MCAST)) | 358 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) |
359 | return -EOPNOTSUPP; | 359 | return -EOPNOTSUPP; |
360 | /* these settings will always override what we currently have */ | 360 | /* these settings will always override what we currently have */ |
361 | adapter->wol = 0; | 361 | adapter->wol = 0; |
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index 82d7be1655d3..7685b995ff9b 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c | |||
@@ -2232,10 +2232,11 @@ static int atl1e_resume(struct pci_dev *pdev) | |||
2232 | 2232 | ||
2233 | AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); | 2233 | AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); |
2234 | 2234 | ||
2235 | if (netif_running(netdev)) | 2235 | if (netif_running(netdev)) { |
2236 | err = atl1e_request_irq(adapter); | 2236 | err = atl1e_request_irq(adapter); |
2237 | if (err) | 2237 | if (err) |
2238 | return err; | 2238 | return err; |
2239 | } | ||
2239 | 2240 | ||
2240 | atl1e_reset_hw(&adapter->hw); | 2241 | atl1e_reset_hw(&adapter->hw); |
2241 | 2242 | ||
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index e6a7bb79d4df..e23ce77712f1 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c | |||
@@ -3022,7 +3022,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, | |||
3022 | netdev->features = NETIF_F_HW_CSUM; | 3022 | netdev->features = NETIF_F_HW_CSUM; |
3023 | netdev->features |= NETIF_F_SG; | 3023 | netdev->features |= NETIF_F_SG; |
3024 | netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); | 3024 | netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); |
3025 | netdev->features |= NETIF_F_TSO; | ||
3026 | netdev->features |= NETIF_F_LLTX; | 3025 | netdev->features |= NETIF_F_LLTX; |
3027 | 3026 | ||
3028 | /* | 3027 | /* |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index cb8be490e5ae..5ee1b0557a02 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -807,7 +807,7 @@ err_out: | |||
807 | static int au1000_init(struct net_device *dev) | 807 | static int au1000_init(struct net_device *dev) |
808 | { | 808 | { |
809 | struct au1000_private *aup = (struct au1000_private *) dev->priv; | 809 | struct au1000_private *aup = (struct au1000_private *) dev->priv; |
810 | u32 flags; | 810 | unsigned long flags; |
811 | int i; | 811 | int i; |
812 | u32 control; | 812 | u32 control; |
813 | 813 | ||
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 0b4adf4a0f7d..a886a4b9f7e5 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c | |||
@@ -554,7 +554,7 @@ static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
554 | 554 | ||
555 | spin_lock_irqsave(&ax->mii_lock, flags); | 555 | spin_lock_irqsave(&ax->mii_lock, flags); |
556 | mii_ethtool_gset(&ax->mii, cmd); | 556 | mii_ethtool_gset(&ax->mii, cmd); |
557 | spin_lock_irqsave(&ax->mii_lock, flags); | 557 | spin_unlock_irqrestore(&ax->mii_lock, flags); |
558 | 558 | ||
559 | return 0; | 559 | return 0; |
560 | } | 560 | } |
@@ -567,7 +567,7 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
567 | 567 | ||
568 | spin_lock_irqsave(&ax->mii_lock, flags); | 568 | spin_lock_irqsave(&ax->mii_lock, flags); |
569 | rc = mii_ethtool_sset(&ax->mii, cmd); | 569 | rc = mii_ethtool_sset(&ax->mii, cmd); |
570 | spin_lock_irqsave(&ax->mii_lock, flags); | 570 | spin_unlock_irqrestore(&ax->mii_lock, flags); |
571 | 571 | ||
572 | return rc; | 572 | return rc; |
573 | } | 573 | } |
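Both ax88796 fixes above are about the same locking discipline: spin_lock_irqsave() must be paired with spin_unlock_irqrestore() (the original code re-locked instead of unlocking), and the saved flags must be an unsigned long, as the au1000_eth change earlier makes explicit. A minimal sketch of the correct pattern:

    #include <linux/spinlock.h>

    static void update_under_lock(spinlock_t *lock, int *shared, int value)
    {
            unsigned long flags;    /* never u32: flags are arch-sized */

            spin_lock_irqsave(lock, flags);
            *shared = value;
            spin_unlock_irqrestore(lock, flags);
    }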
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 5ebde67d4297..2486a656f12d 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -35,8 +35,8 @@ | |||
35 | #include <linux/time.h> | 35 | #include <linux/time.h> |
36 | #include <linux/ethtool.h> | 36 | #include <linux/ethtool.h> |
37 | #include <linux/mii.h> | 37 | #include <linux/mii.h> |
38 | #ifdef NETIF_F_HW_VLAN_TX | ||
39 | #include <linux/if_vlan.h> | 38 | #include <linux/if_vlan.h> |
39 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
40 | #define BCM_VLAN 1 | 40 | #define BCM_VLAN 1 |
41 | #endif | 41 | #endif |
42 | #include <net/ip.h> | 42 | #include <net/ip.h> |
@@ -57,8 +57,8 @@ | |||
57 | 57 | ||
58 | #define DRV_MODULE_NAME "bnx2" | 58 | #define DRV_MODULE_NAME "bnx2" |
59 | #define PFX DRV_MODULE_NAME ": " | 59 | #define PFX DRV_MODULE_NAME ": " |
60 | #define DRV_MODULE_VERSION "1.7.9" | 60 | #define DRV_MODULE_VERSION "1.8.0" |
61 | #define DRV_MODULE_RELDATE "July 18, 2008" | 61 | #define DRV_MODULE_RELDATE "Aug 14, 2008" |
62 | 62 | ||
63 | #define RUN_AT(x) (jiffies + (x)) | 63 | #define RUN_AT(x) (jiffies + (x)) |
64 | 64 | ||
@@ -2876,6 +2876,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
2876 | struct sw_bd *rx_buf; | 2876 | struct sw_bd *rx_buf; |
2877 | struct sk_buff *skb; | 2877 | struct sk_buff *skb; |
2878 | dma_addr_t dma_addr; | 2878 | dma_addr_t dma_addr; |
2879 | u16 vtag = 0; | ||
2880 | int hw_vlan __maybe_unused = 0; | ||
2879 | 2881 | ||
2880 | sw_ring_cons = RX_RING_IDX(sw_cons); | 2882 | sw_ring_cons = RX_RING_IDX(sw_cons); |
2881 | sw_ring_prod = RX_RING_IDX(sw_prod); | 2883 | sw_ring_prod = RX_RING_IDX(sw_prod); |
@@ -2919,7 +2921,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
2919 | if (len <= bp->rx_copy_thresh) { | 2921 | if (len <= bp->rx_copy_thresh) { |
2920 | struct sk_buff *new_skb; | 2922 | struct sk_buff *new_skb; |
2921 | 2923 | ||
2922 | new_skb = netdev_alloc_skb(bp->dev, len + 2); | 2924 | new_skb = netdev_alloc_skb(bp->dev, len + 6); |
2923 | if (new_skb == NULL) { | 2925 | if (new_skb == NULL) { |
2924 | bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, | 2926 | bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, |
2925 | sw_ring_prod); | 2927 | sw_ring_prod); |
@@ -2928,9 +2930,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
2928 | 2930 | ||
2929 | /* aligned copy */ | 2931 | /* aligned copy */ |
2930 | skb_copy_from_linear_data_offset(skb, | 2932 | skb_copy_from_linear_data_offset(skb, |
2931 | BNX2_RX_OFFSET - 2, | 2933 | BNX2_RX_OFFSET - 6, |
2932 | new_skb->data, len + 2); | 2934 | new_skb->data, len + 6); |
2933 | skb_reserve(new_skb, 2); | 2935 | skb_reserve(new_skb, 6); |
2934 | skb_put(new_skb, len); | 2936 | skb_put(new_skb, len); |
2935 | 2937 | ||
2936 | bnx2_reuse_rx_skb(bp, rxr, skb, | 2938 | bnx2_reuse_rx_skb(bp, rxr, skb, |
@@ -2941,6 +2943,25 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
2941 | dma_addr, (sw_ring_cons << 16) | sw_ring_prod))) | 2943 | dma_addr, (sw_ring_cons << 16) | sw_ring_prod))) |
2942 | goto next_rx; | 2944 | goto next_rx; |
2943 | 2945 | ||
2946 | if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && | ||
2947 | !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) { | ||
2948 | vtag = rx_hdr->l2_fhdr_vlan_tag; | ||
2949 | #ifdef BCM_VLAN | ||
2950 | if (bp->vlgrp) | ||
2951 | hw_vlan = 1; | ||
2952 | else | ||
2953 | #endif | ||
2954 | { | ||
2955 | struct vlan_ethhdr *ve = (struct vlan_ethhdr *) | ||
2956 | __skb_push(skb, 4); | ||
2957 | |||
2958 | memmove(ve, skb->data + 4, ETH_ALEN * 2); | ||
2959 | ve->h_vlan_proto = htons(ETH_P_8021Q); | ||
2960 | ve->h_vlan_TCI = htons(vtag); | ||
2961 | len += 4; | ||
2962 | } | ||
2963 | } | ||
2964 | |||
2944 | skb->protocol = eth_type_trans(skb, bp->dev); | 2965 | skb->protocol = eth_type_trans(skb, bp->dev); |
2945 | 2966 | ||
2946 | if ((len > (bp->dev->mtu + ETH_HLEN)) && | 2967 | if ((len > (bp->dev->mtu + ETH_HLEN)) && |
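When no VLAN group is registered, the hunk above puts the hardware-stripped 802.1Q tag back into the packet data before handing the skb up (the copy path now reserves 6 bytes instead of 2 so there is headroom for the 4-byte tag). A hedged sketch of the reinsertion step:

    #include <linux/if_vlan.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Rebuild an 802.1Q header in front of the payload: push 4 bytes,
     * slide the destination/source MAC addresses down, then fill in the
     * protocol and tag-control fields. */
    static void reinsert_vlan_tag(struct sk_buff *skb, u16 vtag)
    {
            struct vlan_ethhdr *ve =
                    (struct vlan_ethhdr *)__skb_push(skb, VLAN_HLEN);

            memmove(ve, skb->data + VLAN_HLEN, ETH_ALEN * 2);
            ve->h_vlan_proto = htons(ETH_P_8021Q);
            ve->h_vlan_TCI = htons(vtag);
    }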
@@ -2962,10 +2983,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) | |||
2962 | } | 2983 | } |
2963 | 2984 | ||
2964 | #ifdef BCM_VLAN | 2985 | #ifdef BCM_VLAN |
2965 | if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) { | 2986 | if (hw_vlan) |
2966 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, | 2987 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); |
2967 | rx_hdr->l2_fhdr_vlan_tag); | ||
2968 | } | ||
2969 | else | 2988 | else |
2970 | #endif | 2989 | #endif |
2971 | netif_receive_skb(skb); | 2990 | netif_receive_skb(skb); |
@@ -3237,10 +3256,10 @@ bnx2_set_rx_mode(struct net_device *dev) | |||
3237 | BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); | 3256 | BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); |
3238 | sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; | 3257 | sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; |
3239 | #ifdef BCM_VLAN | 3258 | #ifdef BCM_VLAN |
3240 | if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE)) | 3259 | if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) |
3241 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; | 3260 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; |
3242 | #else | 3261 | #else |
3243 | if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) | 3262 | if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) |
3244 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; | 3263 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; |
3245 | #endif | 3264 | #endif |
3246 | if (dev->flags & IFF_PROMISC) { | 3265 | if (dev->flags & IFF_PROMISC) { |
@@ -5963,10 +5982,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
5963 | vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; | 5982 | vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; |
5964 | } | 5983 | } |
5965 | 5984 | ||
5985 | #ifdef BCM_VLAN | ||
5966 | if (bp->vlgrp && vlan_tx_tag_present(skb)) { | 5986 | if (bp->vlgrp && vlan_tx_tag_present(skb)) { |
5967 | vlan_tag_flags |= | 5987 | vlan_tag_flags |= |
5968 | (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); | 5988 | (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); |
5969 | } | 5989 | } |
5990 | #endif | ||
5970 | if ((mss = skb_shinfo(skb)->gso_size)) { | 5991 | if ((mss = skb_shinfo(skb)->gso_size)) { |
5971 | u32 tcp_opt_len, ip_tcp_len; | 5992 | u32 tcp_opt_len, ip_tcp_len; |
5972 | struct iphdr *iph; | 5993 | struct iphdr *iph; |
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h index 4bf4f7b205f2..a14dba1afcc5 100644 --- a/drivers/net/bnx2x.h +++ b/drivers/net/bnx2x.h | |||
@@ -40,20 +40,20 @@ | |||
40 | #define DP(__mask, __fmt, __args...) do { \ | 40 | #define DP(__mask, __fmt, __args...) do { \ |
41 | if (bp->msglevel & (__mask)) \ | 41 | if (bp->msglevel & (__mask)) \ |
42 | printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ | 42 | printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ |
43 | bp->dev?(bp->dev->name):"?", ##__args); \ | 43 | bp->dev ? (bp->dev->name) : "?", ##__args); \ |
44 | } while (0) | 44 | } while (0) |
45 | 45 | ||
46 | /* errors debug print */ | 46 | /* errors debug print */ |
47 | #define BNX2X_DBG_ERR(__fmt, __args...) do { \ | 47 | #define BNX2X_DBG_ERR(__fmt, __args...) do { \ |
48 | if (bp->msglevel & NETIF_MSG_PROBE) \ | 48 | if (bp->msglevel & NETIF_MSG_PROBE) \ |
49 | printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ | 49 | printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ |
50 | bp->dev?(bp->dev->name):"?", ##__args); \ | 50 | bp->dev ? (bp->dev->name) : "?", ##__args); \ |
51 | } while (0) | 51 | } while (0) |
52 | 52 | ||
53 | /* for errors (never masked) */ | 53 | /* for errors (never masked) */ |
54 | #define BNX2X_ERR(__fmt, __args...) do { \ | 54 | #define BNX2X_ERR(__fmt, __args...) do { \ |
55 | printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ | 55 | printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ |
56 | bp->dev?(bp->dev->name):"?", ##__args); \ | 56 | bp->dev ? (bp->dev->name) : "?", ##__args); \ |
57 | } while (0) | 57 | } while (0) |
58 | 58 | ||
59 | /* before we have a dev->name use dev_info() */ | 59 | /* before we have a dev->name use dev_info() */ |
@@ -120,16 +120,8 @@ | |||
120 | #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) | 120 | #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) |
121 | #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) | 121 | #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) |
122 | 122 | ||
123 | #define NIG_WR(reg, val) REG_WR(bp, reg, val) | 123 | #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) |
124 | #define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val) | 124 | #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) |
125 | #define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val) | ||
126 | |||
127 | |||
128 | #define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++) | ||
129 | |||
130 | #define for_each_nondefault_queue(bp, var) \ | ||
131 | for (var = 1; var < bp->num_queues; var++) | ||
132 | #define is_multi(bp) (bp->num_queues > 1) | ||
133 | 125 | ||
134 | 126 | ||
135 | /* fast path */ | 127 | /* fast path */ |
@@ -163,7 +155,7 @@ struct sw_rx_page { | |||
163 | #define NUM_RX_SGE_PAGES 2 | 155 | #define NUM_RX_SGE_PAGES 2 |
164 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) | 156 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) |
165 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) | 157 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) |
166 | /* RX_SGE_CNT is promissed to be a power of 2 */ | 158 | /* RX_SGE_CNT is promised to be a power of 2 */ |
167 | #define RX_SGE_MASK (RX_SGE_CNT - 1) | 159 | #define RX_SGE_MASK (RX_SGE_CNT - 1) |
168 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) | 160 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) |
169 | #define MAX_RX_SGE (NUM_RX_SGE - 1) | 161 | #define MAX_RX_SGE (NUM_RX_SGE - 1) |
@@ -258,8 +250,7 @@ struct bnx2x_fastpath { | |||
258 | 250 | ||
259 | unsigned long tx_pkt, | 251 | unsigned long tx_pkt, |
260 | rx_pkt, | 252 | rx_pkt, |
261 | rx_calls, | 253 | rx_calls; |
262 | rx_alloc_failed; | ||
263 | /* TPA related */ | 254 | /* TPA related */ |
264 | struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; | 255 | struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; |
265 | u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; | 256 | u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; |
@@ -275,6 +266,15 @@ struct bnx2x_fastpath { | |||
275 | 266 | ||
276 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) | 267 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) |
277 | 268 | ||
269 | #define BNX2X_HAS_TX_WORK(fp) \ | ||
270 | ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \ | ||
271 | (fp->tx_pkt_prod != fp->tx_pkt_cons)) | ||
272 | |||
273 | #define BNX2X_HAS_RX_WORK(fp) \ | ||
274 | (fp->rx_comp_cons != rx_cons_sb) | ||
275 | |||
276 | #define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp)) | ||
277 | |||
278 | 278 | ||
279 | /* MC hsi */ | 279 | /* MC hsi */ |
280 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ | 280 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ |
@@ -317,7 +317,7 @@ struct bnx2x_fastpath { | |||
317 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) | 317 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) |
318 | 318 | ||
319 | 319 | ||
320 | /* This is needed for determening of last_max */ | 320 | /* This is needed for determining of last_max */ |
321 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) | 321 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
322 | 322 | ||
323 | #define __SGE_MASK_SET_BIT(el, bit) \ | 323 | #define __SGE_MASK_SET_BIT(el, bit) \ |
@@ -386,20 +386,28 @@ struct bnx2x_fastpath { | |||
386 | #define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ | 386 | #define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ |
387 | (TPA_TYPE_START | TPA_TYPE_END)) | 387 | (TPA_TYPE_START | TPA_TYPE_END)) |
388 | 388 | ||
389 | #define BNX2X_RX_SUM_OK(cqe) \ | 389 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
390 | (!(cqe->fast_path_cqe.status_flags & \ | 390 | |
391 | (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \ | 391 | #define BNX2X_IP_CSUM_ERR(cqe) \ |
392 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))) | 392 | (!((cqe)->fast_path_cqe.status_flags & \ |
393 | ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ | ||
394 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
395 | ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) | ||
396 | |||
397 | #define BNX2X_L4_CSUM_ERR(cqe) \ | ||
398 | (!((cqe)->fast_path_cqe.status_flags & \ | ||
399 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ | ||
400 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
401 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) | ||
402 | |||
403 | #define BNX2X_RX_CSUM_OK(cqe) \ | ||
404 | (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) | ||
393 | 405 | ||
394 | #define BNX2X_RX_SUM_FIX(cqe) \ | 406 | #define BNX2X_RX_SUM_FIX(cqe) \ |
395 | ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \ | 407 | ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \ |
396 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \ | 408 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \ |
397 | (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT)) | 409 | (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT)) |
398 | 410 | ||
399 | #define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \ | ||
400 | ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \ | ||
401 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG) | ||
402 | |||
403 | 411 | ||
404 | #define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) | 412 | #define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) |
405 | #define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) | 413 | #define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) |
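The reworked macros above only count a checksum error when the hardware actually validated that layer (the corresponding NO_VALIDATION status flag is clear) and flagged the sum bad; a frame is BNX2X_RX_CSUM_OK only if neither the IP nor the L4 check failed under that rule. A compact sketch of the logic:

    /* validated = the NO_VALIDATION status flag was clear,
     * bad       = the BAD_XSUM error flag was set. */
    static int rx_csum_ok(int ip_validated, int ip_bad,
                          int l4_validated, int l4_bad)
    {
            int ip_err = ip_validated && ip_bad;
            int l4_err = l4_validated && l4_bad;

            return !(ip_err || l4_err);
    }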
@@ -647,6 +655,8 @@ struct bnx2x_eth_stats { | |||
647 | 655 | ||
648 | u32 brb_drop_hi; | 656 | u32 brb_drop_hi; |
649 | u32 brb_drop_lo; | 657 | u32 brb_drop_lo; |
658 | u32 brb_truncate_hi; | ||
659 | u32 brb_truncate_lo; | ||
650 | 660 | ||
651 | u32 jabber_packets_received; | 661 | u32 jabber_packets_received; |
652 | 662 | ||
@@ -663,6 +673,9 @@ struct bnx2x_eth_stats { | |||
663 | u32 mac_discard; | 673 | u32 mac_discard; |
664 | 674 | ||
665 | u32 driver_xoff; | 675 | u32 driver_xoff; |
676 | u32 rx_err_discard_pkt; | ||
677 | u32 rx_skb_alloc_failed; | ||
678 | u32 hw_csum_err; | ||
666 | }; | 679 | }; |
667 | 680 | ||
668 | #define STATS_OFFSET32(stat_name) \ | 681 | #define STATS_OFFSET32(stat_name) \ |
@@ -753,7 +766,6 @@ struct bnx2x { | |||
753 | u16 def_att_idx; | 766 | u16 def_att_idx; |
754 | u32 attn_state; | 767 | u32 attn_state; |
755 | struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; | 768 | struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; |
756 | u32 aeu_mask; | ||
757 | u32 nig_mask; | 769 | u32 nig_mask; |
758 | 770 | ||
759 | /* slow path ring */ | 771 | /* slow path ring */ |
@@ -772,7 +784,7 @@ struct bnx2x { | |||
772 | u8 stats_pending; | 784 | u8 stats_pending; |
773 | u8 set_mac_pending; | 785 | u8 set_mac_pending; |
774 | 786 | ||
775 | /* End of fileds used in the performance code paths */ | 787 | /* End of fields used in the performance code paths */ |
776 | 788 | ||
777 | int panic; | 789 | int panic; |
778 | int msglevel; | 790 | int msglevel; |
@@ -794,9 +806,6 @@ struct bnx2x { | |||
794 | #define BP_FUNC(bp) (bp->func) | 806 | #define BP_FUNC(bp) (bp->func) |
795 | #define BP_E1HVN(bp) (bp->func >> 1) | 807 | #define BP_E1HVN(bp) (bp->func >> 1) |
796 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | 808 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) |
797 | /* assorted E1HVN */ | ||
798 | #define IS_E1HMF(bp) (bp->e1hmf != 0) | ||
799 | #define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16) | ||
800 | 809 | ||
801 | int pm_cap; | 810 | int pm_cap; |
802 | int pcie_cap; | 811 | int pcie_cap; |
@@ -821,6 +830,7 @@ struct bnx2x { | |||
821 | u32 mf_config; | 830 | u32 mf_config; |
822 | u16 e1hov; | 831 | u16 e1hov; |
823 | u8 e1hmf; | 832 | u8 e1hmf; |
833 | #define IS_E1HMF(bp) (bp->e1hmf != 0) | ||
824 | 834 | ||
825 | u8 wol; | 835 | u8 wol; |
826 | 836 | ||
@@ -836,7 +846,6 @@ struct bnx2x { | |||
836 | u16 rx_ticks_int; | 846 | u16 rx_ticks_int; |
837 | u16 rx_ticks; | 847 | u16 rx_ticks; |
838 | 848 | ||
839 | u32 stats_ticks; | ||
840 | u32 lin_cnt; | 849 | u32 lin_cnt; |
841 | 850 | ||
842 | int state; | 851 | int state; |
@@ -852,6 +861,7 @@ struct bnx2x { | |||
852 | #define BNX2X_STATE_ERROR 0xf000 | 861 | #define BNX2X_STATE_ERROR 0xf000 |
853 | 862 | ||
854 | int num_queues; | 863 | int num_queues; |
864 | #define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16) | ||
855 | 865 | ||
856 | u32 rx_mode; | 866 | u32 rx_mode; |
857 | #define BNX2X_RX_MODE_NONE 0 | 867 | #define BNX2X_RX_MODE_NONE 0 |
@@ -902,10 +912,17 @@ struct bnx2x { | |||
902 | }; | 912 | }; |
903 | 913 | ||
904 | 914 | ||
915 | #define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++) | ||
916 | |||
917 | #define for_each_nondefault_queue(bp, var) \ | ||
918 | for (var = 1; var < bp->num_queues; var++) | ||
919 | #define is_multi(bp) (bp->num_queues > 1) | ||
920 | |||
921 | |||
905 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); | 922 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); |
906 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | 923 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, |
907 | u32 len32); | 924 | u32 len32); |
908 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode); | 925 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); |
909 | 926 | ||
910 | static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | 927 | static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, |
911 | int wait) | 928 | int wait) |
@@ -976,7 +993,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
976 | #define PCICFG_LINK_SPEED_SHIFT 16 | 993 | #define PCICFG_LINK_SPEED_SHIFT 16 |
977 | 994 | ||
978 | 995 | ||
979 | #define BNX2X_NUM_STATS 39 | 996 | #define BNX2X_NUM_STATS 42 |
980 | #define BNX2X_NUM_TESTS 8 | 997 | #define BNX2X_NUM_TESTS 8 |
981 | 998 | ||
982 | #define BNX2X_MAC_LOOPBACK 0 | 999 | #define BNX2X_MAC_LOOPBACK 0 |
@@ -1007,10 +1024,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1007 | /* resolution of the rate shaping timer - 100 usec */ | 1024 | /* resolution of the rate shaping timer - 100 usec */ |
1008 | #define RS_PERIODIC_TIMEOUT_USEC 100 | 1025 | #define RS_PERIODIC_TIMEOUT_USEC 100 |
1009 | /* resolution of fairness algorithm in usecs - | 1026 | /* resolution of fairness algorithm in usecs - |
1010 | coefficient for clauclating the actuall t fair */ | 1027 | coefficient for calculating the actual t fair */ |
1011 | #define T_FAIR_COEF 10000000 | 1028 | #define T_FAIR_COEF 10000000 |
1012 | /* number of bytes in single QM arbitration cycle - | 1029 | /* number of bytes in single QM arbitration cycle - |
1013 | coeffiecnt for calculating the fairness timer */ | 1030 | coefficient for calculating the fairness timer */ |
1014 | #define QM_ARB_BYTES 40000 | 1031 | #define QM_ARB_BYTES 40000 |
1015 | #define FAIR_MEM 2 | 1032 | #define FAIR_MEM 2 |
1016 | 1033 | ||
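For a sense of the units involved (an illustrative calculation only, not code taken from the driver, and assuming the line rate is expressed in Mbps): at 10 Gbps a port moves about 1250 bytes per microsecond, so one QM_ARB_BYTES arbitration cycle of 40000 bytes lasts roughly 32 microseconds, while T_FAIR_COEF / 10000 puts the fairness interval on the order of 1000 microseconds; RS_PERIODIC_TIMEOUT_USEC then gives the 100 microsecond resolution at which the rate-shaping timer is serviced.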
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h index e3da7f69d27b..192fa981b930 100644 --- a/drivers/net/bnx2x_fw_defs.h +++ b/drivers/net/bnx2x_fw_defs.h | |||
@@ -9,165 +9,171 @@ | |||
9 | 9 | ||
10 | 10 | ||
11 | #define CSTORM_ASSERT_LIST_INDEX_OFFSET \ | 11 | #define CSTORM_ASSERT_LIST_INDEX_OFFSET \ |
12 | (IS_E1H_OFFSET? 0x7000 : 0x1000) | 12 | (IS_E1H_OFFSET ? 0x7000 : 0x1000) |
13 | #define CSTORM_ASSERT_LIST_OFFSET(idx) \ | 13 | #define CSTORM_ASSERT_LIST_OFFSET(idx) \ |
14 | (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) | 14 | (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) |
15 | #define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ | 15 | #define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ |
16 | (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \ | 16 | (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \ |
17 | * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \ | 17 | ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \ |
18 | * 0x4))) | 18 | 0x40) + (index * 0x4))) |
19 | #define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ | 19 | #define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ |
20 | (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \ | 20 | (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \ |
21 | * 0x100)) : (0x1900 + (function * 0x40))) | 21 | ((function&1) * 0x100)) : (0x1900 + (function * 0x40))) |
22 | #define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ | 22 | #define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ |
23 | (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \ | 23 | (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \ |
24 | * 0x100)) : (0x1908 + (function * 0x40))) | 24 | ((function&1) * 0x100)) : (0x1908 + (function * 0x40))) |
25 | #define CSTORM_FUNCTION_MODE_OFFSET \ | 25 | #define CSTORM_FUNCTION_MODE_OFFSET \ |
26 | (IS_E1H_OFFSET? 0x11e8 : 0xffffffff) | 26 | (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff) |
27 | #define CSTORM_HC_BTR_OFFSET(port) \ | 27 | #define CSTORM_HC_BTR_OFFSET(port) \ |
28 | (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) | 28 | (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) |
29 | #define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ | 29 | #define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ |
30 | (IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \ | 30 | (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \ |
31 | (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ | 31 | (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ |
32 | (index * 0x4))) | 32 | (index * 0x4))) |
33 | #define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ | 33 | #define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ |
34 | (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ | 34 | (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ |
35 | (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ | 35 | (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ |
36 | (index * 0x4))) | 36 | (index * 0x4))) |
37 | #define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ | 37 | #define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ |
38 | (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ | 38 | (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ |
39 | (0x1400 + (port * 0x280) + (cpu_id * 0x28))) | 39 | (0x1400 + (port * 0x280) + (cpu_id * 0x28))) |
40 | #define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ | 40 | #define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ |
41 | (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ | 41 | (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ |
42 | (0x1408 + (port * 0x280) + (cpu_id * 0x28))) | 42 | (0x1408 + (port * 0x280) + (cpu_id * 0x28))) |
43 | #define CSTORM_STATS_FLAGS_OFFSET(function) \ | 43 | #define CSTORM_STATS_FLAGS_OFFSET(function) \ |
44 | (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \ | 44 | (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \ |
45 | (function * 0x8))) | 45 | (function * 0x8))) |
46 | #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \ | 46 | #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \ |
47 | (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff) | 47 | (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff) |
48 | #define TSTORM_ASSERT_LIST_INDEX_OFFSET \ | 48 | #define TSTORM_ASSERT_LIST_INDEX_OFFSET \ |
49 | (IS_E1H_OFFSET? 0xa000 : 0x1000) | 49 | (IS_E1H_OFFSET ? 0xa000 : 0x1000) |
50 | #define TSTORM_ASSERT_LIST_OFFSET(idx) \ | 50 | #define TSTORM_ASSERT_LIST_OFFSET(idx) \ |
51 | (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) | 51 | (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) |
52 | #define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \ | 52 | #define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \ |
53 | (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \ | 53 | (IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \ |
54 | (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) | 54 | : (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) |
55 | #define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ | 55 | #define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ |
56 | (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \ | 56 | (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \ |
57 | * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ | 57 | ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ |
58 | 0x4))) | 58 | 0x28) + (index * 0x4))) |
59 | #define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ | 59 | #define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ |
60 | (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \ | 60 | (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \ |
61 | * 0xa0)) : (0x1400 + (function * 0x28))) | 61 | ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) |
62 | #define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ | 62 | #define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ |
63 | (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \ | 63 | (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \ |
64 | * 0xa0)) : (0x1408 + (function * 0x28))) | 64 | ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) |
65 | #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ | 65 | #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ |
66 | (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \ | 66 | (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \ |
67 | (function * 0x8))) | 67 | (function * 0x8))) |
68 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ | 68 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ |
69 | (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \ | 69 | (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \ |
70 | (function * 0x38))) | 70 | (function * 0x38))) |
71 | #define TSTORM_FUNCTION_MODE_OFFSET \ | 71 | #define TSTORM_FUNCTION_MODE_OFFSET \ |
72 | (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff) | 72 | (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff) |
73 | #define TSTORM_HC_BTR_OFFSET(port) \ | 73 | #define TSTORM_HC_BTR_OFFSET(port) \ |
74 | (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) | 74 | (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) |
75 | #define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ | 75 | #define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ |
76 | (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \ | 76 | (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \ |
77 | (function * 0x80))) | 77 | (function * 0x80))) |
78 | #define TSTORM_INDIRECTION_TABLE_SIZE 0x80 | 78 | #define TSTORM_INDIRECTION_TABLE_SIZE 0x80 |
79 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ | 79 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ |
80 | (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \ | 80 | (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \ |
81 | (function * 0x38))) | 81 | (function * 0x38))) |
82 | #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ | ||
83 | (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \ | ||
84 | 0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38))) | ||
82 | #define TSTORM_RX_PRODS_OFFSET(port, client_id) \ | 85 | #define TSTORM_RX_PRODS_OFFSET(port, client_id) \ |
83 | (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \ | 86 | (IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \ |
84 | (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) | 87 | : (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) |
85 | #define TSTORM_STATS_FLAGS_OFFSET(function) \ | 88 | #define TSTORM_STATS_FLAGS_OFFSET(function) \ |
86 | (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \ | 89 | (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \ |
87 | (function * 0x8))) | 90 | (function * 0x8))) |
88 | #define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20) | 91 | #define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20) |
89 | #define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10) | 92 | #define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10) |
90 | #define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200) | 93 | #define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200) |
91 | #define USTORM_ASSERT_LIST_INDEX_OFFSET \ | 94 | #define USTORM_ASSERT_LIST_INDEX_OFFSET \ |
92 | (IS_E1H_OFFSET? 0x8000 : 0x1000) | 95 | (IS_E1H_OFFSET ? 0x8000 : 0x1000) |
93 | #define USTORM_ASSERT_LIST_OFFSET(idx) \ | 96 | #define USTORM_ASSERT_LIST_OFFSET(idx) \ |
94 | (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) | 97 | (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) |
95 | #define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ | 98 | #define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ |
96 | (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ | 99 | (IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ |
97 | (0x5450 + (port * 0x1c8) + (clientId * 0x18))) | 100 | (0x5450 + (port * 0x1c8) + (clientId * 0x18))) |
98 | #define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ | 101 | #define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ |
99 | (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \ | 102 | (IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \ |
100 | * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \ | 103 | ((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \ |
101 | 0x4))) | 104 | 0x28) + (index * 0x4))) |
102 | #define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ | 105 | #define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ |
103 | (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \ | 106 | (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \ |
104 | * 0xa0)) : (0x1900 + (function * 0x28))) | 107 | ((function&1) * 0xa0)) : (0x1900 + (function * 0x28))) |
105 | #define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ | 108 | #define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ |
106 | (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \ | 109 | (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \ |
107 | * 0xa0)) : (0x1908 + (function * 0x28))) | 110 | ((function&1) * 0xa0)) : (0x1908 + (function * 0x28))) |
108 | #define USTORM_FUNCTION_MODE_OFFSET \ | 111 | #define USTORM_FUNCTION_MODE_OFFSET \ |
109 | (IS_E1H_OFFSET? 0x2448 : 0xffffffff) | 112 | (IS_E1H_OFFSET ? 0x2448 : 0xffffffff) |
110 | #define USTORM_HC_BTR_OFFSET(port) \ | 113 | #define USTORM_HC_BTR_OFFSET(port) \ |
111 | (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) | 114 | (IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) |
112 | #define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ | 115 | #define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ |
113 | (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ | 116 | (IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ |
114 | (0x5448 + (port * 0x1c8) + (clientId * 0x18))) | 117 | (0x5448 + (port * 0x1c8) + (clientId * 0x18))) |
115 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ | 118 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ |
116 | (IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \ | 119 | (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \ |
117 | (function * 0x8))) | 120 | (function * 0x8))) |
118 | #define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ | 121 | #define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ |
119 | (IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \ | 122 | (IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \ |
120 | (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ | 123 | (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ |
121 | (index * 0x4))) | 124 | (index * 0x4))) |
122 | #define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ | 125 | #define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ |
123 | (IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \ | 126 | (IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \ |
124 | (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ | 127 | (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ |
125 | (index * 0x4))) | 128 | (index * 0x4))) |
126 | #define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ | 129 | #define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ |
127 | (IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \ | 130 | (IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \ |
128 | (0x1400 + (port * 0x280) + (cpu_id * 0x28))) | 131 | (0x1400 + (port * 0x280) + (cpu_id * 0x28))) |
129 | #define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ | 132 | #define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ |
130 | (IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \ | 133 | (IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \ |
131 | (0x1408 + (port * 0x280) + (cpu_id * 0x28))) | 134 | (0x1408 + (port * 0x280) + (cpu_id * 0x28))) |
132 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET \ | 135 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET \ |
133 | (IS_E1H_OFFSET? 0x9000 : 0x1000) | 136 | (IS_E1H_OFFSET ? 0x9000 : 0x1000) |
134 | #define XSTORM_ASSERT_LIST_OFFSET(idx) \ | 137 | #define XSTORM_ASSERT_LIST_OFFSET(idx) \ |
135 | (IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) | 138 | (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) |
136 | #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ | 139 | #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ |
137 | (IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40))) | 140 | (IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40))) |
138 | #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ | 141 | #define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ |
139 | (IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \ | 142 | (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \ |
140 | * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ | 143 | ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ |
141 | 0x4))) | 144 | 0x28) + (index * 0x4))) |
142 | #define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ | 145 | #define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ |
143 | (IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \ | 146 | (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \ |
144 | * 0xa0)) : (0x1400 + (function * 0x28))) | 147 | ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) |
145 | #define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ | 148 | #define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ |
146 | (IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \ | 149 | (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \ |
147 | * 0xa0)) : (0x1408 + (function * 0x28))) | 150 | ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) |
148 | #define XSTORM_E1HOV_OFFSET(function) \ | 151 | #define XSTORM_E1HOV_OFFSET(function) \ |
149 | (IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff) | 152 | (IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff) |
150 | #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ | 153 | #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ |
151 | (IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \ | 154 | (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \ |
152 | (function * 0x8))) | 155 | (function * 0x8))) |
153 | #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ | 156 | #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ |
154 | (IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \ | 157 | (IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \ |
155 | (function * 0x70))) | 158 | (function * 0x70))) |
156 | #define XSTORM_FUNCTION_MODE_OFFSET \ | 159 | #define XSTORM_FUNCTION_MODE_OFFSET \ |
157 | (IS_E1H_OFFSET? 0x2ac8 : 0xffffffff) | 160 | (IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff) |
158 | #define XSTORM_HC_BTR_OFFSET(port) \ | 161 | #define XSTORM_HC_BTR_OFFSET(port) \ |
159 | (IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) | 162 | (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) |
163 | #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ | ||
164 | (IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \ | ||
165 | 0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38))) | ||
160 | #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \ | 166 | #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \ |
161 | (IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \ | 167 | (IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \ |
162 | (function * 0x70))) | 168 | (function * 0x70))) |
163 | #define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \ | 169 | #define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \ |
164 | (IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \ | 170 | (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \ |
165 | (function * 0x10))) | 171 | (function * 0x10))) |
166 | #define XSTORM_SPQ_PROD_OFFSET(function) \ | 172 | #define XSTORM_SPQ_PROD_OFFSET(function) \ |
167 | (IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \ | 173 | (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \ |
168 | (function * 0x10))) | 174 | (function * 0x10))) |
169 | #define XSTORM_STATS_FLAGS_OFFSET(function) \ | 175 | #define XSTORM_STATS_FLAGS_OFFSET(function) \ |
170 | (IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \ | 176 | (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \ |
171 | (function * 0x8))) | 177 | (function * 0x8))) |
172 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 | 178 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 |
173 | 179 | ||
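Every offset in this table follows the same shape: IS_E1H_OFFSET selects between the E1H and E1 memory maps, and the port/function/index arguments are folded in with fixed strides. A minimal standalone sketch using one macro copied from the table (CSTORM_STATS_FLAGS_OFFSET) and a plain integer flag standing in for the driver's IS_E1H_OFFSET, whose definition is not part of this hunk:

#include <stdio.h>

/* plain flag standing in for the driver's IS_E1H_OFFSET (defined elsewhere) */
static int is_e1h;
#define IS_E1H_OFFSET is_e1h

/* copied from the table above: one slot per function, 8-byte stride */
#define CSTORM_STATS_FLAGS_OFFSET(function) \
	(IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
	(function * 0x8)))

int main(void)
{
	int func;

	for (is_e1h = 0; is_e1h <= 1; is_e1h++)
		for (func = 0; func < 2; func++)
			printf("%s function %d -> offset 0x%x\n",
			       is_e1h ? "E1H" : "E1", func,
			       CSTORM_STATS_FLAGS_OFFSET(func));
	return 0;
}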
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h index d3e8198d7dba..efd764427fa1 100644 --- a/drivers/net/bnx2x_hsi.h +++ b/drivers/net/bnx2x_hsi.h | |||
@@ -1268,7 +1268,7 @@ struct doorbell { | |||
1268 | 1268 | ||
1269 | 1269 | ||
1270 | /* | 1270 | /* |
1271 | * IGU driver acknowlegement register | 1271 | * IGU driver acknowledgement register |
1272 | */ | 1272 | */ |
1273 | struct igu_ack_register { | 1273 | struct igu_ack_register { |
1274 | #if defined(__BIG_ENDIAN) | 1274 | #if defined(__BIG_ENDIAN) |
@@ -1882,7 +1882,7 @@ struct timers_block_context { | |||
1882 | }; | 1882 | }; |
1883 | 1883 | ||
1884 | /* | 1884 | /* |
1885 | * structure for easy accessability to assembler | 1885 | * structure for easy accessibility to assembler |
1886 | */ | 1886 | */ |
1887 | struct eth_tx_bd_flags { | 1887 | struct eth_tx_bd_flags { |
1888 | u8 as_bitfield; | 1888 | u8 as_bitfield; |
@@ -2044,7 +2044,7 @@ struct eth_context { | |||
2044 | 2044 | ||
2045 | 2045 | ||
2046 | /* | 2046 | /* |
2047 | * ethernet doorbell | 2047 | * Ethernet doorbell |
2048 | */ | 2048 | */ |
2049 | struct eth_tx_doorbell { | 2049 | struct eth_tx_doorbell { |
2050 | #if defined(__BIG_ENDIAN) | 2050 | #if defined(__BIG_ENDIAN) |
@@ -2256,7 +2256,7 @@ struct ramrod_data { | |||
2256 | }; | 2256 | }; |
2257 | 2257 | ||
2258 | /* | 2258 | /* |
2259 | * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits) | 2259 | * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits) |
2260 | */ | 2260 | */ |
2261 | union eth_ramrod_data { | 2261 | union eth_ramrod_data { |
2262 | struct ramrod_data general; | 2262 | struct ramrod_data general; |
@@ -2330,7 +2330,7 @@ struct spe_hdr { | |||
2330 | }; | 2330 | }; |
2331 | 2331 | ||
2332 | /* | 2332 | /* |
2333 | * ethernet slow path element | 2333 | * Ethernet slow path element |
2334 | */ | 2334 | */ |
2335 | union eth_specific_data { | 2335 | union eth_specific_data { |
2336 | u8 protocol_data[8]; | 2336 | u8 protocol_data[8]; |
@@ -2343,7 +2343,7 @@ union eth_specific_data { | |||
2343 | }; | 2343 | }; |
2344 | 2344 | ||
2345 | /* | 2345 | /* |
2346 | * ethernet slow path element | 2346 | * Ethernet slow path element |
2347 | */ | 2347 | */ |
2348 | struct eth_spe { | 2348 | struct eth_spe { |
2349 | struct spe_hdr hdr; | 2349 | struct spe_hdr hdr; |
@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers { | |||
2615 | 2615 | ||
2616 | 2616 | ||
2617 | /* | 2617 | /* |
2618 | * common flag to indicate existance of TPA. | 2618 | * common flag to indicate existence of TPA. |
2619 | */ | 2619 | */ |
2620 | struct tstorm_eth_tpa_exist { | 2620 | struct tstorm_eth_tpa_exist { |
2621 | #if defined(__BIG_ENDIAN) | 2621 | #if defined(__BIG_ENDIAN) |
@@ -2765,7 +2765,7 @@ struct tstorm_common_stats { | |||
2765 | }; | 2765 | }; |
2766 | 2766 | ||
2767 | /* | 2767 | /* |
2768 | * Eth statistics query sturcture for the eth_stats_quesry ramrod | 2768 | * Eth statistics query structure for the eth_stats_query ramrod |
2769 | */ | 2769 | */ |
2770 | struct eth_stats_query { | 2770 | struct eth_stats_query { |
2771 | struct xstorm_common_stats xstorm_common; | 2771 | struct xstorm_common_stats xstorm_common; |
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h index 4c7750789b62..130927cfc75b 100644 --- a/drivers/net/bnx2x_init.h +++ b/drivers/net/bnx2x_init.h | |||
@@ -72,26 +72,26 @@ | |||
72 | 72 | ||
73 | 73 | ||
74 | struct raw_op { | 74 | struct raw_op { |
75 | u32 op :8; | 75 | u32 op:8; |
76 | u32 offset :24; | 76 | u32 offset:24; |
77 | u32 raw_data; | 77 | u32 raw_data; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct op_read { | 80 | struct op_read { |
81 | u32 op :8; | 81 | u32 op:8; |
82 | u32 offset :24; | 82 | u32 offset:24; |
83 | u32 pad; | 83 | u32 pad; |
84 | }; | 84 | }; |
85 | 85 | ||
86 | struct op_write { | 86 | struct op_write { |
87 | u32 op :8; | 87 | u32 op:8; |
88 | u32 offset :24; | 88 | u32 offset:24; |
89 | u32 val; | 89 | u32 val; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | struct op_string_write { | 92 | struct op_string_write { |
93 | u32 op :8; | 93 | u32 op:8; |
94 | u32 offset :24; | 94 | u32 offset:24; |
95 | #ifdef __LITTLE_ENDIAN | 95 | #ifdef __LITTLE_ENDIAN |
96 | u16 data_off; | 96 | u16 data_off; |
97 | u16 data_len; | 97 | u16 data_len; |
@@ -102,8 +102,8 @@ struct op_string_write { | |||
102 | }; | 102 | }; |
103 | 103 | ||
104 | struct op_zero { | 104 | struct op_zero { |
105 | u32 op :8; | 105 | u32 op:8; |
106 | u32 offset :24; | 106 | u32 offset:24; |
107 | u32 len; | 107 | u32 len; |
108 | }; | 108 | }; |
109 | 109 | ||
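The :8/:24 pairs above pack an opcode and a 24-bit offset into the first 32-bit word of each init-table entry; the patch only drops the space before the colon, the layout itself is unchanged. A small standalone sketch of the same packing (uint32_t standing in for the kernel's u32):

#include <stdio.h>
#include <stdint.h>

/* same layout as struct raw_op above */
struct raw_op {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t raw_data;
};

int main(void)
{
	struct raw_op entry = { .op = 0x5, .offset = 0x123456, .raw_data = 0xcafe };

	/* the two bit-fields share one 32-bit word, so the entry is two words */
	printf("sizeof(struct raw_op) = %zu bytes\n", sizeof(entry));
	printf("op = 0x%x, offset = 0x%x, raw_data = 0x%x\n",
	       (unsigned int)entry.op, (unsigned int)entry.offset,
	       (unsigned int)entry.raw_data);
	return 0;
}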
@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, | |||
208 | /********************************************************* | 208 | /********************************************************* |
209 | There are different blobs for each PRAM section. | 209 | There are different blobs for each PRAM section. |
210 | In addition, each blob write operation is divided into a few operations | 210 | In addition, each blob write operation is divided into a few operations |
211 | in order to decrease the amount of phys. contigious buffer needed. | 211 | in order to decrease the amount of phys. contiguous buffer needed. |
212 | Thus, when we select a blob the address may be with some offset | 212 | Thus, when we select a blob the address may be with some offset |
213 | from the beginning of PRAM section. | 213 | from the beginning of PRAM section. |
214 | The same holds for the INT_TABLE sections. | 214 | The same holds for the INT_TABLE sections. |
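As a rough sketch of the splitting described in this comment (not the driver's code; the chunk limit and helper below are made up for illustration), a blob write bounded to fixed-size pieces looks like:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical per-operation limit, in 32-bit words; the real limit differs */
#define CHUNK_MAX_LEN32	64

/* stand-in for the hardware write; the driver DMAs each piece into PRAM */
static void hw_write_block(uint32_t addr, const uint32_t *data, size_t len32)
{
	printf("write %zu words at 0x%x (first word 0x%x)\n",
	       len32, addr, data[0]);
}

/*
 * Write one blob in bounded pieces so the contiguous buffer never has to
 * exceed one chunk; the starting address may sit at an offset into the
 * PRAM section, as the comment above notes.
 */
static void write_blob(uint32_t section_base, uint32_t byte_offset,
		       const uint32_t *blob, size_t len32)
{
	while (len32) {
		size_t chunk = len32 > CHUNK_MAX_LEN32 ? CHUNK_MAX_LEN32 : len32;

		hw_write_block(section_base + byte_offset, blob, chunk);
		blob += chunk;
		byte_offset += chunk * sizeof(uint32_t);
		len32 -= chunk;
	}
}

int main(void)
{
	uint32_t blob[200] = { 0x11 };

	write_blob(0x10000, 0x40, blob, 200);
	return 0;
}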
@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end) | |||
336 | len = op->str_wr.data_len; | 336 | len = op->str_wr.data_len; |
337 | data = data_base + op->str_wr.data_off; | 337 | data = data_base + op->str_wr.data_off; |
338 | 338 | ||
339 | /* carefull! it must be in order */ | 339 | /* careful! it must be in order */ |
340 | if (unlikely(op_type > OP_WB)) { | 340 | if (unlikely(op_type > OP_WB)) { |
341 | 341 | ||
342 | /* If E1 only */ | 342 | /* If E1 only */ |
@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc) | |||
740 | return crc_res; | 740 | return crc_res; |
741 | } | 741 | } |
742 | 742 | ||
743 | /* regiesers addresses are not in order | 743 | /* registers addresses are not in order |
744 | so these arrays help simplify the code */ | 744 | so these arrays help simplify the code */ |
745 | static const int cm_start[E1H_FUNC_MAX][9] = { | 745 | static const int cm_start[E1H_FUNC_MAX][9] = { |
746 | {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START, | 746 | {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START, |
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h index 63019055e4bb..9755bf6b08dd 100644 --- a/drivers/net/bnx2x_init_values.h +++ b/drivers/net/bnx2x_init_values.h | |||
@@ -901,31 +901,28 @@ static const struct raw_op init_ops[] = { | |||
901 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4}, | 901 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4}, |
902 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42}, | 902 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42}, |
903 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9}, | 903 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9}, |
904 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400}, | 904 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2}, |
905 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293}, | 905 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293}, |
906 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2}, | 906 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278}, |
907 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42}, | 907 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42}, |
908 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278}, | 908 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2}, |
909 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400}, | 909 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400}, |
910 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2}, | 910 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a}, |
911 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2}, | 911 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2}, |
912 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a}, | ||
913 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294}, | 912 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294}, |
914 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2}, | ||
915 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2}, | 913 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2}, |
916 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c}, | ||
917 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296}, | 914 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296}, |
918 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2}, | 915 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2}, |
919 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298}, | 916 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298}, |
920 | {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, | 917 | {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, |
921 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e}, | 918 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c}, |
922 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a}, | 919 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a}, |
923 | {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0}, | 920 | {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0}, |
924 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e}, | 921 | {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c}, |
925 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa}, | 922 | {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa}, |
926 | {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000}, | 923 | {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000}, |
927 | {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000}, | 924 | {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000}, |
928 | {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e}, | 925 | {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c}, |
929 | {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba}, | 926 | {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba}, |
930 | {OP_ZP_E1, USEM_REG_PRAM, 0x311c0000}, | 927 | {OP_ZP_E1, USEM_REG_PRAM, 0x311c0000}, |
931 | {OP_ZP_E1H, USEM_REG_PRAM, 0x31070000}, | 928 | {OP_ZP_E1H, USEM_REG_PRAM, 0x31070000}, |
@@ -933,11 +930,11 @@ static const struct raw_op init_ops[] = { | |||
933 | {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42}, | 930 | {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42}, |
934 | {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919}, | 931 | {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919}, |
935 | {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906}, | 932 | {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906}, |
936 | {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0}, | 933 | {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e}, |
937 | {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d}, | 934 | {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d}, |
938 | {OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc}, | 935 | {OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc}, |
939 | #define USEM_COMMON_END 790 | 936 | #define USEM_COMMON_END 787 |
940 | #define USEM_PORT0_START 790 | 937 | #define USEM_PORT0_START 787 |
941 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0}, | 938 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0}, |
942 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0}, | 939 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0}, |
943 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa}, | 940 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa}, |
@@ -950,44 +947,27 @@ static const struct raw_op init_ops[] = { | |||
950 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96}, | 947 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96}, |
951 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72}, | 948 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72}, |
952 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20}, | 949 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20}, |
953 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20}, | 950 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52}, |
954 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20}, | 951 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20}, |
955 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20}, | 952 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc}, |
956 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20}, | 953 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20}, |
957 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20}, | ||
958 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20}, | 954 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20}, |
959 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20}, | ||
960 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20}, | 955 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20}, |
961 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20}, | ||
962 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20}, | 956 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20}, |
963 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20}, | ||
964 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20}, | 957 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20}, |
965 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20}, | ||
966 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20}, | 958 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20}, |
967 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20}, | ||
968 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20}, | 959 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20}, |
969 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20}, | ||
970 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20}, | 960 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20}, |
971 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20}, | ||
972 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20}, | 961 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20}, |
973 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20}, | ||
974 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20}, | 962 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20}, |
975 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20}, | ||
976 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20}, | 963 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20}, |
977 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20}, | ||
978 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20}, | 964 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20}, |
979 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20}, | ||
980 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20}, | 965 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20}, |
981 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20}, | ||
982 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20}, | 966 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20}, |
983 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20}, | ||
984 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52}, | 967 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52}, |
985 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2}, | ||
986 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc}, | 968 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc}, |
987 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52}, | 969 | #define USEM_PORT0_END 818 |
988 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc}, | 970 | #define USEM_PORT1_START 818 |
989 | #define USEM_PORT0_END 838 | ||
990 | #define USEM_PORT1_START 838 | ||
991 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0}, | 971 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0}, |
992 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0}, | 972 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0}, |
993 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa}, | 973 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa}, |
@@ -1000,76 +980,59 @@ static const struct raw_op init_ops[] = { | |||
1000 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96}, | 980 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96}, |
1001 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72}, | 981 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72}, |
1002 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20}, | 982 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20}, |
1003 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20}, | 983 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52}, |
1004 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20}, | 984 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20}, |
1005 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20}, | 985 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc}, |
1006 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20}, | 986 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20}, |
1007 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20}, | ||
1008 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20}, | 987 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20}, |
1009 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20}, | ||
1010 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20}, | 988 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20}, |
1011 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20}, | ||
1012 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20}, | 989 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20}, |
1013 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20}, | ||
1014 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20}, | 990 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20}, |
1015 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20}, | ||
1016 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20}, | 991 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20}, |
1017 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20}, | ||
1018 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20}, | 992 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20}, |
1019 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20}, | ||
1020 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20}, | 993 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20}, |
1021 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20}, | ||
1022 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20}, | 994 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20}, |
1023 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20}, | ||
1024 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20}, | 995 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20}, |
1025 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20}, | ||
1026 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20}, | 996 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20}, |
1027 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20}, | ||
1028 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20}, | 997 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20}, |
1029 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20}, | ||
1030 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20}, | 998 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20}, |
1031 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20}, | ||
1032 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20}, | 999 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20}, |
1033 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20}, | ||
1034 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52}, | 1000 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52}, |
1035 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2}, | ||
1036 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc}, | 1001 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc}, |
1037 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52}, | 1002 | #define USEM_PORT1_END 849 |
1038 | {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc}, | 1003 | #define USEM_FUNC0_START 849 |
1039 | #define USEM_PORT1_END 886 | ||
1040 | #define USEM_FUNC0_START 886 | ||
1041 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4}, | 1004 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4}, |
1042 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2}, | 1005 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2}, |
1043 | #define USEM_FUNC0_END 888 | 1006 | #define USEM_FUNC0_END 851 |
1044 | #define USEM_FUNC1_START 888 | 1007 | #define USEM_FUNC1_START 851 |
1045 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4}, | 1008 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4}, |
1046 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2}, | 1009 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2}, |
1047 | #define USEM_FUNC1_END 890 | 1010 | #define USEM_FUNC1_END 853 |
1048 | #define USEM_FUNC2_START 890 | 1011 | #define USEM_FUNC2_START 853 |
1049 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4}, | 1012 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4}, |
1050 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2}, | 1013 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2}, |
1051 | #define USEM_FUNC2_END 892 | 1014 | #define USEM_FUNC2_END 855 |
1052 | #define USEM_FUNC3_START 892 | 1015 | #define USEM_FUNC3_START 855 |
1053 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4}, | 1016 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4}, |
1054 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2}, | 1017 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2}, |
1055 | #define USEM_FUNC3_END 894 | 1018 | #define USEM_FUNC3_END 857 |
1056 | #define USEM_FUNC4_START 894 | 1019 | #define USEM_FUNC4_START 857 |
1057 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4}, | 1020 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4}, |
1058 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2}, | 1021 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2}, |
1059 | #define USEM_FUNC4_END 896 | 1022 | #define USEM_FUNC4_END 859 |
1060 | #define USEM_FUNC5_START 896 | 1023 | #define USEM_FUNC5_START 859 |
1061 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4}, | 1024 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4}, |
1062 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2}, | 1025 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2}, |
1063 | #define USEM_FUNC5_END 898 | 1026 | #define USEM_FUNC5_END 861 |
1064 | #define USEM_FUNC6_START 898 | 1027 | #define USEM_FUNC6_START 861 |
1065 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4}, | 1028 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4}, |
1066 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2}, | 1029 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2}, |
1067 | #define USEM_FUNC6_END 900 | 1030 | #define USEM_FUNC6_END 863 |
1068 | #define USEM_FUNC7_START 900 | 1031 | #define USEM_FUNC7_START 863 |
1069 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4}, | 1032 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4}, |
1070 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2}, | 1033 | {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2}, |
1071 | #define USEM_FUNC7_END 902 | 1034 | #define USEM_FUNC7_END 865 |
1072 | #define CSEM_COMMON_START 902 | 1035 | #define CSEM_COMMON_START 865 |
1073 | {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0}, | 1036 | {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0}, |
1074 | {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0}, | 1037 | {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0}, |
1075 | {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0}, | 1038 | {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0}, |
@@ -1128,29 +1091,29 @@ static const struct raw_op init_ops[] = { | |||
1128 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0}, | 1091 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0}, |
1129 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240}, | 1092 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240}, |
1130 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0}, | 1093 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0}, |
1131 | {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2}, | 1094 | {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0}, |
1132 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80}, | 1095 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80}, |
1133 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4}, | 1096 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4}, |
1134 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240}, | 1097 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240}, |
1135 | {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be}, | 1098 | {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be}, |
1136 | {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff}, | 1099 | {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff}, |
1137 | {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa}, | 1100 | {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8}, |
1138 | {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de}, | 1101 | {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de}, |
1139 | {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0}, | 1102 | {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0}, |
1140 | {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba}, | 1103 | {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8}, |
1141 | {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee}, | 1104 | {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee}, |
1142 | {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000}, | 1105 | {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000}, |
1143 | {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000}, | 1106 | {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000}, |
1144 | {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca}, | 1107 | {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8}, |
1145 | {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe}, | 1108 | {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe}, |
1146 | {OP_ZP_E1, CSEM_REG_PRAM, 0x32580000}, | 1109 | {OP_ZP_E1, CSEM_REG_PRAM, 0x32580000}, |
1147 | {OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000}, | 1110 | {OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000}, |
1148 | {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96}, | 1111 | {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96}, |
1149 | {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f}, | 1112 | {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f}, |
1150 | {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc}, | 1113 | {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca}, |
1151 | {OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300}, | 1114 | {OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300}, |
1152 | #define CSEM_COMMON_END 981 | 1115 | #define CSEM_COMMON_END 944 |
1153 | #define CSEM_PORT0_START 981 | 1116 | #define CSEM_PORT0_START 944 |
1154 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0}, | 1117 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0}, |
1155 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0}, | 1118 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0}, |
1156 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10}, | 1119 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10}, |
@@ -1163,8 +1126,8 @@ static const struct raw_op init_ops[] = { | |||
1163 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30}, | 1126 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30}, |
1164 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6}, | 1127 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6}, |
1165 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30}, | 1128 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30}, |
1166 | #define CSEM_PORT0_END 993 | 1129 | #define CSEM_PORT0_END 956 |
1167 | #define CSEM_PORT1_START 993 | 1130 | #define CSEM_PORT1_START 956 |
1168 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0}, | 1131 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0}, |
1169 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0}, | 1132 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0}, |
1170 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10}, | 1133 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10}, |
@@ -1177,43 +1140,43 @@ static const struct raw_op init_ops[] = { | |||
1177 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30}, | 1140 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30}, |
1178 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6}, | 1141 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6}, |
1179 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30}, | 1142 | {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30}, |
1180 | #define CSEM_PORT1_END 1005 | 1143 | #define CSEM_PORT1_END 968 |
1181 | #define CSEM_FUNC0_START 1005 | 1144 | #define CSEM_FUNC0_START 968 |
1182 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0}, | 1145 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0}, |
1183 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2}, | 1146 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2}, |
1184 | #define CSEM_FUNC0_END 1007 | 1147 | #define CSEM_FUNC0_END 970 |
1185 | #define CSEM_FUNC1_START 1007 | 1148 | #define CSEM_FUNC1_START 970 |
1186 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0}, | 1149 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0}, |
1187 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2}, | 1150 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2}, |
1188 | #define CSEM_FUNC1_END 1009 | 1151 | #define CSEM_FUNC1_END 972 |
1189 | #define CSEM_FUNC2_START 1009 | 1152 | #define CSEM_FUNC2_START 972 |
1190 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0}, | 1153 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0}, |
1191 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2}, | 1154 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2}, |
1192 | #define CSEM_FUNC2_END 1011 | 1155 | #define CSEM_FUNC2_END 974 |
1193 | #define CSEM_FUNC3_START 1011 | 1156 | #define CSEM_FUNC3_START 974 |
1194 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0}, | 1157 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0}, |
1195 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2}, | 1158 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2}, |
1196 | #define CSEM_FUNC3_END 1013 | 1159 | #define CSEM_FUNC3_END 976 |
1197 | #define CSEM_FUNC4_START 1013 | 1160 | #define CSEM_FUNC4_START 976 |
1198 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0}, | 1161 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0}, |
1199 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2}, | 1162 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2}, |
1200 | #define CSEM_FUNC4_END 1015 | 1163 | #define CSEM_FUNC4_END 978 |
1201 | #define CSEM_FUNC5_START 1015 | 1164 | #define CSEM_FUNC5_START 978 |
1202 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0}, | 1165 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0}, |
1203 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2}, | 1166 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2}, |
1204 | #define CSEM_FUNC5_END 1017 | 1167 | #define CSEM_FUNC5_END 980 |
1205 | #define CSEM_FUNC6_START 1017 | 1168 | #define CSEM_FUNC6_START 980 |
1206 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0}, | 1169 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0}, |
1207 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2}, | 1170 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2}, |
1208 | #define CSEM_FUNC6_END 1019 | 1171 | #define CSEM_FUNC6_END 982 |
1209 | #define CSEM_FUNC7_START 1019 | 1172 | #define CSEM_FUNC7_START 982 |
1210 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0}, | 1173 | {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0}, |
1211 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2}, | 1174 | {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2}, |
1212 | #define CSEM_FUNC7_END 1021 | 1175 | #define CSEM_FUNC7_END 984 |
1213 | #define XPB_COMMON_START 1021 | 1176 | #define XPB_COMMON_START 984 |
1214 | {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20}, | 1177 | {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20}, |
1215 | #define XPB_COMMON_END 1022 | 1178 | #define XPB_COMMON_END 985 |
1216 | #define DQ_COMMON_START 1022 | 1179 | #define DQ_COMMON_START 985 |
1217 | {OP_WR, DORQ_REG_MODE_ACT, 0x2}, | 1180 | {OP_WR, DORQ_REG_MODE_ACT, 0x2}, |
1218 | {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3}, | 1181 | {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3}, |
1219 | {OP_WR, DORQ_REG_OUTST_REQ, 0x4}, | 1182 | {OP_WR, DORQ_REG_OUTST_REQ, 0x4}, |
@@ -1232,8 +1195,8 @@ static const struct raw_op init_ops[] = { | |||
1232 | {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c}, | 1195 | {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c}, |
1233 | {OP_WR, DORQ_REG_REGN, 0x7c1004}, | 1196 | {OP_WR, DORQ_REG_REGN, 0x7c1004}, |
1234 | {OP_WR, DORQ_REG_IF_EN, 0xf}, | 1197 | {OP_WR, DORQ_REG_IF_EN, 0xf}, |
1235 | #define DQ_COMMON_END 1040 | 1198 | #define DQ_COMMON_END 1003 |
1236 | #define TIMERS_COMMON_START 1040 | 1199 | #define TIMERS_COMMON_START 1003 |
1237 | {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2}, | 1200 | {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2}, |
1238 | {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c}, | 1201 | {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c}, |
1239 | {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1}, | 1202 | {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1}, |
@@ -1256,14 +1219,14 @@ static const struct raw_op init_ops[] = { | |||
1256 | {OP_WR, TM_REG_EN_CL0_INPUT, 0x1}, | 1219 | {OP_WR, TM_REG_EN_CL0_INPUT, 0x1}, |
1257 | {OP_WR, TM_REG_EN_CL1_INPUT, 0x1}, | 1220 | {OP_WR, TM_REG_EN_CL1_INPUT, 0x1}, |
1258 | {OP_WR, TM_REG_EN_CL2_INPUT, 0x1}, | 1221 | {OP_WR, TM_REG_EN_CL2_INPUT, 0x1}, |
1259 | #define TIMERS_COMMON_END 1062 | 1222 | #define TIMERS_COMMON_END 1025 |
1260 | #define TIMERS_PORT0_START 1062 | 1223 | #define TIMERS_PORT0_START 1025 |
1261 | {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2}, | 1224 | {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2}, |
1262 | #define TIMERS_PORT0_END 1063 | 1225 | #define TIMERS_PORT0_END 1026 |
1263 | #define TIMERS_PORT1_START 1063 | 1226 | #define TIMERS_PORT1_START 1026 |
1264 | {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2}, | 1227 | {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2}, |
1265 | #define TIMERS_PORT1_END 1064 | 1228 | #define TIMERS_PORT1_END 1027 |
1266 | #define XSDM_COMMON_START 1064 | 1229 | #define XSDM_COMMON_START 1027 |
1267 | {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614}, | 1230 | {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614}, |
1268 | {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424}, | 1231 | {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424}, |
1269 | {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600}, | 1232 | {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600}, |
@@ -1311,8 +1274,8 @@ static const struct raw_op init_ops[] = { | |||
1311 | {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8}, | 1274 | {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8}, |
1312 | {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1}, | 1275 | {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1}, |
1313 | {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa}, | 1276 | {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa}, |
1314 | #define XSDM_COMMON_END 1111 | 1277 | #define XSDM_COMMON_END 1074 |
1315 | #define QM_COMMON_START 1111 | 1278 | #define QM_COMMON_START 1074 |
1316 | {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6}, | 1279 | {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6}, |
1317 | {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5}, | 1280 | {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5}, |
1318 | {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa}, | 1281 | {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa}, |
@@ -1613,8 +1576,8 @@ static const struct raw_op init_ops[] = { | |||
1613 | {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5}, | 1576 | {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5}, |
1614 | {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7}, | 1577 | {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7}, |
1615 | {OP_WR, QM_REG_CMINTEN, 0xff}, | 1578 | {OP_WR, QM_REG_CMINTEN, 0xff}, |
1616 | #define QM_COMMON_END 1411 | 1579 | #define QM_COMMON_END 1374 |
1617 | #define PBF_COMMON_START 1411 | 1580 | #define PBF_COMMON_START 1374 |
1618 | {OP_WR, PBF_REG_INIT, 0x1}, | 1581 | {OP_WR, PBF_REG_INIT, 0x1}, |
1619 | {OP_WR, PBF_REG_INIT_P4, 0x1}, | 1582 | {OP_WR, PBF_REG_INIT_P4, 0x1}, |
1620 | {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1}, | 1583 | {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1}, |
@@ -1622,20 +1585,20 @@ static const struct raw_op init_ops[] = { | |||
1622 | {OP_WR, PBF_REG_INIT_P4, 0x0}, | 1585 | {OP_WR, PBF_REG_INIT_P4, 0x0}, |
1623 | {OP_WR, PBF_REG_INIT, 0x0}, | 1586 | {OP_WR, PBF_REG_INIT, 0x0}, |
1624 | {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0}, | 1587 | {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0}, |
1625 | #define PBF_COMMON_END 1418 | 1588 | #define PBF_COMMON_END 1381 |
1626 | #define PBF_PORT0_START 1418 | 1589 | #define PBF_PORT0_START 1381 |
1627 | {OP_WR, PBF_REG_INIT_P0, 0x1}, | 1590 | {OP_WR, PBF_REG_INIT_P0, 0x1}, |
1628 | {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1}, | 1591 | {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1}, |
1629 | {OP_WR, PBF_REG_INIT_P0, 0x0}, | 1592 | {OP_WR, PBF_REG_INIT_P0, 0x0}, |
1630 | {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0}, | 1593 | {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0}, |
1631 | #define PBF_PORT0_END 1422 | 1594 | #define PBF_PORT0_END 1385 |
1632 | #define PBF_PORT1_START 1422 | 1595 | #define PBF_PORT1_START 1385 |
1633 | {OP_WR, PBF_REG_INIT_P1, 0x1}, | 1596 | {OP_WR, PBF_REG_INIT_P1, 0x1}, |
1634 | {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1}, | 1597 | {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1}, |
1635 | {OP_WR, PBF_REG_INIT_P1, 0x0}, | 1598 | {OP_WR, PBF_REG_INIT_P1, 0x0}, |
1636 | {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0}, | 1599 | {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0}, |
1637 | #define PBF_PORT1_END 1426 | 1600 | #define PBF_PORT1_END 1389 |
1638 | #define XCM_COMMON_START 1426 | 1601 | #define XCM_COMMON_START 1389 |
1639 | {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32}, | 1602 | {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32}, |
1640 | {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020}, | 1603 | {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020}, |
1641 | {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020}, | 1604 | {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020}, |
@@ -1670,7 +1633,7 @@ static const struct raw_op init_ops[] = { | |||
1670 | {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f}, | 1633 | {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f}, |
1671 | {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20}, | 1634 | {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20}, |
1672 | {OP_ZR, XCM_REG_XX_TABLE, 0x12}, | 1635 | {OP_ZR, XCM_REG_XX_TABLE, 0x12}, |
1673 | {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce}, | 1636 | {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc}, |
1674 | {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302}, | 1637 | {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302}, |
1675 | {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf}, | 1638 | {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf}, |
1676 | {OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7}, | 1639 | {OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7}, |
@@ -1700,8 +1663,8 @@ static const struct raw_op init_ops[] = { | |||
1700 | {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1}, | 1663 | {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1}, |
1701 | {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1}, | 1664 | {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1}, |
1702 | {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1}, | 1665 | {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1}, |
1703 | #define XCM_COMMON_END 1490 | 1666 | #define XCM_COMMON_END 1453 |
1704 | #define XCM_PORT0_START 1490 | 1667 | #define XCM_PORT0_START 1453 |
1705 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, | 1668 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, |
1706 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, | 1669 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, |
1707 | {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, | 1670 | {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, |
@@ -1710,8 +1673,8 @@ static const struct raw_op init_ops[] = { | |||
1710 | {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2}, | 1673 | {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2}, |
1711 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, | 1674 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, |
1712 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, | 1675 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, |
1713 | #define XCM_PORT0_END 1498 | 1676 | #define XCM_PORT0_END 1461 |
1714 | #define XCM_PORT1_START 1498 | 1677 | #define XCM_PORT1_START 1461 |
1715 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, | 1678 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, |
1716 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, | 1679 | {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, |
1717 | {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, | 1680 | {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, |
@@ -1720,8 +1683,8 @@ static const struct raw_op init_ops[] = { | |||
1720 | {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2}, | 1683 | {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2}, |
1721 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, | 1684 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, |
1722 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, | 1685 | {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, |
1723 | #define XCM_PORT1_END 1506 | 1686 | #define XCM_PORT1_END 1469 |
1724 | #define XCM_FUNC0_START 1506 | 1687 | #define XCM_FUNC0_START 1469 |
1725 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, | 1688 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, |
1726 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, | 1689 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, |
1727 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, | 1690 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, |
@@ -1731,8 +1694,8 @@ static const struct raw_op init_ops[] = { | |||
1731 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, | 1694 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, |
1732 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, | 1695 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, |
1733 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, | 1696 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, |
1734 | #define XCM_FUNC0_END 1515 | 1697 | #define XCM_FUNC0_END 1478 |
1735 | #define XCM_FUNC1_START 1515 | 1698 | #define XCM_FUNC1_START 1478 |
1736 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, | 1699 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, |
1737 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, | 1700 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, |
1738 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, | 1701 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, |
@@ -1742,8 +1705,8 @@ static const struct raw_op init_ops[] = { | |||
1742 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, | 1705 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, |
1743 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, | 1706 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, |
1744 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, | 1707 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, |
1745 | #define XCM_FUNC1_END 1524 | 1708 | #define XCM_FUNC1_END 1487 |
1746 | #define XCM_FUNC2_START 1524 | 1709 | #define XCM_FUNC2_START 1487 |
1747 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, | 1710 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, |
1748 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, | 1711 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, |
1749 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, | 1712 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, |
@@ -1753,8 +1716,8 @@ static const struct raw_op init_ops[] = { | |||
1753 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, | 1716 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, |
1754 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, | 1717 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, |
1755 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, | 1718 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, |
1756 | #define XCM_FUNC2_END 1533 | 1719 | #define XCM_FUNC2_END 1496 |
1757 | #define XCM_FUNC3_START 1533 | 1720 | #define XCM_FUNC3_START 1496 |
1758 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, | 1721 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, |
1759 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, | 1722 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, |
1760 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, | 1723 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, |
@@ -1764,8 +1727,8 @@ static const struct raw_op init_ops[] = { | |||
1764 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, | 1727 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, |
1765 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, | 1728 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, |
1766 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, | 1729 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, |
1767 | #define XCM_FUNC3_END 1542 | 1730 | #define XCM_FUNC3_END 1505 |
1768 | #define XCM_FUNC4_START 1542 | 1731 | #define XCM_FUNC4_START 1505 |
1769 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, | 1732 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, |
1770 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, | 1733 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, |
1771 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, | 1734 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, |
@@ -1775,8 +1738,8 @@ static const struct raw_op init_ops[] = { | |||
1775 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, | 1738 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, |
1776 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, | 1739 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, |
1777 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, | 1740 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, |
1778 | #define XCM_FUNC4_END 1551 | 1741 | #define XCM_FUNC4_END 1514 |
1779 | #define XCM_FUNC5_START 1551 | 1742 | #define XCM_FUNC5_START 1514 |
1780 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, | 1743 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, |
1781 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, | 1744 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, |
1782 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, | 1745 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, |
@@ -1786,8 +1749,8 @@ static const struct raw_op init_ops[] = { | |||
1786 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, | 1749 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, |
1787 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, | 1750 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, |
1788 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, | 1751 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, |
1789 | #define XCM_FUNC5_END 1560 | 1752 | #define XCM_FUNC5_END 1523 |
1790 | #define XCM_FUNC6_START 1560 | 1753 | #define XCM_FUNC6_START 1523 |
1791 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, | 1754 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, |
1792 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, | 1755 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, |
1793 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, | 1756 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, |
@@ -1797,8 +1760,8 @@ static const struct raw_op init_ops[] = { | |||
1797 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, | 1760 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, |
1798 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, | 1761 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, |
1799 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, | 1762 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, |
1800 | #define XCM_FUNC6_END 1569 | 1763 | #define XCM_FUNC6_END 1532 |
1801 | #define XCM_FUNC7_START 1569 | 1764 | #define XCM_FUNC7_START 1532 |
1802 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, | 1765 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, |
1803 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, | 1766 | {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, |
1804 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, | 1767 | {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, |
@@ -1808,8 +1771,8 @@ static const struct raw_op init_ops[] = { | |||
1808 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, | 1771 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, |
1809 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, | 1772 | {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, |
1810 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, | 1773 | {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, |
1811 | #define XCM_FUNC7_END 1578 | 1774 | #define XCM_FUNC7_END 1541 |
1812 | #define XSEM_COMMON_START 1578 | 1775 | #define XSEM_COMMON_START 1541 |
1813 | {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0}, | 1776 | {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0}, |
1814 | {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0}, | 1777 | {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0}, |
1815 | {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0}, | 1778 | {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0}, |
@@ -1876,9 +1839,9 @@ static const struct raw_op init_ops[] = { | |||
1876 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2}, | 1839 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2}, |
1877 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0}, | 1840 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0}, |
1878 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86}, | 1841 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86}, |
1879 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed}, | 1842 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb}, |
1880 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20}, | 1843 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20}, |
1881 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef}, | 1844 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed}, |
1882 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0}, | 1845 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0}, |
1883 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1}, | 1846 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1}, |
1884 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321}, | 1847 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321}, |
@@ -1886,29 +1849,29 @@ static const struct raw_op init_ops[] = { | |||
1886 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323}, | 1849 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323}, |
1887 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0}, | 1850 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0}, |
1888 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0}, | 1851 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0}, |
1889 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3}, | 1852 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1}, |
1890 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0}, | 1853 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0}, |
1891 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2}, | 1854 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2}, |
1892 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1}, | 1855 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1}, |
1893 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4}, | 1856 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4}, |
1894 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10}, | 1857 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10}, |
1895 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5}, | 1858 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3}, |
1896 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327}, | 1859 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327}, |
1897 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2}, | 1860 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2}, |
1898 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4}, | 1861 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4}, |
1899 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337}, | 1862 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337}, |
1900 | {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0}, | 1863 | {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0}, |
1901 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7}, | 1864 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5}, |
1902 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339}, | 1865 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339}, |
1903 | {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, | 1866 | {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, |
1904 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307}, | 1867 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305}, |
1905 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349}, | 1868 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349}, |
1906 | {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000}, | 1869 | {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000}, |
1907 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f}, | 1870 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d}, |
1908 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351}, | 1871 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351}, |
1909 | {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000}, | 1872 | {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000}, |
1910 | {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000}, | 1873 | {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000}, |
1911 | {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317}, | 1874 | {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315}, |
1912 | {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359}, | 1875 | {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359}, |
1913 | {OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000}, | 1876 | {OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000}, |
1914 | {OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000}, | 1877 | {OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000}, |
@@ -1918,10 +1881,10 @@ static const struct raw_op init_ops[] = { | |||
1918 | {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22}, | 1881 | {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22}, |
1919 | {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2}, | 1882 | {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2}, |
1920 | {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8}, | 1883 | {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8}, |
1921 | {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319}, | 1884 | {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317}, |
1922 | {OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b}, | 1885 | {OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b}, |
1923 | #define XSEM_COMMON_END 1688 | 1886 | #define XSEM_COMMON_END 1651 |
1924 | #define XSEM_PORT0_START 1688 | 1887 | #define XSEM_PORT0_START 1651 |
1925 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10}, | 1888 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10}, |
1926 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc}, | 1889 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc}, |
1927 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c}, | 1890 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c}, |
@@ -1934,7 +1897,7 @@ static const struct raw_op init_ops[] = { | |||
1934 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c}, | 1897 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c}, |
1935 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0}, | 1898 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0}, |
1936 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c}, | 1899 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c}, |
1937 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b}, | 1900 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319}, |
1938 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28}, | 1901 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28}, |
1939 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0}, | 1902 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0}, |
1940 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc}, | 1903 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc}, |
@@ -1950,12 +1913,12 @@ static const struct raw_op init_ops[] = { | |||
1950 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d}, | 1913 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d}, |
1951 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1}, | 1914 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1}, |
1952 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42}, | 1915 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42}, |
1953 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b}, | 1916 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329}, |
1954 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4}, | 1917 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4}, |
1955 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42}, | 1918 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42}, |
1956 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4}, | 1919 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4}, |
1957 | #define XSEM_PORT0_END 1720 | 1920 | #define XSEM_PORT0_END 1683 |
1958 | #define XSEM_PORT1_START 1720 | 1921 | #define XSEM_PORT1_START 1683 |
1959 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10}, | 1922 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10}, |
1960 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc}, | 1923 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc}, |
1961 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c}, | 1924 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c}, |
@@ -1968,7 +1931,7 @@ static const struct raw_op init_ops[] = { | |||
1968 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c}, | 1931 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c}, |
1969 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0}, | 1932 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0}, |
1970 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c}, | 1933 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c}, |
1971 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d}, | 1934 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b}, |
1972 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28}, | 1935 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28}, |
1973 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0}, | 1936 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0}, |
1974 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc}, | 1937 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc}, |
@@ -1984,65 +1947,65 @@ static const struct raw_op init_ops[] = { | |||
1984 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f}, | 1947 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f}, |
1985 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1}, | 1948 | {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1}, |
1986 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42}, | 1949 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42}, |
1987 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d}, | 1950 | {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b}, |
1988 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4}, | 1951 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4}, |
1989 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42}, | 1952 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42}, |
1990 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4}, | 1953 | {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4}, |
1991 | #define XSEM_PORT1_END 1752 | 1954 | #define XSEM_PORT1_END 1715 |
1992 | #define XSEM_FUNC0_START 1752 | 1955 | #define XSEM_FUNC0_START 1715 |
1993 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0}, | 1956 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0}, |
1994 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361}, | 1957 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361}, |
1995 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe}, | 1958 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe}, |
1996 | #define XSEM_FUNC0_END 1755 | 1959 | #define XSEM_FUNC0_END 1718 |
1997 | #define XSEM_FUNC1_START 1755 | 1960 | #define XSEM_FUNC1_START 1718 |
1998 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0}, | 1961 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0}, |
1999 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371}, | 1962 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371}, |
2000 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe}, | 1963 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe}, |
2001 | #define XSEM_FUNC1_END 1758 | 1964 | #define XSEM_FUNC1_END 1721 |
2002 | #define XSEM_FUNC2_START 1758 | 1965 | #define XSEM_FUNC2_START 1721 |
2003 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0}, | 1966 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0}, |
2004 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381}, | 1967 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381}, |
2005 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe}, | 1968 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe}, |
2006 | #define XSEM_FUNC2_END 1761 | 1969 | #define XSEM_FUNC2_END 1724 |
2007 | #define XSEM_FUNC3_START 1761 | 1970 | #define XSEM_FUNC3_START 1724 |
2008 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0}, | 1971 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0}, |
2009 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391}, | 1972 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391}, |
2010 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe}, | 1973 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe}, |
2011 | #define XSEM_FUNC3_END 1764 | 1974 | #define XSEM_FUNC3_END 1727 |
2012 | #define XSEM_FUNC4_START 1764 | 1975 | #define XSEM_FUNC4_START 1727 |
2013 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0}, | 1976 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0}, |
2014 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1}, | 1977 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1}, |
2015 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe}, | 1978 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe}, |
2016 | #define XSEM_FUNC4_END 1767 | 1979 | #define XSEM_FUNC4_END 1730 |
2017 | #define XSEM_FUNC5_START 1767 | 1980 | #define XSEM_FUNC5_START 1730 |
2018 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0}, | 1981 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0}, |
2019 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1}, | 1982 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1}, |
2020 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe}, | 1983 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe}, |
2021 | #define XSEM_FUNC5_END 1770 | 1984 | #define XSEM_FUNC5_END 1733 |
2022 | #define XSEM_FUNC6_START 1770 | 1985 | #define XSEM_FUNC6_START 1733 |
2023 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0}, | 1986 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0}, |
2024 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1}, | 1987 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1}, |
2025 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe}, | 1988 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe}, |
2026 | #define XSEM_FUNC6_END 1773 | 1989 | #define XSEM_FUNC6_END 1736 |
2027 | #define XSEM_FUNC7_START 1773 | 1990 | #define XSEM_FUNC7_START 1736 |
2028 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0}, | 1991 | {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0}, |
2029 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1}, | 1992 | {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1}, |
2030 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe}, | 1993 | {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe}, |
2031 | #define XSEM_FUNC7_END 1776 | 1994 | #define XSEM_FUNC7_END 1739 |
2032 | #define CDU_COMMON_START 1776 | 1995 | #define CDU_COMMON_START 1739 |
2033 | {OP_WR, CDU_REG_CDU_CONTROL0, 0x1}, | 1996 | {OP_WR, CDU_REG_CDU_CONTROL0, 0x1}, |
2034 | {OP_WR_E1H, CDU_REG_MF_MODE, 0x1}, | 1997 | {OP_WR_E1H, CDU_REG_MF_MODE, 0x1}, |
2035 | {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000}, | 1998 | {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000}, |
2036 | {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d}, | 1999 | {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d}, |
2037 | {OP_WB_E1, CDU_REG_L1TT, 0x200033f}, | 2000 | {OP_WB_E1, CDU_REG_L1TT, 0x200033d}, |
2038 | {OP_WB_E1H, CDU_REG_L1TT, 0x20003e1}, | 2001 | {OP_WB_E1H, CDU_REG_L1TT, 0x20003e1}, |
2039 | {OP_WB_E1, CDU_REG_MATT, 0x20053f}, | 2002 | {OP_WB_E1, CDU_REG_MATT, 0x20053d}, |
2040 | {OP_WB_E1H, CDU_REG_MATT, 0x2805e1}, | 2003 | {OP_WB_E1H, CDU_REG_MATT, 0x2805e1}, |
2041 | {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2}, | 2004 | {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2}, |
2042 | {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f}, | 2005 | {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d}, |
2043 | {OP_ZR, CDU_REG_MATT + 0xa0, 0x18}, | 2006 | {OP_ZR, CDU_REG_MATT + 0xa0, 0x18}, |
2044 | #define CDU_COMMON_END 1787 | 2007 | #define CDU_COMMON_END 1750 |
2045 | #define DMAE_COMMON_START 1787 | 2008 | #define DMAE_COMMON_START 1750 |
2046 | {OP_ZR, DMAE_REG_CMD_MEM, 0xe0}, | 2009 | {OP_ZR, DMAE_REG_CMD_MEM, 0xe0}, |
2047 | {OP_WR, DMAE_REG_CRC16C_INIT, 0x0}, | 2010 | {OP_WR, DMAE_REG_CRC16C_INIT, 0x0}, |
2048 | {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1}, | 2011 | {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1}, |
@@ -2050,24 +2013,24 @@ static const struct raw_op init_ops[] = { | |||
2050 | {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2}, | 2013 | {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2}, |
2051 | {OP_WR, DMAE_REG_PCI_IFEN, 0x1}, | 2014 | {OP_WR, DMAE_REG_PCI_IFEN, 0x1}, |
2052 | {OP_WR, DMAE_REG_GRC_IFEN, 0x1}, | 2015 | {OP_WR, DMAE_REG_GRC_IFEN, 0x1}, |
2053 | #define DMAE_COMMON_END 1794 | 2016 | #define DMAE_COMMON_END 1757 |
2054 | #define PXP_COMMON_START 1794 | 2017 | #define PXP_COMMON_START 1757 |
2055 | {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565}, | 2018 | {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563}, |
2056 | {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609}, | 2019 | {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609}, |
2057 | {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a}, | 2020 | {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568}, |
2058 | {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e}, | 2021 | {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e}, |
2059 | {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056f}, | 2022 | {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d}, |
2060 | #define PXP_COMMON_END 1799 | 2023 | #define PXP_COMMON_END 1762 |
2061 | #define CFC_COMMON_START 1799 | 2024 | #define CFC_COMMON_START 1762 |
2062 | {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100}, | 2025 | {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100}, |
2063 | {OP_WR, CFC_REG_CONTROL0, 0x10}, | 2026 | {OP_WR, CFC_REG_CONTROL0, 0x10}, |
2064 | {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff}, | 2027 | {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff}, |
2065 | {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a}, | 2028 | {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a}, |
2066 | #define CFC_COMMON_END 1803 | 2029 | #define CFC_COMMON_END 1766 |
2067 | #define HC_COMMON_START 1803 | 2030 | #define HC_COMMON_START 1766 |
2068 | {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4}, | 2031 | {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4}, |
2069 | #define HC_COMMON_END 1804 | 2032 | #define HC_COMMON_END 1767 |
2070 | #define HC_PORT0_START 1804 | 2033 | #define HC_PORT0_START 1767 |
2071 | {OP_WR_E1, HC_REG_CONFIG_0, 0x1080}, | 2034 | {OP_WR_E1, HC_REG_CONFIG_0, 0x1080}, |
2072 | {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2}, | 2035 | {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2}, |
2073 | {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10}, | 2036 | {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10}, |
@@ -2086,8 +2049,8 @@ static const struct raw_op init_ops[] = { | |||
2086 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, | 2049 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, |
2087 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, | 2050 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, |
2088 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, | 2051 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, |
2089 | #define HC_PORT0_END 1822 | 2052 | #define HC_PORT0_END 1785 |
2090 | #define HC_PORT1_START 1822 | 2053 | #define HC_PORT1_START 1785 |
2091 | {OP_WR_E1, HC_REG_CONFIG_1, 0x1080}, | 2054 | {OP_WR_E1, HC_REG_CONFIG_1, 0x1080}, |
2092 | {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2}, | 2055 | {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2}, |
2093 | {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10}, | 2056 | {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10}, |
@@ -2106,8 +2069,8 @@ static const struct raw_op init_ops[] = { | |||
2106 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, | 2069 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, |
2107 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, | 2070 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, |
2108 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, | 2071 | {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, |
2109 | #define HC_PORT1_END 1840 | 2072 | #define HC_PORT1_END 1803 |
2110 | #define HC_FUNC0_START 1840 | 2073 | #define HC_FUNC0_START 1803 |
2111 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, | 2074 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, |
2112 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0}, | 2075 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0}, |
2113 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, | 2076 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, |
@@ -2123,8 +2086,8 @@ static const struct raw_op init_ops[] = { | |||
2123 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, | 2086 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, |
2124 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, | 2087 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, |
2125 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, | 2088 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, |
2126 | #define HC_FUNC0_END 1855 | 2089 | #define HC_FUNC0_END 1818 |
2127 | #define HC_FUNC1_START 1855 | 2090 | #define HC_FUNC1_START 1818 |
2128 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, | 2091 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, |
2129 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1}, | 2092 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1}, |
2130 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, | 2093 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, |
@@ -2140,8 +2103,8 @@ static const struct raw_op init_ops[] = { | |||
2140 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, | 2103 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, |
2141 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, | 2104 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, |
2142 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, | 2105 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, |
2143 | #define HC_FUNC1_END 1870 | 2106 | #define HC_FUNC1_END 1833 |
2144 | #define HC_FUNC2_START 1870 | 2107 | #define HC_FUNC2_START 1833 |
2145 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, | 2108 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, |
2146 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2}, | 2109 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2}, |
2147 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, | 2110 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, |
@@ -2157,8 +2120,8 @@ static const struct raw_op init_ops[] = { | |||
2157 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, | 2120 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, |
2158 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, | 2121 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, |
2159 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, | 2122 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, |
2160 | #define HC_FUNC2_END 1885 | 2123 | #define HC_FUNC2_END 1848 |
2161 | #define HC_FUNC3_START 1885 | 2124 | #define HC_FUNC3_START 1848 |
2162 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, | 2125 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, |
2163 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3}, | 2126 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3}, |
2164 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, | 2127 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, |
@@ -2174,8 +2137,8 @@ static const struct raw_op init_ops[] = { | |||
2174 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, | 2137 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, |
2175 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, | 2138 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, |
2176 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, | 2139 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, |
2177 | #define HC_FUNC3_END 1900 | 2140 | #define HC_FUNC3_END 1863 |
2178 | #define HC_FUNC4_START 1900 | 2141 | #define HC_FUNC4_START 1863 |
2179 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, | 2142 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, |
2180 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4}, | 2143 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4}, |
2181 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, | 2144 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, |
@@ -2191,8 +2154,8 @@ static const struct raw_op init_ops[] = { | |||
2191 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, | 2154 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, |
2192 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, | 2155 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, |
2193 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, | 2156 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, |
2194 | #define HC_FUNC4_END 1915 | 2157 | #define HC_FUNC4_END 1878 |
2195 | #define HC_FUNC5_START 1915 | 2158 | #define HC_FUNC5_START 1878 |
2196 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, | 2159 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, |
2197 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5}, | 2160 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5}, |
2198 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, | 2161 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, |
@@ -2208,8 +2171,8 @@ static const struct raw_op init_ops[] = { | |||
2208 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, | 2171 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, |
2209 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, | 2172 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, |
2210 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, | 2173 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, |
2211 | #define HC_FUNC5_END 1930 | 2174 | #define HC_FUNC5_END 1893 |
2212 | #define HC_FUNC6_START 1930 | 2175 | #define HC_FUNC6_START 1893 |
2213 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, | 2176 | {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, |
2214 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6}, | 2177 | {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6}, |
2215 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, | 2178 | {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, |
@@ -2225,8 +2188,8 @@ static const struct raw_op init_ops[] = { | |||
2225 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, | 2188 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, |
2226 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, | 2189 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, |
2227 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, | 2190 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, |
2228 | #define HC_FUNC6_END 1945 | 2191 | #define HC_FUNC6_END 1908 |
2229 | #define HC_FUNC7_START 1945 | 2192 | #define HC_FUNC7_START 1908 |
2230 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, | 2193 | {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, |
2231 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7}, | 2194 | {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7}, |
2232 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, | 2195 | {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, |
@@ -2242,8 +2205,8 @@ static const struct raw_op init_ops[] = { | |||
2242 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, | 2205 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, |
2243 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, | 2206 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, |
2244 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, | 2207 | {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, |
2245 | #define HC_FUNC7_END 1960 | 2208 | #define HC_FUNC7_END 1923 |
2246 | #define PXP2_COMMON_START 1960 | 2209 | #define PXP2_COMMON_START 1923 |
2247 | {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340}, | 2210 | {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340}, |
2248 | {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1}, | 2211 | {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1}, |
2249 | {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10}, | 2212 | {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10}, |
@@ -2361,8 +2324,8 @@ static const struct raw_op init_ops[] = { | |||
2361 | {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1}, | 2324 | {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1}, |
2362 | {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1}, | 2325 | {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1}, |
2363 | {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340}, | 2326 | {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340}, |
2364 | #define PXP2_COMMON_END 2077 | 2327 | #define PXP2_COMMON_END 2040 |
2365 | #define MISC_AEU_COMMON_START 2077 | 2328 | #define MISC_AEU_COMMON_START 2040 |
2366 | {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16}, | 2329 | {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16}, |
2367 | {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000}, | 2330 | {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000}, |
2368 | {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555}, | 2331 | {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555}, |
@@ -2382,8 +2345,8 @@ static const struct raw_op init_ops[] = { | |||
2382 | {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0}, | 2345 | {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0}, |
2383 | {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00}, | 2346 | {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00}, |
2384 | {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3}, | 2347 | {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3}, |
2385 | #define MISC_AEU_COMMON_END 2096 | 2348 | #define MISC_AEU_COMMON_END 2059 |
2386 | #define MISC_AEU_PORT0_START 2096 | 2349 | #define MISC_AEU_PORT0_START 2059 |
2387 | {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000}, | 2350 | {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000}, |
2388 | {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000}, | 2351 | {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000}, |
2389 | {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef}, | 2352 | {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef}, |
@@ -2416,8 +2379,8 @@ static const struct raw_op init_ops[] = { | |||
2416 | {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0}, | 2379 | {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0}, |
2417 | {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3}, | 2380 | {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3}, |
2418 | {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7}, | 2381 | {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7}, |
2419 | #define MISC_AEU_PORT0_END 2128 | 2382 | #define MISC_AEU_PORT0_END 2091 |
2420 | #define MISC_AEU_PORT1_START 2128 | 2383 | #define MISC_AEU_PORT1_START 2091 |
2421 | {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000}, | 2384 | {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000}, |
2422 | {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000}, | 2385 | {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000}, |
2423 | {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef}, | 2386 | {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef}, |
@@ -2450,7 +2413,7 @@ static const struct raw_op init_ops[] = { | |||
2450 | {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0}, | 2413 | {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0}, |
2451 | {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3}, | 2414 | {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3}, |
2452 | {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7}, | 2415 | {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7}, |
2453 | #define MISC_AEU_PORT1_END 2160 | 2416 | #define MISC_AEU_PORT1_END 2123 |
2454 | 2417 | ||
2455 | }; | 2418 | }; |
2456 | 2419 | ||
@@ -2560,103 +2523,92 @@ static const u32 init_data_e1[] = { | |||
2560 | 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80, | 2523 | 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80, |
2561 | 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280, | 2524 | 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280, |
2562 | 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780, | 2525 | 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780, |
2563 | 0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000, | 2526 | 0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff, |
2564 | 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | ||
2565 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2527 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2566 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2528 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2567 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2529 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2568 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2530 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2569 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, | ||
2570 | 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, | ||
2571 | 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, | ||
2572 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2531 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2532 | 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500, | ||
2533 | 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, | ||
2534 | 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | ||
2573 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2535 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2574 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2536 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2575 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2537 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2576 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2538 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2577 | 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080, | 2539 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, |
2578 | 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380, | 2540 | 0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180, |
2579 | 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, | 2541 | 0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480, |
2580 | 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980, | 2542 | 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780, |
2581 | 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, | 2543 | 0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80, |
2582 | 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, | 2544 | 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80, |
2583 | 0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, | 2545 | 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604, |
2584 | 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, | 2546 | 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000, |
2547 | 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | ||
2585 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2548 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2586 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2549 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2587 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2550 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2588 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, | 2551 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, |
2589 | 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, | 2552 | 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, |
2590 | 0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, | 2553 | 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, |
2591 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, | 2554 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, |
2555 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000, | ||
2592 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, | 2556 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, |
2593 | 0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, | ||
2594 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, | 2557 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, |
2595 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, | 2558 | 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000, |
2596 | 0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, | 2559 | 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2597 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, | 2560 | 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c, |
2598 | 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, | ||
2599 | 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | ||
2600 | 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, | ||
2601 | 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, | ||
2602 | 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, | ||
2603 | 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | ||
2604 | 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, | ||
2605 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, | ||
2606 | 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, | ||
2607 | 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | ||
2608 | 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, | ||
2609 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, | 2561 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, |
2610 | 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, | 2562 | 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, |
2611 | 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, | 2563 | 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, |
2612 | 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, | 2564 | 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, |
2613 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, | 2565 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, |
2614 | 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, | 2566 | 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, |
2615 | 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | 2567 | 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2616 | 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, | 2568 | 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c, |
2617 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, | 2569 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, |
2618 | 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, | 2570 | 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, |
2619 | 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | 2571 | 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2620 | 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, | 2572 | 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, |
2621 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, | 2573 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, |
2622 | 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, | 2574 | 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, |
2623 | 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | 2575 | 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2624 | 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, | 2576 | 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, |
2625 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, | 2577 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, |
2626 | 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, | 2578 | 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, |
2627 | 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | 2579 | 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2628 | 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, | 2580 | 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c, |
2629 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, | 2581 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, |
2630 | 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, | 2582 | 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, |
2631 | 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | 2583 | 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2632 | 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, | 2584 | 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, |
2633 | 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, | 2585 | 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, |
2634 | 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, | 2586 | 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, |
2635 | 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, | 2587 | 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2636 | 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, | 2588 | 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c, |
2637 | 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, | 2589 | 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, |
2638 | 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, | 2590 | 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, |
2639 | 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, | 2591 | 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2640 | 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, | 2592 | 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, |
2641 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, | 2593 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, |
2642 | 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, | 2594 | 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, |
2643 | 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, | 2595 | 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, |
2644 | 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, | 2596 | 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, |
2645 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, | 2597 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a, |
2646 | 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, | 2598 | 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c, |
2647 | 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, | 2599 | 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, |
2648 | 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, | 2600 | 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, |
2649 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, | 2601 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, |
2650 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, | 2602 | 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, |
2651 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, | 2603 | 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2652 | 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, | 2604 | 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, |
2653 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, | 2605 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, |
2654 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, | 2606 | 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, |
2655 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, | 2607 | 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2656 | 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, | 2608 | 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, |
2657 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, | 2609 | 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, |
2658 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, | 2610 | 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, |
2659 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, | 2611 | 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, |
2660 | 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, | 2612 | 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, |
2661 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, | 2613 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, |
2662 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, | 2614 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, |
@@ -2678,16 +2630,27 @@ static const u32 init_data_e1[] = { | |||
2678 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, | 2630 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, |
2679 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, | 2631 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, |
2680 | 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, | 2632 | 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, |
2681 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, | 2633 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, |
2682 | 0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, | 2634 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, |
2683 | 0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, | 2635 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, |
2684 | 0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, | 2636 | 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, |
2685 | 0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000, | 2637 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, |
2686 | 0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, | 2638 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, |
2687 | 0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, | 2639 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, |
2688 | 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, | 2640 | 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, |
2689 | 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, | 2641 | 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, |
2690 | 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000 | 2642 | 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, |
2643 | 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, | ||
2644 | 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170, | ||
2645 | 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000, | ||
2646 | 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210, | ||
2647 | 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250, | ||
2648 | 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180, | ||
2649 | 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000, | ||
2650 | 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc, | ||
2651 | 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, | ||
2652 | 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc, | ||
2653 | 0xcccccccc, 0x00002000 | ||
2691 | }; | 2654 | }; |
2692 | 2655 | ||
2693 | static const u32 init_data_e1h[] = { | 2656 | static const u32 init_data_e1h[] = { |
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c index ff2743db10d9..4ce7fe9c5251 100644 --- a/drivers/net/bnx2x_link.c +++ b/drivers/net/bnx2x_link.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/ethtool.h> | 22 | #include <linux/ethtool.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/version.h> | ||
25 | 24 | ||
26 | #include "bnx2x_reg.h" | 25 | #include "bnx2x_reg.h" |
27 | #include "bnx2x_fw_defs.h" | 26 | #include "bnx2x_fw_defs.h" |
@@ -31,17 +30,16 @@ | |||
31 | 30 | ||
32 | /********************************************************/ | 31 | /********************************************************/ |
33 | #define SUPPORT_CL73 0 /* Currently no */ | 32 | #define SUPPORT_CL73 0 /* Currently no */ |
34 | #define ETH_HLEN 14 | 33 | #define ETH_HLEN 14 |
35 | #define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ | 34 | #define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ |
36 | #define ETH_MIN_PACKET_SIZE 60 | 35 | #define ETH_MIN_PACKET_SIZE 60 |
37 | #define ETH_MAX_PACKET_SIZE 1500 | 36 | #define ETH_MAX_PACKET_SIZE 1500 |
38 | #define ETH_MAX_JUMBO_PACKET_SIZE 9600 | 37 | #define ETH_MAX_JUMBO_PACKET_SIZE 9600 |
39 | #define MDIO_ACCESS_TIMEOUT 1000 | 38 | #define MDIO_ACCESS_TIMEOUT 1000 |
40 | #define BMAC_CONTROL_RX_ENABLE 2 | 39 | #define BMAC_CONTROL_RX_ENABLE 2 |
41 | #define MAX_MTU_SIZE 5000 | ||
42 | 40 | ||
43 | /***********************************************************/ | 41 | /***********************************************************/ |
44 | /* Shortcut definitions */ | 42 | /* Shortcut definitions */ |
45 | /***********************************************************/ | 43 | /***********************************************************/ |
46 | 44 | ||
47 | #define NIG_STATUS_XGXS0_LINK10G \ | 45 | #define NIG_STATUS_XGXS0_LINK10G \ |
@@ -80,12 +78,12 @@ | |||
80 | 78 | ||
81 | #define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 | 79 | #define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 |
82 | #define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 | 80 | #define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 |
83 | #define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM | 81 | #define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM |
84 | #define AUTONEG_PARALLEL \ | 82 | #define AUTONEG_PARALLEL \ |
85 | SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION | 83 | SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION |
86 | #define AUTONEG_SGMII_FIBER_AUTODET \ | 84 | #define AUTONEG_SGMII_FIBER_AUTODET \ |
87 | SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT | 85 | SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT |
88 | #define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY | 86 | #define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY |
89 | 87 | ||
90 | #define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ | 88 | #define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ |
91 | MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE | 89 | MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE |
@@ -202,11 +200,10 @@ static void bnx2x_emac_init(struct link_params *params, | |||
202 | /* init emac - use read-modify-write */ | 200 | /* init emac - use read-modify-write */ |
203 | /* self clear reset */ | 201 | /* self clear reset */ |
204 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); | 202 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); |
205 | EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); | 203 | EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); |
206 | 204 | ||
207 | timeout = 200; | 205 | timeout = 200; |
208 | do | 206 | do { |
209 | { | ||
210 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); | 207 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); |
211 | DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); | 208 | DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); |
212 | if (!timeout) { | 209 | if (!timeout) { |
@@ -214,18 +211,18 @@ static void bnx2x_emac_init(struct link_params *params, | |||
214 | return; | 211 | return; |
215 | } | 212 | } |
216 | timeout--; | 213 | timeout--; |
217 | }while (val & EMAC_MODE_RESET); | 214 | } while (val & EMAC_MODE_RESET); |
218 | 215 | ||
219 | /* Set mac address */ | 216 | /* Set mac address */ |
220 | val = ((params->mac_addr[0] << 8) | | 217 | val = ((params->mac_addr[0] << 8) | |
221 | params->mac_addr[1]); | 218 | params->mac_addr[1]); |
222 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val); | 219 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val); |
223 | 220 | ||
224 | val = ((params->mac_addr[2] << 24) | | 221 | val = ((params->mac_addr[2] << 24) | |
225 | (params->mac_addr[3] << 16) | | 222 | (params->mac_addr[3] << 16) | |
226 | (params->mac_addr[4] << 8) | | 223 | (params->mac_addr[4] << 8) | |
227 | params->mac_addr[5]); | 224 | params->mac_addr[5]); |
228 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); | 225 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); |
229 | } | 226 | } |
230 | 227 | ||
231 | static u8 bnx2x_emac_enable(struct link_params *params, | 228 | static u8 bnx2x_emac_enable(struct link_params *params, |
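Editor's note: the MAC-match writes above pack the six station-address bytes into two 32-bit register values — bytes 0-1 go into the low 16 bits of the first write, bytes 2-5 into the second write at offset +4, most significant byte first. A minimal standalone sketch of that packing (plain C; the names in the comments are just labels, not driver code):

#include <stdio.h>

int main(void)
{
	/* example station address, mac[0] is the most significant byte */
	unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	/* first write: bytes 0-1 in the low 16 bits */
	unsigned int match_hi = (mac[0] << 8) | mac[1];

	/* second write (MAC_MATCH + 4): bytes 2-5, byte 2 most significant */
	unsigned int match_lo = (mac[2] << 24) | (mac[3] << 16) |
				(mac[4] << 8)  |  mac[5];

	printf("MAC_MATCH   = 0x%08x\n", match_hi);	/* 0x00000010 */
	printf("MAC_MATCH+4 = 0x%08x\n", match_lo);	/* 0x18aabbcc */
	return 0;
}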
@@ -286,7 +283,7 @@ static u8 bnx2x_emac_enable(struct link_params *params, | |||
286 | if (CHIP_REV_IS_SLOW(bp)) { | 283 | if (CHIP_REV_IS_SLOW(bp)) { |
287 | /* config GMII mode */ | 284 | /* config GMII mode */ |
288 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); | 285 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); |
289 | EMAC_WR(EMAC_REG_EMAC_MODE, | 286 | EMAC_WR(bp, EMAC_REG_EMAC_MODE, |
290 | (val | EMAC_MODE_PORT_GMII)); | 287 | (val | EMAC_MODE_PORT_GMII)); |
291 | } else { /* ASIC */ | 288 | } else { /* ASIC */ |
292 | /* pause enable/disable */ | 289 | /* pause enable/disable */ |
@@ -298,17 +295,19 @@ static u8 bnx2x_emac_enable(struct link_params *params, | |||
298 | EMAC_RX_MODE_FLOW_EN); | 295 | EMAC_RX_MODE_FLOW_EN); |
299 | 296 | ||
300 | bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, | 297 | bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, |
301 | EMAC_TX_MODE_EXT_PAUSE_EN); | 298 | (EMAC_TX_MODE_EXT_PAUSE_EN | |
299 | EMAC_TX_MODE_FLOW_EN)); | ||
302 | if (vars->flow_ctrl & FLOW_CTRL_TX) | 300 | if (vars->flow_ctrl & FLOW_CTRL_TX) |
303 | bnx2x_bits_en(bp, emac_base + | 301 | bnx2x_bits_en(bp, emac_base + |
304 | EMAC_REG_EMAC_TX_MODE, | 302 | EMAC_REG_EMAC_TX_MODE, |
305 | EMAC_TX_MODE_EXT_PAUSE_EN); | 303 | (EMAC_TX_MODE_EXT_PAUSE_EN | |
304 | EMAC_TX_MODE_FLOW_EN)); | ||
306 | } | 305 | } |
307 | 306 | ||
308 | /* KEEP_VLAN_TAG, promiscuous */ | 307 | /* KEEP_VLAN_TAG, promiscuous */ |
309 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); | 308 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); |
310 | val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; | 309 | val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; |
311 | EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); | 310 | EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); |
312 | 311 | ||
313 | /* Set Loopback */ | 312 | /* Set Loopback */ |
314 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); | 313 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); |
@@ -316,10 +315,10 @@ static u8 bnx2x_emac_enable(struct link_params *params, | |||
316 | val |= 0x810; | 315 | val |= 0x810; |
317 | else | 316 | else |
318 | val &= ~0x810; | 317 | val &= ~0x810; |
319 | EMAC_WR(EMAC_REG_EMAC_MODE, val); | 318 | EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); |
320 | 319 | ||
321 | /* enable emac for jumbo packets */ | 320 | /* enable emac for jumbo packets */ |
322 | EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE, | 321 | EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, |
323 | (EMAC_RX_MTU_SIZE_JUMBO_ENA | | 322 | (EMAC_RX_MTU_SIZE_JUMBO_ENA | |
324 | (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); | 323 | (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); |
325 | 324 | ||
@@ -591,9 +590,9 @@ void bnx2x_link_status_update(struct link_params *params, | |||
591 | vars->flow_ctrl &= ~FLOW_CTRL_RX; | 590 | vars->flow_ctrl &= ~FLOW_CTRL_RX; |
592 | 591 | ||
593 | if (vars->phy_flags & PHY_XGXS_FLAG) { | 592 | if (vars->phy_flags & PHY_XGXS_FLAG) { |
594 | if (params->req_line_speed && | 593 | if (vars->line_speed && |
595 | ((params->req_line_speed == SPEED_10) || | 594 | ((vars->line_speed == SPEED_10) || |
596 | (params->req_line_speed == SPEED_100))) { | 595 | (vars->line_speed == SPEED_100))) { |
597 | vars->phy_flags |= PHY_SGMII_FLAG; | 596 | vars->phy_flags |= PHY_SGMII_FLAG; |
598 | } else { | 597 | } else { |
599 | vars->phy_flags &= ~PHY_SGMII_FLAG; | 598 | vars->phy_flags &= ~PHY_SGMII_FLAG; |
@@ -645,7 +644,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) | |||
645 | u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : | 644 | u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : |
646 | NIG_REG_INGRESS_BMAC0_MEM; | 645 | NIG_REG_INGRESS_BMAC0_MEM; |
647 | u32 wb_data[2]; | 646 | u32 wb_data[2]; |
648 | u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); | 647 | u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); |
649 | 648 | ||
650 | /* Only if the bmac is out of reset */ | 649 | /* Only if the bmac is out of reset */ |
651 | if (REG_RD(bp, MISC_REG_RESET_REG_2) & | 650 | if (REG_RD(bp, MISC_REG_RESET_REG_2) & |
@@ -670,7 +669,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, | |||
670 | u8 port = params->port; | 669 | u8 port = params->port; |
671 | u32 init_crd, crd; | 670 | u32 init_crd, crd; |
672 | u32 count = 1000; | 671 | u32 count = 1000; |
673 | u32 pause = 0; | ||
674 | 672 | ||
675 | /* disable port */ | 673 | /* disable port */ |
676 | REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); | 674 | REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); |
@@ -693,33 +691,25 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, | |||
693 | return -EINVAL; | 691 | return -EINVAL; |
694 | } | 692 | } |
695 | 693 | ||
696 | if (flow_ctrl & FLOW_CTRL_RX) | 694 | if (flow_ctrl & FLOW_CTRL_RX || |
697 | pause = 1; | 695 | line_speed == SPEED_10 || |
698 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause); | 696 | line_speed == SPEED_100 || |
699 | if (pause) { | 697 | line_speed == SPEED_1000 || |
698 | line_speed == SPEED_2500) { | ||
699 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); | ||
700 | /* update threshold */ | 700 | /* update threshold */ |
701 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); | 701 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); |
702 | /* update init credit */ | 702 | /* update init credit */ |
703 | init_crd = 778; /* (800-18-4) */ | 703 | init_crd = 778; /* (800-18-4) */ |
704 | 704 | ||
705 | } else { | 705 | } else { |
706 | u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + | 706 | u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + |
707 | ETH_OVREHEAD)/16; | 707 | ETH_OVREHEAD)/16; |
708 | 708 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); | |
709 | /* update threshold */ | 709 | /* update threshold */ |
710 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); | 710 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); |
711 | /* update init credit */ | 711 | /* update init credit */ |
712 | switch (line_speed) { | 712 | switch (line_speed) { |
713 | case SPEED_10: | ||
714 | case SPEED_100: | ||
715 | case SPEED_1000: | ||
716 | init_crd = thresh + 55 - 22; | ||
717 | break; | ||
718 | |||
719 | case SPEED_2500: | ||
720 | init_crd = thresh + 138 - 22; | ||
721 | break; | ||
722 | |||
723 | case SPEED_10000: | 713 | case SPEED_10000: |
724 | init_crd = thresh + 553 - 22; | 714 | init_crd = thresh + 553 - 22; |
725 | break; | 715 | break; |
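Editor's note: the reworked PBF hunk above enables the pause line and uses a fixed 778-word initial credit whenever RX flow control is on or the speed is 2.5G or below, and otherwise sizes the arbiter threshold and credit from the jumbo frame length. A short worked example of that arithmetic, reusing the ETH_MAX_JUMBO_PACKET_SIZE and ETH_OVREHEAD values defined earlier in this file's diff (standalone sketch, not the driver function):

#include <stdio.h>

/* values taken from the defines near the top of bnx2x_link.c in this diff */
#define ETH_HLEN			14
#define ETH_OVREHEAD			(ETH_HLEN + 8)	/* 8 for CRC + VLAN */
#define ETH_MAX_JUMBO_PACKET_SIZE	9600

int main(void)
{
	/* 10G path: threshold in 16-byte units, credit sized for one jumbo frame */
	unsigned int thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD) / 16;
	unsigned int init_crd_10g = thresh + 553 - 22;

	/* pause / low-speed path uses the fixed credit: 800 - 18 - 4 */
	unsigned int init_crd_pause = 778;

	printf("thresh              = %u\n", thresh);		/* (9600+22)/16 = 601 */
	printf("init_crd 10G        = %u\n", init_crd_10g);	/* 601+553-22 = 1132 */
	printf("init_crd pause/<=2.5G = %u\n", init_crd_pause);
	return 0;
}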
@@ -764,10 +754,10 @@ static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port) | |||
764 | emac_base = GRCBASE_EMAC0; | 754 | emac_base = GRCBASE_EMAC0; |
765 | break; | 755 | break; |
766 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | 756 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: |
767 | emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1; | 757 | emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; |
768 | break; | 758 | break; |
769 | default: | 759 | default: |
770 | emac_base = (port) ? GRCBASE_EMAC1: GRCBASE_EMAC0; | 760 | emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
771 | break; | 761 | break; |
772 | } | 762 | } |
773 | return emac_base; | 763 | return emac_base; |
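Editor's note: bnx2x_get_emac_base, touched above only for spacing, keeps the BCM8073 special case in which the port-to-EMAC mapping is inverted — port 1 is reached through EMAC0 and port 0 through EMAC1 (a comment removed later in this diff states the same thing). A minimal sketch of that selection with placeholder base addresses (illustrative only, not the real register map):

#include <stdio.h>

/* illustrative GRC base addresses, not the real ones */
#define GRCBASE_EMAC0	0x3000
#define GRCBASE_EMAC1	0x3400

enum ext_phy { PHY_BCM8073, PHY_OTHER };

static unsigned int emac_base_for(enum ext_phy type, unsigned int port)
{
	switch (type) {
	case PHY_BCM8073:
		/* 8073: mapping is swapped - port 1 goes through EMAC0 */
		return port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
	default:
		return port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	}
}

int main(void)
{
	printf("8073    port0 -> 0x%x, port1 -> 0x%x\n",
	       emac_base_for(PHY_BCM8073, 0), emac_base_for(PHY_BCM8073, 1));
	printf("default port0 -> 0x%x, port1 -> 0x%x\n",
	       emac_base_for(PHY_OTHER, 0), emac_base_for(PHY_OTHER, 1));
	return 0;
}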
@@ -1044,7 +1034,7 @@ static void bnx2x_set_swap_lanes(struct link_params *params) | |||
1044 | } | 1034 | } |
1045 | 1035 | ||
1046 | static void bnx2x_set_parallel_detection(struct link_params *params, | 1036 | static void bnx2x_set_parallel_detection(struct link_params *params, |
1047 | u8 phy_flags) | 1037 | u8 phy_flags) |
1048 | { | 1038 | { |
1049 | struct bnx2x *bp = params->bp; | 1039 | struct bnx2x *bp = params->bp; |
1050 | u16 control2; | 1040 | u16 control2; |
@@ -1114,7 +1104,7 @@ static void bnx2x_set_autoneg(struct link_params *params, | |||
1114 | MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); | 1104 | MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); |
1115 | 1105 | ||
1116 | /* CL37 Autoneg Enabled */ | 1106 | /* CL37 Autoneg Enabled */ |
1117 | if (params->req_line_speed == SPEED_AUTO_NEG) | 1107 | if (vars->line_speed == SPEED_AUTO_NEG) |
1118 | reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; | 1108 | reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; |
1119 | else /* CL37 Autoneg Disabled */ | 1109 | else /* CL37 Autoneg Disabled */ |
1120 | reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | | 1110 | reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | |
@@ -1132,7 +1122,7 @@ static void bnx2x_set_autoneg(struct link_params *params, | |||
1132 | MDIO_REG_BANK_SERDES_DIGITAL, | 1122 | MDIO_REG_BANK_SERDES_DIGITAL, |
1133 | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); | 1123 | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); |
1134 | reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN; | 1124 | reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN; |
1135 | if (params->req_line_speed == SPEED_AUTO_NEG) | 1125 | if (vars->line_speed == SPEED_AUTO_NEG) |
1136 | reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; | 1126 | reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; |
1137 | else | 1127 | else |
1138 | reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; | 1128 | reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; |
@@ -1148,7 +1138,7 @@ static void bnx2x_set_autoneg(struct link_params *params, | |||
1148 | MDIO_REG_BANK_BAM_NEXT_PAGE, | 1138 | MDIO_REG_BANK_BAM_NEXT_PAGE, |
1149 | MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, | 1139 | MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, |
1150 | ®_val); | 1140 | ®_val); |
1151 | if (params->req_line_speed == SPEED_AUTO_NEG) { | 1141 | if (vars->line_speed == SPEED_AUTO_NEG) { |
1152 | /* Enable BAM aneg Mode and TetonII aneg Mode */ | 1142 | /* Enable BAM aneg Mode and TetonII aneg Mode */ |
1153 | reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | | 1143 | reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | |
1154 | MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); | 1144 | MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); |
@@ -1164,7 +1154,7 @@ static void bnx2x_set_autoneg(struct link_params *params, | |||
1164 | reg_val); | 1154 | reg_val); |
1165 | 1155 | ||
1166 | /* Enable Clause 73 Aneg */ | 1156 | /* Enable Clause 73 Aneg */ |
1167 | if ((params->req_line_speed == SPEED_AUTO_NEG) && | 1157 | if ((vars->line_speed == SPEED_AUTO_NEG) && |
1168 | (SUPPORT_CL73)) { | 1158 | (SUPPORT_CL73)) { |
1169 | /* Enable BAM Station Manager */ | 1159 | /* Enable BAM Station Manager */ |
1170 | 1160 | ||
@@ -1226,7 +1216,8 @@ static void bnx2x_set_autoneg(struct link_params *params, | |||
1226 | } | 1216 | } |
1227 | 1217 | ||
1228 | /* program SerDes, forced speed */ | 1218 | /* program SerDes, forced speed */ |
1229 | static void bnx2x_program_serdes(struct link_params *params) | 1219 | static void bnx2x_program_serdes(struct link_params *params, |
1220 | struct link_vars *vars) | ||
1230 | { | 1221 | { |
1231 | struct bnx2x *bp = params->bp; | 1222 | struct bnx2x *bp = params->bp; |
1232 | u16 reg_val; | 1223 | u16 reg_val; |
@@ -1248,28 +1239,35 @@ static void bnx2x_program_serdes(struct link_params *params) | |||
1248 | 1239 | ||
1249 | /* program speed | 1240 | /* program speed |
1250 | - needed only if the speed is greater than 1G (2.5G or 10G) */ | 1241 | - needed only if the speed is greater than 1G (2.5G or 10G) */ |
1251 | if (!((params->req_line_speed == SPEED_1000) || | 1242 | CL45_RD_OVER_CL22(bp, params->port, |
1252 | (params->req_line_speed == SPEED_100) || | ||
1253 | (params->req_line_speed == SPEED_10))) { | ||
1254 | CL45_RD_OVER_CL22(bp, params->port, | ||
1255 | params->phy_addr, | 1243 | params->phy_addr, |
1256 | MDIO_REG_BANK_SERDES_DIGITAL, | 1244 | MDIO_REG_BANK_SERDES_DIGITAL, |
1257 | MDIO_SERDES_DIGITAL_MISC1, ®_val); | 1245 | MDIO_SERDES_DIGITAL_MISC1, ®_val); |
1258 | /* clearing the speed value before setting the right speed */ | 1246 | /* clearing the speed value before setting the right speed */ |
1259 | reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK; | 1247 | DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); |
1248 | |||
1249 | reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | | ||
1250 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); | ||
1251 | |||
1252 | if (!((vars->line_speed == SPEED_1000) || | ||
1253 | (vars->line_speed == SPEED_100) || | ||
1254 | (vars->line_speed == SPEED_10))) { | ||
1255 | |||
1260 | reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | | 1256 | reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | |
1261 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); | 1257 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); |
1262 | if (params->req_line_speed == SPEED_10000) | 1258 | if (vars->line_speed == SPEED_10000) |
1263 | reg_val |= | 1259 | reg_val |= |
1264 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; | 1260 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; |
1265 | if (params->req_line_speed == SPEED_13000) | 1261 | if (vars->line_speed == SPEED_13000) |
1266 | reg_val |= | 1262 | reg_val |= |
1267 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; | 1263 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; |
1268 | CL45_WR_OVER_CL22(bp, params->port, | 1264 | } |
1265 | |||
1266 | CL45_WR_OVER_CL22(bp, params->port, | ||
1269 | params->phy_addr, | 1267 | params->phy_addr, |
1270 | MDIO_REG_BANK_SERDES_DIGITAL, | 1268 | MDIO_REG_BANK_SERDES_DIGITAL, |
1271 | MDIO_SERDES_DIGITAL_MISC1, reg_val); | 1269 | MDIO_SERDES_DIGITAL_MISC1, reg_val); |
1272 | } | 1270 | |
1273 | } | 1271 | } |
1274 | 1272 | ||
1275 | static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) | 1273 | static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) |
@@ -1295,48 +1293,49 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) | |||
1295 | MDIO_OVER_1G_UP3, 0); | 1293 | MDIO_OVER_1G_UP3, 0); |
1296 | } | 1294 | } |
1297 | 1295 | ||
1298 | static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, | 1296 | static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc) |
1299 | u32 *ieee_fc) | ||
1300 | { | 1297 | { |
1301 | struct bnx2x *bp = params->bp; | 1298 | *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; |
1302 | /* for AN, we are always publishing full duplex */ | ||
1303 | u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; | ||
1304 | |||
1305 | /* resolve pause mode and advertisement | 1299 | /* resolve pause mode and advertisement |
1306 | * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ | 1300 | * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ |
1307 | 1301 | ||
1308 | switch (params->req_flow_ctrl) { | 1302 | switch (params->req_flow_ctrl) { |
1309 | case FLOW_CTRL_AUTO: | 1303 | case FLOW_CTRL_AUTO: |
1310 | if (params->mtu <= MAX_MTU_SIZE) { | 1304 | if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) { |
1311 | an_adv |= | 1305 | *ieee_fc |= |
1312 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | 1306 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; |
1313 | } else { | 1307 | } else { |
1314 | an_adv |= | 1308 | *ieee_fc |= |
1315 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | 1309 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; |
1316 | } | 1310 | } |
1317 | break; | 1311 | break; |
1318 | case FLOW_CTRL_TX: | 1312 | case FLOW_CTRL_TX: |
1319 | an_adv |= | 1313 | *ieee_fc |= |
1320 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | 1314 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; |
1321 | break; | 1315 | break; |
1322 | 1316 | ||
1323 | case FLOW_CTRL_RX: | 1317 | case FLOW_CTRL_RX: |
1324 | case FLOW_CTRL_BOTH: | 1318 | case FLOW_CTRL_BOTH: |
1325 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | 1319 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; |
1326 | break; | 1320 | break; |
1327 | 1321 | ||
1328 | case FLOW_CTRL_NONE: | 1322 | case FLOW_CTRL_NONE: |
1329 | default: | 1323 | default: |
1330 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | 1324 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; |
1331 | break; | 1325 | break; |
1332 | } | 1326 | } |
1327 | } | ||
1333 | 1328 | ||
1334 | *ieee_fc = an_adv; | 1329 | static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, |
1330 | u32 ieee_fc) | ||
1331 | { | ||
1332 | struct bnx2x *bp = params->bp; | ||
1333 | /* for AN, we are always publishing full duplex */ | ||
1335 | 1334 | ||
1336 | CL45_WR_OVER_CL22(bp, params->port, | 1335 | CL45_WR_OVER_CL22(bp, params->port, |
1337 | params->phy_addr, | 1336 | params->phy_addr, |
1338 | MDIO_REG_BANK_COMBO_IEEE0, | 1337 | MDIO_REG_BANK_COMBO_IEEE0, |
1339 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv); | 1338 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc); |
1340 | } | 1339 | } |
1341 | 1340 | ||
1342 | static void bnx2x_restart_autoneg(struct link_params *params) | 1341 | static void bnx2x_restart_autoneg(struct link_params *params) |
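Editor's note: the bnx2x_calc_ieee_aneg_adv helper split out above now derives the FLOW_CTRL_AUTO advertisement from params->req_fc_auto_adv instead of the old MTU comparison, and always publishes full duplex. A compact sketch of the same resolution (following Table 28B-3 of 802.3ab-1999, as the comment says), using stand-in values for the MDIO advertisement bits (illustrative, not the real register encoding):

#include <stdio.h>

/* requested flow-control modes, mirroring the driver's FLOW_CTRL_* idea */
enum fc { FC_AUTO, FC_TX, FC_RX, FC_BOTH, FC_NONE };

/* stand-in advertisement bits; the real MDIO_COMBO_IEEE0_* values differ */
#define ADV_FULL_DUPLEX	(1 << 0)
#define ADV_PAUSE_SYM	(1 << 1)	/* symmetric PAUSE */
#define ADV_PAUSE_ASYM	(1 << 2)	/* asymmetric PAUSE (ASM_DIR) */
#define ADV_PAUSE_BOTH	(ADV_PAUSE_SYM | ADV_PAUSE_ASYM)

static unsigned int calc_ieee_aneg_adv(enum fc req_flow_ctrl, enum fc req_fc_auto_adv)
{
	/* for autoneg the driver always publishes full duplex */
	unsigned int ieee_fc = ADV_FULL_DUPLEX;

	switch (req_flow_ctrl) {
	case FC_AUTO:
		/* what to advertise in AUTO now comes from req_fc_auto_adv */
		ieee_fc |= (req_fc_auto_adv == FC_BOTH) ?
			   ADV_PAUSE_BOTH : ADV_PAUSE_ASYM;
		break;
	case FC_TX:
		ieee_fc |= ADV_PAUSE_ASYM;
		break;
	case FC_RX:
	case FC_BOTH:
		ieee_fc |= ADV_PAUSE_BOTH;
		break;
	case FC_NONE:
	default:
		break;			/* advertise no PAUSE ability */
	}
	return ieee_fc;
}

int main(void)
{
	printf("AUTO/BOTH -> 0x%x\n", calc_ieee_aneg_adv(FC_AUTO, FC_BOTH));
	printf("TX        -> 0x%x\n", calc_ieee_aneg_adv(FC_TX, FC_NONE));
	return 0;
}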
@@ -1382,7 +1381,8 @@ static void bnx2x_restart_autoneg(struct link_params *params) | |||
1382 | } | 1381 | } |
1383 | } | 1382 | } |
1384 | 1383 | ||
1385 | static void bnx2x_initialize_sgmii_process(struct link_params *params) | 1384 | static void bnx2x_initialize_sgmii_process(struct link_params *params, |
1385 | struct link_vars *vars) | ||
1386 | { | 1386 | { |
1387 | struct bnx2x *bp = params->bp; | 1387 | struct bnx2x *bp = params->bp; |
1388 | u16 control1; | 1388 | u16 control1; |
@@ -1406,7 +1406,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params) | |||
1406 | control1); | 1406 | control1); |
1407 | 1407 | ||
1408 | /* if forced speed */ | 1408 | /* if forced speed */ |
1409 | if (!(params->req_line_speed == SPEED_AUTO_NEG)) { | 1409 | if (!(vars->line_speed == SPEED_AUTO_NEG)) { |
1410 | /* set speed, disable autoneg */ | 1410 | /* set speed, disable autoneg */ |
1411 | u16 mii_control; | 1411 | u16 mii_control; |
1412 | 1412 | ||
@@ -1419,7 +1419,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params) | |||
1419 | MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| | 1419 | MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| |
1420 | MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); | 1420 | MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); |
1421 | 1421 | ||
1422 | switch (params->req_line_speed) { | 1422 | switch (vars->line_speed) { |
1423 | case SPEED_100: | 1423 | case SPEED_100: |
1424 | mii_control |= | 1424 | mii_control |= |
1425 | MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; | 1425 | MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; |
@@ -1433,8 +1433,8 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params) | |||
1433 | break; | 1433 | break; |
1434 | default: | 1434 | default: |
1435 | /* invalid speed for SGMII */ | 1435 | /* invalid speed for SGMII */ |
1436 | DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n", | 1436 | DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", |
1437 | params->req_line_speed); | 1437 | vars->line_speed); |
1438 | break; | 1438 | break; |
1439 | } | 1439 | } |
1440 | 1440 | ||
@@ -1460,20 +1460,20 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params) | |||
1460 | */ | 1460 | */ |
1461 | 1461 | ||
1462 | static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) | 1462 | static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) |
1463 | { | 1463 | { /* LD LP */ |
1464 | switch (pause_result) { /* ASYM P ASYM P */ | 1464 | switch (pause_result) { /* ASYM P ASYM P */ |
1465 | case 0xb: /* 1 0 1 1 */ | 1465 | case 0xb: /* 1 0 1 1 */ |
1466 | vars->flow_ctrl = FLOW_CTRL_TX; | 1466 | vars->flow_ctrl = FLOW_CTRL_TX; |
1467 | break; | 1467 | break; |
1468 | 1468 | ||
1469 | case 0xe: /* 1 1 1 0 */ | 1469 | case 0xe: /* 1 1 1 0 */ |
1470 | vars->flow_ctrl = FLOW_CTRL_RX; | 1470 | vars->flow_ctrl = FLOW_CTRL_RX; |
1471 | break; | 1471 | break; |
1472 | 1472 | ||
1473 | case 0x5: /* 0 1 0 1 */ | 1473 | case 0x5: /* 0 1 0 1 */ |
1474 | case 0x7: /* 0 1 1 1 */ | 1474 | case 0x7: /* 0 1 1 1 */ |
1475 | case 0xd: /* 1 1 0 1 */ | 1475 | case 0xd: /* 1 1 0 1 */ |
1476 | case 0xf: /* 1 1 1 1 */ | 1476 | case 0xf: /* 1 1 1 1 */ |
1477 | vars->flow_ctrl = FLOW_CTRL_BOTH; | 1477 | vars->flow_ctrl = FLOW_CTRL_BOTH; |
1478 | break; | 1478 | break; |
1479 | 1479 | ||
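Editor's note: bnx2x_pause_resolve, shown above with its new LD/LP column comment, takes a 4-bit pause_result with the local ASM_DIR/PAUSE advertisement pair in bits 3:2 and the link partner's pair in bits 1:0 and resolves it per Table 28B-3. A standalone sketch of that lookup plus one worked input (stand-in bit layout for the advertisement words, not driver code):

#include <stdio.h>

enum fc { FC_NONE, FC_TX, FC_RX, FC_BOTH };

/* pause_result: bit3 LD ASM_DIR, bit2 LD PAUSE, bit1 LP ASM_DIR, bit0 LP PAUSE */
static enum fc pause_resolve(unsigned int pause_result)
{
	switch (pause_result) {
	case 0xb:			/* LD asym only, LP pause+asym -> TX pause only */
		return FC_TX;
	case 0xe:			/* LD pause+asym, LP asym only -> RX pause only */
		return FC_RX;
	case 0x5: case 0x7:
	case 0xd: case 0xf:		/* both sides advertise symmetric PAUSE */
		return FC_BOTH;
	default:
		return FC_NONE;
	}
}

int main(void)
{
	/* local advertises PAUSE+ASM_DIR, partner advertises PAUSE only */
	unsigned int ld = 0x3, lp = 0x1;	/* here bit0 = PAUSE, bit1 = ASM_DIR */
	unsigned int pause_result = ((ld & 0x3) << 2) | (lp & 0x3);

	printf("pause_result = 0x%x -> mode %d\n",
	       pause_result, pause_resolve(pause_result));	/* 0xd -> FC_BOTH */
	return 0;
}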
@@ -1531,6 +1531,28 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params, | |||
1531 | DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", | 1531 | DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", |
1532 | pause_result); | 1532 | pause_result); |
1533 | bnx2x_pause_resolve(vars, pause_result); | 1533 | bnx2x_pause_resolve(vars, pause_result); |
1534 | if (vars->flow_ctrl == FLOW_CTRL_NONE && | ||
1535 | ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { | ||
1536 | bnx2x_cl45_read(bp, port, | ||
1537 | ext_phy_type, | ||
1538 | ext_phy_addr, | ||
1539 | MDIO_AN_DEVAD, | ||
1540 | MDIO_AN_REG_CL37_FC_LD, &ld_pause); | ||
1541 | |||
1542 | bnx2x_cl45_read(bp, port, | ||
1543 | ext_phy_type, | ||
1544 | ext_phy_addr, | ||
1545 | MDIO_AN_DEVAD, | ||
1546 | MDIO_AN_REG_CL37_FC_LP, &lp_pause); | ||
1547 | pause_result = (ld_pause & | ||
1548 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; | ||
1549 | pause_result |= (lp_pause & | ||
1550 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; | ||
1551 | |||
1552 | bnx2x_pause_resolve(vars, pause_result); | ||
1553 | DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n", | ||
1554 | pause_result); | ||
1555 | } | ||
1534 | } | 1556 | } |
1535 | return ret; | 1557 | return ret; |
1536 | } | 1558 | } |
@@ -1541,8 +1563,8 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params, | |||
1541 | u32 gp_status) | 1563 | u32 gp_status) |
1542 | { | 1564 | { |
1543 | struct bnx2x *bp = params->bp; | 1565 | struct bnx2x *bp = params->bp; |
1544 | u16 ld_pause; /* local driver */ | 1566 | u16 ld_pause; /* local driver */ |
1545 | u16 lp_pause; /* link partner */ | 1567 | u16 lp_pause; /* link partner */ |
1546 | u16 pause_result; | 1568 | u16 pause_result; |
1547 | 1569 | ||
1548 | vars->flow_ctrl = FLOW_CTRL_NONE; | 1570 | vars->flow_ctrl = FLOW_CTRL_NONE; |
@@ -1573,13 +1595,10 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params, | |||
1573 | (bnx2x_ext_phy_resove_fc(params, vars))) { | 1595 | (bnx2x_ext_phy_resove_fc(params, vars))) { |
1574 | return; | 1596 | return; |
1575 | } else { | 1597 | } else { |
1576 | vars->flow_ctrl = params->req_flow_ctrl; | 1598 | if (params->req_flow_ctrl == FLOW_CTRL_AUTO) |
1577 | if (vars->flow_ctrl == FLOW_CTRL_AUTO) { | 1599 | vars->flow_ctrl = params->req_fc_auto_adv; |
1578 | if (params->mtu <= MAX_MTU_SIZE) | 1600 | else |
1579 | vars->flow_ctrl = FLOW_CTRL_BOTH; | 1601 | vars->flow_ctrl = params->req_flow_ctrl; |
1580 | else | ||
1581 | vars->flow_ctrl = FLOW_CTRL_TX; | ||
1582 | } | ||
1583 | } | 1602 | } |
1584 | DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); | 1603 | DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); |
1585 | } | 1604 | } |
@@ -1590,6 +1609,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params, | |||
1590 | u32 gp_status) | 1609 | u32 gp_status) |
1591 | { | 1610 | { |
1592 | struct bnx2x *bp = params->bp; | 1611 | struct bnx2x *bp = params->bp; |
1612 | |||
1593 | u8 rc = 0; | 1613 | u8 rc = 0; |
1594 | vars->link_status = 0; | 1614 | vars->link_status = 0; |
1595 | 1615 | ||
@@ -1690,7 +1710,11 @@ static u8 bnx2x_link_settings_status(struct link_params *params, | |||
1690 | 1710 | ||
1691 | vars->link_status |= LINK_STATUS_SERDES_LINK; | 1711 | vars->link_status |= LINK_STATUS_SERDES_LINK; |
1692 | 1712 | ||
1693 | if (params->req_line_speed == SPEED_AUTO_NEG) { | 1713 | if ((params->req_line_speed == SPEED_AUTO_NEG) && |
1714 | ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == | ||
1715 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) || | ||
1716 | (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == | ||
1717 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) { | ||
1694 | vars->autoneg = AUTO_NEG_ENABLED; | 1718 | vars->autoneg = AUTO_NEG_ENABLED; |
1695 | 1719 | ||
1696 | if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { | 1720 | if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { |
@@ -1705,18 +1729,18 @@ static u8 bnx2x_link_settings_status(struct link_params *params, | |||
1705 | 1729 | ||
1706 | } | 1730 | } |
1707 | if (vars->flow_ctrl & FLOW_CTRL_TX) | 1731 | if (vars->flow_ctrl & FLOW_CTRL_TX) |
1708 | vars->link_status |= | 1732 | vars->link_status |= |
1709 | LINK_STATUS_TX_FLOW_CONTROL_ENABLED; | 1733 | LINK_STATUS_TX_FLOW_CONTROL_ENABLED; |
1710 | 1734 | ||
1711 | if (vars->flow_ctrl & FLOW_CTRL_RX) | 1735 | if (vars->flow_ctrl & FLOW_CTRL_RX) |
1712 | vars->link_status |= | 1736 | vars->link_status |= |
1713 | LINK_STATUS_RX_FLOW_CONTROL_ENABLED; | 1737 | LINK_STATUS_RX_FLOW_CONTROL_ENABLED; |
1714 | 1738 | ||
1715 | } else { /* link_down */ | 1739 | } else { /* link_down */ |
1716 | DP(NETIF_MSG_LINK, "phy link down\n"); | 1740 | DP(NETIF_MSG_LINK, "phy link down\n"); |
1717 | 1741 | ||
1718 | vars->phy_link_up = 0; | 1742 | vars->phy_link_up = 0; |
1719 | vars->line_speed = 0; | 1743 | |
1720 | vars->duplex = DUPLEX_FULL; | 1744 | vars->duplex = DUPLEX_FULL; |
1721 | vars->flow_ctrl = FLOW_CTRL_NONE; | 1745 | vars->flow_ctrl = FLOW_CTRL_NONE; |
1722 | vars->autoneg = AUTO_NEG_DISABLED; | 1746 | vars->autoneg = AUTO_NEG_DISABLED; |
@@ -1817,15 +1841,15 @@ static u8 bnx2x_emac_program(struct link_params *params, | |||
1817 | } | 1841 | } |
1818 | 1842 | ||
1819 | /*****************************************************************************/ | 1843 | /*****************************************************************************/ |
1820 | /* External Phy section */ | 1844 | /* External Phy section */ |
1821 | /*****************************************************************************/ | 1845 | /*****************************************************************************/ |
1822 | static void bnx2x_hw_reset(struct bnx2x *bp) | 1846 | static void bnx2x_hw_reset(struct bnx2x *bp, u8 port) |
1823 | { | 1847 | { |
1824 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | 1848 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, |
1825 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | 1849 | MISC_REGISTERS_GPIO_OUTPUT_LOW, port); |
1826 | msleep(1); | 1850 | msleep(1); |
1827 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | 1851 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, |
1828 | MISC_REGISTERS_GPIO_OUTPUT_HIGH); | 1852 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); |
1829 | } | 1853 | } |
1830 | 1854 | ||
1831 | static void bnx2x_ext_phy_reset(struct link_params *params, | 1855 | static void bnx2x_ext_phy_reset(struct link_params *params, |
@@ -1854,10 +1878,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params, | |||
1854 | 1878 | ||
1855 | /* Restore normal power mode*/ | 1879 | /* Restore normal power mode*/ |
1856 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 1880 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
1857 | MISC_REGISTERS_GPIO_OUTPUT_HIGH); | 1881 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, |
1882 | params->port); | ||
1858 | 1883 | ||
1859 | /* HW reset */ | 1884 | /* HW reset */ |
1860 | bnx2x_hw_reset(bp); | 1885 | bnx2x_hw_reset(bp, params->port); |
1861 | 1886 | ||
1862 | bnx2x_cl45_write(bp, params->port, | 1887 | bnx2x_cl45_write(bp, params->port, |
1863 | ext_phy_type, | 1888 | ext_phy_type, |
@@ -1869,7 +1894,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params, | |||
1869 | /* Unset Low Power Mode and SW reset */ | 1894 | /* Unset Low Power Mode and SW reset */ |
1870 | /* Restore normal power mode*/ | 1895 | /* Restore normal power mode*/ |
1871 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 1896 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
1872 | MISC_REGISTERS_GPIO_OUTPUT_HIGH); | 1897 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, |
1898 | params->port); | ||
1873 | 1899 | ||
1874 | DP(NETIF_MSG_LINK, "XGXS 8072\n"); | 1900 | DP(NETIF_MSG_LINK, "XGXS 8072\n"); |
1875 | bnx2x_cl45_write(bp, params->port, | 1901 | bnx2x_cl45_write(bp, params->port, |
@@ -1887,19 +1913,14 @@ static void bnx2x_ext_phy_reset(struct link_params *params, | |||
1887 | 1913 | ||
1888 | /* Restore normal power mode*/ | 1914 | /* Restore normal power mode*/ |
1889 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 1915 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
1890 | MISC_REGISTERS_GPIO_OUTPUT_HIGH); | 1916 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, |
1917 | params->port); | ||
1891 | 1918 | ||
1892 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | 1919 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, |
1893 | MISC_REGISTERS_GPIO_OUTPUT_HIGH); | 1920 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, |
1921 | params->port); | ||
1894 | 1922 | ||
1895 | DP(NETIF_MSG_LINK, "XGXS 8073\n"); | 1923 | DP(NETIF_MSG_LINK, "XGXS 8073\n"); |
1896 | bnx2x_cl45_write(bp, | ||
1897 | params->port, | ||
1898 | ext_phy_type, | ||
1899 | ext_phy_addr, | ||
1900 | MDIO_PMA_DEVAD, | ||
1901 | MDIO_PMA_REG_CTRL, | ||
1902 | 1<<15); | ||
1903 | } | 1924 | } |
1904 | break; | 1925 | break; |
1905 | 1926 | ||
@@ -1908,10 +1929,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params, | |||
1908 | 1929 | ||
1909 | /* Restore normal power mode*/ | 1930 | /* Restore normal power mode*/ |
1910 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 1931 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
1911 | MISC_REGISTERS_GPIO_OUTPUT_HIGH); | 1932 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, |
1933 | params->port); | ||
1912 | 1934 | ||
1913 | /* HW reset */ | 1935 | /* HW reset */ |
1914 | bnx2x_hw_reset(bp); | 1936 | bnx2x_hw_reset(bp, params->port); |
1915 | 1937 | ||
1916 | break; | 1938 | break; |
1917 | 1939 | ||
@@ -1934,7 +1956,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params, | |||
1934 | 1956 | ||
1935 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: | 1957 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: |
1936 | DP(NETIF_MSG_LINK, "SerDes 5482\n"); | 1958 | DP(NETIF_MSG_LINK, "SerDes 5482\n"); |
1937 | bnx2x_hw_reset(bp); | 1959 | bnx2x_hw_reset(bp, params->port); |
1938 | break; | 1960 | break; |
1939 | 1961 | ||
1940 | default: | 1962 | default: |
@@ -2098,42 +2120,45 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params) | |||
2098 | 2120 | ||
2099 | } | 2121 | } |
2100 | 2122 | ||
2101 | static void bnx2x_bcm8073_external_rom_boot(struct link_params *params) | 2123 | static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port, |
2124 | u8 ext_phy_addr) | ||
2102 | { | 2125 | { |
2103 | struct bnx2x *bp = params->bp; | 2126 | u16 fw_ver1, fw_ver2; |
2104 | u8 port = params->port; | 2127 | /* Boot port from external ROM */ |
2105 | u8 ext_phy_addr = ((params->ext_phy_config & | ||
2106 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
2107 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
2108 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); | ||
2109 | u16 fw_ver1, fw_ver2, val; | ||
2110 | /* Need to wait 100ms after reset */ | ||
2111 | msleep(100); | ||
2112 | /* Boot port from external ROM */ | ||
2113 | /* EDC grst */ | 2128 | /* EDC grst */ |
2114 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2129 | bnx2x_cl45_write(bp, port, |
2130 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
2131 | ext_phy_addr, | ||
2115 | MDIO_PMA_DEVAD, | 2132 | MDIO_PMA_DEVAD, |
2116 | MDIO_PMA_REG_GEN_CTRL, | 2133 | MDIO_PMA_REG_GEN_CTRL, |
2117 | 0x0001); | 2134 | 0x0001); |
2118 | 2135 | ||
2119 | /* ucode reboot and rst */ | 2136 | /* ucode reboot and rst */ |
2120 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2137 | bnx2x_cl45_write(bp, port, |
2138 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
2139 | ext_phy_addr, | ||
2121 | MDIO_PMA_DEVAD, | 2140 | MDIO_PMA_DEVAD, |
2122 | MDIO_PMA_REG_GEN_CTRL, | 2141 | MDIO_PMA_REG_GEN_CTRL, |
2123 | 0x008c); | 2142 | 0x008c); |
2124 | 2143 | ||
2125 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2144 | bnx2x_cl45_write(bp, port, |
2145 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
2146 | ext_phy_addr, | ||
2126 | MDIO_PMA_DEVAD, | 2147 | MDIO_PMA_DEVAD, |
2127 | MDIO_PMA_REG_MISC_CTRL1, 0x0001); | 2148 | MDIO_PMA_REG_MISC_CTRL1, 0x0001); |
2128 | 2149 | ||
2129 | /* Reset internal microprocessor */ | 2150 | /* Reset internal microprocessor */ |
2130 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2151 | bnx2x_cl45_write(bp, port, |
2152 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
2153 | ext_phy_addr, | ||
2131 | MDIO_PMA_DEVAD, | 2154 | MDIO_PMA_DEVAD, |
2132 | MDIO_PMA_REG_GEN_CTRL, | 2155 | MDIO_PMA_REG_GEN_CTRL, |
2133 | MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); | 2156 | MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); |
2134 | 2157 | ||
2135 | /* Release srst bit */ | 2158 | /* Release srst bit */ |
2136 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2159 | bnx2x_cl45_write(bp, port, |
2160 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
2161 | ext_phy_addr, | ||
2137 | MDIO_PMA_DEVAD, | 2162 | MDIO_PMA_DEVAD, |
2138 | MDIO_PMA_REG_GEN_CTRL, | 2163 | MDIO_PMA_REG_GEN_CTRL, |
2139 | MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); | 2164 | MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); |
@@ -2142,35 +2167,52 @@ static void bnx2x_bcm8073_external_rom_boot(struct link_params *params) | |||
2142 | msleep(100); | 2167 | msleep(100); |
2143 | 2168 | ||
2144 | /* Clear ser_boot_ctl bit */ | 2169 | /* Clear ser_boot_ctl bit */ |
2145 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2170 | bnx2x_cl45_write(bp, port, |
2171 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
2172 | ext_phy_addr, | ||
2146 | MDIO_PMA_DEVAD, | 2173 | MDIO_PMA_DEVAD, |
2147 | MDIO_PMA_REG_MISC_CTRL1, 0x0000); | 2174 | MDIO_PMA_REG_MISC_CTRL1, 0x0000); |
2148 | 2175 | ||
2149 | bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, | 2176 | bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, |
2150 | MDIO_PMA_DEVAD, | 2177 | ext_phy_addr, |
2151 | MDIO_PMA_REG_ROM_VER1, &fw_ver1); | 2178 | MDIO_PMA_DEVAD, |
2152 | bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, | 2179 | MDIO_PMA_REG_ROM_VER1, &fw_ver1); |
2153 | MDIO_PMA_DEVAD, | 2180 | bnx2x_cl45_read(bp, port, |
2154 | MDIO_PMA_REG_ROM_VER2, &fw_ver2); | 2181 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, |
2182 | ext_phy_addr, | ||
2183 | MDIO_PMA_DEVAD, | ||
2184 | MDIO_PMA_REG_ROM_VER2, &fw_ver2); | ||
2155 | DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2); | 2185 | DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2); |
2156 | 2186 | ||
2157 | /* Only set bit 10 = 1 (Tx power down) */ | 2187 | } |
2158 | bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, | ||
2159 | MDIO_PMA_DEVAD, | ||
2160 | MDIO_PMA_REG_TX_POWER_DOWN, &val); | ||
2161 | 2188 | ||
2189 | static void bnx2x_bcm807x_force_10G(struct link_params *params) | ||
2190 | { | ||
2191 | struct bnx2x *bp = params->bp; | ||
2192 | u8 port = params->port; | ||
2193 | u8 ext_phy_addr = ((params->ext_phy_config & | ||
2194 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
2195 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
2196 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); | ||
2197 | |||
2198 | /* Force KR or KX */ | ||
2162 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2199 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, |
2163 | MDIO_PMA_DEVAD, | 2200 | MDIO_PMA_DEVAD, |
2164 | MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10)); | 2201 | MDIO_PMA_REG_CTRL, |
2165 | 2202 | 0x2040); | |
2166 | msleep(600); | ||
2167 | /* Release bit 10 (Release Tx power down) */ | ||
2168 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2203 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, |
2169 | MDIO_PMA_DEVAD, | 2204 | MDIO_PMA_DEVAD, |
2170 | MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); | 2205 | MDIO_PMA_REG_10G_CTRL2, |
2171 | 2206 | 0x000b); | |
2207 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | ||
2208 | MDIO_PMA_DEVAD, | ||
2209 | MDIO_PMA_REG_BCM_CTRL, | ||
2210 | 0x0000); | ||
2211 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | ||
2212 | MDIO_AN_DEVAD, | ||
2213 | MDIO_AN_REG_CTRL, | ||
2214 | 0x0000); | ||
2172 | } | 2215 | } |
2173 | |||
2174 | static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) | 2216 | static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) |
2175 | { | 2217 | { |
2176 | struct bnx2x *bp = params->bp; | 2218 | struct bnx2x *bp = params->bp; |
@@ -2236,32 +2278,51 @@ static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) | |||
2236 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2278 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, |
2237 | MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); | 2279 | MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); |
2238 | } | 2280 | } |
2239 | static void bnx2x_bcm807x_force_10G(struct link_params *params) | 2281 | |
2282 | static void bnx2x_8073_set_pause_cl37(struct link_params *params, | ||
2283 | struct link_vars *vars) | ||
2240 | { | 2284 | { |
2285 | |||
2241 | struct bnx2x *bp = params->bp; | 2286 | struct bnx2x *bp = params->bp; |
2242 | u8 port = params->port; | 2287 | u16 cl37_val; |
2243 | u8 ext_phy_addr = ((params->ext_phy_config & | 2288 | u8 ext_phy_addr = ((params->ext_phy_config & |
2244 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | 2289 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> |
2245 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | 2290 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); |
2246 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); | 2291 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); |
2247 | 2292 | ||
2248 | /* Force KR or KX */ | 2293 | bnx2x_cl45_read(bp, params->port, |
2249 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2294 | ext_phy_type, |
2250 | MDIO_PMA_DEVAD, | 2295 | ext_phy_addr, |
2251 | MDIO_PMA_REG_CTRL, | 2296 | MDIO_AN_DEVAD, |
2252 | 0x2040); | 2297 | MDIO_AN_REG_CL37_FC_LD, &cl37_val); |
2253 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2298 | |
2254 | MDIO_PMA_DEVAD, | 2299 | cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; |
2255 | MDIO_PMA_REG_10G_CTRL2, | 2300 | /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ |
2256 | 0x000b); | 2301 | |
2257 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2302 | if ((vars->ieee_fc & |
2258 | MDIO_PMA_DEVAD, | 2303 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == |
2259 | MDIO_PMA_REG_BCM_CTRL, | 2304 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { |
2260 | 0x0000); | 2305 | cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; |
2261 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | 2306 | } |
2307 | if ((vars->ieee_fc & | ||
2308 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == | ||
2309 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { | ||
2310 | cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
2311 | } | ||
2312 | if ((vars->ieee_fc & | ||
2313 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == | ||
2314 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { | ||
2315 | cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
2316 | } | ||
2317 | DP(NETIF_MSG_LINK, | ||
2318 | "Ext phy AN advertize cl37 0x%x\n", cl37_val); | ||
2319 | |||
2320 | bnx2x_cl45_write(bp, params->port, | ||
2321 | ext_phy_type, | ||
2322 | ext_phy_addr, | ||
2262 | MDIO_AN_DEVAD, | 2323 | MDIO_AN_DEVAD, |
2263 | MDIO_AN_REG_CTRL, | 2324 | MDIO_AN_REG_CL37_FC_LD, cl37_val); |
2264 | 0x0000); | 2325 | msleep(500); |
2265 | } | 2326 | } |
2266 | 2327 | ||
2267 | static void bnx2x_ext_phy_set_pause(struct link_params *params, | 2328 | static void bnx2x_ext_phy_set_pause(struct link_params *params, |
@@ -2282,13 +2343,16 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, | |||
2282 | MDIO_AN_REG_ADV_PAUSE, &val); | 2343 | MDIO_AN_REG_ADV_PAUSE, &val); |
2283 | 2344 | ||
2284 | val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; | 2345 | val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; |
2346 | |||
2285 | /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ | 2347 | /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ |
2286 | 2348 | ||
2287 | if (vars->ieee_fc & | 2349 | if ((vars->ieee_fc & |
2350 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == | ||
2288 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { | 2351 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { |
2289 | val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; | 2352 | val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; |
2290 | } | 2353 | } |
2291 | if (vars->ieee_fc & | 2354 | if ((vars->ieee_fc & |
2355 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == | ||
2292 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { | 2356 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { |
2293 | val |= | 2357 | val |= |
2294 | MDIO_AN_REG_ADV_PAUSE_PAUSE; | 2358 | MDIO_AN_REG_ADV_PAUSE_PAUSE; |
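Editor's note: both the new bnx2x_8073_set_pause_cl37 and the hunk above tighten the advertisement checks from "if (x & MASK)" to "if ((x & MASK) == MASK)". The ..._PAUSE_BOTH mask combines the symmetric and asymmetric pause bits, so the old form also fired when only one of the two was advertised; the exact comparison is presumably the point of the change. A tiny illustration (plain C, stand-in bit positions):

#include <stdio.h>

#define PAUSE_SYM	(1 << 7)
#define PAUSE_ASYM	(1 << 8)
#define PAUSE_BOTH	(PAUSE_SYM | PAUSE_ASYM)	/* two-bit mask */

int main(void)
{
	unsigned int adv = PAUSE_ASYM;		/* asymmetric pause only */

	/* old test: true because one of the two bits is set */
	printf("adv & BOTH          -> %d\n", !!(adv & PAUSE_BOTH));

	/* new test: true only when both bits are advertised */
	printf("(adv & BOTH)==BOTH  -> %d\n", (adv & PAUSE_BOTH) == PAUSE_BOTH);
	return 0;
}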
@@ -2302,6 +2366,65 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, | |||
2302 | MDIO_AN_REG_ADV_PAUSE, val); | 2366 | MDIO_AN_REG_ADV_PAUSE, val); |
2303 | } | 2367 | } |
2304 | 2368 | ||
2369 | |||
2370 | static void bnx2x_init_internal_phy(struct link_params *params, | ||
2371 | struct link_vars *vars) | ||
2372 | { | ||
2373 | struct bnx2x *bp = params->bp; | ||
2374 | u8 port = params->port; | ||
2375 | if (!(vars->phy_flags & PHY_SGMII_FLAG)) { | ||
2376 | u16 bank, rx_eq; | ||
2377 | |||
2378 | rx_eq = ((params->serdes_config & | ||
2379 | PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >> | ||
2380 | PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT); | ||
2381 | |||
2382 | DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq); | ||
2383 | for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL; | ||
2384 | bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) { | ||
2385 | CL45_WR_OVER_CL22(bp, port, | ||
2386 | params->phy_addr, | ||
2387 | bank , | ||
2388 | MDIO_RX0_RX_EQ_BOOST, | ||
2389 | ((rx_eq & | ||
2390 | MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) | | ||
2391 | MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL)); | ||
2392 | } | ||
2393 | |||
2394 | /* forced speed requested? */ | ||
2395 | if (vars->line_speed != SPEED_AUTO_NEG) { | ||
2396 | DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); | ||
2397 | |||
2398 | /* disable autoneg */ | ||
2399 | bnx2x_set_autoneg(params, vars); | ||
2400 | |||
2401 | /* program speed and duplex */ | ||
2402 | bnx2x_program_serdes(params, vars); | ||
2403 | |||
2404 | } else { /* AN_mode */ | ||
2405 | DP(NETIF_MSG_LINK, "not SGMII, AN\n"); | ||
2406 | |||
2407 | /* AN enabled */ | ||
2408 | bnx2x_set_brcm_cl37_advertisment(params); | ||
2409 | |||
2410 | /* program duplex & pause advertisement (for aneg) */ | ||
2411 | bnx2x_set_ieee_aneg_advertisment(params, | ||
2412 | vars->ieee_fc); | ||
2413 | |||
2414 | /* enable autoneg */ | ||
2415 | bnx2x_set_autoneg(params, vars); | ||
2416 | |||
2417 | /* enable and restart AN */ | ||
2418 | bnx2x_restart_autoneg(params); | ||
2419 | } | ||
2420 | |||
2421 | } else { /* SGMII mode */ | ||
2422 | DP(NETIF_MSG_LINK, "SGMII\n"); | ||
2423 | |||
2424 | bnx2x_initialize_sgmii_process(params, vars); | ||
2425 | } | ||
2426 | } | ||
2427 | |||
2305 | static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | 2428 | static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) |
2306 | { | 2429 | { |
2307 | struct bnx2x *bp = params->bp; | 2430 | struct bnx2x *bp = params->bp; |
@@ -2343,7 +2466,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2343 | 2466 | ||
2344 | switch (ext_phy_type) { | 2467 | switch (ext_phy_type) { |
2345 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | 2468 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: |
2346 | DP(NETIF_MSG_LINK, "XGXS Direct\n"); | ||
2347 | break; | 2469 | break; |
2348 | 2470 | ||
2349 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | 2471 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: |
@@ -2419,7 +2541,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2419 | ext_phy_type, | 2541 | ext_phy_type, |
2420 | ext_phy_addr, | 2542 | ext_phy_addr, |
2421 | MDIO_AN_DEVAD, | 2543 | MDIO_AN_DEVAD, |
2422 | MDIO_AN_REG_CL37_FD, | 2544 | MDIO_AN_REG_CL37_FC_LP, |
2423 | 0x0020); | 2545 | 0x0020); |
2424 | /* Enable CL37 AN */ | 2546 | /* Enable CL37 AN */ |
2425 | bnx2x_cl45_write(bp, params->port, | 2547 | bnx2x_cl45_write(bp, params->port, |
@@ -2458,54 +2580,43 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2458 | rx_alarm_ctrl_val = 0x400; | 2580 | rx_alarm_ctrl_val = 0x400; |
2459 | lasi_ctrl_val = 0x0004; | 2581 | lasi_ctrl_val = 0x0004; |
2460 | } else { | 2582 | } else { |
2461 | /* In 8073, port1 is directed through emac0 and | ||
2462 | * port0 is directed through emac1 | ||
2463 | */ | ||
2464 | rx_alarm_ctrl_val = (1<<2); | 2583 | rx_alarm_ctrl_val = (1<<2); |
2465 | /*lasi_ctrl_val = 0x0005;*/ | ||
2466 | lasi_ctrl_val = 0x0004; | 2584 | lasi_ctrl_val = 0x0004; |
2467 | } | 2585 | } |
2468 | 2586 | ||
2469 | /* Wait for soft reset to get cleared upto 1 sec */ | 2587 | /* enable LASI */ |
2470 | for (cnt = 0; cnt < 1000; cnt++) { | 2588 | bnx2x_cl45_write(bp, params->port, |
2471 | bnx2x_cl45_read(bp, params->port, | 2589 | ext_phy_type, |
2472 | ext_phy_type, | 2590 | ext_phy_addr, |
2473 | ext_phy_addr, | 2591 | MDIO_PMA_DEVAD, |
2474 | MDIO_PMA_DEVAD, | 2592 | MDIO_PMA_REG_RX_ALARM_CTRL, |
2475 | MDIO_PMA_REG_CTRL, | 2593 | rx_alarm_ctrl_val); |
2476 | &ctrl); | 2594 | |
2477 | if (!(ctrl & (1<<15))) | 2595 | bnx2x_cl45_write(bp, params->port, |
2478 | break; | 2596 | ext_phy_type, |
2479 | msleep(1); | 2597 | ext_phy_addr, |
2480 | } | 2598 | MDIO_PMA_DEVAD, |
2481 | DP(NETIF_MSG_LINK, | 2599 | MDIO_PMA_REG_LASI_CTRL, |
2482 | "807x control reg 0x%x (after %d ms)\n", | 2600 | lasi_ctrl_val); |
2483 | ctrl, cnt); | 2601 | |
2602 | bnx2x_8073_set_pause_cl37(params, vars); | ||
2484 | 2603 | ||
2485 | if (ext_phy_type == | 2604 | if (ext_phy_type == |
2486 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){ | 2605 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){ |
2487 | bnx2x_bcm8072_external_rom_boot(params); | 2606 | bnx2x_bcm8072_external_rom_boot(params); |
2488 | } else { | 2607 | } else { |
2489 | bnx2x_bcm8073_external_rom_boot(params); | 2608 | |
2490 | /* In case of 8073 with long xaui lines, | 2609 | /* In case of 8073 with long xaui lines, |
2491 | don't set the 8073 xaui low power*/ | 2610 | don't set the 8073 xaui low power*/ |
2492 | bnx2x_bcm8073_set_xaui_low_power_mode(params); | 2611 | bnx2x_bcm8073_set_xaui_low_power_mode(params); |
2493 | } | 2612 | } |
2494 | 2613 | ||
2495 | /* enable LASI */ | 2614 | bnx2x_cl45_read(bp, params->port, |
2496 | bnx2x_cl45_write(bp, params->port, | 2615 | ext_phy_type, |
2497 | ext_phy_type, | 2616 | ext_phy_addr, |
2498 | ext_phy_addr, | 2617 | MDIO_PMA_DEVAD, |
2499 | MDIO_PMA_DEVAD, | 2618 | 0xca13, |
2500 | MDIO_PMA_REG_RX_ALARM_CTRL, | 2619 | &tmp1); |
2501 | rx_alarm_ctrl_val); | ||
2502 | |||
2503 | bnx2x_cl45_write(bp, params->port, | ||
2504 | ext_phy_type, | ||
2505 | ext_phy_addr, | ||
2506 | MDIO_PMA_DEVAD, | ||
2507 | MDIO_PMA_REG_LASI_CTRL, | ||
2508 | lasi_ctrl_val); | ||
2509 | 2620 | ||
2510 | bnx2x_cl45_read(bp, params->port, | 2621 | bnx2x_cl45_read(bp, params->port, |
2511 | ext_phy_type, | 2622 | ext_phy_type, |
@@ -2519,12 +2630,21 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2519 | /* If this is forced speed, set to KR or KX | 2630 | /* If this is forced speed, set to KR or KX |
2520 | * (all other are not supported) | 2631 | * (all other are not supported) |
2521 | */ | 2632 | */ |
2522 | if (!(params->req_line_speed == SPEED_AUTO_NEG)) { | 2633 | if (params->loopback_mode == LOOPBACK_EXT) { |
2523 | if (params->req_line_speed == SPEED_10000) { | 2634 | bnx2x_bcm807x_force_10G(params); |
2524 | bnx2x_bcm807x_force_10G(params); | 2635 | DP(NETIF_MSG_LINK, |
2525 | DP(NETIF_MSG_LINK, | 2636 | "Forced speed 10G on 807X\n"); |
2526 | "Forced speed 10G on 807X\n"); | 2637 | break; |
2527 | break; | 2638 | } else { |
2639 | bnx2x_cl45_write(bp, params->port, | ||
2640 | ext_phy_type, ext_phy_addr, | ||
2641 | MDIO_PMA_DEVAD, | ||
2642 | MDIO_PMA_REG_BCM_CTRL, | ||
2643 | 0x0002); | ||
2644 | } | ||
2645 | if (params->req_line_speed != SPEED_AUTO_NEG) { | ||
2646 | if (params->req_line_speed == SPEED_10000) { | ||
2647 | val = (1<<7); | ||
2528 | } else if (params->req_line_speed == | 2648 | } else if (params->req_line_speed == |
2529 | SPEED_2500) { | 2649 | SPEED_2500) { |
2530 | val = (1<<5); | 2650 | val = (1<<5); |
@@ -2539,11 +2659,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2539 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) | 2659 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) |
2540 | val |= (1<<7); | 2660 | val |= (1<<7); |
2541 | 2661 | ||
2662 | /* Note that 2.5G works only when | ||
2663 | used with 1G advertisment */ | ||
2542 | if (params->speed_cap_mask & | 2664 | if (params->speed_cap_mask & |
2543 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) | 2665 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | |
2666 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) | ||
2544 | val |= (1<<5); | 2667 | val |= (1<<5); |
2545 | DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); | 2668 | DP(NETIF_MSG_LINK, |
2546 | /*val = ((1<<5)|(1<<7));*/ | 2669 | "807x autoneg val = 0x%x\n", val); |
2547 | } | 2670 | } |
2548 | 2671 | ||
2549 | bnx2x_cl45_write(bp, params->port, | 2672 | bnx2x_cl45_write(bp, params->port, |
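In the autoneg branch of this hunk the advertisement value written to the PHY is assembled from the board's speed-capability mask: bit 7 advertises 10G and bit 5 advertises 1G, and the new comment notes that 2.5G only works together with the 1G advertisement, so the 2.5G capability bit now also sets bit 5. A hedged restatement of that assembly; the capability constants below are illustrative, not the shared-memory definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative capability-mask bits; the real values live in the bnx2x
 * shared-memory header and may differ. */
#define CAP_1G   (1u << 0)
#define CAP_2_5G (1u << 1)
#define CAP_10G  (1u << 2)

/* Build the autoneg advertisement value: bit 7 = 10G, bit 5 = 1G.
 * 2.5G rides on the 1G advertisement, so either capability sets bit 5. */
static uint16_t build_an_adv(uint32_t speed_cap_mask)
{
    uint16_t val = 0;

    if (speed_cap_mask & CAP_10G)
        val |= (1u << 7);
    if (speed_cap_mask & (CAP_1G | CAP_2_5G))
        val |= (1u << 5);
    return val;
}

int main(void)
{
    printf("adv for 1G+10G:    0x%x\n", build_an_adv(CAP_1G | CAP_10G));
    printf("adv for 2.5G only: 0x%x\n", build_an_adv(CAP_2_5G));
    return 0;
}
```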
@@ -2554,20 +2677,19 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2554 | 2677 | ||
2555 | if (ext_phy_type == | 2678 | if (ext_phy_type == |
2556 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { | 2679 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { |
2557 | /* Disable 2.5Ghz */ | 2680 | |
2558 | bnx2x_cl45_read(bp, params->port, | 2681 | bnx2x_cl45_read(bp, params->port, |
2559 | ext_phy_type, | 2682 | ext_phy_type, |
2560 | ext_phy_addr, | 2683 | ext_phy_addr, |
2561 | MDIO_AN_DEVAD, | 2684 | MDIO_AN_DEVAD, |
2562 | 0x8329, &tmp1); | 2685 | 0x8329, &tmp1); |
2563 | /* SUPPORT_SPEED_CAPABILITY | 2686 | |
2564 | (Due to the nature of the link order, its not | 2687 | if (((params->speed_cap_mask & |
2565 | possible to enable 2.5G within the autoneg | 2688 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && |
2566 | capabilities) | 2689 | (params->req_line_speed == |
2567 | if (params->speed_cap_mask & | 2690 | SPEED_AUTO_NEG)) || |
2568 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) | 2691 | (params->req_line_speed == |
2569 | */ | 2692 | SPEED_2500)) { |
2570 | if (params->req_line_speed == SPEED_2500) { | ||
2571 | u16 phy_ver; | 2693 | u16 phy_ver; |
2572 | /* Allow 2.5G for A1 and above */ | 2694 | /* Allow 2.5G for A1 and above */ |
2573 | bnx2x_cl45_read(bp, params->port, | 2695 | bnx2x_cl45_read(bp, params->port, |
@@ -2575,49 +2697,53 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2575 | ext_phy_addr, | 2697 | ext_phy_addr, |
2576 | MDIO_PMA_DEVAD, | 2698 | MDIO_PMA_DEVAD, |
2577 | 0xc801, &phy_ver); | 2699 | 0xc801, &phy_ver); |
2578 | 2700 | DP(NETIF_MSG_LINK, "Add 2.5G\n"); | |
2579 | if (phy_ver > 0) | 2701 | if (phy_ver > 0) |
2580 | tmp1 |= 1; | 2702 | tmp1 |= 1; |
2581 | else | 2703 | else |
2582 | tmp1 &= 0xfffe; | 2704 | tmp1 &= 0xfffe; |
2583 | } | 2705 | } else { |
2584 | else | 2706 | DP(NETIF_MSG_LINK, "Disable 2.5G\n"); |
2585 | tmp1 &= 0xfffe; | 2707 | tmp1 &= 0xfffe; |
2708 | } | ||
2586 | 2709 | ||
2587 | bnx2x_cl45_write(bp, params->port, | 2710 | bnx2x_cl45_write(bp, params->port, |
2588 | ext_phy_type, | 2711 | ext_phy_type, |
2589 | ext_phy_addr, | 2712 | ext_phy_addr, |
2590 | MDIO_AN_DEVAD, | 2713 | MDIO_AN_DEVAD, |
2591 | 0x8329, tmp1); | 2714 | 0x8329, tmp1); |
2592 | } | 2715 | } |
2593 | /* Add support for CL37 (passive mode) I */ | 2716 | |
2594 | bnx2x_cl45_write(bp, params->port, | 2717 | /* Add support for CL37 (passive mode) II */ |
2718 | |||
2719 | bnx2x_cl45_read(bp, params->port, | ||
2595 | ext_phy_type, | 2720 | ext_phy_type, |
2596 | ext_phy_addr, | 2721 | ext_phy_addr, |
2597 | MDIO_AN_DEVAD, | 2722 | MDIO_AN_DEVAD, |
2598 | MDIO_AN_REG_CL37_CL73, 0x040c); | 2723 | MDIO_AN_REG_CL37_FC_LD, |
2599 | /* Add support for CL37 (passive mode) II */ | 2724 | &tmp1); |
2725 | |||
2600 | bnx2x_cl45_write(bp, params->port, | 2726 | bnx2x_cl45_write(bp, params->port, |
2601 | ext_phy_type, | 2727 | ext_phy_type, |
2602 | ext_phy_addr, | 2728 | ext_phy_addr, |
2603 | MDIO_AN_DEVAD, | 2729 | MDIO_AN_DEVAD, |
2604 | MDIO_AN_REG_CL37_FD, 0x20); | 2730 | MDIO_AN_REG_CL37_FC_LD, (tmp1 | |
2731 | ((params->req_duplex == DUPLEX_FULL) ? | ||
2732 | 0x20 : 0x40))); | ||
2733 | |||
2605 | /* Add support for CL37 (passive mode) III */ | 2734 | /* Add support for CL37 (passive mode) III */ |
2606 | bnx2x_cl45_write(bp, params->port, | 2735 | bnx2x_cl45_write(bp, params->port, |
2607 | ext_phy_type, | 2736 | ext_phy_type, |
2608 | ext_phy_addr, | 2737 | ext_phy_addr, |
2609 | MDIO_AN_DEVAD, | 2738 | MDIO_AN_DEVAD, |
2610 | MDIO_AN_REG_CL37_AN, 0x1000); | 2739 | MDIO_AN_REG_CL37_AN, 0x1000); |
2611 | /* Restart autoneg */ | ||
2612 | msleep(500); | ||
2613 | 2740 | ||
2614 | if (ext_phy_type == | 2741 | if (ext_phy_type == |
2615 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { | 2742 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { |
2616 | 2743 | /* The SNR will improve about 2db by changing | |
2617 | /* The SNR will improve about 2db by changing the | ||
2618 | BW and FEE main tap. Rest commands are executed | 2744 | BW and FEE main tap. Rest commands are executed |
2619 | after link is up*/ | 2745 | after link is up*/ |
2620 | /* Change FFE main cursor to 5 in EDC register */ | 2746 | /*Change FFE main cursor to 5 in EDC register*/ |
2621 | if (bnx2x_8073_is_snr_needed(params)) | 2747 | if (bnx2x_8073_is_snr_needed(params)) |
2622 | bnx2x_cl45_write(bp, params->port, | 2748 | bnx2x_cl45_write(bp, params->port, |
2623 | ext_phy_type, | 2749 | ext_phy_type, |
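For the CL37 (passive mode) support in this hunk, the old code wrote a fixed 0x20; the new code first reads the CL37 flow-control/duplex advertisement register and ORs in 0x20 when full duplex is requested or 0x40 otherwise, so any pause bits already present are preserved. A minimal sketch of that read-modify-write, with a plain variable standing in for the MDIO register (the register layout here is only what the hunk itself implies):

```c
#include <stdint.h>
#include <stdio.h>

enum duplex { DUPLEX_HALF, DUPLEX_FULL };

/* Plain variable standing in for the CL37 advertisement register. */
static uint16_t cl37_fc_ld_reg = 0x0003;   /* pretend pause bits are already set */

static uint16_t mdio_read(void)        { return cl37_fc_ld_reg; }
static void     mdio_write(uint16_t v) { cl37_fc_ld_reg = v; }

/* Advertise the requested duplex without clobbering the pause bits:
 * 0x20 for full duplex, 0x40 otherwise, as in the hunk above. */
static void advertise_duplex(enum duplex req_duplex)
{
    uint16_t val = mdio_read();

    val |= (req_duplex == DUPLEX_FULL) ? 0x20 : 0x40;
    mdio_write(val);
}

int main(void)
{
    advertise_duplex(DUPLEX_FULL);
    printf("CL37 FC LD register: 0x%x\n", cl37_fc_ld_reg);   /* 0x23 */
    return 0;
}
```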
@@ -2626,25 +2752,28 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2626 | MDIO_PMA_REG_EDC_FFE_MAIN, | 2752 | MDIO_PMA_REG_EDC_FFE_MAIN, |
2627 | 0xFB0C); | 2753 | 0xFB0C); |
2628 | 2754 | ||
2629 | /* Enable FEC (Forward Error Correction) | 2755 | /* Enable FEC (Forward Error Correction) |
2630 | Request in the AN */ | 2756 | Request in the AN */ |
2631 | bnx2x_cl45_read(bp, params->port, | 2757 | bnx2x_cl45_read(bp, params->port, |
2632 | ext_phy_type, | 2758 | ext_phy_type, |
2633 | ext_phy_addr, | 2759 | ext_phy_addr, |
2634 | MDIO_AN_DEVAD, | 2760 | MDIO_AN_DEVAD, |
2635 | MDIO_AN_REG_ADV2, &tmp1); | 2761 | MDIO_AN_REG_ADV2, &tmp1); |
2636 | 2762 | ||
2637 | tmp1 |= (1<<15); | 2763 | tmp1 |= (1<<15); |
2764 | |||
2765 | bnx2x_cl45_write(bp, params->port, | ||
2766 | ext_phy_type, | ||
2767 | ext_phy_addr, | ||
2768 | MDIO_AN_DEVAD, | ||
2769 | MDIO_AN_REG_ADV2, tmp1); | ||
2638 | 2770 | ||
2639 | bnx2x_cl45_write(bp, params->port, | ||
2640 | ext_phy_type, | ||
2641 | ext_phy_addr, | ||
2642 | MDIO_AN_DEVAD, | ||
2643 | MDIO_AN_REG_ADV2, tmp1); | ||
2644 | } | 2771 | } |
2645 | 2772 | ||
2646 | bnx2x_ext_phy_set_pause(params, vars); | 2773 | bnx2x_ext_phy_set_pause(params, vars); |
2647 | 2774 | ||
2775 | /* Restart autoneg */ | ||
2776 | msleep(500); | ||
2648 | bnx2x_cl45_write(bp, params->port, | 2777 | bnx2x_cl45_write(bp, params->port, |
2649 | ext_phy_type, | 2778 | ext_phy_type, |
2650 | ext_phy_addr, | 2779 | ext_phy_addr, |
@@ -2701,10 +2830,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2701 | } | 2830 | } |
2702 | 2831 | ||
2703 | } else { /* SerDes */ | 2832 | } else { /* SerDes */ |
2704 | /* ext_phy_addr = ((bp->ext_phy_config & | 2833 | |
2705 | PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >> | ||
2706 | PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT); | ||
2707 | */ | ||
2708 | ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); | 2834 | ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); |
2709 | switch (ext_phy_type) { | 2835 | switch (ext_phy_type) { |
2710 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: | 2836 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: |
@@ -2726,7 +2852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) | |||
2726 | 2852 | ||
2727 | 2853 | ||
2728 | static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | 2854 | static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, |
2729 | struct link_vars *vars) | 2855 | struct link_vars *vars) |
2730 | { | 2856 | { |
2731 | struct bnx2x *bp = params->bp; | 2857 | struct bnx2x *bp = params->bp; |
2732 | u32 ext_phy_type; | 2858 | u32 ext_phy_type; |
@@ -2767,6 +2893,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2767 | MDIO_PMA_REG_RX_SD, &rx_sd); | 2893 | MDIO_PMA_REG_RX_SD, &rx_sd); |
2768 | DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd); | 2894 | DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd); |
2769 | ext_phy_link_up = (rx_sd & 0x1); | 2895 | ext_phy_link_up = (rx_sd & 0x1); |
2896 | if (ext_phy_link_up) | ||
2897 | vars->line_speed = SPEED_10000; | ||
2770 | break; | 2898 | break; |
2771 | 2899 | ||
2772 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | 2900 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: |
@@ -2810,6 +2938,13 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2810 | */ | 2938 | */ |
2811 | ext_phy_link_up = ((rx_sd & pcs_status & 0x1) || | 2939 | ext_phy_link_up = ((rx_sd & pcs_status & 0x1) || |
2812 | (val2 & (1<<1))); | 2940 | (val2 & (1<<1))); |
2941 | if (ext_phy_link_up) { | ||
2942 | if (val2 & (1<<1)) | ||
2943 | vars->line_speed = SPEED_1000; | ||
2944 | else | ||
2945 | vars->line_speed = SPEED_10000; | ||
2946 | } | ||
2947 | |||
2813 | /* clear LASI indication*/ | 2948 | /* clear LASI indication*/ |
2814 | bnx2x_cl45_read(bp, params->port, ext_phy_type, | 2949 | bnx2x_cl45_read(bp, params->port, ext_phy_type, |
2815 | ext_phy_addr, | 2950 | ext_phy_addr, |
@@ -2820,6 +2955,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2820 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | 2955 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: |
2821 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | 2956 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: |
2822 | { | 2957 | { |
2958 | u16 link_status = 0; | ||
2959 | u16 an1000_status = 0; | ||
2823 | if (ext_phy_type == | 2960 | if (ext_phy_type == |
2824 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) { | 2961 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) { |
2825 | bnx2x_cl45_read(bp, params->port, | 2962 | bnx2x_cl45_read(bp, params->port, |
@@ -2846,14 +2983,9 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2846 | MDIO_PMA_DEVAD, | 2983 | MDIO_PMA_DEVAD, |
2847 | MDIO_PMA_REG_LASI_STATUS, &val1); | 2984 | MDIO_PMA_REG_LASI_STATUS, &val1); |
2848 | 2985 | ||
2849 | bnx2x_cl45_read(bp, params->port, | ||
2850 | ext_phy_type, | ||
2851 | ext_phy_addr, | ||
2852 | MDIO_PMA_DEVAD, | ||
2853 | MDIO_PMA_REG_LASI_STATUS, &val2); | ||
2854 | DP(NETIF_MSG_LINK, | 2986 | DP(NETIF_MSG_LINK, |
2855 | "8703 LASI status 0x%x->0x%x\n", | 2987 | "8703 LASI status 0x%x\n", |
2856 | val1, val2); | 2988 | val1); |
2857 | } | 2989 | } |
2858 | 2990 | ||
2859 | /* clear the interrupt LASI status register */ | 2991 | /* clear the interrupt LASI status register */ |
@@ -2869,20 +3001,23 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2869 | MDIO_PCS_REG_STATUS, &val1); | 3001 | MDIO_PCS_REG_STATUS, &val1); |
2870 | DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", | 3002 | DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", |
2871 | val2, val1); | 3003 | val2, val1); |
2872 | /* Check the LASI */ | 3004 | /* Clear MSG-OUT */ |
2873 | bnx2x_cl45_read(bp, params->port, | 3005 | bnx2x_cl45_read(bp, params->port, |
2874 | ext_phy_type, | 3006 | ext_phy_type, |
2875 | ext_phy_addr, | 3007 | ext_phy_addr, |
2876 | MDIO_PMA_DEVAD, | 3008 | MDIO_PMA_DEVAD, |
2877 | MDIO_PMA_REG_RX_ALARM, &val2); | 3009 | 0xca13, |
3010 | &val1); | ||
3011 | |||
3012 | /* Check the LASI */ | ||
2878 | bnx2x_cl45_read(bp, params->port, | 3013 | bnx2x_cl45_read(bp, params->port, |
2879 | ext_phy_type, | 3014 | ext_phy_type, |
2880 | ext_phy_addr, | 3015 | ext_phy_addr, |
2881 | MDIO_PMA_DEVAD, | 3016 | MDIO_PMA_DEVAD, |
2882 | MDIO_PMA_REG_RX_ALARM, | 3017 | MDIO_PMA_REG_RX_ALARM, &val2); |
2883 | &val1); | 3018 | |
2884 | DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n", | 3019 | DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); |
2885 | val2, val1); | 3020 | |
2886 | /* Check the link status */ | 3021 | /* Check the link status */ |
2887 | bnx2x_cl45_read(bp, params->port, | 3022 | bnx2x_cl45_read(bp, params->port, |
2888 | ext_phy_type, | 3023 | ext_phy_type, |
@@ -2905,29 +3040,29 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2905 | DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); | 3040 | DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); |
2906 | if (ext_phy_type == | 3041 | if (ext_phy_type == |
2907 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { | 3042 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { |
2908 | u16 an1000_status = 0; | 3043 | |
2909 | if (ext_phy_link_up && | 3044 | if (ext_phy_link_up && |
2910 | ( | 3045 | ((params->req_line_speed != |
2911 | (params->req_line_speed != SPEED_10000) | 3046 | SPEED_10000))) { |
2912 | )) { | ||
2913 | if (bnx2x_bcm8073_xaui_wa(params) | 3047 | if (bnx2x_bcm8073_xaui_wa(params) |
2914 | != 0) { | 3048 | != 0) { |
2915 | ext_phy_link_up = 0; | 3049 | ext_phy_link_up = 0; |
2916 | break; | 3050 | break; |
2917 | } | 3051 | } |
2918 | bnx2x_cl45_read(bp, params->port, | 3052 | } |
3053 | bnx2x_cl45_read(bp, params->port, | ||
2919 | ext_phy_type, | 3054 | ext_phy_type, |
2920 | ext_phy_addr, | 3055 | ext_phy_addr, |
2921 | MDIO_XS_DEVAD, | 3056 | MDIO_AN_DEVAD, |
2922 | 0x8304, | 3057 | 0x8304, |
2923 | &an1000_status); | 3058 | &an1000_status); |
2924 | bnx2x_cl45_read(bp, params->port, | 3059 | bnx2x_cl45_read(bp, params->port, |
2925 | ext_phy_type, | 3060 | ext_phy_type, |
2926 | ext_phy_addr, | 3061 | ext_phy_addr, |
2927 | MDIO_XS_DEVAD, | 3062 | MDIO_AN_DEVAD, |
2928 | 0x8304, | 3063 | 0x8304, |
2929 | &an1000_status); | 3064 | &an1000_status); |
2930 | } | 3065 | |
2931 | /* Check the link status on 1.1.2 */ | 3066 | /* Check the link status on 1.1.2 */ |
2932 | bnx2x_cl45_read(bp, params->port, | 3067 | bnx2x_cl45_read(bp, params->port, |
2933 | ext_phy_type, | 3068 | ext_phy_type, |
@@ -2943,8 +3078,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2943 | "an_link_status=0x%x\n", | 3078 | "an_link_status=0x%x\n", |
2944 | val2, val1, an1000_status); | 3079 | val2, val1, an1000_status); |
2945 | 3080 | ||
2946 | ext_phy_link_up = (((val1 & 4) == 4) || | 3081 | ext_phy_link_up = (((val1 & 4) == 4) || |
2947 | (an1000_status & (1<<1))); | 3082 | (an1000_status & (1<<1))); |
2948 | if (ext_phy_link_up && | 3083 | if (ext_phy_link_up && |
2949 | bnx2x_8073_is_snr_needed(params)) { | 3084 | bnx2x_8073_is_snr_needed(params)) { |
2950 | /* The SNR will improve about 2db by | 3085 | /* The SNR will improve about 2db by |
@@ -2968,8 +3103,74 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
2968 | MDIO_PMA_REG_CDR_BANDWIDTH, | 3103 | MDIO_PMA_REG_CDR_BANDWIDTH, |
2969 | 0x0333); | 3104 | 0x0333); |
2970 | 3105 | ||
3106 | |||
3107 | } | ||
3108 | bnx2x_cl45_read(bp, params->port, | ||
3109 | ext_phy_type, | ||
3110 | ext_phy_addr, | ||
3111 | MDIO_PMA_DEVAD, | ||
3112 | 0xc820, | ||
3113 | &link_status); | ||
3114 | |||
3115 | /* Bits 0..2 --> speed detected, | ||
3116 | bits 13..15--> link is down */ | ||
3117 | if ((link_status & (1<<2)) && | ||
3118 | (!(link_status & (1<<15)))) { | ||
3119 | ext_phy_link_up = 1; | ||
3120 | vars->line_speed = SPEED_10000; | ||
3121 | DP(NETIF_MSG_LINK, | ||
3122 | "port %x: External link" | ||
3123 | " up in 10G\n", params->port); | ||
3124 | } else if ((link_status & (1<<1)) && | ||
3125 | (!(link_status & (1<<14)))) { | ||
3126 | ext_phy_link_up = 1; | ||
3127 | vars->line_speed = SPEED_2500; | ||
3128 | DP(NETIF_MSG_LINK, | ||
3129 | "port %x: External link" | ||
3130 | " up in 2.5G\n", params->port); | ||
3131 | } else if ((link_status & (1<<0)) && | ||
3132 | (!(link_status & (1<<13)))) { | ||
3133 | ext_phy_link_up = 1; | ||
3134 | vars->line_speed = SPEED_1000; | ||
3135 | DP(NETIF_MSG_LINK, | ||
3136 | "port %x: External link" | ||
3137 | " up in 1G\n", params->port); | ||
3138 | } else { | ||
3139 | ext_phy_link_up = 0; | ||
3140 | DP(NETIF_MSG_LINK, | ||
3141 | "port %x: External link" | ||
3142 | " is down\n", params->port); | ||
3143 | } | ||
3144 | } else { | ||
3145 | /* See if 1G link is up for the 8072 */ | ||
3146 | bnx2x_cl45_read(bp, params->port, | ||
3147 | ext_phy_type, | ||
3148 | ext_phy_addr, | ||
3149 | MDIO_AN_DEVAD, | ||
3150 | 0x8304, | ||
3151 | &an1000_status); | ||
3152 | bnx2x_cl45_read(bp, params->port, | ||
3153 | ext_phy_type, | ||
3154 | ext_phy_addr, | ||
3155 | MDIO_AN_DEVAD, | ||
3156 | 0x8304, | ||
3157 | &an1000_status); | ||
3158 | if (an1000_status & (1<<1)) { | ||
3159 | ext_phy_link_up = 1; | ||
3160 | vars->line_speed = SPEED_1000; | ||
3161 | DP(NETIF_MSG_LINK, | ||
3162 | "port %x: External link" | ||
3163 | " up in 1G\n", params->port); | ||
3164 | } else if (ext_phy_link_up) { | ||
3165 | ext_phy_link_up = 1; | ||
3166 | vars->line_speed = SPEED_10000; | ||
3167 | DP(NETIF_MSG_LINK, | ||
3168 | "port %x: External link" | ||
3169 | " up in 10G\n", params->port); | ||
2971 | } | 3170 | } |
2972 | } | 3171 | } |
3172 | |||
3173 | |||
2973 | break; | 3174 | break; |
2974 | } | 3175 | } |
2975 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | 3176 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: |
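The added 8073 branch above decodes a vendor link-status word (register 0xc820) where, per the in-line comment, bits 0..2 report the speed that trained and bits 13..15 flag that the corresponding link is down: bit 2/15 for 10G, bit 1/14 for 2.5G, bit 0/13 for 1G. The decoder below mirrors that logic exactly; the bit layout is taken from the comment in the hunk, not from a datasheet.

```c
#include <stdint.h>
#include <stdio.h>

enum speed { SPEED_NONE = 0, SPEED_1000 = 1000, SPEED_2500 = 2500, SPEED_10000 = 10000 };

/* Decode the 8073-style link-status word: a speed counts as "up" when its
 * speed-detected bit (0..2) is set and its link-down bit (13..15) is clear. */
static enum speed decode_link_status(uint16_t link_status)
{
    if ((link_status & (1u << 2)) && !(link_status & (1u << 15)))
        return SPEED_10000;
    if ((link_status & (1u << 1)) && !(link_status & (1u << 14)))
        return SPEED_2500;
    if ((link_status & (1u << 0)) && !(link_status & (1u << 13)))
        return SPEED_1000;
    return SPEED_NONE;
}

int main(void)
{
    printf("0x0004 -> %d\n", decode_link_status(0x0004));   /* 10000 */
    printf("0x8004 -> %d\n", decode_link_status(0x8004));   /* 0: 10G detected but marked down */
    printf("0x0001 -> %d\n", decode_link_status(0x0001));   /* 1000 */
    return 0;
}
```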
@@ -3006,6 +3207,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, | |||
3006 | MDIO_AN_DEVAD, | 3207 | MDIO_AN_DEVAD, |
3007 | MDIO_AN_REG_MASTER_STATUS, | 3208 | MDIO_AN_REG_MASTER_STATUS, |
3008 | &val2); | 3209 | &val2); |
3210 | vars->line_speed = SPEED_10000; | ||
3009 | DP(NETIF_MSG_LINK, | 3211 | DP(NETIF_MSG_LINK, |
3010 | "SFX7101 AN status 0x%x->Master=%x\n", | 3212 | "SFX7101 AN status 0x%x->Master=%x\n", |
3011 | val2, | 3213 | val2, |
@@ -3100,7 +3302,7 @@ static void bnx2x_link_int_enable(struct link_params *params) | |||
3100 | * link management | 3302 | * link management |
3101 | */ | 3303 | */ |
3102 | static void bnx2x_link_int_ack(struct link_params *params, | 3304 | static void bnx2x_link_int_ack(struct link_params *params, |
3103 | struct link_vars *vars, u16 is_10g) | 3305 | struct link_vars *vars, u8 is_10g) |
3104 | { | 3306 | { |
3105 | struct bnx2x *bp = params->bp; | 3307 | struct bnx2x *bp = params->bp; |
3106 | u8 port = params->port; | 3308 | u8 port = params->port; |
@@ -3181,7 +3383,8 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len) | |||
3181 | } | 3383 | } |
3182 | 3384 | ||
3183 | 3385 | ||
3184 | static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) | 3386 | static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr, |
3387 | u32 ext_phy_type) | ||
3185 | { | 3388 | { |
3186 | u32 cnt = 0; | 3389 | u32 cnt = 0; |
3187 | u16 ctrl = 0; | 3390 | u16 ctrl = 0; |
@@ -3192,12 +3395,14 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) | |||
3192 | 3395 | ||
3193 | /* take ext phy out of reset */ | 3396 | /* take ext phy out of reset */ |
3194 | bnx2x_set_gpio(bp, | 3397 | bnx2x_set_gpio(bp, |
3195 | MISC_REGISTERS_GPIO_2, | 3398 | MISC_REGISTERS_GPIO_2, |
3196 | MISC_REGISTERS_GPIO_HIGH); | 3399 | MISC_REGISTERS_GPIO_HIGH, |
3400 | port); | ||
3197 | 3401 | ||
3198 | bnx2x_set_gpio(bp, | 3402 | bnx2x_set_gpio(bp, |
3199 | MISC_REGISTERS_GPIO_1, | 3403 | MISC_REGISTERS_GPIO_1, |
3200 | MISC_REGISTERS_GPIO_HIGH); | 3404 | MISC_REGISTERS_GPIO_HIGH, |
3405 | port); | ||
3201 | 3406 | ||
3202 | /* wait for 5ms */ | 3407 | /* wait for 5ms */ |
3203 | msleep(5); | 3408 | msleep(5); |
@@ -3205,7 +3410,7 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) | |||
3205 | for (cnt = 0; cnt < 1000; cnt++) { | 3410 | for (cnt = 0; cnt < 1000; cnt++) { |
3206 | msleep(1); | 3411 | msleep(1); |
3207 | bnx2x_cl45_read(bp, port, | 3412 | bnx2x_cl45_read(bp, port, |
3208 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, | 3413 | ext_phy_type, |
3209 | ext_phy_addr, | 3414 | ext_phy_addr, |
3210 | MDIO_PMA_DEVAD, | 3415 | MDIO_PMA_DEVAD, |
3211 | MDIO_PMA_REG_CTRL, | 3416 | MDIO_PMA_REG_CTRL, |
@@ -3217,13 +3422,17 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) | |||
3217 | } | 3422 | } |
3218 | } | 3423 | } |
3219 | 3424 | ||
3220 | static void bnx2x_turn_off_sf(struct bnx2x *bp) | 3425 | static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port) |
3221 | { | 3426 | { |
3222 | /* put sf to reset */ | 3427 | /* put sf to reset */ |
3223 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW); | ||
3224 | bnx2x_set_gpio(bp, | 3428 | bnx2x_set_gpio(bp, |
3225 | MISC_REGISTERS_GPIO_2, | 3429 | MISC_REGISTERS_GPIO_1, |
3226 | MISC_REGISTERS_GPIO_LOW); | 3430 | MISC_REGISTERS_GPIO_LOW, |
3431 | port); | ||
3432 | bnx2x_set_gpio(bp, | ||
3433 | MISC_REGISTERS_GPIO_2, | ||
3434 | MISC_REGISTERS_GPIO_LOW, | ||
3435 | port); | ||
3227 | } | 3436 | } |
3228 | 3437 | ||
3229 | u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, | 3438 | u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, |
@@ -3253,7 +3462,8 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, | |||
3253 | 3462 | ||
3254 | /* Take ext phy out of reset */ | 3463 | /* Take ext phy out of reset */ |
3255 | if (!driver_loaded) | 3464 | if (!driver_loaded) |
3256 | bnx2x_turn_on_sf(bp, params->port, ext_phy_addr); | 3465 | bnx2x_turn_on_ef(bp, params->port, ext_phy_addr, |
3466 | ext_phy_type); | ||
3257 | 3467 | ||
3258 | /* wait for 1ms */ | 3468 | /* wait for 1ms */ |
3259 | msleep(1); | 3469 | msleep(1); |
@@ -3276,11 +3486,16 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, | |||
3276 | version[4] = '\0'; | 3486 | version[4] = '\0'; |
3277 | 3487 | ||
3278 | if (!driver_loaded) | 3488 | if (!driver_loaded) |
3279 | bnx2x_turn_off_sf(bp); | 3489 | bnx2x_turn_off_sf(bp, params->port); |
3280 | break; | 3490 | break; |
3281 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | 3491 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: |
3282 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | 3492 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: |
3283 | { | 3493 | { |
3494 | /* Take ext phy out of reset */ | ||
3495 | if (!driver_loaded) | ||
3496 | bnx2x_turn_on_ef(bp, params->port, ext_phy_addr, | ||
3497 | ext_phy_type); | ||
3498 | |||
3284 | bnx2x_cl45_read(bp, params->port, ext_phy_type, | 3499 | bnx2x_cl45_read(bp, params->port, ext_phy_type, |
3285 | ext_phy_addr, | 3500 | ext_phy_addr, |
3286 | MDIO_PMA_DEVAD, | 3501 | MDIO_PMA_DEVAD, |
@@ -3333,7 +3548,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params, | |||
3333 | struct bnx2x *bp = params->bp; | 3548 | struct bnx2x *bp = params->bp; |
3334 | 3549 | ||
3335 | if (is_10g) { | 3550 | if (is_10g) { |
3336 | u32 md_devad; | 3551 | u32 md_devad; |
3337 | 3552 | ||
3338 | DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); | 3553 | DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); |
3339 | 3554 | ||
@@ -3553,6 +3768,8 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, | |||
3553 | u16 hw_led_mode, u32 chip_id) | 3768 | u16 hw_led_mode, u32 chip_id) |
3554 | { | 3769 | { |
3555 | u8 rc = 0; | 3770 | u8 rc = 0; |
3771 | u32 tmp; | ||
3772 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | ||
3556 | DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); | 3773 | DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); |
3557 | DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", | 3774 | DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", |
3558 | speed, hw_led_mode); | 3775 | speed, hw_led_mode); |
@@ -3561,6 +3778,9 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, | |||
3561 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); | 3778 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); |
3562 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, | 3779 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, |
3563 | SHARED_HW_CFG_LED_MAC1); | 3780 | SHARED_HW_CFG_LED_MAC1); |
3781 | |||
3782 | tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); | ||
3783 | EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); | ||
3564 | break; | 3784 | break; |
3565 | 3785 | ||
3566 | case LED_MODE_OPER: | 3786 | case LED_MODE_OPER: |
@@ -3572,6 +3792,10 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, | |||
3572 | LED_BLINK_RATE_VAL); | 3792 | LED_BLINK_RATE_VAL); |
3573 | REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + | 3793 | REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + |
3574 | port*4, 1); | 3794 | port*4, 1); |
3795 | tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); | ||
3796 | EMAC_WR(bp, EMAC_REG_EMAC_LED, | ||
3797 | (tmp & (~EMAC_LED_OVERRIDE))); | ||
3798 | |||
3575 | if (!CHIP_IS_E1H(bp) && | 3799 | if (!CHIP_IS_E1H(bp) && |
3576 | ((speed == SPEED_2500) || | 3800 | ((speed == SPEED_2500) || |
3577 | (speed == SPEED_1000) || | 3801 | (speed == SPEED_1000) || |
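The two LED hunks above add a read-modify-write of the EMAC LED register: LED_MODE_OFF now sets the override bit so the port LED is parked, and LED_MODE_OPER clears it again to hand the LED back to the hardware link logic. A small sketch of that set/clear pair, with the register modeled as a variable and an illustrative bit position (the real EMAC_LED_OVERRIDE value is defined elsewhere in the driver):

```c
#include <stdint.h>
#include <stdio.h>

#define LED_OVERRIDE (1u << 0)   /* illustrative bit position, not the real one */

static uint32_t emac_led_reg;    /* stands in for the memory-mapped LED register */

static uint32_t reg_rd(void)       { return emac_led_reg; }
static void     reg_wr(uint32_t v) { emac_led_reg = v; }

/* LED_MODE_OFF: take the LED away from the MAC. */
static void led_off(void)  { reg_wr(reg_rd() | LED_OVERRIDE); }

/* LED_MODE_OPER: give the LED back to the hardware link logic. */
static void led_oper(void) { reg_wr(reg_rd() & ~LED_OVERRIDE); }

int main(void)
{
    led_off();
    printf("after off : 0x%x\n", emac_led_reg);
    led_oper();
    printf("after oper: 0x%x\n", emac_led_reg);
    return 0;
}
```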
@@ -3622,7 +3846,8 @@ static u8 bnx2x_link_initialize(struct link_params *params, | |||
3622 | struct bnx2x *bp = params->bp; | 3846 | struct bnx2x *bp = params->bp; |
3623 | u8 port = params->port; | 3847 | u8 port = params->port; |
3624 | u8 rc = 0; | 3848 | u8 rc = 0; |
3625 | 3849 | u8 non_ext_phy; | |
3850 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); | ||
3626 | /* Activate the external PHY */ | 3851 | /* Activate the external PHY */ |
3627 | bnx2x_ext_phy_reset(params, vars); | 3852 | bnx2x_ext_phy_reset(params, vars); |
3628 | 3853 | ||
@@ -3644,10 +3869,6 @@ static u8 bnx2x_link_initialize(struct link_params *params, | |||
3644 | bnx2x_set_swap_lanes(params); | 3869 | bnx2x_set_swap_lanes(params); |
3645 | } | 3870 | } |
3646 | 3871 | ||
3647 | /* Set Parallel Detect */ | ||
3648 | if (params->req_line_speed == SPEED_AUTO_NEG) | ||
3649 | bnx2x_set_parallel_detection(params, vars->phy_flags); | ||
3650 | |||
3651 | if (vars->phy_flags & PHY_XGXS_FLAG) { | 3872 | if (vars->phy_flags & PHY_XGXS_FLAG) { |
3652 | if (params->req_line_speed && | 3873 | if (params->req_line_speed && |
3653 | ((params->req_line_speed == SPEED_100) || | 3874 | ((params->req_line_speed == SPEED_100) || |
@@ -3657,68 +3878,33 @@ static u8 bnx2x_link_initialize(struct link_params *params, | |||
3657 | vars->phy_flags &= ~PHY_SGMII_FLAG; | 3878 | vars->phy_flags &= ~PHY_SGMII_FLAG; |
3658 | } | 3879 | } |
3659 | } | 3880 | } |
3881 | /* In case of external phy existance, the line speed would be the | ||
3882 | line speed linked up by the external phy. In case it is direct only, | ||
3883 | then the line_speed during initialization will be equal to the | ||
3884 | req_line_speed*/ | ||
3885 | vars->line_speed = params->req_line_speed; | ||
3660 | 3886 | ||
3661 | if (!(vars->phy_flags & PHY_SGMII_FLAG)) { | 3887 | bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc); |
3662 | u16 bank, rx_eq; | ||
3663 | |||
3664 | rx_eq = ((params->serdes_config & | ||
3665 | PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >> | ||
3666 | PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT); | ||
3667 | 3888 | ||
3668 | DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq); | 3889 | /* init ext phy and enable link state int */ |
3669 | for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL; | 3890 | non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) || |
3670 | bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) { | 3891 | (params->loopback_mode == LOOPBACK_XGXS_10) || |
3671 | CL45_WR_OVER_CL22(bp, port, | 3892 | (params->loopback_mode == LOOPBACK_EXT_PHY)); |
3672 | params->phy_addr, | 3893 | |
3673 | bank , | 3894 | if (non_ext_phy || |
3674 | MDIO_RX0_RX_EQ_BOOST, | 3895 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) { |
3675 | ((rx_eq & | 3896 | if (params->req_line_speed == SPEED_AUTO_NEG) |
3676 | MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) | | 3897 | bnx2x_set_parallel_detection(params, vars->phy_flags); |
3677 | MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL)); | 3898 | bnx2x_init_internal_phy(params, vars); |
3678 | } | ||
3679 | |||
3680 | /* forced speed requested? */ | ||
3681 | if (params->req_line_speed != SPEED_AUTO_NEG) { | ||
3682 | DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); | ||
3683 | |||
3684 | /* disable autoneg */ | ||
3685 | bnx2x_set_autoneg(params, vars); | ||
3686 | |||
3687 | /* program speed and duplex */ | ||
3688 | bnx2x_program_serdes(params); | ||
3689 | vars->ieee_fc = | ||
3690 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | ||
3691 | |||
3692 | } else { /* AN_mode */ | ||
3693 | DP(NETIF_MSG_LINK, "not SGMII, AN\n"); | ||
3694 | |||
3695 | /* AN enabled */ | ||
3696 | bnx2x_set_brcm_cl37_advertisment(params); | ||
3697 | |||
3698 | /* program duplex & pause advertisement (for aneg) */ | ||
3699 | bnx2x_set_ieee_aneg_advertisment(params, | ||
3700 | &vars->ieee_fc); | ||
3701 | |||
3702 | /* enable autoneg */ | ||
3703 | bnx2x_set_autoneg(params, vars); | ||
3704 | |||
3705 | /* enable and restart AN */ | ||
3706 | bnx2x_restart_autoneg(params); | ||
3707 | } | ||
3708 | |||
3709 | } else { /* SGMII mode */ | ||
3710 | DP(NETIF_MSG_LINK, "SGMII\n"); | ||
3711 | |||
3712 | bnx2x_initialize_sgmii_process(params); | ||
3713 | } | 3899 | } |
3714 | 3900 | ||
3715 | /* init ext phy and enable link state int */ | 3901 | if (!non_ext_phy) |
3716 | rc |= bnx2x_ext_phy_init(params, vars); | 3902 | rc |= bnx2x_ext_phy_init(params, vars); |
3717 | 3903 | ||
3718 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | 3904 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, |
3719 | (NIG_STATUS_XGXS0_LINK10G | | 3905 | (NIG_STATUS_XGXS0_LINK10G | |
3720 | NIG_STATUS_XGXS0_LINK_STATUS | | 3906 | NIG_STATUS_XGXS0_LINK_STATUS | |
3721 | NIG_STATUS_SERDES0_LINK_STATUS)); | 3907 | NIG_STATUS_SERDES0_LINK_STATUS)); |
3722 | 3908 | ||
3723 | return rc; | 3909 | return rc; |
3724 | 3910 | ||
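The rewritten bnx2x_link_initialize() above collapses the per-mode SerDes/XGXS branches into two decisions: determine whether the port is effectively "internal only" (direct XGXS, or a loopback mode that bypasses the external PHY), initialize the internal PHY in that case (the 8705 also gets the internal init), and call the external-PHY init only otherwise. A compact restatement of that decision; the enums are illustrative placeholders for the driver's shared-memory defines.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative enums; the driver uses shared-memory #defines instead. */
enum ext_phy  { EXT_PHY_DIRECT, EXT_PHY_BCM8705, EXT_PHY_BCM8073 };
enum loopback { LOOPBACK_NONE, LOOPBACK_XGXS_10, LOOPBACK_EXT_PHY };

static bool is_non_ext_phy(enum ext_phy type, enum loopback mode)
{
    return type == EXT_PHY_DIRECT ||
           mode == LOOPBACK_XGXS_10 ||
           mode == LOOPBACK_EXT_PHY;
}

int main(void)
{
    /* The internal PHY is initialized for direct/loopback setups and the 8705;
     * the external-PHY init runs only when an external PHY is really in use. */
    enum ext_phy  type = EXT_PHY_BCM8073;
    enum loopback mode = LOOPBACK_NONE;
    bool non_ext = is_non_ext_phy(type, mode);

    printf("init internal phy: %d\n", non_ext || type == EXT_PHY_BCM8705);
    printf("init external phy: %d\n", !non_ext);
    return 0;
}
```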
@@ -3730,15 +3916,23 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | |||
3730 | struct bnx2x *bp = params->bp; | 3916 | struct bnx2x *bp = params->bp; |
3731 | 3917 | ||
3732 | u32 val; | 3918 | u32 val; |
3733 | DP(NETIF_MSG_LINK, "Phy Initialization started\n"); | 3919 | DP(NETIF_MSG_LINK, "Phy Initialization started \n"); |
3734 | DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n", | 3920 | DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n", |
3735 | params->req_line_speed, params->req_flow_ctrl); | 3921 | params->req_line_speed, params->req_flow_ctrl); |
3736 | vars->link_status = 0; | 3922 | vars->link_status = 0; |
3923 | vars->phy_link_up = 0; | ||
3924 | vars->link_up = 0; | ||
3925 | vars->line_speed = 0; | ||
3926 | vars->duplex = DUPLEX_FULL; | ||
3927 | vars->flow_ctrl = FLOW_CTRL_NONE; | ||
3928 | vars->mac_type = MAC_TYPE_NONE; | ||
3929 | |||
3737 | if (params->switch_cfg == SWITCH_CFG_1G) | 3930 | if (params->switch_cfg == SWITCH_CFG_1G) |
3738 | vars->phy_flags = PHY_SERDES_FLAG; | 3931 | vars->phy_flags = PHY_SERDES_FLAG; |
3739 | else | 3932 | else |
3740 | vars->phy_flags = PHY_XGXS_FLAG; | 3933 | vars->phy_flags = PHY_XGXS_FLAG; |
3741 | 3934 | ||
3935 | |||
3742 | /* disable attentions */ | 3936 | /* disable attentions */ |
3743 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, | 3937 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, |
3744 | (NIG_MASK_XGXS0_LINK_STATUS | | 3938 | (NIG_MASK_XGXS0_LINK_STATUS | |
@@ -3894,6 +4088,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | |||
3894 | } | 4088 | } |
3895 | 4089 | ||
3896 | bnx2x_link_initialize(params, vars); | 4090 | bnx2x_link_initialize(params, vars); |
4091 | msleep(30); | ||
3897 | bnx2x_link_int_enable(params); | 4092 | bnx2x_link_int_enable(params); |
3898 | } | 4093 | } |
3899 | return 0; | 4094 | return 0; |
@@ -3943,39 +4138,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars) | |||
3943 | /* HW reset */ | 4138 | /* HW reset */ |
3944 | 4139 | ||
3945 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | 4140 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, |
3946 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | 4141 | MISC_REGISTERS_GPIO_OUTPUT_LOW, |
4142 | port); | ||
3947 | 4143 | ||
3948 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 4144 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
3949 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | 4145 | MISC_REGISTERS_GPIO_OUTPUT_LOW, |
4146 | port); | ||
3950 | 4147 | ||
3951 | DP(NETIF_MSG_LINK, "reset external PHY\n"); | 4148 | DP(NETIF_MSG_LINK, "reset external PHY\n"); |
3952 | } else { | 4149 | } else if (ext_phy_type == |
3953 | 4150 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { | |
3954 | u8 ext_phy_addr = ((ext_phy_config & | 4151 | DP(NETIF_MSG_LINK, "Setting 8073 port %d into " |
3955 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
3956 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
3957 | |||
3958 | /* SW reset */ | ||
3959 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | ||
3960 | MDIO_PMA_DEVAD, | ||
3961 | MDIO_PMA_REG_CTRL, | ||
3962 | 1<<15); | ||
3963 | |||
3964 | /* Set Low Power Mode */ | ||
3965 | bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, | ||
3966 | MDIO_PMA_DEVAD, | ||
3967 | MDIO_PMA_REG_CTRL, | ||
3968 | 1<<11); | ||
3969 | |||
3970 | |||
3971 | if (ext_phy_type == | ||
3972 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { | ||
3973 | DP(NETIF_MSG_LINK, "Setting 8073 port %d into" | ||
3974 | "low power mode\n", | 4152 | "low power mode\n", |
3975 | port); | 4153 | port); |
3976 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 4154 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
3977 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | 4155 | MISC_REGISTERS_GPIO_OUTPUT_LOW, |
3978 | } | 4156 | port); |
3979 | } | 4157 | } |
3980 | } | 4158 | } |
3981 | /* reset the SerDes/XGXS */ | 4159 | /* reset the SerDes/XGXS */ |
@@ -3995,6 +4173,73 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars) | |||
3995 | return 0; | 4173 | return 0; |
3996 | } | 4174 | } |
3997 | 4175 | ||
4176 | static u8 bnx2x_update_link_down(struct link_params *params, | ||
4177 | struct link_vars *vars) | ||
4178 | { | ||
4179 | struct bnx2x *bp = params->bp; | ||
4180 | u8 port = params->port; | ||
4181 | DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); | ||
4182 | bnx2x_set_led(bp, port, LED_MODE_OFF, | ||
4183 | 0, params->hw_led_mode, | ||
4184 | params->chip_id); | ||
4185 | |||
4186 | /* indicate no mac active */ | ||
4187 | vars->mac_type = MAC_TYPE_NONE; | ||
4188 | |||
4189 | /* update shared memory */ | ||
4190 | vars->link_status = 0; | ||
4191 | vars->line_speed = 0; | ||
4192 | bnx2x_update_mng(params, vars->link_status); | ||
4193 | |||
4194 | /* activate nig drain */ | ||
4195 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); | ||
4196 | |||
4197 | /* reset BigMac */ | ||
4198 | bnx2x_bmac_rx_disable(bp, params->port); | ||
4199 | REG_WR(bp, GRCBASE_MISC + | ||
4200 | MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
4201 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | ||
4202 | return 0; | ||
4203 | } | ||
4204 | |||
4205 | static u8 bnx2x_update_link_up(struct link_params *params, | ||
4206 | struct link_vars *vars, | ||
4207 | u8 link_10g, u32 gp_status) | ||
4208 | { | ||
4209 | struct bnx2x *bp = params->bp; | ||
4210 | u8 port = params->port; | ||
4211 | u8 rc = 0; | ||
4212 | vars->link_status |= LINK_STATUS_LINK_UP; | ||
4213 | if (link_10g) { | ||
4214 | bnx2x_bmac_enable(params, vars, 0); | ||
4215 | bnx2x_set_led(bp, port, LED_MODE_OPER, | ||
4216 | SPEED_10000, params->hw_led_mode, | ||
4217 | params->chip_id); | ||
4218 | |||
4219 | } else { | ||
4220 | bnx2x_emac_enable(params, vars, 0); | ||
4221 | rc = bnx2x_emac_program(params, vars->line_speed, | ||
4222 | vars->duplex); | ||
4223 | |||
4224 | /* AN complete? */ | ||
4225 | if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { | ||
4226 | if (!(vars->phy_flags & | ||
4227 | PHY_SGMII_FLAG)) | ||
4228 | bnx2x_set_sgmii_tx_driver(params); | ||
4229 | } | ||
4230 | } | ||
4231 | |||
4232 | /* PBF - link up */ | ||
4233 | rc |= bnx2x_pbf_update(params, vars->flow_ctrl, | ||
4234 | vars->line_speed); | ||
4235 | |||
4236 | /* disable drain */ | ||
4237 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); | ||
4238 | |||
4239 | /* update shared memory */ | ||
4240 | bnx2x_update_mng(params, vars->link_status); | ||
4241 | return rc; | ||
4242 | } | ||
3998 | /* This function should called upon link interrupt */ | 4243 | /* This function should called upon link interrupt */ |
3999 | /* In case vars->link_up, driver needs to | 4244 | /* In case vars->link_up, driver needs to |
4000 | 1. Update the pbf | 4245 | 1. Update the pbf |
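The hunk above factors the bulk of the link-interrupt handling into two helpers: bnx2x_update_link_down() turns the LED off, clears the shared-memory status and line speed, drains the NIG and resets the BMAC, while bnx2x_update_link_up() enables the BMAC or EMAC depending on whether the link is 10G, then updates the PBF and shared memory. The skeleton below shows only the shape of that split; the struct and function names are placeholders, not the driver's types.

```c
#include <stdbool.h>
#include <stdio.h>

/* Placeholder state; the driver keeps this in struct link_vars. */
struct link_state {
    bool link_up;
    int  line_speed;
};

static int update_link_down(struct link_state *s)
{
    s->line_speed = 0;
    puts("link down: LED off, drain NIG, reset BMAC, clear shmem status");
    return 0;
}

static int update_link_up(struct link_state *s, bool link_10g)
{
    printf("link up at %d: enable %s, update PBF and shmem\n",
           s->line_speed, link_10g ? "BMAC" : "EMAC");
    return 0;
}

/* The caller decides once which helper applies, instead of open-coding both paths. */
static int link_update(struct link_state *s, bool phy_link_up, bool ext_phy_link_up)
{
    s->link_up = phy_link_up && ext_phy_link_up;
    return s->link_up ? update_link_up(s, s->line_speed == 10000)
                      : update_link_down(s);
}

int main(void)
{
    struct link_state s = { .line_speed = 10000 };
    return link_update(&s, true, true);
}
```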
@@ -4012,10 +4257,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
4012 | { | 4257 | { |
4013 | struct bnx2x *bp = params->bp; | 4258 | struct bnx2x *bp = params->bp; |
4014 | u8 port = params->port; | 4259 | u8 port = params->port; |
4015 | u16 i; | ||
4016 | u16 gp_status; | 4260 | u16 gp_status; |
4017 | u16 link_10g; | 4261 | u8 link_10g; |
4018 | u8 rc = 0; | 4262 | u8 ext_phy_link_up, rc = 0; |
4263 | u32 ext_phy_type; | ||
4019 | 4264 | ||
4020 | DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", | 4265 | DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", |
4021 | port, | 4266 | port, |
@@ -4031,15 +4276,16 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
4031 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), | 4276 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), |
4032 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); | 4277 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); |
4033 | 4278 | ||
4279 | ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); | ||
4034 | 4280 | ||
4035 | /* avoid fast toggling */ | 4281 | /* Check external link change only for non-direct */ |
4036 | for (i = 0; i < 10; i++) { | 4282 | ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars); |
4037 | msleep(10); | 4283 | |
4038 | CL45_RD_OVER_CL22(bp, port, params->phy_addr, | 4284 | /* Read gp_status */ |
4039 | MDIO_REG_BANK_GP_STATUS, | 4285 | CL45_RD_OVER_CL22(bp, port, params->phy_addr, |
4040 | MDIO_GP_STATUS_TOP_AN_STATUS1, | 4286 | MDIO_REG_BANK_GP_STATUS, |
4041 | &gp_status); | 4287 | MDIO_GP_STATUS_TOP_AN_STATUS1, |
4042 | } | 4288 | &gp_status); |
4043 | 4289 | ||
4044 | rc = bnx2x_link_settings_status(params, vars, gp_status); | 4290 | rc = bnx2x_link_settings_status(params, vars, gp_status); |
4045 | if (rc != 0) | 4291 | if (rc != 0) |
@@ -4055,73 +4301,177 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
4055 | 4301 | ||
4056 | bnx2x_link_int_ack(params, vars, link_10g); | 4302 | bnx2x_link_int_ack(params, vars, link_10g); |
4057 | 4303 | ||
4304 | /* In case external phy link is up, and internal link is down | ||
4305 | ( not initialized yet probably after link initialization, it needs | ||
4306 | to be initialized. | ||
4307 | Note that after link down-up as result of cable plug, | ||
4308 | the xgxs link would probably become up again without the need to | ||
4309 | initialize it*/ | ||
4310 | |||
4311 | if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && | ||
4312 | (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) && | ||
4313 | (ext_phy_link_up && !vars->phy_link_up)) | ||
4314 | bnx2x_init_internal_phy(params, vars); | ||
4315 | |||
4058 | /* link is up only if both local phy and external phy are up */ | 4316 | /* link is up only if both local phy and external phy are up */ |
4059 | vars->link_up = (vars->phy_link_up && | 4317 | vars->link_up = (ext_phy_link_up && vars->phy_link_up); |
4060 | bnx2x_ext_phy_is_link_up(params, vars)); | ||
4061 | 4318 | ||
4062 | if (!vars->phy_link_up && | 4319 | if (vars->link_up) |
4063 | REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) { | 4320 | rc = bnx2x_update_link_up(params, vars, link_10g, gp_status); |
4064 | bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */ | 4321 | else |
4322 | rc = bnx2x_update_link_down(params, vars); | ||
4323 | |||
4324 | return rc; | ||
4325 | } | ||
4326 | |||
4327 | static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base) | ||
4328 | { | ||
4329 | u8 ext_phy_addr[PORT_MAX]; | ||
4330 | u16 val; | ||
4331 | s8 port; | ||
4332 | |||
4333 | /* PART1 - Reset both phys */ | ||
4334 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { | ||
4335 | /* Extract the ext phy address for the port */ | ||
4336 | u32 ext_phy_config = REG_RD(bp, shmem_base + | ||
4337 | offsetof(struct shmem_region, | ||
4338 | dev_info.port_hw_config[port].external_phy_config)); | ||
4339 | |||
4340 | /* disable attentions */ | ||
4341 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | ||
4342 | (NIG_MASK_XGXS0_LINK_STATUS | | ||
4343 | NIG_MASK_XGXS0_LINK10G | | ||
4344 | NIG_MASK_SERDES0_LINK_STATUS | | ||
4345 | NIG_MASK_MI_INT)); | ||
4346 | |||
4347 | ext_phy_addr[port] = | ||
4348 | ((ext_phy_config & | ||
4349 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
4350 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
4351 | |||
4352 | /* Need to take the phy out of low power mode in order | ||
4353 | to write to access its registers */ | ||
4354 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | ||
4355 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); | ||
4356 | |||
4357 | /* Reset the phy */ | ||
4358 | bnx2x_cl45_write(bp, port, | ||
4359 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
4360 | ext_phy_addr[port], | ||
4361 | MDIO_PMA_DEVAD, | ||
4362 | MDIO_PMA_REG_CTRL, | ||
4363 | 1<<15); | ||
4065 | } | 4364 | } |
4066 | 4365 | ||
4067 | if (vars->link_up) { | 4366 | /* Add delay of 150ms after reset */ |
4068 | vars->link_status |= LINK_STATUS_LINK_UP; | 4367 | msleep(150); |
4069 | if (link_10g) { | ||
4070 | bnx2x_bmac_enable(params, vars, 0); | ||
4071 | bnx2x_set_led(bp, port, LED_MODE_OPER, | ||
4072 | SPEED_10000, params->hw_led_mode, | ||
4073 | params->chip_id); | ||
4074 | 4368 | ||
4075 | } else { | 4369 | /* PART2 - Download firmware to both phys */ |
4076 | bnx2x_emac_enable(params, vars, 0); | 4370 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
4077 | rc = bnx2x_emac_program(params, vars->line_speed, | 4371 | u16 fw_ver1; |
4078 | vars->duplex); | ||
4079 | 4372 | ||
4080 | /* AN complete? */ | 4373 | bnx2x_bcm8073_external_rom_boot(bp, port, |
4081 | if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { | 4374 | ext_phy_addr[port]); |
4082 | if (!(vars->phy_flags & | 4375 | |
4083 | PHY_SGMII_FLAG)) | 4376 | bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, |
4084 | bnx2x_set_sgmii_tx_driver(params); | 4377 | ext_phy_addr[port], |
4085 | } | 4378 | MDIO_PMA_DEVAD, |
4379 | MDIO_PMA_REG_ROM_VER1, &fw_ver1); | ||
4380 | if (fw_ver1 == 0) { | ||
4381 | DP(NETIF_MSG_LINK, | ||
4382 | "bnx2x_8073_common_init_phy port %x " | ||
4383 | "fw Download failed\n", port); | ||
4384 | return -EINVAL; | ||
4086 | } | 4385 | } |
4087 | 4386 | ||
4088 | /* PBF - link up */ | 4387 | /* Only set bit 10 = 1 (Tx power down) */ |
4089 | rc |= bnx2x_pbf_update(params, vars->flow_ctrl, | 4388 | bnx2x_cl45_read(bp, port, |
4090 | vars->line_speed); | 4389 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, |
4390 | ext_phy_addr[port], | ||
4391 | MDIO_PMA_DEVAD, | ||
4392 | MDIO_PMA_REG_TX_POWER_DOWN, &val); | ||
4091 | 4393 | ||
4092 | /* disable drain */ | 4394 | /* Phase1 of TX_POWER_DOWN reset */ |
4093 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); | 4395 | bnx2x_cl45_write(bp, port, |
4396 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
4397 | ext_phy_addr[port], | ||
4398 | MDIO_PMA_DEVAD, | ||
4399 | MDIO_PMA_REG_TX_POWER_DOWN, | ||
4400 | (val | 1<<10)); | ||
4401 | } | ||
4094 | 4402 | ||
4095 | /* update shared memory */ | 4403 | /* Toggle Transmitter: Power down and then up with 600ms |
4096 | bnx2x_update_mng(params, vars->link_status); | 4404 | delay between */ |
4405 | msleep(600); | ||
4097 | 4406 | ||
4098 | } else { /* link down */ | 4407 | /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ |
4099 | DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port); | 4408 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
4100 | bnx2x_set_led(bp, port, LED_MODE_OFF, | 4409 | /* Phase2 of POWER_DOWN_RESET*/ |
4101 | 0, params->hw_led_mode, | 4410 | /* Release bit 10 (Release Tx power down) */ |
4102 | params->chip_id); | 4411 | bnx2x_cl45_read(bp, port, |
4412 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
4413 | ext_phy_addr[port], | ||
4414 | MDIO_PMA_DEVAD, | ||
4415 | MDIO_PMA_REG_TX_POWER_DOWN, &val); | ||
4103 | 4416 | ||
4104 | /* indicate no mac active */ | 4417 | bnx2x_cl45_write(bp, port, |
4105 | vars->mac_type = MAC_TYPE_NONE; | 4418 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, |
4419 | ext_phy_addr[port], | ||
4420 | MDIO_PMA_DEVAD, | ||
4421 | MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); | ||
4422 | msleep(15); | ||
4106 | 4423 | ||
4107 | /* update shared memory */ | 4424 | /* Read modify write the SPI-ROM version select register */ |
4108 | vars->link_status = 0; | 4425 | bnx2x_cl45_read(bp, port, |
4109 | bnx2x_update_mng(params, vars->link_status); | 4426 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, |
4427 | ext_phy_addr[port], | ||
4428 | MDIO_PMA_DEVAD, | ||
4429 | MDIO_PMA_REG_EDC_FFE_MAIN, &val); | ||
4430 | bnx2x_cl45_write(bp, port, | ||
4431 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | ||
4432 | ext_phy_addr[port], | ||
4433 | MDIO_PMA_DEVAD, | ||
4434 | MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); | ||
4110 | 4435 | ||
4111 | /* activate nig drain */ | 4436 | /* set GPIO2 back to LOW */ |
4112 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); | 4437 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
4438 | MISC_REGISTERS_GPIO_OUTPUT_LOW, port); | ||
4439 | } | ||
4440 | return 0; | ||
4113 | 4441 | ||
4114 | /* reset BigMac */ | 4442 | } |
4115 | bnx2x_bmac_rx_disable(bp, params->port); | ||
4116 | REG_WR(bp, GRCBASE_MISC + | ||
4117 | MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
4118 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | ||
4119 | 4443 | ||
4444 | u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base) | ||
4445 | { | ||
4446 | u8 rc = 0; | ||
4447 | u32 ext_phy_type; | ||
4448 | |||
4449 | DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n"); | ||
4450 | |||
4451 | /* Read the ext_phy_type for arbitrary port(0) */ | ||
4452 | ext_phy_type = XGXS_EXT_PHY_TYPE( | ||
4453 | REG_RD(bp, shmem_base + | ||
4454 | offsetof(struct shmem_region, | ||
4455 | dev_info.port_hw_config[0].external_phy_config))); | ||
4456 | |||
4457 | switch (ext_phy_type) { | ||
4458 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | ||
4459 | { | ||
4460 | rc = bnx2x_8073_common_init_phy(bp, shmem_base); | ||
4461 | break; | ||
4462 | } | ||
4463 | default: | ||
4464 | DP(NETIF_MSG_LINK, | ||
4465 | "bnx2x_common_init_phy: ext_phy 0x%x not required\n", | ||
4466 | ext_phy_type); | ||
4467 | break; | ||
4120 | } | 4468 | } |
4121 | 4469 | ||
4122 | return rc; | 4470 | return rc; |
4123 | } | 4471 | } |
4124 | 4472 | ||
4473 | |||
4474 | |||
4125 | static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) | 4475 | static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) |
4126 | { | 4476 | { |
4127 | u16 val, cnt; | 4477 | u16 val, cnt; |
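The new bnx2x_8073_common_init_phy() added above runs once after power-up and walks both ports through several phases: take each PHY out of low power via GPIO2 and reset it, wait 150 ms, download the SPI-ROM firmware and check for a non-zero version, assert the TX power-down bit, wait 600 ms with the transmitter off, then release TX power-down, adjust the EDC register and drop GPIO2 again. The sketch keeps only that phase structure; every helper here is a stub standing in for the driver's MDIO and GPIO accessors.

```c
#include <stdbool.h>
#include <stdio.h>

#define PORT_MAX 2

/* Stubs standing in for the GPIO and MDIO helpers. */
static void phy_release_low_power(int port)  { printf("port %d: GPIO2 high\n", port); }
static void phy_reset(int port)              { printf("port %d: PMA reset\n", port); }
static bool phy_load_firmware(int port)      { printf("port %d: SPI-ROM boot\n", port); return true; }
static void phy_tx_power(int port, bool off) { printf("port %d: TX power %s\n", port, off ? "down" : "up"); }
static void phy_finish(int port)             { printf("port %d: EDC tweak, GPIO2 low\n", port); }
static void sleep_ms(unsigned int ms)        { (void)ms; }

/* One-time init for both ports: reset, firmware download, TX power toggle. */
static int common_init_8073(void)
{
    int port;

    for (port = PORT_MAX - 1; port >= 0; port--) {   /* phase 1: reset both PHYs */
        phy_release_low_power(port);
        phy_reset(port);
    }
    sleep_ms(150);

    for (port = PORT_MAX - 1; port >= 0; port--) {   /* phase 2: firmware, then power down TX */
        if (!phy_load_firmware(port))
            return -1;
        phy_tx_power(port, true);
    }
    sleep_ms(600);                                   /* transmitter held off for 600 ms */

    for (port = PORT_MAX - 1; port >= 0; port--) {   /* phase 3: power TX back up, clean up */
        phy_tx_power(port, false);
        phy_finish(port);
    }
    return 0;
}

int main(void) { return common_init_8073(); }
```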
@@ -4154,7 +4504,7 @@ static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) | |||
4154 | } | 4504 | } |
4155 | #define RESERVED_SIZE 256 | 4505 | #define RESERVED_SIZE 256 |
4156 | /* max application is 160K bytes - data at end of RAM */ | 4506 | /* max application is 160K bytes - data at end of RAM */ |
4157 | #define MAX_APP_SIZE 160*1024 - RESERVED_SIZE | 4507 | #define MAX_APP_SIZE (160*1024 - RESERVED_SIZE) |
4158 | 4508 | ||
4159 | /* Header is 14 bytes */ | 4509 | /* Header is 14 bytes */ |
4160 | #define HEADER_SIZE 14 | 4510 | #define HEADER_SIZE 14 |
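One small but classic fix in the hunk above: MAX_APP_SIZE changes from `160*1024 - RESERVED_SIZE` to `(160*1024 - RESERVED_SIZE)`. The existing use, `MAX_APP_SIZE+HEADER_SIZE`, happens to evaluate the same either way, but without the parentheses any higher-precedence operator applied to the macro binds to RESERVED_SIZE alone. A two-line demonstration of the difference:

```c
#include <stdio.h>

#define RESERVED_SIZE 256
#define MAX_APP_SIZE_BAD   160*1024 - RESERVED_SIZE     /* unparenthesized body */
#define MAX_APP_SIZE_GOOD (160*1024 - RESERVED_SIZE)    /* parenthesized body */

int main(void)
{
    /* "Twice the application size" goes wrong with the unparenthesized macro:
     * 2 * MAX_APP_SIZE_BAD expands to 2*160*1024 - 256, not 2*(160*1024 - 256). */
    printf("bad : %d\n", 2 * MAX_APP_SIZE_BAD);    /* 327424 */
    printf("good: %d\n", 2 * MAX_APP_SIZE_GOOD);   /* 327168 */
    return 0;
}
```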
@@ -4192,12 +4542,12 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, | |||
4192 | size = MAX_APP_SIZE+HEADER_SIZE; | 4542 | size = MAX_APP_SIZE+HEADER_SIZE; |
4193 | } | 4543 | } |
4194 | DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]); | 4544 | DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]); |
4195 | DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]); | 4545 | DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]); |
4196 | /* Put the DSP in download mode by setting FLASH_CFG[2] to 1 | 4546 | /* Put the DSP in download mode by setting FLASH_CFG[2] to 1 |
4197 | and issuing a reset.*/ | 4547 | and issuing a reset.*/ |
4198 | 4548 | ||
4199 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, | 4549 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, |
4200 | MISC_REGISTERS_GPIO_HIGH); | 4550 | MISC_REGISTERS_GPIO_HIGH, port); |
4201 | 4551 | ||
4202 | bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); | 4552 | bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); |
4203 | 4553 | ||
@@ -4429,7 +4779,8 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, | |||
4429 | } | 4779 | } |
4430 | 4780 | ||
4431 | /* DSP Remove Download Mode */ | 4781 | /* DSP Remove Download Mode */ |
4432 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW); | 4782 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, |
4783 | MISC_REGISTERS_GPIO_LOW, port); | ||
4433 | 4784 | ||
4434 | bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); | 4785 | bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); |
4435 | 4786 | ||
@@ -4437,7 +4788,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, | |||
4437 | for (cnt = 0; cnt < 100; cnt++) | 4788 | for (cnt = 0; cnt < 100; cnt++) |
4438 | msleep(5); | 4789 | msleep(5); |
4439 | 4790 | ||
4440 | bnx2x_hw_reset(bp); | 4791 | bnx2x_hw_reset(bp, port); |
4441 | 4792 | ||
4442 | for (cnt = 0; cnt < 100; cnt++) | 4793 | for (cnt = 0; cnt < 100; cnt++) |
4443 | msleep(5); | 4794 | msleep(5); |
@@ -4473,7 +4824,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port, | |||
4473 | MDIO_PMA_REG_7101_VER2, | 4824 | MDIO_PMA_REG_7101_VER2, |
4474 | &image_revision2); | 4825 | &image_revision2); |
4475 | 4826 | ||
4476 | if (data[0x14e] != (image_revision2&0xFF) || | 4827 | if (data[0x14e] != (image_revision2&0xFF) || |
4477 | data[0x14f] != ((image_revision2&0xFF00)>>8) || | 4828 | data[0x14f] != ((image_revision2&0xFF00)>>8) || |
4478 | data[0x150] != (image_revision1&0xFF) || | 4829 | data[0x150] != (image_revision1&0xFF) || |
4479 | data[0x151] != ((image_revision1&0xFF00)>>8)) { | 4830 | data[0x151] != ((image_revision1&0xFF00)>>8)) { |
@@ -4508,11 +4859,11 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config, | |||
4508 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | 4859 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: |
4509 | /* Take ext phy out of reset */ | 4860 | /* Take ext phy out of reset */ |
4510 | if (!driver_loaded) | 4861 | if (!driver_loaded) |
4511 | bnx2x_turn_on_sf(bp, port, ext_phy_addr); | 4862 | bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type); |
4512 | rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr, | 4863 | rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr, |
4513 | data, size); | 4864 | data, size); |
4514 | if (!driver_loaded) | 4865 | if (!driver_loaded) |
4515 | bnx2x_turn_off_sf(bp); | 4866 | bnx2x_turn_off_sf(bp, port); |
4516 | break; | 4867 | break; |
4517 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | 4868 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: |
4518 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: | 4869 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: |
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h index 714d37ac95de..86d54a17b411 100644 --- a/drivers/net/bnx2x_link.h +++ b/drivers/net/bnx2x_link.h | |||
@@ -55,14 +55,17 @@ struct link_params { | |||
55 | #define LOOPBACK_BMAC 2 | 55 | #define LOOPBACK_BMAC 2 |
56 | #define LOOPBACK_XGXS_10 3 | 56 | #define LOOPBACK_XGXS_10 3 |
57 | #define LOOPBACK_EXT_PHY 4 | 57 | #define LOOPBACK_EXT_PHY 4 |
58 | #define LOOPBACK_EXT 5 | ||
58 | 59 | ||
59 | u16 req_duplex; | 60 | u16 req_duplex; |
60 | u16 req_flow_ctrl; | 61 | u16 req_flow_ctrl; |
62 | u16 req_fc_auto_adv; /* Should be set to TX / BOTH when | ||
63 | req_flow_ctrl is set to AUTO */ | ||
61 | u16 req_line_speed; /* Also determine AutoNeg */ | 64 | u16 req_line_speed; /* Also determine AutoNeg */ |
62 | 65 | ||
63 | /* Device parameters */ | 66 | /* Device parameters */ |
64 | u8 mac_addr[6]; | 67 | u8 mac_addr[6]; |
65 | u16 mtu; | 68 | |
66 | 69 | ||
67 | 70 | ||
68 | /* shmem parameters */ | 71 | /* shmem parameters */ |
@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type, | |||
140 | u8 phy_addr, u8 devad, u16 reg, u16 val); | 143 | u8 phy_addr, u8 devad, u16 reg, u16 val); |
141 | 144 | ||
142 | /* Reads the link_status from the shmem, | 145 | /* Reads the link_status from the shmem, |
143 | and update the link vars accordinaly */ | 146 | and update the link vars accordingly */ |
144 | void bnx2x_link_status_update(struct link_params *input, | 147 | void bnx2x_link_status_update(struct link_params *input, |
145 | struct link_vars *output); | 148 | struct link_vars *output); |
146 | /* returns string representing the fw_version of the external phy */ | 149 | /* returns string representing the fw_version of the external phy */ |
@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, | |||
149 | 152 | ||
150 | /* Set/Unset the led | 153 | /* Set/Unset the led |
151 | Basically, the CLC takes care of the led for the link, but in case one needs | 154 | Basically, the CLC takes care of the led for the link, but in case one needs |
152 | to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to | 155 | to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to |
153 | blink the led, and LED_MODE_OFF to set the led off.*/ | 156 | blink the led, and LED_MODE_OFF to set the led off.*/ |
154 | u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, | 157 | u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, |
155 | u16 hw_led_mode, u32 chip_id); | 158 | u16 hw_led_mode, u32 chip_id); |
@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config, | |||
164 | otherwise link is down*/ | 167 | otherwise link is down*/ |
165 | u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars); | 168 | u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars); |
166 | 169 | ||
170 | /* One-time initialization for external phy after power up */ | ||
171 | u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base); | ||
167 | 172 | ||
168 | #endif /* BNX2X_LINK_H */ | 173 | #endif /* BNX2X_LINK_H */ |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index 272a4bd25953..82deea0a63f5 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <net/ip.h> | 44 | #include <net/ip.h> |
45 | #include <net/tcp.h> | 45 | #include <net/tcp.h> |
46 | #include <net/checksum.h> | 46 | #include <net/checksum.h> |
47 | #include <linux/version.h> | ||
48 | #include <net/ip6_checksum.h> | 47 | #include <net/ip6_checksum.h> |
49 | #include <linux/workqueue.h> | 48 | #include <linux/workqueue.h> |
50 | #include <linux/crc32.h> | 49 | #include <linux/crc32.h> |
@@ -60,8 +59,8 @@ | |||
60 | #include "bnx2x.h" | 59 | #include "bnx2x.h" |
61 | #include "bnx2x_init.h" | 60 | #include "bnx2x_init.h" |
62 | 61 | ||
63 | #define DRV_MODULE_VERSION "1.45.6" | 62 | #define DRV_MODULE_VERSION "1.45.20" |
64 | #define DRV_MODULE_RELDATE "2008/06/23" | 63 | #define DRV_MODULE_RELDATE "2008/08/25" |
65 | #define BNX2X_BC_VER 0x040200 | 64 | #define BNX2X_BC_VER 0x040200 |
66 | 65 | ||
67 | /* Time in jiffies before concluding the transmitter is hung */ | 66 | /* Time in jiffies before concluding the transmitter is hung */ |
@@ -76,23 +75,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); | |||
76 | MODULE_LICENSE("GPL"); | 75 | MODULE_LICENSE("GPL"); |
77 | MODULE_VERSION(DRV_MODULE_VERSION); | 76 | MODULE_VERSION(DRV_MODULE_VERSION); |
78 | 77 | ||
78 | static int disable_tpa; | ||
79 | static int use_inta; | 79 | static int use_inta; |
80 | static int poll; | 80 | static int poll; |
81 | static int debug; | 81 | static int debug; |
82 | static int disable_tpa; | ||
83 | static int nomcp; | ||
84 | static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ | 82 | static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ |
85 | static int use_multi; | 83 | static int use_multi; |
86 | 84 | ||
85 | module_param(disable_tpa, int, 0); | ||
87 | module_param(use_inta, int, 0); | 86 | module_param(use_inta, int, 0); |
88 | module_param(poll, int, 0); | 87 | module_param(poll, int, 0); |
89 | module_param(debug, int, 0); | 88 | module_param(debug, int, 0); |
90 | module_param(disable_tpa, int, 0); | 89 | MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature"); |
91 | module_param(nomcp, int, 0); | ||
92 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); | 90 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); |
93 | MODULE_PARM_DESC(poll, "use polling (for debug)"); | 91 | MODULE_PARM_DESC(poll, "use polling (for debug)"); |
94 | MODULE_PARM_DESC(debug, "default debug msglevel"); | 92 | MODULE_PARM_DESC(debug, "default debug msglevel"); |
95 | MODULE_PARM_DESC(nomcp, "ignore management CPU"); | ||
96 | 93 | ||
97 | #ifdef BNX2X_MULTI | 94 | #ifdef BNX2X_MULTI |
98 | module_param(use_multi, int, 0); | 95 | module_param(use_multi, int, 0); |
@@ -237,17 +234,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | |||
237 | while (*wb_comp != DMAE_COMP_VAL) { | 234 | while (*wb_comp != DMAE_COMP_VAL) { |
238 | DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); | 235 | DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); |
239 | 236 | ||
240 | /* adjust delay for emulation/FPGA */ | ||
241 | if (CHIP_REV_IS_SLOW(bp)) | ||
242 | msleep(100); | ||
243 | else | ||
244 | udelay(5); | ||
245 | |||
246 | if (!cnt) { | 237 | if (!cnt) { |
247 | BNX2X_ERR("dmae timeout!\n"); | 238 | BNX2X_ERR("dmae timeout!\n"); |
248 | break; | 239 | break; |
249 | } | 240 | } |
250 | cnt--; | 241 | cnt--; |
242 | /* adjust delay for emulation/FPGA */ | ||
243 | if (CHIP_REV_IS_SLOW(bp)) | ||
244 | msleep(100); | ||
245 | else | ||
246 | udelay(5); | ||
251 | } | 247 | } |
252 | 248 | ||
253 | mutex_unlock(&bp->dmae_mutex); | 249 | mutex_unlock(&bp->dmae_mutex); |
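Both DMAE completion-wait hunks here (and the matching change to bnx2x_stats_comp() further down) make the same fix: the timeout counter is now tested before the delay instead of after it, so an exhausted budget reports "dmae timeout!" immediately rather than sleeping one extra interval first. A standalone sketch of the resulting poll-with-bounded-retries shape, with usleep() standing in for the kernel's udelay()/msleep() and a volatile flag standing in for the *wb_comp completion word:

#include <stdio.h>
#include <unistd.h>

#define COMP_VAL 0xd0ae0001u            /* stands in for DMAE_COMP_VAL */

volatile unsigned int wb_comp;          /* completion word, written by "hardware" */

int wait_for_completion(int retries)
{
        while (wb_comp != COMP_VAL) {
                if (!retries) {         /* check the budget before sleeping again */
                        fprintf(stderr, "dmae timeout!\n");
                        return -1;
                }
                retries--;
                usleep(5000);           /* the driver uses udelay(5) or msleep(100) */
        }
        return 0;
}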
@@ -310,17 +306,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) | |||
310 | 306 | ||
311 | while (*wb_comp != DMAE_COMP_VAL) { | 307 | while (*wb_comp != DMAE_COMP_VAL) { |
312 | 308 | ||
313 | /* adjust delay for emulation/FPGA */ | ||
314 | if (CHIP_REV_IS_SLOW(bp)) | ||
315 | msleep(100); | ||
316 | else | ||
317 | udelay(5); | ||
318 | |||
319 | if (!cnt) { | 309 | if (!cnt) { |
320 | BNX2X_ERR("dmae timeout!\n"); | 310 | BNX2X_ERR("dmae timeout!\n"); |
321 | break; | 311 | break; |
322 | } | 312 | } |
323 | cnt--; | 313 | cnt--; |
314 | /* adjust delay for emulation/FPGA */ | ||
315 | if (CHIP_REV_IS_SLOW(bp)) | ||
316 | msleep(100); | ||
317 | else | ||
318 | udelay(5); | ||
324 | } | 319 | } |
325 | DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", | 320 | DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", |
326 | bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], | 321 | bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], |
@@ -503,6 +498,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
503 | int i; | 498 | int i; |
504 | u16 j, start, end; | 499 | u16 j, start, end; |
505 | 500 | ||
501 | bp->stats_state = STATS_STATE_DISABLED; | ||
502 | DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); | ||
503 | |||
506 | BNX2X_ERR("begin crash dump -----------------\n"); | 504 | BNX2X_ERR("begin crash dump -----------------\n"); |
507 | 505 | ||
508 | for_each_queue(bp, i) { | 506 | for_each_queue(bp, i) { |
@@ -513,17 +511,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
513 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", | 511 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", |
514 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, | 512 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, |
515 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); | 513 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); |
516 | BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" | 514 | BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)" |
517 | " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)" | 515 | " *rx_bd_cons_sb(%x) rx_comp_prod(%x)" |
518 | " rx_sge_prod(%x) last_max_sge(%x)\n", | 516 | " rx_comp_cons(%x) *rx_cons_sb(%x)\n", |
519 | fp->rx_comp_prod, fp->rx_comp_cons, | 517 | fp->rx_bd_prod, fp->rx_bd_cons, |
520 | le16_to_cpu(*fp->rx_cons_sb), | 518 | le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod, |
521 | le16_to_cpu(*fp->rx_bd_cons_sb), | 519 | fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); |
522 | fp->rx_sge_prod, fp->last_max_sge); | 520 | BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)" |
523 | BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" | 521 | " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)" |
524 | " bd data(%x,%x) rx_alloc_failed(%lx)\n", | 522 | " *sb_u_idx(%x) bd data(%x,%x)\n", |
525 | fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, | 523 | fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx, |
526 | hw_prods->bds_prod, fp->rx_alloc_failed); | 524 | fp->status_blk->c_status_block.status_block_index, |
525 | fp->fp_u_idx, | ||
526 | fp->status_blk->u_status_block.status_block_index, | ||
527 | hw_prods->packets_prod, hw_prods->bds_prod); | ||
527 | 528 | ||
528 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); | 529 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); |
529 | end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); | 530 | end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); |
@@ -553,8 +554,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
553 | j, rx_bd[1], rx_bd[0], sw_bd->skb); | 554 | j, rx_bd[1], rx_bd[0], sw_bd->skb); |
554 | } | 555 | } |
555 | 556 | ||
556 | start = 0; | 557 | start = RX_SGE(fp->rx_sge_prod); |
557 | end = RX_SGE_CNT*NUM_RX_SGE_PAGES; | 558 | end = RX_SGE(fp->last_max_sge); |
558 | for (j = start; j < end; j++) { | 559 | for (j = start; j < end; j++) { |
559 | u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; | 560 | u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; |
560 | struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; | 561 | struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; |
@@ -582,9 +583,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
582 | bnx2x_fw_dump(bp); | 583 | bnx2x_fw_dump(bp); |
583 | bnx2x_mc_assert(bp); | 584 | bnx2x_mc_assert(bp); |
584 | BNX2X_ERR("end crash dump -----------------\n"); | 585 | BNX2X_ERR("end crash dump -----------------\n"); |
585 | |||
586 | bp->stats_state = STATS_STATE_DISABLED; | ||
587 | DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); | ||
588 | } | 586 | } |
589 | 587 | ||
590 | static void bnx2x_int_enable(struct bnx2x *bp) | 588 | static void bnx2x_int_enable(struct bnx2x *bp) |
@@ -684,7 +682,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp) | |||
684 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, | 682 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, |
685 | u8 storm, u16 index, u8 op, u8 update) | 683 | u8 storm, u16 index, u8 op, u8 update) |
686 | { | 684 | { |
687 | u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; | 685 | u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + |
686 | COMMAND_REG_INT_ACK); | ||
688 | struct igu_ack_register igu_ack; | 687 | struct igu_ack_register igu_ack; |
689 | 688 | ||
690 | igu_ack.status_block_index = index; | 689 | igu_ack.status_block_index = index; |
@@ -694,9 +693,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, | |||
694 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | 693 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | |
695 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | 694 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); |
696 | 695 | ||
697 | DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n", | 696 | DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", |
698 | (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); | 697 | (*(u32 *)&igu_ack), hc_addr); |
699 | REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); | 698 | REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); |
700 | } | 699 | } |
701 | 700 | ||
702 | static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) | 701 | static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) |
@@ -716,36 +715,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) | |||
716 | return rc; | 715 | return rc; |
717 | } | 716 | } |
718 | 717 | ||
719 | static inline int bnx2x_has_work(struct bnx2x_fastpath *fp) | ||
720 | { | ||
721 | u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | ||
722 | |||
723 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
724 | rx_cons_sb++; | ||
725 | |||
726 | if ((fp->rx_comp_cons != rx_cons_sb) || | ||
727 | (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || | ||
728 | (fp->tx_pkt_prod != fp->tx_pkt_cons)) | ||
729 | return 1; | ||
730 | |||
731 | return 0; | ||
732 | } | ||
733 | |||
734 | static u16 bnx2x_ack_int(struct bnx2x *bp) | 718 | static u16 bnx2x_ack_int(struct bnx2x *bp) |
735 | { | 719 | { |
736 | u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; | 720 | u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + |
737 | u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); | 721 | COMMAND_REG_SIMD_MASK); |
722 | u32 result = REG_RD(bp, hc_addr); | ||
738 | 723 | ||
739 | DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n", | 724 | DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", |
740 | result, BAR_IGU_INTMEM + igu_addr); | 725 | result, hc_addr); |
741 | 726 | ||
742 | #ifdef IGU_DEBUG | ||
743 | #warning IGU_DEBUG active | ||
744 | if (result == 0) { | ||
745 | BNX2X_ERR("read %x from IGU\n", result); | ||
746 | REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0); | ||
747 | } | ||
748 | #endif | ||
749 | return result; | 727 | return result; |
750 | } | 728 | } |
751 | 729 | ||
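The two hunks above replace accesses through the IGU BAR with per-port host-controller command registers; the same base formula, HC_REG_COMMAND_REG + port*32 + <command offset>, reappears later in the patch with COMMAND_REG_ATTN_BITS_SET and COMMAND_REG_ATTN_BITS_CLR. A condensed sketch of the addressing, using the register names and accessors as they appear in the patch (the comments are a paraphrase, not driver text):

int port = BP_PORT(bp);
u32 ack_addr  = HC_REG_COMMAND_REG + port*32 + COMMAND_REG_INT_ACK;
u32 simd_addr = HC_REG_COMMAND_REG + port*32 + COMMAND_REG_SIMD_MASK;

REG_WR(bp, ack_addr, *(u32 *)&igu_ack);     /* acknowledge a status block */
result = REG_RD(bp, simd_addr);             /* read the aggregated interrupt status */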
@@ -898,6 +876,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | |||
898 | netif_tx_lock(bp->dev); | 876 | netif_tx_lock(bp->dev); |
899 | 877 | ||
900 | if (netif_queue_stopped(bp->dev) && | 878 | if (netif_queue_stopped(bp->dev) && |
879 | (bp->state == BNX2X_STATE_OPEN) && | ||
901 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) | 880 | (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) |
902 | netif_wake_queue(bp->dev); | 881 | netif_wake_queue(bp->dev); |
903 | 882 | ||
@@ -905,6 +884,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | |||
905 | } | 884 | } |
906 | } | 885 | } |
907 | 886 | ||
887 | |||
908 | static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | 888 | static void bnx2x_sp_event(struct bnx2x_fastpath *fp, |
909 | union eth_rx_cqe *rr_cqe) | 889 | union eth_rx_cqe *rr_cqe) |
910 | { | 890 | { |
@@ -960,6 +940,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
960 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; | 940 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; |
961 | break; | 941 | break; |
962 | 942 | ||
943 | |||
963 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): | 944 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): |
964 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): | 945 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): |
965 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); | 946 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); |
@@ -1169,8 +1150,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) | |||
1169 | memset(fp->sge_mask, 0xff, | 1150 | memset(fp->sge_mask, 0xff, |
1170 | (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); | 1151 | (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); |
1171 | 1152 | ||
1172 | /* Clear the two last indeces in the page to 1: | 1153 | /* Clear the two last indices in the page to 1: |
1173 | these are the indeces that correspond to the "next" element, | 1154 | these are the indices that correspond to the "next" element, |
1174 | hence will never be indicated and should be removed from | 1155 | hence will never be indicated and should be removed from |
1175 | the calculations. */ | 1156 | the calculations. */ |
1176 | bnx2x_clear_sge_mask_next_elems(fp); | 1157 | bnx2x_clear_sge_mask_next_elems(fp); |
@@ -1261,7 +1242,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1261 | where we are and drop the whole packet */ | 1242 | where we are and drop the whole packet */ |
1262 | err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); | 1243 | err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); |
1263 | if (unlikely(err)) { | 1244 | if (unlikely(err)) { |
1264 | fp->rx_alloc_failed++; | 1245 | bp->eth_stats.rx_skb_alloc_failed++; |
1265 | return err; | 1246 | return err; |
1266 | } | 1247 | } |
1267 | 1248 | ||
@@ -1297,14 +1278,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1297 | pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), | 1278 | pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), |
1298 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); | 1279 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); |
1299 | 1280 | ||
1300 | /* if alloc failed drop the packet and keep the buffer in the bin */ | ||
1301 | if (likely(new_skb)) { | 1281 | if (likely(new_skb)) { |
1282 | /* fix ip xsum and give it to the stack */ | ||
1283 | /* (no need to map the new skb) */ | ||
1302 | 1284 | ||
1303 | prefetch(skb); | 1285 | prefetch(skb); |
1304 | prefetch(((char *)(skb)) + 128); | 1286 | prefetch(((char *)(skb)) + 128); |
1305 | 1287 | ||
1306 | /* else fix ip xsum and give it to the stack */ | ||
1307 | /* (no need to map the new skb) */ | ||
1308 | #ifdef BNX2X_STOP_ON_ERROR | 1288 | #ifdef BNX2X_STOP_ON_ERROR |
1309 | if (pad + len > bp->rx_buf_size) { | 1289 | if (pad + len > bp->rx_buf_size) { |
1310 | BNX2X_ERR("skb_put is about to fail... " | 1290 | BNX2X_ERR("skb_put is about to fail... " |
@@ -1353,9 +1333,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1353 | fp->tpa_pool[queue].skb = new_skb; | 1333 | fp->tpa_pool[queue].skb = new_skb; |
1354 | 1334 | ||
1355 | } else { | 1335 | } else { |
1336 | /* else drop the packet and keep the buffer in the bin */ | ||
1356 | DP(NETIF_MSG_RX_STATUS, | 1337 | DP(NETIF_MSG_RX_STATUS, |
1357 | "Failed to allocate new skb - dropping packet!\n"); | 1338 | "Failed to allocate new skb - dropping packet!\n"); |
1358 | fp->rx_alloc_failed++; | 1339 | bp->eth_stats.rx_skb_alloc_failed++; |
1359 | } | 1340 | } |
1360 | 1341 | ||
1361 | fp->tpa_state[queue] = BNX2X_TPA_STOP; | 1342 | fp->tpa_state[queue] = BNX2X_TPA_STOP; |
@@ -1390,7 +1371,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1390 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; | 1371 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; |
1391 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; | 1372 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; |
1392 | int rx_pkt = 0; | 1373 | int rx_pkt = 0; |
1393 | u16 queue; | ||
1394 | 1374 | ||
1395 | #ifdef BNX2X_STOP_ON_ERROR | 1375 | #ifdef BNX2X_STOP_ON_ERROR |
1396 | if (unlikely(bp->panic)) | 1376 | if (unlikely(bp->panic)) |
@@ -1456,7 +1436,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1456 | if ((!fp->disable_tpa) && | 1436 | if ((!fp->disable_tpa) && |
1457 | (TPA_TYPE(cqe_fp_flags) != | 1437 | (TPA_TYPE(cqe_fp_flags) != |
1458 | (TPA_TYPE_START | TPA_TYPE_END))) { | 1438 | (TPA_TYPE_START | TPA_TYPE_END))) { |
1459 | queue = cqe->fast_path_cqe.queue_index; | 1439 | u16 queue = cqe->fast_path_cqe.queue_index; |
1460 | 1440 | ||
1461 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { | 1441 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { |
1462 | DP(NETIF_MSG_RX_STATUS, | 1442 | DP(NETIF_MSG_RX_STATUS, |
@@ -1503,11 +1483,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1503 | 1483 | ||
1504 | /* is this an error packet? */ | 1484 | /* is this an error packet? */ |
1505 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { | 1485 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { |
1506 | /* do we sometimes forward error packets anyway? */ | ||
1507 | DP(NETIF_MSG_RX_ERR, | 1486 | DP(NETIF_MSG_RX_ERR, |
1508 | "ERROR flags %x rx packet %u\n", | 1487 | "ERROR flags %x rx packet %u\n", |
1509 | cqe_fp_flags, sw_comp_cons); | 1488 | cqe_fp_flags, sw_comp_cons); |
1510 | /* TBD make sure MC counts this as a drop */ | 1489 | bp->eth_stats.rx_err_discard_pkt++; |
1511 | goto reuse_rx; | 1490 | goto reuse_rx; |
1512 | } | 1491 | } |
1513 | 1492 | ||
@@ -1524,7 +1503,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1524 | DP(NETIF_MSG_RX_ERR, | 1503 | DP(NETIF_MSG_RX_ERR, |
1525 | "ERROR packet dropped " | 1504 | "ERROR packet dropped " |
1526 | "because of alloc failure\n"); | 1505 | "because of alloc failure\n"); |
1527 | fp->rx_alloc_failed++; | 1506 | bp->eth_stats.rx_skb_alloc_failed++; |
1528 | goto reuse_rx; | 1507 | goto reuse_rx; |
1529 | } | 1508 | } |
1530 | 1509 | ||
@@ -1550,7 +1529,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1550 | DP(NETIF_MSG_RX_ERR, | 1529 | DP(NETIF_MSG_RX_ERR, |
1551 | "ERROR packet dropped because " | 1530 | "ERROR packet dropped because " |
1552 | "of alloc failure\n"); | 1531 | "of alloc failure\n"); |
1553 | fp->rx_alloc_failed++; | 1532 | bp->eth_stats.rx_skb_alloc_failed++; |
1554 | reuse_rx: | 1533 | reuse_rx: |
1555 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); | 1534 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); |
1556 | goto next_rx; | 1535 | goto next_rx; |
@@ -1559,10 +1538,12 @@ reuse_rx: | |||
1559 | skb->protocol = eth_type_trans(skb, bp->dev); | 1538 | skb->protocol = eth_type_trans(skb, bp->dev); |
1560 | 1539 | ||
1561 | skb->ip_summed = CHECKSUM_NONE; | 1540 | skb->ip_summed = CHECKSUM_NONE; |
1562 | if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe)) | 1541 | if (bp->rx_csum) { |
1563 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1542 | if (likely(BNX2X_RX_CSUM_OK(cqe))) |
1564 | 1543 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1565 | /* TBD do we pass bad csum packets in promisc */ | 1544 | else |
1545 | bp->eth_stats.hw_csum_err++; | ||
1546 | } | ||
1566 | } | 1547 | } |
1567 | 1548 | ||
1568 | #ifdef BCM_VLAN | 1549 | #ifdef BCM_VLAN |
@@ -1615,6 +1596,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1615 | struct net_device *dev = bp->dev; | 1596 | struct net_device *dev = bp->dev; |
1616 | int index = FP_IDX(fp); | 1597 | int index = FP_IDX(fp); |
1617 | 1598 | ||
1599 | /* Return here if interrupt is disabled */ | ||
1600 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | ||
1601 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | ||
1602 | return IRQ_HANDLED; | ||
1603 | } | ||
1604 | |||
1618 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", | 1605 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", |
1619 | index, FP_SB_ID(fp)); | 1606 | index, FP_SB_ID(fp)); |
1620 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0); | 1607 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0); |
@@ -1648,17 +1635,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1648 | } | 1635 | } |
1649 | DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status); | 1636 | DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status); |
1650 | 1637 | ||
1651 | #ifdef BNX2X_STOP_ON_ERROR | ||
1652 | if (unlikely(bp->panic)) | ||
1653 | return IRQ_HANDLED; | ||
1654 | #endif | ||
1655 | |||
1656 | /* Return here if interrupt is disabled */ | 1638 | /* Return here if interrupt is disabled */ |
1657 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 1639 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
1658 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | 1640 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); |
1659 | return IRQ_HANDLED; | 1641 | return IRQ_HANDLED; |
1660 | } | 1642 | } |
1661 | 1643 | ||
1644 | #ifdef BNX2X_STOP_ON_ERROR | ||
1645 | if (unlikely(bp->panic)) | ||
1646 | return IRQ_HANDLED; | ||
1647 | #endif | ||
1648 | |||
1662 | mask = 0x2 << bp->fp[0].sb_id; | 1649 | mask = 0x2 << bp->fp[0].sb_id; |
1663 | if (status & mask) { | 1650 | if (status & mask) { |
1664 | struct bnx2x_fastpath *fp = &bp->fp[0]; | 1651 | struct bnx2x_fastpath *fp = &bp->fp[0]; |
@@ -1699,11 +1686,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | |||
1699 | * General service functions | 1686 | * General service functions |
1700 | */ | 1687 | */ |
1701 | 1688 | ||
1702 | static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) | 1689 | static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) |
1703 | { | 1690 | { |
1704 | u32 lock_status; | 1691 | u32 lock_status; |
1705 | u32 resource_bit = (1 << resource); | 1692 | u32 resource_bit = (1 << resource); |
1706 | u8 port = BP_PORT(bp); | 1693 | int func = BP_FUNC(bp); |
1694 | u32 hw_lock_control_reg; | ||
1707 | int cnt; | 1695 | int cnt; |
1708 | 1696 | ||
1709 | /* Validating that the resource is within range */ | 1697 | /* Validating that the resource is within range */ |
@@ -1714,20 +1702,26 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) | |||
1714 | return -EINVAL; | 1702 | return -EINVAL; |
1715 | } | 1703 | } |
1716 | 1704 | ||
1705 | if (func <= 5) { | ||
1706 | hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); | ||
1707 | } else { | ||
1708 | hw_lock_control_reg = | ||
1709 | (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); | ||
1710 | } | ||
1711 | |||
1717 | /* Validating that the resource is not already taken */ | 1712 | /* Validating that the resource is not already taken */ |
1718 | lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); | 1713 | lock_status = REG_RD(bp, hw_lock_control_reg); |
1719 | if (lock_status & resource_bit) { | 1714 | if (lock_status & resource_bit) { |
1720 | DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", | 1715 | DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", |
1721 | lock_status, resource_bit); | 1716 | lock_status, resource_bit); |
1722 | return -EEXIST; | 1717 | return -EEXIST; |
1723 | } | 1718 | } |
1724 | 1719 | ||
1725 | /* Try for 1 second every 5ms */ | 1720 | /* Try for 5 second every 5ms */ |
1726 | for (cnt = 0; cnt < 200; cnt++) { | 1721 | for (cnt = 0; cnt < 1000; cnt++) { |
1727 | /* Try to acquire the lock */ | 1722 | /* Try to acquire the lock */ |
1728 | REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4, | 1723 | REG_WR(bp, hw_lock_control_reg + 4, resource_bit); |
1729 | resource_bit); | 1724 | lock_status = REG_RD(bp, hw_lock_control_reg); |
1730 | lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); | ||
1731 | if (lock_status & resource_bit) | 1725 | if (lock_status & resource_bit) |
1732 | return 0; | 1726 | return 0; |
1733 | 1727 | ||
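bnx2x_acquire_hw_lock() now derives its control register from the function number rather than the port, and the retry budget grows from 1 second to 5. A condensed sketch under the patch's own names; the msleep(5) is inferred from the "every 5ms" comment, since the delay between retries is not visible in the hunk:

int func = BP_FUNC(bp);
u32 hw_lock_control_reg;

/* functions 0-5 and 6-7 use two banks of 8-byte-spaced driver-control registers */
if (func <= 5)
        hw_lock_control_reg = MISC_REG_DRIVER_CONTROL_1 + func*8;
else
        hw_lock_control_reg = MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8;

/* 1000 attempts * 5 ms = the 5 second budget described by the new comment */
for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);   /* request the bit */
        if (REG_RD(bp, hw_lock_control_reg) & resource_bit)  /* did we get it? */
                return 0;
        msleep(5);
}
return -EAGAIN;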
@@ -1737,11 +1731,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) | |||
1737 | return -EAGAIN; | 1731 | return -EAGAIN; |
1738 | } | 1732 | } |
1739 | 1733 | ||
1740 | static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) | 1734 | static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) |
1741 | { | 1735 | { |
1742 | u32 lock_status; | 1736 | u32 lock_status; |
1743 | u32 resource_bit = (1 << resource); | 1737 | u32 resource_bit = (1 << resource); |
1744 | u8 port = BP_PORT(bp); | 1738 | int func = BP_FUNC(bp); |
1739 | u32 hw_lock_control_reg; | ||
1745 | 1740 | ||
1746 | /* Validating that the resource is within range */ | 1741 | /* Validating that the resource is within range */ |
1747 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { | 1742 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { |
@@ -1751,20 +1746,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) | |||
1751 | return -EINVAL; | 1746 | return -EINVAL; |
1752 | } | 1747 | } |
1753 | 1748 | ||
1749 | if (func <= 5) { | ||
1750 | hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); | ||
1751 | } else { | ||
1752 | hw_lock_control_reg = | ||
1753 | (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); | ||
1754 | } | ||
1755 | |||
1754 | /* Validating that the resource is currently taken */ | 1756 | /* Validating that the resource is currently taken */ |
1755 | lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); | 1757 | lock_status = REG_RD(bp, hw_lock_control_reg); |
1756 | if (!(lock_status & resource_bit)) { | 1758 | if (!(lock_status & resource_bit)) { |
1757 | DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", | 1759 | DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", |
1758 | lock_status, resource_bit); | 1760 | lock_status, resource_bit); |
1759 | return -EFAULT; | 1761 | return -EFAULT; |
1760 | } | 1762 | } |
1761 | 1763 | ||
1762 | REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit); | 1764 | REG_WR(bp, hw_lock_control_reg, resource_bit); |
1763 | return 0; | 1765 | return 0; |
1764 | } | 1766 | } |
1765 | 1767 | ||
1766 | /* HW Lock for shared dual port PHYs */ | 1768 | /* HW Lock for shared dual port PHYs */ |
1767 | static void bnx2x_phy_hw_lock(struct bnx2x *bp) | 1769 | static void bnx2x_acquire_phy_lock(struct bnx2x *bp) |
1768 | { | 1770 | { |
1769 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | 1771 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); |
1770 | 1772 | ||
@@ -1772,25 +1774,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp) | |||
1772 | 1774 | ||
1773 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || | 1775 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || |
1774 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) | 1776 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) |
1775 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); | 1777 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); |
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | static void bnx2x_phy_hw_unlock(struct bnx2x *bp) | 1780 | static void bnx2x_release_phy_lock(struct bnx2x *bp) |
1779 | { | 1781 | { |
1780 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | 1782 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); |
1781 | 1783 | ||
1782 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || | 1784 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || |
1783 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) | 1785 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) |
1784 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); | 1786 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); |
1785 | 1787 | ||
1786 | mutex_unlock(&bp->port.phy_mutex); | 1788 | mutex_unlock(&bp->port.phy_mutex); |
1787 | } | 1789 | } |
1788 | 1790 | ||
1789 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) | 1791 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) |
1790 | { | 1792 | { |
1791 | /* The GPIO should be swapped if swap register is set and active */ | 1793 | /* The GPIO should be swapped if swap register is set and active */ |
1792 | int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && | 1794 | int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && |
1793 | REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp); | 1795 | REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; |
1794 | int gpio_shift = gpio_num + | 1796 | int gpio_shift = gpio_num + |
1795 | (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); | 1797 | (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); |
1796 | u32 gpio_mask = (1 << gpio_shift); | 1798 | u32 gpio_mask = (1 << gpio_shift); |
@@ -1801,7 +1803,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) | |||
1801 | return -EINVAL; | 1803 | return -EINVAL; |
1802 | } | 1804 | } |
1803 | 1805 | ||
1804 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); | 1806 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); |
1805 | /* read GPIO and mask except the float bits */ | 1807 | /* read GPIO and mask except the float bits */ |
1806 | gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); | 1808 | gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); |
1807 | 1809 | ||
@@ -1822,7 +1824,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) | |||
1822 | gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); | 1824 | gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); |
1823 | break; | 1825 | break; |
1824 | 1826 | ||
1825 | case MISC_REGISTERS_GPIO_INPUT_HI_Z : | 1827 | case MISC_REGISTERS_GPIO_INPUT_HI_Z: |
1826 | DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", | 1828 | DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", |
1827 | gpio_num, gpio_shift); | 1829 | gpio_num, gpio_shift); |
1828 | /* set FLOAT */ | 1830 | /* set FLOAT */ |
@@ -1834,7 +1836,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) | |||
1834 | } | 1836 | } |
1835 | 1837 | ||
1836 | REG_WR(bp, MISC_REG_GPIO, gpio_reg); | 1838 | REG_WR(bp, MISC_REG_GPIO, gpio_reg); |
1837 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO); | 1839 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); |
1838 | 1840 | ||
1839 | return 0; | 1841 | return 0; |
1840 | } | 1842 | } |
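bnx2x_set_gpio() now takes the port from the caller instead of using BP_PORT(bp), and the fan-failure attention handler further down passes it explicitly. A condensed sketch of the addressing and locking the function performs, with names taken from the hunks above and the per-mode set/clear/float logic elided:

int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;   /* honour pin swap */
int gpio_shift = gpio_num +
                 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
u32 gpio_mask = (1 << gpio_shift);

bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
gpio_reg = REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT;
/* ... set/clear/float gpio_mask according to the requested mode ... */
REG_WR(bp, MISC_REG_GPIO, gpio_reg);
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);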
@@ -1850,19 +1852,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) | |||
1850 | return -EINVAL; | 1852 | return -EINVAL; |
1851 | } | 1853 | } |
1852 | 1854 | ||
1853 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); | 1855 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); |
1854 | /* read SPIO and mask except the float bits */ | 1856 | /* read SPIO and mask except the float bits */ |
1855 | spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); | 1857 | spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); |
1856 | 1858 | ||
1857 | switch (mode) { | 1859 | switch (mode) { |
1858 | case MISC_REGISTERS_SPIO_OUTPUT_LOW : | 1860 | case MISC_REGISTERS_SPIO_OUTPUT_LOW: |
1859 | DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); | 1861 | DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); |
1860 | /* clear FLOAT and set CLR */ | 1862 | /* clear FLOAT and set CLR */ |
1861 | spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); | 1863 | spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); |
1862 | spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); | 1864 | spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); |
1863 | break; | 1865 | break; |
1864 | 1866 | ||
1865 | case MISC_REGISTERS_SPIO_OUTPUT_HIGH : | 1867 | case MISC_REGISTERS_SPIO_OUTPUT_HIGH: |
1866 | DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); | 1868 | DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); |
1867 | /* clear FLOAT and set SET */ | 1869 | /* clear FLOAT and set SET */ |
1868 | spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); | 1870 | spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); |
@@ -1880,7 +1882,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) | |||
1880 | } | 1882 | } |
1881 | 1883 | ||
1882 | REG_WR(bp, MISC_REG_SPIO, spio_reg); | 1884 | REG_WR(bp, MISC_REG_SPIO, spio_reg); |
1883 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO); | 1885 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); |
1884 | 1886 | ||
1885 | return 0; | 1887 | return 0; |
1886 | } | 1888 | } |
@@ -1940,46 +1942,63 @@ static void bnx2x_link_report(struct bnx2x *bp) | |||
1940 | 1942 | ||
1941 | static u8 bnx2x_initial_phy_init(struct bnx2x *bp) | 1943 | static u8 bnx2x_initial_phy_init(struct bnx2x *bp) |
1942 | { | 1944 | { |
1943 | u8 rc; | 1945 | if (!BP_NOMCP(bp)) { |
1946 | u8 rc; | ||
1944 | 1947 | ||
1945 | /* Initialize link parameters structure variables */ | 1948 | /* Initialize link parameters structure variables */ |
1946 | bp->link_params.mtu = bp->dev->mtu; | 1949 | /* It is recommended to turn off RX FC for jumbo frames |
1950 | for better performance */ | ||
1951 | if (IS_E1HMF(bp)) | ||
1952 | bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH; | ||
1953 | else if (bp->dev->mtu > 5000) | ||
1954 | bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX; | ||
1955 | else | ||
1956 | bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH; | ||
1947 | 1957 | ||
1948 | bnx2x_phy_hw_lock(bp); | 1958 | bnx2x_acquire_phy_lock(bp); |
1949 | rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 1959 | rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
1950 | bnx2x_phy_hw_unlock(bp); | 1960 | bnx2x_release_phy_lock(bp); |
1951 | 1961 | ||
1952 | if (bp->link_vars.link_up) | 1962 | if (bp->link_vars.link_up) |
1953 | bnx2x_link_report(bp); | 1963 | bnx2x_link_report(bp); |
1954 | 1964 | ||
1955 | bnx2x_calc_fc_adv(bp); | 1965 | bnx2x_calc_fc_adv(bp); |
1956 | 1966 | ||
1957 | return rc; | 1967 | return rc; |
1968 | } | ||
1969 | BNX2X_ERR("Bootcode is missing -not initializing link\n"); | ||
1970 | return -EINVAL; | ||
1958 | } | 1971 | } |
1959 | 1972 | ||
1960 | static void bnx2x_link_set(struct bnx2x *bp) | 1973 | static void bnx2x_link_set(struct bnx2x *bp) |
1961 | { | 1974 | { |
1962 | bnx2x_phy_hw_lock(bp); | 1975 | if (!BP_NOMCP(bp)) { |
1963 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 1976 | bnx2x_acquire_phy_lock(bp); |
1964 | bnx2x_phy_hw_unlock(bp); | 1977 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
1978 | bnx2x_release_phy_lock(bp); | ||
1965 | 1979 | ||
1966 | bnx2x_calc_fc_adv(bp); | 1980 | bnx2x_calc_fc_adv(bp); |
1981 | } else | ||
1982 | BNX2X_ERR("Bootcode is missing -not setting link\n"); | ||
1967 | } | 1983 | } |
1968 | 1984 | ||
1969 | static void bnx2x__link_reset(struct bnx2x *bp) | 1985 | static void bnx2x__link_reset(struct bnx2x *bp) |
1970 | { | 1986 | { |
1971 | bnx2x_phy_hw_lock(bp); | 1987 | if (!BP_NOMCP(bp)) { |
1972 | bnx2x_link_reset(&bp->link_params, &bp->link_vars); | 1988 | bnx2x_acquire_phy_lock(bp); |
1973 | bnx2x_phy_hw_unlock(bp); | 1989 | bnx2x_link_reset(&bp->link_params, &bp->link_vars); |
1990 | bnx2x_release_phy_lock(bp); | ||
1991 | } else | ||
1992 | BNX2X_ERR("Bootcode is missing -not resetting link\n"); | ||
1974 | } | 1993 | } |
1975 | 1994 | ||
1976 | static u8 bnx2x_link_test(struct bnx2x *bp) | 1995 | static u8 bnx2x_link_test(struct bnx2x *bp) |
1977 | { | 1996 | { |
1978 | u8 rc; | 1997 | u8 rc; |
1979 | 1998 | ||
1980 | bnx2x_phy_hw_lock(bp); | 1999 | bnx2x_acquire_phy_lock(bp); |
1981 | rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); | 2000 | rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); |
1982 | bnx2x_phy_hw_unlock(bp); | 2001 | bnx2x_release_phy_lock(bp); |
1983 | 2002 | ||
1984 | return rc; | 2003 | return rc; |
1985 | } | 2004 | } |
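The reworked bnx2x_initial_phy_init() also seeds the auto-advertised flow control from the MTU, per the new comment about jumbo frames: E1H multi-function mode and ordinary MTUs advertise pause in both directions, while an MTU above 5000 bytes advertises TX pause only. Condensed from the hunk above, with the values and names the patch itself uses:

if (IS_E1HMF(bp))
        bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
else if (bp->dev->mtu > 5000)
        bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;    /* jumbo: no RX pause */
else
        bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;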
@@ -1991,7 +2010,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp) | |||
1991 | sum of vn_min_rates | 2010 | sum of vn_min_rates |
1992 | or | 2011 | or |
1993 | 0 - if all the min_rates are 0. | 2012 | 0 - if all the min_rates are 0. |
1994 | In the later case fainess algorithm should be deactivated. | 2013 | In the later case fairness algorithm should be deactivated. |
1995 | If not all min_rates are zero then those that are zeroes will | 2014 | If not all min_rates are zero then those that are zeroes will |
1996 | be set to 1. | 2015 | be set to 1. |
1997 | */ | 2016 | */ |
@@ -2114,7 +2133,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func, | |||
2114 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 2133 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
2115 | /* If FAIRNESS is enabled (not all min rates are zeroes) and | 2134 | /* If FAIRNESS is enabled (not all min rates are zeroes) and |
2116 | if current min rate is zero - set it to 1. | 2135 | if current min rate is zero - set it to 1. |
2117 | This is a requirment of the algorithm. */ | 2136 | This is a requirement of the algorithm. */ |
2118 | if ((vn_min_rate == 0) && wsum) | 2137 | if ((vn_min_rate == 0) && wsum) |
2119 | vn_min_rate = DEF_MIN_RATE; | 2138 | vn_min_rate = DEF_MIN_RATE; |
2120 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | 2139 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> |
@@ -2203,9 +2222,9 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2203 | /* Make sure that we are synced with the current statistics */ | 2222 | /* Make sure that we are synced with the current statistics */ |
2204 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 2223 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
2205 | 2224 | ||
2206 | bnx2x_phy_hw_lock(bp); | 2225 | bnx2x_acquire_phy_lock(bp); |
2207 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 2226 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
2208 | bnx2x_phy_hw_unlock(bp); | 2227 | bnx2x_release_phy_lock(bp); |
2209 | 2228 | ||
2210 | if (bp->link_vars.link_up) { | 2229 | if (bp->link_vars.link_up) { |
2211 | 2230 | ||
@@ -2357,7 +2376,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
2357 | } | 2376 | } |
2358 | 2377 | ||
2359 | /* acquire split MCP access lock register */ | 2378 | /* acquire split MCP access lock register */ |
2360 | static int bnx2x_lock_alr(struct bnx2x *bp) | 2379 | static int bnx2x_acquire_alr(struct bnx2x *bp) |
2361 | { | 2380 | { |
2362 | u32 i, j, val; | 2381 | u32 i, j, val; |
2363 | int rc = 0; | 2382 | int rc = 0; |
@@ -2374,15 +2393,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp) | |||
2374 | msleep(5); | 2393 | msleep(5); |
2375 | } | 2394 | } |
2376 | if (!(val & (1L << 31))) { | 2395 | if (!(val & (1L << 31))) { |
2377 | BNX2X_ERR("Cannot acquire nvram interface\n"); | 2396 | BNX2X_ERR("Cannot acquire MCP access lock register\n"); |
2378 | rc = -EBUSY; | 2397 | rc = -EBUSY; |
2379 | } | 2398 | } |
2380 | 2399 | ||
2381 | return rc; | 2400 | return rc; |
2382 | } | 2401 | } |
2383 | 2402 | ||
2384 | /* Release split MCP access lock register */ | 2403 | /* release split MCP access lock register */ |
2385 | static void bnx2x_unlock_alr(struct bnx2x *bp) | 2404 | static void bnx2x_release_alr(struct bnx2x *bp) |
2386 | { | 2405 | { |
2387 | u32 val = 0; | 2406 | u32 val = 0; |
2388 | 2407 | ||
@@ -2395,7 +2414,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) | |||
2395 | u16 rc = 0; | 2414 | u16 rc = 0; |
2396 | 2415 | ||
2397 | barrier(); /* status block is written to by the chip */ | 2416 | barrier(); /* status block is written to by the chip */ |
2398 | |||
2399 | if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { | 2417 | if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { |
2400 | bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; | 2418 | bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; |
2401 | rc |= 1; | 2419 | rc |= 1; |
@@ -2426,26 +2444,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) | |||
2426 | static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | 2444 | static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) |
2427 | { | 2445 | { |
2428 | int port = BP_PORT(bp); | 2446 | int port = BP_PORT(bp); |
2429 | int func = BP_FUNC(bp); | 2447 | u32 hc_addr = (HC_REG_COMMAND_REG + port*32 + |
2430 | u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8; | 2448 | COMMAND_REG_ATTN_BITS_SET); |
2431 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 2449 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
2432 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | 2450 | MISC_REG_AEU_MASK_ATTN_FUNC_0; |
2433 | u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : | 2451 | u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : |
2434 | NIG_REG_MASK_INTERRUPT_PORT0; | 2452 | NIG_REG_MASK_INTERRUPT_PORT0; |
2453 | u32 aeu_mask; | ||
2435 | 2454 | ||
2436 | if (~bp->aeu_mask & (asserted & 0xff)) | ||
2437 | BNX2X_ERR("IGU ERROR\n"); | ||
2438 | if (bp->attn_state & asserted) | 2455 | if (bp->attn_state & asserted) |
2439 | BNX2X_ERR("IGU ERROR\n"); | 2456 | BNX2X_ERR("IGU ERROR\n"); |
2440 | 2457 | ||
2458 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); | ||
2459 | aeu_mask = REG_RD(bp, aeu_addr); | ||
2460 | |||
2441 | DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", | 2461 | DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", |
2442 | bp->aeu_mask, asserted); | 2462 | aeu_mask, asserted); |
2443 | bp->aeu_mask &= ~(asserted & 0xff); | 2463 | aeu_mask &= ~(asserted & 0xff); |
2444 | DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask); | 2464 | DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); |
2445 | 2465 | ||
2446 | REG_WR(bp, aeu_addr, bp->aeu_mask); | 2466 | REG_WR(bp, aeu_addr, aeu_mask); |
2467 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); | ||
2447 | 2468 | ||
2469 | DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); | ||
2448 | bp->attn_state |= asserted; | 2470 | bp->attn_state |= asserted; |
2471 | DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); | ||
2449 | 2472 | ||
2450 | if (asserted & ATTN_HARD_WIRED_MASK) { | 2473 | if (asserted & ATTN_HARD_WIRED_MASK) { |
2451 | if (asserted & ATTN_NIG_FOR_FUNC) { | 2474 | if (asserted & ATTN_NIG_FOR_FUNC) { |
@@ -2500,9 +2523,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
2500 | 2523 | ||
2501 | } /* if hardwired */ | 2524 | } /* if hardwired */ |
2502 | 2525 | ||
2503 | DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n", | 2526 | DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", |
2504 | asserted, BAR_IGU_INTMEM + igu_addr); | 2527 | asserted, hc_addr); |
2505 | REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted); | 2528 | REG_WR(bp, hc_addr, asserted); |
2506 | 2529 | ||
2507 | /* now set back the mask */ | 2530 | /* now set back the mask */ |
2508 | if (asserted & ATTN_NIG_FOR_FUNC) | 2531 | if (asserted & ATTN_NIG_FOR_FUNC) |
@@ -2527,15 +2550,16 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | |||
2527 | BNX2X_ERR("SPIO5 hw attention\n"); | 2550 | BNX2X_ERR("SPIO5 hw attention\n"); |
2528 | 2551 | ||
2529 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 2552 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { |
2553 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G: | ||
2530 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 2554 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: |
2531 | /* Fan failure attention */ | 2555 | /* Fan failure attention */ |
2532 | 2556 | ||
2533 | /* The PHY reset is controled by GPIO 1 */ | 2557 | /* The PHY reset is controlled by GPIO 1 */ |
2534 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | 2558 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, |
2535 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | 2559 | MISC_REGISTERS_GPIO_OUTPUT_LOW, port); |
2536 | /* Low power mode is controled by GPIO 2 */ | 2560 | /* Low power mode is controlled by GPIO 2 */ |
2537 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 2561 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
2538 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | 2562 | MISC_REGISTERS_GPIO_OUTPUT_LOW, port); |
2539 | /* mark the failure */ | 2563 | /* mark the failure */ |
2540 | bp->link_params.ext_phy_config &= | 2564 | bp->link_params.ext_phy_config &= |
2541 | ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; | 2565 | ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; |
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
2699 | int index; | 2723 | int index; |
2700 | u32 reg_addr; | 2724 | u32 reg_addr; |
2701 | u32 val; | 2725 | u32 val; |
2726 | u32 aeu_mask; | ||
2702 | 2727 | ||
2703 | /* need to take HW lock because MCP or other port might also | 2728 | /* need to take HW lock because MCP or other port might also |
2704 | try to handle this event */ | 2729 | try to handle this event */ |
2705 | bnx2x_lock_alr(bp); | 2730 | bnx2x_acquire_alr(bp); |
2706 | 2731 | ||
2707 | attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); | 2732 | attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); |
2708 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); | 2733 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); |
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
2734 | HW_PRTY_ASSERT_SET_1) || | 2759 | HW_PRTY_ASSERT_SET_1) || |
2735 | (attn.sig[2] & group_mask.sig[2] & | 2760 | (attn.sig[2] & group_mask.sig[2] & |
2736 | HW_PRTY_ASSERT_SET_2)) | 2761 | HW_PRTY_ASSERT_SET_2)) |
2737 | BNX2X_ERR("FATAL HW block parity attention\n"); | 2762 | BNX2X_ERR("FATAL HW block parity attention\n"); |
2738 | } | 2763 | } |
2739 | } | 2764 | } |
2740 | 2765 | ||
2741 | bnx2x_unlock_alr(bp); | 2766 | bnx2x_release_alr(bp); |
2742 | 2767 | ||
2743 | reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; | 2768 | reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); |
2744 | 2769 | ||
2745 | val = ~deasserted; | 2770 | val = ~deasserted; |
2746 | /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", | 2771 | DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", |
2747 | val, BAR_IGU_INTMEM + reg_addr); */ | 2772 | val, reg_addr); |
2748 | REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); | 2773 | REG_WR(bp, reg_addr, val); |
2749 | 2774 | ||
2750 | if (bp->aeu_mask & (deasserted & 0xff)) | ||
2751 | BNX2X_ERR("IGU BUG!\n"); | ||
2752 | if (~bp->attn_state & deasserted) | 2775 | if (~bp->attn_state & deasserted) |
2753 | BNX2X_ERR("IGU BUG!\n"); | 2776 | BNX2X_ERR("IGU ERROR\n"); |
2754 | 2777 | ||
2755 | reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 2778 | reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
2756 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | 2779 | MISC_REG_AEU_MASK_ATTN_FUNC_0; |
2757 | 2780 | ||
2758 | DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask); | 2781 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); |
2759 | bp->aeu_mask |= (deasserted & 0xff); | 2782 | aeu_mask = REG_RD(bp, reg_addr); |
2760 | 2783 | ||
2761 | DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask); | 2784 | DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", |
2762 | REG_WR(bp, reg_addr, bp->aeu_mask); | 2785 | aeu_mask, deasserted); |
2786 | aeu_mask |= (deasserted & 0xff); | ||
2787 | DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); | ||
2788 | |||
2789 | REG_WR(bp, reg_addr, aeu_mask); | ||
2790 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); | ||
2763 | 2791 | ||
2764 | DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); | 2792 | DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); |
2765 | bp->attn_state &= ~deasserted; | 2793 | bp->attn_state &= ~deasserted; |
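Both the assert and deassert paths above stop caching the AEU mask in bp->aeu_mask and instead read-modify-write the hardware register under the new per-port attention-mask hardware lock. A condensed sketch of the shared pattern, derived from the two hunks (the OR line is the deassert-path variant, shown here as a comment):

bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

aeu_mask = REG_RD(bp, aeu_addr);
aeu_mask &= ~(asserted & 0xff);       /* assert path: mask the newly raised lines */
/* aeu_mask |= (deasserted & 0xff);      deassert path: unmask them again */
REG_WR(bp, aeu_addr, aeu_mask);

bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);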
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
2800 | 2828 | ||
2801 | /* Return here if interrupt is disabled */ | 2829 | /* Return here if interrupt is disabled */ |
2802 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 2830 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
2803 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); | 2831 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); |
2804 | return; | 2832 | return; |
2805 | } | 2833 | } |
2806 | 2834 | ||
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
2808 | /* if (status == 0) */ | 2836 | /* if (status == 0) */ |
2809 | /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ | 2837 | /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ |
2810 | 2838 | ||
2811 | DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status); | 2839 | DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); |
2812 | 2840 | ||
2813 | /* HW attentions */ | 2841 | /* HW attentions */ |
2814 | if (status & 0x1) | 2842 | if (status & 0x1) |
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
2838 | 2866 | ||
2839 | /* Return here if interrupt is disabled */ | 2867 | /* Return here if interrupt is disabled */ |
2840 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 2868 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
2841 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); | 2869 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); |
2842 | return IRQ_HANDLED; | 2870 | return IRQ_HANDLED; |
2843 | } | 2871 | } |
2844 | 2872 | ||
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
2876 | /* underflow */ \ | 2904 | /* underflow */ \ |
2877 | d_hi = m_hi - s_hi; \ | 2905 | d_hi = m_hi - s_hi; \ |
2878 | if (d_hi > 0) { \ | 2906 | if (d_hi > 0) { \ |
2879 | /* we can 'loan' 1 */ \ | 2907 | /* we can 'loan' 1 */ \ |
2880 | d_hi--; \ | 2908 | d_hi--; \ |
2881 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ | 2909 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ |
2882 | } else { \ | 2910 | } else { \ |
2883 | /* m_hi <= s_hi */ \ | 2911 | /* m_hi <= s_hi */ \ |
2884 | d_hi = 0; \ | 2912 | d_hi = 0; \ |
2885 | d_lo = 0; \ | 2913 | d_lo = 0; \ |
2886 | } \ | 2914 | } \ |
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
2890 | d_hi = 0; \ | 2918 | d_hi = 0; \ |
2891 | d_lo = 0; \ | 2919 | d_lo = 0; \ |
2892 | } else { \ | 2920 | } else { \ |
2893 | /* m_hi >= s_hi */ \ | 2921 | /* m_hi >= s_hi */ \ |
2894 | d_hi = m_hi - s_hi; \ | 2922 | d_hi = m_hi - s_hi; \ |
2895 | d_lo = m_lo - s_lo; \ | 2923 | d_lo = m_lo - s_lo; \ |
2896 | } \ | 2924 | } \ |
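The whitespace-only hunks above touch the driver's hi/lo 64-bit difference macro, whose logic is easier to follow written out as a function. The sketch below is a standalone restatement of that logic (not the driver macro itself): subtract two 64-bit counters kept as separate 32-bit words, borrowing ("loaning") 1 from the high word when the low word underflows and clamping to zero when the minuend is smaller.

#include <stdint.h>

static void diff64(uint32_t m_hi, uint32_t m_lo, uint32_t s_hi, uint32_t s_lo,
                   uint32_t *d_hi, uint32_t *d_lo)
{
        if (m_lo < s_lo) {
                /* low word underflows: try to borrow 1 from the high word */
                if (m_hi > s_hi) {
                        *d_hi = m_hi - s_hi - 1;
                        *d_lo = m_lo + (UINT32_MAX - s_lo) + 1;
                } else {
                        /* minuend is smaller than subtrahend: clamp to zero */
                        *d_hi = 0;
                        *d_lo = 0;
                }
        } else {
                if (m_hi < s_hi) {
                        *d_hi = 0;
                        *d_lo = 0;
                } else {
                        /* no borrow needed: plain per-word subtraction */
                        *d_hi = m_hi - s_hi;
                        *d_lo = m_lo - s_lo;
                }
        }
}

For example, diff64(0x2, 0x1, 0x1, 0x3, &hi, &lo) leaves hi = 0x0 and lo = 0xFFFFFFFE, matching 0x200000001 - 0x100000003.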
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref) | |||
2963 | * Init service functions | 2991 | * Init service functions |
2964 | */ | 2992 | */ |
2965 | 2993 | ||
2966 | static void bnx2x_storm_stats_init(struct bnx2x *bp) | ||
2967 | { | ||
2968 | int func = BP_FUNC(bp); | ||
2969 | |||
2970 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1); | ||
2971 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
2972 | XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0); | ||
2973 | |||
2974 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1); | ||
2975 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
2976 | TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0); | ||
2977 | |||
2978 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0); | ||
2979 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2980 | CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0); | ||
2981 | |||
2982 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
2983 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), | ||
2984 | U64_LO(bnx2x_sp_mapping(bp, fw_stats))); | ||
2985 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
2986 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, | ||
2987 | U64_HI(bnx2x_sp_mapping(bp, fw_stats))); | ||
2988 | |||
2989 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
2990 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), | ||
2991 | U64_LO(bnx2x_sp_mapping(bp, fw_stats))); | ||
2992 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
2993 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, | ||
2994 | U64_HI(bnx2x_sp_mapping(bp, fw_stats))); | ||
2995 | } | ||
2996 | |||
2997 | static void bnx2x_storm_stats_post(struct bnx2x *bp) | 2994 | static void bnx2x_storm_stats_post(struct bnx2x *bp) |
2998 | { | 2995 | { |
2999 | if (!bp->stats_pending) { | 2996 | if (!bp->stats_pending) { |
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp) | |||
3032 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); | 3029 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); |
3033 | bp->port.old_nig_stats.brb_discard = | 3030 | bp->port.old_nig_stats.brb_discard = |
3034 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); | 3031 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); |
3032 | bp->port.old_nig_stats.brb_truncate = | ||
3033 | REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); | ||
3035 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, | 3034 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, |
3036 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); | 3035 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); |
3037 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, | 3036 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, |
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
3101 | 3100 | ||
3102 | might_sleep(); | 3101 | might_sleep(); |
3103 | while (*stats_comp != DMAE_COMP_VAL) { | 3102 | while (*stats_comp != DMAE_COMP_VAL) { |
3104 | msleep(1); | ||
3105 | if (!cnt) { | 3103 | if (!cnt) { |
3106 | BNX2X_ERR("timeout waiting for stats finished\n"); | 3104 | BNX2X_ERR("timeout waiting for stats finished\n"); |
3107 | break; | 3105 | break; |
3108 | } | 3106 | } |
3109 | cnt--; | 3107 | cnt--; |
3108 | msleep(1); | ||
3110 | } | 3109 | } |
3111 | return 1; | 3110 | return 1; |
3112 | } | 3111 | } |
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
3451 | UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); | 3450 | UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); |
3452 | UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); | 3451 | UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); |
3453 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); | 3452 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); |
3454 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); | 3453 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); |
3455 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf); | ||
3456 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); | 3454 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); |
3457 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived); | 3455 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived); |
3458 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); | 3456 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); |
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
3536 | 3534 | ||
3537 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, | 3535 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, |
3538 | new->brb_discard - old->brb_discard); | 3536 | new->brb_discard - old->brb_discard); |
3537 | ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, | ||
3538 | new->brb_truncate - old->brb_truncate); | ||
3539 | 3539 | ||
3540 | UPDATE_STAT64_NIG(egress_mac_pkt0, | 3540 | UPDATE_STAT64_NIG(egress_mac_pkt0, |
3541 | etherstatspkts1024octetsto1522octets); | 3541 | etherstatspkts1024octetsto1522octets); |
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) | |||
3713 | nstats->rx_length_errors = | 3713 | nstats->rx_length_errors = |
3714 | estats->rx_stat_etherstatsundersizepkts_lo + | 3714 | estats->rx_stat_etherstatsundersizepkts_lo + |
3715 | estats->jabber_packets_received; | 3715 | estats->jabber_packets_received; |
3716 | nstats->rx_over_errors = estats->brb_drop_lo + | 3716 | nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo; |
3717 | estats->brb_truncate_discard; | ||
3718 | nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo; | 3717 | nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo; |
3719 | nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo; | 3718 | nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo; |
3720 | nstats->rx_fifo_errors = old_tclient->no_buff_discard; | 3719 | nstats->rx_fifo_errors = old_tclient->no_buff_discard; |
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
3783 | bp->fp->rx_comp_cons), | 3782 | bp->fp->rx_comp_cons), |
3784 | le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); | 3783 | le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); |
3785 | printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n", | 3784 | printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n", |
3786 | netif_queue_stopped(bp->dev)? "Xoff" : "Xon", | 3785 | netif_queue_stopped(bp->dev) ? "Xoff" : "Xon", |
3787 | estats->driver_xoff, estats->brb_drop_lo); | 3786 | estats->driver_xoff, estats->brb_drop_lo); |
3788 | printk(KERN_DEBUG "tstats: checksum_discard %u " | 3787 | printk(KERN_DEBUG "tstats: checksum_discard %u " |
3789 | "packets_too_big_discard %u no_buff_discard %u " | 3788 | "packets_too_big_discard %u no_buff_discard %u " |
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) | |||
3994 | 3993 | ||
3995 | bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + | 3994 | bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + |
3996 | USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | 3995 | USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, |
3997 | sizeof(struct ustorm_def_status_block)/4); | 3996 | sizeof(struct ustorm_status_block)/4); |
3998 | bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + | 3997 | bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + |
3999 | CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | 3998 | CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, |
4000 | sizeof(struct cstorm_def_status_block)/4); | 3999 | sizeof(struct cstorm_status_block)/4); |
4001 | } | 4000 | } |
4002 | 4001 | ||
4003 | static void bnx2x_init_sb(struct bnx2x *bp, int sb_id, | 4002 | static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, |
4004 | struct host_status_block *sb, dma_addr_t mapping) | 4003 | dma_addr_t mapping, int sb_id) |
4005 | { | 4004 | { |
4006 | int port = BP_PORT(bp); | 4005 | int port = BP_PORT(bp); |
4007 | int func = BP_FUNC(bp); | 4006 | int func = BP_FUNC(bp); |
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4077 | atten_status_block); | 4076 | atten_status_block); |
4078 | def_sb->atten_status_block.status_block_id = sb_id; | 4077 | def_sb->atten_status_block.status_block_id = sb_id; |
4079 | 4078 | ||
4080 | bp->def_att_idx = 0; | ||
4081 | bp->attn_state = 0; | 4079 | bp->attn_state = 0; |
4082 | 4080 | ||
4083 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4081 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4094 | reg_offset + 0xc + 0x10*index); | 4092 | reg_offset + 0xc + 0x10*index); |
4095 | } | 4093 | } |
4096 | 4094 | ||
4097 | bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | ||
4098 | MISC_REG_AEU_MASK_ATTN_FUNC_0)); | ||
4099 | |||
4100 | reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : | 4095 | reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : |
4101 | HC_REG_ATTN_MSG0_ADDR_L); | 4096 | HC_REG_ATTN_MSG0_ADDR_L); |
4102 | 4097 | ||
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4114 | u_def_status_block); | 4109 | u_def_status_block); |
4115 | def_sb->u_def_status_block.status_block_id = sb_id; | 4110 | def_sb->u_def_status_block.status_block_id = sb_id; |
4116 | 4111 | ||
4117 | bp->def_u_idx = 0; | ||
4118 | |||
4119 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4112 | REG_WR(bp, BAR_USTRORM_INTMEM + |
4120 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | 4113 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
4121 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4114 | REG_WR(bp, BAR_USTRORM_INTMEM + |
4122 | ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | 4115 | ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
4123 | U64_HI(section)); | 4116 | U64_HI(section)); |
4124 | REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + | 4117 | REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + |
4125 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | 4118 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); |
4126 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func), | ||
4127 | BNX2X_BTR); | ||
4128 | 4119 | ||
4129 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) | 4120 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) |
4130 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 4121 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4135 | c_def_status_block); | 4126 | c_def_status_block); |
4136 | def_sb->c_def_status_block.status_block_id = sb_id; | 4127 | def_sb->c_def_status_block.status_block_id = sb_id; |
4137 | 4128 | ||
4138 | bp->def_c_idx = 0; | ||
4139 | |||
4140 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 4129 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4141 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | 4130 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
4142 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 4131 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4143 | ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | 4132 | ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
4144 | U64_HI(section)); | 4133 | U64_HI(section)); |
4145 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + | 4134 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + |
4146 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | 4135 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); |
4147 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func), | ||
4148 | BNX2X_BTR); | ||
4149 | 4136 | ||
4150 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) | 4137 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) |
4151 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 4138 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4156 | t_def_status_block); | 4143 | t_def_status_block); |
4157 | def_sb->t_def_status_block.status_block_id = sb_id; | 4144 | def_sb->t_def_status_block.status_block_id = sb_id; |
4158 | 4145 | ||
4159 | bp->def_t_idx = 0; | ||
4160 | |||
4161 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 4146 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
4162 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | 4147 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
4163 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 4148 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
4164 | ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | 4149 | ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
4165 | U64_HI(section)); | 4150 | U64_HI(section)); |
4166 | REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + | 4151 | REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + |
4167 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | 4152 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); |
4168 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func), | ||
4169 | BNX2X_BTR); | ||
4170 | 4153 | ||
4171 | for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) | 4154 | for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) |
4172 | REG_WR16(bp, BAR_TSTRORM_INTMEM + | 4155 | REG_WR16(bp, BAR_TSTRORM_INTMEM + |
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4177 | x_def_status_block); | 4160 | x_def_status_block); |
4178 | def_sb->x_def_status_block.status_block_id = sb_id; | 4161 | def_sb->x_def_status_block.status_block_id = sb_id; |
4179 | 4162 | ||
4180 | bp->def_x_idx = 0; | ||
4181 | |||
4182 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 4163 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
4183 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | 4164 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
4184 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 4165 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
4185 | ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | 4166 | ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
4186 | U64_HI(section)); | 4167 | U64_HI(section)); |
4187 | REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + | 4168 | REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + |
4188 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | 4169 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); |
4189 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func), | ||
4190 | BNX2X_BTR); | ||
4191 | 4170 | ||
4192 | for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) | 4171 | for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) |
4193 | REG_WR16(bp, BAR_XSTRORM_INTMEM + | 4172 | REG_WR16(bp, BAR_XSTRORM_INTMEM + |
4194 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); | 4173 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
4195 | 4174 | ||
4196 | bp->stats_pending = 0; | 4175 | bp->stats_pending = 0; |
4176 | bp->set_mac_pending = 0; | ||
4197 | 4177 | ||
4198 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 4178 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
4199 | } | 4179 | } |
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp) | |||
4209 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ | 4189 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ |
4210 | REG_WR8(bp, BAR_USTRORM_INTMEM + | 4190 | REG_WR8(bp, BAR_USTRORM_INTMEM + |
4211 | USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, | 4191 | USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, |
4212 | HC_INDEX_U_ETH_RX_CQ_CONS), | 4192 | U_SB_ETH_RX_CQ_INDEX), |
4213 | bp->rx_ticks/12); | 4193 | bp->rx_ticks/12); |
4214 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 4194 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
4215 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, | 4195 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, |
4216 | HC_INDEX_U_ETH_RX_CQ_CONS), | 4196 | U_SB_ETH_RX_CQ_INDEX), |
4197 | bp->rx_ticks ? 0 : 1); | ||
4198 | REG_WR16(bp, BAR_USTRORM_INTMEM + | ||
4199 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, | ||
4200 | U_SB_ETH_RX_BD_INDEX), | ||
4217 | bp->rx_ticks ? 0 : 1); | 4201 | bp->rx_ticks ? 0 : 1); |
4218 | 4202 | ||
4219 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ | 4203 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ |
4220 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | 4204 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
4221 | CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, | 4205 | CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, |
4222 | HC_INDEX_C_ETH_TX_CQ_CONS), | 4206 | C_SB_ETH_TX_CQ_INDEX), |
4223 | bp->tx_ticks/12); | 4207 | bp->tx_ticks/12); |
4224 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 4208 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
4225 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, | 4209 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, |
4226 | HC_INDEX_C_ETH_TX_CQ_CONS), | 4210 | C_SB_ETH_TX_CQ_INDEX), |
4227 | bp->tx_ticks ? 0 : 1); | 4211 | bp->tx_ticks ? 0 : 1); |
4228 | } | 4212 | } |
4229 | } | 4213 | } |
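The coalescing hunk above programs one host-coalescing index at a time: an 8-bit timeout written as ticks/12 and a 16-bit disable word that is set when the requested tick value is zero. A minimal sketch of that pattern, with the driver's register accessors replaced by stand-ins (only the /12 scaling and the zero-means-disable convention are taken from the hunk; addresses and names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the 8- and 16-bit register writes used by the driver. */
static void reg_wr8(uint32_t addr, uint8_t val)   { printf("wr8  0x%x = %d\n", (unsigned)addr, (int)val); }
static void reg_wr16(uint32_t addr, uint16_t val) { printf("wr16 0x%x = %d\n", (unsigned)addr, (int)val); }

/*
 * Program one coalescing index: the timeout is expressed in units of
 * 12 ticks, and a non-zero "disable" word turns coalescing off for
 * that index when the requested tick value is zero.
 */
static void set_coalesce(uint32_t timeout_addr, uint32_t disable_addr, uint16_t ticks)
{
	reg_wr8(timeout_addr, (uint8_t)(ticks / 12));
	reg_wr16(disable_addr, ticks ? 0 : 1);
}

int main(void)
{
	set_coalesce(0x1000, 0x1002, 48);	/* coalesce, timeout of 4 units */
	set_coalesce(0x1010, 0x1012, 0);	/* 0 ticks: disable coalescing */
	return 0;
}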
@@ -4256,7 +4240,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, | |||
4256 | static void bnx2x_init_rx_rings(struct bnx2x *bp) | 4240 | static void bnx2x_init_rx_rings(struct bnx2x *bp) |
4257 | { | 4241 | { |
4258 | int func = BP_FUNC(bp); | 4242 | int func = BP_FUNC(bp); |
4259 | u16 ring_prod, cqe_ring_prod = 0; | 4243 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : |
4244 | ETH_MAX_AGGREGATION_QUEUES_E1H; | ||
4245 | u16 ring_prod, cqe_ring_prod; | ||
4260 | int i, j; | 4246 | int i, j; |
4261 | 4247 | ||
4262 | bp->rx_buf_use_size = bp->dev->mtu; | 4248 | bp->rx_buf_use_size = bp->dev->mtu; |
@@ -4270,9 +4256,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4270 | bp->dev->mtu + ETH_OVREHEAD); | 4256 | bp->dev->mtu + ETH_OVREHEAD); |
4271 | 4257 | ||
4272 | for_each_queue(bp, j) { | 4258 | for_each_queue(bp, j) { |
4273 | for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { | 4259 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
4274 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
4275 | 4260 | ||
4261 | for (i = 0; i < max_agg_queues; i++) { | ||
4276 | fp->tpa_pool[i].skb = | 4262 | fp->tpa_pool[i].skb = |
4277 | netdev_alloc_skb(bp->dev, bp->rx_buf_size); | 4263 | netdev_alloc_skb(bp->dev, bp->rx_buf_size); |
4278 | if (!fp->tpa_pool[i].skb) { | 4264 | if (!fp->tpa_pool[i].skb) { |
@@ -4352,8 +4338,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4352 | BNX2X_ERR("disabling TPA for queue[%d]\n", j); | 4338 | BNX2X_ERR("disabling TPA for queue[%d]\n", j); |
4353 | /* Cleanup already allocated elements */ | 4339 | /* Cleanup already allocated elements */ |
4354 | bnx2x_free_rx_sge_range(bp, fp, ring_prod); | 4340 | bnx2x_free_rx_sge_range(bp, fp, ring_prod); |
4355 | bnx2x_free_tpa_pool(bp, fp, | 4341 | bnx2x_free_tpa_pool(bp, fp, max_agg_queues); |
4356 | ETH_MAX_AGGREGATION_QUEUES_E1H); | ||
4357 | fp->disable_tpa = 1; | 4342 | fp->disable_tpa = 1; |
4358 | ring_prod = 0; | 4343 | ring_prod = 0; |
4359 | break; | 4344 | break; |
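When the per-queue TPA pool is pre-allocated, a failed skb allocation does not abort the load; the hunk above frees whatever was already allocated for that queue and falls back to running with TPA disabled. A standalone sketch of that graceful-degradation pattern (pool size, buffer size and names are illustrative, not the driver's values):

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 16

static void *pool[POOL_SIZE];	/* one queue's pre-allocated buffers */
static int feature_disabled;

static void free_pool(int count)
{
	for (int i = 0; i < count; i++)
		free(pool[i]);
}

static void init_pool(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		pool[i] = malloc(4096);
		if (!pool[i]) {
			/* Degrade gracefully: release the partial pool and
			 * keep running with the optional feature turned off. */
			fprintf(stderr, "disabling feature: only %d buffers\n", i);
			free_pool(i);
			feature_disabled = 1;
			return;
		}
	}
}

int main(void)
{
	init_pool();
	if (!feature_disabled)
		free_pool(POOL_SIZE);
	return 0;
}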
@@ -4363,13 +4348,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4363 | fp->rx_sge_prod = ring_prod; | 4348 | fp->rx_sge_prod = ring_prod; |
4364 | 4349 | ||
4365 | /* Allocate BDs and initialize BD ring */ | 4350 | /* Allocate BDs and initialize BD ring */ |
4366 | fp->rx_comp_cons = fp->rx_alloc_failed = 0; | 4351 | fp->rx_comp_cons = 0; |
4367 | cqe_ring_prod = ring_prod = 0; | 4352 | cqe_ring_prod = ring_prod = 0; |
4368 | for (i = 0; i < bp->rx_ring_size; i++) { | 4353 | for (i = 0; i < bp->rx_ring_size; i++) { |
4369 | if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { | 4354 | if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { |
4370 | BNX2X_ERR("was only able to allocate " | 4355 | BNX2X_ERR("was only able to allocate " |
4371 | "%d rx skbs\n", i); | 4356 | "%d rx skbs\n", i); |
4372 | fp->rx_alloc_failed++; | 4357 | bp->eth_stats.rx_skb_alloc_failed++; |
4373 | break; | 4358 | break; |
4374 | } | 4359 | } |
4375 | ring_prod = NEXT_RX_IDX(ring_prod); | 4360 | ring_prod = NEXT_RX_IDX(ring_prod); |
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
4497 | } | 4482 | } |
4498 | 4483 | ||
4499 | context->cstorm_st_context.sb_index_number = | 4484 | context->cstorm_st_context.sb_index_number = |
4500 | HC_INDEX_C_ETH_TX_CQ_CONS; | 4485 | C_SB_ETH_TX_CQ_INDEX; |
4501 | context->cstorm_st_context.status_block_id = sb_id; | 4486 | context->cstorm_st_context.status_block_id = sb_id; |
4502 | 4487 | ||
4503 | context->xstorm_ag_context.cdu_reserved = | 4488 | context->xstorm_ag_context.cdu_reserved = |
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp) | |||
4535 | int i; | 4520 | int i; |
4536 | 4521 | ||
4537 | tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; | 4522 | tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; |
4538 | tstorm_client.statistics_counter_id = 0; | 4523 | tstorm_client.statistics_counter_id = BP_CL_ID(bp); |
4539 | tstorm_client.config_flags = | 4524 | tstorm_client.config_flags = |
4540 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; | 4525 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; |
4541 | #ifdef BCM_VLAN | 4526 | #ifdef BCM_VLAN |
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4579 | int func = BP_FUNC(bp); | 4564 | int func = BP_FUNC(bp); |
4580 | int i; | 4565 | int i; |
4581 | 4566 | ||
4582 | DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); | 4567 | DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask); |
4583 | 4568 | ||
4584 | switch (mode) { | 4569 | switch (mode) { |
4585 | case BNX2X_RX_MODE_NONE: /* no Rx */ | 4570 | case BNX2X_RX_MODE_NONE: /* no Rx */ |
@@ -4617,13 +4602,46 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4617 | bnx2x_set_client_config(bp); | 4602 | bnx2x_set_client_config(bp); |
4618 | } | 4603 | } |
4619 | 4604 | ||
4620 | static void bnx2x_init_internal(struct bnx2x *bp) | 4605 | static void bnx2x_init_internal_common(struct bnx2x *bp) |
4606 | { | ||
4607 | int i; | ||
4608 | |||
4609 | if (bp->flags & TPA_ENABLE_FLAG) { | ||
4610 | struct tstorm_eth_tpa_exist tpa = {0}; | ||
4611 | |||
4612 | tpa.tpa_exist = 1; | ||
4613 | |||
4614 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET, | ||
4615 | ((u32 *)&tpa)[0]); | ||
4616 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4, | ||
4617 | ((u32 *)&tpa)[1]); | ||
4618 | } | ||
4619 | |||
4620 | /* Zero this manually as its initialization is | ||
4621 | currently missing in the initTool */ | ||
4622 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) | ||
4623 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
4624 | USTORM_AGG_DATA_OFFSET + i * 4, 0); | ||
4625 | } | ||
4626 | |||
4627 | static void bnx2x_init_internal_port(struct bnx2x *bp) | ||
4628 | { | ||
4629 | int port = BP_PORT(bp); | ||
4630 | |||
4631 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | ||
4632 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | ||
4633 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | ||
4634 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | ||
4635 | } | ||
4636 | |||
4637 | static void bnx2x_init_internal_func(struct bnx2x *bp) | ||
4621 | { | 4638 | { |
4622 | struct tstorm_eth_function_common_config tstorm_config = {0}; | 4639 | struct tstorm_eth_function_common_config tstorm_config = {0}; |
4623 | struct stats_indication_flags stats_flags = {0}; | 4640 | struct stats_indication_flags stats_flags = {0}; |
4624 | int port = BP_PORT(bp); | 4641 | int port = BP_PORT(bp); |
4625 | int func = BP_FUNC(bp); | 4642 | int func = BP_FUNC(bp); |
4626 | int i; | 4643 | int i; |
4644 | u16 max_agg_size; | ||
4627 | 4645 | ||
4628 | if (is_multi(bp)) { | 4646 | if (is_multi(bp)) { |
4629 | tstorm_config.config_flags = MULTI_FLAGS; | 4647 | tstorm_config.config_flags = MULTI_FLAGS; |
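The new bnx2x_init_internal_common() in the hunk above pushes a small host structure (the TPA-exists flag) into firmware scratchpad memory as two consecutive 32-bit words, because that memory is only word-addressable through REG_WR. A self-contained sketch of the idea, with the register write simulated and the structure purely illustrative; it uses memcpy instead of the driver's direct (u32 *) cast so the sketch stays strict-aliasing-safe:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the driver's 32-bit window write into firmware memory. */
static void reg_wr(uint32_t off, uint32_t val)
{
	printf("wr 0x%04x = 0x%08x\n", (unsigned)off, (unsigned)val);
}

/* An example 8-byte host structure mirroring some firmware layout. */
struct fw_flags {
	uint32_t feature_exists;
	uint32_t reserved;
};

/* Copy a small structure into firmware memory one 32-bit word at a time. */
static void write_struct(uint32_t base, const void *p, size_t len)
{
	uint32_t word;

	for (size_t i = 0; i < len / 4; i++) {
		memcpy(&word, (const uint8_t *)p + i * 4, 4);
		reg_wr(base + (uint32_t)(i * 4), word);
	}
}

int main(void)
{
	struct fw_flags flags = { .feature_exists = 1, .reserved = 0 };

	write_struct(0x2000, &flags, sizeof(flags));
	return 0;
}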
@@ -4636,31 +4654,53 @@ static void bnx2x_init_internal(struct bnx2x *bp) | |||
4636 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), | 4654 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), |
4637 | (*(u32 *)&tstorm_config)); | 4655 | (*(u32 *)&tstorm_config)); |
4638 | 4656 | ||
4639 | /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", | ||
4640 | (*(u32 *)&tstorm_config)); */ | ||
4641 | |||
4642 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ | 4657 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ |
4643 | bnx2x_set_storm_rx_mode(bp); | 4658 | bnx2x_set_storm_rx_mode(bp); |
4644 | 4659 | ||
4660 | /* reset xstorm per client statistics */ | ||
4661 | for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) { | ||
4662 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
4663 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) + | ||
4664 | i*4, 0); | ||
4665 | } | ||
4666 | /* reset tstorm per client statistics */ | ||
4667 | for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) { | ||
4668 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
4669 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) + | ||
4670 | i*4, 0); | ||
4671 | } | ||
4672 | |||
4673 | /* Init statistics related context */ | ||
4645 | stats_flags.collect_eth = 1; | 4674 | stats_flags.collect_eth = 1; |
4646 | 4675 | ||
4647 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), | 4676 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), |
4648 | ((u32 *)&stats_flags)[0]); | 4677 | ((u32 *)&stats_flags)[0]); |
4649 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, | 4678 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4, |
4650 | ((u32 *)&stats_flags)[1]); | 4679 | ((u32 *)&stats_flags)[1]); |
4651 | 4680 | ||
4652 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), | 4681 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), |
4653 | ((u32 *)&stats_flags)[0]); | 4682 | ((u32 *)&stats_flags)[0]); |
4654 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, | 4683 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4, |
4655 | ((u32 *)&stats_flags)[1]); | 4684 | ((u32 *)&stats_flags)[1]); |
4656 | 4685 | ||
4657 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), | 4686 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), |
4658 | ((u32 *)&stats_flags)[0]); | 4687 | ((u32 *)&stats_flags)[0]); |
4659 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, | 4688 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4, |
4660 | ((u32 *)&stats_flags)[1]); | 4689 | ((u32 *)&stats_flags)[1]); |
4661 | 4690 | ||
4662 | /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", | 4691 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
4663 | ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ | 4692 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), |
4693 | U64_LO(bnx2x_sp_mapping(bp, fw_stats))); | ||
4694 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
4695 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, | ||
4696 | U64_HI(bnx2x_sp_mapping(bp, fw_stats))); | ||
4697 | |||
4698 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
4699 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), | ||
4700 | U64_LO(bnx2x_sp_mapping(bp, fw_stats))); | ||
4701 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
4702 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, | ||
4703 | U64_HI(bnx2x_sp_mapping(bp, fw_stats))); | ||
4664 | 4704 | ||
4665 | if (CHIP_IS_E1H(bp)) { | 4705 | if (CHIP_IS_E1H(bp)) { |
4666 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, | 4706 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, |
@@ -4676,15 +4716,12 @@ static void bnx2x_init_internal(struct bnx2x *bp) | |||
4676 | bp->e1hov); | 4716 | bp->e1hov); |
4677 | } | 4717 | } |
4678 | 4718 | ||
4679 | /* Zero this manualy as its initialization is | 4719 | /* Init CQ ring mapping and aggregation size */ |
4680 | currently missing in the initTool */ | 4720 | max_agg_size = min((u32)(bp->rx_buf_use_size + |
4681 | for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) | 4721 | 8*BCM_PAGE_SIZE*PAGES_PER_SGE), |
4682 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4722 | (u32)0xffff); |
4683 | USTORM_AGG_DATA_OFFSET + 4*i, 0); | ||
4684 | |||
4685 | for_each_queue(bp, i) { | 4723 | for_each_queue(bp, i) { |
4686 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 4724 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
4687 | u16 max_agg_size; | ||
4688 | 4725 | ||
4689 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4726 | REG_WR(bp, BAR_USTRORM_INTMEM + |
4690 | USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), | 4727 | USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), |
@@ -4693,16 +4730,34 @@ static void bnx2x_init_internal(struct bnx2x *bp) | |||
4693 | USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, | 4730 | USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, |
4694 | U64_HI(fp->rx_comp_mapping)); | 4731 | U64_HI(fp->rx_comp_mapping)); |
4695 | 4732 | ||
4696 | max_agg_size = min((u32)(bp->rx_buf_use_size + | ||
4697 | 8*BCM_PAGE_SIZE*PAGES_PER_SGE), | ||
4698 | (u32)0xffff); | ||
4699 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 4733 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
4700 | USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), | 4734 | USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), |
4701 | max_agg_size); | 4735 | max_agg_size); |
4702 | } | 4736 | } |
4703 | } | 4737 | } |
4704 | 4738 | ||
4705 | static void bnx2x_nic_init(struct bnx2x *bp) | 4739 | static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) |
4740 | { | ||
4741 | switch (load_code) { | ||
4742 | case FW_MSG_CODE_DRV_LOAD_COMMON: | ||
4743 | bnx2x_init_internal_common(bp); | ||
4744 | /* no break */ | ||
4745 | |||
4746 | case FW_MSG_CODE_DRV_LOAD_PORT: | ||
4747 | bnx2x_init_internal_port(bp); | ||
4748 | /* no break */ | ||
4749 | |||
4750 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | ||
4751 | bnx2x_init_internal_func(bp); | ||
4752 | break; | ||
4753 | |||
4754 | default: | ||
4755 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); | ||
4756 | break; | ||
4757 | } | ||
4758 | } | ||
4759 | |||
4760 | static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | ||
4706 | { | 4761 | { |
4707 | int i; | 4762 | int i; |
4708 | 4763 | ||
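The new bnx2x_init_internal() above dispatches on the load code returned by the management firmware and deliberately falls through, so a COMMON load also runs the PORT and FUNCTION stages, a PORT load runs PORT and FUNCTION, and a FUNCTION load runs only its own stage. A compilable sketch of that cascading dispatch; the enum values and stage bodies are placeholders, not the firmware's real codes:

#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_common(void)   { puts("common init (once per chip)"); }
static void init_port(void)     { puts("port init (once per port)"); }
static void init_function(void) { puts("function init (every load)"); }

/*
 * Cascading dispatch: each case intentionally falls through to the
 * next, so a broader load code runs every narrower stage as well.
 */
static void init_internal(enum load_code code)
{
	switch (code) {
	case LOAD_COMMON:
		init_common();
		/* fall through */
	case LOAD_PORT:
		init_port();
		/* fall through */
	case LOAD_FUNCTION:
		init_function();
		break;
	default:
		fprintf(stderr, "unknown load code %d\n", code);
		break;
	}
}

int main(void)
{
	init_internal(LOAD_PORT);	/* runs the port and function stages */
	return 0;
}

The /* no break */ comments in the hunk serve the same purpose as the fall-through comments here: they mark the omission as intentional.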
@@ -4717,19 +4772,20 @@ static void bnx2x_nic_init(struct bnx2x *bp) | |||
4717 | DP(NETIF_MSG_IFUP, | 4772 | DP(NETIF_MSG_IFUP, |
4718 | "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", | 4773 | "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", |
4719 | bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); | 4774 | bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); |
4720 | bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, | 4775 | bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, |
4721 | fp->status_blk_mapping); | 4776 | FP_SB_ID(fp)); |
4777 | bnx2x_update_fpsb_idx(fp); | ||
4722 | } | 4778 | } |
4723 | 4779 | ||
4724 | bnx2x_init_def_sb(bp, bp->def_status_blk, | 4780 | bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping, |
4725 | bp->def_status_blk_mapping, DEF_SB_ID); | 4781 | DEF_SB_ID); |
4782 | bnx2x_update_dsb_idx(bp); | ||
4726 | bnx2x_update_coalesce(bp); | 4783 | bnx2x_update_coalesce(bp); |
4727 | bnx2x_init_rx_rings(bp); | 4784 | bnx2x_init_rx_rings(bp); |
4728 | bnx2x_init_tx_ring(bp); | 4785 | bnx2x_init_tx_ring(bp); |
4729 | bnx2x_init_sp_ring(bp); | 4786 | bnx2x_init_sp_ring(bp); |
4730 | bnx2x_init_context(bp); | 4787 | bnx2x_init_context(bp); |
4731 | bnx2x_init_internal(bp); | 4788 | bnx2x_init_internal(bp, load_code); |
4732 | bnx2x_storm_stats_init(bp); | ||
4733 | bnx2x_init_ind_table(bp); | 4789 | bnx2x_init_ind_table(bp); |
4734 | bnx2x_int_enable(bp); | 4790 | bnx2x_int_enable(bp); |
4735 | } | 4791 | } |
@@ -4878,7 +4934,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4878 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); | 4934 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); |
4879 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); | 4935 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); |
4880 | REG_WR(bp, CFC_REG_DEBUG0, 0x1); | 4936 | REG_WR(bp, CFC_REG_DEBUG0, 0x1); |
4881 | NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); | 4937 | REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); |
4882 | 4938 | ||
4883 | /* Write 0 to parser credits for CFC search request */ | 4939 | /* Write 0 to parser credits for CFC search request */ |
4884 | REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); | 4940 | REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); |
@@ -4933,7 +4989,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4933 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); | 4989 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); |
4934 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); | 4990 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); |
4935 | REG_WR(bp, CFC_REG_DEBUG0, 0x1); | 4991 | REG_WR(bp, CFC_REG_DEBUG0, 0x1); |
4936 | NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); | 4992 | REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); |
4937 | 4993 | ||
4938 | /* Write 0 to parser credits for CFC search request */ | 4994 | /* Write 0 to parser credits for CFC search request */ |
4939 | REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); | 4995 | REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); |
@@ -5000,7 +5056,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
5000 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); | 5056 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); |
5001 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); | 5057 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); |
5002 | REG_WR(bp, CFC_REG_DEBUG0, 0x0); | 5058 | REG_WR(bp, CFC_REG_DEBUG0, 0x0); |
5003 | NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1); | 5059 | REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); |
5004 | 5060 | ||
5005 | DP(NETIF_MSG_HW, "done\n"); | 5061 | DP(NETIF_MSG_HW, "done\n"); |
5006 | 5062 | ||
@@ -5089,11 +5145,6 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5089 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); | 5145 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); |
5090 | #endif | 5146 | #endif |
5091 | 5147 | ||
5092 | #ifndef BCM_ISCSI | ||
5093 | /* set NIC mode */ | ||
5094 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | ||
5095 | #endif | ||
5096 | |||
5097 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); | 5148 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); |
5098 | #ifdef BCM_ISCSI | 5149 | #ifdef BCM_ISCSI |
5099 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); | 5150 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); |
@@ -5163,6 +5214,8 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5163 | } | 5214 | } |
5164 | 5215 | ||
5165 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); | 5216 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); |
5217 | /* set NIC mode */ | ||
5218 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | ||
5166 | if (CHIP_IS_E1H(bp)) | 5219 | if (CHIP_IS_E1H(bp)) |
5167 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); | 5220 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); |
5168 | 5221 | ||
@@ -5296,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5296 | } | 5349 | } |
5297 | 5350 | ||
5298 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 5351 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { |
5352 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G: | ||
5299 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 5353 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: |
5300 | /* Fan failure is indicated by SPIO 5 */ | 5354 | /* Fan failure is indicated by SPIO 5 */ |
5301 | bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, | 5355 | bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, |
@@ -5322,16 +5376,12 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5322 | 5376 | ||
5323 | enable_blocks_attention(bp); | 5377 | enable_blocks_attention(bp); |
5324 | 5378 | ||
5325 | if (bp->flags & TPA_ENABLE_FLAG) { | 5379 | if (!BP_NOMCP(bp)) { |
5326 | struct tstorm_eth_tpa_exist tmp = {0}; | 5380 | bnx2x_acquire_phy_lock(bp); |
5327 | 5381 | bnx2x_common_init_phy(bp, bp->common.shmem_base); | |
5328 | tmp.tpa_exist = 1; | 5382 | bnx2x_release_phy_lock(bp); |
5329 | 5383 | } else | |
5330 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET, | 5384 | BNX2X_ERR("Bootcode is missing - can not initialize link\n"); |
5331 | ((u32 *)&tmp)[0]); | ||
5332 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4, | ||
5333 | ((u32 *)&tmp)[1]); | ||
5334 | } | ||
5335 | 5385 | ||
5336 | return 0; | 5386 | return 0; |
5337 | } | 5387 | } |
@@ -5483,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
5483 | /* Port DMAE comes here */ | 5533 | /* Port DMAE comes here */ |
5484 | 5534 | ||
5485 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 5535 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { |
5536 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G: | ||
5486 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 5537 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: |
5487 | /* add SPIO 5 to group 0 */ | 5538 | /* add SPIO 5 to group 0 */ |
5488 | val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 5539 | val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
@@ -5638,18 +5689,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
5638 | int func = BP_FUNC(bp); | 5689 | int func = BP_FUNC(bp); |
5639 | u32 seq = ++bp->fw_seq; | 5690 | u32 seq = ++bp->fw_seq; |
5640 | u32 rc = 0; | 5691 | u32 rc = 0; |
5692 | u32 cnt = 1; | ||
5693 | u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; | ||
5641 | 5694 | ||
5642 | SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); | 5695 | SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); |
5643 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); | 5696 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); |
5644 | 5697 | ||
5645 | /* let the FW do it's magic ... */ | 5698 | do { |
5646 | msleep(100); /* TBD */ | 5699 | /* let the FW do it's magic ... */ |
5700 | msleep(delay); | ||
5647 | 5701 | ||
5648 | if (CHIP_REV_IS_SLOW(bp)) | 5702 | rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); |
5649 | msleep(900); | ||
5650 | 5703 | ||
5651 | rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); | 5704 | /* Give the FW up to 2 second (200*10ms) */ |
5652 | DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); | 5705 | } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200)); |
5706 | |||
5707 | DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", | ||
5708 | cnt*delay, rc, seq); | ||
5653 | 5709 | ||
5654 | /* is this a reply to our command? */ | 5710 | /* is this a reply to our command? */ |
5655 | if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { | 5711 | if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { |
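The old bnx2x_fw_command() slept a fixed 100 ms (plus 900 ms on slow emulation) and read the firmware mailbox exactly once; the replacement polls the mailbox every 10 ms for up to about 2 seconds and stops as soon as the reply echoes the expected sequence number. A self-contained sketch of that bounded poll; the mailbox here is a single simulated variable, so it collapses the driver's separate command and reply registers, and the delay and iteration limit simply mirror the values in the hunk:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffffu

static uint32_t mailbox;			/* simulated shared mailbox */

static uint32_t mailbox_read(void) { return mailbox; }

/*
 * Send a command and wait for the firmware to echo our sequence number.
 * Poll every 10 ms, for at most 200 iterations (roughly 2 seconds).
 */
static int fw_command(uint32_t command, uint32_t seq)
{
	uint32_t rc;
	int cnt = 1;

	mailbox = command | seq;		/* "write" the command */

	do {
		usleep(10 * 1000);		/* let the firmware work */
		rc = mailbox_read();
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	if (seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		fprintf(stderr, "timed out after ~%d ms\n", cnt * 10);
		return -1;
	}
	return (int)(rc & ~FW_MSG_SEQ_NUMBER_MASK);	/* response code */
}

int main(void)
{
	/* With no firmware behind the mailbox, the echo matches at once
	 * here only because we wrote command|seq ourselves. */
	return fw_command(0x10000, 0x0001) < 0;
}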
@@ -5713,6 +5769,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
5713 | NUM_RCQ_BD); | 5769 | NUM_RCQ_BD); |
5714 | 5770 | ||
5715 | /* SGE ring */ | 5771 | /* SGE ring */ |
5772 | BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring)); | ||
5716 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), | 5773 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), |
5717 | bnx2x_fp(bp, i, rx_sge_mapping), | 5774 | bnx2x_fp(bp, i, rx_sge_mapping), |
5718 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); | 5775 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
@@ -5890,7 +5947,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
5890 | dev_kfree_skb(skb); | 5947 | dev_kfree_skb(skb); |
5891 | } | 5948 | } |
5892 | if (!fp->disable_tpa) | 5949 | if (!fp->disable_tpa) |
5893 | bnx2x_free_tpa_pool(bp, fp, | 5950 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? |
5951 | ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
5894 | ETH_MAX_AGGREGATION_QUEUES_E1H); | 5952 | ETH_MAX_AGGREGATION_QUEUES_E1H); |
5895 | } | 5953 | } |
5896 | } | 5954 | } |
@@ -5976,8 +6034,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
5976 | bnx2x_msix_fp_int, 0, | 6034 | bnx2x_msix_fp_int, 0, |
5977 | bp->dev->name, &bp->fp[i]); | 6035 | bp->dev->name, &bp->fp[i]); |
5978 | if (rc) { | 6036 | if (rc) { |
5979 | BNX2X_ERR("request fp #%d irq failed rc %d\n", | 6037 | BNX2X_ERR("request fp #%d irq failed rc -%d\n", |
5980 | i + offset, rc); | 6038 | i + offset, -rc); |
5981 | bnx2x_free_msix_irqs(bp); | 6039 | bnx2x_free_msix_irqs(bp); |
5982 | return -EBUSY; | 6040 | return -EBUSY; |
5983 | } | 6041 | } |
@@ -6000,11 +6058,49 @@ static int bnx2x_req_irq(struct bnx2x *bp) | |||
6000 | return rc; | 6058 | return rc; |
6001 | } | 6059 | } |
6002 | 6060 | ||
6061 | static void bnx2x_napi_enable(struct bnx2x *bp) | ||
6062 | { | ||
6063 | int i; | ||
6064 | |||
6065 | for_each_queue(bp, i) | ||
6066 | napi_enable(&bnx2x_fp(bp, i, napi)); | ||
6067 | } | ||
6068 | |||
6069 | static void bnx2x_napi_disable(struct bnx2x *bp) | ||
6070 | { | ||
6071 | int i; | ||
6072 | |||
6073 | for_each_queue(bp, i) | ||
6074 | napi_disable(&bnx2x_fp(bp, i, napi)); | ||
6075 | } | ||
6076 | |||
6077 | static void bnx2x_netif_start(struct bnx2x *bp) | ||
6078 | { | ||
6079 | if (atomic_dec_and_test(&bp->intr_sem)) { | ||
6080 | if (netif_running(bp->dev)) { | ||
6081 | if (bp->state == BNX2X_STATE_OPEN) | ||
6082 | netif_wake_queue(bp->dev); | ||
6083 | bnx2x_napi_enable(bp); | ||
6084 | bnx2x_int_enable(bp); | ||
6085 | } | ||
6086 | } | ||
6087 | } | ||
6088 | |||
6089 | static void bnx2x_netif_stop(struct bnx2x *bp) | ||
6090 | { | ||
6091 | bnx2x_int_disable_sync(bp); | ||
6092 | if (netif_running(bp->dev)) { | ||
6093 | bnx2x_napi_disable(bp); | ||
6094 | netif_tx_disable(bp->dev); | ||
6095 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
6096 | } | ||
6097 | } | ||
6098 | |||
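The helpers added above pair queue/NAPI state with interrupt state, and bnx2x_netif_start() uses atomic_dec_and_test() on the interrupt semaphore as a gate: the datapath is only re-enabled by the call that drops the counter to zero. A small user-space model of that gating, assuming the stop path bumps the same counter (as bnx2x_int_disable_sync() does elsewhere in this driver); C11 atomics stand in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int intr_sem = 1;		/* starts gated, as after probe */
static bool datapath_running;

static void netif_stop(void)
{
	atomic_fetch_add(&intr_sem, 1);	/* one more reason to stay off */
	datapath_running = false;
	puts("datapath stopped");
}

static void netif_start(void)
{
	/* Only the call that drops the count to zero restarts things;
	 * fetch_sub returning 1 means the new value is 0. */
	if (atomic_fetch_sub(&intr_sem, 1) == 1) {
		datapath_running = true;
		puts("datapath started");
	}
}

int main(void)
{
	netif_stop();		/* count 1 -> 2 */
	netif_start();		/* 2 -> 1, still gated */
	netif_start();		/* 1 -> 0, restarts */
	return datapath_running ? 0 : 1;
}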
6003 | /* | 6099 | /* |
6004 | * Init service functions | 6100 | * Init service functions |
6005 | */ | 6101 | */ |
6006 | 6102 | ||
6007 | static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) | 6103 | static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) |
6008 | { | 6104 | { |
6009 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | 6105 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); |
6010 | int port = BP_PORT(bp); | 6106 | int port = BP_PORT(bp); |
@@ -6026,11 +6122,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) | |||
6026 | config->config_table[0].cam_entry.lsb_mac_addr = | 6122 | config->config_table[0].cam_entry.lsb_mac_addr = |
6027 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | 6123 | swab16(*(u16 *)&bp->dev->dev_addr[4]); |
6028 | config->config_table[0].cam_entry.flags = cpu_to_le16(port); | 6124 | config->config_table[0].cam_entry.flags = cpu_to_le16(port); |
6029 | config->config_table[0].target_table_entry.flags = 0; | 6125 | if (set) |
6126 | config->config_table[0].target_table_entry.flags = 0; | ||
6127 | else | ||
6128 | CAM_INVALIDATE(config->config_table[0]); | ||
6030 | config->config_table[0].target_table_entry.client_id = 0; | 6129 | config->config_table[0].target_table_entry.client_id = 0; |
6031 | config->config_table[0].target_table_entry.vlan_id = 0; | 6130 | config->config_table[0].target_table_entry.vlan_id = 0; |
6032 | 6131 | ||
6033 | DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", | 6132 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", |
6133 | (set ? "setting" : "clearing"), | ||
6034 | config->config_table[0].cam_entry.msb_mac_addr, | 6134 | config->config_table[0].cam_entry.msb_mac_addr, |
6035 | config->config_table[0].cam_entry.middle_mac_addr, | 6135 | config->config_table[0].cam_entry.middle_mac_addr, |
6036 | config->config_table[0].cam_entry.lsb_mac_addr); | 6136 | config->config_table[0].cam_entry.lsb_mac_addr); |
@@ -6040,8 +6140,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) | |||
6040 | config->config_table[1].cam_entry.middle_mac_addr = 0xffff; | 6140 | config->config_table[1].cam_entry.middle_mac_addr = 0xffff; |
6041 | config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; | 6141 | config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; |
6042 | config->config_table[1].cam_entry.flags = cpu_to_le16(port); | 6142 | config->config_table[1].cam_entry.flags = cpu_to_le16(port); |
6043 | config->config_table[1].target_table_entry.flags = | 6143 | if (set) |
6144 | config->config_table[1].target_table_entry.flags = | ||
6044 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; | 6145 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; |
6146 | else | ||
6147 | CAM_INVALIDATE(config->config_table[1]); | ||
6045 | config->config_table[1].target_table_entry.client_id = 0; | 6148 | config->config_table[1].target_table_entry.client_id = 0; |
6046 | config->config_table[1].target_table_entry.vlan_id = 0; | 6149 | config->config_table[1].target_table_entry.vlan_id = 0; |
6047 | 6150 | ||
@@ -6050,12 +6153,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) | |||
6050 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | 6153 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); |
6051 | } | 6154 | } |
6052 | 6155 | ||
6053 | static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) | 6156 | static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) |
6054 | { | 6157 | { |
6055 | struct mac_configuration_cmd_e1h *config = | 6158 | struct mac_configuration_cmd_e1h *config = |
6056 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); | 6159 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); |
6057 | 6160 | ||
6058 | if (bp->state != BNX2X_STATE_OPEN) { | 6161 | if (set && (bp->state != BNX2X_STATE_OPEN)) { |
6059 | DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); | 6162 | DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); |
6060 | return; | 6163 | return; |
6061 | } | 6164 | } |
@@ -6079,9 +6182,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) | |||
6079 | config->config_table[0].client_id = BP_L_ID(bp); | 6182 | config->config_table[0].client_id = BP_L_ID(bp); |
6080 | config->config_table[0].vlan_id = 0; | 6183 | config->config_table[0].vlan_id = 0; |
6081 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); | 6184 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); |
6082 | config->config_table[0].flags = BP_PORT(bp); | 6185 | if (set) |
6186 | config->config_table[0].flags = BP_PORT(bp); | ||
6187 | else | ||
6188 | config->config_table[0].flags = | ||
6189 | MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; | ||
6083 | 6190 | ||
6084 | DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", | 6191 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", |
6192 | (set ? "setting" : "clearing"), | ||
6085 | config->config_table[0].msb_mac_addr, | 6193 | config->config_table[0].msb_mac_addr, |
6086 | config->config_table[0].middle_mac_addr, | 6194 | config->config_table[0].middle_mac_addr, |
6087 | config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); | 6195 | config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); |
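With the new `set` argument, bnx2x_set_mac_addr_e1()/e1h() post the same MAC configuration command for both directions: on load the entry is filled with the station address and valid flags, on unload it is marked invalid (CAM_INVALIDATE on E1, the "invalidate" action type on E1H). A sketch of that single-helper add/remove pattern; the entry layout, flag values and post_config() are illustrative only, not the firmware's real structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CAM_VALID	0x0001u
#define CAM_INVALID	0x0000u

/* Illustrative CAM entry; the real firmware layout differs. */
struct cam_entry {
	uint8_t  mac[6];
	uint16_t flags;
};

/* Stand-in for posting the configuration command to the device. */
static void post_config(const struct cam_entry *e)
{
	printf("entry %02x:%02x:%02x:%02x:%02x:%02x flags 0x%x\n",
	       e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5],
	       (unsigned)e->flags);
}

/* One helper handles both directions, selected by 'set'. */
static void set_mac_addr(struct cam_entry *e, const uint8_t mac[6], int set)
{
	memcpy(e->mac, mac, 6);
	e->flags = set ? CAM_VALID : CAM_INVALID;
	printf("%s MAC: ", set ? "setting" : "clearing");
	post_config(e);
}

int main(void)
{
	struct cam_entry e;
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	set_mac_addr(&e, mac, 1);	/* program the filter on load */
	set_mac_addr(&e, mac, 0);	/* remove it again on unload */
	return 0;
}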
@@ -6106,13 +6214,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
6106 | bnx2x_rx_int(bp->fp, 10); | 6214 | bnx2x_rx_int(bp->fp, 10); |
6107 | /* if index is different from 0 | 6215 | /* if index is different from 0 |
6108 | * the reply for some commands will | 6216 | * the reply for some commands will |
6109 | * be on the none default queue | 6217 | * be on the non default queue |
6110 | */ | 6218 | */ |
6111 | if (idx) | 6219 | if (idx) |
6112 | bnx2x_rx_int(&bp->fp[idx], 10); | 6220 | bnx2x_rx_int(&bp->fp[idx], 10); |
6113 | } | 6221 | } |
6114 | mb(); /* state is changed by bnx2x_sp_event() */ | ||
6115 | 6222 | ||
6223 | mb(); /* state is changed by bnx2x_sp_event() */ | ||
6116 | if (*state_p == state) | 6224 | if (*state_p == state) |
6117 | return 0; | 6225 | return 0; |
6118 | 6226 | ||
@@ -6167,7 +6275,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6167 | { | 6275 | { |
6168 | u32 load_code; | 6276 | u32 load_code; |
6169 | int i, rc; | 6277 | int i, rc; |
6170 | |||
6171 | #ifdef BNX2X_STOP_ON_ERROR | 6278 | #ifdef BNX2X_STOP_ON_ERROR |
6172 | if (unlikely(bp->panic)) | 6279 | if (unlikely(bp->panic)) |
6173 | return -EPERM; | 6280 | return -EPERM; |
@@ -6183,22 +6290,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6183 | if (!BP_NOMCP(bp)) { | 6290 | if (!BP_NOMCP(bp)) { |
6184 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | 6291 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); |
6185 | if (!load_code) { | 6292 | if (!load_code) { |
6186 | BNX2X_ERR("MCP response failure, unloading\n"); | 6293 | BNX2X_ERR("MCP response failure, aborting\n"); |
6187 | return -EBUSY; | 6294 | return -EBUSY; |
6188 | } | 6295 | } |
6189 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) | 6296 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) |
6190 | return -EBUSY; /* other port in diagnostic mode */ | 6297 | return -EBUSY; /* other port in diagnostic mode */ |
6191 | 6298 | ||
6192 | } else { | 6299 | } else { |
6300 | int port = BP_PORT(bp); | ||
6301 | |||
6193 | DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", | 6302 | DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", |
6194 | load_count[0], load_count[1], load_count[2]); | 6303 | load_count[0], load_count[1], load_count[2]); |
6195 | load_count[0]++; | 6304 | load_count[0]++; |
6196 | load_count[1 + BP_PORT(bp)]++; | 6305 | load_count[1 + port]++; |
6197 | DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", | 6306 | DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", |
6198 | load_count[0], load_count[1], load_count[2]); | 6307 | load_count[0], load_count[1], load_count[2]); |
6199 | if (load_count[0] == 1) | 6308 | if (load_count[0] == 1) |
6200 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | 6309 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; |
6201 | else if (load_count[1 + BP_PORT(bp)] == 1) | 6310 | else if (load_count[1 + port] == 1) |
6202 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | 6311 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; |
6203 | else | 6312 | else |
6204 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | 6313 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; |
@@ -6247,9 +6356,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6247 | bnx2x_fp(bp, i, disable_tpa) = | 6356 | bnx2x_fp(bp, i, disable_tpa) = |
6248 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | 6357 | ((bp->flags & TPA_ENABLE_FLAG) == 0); |
6249 | 6358 | ||
6250 | /* Disable interrupt handling until HW is initialized */ | ||
6251 | atomic_set(&bp->intr_sem, 1); | ||
6252 | |||
6253 | if (bp->flags & USING_MSIX_FLAG) { | 6359 | if (bp->flags & USING_MSIX_FLAG) { |
6254 | rc = bnx2x_req_msix_irqs(bp); | 6360 | rc = bnx2x_req_msix_irqs(bp); |
6255 | if (rc) { | 6361 | if (rc) { |
@@ -6273,22 +6379,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6273 | rc = bnx2x_init_hw(bp, load_code); | 6379 | rc = bnx2x_init_hw(bp, load_code); |
6274 | if (rc) { | 6380 | if (rc) { |
6275 | BNX2X_ERR("HW init failed, aborting\n"); | 6381 | BNX2X_ERR("HW init failed, aborting\n"); |
6276 | goto load_error; | 6382 | goto load_int_disable; |
6277 | } | 6383 | } |
6278 | 6384 | ||
6279 | /* Enable interrupt handling */ | ||
6280 | atomic_set(&bp->intr_sem, 0); | ||
6281 | |||
6282 | /* Setup NIC internals and enable interrupts */ | 6385 | /* Setup NIC internals and enable interrupts */ |
6283 | bnx2x_nic_init(bp); | 6386 | bnx2x_nic_init(bp, load_code); |
6284 | 6387 | ||
6285 | /* Send LOAD_DONE command to MCP */ | 6388 | /* Send LOAD_DONE command to MCP */ |
6286 | if (!BP_NOMCP(bp)) { | 6389 | if (!BP_NOMCP(bp)) { |
6287 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | 6390 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); |
6288 | if (!load_code) { | 6391 | if (!load_code) { |
6289 | BNX2X_ERR("MCP response failure, unloading\n"); | 6392 | BNX2X_ERR("MCP response failure, aborting\n"); |
6290 | rc = -EBUSY; | 6393 | rc = -EBUSY; |
6291 | goto load_int_disable; | 6394 | goto load_rings_free; |
6292 | } | 6395 | } |
6293 | } | 6396 | } |
6294 | 6397 | ||
@@ -6298,15 +6401,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6298 | 6401 | ||
6299 | /* Enable Rx interrupt handling before sending the ramrod | 6402 | /* Enable Rx interrupt handling before sending the ramrod |
6300 | as it's completed on Rx FP queue */ | 6403 | as it's completed on Rx FP queue */ |
6301 | for_each_queue(bp, i) | 6404 | bnx2x_napi_enable(bp); |
6302 | napi_enable(&bnx2x_fp(bp, i, napi)); | 6405 | |
6406 | /* Enable interrupt handling */ | ||
6407 | atomic_set(&bp->intr_sem, 0); | ||
6303 | 6408 | ||
6304 | rc = bnx2x_setup_leading(bp); | 6409 | rc = bnx2x_setup_leading(bp); |
6305 | if (rc) { | 6410 | if (rc) { |
6306 | #ifdef BNX2X_STOP_ON_ERROR | 6411 | BNX2X_ERR("Setup leading failed!\n"); |
6307 | bp->panic = 1; | 6412 | goto load_netif_stop; |
6308 | #endif | ||
6309 | goto load_stop_netif; | ||
6310 | } | 6413 | } |
6311 | 6414 | ||
6312 | if (CHIP_IS_E1H(bp)) | 6415 | if (CHIP_IS_E1H(bp)) |
@@ -6319,13 +6422,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6319 | for_each_nondefault_queue(bp, i) { | 6422 | for_each_nondefault_queue(bp, i) { |
6320 | rc = bnx2x_setup_multi(bp, i); | 6423 | rc = bnx2x_setup_multi(bp, i); |
6321 | if (rc) | 6424 | if (rc) |
6322 | goto load_stop_netif; | 6425 | goto load_netif_stop; |
6323 | } | 6426 | } |
6324 | 6427 | ||
6325 | if (CHIP_IS_E1(bp)) | 6428 | if (CHIP_IS_E1(bp)) |
6326 | bnx2x_set_mac_addr_e1(bp); | 6429 | bnx2x_set_mac_addr_e1(bp, 1); |
6327 | else | 6430 | else |
6328 | bnx2x_set_mac_addr_e1h(bp); | 6431 | bnx2x_set_mac_addr_e1h(bp, 1); |
6329 | 6432 | ||
6330 | if (bp->port.pmf) | 6433 | if (bp->port.pmf) |
6331 | bnx2x_initial_phy_init(bp); | 6434 | bnx2x_initial_phy_init(bp); |
@@ -6339,7 +6442,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6339 | break; | 6442 | break; |
6340 | 6443 | ||
6341 | case LOAD_OPEN: | 6444 | case LOAD_OPEN: |
6342 | /* IRQ is only requested from bnx2x_open */ | ||
6343 | netif_start_queue(bp->dev); | 6445 | netif_start_queue(bp->dev); |
6344 | bnx2x_set_rx_mode(bp->dev); | 6446 | bnx2x_set_rx_mode(bp->dev); |
6345 | if (bp->flags & USING_MSIX_FLAG) | 6447 | if (bp->flags & USING_MSIX_FLAG) |
@@ -6365,21 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6365 | 6467 | ||
6366 | return 0; | 6468 | return 0; |
6367 | 6469 | ||
6368 | load_stop_netif: | 6470 | load_netif_stop: |
6471 | bnx2x_napi_disable(bp); | ||
6472 | load_rings_free: | ||
6473 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
6474 | bnx2x_free_skbs(bp); | ||
6369 | for_each_queue(bp, i) | 6475 | for_each_queue(bp, i) |
6370 | napi_disable(&bnx2x_fp(bp, i, napi)); | 6476 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
6371 | |||
6372 | load_int_disable: | 6477 | load_int_disable: |
6373 | bnx2x_int_disable_sync(bp); | 6478 | bnx2x_int_disable_sync(bp); |
6374 | |||
6375 | /* Release IRQs */ | 6479 | /* Release IRQs */ |
6376 | bnx2x_free_irq(bp); | 6480 | bnx2x_free_irq(bp); |
6377 | |||
6378 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
6379 | bnx2x_free_skbs(bp); | ||
6380 | for_each_queue(bp, i) | ||
6381 | bnx2x_free_rx_sge_range(bp, bp->fp + i, | ||
6382 | RX_SGE_CNT*NUM_RX_SGE_PAGES); | ||
6383 | load_error: | 6481 | load_error: |
6384 | bnx2x_free_mem(bp); | 6482 | bnx2x_free_mem(bp); |
6385 | 6483 | ||
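The reworked error path above follows the usual kernel unwind ladder: the labels (load_netif_stop, load_rings_free, load_int_disable, load_error) are ordered so that jumping to a later failure point releases exactly what was set up before it, in reverse order of acquisition. A small self-contained example of the idiom; the resources and step names are placeholders:

#include <stdio.h>
#include <stdlib.h>

static int step_ok(const char *what, int fail)
{
	printf("%s: %s\n", what, fail ? "failed" : "ok");
	return !fail;
}

/*
 * Classic goto unwind: each failure jumps to the label that frees
 * everything acquired so far; free(NULL) makes partial cases safe.
 */
static int load(int fail_at)
{
	void *mem = NULL, *rings = NULL;
	int rc = -1;

	mem = malloc(64);
	if (!mem || !step_ok("init hw", fail_at == 1))
		goto free_mem;

	rings = malloc(64);
	if (!rings || !step_ok("fill rings", fail_at == 2))
		goto free_rings;

	if (!step_ok("start interface", fail_at == 3))
		goto free_rings;

	return 0;	/* success: resources stay live, as in a driver */

free_rings:
	free(rings);
free_mem:
	free(mem);
	return rc;
}

int main(void)
{
	return load(2) ? 1 : 0;	/* exercise the middle unwind path */
}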
@@ -6394,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index) | |||
6394 | 6492 | ||
6395 | /* halt the connection */ | 6493 | /* halt the connection */ |
6396 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; | 6494 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; |
6397 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); | 6495 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0); |
6398 | 6496 | ||
6399 | /* Wait for completion */ | 6497 | /* Wait for completion */ |
6400 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, | 6498 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, |
@@ -6411,7 +6509,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index) | |||
6411 | return rc; | 6509 | return rc; |
6412 | } | 6510 | } |
6413 | 6511 | ||
6414 | static void bnx2x_stop_leading(struct bnx2x *bp) | 6512 | static int bnx2x_stop_leading(struct bnx2x *bp) |
6415 | { | 6513 | { |
6416 | u16 dsb_sp_prod_idx; | 6514 | u16 dsb_sp_prod_idx; |
6417 | /* if the other port is handling traffic, | 6515 | /* if the other port is handling traffic, |
@@ -6429,7 +6527,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp) | |||
6429 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, | 6527 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, |
6430 | &(bp->fp[0].state), 1); | 6528 | &(bp->fp[0].state), 1); |
6431 | if (rc) /* timeout */ | 6529 | if (rc) /* timeout */ |
6432 | return; | 6530 | return rc; |
6433 | 6531 | ||
6434 | dsb_sp_prod_idx = *bp->dsb_sp_prod; | 6532 | dsb_sp_prod_idx = *bp->dsb_sp_prod; |
6435 | 6533 | ||
@@ -6441,20 +6539,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp) | |||
6441 | so there is not much to do if this times out | 6539 | so there is not much to do if this times out |
6442 | */ | 6540 | */ |
6443 | while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { | 6541 | while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { |
6444 | msleep(1); | ||
6445 | if (!cnt) { | 6542 | if (!cnt) { |
6446 | DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " | 6543 | DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " |
6447 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", | 6544 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", |
6448 | *bp->dsb_sp_prod, dsb_sp_prod_idx); | 6545 | *bp->dsb_sp_prod, dsb_sp_prod_idx); |
6449 | #ifdef BNX2X_STOP_ON_ERROR | 6546 | #ifdef BNX2X_STOP_ON_ERROR |
6450 | bnx2x_panic(); | 6547 | bnx2x_panic(); |
6548 | #else | ||
6549 | rc = -EBUSY; | ||
6451 | #endif | 6550 | #endif |
6452 | break; | 6551 | break; |
6453 | } | 6552 | } |
6454 | cnt--; | 6553 | cnt--; |
6554 | msleep(1); | ||
6455 | } | 6555 | } |
6456 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | 6556 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; |
6457 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; | 6557 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; |
6558 | |||
6559 | return rc; | ||
6458 | } | 6560 | } |
6459 | 6561 | ||
6460 | static void bnx2x_reset_func(struct bnx2x *bp) | 6562 | static void bnx2x_reset_func(struct bnx2x *bp) |
@@ -6496,7 +6598,7 @@ static void bnx2x_reset_port(struct bnx2x *bp) | |||
6496 | val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); | 6598 | val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); |
6497 | if (val) | 6599 | if (val) |
6498 | DP(NETIF_MSG_IFDOWN, | 6600 | DP(NETIF_MSG_IFDOWN, |
6499 | "BRB1 is not empty %d blooks are occupied\n", val); | 6601 | "BRB1 is not empty %d blocks are occupied\n", val); |
6500 | 6602 | ||
6501 | /* TODO: Close Doorbell port? */ | 6603 | /* TODO: Close Doorbell port? */ |
6502 | } | 6604 | } |
@@ -6536,43 +6638,35 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | |||
6536 | } | 6638 | } |
6537 | } | 6639 | } |
6538 | 6640 | ||
6539 | /* msut be called with rtnl_lock */ | 6641 | /* must be called with rtnl_lock */ |
6540 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | 6642 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) |
6541 | { | 6643 | { |
6644 | int port = BP_PORT(bp); | ||
6542 | u32 reset_code = 0; | 6645 | u32 reset_code = 0; |
6543 | int i, cnt; | 6646 | int i, cnt, rc; |
6544 | 6647 | ||
6545 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | 6648 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
6546 | 6649 | ||
6547 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 6650 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
6548 | bnx2x_set_storm_rx_mode(bp); | 6651 | bnx2x_set_storm_rx_mode(bp); |
6549 | 6652 | ||
6550 | if (netif_running(bp->dev)) { | 6653 | bnx2x_netif_stop(bp); |
6551 | netif_tx_disable(bp->dev); | 6654 | if (!netif_running(bp->dev)) |
6552 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | 6655 | bnx2x_napi_disable(bp); |
6553 | } | ||
6554 | |||
6555 | del_timer_sync(&bp->timer); | 6656 | del_timer_sync(&bp->timer); |
6556 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | 6657 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, |
6557 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | 6658 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); |
6558 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 6659 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
6559 | 6660 | ||
6560 | /* Wait until all fast path tasks complete */ | 6661 | /* Wait until tx fast path tasks complete */ |
6561 | for_each_queue(bp, i) { | 6662 | for_each_queue(bp, i) { |
6562 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 6663 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6563 | 6664 | ||
6564 | #ifdef BNX2X_STOP_ON_ERROR | ||
6565 | #ifdef __powerpc64__ | ||
6566 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
6567 | #else | ||
6568 | DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n", | ||
6569 | #endif | ||
6570 | fp->tpa_queue_used); | ||
6571 | #endif | ||
6572 | cnt = 1000; | 6665 | cnt = 1000; |
6573 | smp_rmb(); | 6666 | smp_rmb(); |
6574 | while (bnx2x_has_work(fp)) { | 6667 | while (BNX2X_HAS_TX_WORK(fp)) { |
6575 | msleep(1); | 6668 | |
6669 | bnx2x_tx_int(fp, 1000); | ||
6576 | if (!cnt) { | 6670 | if (!cnt) { |
6577 | BNX2X_ERR("timeout waiting for queue[%d]\n", | 6671 | BNX2X_ERR("timeout waiting for queue[%d]\n", |
6578 | i); | 6672 | i); |
@@ -6584,40 +6678,68 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
6584 | #endif | 6678 | #endif |
6585 | } | 6679 | } |
6586 | cnt--; | 6680 | cnt--; |
6681 | msleep(1); | ||
6587 | smp_rmb(); | 6682 | smp_rmb(); |
6588 | } | 6683 | } |
6589 | } | 6684 | } |
6590 | 6685 | /* Give HW time to discard old tx messages */ | |
6591 | /* Wait until all slow path tasks complete */ | 6686 | msleep(1); |
6592 | cnt = 1000; | ||
6593 | while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--) | ||
6594 | msleep(1); | ||
6595 | |||
6596 | for_each_queue(bp, i) | ||
6597 | napi_disable(&bnx2x_fp(bp, i, napi)); | ||
6598 | /* Disable interrupts after Tx and Rx are disabled on stack level */ | ||
6599 | bnx2x_int_disable_sync(bp); | ||
6600 | 6687 | ||
6601 | /* Release IRQs */ | 6688 | /* Release IRQs */ |
6602 | bnx2x_free_irq(bp); | 6689 | bnx2x_free_irq(bp); |
6603 | 6690 | ||
6604 | if (bp->flags & NO_WOL_FLAG) | 6691 | if (CHIP_IS_E1(bp)) { |
6692 | struct mac_configuration_cmd *config = | ||
6693 | bnx2x_sp(bp, mcast_config); | ||
6694 | |||
6695 | bnx2x_set_mac_addr_e1(bp, 0); | ||
6696 | |||
6697 | for (i = 0; i < config->hdr.length_6b; i++) | ||
6698 | CAM_INVALIDATE(config->config_table[i]); | ||
6699 | |||
6700 | config->hdr.length_6b = i; | ||
6701 | if (CHIP_REV_IS_SLOW(bp)) | ||
6702 | config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port); | ||
6703 | else | ||
6704 | config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port); | ||
6705 | config->hdr.client_id = BP_CL_ID(bp); | ||
6706 | config->hdr.reserved1 = 0; | ||
6707 | |||
6708 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
6709 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | ||
6710 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); | ||
6711 | |||
6712 | } else { /* E1H */ | ||
6713 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | ||
6714 | |||
6715 | bnx2x_set_mac_addr_e1h(bp, 0); | ||
6716 | |||
6717 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
6718 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); | ||
6719 | } | ||
6720 | |||
6721 | if (unload_mode == UNLOAD_NORMAL) | ||
6722 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | ||
6723 | |||
6724 | else if (bp->flags & NO_WOL_FLAG) { | ||
6605 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; | 6725 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; |
6726 | if (CHIP_IS_E1H(bp)) | ||
6727 | REG_WR(bp, MISC_REG_E1HMF_MODE, 0); | ||
6606 | 6728 | ||
6607 | else if (bp->wol) { | 6729 | } else if (bp->wol) { |
6608 | u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 6730 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
6609 | u8 *mac_addr = bp->dev->dev_addr; | 6731 | u8 *mac_addr = bp->dev->dev_addr; |
6610 | u32 val; | 6732 | u32 val; |
6611 | |||
6612 | /* The mac address is written to entries 1-4 to | 6733 | /* The mac address is written to entries 1-4 to |
6613 | preserve entry 0 which is used by the PMF */ | 6734 | preserve entry 0 which is used by the PMF */ |
6735 | u8 entry = (BP_E1HVN(bp) + 1)*8; | ||
6736 | |||
6614 | val = (mac_addr[0] << 8) | mac_addr[1]; | 6737 | val = (mac_addr[0] << 8) | mac_addr[1]; |
6615 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); | 6738 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); |
6616 | 6739 | ||
6617 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | | 6740 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | |
6618 | (mac_addr[4] << 8) | mac_addr[5]; | 6741 | (mac_addr[4] << 8) | mac_addr[5]; |
6619 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, | 6742 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
6620 | val); | ||
6621 | 6743 | ||
6622 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 6744 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
6623 | 6745 | ||
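For Wake-on-LAN, the hunk above splits the 6-byte station address across two 32-bit EMAC match registers: the first two bytes form one register value, the remaining four bytes fill the next. The packing arithmetic, reproduced as a standalone check (register writes are replaced by a printf; only the shifts are taken from the code above):

#include <stdint.h>
#include <stdio.h>

/* Pack mac[0..1] into one register value and mac[2..5] into the next,
 * mirroring what the WoL path does before writing the EMAC match regs. */
static void pack_mac(const uint8_t mac[6], uint32_t *first, uint32_t *second)
{
	*first  = ((uint32_t)mac[0] << 8) | mac[1];
	*second = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		  ((uint32_t)mac[4] << 8)  |  mac[5];
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0x01, 0x02, 0x03 };
	uint32_t first, second;

	pack_mac(mac, &first, &second);
	/* expected: first=0x0010, second=0x18010203 */
	printf("match regs: 0x%04x 0x%08x\n", (unsigned)first, (unsigned)second);
	return 0;
}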
@@ -6630,23 +6752,14 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
6630 | if (bnx2x_stop_multi(bp, i)) | 6752 | if (bnx2x_stop_multi(bp, i)) |
6631 | goto unload_error; | 6753 | goto unload_error; |
6632 | 6754 | ||
6633 | if (CHIP_IS_E1H(bp)) | 6755 | rc = bnx2x_stop_leading(bp); |
6634 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); | 6756 | if (rc) { |
6635 | |||
6636 | bnx2x_stop_leading(bp); | ||
6637 | #ifdef BNX2X_STOP_ON_ERROR | ||
6638 | /* If ramrod completion timed out - break here! */ | ||
6639 | if (bp->panic) { | ||
6640 | BNX2X_ERR("Stop leading failed!\n"); | 6757 | BNX2X_ERR("Stop leading failed!\n"); |
6758 | #ifdef BNX2X_STOP_ON_ERROR | ||
6641 | return -EBUSY; | 6759 | return -EBUSY; |
6642 | } | 6760 | #else |
6761 | goto unload_error; | ||
6643 | #endif | 6762 | #endif |
6644 | |||
6645 | if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) || | ||
6646 | (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) { | ||
6647 | DP(NETIF_MSG_IFDOWN, "failed to close leading properly! " | ||
6648 | "state 0x%x fp[0].state 0x%x\n", | ||
6649 | bp->state, bp->fp[0].state); | ||
6650 | } | 6763 | } |
6651 | 6764 | ||
6652 | unload_error: | 6765 | unload_error: |
@@ -6656,12 +6769,12 @@ unload_error: | |||
6656 | DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", | 6769 | DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", |
6657 | load_count[0], load_count[1], load_count[2]); | 6770 | load_count[0], load_count[1], load_count[2]); |
6658 | load_count[0]--; | 6771 | load_count[0]--; |
6659 | load_count[1 + BP_PORT(bp)]--; | 6772 | load_count[1 + port]--; |
6660 | DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", | 6773 | DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", |
6661 | load_count[0], load_count[1], load_count[2]); | 6774 | load_count[0], load_count[1], load_count[2]); |
6662 | if (load_count[0] == 0) | 6775 | if (load_count[0] == 0) |
6663 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | 6776 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; |
6664 | else if (load_count[1 + BP_PORT(bp)] == 0) | 6777 | else if (load_count[1 + port] == 0) |
6665 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; | 6778 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; |
6666 | else | 6779 | else |
6667 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; | 6780 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; |
@@ -6681,8 +6794,7 @@ unload_error: | |||
6681 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 6794 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
6682 | bnx2x_free_skbs(bp); | 6795 | bnx2x_free_skbs(bp); |
6683 | for_each_queue(bp, i) | 6796 | for_each_queue(bp, i) |
6684 | bnx2x_free_rx_sge_range(bp, bp->fp + i, | 6797 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
6685 | RX_SGE_CNT*NUM_RX_SGE_PAGES); | ||
6686 | bnx2x_free_mem(bp); | 6798 | bnx2x_free_mem(bp); |
6687 | 6799 | ||
6688 | bp->state = BNX2X_STATE_CLOSED; | 6800 | bp->state = BNX2X_STATE_CLOSED; |
@@ -6733,49 +6845,88 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6733 | /* Check if it is the UNDI driver | 6845 | /* Check if it is the UNDI driver |
6734 | * UNDI driver initializes CID offset for normal bell to 0x7 | 6846 | * UNDI driver initializes CID offset for normal bell to 0x7 |
6735 | */ | 6847 | */ |
6848 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
6736 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); | 6849 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); |
6850 | if (val == 0x7) | ||
6851 | REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); | ||
6852 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
6853 | |||
6737 | if (val == 0x7) { | 6854 | if (val == 0x7) { |
6738 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 6855 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
6739 | /* save our func and fw_seq */ | 6856 | /* save our func */ |
6740 | int func = BP_FUNC(bp); | 6857 | int func = BP_FUNC(bp); |
6741 | u16 fw_seq = bp->fw_seq; | 6858 | u32 swap_en; |
6859 | u32 swap_val; | ||
6742 | 6860 | ||
6743 | BNX2X_DEV_INFO("UNDI is active! reset device\n"); | 6861 | BNX2X_DEV_INFO("UNDI is active! reset device\n"); |
6744 | 6862 | ||
6745 | /* try unload UNDI on port 0 */ | 6863 | /* try unload UNDI on port 0 */ |
6746 | bp->func = 0; | 6864 | bp->func = 0; |
6747 | bp->fw_seq = (SHMEM_RD(bp, | 6865 | bp->fw_seq = |
6748 | func_mb[bp->func].drv_mb_header) & | 6866 | (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & |
6749 | DRV_MSG_SEQ_NUMBER_MASK); | 6867 | DRV_MSG_SEQ_NUMBER_MASK); |
6750 | |||
6751 | reset_code = bnx2x_fw_command(bp, reset_code); | 6868 | reset_code = bnx2x_fw_command(bp, reset_code); |
6752 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
6753 | 6869 | ||
6754 | /* if UNDI is loaded on the other port */ | 6870 | /* if UNDI is loaded on the other port */ |
6755 | if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { | 6871 | if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { |
6756 | 6872 | ||
6873 | /* send "DONE" for previous unload */ | ||
6874 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
6875 | |||
6876 | /* unload UNDI on port 1 */ | ||
6757 | bp->func = 1; | 6877 | bp->func = 1; |
6758 | bp->fw_seq = (SHMEM_RD(bp, | 6878 | bp->fw_seq = |
6759 | func_mb[bp->func].drv_mb_header) & | 6879 | (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & |
6760 | DRV_MSG_SEQ_NUMBER_MASK); | 6880 | DRV_MSG_SEQ_NUMBER_MASK); |
6761 | 6881 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | |
6762 | bnx2x_fw_command(bp, | 6882 | |
6763 | DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); | 6883 | bnx2x_fw_command(bp, reset_code); |
6764 | bnx2x_fw_command(bp, | ||
6765 | DRV_MSG_CODE_UNLOAD_DONE); | ||
6766 | |||
6767 | /* restore our func and fw_seq */ | ||
6768 | bp->func = func; | ||
6769 | bp->fw_seq = fw_seq; | ||
6770 | } | 6884 | } |
6771 | 6885 | ||
6886 | REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : | ||
6887 | HC_REG_CONFIG_0), 0x1000); | ||
6888 | |||
6889 | /* close input traffic and wait for it */ | ||
6890 | /* Do not rcv packets to BRB */ | ||
6891 | REG_WR(bp, | ||
6892 | (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : | ||
6893 | NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); | ||
6894 | /* Do not direct rcv packets that are not for MCP to | ||
6895 | * the BRB */ | ||
6896 | REG_WR(bp, | ||
6897 | (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP : | ||
6898 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); | ||
6899 | /* clear AEU */ | ||
6900 | REG_WR(bp, | ||
6901 | (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | ||
6902 | MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); | ||
6903 | msleep(10); | ||
6904 | |||
6905 | /* save NIG port swap info */ | ||
6906 | swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); | ||
6907 | swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); | ||
6772 | /* reset device */ | 6908 | /* reset device */ |
6773 | REG_WR(bp, | 6909 | REG_WR(bp, |
6774 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 6910 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
6775 | 0xd3ffff7f); | 6911 | 0xd3ffffff); |
6776 | REG_WR(bp, | 6912 | REG_WR(bp, |
6777 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 6913 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, |
6778 | 0x1403); | 6914 | 0x1403); |
6915 | /* take the NIG out of reset and restore swap values */ | ||
6916 | REG_WR(bp, | ||
6917 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
6918 | MISC_REGISTERS_RESET_REG_1_RST_NIG); | ||
6919 | REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); | ||
6920 | REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); | ||
6921 | |||
6922 | /* send unload done to the MCP */ | ||
6923 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
6924 | |||
6925 | /* restore our func and fw_seq */ | ||
6926 | bp->func = func; | ||
6927 | bp->fw_seq = | ||
6928 | (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & | ||
6929 | DRV_MSG_SEQ_NUMBER_MASK); | ||
6779 | } | 6930 | } |
6780 | } | 6931 | } |
6781 | } | 6932 | } |
@@ -6783,6 +6934,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6783 | static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | 6934 | static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) |
6784 | { | 6935 | { |
6785 | u32 val, val2, val3, val4, id; | 6936 | u32 val, val2, val3, val4, id; |
6937 | u16 pmc; | ||
6786 | 6938 | ||
6787 | /* Get the chip revision id and number. */ | 6939 | /* Get the chip revision id and number. */ |
6788 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ | 6940 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ |
@@ -6840,8 +6992,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
6840 | BNX2X_ERR("This driver needs bc_ver %X but found %X," | 6992 | BNX2X_ERR("This driver needs bc_ver %X but found %X," |
6841 | " please upgrade BC\n", BNX2X_BC_VER, val); | 6993 | " please upgrade BC\n", BNX2X_BC_VER, val); |
6842 | } | 6994 | } |
6843 | BNX2X_DEV_INFO("%sWoL Capable\n", | 6995 | |
6844 | (bp->flags & NO_WOL_FLAG)? "Not " : ""); | 6996 | if (BP_E1HVN(bp) == 0) { |
6997 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); | ||
6998 | bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; | ||
6999 | } else { | ||
7000 | /* no WOL capability for E1HVN != 0 */ | ||
7001 | bp->flags |= NO_WOL_FLAG; | ||
7002 | } | ||
7003 | BNX2X_DEV_INFO("%sWoL capable\n", | ||
7004 | (bp->flags & NO_WOL_FLAG) ? "Not " : ""); | ||
6845 | 7005 | ||
6846 | val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); | 7006 | val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); |
6847 | val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); | 7007 | val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); |
@@ -7274,9 +7434,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7274 | bp->mf_config = | 7434 | bp->mf_config = |
7275 | SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | 7435 | SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); |
7276 | 7436 | ||
7277 | val = | 7437 | val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & |
7278 | (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & | 7438 | FUNC_MF_CFG_E1HOV_TAG_MASK); |
7279 | FUNC_MF_CFG_E1HOV_TAG_MASK); | ||
7280 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | 7439 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { |
7281 | 7440 | ||
7282 | bp->e1hov = val; | 7441 | bp->e1hov = val; |
@@ -7324,7 +7483,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7324 | 7483 | ||
7325 | if (BP_NOMCP(bp)) { | 7484 | if (BP_NOMCP(bp)) { |
7326 | /* only supposed to happen on emulation/FPGA */ | 7485 | /* only supposed to happen on emulation/FPGA */ |
7327 | BNX2X_ERR("warning rendom MAC workaround active\n"); | 7486 | BNX2X_ERR("warning random MAC workaround active\n"); |
7328 | random_ether_addr(bp->dev->dev_addr); | 7487 | random_ether_addr(bp->dev->dev_addr); |
7329 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); | 7488 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); |
7330 | } | 7489 | } |
@@ -7337,8 +7496,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
7337 | int func = BP_FUNC(bp); | 7496 | int func = BP_FUNC(bp); |
7338 | int rc; | 7497 | int rc; |
7339 | 7498 | ||
7340 | if (nomcp) | 7499 | /* Disable interrupt handling until HW is initialized */ |
7341 | bp->flags |= NO_MCP_FLAG; | 7500 | atomic_set(&bp->intr_sem, 1); |
7342 | 7501 | ||
7343 | mutex_init(&bp->port.phy_mutex); | 7502 | mutex_init(&bp->port.phy_mutex); |
7344 | 7503 | ||
@@ -7377,8 +7536,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
7377 | bp->tx_ticks = 50; | 7536 | bp->tx_ticks = 50; |
7378 | bp->rx_ticks = 25; | 7537 | bp->rx_ticks = 25; |
7379 | 7538 | ||
7380 | bp->stats_ticks = 1000000 & 0xffff00; | ||
7381 | |||
7382 | bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); | 7539 | bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); |
7383 | bp->current_interval = (poll ? poll : bp->timer_interval); | 7540 | bp->current_interval = (poll ? poll : bp->timer_interval); |
7384 | 7541 | ||
@@ -7628,25 +7785,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev, | |||
7628 | struct ethtool_drvinfo *info) | 7785 | struct ethtool_drvinfo *info) |
7629 | { | 7786 | { |
7630 | struct bnx2x *bp = netdev_priv(dev); | 7787 | struct bnx2x *bp = netdev_priv(dev); |
7631 | char phy_fw_ver[PHY_FW_VER_LEN]; | 7788 | u8 phy_fw_ver[PHY_FW_VER_LEN]; |
7632 | 7789 | ||
7633 | strcpy(info->driver, DRV_MODULE_NAME); | 7790 | strcpy(info->driver, DRV_MODULE_NAME); |
7634 | strcpy(info->version, DRV_MODULE_VERSION); | 7791 | strcpy(info->version, DRV_MODULE_VERSION); |
7635 | 7792 | ||
7636 | phy_fw_ver[0] = '\0'; | 7793 | phy_fw_ver[0] = '\0'; |
7637 | if (bp->port.pmf) { | 7794 | if (bp->port.pmf) { |
7638 | bnx2x_phy_hw_lock(bp); | 7795 | bnx2x_acquire_phy_lock(bp); |
7639 | bnx2x_get_ext_phy_fw_version(&bp->link_params, | 7796 | bnx2x_get_ext_phy_fw_version(&bp->link_params, |
7640 | (bp->state != BNX2X_STATE_CLOSED), | 7797 | (bp->state != BNX2X_STATE_CLOSED), |
7641 | phy_fw_ver, PHY_FW_VER_LEN); | 7798 | phy_fw_ver, PHY_FW_VER_LEN); |
7642 | bnx2x_phy_hw_unlock(bp); | 7799 | bnx2x_release_phy_lock(bp); |
7643 | } | 7800 | } |
7644 | 7801 | ||
7645 | snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", | 7802 | snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s", |
7646 | BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, | 7803 | (bp->common.bc_ver & 0xff0000) >> 16, |
7647 | BCM_5710_FW_REVISION_VERSION, | 7804 | (bp->common.bc_ver & 0xff00) >> 8, |
7648 | BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, | 7805 | (bp->common.bc_ver & 0xff), |
7649 | ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); | 7806 | ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver); |
7650 | strcpy(info->bus_info, pci_name(bp->pdev)); | 7807 | strcpy(info->bus_info, pci_name(bp->pdev)); |
7651 | info->n_stats = BNX2X_NUM_STATS; | 7808 | info->n_stats = BNX2X_NUM_STATS; |
7652 | info->testinfo_len = BNX2X_NUM_TESTS; | 7809 | info->testinfo_len = BNX2X_NUM_TESTS; |
@@ -8097,7 +8254,7 @@ static int bnx2x_set_eeprom(struct net_device *dev, | |||
8097 | if (eeprom->magic == 0x00504859) | 8254 | if (eeprom->magic == 0x00504859) |
8098 | if (bp->port.pmf) { | 8255 | if (bp->port.pmf) { |
8099 | 8256 | ||
8100 | bnx2x_phy_hw_lock(bp); | 8257 | bnx2x_acquire_phy_lock(bp); |
8101 | rc = bnx2x_flash_download(bp, BP_PORT(bp), | 8258 | rc = bnx2x_flash_download(bp, BP_PORT(bp), |
8102 | bp->link_params.ext_phy_config, | 8259 | bp->link_params.ext_phy_config, |
8103 | (bp->state != BNX2X_STATE_CLOSED), | 8260 | (bp->state != BNX2X_STATE_CLOSED), |
@@ -8109,7 +8266,7 @@ static int bnx2x_set_eeprom(struct net_device *dev, | |||
8109 | rc |= bnx2x_phy_init(&bp->link_params, | 8266 | rc |= bnx2x_phy_init(&bp->link_params, |
8110 | &bp->link_vars); | 8267 | &bp->link_vars); |
8111 | } | 8268 | } |
8112 | bnx2x_phy_hw_unlock(bp); | 8269 | bnx2x_release_phy_lock(bp); |
8113 | 8270 | ||
8114 | } else /* Only the PMF can access the PHY */ | 8271 | } else /* Only the PMF can access the PHY */ |
8115 | return -EINVAL; | 8272 | return -EINVAL; |
@@ -8128,7 +8285,6 @@ static int bnx2x_get_coalesce(struct net_device *dev, | |||
8128 | 8285 | ||
8129 | coal->rx_coalesce_usecs = bp->rx_ticks; | 8286 | coal->rx_coalesce_usecs = bp->rx_ticks; |
8130 | coal->tx_coalesce_usecs = bp->tx_ticks; | 8287 | coal->tx_coalesce_usecs = bp->tx_ticks; |
8131 | coal->stats_block_coalesce_usecs = bp->stats_ticks; | ||
8132 | 8288 | ||
8133 | return 0; | 8289 | return 0; |
8134 | } | 8290 | } |
@@ -8146,44 +8302,12 @@ static int bnx2x_set_coalesce(struct net_device *dev, | |||
8146 | if (bp->tx_ticks > 0x3000) | 8302 | if (bp->tx_ticks > 0x3000) |
8147 | bp->tx_ticks = 0x3000; | 8303 | bp->tx_ticks = 0x3000; |
8148 | 8304 | ||
8149 | bp->stats_ticks = coal->stats_block_coalesce_usecs; | ||
8150 | if (bp->stats_ticks > 0xffff00) | ||
8151 | bp->stats_ticks = 0xffff00; | ||
8152 | bp->stats_ticks &= 0xffff00; | ||
8153 | |||
8154 | if (netif_running(dev)) | 8305 | if (netif_running(dev)) |
8155 | bnx2x_update_coalesce(bp); | 8306 | bnx2x_update_coalesce(bp); |
8156 | 8307 | ||
8157 | return 0; | 8308 | return 0; |
8158 | } | 8309 | } |
8159 | 8310 | ||
8160 | static int bnx2x_set_flags(struct net_device *dev, u32 data) | ||
8161 | { | ||
8162 | struct bnx2x *bp = netdev_priv(dev); | ||
8163 | int changed = 0; | ||
8164 | int rc = 0; | ||
8165 | |||
8166 | if (data & ETH_FLAG_LRO) { | ||
8167 | if (!(dev->features & NETIF_F_LRO)) { | ||
8168 | dev->features |= NETIF_F_LRO; | ||
8169 | bp->flags |= TPA_ENABLE_FLAG; | ||
8170 | changed = 1; | ||
8171 | } | ||
8172 | |||
8173 | } else if (dev->features & NETIF_F_LRO) { | ||
8174 | dev->features &= ~NETIF_F_LRO; | ||
8175 | bp->flags &= ~TPA_ENABLE_FLAG; | ||
8176 | changed = 1; | ||
8177 | } | ||
8178 | |||
8179 | if (changed && netif_running(dev)) { | ||
8180 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
8181 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
8182 | } | ||
8183 | |||
8184 | return rc; | ||
8185 | } | ||
8186 | |||
8187 | static void bnx2x_get_ringparam(struct net_device *dev, | 8311 | static void bnx2x_get_ringparam(struct net_device *dev, |
8188 | struct ethtool_ringparam *ering) | 8312 | struct ethtool_ringparam *ering) |
8189 | { | 8313 | { |
@@ -8266,7 +8390,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
8266 | 8390 | ||
8267 | if (epause->autoneg) { | 8391 | if (epause->autoneg) { |
8268 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { | 8392 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { |
8269 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); | 8393 | DP(NETIF_MSG_LINK, "autoneg not supported\n"); |
8270 | return -EINVAL; | 8394 | return -EINVAL; |
8271 | } | 8395 | } |
8272 | 8396 | ||
@@ -8285,6 +8409,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
8285 | return 0; | 8409 | return 0; |
8286 | } | 8410 | } |
8287 | 8411 | ||
8412 | static int bnx2x_set_flags(struct net_device *dev, u32 data) | ||
8413 | { | ||
8414 | struct bnx2x *bp = netdev_priv(dev); | ||
8415 | int changed = 0; | ||
8416 | int rc = 0; | ||
8417 | |||
8418 | /* TPA requires Rx CSUM offloading */ | ||
8419 | if ((data & ETH_FLAG_LRO) && bp->rx_csum) { | ||
8420 | if (!(dev->features & NETIF_F_LRO)) { | ||
8421 | dev->features |= NETIF_F_LRO; | ||
8422 | bp->flags |= TPA_ENABLE_FLAG; | ||
8423 | changed = 1; | ||
8424 | } | ||
8425 | |||
8426 | } else if (dev->features & NETIF_F_LRO) { | ||
8427 | dev->features &= ~NETIF_F_LRO; | ||
8428 | bp->flags &= ~TPA_ENABLE_FLAG; | ||
8429 | changed = 1; | ||
8430 | } | ||
8431 | |||
8432 | if (changed && netif_running(dev)) { | ||
8433 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
8434 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
8435 | } | ||
8436 | |||
8437 | return rc; | ||
8438 | } | ||
8439 | |||
8288 | static u32 bnx2x_get_rx_csum(struct net_device *dev) | 8440 | static u32 bnx2x_get_rx_csum(struct net_device *dev) |
8289 | { | 8441 | { |
8290 | struct bnx2x *bp = netdev_priv(dev); | 8442 | struct bnx2x *bp = netdev_priv(dev); |
@@ -8295,9 +8447,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev) | |||
8295 | static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) | 8447 | static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) |
8296 | { | 8448 | { |
8297 | struct bnx2x *bp = netdev_priv(dev); | 8449 | struct bnx2x *bp = netdev_priv(dev); |
8450 | int rc = 0; | ||
8298 | 8451 | ||
8299 | bp->rx_csum = data; | 8452 | bp->rx_csum = data; |
8300 | return 0; | 8453 | |
8454 | /* Disable TPA, when Rx CSUM is disabled. Otherwise all | ||
8455 | TPA'ed packets will be discarded due to wrong TCP CSUM */ | ||
8456 | if (!data) { | ||
8457 | u32 flags = ethtool_op_get_flags(dev); | ||
8458 | |||
8459 | rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO)); | ||
8460 | } | ||
8461 | |||
8462 | return rc; | ||
8301 | } | 8463 | } |
8302 | 8464 | ||
8303 | static int bnx2x_set_tso(struct net_device *dev, u32 data) | 8465 | static int bnx2x_set_tso(struct net_device *dev, u32 data) |
@@ -8335,6 +8497,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) | |||
8335 | { | 8497 | { |
8336 | int idx, i, rc = -ENODEV; | 8498 | int idx, i, rc = -ENODEV; |
8337 | u32 wr_val = 0; | 8499 | u32 wr_val = 0; |
8500 | int port = BP_PORT(bp); | ||
8338 | static const struct { | 8501 | static const struct { |
8339 | u32 offset0; | 8502 | u32 offset0; |
8340 | u32 offset1; | 8503 | u32 offset1; |
@@ -8400,7 +8563,6 @@ static int bnx2x_test_registers(struct bnx2x *bp) | |||
8400 | 8563 | ||
8401 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { | 8564 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { |
8402 | u32 offset, mask, save_val, val; | 8565 | u32 offset, mask, save_val, val; |
8403 | int port = BP_PORT(bp); | ||
8404 | 8566 | ||
8405 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; | 8567 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; |
8406 | mask = reg_tbl[i].mask; | 8568 | mask = reg_tbl[i].mask; |
@@ -8446,16 +8608,17 @@ static int bnx2x_test_memory(struct bnx2x *bp) | |||
8446 | static const struct { | 8608 | static const struct { |
8447 | char *name; | 8609 | char *name; |
8448 | u32 offset; | 8610 | u32 offset; |
8449 | u32 mask; | 8611 | u32 e1_mask; |
8612 | u32 e1h_mask; | ||
8450 | } prty_tbl[] = { | 8613 | } prty_tbl[] = { |
8451 | { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 }, | 8614 | { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, |
8452 | { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 }, | 8615 | { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, |
8453 | { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 }, | 8616 | { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, |
8454 | { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 }, | 8617 | { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, |
8455 | { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 }, | 8618 | { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, |
8456 | { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 }, | 8619 | { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, |
8457 | 8620 | ||
8458 | { NULL, 0xffffffff, 0 } | 8621 | { NULL, 0xffffffff, 0, 0 } |
8459 | }; | 8622 | }; |
8460 | 8623 | ||
8461 | if (!netif_running(bp->dev)) | 8624 | if (!netif_running(bp->dev)) |
@@ -8469,7 +8632,8 @@ static int bnx2x_test_memory(struct bnx2x *bp) | |||
8469 | /* Check the parity status */ | 8632 | /* Check the parity status */ |
8470 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { | 8633 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { |
8471 | val = REG_RD(bp, prty_tbl[i].offset); | 8634 | val = REG_RD(bp, prty_tbl[i].offset); |
8472 | if (val & ~(prty_tbl[i].mask)) { | 8635 | if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || |
8636 | (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { | ||
8473 | DP(NETIF_MSG_HW, | 8637 | DP(NETIF_MSG_HW, |
8474 | "%s is 0x%x\n", prty_tbl[i].name, val); | 8638 | "%s is 0x%x\n", prty_tbl[i].name, val); |
8475 | goto test_mem_exit; | 8639 | goto test_mem_exit; |
@@ -8482,34 +8646,6 @@ test_mem_exit: | |||
8482 | return rc; | 8646 | return rc; |
8483 | } | 8647 | } |
8484 | 8648 | ||
8485 | static void bnx2x_netif_start(struct bnx2x *bp) | ||
8486 | { | ||
8487 | int i; | ||
8488 | |||
8489 | if (atomic_dec_and_test(&bp->intr_sem)) { | ||
8490 | if (netif_running(bp->dev)) { | ||
8491 | bnx2x_int_enable(bp); | ||
8492 | for_each_queue(bp, i) | ||
8493 | napi_enable(&bnx2x_fp(bp, i, napi)); | ||
8494 | if (bp->state == BNX2X_STATE_OPEN) | ||
8495 | netif_wake_queue(bp->dev); | ||
8496 | } | ||
8497 | } | ||
8498 | } | ||
8499 | |||
8500 | static void bnx2x_netif_stop(struct bnx2x *bp) | ||
8501 | { | ||
8502 | int i; | ||
8503 | |||
8504 | if (netif_running(bp->dev)) { | ||
8505 | netif_tx_disable(bp->dev); | ||
8506 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
8507 | for_each_queue(bp, i) | ||
8508 | napi_disable(&bnx2x_fp(bp, i, napi)); | ||
8509 | } | ||
8510 | bnx2x_int_disable_sync(bp); | ||
8511 | } | ||
8512 | |||
8513 | static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) | 8649 | static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) |
8514 | { | 8650 | { |
8515 | int cnt = 1000; | 8651 | int cnt = 1000; |
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
8539 | 8675 | ||
8540 | if (loopback_mode == BNX2X_MAC_LOOPBACK) { | 8676 | if (loopback_mode == BNX2X_MAC_LOOPBACK) { |
8541 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | 8677 | bp->link_params.loopback_mode = LOOPBACK_BMAC; |
8542 | bnx2x_phy_hw_lock(bp); | 8678 | bnx2x_acquire_phy_lock(bp); |
8543 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 8679 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
8544 | bnx2x_phy_hw_unlock(bp); | 8680 | bnx2x_release_phy_lock(bp); |
8545 | 8681 | ||
8546 | } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { | 8682 | } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { |
8547 | bp->link_params.loopback_mode = LOOPBACK_XGXS_10; | 8683 | bp->link_params.loopback_mode = LOOPBACK_XGXS_10; |
8548 | bnx2x_phy_hw_lock(bp); | 8684 | bnx2x_acquire_phy_lock(bp); |
8549 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 8685 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
8550 | bnx2x_phy_hw_unlock(bp); | 8686 | bnx2x_release_phy_lock(bp); |
8551 | /* wait until link state is restored */ | 8687 | /* wait until link state is restored */ |
8552 | bnx2x_wait_for_link(bp, link_up); | 8688 | bnx2x_wait_for_link(bp, link_up); |
8553 | 8689 | ||
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev, | |||
8771 | if (!netif_running(dev)) | 8907 | if (!netif_running(dev)) |
8772 | return; | 8908 | return; |
8773 | 8909 | ||
8774 | /* offline tests are not suppoerted in MF mode */ | 8910 | /* offline tests are not supported in MF mode */ |
8775 | if (IS_E1HMF(bp)) | 8911 | if (IS_E1HMF(bp)) |
8776 | etest->flags &= ~ETH_TEST_FL_OFFLINE; | 8912 | etest->flags &= ~ETH_TEST_FL_OFFLINE; |
8777 | 8913 | ||
@@ -8827,76 +8963,99 @@ static const struct { | |||
8827 | long offset; | 8963 | long offset; |
8828 | int size; | 8964 | int size; |
8829 | u32 flags; | 8965 | u32 flags; |
8830 | char string[ETH_GSTRING_LEN]; | 8966 | #define STATS_FLAGS_PORT 1 |
8967 | #define STATS_FLAGS_FUNC 2 | ||
8968 | u8 string[ETH_GSTRING_LEN]; | ||
8831 | } bnx2x_stats_arr[BNX2X_NUM_STATS] = { | 8969 | } bnx2x_stats_arr[BNX2X_NUM_STATS] = { |
8832 | /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" }, | 8970 | /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), |
8833 | { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" }, | 8971 | 8, STATS_FLAGS_FUNC, "rx_bytes" }, |
8834 | { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" }, | 8972 | { STATS_OFFSET32(error_bytes_received_hi), |
8835 | { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" }, | 8973 | 8, STATS_FLAGS_FUNC, "rx_error_bytes" }, |
8974 | { STATS_OFFSET32(total_bytes_transmitted_hi), | ||
8975 | 8, STATS_FLAGS_FUNC, "tx_bytes" }, | ||
8976 | { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), | ||
8977 | 8, STATS_FLAGS_PORT, "tx_error_bytes" }, | ||
8836 | { STATS_OFFSET32(total_unicast_packets_received_hi), | 8978 | { STATS_OFFSET32(total_unicast_packets_received_hi), |
8837 | 8, 1, "rx_ucast_packets" }, | 8979 | 8, STATS_FLAGS_FUNC, "rx_ucast_packets" }, |
8838 | { STATS_OFFSET32(total_multicast_packets_received_hi), | 8980 | { STATS_OFFSET32(total_multicast_packets_received_hi), |
8839 | 8, 1, "rx_mcast_packets" }, | 8981 | 8, STATS_FLAGS_FUNC, "rx_mcast_packets" }, |
8840 | { STATS_OFFSET32(total_broadcast_packets_received_hi), | 8982 | { STATS_OFFSET32(total_broadcast_packets_received_hi), |
8841 | 8, 1, "rx_bcast_packets" }, | 8983 | 8, STATS_FLAGS_FUNC, "rx_bcast_packets" }, |
8842 | { STATS_OFFSET32(total_unicast_packets_transmitted_hi), | 8984 | { STATS_OFFSET32(total_unicast_packets_transmitted_hi), |
8843 | 8, 1, "tx_packets" }, | 8985 | 8, STATS_FLAGS_FUNC, "tx_packets" }, |
8844 | { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), | 8986 | { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), |
8845 | 8, 0, "tx_mac_errors" }, | 8987 | 8, STATS_FLAGS_PORT, "tx_mac_errors" }, |
8846 | /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), | 8988 | /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), |
8847 | 8, 0, "tx_carrier_errors" }, | 8989 | 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, |
8848 | { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), | 8990 | { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), |
8849 | 8, 0, "rx_crc_errors" }, | 8991 | 8, STATS_FLAGS_PORT, "rx_crc_errors" }, |
8850 | { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), | 8992 | { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), |
8851 | 8, 0, "rx_align_errors" }, | 8993 | 8, STATS_FLAGS_PORT, "rx_align_errors" }, |
8852 | { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), | 8994 | { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), |
8853 | 8, 0, "tx_single_collisions" }, | 8995 | 8, STATS_FLAGS_PORT, "tx_single_collisions" }, |
8854 | { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), | 8996 | { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), |
8855 | 8, 0, "tx_multi_collisions" }, | 8997 | 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, |
8856 | { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), | 8998 | { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), |
8857 | 8, 0, "tx_deferred" }, | 8999 | 8, STATS_FLAGS_PORT, "tx_deferred" }, |
8858 | { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), | 9000 | { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), |
8859 | 8, 0, "tx_excess_collisions" }, | 9001 | 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, |
8860 | { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), | 9002 | { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), |
8861 | 8, 0, "tx_late_collisions" }, | 9003 | 8, STATS_FLAGS_PORT, "tx_late_collisions" }, |
8862 | { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), | 9004 | { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), |
8863 | 8, 0, "tx_total_collisions" }, | 9005 | 8, STATS_FLAGS_PORT, "tx_total_collisions" }, |
8864 | { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), | 9006 | { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), |
8865 | 8, 0, "rx_fragments" }, | 9007 | 8, STATS_FLAGS_PORT, "rx_fragments" }, |
8866 | /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" }, | 9008 | /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), |
9009 | 8, STATS_FLAGS_PORT, "rx_jabbers" }, | ||
8867 | { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), | 9010 | { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), |
8868 | 8, 0, "rx_undersize_packets" }, | 9011 | 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, |
8869 | { STATS_OFFSET32(jabber_packets_received), | 9012 | { STATS_OFFSET32(jabber_packets_received), |
8870 | 4, 1, "rx_oversize_packets" }, | 9013 | 4, STATS_FLAGS_FUNC, "rx_oversize_packets" }, |
8871 | { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), | 9014 | { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), |
8872 | 8, 0, "tx_64_byte_packets" }, | 9015 | 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, |
8873 | { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), | 9016 | { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), |
8874 | 8, 0, "tx_65_to_127_byte_packets" }, | 9017 | 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, |
8875 | { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), | 9018 | { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), |
8876 | 8, 0, "tx_128_to_255_byte_packets" }, | 9019 | 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, |
8877 | { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), | 9020 | { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), |
8878 | 8, 0, "tx_256_to_511_byte_packets" }, | 9021 | 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, |
8879 | { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), | 9022 | { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), |
8880 | 8, 0, "tx_512_to_1023_byte_packets" }, | 9023 | 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, |
8881 | { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), | 9024 | { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), |
8882 | 8, 0, "tx_1024_to_1522_byte_packets" }, | 9025 | 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, |
8883 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), | 9026 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), |
8884 | 8, 0, "tx_1523_to_9022_byte_packets" }, | 9027 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, |
8885 | /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), | 9028 | /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), |
8886 | 8, 0, "rx_xon_frames" }, | 9029 | 8, STATS_FLAGS_PORT, "rx_xon_frames" }, |
8887 | { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), | 9030 | { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), |
8888 | 8, 0, "rx_xoff_frames" }, | 9031 | 8, STATS_FLAGS_PORT, "rx_xoff_frames" }, |
8889 | { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" }, | 9032 | { STATS_OFFSET32(tx_stat_outxonsent_hi), |
8890 | { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" }, | 9033 | 8, STATS_FLAGS_PORT, "tx_xon_frames" }, |
9034 | { STATS_OFFSET32(tx_stat_outxoffsent_hi), | ||
9035 | 8, STATS_FLAGS_PORT, "tx_xoff_frames" }, | ||
8891 | { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), | 9036 | { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), |
8892 | 8, 0, "rx_mac_ctrl_frames" }, | 9037 | 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, |
8893 | { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" }, | 9038 | { STATS_OFFSET32(mac_filter_discard), |
8894 | { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" }, | 9039 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, |
8895 | { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" }, | 9040 | { STATS_OFFSET32(no_buff_discard), |
8896 | { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" }, | 9041 | 4, STATS_FLAGS_FUNC, "rx_discards" }, |
8897 | /* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" } | 9042 | { STATS_OFFSET32(xxoverflow_discard), |
9043 | 4, STATS_FLAGS_PORT, "rx_fw_discards" }, | ||
9044 | { STATS_OFFSET32(brb_drop_hi), | ||
9045 | 8, STATS_FLAGS_PORT, "brb_discard" }, | ||
9046 | { STATS_OFFSET32(brb_truncate_hi), | ||
9047 | 8, STATS_FLAGS_PORT, "brb_truncate" }, | ||
9048 | /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt), | ||
9049 | 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"}, | ||
9050 | { STATS_OFFSET32(rx_skb_alloc_failed), | ||
9051 | 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" }, | ||
9052 | /* 42 */{ STATS_OFFSET32(hw_csum_err), | ||
9053 | 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" } | ||
8898 | }; | 9054 | }; |
8899 | 9055 | ||
9056 | #define IS_NOT_E1HMF_STAT(bp, i) \ | ||
9057 | (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT)) | ||
9058 | |||
8900 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | 9059 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) |
8901 | { | 9060 | { |
8902 | struct bnx2x *bp = netdev_priv(dev); | 9061 | struct bnx2x *bp = netdev_priv(dev); |
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
8905 | switch (stringset) { | 9064 | switch (stringset) { |
8906 | case ETH_SS_STATS: | 9065 | case ETH_SS_STATS: |
8907 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { | 9066 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { |
8908 | if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) | 9067 | if (IS_NOT_E1HMF_STAT(bp, i)) |
8909 | continue; | 9068 | continue; |
8910 | strcpy(buf + j*ETH_GSTRING_LEN, | 9069 | strcpy(buf + j*ETH_GSTRING_LEN, |
8911 | bnx2x_stats_arr[i].string); | 9070 | bnx2x_stats_arr[i].string); |
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev) | |||
8925 | int i, num_stats = 0; | 9084 | int i, num_stats = 0; |
8926 | 9085 | ||
8927 | for (i = 0; i < BNX2X_NUM_STATS; i++) { | 9086 | for (i = 0; i < BNX2X_NUM_STATS; i++) { |
8928 | if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) | 9087 | if (IS_NOT_E1HMF_STAT(bp, i)) |
8929 | continue; | 9088 | continue; |
8930 | num_stats++; | 9089 | num_stats++; |
8931 | } | 9090 | } |
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
8940 | int i, j; | 9099 | int i, j; |
8941 | 9100 | ||
8942 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { | 9101 | for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { |
8943 | if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) | 9102 | if (IS_NOT_E1HMF_STAT(bp, i)) |
8944 | continue; | 9103 | continue; |
8945 | 9104 | ||
8946 | if (bnx2x_stats_arr[i].size == 0) { | 9105 | if (bnx2x_stats_arr[i].size == 0) { |
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | |||
9057 | PCI_PM_CTRL_PME_STATUS)); | 9216 | PCI_PM_CTRL_PME_STATUS)); |
9058 | 9217 | ||
9059 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) | 9218 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) |
9060 | /* delay required during transition out of D3hot */ | 9219 | /* delay required during transition out of D3hot */ |
9061 | msleep(20); | 9220 | msleep(20); |
9062 | break; | 9221 | break; |
9063 | 9222 | ||
@@ -9092,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
9092 | napi); | 9251 | napi); |
9093 | struct bnx2x *bp = fp->bp; | 9252 | struct bnx2x *bp = fp->bp; |
9094 | int work_done = 0; | 9253 | int work_done = 0; |
9254 | u16 rx_cons_sb; | ||
9095 | 9255 | ||
9096 | #ifdef BNX2X_STOP_ON_ERROR | 9256 | #ifdef BNX2X_STOP_ON_ERROR |
9097 | if (unlikely(bp->panic)) | 9257 | if (unlikely(bp->panic)) |
@@ -9104,17 +9264,22 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
9104 | 9264 | ||
9105 | bnx2x_update_fpsb_idx(fp); | 9265 | bnx2x_update_fpsb_idx(fp); |
9106 | 9266 | ||
9107 | if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || | 9267 | if (BNX2X_HAS_TX_WORK(fp)) |
9108 | (fp->tx_pkt_prod != fp->tx_pkt_cons)) | ||
9109 | bnx2x_tx_int(fp, budget); | 9268 | bnx2x_tx_int(fp, budget); |
9110 | 9269 | ||
9111 | if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) | 9270 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); |
9271 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
9272 | rx_cons_sb++; | ||
9273 | if (BNX2X_HAS_RX_WORK(fp)) | ||
9112 | work_done = bnx2x_rx_int(fp, budget); | 9274 | work_done = bnx2x_rx_int(fp, budget); |
9113 | 9275 | ||
9114 | rmb(); /* bnx2x_has_work() reads the status block */ | 9276 | rmb(); /* BNX2X_HAS_WORK() reads the status block */ |
9277 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | ||
9278 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
9279 | rx_cons_sb++; | ||
9115 | 9280 | ||
9116 | /* must not complete if we consumed full budget */ | 9281 | /* must not complete if we consumed full budget */ |
9117 | if ((work_done < budget) && !bnx2x_has_work(fp)) { | 9282 | if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { |
9118 | 9283 | ||
9119 | #ifdef BNX2X_STOP_ON_ERROR | 9284 | #ifdef BNX2X_STOP_ON_ERROR |
9120 | poll_panic: | 9285 | poll_panic: |
@@ -9131,7 +9296,7 @@ poll_panic: | |||
9131 | 9296 | ||
9132 | 9297 | ||
9133 | /* we split the first BD into headers and data BDs | 9298 | /* we split the first BD into headers and data BDs |
9134 | * to ease the pain of our fellow micocode engineers | 9299 | * to ease the pain of our fellow microcode engineers |
9135 | * we use one mapping for both BDs | 9300 | * we use one mapping for both BDs |
9136 | * So far this has only been observed to happen | 9301 | * So far this has only been observed to happen |
9137 | * in Other Operating Systems(TM) | 9302 | * in Other Operating Systems(TM) |
@@ -9238,7 +9403,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, | |||
9238 | /* Check if LSO packet needs to be copied: | 9403 | /* Check if LSO packet needs to be copied: |
9239 | 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ | 9404 | 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ |
9240 | int wnd_size = MAX_FETCH_BD - 3; | 9405 | int wnd_size = MAX_FETCH_BD - 3; |
9241 | /* Number of widnows to check */ | 9406 | /* Number of windows to check */ |
9242 | int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; | 9407 | int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; |
9243 | int wnd_idx = 0; | 9408 | int wnd_idx = 0; |
9244 | int frag_idx = 0; | 9409 | int frag_idx = 0; |
@@ -9327,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9327 | fp_index = (smp_processor_id() % bp->num_queues); | 9492 | fp_index = (smp_processor_id() % bp->num_queues); |
9328 | fp = &bp->fp[fp_index]; | 9493 | fp = &bp->fp[fp_index]; |
9329 | 9494 | ||
9330 | if (unlikely(bnx2x_tx_avail(bp->fp) < | 9495 | if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { |
9331 | (skb_shinfo(skb)->nr_frags + 3))) { | ||
9332 | bp->eth_stats.driver_xoff++, | 9496 | bp->eth_stats.driver_xoff++, |
9333 | netif_stop_queue(dev); | 9497 | netif_stop_queue(dev); |
9334 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); | 9498 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); |
@@ -9340,7 +9504,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9340 | skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, | 9504 | skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, |
9341 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); | 9505 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); |
9342 | 9506 | ||
9343 | /* First, check if we need to linearaize the skb | 9507 | /* First, check if we need to linearize the skb |
9344 | (due to FW restrictions) */ | 9508 | (due to FW restrictions) */ |
9345 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { | 9509 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { |
9346 | /* Statistics of linearization */ | 9510 | /* Statistics of linearization */ |
@@ -9349,7 +9513,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9349 | DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " | 9513 | DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " |
9350 | "silently dropping this SKB\n"); | 9514 | "silently dropping this SKB\n"); |
9351 | dev_kfree_skb_any(skb); | 9515 | dev_kfree_skb_any(skb); |
9352 | return 0; | 9516 | return NETDEV_TX_OK; |
9353 | } | 9517 | } |
9354 | } | 9518 | } |
9355 | 9519 | ||
@@ -9372,7 +9536,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9372 | tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | 9536 | tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; |
9373 | tx_bd->general_data = (UNICAST_ADDRESS << | 9537 | tx_bd->general_data = (UNICAST_ADDRESS << |
9374 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); | 9538 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); |
9375 | tx_bd->general_data |= 1; /* header nbd */ | 9539 | /* header nbd */ |
9540 | tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT); | ||
9376 | 9541 | ||
9377 | /* remember the first BD of the packet */ | 9542 | /* remember the first BD of the packet */ |
9378 | tx_buf->first_bd = fp->tx_bd_prod; | 9543 | tx_buf->first_bd = fp->tx_bd_prod; |
@@ -9390,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9390 | tx_bd->vlan = cpu_to_le16(pkt_prod); | 9555 | tx_bd->vlan = cpu_to_le16(pkt_prod); |
9391 | 9556 | ||
9392 | if (xmit_type) { | 9557 | if (xmit_type) { |
9393 | |||
9394 | /* turn on parsing and get a BD */ | 9558 | /* turn on parsing and get a BD */ |
9395 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 9559 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
9396 | pbd = (void *)&fp->tx_desc_ring[bd_prod]; | 9560 | pbd = (void *)&fp->tx_desc_ring[bd_prod]; |
@@ -9451,7 +9615,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9451 | 9615 | ||
9452 | tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 9616 | tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
9453 | tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 9617 | tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
9454 | nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2); | 9618 | nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2); |
9455 | tx_bd->nbd = cpu_to_le16(nbd); | 9619 | tx_bd->nbd = cpu_to_le16(nbd); |
9456 | tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | 9620 | tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); |
9457 | 9621 | ||
@@ -9721,9 +9885,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p) | |||
9721 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 9885 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
9722 | if (netif_running(dev)) { | 9886 | if (netif_running(dev)) { |
9723 | if (CHIP_IS_E1(bp)) | 9887 | if (CHIP_IS_E1(bp)) |
9724 | bnx2x_set_mac_addr_e1(bp); | 9888 | bnx2x_set_mac_addr_e1(bp, 1); |
9725 | else | 9889 | else |
9726 | bnx2x_set_mac_addr_e1h(bp); | 9890 | bnx2x_set_mac_addr_e1h(bp, 1); |
9727 | } | 9891 | } |
9728 | 9892 | ||
9729 | return 0; | 9893 | return 0; |
@@ -9734,6 +9898,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
9734 | { | 9898 | { |
9735 | struct mii_ioctl_data *data = if_mii(ifr); | 9899 | struct mii_ioctl_data *data = if_mii(ifr); |
9736 | struct bnx2x *bp = netdev_priv(dev); | 9900 | struct bnx2x *bp = netdev_priv(dev); |
9901 | int port = BP_PORT(bp); | ||
9737 | int err; | 9902 | int err; |
9738 | 9903 | ||
9739 | switch (cmd) { | 9904 | switch (cmd) { |
@@ -9749,7 +9914,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
9749 | return -EAGAIN; | 9914 | return -EAGAIN; |
9750 | 9915 | ||
9751 | mutex_lock(&bp->port.phy_mutex); | 9916 | mutex_lock(&bp->port.phy_mutex); |
9752 | err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, | 9917 | err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr, |
9753 | DEFAULT_PHY_DEV_ADDR, | 9918 | DEFAULT_PHY_DEV_ADDR, |
9754 | (data->reg_num & 0x1f), &mii_regval); | 9919 | (data->reg_num & 0x1f), &mii_regval); |
9755 | data->val_out = mii_regval; | 9920 | data->val_out = mii_regval; |
@@ -9765,7 +9930,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
9765 | return -EAGAIN; | 9930 | return -EAGAIN; |
9766 | 9931 | ||
9767 | mutex_lock(&bp->port.phy_mutex); | 9932 | mutex_lock(&bp->port.phy_mutex); |
9768 | err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, | 9933 | err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr, |
9769 | DEFAULT_PHY_DEV_ADDR, | 9934 | DEFAULT_PHY_DEV_ADDR, |
9770 | (data->reg_num & 0x1f), data->val_in); | 9935 | (data->reg_num & 0x1f), data->val_in); |
9771 | mutex_unlock(&bp->port.phy_mutex); | 9936 | mutex_unlock(&bp->port.phy_mutex); |
@@ -10141,7 +10306,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) | |||
10141 | 10306 | ||
10142 | netif_device_detach(dev); | 10307 | netif_device_detach(dev); |
10143 | 10308 | ||
10144 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | 10309 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); |
10145 | 10310 | ||
10146 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); | 10311 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); |
10147 | 10312 | ||
@@ -10174,7 +10339,7 @@ static int bnx2x_resume(struct pci_dev *pdev) | |||
10174 | bnx2x_set_power_state(bp, PCI_D0); | 10339 | bnx2x_set_power_state(bp, PCI_D0); |
10175 | netif_device_attach(dev); | 10340 | netif_device_attach(dev); |
10176 | 10341 | ||
10177 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | 10342 | rc = bnx2x_nic_load(bp, LOAD_OPEN); |
10178 | 10343 | ||
10179 | rtnl_unlock(); | 10344 | rtnl_unlock(); |
10180 | 10345 | ||
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h index 15c9a9946724..a67b0c358ae4 100644 --- a/drivers/net/bnx2x_reg.h +++ b/drivers/net/bnx2x_reg.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * The registers description starts with the regsister Access type followed | 9 | * The registers description starts with the register Access type followed |
10 | * by size in bits. For example [RW 32]. The access types are: | 10 | * by size in bits. For example [RW 32]. The access types are: |
11 | * R - Read only | 11 | * R - Read only |
12 | * RC - Clear on read | 12 | * RC - Clear on read |
@@ -49,7 +49,7 @@ | |||
49 | /* [RW 10] Write client 0: Assert pause threshold. */ | 49 | /* [RW 10] Write client 0: Assert pause threshold. */ |
50 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 | 50 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 |
51 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c | 51 | #define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c |
52 | /* [R 24] The number of full blocks occpied by port. */ | 52 | /* [R 24] The number of full blocks occupied by port. */ |
53 | #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 | 53 | #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 |
54 | /* [RW 1] Reset the design by software. */ | 54 | /* [RW 1] Reset the design by software. */ |
55 | #define BRB1_REG_SOFT_RESET 0x600dc | 55 | #define BRB1_REG_SOFT_RESET 0x600dc |
@@ -740,6 +740,7 @@ | |||
740 | #define HC_REG_ATTN_MSG1_ADDR_L 0x108020 | 740 | #define HC_REG_ATTN_MSG1_ADDR_L 0x108020 |
741 | #define HC_REG_ATTN_NUM_P0 0x108038 | 741 | #define HC_REG_ATTN_NUM_P0 0x108038 |
742 | #define HC_REG_ATTN_NUM_P1 0x10803c | 742 | #define HC_REG_ATTN_NUM_P1 0x10803c |
743 | #define HC_REG_COMMAND_REG 0x108180 | ||
743 | #define HC_REG_CONFIG_0 0x108000 | 744 | #define HC_REG_CONFIG_0 0x108000 |
744 | #define HC_REG_CONFIG_1 0x108004 | 745 | #define HC_REG_CONFIG_1 0x108004 |
745 | #define HC_REG_FUNC_NUM_P0 0x1080ac | 746 | #define HC_REG_FUNC_NUM_P0 0x1080ac |
@@ -1372,6 +1373,23 @@ | |||
1372 | be asserted). */ | 1373 | be asserted). */ |
1373 | #define MISC_REG_DRIVER_CONTROL_16 0xa5f0 | 1374 | #define MISC_REG_DRIVER_CONTROL_16 0xa5f0 |
1374 | #define MISC_REG_DRIVER_CONTROL_16_SIZE 2 | 1375 | #define MISC_REG_DRIVER_CONTROL_16_SIZE 2 |
1376 | /* [RW 32] The following driver registers(1...16) represent 16 drivers and | ||
1377 | 32 clients. Each client can be controlled by one driver only. One in each | ||
1378 | bit represents that this driver controls the appropriate client (Ex: bit 5 | ||
1379 | is set means this driver controls client number 5). addr1 = set; addr0 = | ||
1380 | clear; read from both addresses will give the same result = status. write | ||
1381 | to address 1 will set a request to control all the clients that their | ||
1382 | appropriate bit (in the write command) is set. if the client is free (the | ||
1383 | appropriate bit in all the other drivers is clear) one will be written to | ||
1384 | that driver register; if the client isn't free the bit will remain zero. | ||
1385 | if the appropriate bit is set (the driver request to gain control on a | ||
1386 | client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW | ||
1387 | interrupt will be asserted). write to address 0 will set a request to | ||
1388 | free all the clients that their appropriate bit (in the write command) is | ||
1389 | set. if the appropriate bit is clear (the driver request to free a client | ||
1390 | it doesn't control the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will | ||
1391 | be asserted). */ | ||
1392 | #define MISC_REG_DRIVER_CONTROL_7 0xa3c8 | ||
1375 | /* [RW 1] e1hmf for WOL. If clr WOL signal to the PXP will be sent on bit 0 | 1393 | /* [RW 1] e1hmf for WOL. If clr WOL signal to the PXP will be sent on bit 0 |
1376 | only. */ | 1394 | only. */ |
1377 | #define MISC_REG_E1HMF_MODE 0xa5f8 | 1395 | #define MISC_REG_E1HMF_MODE 0xa5f8 |
@@ -1394,13 +1412,13 @@ | |||
1394 | #define MISC_REG_GPIO 0xa490 | 1412 | #define MISC_REG_GPIO 0xa490 |
1395 | /* [R 28] this field holds the last information that caused reserved | 1413 | /* [R 28] this field holds the last information that caused reserved |
1396 | attention. bits [19:0] - address; [22:20] function; [23] reserved; | 1414 | attention. bits [19:0] - address; [22:20] function; [23] reserved; |
1397 | [27:24] the master thatcaused the attention - according to the following | 1415 | [27:24] the master that caused the attention - according to the following |
1398 | encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = | 1416 | encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = |
1399 | dbu; 8 = dmae */ | 1417 | dbu; 8 = dmae */ |
1400 | #define MISC_REG_GRC_RSV_ATTN 0xa3c0 | 1418 | #define MISC_REG_GRC_RSV_ATTN 0xa3c0 |
1401 | /* [R 28] this field holds the last information that caused timeout | 1419 | /* [R 28] this field holds the last information that caused timeout |
1402 | attention. bits [19:0] - address; [22:20] function; [23] reserved; | 1420 | attention. bits [19:0] - address; [22:20] function; [23] reserved; |
1403 | [27:24] the master thatcaused the attention - according to the following | 1421 | [27:24] the master that caused the attention - according to the following |
1404 | encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = | 1422 | encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = |
1405 | dbu; 8 = dmae */ | 1423 | dbu; 8 = dmae */ |
1406 | #define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 | 1424 | #define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 |
@@ -1677,6 +1695,7 @@ | |||
1677 | /* [RW 8] init credit counter for port0 in LLH */ | 1695 | /* [RW 8] init credit counter for port0 in LLH */ |
1678 | #define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 | 1696 | #define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 |
1679 | #define NIG_REG_LLH0_XCM_MASK 0x10130 | 1697 | #define NIG_REG_LLH0_XCM_MASK 0x10130 |
1698 | #define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248 | ||
1680 | /* [RW 1] send to BRB1 if no match on any of RMP rules. */ | 1699 | /* [RW 1] send to BRB1 if no match on any of RMP rules. */ |
1681 | #define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc | 1700 | #define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc |
1682 | /* [RW 2] Determine the classification participants. 0: no classification.1: | 1701 | /* [RW 2] Determine the classification participants. 0: no classification.1: |
@@ -1727,6 +1746,9 @@ | |||
1727 | /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure | 1746 | /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure |
1728 | for port0 */ | 1747 | for port0 */ |
1729 | #define NIG_REG_STAT0_BRB_DISCARD 0x105f0 | 1748 | #define NIG_REG_STAT0_BRB_DISCARD 0x105f0 |
1749 | /* [R 32] Rx statistics : In user packets truncated due to BRB backpressure | ||
1750 | for port0 */ | ||
1751 | #define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8 | ||
1730 | /* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that | 1752 | /* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that |
1731 | between 1024 and 1522 bytes for port0 */ | 1753 | between 1024 and 1522 bytes for port0 */ |
1732 | #define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 | 1754 | #define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 |
@@ -2298,7 +2320,7 @@ | |||
2298 | /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; | 2320 | /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; |
2299 | -128k */ | 2321 | -128k */ |
2300 | #define PXP2_REG_RQ_QM_P_SIZE 0x120050 | 2322 | #define PXP2_REG_RQ_QM_P_SIZE 0x120050 |
2301 | /* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */ | 2323 | /* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */ |
2302 | #define PXP2_REG_RQ_RBC_DONE 0x1201b0 | 2324 | #define PXP2_REG_RQ_RBC_DONE 0x1201b0 |
2303 | /* [RW 3] Max burst size field for read requests port 0; 000 - 128B; | 2325 | /* [RW 3] Max burst size field for read requests port 0; 000 - 128B; |
2304 | 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ | 2326 | 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ |
@@ -2406,7 +2428,7 @@ | |||
2406 | /* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the | 2428 | /* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the |
2407 | buffer reaches this number has_payload will be asserted */ | 2429 | buffer reaches this number has_payload will be asserted */ |
2408 | #define PXP2_REG_WR_DMAE_MPS 0x1205ec | 2430 | #define PXP2_REG_WR_DMAE_MPS 0x1205ec |
2409 | /* [RW 10] if Number of entries in dmae fifo will be higer than this | 2431 | /* [RW 10] if Number of entries in dmae fifo will be higher than this |
2410 | threshold then has_payload indication will be asserted; the default value | 2432 | threshold then has_payload indication will be asserted; the default value |
2411 | should be equal to > write MBS size! */ | 2433 | should be equal to > write MBS size! */ |
2412 | #define PXP2_REG_WR_DMAE_TH 0x120368 | 2434 | #define PXP2_REG_WR_DMAE_TH 0x120368 |
@@ -2427,7 +2449,7 @@ | |||
2427 | /* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the | 2449 | /* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the |
2428 | buffer reaches this number has_payload will be asserted */ | 2450 | buffer reaches this number has_payload will be asserted */ |
2429 | #define PXP2_REG_WR_TSDM_MPS 0x1205d4 | 2451 | #define PXP2_REG_WR_TSDM_MPS 0x1205d4 |
2430 | /* [RW 10] if Number of entries in usdmdp fifo will be higer than this | 2452 | /* [RW 10] if Number of entries in usdmdp fifo will be higher than this |
2431 | threshold then has_payload indication will be asserted; the default value | 2453 | threshold then has_payload indication will be asserted; the default value |
2432 | should be equal to > write MBS size! */ | 2454 | should be equal to > write MBS size! */ |
2433 | #define PXP2_REG_WR_USDMDP_TH 0x120348 | 2455 | #define PXP2_REG_WR_USDMDP_TH 0x120348 |
@@ -3294,12 +3316,12 @@ | |||
3294 | #define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0 | 3316 | #define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0 |
3295 | #define CFC_DEBUG1_REG_WRITE_AC (0x1<<4) | 3317 | #define CFC_DEBUG1_REG_WRITE_AC (0x1<<4) |
3296 | #define CFC_DEBUG1_REG_WRITE_AC_SIZE 4 | 3318 | #define CFC_DEBUG1_REG_WRITE_AC_SIZE 4 |
3297 | /* [R 1] debug only: This bit indicates wheter indicates that external | 3319 | /* [R 1] debug only: This bit indicates whether indicates that external |
3298 | buffer was wrapped (oldest data was thrown); Relevant only when | 3320 | buffer was wrapped (oldest data was thrown); Relevant only when |
3299 | ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */ | 3321 | ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */ |
3300 | #define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124 | 3322 | #define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124 |
3301 | #define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1 | 3323 | #define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1 |
3302 | /* [R 1] debug only: This bit indicates wheter the internal buffer was | 3324 | /* [R 1] debug only: This bit indicates whether the internal buffer was |
3303 | wrapped (oldest data was thrown) Relevant only when | 3325 | wrapped (oldest data was thrown) Relevant only when |
3304 | ~dbg_registers_debug_target=0 (internal buffer) */ | 3326 | ~dbg_registers_debug_target=0 (internal buffer) */ |
3305 | #define DBG_REG_WRAP_ON_INT_BUFFER 0xc128 | 3327 | #define DBG_REG_WRAP_ON_INT_BUFFER 0xc128 |
@@ -4944,6 +4966,7 @@ | |||
4944 | #define EMAC_RX_MODE_PROMISCUOUS (1L<<8) | 4966 | #define EMAC_RX_MODE_PROMISCUOUS (1L<<8) |
4945 | #define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) | 4967 | #define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) |
4946 | #define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) | 4968 | #define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) |
4969 | #define EMAC_TX_MODE_FLOW_EN (1L<<4) | ||
4947 | #define MISC_REGISTERS_GPIO_0 0 | 4970 | #define MISC_REGISTERS_GPIO_0 0 |
4948 | #define MISC_REGISTERS_GPIO_1 1 | 4971 | #define MISC_REGISTERS_GPIO_1 1 |
4949 | #define MISC_REGISTERS_GPIO_2 2 | 4972 | #define MISC_REGISTERS_GPIO_2 2 |
@@ -4959,6 +4982,7 @@ | |||
4959 | #define MISC_REGISTERS_GPIO_PORT_SHIFT 4 | 4982 | #define MISC_REGISTERS_GPIO_PORT_SHIFT 4 |
4960 | #define MISC_REGISTERS_GPIO_SET_POS 8 | 4983 | #define MISC_REGISTERS_GPIO_SET_POS 8 |
4961 | #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 | 4984 | #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 |
4985 | #define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) | ||
4962 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 | 4986 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 |
4963 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 | 4987 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 |
4964 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) | 4988 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) |
@@ -4993,7 +5017,9 @@ | |||
4993 | #define HW_LOCK_MAX_RESOURCE_VALUE 31 | 5017 | #define HW_LOCK_MAX_RESOURCE_VALUE 31 |
4994 | #define HW_LOCK_RESOURCE_8072_MDIO 0 | 5018 | #define HW_LOCK_RESOURCE_8072_MDIO 0 |
4995 | #define HW_LOCK_RESOURCE_GPIO 1 | 5019 | #define HW_LOCK_RESOURCE_GPIO 1 |
5020 | #define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 | ||
4996 | #define HW_LOCK_RESOURCE_SPIO 2 | 5021 | #define HW_LOCK_RESOURCE_SPIO 2 |
5022 | #define HW_LOCK_RESOURCE_UNDI 5 | ||
4997 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) | 5023 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) |
4998 | #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) | 5024 | #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) |
4999 | #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) | 5025 | #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) |
@@ -5144,59 +5170,73 @@ | |||
5144 | #define GRCBASE_MISC_AEU GRCBASE_MISC | 5170 | #define GRCBASE_MISC_AEU GRCBASE_MISC |
5145 | 5171 | ||
5146 | 5172 | ||
5147 | /*the offset of the configuration space in the pci core register*/ | 5173 | /* offset of configuration space in the pci core register */ |
5148 | #define PCICFG_OFFSET 0x2000 | 5174 | #define PCICFG_OFFSET 0x2000 |
5149 | #define PCICFG_VENDOR_ID_OFFSET 0x00 | 5175 | #define PCICFG_VENDOR_ID_OFFSET 0x00 |
5150 | #define PCICFG_DEVICE_ID_OFFSET 0x02 | 5176 | #define PCICFG_DEVICE_ID_OFFSET 0x02 |
5151 | #define PCICFG_COMMAND_OFFSET 0x04 | 5177 | #define PCICFG_COMMAND_OFFSET 0x04 |
5178 | #define PCICFG_COMMAND_IO_SPACE (1<<0) | ||
5179 | #define PCICFG_COMMAND_MEM_SPACE (1<<1) | ||
5180 | #define PCICFG_COMMAND_BUS_MASTER (1<<2) | ||
5181 | #define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) | ||
5182 | #define PCICFG_COMMAND_MWI_CYCLES (1<<4) | ||
5183 | #define PCICFG_COMMAND_VGA_SNOOP (1<<5) | ||
5184 | #define PCICFG_COMMAND_PERR_ENA (1<<6) | ||
5185 | #define PCICFG_COMMAND_STEPPING (1<<7) | ||
5186 | #define PCICFG_COMMAND_SERR_ENA (1<<8) | ||
5187 | #define PCICFG_COMMAND_FAST_B2B (1<<9) | ||
5188 | #define PCICFG_COMMAND_INT_DISABLE (1<<10) | ||
5189 | #define PCICFG_COMMAND_RESERVED (0x1f<<11) | ||
5152 | #define PCICFG_STATUS_OFFSET 0x06 | 5190 | #define PCICFG_STATUS_OFFSET 0x06 |
5153 | #define PCICFG_REVESION_ID 0x08 | 5191 | #define PCICFG_REVESION_ID 0x08 |
5154 | #define PCICFG_CACHE_LINE_SIZE 0x0c | 5192 | #define PCICFG_CACHE_LINE_SIZE 0x0c |
5155 | #define PCICFG_LATENCY_TIMER 0x0d | 5193 | #define PCICFG_LATENCY_TIMER 0x0d |
5156 | #define PCICFG_BAR_1_LOW 0x10 | 5194 | #define PCICFG_BAR_1_LOW 0x10 |
5157 | #define PCICFG_BAR_1_HIGH 0x14 | 5195 | #define PCICFG_BAR_1_HIGH 0x14 |
5158 | #define PCICFG_BAR_2_LOW 0x18 | 5196 | #define PCICFG_BAR_2_LOW 0x18 |
5159 | #define PCICFG_BAR_2_HIGH 0x1c | 5197 | #define PCICFG_BAR_2_HIGH 0x1c |
5160 | #define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c | 5198 | #define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c |
5161 | #define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e | 5199 | #define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e |
5162 | #define PCICFG_INT_LINE 0x3c | 5200 | #define PCICFG_INT_LINE 0x3c |
5163 | #define PCICFG_INT_PIN 0x3d | 5201 | #define PCICFG_INT_PIN 0x3d |
5164 | #define PCICFG_PM_CSR_OFFSET 0x4c | 5202 | #define PCICFG_PM_CAPABILITY 0x48 |
5165 | #define PCICFG_GRC_ADDRESS 0x78 | 5203 | #define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) |
5166 | #define PCICFG_GRC_DATA 0x80 | 5204 | #define PCICFG_PM_CAPABILITY_CLOCK (1<<19) |
5205 | #define PCICFG_PM_CAPABILITY_RESERVED (1<<20) | ||
5206 | #define PCICFG_PM_CAPABILITY_DSI (1<<21) | ||
5207 | #define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) | ||
5208 | #define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) | ||
5209 | #define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) | ||
5210 | #define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27) | ||
5211 | #define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) | ||
5212 | #define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) | ||
5213 | #define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) | ||
5214 | #define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) | ||
5215 | #define PCICFG_PM_CSR_OFFSET 0x4c | ||
5216 | #define PCICFG_PM_CSR_STATE (0x3<<0) | ||
5217 | #define PCICFG_PM_CSR_PME_ENABLE (1<<8) | ||
5218 | #define PCICFG_PM_CSR_PME_STATUS (1<<15) | ||
5219 | #define PCICFG_GRC_ADDRESS 0x78 | ||
5220 | #define PCICFG_GRC_DATA 0x80 | ||
5167 | #define PCICFG_DEVICE_CONTROL 0xb4 | 5221 | #define PCICFG_DEVICE_CONTROL 0xb4 |
5168 | #define PCICFG_LINK_CONTROL 0xbc | 5222 | #define PCICFG_LINK_CONTROL 0xbc |
5169 | 5223 | ||
5170 | #define PCICFG_COMMAND_IO_SPACE (1<<0) | ||
5171 | #define PCICFG_COMMAND_MEM_SPACE (1<<1) | ||
5172 | #define PCICFG_COMMAND_BUS_MASTER (1<<2) | ||
5173 | #define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) | ||
5174 | #define PCICFG_COMMAND_MWI_CYCLES (1<<4) | ||
5175 | #define PCICFG_COMMAND_VGA_SNOOP (1<<5) | ||
5176 | #define PCICFG_COMMAND_PERR_ENA (1<<6) | ||
5177 | #define PCICFG_COMMAND_STEPPING (1<<7) | ||
5178 | #define PCICFG_COMMAND_SERR_ENA (1<<8) | ||
5179 | #define PCICFG_COMMAND_FAST_B2B (1<<9) | ||
5180 | #define PCICFG_COMMAND_INT_DISABLE (1<<10) | ||
5181 | #define PCICFG_COMMAND_RESERVED (0x1f<<11) | ||
5182 | |||
5183 | #define PCICFG_PM_CSR_STATE (0x3<<0) | ||
5184 | #define PCICFG_PM_CSR_PME_STATUS (1<<15) | ||
5185 | 5224 | ||
5186 | #define BAR_USTRORM_INTMEM 0x400000 | 5225 | #define BAR_USTRORM_INTMEM 0x400000 |
5187 | #define BAR_CSTRORM_INTMEM 0x410000 | 5226 | #define BAR_CSTRORM_INTMEM 0x410000 |
5188 | #define BAR_XSTRORM_INTMEM 0x420000 | 5227 | #define BAR_XSTRORM_INTMEM 0x420000 |
5189 | #define BAR_TSTRORM_INTMEM 0x430000 | 5228 | #define BAR_TSTRORM_INTMEM 0x430000 |
5190 | 5229 | ||
5230 | /* for accessing the IGU in case of status block ACK */ | ||
5191 | #define BAR_IGU_INTMEM 0x440000 | 5231 | #define BAR_IGU_INTMEM 0x440000 |
5192 | 5232 | ||
5193 | #define BAR_DOORBELL_OFFSET 0x800000 | 5233 | #define BAR_DOORBELL_OFFSET 0x800000 |
5194 | 5234 | ||
5195 | #define BAR_ME_REGISTER 0x450000 | 5235 | #define BAR_ME_REGISTER 0x450000 |
5196 | 5236 | ||
5197 | 5237 | /* config_2 offset */ | |
5198 | #define GRC_CONFIG_2_SIZE_REG 0x408 /* config_2 offset */ | 5238 | #define GRC_CONFIG_2_SIZE_REG 0x408 |
5199 | #define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) | 5239 | #define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) |
5200 | #define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) | 5240 | #define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) |
5201 | #define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) | 5241 | #define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) |
5202 | #define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) | 5242 | #define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) |
@@ -5213,11 +5253,11 @@ | |||
5213 | #define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) | 5253 | #define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) |
5214 | #define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) | 5254 | #define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) |
5215 | #define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) | 5255 | #define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) |
5216 | #define PCI_CONFIG_2_BAR1_64ENA (1L<<4) | 5256 | #define PCI_CONFIG_2_BAR1_64ENA (1L<<4) |
5217 | #define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) | 5257 | #define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) |
5218 | #define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) | 5258 | #define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) |
5219 | #define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) | 5259 | #define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) |
5220 | #define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) | 5260 | #define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) |
5221 | #define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) | 5261 | #define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) |
5222 | #define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) | 5262 | #define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) |
5223 | #define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) | 5263 | #define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) |
@@ -5234,46 +5274,44 @@ | |||
5234 | #define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) | 5274 | #define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) |
5235 | #define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) | 5275 | #define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) |
5236 | #define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) | 5276 | #define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) |
5237 | #define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) | 5277 | #define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) |
5238 | #define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) | 5278 | #define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) |
5239 | 5279 | ||
5240 | /* config_3 offset */ | 5280 | /* config_3 offset */ |
5241 | #define GRC_CONFIG_3_SIZE_REG (0x40c) | 5281 | #define GRC_CONFIG_3_SIZE_REG 0x40c |
5242 | #define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) | 5282 | #define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) |
5243 | #define PCI_CONFIG_3_FORCE_PME (1L<<24) | 5283 | #define PCI_CONFIG_3_FORCE_PME (1L<<24) |
5244 | #define PCI_CONFIG_3_PME_STATUS (1L<<25) | 5284 | #define PCI_CONFIG_3_PME_STATUS (1L<<25) |
5245 | #define PCI_CONFIG_3_PME_ENABLE (1L<<26) | 5285 | #define PCI_CONFIG_3_PME_ENABLE (1L<<26) |
5246 | #define PCI_CONFIG_3_PM_STATE (0x3L<<27) | 5286 | #define PCI_CONFIG_3_PM_STATE (0x3L<<27) |
5247 | #define PCI_CONFIG_3_VAUX_PRESET (1L<<30) | 5287 | #define PCI_CONFIG_3_VAUX_PRESET (1L<<30) |
5248 | #define PCI_CONFIG_3_PCI_POWER (1L<<31) | 5288 | #define PCI_CONFIG_3_PCI_POWER (1L<<31) |
5249 | |||
5250 | /* config_2 offset */ | ||
5251 | #define GRC_CONFIG_2_SIZE_REG 0x408 | ||
5252 | 5289 | ||
5253 | #define GRC_BAR2_CONFIG 0x4e0 | 5290 | #define GRC_BAR2_CONFIG 0x4e0 |
5254 | #define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) | 5291 | #define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) |
5255 | #define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) | 5292 | #define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) |
5256 | #define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) | 5293 | #define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) |
5257 | #define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) | 5294 | #define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) |
5258 | #define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) | 5295 | #define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) |
5259 | #define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) | 5296 | #define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) |
5260 | #define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) | 5297 | #define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) |
5261 | #define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) | 5298 | #define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) |
5262 | #define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) | 5299 | #define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) |
5263 | #define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) | 5300 | #define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) |
5264 | #define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) | 5301 | #define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) |
5265 | #define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) | 5302 | #define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) |
5266 | #define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) | 5303 | #define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) |
5267 | #define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) | 5304 | #define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) |
5268 | #define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) | 5305 | #define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) |
5269 | #define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) | 5306 | #define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) |
5270 | #define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) | 5307 | #define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) |
5271 | #define PCI_CONFIG_2_BAR2_64ENA (1L<<4) | 5308 | #define PCI_CONFIG_2_BAR2_64ENA (1L<<4) |
5309 | |||
5310 | #define PCI_PM_DATA_A 0x410 | ||
5311 | #define PCI_PM_DATA_B 0x414 | ||
5312 | #define PCI_ID_VAL1 0x434 | ||
5313 | #define PCI_ID_VAL2 0x438 | ||
5272 | 5314 | ||
5273 | #define PCI_PM_DATA_A (0x410) | ||
5274 | #define PCI_PM_DATA_B (0x414) | ||
5275 | #define PCI_ID_VAL1 (0x434) | ||
5276 | #define PCI_ID_VAL2 (0x438) | ||
5277 | 5315 | ||
5278 | #define MDIO_REG_BANK_CL73_IEEEB0 0x0 | 5316 | #define MDIO_REG_BANK_CL73_IEEEB0 0x0 |
5279 | #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 | 5317 | #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 |
@@ -5522,6 +5560,8 @@ The other bits are reserved and should be zero*/ | |||
5522 | #define MDIO_PMA_REG_GEN_CTRL 0xca10 | 5560 | #define MDIO_PMA_REG_GEN_CTRL 0xca10 |
5523 | #define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 | 5561 | #define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 |
5524 | #define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a | 5562 | #define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a |
5563 | #define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12 | ||
5564 | #define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13 | ||
5525 | #define MDIO_PMA_REG_ROM_VER1 0xca19 | 5565 | #define MDIO_PMA_REG_ROM_VER1 0xca19 |
5526 | #define MDIO_PMA_REG_ROM_VER2 0xca1a | 5566 | #define MDIO_PMA_REG_ROM_VER2 0xca1a |
5527 | #define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b | 5567 | #define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b |
@@ -5576,7 +5616,8 @@ The other bits are reserved and should be zero*/ | |||
5576 | #define MDIO_AN_REG_LINK_STATUS 0x8304 | 5616 | #define MDIO_AN_REG_LINK_STATUS 0x8304 |
5577 | #define MDIO_AN_REG_CL37_CL73 0x8370 | 5617 | #define MDIO_AN_REG_CL37_CL73 0x8370 |
5578 | #define MDIO_AN_REG_CL37_AN 0xffe0 | 5618 | #define MDIO_AN_REG_CL37_AN 0xffe0 |
5579 | #define MDIO_AN_REG_CL37_FD 0xffe4 | 5619 | #define MDIO_AN_REG_CL37_FC_LD 0xffe4 |
5620 | #define MDIO_AN_REG_CL37_FC_LP 0xffe5 | ||
5580 | 5621 | ||
5581 | 5622 | ||
5582 | #define IGU_FUNC_BASE 0x0400 | 5623 | #define IGU_FUNC_BASE 0x0400 |
@@ -5600,4 +5641,13 @@ The other bits are reserved and should be zero*/ | |||
5600 | #define IGU_INT_NOP 2 | 5641 | #define IGU_INT_NOP 2 |
5601 | #define IGU_INT_NOP2 3 | 5642 | #define IGU_INT_NOP2 3 |
5602 | 5643 | ||
5644 | #define COMMAND_REG_INT_ACK 0x0 | ||
5645 | #define COMMAND_REG_PROD_UPD 0x4 | ||
5646 | #define COMMAND_REG_ATTN_BITS_UPD 0x8 | ||
5647 | #define COMMAND_REG_ATTN_BITS_SET 0xc | ||
5648 | #define COMMAND_REG_ATTN_BITS_CLR 0x10 | ||
5649 | #define COMMAND_REG_COALESCE_NOW 0x14 | ||
5650 | #define COMMAND_REG_SIMD_MASK 0x18 | ||
5651 | #define COMMAND_REG_SIMD_NOMASK 0x1c | ||
5652 | |||
5603 | 5653 | ||
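The expanded PCICFG block above fills in the PCI command-register bits, the power-management capability header at 0x48, and the PM control/status register at 0x4c, and moves the GRC address/data window next to the other configuration-space offsets. Assuming PCICFG_PM_CSR_OFFSET really is the standard PM control/status register of this device, a minimal sketch of testing the PME status bit from a driver would be (bnx2x itself mostly reaches these registers through its GRC window, so this is illustrative only):

	#include <linux/pci.h>

	/* Hedged sketch: read the PM control/status word and test PME_STATUS.
	 * Offset and bit names follow the defines added above.
	 */
	static bool pme_status_pending(struct pci_dev *pdev)
	{
		u16 pmcsr;

		pci_read_config_word(pdev, PCICFG_PM_CSR_OFFSET, &pmcsr);
		return (pmcsr & PCICFG_PM_CSR_PME_STATUS) != 0;
	}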
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index a7800e559090..ec6b0af3d46b 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/version.h> | ||
30 | 29 | ||
31 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
32 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 19d32a227be1..453115acaad2 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1838,7 +1838,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, | |||
1838 | if ((le16_to_cpu(rfd->command) & cb_el) && | 1838 | if ((le16_to_cpu(rfd->command) & cb_el) && |
1839 | (RU_RUNNING == nic->ru_running)) | 1839 | (RU_RUNNING == nic->ru_running)) |
1840 | 1840 | ||
1841 | if (readb(&nic->csr->scb.status) & rus_no_res) | 1841 | if (ioread8(&nic->csr->scb.status) & rus_no_res) |
1842 | nic->ru_running = RU_SUSPENDED; | 1842 | nic->ru_running = RU_SUSPENDED; |
1843 | return -ENODATA; | 1843 | return -ENODATA; |
1844 | } | 1844 | } |
@@ -1861,7 +1861,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, | |||
1861 | if ((le16_to_cpu(rfd->command) & cb_el) && | 1861 | if ((le16_to_cpu(rfd->command) & cb_el) && |
1862 | (RU_RUNNING == nic->ru_running)) { | 1862 | (RU_RUNNING == nic->ru_running)) { |
1863 | 1863 | ||
1864 | if (readb(&nic->csr->scb.status) & rus_no_res) | 1864 | if (ioread8(&nic->csr->scb.status) & rus_no_res) |
1865 | nic->ru_running = RU_SUSPENDED; | 1865 | nic->ru_running = RU_SUSPENDED; |
1866 | } | 1866 | } |
1867 | 1867 | ||
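The e100.c change above swaps readb() for ioread8() when polling the SCB status byte. ioread8() works on the cookie returned by pci_iomap(), which can describe either a memory-mapped or an I/O-port BAR, so it stays correct regardless of how the CSR window was mapped. A self-contained sketch of that access pattern, with a hypothetical register offset, might look like:

	#include <linux/pci.h>
	#include <linux/io.h>

	#define SCB_STATUS_OFFSET 0x00	/* hypothetical offset of the status byte */

	/* Map BAR 0 with pci_iomap() and read one status byte through the
	 * iomap accessors, which handle both MMIO and port I/O.
	 */
	static int csr_status_example(struct pci_dev *pdev)
	{
		void __iomem *csr;
		u8 status;

		csr = pci_iomap(pdev, 0, 0);
		if (!csr)
			return -ENOMEM;

		status = ioread8(csr + SCB_STATUS_OFFSET);
		pci_iounmap(pdev, csr);

		return status;
	}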
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index b9f90a5d3d4d..213437d13154 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -208,7 +208,7 @@ struct e1000_option { | |||
208 | } r; | 208 | } r; |
209 | struct { /* list_option info */ | 209 | struct { /* list_option info */ |
210 | int nr; | 210 | int nr; |
211 | struct e1000_opt_list { int i; char *str; } *p; | 211 | const struct e1000_opt_list { int i; char *str; } *p; |
212 | } l; | 212 | } l; |
213 | } arg; | 213 | } arg; |
214 | }; | 214 | }; |
@@ -242,7 +242,7 @@ static int __devinit e1000_validate_option(unsigned int *value, | |||
242 | break; | 242 | break; |
243 | case list_option: { | 243 | case list_option: { |
244 | int i; | 244 | int i; |
245 | struct e1000_opt_list *ent; | 245 | const struct e1000_opt_list *ent; |
246 | 246 | ||
247 | for (i = 0; i < opt->arg.l.nr; i++) { | 247 | for (i = 0; i < opt->arg.l.nr; i++) { |
248 | ent = &opt->arg.l.p[i]; | 248 | ent = &opt->arg.l.p[i]; |
@@ -279,7 +279,9 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter); | |||
279 | 279 | ||
280 | void __devinit e1000_check_options(struct e1000_adapter *adapter) | 280 | void __devinit e1000_check_options(struct e1000_adapter *adapter) |
281 | { | 281 | { |
282 | struct e1000_option opt; | ||
282 | int bd = adapter->bd_number; | 283 | int bd = adapter->bd_number; |
284 | |||
283 | if (bd >= E1000_MAX_NIC) { | 285 | if (bd >= E1000_MAX_NIC) { |
284 | DPRINTK(PROBE, NOTICE, | 286 | DPRINTK(PROBE, NOTICE, |
285 | "Warning: no configuration for board #%i\n", bd); | 287 | "Warning: no configuration for board #%i\n", bd); |
@@ -287,19 +289,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
287 | } | 289 | } |
288 | 290 | ||
289 | { /* Transmit Descriptor Count */ | 291 | { /* Transmit Descriptor Count */ |
290 | struct e1000_option opt = { | 292 | struct e1000_tx_ring *tx_ring = adapter->tx_ring; |
293 | int i; | ||
294 | e1000_mac_type mac_type = adapter->hw.mac_type; | ||
295 | |||
296 | opt = (struct e1000_option) { | ||
291 | .type = range_option, | 297 | .type = range_option, |
292 | .name = "Transmit Descriptors", | 298 | .name = "Transmit Descriptors", |
293 | .err = "using default of " | 299 | .err = "using default of " |
294 | __MODULE_STRING(E1000_DEFAULT_TXD), | 300 | __MODULE_STRING(E1000_DEFAULT_TXD), |
295 | .def = E1000_DEFAULT_TXD, | 301 | .def = E1000_DEFAULT_TXD, |
296 | .arg = { .r = { .min = E1000_MIN_TXD }} | 302 | .arg = { .r = { |
303 | .min = E1000_MIN_TXD, | ||
304 | .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD | ||
305 | }} | ||
297 | }; | 306 | }; |
298 | struct e1000_tx_ring *tx_ring = adapter->tx_ring; | ||
299 | int i; | ||
300 | e1000_mac_type mac_type = adapter->hw.mac_type; | ||
301 | opt.arg.r.max = mac_type < e1000_82544 ? | ||
302 | E1000_MAX_TXD : E1000_MAX_82544_TXD; | ||
303 | 307 | ||
304 | if (num_TxDescriptors > bd) { | 308 | if (num_TxDescriptors > bd) { |
305 | tx_ring->count = TxDescriptors[bd]; | 309 | tx_ring->count = TxDescriptors[bd]; |
@@ -313,19 +317,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
313 | tx_ring[i].count = tx_ring->count; | 317 | tx_ring[i].count = tx_ring->count; |
314 | } | 318 | } |
315 | { /* Receive Descriptor Count */ | 319 | { /* Receive Descriptor Count */ |
316 | struct e1000_option opt = { | 320 | struct e1000_rx_ring *rx_ring = adapter->rx_ring; |
321 | int i; | ||
322 | e1000_mac_type mac_type = adapter->hw.mac_type; | ||
323 | |||
324 | opt = (struct e1000_option) { | ||
317 | .type = range_option, | 325 | .type = range_option, |
318 | .name = "Receive Descriptors", | 326 | .name = "Receive Descriptors", |
319 | .err = "using default of " | 327 | .err = "using default of " |
320 | __MODULE_STRING(E1000_DEFAULT_RXD), | 328 | __MODULE_STRING(E1000_DEFAULT_RXD), |
321 | .def = E1000_DEFAULT_RXD, | 329 | .def = E1000_DEFAULT_RXD, |
322 | .arg = { .r = { .min = E1000_MIN_RXD }} | 330 | .arg = { .r = { |
331 | .min = E1000_MIN_RXD, | ||
332 | .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD | ||
333 | }} | ||
323 | }; | 334 | }; |
324 | struct e1000_rx_ring *rx_ring = adapter->rx_ring; | ||
325 | int i; | ||
326 | e1000_mac_type mac_type = adapter->hw.mac_type; | ||
327 | opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : | ||
328 | E1000_MAX_82544_RXD; | ||
329 | 335 | ||
330 | if (num_RxDescriptors > bd) { | 336 | if (num_RxDescriptors > bd) { |
331 | rx_ring->count = RxDescriptors[bd]; | 337 | rx_ring->count = RxDescriptors[bd]; |
@@ -339,7 +345,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
339 | rx_ring[i].count = rx_ring->count; | 345 | rx_ring[i].count = rx_ring->count; |
340 | } | 346 | } |
341 | { /* Checksum Offload Enable/Disable */ | 347 | { /* Checksum Offload Enable/Disable */ |
342 | struct e1000_option opt = { | 348 | opt = (struct e1000_option) { |
343 | .type = enable_option, | 349 | .type = enable_option, |
344 | .name = "Checksum Offload", | 350 | .name = "Checksum Offload", |
345 | .err = "defaulting to Enabled", | 351 | .err = "defaulting to Enabled", |
@@ -363,7 +369,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
363 | { E1000_FC_FULL, "Flow Control Enabled" }, | 369 | { E1000_FC_FULL, "Flow Control Enabled" }, |
364 | { E1000_FC_DEFAULT, "Flow Control Hardware Default" }}; | 370 | { E1000_FC_DEFAULT, "Flow Control Hardware Default" }}; |
365 | 371 | ||
366 | struct e1000_option opt = { | 372 | opt = (struct e1000_option) { |
367 | .type = list_option, | 373 | .type = list_option, |
368 | .name = "Flow Control", | 374 | .name = "Flow Control", |
369 | .err = "reading default settings from EEPROM", | 375 | .err = "reading default settings from EEPROM", |
@@ -381,7 +387,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
381 | } | 387 | } |
382 | } | 388 | } |
383 | { /* Transmit Interrupt Delay */ | 389 | { /* Transmit Interrupt Delay */ |
384 | struct e1000_option opt = { | 390 | opt = (struct e1000_option) { |
385 | .type = range_option, | 391 | .type = range_option, |
386 | .name = "Transmit Interrupt Delay", | 392 | .name = "Transmit Interrupt Delay", |
387 | .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), | 393 | .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), |
@@ -399,7 +405,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
399 | } | 405 | } |
400 | } | 406 | } |
401 | { /* Transmit Absolute Interrupt Delay */ | 407 | { /* Transmit Absolute Interrupt Delay */ |
402 | struct e1000_option opt = { | 408 | opt = (struct e1000_option) { |
403 | .type = range_option, | 409 | .type = range_option, |
404 | .name = "Transmit Absolute Interrupt Delay", | 410 | .name = "Transmit Absolute Interrupt Delay", |
405 | .err = "using default of " __MODULE_STRING(DEFAULT_TADV), | 411 | .err = "using default of " __MODULE_STRING(DEFAULT_TADV), |
@@ -417,7 +423,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
417 | } | 423 | } |
418 | } | 424 | } |
419 | { /* Receive Interrupt Delay */ | 425 | { /* Receive Interrupt Delay */ |
420 | struct e1000_option opt = { | 426 | opt = (struct e1000_option) { |
421 | .type = range_option, | 427 | .type = range_option, |
422 | .name = "Receive Interrupt Delay", | 428 | .name = "Receive Interrupt Delay", |
423 | .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), | 429 | .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), |
@@ -435,7 +441,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
435 | } | 441 | } |
436 | } | 442 | } |
437 | { /* Receive Absolute Interrupt Delay */ | 443 | { /* Receive Absolute Interrupt Delay */ |
438 | struct e1000_option opt = { | 444 | opt = (struct e1000_option) { |
439 | .type = range_option, | 445 | .type = range_option, |
440 | .name = "Receive Absolute Interrupt Delay", | 446 | .name = "Receive Absolute Interrupt Delay", |
441 | .err = "using default of " __MODULE_STRING(DEFAULT_RADV), | 447 | .err = "using default of " __MODULE_STRING(DEFAULT_RADV), |
@@ -453,7 +459,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
453 | } | 459 | } |
454 | } | 460 | } |
455 | { /* Interrupt Throttling Rate */ | 461 | { /* Interrupt Throttling Rate */ |
456 | struct e1000_option opt = { | 462 | opt = (struct e1000_option) { |
457 | .type = range_option, | 463 | .type = range_option, |
458 | .name = "Interrupt Throttling Rate (ints/sec)", | 464 | .name = "Interrupt Throttling Rate (ints/sec)", |
459 | .err = "using default of " __MODULE_STRING(DEFAULT_ITR), | 465 | .err = "using default of " __MODULE_STRING(DEFAULT_ITR), |
@@ -497,7 +503,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
497 | } | 503 | } |
498 | } | 504 | } |
499 | { /* Smart Power Down */ | 505 | { /* Smart Power Down */ |
500 | struct e1000_option opt = { | 506 | opt = (struct e1000_option) { |
501 | .type = enable_option, | 507 | .type = enable_option, |
502 | .name = "PHY Smart Power Down", | 508 | .name = "PHY Smart Power Down", |
503 | .err = "defaulting to Disabled", | 509 | .err = "defaulting to Disabled", |
@@ -513,7 +519,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) | |||
513 | } | 519 | } |
514 | } | 520 | } |
515 | { /* Kumeran Lock Loss Workaround */ | 521 | { /* Kumeran Lock Loss Workaround */ |
516 | struct e1000_option opt = { | 522 | opt = (struct e1000_option) { |
517 | .type = enable_option, | 523 | .type = enable_option, |
518 | .name = "Kumeran Lock Loss Workaround", | 524 | .name = "Kumeran Lock Loss Workaround", |
519 | .err = "defaulting to Enabled", | 525 | .err = "defaulting to Enabled", |
@@ -578,16 +584,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter) | |||
578 | 584 | ||
579 | static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) | 585 | static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) |
580 | { | 586 | { |
587 | struct e1000_option opt; | ||
581 | unsigned int speed, dplx, an; | 588 | unsigned int speed, dplx, an; |
582 | int bd = adapter->bd_number; | 589 | int bd = adapter->bd_number; |
583 | 590 | ||
584 | { /* Speed */ | 591 | { /* Speed */ |
585 | struct e1000_opt_list speed_list[] = {{ 0, "" }, | 592 | static const struct e1000_opt_list speed_list[] = { |
586 | { SPEED_10, "" }, | 593 | { 0, "" }, |
587 | { SPEED_100, "" }, | 594 | { SPEED_10, "" }, |
588 | { SPEED_1000, "" }}; | 595 | { SPEED_100, "" }, |
596 | { SPEED_1000, "" }}; | ||
589 | 597 | ||
590 | struct e1000_option opt = { | 598 | opt = (struct e1000_option) { |
591 | .type = list_option, | 599 | .type = list_option, |
592 | .name = "Speed", | 600 | .name = "Speed", |
593 | .err = "parameter ignored", | 601 | .err = "parameter ignored", |
@@ -604,11 +612,12 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) | |||
604 | } | 612 | } |
605 | } | 613 | } |
606 | { /* Duplex */ | 614 | { /* Duplex */ |
607 | struct e1000_opt_list dplx_list[] = {{ 0, "" }, | 615 | static const struct e1000_opt_list dplx_list[] = { |
608 | { HALF_DUPLEX, "" }, | 616 | { 0, "" }, |
609 | { FULL_DUPLEX, "" }}; | 617 | { HALF_DUPLEX, "" }, |
618 | { FULL_DUPLEX, "" }}; | ||
610 | 619 | ||
611 | struct e1000_option opt = { | 620 | opt = (struct e1000_option) { |
612 | .type = list_option, | 621 | .type = list_option, |
613 | .name = "Duplex", | 622 | .name = "Duplex", |
614 | .err = "parameter ignored", | 623 | .err = "parameter ignored", |
@@ -637,7 +646,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) | |||
637 | "parameter ignored\n"); | 646 | "parameter ignored\n"); |
638 | adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; | 647 | adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; |
639 | } else { /* Autoneg */ | 648 | } else { /* Autoneg */ |
640 | struct e1000_opt_list an_list[] = | 649 | static const struct e1000_opt_list an_list[] = |
641 | #define AA "AutoNeg advertising " | 650 | #define AA "AutoNeg advertising " |
642 | {{ 0x01, AA "10/HD" }, | 651 | {{ 0x01, AA "10/HD" }, |
643 | { 0x02, AA "10/FD" }, | 652 | { 0x02, AA "10/FD" }, |
@@ -671,7 +680,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) | |||
671 | { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, | 680 | { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, |
672 | { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; | 681 | { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; |
673 | 682 | ||
674 | struct e1000_option opt = { | 683 | opt = (struct e1000_option) { |
675 | .type = list_option, | 684 | .type = list_option, |
676 | .name = "AutoNeg", | 685 | .name = "AutoNeg", |
677 | .err = "parameter ignored", | 686 | .err = "parameter ignored", |
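The e1000_check_options() rework above replaces the per-block `struct e1000_option opt = {...}` definitions with one `opt` declared at function scope that each block reinitializes through a compound literal; that lets the range maximum, which depends on mac_type, be set directly in the initializer instead of being patched into opt.arg.r.max afterwards, and it also makes the speed/duplex lists static const. A stripped-down, user-space sketch of the compound-literal pattern (made-up fields, not the driver's real struct):

	#include <stdio.h>

	struct option_desc {
		const char *name;
		int def;
		int min;
		int max;
	};

	int main(void)
	{
		struct option_desc opt;		/* one object, reused per option block */
		int max_txd = 4096;		/* stand-in for the mac_type dependent limit */

		opt = (struct option_desc) {	/* compound literal reinitializes every field */
			.name = "Transmit Descriptors",
			.def  = 256,
			.min  = 80,
			.max  = max_txd,
		};
		printf("%s: default %d, range %d-%d\n", opt.name, opt.def, opt.min, opt.max);

		opt = (struct option_desc) {	/* later block: unnamed fields are zeroed again */
			.name = "Receive Descriptors",
			.def  = 256,
			.min  = 80,
			.max  = max_txd,
		};
		printf("%s: default %d, range %d-%d\n", opt.name, opt.def, opt.min, opt.max);
		return 0;
	}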
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index f823b8ba5785..14b0e6cd3b8d 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h | |||
@@ -389,7 +389,7 @@ | |||
389 | 389 | ||
390 | /* Interrupt Cause Set */ | 390 | /* Interrupt Cause Set */ |
391 | #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ | 391 | #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ |
392 | #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ | 392 | #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ |
393 | #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ | 393 | #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ |
394 | 394 | ||
395 | /* Transmit Descriptor Control */ | 395 | /* Transmit Descriptor Control */ |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index cf57050d99d8..ac4e506b4f88 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -326,6 +326,7 @@ struct e1000_info { | |||
326 | #define FLAG_RX_CSUM_ENABLED (1 << 28) | 326 | #define FLAG_RX_CSUM_ENABLED (1 << 28) |
327 | #define FLAG_TSO_FORCE (1 << 29) | 327 | #define FLAG_TSO_FORCE (1 << 29) |
328 | #define FLAG_RX_RESTART_NOW (1 << 30) | 328 | #define FLAG_RX_RESTART_NOW (1 << 30) |
329 | #define FLAG_MSI_TEST_FAILED (1 << 31) | ||
329 | 330 | ||
330 | #define E1000_RX_DESC_PS(R, i) \ | 331 | #define E1000_RX_DESC_PS(R, i) \ |
331 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 332 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index cf9679f2b7c4..e21c9e0f3738 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -177,7 +177,7 @@ static u32 e1000_get_link(struct net_device *netdev) | |||
177 | u32 status; | 177 | u32 status; |
178 | 178 | ||
179 | status = er32(STATUS); | 179 | status = er32(STATUS); |
180 | return (status & E1000_STATUS_LU); | 180 | return (status & E1000_STATUS_LU) ? 1 : 0; |
181 | } | 181 | } |
182 | 182 | ||
183 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | 183 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) |
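The ethtool change above makes e1000_get_link() return a strict 0/1 instead of the raw E1000_STATUS_LU mask, so ETHTOOL_GLINK reports 1 for link-up rather than whatever bit position the mask happens to occupy. The normalization is trivial; a tiny sketch with a hypothetical status word:

	#include <stdio.h>

	#define STATUS_LINK_UP (1u << 1)	/* hypothetical link-up bit */

	/* Collapse a bitmask test to 0 or 1, as the get_link hook expects. */
	static unsigned int get_link(unsigned int status)
	{
		return (status & STATUS_LINK_UP) ? 1 : 0;	/* or: !!(status & STATUS_LINK_UP) */
	}

	int main(void)
	{
		printf("%u %u\n", get_link(0), get_link(STATUS_LINK_UP | 0x4));
		return 0;
	}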
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 05b0b2f9c54b..d266510c8a94 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -510,9 +510,12 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
510 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 510 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
511 | if (new_skb) { | 511 | if (new_skb) { |
512 | skb_reserve(new_skb, NET_IP_ALIGN); | 512 | skb_reserve(new_skb, NET_IP_ALIGN); |
513 | memcpy(new_skb->data - NET_IP_ALIGN, | 513 | skb_copy_to_linear_data_offset(new_skb, |
514 | skb->data - NET_IP_ALIGN, | 514 | -NET_IP_ALIGN, |
515 | length + NET_IP_ALIGN); | 515 | (skb->data - |
516 | NET_IP_ALIGN), | ||
517 | (length + | ||
518 | NET_IP_ALIGN)); | ||
516 | /* save the skb in buffer_info as good */ | 519 | /* save the skb in buffer_info as good */ |
517 | buffer_info->skb = skb; | 520 | buffer_info->skb = skb; |
518 | skb = new_skb; | 521 | skb = new_skb; |
@@ -1233,26 +1236,36 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
1233 | return IRQ_HANDLED; | 1236 | return IRQ_HANDLED; |
1234 | } | 1237 | } |
1235 | 1238 | ||
1239 | /** | ||
1240 | * e1000_request_irq - initialize interrupts | ||
1241 | * | ||
1242 | * Attempts to configure interrupts using the best available | ||
1243 | * capabilities of the hardware and kernel. | ||
1244 | **/ | ||
1236 | static int e1000_request_irq(struct e1000_adapter *adapter) | 1245 | static int e1000_request_irq(struct e1000_adapter *adapter) |
1237 | { | 1246 | { |
1238 | struct net_device *netdev = adapter->netdev; | 1247 | struct net_device *netdev = adapter->netdev; |
1239 | irq_handler_t handler = e1000_intr; | ||
1240 | int irq_flags = IRQF_SHARED; | 1248 | int irq_flags = IRQF_SHARED; |
1241 | int err; | 1249 | int err; |
1242 | 1250 | ||
1243 | if (!pci_enable_msi(adapter->pdev)) { | 1251 | if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) { |
1244 | adapter->flags |= FLAG_MSI_ENABLED; | 1252 | err = pci_enable_msi(adapter->pdev); |
1245 | handler = e1000_intr_msi; | 1253 | if (!err) { |
1246 | irq_flags = 0; | 1254 | adapter->flags |= FLAG_MSI_ENABLED; |
1255 | irq_flags = 0; | ||
1256 | } | ||
1247 | } | 1257 | } |
1248 | 1258 | ||
1249 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, | 1259 | err = request_irq(adapter->pdev->irq, |
1250 | netdev); | 1260 | ((adapter->flags & FLAG_MSI_ENABLED) ? |
1261 | &e1000_intr_msi : &e1000_intr), | ||
1262 | irq_flags, netdev->name, netdev); | ||
1251 | if (err) { | 1263 | if (err) { |
1252 | e_err("Unable to allocate %s interrupt (return: %d)\n", | 1264 | if (adapter->flags & FLAG_MSI_ENABLED) { |
1253 | adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err); | ||
1254 | if (adapter->flags & FLAG_MSI_ENABLED) | ||
1255 | pci_disable_msi(adapter->pdev); | 1265 | pci_disable_msi(adapter->pdev); |
1266 | adapter->flags &= ~FLAG_MSI_ENABLED; | ||
1267 | } | ||
1268 | e_err("Unable to allocate interrupt, Error: %d\n", err); | ||
1256 | } | 1269 | } |
1257 | 1270 | ||
1258 | return err; | 1271 | return err; |
@@ -2592,6 +2605,135 @@ err: | |||
2592 | } | 2605 | } |
2593 | 2606 | ||
2594 | /** | 2607 | /** |
2608 | * e1000_intr_msi_test - Interrupt Handler | ||
2609 | * @irq: interrupt number | ||
2610 | * @data: pointer to a network interface device structure | ||
2611 | **/ | ||
2612 | static irqreturn_t e1000_intr_msi_test(int irq, void *data) | ||
2613 | { | ||
2614 | struct net_device *netdev = data; | ||
2615 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
2616 | struct e1000_hw *hw = &adapter->hw; | ||
2617 | u32 icr = er32(ICR); | ||
2618 | |||
2619 | e_dbg("%s: icr is %08X\n", netdev->name, icr); | ||
2620 | if (icr & E1000_ICR_RXSEQ) { | ||
2621 | adapter->flags &= ~FLAG_MSI_TEST_FAILED; | ||
2622 | wmb(); | ||
2623 | } | ||
2624 | |||
2625 | return IRQ_HANDLED; | ||
2626 | } | ||
2627 | |||
2628 | /** | ||
2629 | * e1000_test_msi_interrupt - Returns 0 for successful test | ||
2630 | * @adapter: board private struct | ||
2631 | * | ||
2632 | * code flow taken from tg3.c | ||
2633 | **/ | ||
2634 | static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) | ||
2635 | { | ||
2636 | struct net_device *netdev = adapter->netdev; | ||
2637 | struct e1000_hw *hw = &adapter->hw; | ||
2638 | int err; | ||
2639 | |||
2640 | /* poll_enable hasn't been called yet, so don't need disable */ | ||
2641 | /* clear any pending events */ | ||
2642 | er32(ICR); | ||
2643 | |||
2644 | /* free the real vector and request a test handler */ | ||
2645 | e1000_free_irq(adapter); | ||
2646 | |||
2647 | /* Assume that the test fails, if it succeeds then the test | ||
2648 | * MSI irq handler will unset this flag */ | ||
2649 | adapter->flags |= FLAG_MSI_TEST_FAILED; | ||
2650 | |||
2651 | err = pci_enable_msi(adapter->pdev); | ||
2652 | if (err) | ||
2653 | goto msi_test_failed; | ||
2654 | |||
2655 | err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, | ||
2656 | netdev->name, netdev); | ||
2657 | if (err) { | ||
2658 | pci_disable_msi(adapter->pdev); | ||
2659 | goto msi_test_failed; | ||
2660 | } | ||
2661 | |||
2662 | wmb(); | ||
2663 | |||
2664 | e1000_irq_enable(adapter); | ||
2665 | |||
2666 | /* fire an unusual interrupt on the test handler */ | ||
2667 | ew32(ICS, E1000_ICS_RXSEQ); | ||
2668 | e1e_flush(); | ||
2669 | msleep(50); | ||
2670 | |||
2671 | e1000_irq_disable(adapter); | ||
2672 | |||
2673 | rmb(); | ||
2674 | |||
2675 | if (adapter->flags & FLAG_MSI_TEST_FAILED) { | ||
2676 | err = -EIO; | ||
2677 | e_info("MSI interrupt test failed!\n"); | ||
2678 | } | ||
2679 | |||
2680 | free_irq(adapter->pdev->irq, netdev); | ||
2681 | pci_disable_msi(adapter->pdev); | ||
2682 | |||
2683 | if (err == -EIO) | ||
2684 | goto msi_test_failed; | ||
2685 | |||
2686 | /* okay so the test worked, restore settings */ | ||
2687 | e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); | ||
2688 | msi_test_failed: | ||
2689 | /* restore the original vector, even if it failed */ | ||
2690 | e1000_request_irq(adapter); | ||
2691 | return err; | ||
2692 | } | ||
2693 | |||
2694 | /** | ||
2695 | * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored | ||
2696 | * @adapter: board private struct | ||
2697 | * | ||
2698 | * code flow taken from tg3.c, called with e1000 interrupts disabled. | ||
2699 | **/ | ||
2700 | static int e1000_test_msi(struct e1000_adapter *adapter) | ||
2701 | { | ||
2702 | int err; | ||
2703 | u16 pci_cmd; | ||
2704 | |||
2705 | if (!(adapter->flags & FLAG_MSI_ENABLED)) | ||
2706 | return 0; | ||
2707 | |||
2708 | /* disable SERR in case the MSI write causes a master abort */ | ||
2709 | pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); | ||
2710 | pci_write_config_word(adapter->pdev, PCI_COMMAND, | ||
2711 | pci_cmd & ~PCI_COMMAND_SERR); | ||
2712 | |||
2713 | err = e1000_test_msi_interrupt(adapter); | ||
2714 | |||
2715 | /* restore previous setting of command word */ | ||
2716 | pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); | ||
2717 | |||
2718 | /* success ! */ | ||
2719 | if (!err) | ||
2720 | return 0; | ||
2721 | |||
2722 | /* EIO means MSI test failed */ | ||
2723 | if (err != -EIO) | ||
2724 | return err; | ||
2725 | |||
2726 | /* back to INTx mode */ | ||
2727 | e_warn("MSI interrupt test failed, using legacy interrupt.\n"); | ||
2728 | |||
2729 | e1000_free_irq(adapter); | ||
2730 | |||
2731 | err = e1000_request_irq(adapter); | ||
2732 | |||
2733 | return err; | ||
2734 | } | ||
2735 | |||
2736 | /** | ||
2595 | * e1000_open - Called when a network interface is made active | 2737 | * e1000_open - Called when a network interface is made active |
2596 | * @netdev: network interface device structure | 2738 | * @netdev: network interface device structure |
2597 | * | 2739 | * |
@@ -2649,6 +2791,19 @@ static int e1000_open(struct net_device *netdev) | |||
2649 | if (err) | 2791 | if (err) |
2650 | goto err_req_irq; | 2792 | goto err_req_irq; |
2651 | 2793 | ||
2794 | /* | ||
2795 | * Work around PCIe errata with MSI interrupts causing some chipsets to | ||
2796 | * ignore e1000e MSI messages, which means we need to test our MSI | ||
2797 | * interrupt now | ||
2798 | */ | ||
2799 | { | ||
2800 | err = e1000_test_msi(adapter); | ||
2801 | if (err) { | ||
2802 | e_err("Interrupt allocation failed\n"); | ||
2803 | goto err_req_irq; | ||
2804 | } | ||
2805 | } | ||
2806 | |||
2652 | /* From here on the code is the same as e1000e_up() */ | 2807 | /* From here on the code is the same as e1000e_up() */ |
2653 | clear_bit(__E1000_DOWN, &adapter->state); | 2808 | clear_bit(__E1000_DOWN, &adapter->state); |
2654 | 2809 | ||
@@ -3055,7 +3210,7 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3055 | case SPEED_10: | 3210 | case SPEED_10: |
3056 | txb2b = 0; | 3211 | txb2b = 0; |
3057 | netdev->tx_queue_len = 10; | 3212 | netdev->tx_queue_len = 10; |
3058 | adapter->tx_timeout_factor = 14; | 3213 | adapter->tx_timeout_factor = 16; |
3059 | break; | 3214 | break; |
3060 | case SPEED_100: | 3215 | case SPEED_100: |
3061 | txb2b = 0; | 3216 | txb2b = 0; |
@@ -3721,7 +3876,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3721 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3876 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3722 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 3877 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
3723 | 3878 | ||
3724 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || | 3879 | if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || |
3725 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 3880 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
3726 | e_err("Invalid MTU setting\n"); | 3881 | e_err("Invalid MTU setting\n"); |
3727 | return -EINVAL; | 3882 | return -EINVAL; |
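The netdev.c hunks above add an MSI self-test modelled on tg3: e1000_open() now calls e1000_test_msi(), which temporarily installs e1000_intr_msi_test(), fires E1000_ICS_RXSEQ at the device, and falls back to legacy INTx if the handler never clears FLAG_MSI_TEST_FAILED, working around chipsets that silently drop e1000e MSI messages. A condensed sketch of that control flow (error paths trimmed, so it is not the driver's exact code) looks like:

	/* Condensed sketch of the MSI self-test added above; names come from
	 * the diff but error handling and restore paths are simplified.
	 */
	static int msi_self_test(struct e1000_adapter *adapter)
	{
		struct e1000_hw *hw = &adapter->hw;
		int err;

		adapter->flags |= FLAG_MSI_TEST_FAILED;		/* assume failure */

		err = pci_enable_msi(adapter->pdev);
		if (err)
			return err;

		err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
				  adapter->netdev->name, adapter->netdev);
		if (err) {
			pci_disable_msi(adapter->pdev);
			return err;
		}

		ew32(ICS, E1000_ICS_RXSEQ);	/* fire a test interrupt at ourselves */
		e1e_flush();
		msleep(50);			/* give the interrupt time to arrive */

		free_irq(adapter->pdev->irq, adapter->netdev);
		pci_disable_msi(adapter->pdev);

		/* e1000_intr_msi_test() clears the flag if the MSI was delivered */
		return (adapter->flags & FLAG_MSI_TEST_FAILED) ? -EIO : 0;
	}

In the real code, e1000_irq_enable()/e1000_irq_disable() bracket the test and the original interrupt vector is re-requested afterwards, as the hunk shows.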
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index 8effc3107f9a..ed912e023a72 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -324,14 +324,27 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) | |||
324 | adapter->itr = 20000; | 324 | adapter->itr = 20000; |
325 | break; | 325 | break; |
326 | default: | 326 | default: |
327 | e1000_validate_option(&adapter->itr, &opt, | ||
328 | adapter); | ||
329 | /* | 327 | /* |
330 | * save the setting, because the dynamic bits | 328 | * Save the setting, because the dynamic bits |
331 | * change itr. clear the lower two bits | 329 | * change itr. |
332 | * because they are used as control | ||
333 | */ | 330 | */ |
334 | adapter->itr_setting = adapter->itr & ~3; | 331 | if (e1000_validate_option(&adapter->itr, &opt, |
332 | adapter) && | ||
333 | (adapter->itr == 3)) { | ||
334 | /* | ||
335 | * In case of invalid user value, | ||
336 | * default to conservative mode. | ||
337 | */ | ||
338 | adapter->itr_setting = adapter->itr; | ||
339 | adapter->itr = 20000; | ||
340 | } else { | ||
341 | /* | ||
342 | * Clear the lower two bits because | ||
343 | * they are used as control. | ||
344 | */ | ||
345 | adapter->itr_setting = | ||
346 | adapter->itr & ~3; | ||
347 | } | ||
335 | break; | 348 | break; |
336 | } | 349 | } |
337 | } else { | 350 | } else { |
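The param.c hunk above changes how an out-of-range InterruptThrottleRate is handled: when e1000_validate_option() rejects the value it resets it to the default (3, dynamic conservative), and the driver now records that as itr_setting while running with an actual rate of 20000 interrupts/sec; for values that do pass validation, the two low bits are still masked off because they encode the dynamic modes. The arithmetic involved is just bit masking; a hypothetical example:

	#include <stdio.h>

	int main(void)
	{
		unsigned int itr = 75000;	/* example user-supplied rate (ints/sec) */
		unsigned int itr_setting;

		/* valid rate: the low two bits are control bits, not part of the rate */
		itr_setting = itr & ~3u;
		printf("setting=%u rate=%u\n", itr_setting, itr);

		/* invalid rate: validation resets the value to the default mode 3 */
		itr = 3;
		itr_setting = itr;		/* remember the conservative-dynamic mode */
		itr = 20000;			/* and run with a sane starting rate */
		printf("setting=%u rate=%u\n", itr_setting, itr);
		return 0;
	}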
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 053971e5fc94..331b86b01fa9 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5522,7 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5522 | if (id->driver_data & DEV_HAS_CHECKSUM) { | 5522 | if (id->driver_data & DEV_HAS_CHECKSUM) { |
5523 | np->rx_csum = 1; | 5523 | np->rx_csum = 1; |
5524 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; | 5524 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
5525 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; | 5525 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
5526 | dev->features |= NETIF_F_TSO; | 5526 | dev->features |= NETIF_F_TSO; |
5527 | } | 5527 | } |
5528 | 5528 | ||
@@ -5835,7 +5835,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5835 | 5835 | ||
5836 | dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", | 5836 | dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", |
5837 | dev->features & NETIF_F_HIGHDMA ? "highdma " : "", | 5837 | dev->features & NETIF_F_HIGHDMA ? "highdma " : "", |
5838 | dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ? | 5838 | dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? |
5839 | "csum " : "", | 5839 | "csum " : "", |
5840 | dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? | 5840 | dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? |
5841 | "vlan " : "", | 5841 | "vlan " : "", |
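The forcedeth hunks above downgrade the advertised checksum feature from NETIF_F_HW_CSUM to NETIF_F_IP_CSUM, i.e. the hardware is only trusted to checksum TCP/UDP over IPv4 rather than arbitrary protocols, and the probe printout is updated to test the same flag. Advertising offloads is a bitwise OR on dev->features; an illustrative helper (not from the driver itself):

	#include <linux/netdevice.h>

	/* Illustrative only: advertise IPv4-only checksum offload plus
	 * scatter/gather and TSO, matching what the probe path above selects.
	 */
	static void nv_set_offload_features(struct net_device *dev)
	{
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}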
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 9a51ec8293cc..9d461825bf4c 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -792,6 +792,10 @@ static int fs_enet_open(struct net_device *dev) | |||
792 | int r; | 792 | int r; |
793 | int err; | 793 | int err; |
794 | 794 | ||
795 | /* to initialize the fep->cur_rx,... */ | ||
796 | /* not doing this, will cause a crash in fs_enet_rx_napi */ | ||
797 | fs_init_bds(fep->ndev); | ||
798 | |||
795 | if (fep->fpi->use_napi) | 799 | if (fep->fpi->use_napi) |
796 | napi_enable(&fep->napi); | 800 | napi_enable(&fep->napi); |
797 | 801 | ||
@@ -1167,6 +1171,10 @@ static struct of_device_id fs_enet_match[] = { | |||
1167 | .compatible = "fsl,cpm1-scc-enet", | 1171 | .compatible = "fsl,cpm1-scc-enet", |
1168 | .data = (void *)&fs_scc_ops, | 1172 | .data = (void *)&fs_scc_ops, |
1169 | }, | 1173 | }, |
1174 | { | ||
1175 | .compatible = "fsl,cpm2-scc-enet", | ||
1176 | .data = (void *)&fs_scc_ops, | ||
1177 | }, | ||
1170 | #endif | 1178 | #endif |
1171 | #ifdef CONFIG_FS_ENET_HAS_FCC | 1179 | #ifdef CONFIG_FS_ENET_HAS_FCC |
1172 | { | 1180 | { |
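The fs_enet changes above call fs_init_bds() from open() so the RX/TX buffer descriptor state (fep->cur_rx and friends) is valid before NAPI can run, and add a "fsl,cpm2-scc-enet" compatible entry so CPM2 SCC Ethernet nodes bind to the same fs_scc_ops. Extending an of_device_id match table is just another array element; a hedged sketch (the table name here is invented):

	#include <linux/of.h>
	#include <linux/of_platform.h>

	/* Illustrative match table fragment: a new compatible string mapped
	 * onto the existing fs_scc_ops, terminated by an empty sentinel.
	 */
	static struct of_device_id example_enet_match[] = {
		{
			.compatible = "fsl,cpm2-scc-enet",
			.data = (void *)&fs_scc_ops,
		},
		{}
	};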
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index 029b3c7ef29c..22f50dd8b277 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include "fs_enet.h" | 47 | #include "fs_enet.h" |
48 | 48 | ||
49 | /*************************************************/ | 49 | /*************************************************/ |
50 | |||
51 | #if defined(CONFIG_CPM1) | 50 | #if defined(CONFIG_CPM1) |
52 | /* for a 8xx __raw_xxx's are sufficient */ | 51 | /* for a 8xx __raw_xxx's are sufficient */ |
53 | #define __fs_out32(addr, x) __raw_writel(x, addr) | 52 | #define __fs_out32(addr, x) __raw_writel(x, addr) |
@@ -62,6 +61,8 @@ | |||
62 | #define __fs_out16(addr, x) out_be16(addr, x) | 61 | #define __fs_out16(addr, x) out_be16(addr, x) |
63 | #define __fs_in32(addr) in_be32(addr) | 62 | #define __fs_in32(addr) in_be32(addr) |
64 | #define __fs_in16(addr) in_be16(addr) | 63 | #define __fs_in16(addr) in_be16(addr) |
64 | #define __fs_out8(addr, x) out_8(addr, x) | ||
65 | #define __fs_in8(addr) in_8(addr) | ||
65 | #endif | 66 | #endif |
66 | 67 | ||
67 | /* write, read, set bits, clear bits */ | 68 | /* write, read, set bits, clear bits */ |
@@ -262,8 +263,13 @@ static void restart(struct net_device *dev) | |||
262 | 263 | ||
263 | /* Initialize function code registers for big-endian. | 264 | /* Initialize function code registers for big-endian. |
264 | */ | 265 | */ |
266 | #ifndef CONFIG_NOT_COHERENT_CACHE | ||
267 | W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL); | ||
268 | W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL); | ||
269 | #else | ||
265 | W8(ep, sen_genscc.scc_rfcr, SCC_EB); | 270 | W8(ep, sen_genscc.scc_rfcr, SCC_EB); |
266 | W8(ep, sen_genscc.scc_tfcr, SCC_EB); | 271 | W8(ep, sen_genscc.scc_tfcr, SCC_EB); |
272 | #endif | ||
267 | 273 | ||
268 | /* Set maximum bytes per receive buffer. | 274 | /* Set maximum bytes per receive buffer. |
269 | * This appears to be an Ethernet frame size, not the buffer | 275 | * This appears to be an Ethernet frame size, not the buffer |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index ca6cf6ecb37b..4320a983a588 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -105,6 +105,7 @@ const char gfar_driver_version[] = "1.3"; | |||
105 | 105 | ||
106 | static int gfar_enet_open(struct net_device *dev); | 106 | static int gfar_enet_open(struct net_device *dev); |
107 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | 107 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); |
108 | static void gfar_reset_task(struct work_struct *work); | ||
108 | static void gfar_timeout(struct net_device *dev); | 109 | static void gfar_timeout(struct net_device *dev); |
109 | static int gfar_close(struct net_device *dev); | 110 | static int gfar_close(struct net_device *dev); |
110 | struct sk_buff *gfar_new_skb(struct net_device *dev); | 111 | struct sk_buff *gfar_new_skb(struct net_device *dev); |
@@ -134,9 +135,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int l | |||
134 | static void gfar_vlan_rx_register(struct net_device *netdev, | 135 | static void gfar_vlan_rx_register(struct net_device *netdev, |
135 | struct vlan_group *grp); | 136 | struct vlan_group *grp); |
136 | void gfar_halt(struct net_device *dev); | 137 | void gfar_halt(struct net_device *dev); |
137 | #ifdef CONFIG_PM | ||
138 | static void gfar_halt_nodisable(struct net_device *dev); | 138 | static void gfar_halt_nodisable(struct net_device *dev); |
139 | #endif | ||
140 | void gfar_start(struct net_device *dev); | 139 | void gfar_start(struct net_device *dev); |
141 | static void gfar_clear_exact_match(struct net_device *dev); | 140 | static void gfar_clear_exact_match(struct net_device *dev); |
142 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); | 141 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); |
@@ -211,6 +210,7 @@ static int gfar_probe(struct platform_device *pdev) | |||
211 | spin_lock_init(&priv->txlock); | 210 | spin_lock_init(&priv->txlock); |
212 | spin_lock_init(&priv->rxlock); | 211 | spin_lock_init(&priv->rxlock); |
213 | spin_lock_init(&priv->bflock); | 212 | spin_lock_init(&priv->bflock); |
213 | INIT_WORK(&priv->reset_task, gfar_reset_task); | ||
214 | 214 | ||
215 | platform_set_drvdata(pdev, dev); | 215 | platform_set_drvdata(pdev, dev); |
216 | 216 | ||
@@ -631,7 +631,6 @@ static void init_registers(struct net_device *dev) | |||
631 | } | 631 | } |
632 | 632 | ||
633 | 633 | ||
634 | #ifdef CONFIG_PM | ||
635 | /* Halt the receive and transmit queues */ | 634 | /* Halt the receive and transmit queues */ |
636 | static void gfar_halt_nodisable(struct net_device *dev) | 635 | static void gfar_halt_nodisable(struct net_device *dev) |
637 | { | 636 | { |
@@ -657,7 +656,6 @@ static void gfar_halt_nodisable(struct net_device *dev) | |||
657 | cpu_relax(); | 656 | cpu_relax(); |
658 | } | 657 | } |
659 | } | 658 | } |
660 | #endif | ||
661 | 659 | ||
662 | /* Halt the receive and transmit queues */ | 660 | /* Halt the receive and transmit queues */ |
663 | void gfar_halt(struct net_device *dev) | 661 | void gfar_halt(struct net_device *dev) |
@@ -666,6 +664,8 @@ void gfar_halt(struct net_device *dev) | |||
666 | struct gfar __iomem *regs = priv->regs; | 664 | struct gfar __iomem *regs = priv->regs; |
667 | u32 tempval; | 665 | u32 tempval; |
668 | 666 | ||
667 | gfar_halt_nodisable(dev); | ||
668 | |||
669 | /* Disable Rx and Tx */ | 669 | /* Disable Rx and Tx */ |
670 | tempval = gfar_read(®s->maccfg1); | 670 | tempval = gfar_read(®s->maccfg1); |
671 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | 671 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); |
@@ -1214,6 +1214,7 @@ static int gfar_close(struct net_device *dev) | |||
1214 | 1214 | ||
1215 | napi_disable(&priv->napi); | 1215 | napi_disable(&priv->napi); |
1216 | 1216 | ||
1217 | cancel_work_sync(&priv->reset_task); | ||
1217 | stop_gfar(dev); | 1218 | stop_gfar(dev); |
1218 | 1219 | ||
1219 | /* Disconnect from the PHY */ | 1220 | /* Disconnect from the PHY */ |
@@ -1328,13 +1329,16 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) | |||
1328 | return 0; | 1329 | return 0; |
1329 | } | 1330 | } |
1330 | 1331 | ||
1331 | /* gfar_timeout gets called when a packet has not been | 1332 | /* gfar_reset_task gets scheduled when a packet has not been |
1332 | * transmitted after a set amount of time. | 1333 | * transmitted after a set amount of time. |
1333 | * For now, assume that clearing out all the structures, and | 1334 | * For now, assume that clearing out all the structures, and |
1334 | * starting over will fix the problem. */ | 1335 | * starting over will fix the problem. |
1335 | static void gfar_timeout(struct net_device *dev) | 1336 | */ |
1337 | static void gfar_reset_task(struct work_struct *work) | ||
1336 | { | 1338 | { |
1337 | dev->stats.tx_errors++; | 1339 | struct gfar_private *priv = container_of(work, struct gfar_private, |
1340 | reset_task); | ||
1341 | struct net_device *dev = priv->dev; | ||
1338 | 1342 | ||
1339 | if (dev->flags & IFF_UP) { | 1343 | if (dev->flags & IFF_UP) { |
1340 | stop_gfar(dev); | 1344 | stop_gfar(dev); |
@@ -1344,6 +1348,14 @@ static void gfar_timeout(struct net_device *dev) | |||
1344 | netif_tx_schedule_all(dev); | 1348 | netif_tx_schedule_all(dev); |
1345 | } | 1349 | } |
1346 | 1350 | ||
1351 | static void gfar_timeout(struct net_device *dev) | ||
1352 | { | ||
1353 | struct gfar_private *priv = netdev_priv(dev); | ||
1354 | |||
1355 | dev->stats.tx_errors++; | ||
1356 | schedule_work(&priv->reset_task); | ||
1357 | } | ||
1358 | |||
1347 | /* Interrupt Handler for Transmit complete */ | 1359 | /* Interrupt Handler for Transmit complete */ |
1348 | static int gfar_clean_tx_ring(struct net_device *dev) | 1360 | static int gfar_clean_tx_ring(struct net_device *dev) |
1349 | { | 1361 | { |
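The gianfar rework above moves the heavyweight stop/restart sequence out of gfar_timeout(), which runs from the netdev watchdog in a context where it must not sleep, into a work item: the timeout handler now only bumps tx_errors and schedules priv->reset_task, INIT_WORK() wires the task up at probe time, and gfar_close() cancels it with cancel_work_sync(). A generic sketch of that deferral pattern, with hypothetical names:

	#include <linux/workqueue.h>
	#include <linux/netdevice.h>

	struct my_priv {
		struct net_device *dev;
		struct work_struct reset_task;
	};

	/* Runs in process context, where stopping and restarting the
	 * hardware (which may sleep) is allowed.
	 */
	static void my_reset_task(struct work_struct *work)
	{
		struct my_priv *priv = container_of(work, struct my_priv, reset_task);

		if (priv->dev->flags & IFF_UP) {
			/* stop_hw(priv->dev); start_hw(priv->dev); */
		}
	}

	/* tx_timeout style hook: keep it short and defer the real work. */
	static void my_tx_timeout(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		dev->stats.tx_errors++;
		schedule_work(&priv->reset_task);
	}

INIT_WORK(&priv->reset_task, my_reset_task) at probe time and cancel_work_sync(&priv->reset_task) before tearing the device down complete the pattern, mirroring the gianfar hunks.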
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index d59df98bd636..f46e9b63af13 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
@@ -756,6 +756,7 @@ struct gfar_private { | |||
756 | 756 | ||
757 | uint32_t msg_enable; | 757 | uint32_t msg_enable; |
758 | 758 | ||
759 | struct work_struct reset_task; | ||
759 | /* Network Statistics */ | 760 | /* Network Statistics */ |
760 | struct gfar_extra_stats extra_stats; | 761 | struct gfar_extra_stats extra_stats; |
761 | }; | 762 | }; |
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c index 5116f68e01b9..782c20170082 100644 --- a/drivers/net/gianfar_sysfs.c +++ b/drivers/net/gianfar_sysfs.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/version.h> | ||
37 | 36 | ||
38 | #include "gianfar.h" | 37 | #include "gianfar.h" |
39 | 38 | ||
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 2e720f26ca83..ccd9d9058f6d 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c | |||
@@ -663,9 +663,6 @@ static int emac_configure(struct emac_instance *dev) | |||
663 | if (emac_phy_gpcs(dev->phy.mode)) | 663 | if (emac_phy_gpcs(dev->phy.mode)) |
664 | emac_mii_reset_phy(&dev->phy); | 664 | emac_mii_reset_phy(&dev->phy); |
665 | 665 | ||
666 | /* Required for Pause packet support in EMAC */ | ||
667 | dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); | ||
668 | |||
669 | return 0; | 666 | return 0; |
670 | } | 667 | } |
671 | 668 | ||
@@ -1150,6 +1147,9 @@ static int emac_open(struct net_device *ndev) | |||
1150 | } else | 1147 | } else |
1151 | netif_carrier_on(dev->ndev); | 1148 | netif_carrier_on(dev->ndev); |
1152 | 1149 | ||
1150 | /* Required for Pause packet support in EMAC */ | ||
1151 | dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); | ||
1152 | |||
1153 | emac_configure(dev); | 1153 | emac_configure(dev); |
1154 | mal_poll_add(dev->mal, &dev->commac); | 1154 | mal_poll_add(dev->mal, &dev->commac); |
1155 | mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); | 1155 | mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index a03fe1fb61ca..c2d57f836088 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -904,8 +904,6 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
904 | unsigned long data_dma_addr; | 904 | unsigned long data_dma_addr; |
905 | 905 | ||
906 | desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; | 906 | desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; |
907 | data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | ||
908 | skb->len, DMA_TO_DEVICE); | ||
909 | 907 | ||
910 | if (skb->ip_summed == CHECKSUM_PARTIAL && | 908 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
911 | ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { | 909 | ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { |
@@ -924,6 +922,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
924 | buf[1] = 0; | 922 | buf[1] = 0; |
925 | } | 923 | } |
926 | 924 | ||
925 | data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | ||
926 | skb->len, DMA_TO_DEVICE); | ||
927 | if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { | 927 | if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { |
928 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 928 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
929 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); | 929 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); |
@@ -932,6 +932,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
932 | desc.fields.address = adapter->bounce_buffer_dma; | 932 | desc.fields.address = adapter->bounce_buffer_dma; |
933 | tx_map_failed++; | 933 | tx_map_failed++; |
934 | used_bounce = 1; | 934 | used_bounce = 1; |
935 | wmb(); | ||
935 | } else | 936 | } else |
936 | desc.fields.address = data_dma_addr; | 937 | desc.fields.address = data_dma_addr; |
937 | 938 | ||
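The ibmveth hunks are about ordering on the transmit path: the buffer is handed to dma_map_single() only after skb_checksum_help() has had its chance to rewrite the packet, and the bounce-buffer fallback issues a wmb() so the copied data is visible before the descriptor is posted. A hedged sketch of that ordering, with the mydev_* names and priv fields as placeholders:

static int mydev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct mydev_priv *priv = netdev_priv(netdev);
	dma_addr_t addr;

	/* 1. Finish every CPU-side modification of the packet first. */
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
		goto drop;

	/* 2. Only then map the (final) data for DMA. */
	addr = dma_map_single(priv->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dma_dev, addr)) {
		/* 3. Fall back to a pre-mapped bounce buffer, as ibmveth does. */
		skb_copy_from_linear_data(skb, priv->bounce_buf, skb->len);
		addr = priv->bounce_buf_dma;
		wmb();	/* copy must be visible before the descriptor is sent */
	}

	return mydev_post_tx_descriptor(priv, addr, skb);	/* assumed helper */

drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}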
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index bb823acc7443..f5e2e7235fcb 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c | |||
@@ -87,7 +87,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) | |||
87 | case E1000_DEV_ID_82576: | 87 | case E1000_DEV_ID_82576: |
88 | case E1000_DEV_ID_82576_FIBER: | 88 | case E1000_DEV_ID_82576_FIBER: |
89 | case E1000_DEV_ID_82576_SERDES: | 89 | case E1000_DEV_ID_82576_SERDES: |
90 | case E1000_DEV_ID_82576_QUAD_COPPER: | ||
91 | mac->type = e1000_82576; | 90 | mac->type = e1000_82576; |
92 | break; | 91 | break; |
93 | default: | 92 | default: |
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index a65ccc3095c3..99504a600a80 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h | |||
@@ -41,7 +41,6 @@ struct e1000_hw; | |||
41 | #define E1000_DEV_ID_82576 0x10C9 | 41 | #define E1000_DEV_ID_82576 0x10C9 |
42 | #define E1000_DEV_ID_82576_FIBER 0x10E6 | 42 | #define E1000_DEV_ID_82576_FIBER 0x10E6 |
43 | #define E1000_DEV_ID_82576_SERDES 0x10E7 | 43 | #define E1000_DEV_ID_82576_SERDES 0x10E7 |
44 | #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 | ||
45 | #define E1000_DEV_ID_82575EB_COPPER 0x10A7 | 44 | #define E1000_DEV_ID_82575EB_COPPER 0x10A7 |
46 | #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 | 45 | #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 |
47 | #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 | 46 | #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 |
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 11aee1309951..58906c984be9 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
@@ -373,13 +373,17 @@ static void igb_get_regs(struct net_device *netdev, | |||
373 | regs_buff[12] = rd32(E1000_EECD); | 373 | regs_buff[12] = rd32(E1000_EECD); |
374 | 374 | ||
375 | /* Interrupt */ | 375 | /* Interrupt */ |
376 | regs_buff[13] = rd32(E1000_EICR); | 376 | /* Reading EICS for EICR because they read the |
377 | * same but EICS does not clear on read */ | ||
378 | regs_buff[13] = rd32(E1000_EICS); | ||
377 | regs_buff[14] = rd32(E1000_EICS); | 379 | regs_buff[14] = rd32(E1000_EICS); |
378 | regs_buff[15] = rd32(E1000_EIMS); | 380 | regs_buff[15] = rd32(E1000_EIMS); |
379 | regs_buff[16] = rd32(E1000_EIMC); | 381 | regs_buff[16] = rd32(E1000_EIMC); |
380 | regs_buff[17] = rd32(E1000_EIAC); | 382 | regs_buff[17] = rd32(E1000_EIAC); |
381 | regs_buff[18] = rd32(E1000_EIAM); | 383 | regs_buff[18] = rd32(E1000_EIAM); |
382 | regs_buff[19] = rd32(E1000_ICR); | 384 | /* Reading ICS for ICR because they read the |
385 | * same but ICS does not clear on read */ | ||
386 | regs_buff[19] = rd32(E1000_ICS); | ||
383 | regs_buff[20] = rd32(E1000_ICS); | 387 | regs_buff[20] = rd32(E1000_ICS); |
384 | regs_buff[21] = rd32(E1000_IMS); | 388 | regs_buff[21] = rd32(E1000_IMS); |
385 | regs_buff[22] = rd32(E1000_IMC); | 389 | regs_buff[22] = rd32(E1000_IMC); |
@@ -1746,15 +1750,6 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, | |||
1746 | /* return success for non excluded adapter ports */ | 1750 | /* return success for non excluded adapter ports */ |
1747 | retval = 0; | 1751 | retval = 0; |
1748 | break; | 1752 | break; |
1749 | case E1000_DEV_ID_82576_QUAD_COPPER: | ||
1750 | /* quad port adapters only support WoL on port A */ | ||
1751 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { | ||
1752 | wol->supported = 0; | ||
1753 | break; | ||
1754 | } | ||
1755 | /* return success for non excluded adapter ports */ | ||
1756 | retval = 0; | ||
1757 | break; | ||
1758 | default: | 1753 | default: |
1759 | /* dual port cards only support WoL on port A from now on | 1754 | /* dual port cards only support WoL on port A from now on |
1760 | * unless it was enabled in the eeprom for port B | 1755 | * unless it was enabled in the eeprom for port B |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 8f66e15ec8d6..634c4c9d87be 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -61,7 +61,6 @@ static struct pci_device_id igb_pci_tbl[] = { | |||
61 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, | 61 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, |
62 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, | 62 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, |
63 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, | 63 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, |
64 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, | ||
65 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, | 64 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, |
66 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, | 65 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, |
67 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, | 66 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, |
@@ -521,7 +520,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) | |||
521 | adapter->msix_entries, | 520 | adapter->msix_entries, |
522 | numvecs); | 521 | numvecs); |
523 | if (err == 0) | 522 | if (err == 0) |
524 | return; | 523 | goto out; |
525 | 524 | ||
526 | igb_reset_interrupt_capability(adapter); | 525 | igb_reset_interrupt_capability(adapter); |
527 | 526 | ||
@@ -531,7 +530,7 @@ msi_only: | |||
531 | adapter->num_tx_queues = 1; | 530 | adapter->num_tx_queues = 1; |
532 | if (!pci_enable_msi(adapter->pdev)) | 531 | if (!pci_enable_msi(adapter->pdev)) |
533 | adapter->flags |= IGB_FLAG_HAS_MSI; | 532 | adapter->flags |= IGB_FLAG_HAS_MSI; |
534 | 533 | out: | |
535 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ | 534 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ |
536 | adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; | 535 | adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; |
537 | return; | 536 | return; |
@@ -1217,16 +1216,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1217 | if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) | 1216 | if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) |
1218 | adapter->eeprom_wol = 0; | 1217 | adapter->eeprom_wol = 0; |
1219 | break; | 1218 | break; |
1220 | case E1000_DEV_ID_82576_QUAD_COPPER: | ||
1221 | /* if quad port adapter, disable WoL on all but port A */ | ||
1222 | if (global_quad_port_a != 0) | ||
1223 | adapter->eeprom_wol = 0; | ||
1224 | else | ||
1225 | adapter->flags |= IGB_FLAG_QUAD_PORT_A; | ||
1226 | /* Reset for multiple quad port adapters */ | ||
1227 | if (++global_quad_port_a == 4) | ||
1228 | global_quad_port_a = 0; | ||
1229 | break; | ||
1230 | } | 1219 | } |
1231 | 1220 | ||
1232 | /* initialize the wol settings based on the eeprom settings */ | 1221 | /* initialize the wol settings based on the eeprom settings */ |
@@ -2290,7 +2279,9 @@ static void igb_watchdog_task(struct work_struct *work) | |||
2290 | struct igb_ring *tx_ring = adapter->tx_ring; | 2279 | struct igb_ring *tx_ring = adapter->tx_ring; |
2291 | struct e1000_mac_info *mac = &adapter->hw.mac; | 2280 | struct e1000_mac_info *mac = &adapter->hw.mac; |
2292 | u32 link; | 2281 | u32 link; |
2282 | u32 eics = 0; | ||
2293 | s32 ret_val; | 2283 | s32 ret_val; |
2284 | int i; | ||
2294 | 2285 | ||
2295 | if ((netif_carrier_ok(netdev)) && | 2286 | if ((netif_carrier_ok(netdev)) && |
2296 | (rd32(E1000_STATUS) & E1000_STATUS_LU)) | 2287 | (rd32(E1000_STATUS) & E1000_STATUS_LU)) |
@@ -2392,7 +2383,13 @@ link_up: | |||
2392 | } | 2383 | } |
2393 | 2384 | ||
2394 | /* Cause software interrupt to ensure rx ring is cleaned */ | 2385 | /* Cause software interrupt to ensure rx ring is cleaned */ |
2395 | wr32(E1000_ICS, E1000_ICS_RXDMT0); | 2386 | if (adapter->msix_entries) { |
2387 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
2388 | eics |= adapter->rx_ring[i].eims_value; | ||
2389 | wr32(E1000_EICS, eics); | ||
2390 | } else { | ||
2391 | wr32(E1000_ICS, E1000_ICS_RXDMT0); | ||
2392 | } | ||
2396 | 2393 | ||
2397 | /* Force detection of hung controller every watchdog period */ | 2394 | /* Force detection of hung controller every watchdog period */ |
2398 | tx_ring->detect_tx_hung = true; | 2395 | tx_ring->detect_tx_hung = true; |
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h index e0e718ab4c2e..dd9318f19497 100644 --- a/drivers/net/ipg.h +++ b/drivers/net/ipg.h | |||
@@ -7,7 +7,6 @@ | |||
7 | #ifndef __LINUX_IPG_H | 7 | #ifndef __LINUX_IPG_H |
8 | #define __LINUX_IPG_H | 8 | #define __LINUX_IPG_H |
9 | 9 | ||
10 | #include <linux/version.h> | ||
11 | #include <linux/module.h> | 10 | #include <linux/module.h> |
12 | 11 | ||
13 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
@@ -21,7 +20,6 @@ | |||
21 | #include <linux/etherdevice.h> | 20 | #include <linux/etherdevice.h> |
22 | #include <linux/init.h> | 21 | #include <linux/init.h> |
23 | #include <linux/skbuff.h> | 22 | #include <linux/skbuff.h> |
24 | #include <linux/version.h> | ||
25 | #include <asm/bitops.h> | 23 | #include <asm/bitops.h> |
26 | 24 | ||
27 | /* | 25 | /* |
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 2f38e847e2cd..f96358b641af 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -190,6 +190,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) | |||
190 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: | 190 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: |
191 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: | 191 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: |
192 | case IXGBE_DEV_ID_82598EB_CX4: | 192 | case IXGBE_DEV_ID_82598EB_CX4: |
193 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: | ||
193 | media_type = ixgbe_media_type_fiber; | 194 | media_type = ixgbe_media_type_fiber; |
194 | break; | 195 | break; |
195 | case IXGBE_DEV_ID_82598AT_DUAL_PORT: | 196 | case IXGBE_DEV_ID_82598AT_DUAL_PORT: |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e5f3da8468cc..53f41b649f03 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe"; | |||
48 | static const char ixgbe_driver_string[] = | 48 | static const char ixgbe_driver_string[] = |
49 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 49 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
50 | 50 | ||
51 | #define DRV_VERSION "1.3.18-k2" | 51 | #define DRV_VERSION "1.3.18-k4" |
52 | const char ixgbe_driver_version[] = DRV_VERSION; | 52 | const char ixgbe_driver_version[] = DRV_VERSION; |
53 | static const char ixgbe_copyright[] = | 53 | static const char ixgbe_copyright[] = |
54 | "Copyright (c) 1999-2007 Intel Corporation."; | 54 | "Copyright (c) 1999-2007 Intel Corporation."; |
@@ -72,6 +72,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = { | |||
72 | board_82598 }, | 72 | board_82598 }, |
73 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), | 73 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), |
74 | board_82598 }, | 74 | board_82598 }, |
75 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), | ||
76 | board_82598 }, | ||
75 | 77 | ||
76 | /* required last entry */ | 78 | /* required last entry */ |
77 | {0, } | 79 | {0, } |
@@ -1634,16 +1636,17 @@ static void ixgbe_set_multi(struct net_device *netdev) | |||
1634 | struct ixgbe_hw *hw = &adapter->hw; | 1636 | struct ixgbe_hw *hw = &adapter->hw; |
1635 | struct dev_mc_list *mc_ptr; | 1637 | struct dev_mc_list *mc_ptr; |
1636 | u8 *mta_list; | 1638 | u8 *mta_list; |
1637 | u32 fctrl; | 1639 | u32 fctrl, vlnctrl; |
1638 | int i; | 1640 | int i; |
1639 | 1641 | ||
1640 | /* Check for Promiscuous and All Multicast modes */ | 1642 | /* Check for Promiscuous and All Multicast modes */ |
1641 | 1643 | ||
1642 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | 1644 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
1645 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
1643 | 1646 | ||
1644 | if (netdev->flags & IFF_PROMISC) { | 1647 | if (netdev->flags & IFF_PROMISC) { |
1645 | fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | 1648 | fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
1646 | fctrl &= ~IXGBE_VLNCTRL_VFE; | 1649 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; |
1647 | } else { | 1650 | } else { |
1648 | if (netdev->flags & IFF_ALLMULTI) { | 1651 | if (netdev->flags & IFF_ALLMULTI) { |
1649 | fctrl |= IXGBE_FCTRL_MPE; | 1652 | fctrl |= IXGBE_FCTRL_MPE; |
@@ -1651,10 +1654,11 @@ static void ixgbe_set_multi(struct net_device *netdev) | |||
1651 | } else { | 1654 | } else { |
1652 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | 1655 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
1653 | } | 1656 | } |
1654 | fctrl |= IXGBE_VLNCTRL_VFE; | 1657 | vlnctrl |= IXGBE_VLNCTRL_VFE; |
1655 | } | 1658 | } |
1656 | 1659 | ||
1657 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | 1660 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); |
1661 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
1658 | 1662 | ||
1659 | if (netdev->mc_count) { | 1663 | if (netdev->mc_count) { |
1660 | mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); | 1664 | mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); |
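The ixgbe_set_multi() hunk is a read-modify-write fix: the VLAN filter enable bit (VFE) lives in VLNCTRL, not FCTRL, so flipping it in the cached FCTRL value never reached the hardware. A compressed sketch of the corrected pairing (ALLMULTI handling omitted for brevity; this is an illustration, not the full driver path):

static void mydev_update_rx_filters(struct ixgbe_hw *hw,
				    struct net_device *netdev)
{
	u32 fctrl   = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		fctrl   |=  IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;	/* VFE is owned by VLNCTRL */
	} else {
		fctrl   &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vlnctrl |=  IXGBE_VLNCTRL_VFE;
	}

	/* write each flag back through the register that actually owns it */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}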
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 1ad7cb9c25a8..c0282a223df3 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -39,6 +39,7 @@ | |||
39 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 | 39 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 |
40 | #define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 | 40 | #define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 |
41 | #define IXGBE_DEV_ID_82598EB_CX4 0x10DD | 41 | #define IXGBE_DEV_ID_82598EB_CX4 0x10DD |
42 | #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC | ||
42 | 43 | ||
43 | /* General Registers */ | 44 | /* General Registers */ |
44 | #define IXGBE_CTRL 0x00000 | 45 | #define IXGBE_CTRL 0x00000 |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 49f6bc036a92..3b43bfd85a0f 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -64,68 +64,6 @@ struct pcpu_lstats { | |||
64 | unsigned long bytes; | 64 | unsigned long bytes; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* KISS: just allocate small chunks and copy bits. | ||
68 | * | ||
69 | * So, in fact, this is documentation, explaining what we expect | ||
70 | * of largesending device modulo TCP checksum, which is ignored for loopback. | ||
71 | */ | ||
72 | |||
73 | #ifdef LOOPBACK_TSO | ||
74 | static void emulate_large_send_offload(struct sk_buff *skb) | ||
75 | { | ||
76 | struct iphdr *iph = ip_hdr(skb); | ||
77 | struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + | ||
78 | (iph->ihl * 4)); | ||
79 | unsigned int doffset = (iph->ihl + th->doff) * 4; | ||
80 | unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; | ||
81 | unsigned int offset = 0; | ||
82 | u32 seq = ntohl(th->seq); | ||
83 | u16 id = ntohs(iph->id); | ||
84 | |||
85 | while (offset + doffset < skb->len) { | ||
86 | unsigned int frag_size = min(mtu, skb->len - offset) - doffset; | ||
87 | struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC); | ||
88 | |||
89 | if (!nskb) | ||
90 | break; | ||
91 | skb_reserve(nskb, 32); | ||
92 | skb_set_mac_header(nskb, -ETH_HLEN); | ||
93 | skb_reset_network_header(nskb); | ||
94 | iph = ip_hdr(nskb); | ||
95 | skb_copy_to_linear_data(nskb, skb_network_header(skb), | ||
96 | doffset); | ||
97 | if (skb_copy_bits(skb, | ||
98 | doffset + offset, | ||
99 | nskb->data + doffset, | ||
100 | frag_size)) | ||
101 | BUG(); | ||
102 | skb_put(nskb, doffset + frag_size); | ||
103 | nskb->ip_summed = CHECKSUM_UNNECESSARY; | ||
104 | nskb->dev = skb->dev; | ||
105 | nskb->priority = skb->priority; | ||
106 | nskb->protocol = skb->protocol; | ||
107 | nskb->dst = dst_clone(skb->dst); | ||
108 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); | ||
109 | nskb->pkt_type = skb->pkt_type; | ||
110 | |||
111 | th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4); | ||
112 | iph->tot_len = htons(frag_size + doffset); | ||
113 | iph->id = htons(id); | ||
114 | iph->check = 0; | ||
115 | iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl); | ||
116 | th->seq = htonl(seq); | ||
117 | if (offset + doffset + frag_size < skb->len) | ||
118 | th->fin = th->psh = 0; | ||
119 | netif_rx(nskb); | ||
120 | offset += frag_size; | ||
121 | seq += frag_size; | ||
122 | id++; | ||
123 | } | ||
124 | |||
125 | dev_kfree_skb(skb); | ||
126 | } | ||
127 | #endif /* LOOPBACK_TSO */ | ||
128 | |||
129 | /* | 67 | /* |
130 | * The higher levels take care of making this non-reentrant (it's | 68 | * The higher levels take care of making this non-reentrant (it's |
131 | * called with bh's disabled). | 69 | * called with bh's disabled). |
@@ -137,9 +75,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
137 | skb_orphan(skb); | 75 | skb_orphan(skb); |
138 | 76 | ||
139 | skb->protocol = eth_type_trans(skb,dev); | 77 | skb->protocol = eth_type_trans(skb,dev); |
140 | #ifndef LOOPBACK_MUST_CHECKSUM | ||
141 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
142 | #endif | ||
143 | 78 | ||
144 | #ifdef LOOPBACK_TSO | 79 | #ifdef LOOPBACK_TSO |
145 | if (skb_is_gso(skb)) { | 80 | if (skb_is_gso(skb)) { |
@@ -234,9 +169,7 @@ static void loopback_setup(struct net_device *dev) | |||
234 | dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ | 169 | dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ |
235 | dev->flags = IFF_LOOPBACK; | 170 | dev->flags = IFF_LOOPBACK; |
236 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | 171 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
237 | #ifdef LOOPBACK_TSO | ||
238 | | NETIF_F_TSO | 172 | | NETIF_F_TSO |
239 | #endif | ||
240 | | NETIF_F_NO_CSUM | 173 | | NETIF_F_NO_CSUM |
241 | | NETIF_F_HIGHDMA | 174 | | NETIF_F_HIGHDMA |
242 | | NETIF_F_LLTX | 175 | | NETIF_F_LLTX |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 46819af3b062..0a18b9e96da1 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -55,7 +55,7 @@ | |||
55 | #include <asm/system.h> | 55 | #include <asm/system.h> |
56 | 56 | ||
57 | static char mv643xx_eth_driver_name[] = "mv643xx_eth"; | 57 | static char mv643xx_eth_driver_name[] = "mv643xx_eth"; |
58 | static char mv643xx_eth_driver_version[] = "1.2"; | 58 | static char mv643xx_eth_driver_version[] = "1.3"; |
59 | 59 | ||
60 | #define MV643XX_ETH_CHECKSUM_OFFLOAD_TX | 60 | #define MV643XX_ETH_CHECKSUM_OFFLOAD_TX |
61 | #define MV643XX_ETH_NAPI | 61 | #define MV643XX_ETH_NAPI |
@@ -474,11 +474,19 @@ static void rxq_refill(struct rx_queue *rxq) | |||
474 | /* | 474 | /* |
475 | * Reserve 2+14 bytes for an ethernet header (the | 475 | * Reserve 2+14 bytes for an ethernet header (the |
476 | * hardware automatically prepends 2 bytes of dummy | 476 | * hardware automatically prepends 2 bytes of dummy |
477 | * data to each received packet), 4 bytes for a VLAN | 477 | * data to each received packet), 16 bytes for up to |
478 | * header, and 4 bytes for the trailing FCS -- 24 | 478 | * four VLAN tags, and 4 bytes for the trailing FCS |
479 | * bytes total. | 479 | * -- 36 bytes total. |
480 | */ | 480 | */ |
481 | skb_size = mp->dev->mtu + 24; | 481 | skb_size = mp->dev->mtu + 36; |
482 | |||
483 | /* | ||
484 | * Make sure that the skb size is a multiple of 8 | ||
485 | * bytes, as the lower three bits of the receive | ||
486 | * descriptor's buffer size field are ignored by | ||
487 | * the hardware. | ||
488 | */ | ||
489 | skb_size = (skb_size + 7) & ~7; | ||
482 | 490 | ||
483 | skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1); | 491 | skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1); |
484 | if (skb == NULL) | 492 | if (skb == NULL) |
@@ -509,10 +517,8 @@ static void rxq_refill(struct rx_queue *rxq) | |||
509 | skb_reserve(skb, 2); | 517 | skb_reserve(skb, 2); |
510 | } | 518 | } |
511 | 519 | ||
512 | if (rxq->rx_desc_count != rxq->rx_ring_size) { | 520 | if (rxq->rx_desc_count != rxq->rx_ring_size) |
513 | rxq->rx_oom.expires = jiffies + (HZ / 10); | 521 | mod_timer(&rxq->rx_oom, jiffies + (HZ / 10)); |
514 | add_timer(&rxq->rx_oom); | ||
515 | } | ||
516 | 522 | ||
517 | spin_unlock_irqrestore(&mp->lock, flags); | 523 | spin_unlock_irqrestore(&mp->lock, flags); |
518 | } | 524 | } |
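The refill comment above works the buffer size out as MTU + 2 (hardware prepends two dummy bytes) + 14 (Ethernet header) + 16 (up to four stacked VLAN tags) + 4 (FCS) = MTU + 36, then rounds up to a multiple of 8 because the receive descriptor ignores the low three bits of its buffer-size field. A small worked sketch of that computation (the helper name is hypothetical):

static unsigned int mydev_rx_buf_size(unsigned int mtu)
{
	/* 2 (HW prepend) + 14 (ethernet) + 4 * 4 (VLAN tags) + 4 (FCS) = 36 */
	unsigned int size = mtu + 36;

	/*
	 * Round up to a multiple of 8, since the descriptor drops the low
	 * three bits: e.g. mtu 1500 -> 1536 (already aligned),
	 * mtu 1522 -> 1558 -> 1560.
	 */
	return (size + 7) & ~7;
}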
@@ -529,7 +535,7 @@ static int rxq_process(struct rx_queue *rxq, int budget) | |||
529 | int rx; | 535 | int rx; |
530 | 536 | ||
531 | rx = 0; | 537 | rx = 0; |
532 | while (rx < budget) { | 538 | while (rx < budget && rxq->rx_desc_count) { |
533 | struct rx_desc *rx_desc; | 539 | struct rx_desc *rx_desc; |
534 | unsigned int cmd_sts; | 540 | unsigned int cmd_sts; |
535 | struct sk_buff *skb; | 541 | struct sk_buff *skb; |
@@ -554,7 +560,7 @@ static int rxq_process(struct rx_queue *rxq, int budget) | |||
554 | spin_unlock_irqrestore(&mp->lock, flags); | 560 | spin_unlock_irqrestore(&mp->lock, flags); |
555 | 561 | ||
556 | dma_unmap_single(NULL, rx_desc->buf_ptr + 2, | 562 | dma_unmap_single(NULL, rx_desc->buf_ptr + 2, |
557 | mp->dev->mtu + 24, DMA_FROM_DEVICE); | 563 | rx_desc->buf_size, DMA_FROM_DEVICE); |
558 | rxq->rx_desc_count--; | 564 | rxq->rx_desc_count--; |
559 | rx++; | 565 | rx++; |
560 | 566 | ||
@@ -636,9 +642,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | |||
636 | txq_reclaim(mp->txq + i, 0); | 642 | txq_reclaim(mp->txq + i, 0); |
637 | 643 | ||
638 | if (netif_carrier_ok(mp->dev)) { | 644 | if (netif_carrier_ok(mp->dev)) { |
639 | spin_lock(&mp->lock); | 645 | spin_lock_irq(&mp->lock); |
640 | __txq_maybe_wake(mp->txq + mp->txq_primary); | 646 | __txq_maybe_wake(mp->txq + mp->txq_primary); |
641 | spin_unlock(&mp->lock); | 647 | spin_unlock_irq(&mp->lock); |
642 | } | 648 | } |
643 | } | 649 | } |
644 | #endif | 650 | #endif |
@@ -650,8 +656,6 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | |||
650 | 656 | ||
651 | if (rx < budget) { | 657 | if (rx < budget) { |
652 | netif_rx_complete(mp->dev, napi); | 658 | netif_rx_complete(mp->dev, napi); |
653 | wrl(mp, INT_CAUSE(mp->port_num), 0); | ||
654 | wrl(mp, INT_CAUSE_EXT(mp->port_num), 0); | ||
655 | wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); | 659 | wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); |
656 | } | 660 | } |
657 | 661 | ||
@@ -1796,6 +1800,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) | |||
1796 | */ | 1800 | */ |
1797 | #ifdef MV643XX_ETH_NAPI | 1801 | #ifdef MV643XX_ETH_NAPI |
1798 | if (int_cause & INT_RX) { | 1802 | if (int_cause & INT_RX) { |
1803 | wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX)); | ||
1799 | wrl(mp, INT_MASK(mp->port_num), 0x00000000); | 1804 | wrl(mp, INT_MASK(mp->port_num), 0x00000000); |
1800 | rdl(mp, INT_MASK(mp->port_num)); | 1805 | rdl(mp, INT_MASK(mp->port_num)); |
1801 | 1806 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index f1de38f8b742..d6524db321af 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/ethtool.h> | 56 | #include <linux/ethtool.h> |
57 | #include <linux/firmware.h> | 57 | #include <linux/firmware.h> |
58 | #include <linux/delay.h> | 58 | #include <linux/delay.h> |
59 | #include <linux/version.h> | ||
60 | #include <linux/timer.h> | 59 | #include <linux/timer.h> |
61 | #include <linux/vmalloc.h> | 60 | #include <linux/vmalloc.h> |
62 | #include <linux/crc32.h> | 61 | #include <linux/crc32.h> |
@@ -76,7 +75,7 @@ | |||
76 | #include "myri10ge_mcp.h" | 75 | #include "myri10ge_mcp.h" |
77 | #include "myri10ge_mcp_gen_header.h" | 76 | #include "myri10ge_mcp_gen_header.h" |
78 | 77 | ||
79 | #define MYRI10GE_VERSION_STR "1.3.99-1.347" | 78 | #define MYRI10GE_VERSION_STR "1.4.3-1.358" |
80 | 79 | ||
81 | MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); | 80 | MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); |
82 | MODULE_AUTHOR("Maintainer: help@myri.com"); | 81 | MODULE_AUTHOR("Maintainer: help@myri.com"); |
@@ -3548,7 +3547,11 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp) | |||
3548 | 3547 | ||
3549 | /* try to load the slice aware rss firmware */ | 3548 | /* try to load the slice aware rss firmware */ |
3550 | old_fw = mgp->fw_name; | 3549 | old_fw = mgp->fw_name; |
3551 | if (old_fw == myri10ge_fw_aligned) | 3550 | if (myri10ge_fw_name != NULL) { |
3551 | dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n", | ||
3552 | myri10ge_fw_name); | ||
3553 | mgp->fw_name = myri10ge_fw_name; | ||
3554 | } else if (old_fw == myri10ge_fw_aligned) | ||
3552 | mgp->fw_name = myri10ge_fw_rss_aligned; | 3555 | mgp->fw_name = myri10ge_fw_rss_aligned; |
3553 | else | 3556 | else |
3554 | mgp->fw_name = myri10ge_fw_rss_unaligned; | 3557 | mgp->fw_name = myri10ge_fw_rss_unaligned; |
diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 42443d697423..fa3ceca4e15c 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c | |||
@@ -118,7 +118,7 @@ bad_clone_list[] __initdata = { | |||
118 | {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ | 118 | {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ |
119 | {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ | 119 | {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ |
120 | {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ | 120 | {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ |
121 | #if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) | 121 | #ifdef CONFIG_MACH_TX49XX |
122 | {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ | 122 | {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ |
123 | #endif | 123 | #endif |
124 | {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ | 124 | {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ |
@@ -142,7 +142,7 @@ bad_clone_list[] __initdata = { | |||
142 | #if defined(CONFIG_PLAT_MAPPI) | 142 | #if defined(CONFIG_PLAT_MAPPI) |
143 | # define DCR_VAL 0x4b | 143 | # define DCR_VAL 0x4b |
144 | #elif defined(CONFIG_PLAT_OAKS32R) || \ | 144 | #elif defined(CONFIG_PLAT_OAKS32R) || \ |
145 | defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) | 145 | defined(CONFIG_MACH_TX49XX) |
146 | # define DCR_VAL 0x48 /* 8-bit mode */ | 146 | # define DCR_VAL 0x48 /* 8-bit mode */ |
147 | #else | 147 | #else |
148 | # define DCR_VAL 0x49 | 148 | # define DCR_VAL 0x49 |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 93a7b9b668d5..244ab49c4337 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <linux/in.h> | 45 | #include <linux/in.h> |
46 | #include <linux/tcp.h> | 46 | #include <linux/tcp.h> |
47 | #include <linux/skbuff.h> | 47 | #include <linux/skbuff.h> |
48 | #include <linux/version.h> | ||
49 | 48 | ||
50 | #include <linux/ethtool.h> | 49 | #include <linux/ethtool.h> |
51 | #include <linux/mii.h> | 50 | #include <linux/mii.h> |
@@ -66,8 +65,8 @@ | |||
66 | 65 | ||
67 | #define _NETXEN_NIC_LINUX_MAJOR 4 | 66 | #define _NETXEN_NIC_LINUX_MAJOR 4 |
68 | #define _NETXEN_NIC_LINUX_MINOR 0 | 67 | #define _NETXEN_NIC_LINUX_MINOR 0 |
69 | #define _NETXEN_NIC_LINUX_SUBVERSION 0 | 68 | #define _NETXEN_NIC_LINUX_SUBVERSION 11 |
70 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.0" | 69 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.11" |
71 | 70 | ||
72 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) | 71 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) |
73 | 72 | ||
@@ -1615,7 +1614,8 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter) | |||
1615 | 1614 | ||
1616 | 1615 | ||
1617 | int netxen_is_flash_supported(struct netxen_adapter *adapter); | 1616 | int netxen_is_flash_supported(struct netxen_adapter *adapter); |
1618 | int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]); | 1617 | int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); |
1618 | int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); | ||
1619 | extern void netxen_change_ringparam(struct netxen_adapter *adapter); | 1619 | extern void netxen_change_ringparam(struct netxen_adapter *adapter); |
1620 | extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, | 1620 | extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, |
1621 | int *valp); | 1621 | int *valp); |
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 4ad3e0844b99..b974ca0fc530 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
39 | #include <linux/netdevice.h> | 39 | #include <linux/netdevice.h> |
40 | #include <linux/ethtool.h> | 40 | #include <linux/ethtool.h> |
41 | #include <linux/version.h> | ||
42 | 41 | ||
43 | #include "netxen_nic.h" | 42 | #include "netxen_nic.h" |
44 | #include "netxen_nic_hw.h" | 43 | #include "netxen_nic_hw.h" |
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index e8e8d73f6ed7..e80f9e3e5973 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h | |||
@@ -32,8 +32,6 @@ | |||
32 | 32 | ||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
35 | #include <linux/version.h> | ||
36 | |||
37 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
38 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
39 | #include <linux/init.h> | 37 | #include <linux/init.h> |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 9aa20f961618..84978f80f396 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -733,31 +733,56 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, | |||
733 | return 0; | 733 | return 0; |
734 | } | 734 | } |
735 | 735 | ||
736 | int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]) | 736 | int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac) |
737 | { | 737 | { |
738 | __le32 *pmac = (__le32 *) & mac[0]; | 738 | __le32 *pmac = (__le32 *) mac; |
739 | u32 offset; | ||
739 | 740 | ||
740 | if (netxen_get_flash_block(adapter, | 741 | offset = NETXEN_USER_START + |
741 | NETXEN_USER_START + | 742 | offsetof(struct netxen_new_user_info, mac_addr) + |
742 | offsetof(struct netxen_new_user_info, | 743 | adapter->portnum * sizeof(u64); |
743 | mac_addr), | 744 | |
744 | FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) { | 745 | if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) |
745 | return -1; | 746 | return -1; |
746 | } | 747 | |
747 | if (*mac == cpu_to_le64(~0ULL)) { | 748 | if (*mac == cpu_to_le64(~0ULL)) { |
749 | |||
750 | offset = NETXEN_USER_START_OLD + | ||
751 | offsetof(struct netxen_user_old_info, mac_addr) + | ||
752 | adapter->portnum * sizeof(u64); | ||
753 | |||
748 | if (netxen_get_flash_block(adapter, | 754 | if (netxen_get_flash_block(adapter, |
749 | NETXEN_USER_START_OLD + | 755 | offset, sizeof(u64), pmac) == -1) |
750 | offsetof(struct netxen_user_old_info, | ||
751 | mac_addr), | ||
752 | FLASH_NUM_PORTS * sizeof(u64), | ||
753 | pmac) == -1) | ||
754 | return -1; | 756 | return -1; |
757 | |||
755 | if (*mac == cpu_to_le64(~0ULL)) | 758 | if (*mac == cpu_to_le64(~0ULL)) |
756 | return -1; | 759 | return -1; |
757 | } | 760 | } |
758 | return 0; | 761 | return 0; |
759 | } | 762 | } |
760 | 763 | ||
764 | int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac) | ||
765 | { | ||
766 | uint32_t crbaddr, mac_hi, mac_lo; | ||
767 | int pci_func = adapter->ahw.pci_func; | ||
768 | |||
769 | crbaddr = CRB_MAC_BLOCK_START + | ||
770 | (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); | ||
771 | |||
772 | adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4); | ||
773 | adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4); | ||
774 | |||
775 | mac_hi = cpu_to_le32(mac_hi); | ||
776 | mac_lo = cpu_to_le32(mac_lo); | ||
777 | |||
778 | if (pci_func & 1) | ||
779 | *mac = ((mac_lo >> 16) | ((u64)mac_hi << 16)); | ||
780 | else | ||
781 | *mac = ((mac_lo) | ((u64)mac_hi << 32)); | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
761 | #define CRB_WIN_LOCK_TIMEOUT 100000000 | 786 | #define CRB_WIN_LOCK_TIMEOUT 100000000 |
762 | 787 | ||
763 | static int crb_win_lock(struct netxen_adapter *adapter) | 788 | static int crb_win_lock(struct netxen_adapter *adapter) |
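Reading netxen_p3_get_mac_addr() above, each pair of PCI functions appears to share three 32-bit CAM RAM words (12 bytes, i.e. two 6-byte MACs): the even function's address occupies word 0 plus the low half of word 1, while the odd function's address takes the high half of word 1 plus word 2, which is why the odd case shifts by 16 instead of 32. A hedged sketch of that composition (the layout is inferred from the diff and the helper is hypothetical; callers consume only the low 6 bytes of the result):

/* Compose a 48-bit MAC from the two 32-bit words read for this function. */
static u64 mydev_compose_mac(u32 lo, u32 hi, int pci_func)
{
	if (pci_func & 1)
		return ((u64)lo >> 16) | ((u64)hi << 16);	/* word1[31:16] + word2 */
	else
		return (u64)lo | ((u64)hi << 32);		/* word0 + word1[15:0] */
}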
@@ -2183,10 +2208,10 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter) | |||
2183 | if (adapter->portnum == 0) { | 2208 | if (adapter->portnum == 0) { |
2184 | get_brd_name_by_type(board_info->board_type, brd_name); | 2209 | get_brd_name_by_type(board_info->board_type, brd_name); |
2185 | 2210 | ||
2186 | printk("NetXen %s Board S/N %s Chip id 0x%x\n", | 2211 | printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n", |
2187 | brd_name, serial_num, board_info->chip_id); | 2212 | brd_name, serial_num, adapter->ahw.revision_id); |
2188 | printk("NetXen Firmware version %d.%d.%d\n", fw_major, | 2213 | printk(KERN_INFO "NetXen Firmware version %d.%d.%d\n", |
2189 | fw_minor, fw_build); | 2214 | fw_major, fw_minor, fw_build); |
2190 | } | 2215 | } |
2191 | 2216 | ||
2192 | if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) < | 2217 | if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) < |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 519fc860e17e..5bba675d0504 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -1079,10 +1079,12 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter) | |||
1079 | 1079 | ||
1080 | void netxen_free_adapter_offload(struct netxen_adapter *adapter) | 1080 | void netxen_free_adapter_offload(struct netxen_adapter *adapter) |
1081 | { | 1081 | { |
1082 | int i; | 1082 | int i = 100; |
1083 | |||
1084 | if (!adapter->dummy_dma.addr) | ||
1085 | return; | ||
1083 | 1086 | ||
1084 | if (adapter->dummy_dma.addr) { | 1087 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { |
1085 | i = 100; | ||
1086 | do { | 1088 | do { |
1087 | if (dma_watchdog_shutdown_request(adapter) == 1) | 1089 | if (dma_watchdog_shutdown_request(adapter) == 1) |
1088 | break; | 1090 | break; |
@@ -1090,17 +1092,17 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter) | |||
1090 | if (dma_watchdog_shutdown_poll_result(adapter) == 1) | 1092 | if (dma_watchdog_shutdown_poll_result(adapter) == 1) |
1091 | break; | 1093 | break; |
1092 | } while (--i); | 1094 | } while (--i); |
1095 | } | ||
1093 | 1096 | ||
1094 | if (i) { | 1097 | if (i) { |
1095 | pci_free_consistent(adapter->pdev, | 1098 | pci_free_consistent(adapter->pdev, |
1096 | NETXEN_HOST_DUMMY_DMA_SIZE, | 1099 | NETXEN_HOST_DUMMY_DMA_SIZE, |
1097 | adapter->dummy_dma.addr, | 1100 | adapter->dummy_dma.addr, |
1098 | adapter->dummy_dma.phys_addr); | 1101 | adapter->dummy_dma.phys_addr); |
1099 | adapter->dummy_dma.addr = NULL; | 1102 | adapter->dummy_dma.addr = NULL; |
1100 | } else { | 1103 | } else { |
1101 | printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", | 1104 | printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", |
1102 | adapter->netdev->name); | 1105 | adapter->netdev->name); |
1103 | } | ||
1104 | } | 1106 | } |
1105 | } | 1107 | } |
1106 | 1108 | ||
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 7615c715e66e..32bb47adbe39 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -149,76 +149,18 @@ static uint32_t msi_tgt_status[8] = { | |||
149 | 149 | ||
150 | static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; | 150 | static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; |
151 | 151 | ||
152 | static void netxen_nic_disable_int(struct netxen_adapter *adapter) | 152 | static inline void netxen_nic_disable_int(struct netxen_adapter *adapter) |
153 | { | 153 | { |
154 | u32 mask = 0x7ff; | 154 | adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0); |
155 | int retries = 32; | ||
156 | int pci_fn = adapter->ahw.pci_func; | ||
157 | |||
158 | if (adapter->msi_mode != MSI_MODE_MULTIFUNC) | ||
159 | adapter->pci_write_normalize(adapter, | ||
160 | adapter->crb_intr_mask, 0); | ||
161 | |||
162 | if (adapter->intr_scheme != -1 && | ||
163 | adapter->intr_scheme != INTR_SCHEME_PERPORT) | ||
164 | adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); | ||
165 | |||
166 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { | ||
167 | do { | ||
168 | adapter->pci_write_immediate(adapter, | ||
169 | adapter->legacy_intr.tgt_status_reg, | ||
170 | 0xffffffff); | ||
171 | mask = adapter->pci_read_immediate(adapter, | ||
172 | ISR_INT_VECTOR); | ||
173 | if (!(mask & 0x80)) | ||
174 | break; | ||
175 | udelay(10); | ||
176 | } while (--retries); | ||
177 | |||
178 | if (!retries) { | ||
179 | printk(KERN_NOTICE "%s: Failed to disable interrupt\n", | ||
180 | netxen_nic_driver_name); | ||
181 | } | ||
182 | } else { | ||
183 | if (adapter->msi_mode == MSI_MODE_MULTIFUNC) { | ||
184 | adapter->pci_write_immediate(adapter, | ||
185 | msi_tgt_status[pci_fn], 0xffffffff); | ||
186 | } | ||
187 | } | ||
188 | } | 155 | } |
189 | 156 | ||
190 | static void netxen_nic_enable_int(struct netxen_adapter *adapter) | 157 | static inline void netxen_nic_enable_int(struct netxen_adapter *adapter) |
191 | { | 158 | { |
192 | u32 mask; | ||
193 | |||
194 | if (adapter->intr_scheme != -1 && | ||
195 | adapter->intr_scheme != INTR_SCHEME_PERPORT) { | ||
196 | switch (adapter->ahw.board_type) { | ||
197 | case NETXEN_NIC_GBE: | ||
198 | mask = 0x77b; | ||
199 | break; | ||
200 | case NETXEN_NIC_XGBE: | ||
201 | mask = 0x77f; | ||
202 | break; | ||
203 | default: | ||
204 | mask = 0x7ff; | ||
205 | break; | ||
206 | } | ||
207 | |||
208 | adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); | ||
209 | } | ||
210 | |||
211 | adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1); | 159 | adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1); |
212 | 160 | ||
213 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { | 161 | if (!NETXEN_IS_MSI_FAMILY(adapter)) |
214 | mask = 0xbff; | 162 | adapter->pci_write_immediate(adapter, |
215 | if (adapter->intr_scheme == INTR_SCHEME_PERPORT) | 163 | adapter->legacy_intr.tgt_mask_reg, 0xfbff); |
216 | adapter->pci_write_immediate(adapter, | ||
217 | adapter->legacy_intr.tgt_mask_reg, mask); | ||
218 | else | ||
219 | adapter->pci_write_normalize(adapter, | ||
220 | CRB_INT_VECTOR, 0); | ||
221 | } | ||
222 | } | 164 | } |
223 | 165 | ||
224 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | 166 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) |
@@ -501,6 +443,44 @@ static void netxen_init_msix_entries(struct netxen_adapter *adapter) | |||
501 | adapter->msix_entries[i].entry = i; | 443 | adapter->msix_entries[i].entry = i; |
502 | } | 444 | } |
503 | 445 | ||
446 | static int | ||
447 | netxen_read_mac_addr(struct netxen_adapter *adapter) | ||
448 | { | ||
449 | int i; | ||
450 | unsigned char *p; | ||
451 | __le64 mac_addr; | ||
452 | DECLARE_MAC_BUF(mac); | ||
453 | struct net_device *netdev = adapter->netdev; | ||
454 | struct pci_dev *pdev = adapter->pdev; | ||
455 | |||
456 | if (netxen_is_flash_supported(adapter) != 0) | ||
457 | return -EIO; | ||
458 | |||
459 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | ||
460 | if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) | ||
461 | return -EIO; | ||
462 | } else { | ||
463 | if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) | ||
464 | return -EIO; | ||
465 | } | ||
466 | |||
467 | p = (unsigned char *)&mac_addr; | ||
468 | for (i = 0; i < 6; i++) | ||
469 | netdev->dev_addr[i] = *(p + 5 - i); | ||
470 | |||
471 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); | ||
472 | |||
473 | /* set station address */ | ||
474 | |||
475 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
476 | dev_warn(&pdev->dev, "Bad MAC address %s.\n", | ||
477 | print_mac(mac, netdev->dev_addr)); | ||
478 | } else | ||
479 | adapter->macaddr_set(adapter, netdev->dev_addr); | ||
480 | |||
481 | return 0; | ||
482 | } | ||
483 | |||
504 | /* | 484 | /* |
505 | * netxen_nic_probe() | 485 | * netxen_nic_probe() |
506 | * | 486 | * |
@@ -529,10 +509,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
529 | unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0; | 509 | unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0; |
530 | int i = 0, err; | 510 | int i = 0, err; |
531 | int first_driver, first_boot; | 511 | int first_driver, first_boot; |
532 | __le64 mac_addr[FLASH_NUM_PORTS + 1]; | ||
533 | u32 val; | 512 | u32 val; |
534 | int pci_func_id = PCI_FUNC(pdev->devfn); | 513 | int pci_func_id = PCI_FUNC(pdev->devfn); |
535 | DECLARE_MAC_BUF(mac); | ||
536 | struct netxen_legacy_intr_set *legacy_intrp; | 514 | struct netxen_legacy_intr_set *legacy_intrp; |
537 | uint8_t revision_id; | 515 | uint8_t revision_id; |
538 | 516 | ||
@@ -545,6 +523,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
545 | return -ENODEV; | 523 | return -ENODEV; |
546 | } | 524 | } |
547 | 525 | ||
526 | if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { | ||
527 | printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x" | ||
528 | "will not be enabled.\n", | ||
529 | NX_P3_A0, NX_P3_B1); | ||
530 | return -ENODEV; | ||
531 | } | ||
532 | |||
548 | if ((err = pci_enable_device(pdev))) | 533 | if ((err = pci_enable_device(pdev))) |
549 | return err; | 534 | return err; |
550 | 535 | ||
@@ -898,34 +883,14 @@ request_msi: | |||
898 | goto err_out_disable_msi; | 883 | goto err_out_disable_msi; |
899 | 884 | ||
900 | init_timer(&adapter->watchdog_timer); | 885 | init_timer(&adapter->watchdog_timer); |
901 | adapter->ahw.linkup = 0; | ||
902 | adapter->watchdog_timer.function = &netxen_watchdog; | 886 | adapter->watchdog_timer.function = &netxen_watchdog; |
903 | adapter->watchdog_timer.data = (unsigned long)adapter; | 887 | adapter->watchdog_timer.data = (unsigned long)adapter; |
904 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); | 888 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); |
905 | INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); | 889 | INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); |
906 | 890 | ||
907 | if (netxen_is_flash_supported(adapter) == 0 && | 891 | err = netxen_read_mac_addr(adapter); |
908 | netxen_get_flash_mac_addr(adapter, mac_addr) == 0) { | 892 | if (err) |
909 | unsigned char *p; | 893 | dev_warn(&pdev->dev, "failed to read mac addr\n"); |
910 | |||
911 | p = (unsigned char *)&mac_addr[adapter->portnum]; | ||
912 | netdev->dev_addr[0] = *(p + 5); | ||
913 | netdev->dev_addr[1] = *(p + 4); | ||
914 | netdev->dev_addr[2] = *(p + 3); | ||
915 | netdev->dev_addr[3] = *(p + 2); | ||
916 | netdev->dev_addr[4] = *(p + 1); | ||
917 | netdev->dev_addr[5] = *(p + 0); | ||
918 | |||
919 | memcpy(netdev->perm_addr, netdev->dev_addr, | ||
920 | netdev->addr_len); | ||
921 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
922 | printk(KERN_ERR "%s: Bad MAC address %s.\n", | ||
923 | netxen_nic_driver_name, | ||
924 | print_mac(mac, netdev->dev_addr)); | ||
925 | } else { | ||
926 | adapter->macaddr_set(adapter, netdev->dev_addr); | ||
927 | } | ||
928 | } | ||
929 | 894 | ||
930 | netif_carrier_off(netdev); | 895 | netif_carrier_off(netdev); |
931 | netif_stop_queue(netdev); | 896 | netif_stop_queue(netdev); |
@@ -1000,6 +965,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
1000 | 965 | ||
1001 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { | 966 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { |
1002 | netxen_free_hw_resources(adapter); | 967 | netxen_free_hw_resources(adapter); |
968 | netxen_release_rx_buffers(adapter); | ||
1003 | netxen_free_sw_resources(adapter); | 969 | netxen_free_sw_resources(adapter); |
1004 | } | 970 | } |
1005 | 971 | ||
@@ -1069,6 +1035,15 @@ static int netxen_nic_open(struct net_device *netdev) | |||
1069 | goto err_out_free_sw; | 1035 | goto err_out_free_sw; |
1070 | } | 1036 | } |
1071 | 1037 | ||
1038 | if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) || | ||
1039 | (adapter->intr_scheme != INTR_SCHEME_PERPORT)) { | ||
1040 | printk(KERN_ERR "%s: Firmware interrupt scheme is " | ||
1041 | "incompatible with driver\n", | ||
1042 | netdev->name); | ||
1043 | adapter->driver_mismatch = 1; | ||
1044 | goto err_out_free_hw; | ||
1045 | } | ||
1046 | |||
1072 | if (adapter->fw_major < 4) { | 1047 | if (adapter->fw_major < 4) { |
1073 | adapter->crb_addr_cmd_producer = | 1048 | adapter->crb_addr_cmd_producer = |
1074 | crb_cmd_producer[adapter->portnum]; | 1049 | crb_cmd_producer[adapter->portnum]; |
@@ -1094,7 +1069,7 @@ static int netxen_nic_open(struct net_device *netdev) | |||
1094 | flags, netdev->name, adapter); | 1069 | flags, netdev->name, adapter); |
1095 | if (err) { | 1070 | if (err) { |
1096 | printk(KERN_ERR "request_irq failed with: %d\n", err); | 1071 | printk(KERN_ERR "request_irq failed with: %d\n", err); |
1097 | goto err_out_free_hw; | 1072 | goto err_out_free_rxbuf; |
1098 | } | 1073 | } |
1099 | 1074 | ||
1100 | adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; | 1075 | adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; |
@@ -1116,6 +1091,7 @@ static int netxen_nic_open(struct net_device *netdev) | |||
1116 | if (adapter->set_mtu) | 1091 | if (adapter->set_mtu) |
1117 | adapter->set_mtu(adapter, netdev->mtu); | 1092 | adapter->set_mtu(adapter, netdev->mtu); |
1118 | 1093 | ||
1094 | adapter->ahw.linkup = 0; | ||
1119 | mod_timer(&adapter->watchdog_timer, jiffies); | 1095 | mod_timer(&adapter->watchdog_timer, jiffies); |
1120 | 1096 | ||
1121 | napi_enable(&adapter->napi); | 1097 | napi_enable(&adapter->napi); |
@@ -1127,6 +1103,8 @@ static int netxen_nic_open(struct net_device *netdev) | |||
1127 | 1103 | ||
1128 | err_out_free_irq: | 1104 | err_out_free_irq: |
1129 | free_irq(adapter->irq, adapter); | 1105 | free_irq(adapter->irq, adapter); |
1106 | err_out_free_rxbuf: | ||
1107 | netxen_release_rx_buffers(adapter); | ||
1130 | err_out_free_hw: | 1108 | err_out_free_hw: |
1131 | netxen_free_hw_resources(adapter); | 1109 | netxen_free_hw_resources(adapter); |
1132 | err_out_free_sw: | 1110 | err_out_free_sw: |
@@ -1152,10 +1130,8 @@ static int netxen_nic_close(struct net_device *netdev) | |||
1152 | 1130 | ||
1153 | netxen_release_tx_buffers(adapter); | 1131 | netxen_release_tx_buffers(adapter); |
1154 | 1132 | ||
1155 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { | 1133 | FLUSH_SCHEDULED_WORK(); |
1156 | FLUSH_SCHEDULED_WORK(); | 1134 | del_timer_sync(&adapter->watchdog_timer); |
1157 | del_timer_sync(&adapter->watchdog_timer); | ||
1158 | } | ||
1159 | 1135 | ||
1160 | return 0; | 1136 | return 0; |
1161 | } | 1137 | } |
@@ -1458,7 +1434,8 @@ void netxen_watchdog_task(struct work_struct *work) | |||
1458 | 1434 | ||
1459 | netxen_nic_handle_phy_intr(adapter); | 1435 | netxen_nic_handle_phy_intr(adapter); |
1460 | 1436 | ||
1461 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | 1437 | if (netif_running(adapter->netdev)) |
1438 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
1462 | } | 1439 | } |
1463 | 1440 | ||
1464 | static void netxen_tx_timeout(struct net_device *netdev) | 1441 | static void netxen_tx_timeout(struct net_device *netdev) |
@@ -1518,18 +1495,9 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) | |||
1518 | return stats; | 1495 | return stats; |
1519 | } | 1496 | } |
1520 | 1497 | ||
1521 | static inline void | ||
1522 | netxen_handle_int(struct netxen_adapter *adapter) | ||
1523 | { | ||
1524 | netxen_nic_disable_int(adapter); | ||
1525 | napi_schedule(&adapter->napi); | ||
1526 | } | ||
1527 | |||
1528 | static irqreturn_t netxen_intr(int irq, void *data) | 1498 | static irqreturn_t netxen_intr(int irq, void *data) |
1529 | { | 1499 | { |
1530 | struct netxen_adapter *adapter = data; | 1500 | struct netxen_adapter *adapter = data; |
1531 | u32 our_int = 0; | ||
1532 | |||
1533 | u32 status = 0; | 1501 | u32 status = 0; |
1534 | 1502 | ||
1535 | status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); | 1503 | status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); |
@@ -1544,22 +1512,32 @@ static irqreturn_t netxen_intr(int irq, void *data) | |||
1544 | if (!ISR_LEGACY_INT_TRIGGERED(status)) | 1512 | if (!ISR_LEGACY_INT_TRIGGERED(status)) |
1545 | return IRQ_NONE; | 1513 | return IRQ_NONE; |
1546 | 1514 | ||
1547 | } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { | 1515 | } else { |
1516 | unsigned long our_int = 0; | ||
1548 | 1517 | ||
1549 | our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); | 1518 | our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); |
1519 | |||
1550 | /* not our interrupt */ | 1520 | /* not our interrupt */ |
1551 | if ((our_int & (0x80 << adapter->portnum)) == 0) | 1521 | if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) |
1552 | return IRQ_NONE; | 1522 | return IRQ_NONE; |
1553 | 1523 | ||
1554 | if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { | 1524 | /* claim interrupt */ |
1555 | /* claim interrupt */ | 1525 | adapter->pci_write_normalize(adapter, |
1556 | adapter->pci_write_normalize(adapter, | 1526 | CRB_INT_VECTOR, (our_int & 0xffffffff)); |
1557 | CRB_INT_VECTOR, | ||
1558 | our_int & ~((u32)(0x80 << adapter->portnum))); | ||
1559 | } | ||
1560 | } | 1527 | } |
1561 | 1528 | ||
1562 | netxen_handle_int(adapter); | 1529 | /* clear interrupt */ |
1530 | if (adapter->fw_major < 4) | ||
1531 | netxen_nic_disable_int(adapter); | ||
1532 | |||
1533 | adapter->pci_write_immediate(adapter, | ||
1534 | adapter->legacy_intr.tgt_status_reg, | ||
1535 | 0xffffffff); | ||
1536 | /* read twice to ensure write is flushed */ | ||
1537 | adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); | ||
1538 | adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); | ||
1539 | |||
1540 | napi_schedule(&adapter->napi); | ||
1563 | 1541 | ||
1564 | return IRQ_HANDLED; | 1542 | return IRQ_HANDLED; |
1565 | } | 1543 | } |
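The reworked legacy handler also shows the posted-write flush idiom: after writing the target status register to ack the interrupt, it reads ISR_INT_VECTOR back (twice, per the comment) so the write is pushed out of the PCI posting buffers before NAPI is scheduled. A generic sketch of the idiom, with MYDEV_INT_STATUS and the priv fields as placeholder names:

static irqreturn_t mydev_intr(int irq, void *data)
{
	struct mydev_priv *priv = data;

	/* ack the interrupt source */
	writel(0xffffffff, priv->ioaddr + MYDEV_INT_STATUS);

	/* a readback flushes the posted write before the handler returns */
	(void)readl(priv->ioaddr + MYDEV_INT_STATUS);

	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}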
@@ -1568,7 +1546,11 @@ static irqreturn_t netxen_msi_intr(int irq, void *data) | |||
1568 | { | 1546 | { |
1569 | struct netxen_adapter *adapter = data; | 1547 | struct netxen_adapter *adapter = data; |
1570 | 1548 | ||
1571 | netxen_handle_int(adapter); | 1549 | /* clear interrupt */ |
1550 | adapter->pci_write_immediate(adapter, | ||
1551 | msi_tgt_status[adapter->ahw.pci_func], 0xffffffff); | ||
1552 | |||
1553 | napi_schedule(&adapter->napi); | ||
1572 | return IRQ_HANDLED; | 1554 | return IRQ_HANDLED; |
1573 | } | 1555 | } |
1574 | 1556 | ||
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h index 83e5ee57bfef..b293adcc95ab 100644 --- a/drivers/net/netxen/netxen_nic_phan_reg.h +++ b/drivers/net/netxen/netxen_nic_phan_reg.h | |||
@@ -125,6 +125,8 @@ | |||
125 | #define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4) | 125 | #define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4) |
126 | #define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8) | 126 | #define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8) |
127 | 127 | ||
128 | #define CRB_MAC_BLOCK_START NETXEN_CAM_RAM(0x1c0) | ||
129 | |||
128 | /* | 130 | /* |
129 | * capabilities register, can be used to selectively enable/disable features | 131 | * capabilities register, can be used to selectively enable/disable features |
130 | * for backward compability | 132 | * for backward compability |
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index b35d79449500..88f03c9e9403 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/err.h> | 46 | #include <linux/err.h> |
47 | #include <linux/module.h> | 47 | #include <linux/module.h> |
48 | #include <linux/kernel.h> | 48 | #include <linux/kernel.h> |
49 | #include <linux/version.h> | ||
50 | #include <linux/init.h> | 49 | #include <linux/init.h> |
51 | #include <linux/types.h> | 50 | #include <linux/types.h> |
52 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index f9298827a76c..ff175e8f36b2 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -61,7 +61,6 @@ | |||
61 | */ | 61 | */ |
62 | 62 | ||
63 | #include <linux/module.h> | 63 | #include <linux/module.h> |
64 | #include <linux/version.h> | ||
65 | #include <linux/string.h> | 64 | #include <linux/string.h> |
66 | #include <linux/list.h> | 65 | #include <linux/list.h> |
67 | #include <asm/uaccess.h> | 66 | #include <asm/uaccess.h> |
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 6531ff565c54..5d86281d9363 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/version.h> | ||
28 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
29 | #include <linux/string.h> | 28 | #include <linux/string.h> |
30 | #include <linux/timer.h> | 29 | #include <linux/timer.h> |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index a3e3895e5032..0f6f9747d255 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2792,7 +2792,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
2792 | pkt_size, PCI_DMA_FROMDEVICE); | 2792 | pkt_size, PCI_DMA_FROMDEVICE); |
2793 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); | 2793 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); |
2794 | } else { | 2794 | } else { |
2795 | pci_unmap_single(pdev, addr, pkt_size, | 2795 | pci_unmap_single(pdev, addr, tp->rx_buf_sz, |
2796 | PCI_DMA_FROMDEVICE); | 2796 | PCI_DMA_FROMDEVICE); |
2797 | tp->Rx_skbuff[entry] = NULL; | 2797 | tp->Rx_skbuff[entry] = NULL; |
2798 | } | 2798 | } |
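The r8169 change above is the usual DMA-API rule that an unmap must mirror the mapping: the receive buffer was mapped with tp->rx_buf_sz, so tearing it down with the smaller pkt_size leaves part of the mapping behind on IOMMU-backed systems. A minimal sketch of the pairing, with illustrative variable names rather than the driver's own:

        /* map the whole receive buffer once ... */
        dma_addr_t addr = pci_map_single(pdev, skb->data, rx_buf_sz,
                                         PCI_DMA_FROMDEVICE);

        /* ... and always unmap with the same size that was mapped,
         * regardless of how large the received frame turned out to be */
        pci_unmap_single(pdev, addr, rx_buf_sz, PCI_DMA_FROMDEVICE);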
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 25e62cf58d3a..1c370e6aa641 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -20,7 +20,6 @@ | |||
20 | * the file called "COPYING". | 20 | * the file called "COPYING". |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/version.h> | ||
24 | #include <linux/init.h> | 23 | #include <linux/init.h> |
25 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
26 | #include <linux/etherdevice.h> | 25 | #include <linux/etherdevice.h> |
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c index 889f98724610..a85efcfd9d0e 100644 --- a/drivers/net/skfp/ess.c +++ b/drivers/net/skfp/ess.c | |||
@@ -510,7 +510,7 @@ static void ess_send_response(struct s_smc *smc, struct smt_header *sm, | |||
510 | chg->path.para.p_type = SMT_P320B ; | 510 | chg->path.para.p_type = SMT_P320B ; |
511 | chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; | 511 | chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; |
512 | chg->path.mib_index = SBAPATHINDEX ; | 512 | chg->path.mib_index = SBAPATHINDEX ; |
513 | chg->path.path_pad = (u_short)NULL ; | 513 | chg->path.path_pad = 0; |
514 | chg->path.path_index = PRIMARY_RING ; | 514 | chg->path.path_index = PRIMARY_RING ; |
515 | 515 | ||
516 | /* set P320F */ | 516 | /* set P320F */ |
@@ -606,7 +606,7 @@ static void ess_send_alc_req(struct s_smc *smc) | |||
606 | req->path.para.p_type = SMT_P320B ; | 606 | req->path.para.p_type = SMT_P320B ; |
607 | req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; | 607 | req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; |
608 | req->path.mib_index = SBAPATHINDEX ; | 608 | req->path.mib_index = SBAPATHINDEX ; |
609 | req->path.path_pad = (u_short)NULL ; | 609 | req->path.path_pad = 0; |
610 | req->path.path_index = PRIMARY_RING ; | 610 | req->path.path_index = PRIMARY_RING ; |
611 | 611 | ||
612 | /* set P0017 */ | 612 | /* set P0017 */ |
@@ -636,7 +636,7 @@ static void ess_send_alc_req(struct s_smc *smc) | |||
636 | /* set P19 */ | 636 | /* set P19 */ |
637 | req->a_addr.para.p_type = SMT_P0019 ; | 637 | req->a_addr.para.p_type = SMT_P0019 ; |
638 | req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ; | 638 | req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ; |
639 | req->a_addr.sba_pad = (u_short)NULL ; | 639 | req->a_addr.sba_pad = 0; |
640 | req->a_addr.alloc_addr = null_addr ; | 640 | req->a_addr.alloc_addr = null_addr ; |
641 | 641 | ||
642 | /* set P1A */ | 642 | /* set P1A */ |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7d29edcd40b4..e24b25ca1c69 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <linux/crc32.h> | 25 | #include <linux/crc32.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/version.h> | ||
28 | #include <linux/module.h> | 27 | #include <linux/module.h> |
29 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
30 | #include <linux/dma-mapping.h> | 29 | #include <linux/dma-mapping.h> |
@@ -666,11 +665,16 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) | |||
666 | 665 | ||
667 | if (hw->chip_id != CHIP_ID_YUKON_EC) { | 666 | if (hw->chip_id != CHIP_ID_YUKON_EC) { |
668 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | 667 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { |
669 | ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); | 668 | /* select page 2 to access MAC control register */ |
669 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); | ||
670 | 670 | ||
671 | ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); | ||
671 | /* enable Power Down */ | 672 | /* enable Power Down */ |
672 | ctrl |= PHY_M_PC_POW_D_ENA; | 673 | ctrl |= PHY_M_PC_POW_D_ENA; |
673 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); | 674 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); |
675 | |||
676 | /* set page register back to 0 */ | ||
677 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); | ||
674 | } | 678 | } |
675 | 679 | ||
676 | /* set IEEE compatible Power Down Mode (dev. #4.99) */ | 680 | /* set IEEE compatible Power Down Mode (dev. #4.99) */ |
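The sky2 hunk brackets the Yukon-EC-U power-down write with a PHY page select: per the hunk's own comment, PHY_MARV_PHY_CTRL reaches the MAC control register only while page 2 is selected, and the page register has to be put back to 0 so later PHY accesses land where they expect. The read-modify-write, condensed from the hunk:

        u16 ctrl;

        gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);    /* select page 2 */

        ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
        ctrl |= PHY_M_PC_POW_D_ENA;                     /* enable power down */
        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

        gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);    /* restore page 0 */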
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 2040965d7724..24768c10cadb 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -2255,7 +2255,7 @@ static int smc_drv_remove(struct platform_device *pdev) | |||
2255 | 2255 | ||
2256 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); | 2256 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); |
2257 | if (!res) | 2257 | if (!res) |
2258 | platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2258 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2259 | release_mem_region(res->start, SMC_IO_EXTENT); | 2259 | release_mem_region(res->start, SMC_IO_EXTENT); |
2260 | 2260 | ||
2261 | free_netdev(ndev); | 2261 | free_netdev(ndev); |
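The smc91x change is a dropped-assignment fix: the fallback lookup computed a resource and discarded it, so release_mem_region() could run on whatever res happened to hold (NULL when the named resource was absent). The intended fallback shape, as in the corrected line:

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
        if (!res)
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* res now reflects whichever lookup succeeded */
        release_mem_region(res->start, SMC_IO_EXTENT);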
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h index c66dfc9ec1ec..7db48f1cd949 100644 --- a/drivers/net/tehuti.h +++ b/drivers/net/tehuti.h | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/tty.h> | 28 | #include <linux/tty.h> |
29 | #include <linux/if_vlan.h> | 29 | #include <linux/if_vlan.h> |
30 | #include <linux/version.h> | ||
31 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
32 | #include <linux/vmalloc.h> | 31 | #include <linux/vmalloc.h> |
33 | #include <asm/byteorder.h> | 32 | #include <asm/byteorder.h> |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index d2439b85a790..71d2c5cfdad9 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -66,8 +66,8 @@ | |||
66 | 66 | ||
67 | #define DRV_MODULE_NAME "tg3" | 67 | #define DRV_MODULE_NAME "tg3" |
68 | #define PFX DRV_MODULE_NAME ": " | 68 | #define PFX DRV_MODULE_NAME ": " |
69 | #define DRV_MODULE_VERSION "3.93" | 69 | #define DRV_MODULE_VERSION "3.94" |
70 | #define DRV_MODULE_RELDATE "May 22, 2008" | 70 | #define DRV_MODULE_RELDATE "August 14, 2008" |
71 | 71 | ||
72 | #define TG3_DEF_MAC_MODE 0 | 72 | #define TG3_DEF_MAC_MODE 0 |
73 | #define TG3_DEF_RX_MODE 0 | 73 | #define TG3_DEF_RX_MODE 0 |
@@ -536,6 +536,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) | |||
536 | return 0; | 536 | return 0; |
537 | 537 | ||
538 | switch (locknum) { | 538 | switch (locknum) { |
539 | case TG3_APE_LOCK_GRC: | ||
539 | case TG3_APE_LOCK_MEM: | 540 | case TG3_APE_LOCK_MEM: |
540 | break; | 541 | break; |
541 | default: | 542 | default: |
@@ -573,6 +574,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) | |||
573 | return; | 574 | return; |
574 | 575 | ||
575 | switch (locknum) { | 576 | switch (locknum) { |
577 | case TG3_APE_LOCK_GRC: | ||
576 | case TG3_APE_LOCK_MEM: | 578 | case TG3_APE_LOCK_MEM: |
577 | break; | 579 | break; |
578 | default: | 580 | default: |
@@ -1018,15 +1020,43 @@ static void tg3_mdio_fini(struct tg3 *tp) | |||
1018 | } | 1020 | } |
1019 | 1021 | ||
1020 | /* tp->lock is held. */ | 1022 | /* tp->lock is held. */ |
1023 | static inline void tg3_generate_fw_event(struct tg3 *tp) | ||
1024 | { | ||
1025 | u32 val; | ||
1026 | |||
1027 | val = tr32(GRC_RX_CPU_EVENT); | ||
1028 | val |= GRC_RX_CPU_DRIVER_EVENT; | ||
1029 | tw32_f(GRC_RX_CPU_EVENT, val); | ||
1030 | |||
1031 | tp->last_event_jiffies = jiffies; | ||
1032 | } | ||
1033 | |||
1034 | #define TG3_FW_EVENT_TIMEOUT_USEC 2500 | ||
1035 | |||
1036 | /* tp->lock is held. */ | ||
1021 | static void tg3_wait_for_event_ack(struct tg3 *tp) | 1037 | static void tg3_wait_for_event_ack(struct tg3 *tp) |
1022 | { | 1038 | { |
1023 | int i; | 1039 | int i; |
1040 | unsigned int delay_cnt; | ||
1041 | long time_remain; | ||
1042 | |||
1043 | /* If enough time has passed, no wait is necessary. */ | ||
1044 | time_remain = (long)(tp->last_event_jiffies + 1 + | ||
1045 | usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - | ||
1046 | (long)jiffies; | ||
1047 | if (time_remain < 0) | ||
1048 | return; | ||
1024 | 1049 | ||
1025 | /* Wait for up to 2.5 milliseconds */ | 1050 | /* Check if we can shorten the wait time. */ |
1026 | for (i = 0; i < 250000; i++) { | 1051 | delay_cnt = jiffies_to_usecs(time_remain); |
1052 | if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) | ||
1053 | delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; | ||
1054 | delay_cnt = (delay_cnt >> 3) + 1; | ||
1055 | |||
1056 | for (i = 0; i < delay_cnt; i++) { | ||
1027 | if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) | 1057 | if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) |
1028 | break; | 1058 | break; |
1029 | udelay(10); | 1059 | udelay(8); |
1030 | } | 1060 | } |
1031 | } | 1061 | } |
1032 | 1062 | ||
@@ -1075,9 +1105,7 @@ static void tg3_ump_link_report(struct tg3 *tp) | |||
1075 | val = 0; | 1105 | val = 0; |
1076 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); | 1106 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); |
1077 | 1107 | ||
1078 | val = tr32(GRC_RX_CPU_EVENT); | 1108 | tg3_generate_fw_event(tp); |
1079 | val |= GRC_RX_CPU_DRIVER_EVENT; | ||
1080 | tw32_f(GRC_RX_CPU_EVENT, val); | ||
1081 | } | 1109 | } |
1082 | 1110 | ||
1083 | static void tg3_link_report(struct tg3 *tp) | 1111 | static void tg3_link_report(struct tg3 *tp) |
@@ -2124,6 +2152,13 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2124 | (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) | 2152 | (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) |
2125 | mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; | 2153 | mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; |
2126 | 2154 | ||
2155 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | ||
2156 | mac_mode |= tp->mac_mode & | ||
2157 | (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); | ||
2158 | if (mac_mode & MAC_MODE_APE_TX_EN) | ||
2159 | mac_mode |= MAC_MODE_TDE_ENABLE; | ||
2160 | } | ||
2161 | |||
2127 | tw32_f(MAC_MODE, mac_mode); | 2162 | tw32_f(MAC_MODE, mac_mode); |
2128 | udelay(100); | 2163 | udelay(100); |
2129 | 2164 | ||
@@ -5493,7 +5528,7 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event) | |||
5493 | return; | 5528 | return; |
5494 | 5529 | ||
5495 | apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); | 5530 | apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); |
5496 | if (apedata != APE_FW_STATUS_READY) | 5531 | if (!(apedata & APE_FW_STATUS_READY)) |
5497 | return; | 5532 | return; |
5498 | 5533 | ||
5499 | /* Wait for up to 1 millisecond for APE to service previous event. */ | 5534 | /* Wait for up to 1 millisecond for APE to service previous event. */ |
@@ -5760,6 +5795,8 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
5760 | 5795 | ||
5761 | tg3_mdio_stop(tp); | 5796 | tg3_mdio_stop(tp); |
5762 | 5797 | ||
5798 | tg3_ape_lock(tp, TG3_APE_LOCK_GRC); | ||
5799 | |||
5763 | /* No matching tg3_nvram_unlock() after this because | 5800 | /* No matching tg3_nvram_unlock() after this because |
5764 | * chip reset below will undo the nvram lock. | 5801 | * chip reset below will undo the nvram lock. |
5765 | */ | 5802 | */ |
@@ -5908,12 +5945,19 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
5908 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 5945 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { |
5909 | tp->mac_mode = MAC_MODE_PORT_MODE_GMII; | 5946 | tp->mac_mode = MAC_MODE_PORT_MODE_GMII; |
5910 | tw32_f(MAC_MODE, tp->mac_mode); | 5947 | tw32_f(MAC_MODE, tp->mac_mode); |
5948 | } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | ||
5949 | tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); | ||
5950 | if (tp->mac_mode & MAC_MODE_APE_TX_EN) | ||
5951 | tp->mac_mode |= MAC_MODE_TDE_ENABLE; | ||
5952 | tw32_f(MAC_MODE, tp->mac_mode); | ||
5911 | } else | 5953 | } else |
5912 | tw32_f(MAC_MODE, 0); | 5954 | tw32_f(MAC_MODE, 0); |
5913 | udelay(40); | 5955 | udelay(40); |
5914 | 5956 | ||
5915 | tg3_mdio_start(tp); | 5957 | tg3_mdio_start(tp); |
5916 | 5958 | ||
5959 | tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); | ||
5960 | |||
5917 | err = tg3_poll_fw(tp); | 5961 | err = tg3_poll_fw(tp); |
5918 | if (err) | 5962 | if (err) |
5919 | return err; | 5963 | return err; |
@@ -5935,6 +5979,7 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
5935 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); | 5979 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); |
5936 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 5980 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { |
5937 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 5981 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; |
5982 | tp->last_event_jiffies = jiffies; | ||
5938 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 5983 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) |
5939 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; | 5984 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; |
5940 | } | 5985 | } |
@@ -5948,15 +5993,12 @@ static void tg3_stop_fw(struct tg3 *tp) | |||
5948 | { | 5993 | { |
5949 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | 5994 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && |
5950 | !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | 5995 | !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { |
5951 | u32 val; | ||
5952 | |||
5953 | /* Wait for RX cpu to ACK the previous event. */ | 5996 | /* Wait for RX cpu to ACK the previous event. */ |
5954 | tg3_wait_for_event_ack(tp); | 5997 | tg3_wait_for_event_ack(tp); |
5955 | 5998 | ||
5956 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); | 5999 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); |
5957 | val = tr32(GRC_RX_CPU_EVENT); | 6000 | |
5958 | val |= GRC_RX_CPU_DRIVER_EVENT; | 6001 | tg3_generate_fw_event(tp); |
5959 | tw32(GRC_RX_CPU_EVENT, val); | ||
5960 | 6002 | ||
5961 | /* Wait for RX cpu to ACK this event. */ | 6003 | /* Wait for RX cpu to ACK this event. */ |
5962 | tg3_wait_for_event_ack(tp); | 6004 | tg3_wait_for_event_ack(tp); |
@@ -7406,7 +7448,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7406 | udelay(10); | 7448 | udelay(10); |
7407 | } | 7449 | } |
7408 | 7450 | ||
7409 | tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | | 7451 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) |
7452 | tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; | ||
7453 | else | ||
7454 | tp->mac_mode = 0; | ||
7455 | tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | | ||
7410 | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; | 7456 | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; |
7411 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 7457 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && |
7412 | !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 7458 | !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && |
@@ -7840,9 +7886,8 @@ static void tg3_timer(unsigned long __opaque) | |||
7840 | * resets. | 7886 | * resets. |
7841 | */ | 7887 | */ |
7842 | if (!--tp->asf_counter) { | 7888 | if (!--tp->asf_counter) { |
7843 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 7889 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && |
7844 | u32 val; | 7890 | !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { |
7845 | |||
7846 | tg3_wait_for_event_ack(tp); | 7891 | tg3_wait_for_event_ack(tp); |
7847 | 7892 | ||
7848 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, | 7893 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, |
@@ -7850,9 +7895,8 @@ static void tg3_timer(unsigned long __opaque) | |||
7850 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); | 7895 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); |
7851 | /* 5 seconds timeout */ | 7896 | /* 5 seconds timeout */ |
7852 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); | 7897 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); |
7853 | val = tr32(GRC_RX_CPU_EVENT); | 7898 | |
7854 | val |= GRC_RX_CPU_DRIVER_EVENT; | 7899 | tg3_generate_fw_event(tp); |
7855 | tw32_f(GRC_RX_CPU_EVENT, val); | ||
7856 | } | 7900 | } |
7857 | tp->asf_counter = tp->asf_multiplier; | 7901 | tp->asf_counter = tp->asf_multiplier; |
7858 | } | 7902 | } |
@@ -8422,6 +8466,11 @@ static inline unsigned long get_stat64(tg3_stat64_t *val) | |||
8422 | return ret; | 8466 | return ret; |
8423 | } | 8467 | } |
8424 | 8468 | ||
8469 | static inline u64 get_estat64(tg3_stat64_t *val) | ||
8470 | { | ||
8471 | return ((u64)val->high << 32) | ((u64)val->low); | ||
8472 | } | ||
8473 | |||
8425 | static unsigned long calc_crc_errors(struct tg3 *tp) | 8474 | static unsigned long calc_crc_errors(struct tg3 *tp) |
8426 | { | 8475 | { |
8427 | struct tg3_hw_stats *hw_stats = tp->hw_stats; | 8476 | struct tg3_hw_stats *hw_stats = tp->hw_stats; |
@@ -8450,7 +8499,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp) | |||
8450 | 8499 | ||
8451 | #define ESTAT_ADD(member) \ | 8500 | #define ESTAT_ADD(member) \ |
8452 | estats->member = old_estats->member + \ | 8501 | estats->member = old_estats->member + \ |
8453 | get_stat64(&hw_stats->member) | 8502 | get_estat64(&hw_stats->member) |
8454 | 8503 | ||
8455 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) | 8504 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) |
8456 | { | 8505 | { |
@@ -12416,6 +12465,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12416 | tp->misc_host_ctrl); | 12465 | tp->misc_host_ctrl); |
12417 | } | 12466 | } |
12418 | 12467 | ||
12468 | /* Preserve the APE MAC_MODE bits */ | ||
12469 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | ||
12470 | tp->mac_mode = tr32(MAC_MODE) | | ||
12471 | MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; | ||
12472 | else | ||
12473 | tp->mac_mode = TG3_DEF_MAC_MODE; | ||
12474 | |||
12419 | /* these are limited to 10/100 only */ | 12475 | /* these are limited to 10/100 only */ |
12420 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && | 12476 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && |
12421 | (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || | 12477 | (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || |
@@ -13275,7 +13331,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
13275 | tp->pdev = pdev; | 13331 | tp->pdev = pdev; |
13276 | tp->dev = dev; | 13332 | tp->dev = dev; |
13277 | tp->pm_cap = pm_cap; | 13333 | tp->pm_cap = pm_cap; |
13278 | tp->mac_mode = TG3_DEF_MAC_MODE; | ||
13279 | tp->rx_mode = TG3_DEF_RX_MODE; | 13334 | tp->rx_mode = TG3_DEF_RX_MODE; |
13280 | tp->tx_mode = TG3_DEF_TX_MODE; | 13335 | tp->tx_mode = TG3_DEF_TX_MODE; |
13281 | 13336 | ||
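The tg3_wait_for_event_ack() rewrite above replaces a fixed 2.5 ms poll with one bounded by how much of the firmware-event window is still outstanding since tg3_generate_fw_event() stamped tp->last_event_jiffies. A rough sketch of the arithmetic, using the constants from the hunk (the min_t() form is a simplification, not the driver's literal code):

        long time_remain;
        unsigned int delay_cnt;

        /* jiffies left until the previous event is certainly serviced */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                             usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;                         /* window already expired */

        /* poll in ~8 us steps, but never longer than what remains */
        delay_cnt = min_t(unsigned int, jiffies_to_usecs(time_remain),
                          TG3_FW_EVENT_TIMEOUT_USEC);
        delay_cnt = (delay_cnt >> 3) + 1;

The shift by 3 turns the remaining microseconds into udelay(8) iterations, and the +1 guarantees at least one poll of GRC_RX_CPU_EVENT.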
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index df07842172b7..f5b8cab8d4b5 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
@@ -325,6 +325,8 @@ | |||
325 | #define MAC_MODE_TDE_ENABLE 0x00200000 | 325 | #define MAC_MODE_TDE_ENABLE 0x00200000 |
326 | #define MAC_MODE_RDE_ENABLE 0x00400000 | 326 | #define MAC_MODE_RDE_ENABLE 0x00400000 |
327 | #define MAC_MODE_FHDE_ENABLE 0x00800000 | 327 | #define MAC_MODE_FHDE_ENABLE 0x00800000 |
328 | #define MAC_MODE_APE_RX_EN 0x08000000 | ||
329 | #define MAC_MODE_APE_TX_EN 0x10000000 | ||
328 | #define MAC_STATUS 0x00000404 | 330 | #define MAC_STATUS 0x00000404 |
329 | #define MAC_STATUS_PCS_SYNCED 0x00000001 | 331 | #define MAC_STATUS_PCS_SYNCED 0x00000001 |
330 | #define MAC_STATUS_SIGNAL_DET 0x00000002 | 332 | #define MAC_STATUS_SIGNAL_DET 0x00000002 |
@@ -1889,6 +1891,7 @@ | |||
1889 | #define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 | 1891 | #define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 |
1890 | 1892 | ||
1891 | /* APE convenience enumerations. */ | 1893 | /* APE convenience enumerations. */ |
1894 | #define TG3_APE_LOCK_GRC 1 | ||
1892 | #define TG3_APE_LOCK_MEM 4 | 1895 | #define TG3_APE_LOCK_MEM 4 |
1893 | 1896 | ||
1894 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 | 1897 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 |
@@ -2429,7 +2432,10 @@ struct tg3 { | |||
2429 | struct tg3_ethtool_stats estats; | 2432 | struct tg3_ethtool_stats estats; |
2430 | struct tg3_ethtool_stats estats_prev; | 2433 | struct tg3_ethtool_stats estats_prev; |
2431 | 2434 | ||
2435 | union { | ||
2432 | unsigned long phy_crc_errors; | 2436 | unsigned long phy_crc_errors; |
2437 | unsigned long last_event_jiffies; | ||
2438 | }; | ||
2433 | 2439 | ||
2434 | u32 rx_offset; | 2440 | u32 rx_offset; |
2435 | u32 tg3_flags; | 2441 | u32 tg3_flags; |
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 85246ed7cb9c..ec871f646766 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c | |||
@@ -360,8 +360,8 @@ TLan_GetSKB( const struct tlan_list_tag *tag) | |||
360 | { | 360 | { |
361 | unsigned long addr; | 361 | unsigned long addr; |
362 | 362 | ||
363 | addr = tag->buffer[8].address; | 363 | addr = tag->buffer[9].address; |
364 | addr |= (tag->buffer[9].address << 16) << 16; | 364 | addr |= (tag->buffer[8].address << 16) << 16; |
365 | return (struct sk_buff *) addr; | 365 | return (struct sk_buff *) addr; |
366 | } | 366 | } |
367 | 367 | ||
@@ -1984,7 +1984,6 @@ static void TLan_ResetLists( struct net_device *dev ) | |||
1984 | TLanList *list; | 1984 | TLanList *list; |
1985 | dma_addr_t list_phys; | 1985 | dma_addr_t list_phys; |
1986 | struct sk_buff *skb; | 1986 | struct sk_buff *skb; |
1987 | void *t = NULL; | ||
1988 | 1987 | ||
1989 | priv->txHead = 0; | 1988 | priv->txHead = 0; |
1990 | priv->txTail = 0; | 1989 | priv->txTail = 0; |
@@ -2022,7 +2021,8 @@ static void TLan_ResetLists( struct net_device *dev ) | |||
2022 | } | 2021 | } |
2023 | 2022 | ||
2024 | skb_reserve( skb, NET_IP_ALIGN ); | 2023 | skb_reserve( skb, NET_IP_ALIGN ); |
2025 | list->buffer[0].address = pci_map_single(priv->pciDev, t, | 2024 | list->buffer[0].address = pci_map_single(priv->pciDev, |
2025 | skb->data, | ||
2026 | TLAN_MAX_FRAME_SIZE, | 2026 | TLAN_MAX_FRAME_SIZE, |
2027 | PCI_DMA_FROMDEVICE); | 2027 | PCI_DMA_FROMDEVICE); |
2028 | TLan_StoreSKB(list, skb); | 2028 | TLan_StoreSKB(list, skb); |
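The TLan_GetSKB() fix above matters on 64-bit kernels, where the skb pointer is carried in two 32-bit descriptor fields: after the change the reader pulls the low word from buffer[9] and the high word from buffer[8], which is assumed here to be the layout its counterpart TLan_StoreSKB() writes. A simplified round trip (plain 64-bit casts instead of the driver's shift-by-16-twice idiom):

        /* store: low word in buffer[9], high word in buffer[8] */
        tag->buffer[9].address = (u32)addr;
        tag->buffer[8].address = (u32)((u64)addr >> 32);

        /* load must mirror that layout */
        addr  = tag->buffer[9].address;
        addr |= (unsigned long)((u64)tag->buffer[8].address << 32);

The driver itself widens with two 16-bit shifts, presumably to keep the expression well defined when unsigned long is only 32 bits wide.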
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 47d84cd28097..59d1673f9387 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c | |||
@@ -119,7 +119,6 @@ | |||
119 | #include <linux/pci.h> | 119 | #include <linux/pci.h> |
120 | #include <linux/dma-mapping.h> | 120 | #include <linux/dma-mapping.h> |
121 | #include <linux/spinlock.h> | 121 | #include <linux/spinlock.h> |
122 | #include <linux/version.h> | ||
123 | #include <linux/bitops.h> | 122 | #include <linux/bitops.h> |
124 | #include <linux/jiffies.h> | 123 | #include <linux/jiffies.h> |
125 | 124 | ||
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h index e7bb3494afc7..13ccee6449c1 100644 --- a/drivers/net/tokenring/lanstreamer.h +++ b/drivers/net/tokenring/lanstreamer.h | |||
@@ -60,8 +60,6 @@ | |||
60 | * | 60 | * |
61 | */ | 61 | */ |
62 | 62 | ||
63 | #include <linux/version.h> | ||
64 | |||
65 | /* MAX_INTR - the maximum number of times we can loop | 63 | /* MAX_INTR - the maximum number of times we can loop |
66 | * inside the interrupt function before returning | 64 | * inside the interrupt function before returning |
67 | * control to the OS (maximum value is 256) | 65 | * control to the OS (maximum value is 256) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e6bbc639c2d0..6daea0c91862 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -358,6 +358,66 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) | |||
358 | return mask; | 358 | return mask; |
359 | } | 359 | } |
360 | 360 | ||
361 | /* prepad is the amount to reserve at front. len is length after that. | ||
362 | * linear is a hint as to how much to copy (usually headers). */ | ||
363 | static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear, | ||
364 | gfp_t gfp) | ||
365 | { | ||
366 | struct sk_buff *skb; | ||
367 | unsigned int i; | ||
368 | |||
369 | skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN); | ||
370 | if (skb) { | ||
371 | skb_reserve(skb, prepad); | ||
372 | skb_put(skb, len); | ||
373 | return skb; | ||
374 | } | ||
375 | |||
376 | /* Under a page? Don't bother with paged skb. */ | ||
377 | if (prepad + len < PAGE_SIZE) | ||
378 | return NULL; | ||
379 | |||
380 | /* Start with a normal skb, and add pages. */ | ||
381 | skb = alloc_skb(prepad + linear, gfp); | ||
382 | if (!skb) | ||
383 | return NULL; | ||
384 | |||
385 | skb_reserve(skb, prepad); | ||
386 | skb_put(skb, linear); | ||
387 | |||
388 | len -= linear; | ||
389 | |||
390 | for (i = 0; i < MAX_SKB_FRAGS; i++) { | ||
391 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; | ||
392 | |||
393 | f->page = alloc_page(gfp|__GFP_ZERO); | ||
394 | if (!f->page) | ||
395 | break; | ||
396 | |||
397 | f->page_offset = 0; | ||
398 | f->size = PAGE_SIZE; | ||
399 | |||
400 | skb->data_len += PAGE_SIZE; | ||
401 | skb->len += PAGE_SIZE; | ||
402 | skb->truesize += PAGE_SIZE; | ||
403 | skb_shinfo(skb)->nr_frags++; | ||
404 | |||
405 | if (len < PAGE_SIZE) { | ||
406 | len = 0; | ||
407 | break; | ||
408 | } | ||
409 | len -= PAGE_SIZE; | ||
410 | } | ||
411 | |||
412 | /* Too large, or alloc fail? */ | ||
413 | if (unlikely(len)) { | ||
414 | kfree_skb(skb); | ||
415 | skb = NULL; | ||
416 | } | ||
417 | |||
418 | return skb; | ||
419 | } | ||
420 | |||
361 | /* Get packet from user space buffer */ | 421 | /* Get packet from user space buffer */ |
362 | static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) | 422 | static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) |
363 | { | 423 | { |
@@ -391,14 +451,12 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
391 | return -EINVAL; | 451 | return -EINVAL; |
392 | } | 452 | } |
393 | 453 | ||
394 | if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { | 454 | if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) { |
395 | tun->dev->stats.rx_dropped++; | 455 | tun->dev->stats.rx_dropped++; |
396 | return -ENOMEM; | 456 | return -ENOMEM; |
397 | } | 457 | } |
398 | 458 | ||
399 | if (align) | 459 | if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) { |
400 | skb_reserve(skb, align); | ||
401 | if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { | ||
402 | tun->dev->stats.rx_dropped++; | 460 | tun->dev->stats.rx_dropped++; |
403 | kfree_skb(skb); | 461 | kfree_skb(skb); |
404 | return -EFAULT; | 462 | return -EFAULT; |
@@ -748,6 +806,36 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
748 | return err; | 806 | return err; |
749 | } | 807 | } |
750 | 808 | ||
809 | static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) | ||
810 | { | ||
811 | struct tun_struct *tun = file->private_data; | ||
812 | |||
813 | if (!tun) | ||
814 | return -EBADFD; | ||
815 | |||
816 | DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); | ||
817 | |||
818 | strcpy(ifr->ifr_name, tun->dev->name); | ||
819 | |||
820 | ifr->ifr_flags = 0; | ||
821 | |||
822 | if (ifr->ifr_flags & TUN_TUN_DEV) | ||
823 | ifr->ifr_flags |= IFF_TUN; | ||
824 | else | ||
825 | ifr->ifr_flags |= IFF_TAP; | ||
826 | |||
827 | if (tun->flags & TUN_NO_PI) | ||
828 | ifr->ifr_flags |= IFF_NO_PI; | ||
829 | |||
830 | if (tun->flags & TUN_ONE_QUEUE) | ||
831 | ifr->ifr_flags |= IFF_ONE_QUEUE; | ||
832 | |||
833 | if (tun->flags & TUN_VNET_HDR) | ||
834 | ifr->ifr_flags |= IFF_VNET_HDR; | ||
835 | |||
836 | return 0; | ||
837 | } | ||
838 | |||
751 | /* This is like a cut-down ethtool ops, except done via tun fd so no | 839 | /* This is like a cut-down ethtool ops, except done via tun fd so no |
752 | * privs required. */ | 840 | * privs required. */ |
753 | static int set_offload(struct net_device *dev, unsigned long arg) | 841 | static int set_offload(struct net_device *dev, unsigned long arg) |
@@ -833,6 +921,15 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
833 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); | 921 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); |
834 | 922 | ||
835 | switch (cmd) { | 923 | switch (cmd) { |
924 | case TUNGETIFF: | ||
925 | ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); | ||
926 | if (ret) | ||
927 | return ret; | ||
928 | |||
929 | if (copy_to_user(argp, &ifr, sizeof(ifr))) | ||
930 | return -EFAULT; | ||
931 | break; | ||
932 | |||
836 | case TUNSETNOCSUM: | 933 | case TUNSETNOCSUM: |
837 | /* Disable/Enable checksum */ | 934 | /* Disable/Enable checksum */ |
838 | if (arg) | 935 | if (arg) |
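The new TUNGETIFF ioctl added above gives a process holding a tun/tap fd a way to ask which interface it is bound to and with which flags. A hedged userspace sketch (TUNGETIFF is assumed to be exported via <linux/if_tun.h> alongside this change; error handling trimmed):

        #include <stdio.h>
        #include <string.h>
        #include <net/if.h>
        #include <sys/ioctl.h>
        #include <linux/if_tun.h>

        static void show_tun_binding(int tun_fd)
        {
                struct ifreq ifr;

                memset(&ifr, 0, sizeof(ifr));
                if (ioctl(tun_fd, TUNGETIFF, &ifr) == 0)
                        printf("attached to %s, flags 0x%x\n",
                               ifr.ifr_name, ifr.ifr_flags);
        }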
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 8549f1159a30..734ce0977f02 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -128,7 +128,6 @@ static const int multicast_filter_limit = 32; | |||
128 | #include <asm/io.h> | 128 | #include <asm/io.h> |
129 | #include <asm/uaccess.h> | 129 | #include <asm/uaccess.h> |
130 | #include <linux/in6.h> | 130 | #include <linux/in6.h> |
131 | #include <linux/version.h> | ||
132 | #include <linux/dma-mapping.h> | 131 | #include <linux/dma-mapping.h> |
133 | 132 | ||
134 | #include "typhoon.h" | 133 | #include "typhoon.h" |
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 68e198bd538b..0973b6e37024 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -154,17 +154,6 @@ config USB_NET_AX8817X | |||
154 | This driver creates an interface named "ethX", where X depends on | 154 | This driver creates an interface named "ethX", where X depends on |
155 | what other networking devices you have in use. | 155 | what other networking devices you have in use. |
156 | 156 | ||
157 | config USB_HSO | ||
158 | tristate "Option USB High Speed Mobile Devices" | ||
159 | depends on USB && RFKILL | ||
160 | default n | ||
161 | help | ||
162 | Choose this option if you have an Option HSDPA/HSUPA card. | ||
163 | These cards support downlink speeds of 7.2Mbps or greater. | ||
164 | |||
165 | To compile this driver as a module, choose M here: the | ||
166 | module will be called hso. | ||
167 | |||
168 | config USB_NET_CDCETHER | 157 | config USB_NET_CDCETHER |
169 | tristate "CDC Ethernet support (smart devices such as cable modems)" | 158 | tristate "CDC Ethernet support (smart devices such as cable modems)" |
170 | depends on USB_USBNET | 159 | depends on USB_USBNET |
@@ -337,5 +326,15 @@ config USB_NET_ZAURUS | |||
337 | really need this non-conformant variant of CDC Ethernet (or in | 326 | really need this non-conformant variant of CDC Ethernet (or in |
338 | some cases CDC MDLM) protocol, not "g_ether". | 327 | some cases CDC MDLM) protocol, not "g_ether". |
339 | 328 | ||
329 | config USB_HSO | ||
330 | tristate "Option USB High Speed Mobile Devices" | ||
331 | depends on USB && RFKILL | ||
332 | default n | ||
333 | help | ||
334 | Choose this option if you have an Option HSDPA/HSUPA card. | ||
335 | These cards support downlink speeds of 7.2Mbps or greater. | ||
336 | |||
337 | To compile this driver as a module, choose M here: the | ||
338 | module will be called hso. | ||
340 | 339 | ||
341 | endmenu | 340 | endmenu |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 031d07b105af..6e42b5a8c22b 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -102,8 +102,12 @@ | |||
102 | 102 | ||
103 | #define MAX_RX_URBS 2 | 103 | #define MAX_RX_URBS 2 |
104 | 104 | ||
105 | #define get_serial_by_tty(x) \ | 105 | static inline struct hso_serial *get_serial_by_tty(struct tty_struct *tty) |
106 | (x ? (struct hso_serial *)x->driver_data : NULL) | 106 | { |
107 | if (tty) | ||
108 | return tty->driver_data; | ||
109 | return NULL; | ||
110 | } | ||
107 | 111 | ||
108 | /*****************************************************************************/ | 112 | /*****************************************************************************/ |
109 | /* Debugging functions */ | 113 | /* Debugging functions */ |
@@ -294,24 +298,25 @@ static int hso_get_activity(struct hso_device *hso_dev); | |||
294 | 298 | ||
295 | /* #define DEBUG */ | 299 | /* #define DEBUG */ |
296 | 300 | ||
297 | #define dev2net(x) (x->port_data.dev_net) | 301 | static inline struct hso_net *dev2net(struct hso_device *hso_dev) |
298 | #define dev2ser(x) (x->port_data.dev_serial) | 302 | { |
303 | return hso_dev->port_data.dev_net; | ||
304 | } | ||
305 | |||
306 | static inline struct hso_serial *dev2ser(struct hso_device *hso_dev) | ||
307 | { | ||
308 | return hso_dev->port_data.dev_serial; | ||
309 | } | ||
299 | 310 | ||
300 | /* Debugging functions */ | 311 | /* Debugging functions */ |
301 | #ifdef DEBUG | 312 | #ifdef DEBUG |
302 | static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, | 313 | static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, |
303 | unsigned int len) | 314 | unsigned int len) |
304 | { | 315 | { |
305 | u8 i = 0; | 316 | static char name[255]; |
306 | 317 | ||
307 | printk(KERN_DEBUG "[%d:%s]: len %d", line_count, func_name, len); | 318 | sprintf(name, "hso[%d:%s]", line_count, func_name); |
308 | 319 | print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len); | |
309 | for (i = 0; i < len; i++) { | ||
310 | if (!(i % 16)) | ||
311 | printk("\n 0x%03x: ", i); | ||
312 | printk("%02x ", (unsigned char)buf[i]); | ||
313 | } | ||
314 | printk("\n"); | ||
315 | } | 320 | } |
316 | 321 | ||
317 | #define DUMP(buf_, len_) \ | 322 | #define DUMP(buf_, len_) \ |
@@ -392,7 +397,7 @@ static const struct usb_device_id hso_ids[] = { | |||
392 | {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ | 397 | {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ |
393 | {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ | 398 | {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ |
394 | {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ | 399 | {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ |
395 | {default_port_device(0x0af0, 0xd033)}, /* Icon-322 */ | 400 | {icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 */ |
396 | {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ | 401 | {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ |
397 | {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ | 402 | {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ |
398 | {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */ | 403 | {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */ |
@@ -528,13 +533,12 @@ static struct hso_serial *get_serial_by_shared_int_and_type( | |||
528 | 533 | ||
529 | static struct hso_serial *get_serial_by_index(unsigned index) | 534 | static struct hso_serial *get_serial_by_index(unsigned index) |
530 | { | 535 | { |
531 | struct hso_serial *serial; | 536 | struct hso_serial *serial = NULL; |
532 | unsigned long flags; | 537 | unsigned long flags; |
533 | 538 | ||
534 | if (!serial_table[index]) | ||
535 | return NULL; | ||
536 | spin_lock_irqsave(&serial_table_lock, flags); | 539 | spin_lock_irqsave(&serial_table_lock, flags); |
537 | serial = dev2ser(serial_table[index]); | 540 | if (serial_table[index]) |
541 | serial = dev2ser(serial_table[index]); | ||
538 | spin_unlock_irqrestore(&serial_table_lock, flags); | 542 | spin_unlock_irqrestore(&serial_table_lock, flags); |
539 | 543 | ||
540 | return serial; | 544 | return serial; |
@@ -561,6 +565,7 @@ static int get_free_serial_index(void) | |||
561 | static void set_serial_by_index(unsigned index, struct hso_serial *serial) | 565 | static void set_serial_by_index(unsigned index, struct hso_serial *serial) |
562 | { | 566 | { |
563 | unsigned long flags; | 567 | unsigned long flags; |
568 | |||
564 | spin_lock_irqsave(&serial_table_lock, flags); | 569 | spin_lock_irqsave(&serial_table_lock, flags); |
565 | if (serial) | 570 | if (serial) |
566 | serial_table[index] = serial->parent; | 571 | serial_table[index] = serial->parent; |
@@ -569,7 +574,7 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial) | |||
569 | spin_unlock_irqrestore(&serial_table_lock, flags); | 574 | spin_unlock_irqrestore(&serial_table_lock, flags); |
570 | } | 575 | } |
571 | 576 | ||
572 | /* log a meaningfull explanation of an USB status */ | 577 | /* log a meaningful explanation of an USB status */ |
573 | static void log_usb_status(int status, const char *function) | 578 | static void log_usb_status(int status, const char *function) |
574 | { | 579 | { |
575 | char *explanation; | 580 | char *explanation; |
@@ -1103,8 +1108,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) | |||
1103 | /* reset the rts and dtr */ | 1108 | /* reset the rts and dtr */ |
1104 | /* do the actual close */ | 1109 | /* do the actual close */ |
1105 | serial->open_count--; | 1110 | serial->open_count--; |
1111 | kref_put(&serial->parent->ref, hso_serial_ref_free); | ||
1106 | if (serial->open_count <= 0) { | 1112 | if (serial->open_count <= 0) { |
1107 | kref_put(&serial->parent->ref, hso_serial_ref_free); | ||
1108 | serial->open_count = 0; | 1113 | serial->open_count = 0; |
1109 | if (serial->tty) { | 1114 | if (serial->tty) { |
1110 | serial->tty->driver_data = NULL; | 1115 | serial->tty->driver_data = NULL; |
@@ -1467,7 +1472,8 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) | |||
1467 | return; | 1472 | return; |
1468 | } | 1473 | } |
1469 | hso_put_activity(serial->parent); | 1474 | hso_put_activity(serial->parent); |
1470 | tty_wakeup(serial->tty); | 1475 | if (serial->tty) |
1476 | tty_wakeup(serial->tty); | ||
1471 | hso_kick_transmit(serial); | 1477 | hso_kick_transmit(serial); |
1472 | 1478 | ||
1473 | D1(" "); | 1479 | D1(" "); |
@@ -1538,7 +1544,8 @@ static void ctrl_callback(struct urb *urb) | |||
1538 | clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags); | 1544 | clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags); |
1539 | } else { | 1545 | } else { |
1540 | hso_put_activity(serial->parent); | 1546 | hso_put_activity(serial->parent); |
1541 | tty_wakeup(serial->tty); | 1547 | if (serial->tty) |
1548 | tty_wakeup(serial->tty); | ||
1542 | /* response to a write command */ | 1549 | /* response to a write command */ |
1543 | hso_kick_transmit(serial); | 1550 | hso_kick_transmit(serial); |
1544 | } | 1551 | } |
@@ -2606,6 +2613,7 @@ static int hso_resume(struct usb_interface *iface) | |||
2606 | "Transmitting lingering data\n"); | 2613 | "Transmitting lingering data\n"); |
2607 | hso_net_start_xmit(hso_net->skb_tx_buf, | 2614 | hso_net_start_xmit(hso_net->skb_tx_buf, |
2608 | hso_net->net); | 2615 | hso_net->net); |
2616 | hso_net->skb_tx_buf = NULL; | ||
2609 | } | 2617 | } |
2610 | result = hso_start_net_device(network_table[i]); | 2618 | result = hso_start_net_device(network_table[i]); |
2611 | if (result) | 2619 | if (result) |
@@ -2652,7 +2660,7 @@ static void hso_free_interface(struct usb_interface *interface) | |||
2652 | hso_stop_net_device(network_table[i]); | 2660 | hso_stop_net_device(network_table[i]); |
2653 | cancel_work_sync(&network_table[i]->async_put_intf); | 2661 | cancel_work_sync(&network_table[i]->async_put_intf); |
2654 | cancel_work_sync(&network_table[i]->async_get_intf); | 2662 | cancel_work_sync(&network_table[i]->async_get_intf); |
2655 | if(rfk) | 2663 | if (rfk) |
2656 | rfkill_unregister(rfk); | 2664 | rfkill_unregister(rfk); |
2657 | hso_free_net_device(network_table[i]); | 2665 | hso_free_net_device(network_table[i]); |
2658 | } | 2666 | } |
@@ -2723,7 +2731,7 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int, | |||
2723 | } | 2731 | } |
2724 | 2732 | ||
2725 | /* operations setup of the serial interface */ | 2733 | /* operations setup of the serial interface */ |
2726 | static struct tty_operations hso_serial_ops = { | 2734 | static const struct tty_operations hso_serial_ops = { |
2727 | .open = hso_serial_open, | 2735 | .open = hso_serial_open, |
2728 | .close = hso_serial_close, | 2736 | .close = hso_serial_close, |
2729 | .write = hso_serial_write, | 2737 | .write = hso_serial_write, |
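Two of the hso changes above are worth calling out: the get_serial_by_tty() macro becomes a proper static inline (the argument is evaluated once and type-checked), and get_serial_by_index() stops testing serial_table[index] before taking serial_table_lock, closing a window where set_serial_by_index() could clear the entry between the check and the dereference. The fixed lookup keeps both under the lock, as in the hunk:

        static struct hso_serial *get_serial_by_index(unsigned index)
        {
                struct hso_serial *serial = NULL;
                unsigned long flags;

                spin_lock_irqsave(&serial_table_lock, flags);
                if (serial_table[index])
                        serial = dev2ser(serial_table[index]);
                spin_unlock_irqrestore(&serial_table_lock, flags);

                return serial;
        }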
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index c3d119f997f5..ca9d00c1194e 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c | |||
@@ -46,6 +46,10 @@ | |||
46 | 46 | ||
47 | #define MCS7830_VENDOR_ID 0x9710 | 47 | #define MCS7830_VENDOR_ID 0x9710 |
48 | #define MCS7830_PRODUCT_ID 0x7830 | 48 | #define MCS7830_PRODUCT_ID 0x7830 |
49 | #define MCS7730_PRODUCT_ID 0x7730 | ||
50 | |||
51 | #define SITECOM_VENDOR_ID 0x0DF6 | ||
52 | #define LN_030_PRODUCT_ID 0x0021 | ||
49 | 53 | ||
50 | #define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \ | 54 | #define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \ |
51 | ADVERTISE_100HALF | ADVERTISE_10FULL | \ | 55 | ADVERTISE_100HALF | ADVERTISE_10FULL | \ |
@@ -442,6 +446,29 @@ static struct ethtool_ops mcs7830_ethtool_ops = { | |||
442 | .nway_reset = usbnet_nway_reset, | 446 | .nway_reset = usbnet_nway_reset, |
443 | }; | 447 | }; |
444 | 448 | ||
449 | static int mcs7830_set_mac_address(struct net_device *netdev, void *p) | ||
450 | { | ||
451 | int ret; | ||
452 | struct usbnet *dev = netdev_priv(netdev); | ||
453 | struct sockaddr *addr = p; | ||
454 | |||
455 | if (netif_running(netdev)) | ||
456 | return -EBUSY; | ||
457 | |||
458 | if (!is_valid_ether_addr(addr->sa_data)) | ||
459 | return -EINVAL; | ||
460 | |||
461 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
462 | |||
463 | ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, | ||
464 | netdev->dev_addr); | ||
465 | |||
466 | if (ret < 0) | ||
467 | return ret; | ||
468 | |||
469 | return 0; | ||
470 | } | ||
471 | |||
445 | static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) | 472 | static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) |
446 | { | 473 | { |
447 | struct net_device *net = dev->net; | 474 | struct net_device *net = dev->net; |
@@ -455,6 +482,7 @@ static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) | |||
455 | net->ethtool_ops = &mcs7830_ethtool_ops; | 482 | net->ethtool_ops = &mcs7830_ethtool_ops; |
456 | net->set_multicast_list = mcs7830_set_multicast; | 483 | net->set_multicast_list = mcs7830_set_multicast; |
457 | mcs7830_set_multicast(net); | 484 | mcs7830_set_multicast(net); |
485 | net->set_mac_address = mcs7830_set_mac_address; | ||
458 | 486 | ||
459 | /* reserve space for the status byte on rx */ | 487 | /* reserve space for the status byte on rx */ |
460 | dev->rx_urb_size = ETH_FRAME_LEN + 1; | 488 | dev->rx_urb_size = ETH_FRAME_LEN + 1; |
@@ -491,7 +519,16 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
491 | } | 519 | } |
492 | 520 | ||
493 | static const struct driver_info moschip_info = { | 521 | static const struct driver_info moschip_info = { |
494 | .description = "MOSCHIP 7830 usb-NET adapter", | 522 | .description = "MOSCHIP 7830/7730 usb-NET adapter", |
523 | .bind = mcs7830_bind, | ||
524 | .rx_fixup = mcs7830_rx_fixup, | ||
525 | .flags = FLAG_ETHER, | ||
526 | .in = 1, | ||
527 | .out = 2, | ||
528 | }; | ||
529 | |||
530 | static const struct driver_info sitecom_info = { | ||
531 | .description = "Sitecom LN-30 usb-NET adapter", | ||
495 | .bind = mcs7830_bind, | 532 | .bind = mcs7830_bind, |
496 | .rx_fixup = mcs7830_rx_fixup, | 533 | .rx_fixup = mcs7830_rx_fixup, |
497 | .flags = FLAG_ETHER, | 534 | .flags = FLAG_ETHER, |
@@ -504,6 +541,14 @@ static const struct usb_device_id products[] = { | |||
504 | USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), | 541 | USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), |
505 | .driver_info = (unsigned long) &moschip_info, | 542 | .driver_info = (unsigned long) &moschip_info, |
506 | }, | 543 | }, |
544 | { | ||
545 | USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID), | ||
546 | .driver_info = (unsigned long) &moschip_info, | ||
547 | }, | ||
548 | { | ||
549 | USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID), | ||
550 | .driver_info = (unsigned long) &sitecom_info, | ||
551 | }, | ||
507 | {}, | 552 | {}, |
508 | }; | 553 | }; |
509 | MODULE_DEVICE_TABLE(usb, products); | 554 | MODULE_DEVICE_TABLE(usb, products); |
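The new mcs7830_set_mac_address() above follows the usual ordering for a set_mac_address hook: refuse while the interface is running, validate the address, update dev_addr, then push it to the adapter's HIF_REG_ETHERNET_ADDR register. Trimmed from the hunk:

        int ret;

        if (netif_running(netdev))                      /* no rewrites while up */
                return -EBUSY;
        if (!is_valid_ether_addr(addr->sa_data))        /* rejects zero/multicast */
                return -EINVAL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
                              netdev->dev_addr);
        return ret < 0 ? ret : 0;

With net->set_mac_address wired up, something like "ip link set dev <iface> address 02:11:22:33:44:55" (with the interface down, to satisfy the -EBUSY check) should now reach the hardware.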
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index e59255a155a9..6596cd0742b9 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c | |||
@@ -1317,7 +1317,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) | |||
1317 | break; | 1317 | break; |
1318 | 1318 | ||
1319 | case SIOCDEVRESINSTATS : | 1319 | case SIOCDEVRESINSTATS : |
1320 | if( current->euid != 0 ) /* root only */ | 1320 | if (!capable(CAP_NET_ADMIN)) |
1321 | return -EPERM; | 1321 | return -EPERM; |
1322 | memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); | 1322 | memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); |
1323 | break; | 1323 | break; |
@@ -1334,7 +1334,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) | |||
1334 | break; | 1334 | break; |
1335 | 1335 | ||
1336 | case SIOCDEVSHWSTATE : | 1336 | case SIOCDEVSHWSTATE : |
1337 | if( current->euid != 0 ) /* root only */ | 1337 | if (!capable(CAP_NET_ADMIN)) |
1338 | return -EPERM; | 1338 | return -EPERM; |
1339 | 1339 | ||
1340 | spin_lock( &nl->lock ); | 1340 | spin_lock( &nl->lock ); |
@@ -1355,7 +1355,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) | |||
1355 | #ifdef CONFIG_SBNI_MULTILINE | 1355 | #ifdef CONFIG_SBNI_MULTILINE |
1356 | 1356 | ||
1357 | case SIOCDEVENSLAVE : | 1357 | case SIOCDEVENSLAVE : |
1358 | if( current->euid != 0 ) /* root only */ | 1358 | if (!capable(CAP_NET_ADMIN)) |
1359 | return -EPERM; | 1359 | return -EPERM; |
1360 | 1360 | ||
1361 | if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) | 1361 | if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) |
@@ -1370,7 +1370,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) | |||
1370 | return enslave( dev, slave_dev ); | 1370 | return enslave( dev, slave_dev ); |
1371 | 1371 | ||
1372 | case SIOCDEVEMANSIPATE : | 1372 | case SIOCDEVEMANSIPATE : |
1373 | if( current->euid != 0 ) /* root only */ | 1373 | if (!capable(CAP_NET_ADMIN)) |
1374 | return -EPERM; | 1374 | return -EPERM; |
1375 | 1375 | ||
1376 | return emancipate( dev ); | 1376 | return emancipate( dev ); |
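The sbni hunks replace a bare current->euid != 0 test with capable(CAP_NET_ADMIN), so the privileged ioctls honour capability sets instead of demanding a literal root uid. Each privileged command now begins with the same guard:

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;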
diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 6f9aa1643743..fa14255282af 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c | |||
@@ -337,7 +337,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) | |||
337 | #ifdef CONFIG_NET_POLL_CONTROLLER | 337 | #ifdef CONFIG_NET_POLL_CONTROLLER |
338 | dev->poll_controller = ei_poll; | 338 | dev->poll_controller = ei_poll; |
339 | #endif | 339 | #endif |
340 | NS8390p_init(dev, 0); | 340 | NS8390_init(dev, 0); |
341 | 341 | ||
342 | #if 1 | 342 | #if 1 |
343 | /* Enable interrupt generation on softconfig cards -- M.U */ | 343 | /* Enable interrupt generation on softconfig cards -- M.U */ |
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 2028866f5995..0676c6d84383 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -40,7 +40,6 @@ | |||
40 | * | 40 | * |
41 | */ | 41 | */ |
42 | 42 | ||
43 | #include <linux/version.h> | ||
44 | #include <linux/module.h> | 43 | #include <linux/module.h> |
45 | #include <linux/delay.h> | 44 | #include <linux/delay.h> |
46 | #include <linux/hardirq.h> | 45 | #include <linux/hardirq.h> |
@@ -252,7 +251,7 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc, | |||
252 | return; | 251 | return; |
253 | pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, | 252 | pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, |
254 | PCI_DMA_TODEVICE); | 253 | PCI_DMA_TODEVICE); |
255 | dev_kfree_skb(bf->skb); | 254 | dev_kfree_skb_any(bf->skb); |
256 | bf->skb = NULL; | 255 | bf->skb = NULL; |
257 | } | 256 | } |
258 | 257 | ||
@@ -467,6 +466,7 @@ ath5k_pci_probe(struct pci_dev *pdev, | |||
467 | mutex_init(&sc->lock); | 466 | mutex_init(&sc->lock); |
468 | spin_lock_init(&sc->rxbuflock); | 467 | spin_lock_init(&sc->rxbuflock); |
469 | spin_lock_init(&sc->txbuflock); | 468 | spin_lock_init(&sc->txbuflock); |
469 | spin_lock_init(&sc->block); | ||
470 | 470 | ||
471 | /* Set private data */ | 471 | /* Set private data */ |
472 | pci_set_drvdata(pdev, hw); | 472 | pci_set_drvdata(pdev, hw); |
@@ -587,7 +587,6 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state) | |||
587 | ath5k_stop_hw(sc); | 587 | ath5k_stop_hw(sc); |
588 | 588 | ||
589 | free_irq(pdev->irq, sc); | 589 | free_irq(pdev->irq, sc); |
590 | pci_disable_msi(pdev); | ||
591 | pci_save_state(pdev); | 590 | pci_save_state(pdev); |
592 | pci_disable_device(pdev); | 591 | pci_disable_device(pdev); |
593 | pci_set_power_state(pdev, PCI_D3hot); | 592 | pci_set_power_state(pdev, PCI_D3hot); |
@@ -616,12 +615,10 @@ ath5k_pci_resume(struct pci_dev *pdev) | |||
616 | */ | 615 | */ |
617 | pci_write_config_byte(pdev, 0x41, 0); | 616 | pci_write_config_byte(pdev, 0x41, 0); |
618 | 617 | ||
619 | pci_enable_msi(pdev); | ||
620 | |||
621 | err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); | 618 | err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); |
622 | if (err) { | 619 | if (err) { |
623 | ATH5K_ERR(sc, "request_irq failed\n"); | 620 | ATH5K_ERR(sc, "request_irq failed\n"); |
624 | goto err_msi; | 621 | goto err_no_irq; |
625 | } | 622 | } |
626 | 623 | ||
627 | err = ath5k_init(sc); | 624 | err = ath5k_init(sc); |
@@ -642,8 +639,7 @@ ath5k_pci_resume(struct pci_dev *pdev) | |||
642 | return 0; | 639 | return 0; |
643 | err_irq: | 640 | err_irq: |
644 | free_irq(pdev->irq, sc); | 641 | free_irq(pdev->irq, sc); |
645 | err_msi: | 642 | err_no_irq: |
646 | pci_disable_msi(pdev); | ||
647 | pci_disable_device(pdev); | 643 | pci_disable_device(pdev); |
648 | return err; | 644 | return err; |
649 | } | 645 | } |
@@ -2184,8 +2180,11 @@ ath5k_beacon_config(struct ath5k_softc *sc) | |||
2184 | 2180 | ||
2185 | sc->imask |= AR5K_INT_SWBA; | 2181 | sc->imask |= AR5K_INT_SWBA; |
2186 | 2182 | ||
2187 | if (ath5k_hw_hasveol(ah)) | 2183 | if (ath5k_hw_hasveol(ah)) { |
2184 | spin_lock(&sc->block); | ||
2188 | ath5k_beacon_send(sc); | 2185 | ath5k_beacon_send(sc); |
2186 | spin_unlock(&sc->block); | ||
2187 | } | ||
2189 | } | 2188 | } |
2190 | /* TODO else AP */ | 2189 | /* TODO else AP */ |
2191 | 2190 | ||
@@ -2408,7 +2407,9 @@ ath5k_intr(int irq, void *dev_id) | |||
2408 | TSF_TO_TU(tsf), | 2407 | TSF_TO_TU(tsf), |
2409 | (unsigned long long) tsf); | 2408 | (unsigned long long) tsf); |
2410 | } else { | 2409 | } else { |
2410 | spin_lock(&sc->block); | ||
2411 | ath5k_beacon_send(sc); | 2411 | ath5k_beacon_send(sc); |
2412 | spin_unlock(&sc->block); | ||
2412 | } | 2413 | } |
2413 | } | 2414 | } |
2414 | if (status & AR5K_INT_RXEOL) { | 2415 | if (status & AR5K_INT_RXEOL) { |
@@ -2750,6 +2751,11 @@ static int ath5k_add_interface(struct ieee80211_hw *hw, | |||
2750 | ret = -EOPNOTSUPP; | 2751 | ret = -EOPNOTSUPP; |
2751 | goto end; | 2752 | goto end; |
2752 | } | 2753 | } |
2754 | |||
2755 | /* Set to a reasonable value. Note that this will | ||
2756 | * be set to mac80211's value at ath5k_config(). */ | ||
2757 | sc->bintval = 1000; | ||
2758 | |||
2753 | ret = 0; | 2759 | ret = 0; |
2754 | end: | 2760 | end: |
2755 | mutex_unlock(&sc->lock); | 2761 | mutex_unlock(&sc->lock); |
@@ -2794,9 +2800,6 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
2794 | struct ath5k_hw *ah = sc->ah; | 2800 | struct ath5k_hw *ah = sc->ah; |
2795 | int ret; | 2801 | int ret; |
2796 | 2802 | ||
2797 | /* Set to a reasonable value. Note that this will | ||
2798 | * be set to mac80211's value at ath5k_config(). */ | ||
2799 | sc->bintval = 1000; | ||
2800 | mutex_lock(&sc->lock); | 2803 | mutex_lock(&sc->lock); |
2801 | if (sc->vif != vif) { | 2804 | if (sc->vif != vif) { |
2802 | ret = -EIO; | 2805 | ret = -EIO; |
@@ -3055,6 +3058,7 @@ static int | |||
3055 | ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) | 3058 | ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) |
3056 | { | 3059 | { |
3057 | struct ath5k_softc *sc = hw->priv; | 3060 | struct ath5k_softc *sc = hw->priv; |
3061 | unsigned long flags; | ||
3058 | int ret; | 3062 | int ret; |
3059 | 3063 | ||
3060 | ath5k_debug_dump_skb(sc, skb, "BC ", 1); | 3064 | ath5k_debug_dump_skb(sc, skb, "BC ", 1); |
@@ -3064,12 +3068,14 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
3064 | goto end; | 3068 | goto end; |
3065 | } | 3069 | } |
3066 | 3070 | ||
3071 | spin_lock_irqsave(&sc->block, flags); | ||
3067 | ath5k_txbuf_free(sc, sc->bbuf); | 3072 | ath5k_txbuf_free(sc, sc->bbuf); |
3068 | sc->bbuf->skb = skb; | 3073 | sc->bbuf->skb = skb; |
3069 | ret = ath5k_beacon_setup(sc, sc->bbuf); | 3074 | ret = ath5k_beacon_setup(sc, sc->bbuf); |
3070 | if (ret) | 3075 | if (ret) |
3071 | sc->bbuf->skb = NULL; | 3076 | sc->bbuf->skb = NULL; |
3072 | else { | 3077 | spin_unlock_irqrestore(&sc->block, flags); |
3078 | if (!ret) { | ||
3073 | ath5k_beacon_config(sc); | 3079 | ath5k_beacon_config(sc); |
3074 | mmiowb(); | 3080 | mmiowb(); |
3075 | } | 3081 | } |
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h index d7e03e6b8271..7ec2f377d5c7 100644 --- a/drivers/net/wireless/ath5k/base.h +++ b/drivers/net/wireless/ath5k/base.h | |||
@@ -172,6 +172,7 @@ struct ath5k_softc { | |||
172 | struct tasklet_struct txtq; /* tx intr tasklet */ | 172 | struct tasklet_struct txtq; /* tx intr tasklet */ |
173 | struct ath5k_led tx_led; /* tx led */ | 173 | struct ath5k_led tx_led; /* tx led */ |
174 | 174 | ||
175 | spinlock_t block; /* protects beacon */ | ||
175 | struct ath5k_buf *bbuf; /* beacon buffer */ | 176 | struct ath5k_buf *bbuf; /* beacon buffer */ |
176 | unsigned int bhalq, /* SW q for outgoing beacons */ | 177 | unsigned int bhalq, /* SW q for outgoing beacons */ |
177 | bmisscount, /* missed beacon transmits */ | 178 | bmisscount, /* missed beacon transmits */ |
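The ath5k changes above introduce sc->block to serialize access to the beacon buffer: ath5k_intr() takes it in hard-interrupt context around ath5k_beacon_send(), so the process-context writer in ath5k_beacon_update() has to disable local interrupts while it swaps the buffer. The locking shape on the update side, condensed from the hunks:

        unsigned long flags;

        spin_lock_irqsave(&sc->block, flags);
        ath5k_txbuf_free(sc, sc->bbuf);         /* drop the old beacon skb */
        sc->bbuf->skb = skb;
        ret = ath5k_beacon_setup(sc, sc->bbuf);
        if (ret)
                sc->bbuf->skb = NULL;
        spin_unlock_irqrestore(&sc->block, flags);

        if (!ret) {
                ath5k_beacon_config(sc);
                mmiowb();
        }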
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c index bde162f128ab..a17eb130f574 100644 --- a/drivers/net/wireless/ath9k/hw.c +++ b/drivers/net/wireless/ath9k/hw.c | |||
@@ -5017,7 +5017,11 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah, | |||
5017 | 5017 | ||
5018 | for (i = 0; i < 123; i++) { | 5018 | for (i = 0; i < 123; i++) { |
5019 | if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { | 5019 | if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { |
5020 | if ((abs(cur_vit_mask - bin)) < 75) | 5020 | |
5021 | /* workaround for gcc bug #37014 */ | ||
5022 | volatile int tmp = abs(cur_vit_mask - bin); | ||
5023 | |||
5024 | if (tmp < 75) | ||
5021 | mask_amt = 1; | 5025 | mask_amt = 1; |
5022 | else | 5026 | else |
5023 | mask_amt = 0; | 5027 | mask_amt = 0; |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index bd35bb0a1480..bd65c485098c 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -1304,7 +1304,7 @@ EXPORT_SYMBOL(atmel_open); | |||
1304 | int atmel_open(struct net_device *dev) | 1304 | int atmel_open(struct net_device *dev) |
1305 | { | 1305 | { |
1306 | struct atmel_private *priv = netdev_priv(dev); | 1306 | struct atmel_private *priv = netdev_priv(dev); |
1307 | int i, channel; | 1307 | int i, channel, err; |
1308 | 1308 | ||
1309 | /* any scheduled timer is no longer needed and might screw things up.. */ | 1309 | /* any scheduled timer is no longer needed and might screw things up.. */ |
1310 | del_timer_sync(&priv->management_timer); | 1310 | del_timer_sync(&priv->management_timer); |
@@ -1328,8 +1328,9 @@ int atmel_open(struct net_device *dev) | |||
1328 | priv->site_survey_state = SITE_SURVEY_IDLE; | 1328 | priv->site_survey_state = SITE_SURVEY_IDLE; |
1329 | priv->station_is_associated = 0; | 1329 | priv->station_is_associated = 0; |
1330 | 1330 | ||
1331 | if (!reset_atmel_card(dev)) | 1331 | err = reset_atmel_card(dev); |
1332 | return -EAGAIN; | 1332 | if (err) |
1333 | return err; | ||
1333 | 1334 | ||
1334 | if (priv->config_reg_domain) { | 1335 | if (priv->config_reg_domain) { |
1335 | priv->reg_domain = priv->config_reg_domain; | 1336 | priv->reg_domain = priv->config_reg_domain; |
@@ -3061,12 +3062,20 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) | |||
3061 | } | 3062 | } |
3062 | 3063 | ||
3063 | if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { | 3064 | if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { |
3064 | /* Do opensystem first, then try sharedkey */ | 3065 | /* Flip back and forth between WEP auth modes until the max |
3066 | * authentication tries has been exceeded. | ||
3067 | */ | ||
3065 | if (system == WLAN_AUTH_OPEN) { | 3068 | if (system == WLAN_AUTH_OPEN) { |
3066 | priv->CurrentAuthentTransactionSeqNum = 0x001; | 3069 | priv->CurrentAuthentTransactionSeqNum = 0x001; |
3067 | priv->exclude_unencrypted = 1; | 3070 | priv->exclude_unencrypted = 1; |
3068 | send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0); | 3071 | send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0); |
3069 | return; | 3072 | return; |
3073 | } else if ( system == WLAN_AUTH_SHARED_KEY | ||
3074 | && priv->wep_is_on) { | ||
3075 | priv->CurrentAuthentTransactionSeqNum = 0x001; | ||
3076 | priv->exclude_unencrypted = 0; | ||
3077 | send_authentication_request(priv, WLAN_AUTH_OPEN, NULL, 0); | ||
3078 | return; | ||
3070 | } else if (priv->connect_to_any_BSS) { | 3079 | } else if (priv->connect_to_any_BSS) { |
3071 | int bss_index; | 3080 | int bss_index; |
3072 | 3081 | ||
@@ -3580,12 +3589,12 @@ static int atmel_wakeup_firmware(struct atmel_private *priv) | |||
3580 | 3589 | ||
3581 | if (i == 0) { | 3590 | if (i == 0) { |
3582 | printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name); | 3591 | printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name); |
3583 | return 0; | 3592 | return -EIO; |
3584 | } | 3593 | } |
3585 | 3594 | ||
3586 | if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) { | 3595 | if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) { |
3587 | printk(KERN_ALERT "%s: card missing.\n", priv->dev->name); | 3596 | printk(KERN_ALERT "%s: card missing.\n", priv->dev->name); |
3588 | return 0; | 3597 | return -ENODEV; |
3589 | } | 3598 | } |
3590 | 3599 | ||
3591 | /* now check for completion of MAC initialization through | 3600 | /* now check for completion of MAC initialization through |
@@ -3609,19 +3618,19 @@ static int atmel_wakeup_firmware(struct atmel_private *priv) | |||
3609 | if (i == 0) { | 3618 | if (i == 0) { |
3610 | printk(KERN_ALERT "%s: MAC failed to initialise.\n", | 3619 | printk(KERN_ALERT "%s: MAC failed to initialise.\n", |
3611 | priv->dev->name); | 3620 | priv->dev->name); |
3612 | return 0; | 3621 | return -EIO; |
3613 | } | 3622 | } |
3614 | 3623 | ||
3615 | /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */ | 3624 | /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */ |
3616 | if ((mr3 & MAC_INIT_COMPLETE) && | 3625 | if ((mr3 & MAC_INIT_COMPLETE) && |
3617 | !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) { | 3626 | !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) { |
3618 | printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name); | 3627 | printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name); |
3619 | return 0; | 3628 | return -EIO; |
3620 | } | 3629 | } |
3621 | if ((mr1 & MAC_INIT_COMPLETE) && | 3630 | if ((mr1 & MAC_INIT_COMPLETE) && |
3622 | !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) { | 3631 | !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) { |
3623 | printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name); | 3632 | printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name); |
3624 | return 0; | 3633 | return -EIO; |
3625 | } | 3634 | } |
3626 | 3635 | ||
3627 | atmel_copy_to_host(priv->dev, (unsigned char *)iface, | 3636 | atmel_copy_to_host(priv->dev, (unsigned char *)iface, |
@@ -3642,7 +3651,7 @@ static int atmel_wakeup_firmware(struct atmel_private *priv) | |||
3642 | iface->func_ctrl = le16_to_cpu(iface->func_ctrl); | 3651 | iface->func_ctrl = le16_to_cpu(iface->func_ctrl); |
3643 | iface->mac_status = le16_to_cpu(iface->mac_status); | 3652 | iface->mac_status = le16_to_cpu(iface->mac_status); |
3644 | 3653 | ||
3645 | return 1; | 3654 | return 0; |
3646 | } | 3655 | } |
3647 | 3656 | ||
3648 | /* determine type of memory and MAC address */ | 3657 | /* determine type of memory and MAC address */ |
@@ -3693,7 +3702,7 @@ static int probe_atmel_card(struct net_device *dev) | |||
3693 | /* Standard firmware in flash, boot it up and ask | 3702 | /* Standard firmware in flash, boot it up and ask |
3694 | for the Mac Address */ | 3703 | for the Mac Address */ |
3695 | priv->card_type = CARD_TYPE_SPI_FLASH; | 3704 | priv->card_type = CARD_TYPE_SPI_FLASH; |
3696 | if (atmel_wakeup_firmware(priv)) { | 3705 | if (atmel_wakeup_firmware(priv) == 0) { |
3697 | atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); | 3706 | atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); |
3698 | 3707 | ||
3699 | /* got address, now squash it again until the network | 3708 | /* got address, now squash it again until the network |
@@ -3835,6 +3844,7 @@ static int reset_atmel_card(struct net_device *dev) | |||
3835 | struct atmel_private *priv = netdev_priv(dev); | 3844 | struct atmel_private *priv = netdev_priv(dev); |
3836 | u8 configuration; | 3845 | u8 configuration; |
3837 | int old_state = priv->station_state; | 3846 | int old_state = priv->station_state; |
3847 | int err = 0; | ||
3838 | 3848 | ||
3839 | /* data to add to the firmware names, in priority order | 3849 | /* data to add to the firmware names, in priority order |
3840 | this implements firmware versioning */ | 3850 | this implements firmware versioning */ |
@@ -3868,11 +3878,12 @@ static int reset_atmel_card(struct net_device *dev) | |||
3868 | dev->name); | 3878 | dev->name); |
3869 | strcpy(priv->firmware_id, "atmel_at76c502.bin"); | 3879 | strcpy(priv->firmware_id, "atmel_at76c502.bin"); |
3870 | } | 3880 | } |
3871 | if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) { | 3881 | err = request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev); |
3882 | if (err != 0) { | ||
3872 | printk(KERN_ALERT | 3883 | printk(KERN_ALERT |
3873 | "%s: firmware %s is missing, cannot continue.\n", | 3884 | "%s: firmware %s is missing, cannot continue.\n", |
3874 | dev->name, priv->firmware_id); | 3885 | dev->name, priv->firmware_id); |
3875 | return 0; | 3886 | return err; |
3876 | } | 3887 | } |
3877 | } else { | 3888 | } else { |
3878 | int fw_index = 0; | 3889 | int fw_index = 0; |
@@ -3901,7 +3912,7 @@ static int reset_atmel_card(struct net_device *dev) | |||
3901 | "%s: firmware %s is missing, cannot start.\n", | 3912 | "%s: firmware %s is missing, cannot start.\n", |
3902 | dev->name, priv->firmware_id); | 3913 | dev->name, priv->firmware_id); |
3903 | priv->firmware_id[0] = '\0'; | 3914 | priv->firmware_id[0] = '\0'; |
3904 | return 0; | 3915 | return -ENOENT; |
3905 | } | 3916 | } |
3906 | } | 3917 | } |
3907 | 3918 | ||
@@ -3926,8 +3937,9 @@ static int reset_atmel_card(struct net_device *dev) | |||
3926 | release_firmware(fw_entry); | 3937 | release_firmware(fw_entry); |
3927 | } | 3938 | } |
3928 | 3939 | ||
3929 | if (!atmel_wakeup_firmware(priv)) | 3940 | err = atmel_wakeup_firmware(priv); |
3930 | return 0; | 3941 | if (err != 0) |
3942 | return err; | ||
3931 | 3943 | ||
3932 | /* Check the version and set the correct flag for wpa stuff, | 3944 | /* Check the version and set the correct flag for wpa stuff, |
3933 | old and new firmware is incompatible. | 3945 | old and new firmware is incompatible. |
@@ -3968,10 +3980,9 @@ static int reset_atmel_card(struct net_device *dev) | |||
3968 | if (!priv->radio_on_broken) { | 3980 | if (!priv->radio_on_broken) { |
3969 | if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) == | 3981 | if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) == |
3970 | CMD_STATUS_REJECTED_RADIO_OFF) { | 3982 | CMD_STATUS_REJECTED_RADIO_OFF) { |
3971 | printk(KERN_INFO | 3983 | printk(KERN_INFO "%s: cannot turn the radio on.\n", |
3972 | "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n", | ||
3973 | dev->name); | 3984 | dev->name); |
3974 | return 0; | 3985 | return -EIO; |
3975 | } | 3986 | } |
3976 | } | 3987 | } |
3977 | 3988 | ||
@@ -4006,7 +4017,7 @@ static int reset_atmel_card(struct net_device *dev) | |||
4006 | wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); | 4017 | wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); |
4007 | } | 4018 | } |
4008 | 4019 | ||
4009 | return 1; | 4020 | return 0; |
4010 | } | 4021 | } |
4011 | 4022 | ||
4012 | static void atmel_send_command(struct atmel_private *priv, int command, | 4023 | static void atmel_send_command(struct atmel_private *priv, int command, |
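The atmel.c changes above convert atmel_wakeup_firmware() and reset_atmel_card() from the old 1-on-success / 0-on-failure convention to the kernel's usual 0-on-success / negative-errno convention, and atmel_open() now passes the specific error code up instead of a blanket -EAGAIN. A small sketch of the converted calling pattern; the function names are invented for illustration and the errno values mirror the ones chosen in the hunks.

#include <errno.h>

/* new-style helper: 0 on success, negative errno describing the failure */
static int wake_firmware(int mac_booted, int card_present)
{
	if (!card_present)
		return -ENODEV;
	if (!mac_booted)
		return -EIO;
	return 0;
}

static int open_device(int mac_booted, int card_present)
{
	int err = wake_firmware(mac_booted, card_present);

	if (err)
		return err;	/* propagate the precise reason upward */
	return 0;
}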
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 3bf3a869361f..7205a936ec74 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/moduleparam.h> | 33 | #include <linux/moduleparam.h> |
34 | #include <linux/if_arp.h> | 34 | #include <linux/if_arp.h> |
35 | #include <linux/etherdevice.h> | 35 | #include <linux/etherdevice.h> |
36 | #include <linux/version.h> | ||
37 | #include <linux/firmware.h> | 36 | #include <linux/firmware.h> |
38 | #include <linux/wireless.h> | 37 | #include <linux/wireless.h> |
39 | #include <linux/workqueue.h> | 38 | #include <linux/workqueue.h> |
@@ -4615,7 +4614,9 @@ static void b43_sprom_fixup(struct ssb_bus *bus) | |||
4615 | if (bus->bustype == SSB_BUSTYPE_PCI) { | 4614 | if (bus->bustype == SSB_BUSTYPE_PCI) { |
4616 | pdev = bus->host_pci; | 4615 | pdev = bus->host_pci; |
4617 | if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || | 4616 | if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || |
4617 | IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) || | ||
4618 | IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || | 4618 | IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || |
4619 | IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) || | ||
4619 | IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013)) | 4620 | IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013)) |
4620 | bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; | 4621 | bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; |
4621 | } | 4622 | } |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 2541c81932f0..1cb77db5c292 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/moduleparam.h> | 34 | #include <linux/moduleparam.h> |
35 | #include <linux/if_arp.h> | 35 | #include <linux/if_arp.h> |
36 | #include <linux/etherdevice.h> | 36 | #include <linux/etherdevice.h> |
37 | #include <linux/version.h> | ||
38 | #include <linux/firmware.h> | 37 | #include <linux/firmware.h> |
39 | #include <linux/wireless.h> | 38 | #include <linux/wireless.h> |
40 | #include <linux/workqueue.h> | 39 | #include <linux/workqueue.h> |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index c6f886ec08a3..19a401c4a0dc 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -157,7 +157,6 @@ that only one external action is invoked at a time. | |||
157 | #include <linux/stringify.h> | 157 | #include <linux/stringify.h> |
158 | #include <linux/tcp.h> | 158 | #include <linux/tcp.h> |
159 | #include <linux/types.h> | 159 | #include <linux/types.h> |
160 | #include <linux/version.h> | ||
161 | #include <linux/time.h> | 160 | #include <linux/time.h> |
162 | #include <linux/firmware.h> | 161 | #include <linux/firmware.h> |
163 | #include <linux/acpi.h> | 162 | #include <linux/acpi.h> |
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 36e8d2f6e7b4..dcce3542d5a7 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -31,7 +31,6 @@ | |||
31 | ******************************************************************************/ | 31 | ******************************************************************************/ |
32 | 32 | ||
33 | #include "ipw2200.h" | 33 | #include "ipw2200.h" |
34 | #include <linux/version.h> | ||
35 | 34 | ||
36 | 35 | ||
37 | #ifndef KBUILD_EXTMOD | 36 | #ifndef KBUILD_EXTMOD |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c index d3336966b6b5..705c65bed9fd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c | |||
@@ -27,7 +27,6 @@ | |||
27 | 27 | ||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/version.h> | ||
31 | #include <linux/init.h> | 30 | #include <linux/init.h> |
32 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
33 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index b3931f6135a4..3f51f3635344 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/version.h> | ||
30 | #include <linux/init.h> | 29 | #include <linux/init.h> |
31 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
32 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 22bb26985c2e..e2581229d8b2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/version.h> | ||
30 | #include <linux/init.h> | 29 | #include <linux/init.h> |
31 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
32 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
@@ -967,7 +966,7 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, | |||
967 | 966 | ||
968 | s = iwl4965_get_sub_band(priv, channel); | 967 | s = iwl4965_get_sub_band(priv, channel); |
969 | if (s >= EEPROM_TX_POWER_BANDS) { | 968 | if (s >= EEPROM_TX_POWER_BANDS) { |
970 | IWL_ERROR("Tx Power can not find channel %d ", channel); | 969 | IWL_ERROR("Tx Power can not find channel %d\n", channel); |
971 | return -1; | 970 | return -1; |
972 | } | 971 | } |
973 | 972 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index f3d139b663e6..cbc01a00eaf4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/version.h> | ||
29 | #include <linux/init.h> | 28 | #include <linux/init.h> |
30 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
31 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index ed09e48b1b61..061ffba9c884 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -29,7 +29,6 @@ | |||
29 | 29 | ||
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/version.h> | ||
33 | #include <linux/init.h> | 32 | #include <linux/init.h> |
34 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
35 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 9bd61809129f..c72f72579bea 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -28,7 +28,6 @@ | |||
28 | 28 | ||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/version.h> | ||
32 | #include <net/mac80211.h> | 31 | #include <net/mac80211.h> |
33 | 32 | ||
34 | struct iwl_priv; /* FIXME: remove */ | 33 | struct iwl_priv; /* FIXME: remove */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index bce53830b301..37155755efc5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | |||
@@ -63,7 +63,6 @@ | |||
63 | 63 | ||
64 | #include <linux/kernel.h> | 64 | #include <linux/kernel.h> |
65 | #include <linux/module.h> | 65 | #include <linux/module.h> |
66 | #include <linux/version.h> | ||
67 | #include <linux/init.h> | 66 | #include <linux/init.h> |
68 | 67 | ||
69 | #include <net/mac80211.h> | 68 | #include <net/mac80211.h> |
@@ -146,7 +145,7 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv) | |||
146 | { | 145 | { |
147 | u32 gp = iwl_read32(priv, CSR_EEPROM_GP); | 146 | u32 gp = iwl_read32(priv, CSR_EEPROM_GP); |
148 | if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { | 147 | if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { |
149 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); | 148 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); |
150 | return -ENOENT; | 149 | return -ENOENT; |
151 | } | 150 | } |
152 | return 0; | 151 | return 0; |
@@ -227,7 +226,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) | |||
227 | 226 | ||
228 | ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); | 227 | ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); |
229 | if (ret < 0) { | 228 | if (ret < 0) { |
230 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); | 229 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); |
231 | ret = -ENOENT; | 230 | ret = -ENOENT; |
232 | goto err; | 231 | goto err; |
233 | } | 232 | } |
@@ -254,7 +253,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) | |||
254 | } | 253 | } |
255 | 254 | ||
256 | if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { | 255 | if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { |
257 | IWL_ERROR("Time out reading EEPROM[%d]", addr); | 256 | IWL_ERROR("Time out reading EEPROM[%d]\n", addr); |
258 | ret = -ETIMEDOUT; | 257 | ret = -ETIMEDOUT; |
259 | goto done; | 258 | goto done; |
260 | } | 259 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c index 6512834bb916..2eb03eea1908 100644 --- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c | |||
@@ -28,7 +28,6 @@ | |||
28 | 28 | ||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/version.h> | ||
32 | #include <net/mac80211.h> | 31 | #include <net/mac80211.h> |
33 | 32 | ||
34 | #include "iwl-dev.h" /* FIXME: remove */ | 33 | #include "iwl-dev.h" /* FIXME: remove */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c index cb11c4a4d691..4eee1b163cd2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-led.c | |||
@@ -27,7 +27,6 @@ | |||
27 | 27 | ||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/version.h> | ||
31 | #include <linux/init.h> | 30 | #include <linux/init.h> |
32 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
33 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 028e3053c0ca..a099c9e30e55 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c | |||
@@ -29,7 +29,6 @@ | |||
29 | 29 | ||
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/version.h> | ||
33 | #include <linux/init.h> | 32 | #include <linux/init.h> |
34 | 33 | ||
35 | #include <net/mac80211.h> | 34 | #include <net/mac80211.h> |
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c index e5e5846e9f25..5d642298f04c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c +++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c | |||
@@ -27,7 +27,6 @@ | |||
27 | *****************************************************************************/ | 27 | *****************************************************************************/ |
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/version.h> | ||
31 | #include <linux/init.h> | 30 | #include <linux/init.h> |
32 | 31 | ||
33 | #include <net/mac80211.h> | 32 | #include <net/mac80211.h> |
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 60a6e0106036..6283a3a707f5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -207,7 +207,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, | |||
207 | case WLAN_HT_CAP_MIMO_PS_DISABLED: | 207 | case WLAN_HT_CAP_MIMO_PS_DISABLED: |
208 | break; | 208 | break; |
209 | default: | 209 | default: |
210 | IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); | 210 | IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode); |
211 | break; | 211 | break; |
212 | } | 212 | } |
213 | 213 | ||
@@ -969,7 +969,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) | |||
969 | return priv->hw_params.bcast_sta_id; | 969 | return priv->hw_params.bcast_sta_id; |
970 | 970 | ||
971 | default: | 971 | default: |
972 | IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); | 972 | IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); |
973 | return priv->hw_params.bcast_sta_id; | 973 | return priv->hw_params.bcast_sta_id; |
974 | } | 974 | } |
975 | } | 975 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 4108c7c8f00f..d82823b5c8ab 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -493,7 +493,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv) | |||
493 | /* Alloc keep-warm buffer */ | 493 | /* Alloc keep-warm buffer */ |
494 | ret = iwl_kw_alloc(priv); | 494 | ret = iwl_kw_alloc(priv); |
495 | if (ret) { | 495 | if (ret) { |
496 | IWL_ERROR("Keep Warm allocation failed"); | 496 | IWL_ERROR("Keep Warm allocation failed\n"); |
497 | goto error_kw; | 497 | goto error_kw; |
498 | } | 498 | } |
499 | spin_lock_irqsave(&priv->lock, flags); | 499 | spin_lock_irqsave(&priv->lock, flags); |
@@ -1463,7 +1463,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | |||
1463 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | 1463 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); |
1464 | 1464 | ||
1465 | if (scd_flow >= priv->hw_params.max_txq_num) { | 1465 | if (scd_flow >= priv->hw_params.max_txq_num) { |
1466 | IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); | 1466 | IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n"); |
1467 | return; | 1467 | return; |
1468 | } | 1468 | } |
1469 | 1469 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 444847ab1b5a..b775d5bab668 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -29,7 +29,6 @@ | |||
29 | 29 | ||
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/version.h> | ||
33 | #include <linux/init.h> | 32 | #include <linux/init.h> |
34 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
35 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
@@ -1558,7 +1557,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv) | |||
1558 | BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); | 1557 | BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); |
1559 | 1558 | ||
1560 | if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { | 1559 | if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { |
1561 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); | 1560 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); |
1562 | return -ENOENT; | 1561 | return -ENOENT; |
1563 | } | 1562 | } |
1564 | 1563 | ||
@@ -1583,7 +1582,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv) | |||
1583 | } | 1582 | } |
1584 | 1583 | ||
1585 | if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { | 1584 | if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { |
1586 | IWL_ERROR("Time out reading EEPROM[%d]", addr); | 1585 | IWL_ERROR("Time out reading EEPROM[%d]\n", addr); |
1587 | return -ETIMEDOUT; | 1586 | return -ETIMEDOUT; |
1588 | } | 1587 | } |
1589 | e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); | 1588 | e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); |
@@ -2507,7 +2506,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h | |||
2507 | return priv->hw_setting.bcast_sta_id; | 2506 | return priv->hw_setting.bcast_sta_id; |
2508 | 2507 | ||
2509 | default: | 2508 | default: |
2510 | IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); | 2509 | IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); |
2511 | return priv->hw_setting.bcast_sta_id; | 2510 | return priv->hw_setting.bcast_sta_id; |
2512 | } | 2511 | } |
2513 | } | 2512 | } |
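Most of the iwlwifi hunks above only append a terminating "\n" to IWL_ERROR/IWL_WARNING format strings; without it, the next kernel message tends to be appended to the same console line and the log becomes hard to read. A trivial userspace sketch of the effect; MY_ERROR is a stand-in macro, not the driver's.

#include <stdio.h>

#define MY_ERROR(fmt, ...) fprintf(stderr, "iwl: " fmt, ##__VA_ARGS__)

int main(void)
{
	/* with the trailing \n each message gets its own line */
	MY_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", 0x12345678u);
	MY_ERROR("Time out reading EEPROM[%d]\n", 42);
	return 0;
}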
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c index 83cd85e1f847..29be3dc8ee09 100644 --- a/drivers/net/wireless/p54/p54common.c +++ b/drivers/net/wireless/p54/p54common.c | |||
@@ -413,12 +413,12 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb) | |||
413 | last_addr = range->end_addr; | 413 | last_addr = range->end_addr; |
414 | __skb_unlink(entry, &priv->tx_queue); | 414 | __skb_unlink(entry, &priv->tx_queue); |
415 | memset(&info->status, 0, sizeof(info->status)); | 415 | memset(&info->status, 0, sizeof(info->status)); |
416 | priv->tx_stats[skb_get_queue_mapping(skb)].len--; | ||
417 | entry_hdr = (struct p54_control_hdr *) entry->data; | 416 | entry_hdr = (struct p54_control_hdr *) entry->data; |
418 | entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; | 417 | entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; |
419 | if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) | 418 | if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) |
420 | pad = entry_data->align[0]; | 419 | pad = entry_data->align[0]; |
421 | 420 | ||
421 | priv->tx_stats[entry_data->hw_queue - 4].len--; | ||
422 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { | 422 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { |
423 | if (!(payload->status & 0x01)) | 423 | if (!(payload->status & 0x01)) |
424 | info->flags |= IEEE80211_TX_STAT_ACK; | 424 | info->flags |= IEEE80211_TX_STAT_ACK; |
@@ -557,6 +557,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) | |||
557 | struct p54_tx_control_allocdata *txhdr; | 557 | struct p54_tx_control_allocdata *txhdr; |
558 | size_t padding, len; | 558 | size_t padding, len; |
559 | u8 rate; | 559 | u8 rate; |
560 | u8 cts_rate = 0x20; | ||
560 | 561 | ||
561 | current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)]; | 562 | current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)]; |
562 | if (unlikely(current_queue->len > current_queue->limit)) | 563 | if (unlikely(current_queue->len > current_queue->limit)) |
@@ -581,28 +582,28 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) | |||
581 | hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1); | 582 | hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1); |
582 | hdr->retry1 = hdr->retry2 = info->control.retry_limit; | 583 | hdr->retry1 = hdr->retry2 = info->control.retry_limit; |
583 | 584 | ||
584 | memset(txhdr->wep_key, 0x0, 16); | ||
585 | txhdr->padding = 0; | ||
586 | txhdr->padding2 = 0; | ||
587 | |||
588 | /* TODO: add support for alternate retry TX rates */ | 585 | /* TODO: add support for alternate retry TX rates */ |
589 | rate = ieee80211_get_tx_rate(dev, info)->hw_value; | 586 | rate = ieee80211_get_tx_rate(dev, info)->hw_value; |
590 | if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) | 587 | if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) { |
591 | rate |= 0x10; | 588 | rate |= 0x10; |
592 | if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) | 589 | cts_rate |= 0x10; |
590 | } | ||
591 | if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { | ||
593 | rate |= 0x40; | 592 | rate |= 0x40; |
594 | else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) | 593 | cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; |
594 | } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { | ||
595 | rate |= 0x20; | 595 | rate |= 0x20; |
596 | cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; | ||
597 | } | ||
596 | memset(txhdr->rateset, rate, 8); | 598 | memset(txhdr->rateset, rate, 8); |
597 | txhdr->wep_key_present = 0; | 599 | txhdr->key_type = 0; |
598 | txhdr->wep_key_len = 0; | 600 | txhdr->key_len = 0; |
599 | txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4); | 601 | txhdr->hw_queue = skb_get_queue_mapping(skb) + 4; |
600 | txhdr->magic4 = 0; | 602 | txhdr->tx_antenna = (info->antenna_sel_tx == 0) ? |
601 | txhdr->antenna = (info->antenna_sel_tx == 0) ? | ||
602 | 2 : info->antenna_sel_tx - 1; | 603 | 2 : info->antenna_sel_tx - 1; |
603 | txhdr->output_power = 0x7f; // HW Maximum | 604 | txhdr->output_power = 0x7f; // HW Maximum |
604 | txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? | 605 | txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? |
605 | 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23)); | 606 | 0 : cts_rate; |
606 | if (padding) | 607 | if (padding) |
607 | txhdr->align[0] = padding; | 608 | txhdr->align[0] = padding; |
608 | 609 | ||
@@ -836,10 +837,21 @@ static int p54_start(struct ieee80211_hw *dev) | |||
836 | struct p54_common *priv = dev->priv; | 837 | struct p54_common *priv = dev->priv; |
837 | int err; | 838 | int err; |
838 | 839 | ||
840 | if (!priv->cached_vdcf) { | ||
841 | priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf)+ | ||
842 | priv->tx_hdr_len + sizeof(struct p54_control_hdr), | ||
843 | GFP_KERNEL); | ||
844 | |||
845 | if (!priv->cached_vdcf) | ||
846 | return -ENOMEM; | ||
847 | } | ||
848 | |||
839 | err = priv->open(dev); | 849 | err = priv->open(dev); |
840 | if (!err) | 850 | if (!err) |
841 | priv->mode = IEEE80211_IF_TYPE_MNTR; | 851 | priv->mode = IEEE80211_IF_TYPE_MNTR; |
842 | 852 | ||
853 | p54_init_vdcf(dev); | ||
854 | |||
843 | return err; | 855 | return err; |
844 | } | 856 | } |
845 | 857 | ||
@@ -1019,15 +1031,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len) | |||
1019 | dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + | 1031 | dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + |
1020 | sizeof(struct p54_tx_control_allocdata); | 1032 | sizeof(struct p54_tx_control_allocdata); |
1021 | 1033 | ||
1022 | priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) + | ||
1023 | priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL); | ||
1024 | |||
1025 | if (!priv->cached_vdcf) { | ||
1026 | ieee80211_free_hw(dev); | ||
1027 | return NULL; | ||
1028 | } | ||
1029 | |||
1030 | p54_init_vdcf(dev); | ||
1031 | mutex_init(&priv->conf_mutex); | 1034 | mutex_init(&priv->conf_mutex); |
1032 | 1035 | ||
1033 | return dev; | 1036 | return dev; |
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h index 2245fcce92dc..8db6c0e8e540 100644 --- a/drivers/net/wireless/p54/p54common.h +++ b/drivers/net/wireless/p54/p54common.h | |||
@@ -183,16 +183,16 @@ struct p54_frame_sent_hdr { | |||
183 | 183 | ||
184 | struct p54_tx_control_allocdata { | 184 | struct p54_tx_control_allocdata { |
185 | u8 rateset[8]; | 185 | u8 rateset[8]; |
186 | u16 padding; | 186 | u8 unalloc0[2]; |
187 | u8 wep_key_present; | 187 | u8 key_type; |
188 | u8 wep_key_len; | 188 | u8 key_len; |
189 | u8 wep_key[16]; | 189 | u8 key[16]; |
190 | __le32 frame_type; | 190 | u8 hw_queue; |
191 | u32 padding2; | 191 | u8 unalloc1[9]; |
192 | __le16 magic4; | 192 | u8 tx_antenna; |
193 | u8 antenna; | ||
194 | u8 output_power; | 193 | u8 output_power; |
195 | __le32 magic5; | 194 | u8 cts_rate; |
195 | u8 unalloc2[3]; | ||
196 | u8 align[0]; | 196 | u8 align[0]; |
197 | } __attribute__ ((packed)); | 197 | } __attribute__ ((packed)); |
198 | 198 | ||
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 815c095ef797..cbaca23a9453 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
@@ -109,7 +109,17 @@ static void p54u_rx_cb(struct urb *urb) | |||
109 | urb->context = skb; | 109 | urb->context = skb; |
110 | skb_queue_tail(&priv->rx_queue, skb); | 110 | skb_queue_tail(&priv->rx_queue, skb); |
111 | } else { | 111 | } else { |
112 | if (!priv->hw_type) | ||
113 | skb_push(skb, sizeof(struct net2280_tx_hdr)); | ||
114 | |||
115 | skb_reset_tail_pointer(skb); | ||
112 | skb_trim(skb, 0); | 116 | skb_trim(skb, 0); |
117 | if (urb->transfer_buffer != skb_tail_pointer(skb)) { | ||
118 | /* this should not happen */ | ||
119 | WARN_ON(1); | ||
120 | urb->transfer_buffer = skb_tail_pointer(skb); | ||
121 | } | ||
122 | |||
113 | skb_queue_tail(&priv->rx_queue, skb); | 123 | skb_queue_tail(&priv->rx_queue, skb); |
114 | } | 124 | } |
115 | 125 | ||
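The p54common.c hunks above move the cached_vdcf allocation (and the p54_init_vdcf() call) out of p54_init_common() and into p54_start(): the buffer is now allocated lazily on the first start, and a failure simply returns -ENOMEM from start() instead of tearing down the freshly allocated hw. A generic sketch of the allocate-on-first-open pattern; struct and function names here are hypothetical.

#include <linux/errno.h>
#include <linux/slab.h>

struct my_priv {
	void *cached_cfg;	/* reused across start/stop cycles */
	size_t cfg_len;
};

static int my_start(struct my_priv *priv)
{
	if (!priv->cached_cfg) {
		priv->cached_cfg = kzalloc(priv->cfg_len, GFP_KERNEL);
		if (!priv->cached_cfg)
			return -ENOMEM;	/* caller sees a clean failure */
	}
	/* ... continue bringing the device up ... */
	return 0;
}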
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h index a4a8c57004db..ff78e52ce43c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/rt2x00/rt2x00queue.h | |||
@@ -173,10 +173,10 @@ struct rxdone_entry_desc { | |||
173 | * frame transmission failed due to excessive retries. | 173 | * frame transmission failed due to excessive retries. |
174 | */ | 174 | */ |
175 | enum txdone_entry_desc_flags { | 175 | enum txdone_entry_desc_flags { |
176 | TXDONE_UNKNOWN = 1 << 0, | 176 | TXDONE_UNKNOWN, |
177 | TXDONE_SUCCESS = 1 << 1, | 177 | TXDONE_SUCCESS, |
178 | TXDONE_FAILURE = 1 << 2, | 178 | TXDONE_FAILURE, |
179 | TXDONE_EXCESSIVE_RETRY = 1 << 3, | 179 | TXDONE_EXCESSIVE_RETRY, |
180 | }; | 180 | }; |
181 | 181 | ||
182 | /** | 182 | /** |
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 8d76bb2e0312..2050227ea530 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c | |||
@@ -181,6 +181,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) | |||
181 | * (Only indirectly by looking at the failed TX counters | 181 | * (Only indirectly by looking at the failed TX counters |
182 | * in the register). | 182 | * in the register). |
183 | */ | 183 | */ |
184 | txdesc.flags = 0; | ||
184 | if (!urb->status) | 185 | if (!urb->status) |
185 | __set_bit(TXDONE_UNKNOWN, &txdesc.flags); | 186 | __set_bit(TXDONE_UNKNOWN, &txdesc.flags); |
186 | else | 187 | else |
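The rt2x00 hunks change enum txdone_entry_desc_flags from bitmask values (1 << n) to plain bit indices and zero txdesc.flags before reporting: the flags word is manipulated with __set_bit()/test_bit(), which expect a bit number rather than a mask, and starting from zero keeps stale bits from leaking into the status. A short sketch of that usage, with hypothetical flag and struct names.

#include <linux/bitops.h>

enum my_txdone_flags {
	MY_TXDONE_UNKNOWN,	/* bit index 0, not (1 << 0) */
	MY_TXDONE_SUCCESS,	/* bit index 1 */
	MY_TXDONE_FAILURE,	/* bit index 2 */
};

struct my_txdesc {
	unsigned long flags;
};

static void my_report_txdone(struct my_txdesc *desc, int urb_status)
{
	desc->flags = 0;	/* start from a clean slate */
	if (!urb_status)
		__set_bit(MY_TXDONE_UNKNOWN, &desc->flags);
	else
		__set_bit(MY_TXDONE_FAILURE, &desc->flags);
	/* consumers read it back with test_bit(MY_TXDONE_..., &desc->flags) */
}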
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c index 57376fb993ed..ca5deb6244e6 100644 --- a/drivers/net/wireless/rtl8187_dev.c +++ b/drivers/net/wireless/rtl8187_dev.c | |||
@@ -40,6 +40,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { | |||
40 | /* Netgear */ | 40 | /* Netgear */ |
41 | {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187}, | 41 | {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187}, |
42 | {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187}, | 42 | {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187}, |
43 | {USB_DEVICE(0x0846, 0x4260), .driver_info = DEVICE_RTL8187B}, | ||
43 | /* HP */ | 44 | /* HP */ |
44 | {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, | 45 | {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, |
45 | /* Sitecom */ | 46 | /* Sitecom */ |
diff --git a/drivers/of/device.c b/drivers/of/device.c index 8a1d93a2bb81..51e5214071da 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c | |||
@@ -57,6 +57,15 @@ static ssize_t devspec_show(struct device *dev, | |||
57 | return sprintf(buf, "%s\n", ofdev->node->full_name); | 57 | return sprintf(buf, "%s\n", ofdev->node->full_name); |
58 | } | 58 | } |
59 | 59 | ||
60 | static ssize_t name_show(struct device *dev, | ||
61 | struct device_attribute *attr, char *buf) | ||
62 | { | ||
63 | struct of_device *ofdev; | ||
64 | |||
65 | ofdev = to_of_device(dev); | ||
66 | return sprintf(buf, "%s\n", ofdev->node->name); | ||
67 | } | ||
68 | |||
60 | static ssize_t modalias_show(struct device *dev, | 69 | static ssize_t modalias_show(struct device *dev, |
61 | struct device_attribute *attr, char *buf) | 70 | struct device_attribute *attr, char *buf) |
62 | { | 71 | { |
@@ -71,6 +80,7 @@ static ssize_t modalias_show(struct device *dev, | |||
71 | 80 | ||
72 | struct device_attribute of_platform_device_attrs[] = { | 81 | struct device_attribute of_platform_device_attrs[] = { |
73 | __ATTR_RO(devspec), | 82 | __ATTR_RO(devspec), |
83 | __ATTR_RO(name), | ||
74 | __ATTR_RO(modalias), | 84 | __ATTR_RO(modalias), |
75 | __ATTR_NULL | 85 | __ATTR_NULL |
76 | }; | 86 | }; |
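The drivers/of hunk exports the device-tree node name through sysfs by adding a name_show() callback and listing it with __ATTR_RO(name); __ATTR_RO(x) wires an attribute file named "x" to x##_show() with read-only permissions. A minimal sketch of the same mechanism for a made-up attribute (revision is not a real of_platform attribute).

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t revision_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	/* a real driver would derive this value from device state */
	return sprintf(buf, "%d\n", 1);
}

static struct device_attribute my_platform_device_attrs[] = {
	__ATTR_RO(revision),	/* appears as a read-only sysfs file */
	__ATTR_NULL
};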
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 2450b3a393ff..7ba78e6d210e 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -38,8 +38,10 @@ void free_cpu_buffers(void) | |||
38 | { | 38 | { |
39 | int i; | 39 | int i; |
40 | 40 | ||
41 | for_each_online_cpu(i) | 41 | for_each_online_cpu(i) { |
42 | vfree(per_cpu(cpu_buffer, i).buffer); | 42 | vfree(per_cpu(cpu_buffer, i).buffer); |
43 | per_cpu(cpu_buffer, i).buffer = NULL; | ||
44 | } | ||
43 | } | 45 | } |
44 | 46 | ||
45 | int alloc_cpu_buffers(void) | 47 | int alloc_cpu_buffers(void) |
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index e7fbac529935..8d692a5c8e73 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c | |||
@@ -93,6 +93,8 @@ out: | |||
93 | void free_event_buffer(void) | 93 | void free_event_buffer(void) |
94 | { | 94 | { |
95 | vfree(event_buffer); | 95 | vfree(event_buffer); |
96 | |||
97 | event_buffer = NULL; | ||
96 | } | 98 | } |
97 | 99 | ||
98 | 100 | ||
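Both oprofile hunks clear the pointer right after vfree(): since vfree(NULL) is a no-op, NULLing the pointer makes the free routines safe to call again and keeps later code from re-freeing or reusing a stale buffer. The pattern in isolation:

#include <linux/vmalloc.h>

static void *event_buf;

static void my_free_buffer(void)
{
	vfree(event_buf);	/* vfree(NULL) is harmless */
	event_buf = NULL;	/* calling my_free_buffer() twice is now safe */
}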
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 93e37f0666ab..e17ef54f0efc 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); | |||
382 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) | 382 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) |
383 | { | 383 | { |
384 | acpi_status status; | 384 | acpi_status status; |
385 | acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); | 385 | acpi_handle chandle, handle; |
386 | struct pci_dev *pdev = dev; | 386 | struct pci_dev *pdev = dev; |
387 | struct pci_bus *parent; | 387 | struct pci_bus *parent; |
388 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; | 388 | struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; |
@@ -399,10 +399,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) | |||
399 | * Per PCI firmware specification, we should run the ACPI _OSC | 399 | * Per PCI firmware specification, we should run the ACPI _OSC |
400 | * method to get control of hotplug hardware before using it. If | 400 | * method to get control of hotplug hardware before using it. If |
401 | * an _OSC is missing, we look for an OSHP to do the same thing. | 401 | * an _OSC is missing, we look for an OSHP to do the same thing. |
402 | * To handle different BIOS behavior, we look for _OSC and OSHP | 402 | * To handle different BIOS behavior, we look for _OSC on a root |
403 | * within the scope of the hotplug controller and its parents, | 403 | * bridge preferentially (according to PCI fw spec). Later for |
404 | * OSHP within the scope of the hotplug controller and its parents, | ||
404 | * upto the host bridge under which this controller exists. | 405 | * upto the host bridge under which this controller exists. |
405 | */ | 406 | */ |
407 | handle = acpi_find_root_bridge_handle(pdev); | ||
408 | if (handle) { | ||
409 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | ||
410 | dbg("Trying to get hotplug control for %s\n", | ||
411 | (char *)string.pointer); | ||
412 | status = pci_osc_control_set(handle, flags); | ||
413 | if (ACPI_SUCCESS(status)) | ||
414 | goto got_one; | ||
415 | kfree(string.pointer); | ||
416 | string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; | ||
417 | } | ||
418 | |||
419 | pdev = dev; | ||
420 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | ||
406 | while (!handle) { | 421 | while (!handle) { |
407 | /* | 422 | /* |
408 | * This hotplug controller was not listed in the ACPI name | 423 | * This hotplug controller was not listed in the ACPI name |
@@ -427,15 +442,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) | |||
427 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); | 442 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); |
428 | dbg("Trying to get hotplug control for %s \n", | 443 | dbg("Trying to get hotplug control for %s \n", |
429 | (char *)string.pointer); | 444 | (char *)string.pointer); |
430 | status = pci_osc_control_set(handle, flags); | 445 | status = acpi_run_oshp(handle); |
431 | if (status == AE_NOT_FOUND) | 446 | if (ACPI_SUCCESS(status)) |
432 | status = acpi_run_oshp(handle); | 447 | goto got_one; |
433 | if (ACPI_SUCCESS(status)) { | ||
434 | dbg("Gained control for hotplug HW for pci %s (%s)\n", | ||
435 | pci_name(dev), (char *)string.pointer); | ||
436 | kfree(string.pointer); | ||
437 | return 0; | ||
438 | } | ||
439 | if (acpi_root_bridge(handle)) | 448 | if (acpi_root_bridge(handle)) |
440 | break; | 449 | break; |
441 | chandle = handle; | 450 | chandle = handle; |
@@ -449,6 +458,11 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) | |||
449 | 458 | ||
450 | kfree(string.pointer); | 459 | kfree(string.pointer); |
451 | return -ENODEV; | 460 | return -ENODEV; |
461 | got_one: | ||
462 | dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev), | ||
463 | (char *)string.pointer); | ||
464 | kfree(string.pointer); | ||
465 | return 0; | ||
452 | } | 466 | } |
453 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); | 467 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); |
454 | 468 | ||
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index e3a1e7e7dba2..9e6cec67e1cc 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
@@ -43,7 +43,6 @@ extern int pciehp_poll_mode; | |||
43 | extern int pciehp_poll_time; | 43 | extern int pciehp_poll_time; |
44 | extern int pciehp_debug; | 44 | extern int pciehp_debug; |
45 | extern int pciehp_force; | 45 | extern int pciehp_force; |
46 | extern int pciehp_slot_with_bus; | ||
47 | extern struct workqueue_struct *pciehp_wq; | 46 | extern struct workqueue_struct *pciehp_wq; |
48 | 47 | ||
49 | #define dbg(format, arg...) \ | 48 | #define dbg(format, arg...) \ |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 3677495c4f91..4fd5355bc3b5 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -41,7 +41,6 @@ int pciehp_debug; | |||
41 | int pciehp_poll_mode; | 41 | int pciehp_poll_mode; |
42 | int pciehp_poll_time; | 42 | int pciehp_poll_time; |
43 | int pciehp_force; | 43 | int pciehp_force; |
44 | int pciehp_slot_with_bus; | ||
45 | struct workqueue_struct *pciehp_wq; | 44 | struct workqueue_struct *pciehp_wq; |
46 | 45 | ||
47 | #define DRIVER_VERSION "0.4" | 46 | #define DRIVER_VERSION "0.4" |
@@ -56,12 +55,10 @@ module_param(pciehp_debug, bool, 0644); | |||
56 | module_param(pciehp_poll_mode, bool, 0644); | 55 | module_param(pciehp_poll_mode, bool, 0644); |
57 | module_param(pciehp_poll_time, int, 0644); | 56 | module_param(pciehp_poll_time, int, 0644); |
58 | module_param(pciehp_force, bool, 0644); | 57 | module_param(pciehp_force, bool, 0644); |
59 | module_param(pciehp_slot_with_bus, bool, 0644); | ||
60 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); | 58 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); |
61 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); | 59 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); |
62 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); | 60 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); |
63 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); | 61 | MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); |
64 | MODULE_PARM_DESC(pciehp_slot_with_bus, "Use bus number in the slot name"); | ||
65 | 62 | ||
66 | #define PCIE_MODULE_NAME "pciehp" | 63 | #define PCIE_MODULE_NAME "pciehp" |
67 | 64 | ||
@@ -194,6 +191,7 @@ static int init_slots(struct controller *ctrl) | |||
194 | struct slot *slot; | 191 | struct slot *slot; |
195 | struct hotplug_slot *hotplug_slot; | 192 | struct hotplug_slot *hotplug_slot; |
196 | struct hotplug_slot_info *info; | 193 | struct hotplug_slot_info *info; |
194 | int len, dup = 1; | ||
197 | int retval = -ENOMEM; | 195 | int retval = -ENOMEM; |
198 | 196 | ||
199 | list_for_each_entry(slot, &ctrl->slot_list, slot_list) { | 197 | list_for_each_entry(slot, &ctrl->slot_list, slot_list) { |
@@ -220,15 +218,24 @@ static int init_slots(struct controller *ctrl) | |||
220 | dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " | 218 | dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " |
221 | "slot_device_offset=%x\n", slot->bus, slot->device, | 219 | "slot_device_offset=%x\n", slot->bus, slot->device, |
222 | slot->hp_slot, slot->number, ctrl->slot_device_offset); | 220 | slot->hp_slot, slot->number, ctrl->slot_device_offset); |
221 | duplicate_name: | ||
223 | retval = pci_hp_register(hotplug_slot, | 222 | retval = pci_hp_register(hotplug_slot, |
224 | ctrl->pci_dev->subordinate, | 223 | ctrl->pci_dev->subordinate, |
225 | slot->device); | 224 | slot->device); |
226 | if (retval) { | 225 | if (retval) { |
226 | /* | ||
227 | * If slot N already exists, we'll try to create | ||
228 | * slot N-1, N-2 ... N-M, until we overflow. | ||
229 | */ | ||
230 | if (retval == -EEXIST) { | ||
231 | len = snprintf(slot->name, SLOT_NAME_SIZE, | ||
232 | "%d-%d", slot->number, dup++); | ||
233 | if (len < SLOT_NAME_SIZE) | ||
234 | goto duplicate_name; | ||
235 | else | ||
236 | err("duplicate slot name overflow\n"); | ||
237 | } | ||
227 | err("pci_hp_register failed with error %d\n", retval); | 238 | err("pci_hp_register failed with error %d\n", retval); |
228 | if (retval == -EEXIST) | ||
229 | err("Failed to register slot because of name " | ||
230 | "collision. Try \'pciehp_slot_with_bus\' " | ||
231 | "module option.\n"); | ||
232 | goto error_info; | 239 | goto error_info; |
233 | } | 240 | } |
234 | /* create additional sysfs entries */ | 241 | /* create additional sysfs entries */ |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index ad27e9e225a6..ab31f5ba665d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -1030,15 +1030,6 @@ static void pcie_shutdown_notification(struct controller *ctrl) | |||
1030 | pciehp_free_irq(ctrl); | 1030 | pciehp_free_irq(ctrl); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | static void make_slot_name(struct slot *slot) | ||
1034 | { | ||
1035 | if (pciehp_slot_with_bus) | ||
1036 | snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d", | ||
1037 | slot->bus, slot->number); | ||
1038 | else | ||
1039 | snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); | ||
1040 | } | ||
1041 | |||
1042 | static int pcie_init_slot(struct controller *ctrl) | 1033 | static int pcie_init_slot(struct controller *ctrl) |
1043 | { | 1034 | { |
1044 | struct slot *slot; | 1035 | struct slot *slot; |
@@ -1053,7 +1044,7 @@ static int pcie_init_slot(struct controller *ctrl) | |||
1053 | slot->device = ctrl->slot_device_offset + slot->hp_slot; | 1044 | slot->device = ctrl->slot_device_offset + slot->hp_slot; |
1054 | slot->hpc_ops = ctrl->hpc_ops; | 1045 | slot->hpc_ops = ctrl->hpc_ops; |
1055 | slot->number = ctrl->first_slot; | 1046 | slot->number = ctrl->first_slot; |
1056 | make_slot_name(slot); | 1047 | snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); |
1057 | mutex_init(&slot->lock); | 1048 | mutex_init(&slot->lock); |
1058 | INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); | 1049 | INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); |
1059 | list_add(&slot->slot_list, &ctrl->slot_list); | 1050 | list_add(&slot->slot_list, &ctrl->slot_list); |
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index a8cbd039b85b..cc38615395f1 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -39,7 +39,6 @@ | |||
39 | int shpchp_debug; | 39 | int shpchp_debug; |
40 | int shpchp_poll_mode; | 40 | int shpchp_poll_mode; |
41 | int shpchp_poll_time; | 41 | int shpchp_poll_time; |
42 | static int shpchp_slot_with_bus; | ||
43 | struct workqueue_struct *shpchp_wq; | 42 | struct workqueue_struct *shpchp_wq; |
44 | 43 | ||
45 | #define DRIVER_VERSION "0.4" | 44 | #define DRIVER_VERSION "0.4" |
@@ -53,11 +52,9 @@ MODULE_LICENSE("GPL"); | |||
53 | module_param(shpchp_debug, bool, 0644); | 52 | module_param(shpchp_debug, bool, 0644); |
54 | module_param(shpchp_poll_mode, bool, 0644); | 53 | module_param(shpchp_poll_mode, bool, 0644); |
55 | module_param(shpchp_poll_time, int, 0644); | 54 | module_param(shpchp_poll_time, int, 0644); |
56 | module_param(shpchp_slot_with_bus, bool, 0644); | ||
57 | MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); | 55 | MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); |
58 | MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); | 56 | MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); |
59 | MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); | 57 | MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); |
60 | MODULE_PARM_DESC(shpchp_slot_with_bus, "Use bus number in the slot name"); | ||
61 | 58 | ||
62 | #define SHPC_MODULE_NAME "shpchp" | 59 | #define SHPC_MODULE_NAME "shpchp" |
63 | 60 | ||
@@ -99,23 +96,13 @@ static void release_slot(struct hotplug_slot *hotplug_slot) | |||
99 | kfree(slot); | 96 | kfree(slot); |
100 | } | 97 | } |
101 | 98 | ||
102 | static void make_slot_name(struct slot *slot) | ||
103 | { | ||
104 | if (shpchp_slot_with_bus) | ||
105 | snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d", | ||
106 | slot->bus, slot->number); | ||
107 | else | ||
108 | snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", | ||
109 | slot->number); | ||
110 | } | ||
111 | |||
112 | static int init_slots(struct controller *ctrl) | 99 | static int init_slots(struct controller *ctrl) |
113 | { | 100 | { |
114 | struct slot *slot; | 101 | struct slot *slot; |
115 | struct hotplug_slot *hotplug_slot; | 102 | struct hotplug_slot *hotplug_slot; |
116 | struct hotplug_slot_info *info; | 103 | struct hotplug_slot_info *info; |
117 | int retval = -ENOMEM; | 104 | int retval = -ENOMEM; |
118 | int i; | 105 | int i, len, dup = 1; |
119 | 106 | ||
120 | for (i = 0; i < ctrl->num_slots; i++) { | 107 | for (i = 0; i < ctrl->num_slots; i++) { |
121 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | 108 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); |
@@ -146,7 +133,7 @@ static int init_slots(struct controller *ctrl) | |||
146 | /* register this slot with the hotplug pci core */ | 133 | /* register this slot with the hotplug pci core */ |
147 | hotplug_slot->private = slot; | 134 | hotplug_slot->private = slot; |
148 | hotplug_slot->release = &release_slot; | 135 | hotplug_slot->release = &release_slot; |
149 | make_slot_name(slot); | 136 | snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); |
150 | hotplug_slot->ops = &shpchp_hotplug_slot_ops; | 137 | hotplug_slot->ops = &shpchp_hotplug_slot_ops; |
151 | 138 | ||
152 | get_power_status(hotplug_slot, &info->power_status); | 139 | get_power_status(hotplug_slot, &info->power_status); |
@@ -157,14 +144,23 @@ static int init_slots(struct controller *ctrl) | |||
157 | dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " | 144 | dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " |
158 | "slot_device_offset=%x\n", slot->bus, slot->device, | 145 | "slot_device_offset=%x\n", slot->bus, slot->device, |
159 | slot->hp_slot, slot->number, ctrl->slot_device_offset); | 146 | slot->hp_slot, slot->number, ctrl->slot_device_offset); |
147 | duplicate_name: | ||
160 | retval = pci_hp_register(slot->hotplug_slot, | 148 | retval = pci_hp_register(slot->hotplug_slot, |
161 | ctrl->pci_dev->subordinate, slot->device); | 149 | ctrl->pci_dev->subordinate, slot->device); |
162 | if (retval) { | 150 | if (retval) { |
151 | /* | ||
152 | * If slot N already exists, we'll try to create | ||
153 | * slot N-1, N-2 ... N-M, until we overflow. | ||
154 | */ | ||
155 | if (retval == -EEXIST) { | ||
156 | len = snprintf(slot->name, SLOT_NAME_SIZE, | ||
157 | "%d-%d", slot->number, dup++); | ||
158 | if (len < SLOT_NAME_SIZE) | ||
159 | goto duplicate_name; | ||
160 | else | ||
161 | err("duplicate slot name overflow\n"); | ||
162 | } | ||
163 | err("pci_hp_register failed with error %d\n", retval); | 163 | err("pci_hp_register failed with error %d\n", retval); |
164 | if (retval == -EEXIST) | ||
165 | err("Failed to register slot because of name " | ||
166 | "collision. Try \'shpchp_slot_with_bus\' " | ||
167 | "module option.\n"); | ||
168 | goto error_info; | 164 | goto error_info; |
169 | } | 165 | } |
170 | 166 | ||
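The pciehp and shpchp changes above drop the *_slot_with_bus module parameters and instead resolve name collisions at registration time: when pci_hp_register() returns -EEXIST the slot name is rewritten as "N-1", "N-2", ... and registration is retried until snprintf() reports that the suffixed name no longer fits in SLOT_NAME_SIZE. A userspace sketch of that retry loop; fake_register() stands in for pci_hp_register() and is not a real API.

#include <stdio.h>
#include <string.h>

#define SLOT_NAME_SIZE 10

/* stand-in for pci_hp_register(): pretend plain "5" is already taken */
static int fake_register(const char *name)
{
	return strcmp(name, "5") == 0 ? -17 /* -EEXIST */ : 0;
}

static int register_slot(int number, char *name, size_t size)
{
	int dup = 1, len, ret;

	snprintf(name, size, "%d", number);
	while ((ret = fake_register(name)) == -17) {
		/* append a -1, -2, ... suffix until it registers or overflows */
		len = snprintf(name, size, "%d-%d", number, dup++);
		if (len >= (int)size)
			return ret;	/* duplicate slot name overflow */
	}
	return ret;
}

int main(void)
{
	char name[SLOT_NAME_SIZE];

	if (register_slot(5, name, sizeof(name)) == 0)
		printf("registered as %s\n", name);	/* prints "5-1" */
	return 0;
}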
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 30f581b8791f..6dd7b13e9808 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
@@ -36,12 +36,7 @@ int aer_osc_setup(struct pcie_device *pciedev) | |||
36 | if (acpi_pci_disabled) | 36 | if (acpi_pci_disabled) |
37 | return -1; | 37 | return -1; |
38 | 38 | ||
39 | /* Find root host bridge */ | 39 | handle = acpi_find_root_bridge_handle(pdev); |
40 | while (pdev->bus->self) | ||
41 | pdev = pdev->bus->self; | ||
42 | handle = acpi_get_pci_rootbridge_handle( | ||
43 | pci_domain_nr(pdev->bus), pdev->bus->number); | ||
44 | |||
45 | if (handle) { | 40 | if (handle) { |
46 | pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); | 41 | pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); |
47 | status = pci_osc_control_set(handle, | 42 | status = pci_osc_control_set(handle, |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index a04498d390c8..cce2f4cb1fbf 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -383,6 +383,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
383 | res->start = base; | 383 | res->start = base; |
384 | if (!res->end) | 384 | if (!res->end) |
385 | res->end = limit + 0xfff; | 385 | res->end = limit + 0xfff; |
386 | printk(KERN_INFO "PCI: bridge %s io port: [%llx, %llx]\n", pci_name(dev), res->start, res->end); | ||
386 | } | 387 | } |
387 | 388 | ||
388 | res = child->resource[1]; | 389 | res = child->resource[1]; |
@@ -394,6 +395,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
394 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; | 395 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; |
395 | res->start = base; | 396 | res->start = base; |
396 | res->end = limit + 0xfffff; | 397 | res->end = limit + 0xfffff; |
398 | printk(KERN_INFO "PCI: bridge %s 32bit mmio: [%llx, %llx]\n", pci_name(dev), res->start, res->end); | ||
397 | } | 399 | } |
398 | 400 | ||
399 | res = child->resource[2]; | 401 | res = child->resource[2]; |
@@ -429,6 +431,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
429 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; | 431 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; |
430 | res->start = base; | 432 | res->start = base; |
431 | res->end = limit + 0xfffff; | 433 | res->end = limit + 0xfffff; |
434 | printk(KERN_INFO "PCI: bridge %s %sbit mmio pref: [%llx, %llx]\n", pci_name(dev), (res->flags & PCI_PREF_RANGE_TYPE_64)?"64":"32",res->start, res->end); | ||
432 | } | 435 | } |
433 | } | 436 | } |
434 | 437 | ||
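Editor's note: the new probe.c printks pass res->start/res->end straight to "%llx". resource_size_t is only 64 bits on configurations that enable 64-bit resources, so on 32-bit builds this style of message usually wants explicit casts to avoid format warnings. A hedged sketch of the same message with the casts added (not what the patch does, just the conventional safe form):

    printk(KERN_INFO "PCI: bridge %s io port: [%llx, %llx]\n",
           pci_name(dev),
           (unsigned long long)res->start,
           (unsigned long long)res->end);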
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 217814fef4ef..3b3b5f178797 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -280,6 +280,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, | |||
280 | match_pci_dev_by_id); | 280 | match_pci_dev_by_id); |
281 | if (dev) | 281 | if (dev) |
282 | pdev = to_pci_dev(dev); | 282 | pdev = to_pci_dev(dev); |
283 | if (from) | ||
284 | pci_dev_put(from); | ||
283 | return pdev; | 285 | return pdev; |
284 | } | 286 | } |
285 | 287 | ||
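Editor's note: the search.c hunk adds pci_dev_put(from), so pci_get_dev_by_id() now follows the usual "take a reference on the result, drop the reference on the device you searched from" convention of the pci_get_*() iterators. A minimal sketch of how callers rely on that convention (vendor/device are placeholders, not from this file):

    struct pci_dev *pdev = NULL;

    /* pci_get_device() drops the reference on the device passed in and
     * returns the next match with a fresh reference held. */
    while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
            /* ... use pdev ... */
    }
    /* loop exits with pdev == NULL, so no reference is leaked */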
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 827c0a520e2b..82634a2f1b1d 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -530,6 +530,36 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus) | |||
530 | } | 530 | } |
531 | EXPORT_SYMBOL(pci_bus_assign_resources); | 531 | EXPORT_SYMBOL(pci_bus_assign_resources); |
532 | 532 | ||
533 | static void pci_bus_dump_res(struct pci_bus *bus) | ||
534 | { | ||
535 | int i; | ||
536 | |||
537 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | ||
538 | struct resource *res = bus->resource[i]; | ||
539 | if (!res) | ||
540 | continue; | ||
541 | |||
542 | printk(KERN_INFO "bus: %02x index %x %s: [%llx, %llx]\n", bus->number, i, (res->flags & IORESOURCE_IO)? "io port":"mmio", res->start, res->end); | ||
543 | } | ||
544 | } | ||
545 | |||
546 | static void pci_bus_dump_resources(struct pci_bus *bus) | ||
547 | { | ||
548 | struct pci_bus *b; | ||
549 | struct pci_dev *dev; | ||
550 | |||
551 | |||
552 | pci_bus_dump_res(bus); | ||
553 | |||
554 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
555 | b = dev->subordinate; | ||
556 | if (!b) | ||
557 | continue; | ||
558 | |||
559 | pci_bus_dump_resources(b); | ||
560 | } | ||
561 | } | ||
562 | |||
533 | void __init | 563 | void __init |
534 | pci_assign_unassigned_resources(void) | 564 | pci_assign_unassigned_resources(void) |
535 | { | 565 | { |
@@ -545,4 +575,9 @@ pci_assign_unassigned_resources(void) | |||
545 | pci_bus_assign_resources(bus); | 575 | pci_bus_assign_resources(bus); |
546 | pci_enable_bridges(bus); | 576 | pci_enable_bridges(bus); |
547 | } | 577 | } |
578 | |||
579 | /* dump the resource on buses */ | ||
580 | list_for_each_entry(bus, &pci_root_buses, node) { | ||
581 | pci_bus_dump_resources(bus); | ||
582 | } | ||
548 | } | 583 | } |
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c index a8771ffc61e8..e07b5c51ec5b 100644 --- a/drivers/pcmcia/pxa2xx_palmtx.c +++ b/drivers/pcmcia/pxa2xx_palmtx.c | |||
@@ -23,12 +23,57 @@ | |||
23 | 23 | ||
24 | static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 24 | static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
25 | { | 25 | { |
26 | skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY); | 26 | int ret; |
27 | |||
28 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1"); | ||
29 | if (ret) | ||
30 | goto err1; | ||
31 | ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER1, 0); | ||
32 | if (ret) | ||
33 | goto err2; | ||
34 | |||
35 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2"); | ||
36 | if (ret) | ||
37 | goto err2; | ||
38 | ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER2, 0); | ||
39 | if (ret) | ||
40 | goto err3; | ||
41 | |||
42 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_RESET, "PCMCIA RST"); | ||
43 | if (ret) | ||
44 | goto err3; | ||
45 | ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_RESET, 1); | ||
46 | if (ret) | ||
47 | goto err4; | ||
48 | |||
49 | ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_READY, "PCMCIA RDY"); | ||
50 | if (ret) | ||
51 | goto err4; | ||
52 | ret = gpio_direction_input(GPIO_NR_PALMTX_PCMCIA_READY); | ||
53 | if (ret) | ||
54 | goto err5; | ||
55 | |||
56 | skt->irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); | ||
27 | return 0; | 57 | return 0; |
58 | |||
59 | err5: | ||
60 | gpio_free(GPIO_NR_PALMTX_PCMCIA_READY); | ||
61 | err4: | ||
62 | gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET); | ||
63 | err3: | ||
64 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2); | ||
65 | err2: | ||
66 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1); | ||
67 | err1: | ||
68 | return ret; | ||
28 | } | 69 | } |
29 | 70 | ||
30 | static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 71 | static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
31 | { | 72 | { |
73 | gpio_free(GPIO_NR_PALMTX_PCMCIA_READY); | ||
74 | gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET); | ||
75 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2); | ||
76 | gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1); | ||
32 | } | 77 | } |
33 | 78 | ||
34 | static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | 79 | static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt, |
@@ -109,7 +154,7 @@ static void __exit palmtx_pcmcia_exit(void) | |||
109 | platform_device_unregister(palmtx_pcmcia_device); | 154 | platform_device_unregister(palmtx_pcmcia_device); |
110 | } | 155 | } |
111 | 156 | ||
112 | fs_initcall(palmtx_pcmcia_init); | 157 | module_init(palmtx_pcmcia_init); |
113 | module_exit(palmtx_pcmcia_exit); | 158 | module_exit(palmtx_pcmcia_exit); |
114 | 159 | ||
115 | MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); | 160 | MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); |
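Editor's note: palmtx_pcmcia_hw_init() above uses the classic request/configure/unwind goto ladder, where each error label frees exactly the GPIOs acquired before it. The same unwind can be expressed table-driven; the sketch below is a hypothetical alternative, not what the driver does, using only the GPIO calls already present in the hunk:

    static const struct {
            unsigned gpio;
            const char *label;
            int output, value;      /* output=1: drive 'value'; output=0: input */
    } palmtx_gpios[] = {
            { GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1", 1, 0 },
            { GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2", 1, 0 },
            { GPIO_NR_PALMTX_PCMCIA_RESET,  "PCMCIA RST",  1, 1 },
            { GPIO_NR_PALMTX_PCMCIA_READY,  "PCMCIA RDY",  0, 0 },
    };

    static int palmtx_request_gpios(void)
    {
            int i, ret;

            for (i = 0; i < ARRAY_SIZE(palmtx_gpios); i++) {
                    ret = gpio_request(palmtx_gpios[i].gpio, palmtx_gpios[i].label);
                    if (ret)
                            goto undo;
                    ret = palmtx_gpios[i].output ?
                            gpio_direction_output(palmtx_gpios[i].gpio,
                                                  palmtx_gpios[i].value) :
                            gpio_direction_input(palmtx_gpios[i].gpio);
                    if (ret) {
                            gpio_free(palmtx_gpios[i].gpio);
                            goto undo;
                    }
            }
            return 0;
    undo:
            /* free only the GPIOs that were successfully requested */
            while (--i >= 0)
                    gpio_free(palmtx_gpios[i].gpio);
            return ret;
    }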
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 90ab73825401..9a9755c92fad 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -561,7 +561,7 @@ config RTC_DRV_AT91SAM9_GPBR | |||
561 | 561 | ||
562 | config RTC_DRV_BFIN | 562 | config RTC_DRV_BFIN |
563 | tristate "Blackfin On-Chip RTC" | 563 | tristate "Blackfin On-Chip RTC" |
564 | depends on BLACKFIN | 564 | depends on BLACKFIN && !BF561 |
565 | help | 565 | help |
566 | If you say yes here you will get support for the | 566 | If you say yes here you will get support for the |
567 | Blackfin On-Chip Real Time Clock. | 567 | Blackfin On-Chip Real Time Clock. |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index a1af4c27939b..34439ce3967e 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -218,26 +218,6 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id) | |||
218 | return IRQ_NONE; | 218 | return IRQ_NONE; |
219 | } | 219 | } |
220 | 220 | ||
221 | static int bfin_rtc_open(struct device *dev) | ||
222 | { | ||
223 | int ret; | ||
224 | |||
225 | dev_dbg_stamp(dev); | ||
226 | |||
227 | ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, to_platform_device(dev)->name, dev); | ||
228 | if (!ret) | ||
229 | bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); | ||
230 | |||
231 | return ret; | ||
232 | } | ||
233 | |||
234 | static void bfin_rtc_release(struct device *dev) | ||
235 | { | ||
236 | dev_dbg_stamp(dev); | ||
237 | bfin_rtc_reset(dev, 0); | ||
238 | free_irq(IRQ_RTC, dev); | ||
239 | } | ||
240 | |||
241 | static void bfin_rtc_int_set(u16 rtc_int) | 221 | static void bfin_rtc_int_set(u16 rtc_int) |
242 | { | 222 | { |
243 | bfin_write_RTC_ISTAT(rtc_int); | 223 | bfin_write_RTC_ISTAT(rtc_int); |
@@ -370,8 +350,6 @@ static int bfin_rtc_proc(struct device *dev, struct seq_file *seq) | |||
370 | } | 350 | } |
371 | 351 | ||
372 | static struct rtc_class_ops bfin_rtc_ops = { | 352 | static struct rtc_class_ops bfin_rtc_ops = { |
373 | .open = bfin_rtc_open, | ||
374 | .release = bfin_rtc_release, | ||
375 | .ioctl = bfin_rtc_ioctl, | 353 | .ioctl = bfin_rtc_ioctl, |
376 | .read_time = bfin_rtc_read_time, | 354 | .read_time = bfin_rtc_read_time, |
377 | .set_time = bfin_rtc_set_time, | 355 | .set_time = bfin_rtc_set_time, |
@@ -383,29 +361,44 @@ static struct rtc_class_ops bfin_rtc_ops = { | |||
383 | static int __devinit bfin_rtc_probe(struct platform_device *pdev) | 361 | static int __devinit bfin_rtc_probe(struct platform_device *pdev) |
384 | { | 362 | { |
385 | struct bfin_rtc *rtc; | 363 | struct bfin_rtc *rtc; |
364 | struct device *dev = &pdev->dev; | ||
386 | int ret = 0; | 365 | int ret = 0; |
366 | unsigned long timeout; | ||
387 | 367 | ||
388 | dev_dbg_stamp(&pdev->dev); | 368 | dev_dbg_stamp(dev); |
389 | 369 | ||
370 | /* Allocate memory for our RTC struct */ | ||
390 | rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); | 371 | rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); |
391 | if (unlikely(!rtc)) | 372 | if (unlikely(!rtc)) |
392 | return -ENOMEM; | 373 | return -ENOMEM; |
374 | platform_set_drvdata(pdev, rtc); | ||
375 | device_init_wakeup(dev, 1); | ||
393 | 376 | ||
394 | rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); | 377 | /* Grab the IRQ and init the hardware */ |
395 | if (IS_ERR(rtc)) { | 378 | ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev); |
396 | ret = PTR_ERR(rtc->rtc_dev); | 379 | if (unlikely(ret)) |
397 | goto err; | 380 | goto err; |
398 | } | 381 | /* sometimes the bootloader touched things, but the write complete was not |
399 | 382 | * enabled, so let's just do a quick timeout here since the IRQ will not fire ... | |
400 | /* see comment at top of file about stopwatch/PIE */ | 383 | */ |
384 | timeout = jiffies + HZ; | ||
385 | while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) | ||
386 | if (time_after(jiffies, timeout)) | ||
387 | break; | ||
388 | bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE); | ||
401 | bfin_write_RTC_SWCNT(0); | 389 | bfin_write_RTC_SWCNT(0); |
402 | 390 | ||
403 | platform_set_drvdata(pdev, rtc); | 391 | /* Register our RTC with the RTC framework */ |
404 | 392 | rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE); | |
405 | device_init_wakeup(&pdev->dev, 1); | 393 | if (unlikely(IS_ERR(rtc))) { |
394 | ret = PTR_ERR(rtc->rtc_dev); | ||
395 | goto err_irq; | ||
396 | } | ||
406 | 397 | ||
407 | return 0; | 398 | return 0; |
408 | 399 | ||
400 | err_irq: | ||
401 | free_irq(IRQ_RTC, dev); | ||
409 | err: | 402 | err: |
410 | kfree(rtc); | 403 | kfree(rtc); |
411 | return ret; | 404 | return ret; |
@@ -414,7 +407,10 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev) | |||
414 | static int __devexit bfin_rtc_remove(struct platform_device *pdev) | 407 | static int __devexit bfin_rtc_remove(struct platform_device *pdev) |
415 | { | 408 | { |
416 | struct bfin_rtc *rtc = platform_get_drvdata(pdev); | 409 | struct bfin_rtc *rtc = platform_get_drvdata(pdev); |
410 | struct device *dev = &pdev->dev; | ||
417 | 411 | ||
412 | bfin_rtc_reset(dev, 0); | ||
413 | free_irq(IRQ_RTC, dev); | ||
418 | rtc_device_unregister(rtc->rtc_dev); | 414 | rtc_device_unregister(rtc->rtc_dev); |
419 | platform_set_drvdata(pdev, NULL); | 415 | platform_set_drvdata(pdev, NULL); |
420 | kfree(rtc); | 416 | kfree(rtc); |
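Editor's note: the rtc-bfin probe hunk folds the old open()/release() IRQ handling into probe/remove and, before touching the hardware, waits up to a second for any write the bootloader may have left pending with the completion interrupt masked. A minimal sketch of that bounded wait, assuming the same Blackfin accessors used in the hunk (cpu_relax() added here only for illustration):

    unsigned long timeout = jiffies + HZ;   /* give a leftover write ~1s to drain */

    while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING) {
            if (time_after(jiffies, timeout))
                    break;                  /* bail out rather than hang probe */
            cpu_relax();
    }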
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 856cc1af40df..f118252f3a9f 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/rtc.h> | 15 | #include <linux/rtc.h> |
16 | #include <linux/smp_lock.h> | ||
17 | #include "rtc-core.h" | 16 | #include "rtc-core.h" |
18 | 17 | ||
19 | static dev_t rtc_devt; | 18 | static dev_t rtc_devt; |
@@ -27,11 +26,8 @@ static int rtc_dev_open(struct inode *inode, struct file *file) | |||
27 | struct rtc_device, char_dev); | 26 | struct rtc_device, char_dev); |
28 | const struct rtc_class_ops *ops = rtc->ops; | 27 | const struct rtc_class_ops *ops = rtc->ops; |
29 | 28 | ||
30 | lock_kernel(); | 29 | if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) |
31 | if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) { | 30 | return -EBUSY; |
32 | err = -EBUSY; | ||
33 | goto out; | ||
34 | } | ||
35 | 31 | ||
36 | file->private_data = rtc; | 32 | file->private_data = rtc; |
37 | 33 | ||
@@ -41,13 +37,11 @@ static int rtc_dev_open(struct inode *inode, struct file *file) | |||
41 | rtc->irq_data = 0; | 37 | rtc->irq_data = 0; |
42 | spin_unlock_irq(&rtc->irq_lock); | 38 | spin_unlock_irq(&rtc->irq_lock); |
43 | 39 | ||
44 | goto out; | 40 | return 0; |
45 | } | 41 | } |
46 | 42 | ||
47 | /* something has gone wrong */ | 43 | /* something has gone wrong */ |
48 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); | 44 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); |
49 | out: | ||
50 | unlock_kernel(); | ||
51 | return err; | 45 | return err; |
52 | } | 46 | } |
53 | 47 | ||
@@ -409,11 +403,14 @@ static long rtc_dev_ioctl(struct file *file, | |||
409 | 403 | ||
410 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | 404 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL |
411 | case RTC_UIE_OFF: | 405 | case RTC_UIE_OFF: |
406 | mutex_unlock(&rtc->ops_lock); | ||
412 | clear_uie(rtc); | 407 | clear_uie(rtc); |
413 | break; | 408 | return 0; |
414 | 409 | ||
415 | case RTC_UIE_ON: | 410 | case RTC_UIE_ON: |
411 | mutex_unlock(&rtc->ops_lock); | ||
416 | err = set_uie(rtc); | 412 | err = set_uie(rtc); |
413 | return err; | ||
417 | #endif | 414 | #endif |
418 | default: | 415 | default: |
419 | err = -ENOTTY; | 416 | err = -ENOTTY; |
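Editor's note: with the BKL removed from rtc_dev_open(), single-opener exclusivity rests entirely on the RTC_DEV_BUSY bit: open takes it with test_and_set_bit_lock() (acquire semantics) and release drops it with clear_bit_unlock() (release semantics). A simplified sketch of that pairing, trimmed down from the surrounding file rather than copied verbatim:

    static int rtc_dev_open(struct inode *inode, struct file *file)
    {
            struct rtc_device *rtc = container_of(inode->i_cdev,
                                                  struct rtc_device, char_dev);

            /* only one opener at a time */
            if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
                    return -EBUSY;

            file->private_data = rtc;
            return 0;
    }

    static int rtc_dev_release(struct inode *inode, struct file *file)
    {
            struct rtc_device *rtc = file->private_data;

            clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
            return 0;
    }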
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 640acd20fdde..a150418fba76 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c | |||
@@ -173,7 +173,7 @@ static int ds1374_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
173 | int cr, sr; | 173 | int cr, sr; |
174 | int ret = 0; | 174 | int ret = 0; |
175 | 175 | ||
176 | if (client->irq < 0) | 176 | if (client->irq <= 0) |
177 | return -EINVAL; | 177 | return -EINVAL; |
178 | 178 | ||
179 | mutex_lock(&ds1374->mutex); | 179 | mutex_lock(&ds1374->mutex); |
@@ -212,7 +212,7 @@ static int ds1374_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
212 | int cr; | 212 | int cr; |
213 | int ret = 0; | 213 | int ret = 0; |
214 | 214 | ||
215 | if (client->irq < 0) | 215 | if (client->irq <= 0) |
216 | return -EINVAL; | 216 | return -EINVAL; |
217 | 217 | ||
218 | ret = ds1374_read_time(dev, &now); | 218 | ret = ds1374_read_time(dev, &now); |
@@ -381,7 +381,7 @@ static int ds1374_probe(struct i2c_client *client, | |||
381 | if (ret) | 381 | if (ret) |
382 | goto out_free; | 382 | goto out_free; |
383 | 383 | ||
384 | if (client->irq >= 0) { | 384 | if (client->irq > 0) { |
385 | ret = request_irq(client->irq, ds1374_irq, 0, | 385 | ret = request_irq(client->irq, ds1374_irq, 0, |
386 | "ds1374", client); | 386 | "ds1374", client); |
387 | if (ret) { | 387 | if (ret) { |
@@ -401,7 +401,7 @@ static int ds1374_probe(struct i2c_client *client, | |||
401 | return 0; | 401 | return 0; |
402 | 402 | ||
403 | out_irq: | 403 | out_irq: |
404 | if (client->irq >= 0) | 404 | if (client->irq > 0) |
405 | free_irq(client->irq, client); | 405 | free_irq(client->irq, client); |
406 | 406 | ||
407 | out_free: | 407 | out_free: |
@@ -414,7 +414,7 @@ static int __devexit ds1374_remove(struct i2c_client *client) | |||
414 | { | 414 | { |
415 | struct ds1374 *ds1374 = i2c_get_clientdata(client); | 415 | struct ds1374 *ds1374 = i2c_get_clientdata(client); |
416 | 416 | ||
417 | if (client->irq >= 0) { | 417 | if (client->irq > 0) { |
418 | mutex_lock(&ds1374->mutex); | 418 | mutex_lock(&ds1374->mutex); |
419 | ds1374->exiting = 1; | 419 | ds1374->exiting = 1; |
420 | mutex_unlock(&ds1374->mutex); | 420 | mutex_unlock(&ds1374->mutex); |
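Editor's note: the rtc-ds1374 hunks change every "client->irq < 0" test to "<= 0" (and the probe check to "> 0") because, for I2C clients, an IRQ of 0 means "no interrupt assigned" rather than a usable line. The resulting convention, as used in the probe path of the hunk:

    if (client->irq > 0) {
            /* only wire up the alarm path when a real IRQ was provided */
            ret = request_irq(client->irq, ds1374_irq, 0, "ds1374", client);
            if (ret)
                    goto out_irq;
    }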
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index fbb90b1e4098..a81adab6e515 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c | |||
@@ -482,7 +482,7 @@ isl1208_sysfs_register(struct device *dev) | |||
482 | static int | 482 | static int |
483 | isl1208_sysfs_unregister(struct device *dev) | 483 | isl1208_sysfs_unregister(struct device *dev) |
484 | { | 484 | { |
485 | device_remove_file(dev, &dev_attr_atrim); | 485 | device_remove_file(dev, &dev_attr_dtrim); |
486 | device_remove_file(dev, &dev_attr_atrim); | 486 | device_remove_file(dev, &dev_attr_atrim); |
487 | device_remove_file(dev, &dev_attr_usr); | 487 | device_remove_file(dev, &dev_attr_usr); |
488 | 488 | ||
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c index 12f0310ae89c..78b2551fb19d 100644 --- a/drivers/rtc/rtc-max6902.c +++ b/drivers/rtc/rtc-max6902.c | |||
@@ -20,8 +20,6 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/version.h> | ||
24 | |||
25 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
26 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
27 | #include <linux/init.h> | 25 | #include <linux/init.h> |
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c index b35f9bfa2af4..395985b339c9 100644 --- a/drivers/rtc/rtc-r9701.c +++ b/drivers/rtc/rtc-r9701.c | |||
@@ -14,7 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/version.h> | ||
18 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
19 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
20 | #include <linux/device.h> | 19 | #include <linux/device.h> |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1b6c52ef7339..acb78017e7d0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -2333,13 +2333,11 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) | |||
2333 | { | 2333 | { |
2334 | struct dasd_device *device; | 2334 | struct dasd_device *device; |
2335 | struct dasd_ccw_req *cqr; | 2335 | struct dasd_ccw_req *cqr; |
2336 | unsigned long flags; | ||
2337 | int ret; | 2336 | int ret; |
2338 | 2337 | ||
2339 | device = dasd_device_from_cdev(cdev); | 2338 | device = dasd_device_from_cdev_locked(cdev); |
2340 | if (IS_ERR(device)) | 2339 | if (IS_ERR(device)) |
2341 | return 0; | 2340 | return 0; |
2342 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | ||
2343 | ret = 0; | 2341 | ret = 0; |
2344 | switch (event) { | 2342 | switch (event) { |
2345 | case CIO_GONE: | 2343 | case CIO_GONE: |
@@ -2369,7 +2367,6 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) | |||
2369 | ret = 1; | 2367 | ret = 1; |
2370 | break; | 2368 | break; |
2371 | } | 2369 | } |
2372 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | ||
2373 | dasd_put_device(device); | 2370 | dasd_put_device(device); |
2374 | return ret; | 2371 | return ret; |
2375 | } | 2372 | } |
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index 4bf0aa5112c1..2476f87d21d0 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h | |||
@@ -308,7 +308,7 @@ struct dasd_psf_prssd_data { | |||
308 | unsigned char flags; | 308 | unsigned char flags; |
309 | unsigned char reserved[4]; | 309 | unsigned char reserved[4]; |
310 | unsigned char suborder; | 310 | unsigned char suborder; |
311 | unsigned char varies[9]; | 311 | unsigned char varies[5]; |
312 | } __attribute__ ((packed)); | 312 | } __attribute__ ((packed)); |
313 | 313 | ||
314 | /* | 314 | /* |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 29da4413ad43..bf512ac75b9e 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/poll.h> | 16 | #include <linux/poll.h> |
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/smp_lock.h> | 18 | #include <linux/smp_lock.h> |
19 | #include <linux/err.h> | ||
19 | 20 | ||
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include <asm/atomic.h> | 22 | #include <asm/atomic.h> |
@@ -457,7 +458,7 @@ int dasd_eer_enable(struct dasd_device *device) | |||
457 | 458 | ||
458 | cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, | 459 | cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, |
459 | SNSS_DATA_SIZE, device); | 460 | SNSS_DATA_SIZE, device); |
460 | if (!cqr) | 461 | if (IS_ERR(cqr)) |
461 | return -ENOMEM; | 462 | return -ENOMEM; |
462 | 463 | ||
463 | cqr->startdev = device; | 464 | cqr->startdev = device; |
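Editor's note: dasd_kmalloc_request() reports failure via an ERR_PTR-encoded pointer, not NULL, so the old "if (!cqr)" test could never trigger; the hunk switches it to IS_ERR(). The idal_buffer_alloc() callers further down get the same treatment. A generic sketch of the ERR_PTR convention this relies on (some_err_ptr_alloc() is a hypothetical allocator, named only for illustration):

    #include <linux/err.h>

    void *buf = some_err_ptr_alloc(size);
    if (IS_ERR(buf))
            return PTR_ERR(buf);    /* e.g. -ENOMEM, carried inside the pointer */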
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 01fcdd91b846..711b3004b3e6 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -384,6 +384,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
384 | * get minor, add to list | 384 | * get minor, add to list |
385 | */ | 385 | */ |
386 | down_write(&dcssblk_devices_sem); | 386 | down_write(&dcssblk_devices_sem); |
387 | if (dcssblk_get_device_by_name(local_buf)) { | ||
388 | up_write(&dcssblk_devices_sem); | ||
389 | rc = -EEXIST; | ||
390 | goto unload_seg; | ||
391 | } | ||
387 | rc = dcssblk_assign_free_minor(dev_info); | 392 | rc = dcssblk_assign_free_minor(dev_info); |
388 | if (rc) { | 393 | if (rc) { |
389 | up_write(&dcssblk_devices_sem); | 394 | up_write(&dcssblk_devices_sem); |
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 687720b552d1..be0ce2215c8d 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c | |||
@@ -109,7 +109,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) | |||
109 | 109 | ||
110 | /* The current idal buffer is not correct. Allocate a new one. */ | 110 | /* The current idal buffer is not correct. Allocate a new one. */ |
111 | new = idal_buffer_alloc(block_size, 0); | 111 | new = idal_buffer_alloc(block_size, 0); |
112 | if (new == NULL) | 112 | if (IS_ERR(new)) |
113 | return -ENOMEM; | 113 | return -ENOMEM; |
114 | 114 | ||
115 | if (device->char_data.idal_buf != NULL) | 115 | if (device->char_data.idal_buf != NULL) |
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 2a1af4e60be0..cc8fd781ee22 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c | |||
@@ -248,7 +248,7 @@ tape_std_mtsetblk(struct tape_device *device, int count) | |||
248 | 248 | ||
249 | /* Allocate a new idal buffer. */ | 249 | /* Allocate a new idal buffer. */ |
250 | new = idal_buffer_alloc(count, 0); | 250 | new = idal_buffer_alloc(count, 0); |
251 | if (new == NULL) | 251 | if (IS_ERR(new)) |
252 | return -ENOMEM; | 252 | return -ENOMEM; |
253 | if (device->char_data.idal_buf != NULL) | 253 | if (device->char_data.idal_buf != NULL) |
254 | idal_buffer_free(device->char_data.idal_buf); | 254 | idal_buffer_free(device->char_data.idal_buf); |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 26a930e832bd..e0ce65fca4e7 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -112,8 +112,10 @@ ccwgroup_release (struct device *dev) | |||
112 | gdev = to_ccwgroupdev(dev); | 112 | gdev = to_ccwgroupdev(dev); |
113 | 113 | ||
114 | for (i = 0; i < gdev->count; i++) { | 114 | for (i = 0; i < gdev->count; i++) { |
115 | dev_set_drvdata(&gdev->cdev[i]->dev, NULL); | 115 | if (gdev->cdev[i]) { |
116 | put_device(&gdev->cdev[i]->dev); | 116 | dev_set_drvdata(&gdev->cdev[i]->dev, NULL); |
117 | put_device(&gdev->cdev[i]->dev); | ||
118 | } | ||
117 | } | 119 | } |
118 | kfree(gdev); | 120 | kfree(gdev); |
119 | } | 121 | } |
@@ -221,6 +223,13 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, | |||
221 | atomic_set(&gdev->onoff, 0); | 223 | atomic_set(&gdev->onoff, 0); |
222 | mutex_init(&gdev->reg_mutex); | 224 | mutex_init(&gdev->reg_mutex); |
223 | mutex_lock(&gdev->reg_mutex); | 225 | mutex_lock(&gdev->reg_mutex); |
226 | gdev->creator_id = creator_id; | ||
227 | gdev->count = num_devices; | ||
228 | gdev->dev.bus = &ccwgroup_bus_type; | ||
229 | gdev->dev.parent = root; | ||
230 | gdev->dev.release = ccwgroup_release; | ||
231 | device_initialize(&gdev->dev); | ||
232 | |||
224 | curr_buf = buf; | 233 | curr_buf = buf; |
225 | for (i = 0; i < num_devices && curr_buf; i++) { | 234 | for (i = 0; i < num_devices && curr_buf; i++) { |
226 | rc = __get_next_bus_id(&curr_buf, tmp_bus_id); | 235 | rc = __get_next_bus_id(&curr_buf, tmp_bus_id); |
@@ -258,16 +267,11 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, | |||
258 | rc = -EINVAL; | 267 | rc = -EINVAL; |
259 | goto error; | 268 | goto error; |
260 | } | 269 | } |
261 | gdev->creator_id = creator_id; | ||
262 | gdev->count = num_devices; | ||
263 | gdev->dev.bus = &ccwgroup_bus_type; | ||
264 | gdev->dev.parent = root; | ||
265 | gdev->dev.release = ccwgroup_release; | ||
266 | 270 | ||
267 | snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", | 271 | snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", |
268 | gdev->cdev[0]->dev.bus_id); | 272 | gdev->cdev[0]->dev.bus_id); |
269 | 273 | ||
270 | rc = device_register(&gdev->dev); | 274 | rc = device_add(&gdev->dev); |
271 | if (rc) | 275 | if (rc) |
272 | goto error; | 276 | goto error; |
273 | get_device(&gdev->dev); | 277 | get_device(&gdev->dev); |
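Editor's note: the ccwgroup hunks split device creation into device_initialize() up front, before the bus IDs are parsed, and device_add() once the group is complete; device_register() is simply those two calls combined. The usual companion to this split, sketched generically below, is that error paths after device_initialize() drop the object with put_device() so it is freed through its release callback (names here are illustrative, not from this driver):

    struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);
    if (!md)
            return -ENOMEM;

    device_initialize(&md->dev);            /* refcount and release valid from here */
    md->dev.release = my_dev_release;       /* kfree(md) lives in the release */

    if (setup_failed) {
            put_device(&md->dev);           /* drops the ref, frees via release */
            return -EINVAL;
    }

    err = device_add(&md->dev);             /* make the device visible */
    if (err) {
            put_device(&md->dev);
            return err;
    }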
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 46c021d880dc..51489eff6b0b 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -477,7 +477,6 @@ void css_schedule_eval_all(void) | |||
477 | 477 | ||
478 | void css_wait_for_slow_path(void) | 478 | void css_wait_for_slow_path(void) |
479 | { | 479 | { |
480 | flush_workqueue(ccw_device_notify_work); | ||
481 | flush_workqueue(slow_path_wq); | 480 | flush_workqueue(slow_path_wq); |
482 | } | 481 | } |
483 | 482 | ||
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e818d0c54c09..28221030b886 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -150,7 +150,6 @@ static struct css_driver io_subchannel_driver = { | |||
150 | }; | 150 | }; |
151 | 151 | ||
152 | struct workqueue_struct *ccw_device_work; | 152 | struct workqueue_struct *ccw_device_work; |
153 | struct workqueue_struct *ccw_device_notify_work; | ||
154 | wait_queue_head_t ccw_device_init_wq; | 153 | wait_queue_head_t ccw_device_init_wq; |
155 | atomic_t ccw_device_init_count; | 154 | atomic_t ccw_device_init_count; |
156 | 155 | ||
@@ -168,11 +167,6 @@ init_ccw_bus_type (void) | |||
168 | ccw_device_work = create_singlethread_workqueue("cio"); | 167 | ccw_device_work = create_singlethread_workqueue("cio"); |
169 | if (!ccw_device_work) | 168 | if (!ccw_device_work) |
170 | return -ENOMEM; /* FIXME: better errno ? */ | 169 | return -ENOMEM; /* FIXME: better errno ? */ |
171 | ccw_device_notify_work = create_singlethread_workqueue("cio_notify"); | ||
172 | if (!ccw_device_notify_work) { | ||
173 | ret = -ENOMEM; /* FIXME: better errno ? */ | ||
174 | goto out_err; | ||
175 | } | ||
176 | slow_path_wq = create_singlethread_workqueue("kslowcrw"); | 170 | slow_path_wq = create_singlethread_workqueue("kslowcrw"); |
177 | if (!slow_path_wq) { | 171 | if (!slow_path_wq) { |
178 | ret = -ENOMEM; /* FIXME: better errno ? */ | 172 | ret = -ENOMEM; /* FIXME: better errno ? */ |
@@ -192,8 +186,6 @@ init_ccw_bus_type (void) | |||
192 | out_err: | 186 | out_err: |
193 | if (ccw_device_work) | 187 | if (ccw_device_work) |
194 | destroy_workqueue(ccw_device_work); | 188 | destroy_workqueue(ccw_device_work); |
195 | if (ccw_device_notify_work) | ||
196 | destroy_workqueue(ccw_device_notify_work); | ||
197 | if (slow_path_wq) | 189 | if (slow_path_wq) |
198 | destroy_workqueue(slow_path_wq); | 190 | destroy_workqueue(slow_path_wq); |
199 | return ret; | 191 | return ret; |
@@ -204,7 +196,6 @@ cleanup_ccw_bus_type (void) | |||
204 | { | 196 | { |
205 | css_driver_unregister(&io_subchannel_driver); | 197 | css_driver_unregister(&io_subchannel_driver); |
206 | bus_unregister(&ccw_bus_type); | 198 | bus_unregister(&ccw_bus_type); |
207 | destroy_workqueue(ccw_device_notify_work); | ||
208 | destroy_workqueue(ccw_device_work); | 199 | destroy_workqueue(ccw_device_work); |
209 | } | 200 | } |
210 | 201 | ||
@@ -1496,11 +1487,22 @@ static void device_set_disconnected(struct ccw_device *cdev) | |||
1496 | ccw_device_schedule_recovery(); | 1487 | ccw_device_schedule_recovery(); |
1497 | } | 1488 | } |
1498 | 1489 | ||
1490 | void ccw_device_set_notoper(struct ccw_device *cdev) | ||
1491 | { | ||
1492 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
1493 | |||
1494 | CIO_TRACE_EVENT(2, "notoper"); | ||
1495 | CIO_TRACE_EVENT(2, sch->dev.bus_id); | ||
1496 | ccw_device_set_timeout(cdev, 0); | ||
1497 | cio_disable_subchannel(sch); | ||
1498 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
1499 | } | ||
1500 | |||
1499 | static int io_subchannel_sch_event(struct subchannel *sch, int slow) | 1501 | static int io_subchannel_sch_event(struct subchannel *sch, int slow) |
1500 | { | 1502 | { |
1501 | int event, ret, disc; | 1503 | int event, ret, disc; |
1502 | unsigned long flags; | 1504 | unsigned long flags; |
1503 | enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; | 1505 | enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action; |
1504 | struct ccw_device *cdev; | 1506 | struct ccw_device *cdev; |
1505 | 1507 | ||
1506 | spin_lock_irqsave(sch->lock, flags); | 1508 | spin_lock_irqsave(sch->lock, flags); |
@@ -1535,16 +1537,11 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1535 | } | 1537 | } |
1536 | /* fall through */ | 1538 | /* fall through */ |
1537 | case CIO_GONE: | 1539 | case CIO_GONE: |
1538 | /* Prevent unwanted effects when opening lock. */ | ||
1539 | cio_disable_subchannel(sch); | ||
1540 | device_set_disconnected(cdev); | ||
1541 | /* Ask driver what to do with device. */ | 1540 | /* Ask driver what to do with device. */ |
1542 | action = UNREGISTER; | 1541 | if (io_subchannel_notify(sch, event)) |
1543 | spin_unlock_irqrestore(sch->lock, flags); | 1542 | action = DISC; |
1544 | ret = io_subchannel_notify(sch, event); | 1543 | else |
1545 | spin_lock_irqsave(sch->lock, flags); | 1544 | action = UNREGISTER; |
1546 | if (ret) | ||
1547 | action = NONE; | ||
1548 | break; | 1545 | break; |
1549 | case CIO_REVALIDATE: | 1546 | case CIO_REVALIDATE: |
1550 | /* Device will be removed, so no notify necessary. */ | 1547 | /* Device will be removed, so no notify necessary. */ |
@@ -1565,6 +1562,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1565 | switch (action) { | 1562 | switch (action) { |
1566 | case UNREGISTER: | 1563 | case UNREGISTER: |
1567 | case UNREGISTER_PROBE: | 1564 | case UNREGISTER_PROBE: |
1565 | ccw_device_set_notoper(cdev); | ||
1568 | /* Unregister device (will use subchannel lock). */ | 1566 | /* Unregister device (will use subchannel lock). */ |
1569 | spin_unlock_irqrestore(sch->lock, flags); | 1567 | spin_unlock_irqrestore(sch->lock, flags); |
1570 | css_sch_device_unregister(sch); | 1568 | css_sch_device_unregister(sch); |
@@ -1577,6 +1575,9 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1577 | case REPROBE: | 1575 | case REPROBE: |
1578 | ccw_device_trigger_reprobe(cdev); | 1576 | ccw_device_trigger_reprobe(cdev); |
1579 | break; | 1577 | break; |
1578 | case DISC: | ||
1579 | device_set_disconnected(cdev); | ||
1580 | break; | ||
1580 | default: | 1581 | default: |
1581 | break; | 1582 | break; |
1582 | } | 1583 | } |
@@ -1828,5 +1829,4 @@ EXPORT_SYMBOL(ccw_driver_unregister); | |||
1828 | EXPORT_SYMBOL(get_ccwdev_by_busid); | 1829 | EXPORT_SYMBOL(get_ccwdev_by_busid); |
1829 | EXPORT_SYMBOL(ccw_bus_type); | 1830 | EXPORT_SYMBOL(ccw_bus_type); |
1830 | EXPORT_SYMBOL(ccw_device_work); | 1831 | EXPORT_SYMBOL(ccw_device_work); |
1831 | EXPORT_SYMBOL(ccw_device_notify_work); | ||
1832 | EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); | 1832 | EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 9800a8335a3f..6f5c3f2b3587 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -72,7 +72,6 @@ dev_fsm_final_state(struct ccw_device *cdev) | |||
72 | } | 72 | } |
73 | 73 | ||
74 | extern struct workqueue_struct *ccw_device_work; | 74 | extern struct workqueue_struct *ccw_device_work; |
75 | extern struct workqueue_struct *ccw_device_notify_work; | ||
76 | extern wait_queue_head_t ccw_device_init_wq; | 75 | extern wait_queue_head_t ccw_device_init_wq; |
77 | extern atomic_t ccw_device_init_count; | 76 | extern atomic_t ccw_device_init_count; |
78 | 77 | ||
@@ -120,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *); | |||
120 | void ccw_device_trigger_reprobe(struct ccw_device *); | 119 | void ccw_device_trigger_reprobe(struct ccw_device *); |
121 | void ccw_device_kill_io(struct ccw_device *); | 120 | void ccw_device_kill_io(struct ccw_device *); |
122 | int ccw_device_notify(struct ccw_device *, int); | 121 | int ccw_device_notify(struct ccw_device *, int); |
122 | void ccw_device_set_notoper(struct ccw_device *cdev); | ||
123 | 123 | ||
124 | /* qdio needs this. */ | 124 | /* qdio needs this. */ |
125 | void ccw_device_set_timeout(struct ccw_device *, int); | 125 | void ccw_device_set_timeout(struct ccw_device *, int); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 8b5fe57fb2f3..550508df952b 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -337,26 +337,34 @@ int ccw_device_notify(struct ccw_device *cdev, int event) | |||
337 | return 0; | 337 | return 0; |
338 | if (!cdev->online) | 338 | if (!cdev->online) |
339 | return 0; | 339 | return 0; |
340 | CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", | ||
341 | cdev->private->dev_id.ssid, cdev->private->dev_id.devno, | ||
342 | event); | ||
340 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | 343 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; |
341 | } | 344 | } |
342 | 345 | ||
343 | static void | 346 | static void cmf_reenable_delayed(struct work_struct *work) |
344 | ccw_device_oper_notify(struct work_struct *work) | ||
345 | { | 347 | { |
346 | struct ccw_device_private *priv; | 348 | struct ccw_device_private *priv; |
347 | struct ccw_device *cdev; | 349 | struct ccw_device *cdev; |
348 | int ret; | ||
349 | 350 | ||
350 | priv = container_of(work, struct ccw_device_private, kick_work); | 351 | priv = container_of(work, struct ccw_device_private, kick_work); |
351 | cdev = priv->cdev; | 352 | cdev = priv->cdev; |
352 | ret = ccw_device_notify(cdev, CIO_OPER); | 353 | cmf_reenable(cdev); |
353 | if (ret) { | 354 | } |
355 | |||
356 | static void ccw_device_oper_notify(struct ccw_device *cdev) | ||
357 | { | ||
358 | if (ccw_device_notify(cdev, CIO_OPER)) { | ||
354 | /* Reenable channel measurements, if needed. */ | 359 | /* Reenable channel measurements, if needed. */ |
355 | cmf_reenable(cdev); | 360 | PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed); |
356 | wake_up(&cdev->private->wait_q); | 361 | queue_work(ccw_device_work, &cdev->private->kick_work); |
357 | } else | 362 | return; |
358 | /* Driver doesn't want device back. */ | 363 | } |
359 | ccw_device_do_unreg_rereg(work); | 364 | /* Driver doesn't want device back. */ |
365 | ccw_device_set_notoper(cdev); | ||
366 | PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg); | ||
367 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
360 | } | 368 | } |
361 | 369 | ||
362 | /* | 370 | /* |
@@ -386,8 +394,7 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
386 | 394 | ||
387 | if (cdev->private->flags.donotify) { | 395 | if (cdev->private->flags.donotify) { |
388 | cdev->private->flags.donotify = 0; | 396 | cdev->private->flags.donotify = 0; |
389 | PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify); | 397 | ccw_device_oper_notify(cdev); |
390 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | ||
391 | } | 398 | } |
392 | wake_up(&cdev->private->wait_q); | 399 | wake_up(&cdev->private->wait_q); |
393 | 400 | ||
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index 8484b83698e1..5a4d85b829ad 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h | |||
@@ -61,18 +61,18 @@ | |||
61 | 61 | ||
62 | /* s390dbf views */ | 62 | /* s390dbf views */ |
63 | #define QDIO_DBF_SETUP_LEN 8 | 63 | #define QDIO_DBF_SETUP_LEN 8 |
64 | #define QDIO_DBF_SETUP_PAGES 4 | 64 | #define QDIO_DBF_SETUP_PAGES 8 |
65 | #define QDIO_DBF_SETUP_NR_AREAS 1 | 65 | #define QDIO_DBF_SETUP_NR_AREAS 1 |
66 | 66 | ||
67 | #define QDIO_DBF_TRACE_LEN 8 | 67 | #define QDIO_DBF_TRACE_LEN 8 |
68 | #define QDIO_DBF_TRACE_NR_AREAS 2 | 68 | #define QDIO_DBF_TRACE_NR_AREAS 2 |
69 | 69 | ||
70 | #ifdef CONFIG_QDIO_DEBUG | 70 | #ifdef CONFIG_QDIO_DEBUG |
71 | #define QDIO_DBF_TRACE_PAGES 16 | 71 | #define QDIO_DBF_TRACE_PAGES 32 |
72 | #define QDIO_DBF_SETUP_LEVEL 6 | 72 | #define QDIO_DBF_SETUP_LEVEL 6 |
73 | #define QDIO_DBF_TRACE_LEVEL 4 | 73 | #define QDIO_DBF_TRACE_LEVEL 4 |
74 | #else /* !CONFIG_QDIO_DEBUG */ | 74 | #else /* !CONFIG_QDIO_DEBUG */ |
75 | #define QDIO_DBF_TRACE_PAGES 4 | 75 | #define QDIO_DBF_TRACE_PAGES 8 |
76 | #define QDIO_DBF_SETUP_LEVEL 2 | 76 | #define QDIO_DBF_SETUP_LEVEL 2 |
77 | #define QDIO_DBF_TRACE_LEVEL 2 | 77 | #define QDIO_DBF_TRACE_LEVEL 2 |
78 | #endif /* CONFIG_QDIO_DEBUG */ | 78 | #endif /* CONFIG_QDIO_DEBUG */ |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index d15648514a0f..e6eabc853422 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -330,6 +330,7 @@ static int qdio_siga_output(struct qdio_q *q) | |||
330 | int cc; | 330 | int cc; |
331 | u32 busy_bit; | 331 | u32 busy_bit; |
332 | u64 start_time = 0; | 332 | u64 start_time = 0; |
333 | char dbf_text[15]; | ||
333 | 334 | ||
334 | QDIO_DBF_TEXT5(0, trace, "sigaout"); | 335 | QDIO_DBF_TEXT5(0, trace, "sigaout"); |
335 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); | 336 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); |
@@ -338,6 +339,9 @@ static int qdio_siga_output(struct qdio_q *q) | |||
338 | again: | 339 | again: |
339 | cc = qdio_do_siga_output(q, &busy_bit); | 340 | cc = qdio_do_siga_output(q, &busy_bit); |
340 | if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { | 341 | if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { |
342 | sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr); | ||
343 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
344 | |||
341 | if (!start_time) | 345 | if (!start_time) |
342 | start_time = get_usecs(); | 346 | start_time = get_usecs(); |
343 | else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) | 347 | else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) |
@@ -748,16 +752,18 @@ static void qdio_kick_outbound_q(struct qdio_q *q) | |||
748 | rc = qdio_siga_output(q); | 752 | rc = qdio_siga_output(q); |
749 | switch (rc) { | 753 | switch (rc) { |
750 | case 0: | 754 | case 0: |
751 | /* went smooth this time, reset timestamp */ | ||
752 | q->u.out.timestamp = 0; | ||
753 | |||
754 | /* TODO: improve error handling for CC=0 case */ | 755 | /* TODO: improve error handling for CC=0 case */ |
755 | #ifdef CONFIG_QDIO_DEBUG | 756 | #ifdef CONFIG_QDIO_DEBUG |
756 | QDIO_DBF_TEXT3(0, trace, "cc2reslv"); | 757 | if (q->u.out.timestamp) { |
757 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, | 758 | QDIO_DBF_TEXT3(0, trace, "cc2reslv"); |
758 | atomic_read(&q->u.out.busy_siga_counter)); | 759 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, |
759 | QDIO_DBF_TEXT3(0, trace, dbf_text); | 760 | q->nr, |
761 | atomic_read(&q->u.out.busy_siga_counter)); | ||
762 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
763 | } | ||
760 | #endif /* CONFIG_QDIO_DEBUG */ | 764 | #endif /* CONFIG_QDIO_DEBUG */ |
765 | /* went smooth this time, reset timestamp */ | ||
766 | q->u.out.timestamp = 0; | ||
761 | break; | 767 | break; |
762 | /* cc=2 and busy bit */ | 768 | /* cc=2 and busy bit */ |
763 | case (2 | QDIO_ERROR_SIGA_BUSY): | 769 | case (2 | QDIO_ERROR_SIGA_BUSY): |
@@ -1066,14 +1072,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1066 | if (IS_ERR(irb)) { | 1072 | if (IS_ERR(irb)) { |
1067 | switch (PTR_ERR(irb)) { | 1073 | switch (PTR_ERR(irb)) { |
1068 | case -EIO: | 1074 | case -EIO: |
1069 | sprintf(dbf_text, "ierr%4x", | 1075 | sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); |
1070 | cdev->private->schid.sch_no); | ||
1071 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1076 | QDIO_DBF_TEXT2(1, setup, dbf_text); |
1072 | qdio_int_error(cdev); | 1077 | qdio_int_error(cdev); |
1073 | return; | 1078 | return; |
1074 | case -ETIMEDOUT: | 1079 | case -ETIMEDOUT: |
1075 | sprintf(dbf_text, "qtoh%4x", | 1080 | sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); |
1076 | cdev->private->schid.sch_no); | ||
1077 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1081 | QDIO_DBF_TEXT2(1, setup, dbf_text); |
1078 | qdio_int_error(cdev); | 1082 | qdio_int_error(cdev); |
1079 | return; | 1083 | return; |
@@ -1124,8 +1128,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1124 | struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) | 1128 | struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) |
1125 | { | 1129 | { |
1126 | struct qdio_irq *irq_ptr; | 1130 | struct qdio_irq *irq_ptr; |
1131 | char dbf_text[15]; | ||
1127 | 1132 | ||
1128 | QDIO_DBF_TEXT0(0, setup, "getssqd"); | 1133 | sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no); |
1134 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1129 | 1135 | ||
1130 | irq_ptr = cdev->private->qdio_data; | 1136 | irq_ptr = cdev->private->qdio_data; |
1131 | if (!irq_ptr) | 1137 | if (!irq_ptr) |
@@ -1149,14 +1155,13 @@ int qdio_cleanup(struct ccw_device *cdev, int how) | |||
1149 | char dbf_text[15]; | 1155 | char dbf_text[15]; |
1150 | int rc; | 1156 | int rc; |
1151 | 1157 | ||
1158 | sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no); | ||
1159 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1160 | |||
1152 | irq_ptr = cdev->private->qdio_data; | 1161 | irq_ptr = cdev->private->qdio_data; |
1153 | if (!irq_ptr) | 1162 | if (!irq_ptr) |
1154 | return -ENODEV; | 1163 | return -ENODEV; |
1155 | 1164 | ||
1156 | sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no); | ||
1157 | QDIO_DBF_TEXT1(0, trace, dbf_text); | ||
1158 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1159 | |||
1160 | rc = qdio_shutdown(cdev, how); | 1165 | rc = qdio_shutdown(cdev, how); |
1161 | if (rc == 0) | 1166 | if (rc == 0) |
1162 | rc = qdio_free(cdev); | 1167 | rc = qdio_free(cdev); |
@@ -1191,6 +1196,9 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1191 | unsigned long flags; | 1196 | unsigned long flags; |
1192 | char dbf_text[15]; | 1197 | char dbf_text[15]; |
1193 | 1198 | ||
1199 | sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no); | ||
1200 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1201 | |||
1194 | irq_ptr = cdev->private->qdio_data; | 1202 | irq_ptr = cdev->private->qdio_data; |
1195 | if (!irq_ptr) | 1203 | if (!irq_ptr) |
1196 | return -ENODEV; | 1204 | return -ENODEV; |
@@ -1205,10 +1213,6 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1205 | return 0; | 1213 | return 0; |
1206 | } | 1214 | } |
1207 | 1215 | ||
1208 | sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no); | ||
1209 | QDIO_DBF_TEXT1(0, trace, dbf_text); | ||
1210 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1211 | |||
1212 | tiqdio_remove_input_queues(irq_ptr); | 1216 | tiqdio_remove_input_queues(irq_ptr); |
1213 | qdio_shutdown_queues(cdev); | 1217 | qdio_shutdown_queues(cdev); |
1214 | qdio_shutdown_debug_entries(irq_ptr, cdev); | 1218 | qdio_shutdown_debug_entries(irq_ptr, cdev); |
@@ -1247,7 +1251,6 @@ no_cleanup: | |||
1247 | 1251 | ||
1248 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | 1252 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); |
1249 | mutex_unlock(&irq_ptr->setup_mutex); | 1253 | mutex_unlock(&irq_ptr->setup_mutex); |
1250 | module_put(THIS_MODULE); | ||
1251 | if (rc) | 1254 | if (rc) |
1252 | return rc; | 1255 | return rc; |
1253 | return 0; | 1256 | return 0; |
@@ -1263,16 +1266,14 @@ int qdio_free(struct ccw_device *cdev) | |||
1263 | struct qdio_irq *irq_ptr; | 1266 | struct qdio_irq *irq_ptr; |
1264 | char dbf_text[15]; | 1267 | char dbf_text[15]; |
1265 | 1268 | ||
1269 | sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no); | ||
1270 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1271 | |||
1266 | irq_ptr = cdev->private->qdio_data; | 1272 | irq_ptr = cdev->private->qdio_data; |
1267 | if (!irq_ptr) | 1273 | if (!irq_ptr) |
1268 | return -ENODEV; | 1274 | return -ENODEV; |
1269 | 1275 | ||
1270 | mutex_lock(&irq_ptr->setup_mutex); | 1276 | mutex_lock(&irq_ptr->setup_mutex); |
1271 | |||
1272 | sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no); | ||
1273 | QDIO_DBF_TEXT1(0, trace, dbf_text); | ||
1274 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1275 | |||
1276 | cdev->private->qdio_data = NULL; | 1277 | cdev->private->qdio_data = NULL; |
1277 | mutex_unlock(&irq_ptr->setup_mutex); | 1278 | mutex_unlock(&irq_ptr->setup_mutex); |
1278 | 1279 | ||
@@ -1295,7 +1296,6 @@ int qdio_initialize(struct qdio_initialize *init_data) | |||
1295 | 1296 | ||
1296 | sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); | 1297 | sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); |
1297 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 1298 | QDIO_DBF_TEXT0(0, setup, dbf_text); |
1298 | QDIO_DBF_TEXT0(0, trace, dbf_text); | ||
1299 | 1299 | ||
1300 | rc = qdio_allocate(init_data); | 1300 | rc = qdio_allocate(init_data); |
1301 | if (rc) | 1301 | if (rc) |
@@ -1319,7 +1319,6 @@ int qdio_allocate(struct qdio_initialize *init_data) | |||
1319 | 1319 | ||
1320 | sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); | 1320 | sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); |
1321 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 1321 | QDIO_DBF_TEXT0(0, setup, dbf_text); |
1322 | QDIO_DBF_TEXT0(0, trace, dbf_text); | ||
1323 | 1322 | ||
1324 | if ((init_data->no_input_qs && !init_data->input_handler) || | 1323 | if ((init_data->no_input_qs && !init_data->input_handler) || |
1325 | (init_data->no_output_qs && !init_data->output_handler)) | 1324 | (init_data->no_output_qs && !init_data->output_handler)) |
@@ -1389,6 +1388,9 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1389 | unsigned long saveflags; | 1388 | unsigned long saveflags; |
1390 | int rc; | 1389 | int rc; |
1391 | 1390 | ||
1391 | sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); | ||
1392 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1393 | |||
1392 | irq_ptr = cdev->private->qdio_data; | 1394 | irq_ptr = cdev->private->qdio_data; |
1393 | if (!irq_ptr) | 1395 | if (!irq_ptr) |
1394 | return -ENODEV; | 1396 | return -ENODEV; |
@@ -1396,13 +1398,6 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1396 | if (cdev->private->state != DEV_STATE_ONLINE) | 1398 | if (cdev->private->state != DEV_STATE_ONLINE) |
1397 | return -EINVAL; | 1399 | return -EINVAL; |
1398 | 1400 | ||
1399 | if (!try_module_get(THIS_MODULE)) | ||
1400 | return -EINVAL; | ||
1401 | |||
1402 | sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); | ||
1403 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1404 | QDIO_DBF_TEXT0(0, trace, dbf_text); | ||
1405 | |||
1406 | mutex_lock(&irq_ptr->setup_mutex); | 1401 | mutex_lock(&irq_ptr->setup_mutex); |
1407 | qdio_setup_irq(init_data); | 1402 | qdio_setup_irq(init_data); |
1408 | 1403 | ||
@@ -1472,6 +1467,9 @@ int qdio_activate(struct ccw_device *cdev) | |||
1472 | unsigned long saveflags; | 1467 | unsigned long saveflags; |
1473 | char dbf_text[20]; | 1468 | char dbf_text[20]; |
1474 | 1469 | ||
1470 | sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no); | ||
1471 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1472 | |||
1475 | irq_ptr = cdev->private->qdio_data; | 1473 | irq_ptr = cdev->private->qdio_data; |
1476 | if (!irq_ptr) | 1474 | if (!irq_ptr) |
1477 | return -ENODEV; | 1475 | return -ENODEV; |
@@ -1485,10 +1483,6 @@ int qdio_activate(struct ccw_device *cdev) | |||
1485 | goto out; | 1483 | goto out; |
1486 | } | 1484 | } |
1487 | 1485 | ||
1488 | sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no); | ||
1489 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
1490 | QDIO_DBF_TEXT2(0, trace, dbf_text); | ||
1491 | |||
1492 | irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; | 1486 | irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; |
1493 | irq_ptr->ccw.flags = CCW_FLAG_SLI; | 1487 | irq_ptr->ccw.flags = CCW_FLAG_SLI; |
1494 | irq_ptr->ccw.count = irq_ptr->aqueue.count; | 1488 | irq_ptr->ccw.count = irq_ptr->aqueue.count; |
@@ -1663,7 +1657,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1663 | #ifdef CONFIG_QDIO_DEBUG | 1657 | #ifdef CONFIG_QDIO_DEBUG |
1664 | char dbf_text[20]; | 1658 | char dbf_text[20]; |
1665 | 1659 | ||
1666 | sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no); | 1660 | sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no); |
1667 | QDIO_DBF_TEXT3(0, trace, dbf_text); | 1661 | QDIO_DBF_TEXT3(0, trace, dbf_text); |
1668 | #endif /* CONFIG_QDIO_DEBUG */ | 1662 | #endif /* CONFIG_QDIO_DEBUG */ |
1669 | 1663 | ||
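Editor's note: the qdio_main.c hunks standardise the s390 debug-feature entries so each one carries the subchannel number, formatted into a small stack buffer. The recurring pattern, as used at the top of qdio_establish()/qdio_activate() in the hunk (dbf_text is sized 15 in the file, which comfortably holds the four-character tag plus a 16-bit subchannel number in hex):

    char dbf_text[15];

    sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
    QDIO_DBF_TEXT0(0, setup, dbf_text);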
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 1bd2a208db28..1679e2f91c94 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -165,7 +165,7 @@ static void setup_queues(struct qdio_irq *irq_ptr, | |||
165 | void **output_sbal_array = qdio_init->output_sbal_addr_array; | 165 | void **output_sbal_array = qdio_init->output_sbal_addr_array; |
166 | int i; | 166 | int i; |
167 | 167 | ||
168 | sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no); | 168 | sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no); |
169 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 169 | QDIO_DBF_TEXT0(0, setup, dbf_text); |
170 | 170 | ||
171 | for_each_input_queue(irq_ptr, q, i) { | 171 | for_each_input_queue(irq_ptr, q, i) { |
@@ -285,7 +285,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) | |||
285 | rc = __get_ssqd_info(irq_ptr); | 285 | rc = __get_ssqd_info(irq_ptr); |
286 | if (rc) { | 286 | if (rc) { |
287 | QDIO_DBF_TEXT2(0, setup, "ssqdasig"); | 287 | QDIO_DBF_TEXT2(0, setup, "ssqdasig"); |
288 | sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no); | 288 | sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no); |
289 | QDIO_DBF_TEXT2(0, setup, dbf_text); | 289 | QDIO_DBF_TEXT2(0, setup, dbf_text); |
290 | sprintf(dbf_text, "rc:%d", rc); | 290 | sprintf(dbf_text, "rc:%d", rc); |
291 | QDIO_DBF_TEXT2(0, setup, dbf_text); | 291 | QDIO_DBF_TEXT2(0, setup, dbf_text); |
@@ -447,7 +447,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | |||
447 | { | 447 | { |
448 | char s[80]; | 448 | char s[80]; |
449 | 449 | ||
450 | sprintf(s, "%s ", cdev->dev.bus_id); | 450 | sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); |
451 | 451 | ||
452 | switch (irq_ptr->qib.qfmt) { | 452 | switch (irq_ptr->qib.qfmt) { |
453 | case QDIO_QETH_QFMT: | 453 | case QDIO_QETH_QFMT: |
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 9291a771d812..ea7f61400267 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
@@ -113,7 +113,11 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | |||
113 | struct qdio_q *q; | 113 | struct qdio_q *q; |
114 | int i; | 114 | int i; |
115 | 115 | ||
116 | for_each_input_queue(irq_ptr, q, i) { | 116 | for (i = 0; i < irq_ptr->nr_input_qs; i++) { |
117 | q = irq_ptr->input_qs[i]; | ||
118 | /* if establish triggered an error */ | ||
119 | if (!q || !q->entry.prev || !q->entry.next) | ||
120 | continue; | ||
117 | list_del_rcu(&q->entry); | 121 | list_del_rcu(&q->entry); |
118 | synchronize_rcu(); | 122 | synchronize_rcu(); |
119 | } | 123 | } |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index a08b1682c8e8..e10ac9ab2d44 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -133,14 +133,14 @@ claw_register_debug_facility(void) | |||
133 | static inline void | 133 | static inline void |
134 | claw_set_busy(struct net_device *dev) | 134 | claw_set_busy(struct net_device *dev) |
135 | { | 135 | { |
136 | ((struct claw_privbk *) dev->priv)->tbusy=1; | 136 | ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; |
137 | eieio(); | 137 | eieio(); |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline void | 140 | static inline void |
141 | claw_clear_busy(struct net_device *dev) | 141 | claw_clear_busy(struct net_device *dev) |
142 | { | 142 | { |
143 | clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy)); | 143 | clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); |
144 | netif_wake_queue(dev); | 144 | netif_wake_queue(dev); |
145 | eieio(); | 145 | eieio(); |
146 | } | 146 | } |
@@ -149,20 +149,20 @@ static inline int | |||
149 | claw_check_busy(struct net_device *dev) | 149 | claw_check_busy(struct net_device *dev) |
150 | { | 150 | { |
151 | eieio(); | 151 | eieio(); |
152 | return ((struct claw_privbk *) dev->priv)->tbusy; | 152 | return ((struct claw_privbk *) dev->ml_priv)->tbusy; |
153 | } | 153 | } |
154 | 154 | ||
155 | static inline void | 155 | static inline void |
156 | claw_setbit_busy(int nr,struct net_device *dev) | 156 | claw_setbit_busy(int nr,struct net_device *dev) |
157 | { | 157 | { |
158 | netif_stop_queue(dev); | 158 | netif_stop_queue(dev); |
159 | set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy)); | 159 | set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); |
160 | } | 160 | } |
161 | 161 | ||
162 | static inline void | 162 | static inline void |
163 | claw_clearbit_busy(int nr,struct net_device *dev) | 163 | claw_clearbit_busy(int nr,struct net_device *dev) |
164 | { | 164 | { |
165 | clear_bit(nr,(void *)&(((struct claw_privbk *)dev->priv)->tbusy)); | 165 | clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); |
166 | netif_wake_queue(dev); | 166 | netif_wake_queue(dev); |
167 | } | 167 | } |
168 | 168 | ||
@@ -171,7 +171,7 @@ claw_test_and_setbit_busy(int nr,struct net_device *dev) | |||
171 | { | 171 | { |
172 | netif_stop_queue(dev); | 172 | netif_stop_queue(dev); |
173 | return test_and_set_bit(nr, | 173 | return test_and_set_bit(nr, |
174 | (void *)&(((struct claw_privbk *) dev->priv)->tbusy)); | 174 | (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy)); |
175 | } | 175 | } |
176 | 176 | ||
177 | 177 | ||
@@ -271,6 +271,7 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
271 | if (!get_device(&cgdev->dev)) | 271 | if (!get_device(&cgdev->dev)) |
272 | return -ENODEV; | 272 | return -ENODEV; |
273 | privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); | 273 | privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); |
274 | cgdev->dev.driver_data = privptr; | ||
274 | if (privptr == NULL) { | 275 | if (privptr == NULL) { |
275 | probe_error(cgdev); | 276 | probe_error(cgdev); |
276 | put_device(&cgdev->dev); | 277 | put_device(&cgdev->dev); |
@@ -305,7 +306,6 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
305 | privptr->p_env->p_priv = privptr; | 306 | privptr->p_env->p_priv = privptr; |
306 | cgdev->cdev[0]->handler = claw_irq_handler; | 307 | cgdev->cdev[0]->handler = claw_irq_handler; |
307 | cgdev->cdev[1]->handler = claw_irq_handler; | 308 | cgdev->cdev[1]->handler = claw_irq_handler; |
308 | cgdev->dev.driver_data = privptr; | ||
309 | CLAW_DBF_TEXT(2, setup, "prbext 0"); | 309 | CLAW_DBF_TEXT(2, setup, "prbext 0"); |
310 | 310 | ||
311 | return 0; | 311 | return 0; |
@@ -319,7 +319,7 @@ static int | |||
319 | claw_tx(struct sk_buff *skb, struct net_device *dev) | 319 | claw_tx(struct sk_buff *skb, struct net_device *dev) |
320 | { | 320 | { |
321 | int rc; | 321 | int rc; |
322 | struct claw_privbk *privptr=dev->priv; | 322 | struct claw_privbk *privptr = dev->ml_priv; |
323 | unsigned long saveflags; | 323 | unsigned long saveflags; |
324 | struct chbk *p_ch; | 324 | struct chbk *p_ch; |
325 | 325 | ||
@@ -404,7 +404,7 @@ claw_pack_skb(struct claw_privbk *privptr) | |||
404 | static int | 404 | static int |
405 | claw_change_mtu(struct net_device *dev, int new_mtu) | 405 | claw_change_mtu(struct net_device *dev, int new_mtu) |
406 | { | 406 | { |
407 | struct claw_privbk *privptr=dev->priv; | 407 | struct claw_privbk *privptr = dev->ml_priv; |
408 | int buff_size; | 408 | int buff_size; |
409 | CLAW_DBF_TEXT(4, trace, "setmtu"); | 409 | CLAW_DBF_TEXT(4, trace, "setmtu"); |
410 | buff_size = privptr->p_env->write_size; | 410 | buff_size = privptr->p_env->write_size; |
@@ -434,7 +434,7 @@ claw_open(struct net_device *dev) | |||
434 | struct ccwbk *p_buf; | 434 | struct ccwbk *p_buf; |
435 | 435 | ||
436 | CLAW_DBF_TEXT(4, trace, "open"); | 436 | CLAW_DBF_TEXT(4, trace, "open"); |
437 | privptr = (struct claw_privbk *)dev->priv; | 437 | privptr = (struct claw_privbk *)dev->ml_priv; |
438 | /* allocate and initialize CCW blocks */ | 438 | /* allocate and initialize CCW blocks */ |
439 | if (privptr->buffs_alloc == 0) { | 439 | if (privptr->buffs_alloc == 0) { |
440 | rc=init_ccw_bk(dev); | 440 | rc=init_ccw_bk(dev); |
@@ -780,7 +780,7 @@ claw_irq_tasklet ( unsigned long data ) | |||
780 | p_ch = (struct chbk *) data; | 780 | p_ch = (struct chbk *) data; |
781 | dev = (struct net_device *)p_ch->ndev; | 781 | dev = (struct net_device *)p_ch->ndev; |
782 | CLAW_DBF_TEXT(4, trace, "IRQtask"); | 782 | CLAW_DBF_TEXT(4, trace, "IRQtask"); |
783 | privptr = (struct claw_privbk *) dev->priv; | 783 | privptr = (struct claw_privbk *)dev->ml_priv; |
784 | unpack_read(dev); | 784 | unpack_read(dev); |
785 | clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); | 785 | clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); |
786 | CLAW_DBF_TEXT(4, trace, "TskletXt"); | 786 | CLAW_DBF_TEXT(4, trace, "TskletXt"); |
@@ -805,7 +805,7 @@ claw_release(struct net_device *dev) | |||
805 | 805 | ||
806 | if (!dev) | 806 | if (!dev) |
807 | return 0; | 807 | return 0; |
808 | privptr = (struct claw_privbk *) dev->priv; | 808 | privptr = (struct claw_privbk *)dev->ml_priv; |
809 | if (!privptr) | 809 | if (!privptr) |
810 | return 0; | 810 | return 0; |
811 | CLAW_DBF_TEXT(4, trace, "release"); | 811 | CLAW_DBF_TEXT(4, trace, "release"); |
@@ -960,7 +960,7 @@ claw_write_next ( struct chbk * p_ch ) | |||
960 | if (p_ch->claw_state == CLAW_STOP) | 960 | if (p_ch->claw_state == CLAW_STOP) |
961 | return; | 961 | return; |
962 | dev = (struct net_device *) p_ch->ndev; | 962 | dev = (struct net_device *) p_ch->ndev; |
963 | privptr = (struct claw_privbk *) dev->priv; | 963 | privptr = (struct claw_privbk *) dev->ml_priv; |
964 | claw_free_wrt_buf( dev ); | 964 | claw_free_wrt_buf( dev ); |
965 | if ((privptr->write_free_count > 0) && | 965 | if ((privptr->write_free_count > 0) && |
966 | !skb_queue_empty(&p_ch->collect_queue)) { | 966 | !skb_queue_empty(&p_ch->collect_queue)) { |
@@ -1042,7 +1042,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1042 | struct ccw1 temp_ccw; | 1042 | struct ccw1 temp_ccw; |
1043 | struct endccw * p_end; | 1043 | struct endccw * p_end; |
1044 | CLAW_DBF_TEXT(4, trace, "addreads"); | 1044 | CLAW_DBF_TEXT(4, trace, "addreads"); |
1045 | privptr = dev->priv; | 1045 | privptr = dev->ml_priv; |
1046 | p_end = privptr->p_end_ccw; | 1046 | p_end = privptr->p_end_ccw; |
1047 | 1047 | ||
1048 | /* first CCW and last CCW contains a new set of read channel programs | 1048 | /* first CCW and last CCW contains a new set of read channel programs |
@@ -1212,7 +1212,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name ) | |||
1212 | int rc=0; | 1212 | int rc=0; |
1213 | 1213 | ||
1214 | CLAW_DBF_TEXT(2, setup, "findlink"); | 1214 | CLAW_DBF_TEXT(2, setup, "findlink"); |
1215 | privptr=dev->priv; | 1215 | privptr = dev->ml_priv; |
1216 | p_env=privptr->p_env; | 1216 | p_env=privptr->p_env; |
1217 | switch (p_env->packing) | 1217 | switch (p_env->packing) |
1218 | { | 1218 | { |
@@ -1264,7 +1264,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1264 | struct chbk *ch; | 1264 | struct chbk *ch; |
1265 | 1265 | ||
1266 | CLAW_DBF_TEXT(4, trace, "hw_tx"); | 1266 | CLAW_DBF_TEXT(4, trace, "hw_tx"); |
1267 | privptr = (struct claw_privbk *) (dev->priv); | 1267 | privptr = (struct claw_privbk *)(dev->ml_priv); |
1268 | p_ch=(struct chbk *)&privptr->channel[WRITE]; | 1268 | p_ch=(struct chbk *)&privptr->channel[WRITE]; |
1269 | p_env =privptr->p_env; | 1269 | p_env =privptr->p_env; |
1270 | claw_free_wrt_buf(dev); /* Clean up free chain if posible */ | 1270 | claw_free_wrt_buf(dev); /* Clean up free chain if posible */ |
@@ -1483,8 +1483,8 @@ init_ccw_bk(struct net_device *dev) | |||
1483 | struct ccwbk*p_last_CCWB; | 1483 | struct ccwbk*p_last_CCWB; |
1484 | struct ccwbk*p_first_CCWB; | 1484 | struct ccwbk*p_first_CCWB; |
1485 | struct endccw *p_endccw=NULL; | 1485 | struct endccw *p_endccw=NULL; |
1486 | addr_t real_address; | 1486 | addr_t real_address; |
1487 | struct claw_privbk *privptr=dev->priv; | 1487 | struct claw_privbk *privptr = dev->ml_priv; |
1488 | struct clawh *pClawH=NULL; | 1488 | struct clawh *pClawH=NULL; |
1489 | addr_t real_TIC_address; | 1489 | addr_t real_TIC_address; |
1490 | int i,j; | 1490 | int i,j; |
@@ -1960,19 +1960,16 @@ init_ccw_bk(struct net_device *dev) | |||
1960 | static void | 1960 | static void |
1961 | probe_error( struct ccwgroup_device *cgdev) | 1961 | probe_error( struct ccwgroup_device *cgdev) |
1962 | { | 1962 | { |
1963 | struct claw_privbk *privptr; | 1963 | struct claw_privbk *privptr; |
1964 | 1964 | ||
1965 | CLAW_DBF_TEXT(4, trace, "proberr"); | 1965 | CLAW_DBF_TEXT(4, trace, "proberr"); |
1966 | privptr=(struct claw_privbk *)cgdev->dev.driver_data; | 1966 | privptr = (struct claw_privbk *) cgdev->dev.driver_data; |
1967 | if (privptr!=NULL) { | 1967 | if (privptr != NULL) { |
1968 | cgdev->dev.driver_data = NULL; | ||
1968 | kfree(privptr->p_env); | 1969 | kfree(privptr->p_env); |
1969 | privptr->p_env=NULL; | 1970 | kfree(privptr->p_mtc_envelope); |
1970 | kfree(privptr->p_mtc_envelope); | 1971 | kfree(privptr); |
1971 | privptr->p_mtc_envelope=NULL; | 1972 | } |
1972 | kfree(privptr); | ||
1973 | privptr=NULL; | ||
1974 | } | ||
1975 | return; | ||
1976 | } /* probe_error */ | 1973 | } /* probe_error */ |
1977 | 1974 | ||
1978 | /*-------------------------------------------------------------------* | 1975 | /*-------------------------------------------------------------------* |
@@ -2000,7 +1997,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2000 | CLAW_DBF_TEXT(2, setup, "clw_cntl"); | 1997 | CLAW_DBF_TEXT(2, setup, "clw_cntl"); |
2001 | udelay(1000); /* Wait a ms for the control packets to | 1998 | udelay(1000); /* Wait a ms for the control packets to |
2002 | *catch up to each other */ | 1999 | *catch up to each other */ |
2003 | privptr=dev->priv; | 2000 | privptr = dev->ml_priv; |
2004 | p_env=privptr->p_env; | 2001 | p_env=privptr->p_env; |
2005 | tdev = &privptr->channel[READ].cdev->dev; | 2002 | tdev = &privptr->channel[READ].cdev->dev; |
2006 | memcpy( &temp_host_name, p_env->host_name, 8); | 2003 | memcpy( &temp_host_name, p_env->host_name, 8); |
@@ -2278,7 +2275,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link, | |||
2278 | struct sk_buff *skb; | 2275 | struct sk_buff *skb; |
2279 | 2276 | ||
2280 | CLAW_DBF_TEXT(2, setup, "sndcntl"); | 2277 | CLAW_DBF_TEXT(2, setup, "sndcntl"); |
2281 | privptr=dev->priv; | 2278 | privptr = dev->ml_priv; |
2282 | p_ctl=(struct clawctl *)&privptr->ctl_bk; | 2279 | p_ctl=(struct clawctl *)&privptr->ctl_bk; |
2283 | 2280 | ||
2284 | p_ctl->command=type; | 2281 | p_ctl->command=type; |
@@ -2348,7 +2345,7 @@ static int | |||
2348 | claw_snd_conn_req(struct net_device *dev, __u8 link) | 2345 | claw_snd_conn_req(struct net_device *dev, __u8 link) |
2349 | { | 2346 | { |
2350 | int rc; | 2347 | int rc; |
2351 | struct claw_privbk *privptr=dev->priv; | 2348 | struct claw_privbk *privptr = dev->ml_priv; |
2352 | struct clawctl *p_ctl; | 2349 | struct clawctl *p_ctl; |
2353 | 2350 | ||
2354 | CLAW_DBF_TEXT(2, setup, "snd_conn"); | 2351 | CLAW_DBF_TEXT(2, setup, "snd_conn"); |
@@ -2408,7 +2405,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev, | |||
2408 | int rc; | 2405 | int rc; |
2409 | 2406 | ||
2410 | CLAW_DBF_TEXT(2, setup, "chkresp"); | 2407 | CLAW_DBF_TEXT(2, setup, "chkresp"); |
2411 | privptr = dev->priv; | 2408 | privptr = dev->ml_priv; |
2412 | p_env=privptr->p_env; | 2409 | p_env=privptr->p_env; |
2413 | rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, | 2410 | rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, |
2414 | p_ctl->linkid, | 2411 | p_ctl->linkid, |
@@ -2446,7 +2443,7 @@ net_device_stats *claw_stats(struct net_device *dev) | |||
2446 | struct claw_privbk *privptr; | 2443 | struct claw_privbk *privptr; |
2447 | 2444 | ||
2448 | CLAW_DBF_TEXT(4, trace, "stats"); | 2445 | CLAW_DBF_TEXT(4, trace, "stats"); |
2449 | privptr = dev->priv; | 2446 | privptr = dev->ml_priv; |
2450 | return &privptr->stats; | 2447 | return &privptr->stats; |
2451 | } /* end of claw_stats */ | 2448 | } /* end of claw_stats */ |
2452 | 2449 | ||
@@ -2482,7 +2479,7 @@ unpack_read(struct net_device *dev ) | |||
2482 | p_last_ccw=NULL; | 2479 | p_last_ccw=NULL; |
2483 | p_packh=NULL; | 2480 | p_packh=NULL; |
2484 | p_packd=NULL; | 2481 | p_packd=NULL; |
2485 | privptr=dev->priv; | 2482 | privptr = dev->ml_priv; |
2486 | 2483 | ||
2487 | p_dev = &privptr->channel[READ].cdev->dev; | 2484 | p_dev = &privptr->channel[READ].cdev->dev; |
2488 | p_env = privptr->p_env; | 2485 | p_env = privptr->p_env; |
@@ -2651,7 +2648,7 @@ claw_strt_read (struct net_device *dev, int lock ) | |||
2651 | int rc = 0; | 2648 | int rc = 0; |
2652 | __u32 parm; | 2649 | __u32 parm; |
2653 | unsigned long saveflags = 0; | 2650 | unsigned long saveflags = 0; |
2654 | struct claw_privbk *privptr=dev->priv; | 2651 | struct claw_privbk *privptr = dev->ml_priv; |
2655 | struct ccwbk*p_ccwbk; | 2652 | struct ccwbk*p_ccwbk; |
2656 | struct chbk *p_ch; | 2653 | struct chbk *p_ch; |
2657 | struct clawh *p_clawh; | 2654 | struct clawh *p_clawh; |
@@ -2708,7 +2705,7 @@ claw_strt_out_IO( struct net_device *dev ) | |||
2708 | if (!dev) { | 2705 | if (!dev) { |
2709 | return; | 2706 | return; |
2710 | } | 2707 | } |
2711 | privptr=(struct claw_privbk *)dev->priv; | 2708 | privptr = (struct claw_privbk *)dev->ml_priv; |
2712 | p_ch=&privptr->channel[WRITE]; | 2709 | p_ch=&privptr->channel[WRITE]; |
2713 | 2710 | ||
2714 | CLAW_DBF_TEXT(4, trace, "strt_io"); | 2711 | CLAW_DBF_TEXT(4, trace, "strt_io"); |
@@ -2741,7 +2738,7 @@ static void | |||
2741 | claw_free_wrt_buf( struct net_device *dev ) | 2738 | claw_free_wrt_buf( struct net_device *dev ) |
2742 | { | 2739 | { |
2743 | 2740 | ||
2744 | struct claw_privbk *privptr=(struct claw_privbk *)dev->priv; | 2741 | struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv; |
2745 | struct ccwbk*p_first_ccw; | 2742 | struct ccwbk*p_first_ccw; |
2746 | struct ccwbk*p_last_ccw; | 2743 | struct ccwbk*p_last_ccw; |
2747 | struct ccwbk*p_this_ccw; | 2744 | struct ccwbk*p_this_ccw; |
@@ -2798,13 +2795,13 @@ claw_free_netdevice(struct net_device * dev, int free_dev) | |||
2798 | if (!dev) | 2795 | if (!dev) |
2799 | return; | 2796 | return; |
2800 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); | 2797 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); |
2801 | privptr = dev->priv; | 2798 | privptr = dev->ml_priv; |
2802 | if (dev->flags & IFF_RUNNING) | 2799 | if (dev->flags & IFF_RUNNING) |
2803 | claw_release(dev); | 2800 | claw_release(dev); |
2804 | if (privptr) { | 2801 | if (privptr) { |
2805 | privptr->channel[READ].ndev = NULL; /* say it's free */ | 2802 | privptr->channel[READ].ndev = NULL; /* say it's free */ |
2806 | } | 2803 | } |
2807 | dev->priv=NULL; | 2804 | dev->ml_priv = NULL; |
2808 | #ifdef MODULE | 2805 | #ifdef MODULE |
2809 | if (free_dev) { | 2806 | if (free_dev) { |
2810 | free_netdev(dev); | 2807 | free_netdev(dev); |
@@ -2921,7 +2918,7 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
2921 | printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__); | 2918 | printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__); |
2922 | goto out; | 2919 | goto out; |
2923 | } | 2920 | } |
2924 | dev->priv = privptr; | 2921 | dev->ml_priv = privptr; |
2925 | cgdev->dev.driver_data = privptr; | 2922 | cgdev->dev.driver_data = privptr; |
2926 | cgdev->cdev[READ]->dev.driver_data = privptr; | 2923 | cgdev->cdev[READ]->dev.driver_data = privptr; |
2927 | cgdev->cdev[WRITE]->dev.driver_data = privptr; | 2924 | cgdev->cdev[WRITE]->dev.driver_data = privptr; |
@@ -3002,7 +2999,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) | |||
3002 | ret = claw_release(ndev); | 2999 | ret = claw_release(ndev); |
3003 | ndev->flags &=~IFF_RUNNING; | 3000 | ndev->flags &=~IFF_RUNNING; |
3004 | unregister_netdev(ndev); | 3001 | unregister_netdev(ndev); |
3005 | ndev->priv = NULL; /* cgdev data, not ndev's to free */ | 3002 | ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ |
3006 | claw_free_netdevice(ndev, 1); | 3003 | claw_free_netdevice(ndev, 1); |
3007 | priv->channel[READ].ndev = NULL; | 3004 | priv->channel[READ].ndev = NULL; |
3008 | priv->channel[WRITE].ndev = NULL; | 3005 | priv->channel[WRITE].ndev = NULL; |
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 0b4e6253abe4..42776550acfd 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
@@ -245,7 +245,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) | |||
245 | { | 245 | { |
246 | struct channel *ch = arg; | 246 | struct channel *ch = arg; |
247 | struct net_device *dev = ch->netdev; | 247 | struct net_device *dev = ch->netdev; |
248 | struct ctcm_priv *priv = dev->priv; | 248 | struct ctcm_priv *priv = dev->ml_priv; |
249 | struct sk_buff *skb; | 249 | struct sk_buff *skb; |
250 | int first = 1; | 250 | int first = 1; |
251 | int i; | 251 | int i; |
@@ -336,7 +336,7 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) | |||
336 | { | 336 | { |
337 | struct channel *ch = arg; | 337 | struct channel *ch = arg; |
338 | struct net_device *dev = ch->netdev; | 338 | struct net_device *dev = ch->netdev; |
339 | struct ctcm_priv *priv = dev->priv; | 339 | struct ctcm_priv *priv = dev->ml_priv; |
340 | 340 | ||
341 | CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); | 341 | CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); |
342 | 342 | ||
@@ -357,7 +357,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) | |||
357 | { | 357 | { |
358 | struct channel *ch = arg; | 358 | struct channel *ch = arg; |
359 | struct net_device *dev = ch->netdev; | 359 | struct net_device *dev = ch->netdev; |
360 | struct ctcm_priv *priv = dev->priv; | 360 | struct ctcm_priv *priv = dev->ml_priv; |
361 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; | 361 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; |
362 | struct sk_buff *skb = ch->trans_skb; | 362 | struct sk_buff *skb = ch->trans_skb; |
363 | __u16 block_len = *((__u16 *)skb->data); | 363 | __u16 block_len = *((__u16 *)skb->data); |
@@ -459,7 +459,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) | |||
459 | chx_rxidle(fi, event, arg); | 459 | chx_rxidle(fi, event, arg); |
460 | } else { | 460 | } else { |
461 | struct net_device *dev = ch->netdev; | 461 | struct net_device *dev = ch->netdev; |
462 | struct ctcm_priv *priv = dev->priv; | 462 | struct ctcm_priv *priv = dev->ml_priv; |
463 | fsm_newstate(fi, CTC_STATE_TXIDLE); | 463 | fsm_newstate(fi, CTC_STATE_TXIDLE); |
464 | fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); | 464 | fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); |
465 | } | 465 | } |
@@ -496,7 +496,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) | |||
496 | if ((CHANNEL_DIRECTION(ch->flags) == READ) && | 496 | if ((CHANNEL_DIRECTION(ch->flags) == READ) && |
497 | (ch->protocol == CTCM_PROTO_S390)) { | 497 | (ch->protocol == CTCM_PROTO_S390)) { |
498 | struct net_device *dev = ch->netdev; | 498 | struct net_device *dev = ch->netdev; |
499 | struct ctcm_priv *priv = dev->priv; | 499 | struct ctcm_priv *priv = dev->ml_priv; |
500 | fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); | 500 | fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); |
501 | } | 501 | } |
502 | } | 502 | } |
@@ -514,7 +514,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg) | |||
514 | { | 514 | { |
515 | struct channel *ch = arg; | 515 | struct channel *ch = arg; |
516 | struct net_device *dev = ch->netdev; | 516 | struct net_device *dev = ch->netdev; |
517 | struct ctcm_priv *priv = dev->priv; | 517 | struct ctcm_priv *priv = dev->ml_priv; |
518 | __u16 buflen; | 518 | __u16 buflen; |
519 | int rc; | 519 | int rc; |
520 | 520 | ||
@@ -699,7 +699,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state, | |||
699 | struct channel *ch) | 699 | struct channel *ch) |
700 | { | 700 | { |
701 | struct net_device *dev = ch->netdev; | 701 | struct net_device *dev = ch->netdev; |
702 | struct ctcm_priv *priv = dev->priv; | 702 | struct ctcm_priv *priv = dev->ml_priv; |
703 | 703 | ||
704 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, | 704 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, |
705 | "%s(%s): %s[%d]\n", | 705 | "%s(%s): %s[%d]\n", |
@@ -784,7 +784,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) | |||
784 | { | 784 | { |
785 | struct channel *ch = arg; | 785 | struct channel *ch = arg; |
786 | struct net_device *dev = ch->netdev; | 786 | struct net_device *dev = ch->netdev; |
787 | struct ctcm_priv *priv = dev->priv; | 787 | struct ctcm_priv *priv = dev->ml_priv; |
788 | 788 | ||
789 | /* | 789 | /* |
790 | * Special case: Got UC_RCRESET on setmode. | 790 | * Special case: Got UC_RCRESET on setmode. |
@@ -874,7 +874,7 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) | |||
874 | { | 874 | { |
875 | struct channel *ch = arg; | 875 | struct channel *ch = arg; |
876 | struct net_device *dev = ch->netdev; | 876 | struct net_device *dev = ch->netdev; |
877 | struct ctcm_priv *priv = dev->priv; | 877 | struct ctcm_priv *priv = dev->ml_priv; |
878 | 878 | ||
879 | if (event == CTC_EVENT_TIMER) { | 879 | if (event == CTC_EVENT_TIMER) { |
880 | if (!IS_MPCDEV(dev)) | 880 | if (!IS_MPCDEV(dev)) |
@@ -902,7 +902,7 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) | |||
902 | { | 902 | { |
903 | struct channel *ch = arg; | 903 | struct channel *ch = arg; |
904 | struct net_device *dev = ch->netdev; | 904 | struct net_device *dev = ch->netdev; |
905 | struct ctcm_priv *priv = dev->priv; | 905 | struct ctcm_priv *priv = dev->ml_priv; |
906 | 906 | ||
907 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 907 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
908 | "%s(%s): RX %s busy, init. fail", | 908 | "%s(%s): RX %s busy, init. fail", |
@@ -923,7 +923,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) | |||
923 | struct channel *ch = arg; | 923 | struct channel *ch = arg; |
924 | struct channel *ch2; | 924 | struct channel *ch2; |
925 | struct net_device *dev = ch->netdev; | 925 | struct net_device *dev = ch->netdev; |
926 | struct ctcm_priv *priv = dev->priv; | 926 | struct ctcm_priv *priv = dev->ml_priv; |
927 | 927 | ||
928 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, | 928 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
929 | "%s: %s: remote disconnect - re-init ...", | 929 | "%s: %s: remote disconnect - re-init ...", |
@@ -954,7 +954,7 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) | |||
954 | { | 954 | { |
955 | struct channel *ch = arg; | 955 | struct channel *ch = arg; |
956 | struct net_device *dev = ch->netdev; | 956 | struct net_device *dev = ch->netdev; |
957 | struct ctcm_priv *priv = dev->priv; | 957 | struct ctcm_priv *priv = dev->ml_priv; |
958 | 958 | ||
959 | if (event == CTC_EVENT_TIMER) { | 959 | if (event == CTC_EVENT_TIMER) { |
960 | fsm_deltimer(&ch->timer); | 960 | fsm_deltimer(&ch->timer); |
@@ -984,7 +984,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) | |||
984 | { | 984 | { |
985 | struct channel *ch = arg; | 985 | struct channel *ch = arg; |
986 | struct net_device *dev = ch->netdev; | 986 | struct net_device *dev = ch->netdev; |
987 | struct ctcm_priv *priv = dev->priv; | 987 | struct ctcm_priv *priv = dev->ml_priv; |
988 | struct sk_buff *skb; | 988 | struct sk_buff *skb; |
989 | 989 | ||
990 | CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", | 990 | CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", |
@@ -1057,7 +1057,7 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) | |||
1057 | { | 1057 | { |
1058 | struct channel *ch = arg; | 1058 | struct channel *ch = arg; |
1059 | struct net_device *dev = ch->netdev; | 1059 | struct net_device *dev = ch->netdev; |
1060 | struct ctcm_priv *priv = dev->priv; | 1060 | struct ctcm_priv *priv = dev->ml_priv; |
1061 | int rd = CHANNEL_DIRECTION(ch->flags); | 1061 | int rd = CHANNEL_DIRECTION(ch->flags); |
1062 | 1062 | ||
1063 | fsm_deltimer(&ch->timer); | 1063 | fsm_deltimer(&ch->timer); |
@@ -1207,7 +1207,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1207 | { | 1207 | { |
1208 | struct channel *ch = arg; | 1208 | struct channel *ch = arg; |
1209 | struct net_device *dev = ch->netdev; | 1209 | struct net_device *dev = ch->netdev; |
1210 | struct ctcm_priv *priv = dev->priv; | 1210 | struct ctcm_priv *priv = dev->ml_priv; |
1211 | struct mpc_group *grp = priv->mpcg; | 1211 | struct mpc_group *grp = priv->mpcg; |
1212 | struct sk_buff *skb; | 1212 | struct sk_buff *skb; |
1213 | int first = 1; | 1213 | int first = 1; |
@@ -1368,7 +1368,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) | |||
1368 | { | 1368 | { |
1369 | struct channel *ch = arg; | 1369 | struct channel *ch = arg; |
1370 | struct net_device *dev = ch->netdev; | 1370 | struct net_device *dev = ch->netdev; |
1371 | struct ctcm_priv *priv = dev->priv; | 1371 | struct ctcm_priv *priv = dev->ml_priv; |
1372 | struct mpc_group *grp = priv->mpcg; | 1372 | struct mpc_group *grp = priv->mpcg; |
1373 | struct sk_buff *skb = ch->trans_skb; | 1373 | struct sk_buff *skb = ch->trans_skb; |
1374 | struct sk_buff *new_skb; | 1374 | struct sk_buff *new_skb; |
@@ -1471,7 +1471,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) | |||
1471 | { | 1471 | { |
1472 | struct channel *ch = arg; | 1472 | struct channel *ch = arg; |
1473 | struct net_device *dev = ch->netdev; | 1473 | struct net_device *dev = ch->netdev; |
1474 | struct ctcm_priv *priv = dev->priv; | 1474 | struct ctcm_priv *priv = dev->ml_priv; |
1475 | struct mpc_group *gptr = priv->mpcg; | 1475 | struct mpc_group *gptr = priv->mpcg; |
1476 | 1476 | ||
1477 | CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", | 1477 | CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", |
@@ -1525,7 +1525,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) | |||
1525 | { | 1525 | { |
1526 | struct channel *ch = arg; | 1526 | struct channel *ch = arg; |
1527 | struct net_device *dev = ch->netdev; | 1527 | struct net_device *dev = ch->netdev; |
1528 | struct ctcm_priv *priv = dev->priv; | 1528 | struct ctcm_priv *priv = dev->ml_priv; |
1529 | struct mpc_group *grp = priv->mpcg; | 1529 | struct mpc_group *grp = priv->mpcg; |
1530 | int rc; | 1530 | int rc; |
1531 | unsigned long saveflags = 0; /* avoids compiler warning */ | 1531 | unsigned long saveflags = 0; /* avoids compiler warning */ |
@@ -1580,7 +1580,7 @@ static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) | |||
1580 | { | 1580 | { |
1581 | struct channel *ch = arg; | 1581 | struct channel *ch = arg; |
1582 | struct net_device *dev = ch->netdev; | 1582 | struct net_device *dev = ch->netdev; |
1583 | struct ctcm_priv *priv = dev->priv; | 1583 | struct ctcm_priv *priv = dev->ml_priv; |
1584 | struct mpc_group *grp = priv->mpcg; | 1584 | struct mpc_group *grp = priv->mpcg; |
1585 | 1585 | ||
1586 | CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", | 1586 | CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", |
@@ -1639,7 +1639,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) | |||
1639 | { | 1639 | { |
1640 | struct channel *ch = arg; | 1640 | struct channel *ch = arg; |
1641 | struct net_device *dev = ch->netdev; | 1641 | struct net_device *dev = ch->netdev; |
1642 | struct ctcm_priv *priv = dev->priv; | 1642 | struct ctcm_priv *priv = dev->ml_priv; |
1643 | struct mpc_group *grp = priv->mpcg; | 1643 | struct mpc_group *grp = priv->mpcg; |
1644 | 1644 | ||
1645 | CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", | 1645 | CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", |
@@ -1724,7 +1724,7 @@ static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg) | |||
1724 | { | 1724 | { |
1725 | struct channel *ch = arg; | 1725 | struct channel *ch = arg; |
1726 | struct net_device *dev = ch->netdev; | 1726 | struct net_device *dev = ch->netdev; |
1727 | struct ctcm_priv *priv = dev->priv; | 1727 | struct ctcm_priv *priv = dev->ml_priv; |
1728 | struct mpc_group *grp = priv->mpcg; | 1728 | struct mpc_group *grp = priv->mpcg; |
1729 | 1729 | ||
1730 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); | 1730 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); |
@@ -1740,7 +1740,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) | |||
1740 | { | 1740 | { |
1741 | struct channel *ach = arg; | 1741 | struct channel *ach = arg; |
1742 | struct net_device *dev = ach->netdev; | 1742 | struct net_device *dev = ach->netdev; |
1743 | struct ctcm_priv *priv = dev->priv; | 1743 | struct ctcm_priv *priv = dev->ml_priv; |
1744 | struct mpc_group *grp = priv->mpcg; | 1744 | struct mpc_group *grp = priv->mpcg; |
1745 | struct channel *wch = priv->channel[WRITE]; | 1745 | struct channel *wch = priv->channel[WRITE]; |
1746 | struct channel *rch = priv->channel[READ]; | 1746 | struct channel *rch = priv->channel[READ]; |
@@ -2050,7 +2050,7 @@ int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm); | |||
2050 | static void dev_action_start(fsm_instance *fi, int event, void *arg) | 2050 | static void dev_action_start(fsm_instance *fi, int event, void *arg) |
2051 | { | 2051 | { |
2052 | struct net_device *dev = arg; | 2052 | struct net_device *dev = arg; |
2053 | struct ctcm_priv *priv = dev->priv; | 2053 | struct ctcm_priv *priv = dev->ml_priv; |
2054 | int direction; | 2054 | int direction; |
2055 | 2055 | ||
2056 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); | 2056 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); |
@@ -2076,7 +2076,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg) | |||
2076 | { | 2076 | { |
2077 | int direction; | 2077 | int direction; |
2078 | struct net_device *dev = arg; | 2078 | struct net_device *dev = arg; |
2079 | struct ctcm_priv *priv = dev->priv; | 2079 | struct ctcm_priv *priv = dev->ml_priv; |
2080 | 2080 | ||
2081 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); | 2081 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); |
2082 | 2082 | ||
@@ -2096,7 +2096,7 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg) | |||
2096 | { | 2096 | { |
2097 | int restart_timer; | 2097 | int restart_timer; |
2098 | struct net_device *dev = arg; | 2098 | struct net_device *dev = arg; |
2099 | struct ctcm_priv *priv = dev->priv; | 2099 | struct ctcm_priv *priv = dev->ml_priv; |
2100 | 2100 | ||
2101 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); | 2101 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); |
2102 | 2102 | ||
@@ -2133,12 +2133,12 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg) | |||
2133 | static void dev_action_chup(fsm_instance *fi, int event, void *arg) | 2133 | static void dev_action_chup(fsm_instance *fi, int event, void *arg) |
2134 | { | 2134 | { |
2135 | struct net_device *dev = arg; | 2135 | struct net_device *dev = arg; |
2136 | struct ctcm_priv *priv = dev->priv; | 2136 | struct ctcm_priv *priv = dev->ml_priv; |
2137 | int dev_stat = fsm_getstate(fi); | 2137 | int dev_stat = fsm_getstate(fi); |
2138 | 2138 | ||
2139 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, | 2139 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, |
2140 | "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, | 2140 | "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, |
2141 | dev->name, dev->priv, dev_stat, event); | 2141 | dev->name, dev->ml_priv, dev_stat, event); |
2142 | 2142 | ||
2143 | switch (fsm_getstate(fi)) { | 2143 | switch (fsm_getstate(fi)) { |
2144 | case DEV_STATE_STARTWAIT_RXTX: | 2144 | case DEV_STATE_STARTWAIT_RXTX: |
@@ -2195,7 +2195,7 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg) | |||
2195 | { | 2195 | { |
2196 | 2196 | ||
2197 | struct net_device *dev = arg; | 2197 | struct net_device *dev = arg; |
2198 | struct ctcm_priv *priv = dev->priv; | 2198 | struct ctcm_priv *priv = dev->ml_priv; |
2199 | 2199 | ||
2200 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); | 2200 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); |
2201 | 2201 | ||
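The ctcm_fsms.c hunks are a mechanical rename: every FSM action handler now reaches the device private area through dev->ml_priv instead of dev->priv. The shared access pattern, as a sketch (chx_example is an illustrative name, not a handler from the driver):

	static void chx_example(fsm_instance *fi, int event, void *arg)
	{
		struct channel *ch = arg;			/* handlers receive the channel */
		struct net_device *dev = ch->netdev;		/* channel knows its netdev */
		struct ctcm_priv *priv = dev->ml_priv;		/* was dev->priv */

		fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);	/* forward to the device FSM */
	}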
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 126a3ebb8ab2..b11fec24c7d2 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -69,7 +69,7 @@ struct channel *channels; | |||
69 | void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) | 69 | void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) |
70 | { | 70 | { |
71 | struct net_device *dev = ch->netdev; | 71 | struct net_device *dev = ch->netdev; |
72 | struct ctcm_priv *priv = dev->priv; | 72 | struct ctcm_priv *priv = dev->ml_priv; |
73 | __u16 len = *((__u16 *) pskb->data); | 73 | __u16 len = *((__u16 *) pskb->data); |
74 | 74 | ||
75 | skb_put(pskb, 2 + LL_HEADER_LENGTH); | 75 | skb_put(pskb, 2 + LL_HEADER_LENGTH); |
@@ -414,7 +414,7 @@ int ctcm_ch_alloc_buffer(struct channel *ch) | |||
414 | */ | 414 | */ |
415 | int ctcm_open(struct net_device *dev) | 415 | int ctcm_open(struct net_device *dev) |
416 | { | 416 | { |
417 | struct ctcm_priv *priv = dev->priv; | 417 | struct ctcm_priv *priv = dev->ml_priv; |
418 | 418 | ||
419 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); | 419 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); |
420 | if (!IS_MPC(priv)) | 420 | if (!IS_MPC(priv)) |
@@ -432,7 +432,7 @@ int ctcm_open(struct net_device *dev) | |||
432 | */ | 432 | */ |
433 | int ctcm_close(struct net_device *dev) | 433 | int ctcm_close(struct net_device *dev) |
434 | { | 434 | { |
435 | struct ctcm_priv *priv = dev->priv; | 435 | struct ctcm_priv *priv = dev->ml_priv; |
436 | 436 | ||
437 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); | 437 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); |
438 | if (!IS_MPC(priv)) | 438 | if (!IS_MPC(priv)) |
@@ -573,7 +573,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
573 | skb_pull(skb, LL_HEADER_LENGTH + 2); | 573 | skb_pull(skb, LL_HEADER_LENGTH + 2); |
574 | } else if (ccw_idx == 0) { | 574 | } else if (ccw_idx == 0) { |
575 | struct net_device *dev = ch->netdev; | 575 | struct net_device *dev = ch->netdev; |
576 | struct ctcm_priv *priv = dev->priv; | 576 | struct ctcm_priv *priv = dev->ml_priv; |
577 | priv->stats.tx_packets++; | 577 | priv->stats.tx_packets++; |
578 | priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; | 578 | priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; |
579 | } | 579 | } |
@@ -592,7 +592,7 @@ static void ctcmpc_send_sweep_req(struct channel *rch) | |||
592 | struct channel *ch; | 592 | struct channel *ch; |
593 | /* int rc = 0; */ | 593 | /* int rc = 0; */ |
594 | 594 | ||
595 | priv = dev->priv; | 595 | priv = dev->ml_priv; |
596 | grp = priv->mpcg; | 596 | grp = priv->mpcg; |
597 | ch = priv->channel[WRITE]; | 597 | ch = priv->channel[WRITE]; |
598 | 598 | ||
@@ -652,7 +652,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
652 | { | 652 | { |
653 | struct pdu *p_header; | 653 | struct pdu *p_header; |
654 | struct net_device *dev = ch->netdev; | 654 | struct net_device *dev = ch->netdev; |
655 | struct ctcm_priv *priv = dev->priv; | 655 | struct ctcm_priv *priv = dev->ml_priv; |
656 | struct mpc_group *grp = priv->mpcg; | 656 | struct mpc_group *grp = priv->mpcg; |
657 | struct th_header *header; | 657 | struct th_header *header; |
658 | struct sk_buff *nskb; | 658 | struct sk_buff *nskb; |
@@ -867,7 +867,7 @@ done: | |||
867 | /* first merge version - leaving both functions separated */ | 867 | /* first merge version - leaving both functions separated */ |
868 | static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) | 868 | static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) |
869 | { | 869 | { |
870 | struct ctcm_priv *priv = dev->priv; | 870 | struct ctcm_priv *priv = dev->ml_priv; |
871 | 871 | ||
872 | if (skb == NULL) { | 872 | if (skb == NULL) { |
873 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 873 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
@@ -911,7 +911,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) | |||
911 | static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) | 911 | static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) |
912 | { | 912 | { |
913 | int len = 0; | 913 | int len = 0; |
914 | struct ctcm_priv *priv = dev->priv; | 914 | struct ctcm_priv *priv = dev->ml_priv; |
915 | struct mpc_group *grp = priv->mpcg; | 915 | struct mpc_group *grp = priv->mpcg; |
916 | struct sk_buff *newskb = NULL; | 916 | struct sk_buff *newskb = NULL; |
917 | 917 | ||
@@ -1025,7 +1025,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) | |||
1025 | if (new_mtu < 576 || new_mtu > 65527) | 1025 | if (new_mtu < 576 || new_mtu > 65527) |
1026 | return -EINVAL; | 1026 | return -EINVAL; |
1027 | 1027 | ||
1028 | priv = dev->priv; | 1028 | priv = dev->ml_priv; |
1029 | max_bufsize = priv->channel[READ]->max_bufsize; | 1029 | max_bufsize = priv->channel[READ]->max_bufsize; |
1030 | 1030 | ||
1031 | if (IS_MPC(priv)) { | 1031 | if (IS_MPC(priv)) { |
@@ -1050,7 +1050,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) | |||
1050 | */ | 1050 | */ |
1051 | static struct net_device_stats *ctcm_stats(struct net_device *dev) | 1051 | static struct net_device_stats *ctcm_stats(struct net_device *dev) |
1052 | { | 1052 | { |
1053 | return &((struct ctcm_priv *)dev->priv)->stats; | 1053 | return &((struct ctcm_priv *)dev->ml_priv)->stats; |
1054 | } | 1054 | } |
1055 | 1055 | ||
1056 | static void ctcm_free_netdevice(struct net_device *dev) | 1056 | static void ctcm_free_netdevice(struct net_device *dev) |
@@ -1060,7 +1060,7 @@ static void ctcm_free_netdevice(struct net_device *dev) | |||
1060 | 1060 | ||
1061 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | 1061 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1062 | "%s(%s)", CTCM_FUNTAIL, dev->name); | 1062 | "%s(%s)", CTCM_FUNTAIL, dev->name); |
1063 | priv = dev->priv; | 1063 | priv = dev->ml_priv; |
1064 | if (priv) { | 1064 | if (priv) { |
1065 | grp = priv->mpcg; | 1065 | grp = priv->mpcg; |
1066 | if (grp) { | 1066 | if (grp) { |
@@ -1125,7 +1125,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1125 | CTCM_FUNTAIL); | 1125 | CTCM_FUNTAIL); |
1126 | return NULL; | 1126 | return NULL; |
1127 | } | 1127 | } |
1128 | dev->priv = priv; | 1128 | dev->ml_priv = priv; |
1129 | priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names, | 1129 | priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names, |
1130 | CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS, | 1130 | CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS, |
1131 | dev_fsm, dev_fsm_len, GFP_KERNEL); | 1131 | dev_fsm, dev_fsm_len, GFP_KERNEL); |
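Why ml_priv rather than netdev_priv(): ctcm (like claw) allocates its ctcm_priv separately and attaches it to the net_device afterwards, so the pointer moves into the ml_priv slot; netdev_priv() only covers state embedded by alloc_netdev() itself. A sketch of the two sides of that convention (hypothetical helper names):

	static void example_attach_priv(struct net_device *dev, struct ctcm_priv *priv)
	{
		dev->ml_priv = priv;		/* at init time, as ctcm_init_netdevice() does */
	}

	static struct ctcm_priv *example_priv(struct net_device *dev)
	{
		return dev->ml_priv;		/* at use time, formerly dev->priv */
	}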
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index a72e0feeb27f..8e10ee86a5ee 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h | |||
@@ -229,14 +229,14 @@ void ctcm_remove_files(struct device *dev); | |||
229 | */ | 229 | */ |
230 | static inline void ctcm_clear_busy_do(struct net_device *dev) | 230 | static inline void ctcm_clear_busy_do(struct net_device *dev) |
231 | { | 231 | { |
232 | clear_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy)); | 232 | clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); |
233 | netif_wake_queue(dev); | 233 | netif_wake_queue(dev); |
234 | } | 234 | } |
235 | 235 | ||
236 | static inline void ctcm_clear_busy(struct net_device *dev) | 236 | static inline void ctcm_clear_busy(struct net_device *dev) |
237 | { | 237 | { |
238 | struct mpc_group *grp; | 238 | struct mpc_group *grp; |
239 | grp = ((struct ctcm_priv *)dev->priv)->mpcg; | 239 | grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg; |
240 | 240 | ||
241 | if (!(grp && grp->in_sweep)) | 241 | if (!(grp && grp->in_sweep)) |
242 | ctcm_clear_busy_do(dev); | 242 | ctcm_clear_busy_do(dev); |
@@ -246,7 +246,8 @@ static inline void ctcm_clear_busy(struct net_device *dev) | |||
246 | static inline int ctcm_test_and_set_busy(struct net_device *dev) | 246 | static inline int ctcm_test_and_set_busy(struct net_device *dev) |
247 | { | 247 | { |
248 | netif_stop_queue(dev); | 248 | netif_stop_queue(dev); |
249 | return test_and_set_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy)); | 249 | return test_and_set_bit(0, |
250 | &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); | ||
250 | } | 251 | } |
251 | 252 | ||
252 | extern int loglevel; | 253 | extern int loglevel; |
@@ -292,7 +293,7 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); | |||
292 | #define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC) | 293 | #define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC) |
293 | 294 | ||
294 | /* test if struct ctcm_priv of struct net_device has MPC protocol setting */ | 295 | /* test if struct ctcm_priv of struct net_device has MPC protocol setting */ |
295 | #define IS_MPCDEV(d) IS_MPC((struct ctcm_priv *)d->priv) | 296 | #define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv) |
296 | 297 | ||
297 | static inline gfp_t gfp_type(void) | 298 | static inline gfp_t gfp_type(void) |
298 | { | 299 | { |
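The tbusy field wrapped by the inline helpers above acts as a transmit-busy latch: stop the queue, then atomically claim bit 0; if the bit was already set another path owns the transmit and the caller backs off, and the clear side releases the bit before waking the queue. A usage sketch (hypothetical transmit handler, assuming the NETDEV_TX_* return convention):

	static int example_tx(struct sk_buff *skb, struct net_device *dev)
	{
		if (ctcm_test_and_set_busy(dev))
			return NETDEV_TX_BUSY;		/* already claimed elsewhere */

		/* ... hand the skb to the write channel ... */

		ctcm_clear_busy(dev);
		return NETDEV_TX_OK;
	}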
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 49ae1cd25caa..cbe470493bf0 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #undef DEBUGDATA | 19 | #undef DEBUGDATA |
20 | #undef DEBUGCCW | 20 | #undef DEBUGCCW |
21 | 21 | ||
22 | #include <linux/version.h> | ||
23 | #include <linux/module.h> | 22 | #include <linux/module.h> |
24 | #include <linux/init.h> | 23 | #include <linux/init.h> |
25 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
@@ -313,10 +312,10 @@ static struct net_device *ctcmpc_get_dev(int port_num) | |||
313 | CTCM_FUNTAIL, device); | 312 | CTCM_FUNTAIL, device); |
314 | return NULL; | 313 | return NULL; |
315 | } | 314 | } |
316 | priv = dev->priv; | 315 | priv = dev->ml_priv; |
317 | if (priv == NULL) { | 316 | if (priv == NULL) { |
318 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, | 317 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
319 | "%s(%s): dev->priv is NULL", | 318 | "%s(%s): dev->ml_priv is NULL", |
320 | CTCM_FUNTAIL, device); | 319 | CTCM_FUNTAIL, device); |
321 | return NULL; | 320 | return NULL; |
322 | } | 321 | } |
@@ -345,7 +344,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) | |||
345 | dev = ctcmpc_get_dev(port_num); | 344 | dev = ctcmpc_get_dev(port_num); |
346 | if (dev == NULL) | 345 | if (dev == NULL) |
347 | return 1; | 346 | return 1; |
348 | priv = dev->priv; | 347 | priv = dev->ml_priv; |
349 | grp = priv->mpcg; | 348 | grp = priv->mpcg; |
350 | 349 | ||
351 | grp->allochanfunc = callback; | 350 | grp->allochanfunc = callback; |
@@ -417,7 +416,7 @@ void ctc_mpc_establish_connectivity(int port_num, | |||
417 | dev = ctcmpc_get_dev(port_num); | 416 | dev = ctcmpc_get_dev(port_num); |
418 | if (dev == NULL) | 417 | if (dev == NULL) |
419 | return; | 418 | return; |
420 | priv = dev->priv; | 419 | priv = dev->ml_priv; |
421 | grp = priv->mpcg; | 420 | grp = priv->mpcg; |
422 | rch = priv->channel[READ]; | 421 | rch = priv->channel[READ]; |
423 | wch = priv->channel[WRITE]; | 422 | wch = priv->channel[WRITE]; |
@@ -535,7 +534,7 @@ void ctc_mpc_dealloc_ch(int port_num) | |||
535 | dev = ctcmpc_get_dev(port_num); | 534 | dev = ctcmpc_get_dev(port_num); |
536 | if (dev == NULL) | 535 | if (dev == NULL) |
537 | return; | 536 | return; |
538 | priv = dev->priv; | 537 | priv = dev->ml_priv; |
539 | grp = priv->mpcg; | 538 | grp = priv->mpcg; |
540 | 539 | ||
541 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, | 540 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, |
@@ -571,7 +570,7 @@ void ctc_mpc_flow_control(int port_num, int flowc) | |||
571 | dev = ctcmpc_get_dev(port_num); | 570 | dev = ctcmpc_get_dev(port_num); |
572 | if (dev == NULL) | 571 | if (dev == NULL) |
573 | return; | 572 | return; |
574 | priv = dev->priv; | 573 | priv = dev->ml_priv; |
575 | grp = priv->mpcg; | 574 | grp = priv->mpcg; |
576 | 575 | ||
577 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, | 576 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, |
@@ -620,7 +619,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) | |||
620 | { | 619 | { |
621 | struct channel *rch = mpcginfo->ch; | 620 | struct channel *rch = mpcginfo->ch; |
622 | struct net_device *dev = rch->netdev; | 621 | struct net_device *dev = rch->netdev; |
623 | struct ctcm_priv *priv = dev->priv; | 622 | struct ctcm_priv *priv = dev->ml_priv; |
624 | struct mpc_group *grp = priv->mpcg; | 623 | struct mpc_group *grp = priv->mpcg; |
625 | struct channel *ch = priv->channel[WRITE]; | 624 | struct channel *ch = priv->channel[WRITE]; |
626 | 625 | ||
@@ -651,7 +650,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) | |||
651 | static void ctcmpc_send_sweep_resp(struct channel *rch) | 650 | static void ctcmpc_send_sweep_resp(struct channel *rch) |
652 | { | 651 | { |
653 | struct net_device *dev = rch->netdev; | 652 | struct net_device *dev = rch->netdev; |
654 | struct ctcm_priv *priv = dev->priv; | 653 | struct ctcm_priv *priv = dev->ml_priv; |
655 | struct mpc_group *grp = priv->mpcg; | 654 | struct mpc_group *grp = priv->mpcg; |
656 | int rc = 0; | 655 | int rc = 0; |
657 | struct th_sweep *header; | 656 | struct th_sweep *header; |
@@ -713,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) | |||
713 | { | 712 | { |
714 | struct channel *rch = mpcginfo->ch; | 713 | struct channel *rch = mpcginfo->ch; |
715 | struct net_device *dev = rch->netdev; | 714 | struct net_device *dev = rch->netdev; |
716 | struct ctcm_priv *priv = dev->priv; | 715 | struct ctcm_priv *priv = dev->ml_priv; |
717 | struct mpc_group *grp = priv->mpcg; | 716 | struct mpc_group *grp = priv->mpcg; |
718 | struct channel *ch = priv->channel[WRITE]; | 717 | struct channel *ch = priv->channel[WRITE]; |
719 | 718 | ||
@@ -847,7 +846,7 @@ static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm); | |||
847 | static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) | 846 | static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) |
848 | { | 847 | { |
849 | struct net_device *dev = arg; | 848 | struct net_device *dev = arg; |
850 | struct ctcm_priv *priv = dev->priv; | 849 | struct ctcm_priv *priv = dev->ml_priv; |
851 | struct mpc_group *grp = priv->mpcg; | 850 | struct mpc_group *grp = priv->mpcg; |
852 | 851 | ||
853 | if (grp == NULL) { | 852 | if (grp == NULL) { |
@@ -891,7 +890,7 @@ static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) | |||
891 | void mpc_group_ready(unsigned long adev) | 890 | void mpc_group_ready(unsigned long adev) |
892 | { | 891 | { |
893 | struct net_device *dev = (struct net_device *)adev; | 892 | struct net_device *dev = (struct net_device *)adev; |
894 | struct ctcm_priv *priv = dev->priv; | 893 | struct ctcm_priv *priv = dev->ml_priv; |
895 | struct mpc_group *grp = priv->mpcg; | 894 | struct mpc_group *grp = priv->mpcg; |
896 | struct channel *ch = NULL; | 895 | struct channel *ch = NULL; |
897 | 896 | ||
@@ -947,7 +946,7 @@ void mpc_group_ready(unsigned long adev) | |||
947 | void mpc_channel_action(struct channel *ch, int direction, int action) | 946 | void mpc_channel_action(struct channel *ch, int direction, int action) |
948 | { | 947 | { |
949 | struct net_device *dev = ch->netdev; | 948 | struct net_device *dev = ch->netdev; |
950 | struct ctcm_priv *priv = dev->priv; | 949 | struct ctcm_priv *priv = dev->ml_priv; |
951 | struct mpc_group *grp = priv->mpcg; | 950 | struct mpc_group *grp = priv->mpcg; |
952 | 951 | ||
953 | if (grp == NULL) { | 952 | if (grp == NULL) { |
@@ -1057,7 +1056,7 @@ done: | |||
1057 | static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | 1056 | static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) |
1058 | { | 1057 | { |
1059 | struct net_device *dev = ch->netdev; | 1058 | struct net_device *dev = ch->netdev; |
1060 | struct ctcm_priv *priv = dev->priv; | 1059 | struct ctcm_priv *priv = dev->ml_priv; |
1061 | struct mpc_group *grp = priv->mpcg; | 1060 | struct mpc_group *grp = priv->mpcg; |
1062 | struct pdu *curr_pdu; | 1061 | struct pdu *curr_pdu; |
1063 | struct mpcg_info *mpcginfo; | 1062 | struct mpcg_info *mpcginfo; |
@@ -1255,7 +1254,7 @@ void ctcmpc_bh(unsigned long thischan) | |||
1255 | struct channel *ch = (struct channel *)thischan; | 1254 | struct channel *ch = (struct channel *)thischan; |
1256 | struct sk_buff *skb; | 1255 | struct sk_buff *skb; |
1257 | struct net_device *dev = ch->netdev; | 1256 | struct net_device *dev = ch->netdev; |
1258 | struct ctcm_priv *priv = dev->priv; | 1257 | struct ctcm_priv *priv = dev->ml_priv; |
1259 | struct mpc_group *grp = priv->mpcg; | 1258 | struct mpc_group *grp = priv->mpcg; |
1260 | 1259 | ||
1261 | CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n", | 1260 | CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n", |
@@ -1377,7 +1376,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) | |||
1377 | BUG_ON(dev == NULL); | 1376 | BUG_ON(dev == NULL); |
1378 | CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); | 1377 | CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); |
1379 | 1378 | ||
1380 | priv = dev->priv; | 1379 | priv = dev->ml_priv; |
1381 | grp = priv->mpcg; | 1380 | grp = priv->mpcg; |
1382 | grp->flow_off_called = 0; | 1381 | grp->flow_off_called = 0; |
1383 | fsm_deltimer(&grp->timer); | 1382 | fsm_deltimer(&grp->timer); |
@@ -1483,7 +1482,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) | |||
1483 | 1482 | ||
1484 | BUG_ON(dev == NULL); | 1483 | BUG_ON(dev == NULL); |
1485 | 1484 | ||
1486 | priv = dev->priv; | 1485 | priv = dev->ml_priv; |
1487 | grp = priv->mpcg; | 1486 | grp = priv->mpcg; |
1488 | wch = priv->channel[WRITE]; | 1487 | wch = priv->channel[WRITE]; |
1489 | rch = priv->channel[READ]; | 1488 | rch = priv->channel[READ]; |
@@ -1521,7 +1520,7 @@ void mpc_action_discontact(fsm_instance *fi, int event, void *arg) | |||
1521 | if (ch) { | 1520 | if (ch) { |
1522 | dev = ch->netdev; | 1521 | dev = ch->netdev; |
1523 | if (dev) { | 1522 | if (dev) { |
1524 | priv = dev->priv; | 1523 | priv = dev->ml_priv; |
1525 | if (priv) { | 1524 | if (priv) { |
1526 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, | 1525 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, |
1527 | "%s: %s: %s\n", | 1526 | "%s: %s: %s\n", |
@@ -1569,7 +1568,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) | |||
1569 | { | 1568 | { |
1570 | struct channel *ch = mpcginfo->ch; | 1569 | struct channel *ch = mpcginfo->ch; |
1571 | struct net_device *dev = ch->netdev; | 1570 | struct net_device *dev = ch->netdev; |
1572 | struct ctcm_priv *priv = dev->priv; | 1571 | struct ctcm_priv *priv = dev->ml_priv; |
1573 | struct mpc_group *grp = priv->mpcg; | 1572 | struct mpc_group *grp = priv->mpcg; |
1574 | struct xid2 *xid = mpcginfo->xid; | 1573 | struct xid2 *xid = mpcginfo->xid; |
1575 | int rc = 0; | 1574 | int rc = 0; |
@@ -1866,7 +1865,7 @@ static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) | |||
1866 | { | 1865 | { |
1867 | struct channel *ch = arg; | 1866 | struct channel *ch = arg; |
1868 | struct net_device *dev = ch->netdev; | 1867 | struct net_device *dev = ch->netdev; |
1869 | struct ctcm_priv *priv = dev->priv; | 1868 | struct ctcm_priv *priv = dev->ml_priv; |
1870 | struct mpc_group *grp = priv->mpcg; | 1869 | struct mpc_group *grp = priv->mpcg; |
1871 | 1870 | ||
1872 | CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", | 1871 | CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", |
@@ -1906,7 +1905,7 @@ static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) | |||
1906 | static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) | 1905 | static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) |
1907 | { | 1906 | { |
1908 | struct net_device *dev = arg; | 1907 | struct net_device *dev = arg; |
1909 | struct ctcm_priv *priv = dev->priv; | 1908 | struct ctcm_priv *priv = dev->ml_priv; |
1910 | struct mpc_group *grp = NULL; | 1909 | struct mpc_group *grp = NULL; |
1911 | int direction; | 1910 | int direction; |
1912 | int send = 0; | 1911 | int send = 0; |
@@ -1983,7 +1982,7 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) | |||
1983 | struct mpcg_info *mpcginfo = arg; | 1982 | struct mpcg_info *mpcginfo = arg; |
1984 | struct channel *ch = mpcginfo->ch; | 1983 | struct channel *ch = mpcginfo->ch; |
1985 | struct net_device *dev = ch->netdev; | 1984 | struct net_device *dev = ch->netdev; |
1986 | struct ctcm_priv *priv = dev->priv; | 1985 | struct ctcm_priv *priv = dev->ml_priv; |
1987 | struct mpc_group *grp = priv->mpcg; | 1986 | struct mpc_group *grp = priv->mpcg; |
1988 | 1987 | ||
1989 | CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", | 1988 | CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", |
@@ -2045,7 +2044,7 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) | |||
2045 | struct mpcg_info *mpcginfo = arg; | 2044 | struct mpcg_info *mpcginfo = arg; |
2046 | struct channel *ch = mpcginfo->ch; | 2045 | struct channel *ch = mpcginfo->ch; |
2047 | struct net_device *dev = ch->netdev; | 2046 | struct net_device *dev = ch->netdev; |
2048 | struct ctcm_priv *priv = dev->priv; | 2047 | struct ctcm_priv *priv = dev->ml_priv; |
2049 | struct mpc_group *grp = priv->mpcg; | 2048 | struct mpc_group *grp = priv->mpcg; |
2050 | 2049 | ||
2051 | CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", | 2050 | CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", |
@@ -2097,7 +2096,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev) | |||
2097 | __u32 new_len = 0; | 2096 | __u32 new_len = 0; |
2098 | struct sk_buff *skb; | 2097 | struct sk_buff *skb; |
2099 | struct qllc *qllcptr; | 2098 | struct qllc *qllcptr; |
2100 | struct ctcm_priv *priv = dev->priv; | 2099 | struct ctcm_priv *priv = dev->ml_priv; |
2101 | struct mpc_group *grp = priv->mpcg; | 2100 | struct mpc_group *grp = priv->mpcg; |
2102 | 2101 | ||
2103 | CTCM_PR_DEBUG("%s: GROUP STATE: %s\n", | 2102 | CTCM_PR_DEBUG("%s: GROUP STATE: %s\n", |
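The ctc_mpc_* entry points above all start from a port number, and ctcmpc_get_dev() refuses to hand back a device whose ml_priv has not been attached (the "dev->ml_priv is NULL" message in the hunk). A sketch of that lookup-and-validate shape, with an assumed naming scheme and locking left to the caller:

	static struct net_device *example_get_dev(int port_num)
	{
		char name[IFNAMSIZ];
		struct net_device *dev;

		snprintf(name, IFNAMSIZ, "mpc%d", port_num);	/* assumed name format */
		dev = __dev_get_by_name(&init_net, name);	/* caller holds the usual locks */
		if (!dev || !dev->ml_priv)
			return NULL;				/* absent, or private area not attached */
		return dev;
	}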
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 6de28385b354..9bcfa04d863b 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1412,7 +1412,8 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1412 | } | 1412 | } |
1413 | /* How far in the ccw chain have we processed? */ | 1413 | /* How far in the ccw chain have we processed? */ |
1414 | if ((channel->state != LCS_CH_STATE_INIT) && | 1414 | if ((channel->state != LCS_CH_STATE_INIT) && |
1415 | (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) { | 1415 | (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && |
1416 | (irb->scsw.cmd.cpa != 0)) { | ||
1416 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) | 1417 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) |
1417 | - channel->ccws; | 1418 | - channel->ccws; |
1418 | if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || | 1419 | if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || |
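The lcs.c change hardens lcs_irq(): before translating the channel-program address from the IRB into an index into channel->ccws[], it now also requires irb->scsw.cmd.cpa to be non-zero, since subtracting the array base from __va(0) would yield a nonsense index. The guarded computation, pulled out of the hunk for readability:

	if ((channel->state != LCS_CH_STATE_INIT) &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (irb->scsw.cmd.cpa != 0)) {
		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
			- channel->ccws;
		/* index is only meaningful once all three conditions hold */
	}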
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 80971c21ea1a..bf8a75c92f28 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -689,6 +689,7 @@ struct qeth_mc_mac { | |||
689 | struct list_head list; | 689 | struct list_head list; |
690 | __u8 mc_addr[MAX_ADDR_LEN]; | 690 | __u8 mc_addr[MAX_ADDR_LEN]; |
691 | unsigned char mc_addrlen; | 691 | unsigned char mc_addrlen; |
692 | int is_vmac; | ||
692 | }; | 693 | }; |
693 | 694 | ||
694 | struct qeth_card { | 695 | struct qeth_card { |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bd420d1b9a0d..c7ab1b864516 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -3024,7 +3024,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, | |||
3024 | struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, | 3024 | struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, |
3025 | int offset) | 3025 | int offset) |
3026 | { | 3026 | { |
3027 | int length = skb->len; | 3027 | int length = skb->len - offset; |
3028 | int length_here; | 3028 | int length_here; |
3029 | int element; | 3029 | int element; |
3030 | char *data; | 3030 | char *data; |
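The one-line qeth_core_main.c fix matters because __qeth_fill_buffer() maps skb data starting at an offset, yet the byte count previously stayed at the full skb->len, so the element chain described more data than actually remained. A sketch of the corrected arithmetic as a stand-alone helper (hypothetical name; the real function also handles TSO and fragments):

	static int example_fill_elements(struct qdio_buffer *buffer,
					 struct sk_buff *skb, int offset, int element)
	{
		char *data = skb->data + offset;
		int length = skb->len - offset;		/* bytes still to describe */

		while (length > 0) {
			int length_here = min_t(int, length,
				PAGE_SIZE - ((unsigned long) data % PAGE_SIZE));

			buffer->element[element].addr = data;
			buffer->element[element].length = length_here;
			data += length_here;
			length -= length_here;
			element++;
		}
		return element;
	}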
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b3cee032f578..3ac3cc1e03cc 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -177,9 +177,10 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) | |||
177 | qeth_l2_send_delgroupmac_cb); | 177 | qeth_l2_send_delgroupmac_cb); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac) | 180 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) |
181 | { | 181 | { |
182 | struct qeth_mc_mac *mc; | 182 | struct qeth_mc_mac *mc; |
183 | int rc; | ||
183 | 184 | ||
184 | mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); | 185 | mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); |
185 | 186 | ||
@@ -188,8 +189,16 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac) | |||
188 | 189 | ||
189 | memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); | 190 | memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); |
190 | mc->mc_addrlen = OSA_ADDR_LEN; | 191 | mc->mc_addrlen = OSA_ADDR_LEN; |
192 | mc->is_vmac = vmac; | ||
193 | |||
194 | if (vmac) { | ||
195 | rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | ||
196 | NULL); | ||
197 | } else { | ||
198 | rc = qeth_l2_send_setgroupmac(card, mac); | ||
199 | } | ||
191 | 200 | ||
192 | if (!qeth_l2_send_setgroupmac(card, mac)) | 201 | if (!rc) |
193 | list_add_tail(&mc->list, &card->mc_list); | 202 | list_add_tail(&mc->list, &card->mc_list); |
194 | else | 203 | else |
195 | kfree(mc); | 204 | kfree(mc); |
@@ -201,7 +210,11 @@ static void qeth_l2_del_all_mc(struct qeth_card *card) | |||
201 | 210 | ||
202 | spin_lock_bh(&card->mclock); | 211 | spin_lock_bh(&card->mclock); |
203 | list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { | 212 | list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { |
204 | qeth_l2_send_delgroupmac(card, mc->mc_addr); | 213 | if (mc->is_vmac) |
214 | qeth_l2_send_setdelmac(card, mc->mc_addr, | ||
215 | IPA_CMD_DELVMAC, NULL); | ||
216 | else | ||
217 | qeth_l2_send_delgroupmac(card, mc->mc_addr); | ||
205 | list_del(&mc->list); | 218 | list_del(&mc->list); |
206 | kfree(mc); | 219 | kfree(mc); |
207 | } | 220 | } |
@@ -590,7 +603,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
590 | static void qeth_l2_set_multicast_list(struct net_device *dev) | 603 | static void qeth_l2_set_multicast_list(struct net_device *dev) |
591 | { | 604 | { |
592 | struct qeth_card *card = dev->ml_priv; | 605 | struct qeth_card *card = dev->ml_priv; |
593 | struct dev_mc_list *dm; | 606 | struct dev_addr_list *dm; |
594 | 607 | ||
595 | if (card->info.type == QETH_CARD_TYPE_OSN) | 608 | if (card->info.type == QETH_CARD_TYPE_OSN) |
596 | return ; | 609 | return ; |
@@ -599,7 +612,11 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) | |||
599 | qeth_l2_del_all_mc(card); | 612 | qeth_l2_del_all_mc(card); |
600 | spin_lock_bh(&card->mclock); | 613 | spin_lock_bh(&card->mclock); |
601 | for (dm = dev->mc_list; dm; dm = dm->next) | 614 | for (dm = dev->mc_list; dm; dm = dm->next) |
602 | qeth_l2_add_mc(card, dm->dmi_addr); | 615 | qeth_l2_add_mc(card, dm->da_addr, 0); |
616 | |||
617 | for (dm = dev->uc_list; dm; dm = dm->next) | ||
618 | qeth_l2_add_mc(card, dm->da_addr, 1); | ||
619 | |||
603 | spin_unlock_bh(&card->mclock); | 620 | spin_unlock_bh(&card->mclock); |
604 | if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) | 621 | if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) |
605 | return; | 622 | return; |
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index ac1993708ae9..210ddb639748 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -136,7 +136,7 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev, | |||
136 | return -EINVAL; | 136 | return -EINVAL; |
137 | 137 | ||
138 | if (!qeth_is_supported(card, IPA_IPV6)) { | 138 | if (!qeth_is_supported(card, IPA_IPV6)) { |
139 | return -ENOTSUPP; | 139 | return -EOPNOTSUPP; |
140 | } | 140 | } |
141 | 141 | ||
142 | return qeth_l3_dev_route_store(card, &card->options.route6, | 142 | return qeth_l3_dev_route_store(card, &card->options.route6, |
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c index 73a86d09bba8..9c129248466c 100644 --- a/drivers/sbus/sbus.c +++ b/drivers/sbus/sbus.c | |||
@@ -7,13 +7,13 @@ | |||
7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
10 | #include <linux/of_device.h> | ||
10 | 11 | ||
11 | #include <asm/system.h> | 12 | #include <asm/system.h> |
12 | #include <asm/sbus.h> | 13 | #include <asm/sbus.h> |
13 | #include <asm/dma.h> | 14 | #include <asm/dma.h> |
14 | #include <asm/oplib.h> | 15 | #include <asm/oplib.h> |
15 | #include <asm/prom.h> | 16 | #include <asm/prom.h> |
16 | #include <asm/of_device.h> | ||
17 | #include <asm/bpp.h> | 17 | #include <asm/bpp.h> |
18 | #include <asm/irq.h> | 18 | #include <asm/irq.h> |
19 | 19 | ||
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index fcdd73f25625..994da56fffed 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
@@ -680,7 +680,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req) | |||
680 | 680 | ||
681 | } | 681 | } |
682 | 682 | ||
683 | const struct scsi_dh_devlist alua_dev_list[] = { | 683 | static const struct scsi_dh_devlist alua_dev_list[] = { |
684 | {"HP", "MSA VOLUME" }, | 684 | {"HP", "MSA VOLUME" }, |
685 | {"HP", "HSV101" }, | 685 | {"HP", "HSV101" }, |
686 | {"HP", "HSV111" }, | 686 | {"HP", "HSV111" }, |
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index aa46b131b20e..b9d23e9e9a44 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c | |||
@@ -562,7 +562,7 @@ done: | |||
562 | return result; | 562 | return result; |
563 | } | 563 | } |
564 | 564 | ||
565 | const struct scsi_dh_devlist clariion_dev_list[] = { | 565 | static const struct scsi_dh_devlist clariion_dev_list[] = { |
566 | {"DGC", "RAID"}, | 566 | {"DGC", "RAID"}, |
567 | {"DGC", "DISK"}, | 567 | {"DGC", "DISK"}, |
568 | {"DGC", "VRAID"}, | 568 | {"DGC", "VRAID"}, |
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 9c7a1f8ebb72..a6a4ef3ad51c 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c | |||
@@ -282,7 +282,7 @@ static int hp_sw_activate(struct scsi_device *sdev) | |||
282 | return ret; | 282 | return ret; |
283 | } | 283 | } |
284 | 284 | ||
285 | const struct scsi_dh_devlist hp_sw_dh_data_list[] = { | 285 | static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { |
286 | {"COMPAQ", "MSA1000 VOLUME"}, | 286 | {"COMPAQ", "MSA1000 VOLUME"}, |
287 | {"COMPAQ", "HSV110"}, | 287 | {"COMPAQ", "HSV110"}, |
288 | {"HP", "HSV100"}, | 288 | {"HP", "HSV100"}, |
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index b093a501f8ae..2dee69da35cf 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c | |||
@@ -376,7 +376,7 @@ static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h) | |||
376 | if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || | 376 | if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || |
377 | inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd') | 377 | inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd') |
378 | return SCSI_DH_NOSYS; | 378 | return SCSI_DH_NOSYS; |
379 | h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun); | 379 | h->lun = inqp->lun[7]; /* Uses only the last byte */ |
380 | } | 380 | } |
381 | return err; | 381 | return err; |
382 | } | 382 | } |
@@ -386,6 +386,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) | |||
386 | int err; | 386 | int err; |
387 | struct c9_inquiry *inqp; | 387 | struct c9_inquiry *inqp; |
388 | 388 | ||
389 | h->lun_state = RDAC_LUN_UNOWNED; | ||
389 | err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); | 390 | err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); |
390 | if (err == SCSI_DH_OK) { | 391 | if (err == SCSI_DH_OK) { |
391 | inqp = &h->inq.c9; | 392 | inqp = &h->inq.c9; |
@@ -574,7 +575,7 @@ static int rdac_check_sense(struct scsi_device *sdev, | |||
574 | return SCSI_RETURN_NOT_HANDLED; | 575 | return SCSI_RETURN_NOT_HANDLED; |
575 | } | 576 | } |
576 | 577 | ||
577 | const struct scsi_dh_devlist rdac_dev_list[] = { | 578 | static const struct scsi_dh_devlist rdac_dev_list[] = { |
578 | {"IBM", "1722"}, | 579 | {"IBM", "1722"}, |
579 | {"IBM", "1724"}, | 580 | {"IBM", "1724"}, |
580 | {"IBM", "1726"}, | 581 | {"IBM", "1726"}, |
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h index 19406cea6d6a..179ad77f6cc9 100644 --- a/drivers/scsi/dpt/dpti_i2o.h +++ b/drivers/scsi/dpt/dpti_i2o.h | |||
@@ -21,7 +21,6 @@ | |||
21 | 21 | ||
22 | #include <linux/i2o-dev.h> | 22 | #include <linux/i2o-dev.h> |
23 | 23 | ||
24 | #include <linux/version.h> | ||
25 | #include <linux/notifier.h> | 24 | #include <linux/notifier.h> |
26 | #include <asm/atomic.h> | 25 | #include <asm/atomic.h> |
27 | 26 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index ae560bc04f9d..4e0b7c8eb32e 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -556,11 +556,12 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost, | |||
556 | /** | 556 | /** |
557 | * ibmvfc_init_host - Start host initialization | 557 | * ibmvfc_init_host - Start host initialization |
558 | * @vhost: ibmvfc host struct | 558 | * @vhost: ibmvfc host struct |
559 | * @relogin: is this a re-login? | ||
559 | * | 560 | * |
560 | * Return value: | 561 | * Return value: |
561 | * nothing | 562 | * nothing |
562 | **/ | 563 | **/ |
563 | static void ibmvfc_init_host(struct ibmvfc_host *vhost) | 564 | static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) |
564 | { | 565 | { |
565 | struct ibmvfc_target *tgt; | 566 | struct ibmvfc_target *tgt; |
566 | 567 | ||
@@ -574,6 +575,11 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost) | |||
574 | } | 575 | } |
575 | 576 | ||
576 | if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { | 577 | if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { |
578 | if (!relogin) { | ||
579 | memset(vhost->async_crq.msgs, 0, PAGE_SIZE); | ||
580 | vhost->async_crq.cur = 0; | ||
581 | } | ||
582 | |||
577 | list_for_each_entry(tgt, &vhost->targets, queue) | 583 | list_for_each_entry(tgt, &vhost->targets, queue) |
578 | tgt->need_login = 1; | 584 | tgt->need_login = 1; |
579 | scsi_block_requests(vhost->host); | 585 | scsi_block_requests(vhost->host); |
@@ -1059,9 +1065,10 @@ static void ibmvfc_get_starget_port_id(struct scsi_target *starget) | |||
1059 | static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost) | 1065 | static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost) |
1060 | { | 1066 | { |
1061 | long timeout = wait_event_timeout(vhost->init_wait_q, | 1067 | long timeout = wait_event_timeout(vhost->init_wait_q, |
1062 | (vhost->state == IBMVFC_ACTIVE || | 1068 | ((vhost->state == IBMVFC_ACTIVE || |
1063 | vhost->state == IBMVFC_HOST_OFFLINE || | 1069 | vhost->state == IBMVFC_HOST_OFFLINE || |
1064 | vhost->state == IBMVFC_LINK_DEAD), | 1070 | vhost->state == IBMVFC_LINK_DEAD) && |
1071 | vhost->action == IBMVFC_HOST_ACTION_NONE), | ||
1065 | (init_timeout * HZ)); | 1072 | (init_timeout * HZ)); |
1066 | 1073 | ||
1067 | return timeout ? 0 : -EIO; | 1074 | return timeout ? 0 : -EIO; |
@@ -1450,8 +1457,8 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) | |||
1450 | struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; | 1457 | struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; |
1451 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; | 1458 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; |
1452 | struct scsi_cmnd *cmnd = evt->cmnd; | 1459 | struct scsi_cmnd *cmnd = evt->cmnd; |
1453 | int rsp_len = 0; | 1460 | u32 rsp_len = 0; |
1454 | int sense_len = rsp->fcp_sense_len; | 1461 | u32 sense_len = rsp->fcp_sense_len; |
1455 | 1462 | ||
1456 | if (cmnd) { | 1463 | if (cmnd) { |
1457 | if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID) | 1464 | if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID) |
@@ -1468,7 +1475,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) | |||
1468 | rsp_len = rsp->fcp_rsp_len; | 1475 | rsp_len = rsp->fcp_rsp_len; |
1469 | if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) | 1476 | if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) |
1470 | sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; | 1477 | sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; |
1471 | if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len) | 1478 | if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) |
1472 | memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); | 1479 | memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); |
1473 | 1480 | ||
1474 | ibmvfc_log_error(evt); | 1481 | ibmvfc_log_error(evt); |
@@ -2077,17 +2084,18 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, | |||
2077 | { | 2084 | { |
2078 | const char *desc = ibmvfc_get_ae_desc(crq->event); | 2085 | const char *desc = ibmvfc_get_ae_desc(crq->event); |
2079 | 2086 | ||
2080 | ibmvfc_log(vhost, 3, "%s event received\n", desc); | 2087 | ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx," |
2088 | " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); | ||
2081 | 2089 | ||
2082 | switch (crq->event) { | 2090 | switch (crq->event) { |
2083 | case IBMVFC_AE_LINK_UP: | 2091 | case IBMVFC_AE_LINK_UP: |
2084 | case IBMVFC_AE_RESUME: | 2092 | case IBMVFC_AE_RESUME: |
2085 | vhost->events_to_log |= IBMVFC_AE_LINKUP; | 2093 | vhost->events_to_log |= IBMVFC_AE_LINKUP; |
2086 | ibmvfc_init_host(vhost); | 2094 | ibmvfc_init_host(vhost, 1); |
2087 | break; | 2095 | break; |
2088 | case IBMVFC_AE_SCN_FABRIC: | 2096 | case IBMVFC_AE_SCN_FABRIC: |
2089 | vhost->events_to_log |= IBMVFC_AE_RSCN; | 2097 | vhost->events_to_log |= IBMVFC_AE_RSCN; |
2090 | ibmvfc_init_host(vhost); | 2098 | ibmvfc_init_host(vhost, 1); |
2091 | break; | 2099 | break; |
2092 | case IBMVFC_AE_SCN_NPORT: | 2100 | case IBMVFC_AE_SCN_NPORT: |
2093 | case IBMVFC_AE_SCN_GROUP: | 2101 | case IBMVFC_AE_SCN_GROUP: |
@@ -2133,13 +2141,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) | |||
2133 | /* Send back a response */ | 2141 | /* Send back a response */ |
2134 | rc = ibmvfc_send_crq_init_complete(vhost); | 2142 | rc = ibmvfc_send_crq_init_complete(vhost); |
2135 | if (rc == 0) | 2143 | if (rc == 0) |
2136 | ibmvfc_init_host(vhost); | 2144 | ibmvfc_init_host(vhost, 0); |
2137 | else | 2145 | else |
2138 | dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc); | 2146 | dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc); |
2139 | break; | 2147 | break; |
2140 | case IBMVFC_CRQ_INIT_COMPLETE: | 2148 | case IBMVFC_CRQ_INIT_COMPLETE: |
2141 | dev_info(vhost->dev, "Partner initialization complete\n"); | 2149 | dev_info(vhost->dev, "Partner initialization complete\n"); |
2142 | ibmvfc_init_host(vhost); | 2150 | ibmvfc_init_host(vhost, 0); |
2143 | break; | 2151 | break; |
2144 | default: | 2152 | default: |
2145 | dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); | 2153 | dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); |
@@ -3357,8 +3365,6 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) | |||
3357 | mad->buffer.va = vhost->login_buf_dma; | 3365 | mad->buffer.va = vhost->login_buf_dma; |
3358 | mad->buffer.len = sizeof(*vhost->login_buf); | 3366 | mad->buffer.len = sizeof(*vhost->login_buf); |
3359 | 3367 | ||
3360 | memset(vhost->async_crq.msgs, 0, PAGE_SIZE); | ||
3361 | vhost->async_crq.cur = 0; | ||
3362 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); | 3368 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); |
3363 | 3369 | ||
3364 | if (!ibmvfc_send_event(evt, vhost, default_timeout)) | 3370 | if (!ibmvfc_send_event(evt, vhost, default_timeout)) |
@@ -3601,8 +3607,9 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) | |||
3601 | } | 3607 | } |
3602 | } | 3608 | } |
3603 | 3609 | ||
3604 | if (vhost->reinit) { | 3610 | if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { |
3605 | vhost->reinit = 0; | 3611 | vhost->reinit = 0; |
3612 | scsi_block_requests(vhost->host); | ||
3606 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); | 3613 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); |
3607 | } else { | 3614 | } else { |
3608 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); | 3615 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 4bf6e374f076..fb3177ab6691 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
@@ -29,8 +29,8 @@ | |||
29 | #include "viosrp.h" | 29 | #include "viosrp.h" |
30 | 30 | ||
31 | #define IBMVFC_NAME "ibmvfc" | 31 | #define IBMVFC_NAME "ibmvfc" |
32 | #define IBMVFC_DRIVER_VERSION "1.0.1" | 32 | #define IBMVFC_DRIVER_VERSION "1.0.2" |
33 | #define IBMVFC_DRIVER_DATE "(July 11, 2008)" | 33 | #define IBMVFC_DRIVER_DATE "(August 14, 2008)" |
34 | 34 | ||
35 | #define IBMVFC_DEFAULT_TIMEOUT 15 | 35 | #define IBMVFC_DEFAULT_TIMEOUT 15 |
36 | #define IBMVFC_INIT_TIMEOUT 30 | 36 | #define IBMVFC_INIT_TIMEOUT 30 |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 6b24b9cdb04c..7b1502c0ab6e 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -1636,7 +1636,7 @@ static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev) | |||
1636 | unsigned long desired_io = max_requests * sizeof(union viosrp_iu); | 1636 | unsigned long desired_io = max_requests * sizeof(union viosrp_iu); |
1637 | 1637 | ||
1638 | /* add io space for sg data */ | 1638 | /* add io space for sg data */ |
1639 | desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * | 1639 | desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 * |
1640 | IBMVSCSI_CMDS_PER_LUN_DEFAULT); | 1640 | IBMVSCSI_CMDS_PER_LUN_DEFAULT); |
1641 | 1641 | ||
1642 | return desired_io; | 1642 | return desired_io; |
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 7c615c70ec5c..bc9e6ddf41df 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c | |||
@@ -165,7 +165,6 @@ | |||
165 | #include <asm/byteorder.h> | 165 | #include <asm/byteorder.h> |
166 | #include <asm/page.h> | 166 | #include <asm/page.h> |
167 | #include <linux/stddef.h> | 167 | #include <linux/stddef.h> |
168 | #include <linux/version.h> | ||
169 | #include <linux/string.h> | 168 | #include <linux/string.h> |
170 | #include <linux/errno.h> | 169 | #include <linux/errno.h> |
171 | #include <linux/kernel.h> | 170 | #include <linux/kernel.h> |
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h index e0657b6f009c..4e49fbcfe8af 100644 --- a/drivers/scsi/ips.h +++ b/drivers/scsi/ips.h | |||
@@ -50,7 +50,6 @@ | |||
50 | #ifndef _IPS_H_ | 50 | #ifndef _IPS_H_ |
51 | #define _IPS_H_ | 51 | #define _IPS_H_ |
52 | 52 | ||
53 | #include <linux/version.h> | ||
54 | #include <linux/nmi.h> | 53 | #include <linux/nmi.h> |
55 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
56 | #include <asm/io.h> | 55 | #include <asm/io.h> |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 90272e65957a..094b47e94b29 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | #include <linux/ctype.h> | 29 | #include <linux/ctype.h> |
30 | #include <linux/version.h> | ||
31 | 30 | ||
32 | #include <scsi/scsi.h> | 31 | #include <scsi/scsi.h> |
33 | #include <scsi/scsi_device.h> | 32 | #include <scsi/scsi_device.h> |
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index fc7ac158476c..97b763378e7d 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * FILE : megaraid_sas.c | 12 | * FILE : megaraid_sas.c |
13 | * Version : v00.00.03.20-rc1 | 13 | * Version : v00.00.04.01-rc1 |
14 | * | 14 | * |
15 | * Authors: | 15 | * Authors: |
16 | * (email-id : megaraidlinux@lsi.com) | 16 | * (email-id : megaraidlinux@lsi.com) |
@@ -71,6 +71,10 @@ static struct pci_device_id megasas_pci_table[] = { | |||
71 | /* ppc IOP */ | 71 | /* ppc IOP */ |
72 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, | 72 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, |
73 | /* ppc IOP */ | 73 | /* ppc IOP */ |
74 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, | ||
75 | /* gen2*/ | ||
76 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, | ||
77 | /* gen2*/ | ||
74 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, | 78 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, |
75 | /* xscale IOP, vega */ | 79 | /* xscale IOP, vega */ |
76 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, | 80 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, |
@@ -198,6 +202,9 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) | |||
198 | */ | 202 | */ |
199 | writel(status, ®s->outbound_intr_status); | 203 | writel(status, ®s->outbound_intr_status); |
200 | 204 | ||
205 | /* Dummy readl to force pci flush */ | ||
206 | readl(®s->outbound_intr_status); | ||
207 | |||
201 | return 0; | 208 | return 0; |
202 | } | 209 | } |
203 | 210 | ||
@@ -293,6 +300,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) | |||
293 | */ | 300 | */ |
294 | writel(status, ®s->outbound_doorbell_clear); | 301 | writel(status, ®s->outbound_doorbell_clear); |
295 | 302 | ||
303 | /* Dummy readl to force pci flush */ | ||
304 | readl(®s->outbound_doorbell_clear); | ||
305 | |||
296 | return 0; | 306 | return 0; |
297 | } | 307 | } |
298 | /** | 308 | /** |
@@ -318,6 +328,99 @@ static struct megasas_instance_template megasas_instance_template_ppc = { | |||
318 | }; | 328 | }; |
319 | 329 | ||
320 | /** | 330 | /** |
331 | * The following functions are defined for gen2 (deviceid : 0x78 0x79) | ||
332 | * controllers | ||
333 | */ | ||
334 | |||
335 | /** | ||
336 | * megasas_enable_intr_gen2 - Enables interrupts | ||
337 | * @regs: MFI register set | ||
338 | */ | ||
339 | static inline void | ||
340 | megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs) | ||
341 | { | ||
342 | writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); | ||
343 | |||
344 | /* write ~0x00000005 (4 & 1) to the intr mask*/ | ||
345 | writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); | ||
346 | |||
347 | /* Dummy readl to force pci flush */ | ||
348 | readl(®s->outbound_intr_mask); | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * megasas_disable_intr_gen2 - Disables interrupt | ||
353 | * @regs: MFI register set | ||
354 | */ | ||
355 | static inline void | ||
356 | megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs) | ||
357 | { | ||
358 | u32 mask = 0xFFFFFFFF; | ||
359 | writel(mask, ®s->outbound_intr_mask); | ||
360 | /* Dummy readl to force pci flush */ | ||
361 | readl(®s->outbound_intr_mask); | ||
362 | } | ||
363 | |||
364 | /** | ||
365 | * megasas_read_fw_status_reg_gen2 - returns the current FW status value | ||
366 | * @regs: MFI register set | ||
367 | */ | ||
368 | static u32 | ||
369 | megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs) | ||
370 | { | ||
371 | return readl(&(regs)->outbound_scratch_pad); | ||
372 | } | ||
373 | |||
374 | /** | ||
375 | * megasas_clear_interrupt_gen2 - Check & clear interrupt | ||
376 | * @regs: MFI register set | ||
377 | */ | ||
378 | static int | ||
379 | megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) | ||
380 | { | ||
381 | u32 status; | ||
382 | /* | ||
383 | * Check if it is our interrupt | ||
384 | */ | ||
385 | status = readl(®s->outbound_intr_status); | ||
386 | |||
387 | if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK)) | ||
388 | return 1; | ||
389 | |||
390 | /* | ||
391 | * Clear the interrupt by writing back the same value | ||
392 | */ | ||
393 | writel(status, ®s->outbound_doorbell_clear); | ||
394 | |||
395 | /* Dummy readl to force pci flush */ | ||
396 | readl(®s->outbound_intr_status); | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | /** | ||
401 | * megasas_fire_cmd_gen2 - Sends command to the FW | ||
402 | * @frame_phys_addr : Physical address of cmd | ||
403 | * @frame_count : Number of frames for the command | ||
404 | * @regs : MFI register set | ||
405 | */ | ||
406 | static inline void | ||
407 | megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count, | ||
408 | struct megasas_register_set __iomem *regs) | ||
409 | { | ||
410 | writel((frame_phys_addr | (frame_count<<1))|1, | ||
411 | &(regs)->inbound_queue_port); | ||
412 | } | ||
413 | |||
414 | static struct megasas_instance_template megasas_instance_template_gen2 = { | ||
415 | |||
416 | .fire_cmd = megasas_fire_cmd_gen2, | ||
417 | .enable_intr = megasas_enable_intr_gen2, | ||
418 | .disable_intr = megasas_disable_intr_gen2, | ||
419 | .clear_intr = megasas_clear_intr_gen2, | ||
420 | .read_fw_status_reg = megasas_read_fw_status_reg_gen2, | ||
421 | }; | ||
422 | |||
423 | /** | ||
321 | * This is the end of set of functions & definitions | 424 | * This is the end of set of functions & definitions |
322 | * specific to ppc (deviceid : 0x60) controllers | 425 | * specific to ppc (deviceid : 0x60) controllers |
323 | */ | 426 | */ |
@@ -1976,7 +2079,12 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
1976 | /* | 2079 | /* |
1977 | * Map the message registers | 2080 | * Map the message registers |
1978 | */ | 2081 | */ |
1979 | instance->base_addr = pci_resource_start(instance->pdev, 0); | 2082 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || |
2083 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) { | ||
2084 | instance->base_addr = pci_resource_start(instance->pdev, 1); | ||
2085 | } else { | ||
2086 | instance->base_addr = pci_resource_start(instance->pdev, 0); | ||
2087 | } | ||
1980 | 2088 | ||
1981 | if (pci_request_regions(instance->pdev, "megasas: LSI")) { | 2089 | if (pci_request_regions(instance->pdev, "megasas: LSI")) { |
1982 | printk(KERN_DEBUG "megasas: IO memory region busy!\n"); | 2090 | printk(KERN_DEBUG "megasas: IO memory region busy!\n"); |
@@ -1998,6 +2106,10 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
1998 | case PCI_DEVICE_ID_LSI_SAS1078DE: | 2106 | case PCI_DEVICE_ID_LSI_SAS1078DE: |
1999 | instance->instancet = &megasas_instance_template_ppc; | 2107 | instance->instancet = &megasas_instance_template_ppc; |
2000 | break; | 2108 | break; |
2109 | case PCI_DEVICE_ID_LSI_SAS1078GEN2: | ||
2110 | case PCI_DEVICE_ID_LSI_SAS0079GEN2: | ||
2111 | instance->instancet = &megasas_instance_template_gen2; | ||
2112 | break; | ||
2001 | case PCI_DEVICE_ID_LSI_SAS1064R: | 2113 | case PCI_DEVICE_ID_LSI_SAS1064R: |
2002 | case PCI_DEVICE_ID_DELL_PERC5: | 2114 | case PCI_DEVICE_ID_DELL_PERC5: |
2003 | default: | 2115 | default: |
@@ -2857,6 +2969,7 @@ static void megasas_shutdown(struct pci_dev *pdev) | |||
2857 | { | 2969 | { |
2858 | struct megasas_instance *instance = pci_get_drvdata(pdev); | 2970 | struct megasas_instance *instance = pci_get_drvdata(pdev); |
2859 | megasas_flush_cache(instance); | 2971 | megasas_flush_cache(instance); |
2972 | megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); | ||
2860 | } | 2973 | } |
2861 | 2974 | ||
2862 | /** | 2975 | /** |
@@ -3292,7 +3405,7 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun | |||
3292 | return retval; | 3405 | return retval; |
3293 | } | 3406 | } |
3294 | 3407 | ||
3295 | static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl, | 3408 | static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, |
3296 | megasas_sysfs_set_dbg_lvl); | 3409 | megasas_sysfs_set_dbg_lvl); |
3297 | 3410 | ||
3298 | static ssize_t | 3411 | static ssize_t |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index b0c41e671702..0d033248fdf1 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -18,9 +18,9 @@ | |||
18 | /* | 18 | /* |
19 | * MegaRAID SAS Driver meta data | 19 | * MegaRAID SAS Driver meta data |
20 | */ | 20 | */ |
21 | #define MEGASAS_VERSION "00.00.03.20-rc1" | 21 | #define MEGASAS_VERSION "00.00.04.01" |
22 | #define MEGASAS_RELDATE "March 10, 2008" | 22 | #define MEGASAS_RELDATE "July 24, 2008" |
23 | #define MEGASAS_EXT_VERSION "Mon. March 10 11:02:31 PDT 2008" | 23 | #define MEGASAS_EXT_VERSION "Thu July 24 11:41:51 PST 2008" |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * Device IDs | 26 | * Device IDs |
@@ -28,6 +28,8 @@ | |||
28 | #define PCI_DEVICE_ID_LSI_SAS1078R 0x0060 | 28 | #define PCI_DEVICE_ID_LSI_SAS1078R 0x0060 |
29 | #define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C | 29 | #define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C |
30 | #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 | 30 | #define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 |
31 | #define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078 | ||
32 | #define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 | ||
31 | 33 | ||
32 | /* | 34 | /* |
33 | * ===================================== | 35 | * ===================================== |
@@ -580,6 +582,8 @@ struct megasas_ctrl_info { | |||
580 | #define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) | 582 | #define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) |
581 | 583 | ||
582 | #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 | 584 | #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 |
585 | #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 | ||
586 | #define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) | ||
583 | 587 | ||
584 | /* | 588 | /* |
585 | * register set for both 1068 and 1078 controllers | 589 | * register set for both 1068 and 1078 controllers |
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index edf9fdb3cb3c..22052bb7becb 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c | |||
@@ -23,7 +23,6 @@ | |||
23 | * 1.2: PowerPC (big endian) support. | 23 | * 1.2: PowerPC (big endian) support. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/version.h> | ||
27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
29 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h index 6715ecb3bfca..9565acf1aa72 100644 --- a/drivers/scsi/nsp32.h +++ b/drivers/scsi/nsp32.h | |||
@@ -16,7 +16,6 @@ | |||
16 | #ifndef _NSP32_H | 16 | #ifndef _NSP32_H |
17 | #define _NSP32_H | 17 | #define _NSP32_H |
18 | 18 | ||
19 | #include <linux/version.h> | ||
20 | //#define NSP32_DEBUG 9 | 19 | //#define NSP32_DEBUG 9 |
21 | 20 | ||
22 | /* | 21 | /* |
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index a221b6ef9fa9..24e6cb8396e3 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | ***********************************************************************/ | 26 | ***********************************************************************/ |
27 | 27 | ||
28 | #include <linux/version.h> | ||
29 | #include <linux/module.h> | 28 | #include <linux/module.h> |
30 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
31 | #include <linux/init.h> | 30 | #include <linux/init.h> |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index a319a20ed440..45e7dcb4b34d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -993,6 +993,17 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) | |||
993 | { | 993 | { |
994 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; | 994 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; |
995 | 995 | ||
996 | /* | ||
997 | * At this point all fcport's software-states are cleared. Perform any | ||
998 | * final cleanup of firmware resources (PCBs and XCBs). | ||
999 | */ | ||
1000 | if (fcport->loop_id != FC_NO_LOOP_ID) { | ||
1001 | fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id, | ||
1002 | fcport->d_id.b.domain, fcport->d_id.b.area, | ||
1003 | fcport->d_id.b.al_pa); | ||
1004 | fcport->loop_id = FC_NO_LOOP_ID; | ||
1005 | } | ||
1006 | |||
996 | qla2x00_abort_fcport_cmds(fcport); | 1007 | qla2x00_abort_fcport_cmds(fcport); |
997 | scsi_target_unblock(&rport->dev); | 1008 | scsi_target_unblock(&rport->dev); |
998 | } | 1009 | } |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6da31ba94404..94a720eabfd8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2237,6 +2237,7 @@ typedef struct scsi_qla_host { | |||
2237 | #define REGISTER_FDMI_NEEDED 26 | 2237 | #define REGISTER_FDMI_NEEDED 26 |
2238 | #define FCPORT_UPDATE_NEEDED 27 | 2238 | #define FCPORT_UPDATE_NEEDED 27 |
2239 | #define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */ | 2239 | #define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */ |
2240 | #define UNLOADING 29 | ||
2240 | 2241 | ||
2241 | uint32_t device_flags; | 2242 | uint32_t device_flags; |
2242 | #define DFLG_LOCAL_DEVICES BIT_0 | 2243 | #define DFLG_LOCAL_DEVICES BIT_0 |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 601a6b29750c..ee89ddd64aae 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -976,8 +976,9 @@ qla2x00_setup_chip(scsi_qla_host_t *ha) | |||
976 | &ha->fw_attributes, &ha->fw_memory_size); | 976 | &ha->fw_attributes, &ha->fw_memory_size); |
977 | qla2x00_resize_request_q(ha); | 977 | qla2x00_resize_request_q(ha); |
978 | ha->flags.npiv_supported = 0; | 978 | ha->flags.npiv_supported = 0; |
979 | if ((IS_QLA24XX(ha) || IS_QLA25XX(ha)) && | 979 | if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || |
980 | (ha->fw_attributes & BIT_2)) { | 980 | IS_QLA84XX(ha)) && |
981 | (ha->fw_attributes & BIT_2)) { | ||
981 | ha->flags.npiv_supported = 1; | 982 | ha->flags.npiv_supported = 1; |
982 | if ((!ha->max_npiv_vports) || | 983 | if ((!ha->max_npiv_vports) || |
983 | ((ha->max_npiv_vports + 1) % | 984 | ((ha->max_npiv_vports + 1) % |
@@ -3251,6 +3252,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) | |||
3251 | { | 3252 | { |
3252 | int rval; | 3253 | int rval; |
3253 | uint8_t status = 0; | 3254 | uint8_t status = 0; |
3255 | scsi_qla_host_t *vha; | ||
3254 | 3256 | ||
3255 | if (ha->flags.online) { | 3257 | if (ha->flags.online) { |
3256 | ha->flags.online = 0; | 3258 | ha->flags.online = 0; |
@@ -3265,6 +3267,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) | |||
3265 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { | 3267 | if (atomic_read(&ha->loop_state) != LOOP_DOWN) { |
3266 | atomic_set(&ha->loop_state, LOOP_DOWN); | 3268 | atomic_set(&ha->loop_state, LOOP_DOWN); |
3267 | qla2x00_mark_all_devices_lost(ha, 0); | 3269 | qla2x00_mark_all_devices_lost(ha, 0); |
3270 | list_for_each_entry(vha, &ha->vp_list, vp_list) | ||
3271 | qla2x00_mark_all_devices_lost(vha, 0); | ||
3268 | } else { | 3272 | } else { |
3269 | if (!atomic_read(&ha->loop_down_timer)) | 3273 | if (!atomic_read(&ha->loop_down_timer)) |
3270 | atomic_set(&ha->loop_down_timer, | 3274 | atomic_set(&ha->loop_down_timer, |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 874d802edb7d..45a3b93eed57 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -879,11 +879,12 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) | |||
879 | sp->request_sense_ptr += sense_len; | 879 | sp->request_sense_ptr += sense_len; |
880 | sp->request_sense_length -= sense_len; | 880 | sp->request_sense_length -= sense_len; |
881 | if (sp->request_sense_length != 0) | 881 | if (sp->request_sense_length != 0) |
882 | sp->ha->status_srb = sp; | 882 | sp->fcport->ha->status_srb = sp; |
883 | 883 | ||
884 | DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " | 884 | DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " |
885 | "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel, | 885 | "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no, |
886 | cp->device->id, cp->device->lun, cp, cp->serial_number)); | 886 | cp->device->channel, cp->device->id, cp->device->lun, cp, |
887 | cp->serial_number)); | ||
887 | if (sense_len) | 888 | if (sense_len) |
888 | DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, | 889 | DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, |
889 | CMD_ACTUAL_SNSLEN(cp))); | 890 | CMD_ACTUAL_SNSLEN(cp))); |
@@ -1184,9 +1185,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1184 | atomic_read(&fcport->state))); | 1185 | atomic_read(&fcport->state))); |
1185 | 1186 | ||
1186 | cp->result = DID_BUS_BUSY << 16; | 1187 | cp->result = DID_BUS_BUSY << 16; |
1187 | if (atomic_read(&fcport->state) == FCS_ONLINE) { | 1188 | if (atomic_read(&fcport->state) == FCS_ONLINE) |
1188 | qla2x00_mark_device_lost(ha, fcport, 1, 1); | 1189 | qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); |
1189 | } | ||
1190 | break; | 1190 | break; |
1191 | 1191 | ||
1192 | case CS_RESET: | 1192 | case CS_RESET: |
@@ -1229,7 +1229,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1229 | 1229 | ||
1230 | /* Check to see if logout occurred. */ | 1230 | /* Check to see if logout occurred. */ |
1231 | if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) | 1231 | if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) |
1232 | qla2x00_mark_device_lost(ha, fcport, 1, 1); | 1232 | qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); |
1233 | break; | 1233 | break; |
1234 | 1234 | ||
1235 | default: | 1235 | default: |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index bc90d6b8d0a0..813bc7784c0a 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -2686,7 +2686,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha, | |||
2686 | set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); | 2686 | set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); |
2687 | set_bit(VP_DPC_NEEDED, &ha->dpc_flags); | 2687 | set_bit(VP_DPC_NEEDED, &ha->dpc_flags); |
2688 | 2688 | ||
2689 | wake_up_process(ha->dpc_thread); | 2689 | qla2xxx_wake_dpc(ha); |
2690 | } | 2690 | } |
2691 | } | 2691 | } |
2692 | 2692 | ||
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 50baf6a1d67c..93560cd72784 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -6,7 +6,6 @@ | |||
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | 8 | ||
9 | #include <linux/version.h> | ||
10 | #include <linux/moduleparam.h> | 9 | #include <linux/moduleparam.h> |
11 | #include <linux/vmalloc.h> | 10 | #include <linux/vmalloc.h> |
12 | #include <linux/smp_lock.h> | 11 | #include <linux/smp_lock.h> |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7c8af7ed2a5d..26afe44265c7 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -780,7 +780,8 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, | |||
780 | sp = pha->outstanding_cmds[cnt]; | 780 | sp = pha->outstanding_cmds[cnt]; |
781 | if (!sp) | 781 | if (!sp) |
782 | continue; | 782 | continue; |
783 | if (ha->vp_idx != sp->ha->vp_idx) | 783 | |
784 | if (ha->vp_idx != sp->fcport->ha->vp_idx) | ||
784 | continue; | 785 | continue; |
785 | match = 0; | 786 | match = 0; |
786 | switch (type) { | 787 | switch (type) { |
@@ -1080,9 +1081,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) | |||
1080 | sp = ha->outstanding_cmds[cnt]; | 1081 | sp = ha->outstanding_cmds[cnt]; |
1081 | if (sp) { | 1082 | if (sp) { |
1082 | ha->outstanding_cmds[cnt] = NULL; | 1083 | ha->outstanding_cmds[cnt] = NULL; |
1083 | sp->flags = 0; | ||
1084 | sp->cmd->result = res; | 1084 | sp->cmd->result = res; |
1085 | sp->cmd->host_scribble = (unsigned char *)NULL; | ||
1086 | qla2x00_sp_compl(ha, sp); | 1085 | qla2x00_sp_compl(ha, sp); |
1087 | } | 1086 | } |
1088 | } | 1087 | } |
@@ -1776,10 +1775,15 @@ probe_out: | |||
1776 | static void | 1775 | static void |
1777 | qla2x00_remove_one(struct pci_dev *pdev) | 1776 | qla2x00_remove_one(struct pci_dev *pdev) |
1778 | { | 1777 | { |
1779 | scsi_qla_host_t *ha; | 1778 | scsi_qla_host_t *ha, *vha, *temp; |
1780 | 1779 | ||
1781 | ha = pci_get_drvdata(pdev); | 1780 | ha = pci_get_drvdata(pdev); |
1782 | 1781 | ||
1782 | list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) | ||
1783 | fc_vport_terminate(vha->fc_vport); | ||
1784 | |||
1785 | set_bit(UNLOADING, &ha->dpc_flags); | ||
1786 | |||
1783 | qla2x00_dfs_remove(ha); | 1787 | qla2x00_dfs_remove(ha); |
1784 | 1788 | ||
1785 | qla84xx_put_chip(ha); | 1789 | qla84xx_put_chip(ha); |
@@ -2451,8 +2455,10 @@ qla2x00_do_dpc(void *data) | |||
2451 | void | 2455 | void |
2452 | qla2xxx_wake_dpc(scsi_qla_host_t *ha) | 2456 | qla2xxx_wake_dpc(scsi_qla_host_t *ha) |
2453 | { | 2457 | { |
2454 | if (ha->dpc_thread) | 2458 | struct task_struct *t = ha->dpc_thread; |
2455 | wake_up_process(ha->dpc_thread); | 2459 | |
2460 | if (!test_bit(UNLOADING, &ha->dpc_flags) && t) | ||
2461 | wake_up_process(t); | ||
2456 | } | 2462 | } |
2457 | 2463 | ||
2458 | /* | 2464 | /* |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 676c390db354..4160e4caa7b9 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.01-k6" | 10 | #define QLA2XXX_VERSION "8.02.01-k7" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 3d36270a8b4d..661f9f21650a 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -217,6 +217,18 @@ static int sg_last_dev(void); | |||
217 | #define SZ_SG_IOVEC sizeof(sg_iovec_t) | 217 | #define SZ_SG_IOVEC sizeof(sg_iovec_t) |
218 | #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) | 218 | #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) |
219 | 219 | ||
220 | static int sg_allow_access(struct file *filp, unsigned char *cmd) | ||
221 | { | ||
222 | struct sg_fd *sfp = (struct sg_fd *)filp->private_data; | ||
223 | struct request_queue *q = sfp->parentdp->device->request_queue; | ||
224 | |||
225 | if (sfp->parentdp->device->type == TYPE_SCANNER) | ||
226 | return 0; | ||
227 | |||
228 | return blk_verify_command(&q->cmd_filter, | ||
229 | cmd, filp->f_mode & FMODE_WRITE); | ||
230 | } | ||
231 | |||
220 | static int | 232 | static int |
221 | sg_open(struct inode *inode, struct file *filp) | 233 | sg_open(struct inode *inode, struct file *filp) |
222 | { | 234 | { |
@@ -689,7 +701,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, | |||
689 | sg_remove_request(sfp, srp); | 701 | sg_remove_request(sfp, srp); |
690 | return -EFAULT; | 702 | return -EFAULT; |
691 | } | 703 | } |
692 | if (read_only && !blk_verify_command(file, cmnd)) { | 704 | if (read_only && sg_allow_access(file, cmnd)) { |
693 | sg_remove_request(sfp, srp); | 705 | sg_remove_request(sfp, srp); |
694 | return -EPERM; | 706 | return -EPERM; |
695 | } | 707 | } |
@@ -793,6 +805,7 @@ sg_ioctl(struct inode *inode, struct file *filp, | |||
793 | 805 | ||
794 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 806 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
795 | return -ENXIO; | 807 | return -ENXIO; |
808 | |||
796 | SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n", | 809 | SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n", |
797 | sdp->disk->disk_name, (int) cmd_in)); | 810 | sdp->disk->disk_name, (int) cmd_in)); |
798 | read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); | 811 | read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); |
@@ -1061,7 +1074,7 @@ sg_ioctl(struct inode *inode, struct file *filp, | |||
1061 | 1074 | ||
1062 | if (copy_from_user(&opcode, siocp->data, 1)) | 1075 | if (copy_from_user(&opcode, siocp->data, 1)) |
1063 | return -EFAULT; | 1076 | return -EFAULT; |
1064 | if (!blk_verify_command(filp, &opcode)) | 1077 | if (sg_allow_access(filp, &opcode)) |
1065 | return -EPERM; | 1078 | return -EPERM; |
1066 | } | 1079 | } |
1067 | return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); | 1080 | return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 3b4a14e355c1..77cb34270fc1 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -449,6 +449,7 @@ config SERIAL_CLPS711X_CONSOLE | |||
449 | config SERIAL_SAMSUNG | 449 | config SERIAL_SAMSUNG |
450 | tristate "Samsung SoC serial support" | 450 | tristate "Samsung SoC serial support" |
451 | depends on ARM && PLAT_S3C24XX | 451 | depends on ARM && PLAT_S3C24XX |
452 | select SERIAL_CORE | ||
452 | help | 453 | help |
453 | Support for the on-chip UARTs on the Samsung S3C24XX series CPUs, | 454 | Support for the on-chip UARTs on the Samsung S3C24XX series CPUs, |
454 | providing /dev/ttySAC0, 1 and 2 (note, some machines may not | 455 | providing /dev/ttySAC0, 1 and 2 (note, some machines may not |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index efcd44344fb1..4a0d30bed9f1 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | #include <asm/gpio.h> | 30 | #include <asm/gpio.h> |
31 | #include <asm/mach/bfin_serial_5xx.h> | 31 | #include <mach/bfin_serial_5xx.h> |
32 | 32 | ||
33 | #ifdef CONFIG_SERIAL_BFIN_DMA | 33 | #ifdef CONFIG_SERIAL_BFIN_DMA |
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c index aeeec5588afd..e41766d08035 100644 --- a/drivers/serial/sunhv.c +++ b/drivers/serial/sunhv.c | |||
@@ -17,11 +17,11 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/of_device.h> | ||
20 | 21 | ||
21 | #include <asm/hypervisor.h> | 22 | #include <asm/hypervisor.h> |
22 | #include <asm/spitfire.h> | 23 | #include <asm/spitfire.h> |
23 | #include <asm/prom.h> | 24 | #include <asm/prom.h> |
24 | #include <asm/of_device.h> | ||
25 | #include <asm/irq.h> | 25 | #include <asm/irq.h> |
26 | 26 | ||
27 | #if defined(CONFIG_MAGIC_SYSRQ) | 27 | #if defined(CONFIG_MAGIC_SYSRQ) |
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index 15ee497e1c78..29b4458abf74 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c | |||
@@ -32,11 +32,11 @@ | |||
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/of_device.h> | ||
35 | 36 | ||
36 | #include <asm/io.h> | 37 | #include <asm/io.h> |
37 | #include <asm/irq.h> | 38 | #include <asm/irq.h> |
38 | #include <asm/prom.h> | 39 | #include <asm/prom.h> |
39 | #include <asm/of_device.h> | ||
40 | 40 | ||
41 | #if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 41 | #if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
42 | #define SUPPORT_SYSRQ | 42 | #define SUPPORT_SYSRQ |
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index e24e68235088..a378464f9292 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -35,11 +35,11 @@ | |||
35 | #include <linux/serial_reg.h> | 35 | #include <linux/serial_reg.h> |
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
38 | #include <linux/of_device.h> | ||
38 | 39 | ||
39 | #include <asm/io.h> | 40 | #include <asm/io.h> |
40 | #include <asm/irq.h> | 41 | #include <asm/irq.h> |
41 | #include <asm/prom.h> | 42 | #include <asm/prom.h> |
42 | #include <asm/of_device.h> | ||
43 | 43 | ||
44 | #if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 44 | #if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
45 | #define SUPPORT_SYSRQ | 45 | #define SUPPORT_SYSRQ |
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index 0f3d69b86d67..3cb4c8aee13f 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c | |||
@@ -32,11 +32,11 @@ | |||
32 | #include <linux/serio.h> | 32 | #include <linux/serio.h> |
33 | #endif | 33 | #endif |
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/of_device.h> | ||
35 | 36 | ||
36 | #include <asm/io.h> | 37 | #include <asm/io.h> |
37 | #include <asm/irq.h> | 38 | #include <asm/irq.h> |
38 | #include <asm/prom.h> | 39 | #include <asm/prom.h> |
39 | #include <asm/of_device.h> | ||
40 | 40 | ||
41 | #if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 41 | #if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
42 | #define SUPPORT_SYSRQ | 42 | #define SUPPORT_SYSRQ |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 964124b60db2..75e86865234c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -226,10 +226,11 @@ EXPORT_SYMBOL_GPL(spi_alloc_device); | |||
226 | * Companion function to spi_alloc_device. Devices allocated with | 226 | * Companion function to spi_alloc_device. Devices allocated with |
227 | * spi_alloc_device can be added onto the spi bus with this function. | 227 | * spi_alloc_device can be added onto the spi bus with this function. |
228 | * | 228 | * |
229 | * Returns 0 on success; non-zero on failure | 229 | * Returns 0 on success; negative errno on failure |
230 | */ | 230 | */ |
231 | int spi_add_device(struct spi_device *spi) | 231 | int spi_add_device(struct spi_device *spi) |
232 | { | 232 | { |
233 | static DEFINE_MUTEX(spi_add_lock); | ||
233 | struct device *dev = spi->master->dev.parent; | 234 | struct device *dev = spi->master->dev.parent; |
234 | int status; | 235 | int status; |
235 | 236 | ||
@@ -246,26 +247,43 @@ int spi_add_device(struct spi_device *spi) | |||
246 | "%s.%u", spi->master->dev.bus_id, | 247 | "%s.%u", spi->master->dev.bus_id, |
247 | spi->chip_select); | 248 | spi->chip_select); |
248 | 249 | ||
249 | /* drivers may modify this initial i/o setup */ | 250 | |
251 | /* We need to make sure there's no other device with this | ||
252 | * chipselect **BEFORE** we call setup(), else we'll trash | ||
253 | * its configuration. Lock against concurrent add() calls. | ||
254 | */ | ||
255 | mutex_lock(&spi_add_lock); | ||
256 | |||
257 | if (bus_find_device_by_name(&spi_bus_type, NULL, spi->dev.bus_id) | ||
258 | != NULL) { | ||
259 | dev_err(dev, "chipselect %d already in use\n", | ||
260 | spi->chip_select); | ||
261 | status = -EBUSY; | ||
262 | goto done; | ||
263 | } | ||
264 | |||
265 | /* Drivers may modify this initial i/o setup, but will | ||
266 | * normally rely on the device being setup. Devices | ||
267 | * using SPI_CS_HIGH can't coexist well otherwise... | ||
268 | */ | ||
250 | status = spi->master->setup(spi); | 269 | status = spi->master->setup(spi); |
251 | if (status < 0) { | 270 | if (status < 0) { |
252 | dev_err(dev, "can't %s %s, status %d\n", | 271 | dev_err(dev, "can't %s %s, status %d\n", |
253 | "setup", spi->dev.bus_id, status); | 272 | "setup", spi->dev.bus_id, status); |
254 | return status; | 273 | goto done; |
255 | } | 274 | } |
256 | 275 | ||
257 | /* driver core catches callers that misbehave by defining | 276 | /* Device may be bound to an active driver when this returns */ |
258 | * devices that already exist. | ||
259 | */ | ||
260 | status = device_add(&spi->dev); | 277 | status = device_add(&spi->dev); |
261 | if (status < 0) { | 278 | if (status < 0) |
262 | dev_err(dev, "can't %s %s, status %d\n", | 279 | dev_err(dev, "can't %s %s, status %d\n", |
263 | "add", spi->dev.bus_id, status); | 280 | "add", spi->dev.bus_id, status); |
264 | return status; | 281 | else |
265 | } | 282 | dev_dbg(dev, "registered child %s\n", spi->dev.bus_id); |
266 | 283 | ||
267 | dev_dbg(dev, "registered child %s\n", spi->dev.bus_id); | 284 | done: |
268 | return 0; | 285 | mutex_unlock(&spi_add_lock); |
286 | return status; | ||
269 | } | 287 | } |
270 | EXPORT_SYMBOL_GPL(spi_add_device); | 288 | EXPORT_SYMBOL_GPL(spi_add_device); |
271 | 289 | ||
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index d831a2beff39..87ab2443e66d 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c | |||
@@ -1165,15 +1165,19 @@ EXPORT_SYMBOL(ssb_dma_translation); | |||
1165 | 1165 | ||
1166 | int ssb_dma_set_mask(struct ssb_device *dev, u64 mask) | 1166 | int ssb_dma_set_mask(struct ssb_device *dev, u64 mask) |
1167 | { | 1167 | { |
1168 | #ifdef CONFIG_SSB_PCIHOST | ||
1168 | int err; | 1169 | int err; |
1170 | #endif | ||
1169 | 1171 | ||
1170 | switch (dev->bus->bustype) { | 1172 | switch (dev->bus->bustype) { |
1171 | case SSB_BUSTYPE_PCI: | 1173 | case SSB_BUSTYPE_PCI: |
1174 | #ifdef CONFIG_SSB_PCIHOST | ||
1172 | err = pci_set_dma_mask(dev->bus->host_pci, mask); | 1175 | err = pci_set_dma_mask(dev->bus->host_pci, mask); |
1173 | if (err) | 1176 | if (err) |
1174 | return err; | 1177 | return err; |
1175 | err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask); | 1178 | err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask); |
1176 | return err; | 1179 | return err; |
1180 | #endif | ||
1177 | case SSB_BUSTYPE_SSB: | 1181 | case SSB_BUSTYPE_SSB: |
1178 | return dma_set_mask(dev->dev, mask); | 1182 | return dma_set_mask(dev->dev, mask); |
1179 | default: | 1183 | default: |
@@ -1188,6 +1192,7 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, | |||
1188 | { | 1192 | { |
1189 | switch (dev->bus->bustype) { | 1193 | switch (dev->bus->bustype) { |
1190 | case SSB_BUSTYPE_PCI: | 1194 | case SSB_BUSTYPE_PCI: |
1195 | #ifdef CONFIG_SSB_PCIHOST | ||
1191 | if (gfp_flags & GFP_DMA) { | 1196 | if (gfp_flags & GFP_DMA) { |
1192 | /* Workaround: The PCI API does not support passing | 1197 | /* Workaround: The PCI API does not support passing |
1193 | * a GFP flag. */ | 1198 | * a GFP flag. */ |
@@ -1195,6 +1200,7 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, | |||
1195 | size, dma_handle, gfp_flags); | 1200 | size, dma_handle, gfp_flags); |
1196 | } | 1201 | } |
1197 | return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle); | 1202 | return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle); |
1203 | #endif | ||
1198 | case SSB_BUSTYPE_SSB: | 1204 | case SSB_BUSTYPE_SSB: |
1199 | return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags); | 1205 | return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags); |
1200 | default: | 1206 | default: |
@@ -1210,6 +1216,7 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, | |||
1210 | { | 1216 | { |
1211 | switch (dev->bus->bustype) { | 1217 | switch (dev->bus->bustype) { |
1212 | case SSB_BUSTYPE_PCI: | 1218 | case SSB_BUSTYPE_PCI: |
1219 | #ifdef CONFIG_SSB_PCIHOST | ||
1213 | if (gfp_flags & GFP_DMA) { | 1220 | if (gfp_flags & GFP_DMA) { |
1214 | /* Workaround: The PCI API does not support passing | 1221 | /* Workaround: The PCI API does not support passing |
1215 | * a GFP flag. */ | 1222 | * a GFP flag. */ |
@@ -1220,6 +1227,7 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, | |||
1220 | pci_free_consistent(dev->bus->host_pci, size, | 1227 | pci_free_consistent(dev->bus->host_pci, size, |
1221 | vaddr, dma_handle); | 1228 | vaddr, dma_handle); |
1222 | return; | 1229 | return; |
1230 | #endif | ||
1223 | case SSB_BUSTYPE_SSB: | 1231 | case SSB_BUSTYPE_SSB: |
1224 | dma_free_coherent(dev->dev, size, vaddr, dma_handle); | 1232 | dma_free_coherent(dev->dev, size, vaddr, dma_handle); |
1225 | return; | 1233 | return; |
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig index 2e9079df26b3..4190be64917f 100644 --- a/drivers/uio/Kconfig +++ b/drivers/uio/Kconfig | |||
@@ -33,6 +33,19 @@ config UIO_PDRV | |||
33 | 33 | ||
34 | If you don't know what to do here, say N. | 34 | If you don't know what to do here, say N. |
35 | 35 | ||
36 | config UIO_PDRV_GENIRQ | ||
37 | tristate "Userspace I/O platform driver with generic IRQ handling" | ||
38 | help | ||
39 | Platform driver for Userspace I/O devices, including generic | ||
40 | interrupt handling code. Shared interrupts are not supported. | ||
41 | |||
42 | This kernel driver requires that the matching userspace driver | ||
43 | handles interrupts in a special way. Userspace is responsible | ||
44 | for acknowledging the hardware device if needed, and re-enabling | ||
45 | interrupts in the interrupt controller using the write() syscall. | ||
46 | |||
47 | If you don't know what to do here, say N. | ||
48 | |||
36 | config UIO_SMX | 49 | config UIO_SMX |
37 | tristate "SMX cryptengine UIO interface" | 50 | tristate "SMX cryptengine UIO interface" |
38 | default n | 51 | default n |
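Note: the new UIO_PDRV_GENIRQ help text above describes the userspace side of the interrupt contract only in prose. The fragment below is a minimal, hypothetical userspace sketch; the /dev/uio0 path and the omitted register handling are assumptions, not part of this patch. It shows the read()-to-wait, write()-to-re-enable cycle that the generic-IRQ platform driver expects from its matching userspace driver.

/* Hypothetical userspace loop for a uio_pdrv_genirq device.
 * /dev/uio0 is an assumption; the real node depends on probe order.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	uint32_t count, enable = 1;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		/* Blocks until an interrupt fires; the kernel handler has
		 * already disabled the IRQ in the interrupt controller. */
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			break;

		/* Acknowledge/handle the hardware via the mmap()ed
		 * registers here (device specific, omitted). */

		/* Re-enable the interrupt; the UIO core passes this value
		 * to the driver's irqcontrol hook. */
		if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
			break;
	}

	close(fd);
	return 0;
}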
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile index e00ce0def1a0..8667bbdef904 100644 --- a/drivers/uio/Makefile +++ b/drivers/uio/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | obj-$(CONFIG_UIO) += uio.o | 1 | obj-$(CONFIG_UIO) += uio.o |
2 | obj-$(CONFIG_UIO_CIF) += uio_cif.o | 2 | obj-$(CONFIG_UIO_CIF) += uio_cif.o |
3 | obj-$(CONFIG_UIO_PDRV) += uio_pdrv.o | 3 | obj-$(CONFIG_UIO_PDRV) += uio_pdrv.o |
4 | obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o | ||
4 | obj-$(CONFIG_UIO_SMX) += uio_smx.o | 5 | obj-$(CONFIG_UIO_SMX) += uio_smx.o |
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c index 5d0d2e85d982..0b4ef39cd85d 100644 --- a/drivers/uio/uio_pdrv.c +++ b/drivers/uio/uio_pdrv.c | |||
@@ -88,6 +88,8 @@ static int uio_pdrv_remove(struct platform_device *pdev) | |||
88 | 88 | ||
89 | uio_unregister_device(pdata->uioinfo); | 89 | uio_unregister_device(pdata->uioinfo); |
90 | 90 | ||
91 | kfree(pdata); | ||
92 | |||
91 | return 0; | 93 | return 0; |
92 | } | 94 | } |
93 | 95 | ||
@@ -114,5 +116,5 @@ module_exit(uio_pdrv_exit); | |||
114 | 116 | ||
115 | MODULE_AUTHOR("Uwe Kleine-Koenig"); | 117 | MODULE_AUTHOR("Uwe Kleine-Koenig"); |
116 | MODULE_DESCRIPTION("Userspace I/O platform driver"); | 118 | MODULE_DESCRIPTION("Userspace I/O platform driver"); |
117 | MODULE_LICENSE("GPL"); | 119 | MODULE_LICENSE("GPL v2"); |
118 | MODULE_ALIAS("platform:" DRIVER_NAME); | 120 | MODULE_ALIAS("platform:" DRIVER_NAME); |
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c new file mode 100644 index 000000000000..1f82c83a92ae --- /dev/null +++ b/drivers/uio/uio_pdrv_genirq.c | |||
@@ -0,0 +1,188 @@ | |||
1 | /* | ||
2 | * drivers/uio/uio_pdrv_genirq.c | ||
3 | * | ||
4 | * Userspace I/O platform driver with generic IRQ handling code. | ||
5 | * | ||
6 | * Copyright (C) 2008 Magnus Damm | ||
7 | * | ||
8 | * Based on uio_pdrv.c by Uwe Kleine-Koenig, | ||
9 | * Copyright (C) 2008 by Digi International Inc. | ||
10 | * All rights reserved. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License version 2 as published by | ||
14 | * the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/uio_driver.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/bitops.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/stringify.h> | ||
23 | |||
24 | #define DRIVER_NAME "uio_pdrv_genirq" | ||
25 | |||
26 | struct uio_pdrv_genirq_platdata { | ||
27 | struct uio_info *uioinfo; | ||
28 | spinlock_t lock; | ||
29 | unsigned long flags; | ||
30 | }; | ||
31 | |||
32 | static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info) | ||
33 | { | ||
34 | struct uio_pdrv_genirq_platdata *priv = dev_info->priv; | ||
35 | |||
36 | /* Just disable the interrupt in the interrupt controller, and | ||
37 | * remember the state so we can allow user space to enable it later. | ||
38 | */ | ||
39 | |||
40 | if (!test_and_set_bit(0, &priv->flags)) | ||
41 | disable_irq_nosync(irq); | ||
42 | |||
43 | return IRQ_HANDLED; | ||
44 | } | ||
45 | |||
46 | static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) | ||
47 | { | ||
48 | struct uio_pdrv_genirq_platdata *priv = dev_info->priv; | ||
49 | unsigned long flags; | ||
50 | |||
51 | /* Allow user space to enable and disable the interrupt | ||
52 | * in the interrupt controller, but keep track of the | ||
53 | * state to prevent per-irq depth damage. | ||
54 | * | ||
55 | * Serialize this operation to support multiple tasks. | ||
56 | */ | ||
57 | |||
58 | spin_lock_irqsave(&priv->lock, flags); | ||
59 | if (irq_on) { | ||
60 | if (test_and_clear_bit(0, &priv->flags)) | ||
61 | enable_irq(dev_info->irq); | ||
62 | } else { | ||
63 | if (!test_and_set_bit(0, &priv->flags)) | ||
64 | disable_irq(dev_info->irq); | ||
65 | } | ||
66 | spin_unlock_irqrestore(&priv->lock, flags); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static int uio_pdrv_genirq_probe(struct platform_device *pdev) | ||
72 | { | ||
73 | struct uio_info *uioinfo = pdev->dev.platform_data; | ||
74 | struct uio_pdrv_genirq_platdata *priv; | ||
75 | struct uio_mem *uiomem; | ||
76 | int ret = -EINVAL; | ||
77 | int i; | ||
78 | |||
79 | if (!uioinfo || !uioinfo->name || !uioinfo->version) { | ||
80 | dev_err(&pdev->dev, "missing platform_data\n"); | ||
81 | goto bad0; | ||
82 | } | ||
83 | |||
84 | if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags) { | ||
85 | dev_err(&pdev->dev, "interrupt configuration error\n"); | ||
86 | goto bad0; | ||
87 | } | ||
88 | |||
89 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
90 | if (!priv) { | ||
91 | ret = -ENOMEM; | ||
92 | dev_err(&pdev->dev, "unable to kmalloc\n"); | ||
93 | goto bad0; | ||
94 | } | ||
95 | |||
96 | priv->uioinfo = uioinfo; | ||
97 | spin_lock_init(&priv->lock); | ||
98 | priv->flags = 0; /* interrupt is enabled to begin with */ | ||
99 | |||
100 | uiomem = &uioinfo->mem[0]; | ||
101 | |||
102 | for (i = 0; i < pdev->num_resources; ++i) { | ||
103 | struct resource *r = &pdev->resource[i]; | ||
104 | |||
105 | if (r->flags != IORESOURCE_MEM) | ||
106 | continue; | ||
107 | |||
108 | if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { | ||
109 | dev_warn(&pdev->dev, "device has more than " | ||
110 | __stringify(MAX_UIO_MAPS) | ||
111 | " I/O memory resources.\n"); | ||
112 | break; | ||
113 | } | ||
114 | |||
115 | uiomem->memtype = UIO_MEM_PHYS; | ||
116 | uiomem->addr = r->start; | ||
117 | uiomem->size = r->end - r->start + 1; | ||
118 | ++uiomem; | ||
119 | } | ||
120 | |||
121 | while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { | ||
122 | uiomem->size = 0; | ||
123 | ++uiomem; | ||
124 | } | ||
125 | |||
126 | /* This driver requires no hardware specific kernel code to handle | ||
127 | * interrupts. Instead, the interrupt handler simply disables the | ||
128 | * interrupt in the interrupt controller. User space is responsible | ||
129 | * for performing hardware specific acknowledge and re-enabling of | ||
130 | * the interrupt in the interrupt controller. | ||
131 | * | ||
132 | * Interrupt sharing is not supported. | ||
133 | */ | ||
134 | |||
135 | uioinfo->irq_flags = IRQF_DISABLED; | ||
136 | uioinfo->handler = uio_pdrv_genirq_handler; | ||
137 | uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol; | ||
138 | uioinfo->priv = priv; | ||
139 | |||
140 | ret = uio_register_device(&pdev->dev, priv->uioinfo); | ||
141 | if (ret) { | ||
142 | dev_err(&pdev->dev, "unable to register uio device\n"); | ||
143 | goto bad1; | ||
144 | } | ||
145 | |||
146 | platform_set_drvdata(pdev, priv); | ||
147 | return 0; | ||
148 | bad1: | ||
149 | kfree(priv); | ||
150 | bad0: | ||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | static int uio_pdrv_genirq_remove(struct platform_device *pdev) | ||
155 | { | ||
156 | struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev); | ||
157 | |||
158 | uio_unregister_device(priv->uioinfo); | ||
159 | kfree(priv); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static struct platform_driver uio_pdrv_genirq = { | ||
164 | .probe = uio_pdrv_genirq_probe, | ||
165 | .remove = uio_pdrv_genirq_remove, | ||
166 | .driver = { | ||
167 | .name = DRIVER_NAME, | ||
168 | .owner = THIS_MODULE, | ||
169 | }, | ||
170 | }; | ||
171 | |||
172 | static int __init uio_pdrv_genirq_init(void) | ||
173 | { | ||
174 | return platform_driver_register(&uio_pdrv_genirq); | ||
175 | } | ||
176 | |||
177 | static void __exit uio_pdrv_genirq_exit(void) | ||
178 | { | ||
179 | platform_driver_unregister(&uio_pdrv_genirq); | ||
180 | } | ||
181 | |||
182 | module_init(uio_pdrv_genirq_init); | ||
183 | module_exit(uio_pdrv_genirq_exit); | ||
184 | |||
185 | MODULE_AUTHOR("Magnus Damm"); | ||
186 | MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling"); | ||
187 | MODULE_LICENSE("GPL v2"); | ||
188 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
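Binding a device to the new uio_pdrv_genirq driver happens entirely through platform data: board code fills in a struct uio_info with a name, version and IRQ number, leaves handler/irqcontrol/irq_flags unset (the probe above rejects anything else), and attaches IORESOURCE_MEM resources to a platform device named "uio_pdrv_genirq". A sketch with made-up addresses and IRQ number, not part of this patch:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static struct resource mydev_resources[] = {
	{
		.start = 0x10000000,		/* hypothetical register block */
		.end   = 0x10000fff,
		.flags = IORESOURCE_MEM,
	},
};

static struct uio_info mydev_uio_info = {
	.name    = "mydev",
	.version = "0.1",
	.irq     = 42,				/* hypothetical IRQ number */
	/* handler, irqcontrol and irq_flags are filled in by the driver */
};

static struct platform_device mydev_uio_device = {
	.name          = "uio_pdrv_genirq",
	.id            = -1,
	.dev           = { .platform_data = &mydev_uio_info },
	.resource      = mydev_resources,
	.num_resources = ARRAY_SIZE(mydev_resources),
};

/* board init code would then call platform_device_register(&mydev_uio_device) */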
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 755823cdf62a..bcefbddeba50 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig | |||
@@ -95,16 +95,18 @@ config USB | |||
95 | 95 | ||
96 | source "drivers/usb/core/Kconfig" | 96 | source "drivers/usb/core/Kconfig" |
97 | 97 | ||
98 | source "drivers/usb/mon/Kconfig" | ||
99 | |||
98 | source "drivers/usb/host/Kconfig" | 100 | source "drivers/usb/host/Kconfig" |
99 | 101 | ||
102 | source "drivers/usb/musb/Kconfig" | ||
103 | |||
100 | source "drivers/usb/class/Kconfig" | 104 | source "drivers/usb/class/Kconfig" |
101 | 105 | ||
102 | source "drivers/usb/storage/Kconfig" | 106 | source "drivers/usb/storage/Kconfig" |
103 | 107 | ||
104 | source "drivers/usb/image/Kconfig" | 108 | source "drivers/usb/image/Kconfig" |
105 | 109 | ||
106 | source "drivers/usb/mon/Kconfig" | ||
107 | |||
108 | comment "USB port drivers" | 110 | comment "USB port drivers" |
109 | depends on USB | 111 | depends on USB |
110 | 112 | ||
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 507a9bd0d77c..9aea43a8c4ad 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
@@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ | |||
602 | offd = le32_to_cpu(buf[offb++]); | 602 | offd = le32_to_cpu(buf[offb++]); |
603 | if (offd >= size) { | 603 | if (offd >= size) { |
604 | if (printk_ratelimit()) | 604 | if (printk_ratelimit()) |
605 | usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n", | 605 | usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n", |
606 | offd, cm); | 606 | offd, cm); |
607 | ret = -EIO; | 607 | ret = -EIO; |
608 | goto cleanup; | 608 | goto cleanup; |
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index cb01b5106efd..b6483dd98acc 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -64,7 +64,6 @@ | |||
64 | #include <linux/ctype.h> | 64 | #include <linux/ctype.h> |
65 | #include <linux/sched.h> | 65 | #include <linux/sched.h> |
66 | #include <linux/kthread.h> | 66 | #include <linux/kthread.h> |
67 | #include <linux/version.h> | ||
68 | #include <linux/mutex.h> | 67 | #include <linux/mutex.h> |
69 | #include <linux/freezer.h> | 68 | #include <linux/freezer.h> |
70 | 69 | ||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 0725b1871f23..c257453fa9de 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -51,6 +51,7 @@ | |||
51 | */ | 51 | */ |
52 | 52 | ||
53 | #undef DEBUG | 53 | #undef DEBUG |
54 | #undef VERBOSE_DEBUG | ||
54 | 55 | ||
55 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
56 | #include <linux/errno.h> | 57 | #include <linux/errno.h> |
@@ -70,6 +71,9 @@ | |||
70 | 71 | ||
71 | #include "cdc-acm.h" | 72 | #include "cdc-acm.h" |
72 | 73 | ||
74 | |||
75 | #define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */ | ||
76 | |||
73 | /* | 77 | /* |
74 | * Version Information | 78 | * Version Information |
75 | */ | 79 | */ |
@@ -85,6 +89,12 @@ static DEFINE_MUTEX(open_mutex); | |||
85 | 89 | ||
86 | #define ACM_READY(acm) (acm && acm->dev && acm->used) | 90 | #define ACM_READY(acm) (acm && acm->dev && acm->used) |
87 | 91 | ||
92 | #ifdef VERBOSE_DEBUG | ||
93 | #define verbose 1 | ||
94 | #else | ||
95 | #define verbose 0 | ||
96 | #endif | ||
97 | |||
88 | /* | 98 | /* |
89 | * Functions for ACM control messages. | 99 | * Functions for ACM control messages. |
90 | */ | 100 | */ |
@@ -136,19 +146,17 @@ static int acm_wb_alloc(struct acm *acm) | |||
136 | static int acm_wb_is_avail(struct acm *acm) | 146 | static int acm_wb_is_avail(struct acm *acm) |
137 | { | 147 | { |
138 | int i, n; | 148 | int i, n; |
149 | unsigned long flags; | ||
139 | 150 | ||
140 | n = ACM_NW; | 151 | n = ACM_NW; |
152 | spin_lock_irqsave(&acm->write_lock, flags); | ||
141 | for (i = 0; i < ACM_NW; i++) { | 153 | for (i = 0; i < ACM_NW; i++) { |
142 | n -= acm->wb[i].use; | 154 | n -= acm->wb[i].use; |
143 | } | 155 | } |
156 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
144 | return n; | 157 | return n; |
145 | } | 158 | } |
146 | 159 | ||
147 | static inline int acm_wb_is_used(struct acm *acm, int wbn) | ||
148 | { | ||
149 | return acm->wb[wbn].use; | ||
150 | } | ||
151 | |||
152 | /* | 160 | /* |
153 | * Finish write. | 161 | * Finish write. |
154 | */ | 162 | */ |
@@ -157,7 +165,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb) | |||
157 | unsigned long flags; | 165 | unsigned long flags; |
158 | 166 | ||
159 | spin_lock_irqsave(&acm->write_lock, flags); | 167 | spin_lock_irqsave(&acm->write_lock, flags); |
160 | acm->write_ready = 1; | ||
161 | wb->use = 0; | 168 | wb->use = 0; |
162 | acm->transmitting--; | 169 | acm->transmitting--; |
163 | spin_unlock_irqrestore(&acm->write_lock, flags); | 170 | spin_unlock_irqrestore(&acm->write_lock, flags); |
@@ -190,40 +197,25 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb) | |||
190 | static int acm_write_start(struct acm *acm, int wbn) | 197 | static int acm_write_start(struct acm *acm, int wbn) |
191 | { | 198 | { |
192 | unsigned long flags; | 199 | unsigned long flags; |
193 | struct acm_wb *wb; | 200 | struct acm_wb *wb = &acm->wb[wbn]; |
194 | int rc; | 201 | int rc; |
195 | 202 | ||
196 | spin_lock_irqsave(&acm->write_lock, flags); | 203 | spin_lock_irqsave(&acm->write_lock, flags); |
197 | if (!acm->dev) { | 204 | if (!acm->dev) { |
205 | wb->use = 0; | ||
198 | spin_unlock_irqrestore(&acm->write_lock, flags); | 206 | spin_unlock_irqrestore(&acm->write_lock, flags); |
199 | return -ENODEV; | 207 | return -ENODEV; |
200 | } | 208 | } |
201 | 209 | ||
202 | if (!acm->write_ready) { | ||
203 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
204 | return 0; /* A white lie */ | ||
205 | } | ||
206 | |||
207 | wb = &acm->wb[wbn]; | ||
208 | if(acm_wb_is_avail(acm) <= 1) | ||
209 | acm->write_ready = 0; | ||
210 | |||
211 | dbg("%s susp_count: %d", __func__, acm->susp_count); | 210 | dbg("%s susp_count: %d", __func__, acm->susp_count); |
212 | if (acm->susp_count) { | 211 | if (acm->susp_count) { |
213 | acm->old_ready = acm->write_ready; | ||
214 | acm->delayed_wb = wb; | 212 | acm->delayed_wb = wb; |
215 | acm->write_ready = 0; | ||
216 | schedule_work(&acm->waker); | 213 | schedule_work(&acm->waker); |
217 | spin_unlock_irqrestore(&acm->write_lock, flags); | 214 | spin_unlock_irqrestore(&acm->write_lock, flags); |
218 | return 0; /* A white lie */ | 215 | return 0; /* A white lie */ |
219 | } | 216 | } |
220 | usb_mark_last_busy(acm->dev); | 217 | usb_mark_last_busy(acm->dev); |
221 | 218 | ||
222 | if (!acm_wb_is_used(acm, wbn)) { | ||
223 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | rc = acm_start_wb(acm, wb); | 219 | rc = acm_start_wb(acm, wb); |
228 | spin_unlock_irqrestore(&acm->write_lock, flags); | 220 | spin_unlock_irqrestore(&acm->write_lock, flags); |
229 | 221 | ||
@@ -488,22 +480,28 @@ urbs: | |||
488 | /* data interface wrote those outgoing bytes */ | 480 | /* data interface wrote those outgoing bytes */ |
489 | static void acm_write_bulk(struct urb *urb) | 481 | static void acm_write_bulk(struct urb *urb) |
490 | { | 482 | { |
491 | struct acm *acm; | ||
492 | struct acm_wb *wb = urb->context; | 483 | struct acm_wb *wb = urb->context; |
484 | struct acm *acm = wb->instance; | ||
493 | 485 | ||
494 | dbg("Entering acm_write_bulk with status %d", urb->status); | 486 | if (verbose || urb->status |
487 | || (urb->actual_length != urb->transfer_buffer_length)) | ||
488 | dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n", | ||
489 | urb->actual_length, | ||
490 | urb->transfer_buffer_length, | ||
491 | urb->status); | ||
495 | 492 | ||
496 | acm = wb->instance; | ||
497 | acm_write_done(acm, wb); | 493 | acm_write_done(acm, wb); |
498 | if (ACM_READY(acm)) | 494 | if (ACM_READY(acm)) |
499 | schedule_work(&acm->work); | 495 | schedule_work(&acm->work); |
496 | else | ||
497 | wake_up_interruptible(&acm->drain_wait); | ||
500 | } | 498 | } |
501 | 499 | ||
502 | static void acm_softint(struct work_struct *work) | 500 | static void acm_softint(struct work_struct *work) |
503 | { | 501 | { |
504 | struct acm *acm = container_of(work, struct acm, work); | 502 | struct acm *acm = container_of(work, struct acm, work); |
505 | dbg("Entering acm_softint."); | 503 | |
506 | 504 | dev_vdbg(&acm->data->dev, "tx work\n"); | |
507 | if (!ACM_READY(acm)) | 505 | if (!ACM_READY(acm)) |
508 | return; | 506 | return; |
509 | tty_wakeup(acm->tty); | 507 | tty_wakeup(acm->tty); |
@@ -512,7 +510,6 @@ static void acm_softint(struct work_struct *work) | |||
512 | static void acm_waker(struct work_struct *waker) | 510 | static void acm_waker(struct work_struct *waker) |
513 | { | 511 | { |
514 | struct acm *acm = container_of(waker, struct acm, waker); | 512 | struct acm *acm = container_of(waker, struct acm, waker); |
515 | long flags; | ||
516 | int rv; | 513 | int rv; |
517 | 514 | ||
518 | rv = usb_autopm_get_interface(acm->control); | 515 | rv = usb_autopm_get_interface(acm->control); |
@@ -524,9 +521,6 @@ static void acm_waker(struct work_struct *waker) | |||
524 | acm_start_wb(acm, acm->delayed_wb); | 521 | acm_start_wb(acm, acm->delayed_wb); |
525 | acm->delayed_wb = NULL; | 522 | acm->delayed_wb = NULL; |
526 | } | 523 | } |
527 | spin_lock_irqsave(&acm->write_lock, flags); | ||
528 | acm->write_ready = acm->old_ready; | ||
529 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
530 | usb_autopm_put_interface(acm->control); | 524 | usb_autopm_put_interface(acm->control); |
531 | } | 525 | } |
532 | 526 | ||
@@ -595,8 +589,8 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp) | |||
595 | tasklet_schedule(&acm->urb_task); | 589 | tasklet_schedule(&acm->urb_task); |
596 | 590 | ||
597 | done: | 591 | done: |
598 | err_out: | ||
599 | mutex_unlock(&acm->mutex); | 592 | mutex_unlock(&acm->mutex); |
593 | err_out: | ||
600 | mutex_unlock(&open_mutex); | 594 | mutex_unlock(&open_mutex); |
601 | return rv; | 595 | return rv; |
602 | 596 | ||
@@ -628,6 +622,8 @@ static void acm_tty_unregister(struct acm *acm) | |||
628 | kfree(acm); | 622 | kfree(acm); |
629 | } | 623 | } |
630 | 624 | ||
625 | static int acm_tty_chars_in_buffer(struct tty_struct *tty); | ||
626 | |||
631 | static void acm_tty_close(struct tty_struct *tty, struct file *filp) | 627 | static void acm_tty_close(struct tty_struct *tty, struct file *filp) |
632 | { | 628 | { |
633 | struct acm *acm = tty->driver_data; | 629 | struct acm *acm = tty->driver_data; |
@@ -642,6 +638,13 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) | |||
642 | if (acm->dev) { | 638 | if (acm->dev) { |
643 | usb_autopm_get_interface(acm->control); | 639 | usb_autopm_get_interface(acm->control); |
644 | acm_set_control(acm, acm->ctrlout = 0); | 640 | acm_set_control(acm, acm->ctrlout = 0); |
641 | |||
642 | /* try letting the last writes drain naturally */ | ||
643 | wait_event_interruptible_timeout(acm->drain_wait, | ||
644 | (ACM_NW == acm_wb_is_avail(acm)) | ||
645 | || !acm->dev, | ||
646 | ACM_CLOSE_TIMEOUT * HZ); | ||
647 | |||
645 | usb_kill_urb(acm->ctrlurb); | 648 | usb_kill_urb(acm->ctrlurb); |
646 | for (i = 0; i < ACM_NW; i++) | 649 | for (i = 0; i < ACM_NW; i++) |
647 | usb_kill_urb(acm->wb[i].urb); | 650 | usb_kill_urb(acm->wb[i].urb); |
@@ -697,7 +700,7 @@ static int acm_tty_write_room(struct tty_struct *tty) | |||
697 | * Do not let the line discipline know that we have a reserve, | 700 | * Do not let the line discipline know that we have a reserve, |
698 | * or it might get too enthusiastic. | 701 | * or it might get too enthusiastic. |
699 | */ | 702 | */ |
700 | return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0; | 703 | return acm_wb_is_avail(acm) ? acm->writesize : 0; |
701 | } | 704 | } |
702 | 705 | ||
703 | static int acm_tty_chars_in_buffer(struct tty_struct *tty) | 706 | static int acm_tty_chars_in_buffer(struct tty_struct *tty) |
@@ -1072,11 +1075,11 @@ skip_normal_probe: | |||
1072 | acm->urb_task.data = (unsigned long) acm; | 1075 | acm->urb_task.data = (unsigned long) acm; |
1073 | INIT_WORK(&acm->work, acm_softint); | 1076 | INIT_WORK(&acm->work, acm_softint); |
1074 | INIT_WORK(&acm->waker, acm_waker); | 1077 | INIT_WORK(&acm->waker, acm_waker); |
1078 | init_waitqueue_head(&acm->drain_wait); | ||
1075 | spin_lock_init(&acm->throttle_lock); | 1079 | spin_lock_init(&acm->throttle_lock); |
1076 | spin_lock_init(&acm->write_lock); | 1080 | spin_lock_init(&acm->write_lock); |
1077 | spin_lock_init(&acm->read_lock); | 1081 | spin_lock_init(&acm->read_lock); |
1078 | mutex_init(&acm->mutex); | 1082 | mutex_init(&acm->mutex); |
1079 | acm->write_ready = 1; | ||
1080 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); | 1083 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); |
1081 | 1084 | ||
1082 | buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); | 1085 | buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); |
@@ -1108,9 +1111,11 @@ skip_normal_probe: | |||
1108 | rcv->instance = acm; | 1111 | rcv->instance = acm; |
1109 | } | 1112 | } |
1110 | for (i = 0; i < num_rx_buf; i++) { | 1113 | for (i = 0; i < num_rx_buf; i++) { |
1111 | struct acm_rb *buf = &(acm->rb[i]); | 1114 | struct acm_rb *rb = &(acm->rb[i]); |
1112 | 1115 | ||
1113 | if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) { | 1116 | rb->base = usb_buffer_alloc(acm->dev, readsize, |
1117 | GFP_KERNEL, &rb->dma); | ||
1118 | if (!rb->base) { | ||
1114 | dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); | 1119 | dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); |
1115 | goto alloc_fail7; | 1120 | goto alloc_fail7; |
1116 | } | 1121 | } |
@@ -1172,6 +1177,7 @@ skip_countries: | |||
1172 | acm_set_line(acm, &acm->line); | 1177 | acm_set_line(acm, &acm->line); |
1173 | 1178 | ||
1174 | usb_driver_claim_interface(&acm_driver, data_interface, acm); | 1179 | usb_driver_claim_interface(&acm_driver, data_interface, acm); |
1180 | usb_set_intfdata(data_interface, acm); | ||
1175 | 1181 | ||
1176 | usb_get_intf(control_interface); | 1182 | usb_get_intf(control_interface); |
1177 | tty_register_device(acm_tty_driver, minor, &control_interface->dev); | 1183 | tty_register_device(acm_tty_driver, minor, &control_interface->dev); |
@@ -1221,11 +1227,11 @@ static void acm_disconnect(struct usb_interface *intf) | |||
1221 | struct acm *acm = usb_get_intfdata(intf); | 1227 | struct acm *acm = usb_get_intfdata(intf); |
1222 | struct usb_device *usb_dev = interface_to_usbdev(intf); | 1228 | struct usb_device *usb_dev = interface_to_usbdev(intf); |
1223 | 1229 | ||
1224 | mutex_lock(&open_mutex); | 1230 | /* sibling interface is already cleaning up */ |
1225 | if (!acm || !acm->dev) { | 1231 | if (!acm) |
1226 | mutex_unlock(&open_mutex); | ||
1227 | return; | 1232 | return; |
1228 | } | 1233 | |
1234 | mutex_lock(&open_mutex); | ||
1229 | if (acm->country_codes){ | 1235 | if (acm->country_codes){ |
1230 | device_remove_file(&acm->control->dev, | 1236 | device_remove_file(&acm->control->dev, |
1231 | &dev_attr_wCountryCodes); | 1237 | &dev_attr_wCountryCodes); |
@@ -1356,6 +1362,9 @@ static struct usb_device_id acm_ids[] = { | |||
1356 | { USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */ | 1362 | { USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */ |
1357 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | 1363 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ |
1358 | }, | 1364 | }, |
1365 | { USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */ | ||
1366 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | ||
1367 | }, | ||
1359 | 1368 | ||
1360 | /* control interfaces with various AT-command sets */ | 1369 | /* control interfaces with various AT-command sets */ |
1361 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, | 1370 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, |
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 85c3aaaab7c5..1f95e7aa1b66 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
@@ -106,8 +106,6 @@ struct acm { | |||
106 | struct list_head spare_read_bufs; | 106 | struct list_head spare_read_bufs; |
107 | struct list_head filled_read_bufs; | 107 | struct list_head filled_read_bufs; |
108 | int write_used; /* number of non-empty write buffers */ | 108 | int write_used; /* number of non-empty write buffers */ |
109 | int write_ready; /* write urb is not running */ | ||
110 | int old_ready; | ||
111 | int processing; | 109 | int processing; |
112 | int transmitting; | 110 | int transmitting; |
113 | spinlock_t write_lock; | 111 | spinlock_t write_lock; |
@@ -115,6 +113,7 @@ struct acm { | |||
115 | struct usb_cdc_line_coding line; /* bits, stop, parity */ | 113 | struct usb_cdc_line_coding line; /* bits, stop, parity */ |
116 | struct work_struct work; /* work queue entry for line discipline waking up */ | 114 | struct work_struct work; /* work queue entry for line discipline waking up */ |
117 | struct work_struct waker; | 115 | struct work_struct waker; |
116 | wait_queue_head_t drain_wait; /* close processing */ | ||
118 | struct tasklet_struct urb_task; /* rx processing */ | 117 | struct tasklet_struct urb_task; /* rx processing */ |
119 | spinlock_t throttle_lock; /* synchronize throttling and read callback */ | 118 | spinlock_t throttle_lock; /* synchronize throttling and read callback */ |
120 | unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ | 119 | unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index ddb54e14a5c5..5a7fa6f09958 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -230,6 +230,13 @@ static int usb_probe_interface(struct device *dev) | |||
230 | */ | 230 | */ |
231 | intf->pm_usage_cnt = !(driver->supports_autosuspend); | 231 | intf->pm_usage_cnt = !(driver->supports_autosuspend); |
232 | 232 | ||
233 | /* Carry out a deferred switch to altsetting 0 */ | ||
234 | if (intf->needs_altsetting0) { | ||
235 | usb_set_interface(udev, intf->altsetting[0]. | ||
236 | desc.bInterfaceNumber, 0); | ||
237 | intf->needs_altsetting0 = 0; | ||
238 | } | ||
239 | |||
233 | error = driver->probe(intf, id); | 240 | error = driver->probe(intf, id); |
234 | if (error) { | 241 | if (error) { |
235 | mark_quiesced(intf); | 242 | mark_quiesced(intf); |
@@ -266,8 +273,17 @@ static int usb_unbind_interface(struct device *dev) | |||
266 | 273 | ||
267 | driver->disconnect(intf); | 274 | driver->disconnect(intf); |
268 | 275 | ||
269 | /* reset other interface state */ | 276 | /* Reset other interface state. |
270 | usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0); | 277 | * We cannot do a Set-Interface if the device is suspended or |
278 | * if it is prepared for a system sleep (since installing a new | ||
279 | * altsetting means creating new endpoint device entries). | ||
280 | * When either of these happens, defer the Set-Interface. | ||
281 | */ | ||
282 | if (!error && intf->dev.power.status == DPM_ON) | ||
283 | usb_set_interface(udev, intf->altsetting[0]. | ||
284 | desc.bInterfaceNumber, 0); | ||
285 | else | ||
286 | intf->needs_altsetting0 = 1; | ||
271 | usb_set_intfdata(intf, NULL); | 287 | usb_set_intfdata(intf, NULL); |
272 | 288 | ||
273 | intf->condition = USB_INTERFACE_UNBOUND; | 289 | intf->condition = USB_INTERFACE_UNBOUND; |
@@ -774,7 +790,6 @@ void usb_deregister(struct usb_driver *driver) | |||
774 | } | 790 | } |
775 | EXPORT_SYMBOL_GPL(usb_deregister); | 791 | EXPORT_SYMBOL_GPL(usb_deregister); |
776 | 792 | ||
777 | |||
778 | /* Forced unbinding of a USB interface driver, either because | 793 | /* Forced unbinding of a USB interface driver, either because |
779 | * it doesn't support pre_reset/post_reset/reset_resume or | 794 | * it doesn't support pre_reset/post_reset/reset_resume or |
780 | * because it doesn't support suspend/resume. | 795 | * because it doesn't support suspend/resume. |
@@ -799,7 +814,8 @@ void usb_forced_unbind_intf(struct usb_interface *intf) | |||
799 | * The caller must hold @intf's device's lock, but not its pm_mutex | 814 | * The caller must hold @intf's device's lock, but not its pm_mutex |
800 | * and not @intf->dev.sem. | 815 | * and not @intf->dev.sem. |
801 | * | 816 | * |
802 | * FIXME: The caller must block system sleep transitions. | 817 | * Note: Rebinds will be skipped if a system sleep transition is in |
818 | * progress and the PM "complete" callback hasn't occurred yet. | ||
803 | */ | 819 | */ |
804 | void usb_rebind_intf(struct usb_interface *intf) | 820 | void usb_rebind_intf(struct usb_interface *intf) |
805 | { | 821 | { |
@@ -815,12 +831,16 @@ void usb_rebind_intf(struct usb_interface *intf) | |||
815 | } | 831 | } |
816 | 832 | ||
817 | /* Try to rebind the interface */ | 833 | /* Try to rebind the interface */ |
818 | intf->needs_binding = 0; | 834 | if (intf->dev.power.status == DPM_ON) { |
819 | rc = device_attach(&intf->dev); | 835 | intf->needs_binding = 0; |
820 | if (rc < 0) | 836 | rc = device_attach(&intf->dev); |
821 | dev_warn(&intf->dev, "rebind failed: %d\n", rc); | 837 | if (rc < 0) |
838 | dev_warn(&intf->dev, "rebind failed: %d\n", rc); | ||
839 | } | ||
822 | } | 840 | } |
823 | 841 | ||
842 | #ifdef CONFIG_PM | ||
843 | |||
824 | #define DO_UNBIND 0 | 844 | #define DO_UNBIND 0 |
825 | #define DO_REBIND 1 | 845 | #define DO_REBIND 1 |
826 | 846 | ||
@@ -828,7 +848,6 @@ void usb_rebind_intf(struct usb_interface *intf) | |||
828 | * or rebind interfaces that have been unbound, according to @action. | 848 | * or rebind interfaces that have been unbound, according to @action. |
829 | * | 849 | * |
830 | * The caller must hold @udev's device lock. | 850 | * The caller must hold @udev's device lock. |
831 | * FIXME: For rebinds, the caller must block system sleep transitions. | ||
832 | */ | 851 | */ |
833 | static void do_unbind_rebind(struct usb_device *udev, int action) | 852 | static void do_unbind_rebind(struct usb_device *udev, int action) |
834 | { | 853 | { |
@@ -850,30 +869,14 @@ static void do_unbind_rebind(struct usb_device *udev, int action) | |||
850 | } | 869 | } |
851 | break; | 870 | break; |
852 | case DO_REBIND: | 871 | case DO_REBIND: |
853 | if (intf->needs_binding) { | 872 | if (intf->needs_binding) |
854 | |||
855 | /* FIXME: The next line is needed because we are going to probe | ||
856 | * the interface, but as far as the PM core is concerned the | ||
857 | * interface is still suspended. The problem wouldn't exist | ||
858 | * if we could rebind the interface during the interface's own | ||
859 | * resume() call, but at the time the usb_device isn't locked! | ||
860 | * | ||
861 | * The real solution will be to carry this out during the device's | ||
862 | * complete() callback. Until that is implemented, we have to | ||
863 | * use this hack. | ||
864 | */ | ||
865 | // intf->dev.power.sleeping = 0; | ||
866 | |||
867 | usb_rebind_intf(intf); | 873 | usb_rebind_intf(intf); |
868 | } | ||
869 | break; | 874 | break; |
870 | } | 875 | } |
871 | } | 876 | } |
872 | } | 877 | } |
873 | } | 878 | } |
874 | 879 | ||
875 | #ifdef CONFIG_PM | ||
876 | |||
877 | /* Caller has locked udev's pm_mutex */ | 880 | /* Caller has locked udev's pm_mutex */ |
878 | static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) | 881 | static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) |
879 | { | 882 | { |
@@ -927,14 +930,14 @@ static int usb_resume_device(struct usb_device *udev) | |||
927 | } | 930 | } |
928 | 931 | ||
929 | /* Caller has locked intf's usb_device's pm mutex */ | 932 | /* Caller has locked intf's usb_device's pm mutex */ |
930 | static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg) | 933 | static int usb_suspend_interface(struct usb_device *udev, |
934 | struct usb_interface *intf, pm_message_t msg) | ||
931 | { | 935 | { |
932 | struct usb_driver *driver; | 936 | struct usb_driver *driver; |
933 | int status = 0; | 937 | int status = 0; |
934 | 938 | ||
935 | /* with no hardware, USB interfaces only use FREEZE and ON states */ | 939 | /* with no hardware, USB interfaces only use FREEZE and ON states */ |
936 | if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED || | 940 | if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf)) |
937 | !is_active(intf)) | ||
938 | goto done; | 941 | goto done; |
939 | 942 | ||
940 | if (intf->condition == USB_INTERFACE_UNBOUND) /* This can't happen */ | 943 | if (intf->condition == USB_INTERFACE_UNBOUND) /* This can't happen */ |
@@ -945,7 +948,7 @@ static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg) | |||
945 | status = driver->suspend(intf, msg); | 948 | status = driver->suspend(intf, msg); |
946 | if (status == 0) | 949 | if (status == 0) |
947 | mark_quiesced(intf); | 950 | mark_quiesced(intf); |
948 | else if (!interface_to_usbdev(intf)->auto_pm) | 951 | else if (!udev->auto_pm) |
949 | dev_err(&intf->dev, "%s error %d\n", | 952 | dev_err(&intf->dev, "%s error %d\n", |
950 | "suspend", status); | 953 | "suspend", status); |
951 | } else { | 954 | } else { |
@@ -962,13 +965,13 @@ static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg) | |||
962 | } | 965 | } |
963 | 966 | ||
964 | /* Caller has locked intf's usb_device's pm_mutex */ | 967 | /* Caller has locked intf's usb_device's pm_mutex */ |
965 | static int usb_resume_interface(struct usb_interface *intf, int reset_resume) | 968 | static int usb_resume_interface(struct usb_device *udev, |
969 | struct usb_interface *intf, int reset_resume) | ||
966 | { | 970 | { |
967 | struct usb_driver *driver; | 971 | struct usb_driver *driver; |
968 | int status = 0; | 972 | int status = 0; |
969 | 973 | ||
970 | if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED || | 974 | if (udev->state == USB_STATE_NOTATTACHED || is_active(intf)) |
971 | is_active(intf)) | ||
972 | goto done; | 975 | goto done; |
973 | 976 | ||
974 | /* Don't let autoresume interfere with unbinding */ | 977 | /* Don't let autoresume interfere with unbinding */ |
@@ -976,8 +979,17 @@ static int usb_resume_interface(struct usb_interface *intf, int reset_resume) | |||
976 | goto done; | 979 | goto done; |
977 | 980 | ||
978 | /* Can't resume it if it doesn't have a driver. */ | 981 | /* Can't resume it if it doesn't have a driver. */ |
979 | if (intf->condition == USB_INTERFACE_UNBOUND) | 982 | if (intf->condition == USB_INTERFACE_UNBOUND) { |
983 | |||
984 | /* Carry out a deferred switch to altsetting 0 */ | ||
985 | if (intf->needs_altsetting0 && | ||
986 | intf->dev.power.status == DPM_ON) { | ||
987 | usb_set_interface(udev, intf->altsetting[0]. | ||
988 | desc.bInterfaceNumber, 0); | ||
989 | intf->needs_altsetting0 = 0; | ||
990 | } | ||
980 | goto done; | 991 | goto done; |
992 | } | ||
981 | 993 | ||
982 | /* Don't resume if the interface is marked for rebinding */ | 994 | /* Don't resume if the interface is marked for rebinding */ |
983 | if (intf->needs_binding) | 995 | if (intf->needs_binding) |
@@ -1152,7 +1164,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1152 | if (udev->actconfig) { | 1164 | if (udev->actconfig) { |
1153 | for (; i < udev->actconfig->desc.bNumInterfaces; i++) { | 1165 | for (; i < udev->actconfig->desc.bNumInterfaces; i++) { |
1154 | intf = udev->actconfig->interface[i]; | 1166 | intf = udev->actconfig->interface[i]; |
1155 | status = usb_suspend_interface(intf, msg); | 1167 | status = usb_suspend_interface(udev, intf, msg); |
1156 | if (status != 0) | 1168 | if (status != 0) |
1157 | break; | 1169 | break; |
1158 | } | 1170 | } |
@@ -1164,7 +1176,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1164 | if (status != 0) { | 1176 | if (status != 0) { |
1165 | while (--i >= 0) { | 1177 | while (--i >= 0) { |
1166 | intf = udev->actconfig->interface[i]; | 1178 | intf = udev->actconfig->interface[i]; |
1167 | usb_resume_interface(intf, 0); | 1179 | usb_resume_interface(udev, intf, 0); |
1168 | } | 1180 | } |
1169 | 1181 | ||
1170 | /* Try another autosuspend when the interfaces aren't busy */ | 1182 | /* Try another autosuspend when the interfaces aren't busy */ |
@@ -1277,7 +1289,7 @@ static int usb_resume_both(struct usb_device *udev) | |||
1277 | if (status == 0 && udev->actconfig) { | 1289 | if (status == 0 && udev->actconfig) { |
1278 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { | 1290 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { |
1279 | intf = udev->actconfig->interface[i]; | 1291 | intf = udev->actconfig->interface[i]; |
1280 | usb_resume_interface(intf, udev->reset_resume); | 1292 | usb_resume_interface(udev, intf, udev->reset_resume); |
1281 | } | 1293 | } |
1282 | } | 1294 | } |
1283 | 1295 | ||
@@ -1606,12 +1618,10 @@ int usb_external_resume_device(struct usb_device *udev) | |||
1606 | return status; | 1618 | return status; |
1607 | } | 1619 | } |
1608 | 1620 | ||
1609 | static int usb_suspend(struct device *dev, pm_message_t message) | 1621 | int usb_suspend(struct device *dev, pm_message_t message) |
1610 | { | 1622 | { |
1611 | struct usb_device *udev; | 1623 | struct usb_device *udev; |
1612 | 1624 | ||
1613 | if (!is_usb_device(dev)) /* Ignore PM for interfaces */ | ||
1614 | return 0; | ||
1615 | udev = to_usb_device(dev); | 1625 | udev = to_usb_device(dev); |
1616 | 1626 | ||
1617 | /* If udev is already suspended, we can skip this suspend and | 1627 | /* If udev is already suspended, we can skip this suspend and |
@@ -1630,12 +1640,10 @@ static int usb_suspend(struct device *dev, pm_message_t message) | |||
1630 | return usb_external_suspend_device(udev, message); | 1640 | return usb_external_suspend_device(udev, message); |
1631 | } | 1641 | } |
1632 | 1642 | ||
1633 | static int usb_resume(struct device *dev) | 1643 | int usb_resume(struct device *dev) |
1634 | { | 1644 | { |
1635 | struct usb_device *udev; | 1645 | struct usb_device *udev; |
1636 | 1646 | ||
1637 | if (!is_usb_device(dev)) /* Ignore PM for interfaces */ | ||
1638 | return 0; | ||
1639 | udev = to_usb_device(dev); | 1647 | udev = to_usb_device(dev); |
1640 | 1648 | ||
1641 | /* If udev->skip_sys_resume is set then udev was already suspended | 1649 | /* If udev->skip_sys_resume is set then udev was already suspended |
@@ -1647,17 +1655,10 @@ static int usb_resume(struct device *dev) | |||
1647 | return usb_external_resume_device(udev); | 1655 | return usb_external_resume_device(udev); |
1648 | } | 1656 | } |
1649 | 1657 | ||
1650 | #else | ||
1651 | |||
1652 | #define usb_suspend NULL | ||
1653 | #define usb_resume NULL | ||
1654 | |||
1655 | #endif /* CONFIG_PM */ | 1658 | #endif /* CONFIG_PM */ |
1656 | 1659 | ||
1657 | struct bus_type usb_bus_type = { | 1660 | struct bus_type usb_bus_type = { |
1658 | .name = "usb", | 1661 | .name = "usb", |
1659 | .match = usb_device_match, | 1662 | .match = usb_device_match, |
1660 | .uevent = usb_uevent, | 1663 | .uevent = usb_uevent, |
1661 | .suspend = usb_suspend, | ||
1662 | .resume = usb_resume, | ||
1663 | }; | 1664 | }; |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index f7bfd72ef115..8abd4e59bf4a 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -924,15 +924,6 @@ static int register_root_hub(struct usb_hcd *hcd) | |||
924 | return retval; | 924 | return retval; |
925 | } | 925 | } |
926 | 926 | ||
927 | void usb_enable_root_hub_irq (struct usb_bus *bus) | ||
928 | { | ||
929 | struct usb_hcd *hcd; | ||
930 | |||
931 | hcd = container_of (bus, struct usb_hcd, self); | ||
932 | if (hcd->driver->hub_irq_enable && hcd->state != HC_STATE_HALT) | ||
933 | hcd->driver->hub_irq_enable (hcd); | ||
934 | } | ||
935 | |||
936 | 927 | ||
937 | /*-------------------------------------------------------------------------*/ | 928 | /*-------------------------------------------------------------------------*/ |
938 | 929 | ||
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h index 5b0b59b0d89b..e710ce04e228 100644 --- a/drivers/usb/core/hcd.h +++ b/drivers/usb/core/hcd.h | |||
@@ -212,8 +212,6 @@ struct hc_driver { | |||
212 | int (*bus_suspend)(struct usb_hcd *); | 212 | int (*bus_suspend)(struct usb_hcd *); |
213 | int (*bus_resume)(struct usb_hcd *); | 213 | int (*bus_resume)(struct usb_hcd *); |
214 | int (*start_port_reset)(struct usb_hcd *, unsigned port_num); | 214 | int (*start_port_reset)(struct usb_hcd *, unsigned port_num); |
215 | void (*hub_irq_enable)(struct usb_hcd *); | ||
216 | /* Needed only if port-change IRQs are level-triggered */ | ||
217 | 215 | ||
218 | /* force handover of high-speed port to full-speed companion */ | 216 | /* force handover of high-speed port to full-speed companion */ |
219 | void (*relinquish_port)(struct usb_hcd *, int); | 217 | void (*relinquish_port)(struct usb_hcd *, int); |
@@ -379,8 +377,6 @@ extern struct list_head usb_bus_list; | |||
379 | extern struct mutex usb_bus_list_lock; | 377 | extern struct mutex usb_bus_list_lock; |
380 | extern wait_queue_head_t usb_kill_urb_queue; | 378 | extern wait_queue_head_t usb_kill_urb_queue; |
381 | 379 | ||
382 | extern void usb_enable_root_hub_irq(struct usb_bus *bus); | ||
383 | |||
384 | extern int usb_find_interface_driver(struct usb_device *dev, | 380 | extern int usb_find_interface_driver(struct usb_device *dev, |
385 | struct usb_interface *interface); | 381 | struct usb_interface *interface); |
386 | 382 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 107e1d25ddec..6a5cb018383d 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -2102,8 +2102,6 @@ int usb_port_resume(struct usb_device *udev) | |||
2102 | } | 2102 | } |
2103 | 2103 | ||
2104 | clear_bit(port1, hub->busy_bits); | 2104 | clear_bit(port1, hub->busy_bits); |
2105 | if (!hub->hdev->parent && !hub->busy_bits[0]) | ||
2106 | usb_enable_root_hub_irq(hub->hdev->bus); | ||
2107 | 2105 | ||
2108 | status = check_port_resume_type(udev, | 2106 | status = check_port_resume_type(udev, |
2109 | hub, port1, status, portchange, portstatus); | 2107 | hub, port1, status, portchange, portstatus); |
@@ -3081,11 +3079,6 @@ static void hub_events(void) | |||
3081 | } | 3079 | } |
3082 | } | 3080 | } |
3083 | 3081 | ||
3084 | /* If this is a root hub, tell the HCD it's okay to | ||
3085 | * re-enable port-change interrupts now. */ | ||
3086 | if (!hdev->parent && !hub->busy_bits[0]) | ||
3087 | usb_enable_root_hub_irq(hdev->bus); | ||
3088 | |||
3089 | loop_autopm: | 3082 | loop_autopm: |
3090 | /* Allow autosuspend if we're not going to run again */ | 3083 | /* Allow autosuspend if we're not going to run again */ |
3091 | if (list_empty(&hub->event_list)) | 3084 | if (list_empty(&hub->event_list)) |
@@ -3311,8 +3304,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) | |||
3311 | break; | 3304 | break; |
3312 | } | 3305 | } |
3313 | clear_bit(port1, parent_hub->busy_bits); | 3306 | clear_bit(port1, parent_hub->busy_bits); |
3314 | if (!parent_hdev->parent && !parent_hub->busy_bits[0]) | ||
3315 | usb_enable_root_hub_irq(parent_hdev->bus); | ||
3316 | 3307 | ||
3317 | if (ret < 0) | 3308 | if (ret < 0) |
3318 | goto re_enumerate; | 3309 | goto re_enumerate; |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 586d6f1376cf..286b4431a097 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) | |||
1091 | continue; | 1091 | continue; |
1092 | dev_dbg(&dev->dev, "unregistering interface %s\n", | 1092 | dev_dbg(&dev->dev, "unregistering interface %s\n", |
1093 | dev_name(&interface->dev)); | 1093 | dev_name(&interface->dev)); |
1094 | device_del(&interface->dev); | ||
1095 | usb_remove_sysfs_intf_files(interface); | 1094 | usb_remove_sysfs_intf_files(interface); |
1095 | device_del(&interface->dev); | ||
1096 | } | 1096 | } |
1097 | 1097 | ||
1098 | /* Now that the interfaces are unbound, nobody should | 1098 | /* Now that the interfaces are unbound, nobody should |
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index c0b1ae25ae2a..47111e88f791 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c | |||
@@ -601,15 +601,20 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); | |||
601 | void usb_unlink_anchored_urbs(struct usb_anchor *anchor) | 601 | void usb_unlink_anchored_urbs(struct usb_anchor *anchor) |
602 | { | 602 | { |
603 | struct urb *victim; | 603 | struct urb *victim; |
604 | unsigned long flags; | ||
604 | 605 | ||
605 | spin_lock_irq(&anchor->lock); | 606 | spin_lock_irqsave(&anchor->lock, flags); |
606 | while (!list_empty(&anchor->urb_list)) { | 607 | while (!list_empty(&anchor->urb_list)) { |
607 | victim = list_entry(anchor->urb_list.prev, struct urb, | 608 | victim = list_entry(anchor->urb_list.prev, struct urb, |
608 | anchor_list); | 609 | anchor_list); |
610 | usb_get_urb(victim); | ||
611 | spin_unlock_irqrestore(&anchor->lock, flags); | ||
609 | /* this will unanchor the URB */ | 612 | /* this will unanchor the URB */ |
610 | usb_unlink_urb(victim); | 613 | usb_unlink_urb(victim); |
614 | usb_put_urb(victim); | ||
615 | spin_lock_irqsave(&anchor->lock, flags); | ||
611 | } | 616 | } |
612 | spin_unlock_irq(&anchor->lock); | 617 | spin_unlock_irqrestore(&anchor->lock, flags); |
613 | } | 618 | } |
614 | EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs); | 619 | EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs); |
615 | 620 | ||
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 84fcaa6a21ec..be1fa0723f2c 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -219,12 +219,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
219 | } | 219 | } |
220 | #endif /* CONFIG_HOTPLUG */ | 220 | #endif /* CONFIG_HOTPLUG */ |
221 | 221 | ||
222 | struct device_type usb_device_type = { | ||
223 | .name = "usb_device", | ||
224 | .release = usb_release_dev, | ||
225 | .uevent = usb_dev_uevent, | ||
226 | }; | ||
227 | |||
228 | #ifdef CONFIG_PM | 222 | #ifdef CONFIG_PM |
229 | 223 | ||
230 | static int ksuspend_usb_init(void) | 224 | static int ksuspend_usb_init(void) |
@@ -244,13 +238,80 @@ static void ksuspend_usb_cleanup(void) | |||
244 | destroy_workqueue(ksuspend_usb_wq); | 238 | destroy_workqueue(ksuspend_usb_wq); |
245 | } | 239 | } |
246 | 240 | ||
241 | /* USB device Power-Management thunks. | ||
242 | * There's no need to distinguish here between quiescing a USB device | ||
243 | * and powering it down; the generic_suspend() routine takes care of | ||
244 | * it by skipping the usb_port_suspend() call for a quiesce. And for | ||
245 | * USB interfaces there's no difference at all. | ||
246 | */ | ||
247 | |||
248 | static int usb_dev_prepare(struct device *dev) | ||
249 | { | ||
250 | return 0; /* Implement eventually? */ | ||
251 | } | ||
252 | |||
253 | static void usb_dev_complete(struct device *dev) | ||
254 | { | ||
255 | /* Currently used only for rebinding interfaces */ | ||
256 | usb_resume(dev); /* Implement eventually? */ | ||
257 | } | ||
258 | |||
259 | static int usb_dev_suspend(struct device *dev) | ||
260 | { | ||
261 | return usb_suspend(dev, PMSG_SUSPEND); | ||
262 | } | ||
263 | |||
264 | static int usb_dev_resume(struct device *dev) | ||
265 | { | ||
266 | return usb_resume(dev); | ||
267 | } | ||
268 | |||
269 | static int usb_dev_freeze(struct device *dev) | ||
270 | { | ||
271 | return usb_suspend(dev, PMSG_FREEZE); | ||
272 | } | ||
273 | |||
274 | static int usb_dev_thaw(struct device *dev) | ||
275 | { | ||
276 | return usb_resume(dev); | ||
277 | } | ||
278 | |||
279 | static int usb_dev_poweroff(struct device *dev) | ||
280 | { | ||
281 | return usb_suspend(dev, PMSG_HIBERNATE); | ||
282 | } | ||
283 | |||
284 | static int usb_dev_restore(struct device *dev) | ||
285 | { | ||
286 | return usb_resume(dev); | ||
287 | } | ||
288 | |||
289 | static struct pm_ops usb_device_pm_ops = { | ||
290 | .prepare = usb_dev_prepare, | ||
291 | .complete = usb_dev_complete, | ||
292 | .suspend = usb_dev_suspend, | ||
293 | .resume = usb_dev_resume, | ||
294 | .freeze = usb_dev_freeze, | ||
295 | .thaw = usb_dev_thaw, | ||
296 | .poweroff = usb_dev_poweroff, | ||
297 | .restore = usb_dev_restore, | ||
298 | }; | ||
299 | |||
247 | #else | 300 | #else |
248 | 301 | ||
249 | #define ksuspend_usb_init() 0 | 302 | #define ksuspend_usb_init() 0 |
250 | #define ksuspend_usb_cleanup() do {} while (0) | 303 | #define ksuspend_usb_cleanup() do {} while (0) |
304 | #define usb_device_pm_ops (*(struct pm_ops *)0) | ||
251 | 305 | ||
252 | #endif /* CONFIG_PM */ | 306 | #endif /* CONFIG_PM */ |
253 | 307 | ||
308 | struct device_type usb_device_type = { | ||
309 | .name = "usb_device", | ||
310 | .release = usb_release_dev, | ||
311 | .uevent = usb_dev_uevent, | ||
312 | .pm = &usb_device_pm_ops, | ||
313 | }; | ||
314 | |||
254 | 315 | ||
255 | /* Returns 1 if @usb_bus is WUSB, 0 otherwise */ | 316 | /* Returns 1 if @usb_bus is WUSB, 0 otherwise */ |
256 | static unsigned usb_bus_is_wusb(struct usb_bus *bus) | 317 | static unsigned usb_bus_is_wusb(struct usb_bus *bus) |
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index d9a6e16dbf84..9a1a45ac3add 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h | |||
@@ -41,6 +41,9 @@ extern void usb_host_cleanup(void); | |||
41 | 41 | ||
42 | #ifdef CONFIG_PM | 42 | #ifdef CONFIG_PM |
43 | 43 | ||
44 | extern int usb_suspend(struct device *dev, pm_message_t msg); | ||
45 | extern int usb_resume(struct device *dev); | ||
46 | |||
44 | extern void usb_autosuspend_work(struct work_struct *work); | 47 | extern void usb_autosuspend_work(struct work_struct *work); |
45 | extern int usb_port_suspend(struct usb_device *dev); | 48 | extern int usb_port_suspend(struct usb_device *dev); |
46 | extern int usb_port_resume(struct usb_device *dev); | 49 | extern int usb_port_resume(struct usb_device *dev); |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index c6a8c6b1116a..acc95b2ac6f8 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -284,6 +284,16 @@ config USB_LH7A40X | |||
284 | default USB_GADGET | 284 | default USB_GADGET |
285 | select USB_GADGET_SELECTED | 285 | select USB_GADGET_SELECTED |
286 | 286 | ||
287 | # built in ../musb along with host support | ||
288 | config USB_GADGET_MUSB_HDRC | ||
289 | boolean "Inventra HDRC USB Peripheral (TI, ...)" | ||
290 | depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) | ||
291 | select USB_GADGET_DUALSPEED | ||
292 | select USB_GADGET_SELECTED | ||
293 | help | ||
294 | This OTG-capable silicon IP is used in dual designs including | ||
295 | the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010. | ||
296 | |||
287 | config USB_GADGET_OMAP | 297 | config USB_GADGET_OMAP |
288 | boolean "OMAP USB Device Controller" | 298 | boolean "OMAP USB Device Controller" |
289 | depends on ARCH_OMAP | 299 | depends on ARCH_OMAP |
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c index 1500e1b3c302..abf8192f89e8 100644 --- a/drivers/usb/gadget/amd5536udc.c +++ b/drivers/usb/gadget/amd5536udc.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/module.h> | 44 | #include <linux/module.h> |
45 | #include <linux/pci.h> | 45 | #include <linux/pci.h> |
46 | #include <linux/kernel.h> | 46 | #include <linux/kernel.h> |
47 | #include <linux/version.h> | ||
48 | #include <linux/delay.h> | 47 | #include <linux/delay.h> |
49 | #include <linux/ioport.h> | 48 | #include <linux/ioport.h> |
50 | #include <linux/sched.h> | 49 | #include <linux/sched.h> |
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index 21d1406af9ee..7600a0c78753 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c | |||
@@ -542,13 +542,14 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req, | |||
542 | req->req.context = dum; | 542 | req->req.context = dum; |
543 | req->req.complete = fifo_complete; | 543 | req->req.complete = fifo_complete; |
544 | 544 | ||
545 | list_add_tail(&req->queue, &ep->queue); | ||
545 | spin_unlock (&dum->lock); | 546 | spin_unlock (&dum->lock); |
546 | _req->actual = _req->length; | 547 | _req->actual = _req->length; |
547 | _req->status = 0; | 548 | _req->status = 0; |
548 | _req->complete (_ep, _req); | 549 | _req->complete (_ep, _req); |
549 | spin_lock (&dum->lock); | 550 | spin_lock (&dum->lock); |
550 | } | 551 | } else |
551 | list_add_tail (&req->queue, &ep->queue); | 552 | list_add_tail(&req->queue, &ep->queue); |
552 | spin_unlock_irqrestore (&dum->lock, flags); | 553 | spin_unlock_irqrestore (&dum->lock, flags); |
553 | 554 | ||
554 | /* real hardware would likely enable transfers here, in case | 555 | /* real hardware would likely enable transfers here, in case |
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index d8faccf27895..5ee1590b8e9c 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c | |||
@@ -47,18 +47,37 @@ struct f_acm { | |||
47 | u8 ctrl_id, data_id; | 47 | u8 ctrl_id, data_id; |
48 | u8 port_num; | 48 | u8 port_num; |
49 | 49 | ||
50 | struct usb_descriptor_header **fs_function; | 50 | u8 pending; |
51 | |||
52 | /* lock is mostly for pending and notify_req ... they get accessed | ||
53 | * by callbacks both from tty (open/close/break) under its spinlock, | ||
54 | * and notify_req.complete() which can't use that lock. | ||
55 | */ | ||
56 | spinlock_t lock; | ||
57 | |||
51 | struct acm_ep_descs fs; | 58 | struct acm_ep_descs fs; |
52 | struct usb_descriptor_header **hs_function; | ||
53 | struct acm_ep_descs hs; | 59 | struct acm_ep_descs hs; |
54 | 60 | ||
55 | struct usb_ep *notify; | 61 | struct usb_ep *notify; |
56 | struct usb_endpoint_descriptor *notify_desc; | 62 | struct usb_endpoint_descriptor *notify_desc; |
63 | struct usb_request *notify_req; | ||
57 | 64 | ||
58 | struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ | 65 | struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ |
66 | |||
67 | /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */ | ||
59 | u16 port_handshake_bits; | 68 | u16 port_handshake_bits; |
60 | #define RS232_RTS (1 << 1) /* unused with full duplex */ | 69 | #define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */ |
61 | #define RS232_DTR (1 << 0) /* host is ready for data r/w */ | 70 | #define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */ |
71 | |||
72 | /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */ | ||
73 | u16 serial_state; | ||
74 | #define ACM_CTRL_OVERRUN (1 << 6) | ||
75 | #define ACM_CTRL_PARITY (1 << 5) | ||
76 | #define ACM_CTRL_FRAMING (1 << 4) | ||
77 | #define ACM_CTRL_RI (1 << 3) | ||
78 | #define ACM_CTRL_BRK (1 << 2) | ||
79 | #define ACM_CTRL_DSR (1 << 1) | ||
80 | #define ACM_CTRL_DCD (1 << 0) | ||
62 | }; | 81 | }; |
63 | 82 | ||
64 | static inline struct f_acm *func_to_acm(struct usb_function *f) | 83 | static inline struct f_acm *func_to_acm(struct usb_function *f) |
@@ -66,12 +85,17 @@ static inline struct f_acm *func_to_acm(struct usb_function *f) | |||
66 | return container_of(f, struct f_acm, port.func); | 85 | return container_of(f, struct f_acm, port.func); |
67 | } | 86 | } |
68 | 87 | ||
88 | static inline struct f_acm *port_to_acm(struct gserial *p) | ||
89 | { | ||
90 | return container_of(p, struct f_acm, port); | ||
91 | } | ||
92 | |||
69 | /*-------------------------------------------------------------------------*/ | 93 | /*-------------------------------------------------------------------------*/ |
70 | 94 | ||
71 | /* notification endpoint uses smallish and infrequent fixed-size messages */ | 95 | /* notification endpoint uses smallish and infrequent fixed-size messages */ |
72 | 96 | ||
73 | #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ | 97 | #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ |
74 | #define GS_NOTIFY_MAXPACKET 8 | 98 | #define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */ |
75 | 99 | ||
76 | /* interface and class descriptors: */ | 100 | /* interface and class descriptors: */ |
77 | 101 | ||
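The bump of GS_NOTIFY_MAXPACKET from 8 to 10 matches the size of a complete SerialState message: the 8-byte CDC notification header plus the 2-byte bitmap carried as data, which is what acm_cdc_notify() below builds (len = sizeof(*notify) + length). For reference, the header layout is roughly the following, assuming struct usb_cdc_notification from <linux/usb/cdc.h>:

struct usb_cdc_notification {		/* 8 bytes on the wire */
	__u8	bmRequestType;		/* USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE */
	__u8	bNotificationType;	/* USB_CDC_NOTIFY_SERIAL_STATE */
	__le16	wValue;
	__le16	wIndex;			/* control interface number */
	__le16	wLength;		/* 2: size of the trailing bitmap */
} __attribute__((packed));
/* followed by a __le16 bitmap of ACM_CTRL_DCD, ACM_CTRL_DSR, ACM_CTRL_BRK, ... */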
@@ -117,7 +141,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = { | |||
117 | .bLength = sizeof(acm_descriptor), | 141 | .bLength = sizeof(acm_descriptor), |
118 | .bDescriptorType = USB_DT_CS_INTERFACE, | 142 | .bDescriptorType = USB_DT_CS_INTERFACE, |
119 | .bDescriptorSubType = USB_CDC_ACM_TYPE, | 143 | .bDescriptorSubType = USB_CDC_ACM_TYPE, |
120 | .bmCapabilities = (1 << 1), | 144 | .bmCapabilities = USB_CDC_CAP_LINE, |
121 | }; | 145 | }; |
122 | 146 | ||
123 | static struct usb_cdc_union_desc acm_union_desc __initdata = { | 147 | static struct usb_cdc_union_desc acm_union_desc __initdata = { |
@@ -277,6 +301,11 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) | |||
277 | 301 | ||
278 | /* composite driver infrastructure handles everything except | 302 | /* composite driver infrastructure handles everything except |
279 | * CDC class messages; interface activation uses set_alt(). | 303 | * CDC class messages; interface activation uses set_alt(). |
304 | * | ||
305 | * Note CDC spec table 4 lists the ACM request profile. It requires | ||
306 | * encapsulated command support ... we don't handle any, and respond | ||
307 | * to them by stalling. Options include get/set/clear comm features | ||
308 | * (not that useful) and SEND_BREAK. | ||
280 | */ | 309 | */ |
281 | switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { | 310 | switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { |
282 | 311 | ||
@@ -312,7 +341,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) | |||
312 | value = 0; | 341 | value = 0; |
313 | 342 | ||
314 | /* FIXME we should not allow data to flow until the | 343 | /* FIXME we should not allow data to flow until the |
315 | * host sets the RS232_DTR bit; and when it clears | 344 | * host sets the ACM_CTRL_DTR bit; and when it clears |
316 | * that bit, we should return to that no-flow state. | 345 | * that bit, we should return to that no-flow state. |
317 | */ | 346 | */ |
318 | acm->port_handshake_bits = w_value; | 347 | acm->port_handshake_bits = w_value; |
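The switch above dispatches CDC class requests on a single 16-bit key built from bRequestType and bRequest. A minimal userspace sketch (illustration only, not part of this patch) of how that key comes out for SET_CONTROL_LINE_STATE, the request that carries the DTR/RTS bits stored in port_handshake_bits:

/* illustration: the dispatch key acm_setup() switches on */
#include <stdint.h>
#include <stdio.h>

#define USB_CDC_REQ_SET_CONTROL_LINE_STATE	0x22	/* per CDC 1.1 */

int main(void)
{
	uint8_t bRequestType = 0x21;	/* OUT | CLASS | INTERFACE */
	uint8_t bRequest = USB_CDC_REQ_SET_CONTROL_LINE_STATE;
	unsigned key = (bRequestType << 8) | bRequest;

	printf("dispatch key 0x%04x\n", key);	/* prints 0x2122 */
	return 0;
}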
@@ -350,9 +379,6 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | |||
350 | /* we know alt == 0, so this is an activation or a reset */ | 379 | /* we know alt == 0, so this is an activation or a reset */ |
351 | 380 | ||
352 | if (intf == acm->ctrl_id) { | 381 | if (intf == acm->ctrl_id) { |
353 | /* REVISIT this may need more work when we start to | ||
354 | * send notifications ... | ||
355 | */ | ||
356 | if (acm->notify->driver_data) { | 382 | if (acm->notify->driver_data) { |
357 | VDBG(cdev, "reset acm control interface %d\n", intf); | 383 | VDBG(cdev, "reset acm control interface %d\n", intf); |
358 | usb_ep_disable(acm->notify); | 384 | usb_ep_disable(acm->notify); |
@@ -397,6 +423,128 @@ static void acm_disable(struct usb_function *f) | |||
397 | 423 | ||
398 | /*-------------------------------------------------------------------------*/ | 424 | /*-------------------------------------------------------------------------*/ |
399 | 425 | ||
426 | /** | ||
427 | * acm_cdc_notify - issue CDC notification to host | ||
428 | * @acm: ACM instance whose host is to be notified | ||
429 | * @type: notification type | ||
430 | * @value: Refer to cdc specs, wValue field. | ||
431 | * @data: data to be sent | ||
432 | * @length: size of data | ||
433 | * Context: irqs blocked, acm->lock held, acm_notify_req non-null | ||
434 | * | ||
435 | * Returns zero on success or a negative errno. | ||
436 | * | ||
437 | * See section 6.3.5 of the CDC 1.1 specification for information | ||
438 | * about the only notification we issue: SerialState change. | ||
439 | */ | ||
440 | static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value, | ||
441 | void *data, unsigned length) | ||
442 | { | ||
443 | struct usb_ep *ep = acm->notify; | ||
444 | struct usb_request *req; | ||
445 | struct usb_cdc_notification *notify; | ||
446 | const unsigned len = sizeof(*notify) + length; | ||
447 | void *buf; | ||
448 | int status; | ||
449 | |||
450 | req = acm->notify_req; | ||
451 | acm->notify_req = NULL; | ||
452 | acm->pending = false; | ||
453 | |||
454 | req->length = len; | ||
455 | notify = req->buf; | ||
456 | buf = notify + 1; | ||
457 | |||
458 | notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | ||
459 | | USB_RECIP_INTERFACE; | ||
460 | notify->bNotificationType = type; | ||
461 | notify->wValue = cpu_to_le16(value); | ||
462 | notify->wIndex = cpu_to_le16(acm->ctrl_id); | ||
463 | notify->wLength = cpu_to_le16(length); | ||
464 | memcpy(buf, data, length); | ||
465 | |||
466 | status = usb_ep_queue(ep, req, GFP_ATOMIC); | ||
467 | if (status < 0) { | ||
468 | ERROR(acm->port.func.config->cdev, | ||
469 | "acm ttyGS%d can't notify serial state, %d\n", | ||
470 | acm->port_num, status); | ||
471 | acm->notify_req = req; | ||
472 | } | ||
473 | |||
474 | return status; | ||
475 | } | ||
476 | |||
477 | static int acm_notify_serial_state(struct f_acm *acm) | ||
478 | { | ||
479 | struct usb_composite_dev *cdev = acm->port.func.config->cdev; | ||
480 | int status; | ||
481 | |||
482 | spin_lock(&acm->lock); | ||
483 | if (acm->notify_req) { | ||
484 | DBG(cdev, "acm ttyGS%d serial state %04x\n", | ||
485 | acm->port_num, acm->serial_state); | ||
486 | status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, | ||
487 | 0, &acm->serial_state, sizeof(acm->serial_state)); | ||
488 | } else { | ||
489 | acm->pending = true; | ||
490 | status = 0; | ||
491 | } | ||
492 | spin_unlock(&acm->lock); | ||
493 | return status; | ||
494 | } | ||
495 | |||
496 | static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req) | ||
497 | { | ||
498 | struct f_acm *acm = req->context; | ||
499 | u8 doit = false; | ||
500 | |||
501 | /* on this call path we do NOT hold the port spinlock, | ||
502 | * which is why ACM needs its own spinlock | ||
503 | */ | ||
504 | spin_lock(&acm->lock); | ||
505 | if (req->status != -ESHUTDOWN) | ||
506 | doit = acm->pending; | ||
507 | acm->notify_req = req; | ||
508 | spin_unlock(&acm->lock); | ||
509 | |||
510 | if (doit) | ||
511 | acm_notify_serial_state(acm); | ||
512 | } | ||
513 | |||
514 | /* connect == the TTY link is open */ | ||
515 | |||
516 | static void acm_connect(struct gserial *port) | ||
517 | { | ||
518 | struct f_acm *acm = port_to_acm(port); | ||
519 | |||
520 | acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD; | ||
521 | acm_notify_serial_state(acm); | ||
522 | } | ||
523 | |||
524 | static void acm_disconnect(struct gserial *port) | ||
525 | { | ||
526 | struct f_acm *acm = port_to_acm(port); | ||
527 | |||
528 | acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD); | ||
529 | acm_notify_serial_state(acm); | ||
530 | } | ||
531 | |||
532 | static int acm_send_break(struct gserial *port, int duration) | ||
533 | { | ||
534 | struct f_acm *acm = port_to_acm(port); | ||
535 | u16 state; | ||
536 | |||
537 | state = acm->serial_state; | ||
538 | state &= ~ACM_CTRL_BRK; | ||
539 | if (duration) | ||
540 | state |= ACM_CTRL_BRK; | ||
541 | |||
542 | acm->serial_state = state; | ||
543 | return acm_notify_serial_state(acm); | ||
544 | } | ||
545 | |||
546 | /*-------------------------------------------------------------------------*/ | ||
547 | |||
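For reference, the ten bytes sent by acm_cdc_notify() for a SerialState change are the 8-byte CDC notification header followed by the 2-byte state bitmap, which is why GS_NOTIFY_MAXPACKET grows to 10 above. A standalone sketch of that layout (assumes a little-endian host and interface number 0 for brevity; illustration only, not driver code):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct cdc_notification {		/* 8-byte header, little endian */
	uint8_t  bmRequestType;		/* 0xA1: IN | CLASS | INTERFACE */
	uint8_t  bNotificationType;	/* 0x20: SERIAL_STATE */
	uint16_t wValue;		/* 0 */
	uint16_t wIndex;		/* control interface number */
	uint16_t wLength;		/* 2: size of the bitmap that follows */
} __attribute__((packed));

int main(void)
{
	uint8_t buf[10];		/* matches GS_NOTIFY_MAXPACKET */
	struct cdc_notification n = { 0xA1, 0x20, 0, 0, 2 };
	uint16_t state = (1 << 1) | (1 << 0);	/* DSR | DCD, as in acm_connect() */

	memcpy(buf, &n, sizeof(n));
	memcpy(buf + sizeof(n), &state, sizeof(state));
	printf("%zu byte notification\n", sizeof(buf));
	return 0;
}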
400 | /* ACM function driver setup/binding */ | 548 | /* ACM function driver setup/binding */ |
401 | static int __init | 549 | static int __init |
402 | acm_bind(struct usb_configuration *c, struct usb_function *f) | 550 | acm_bind(struct usb_configuration *c, struct usb_function *f) |
@@ -445,8 +593,20 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) | |||
445 | acm->notify = ep; | 593 | acm->notify = ep; |
446 | ep->driver_data = cdev; /* claim */ | 594 | ep->driver_data = cdev; /* claim */ |
447 | 595 | ||
596 | /* allocate notification */ | ||
597 | acm->notify_req = gs_alloc_req(ep, | ||
598 | sizeof(struct usb_cdc_notification) + 2, | ||
599 | GFP_KERNEL); | ||
600 | if (!acm->notify_req) | ||
601 | goto fail; | ||
602 | |||
603 | acm->notify_req->complete = acm_cdc_notify_complete; | ||
604 | acm->notify_req->context = acm; | ||
605 | |||
448 | /* copy descriptors, and track endpoint copies */ | 606 | /* copy descriptors, and track endpoint copies */ |
449 | f->descriptors = usb_copy_descriptors(acm_fs_function); | 607 | f->descriptors = usb_copy_descriptors(acm_fs_function); |
608 | if (!f->descriptors) | ||
609 | goto fail; | ||
450 | 610 | ||
451 | acm->fs.in = usb_find_endpoint(acm_fs_function, | 611 | acm->fs.in = usb_find_endpoint(acm_fs_function, |
452 | f->descriptors, &acm_fs_in_desc); | 612 | f->descriptors, &acm_fs_in_desc); |
@@ -478,8 +638,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) | |||
478 | f->hs_descriptors, &acm_hs_notify_desc); | 638 | f->hs_descriptors, &acm_hs_notify_desc); |
479 | } | 639 | } |
480 | 640 | ||
481 | /* FIXME provide a callback for triggering notifications */ | ||
482 | |||
483 | DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", | 641 | DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", |
484 | acm->port_num, | 642 | acm->port_num, |
485 | gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", | 643 | gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", |
@@ -488,6 +646,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) | |||
488 | return 0; | 646 | return 0; |
489 | 647 | ||
490 | fail: | 648 | fail: |
649 | if (acm->notify_req) | ||
650 | gs_free_req(acm->notify, acm->notify_req); | ||
651 | |||
491 | /* we might as well release our claims on endpoints */ | 652 | /* we might as well release our claims on endpoints */ |
492 | if (acm->notify) | 653 | if (acm->notify) |
493 | acm->notify->driver_data = NULL; | 654 | acm->notify->driver_data = NULL; |
@@ -504,10 +665,13 @@ fail: | |||
504 | static void | 665 | static void |
505 | acm_unbind(struct usb_configuration *c, struct usb_function *f) | 666 | acm_unbind(struct usb_configuration *c, struct usb_function *f) |
506 | { | 667 | { |
668 | struct f_acm *acm = func_to_acm(f); | ||
669 | |||
507 | if (gadget_is_dualspeed(c->cdev->gadget)) | 670 | if (gadget_is_dualspeed(c->cdev->gadget)) |
508 | usb_free_descriptors(f->hs_descriptors); | 671 | usb_free_descriptors(f->hs_descriptors); |
509 | usb_free_descriptors(f->descriptors); | 672 | usb_free_descriptors(f->descriptors); |
510 | kfree(func_to_acm(f)); | 673 | gs_free_req(acm->notify, acm->notify_req); |
674 | kfree(acm); | ||
511 | } | 675 | } |
512 | 676 | ||
513 | /* Some controllers can't support CDC ACM ... */ | 677 | /* Some controllers can't support CDC ACM ... */ |
@@ -571,8 +735,14 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num) | |||
571 | if (!acm) | 735 | if (!acm) |
572 | return -ENOMEM; | 736 | return -ENOMEM; |
573 | 737 | ||
738 | spin_lock_init(&acm->lock); | ||
739 | |||
574 | acm->port_num = port_num; | 740 | acm->port_num = port_num; |
575 | 741 | ||
742 | acm->port.connect = acm_connect; | ||
743 | acm->port.disconnect = acm_disconnect; | ||
744 | acm->port.send_break = acm_send_break; | ||
745 | |||
576 | acm->port.func.name = "acm"; | 746 | acm->port.func.name = "acm"; |
577 | acm->port.func.strings = acm_strings; | 747 | acm->port.func.strings = acm_strings; |
578 | /* descriptors are per-instance copies */ | 748 | /* descriptors are per-instance copies */ |
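The notification path keeps exactly one usb_request: acm_notify_serial_state() hands it off under acm->lock and sets "pending" when it is already in flight, and acm_cdc_notify_complete() returns it and replays the missed change. A userspace analogue of that hand-off pattern (illustrative only, not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct notifier {
	pthread_mutex_t lock;
	bool slot_free;		/* stands in for "acm->notify_req != NULL" */
	bool pending;		/* a change arrived while the slot was busy */
};

/* like acm_notify_serial_state(): send now, or remember to send later */
static void notify(struct notifier *n)
{
	pthread_mutex_lock(&n->lock);
	if (n->slot_free) {
		n->slot_free = false;
		n->pending = false;
		printf("queued a notification\n");
	} else {
		n->pending = true;
	}
	pthread_mutex_unlock(&n->lock);
}

/* like acm_cdc_notify_complete(): return the slot, replay if needed */
static void complete(struct notifier *n)
{
	bool replay;

	pthread_mutex_lock(&n->lock);
	n->slot_free = true;
	replay = n->pending;
	pthread_mutex_unlock(&n->lock);

	if (replay)
		notify(n);
}

int main(void)
{
	struct notifier n = { PTHREAD_MUTEX_INITIALIZER, true, false };

	notify(&n);	/* sent immediately */
	notify(&n);	/* slot busy: marked pending */
	complete(&n);	/* completion replays the pending change */
	return 0;
}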
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c index 0822e9d7693a..a2b5c092bda0 100644 --- a/drivers/usb/gadget/f_ecm.c +++ b/drivers/usb/gadget/f_ecm.c | |||
@@ -63,9 +63,7 @@ struct f_ecm { | |||
63 | 63 | ||
64 | char ethaddr[14]; | 64 | char ethaddr[14]; |
65 | 65 | ||
66 | struct usb_descriptor_header **fs_function; | ||
67 | struct ecm_ep_descs fs; | 66 | struct ecm_ep_descs fs; |
68 | struct usb_descriptor_header **hs_function; | ||
69 | struct ecm_ep_descs hs; | 67 | struct ecm_ep_descs hs; |
70 | 68 | ||
71 | struct usb_ep *notify; | 69 | struct usb_ep *notify; |
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 61652f0f13fd..659b3d9671c4 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c | |||
@@ -85,9 +85,7 @@ struct f_rndis { | |||
85 | u8 ethaddr[ETH_ALEN]; | 85 | u8 ethaddr[ETH_ALEN]; |
86 | int config; | 86 | int config; |
87 | 87 | ||
88 | struct usb_descriptor_header **fs_function; | ||
89 | struct rndis_ep_descs fs; | 88 | struct rndis_ep_descs fs; |
90 | struct usb_descriptor_header **hs_function; | ||
91 | struct rndis_ep_descs hs; | 89 | struct rndis_ep_descs hs; |
92 | 90 | ||
93 | struct usb_ep *notify; | 91 | struct usb_ep *notify; |
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c index 1b6bde9aaed5..fe5674db344b 100644 --- a/drivers/usb/gadget/f_serial.c +++ b/drivers/usb/gadget/f_serial.c | |||
@@ -36,9 +36,7 @@ struct f_gser { | |||
36 | u8 data_id; | 36 | u8 data_id; |
37 | u8 port_num; | 37 | u8 port_num; |
38 | 38 | ||
39 | struct usb_descriptor_header **fs_function; | ||
40 | struct gser_descs fs; | 39 | struct gser_descs fs; |
41 | struct usb_descriptor_header **hs_function; | ||
42 | struct gser_descs hs; | 40 | struct gser_descs hs; |
43 | }; | 41 | }; |
44 | 42 | ||
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c index afeab9a0523f..acb8d233aa1d 100644 --- a/drivers/usb/gadget/f_subset.c +++ b/drivers/usb/gadget/f_subset.c | |||
@@ -66,9 +66,7 @@ struct f_gether { | |||
66 | 66 | ||
67 | char ethaddr[14]; | 67 | char ethaddr[14]; |
68 | 68 | ||
69 | struct usb_descriptor_header **fs_function; | ||
70 | struct geth_descs fs; | 69 | struct geth_descs fs; |
71 | struct usb_descriptor_header **hs_function; | ||
72 | struct geth_descs hs; | 70 | struct geth_descs hs; |
73 | }; | 71 | }; |
74 | 72 | ||
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index 5246e8fef2b2..17d9905101b7 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h | |||
@@ -11,6 +11,10 @@ | |||
11 | * Some are available on 2.4 kernels; several are available, but not | 11 | * Some are available on 2.4 kernels; several are available, but not |
12 | * yet pushed in the 2.6 mainline tree. | 12 | * yet pushed in the 2.6 mainline tree. |
13 | */ | 13 | */ |
14 | |||
15 | #ifndef __GADGET_CHIPS_H | ||
16 | #define __GADGET_CHIPS_H | ||
17 | |||
14 | #ifdef CONFIG_USB_GADGET_NET2280 | 18 | #ifdef CONFIG_USB_GADGET_NET2280 |
15 | #define gadget_is_net2280(g) !strcmp("net2280", (g)->name) | 19 | #define gadget_is_net2280(g) !strcmp("net2280", (g)->name) |
16 | #else | 20 | #else |
@@ -237,3 +241,5 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget) | |||
237 | /* Everything else is *presumably* fine ... */ | 241 | /* Everything else is *presumably* fine ... */ |
238 | return true; | 242 | return true; |
239 | } | 243 | } |
244 | |||
245 | #endif /* __GADGET_CHIPS_H */ | ||
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 376e80c07530..574c53831a05 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
@@ -54,6 +54,7 @@ | |||
54 | 54 | ||
55 | #include <mach/dma.h> | 55 | #include <mach/dma.h> |
56 | #include <mach/usb.h> | 56 | #include <mach/usb.h> |
57 | #include <mach/control.h> | ||
57 | 58 | ||
58 | #include "omap_udc.h" | 59 | #include "omap_udc.h" |
59 | 60 | ||
@@ -2310,10 +2311,10 @@ static int proc_otg_show(struct seq_file *s) | |||
2310 | u32 trans; | 2311 | u32 trans; |
2311 | char *ctrl_name; | 2312 | char *ctrl_name; |
2312 | 2313 | ||
2313 | tmp = OTG_REV_REG; | 2314 | tmp = omap_readl(OTG_REV); |
2314 | if (cpu_is_omap24xx()) { | 2315 | if (cpu_is_omap24xx()) { |
2315 | ctrl_name = "control_devconf"; | 2316 | ctrl_name = "control_devconf"; |
2316 | trans = CONTROL_DEVCONF_REG; | 2317 | trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0); |
2317 | } else { | 2318 | } else { |
2318 | ctrl_name = "tranceiver_ctrl"; | 2319 | ctrl_name = "tranceiver_ctrl"; |
2319 | trans = omap_readw(USB_TRANSCEIVER_CTRL); | 2320 | trans = omap_readw(USB_TRANSCEIVER_CTRL); |
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c index a28513ecbe5b..7cbc78a6853d 100644 --- a/drivers/usb/gadget/pxa27x_udc.c +++ b/drivers/usb/gadget/pxa27x_udc.c | |||
@@ -1622,7 +1622,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver) | |||
1622 | struct pxa_udc *udc = the_controller; | 1622 | struct pxa_udc *udc = the_controller; |
1623 | int retval; | 1623 | int retval; |
1624 | 1624 | ||
1625 | if (!driver || driver->speed != USB_SPEED_FULL || !driver->bind | 1625 | if (!driver || driver->speed < USB_SPEED_FULL || !driver->bind |
1626 | || !driver->disconnect || !driver->setup) | 1626 | || !driver->disconnect || !driver->setup) |
1627 | return -EINVAL; | 1627 | return -EINVAL; |
1628 | if (!udc) | 1628 | if (!udc) |
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index 538807384592..29d13ebe7500 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
36 | #include <linux/interrupt.h> | 36 | #include <linux/interrupt.h> |
37 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
38 | #include <linux/version.h> | ||
39 | #include <linux/clk.h> | 38 | #include <linux/clk.h> |
40 | 39 | ||
41 | #include <linux/debugfs.h> | 40 | #include <linux/debugfs.h> |
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index abf9505d3a75..53d59287f2bc 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c | |||
@@ -52,13 +52,16 @@ | |||
52 | * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. | 52 | * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. |
53 | */ | 53 | */ |
54 | 54 | ||
55 | #define PREFIX "ttyGS" | ||
56 | |||
55 | /* | 57 | /* |
56 | * gserial is the lifecycle interface, used by USB functions | 58 | * gserial is the lifecycle interface, used by USB functions |
57 | * gs_port is the I/O nexus, used by the tty driver | 59 | * gs_port is the I/O nexus, used by the tty driver |
58 | * tty_struct links to the tty/filesystem framework | 60 | * tty_struct links to the tty/filesystem framework |
59 | * | 61 | * |
60 | * gserial <---> gs_port ... links will be null when the USB link is | 62 | * gserial <---> gs_port ... links will be null when the USB link is |
61 | * inactive; managed by gserial_{connect,disconnect}(). | 63 | * inactive; managed by gserial_{connect,disconnect}(). Each gserial
64 | * instance can wrap its own USB control protocol. | ||
62 | * gserial->ioport == usb_ep->driver_data ... gs_port | 65 | * gserial->ioport == usb_ep->driver_data ... gs_port |
63 | * gs_port->port_usb ... gserial | 66 | * gs_port->port_usb ... gserial |
64 | * | 67 | * |
@@ -100,6 +103,8 @@ struct gs_port { | |||
100 | wait_queue_head_t close_wait; /* wait for last close */ | 103 | wait_queue_head_t close_wait; /* wait for last close */ |
101 | 104 | ||
102 | struct list_head read_pool; | 105 | struct list_head read_pool; |
106 | struct list_head read_queue; | ||
107 | unsigned n_read; | ||
103 | struct tasklet_struct push; | 108 | struct tasklet_struct push; |
104 | 109 | ||
105 | struct list_head write_pool; | 110 | struct list_head write_pool; |
@@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb) | |||
177 | /* | 182 | /* |
178 | * gs_buf_data_avail | 183 | * gs_buf_data_avail |
179 | * | 184 | * |
180 | * Return the number of bytes of data available in the circular | 185 | * Return the number of bytes of data written into the circular |
181 | * buffer. | 186 | * buffer. |
182 | */ | 187 | */ |
183 | static unsigned gs_buf_data_avail(struct gs_buf *gb) | 188 | static unsigned gs_buf_data_avail(struct gs_buf *gb) |
@@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count) | |||
278 | * Allocate a usb_request and its buffer. Returns a pointer to the | 283 | * Allocate a usb_request and its buffer. Returns a pointer to the |
279 | * usb_request or NULL if there is an error. | 284 | * usb_request or NULL if there is an error. |
280 | */ | 285 | */ |
281 | static struct usb_request * | 286 | struct usb_request * |
282 | gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) | 287 | gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) |
283 | { | 288 | { |
284 | struct usb_request *req; | 289 | struct usb_request *req; |
@@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) | |||
302 | * | 307 | * |
303 | * Free a usb_request and its buffer. | 308 | * Free a usb_request and its buffer. |
304 | */ | 309 | */ |
305 | static void gs_free_req(struct usb_ep *ep, struct usb_request *req) | 310 | void gs_free_req(struct usb_ep *ep, struct usb_request *req) |
306 | { | 311 | { |
307 | kfree(req->buf); | 312 | kfree(req->buf); |
308 | usb_ep_free_request(ep, req); | 313 | usb_ep_free_request(ep, req); |
@@ -367,11 +372,9 @@ __acquires(&port->port_lock) | |||
367 | req->length = len; | 372 | req->length = len; |
368 | list_del(&req->list); | 373 | list_del(&req->list); |
369 | 374 | ||
370 | #ifdef VERBOSE_DEBUG | 375 | pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", |
371 | pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n", | 376 | port->port_num, len, *((u8 *)req->buf), |
372 | __func__, in->name, len, *((u8 *)req->buf), | ||
373 | *((u8 *)req->buf+1), *((u8 *)req->buf+2)); | 377 | *((u8 *)req->buf+1), *((u8 *)req->buf+2)); |
374 | #endif | ||
375 | 378 | ||
376 | /* Drop lock while we call out of driver; completions | 379 | /* Drop lock while we call out of driver; completions |
377 | * could be issued while we do so. Disconnection may | 380 | * could be issued while we do so. Disconnection may |
@@ -401,56 +404,6 @@ __acquires(&port->port_lock) | |||
401 | return status; | 404 | return status; |
402 | } | 405 | } |
403 | 406 | ||
404 | static void gs_rx_push(unsigned long _port) | ||
405 | { | ||
406 | struct gs_port *port = (void *)_port; | ||
407 | struct tty_struct *tty = port->port_tty; | ||
408 | |||
409 | /* With low_latency, tty_flip_buffer_push() doesn't put its | ||
410 | * real work through a workqueue, so the ldisc has a better | ||
411 | * chance to keep up with peak USB data rates. | ||
412 | */ | ||
413 | if (tty) { | ||
414 | tty_flip_buffer_push(tty); | ||
415 | wake_up_interruptible(&tty->read_wait); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | /* | ||
420 | * gs_recv_packet | ||
421 | * | ||
422 | * Called for each USB packet received. Reads the packet | ||
423 | * header and stuffs the data in the appropriate tty buffer. | ||
424 | * Returns 0 if successful, or a negative error number. | ||
425 | * | ||
426 | * Called during USB completion routine, on interrupt time. | ||
427 | * With port_lock. | ||
428 | */ | ||
429 | static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size) | ||
430 | { | ||
431 | unsigned len; | ||
432 | struct tty_struct *tty; | ||
433 | |||
434 | /* I/O completions can continue for a while after close(), until the | ||
435 | * request queue empties. Just discard any data we receive, until | ||
436 | * something reopens this TTY ... as if there were no HW flow control. | ||
437 | */ | ||
438 | tty = port->port_tty; | ||
439 | if (tty == NULL) { | ||
440 | pr_vdebug("%s: ttyGS%d, after close\n", | ||
441 | __func__, port->port_num); | ||
442 | return -EIO; | ||
443 | } | ||
444 | |||
445 | len = tty_insert_flip_string(tty, packet, size); | ||
446 | if (len > 0) | ||
447 | tasklet_schedule(&port->push); | ||
448 | if (len < size) | ||
449 | pr_debug("%s: ttyGS%d, drop %d bytes\n", | ||
450 | __func__, port->port_num, size - len); | ||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | /* | 407 | /* |
455 | * Context: caller owns port_lock, and port_usb is set | 408 | * Context: caller owns port_lock, and port_usb is set |
456 | */ | 409 | */ |
@@ -469,9 +422,9 @@ __acquires(&port->port_lock) | |||
469 | int status; | 422 | int status; |
470 | struct tty_struct *tty; | 423 | struct tty_struct *tty; |
471 | 424 | ||
472 | /* no more rx if closed or throttled */ | 425 | /* no more rx if closed */ |
473 | tty = port->port_tty; | 426 | tty = port->port_tty; |
474 | if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) | 427 | if (!tty) |
475 | break; | 428 | break; |
476 | 429 | ||
477 | req = list_entry(pool->next, struct usb_request, list); | 430 | req = list_entry(pool->next, struct usb_request, list); |
@@ -500,36 +453,134 @@ __acquires(&port->port_lock) | |||
500 | return started; | 453 | return started; |
501 | } | 454 | } |
502 | 455 | ||
503 | static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) | 456 | /* |
457 | * RX tasklet takes data out of the RX queue and hands it up to the TTY | ||
458 | * layer until it refuses to take any more data (or is throttled back). | ||
459 | * Then it issues reads for any further data. | ||
460 | * | ||
461 | * If the RX queue becomes full enough that no usb_request is queued, | ||
462 | * the OUT endpoint may begin NAKing as soon as its FIFO fills up. | ||
463 | * So QUEUE_SIZE packets plus however many the FIFO holds (usually two) | ||
464 | * can be buffered before the TTY layer's buffers (currently 64 KB). | ||
465 | */ | ||
466 | static void gs_rx_push(unsigned long _port) | ||
504 | { | 467 | { |
505 | int status; | 468 | struct gs_port *port = (void *)_port; |
506 | struct gs_port *port = ep->driver_data; | 469 | struct tty_struct *tty; |
470 | struct list_head *queue = &port->read_queue; | ||
471 | bool disconnect = false; | ||
472 | bool do_push = false; | ||
507 | 473 | ||
508 | spin_lock(&port->port_lock); | 474 | /* hand any queued data to the tty */ |
509 | list_add(&req->list, &port->read_pool); | 475 | spin_lock_irq(&port->port_lock); |
476 | tty = port->port_tty; | ||
477 | while (!list_empty(queue)) { | ||
478 | struct usb_request *req; | ||
510 | 479 | ||
511 | switch (req->status) { | 480 | req = list_first_entry(queue, struct usb_request, list); |
512 | case 0: | ||
513 | /* normal completion */ | ||
514 | status = gs_recv_packet(port, req->buf, req->actual); | ||
515 | if (status && status != -EIO) | ||
516 | pr_debug("%s: %s %s err %d\n", | ||
517 | __func__, "recv", ep->name, status); | ||
518 | gs_start_rx(port); | ||
519 | break; | ||
520 | 481 | ||
521 | case -ESHUTDOWN: | 482 | /* discard data if tty was closed */ |
522 | /* disconnect */ | 483 | if (!tty) |
523 | pr_vdebug("%s: %s shutdown\n", __func__, ep->name); | 484 | goto recycle; |
524 | break; | ||
525 | 485 | ||
526 | default: | 486 | /* leave data queued if tty was rx throttled */ |
527 | /* presumably a transient fault */ | 487 | if (test_bit(TTY_THROTTLED, &tty->flags)) |
528 | pr_warning("%s: unexpected %s status %d\n", | 488 | break; |
529 | __func__, ep->name, req->status); | 489 | |
530 | gs_start_rx(port); | 490 | switch (req->status) { |
531 | break; | 491 | case -ESHUTDOWN: |
492 | disconnect = true; | ||
493 | pr_vdebug(PREFIX "%d: shutdown\n", port->port_num); | ||
494 | break; | ||
495 | |||
496 | default: | ||
497 | /* presumably a transient fault */ | ||
498 | pr_warning(PREFIX "%d: unexpected RX status %d\n", | ||
499 | port->port_num, req->status); | ||
500 | /* FALLTHROUGH */ | ||
501 | case 0: | ||
502 | /* normal completion */ | ||
503 | break; | ||
504 | } | ||
505 | |||
506 | /* push data to (open) tty */ | ||
507 | if (req->actual) { | ||
508 | char *packet = req->buf; | ||
509 | unsigned size = req->actual; | ||
510 | unsigned n; | ||
511 | int count; | ||
512 | |||
513 | /* we may have pushed part of this packet already... */ | ||
514 | n = port->n_read; | ||
515 | if (n) { | ||
516 | packet += n; | ||
517 | size -= n; | ||
518 | } | ||
519 | |||
520 | count = tty_insert_flip_string(tty, packet, size); | ||
521 | if (count) | ||
522 | do_push = true; | ||
523 | if (count != size) { | ||
524 | /* stop pushing; TTY layer can't handle more */ | ||
525 | port->n_read += count; | ||
526 | pr_vdebug(PREFIX "%d: rx block %d/%d\n", | ||
527 | port->port_num, | ||
528 | count, req->actual); | ||
529 | break; | ||
530 | } | ||
531 | port->n_read = 0; | ||
532 | } | ||
533 | recycle: | ||
534 | list_move(&req->list, &port->read_pool); | ||
532 | } | 535 | } |
536 | |||
537 | /* Push from tty to ldisc; this is immediate with low_latency, and | ||
538 | * may trigger callbacks to this driver ... so drop the spinlock. | ||
539 | */ | ||
540 | if (tty && do_push) { | ||
541 | spin_unlock_irq(&port->port_lock); | ||
542 | tty_flip_buffer_push(tty); | ||
543 | wake_up_interruptible(&tty->read_wait); | ||
544 | spin_lock_irq(&port->port_lock); | ||
545 | |||
546 | /* tty may have been closed */ | ||
547 | tty = port->port_tty; | ||
548 | } | ||
549 | |||
550 | |||
551 | /* We want our data queue to become empty ASAP, keeping data | ||
552 | * in the tty and ldisc (not here). If we couldn't push any | ||
553 | * this time around, there may be trouble unless there's an | ||
554 | * implicit tty_unthrottle() call on its way... | ||
555 | * | ||
556 | * REVISIT we should probably add a timer to keep the tasklet | ||
557 | * from starving ... but it's not clear that case ever happens. | ||
558 | */ | ||
559 | if (!list_empty(queue) && tty) { | ||
560 | if (!test_bit(TTY_THROTTLED, &tty->flags)) { | ||
561 | if (do_push) | ||
562 | tasklet_schedule(&port->push); | ||
563 | else | ||
564 | pr_warning(PREFIX "%d: RX not scheduled?\n", | ||
565 | port->port_num); | ||
566 | } | ||
567 | } | ||
568 | |||
569 | /* If we're still connected, refill the USB RX queue. */ | ||
570 | if (!disconnect && port->port_usb) | ||
571 | gs_start_rx(port); | ||
572 | |||
573 | spin_unlock_irq(&port->port_lock); | ||
574 | } | ||
575 | |||
576 | static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) | ||
577 | { | ||
578 | struct gs_port *port = ep->driver_data; | ||
579 | |||
580 | /* Queue all received data until the tty layer is ready for it. */ | ||
581 | spin_lock(&port->port_lock); | ||
582 | list_add_tail(&req->list, &port->read_queue); | ||
583 | tasklet_schedule(&port->push); | ||
533 | spin_unlock(&port->port_lock); | 584 | spin_unlock(&port->port_lock); |
534 | } | 585 | } |
535 | 586 | ||
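A completed OUT request may hold more data than the TTY will accept in one pass, so the tasklet remembers how far into the head request it got (port->n_read) and resumes from there on the next run instead of dropping bytes. A standalone sketch of that partial-consume bookkeeping (illustrative only, not driver code):

#include <stdio.h>

/* toy sink standing in for tty_insert_flip_string(); takes at most 'room' bytes */
static unsigned sink(const char *data, unsigned len, unsigned room)
{
	return len < room ? len : room;
}

int main(void)
{
	const char packet[] = "0123456789";	/* one completed OUT request */
	unsigned size = sizeof(packet) - 1;
	unsigned n_read = 0;			/* like port->n_read */
	unsigned room = 4;			/* whatever the TTY accepts per pass */

	while (n_read < size) {
		unsigned count = sink(packet + n_read, size - n_read, room);
		if (!count)
			break;			/* throttled: leave request queued */
		n_read += count;		/* resume here on the next tasklet run */
	}
	printf("consumed %u of %u bytes\n", n_read, size);
	return 0;
}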
@@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port) | |||
625 | } | 676 | } |
626 | 677 | ||
627 | /* queue read requests */ | 678 | /* queue read requests */ |
679 | port->n_read = 0; | ||
628 | started = gs_start_rx(port); | 680 | started = gs_start_rx(port); |
629 | 681 | ||
630 | /* unblock any pending writes into our circular buffer */ | 682 | /* unblock any pending writes into our circular buffer */ |
@@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port) | |||
633 | } else { | 685 | } else { |
634 | gs_free_requests(ep, head); | 686 | gs_free_requests(ep, head); |
635 | gs_free_requests(port->port_usb->in, &port->write_pool); | 687 | gs_free_requests(port->port_usb->in, &port->write_pool); |
688 | status = -EIO; | ||
636 | } | 689 | } |
637 | 690 | ||
638 | return started ? 0 : status; | 691 | return status; |
639 | } | 692 | } |
640 | 693 | ||
641 | /*-------------------------------------------------------------------------*/ | 694 | /*-------------------------------------------------------------------------*/ |
@@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file) | |||
736 | 789 | ||
737 | /* if connected, start the I/O stream */ | 790 | /* if connected, start the I/O stream */ |
738 | if (port->port_usb) { | 791 | if (port->port_usb) { |
792 | struct gserial *gser = port->port_usb; | ||
793 | |||
739 | pr_debug("gs_open: start ttyGS%d\n", port->port_num); | 794 | pr_debug("gs_open: start ttyGS%d\n", port->port_num); |
740 | gs_start_io(port); | 795 | gs_start_io(port); |
741 | 796 | ||
742 | /* REVISIT for ACM, issue "network connected" event */ | 797 | if (gser->connect) |
798 | gser->connect(gser); | ||
743 | } | 799 | } |
744 | 800 | ||
745 | pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); | 801 | pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); |
@@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p) | |||
766 | static void gs_close(struct tty_struct *tty, struct file *file) | 822 | static void gs_close(struct tty_struct *tty, struct file *file) |
767 | { | 823 | { |
768 | struct gs_port *port = tty->driver_data; | 824 | struct gs_port *port = tty->driver_data; |
825 | struct gserial *gser; | ||
769 | 826 | ||
770 | spin_lock_irq(&port->port_lock); | 827 | spin_lock_irq(&port->port_lock); |
771 | 828 | ||
@@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file) | |||
785 | port->openclose = true; | 842 | port->openclose = true; |
786 | port->open_count = 0; | 843 | port->open_count = 0; |
787 | 844 | ||
788 | if (port->port_usb) | 845 | gser = port->port_usb; |
789 | /* REVISIT for ACM, issue "network disconnected" event */; | 846 | if (gser && gser->disconnect) |
847 | gser->disconnect(gser); | ||
790 | 848 | ||
791 | /* wait for circular write buffer to drain, disconnect, or at | 849 | /* wait for circular write buffer to drain, disconnect, or at |
792 | * most GS_CLOSE_TIMEOUT seconds; then discard the rest | 850 | * most GS_CLOSE_TIMEOUT seconds; then discard the rest |
793 | */ | 851 | */ |
794 | if (gs_buf_data_avail(&port->port_write_buf) > 0 | 852 | if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) { |
795 | && port->port_usb) { | ||
796 | spin_unlock_irq(&port->port_lock); | 853 | spin_unlock_irq(&port->port_lock); |
797 | wait_event_interruptible_timeout(port->drain_wait, | 854 | wait_event_interruptible_timeout(port->drain_wait, |
798 | gs_writes_finished(port), | 855 | gs_writes_finished(port), |
799 | GS_CLOSE_TIMEOUT * HZ); | 856 | GS_CLOSE_TIMEOUT * HZ); |
800 | spin_lock_irq(&port->port_lock); | 857 | spin_lock_irq(&port->port_lock); |
858 | gser = port->port_usb; | ||
801 | } | 859 | } |
802 | 860 | ||
803 | /* Iff we're disconnected, there can be no I/O in flight so it's | 861 | /* Iff we're disconnected, there can be no I/O in flight so it's |
804 | * ok to free the circular buffer; else just scrub it. And don't | 862 | * ok to free the circular buffer; else just scrub it. And don't |
805 | * let the push tasklet fire again until we're re-opened. | 863 | * let the push tasklet fire again until we're re-opened. |
806 | */ | 864 | */ |
807 | if (port->port_usb == NULL) | 865 | if (gser == NULL) |
808 | gs_buf_free(&port->port_write_buf); | 866 | gs_buf_free(&port->port_write_buf); |
809 | else | 867 | else |
810 | gs_buf_clear(&port->port_write_buf); | 868 | gs_buf_clear(&port->port_write_buf); |
811 | 869 | ||
812 | tasklet_kill(&port->push); | ||
813 | |||
814 | tty->driver_data = NULL; | 870 | tty->driver_data = NULL; |
815 | port->port_tty = NULL; | 871 | port->port_tty = NULL; |
816 | 872 | ||
@@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty) | |||
911 | { | 967 | { |
912 | struct gs_port *port = tty->driver_data; | 968 | struct gs_port *port = tty->driver_data; |
913 | unsigned long flags; | 969 | unsigned long flags; |
914 | unsigned started = 0; | ||
915 | 970 | ||
916 | spin_lock_irqsave(&port->port_lock, flags); | 971 | spin_lock_irqsave(&port->port_lock, flags); |
917 | if (port->port_usb) | 972 | if (port->port_usb) { |
918 | started = gs_start_rx(port); | 973 | /* Kickstart read queue processing. We don't do xon/xoff, |
974 | * rts/cts, or other handshaking with the host, but if the | ||
975 | * read queue backs up enough we'll be NAKing OUT packets. | ||
976 | */ | ||
977 | tasklet_schedule(&port->push); | ||
978 | pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num); | ||
979 | } | ||
919 | spin_unlock_irqrestore(&port->port_lock, flags); | 980 | spin_unlock_irqrestore(&port->port_lock, flags); |
981 | } | ||
982 | |||
983 | static int gs_break_ctl(struct tty_struct *tty, int duration) | ||
984 | { | ||
985 | struct gs_port *port = tty->driver_data; | ||
986 | int status = 0; | ||
987 | struct gserial *gser; | ||
988 | |||
989 | pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n", | ||
990 | port->port_num, duration); | ||
920 | 991 | ||
921 | pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n", | 992 | spin_lock_irq(&port->port_lock); |
922 | port->port_num, started); | 993 | gser = port->port_usb; |
994 | if (gser && gser->send_break) | ||
995 | status = gser->send_break(gser, duration); | ||
996 | spin_unlock_irq(&port->port_lock); | ||
997 | |||
998 | return status; | ||
923 | } | 999 | } |
924 | 1000 | ||
925 | static const struct tty_operations gs_tty_ops = { | 1001 | static const struct tty_operations gs_tty_ops = { |
@@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = { | |||
931 | .write_room = gs_write_room, | 1007 | .write_room = gs_write_room, |
932 | .chars_in_buffer = gs_chars_in_buffer, | 1008 | .chars_in_buffer = gs_chars_in_buffer, |
933 | .unthrottle = gs_unthrottle, | 1009 | .unthrottle = gs_unthrottle, |
1010 | .break_ctl = gs_break_ctl, | ||
934 | }; | 1011 | }; |
935 | 1012 | ||
936 | /*-------------------------------------------------------------------------*/ | 1013 | /*-------------------------------------------------------------------------*/ |
@@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) | |||
953 | tasklet_init(&port->push, gs_rx_push, (unsigned long) port); | 1030 | tasklet_init(&port->push, gs_rx_push, (unsigned long) port); |
954 | 1031 | ||
955 | INIT_LIST_HEAD(&port->read_pool); | 1032 | INIT_LIST_HEAD(&port->read_pool); |
1033 | INIT_LIST_HEAD(&port->read_queue); | ||
956 | INIT_LIST_HEAD(&port->write_pool); | 1034 | INIT_LIST_HEAD(&port->write_pool); |
957 | 1035 | ||
958 | port->port_num = port_num; | 1036 | port->port_num = port_num; |
@@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count) | |||
997 | 1075 | ||
998 | gs_tty_driver->owner = THIS_MODULE; | 1076 | gs_tty_driver->owner = THIS_MODULE; |
999 | gs_tty_driver->driver_name = "g_serial"; | 1077 | gs_tty_driver->driver_name = "g_serial"; |
1000 | gs_tty_driver->name = "ttyGS"; | 1078 | gs_tty_driver->name = PREFIX; |
1001 | /* uses dynamically assigned dev_t values */ | 1079 | /* uses dynamically assigned dev_t values */ |
1002 | 1080 | ||
1003 | gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; | 1081 | gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; |
@@ -1104,6 +1182,8 @@ void gserial_cleanup(void) | |||
1104 | ports[i].port = NULL; | 1182 | ports[i].port = NULL; |
1105 | mutex_unlock(&ports[i].lock); | 1183 | mutex_unlock(&ports[i].lock); |
1106 | 1184 | ||
1185 | tasklet_kill(&port->push); | ||
1186 | |||
1107 | /* wait for old opens to finish */ | 1187 | /* wait for old opens to finish */ |
1108 | wait_event(port->close_wait, gs_closed(port)); | 1188 | wait_event(port->close_wait, gs_closed(port)); |
1109 | 1189 | ||
@@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num) | |||
1175 | 1255 | ||
1176 | /* REVISIT if waiting on "carrier detect", signal. */ | 1256 | /* REVISIT if waiting on "carrier detect", signal. */ |
1177 | 1257 | ||
1178 | /* REVISIT for ACM, issue "network connection" status notification: | 1258 | /* if it's already open, start I/O ... and notify the serial |
1179 | * connected if open_count, else disconnected. | 1259 | * protocol about open/close status (connect/disconnect). |
1180 | */ | 1260 | */ |
1181 | |||
1182 | /* if it's already open, start I/O */ | ||
1183 | if (port->open_count) { | 1261 | if (port->open_count) { |
1184 | pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); | 1262 | pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); |
1185 | gs_start_io(port); | 1263 | gs_start_io(port); |
1264 | if (gser->connect) | ||
1265 | gser->connect(gser); | ||
1266 | } else { | ||
1267 | if (gser->disconnect) | ||
1268 | gser->disconnect(gser); | ||
1186 | } | 1269 | } |
1187 | 1270 | ||
1188 | spin_unlock_irqrestore(&port->port_lock, flags); | 1271 | spin_unlock_irqrestore(&port->port_lock, flags); |
@@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser) | |||
1241 | if (port->open_count == 0 && !port->openclose) | 1324 | if (port->open_count == 0 && !port->openclose) |
1242 | gs_buf_free(&port->port_write_buf); | 1325 | gs_buf_free(&port->port_write_buf); |
1243 | gs_free_requests(gser->out, &port->read_pool); | 1326 | gs_free_requests(gser->out, &port->read_pool); |
1327 | gs_free_requests(gser->out, &port->read_queue); | ||
1244 | gs_free_requests(gser->in, &port->write_pool); | 1328 | gs_free_requests(gser->in, &port->write_pool); |
1245 | spin_unlock_irqrestore(&port->port_lock, flags); | 1329 | spin_unlock_irqrestore(&port->port_lock, flags); |
1246 | } | 1330 | } |
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h index 7b561138f90e..af3910d01aea 100644 --- a/drivers/usb/gadget/u_serial.h +++ b/drivers/usb/gadget/u_serial.h | |||
@@ -23,8 +23,7 @@ | |||
23 | * style I/O using the USB peripheral endpoints listed here, including | 23 | * style I/O using the USB peripheral endpoints listed here, including |
24 | * hookups to sysfs and /dev for each logical "tty" device. | 24 | * hookups to sysfs and /dev for each logical "tty" device. |
25 | * | 25 | * |
26 | * REVISIT need TTY --> USB event flow too, so ACM can report open/close | 26 | * REVISIT at least ACM could support tiocmget() if needed. |
27 | * as carrier detect events. Model after ECM. There's more ACM state too. | ||
28 | * | 27 | * |
29 | * REVISIT someday, allow multiplexing several TTYs over these endpoints. | 28 | * REVISIT someday, allow multiplexing several TTYs over these endpoints. |
30 | */ | 29 | */ |
@@ -41,8 +40,17 @@ struct gserial { | |||
41 | 40 | ||
42 | /* REVISIT avoid this CDC-ACM support harder ... */ | 41 | /* REVISIT avoid this CDC-ACM support harder ... */ |
43 | struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ | 42 | struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ |
43 | |||
44 | /* notification callbacks */ | ||
45 | void (*connect)(struct gserial *p); | ||
46 | void (*disconnect)(struct gserial *p); | ||
47 | int (*send_break)(struct gserial *p, int duration); | ||
44 | }; | 48 | }; |
45 | 49 | ||
50 | /* utilities to allocate/free request and buffer */ | ||
51 | struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags); | ||
52 | void gs_free_req(struct usb_ep *, struct usb_request *req); | ||
53 | |||
46 | /* port setup/teardown is handled by gadget driver */ | 54 | /* port setup/teardown is handled by gadget driver */ |
47 | int gserial_setup(struct usb_gadget *g, unsigned n_ports); | 55 | int gserial_setup(struct usb_gadget *g, unsigned n_ports); |
48 | void gserial_cleanup(void); | 56 | void gserial_cleanup(void); |
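With these hooks in place, a function layered on u_serial learns about TTY opens, closes and breaks instead of relying on the old REVISIT placeholders. A hypothetical consumer sketch (kernel context assumed; the names below are illustrative, not from this patch):

/* hypothetical consumer of the new gserial callbacks */
#include "u_serial.h"

struct my_serial_func {
	struct gserial port;		/* embeds the u_serial hookup */
	/* ... function-specific state ... */
};

static void my_connect(struct gserial *p)
{
	/* TTY was opened: e.g. raise carrier toward the host */
}

static void my_disconnect(struct gserial *p)
{
	/* TTY was closed: drop carrier */
}

static int my_send_break(struct gserial *p, int duration)
{
	/* nonzero duration starts a break, zero ends it */
	return 0;
}

static void my_wire_up(struct my_serial_func *fn)
{
	fn->port.connect = my_connect;
	fn->port.disconnect = my_disconnect;
	fn->port.send_break = my_send_break;	/* optional; may stay NULL */
}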
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 5fbdc14e63b3..5416cf969005 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/mbus.h> | 14 | #include <linux/mbus.h> |
15 | #include <asm/plat-orion/ehci-orion.h> | 15 | #include <plat/ehci-orion.h> |
16 | 16 | ||
17 | #define rdl(off) __raw_readl(hcd->regs + (off)) | 17 | #define rdl(off) __raw_readl(hcd->regs + (off)) |
18 | #define wrl(off, val) __raw_writel((val), hcd->regs + (off)) | 18 | #define wrl(off, val) __raw_writel((val), hcd->regs + (off)) |
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index c858f2adb929..8017f1cf78e2 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
@@ -126,9 +126,8 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs) | |||
126 | * doesn't quite work because some people have to enforce 32-bit access | 126 | * doesn't quite work because some people have to enforce 32-bit access |
127 | */ | 127 | */ |
128 | static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, | 128 | static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, |
129 | __u32 __iomem *dst, u32 offset, u32 len) | 129 | __u32 __iomem *dst, u32 len) |
130 | { | 130 | { |
131 | struct usb_hcd *hcd = priv_to_hcd(priv); | ||
132 | u32 val; | 131 | u32 val; |
133 | u8 *buff8; | 132 | u8 *buff8; |
134 | 133 | ||
@@ -136,11 +135,6 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, | |||
136 | printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len); | 135 | printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len); |
137 | return; | 136 | return; |
138 | } | 137 | } |
139 | isp1760_writel(offset, hcd->regs + HC_MEMORY_REG); | ||
140 | /* XXX | ||
141 | * 90nsec delay, the spec says something how this could be avoided. | ||
142 | */ | ||
143 | mdelay(1); | ||
144 | 138 | ||
145 | while (len >= 4) { | 139 | while (len >= 4) { |
146 | *src = __raw_readl(dst); | 140 | *src = __raw_readl(dst); |
@@ -987,8 +981,20 @@ static void do_atl_int(struct usb_hcd *usb_hcd) | |||
987 | printk(KERN_ERR "qh is 0\n"); | 981 | printk(KERN_ERR "qh is 0\n"); |
988 | continue; | 982 | continue; |
989 | } | 983 | } |
990 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, | 984 | isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs + |
991 | atl_regs, sizeof(ptd)); | 985 | HC_MEMORY_REG); |
986 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
987 | HC_MEMORY_REG); | ||
988 | /* | ||
989 | * write bank1 address twice to ensure the 90ns delay (time | ||
990 | * between BANK0 write and the priv_read_copy() call is at | ||
991 | * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns) | ||
992 | */ | ||
993 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
994 | HC_MEMORY_REG); | ||
995 | |||
996 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs + | ||
997 | ISP_BANK(0), sizeof(ptd)); | ||
992 | 998 | ||
993 | dw1 = le32_to_cpu(ptd.dw1); | 999 | dw1 = le32_to_cpu(ptd.dw1); |
994 | dw2 = le32_to_cpu(ptd.dw2); | 1000 | dw2 = le32_to_cpu(ptd.dw2); |
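ISP_BANK(), added to isp1760-hcd.h further down, just places the bank index above bit 16 of the value written to HC_MEMORY_REG, so bank 0 (the PTD) and bank 1 (the payload) presumably map to separate halves of the memory window; the duplicated bank-1 write above exists only to satisfy the quoted settle time. A trivial check of the macro arithmetic (illustration only):

#include <stdio.h>

#define ISP_BANK(x) ((x) << 16)

int main(void)
{
	printf("bank0 offset 0x%x, bank1 offset 0x%x\n",
	       ISP_BANK(0), ISP_BANK(1));	/* 0x0, 0x10000 */
	return 0;
}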
@@ -1091,7 +1097,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) | |||
1091 | case IN_PID: | 1097 | case IN_PID: |
1092 | priv_read_copy(priv, | 1098 | priv_read_copy(priv, |
1093 | priv->atl_ints[queue_entry].data_buffer, | 1099 | priv->atl_ints[queue_entry].data_buffer, |
1094 | usb_hcd->regs + payload, payload, | 1100 | usb_hcd->regs + payload + ISP_BANK(1), |
1095 | length); | 1101 | length); |
1096 | 1102 | ||
1097 | case OUT_PID: | 1103 | case OUT_PID: |
@@ -1122,11 +1128,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) | |||
1122 | } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) { | 1128 | } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) { |
1123 | /* short BULK received */ | 1129 | /* short BULK received */ |
1124 | 1130 | ||
1125 | printk(KERN_ERR "short bulk, %d instead %zu\n", length, | ||
1126 | qtd->length); | ||
1127 | if (urb->transfer_flags & URB_SHORT_NOT_OK) { | 1131 | if (urb->transfer_flags & URB_SHORT_NOT_OK) { |
1128 | urb->status = -EREMOTEIO; | 1132 | urb->status = -EREMOTEIO; |
1129 | printk(KERN_ERR "not okey\n"); | 1133 | isp1760_dbg(priv, "short bulk, %d instead %zu " |
1134 | "with URB_SHORT_NOT_OK flag.\n", | ||
1135 | length, qtd->length); | ||
1130 | } | 1136 | } |
1131 | 1137 | ||
1132 | if (urb->status == -EINPROGRESS) | 1138 | if (urb->status == -EINPROGRESS) |
@@ -1206,8 +1212,20 @@ static void do_intl_int(struct usb_hcd *usb_hcd) | |||
1206 | continue; | 1212 | continue; |
1207 | } | 1213 | } |
1208 | 1214 | ||
1209 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs, | 1215 | isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs + |
1210 | int_regs, sizeof(ptd)); | 1216 | HC_MEMORY_REG); |
1217 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
1218 | HC_MEMORY_REG); | ||
1219 | /* | ||
1220 | * write bank1 address twice to ensure the 90ns delay (time | ||
1221 | * between BANK0 write and the priv_read_copy() call is at | ||
1222 | * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns) | ||
1223 | */ | ||
1224 | isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + | ||
1225 | HC_MEMORY_REG); | ||
1226 | |||
1227 | priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs + | ||
1228 | ISP_BANK(0), sizeof(ptd)); | ||
1211 | dw1 = le32_to_cpu(ptd.dw1); | 1229 | dw1 = le32_to_cpu(ptd.dw1); |
1212 | dw3 = le32_to_cpu(ptd.dw3); | 1230 | dw3 = le32_to_cpu(ptd.dw3); |
1213 | check_int_err_status(le32_to_cpu(ptd.dw4)); | 1231 | check_int_err_status(le32_to_cpu(ptd.dw4)); |
@@ -1242,7 +1260,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd) | |||
1242 | case IN_PID: | 1260 | case IN_PID: |
1243 | priv_read_copy(priv, | 1261 | priv_read_copy(priv, |
1244 | priv->int_ints[queue_entry].data_buffer, | 1262 | priv->int_ints[queue_entry].data_buffer, |
1245 | usb_hcd->regs + payload , payload, | 1263 | usb_hcd->regs + payload + ISP_BANK(1), |
1246 | length); | 1264 | length); |
1247 | case OUT_PID: | 1265 | case OUT_PID: |
1248 | 1266 | ||
@@ -1615,8 +1633,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |||
1615 | return -EPIPE; | 1633 | return -EPIPE; |
1616 | } | 1634 | } |
1617 | 1635 | ||
1618 | isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); | 1636 | return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); |
1619 | return 0; | ||
1620 | } | 1637 | } |
1621 | 1638 | ||
1622 | static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, | 1639 | static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, |
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 6473dd86993c..4377277667d9 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h | |||
@@ -54,6 +54,8 @@ void deinit_kmem_cache(void); | |||
54 | #define BUFFER_MAP 0x7 | 54 | #define BUFFER_MAP 0x7 |
55 | 55 | ||
56 | #define HC_MEMORY_REG 0x33c | 56 | #define HC_MEMORY_REG 0x33c |
57 | #define ISP_BANK(x) ((x) << 16) | ||
58 | |||
57 | #define HC_PORT1_CTRL 0x374 | 59 | #define HC_PORT1_CTRL 0x374 |
58 | #define PORT1_POWER (3 << 3) | 60 | #define PORT1_POWER (3 << 3) |
59 | #define PORT1_INIT1 (1 << 7) | 61 | #define PORT1_INIT1 (1 << 7) |
@@ -119,6 +121,9 @@ struct inter_packet_info { | |||
119 | typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, | 121 | typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, |
120 | struct isp1760_qtd *qtd); | 122 | struct isp1760_qtd *qtd); |
121 | 123 | ||
124 | #define isp1760_dbg(priv, fmt, args...) \ | ||
125 | dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args) | ||
126 | |||
122 | #define isp1760_info(priv, fmt, args...) \ | 127 | #define isp1760_info(priv, fmt, args...) \ |
123 | dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args) | 128 | dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args) |
124 | 129 | ||
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 6db7a2889e66..4ed228a89943 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
@@ -260,7 +260,6 @@ static const struct hc_driver ohci_at91_hc_driver = { | |||
260 | */ | 260 | */ |
261 | .hub_status_data = ohci_hub_status_data, | 261 | .hub_status_data = ohci_hub_status_data, |
262 | .hub_control = ohci_hub_control, | 262 | .hub_control = ohci_hub_control, |
263 | .hub_irq_enable = ohci_rhsc_enable, | ||
264 | #ifdef CONFIG_PM | 263 | #ifdef CONFIG_PM |
265 | .bus_suspend = ohci_bus_suspend, | 264 | .bus_suspend = ohci_bus_suspend, |
266 | .bus_resume = ohci_bus_resume, | 265 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index c0948008fe3d..2ac4e022a13f 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c | |||
@@ -163,7 +163,6 @@ static const struct hc_driver ohci_au1xxx_hc_driver = { | |||
163 | */ | 163 | */ |
164 | .hub_status_data = ohci_hub_status_data, | 164 | .hub_status_data = ohci_hub_status_data, |
165 | .hub_control = ohci_hub_control, | 165 | .hub_control = ohci_hub_control, |
166 | .hub_irq_enable = ohci_rhsc_enable, | ||
167 | #ifdef CONFIG_PM | 166 | #ifdef CONFIG_PM |
168 | .bus_suspend = ohci_bus_suspend, | 167 | .bus_suspend = ohci_bus_suspend, |
169 | .bus_resume = ohci_bus_resume, | 168 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c index cb0b506f8259..fb3055f084b5 100644 --- a/drivers/usb/host/ohci-ep93xx.c +++ b/drivers/usb/host/ohci-ep93xx.c | |||
@@ -134,7 +134,6 @@ static struct hc_driver ohci_ep93xx_hc_driver = { | |||
134 | .get_frame_number = ohci_get_frame, | 134 | .get_frame_number = ohci_get_frame, |
135 | .hub_status_data = ohci_hub_status_data, | 135 | .hub_status_data = ohci_hub_status_data, |
136 | .hub_control = ohci_hub_control, | 136 | .hub_control = ohci_hub_control, |
137 | .hub_irq_enable = ohci_rhsc_enable, | ||
138 | #ifdef CONFIG_PM | 137 | #ifdef CONFIG_PM |
139 | .bus_suspend = ohci_bus_suspend, | 138 | .bus_suspend = ohci_bus_suspend, |
140 | .bus_resume = ohci_bus_resume, | 139 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 26bc47941d01..89901962cbfd 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -86,6 +86,21 @@ static void ohci_stop (struct usb_hcd *hcd); | |||
86 | static int ohci_restart (struct ohci_hcd *ohci); | 86 | static int ohci_restart (struct ohci_hcd *ohci); |
87 | #endif | 87 | #endif |
88 | 88 | ||
89 | #ifdef CONFIG_PCI | ||
90 | static void quirk_amd_pll(int state); | ||
91 | static void amd_iso_dev_put(void); | ||
92 | #else | ||
93 | static inline void quirk_amd_pll(int state) | ||
94 | { | ||
95 | return; | ||
96 | } | ||
97 | static inline void amd_iso_dev_put(void) | ||
98 | { | ||
99 | return; | ||
100 | } | ||
101 | #endif | ||
102 | |||
103 | |||
89 | #include "ohci-hub.c" | 104 | #include "ohci-hub.c" |
90 | #include "ohci-dbg.c" | 105 | #include "ohci-dbg.c" |
91 | #include "ohci-mem.c" | 106 | #include "ohci-mem.c" |
@@ -483,6 +498,9 @@ static int ohci_init (struct ohci_hcd *ohci) | |||
483 | int ret; | 498 | int ret; |
484 | struct usb_hcd *hcd = ohci_to_hcd(ohci); | 499 | struct usb_hcd *hcd = ohci_to_hcd(ohci); |
485 | 500 | ||
501 | if (distrust_firmware) | ||
502 | ohci->flags |= OHCI_QUIRK_HUB_POWER; | ||
503 | |||
486 | disable (ohci); | 504 | disable (ohci); |
487 | ohci->regs = hcd->regs; | 505 | ohci->regs = hcd->regs; |
488 | 506 | ||
@@ -689,7 +707,8 @@ retry: | |||
689 | temp |= RH_A_NOCP; | 707 | temp |= RH_A_NOCP; |
690 | temp &= ~(RH_A_POTPGT | RH_A_NPS); | 708 | temp &= ~(RH_A_POTPGT | RH_A_NPS); |
691 | ohci_writel (ohci, temp, &ohci->regs->roothub.a); | 709 | ohci_writel (ohci, temp, &ohci->regs->roothub.a); |
692 | } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) { | 710 | } else if ((ohci->flags & OHCI_QUIRK_AMD756) || |
711 | (ohci->flags & OHCI_QUIRK_HUB_POWER)) { | ||
693 | /* hub power always on; required for AMD-756 and some | 712 | /* hub power always on; required for AMD-756 and some |
694 | * Mac platforms. ganged overcurrent reporting, if any. | 713 | * Mac platforms. ganged overcurrent reporting, if any. |
695 | */ | 714 | */ |
@@ -882,6 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd) | |||
882 | 901 | ||
883 | if (quirk_zfmicro(ohci)) | 902 | if (quirk_zfmicro(ohci)) |
884 | del_timer(&ohci->unlink_watchdog); | 903 | del_timer(&ohci->unlink_watchdog); |
904 | if (quirk_amdiso(ohci)) | ||
905 | amd_iso_dev_put(); | ||
885 | 906 | ||
886 | remove_debug_files (ohci); | 907 | remove_debug_files (ohci); |
887 | ohci_mem_cleanup (ohci); | 908 | ohci_mem_cleanup (ohci); |
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index b56739221d11..7ea9a7b31155 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
@@ -36,18 +36,6 @@ | |||
36 | 36 | ||
37 | /*-------------------------------------------------------------------------*/ | 37 | /*-------------------------------------------------------------------------*/ |
38 | 38 | ||
39 | /* hcd->hub_irq_enable() */ | ||
40 | static void ohci_rhsc_enable (struct usb_hcd *hcd) | ||
41 | { | ||
42 | struct ohci_hcd *ohci = hcd_to_ohci (hcd); | ||
43 | |||
44 | spin_lock_irq(&ohci->lock); | ||
45 | if (!ohci->autostop) | ||
46 | del_timer(&hcd->rh_timer); /* Prevent next poll */ | ||
47 | ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable); | ||
48 | spin_unlock_irq(&ohci->lock); | ||
49 | } | ||
50 | |||
51 | #define OHCI_SCHED_ENABLES \ | 39 | #define OHCI_SCHED_ENABLES \ |
52 | (OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE) | 40 | (OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE) |
53 | 41 | ||
@@ -374,18 +362,28 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, | |||
374 | int any_connected) | 362 | int any_connected) |
375 | { | 363 | { |
376 | int poll_rh = 1; | 364 | int poll_rh = 1; |
365 | int rhsc; | ||
377 | 366 | ||
367 | rhsc = ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC; | ||
378 | switch (ohci->hc_control & OHCI_CTRL_HCFS) { | 368 | switch (ohci->hc_control & OHCI_CTRL_HCFS) { |
379 | 369 | ||
380 | case OHCI_USB_OPER: | 370 | case OHCI_USB_OPER: |
381 | /* keep on polling until we know a device is connected | 371 | /* If no status changes are pending, enable status-change |
382 | * and RHSC is enabled */ | 372 | * interrupts. |
373 | */ | ||
374 | if (!rhsc && !changed) { | ||
375 | rhsc = OHCI_INTR_RHSC; | ||
376 | ohci_writel(ohci, rhsc, &ohci->regs->intrenable); | ||
377 | } | ||
378 | |||
379 | /* Keep on polling until we know a device is connected | ||
380 | * and RHSC is enabled, or until we autostop. | ||
381 | */ | ||
383 | if (!ohci->autostop) { | 382 | if (!ohci->autostop) { |
384 | if (any_connected || | 383 | if (any_connected || |
385 | !device_may_wakeup(&ohci_to_hcd(ohci) | 384 | !device_may_wakeup(&ohci_to_hcd(ohci) |
386 | ->self.root_hub->dev)) { | 385 | ->self.root_hub->dev)) { |
387 | if (ohci_readl(ohci, &ohci->regs->intrenable) & | 386 | if (rhsc) |
388 | OHCI_INTR_RHSC) | ||
389 | poll_rh = 0; | 387 | poll_rh = 0; |
390 | } else { | 388 | } else { |
391 | ohci->autostop = 1; | 389 | ohci->autostop = 1; |
@@ -398,12 +396,13 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, | |||
398 | ohci->autostop = 0; | 396 | ohci->autostop = 0; |
399 | ohci->next_statechange = jiffies + | 397 | ohci->next_statechange = jiffies + |
400 | STATECHANGE_DELAY; | 398 | STATECHANGE_DELAY; |
401 | } else if (time_after_eq(jiffies, | 399 | } else if (rhsc && time_after_eq(jiffies, |
402 | ohci->next_statechange) | 400 | ohci->next_statechange) |
403 | && !ohci->ed_rm_list | 401 | && !ohci->ed_rm_list |
404 | && !(ohci->hc_control & | 402 | && !(ohci->hc_control & |
405 | OHCI_SCHED_ENABLES)) { | 403 | OHCI_SCHED_ENABLES)) { |
406 | ohci_rh_suspend(ohci, 1); | 404 | ohci_rh_suspend(ohci, 1); |
405 | poll_rh = 0; | ||
407 | } | 406 | } |
408 | } | 407 | } |
409 | break; | 408 | break; |
@@ -417,6 +416,12 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, | |||
417 | else | 416 | else |
418 | usb_hcd_resume_root_hub(ohci_to_hcd(ohci)); | 417 | usb_hcd_resume_root_hub(ohci_to_hcd(ohci)); |
419 | } else { | 418 | } else { |
419 | if (!rhsc && (ohci->autostop || | ||
420 | ohci_to_hcd(ohci)->self.root_hub-> | ||
421 | do_remote_wakeup)) | ||
422 | ohci_writel(ohci, OHCI_INTR_RHSC, | ||
423 | &ohci->regs->intrenable); | ||
424 | |||
420 | /* everything is idle, no need for polling */ | 425 | /* everything is idle, no need for polling */ |
421 | poll_rh = 0; | 426 | poll_rh = 0; |
422 | } | 427 | } |
@@ -438,12 +443,16 @@ static inline int ohci_rh_resume(struct ohci_hcd *ohci) | |||
438 | static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, | 443 | static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, |
439 | int any_connected) | 444 | int any_connected) |
440 | { | 445 | { |
441 | int poll_rh = 1; | 446 | /* If RHSC is enabled, don't poll */ |
442 | |||
443 | /* keep on polling until RHSC is enabled */ | ||
444 | if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC) | 447 | if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC) |
445 | poll_rh = 0; | 448 | return 0; |
446 | return poll_rh; | 449 | |
450 | /* If no status changes are pending, enable status-change interrupts */ | ||
451 | if (!changed) { | ||
452 | ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable); | ||
453 | return 0; | ||
454 | } | ||
455 | return 1; | ||
447 | } | 456 | } |
448 | 457 | ||
449 | #endif /* CONFIG_PM */ | 458 | #endif /* CONFIG_PM */ |
@@ -483,6 +492,13 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf) | |||
483 | length++; | 492 | length++; |
484 | } | 493 | } |
485 | 494 | ||
495 | /* Some broken controllers never turn off RHSC in the interrupt | ||
496 | * status register. For their sake we won't re-enable RHSC | ||
497 | * interrupts if the flag is already set. | ||
498 | */ | ||
499 | if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC) | ||
500 | changed = 1; | ||
501 | |||
486 | /* look at each port */ | 502 | /* look at each port */ |
487 | for (i = 0; i < ohci->num_ports; i++) { | 503 | for (i = 0; i < ohci->num_ports; i++) { |
488 | u32 status = roothub_portstatus (ohci, i); | 504 | u32 status = roothub_portstatus (ohci, i); |
@@ -572,8 +588,6 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port) | |||
572 | return 0; | 588 | return 0; |
573 | } | 589 | } |
574 | 590 | ||
575 | static void start_hnp(struct ohci_hcd *ohci); | ||
576 | |||
577 | #else | 591 | #else |
578 | 592 | ||
579 | #define ohci_start_port_reset NULL | 593 | #define ohci_start_port_reset NULL |
@@ -760,7 +774,7 @@ static int ohci_hub_control ( | |||
760 | #ifdef CONFIG_USB_OTG | 774 | #ifdef CONFIG_USB_OTG |
761 | if (hcd->self.otg_port == (wIndex + 1) | 775 | if (hcd->self.otg_port == (wIndex + 1) |
762 | && hcd->self.b_hnp_enable) | 776 | && hcd->self.b_hnp_enable) |
763 | start_hnp(ohci); | 777 | ohci->start_hnp(ohci); |
764 | else | 778 | else |
765 | #endif | 779 | #endif |
766 | ohci_writel (ohci, RH_PS_PSS, | 780 | ohci_writel (ohci, RH_PS_PSS, |
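The ohci-hub.c changes above reduce root-hub polling to one rule: while the RHSC interrupt is enabled the HCD relies on interrupts, and as soon as no port-status changes are pending the driver re-arms RHSC and stops polling. A condensed sketch of the non-PM ohci_root_hub_state_changes() introduced by this patch (the helper name here is hypothetical; the register accessors are the driver's own):

	static int rh_state_changes_sketch(struct ohci_hcd *ohci, int changed)
	{
		/* RHSC already enabled: interrupt-driven, no polling needed */
		if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
			return 0;

		/* nothing pending: hand over to RHSC interrupts, stop polling */
		if (!changed) {
			ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
			return 0;
		}

		/* changes still outstanding: keep polling until drained */
		return 1;
	}

The CONFIG_PM variant applies the same idea but additionally allows autostop/suspend only once RHSC is actually enabled, and the hub_status_data hunk forces changed = 1 for controllers that never clear RHSC in intrstatus, so those chips are never switched back to interrupt mode.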
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c index 9e31d440d115..de42283149c7 100644 --- a/drivers/usb/host/ohci-lh7a404.c +++ b/drivers/usb/host/ohci-lh7a404.c | |||
@@ -193,7 +193,6 @@ static const struct hc_driver ohci_lh7a404_hc_driver = { | |||
193 | */ | 193 | */ |
194 | .hub_status_data = ohci_hub_status_data, | 194 | .hub_status_data = ohci_hub_status_data, |
195 | .hub_control = ohci_hub_control, | 195 | .hub_control = ohci_hub_control, |
196 | .hub_irq_enable = ohci_rhsc_enable, | ||
197 | #ifdef CONFIG_PM | 196 | #ifdef CONFIG_PM |
198 | .bus_suspend = ohci_bus_suspend, | 197 | .bus_suspend = ohci_bus_suspend, |
199 | .bus_resume = ohci_bus_resume, | 198 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 94dfca02f7e1..1eb64d08b60a 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c | |||
@@ -225,6 +225,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) | |||
225 | dev_err(hcd->self.controller, "can't find transceiver\n"); | 225 | dev_err(hcd->self.controller, "can't find transceiver\n"); |
226 | return -ENODEV; | 226 | return -ENODEV; |
227 | } | 227 | } |
228 | ohci->start_hnp = start_hnp; | ||
228 | } | 229 | } |
229 | #endif | 230 | #endif |
230 | 231 | ||
@@ -260,7 +261,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) | |||
260 | omap_cfg_reg(W4_USB_HIGHZ); | 261 | omap_cfg_reg(W4_USB_HIGHZ); |
261 | } | 262 | } |
262 | ohci_writel(ohci, rh, &ohci->regs->roothub.a); | 263 | ohci_writel(ohci, rh, &ohci->regs->roothub.a); |
263 | distrust_firmware = 0; | 264 | ohci->flags &= ~OHCI_QUIRK_HUB_POWER; |
264 | } else if (machine_is_nokia770()) { | 265 | } else if (machine_is_nokia770()) { |
265 | /* We require a self-powered hub, which should have | 266 | /* We require a self-powered hub, which should have |
266 | * plenty of power. */ | 267 | * plenty of power. */ |
@@ -469,7 +470,6 @@ static const struct hc_driver ohci_omap_hc_driver = { | |||
469 | */ | 470 | */ |
470 | .hub_status_data = ohci_hub_status_data, | 471 | .hub_status_data = ohci_hub_status_data, |
471 | .hub_control = ohci_hub_control, | 472 | .hub_control = ohci_hub_control, |
472 | .hub_irq_enable = ohci_rhsc_enable, | ||
473 | #ifdef CONFIG_PM | 473 | #ifdef CONFIG_PM |
474 | .bus_suspend = ohci_bus_suspend, | 474 | .bus_suspend = ohci_bus_suspend, |
475 | .bus_resume = ohci_bus_resume, | 475 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 4696cc912e16..a9c2ae36c7ad 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
@@ -18,6 +18,28 @@ | |||
18 | #error "This file is PCI bus glue. CONFIG_PCI must be defined." | 18 | #error "This file is PCI bus glue. CONFIG_PCI must be defined." |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/io.h> | ||
23 | |||
24 | |||
25 | /* constants used to work around PM-related transfer | ||
26 | * glitches in some AMD 700 series southbridges | ||
27 | */ | ||
28 | #define AB_REG_BAR 0xf0 | ||
29 | #define AB_INDX(addr) ((addr) + 0x00) | ||
30 | #define AB_DATA(addr) ((addr) + 0x04) | ||
31 | #define AX_INDXC 0X30 | ||
32 | #define AX_DATAC 0x34 | ||
33 | |||
34 | #define NB_PCIE_INDX_ADDR 0xe0 | ||
35 | #define NB_PCIE_INDX_DATA 0xe4 | ||
36 | #define PCIE_P_CNTL 0x10040 | ||
37 | #define BIF_NB 0x10002 | ||
38 | |||
39 | static struct pci_dev *amd_smbus_dev; | ||
40 | static struct pci_dev *amd_hb_dev; | ||
41 | static int amd_ohci_iso_count; | ||
42 | |||
21 | /*-------------------------------------------------------------------------*/ | 43 | /*-------------------------------------------------------------------------*/ |
22 | 44 | ||
23 | static int broken_suspend(struct usb_hcd *hcd) | 45 | static int broken_suspend(struct usb_hcd *hcd) |
@@ -143,6 +165,103 @@ static int ohci_quirk_nec(struct usb_hcd *hcd) | |||
143 | return 0; | 165 | return 0; |
144 | } | 166 | } |
145 | 167 | ||
168 | static int ohci_quirk_amd700(struct usb_hcd *hcd) | ||
169 | { | ||
170 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | ||
171 | u8 rev = 0; | ||
172 | |||
173 | if (!amd_smbus_dev) | ||
174 | amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, | ||
175 | PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); | ||
176 | if (!amd_smbus_dev) | ||
177 | return 0; | ||
178 | |||
179 | pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); | ||
180 | if ((rev > 0x3b) || (rev < 0x30)) { | ||
181 | pci_dev_put(amd_smbus_dev); | ||
182 | amd_smbus_dev = NULL; | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | amd_ohci_iso_count++; | ||
187 | |||
188 | if (!amd_hb_dev) | ||
189 | amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL); | ||
190 | |||
191 | ohci->flags |= OHCI_QUIRK_AMD_ISO; | ||
192 | ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n"); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | /* | ||
198 | * The hardware normally enables the A-link power management feature, which | ||
199 | * lets the system lower the power consumption in idle states. | ||
200 | * | ||
201 | * Assume the system is configured to have USB 1.1 ISO transfers going | ||
202 | * to or from a USB device. Without this quirk, that stream may stutter | ||
203 | * or have breaks occasionally. For transfers going to speakers, this | ||
204 | * makes a very audible mess... | ||
205 | * | ||
206 | * That audio playback corruption is due to the audio stream getting | ||
207 | * interrupted occasionally when the link goes in lower power state | ||
208 | * This USB quirk prevents the link going into that lower power state | ||
209 | * during audio playback or other ISO operations. | ||
210 | */ | ||
211 | static void quirk_amd_pll(int on) | ||
212 | { | ||
213 | u32 addr; | ||
214 | u32 val; | ||
215 | u32 bit = (on > 0) ? 1 : 0; | ||
216 | |||
217 | pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr); | ||
218 | |||
219 | /* BIT names/meanings are NDA-protected, sorry ... */ | ||
220 | |||
221 | outl(AX_INDXC, AB_INDX(addr)); | ||
222 | outl(0x40, AB_DATA(addr)); | ||
223 | outl(AX_DATAC, AB_INDX(addr)); | ||
224 | val = inl(AB_DATA(addr)); | ||
225 | val &= ~((1 << 3) | (1 << 4) | (1 << 9)); | ||
226 | val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9); | ||
227 | outl(val, AB_DATA(addr)); | ||
228 | |||
229 | if (amd_hb_dev) { | ||
230 | addr = PCIE_P_CNTL; | ||
231 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr); | ||
232 | |||
233 | pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val); | ||
234 | val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12)); | ||
235 | val |= bit | (bit << 3) | (bit << 12); | ||
236 | val |= ((!bit) << 4) | ((!bit) << 9); | ||
237 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val); | ||
238 | |||
239 | addr = BIF_NB; | ||
240 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr); | ||
241 | |||
242 | pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val); | ||
243 | val &= ~(1 << 8); | ||
244 | val |= bit << 8; | ||
245 | pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val); | ||
246 | } | ||
247 | } | ||
248 | |||
249 | static void amd_iso_dev_put(void) | ||
250 | { | ||
251 | amd_ohci_iso_count--; | ||
252 | if (amd_ohci_iso_count == 0) { | ||
253 | if (amd_smbus_dev) { | ||
254 | pci_dev_put(amd_smbus_dev); | ||
255 | amd_smbus_dev = NULL; | ||
256 | } | ||
257 | if (amd_hb_dev) { | ||
258 | pci_dev_put(amd_hb_dev); | ||
259 | amd_hb_dev = NULL; | ||
260 | } | ||
261 | } | ||
262 | |||
263 | } | ||
264 | |||
146 | /* List of quirks for OHCI */ | 265 | /* List of quirks for OHCI */ |
147 | static const struct pci_device_id ohci_pci_quirks[] = { | 266 | static const struct pci_device_id ohci_pci_quirks[] = { |
148 | { | 267 | { |
@@ -181,6 +300,19 @@ static const struct pci_device_id ohci_pci_quirks[] = { | |||
181 | PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152), | 300 | PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152), |
182 | .driver_data = (unsigned long) broken_suspend, | 301 | .driver_data = (unsigned long) broken_suspend, |
183 | }, | 302 | }, |
303 | { | ||
304 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397), | ||
305 | .driver_data = (unsigned long)ohci_quirk_amd700, | ||
306 | }, | ||
307 | { | ||
308 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398), | ||
309 | .driver_data = (unsigned long)ohci_quirk_amd700, | ||
310 | }, | ||
311 | { | ||
312 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), | ||
313 | .driver_data = (unsigned long)ohci_quirk_amd700, | ||
314 | }, | ||
315 | |||
184 | /* FIXME for some of the early AMD 760 southbridges, OHCI | 316 | /* FIXME for some of the early AMD 760 southbridges, OHCI |
185 | * won't work at all. blacklist them. | 317 | * won't work at all. blacklist them. |
186 | */ | 318 | */ |
@@ -327,7 +459,6 @@ static const struct hc_driver ohci_pci_hc_driver = { | |||
327 | */ | 459 | */ |
328 | .hub_status_data = ohci_hub_status_data, | 460 | .hub_status_data = ohci_hub_status_data, |
329 | .hub_control = ohci_hub_control, | 461 | .hub_control = ohci_hub_control, |
330 | .hub_irq_enable = ohci_rhsc_enable, | ||
331 | #ifdef CONFIG_PM | 462 | #ifdef CONFIG_PM |
332 | .bus_suspend = ohci_bus_suspend, | 463 | .bus_suspend = ohci_bus_suspend, |
333 | .bus_resume = ohci_bus_resume, | 464 | .bus_resume = ohci_bus_resume, |
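Taken together, the ohci-pci.c additions form a reference-counted detection/teardown pair around the NDA-covered register pokes in quirk_amd_pll(): ohci_quirk_amd700() runs from the PCI quirk table, recognizes SB700-family southbridges by the SMBus device revision (0x30..0x3b), pins the SMBus device and the AMD 0x9600 host bridge, and sets OHCI_QUIRK_AMD_ISO; amd_iso_dev_put() drops those references once the last affected controller is stopped. A minimal sketch of how callers are expected to gate the toggle (the wrapper function is illustrative only; in the patch the put happens from ohci_stop() in ohci-hcd.c):

	/* hypothetical wrapper: gate the A-link PM toggle on the quirk flag */
	static void amd_iso_quirk_toggle(struct ohci_hcd *ohci, int iso_active)
	{
		if (!quirk_amdiso(ohci))
			return;
		/* 0 = park A-link power management while ISO traffic runs,
		 * 1 = allow it again once the last ISO stream is gone */
		quirk_amd_pll(iso_active ? 0 : 1);
	}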
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c index b02cd0761977..658a2a978c32 100644 --- a/drivers/usb/host/ohci-pnx4008.c +++ b/drivers/usb/host/ohci-pnx4008.c | |||
@@ -277,7 +277,6 @@ static const struct hc_driver ohci_pnx4008_hc_driver = { | |||
277 | */ | 277 | */ |
278 | .hub_status_data = ohci_hub_status_data, | 278 | .hub_status_data = ohci_hub_status_data, |
279 | .hub_control = ohci_hub_control, | 279 | .hub_control = ohci_hub_control, |
280 | .hub_irq_enable = ohci_rhsc_enable, | ||
281 | #ifdef CONFIG_PM | 280 | #ifdef CONFIG_PM |
282 | .bus_suspend = ohci_bus_suspend, | 281 | .bus_suspend = ohci_bus_suspend, |
283 | .bus_resume = ohci_bus_resume, | 282 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-pnx8550.c b/drivers/usb/host/ohci-pnx8550.c index 605d59cba28e..28467e288a93 100644 --- a/drivers/usb/host/ohci-pnx8550.c +++ b/drivers/usb/host/ohci-pnx8550.c | |||
@@ -201,7 +201,6 @@ static const struct hc_driver ohci_pnx8550_hc_driver = { | |||
201 | */ | 201 | */ |
202 | .hub_status_data = ohci_hub_status_data, | 202 | .hub_status_data = ohci_hub_status_data, |
203 | .hub_control = ohci_hub_control, | 203 | .hub_control = ohci_hub_control, |
204 | .hub_irq_enable = ohci_rhsc_enable, | ||
205 | #ifdef CONFIG_PM | 204 | #ifdef CONFIG_PM |
206 | .bus_suspend = ohci_bus_suspend, | 205 | .bus_suspend = ohci_bus_suspend, |
207 | .bus_resume = ohci_bus_resume, | 206 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index 91e6e101a4cc..7ac53264ead3 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c | |||
@@ -72,7 +72,6 @@ static const struct hc_driver ohci_ppc_of_hc_driver = { | |||
72 | */ | 72 | */ |
73 | .hub_status_data = ohci_hub_status_data, | 73 | .hub_status_data = ohci_hub_status_data, |
74 | .hub_control = ohci_hub_control, | 74 | .hub_control = ohci_hub_control, |
75 | .hub_irq_enable = ohci_rhsc_enable, | ||
76 | #ifdef CONFIG_PM | 75 | #ifdef CONFIG_PM |
77 | .bus_suspend = ohci_bus_suspend, | 76 | .bus_suspend = ohci_bus_suspend, |
78 | .bus_resume = ohci_bus_resume, | 77 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c index 523c30125577..cd3398b675b2 100644 --- a/drivers/usb/host/ohci-ppc-soc.c +++ b/drivers/usb/host/ohci-ppc-soc.c | |||
@@ -172,7 +172,6 @@ static const struct hc_driver ohci_ppc_soc_hc_driver = { | |||
172 | */ | 172 | */ |
173 | .hub_status_data = ohci_hub_status_data, | 173 | .hub_status_data = ohci_hub_status_data, |
174 | .hub_control = ohci_hub_control, | 174 | .hub_control = ohci_hub_control, |
175 | .hub_irq_enable = ohci_rhsc_enable, | ||
176 | #ifdef CONFIG_PM | 175 | #ifdef CONFIG_PM |
177 | .bus_suspend = ohci_bus_suspend, | 176 | .bus_suspend = ohci_bus_suspend, |
178 | .bus_resume = ohci_bus_resume, | 177 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c index 55c95647f008..2089d8a46c4b 100644 --- a/drivers/usb/host/ohci-ps3.c +++ b/drivers/usb/host/ohci-ps3.c | |||
@@ -68,7 +68,6 @@ static const struct hc_driver ps3_ohci_hc_driver = { | |||
68 | .get_frame_number = ohci_get_frame, | 68 | .get_frame_number = ohci_get_frame, |
69 | .hub_status_data = ohci_hub_status_data, | 69 | .hub_status_data = ohci_hub_status_data, |
70 | .hub_control = ohci_hub_control, | 70 | .hub_control = ohci_hub_control, |
71 | .hub_irq_enable = ohci_rhsc_enable, | ||
72 | .start_port_reset = ohci_start_port_reset, | 71 | .start_port_reset = ohci_start_port_reset, |
73 | #if defined(CONFIG_PM) | 72 | #if defined(CONFIG_PM) |
74 | .bus_suspend = ohci_bus_suspend, | 73 | .bus_suspend = ohci_bus_suspend, |
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index 8c9c4849db6e..7f0f35c78185 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c | |||
@@ -298,7 +298,6 @@ static const struct hc_driver ohci_pxa27x_hc_driver = { | |||
298 | */ | 298 | */ |
299 | .hub_status_data = ohci_hub_status_data, | 299 | .hub_status_data = ohci_hub_status_data, |
300 | .hub_control = ohci_hub_control, | 300 | .hub_control = ohci_hub_control, |
301 | .hub_irq_enable = ohci_rhsc_enable, | ||
302 | #ifdef CONFIG_PM | 301 | #ifdef CONFIG_PM |
303 | .bus_suspend = ohci_bus_suspend, | 302 | .bus_suspend = ohci_bus_suspend, |
304 | .bus_resume = ohci_bus_resume, | 303 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index 6a9b4c557953..c2d80f80448b 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
@@ -49,6 +49,9 @@ __acquires(ohci->lock) | |||
49 | switch (usb_pipetype (urb->pipe)) { | 49 | switch (usb_pipetype (urb->pipe)) { |
50 | case PIPE_ISOCHRONOUS: | 50 | case PIPE_ISOCHRONOUS: |
51 | ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; | 51 | ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; |
52 | if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 | ||
53 | && quirk_amdiso(ohci)) | ||
54 | quirk_amd_pll(1); | ||
52 | break; | 55 | break; |
53 | case PIPE_INTERRUPT: | 56 | case PIPE_INTERRUPT: |
54 | ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; | 57 | ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; |
@@ -677,6 +680,9 @@ static void td_submit_urb ( | |||
677 | data + urb->iso_frame_desc [cnt].offset, | 680 | data + urb->iso_frame_desc [cnt].offset, |
678 | urb->iso_frame_desc [cnt].length, urb, cnt); | 681 | urb->iso_frame_desc [cnt].length, urb, cnt); |
679 | } | 682 | } |
683 | if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 | ||
684 | && quirk_amdiso(ohci)) | ||
685 | quirk_amd_pll(0); | ||
680 | periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 | 686 | periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 |
681 | && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; | 687 | && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; |
682 | break; | 688 | break; |
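These ohci-q.c hunks are the runtime half of the AMD ISO quirk: the link-PM workaround is switched off just before the first isochronous URB of a session is queued (the bandwidth_isoc_reqs counter is still zero at that point in td_submit_urb()) and switched back on when the counter drops to zero again in the completion path. In condensed form (names from the patch, surrounding context simplified):

	/* submit side: the first ISO stream is about to start */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 && quirk_amdiso(ohci))
		quirk_amd_pll(0);	/* hold A-link power management off */

	/* completion side: the last ISO stream has just finished */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 && quirk_amdiso(ohci))
		quirk_amd_pll(1);	/* ISO traffic gone, allow PM again */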
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c index 9e3dc4069e8b..f46af7a718d4 100644 --- a/drivers/usb/host/ohci-s3c2410.c +++ b/drivers/usb/host/ohci-s3c2410.c | |||
@@ -466,7 +466,6 @@ static const struct hc_driver ohci_s3c2410_hc_driver = { | |||
466 | */ | 466 | */ |
467 | .hub_status_data = ohci_s3c2410_hub_status_data, | 467 | .hub_status_data = ohci_s3c2410_hub_status_data, |
468 | .hub_control = ohci_s3c2410_hub_control, | 468 | .hub_control = ohci_s3c2410_hub_control, |
469 | .hub_irq_enable = ohci_rhsc_enable, | ||
470 | #ifdef CONFIG_PM | 469 | #ifdef CONFIG_PM |
471 | .bus_suspend = ohci_bus_suspend, | 470 | .bus_suspend = ohci_bus_suspend, |
472 | .bus_resume = ohci_bus_resume, | 471 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c index 4626b002e670..e4bbe8e188e4 100644 --- a/drivers/usb/host/ohci-sa1111.c +++ b/drivers/usb/host/ohci-sa1111.c | |||
@@ -231,7 +231,6 @@ static const struct hc_driver ohci_sa1111_hc_driver = { | |||
231 | */ | 231 | */ |
232 | .hub_status_data = ohci_hub_status_data, | 232 | .hub_status_data = ohci_hub_status_data, |
233 | .hub_control = ohci_hub_control, | 233 | .hub_control = ohci_hub_control, |
234 | .hub_irq_enable = ohci_rhsc_enable, | ||
235 | #ifdef CONFIG_PM | 234 | #ifdef CONFIG_PM |
236 | .bus_suspend = ohci_bus_suspend, | 235 | .bus_suspend = ohci_bus_suspend, |
237 | .bus_resume = ohci_bus_resume, | 236 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c index e7ee607278fe..60f03cc7ec4f 100644 --- a/drivers/usb/host/ohci-sh.c +++ b/drivers/usb/host/ohci-sh.c | |||
@@ -68,7 +68,6 @@ static const struct hc_driver ohci_sh_hc_driver = { | |||
68 | */ | 68 | */ |
69 | .hub_status_data = ohci_hub_status_data, | 69 | .hub_status_data = ohci_hub_status_data, |
70 | .hub_control = ohci_hub_control, | 70 | .hub_control = ohci_hub_control, |
71 | .hub_irq_enable = ohci_rhsc_enable, | ||
72 | #ifdef CONFIG_PM | 71 | #ifdef CONFIG_PM |
73 | .bus_suspend = ohci_bus_suspend, | 72 | .bus_suspend = ohci_bus_suspend, |
74 | .bus_resume = ohci_bus_resume, | 73 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index 21b164e4abeb..cff23637cfcc 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c | |||
@@ -75,7 +75,6 @@ static const struct hc_driver ohci_sm501_hc_driver = { | |||
75 | */ | 75 | */ |
76 | .hub_status_data = ohci_hub_status_data, | 76 | .hub_status_data = ohci_hub_status_data, |
77 | .hub_control = ohci_hub_control, | 77 | .hub_control = ohci_hub_control, |
78 | .hub_irq_enable = ohci_rhsc_enable, | ||
79 | #ifdef CONFIG_PM | 78 | #ifdef CONFIG_PM |
80 | .bus_suspend = ohci_bus_suspend, | 79 | .bus_suspend = ohci_bus_suspend, |
81 | .bus_resume = ohci_bus_resume, | 80 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c index 3660c83d80af..23fd6a886bdd 100644 --- a/drivers/usb/host/ohci-ssb.c +++ b/drivers/usb/host/ohci-ssb.c | |||
@@ -81,7 +81,6 @@ static const struct hc_driver ssb_ohci_hc_driver = { | |||
81 | 81 | ||
82 | .hub_status_data = ohci_hub_status_data, | 82 | .hub_status_data = ohci_hub_status_data, |
83 | .hub_control = ohci_hub_control, | 83 | .hub_control = ohci_hub_control, |
84 | .hub_irq_enable = ohci_rhsc_enable, | ||
85 | #ifdef CONFIG_PM | 84 | #ifdef CONFIG_PM |
86 | .bus_suspend = ohci_bus_suspend, | 85 | .bus_suspend = ohci_bus_suspend, |
87 | .bus_resume = ohci_bus_resume, | 86 | .bus_resume = ohci_bus_resume, |
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index dc544ddc7849..faf622eafce7 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h | |||
@@ -371,6 +371,7 @@ struct ohci_hcd { | |||
371 | * other external transceivers should be software-transparent | 371 | * other external transceivers should be software-transparent |
372 | */ | 372 | */ |
373 | struct otg_transceiver *transceiver; | 373 | struct otg_transceiver *transceiver; |
374 | void (*start_hnp)(struct ohci_hcd *ohci); | ||
374 | 375 | ||
375 | /* | 376 | /* |
376 | * memory management for queue data structures | 377 | * memory management for queue data structures |
@@ -399,6 +400,8 @@ struct ohci_hcd { | |||
399 | #define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ | 400 | #define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ |
400 | #define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ | 401 | #define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ |
401 | #define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ | 402 | #define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ |
403 | #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ | ||
404 | #define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/ | ||
402 | // there are also chip quirks/bugs in init logic | 405 | // there are also chip quirks/bugs in init logic |
403 | 406 | ||
404 | struct work_struct nec_work; /* Worker for NEC quirk */ | 407 | struct work_struct nec_work; /* Worker for NEC quirk */ |
@@ -426,6 +429,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci) | |||
426 | { | 429 | { |
427 | return ohci->flags & OHCI_QUIRK_ZFMICRO; | 430 | return ohci->flags & OHCI_QUIRK_ZFMICRO; |
428 | } | 431 | } |
432 | static inline int quirk_amdiso(struct ohci_hcd *ohci) | ||
433 | { | ||
434 | return ohci->flags & OHCI_QUIRK_AMD_ISO; | ||
435 | } | ||
429 | #else | 436 | #else |
430 | static inline int quirk_nec(struct ohci_hcd *ohci) | 437 | static inline int quirk_nec(struct ohci_hcd *ohci) |
431 | { | 438 | { |
@@ -435,6 +442,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci) | |||
435 | { | 442 | { |
436 | return 0; | 443 | return 0; |
437 | } | 444 | } |
445 | static inline int quirk_amdiso(struct ohci_hcd *ohci) | ||
446 | { | ||
447 | return 0; | ||
448 | } | ||
438 | #endif | 449 | #endif |
439 | 450 | ||
440 | /* convert between an hcd pointer and the corresponding ohci_hcd */ | 451 | /* convert between an hcd pointer and the corresponding ohci_hcd */ |
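The ohci.h hunks tie the per-file changes together: OTG HNP is no longer reached through a file-local start_hnp() forward declaration in the hub code but through a per-controller callback installed by the platform glue, and the two new quirk bits get the usual inline accessors so common code can test them cheaply. Roughly, across the files touched above (call sites abridged):

	/* ohci-omap.c: bus glue installs its HNP handler at init time */
	ohci->start_hnp = start_hnp;

	/* ohci-hub.c: common hub code calls through the pointer */
	if (hcd->self.otg_port == (wIndex + 1) && hcd->self.b_hnp_enable)
		ohci->start_hnp(ohci);

	/* anywhere in common code: quirk tests stay one-liners */
	if (quirk_amdiso(ohci))
		amd_iso_dev_put();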
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index d5f02dddb120..ea7126f99cab 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
@@ -964,11 +964,34 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum) | |||
964 | disable_irq_nrdy(r8a66597, pipenum); | 964 | disable_irq_nrdy(r8a66597, pipenum); |
965 | } | 965 | } |
966 | 966 | ||
967 | static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597) | ||
968 | { | ||
969 | mod_timer(&r8a66597->rh_timer, | ||
970 | jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME)); | ||
971 | } | ||
972 | |||
973 | static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, | ||
974 | int connect) | ||
975 | { | ||
976 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | ||
977 | |||
978 | rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; | ||
979 | rh->scount = R8A66597_MAX_SAMPLING; | ||
980 | if (connect) | ||
981 | rh->port |= 1 << USB_PORT_FEAT_CONNECTION; | ||
982 | else | ||
983 | rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION); | ||
984 | rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION; | ||
985 | |||
986 | r8a66597_root_hub_start_polling(r8a66597); | ||
987 | } | ||
988 | |||
967 | /* this function must be called with interrupt disabled */ | 989 | /* this function must be called with interrupt disabled */ |
968 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, | 990 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, |
969 | u16 syssts) | 991 | u16 syssts) |
970 | { | 992 | { |
971 | if (syssts == SE0) { | 993 | if (syssts == SE0) { |
994 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); | ||
972 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); | 995 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); |
973 | return; | 996 | return; |
974 | } | 997 | } |
@@ -1002,13 +1025,10 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port) | |||
1002 | { | 1025 | { |
1003 | struct r8a66597_device *dev = r8a66597->root_hub[port].dev; | 1026 | struct r8a66597_device *dev = r8a66597->root_hub[port].dev; |
1004 | 1027 | ||
1005 | r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION); | ||
1006 | r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION); | ||
1007 | |||
1008 | disable_r8a66597_pipe_all(r8a66597, dev); | 1028 | disable_r8a66597_pipe_all(r8a66597, dev); |
1009 | free_usb_address(r8a66597, dev); | 1029 | free_usb_address(r8a66597, dev); |
1010 | 1030 | ||
1011 | r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); | 1031 | start_root_hub_sampling(r8a66597, port, 0); |
1012 | } | 1032 | } |
1013 | 1033 | ||
1014 | /* this function must be called with interrupt disabled */ | 1034 | /* this function must be called with interrupt disabled */ |
@@ -1551,23 +1571,6 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597) | |||
1551 | } | 1571 | } |
1552 | } | 1572 | } |
1553 | 1573 | ||
1554 | static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597) | ||
1555 | { | ||
1556 | mod_timer(&r8a66597->rh_timer, | ||
1557 | jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME)); | ||
1558 | } | ||
1559 | |||
1560 | static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port) | ||
1561 | { | ||
1562 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | ||
1563 | |||
1564 | rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; | ||
1565 | rh->scount = R8A66597_MAX_SAMPLING; | ||
1566 | r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION) | ||
1567 | | (1 << USB_PORT_FEAT_C_CONNECTION); | ||
1568 | r8a66597_root_hub_start_polling(r8a66597); | ||
1569 | } | ||
1570 | |||
1571 | static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | 1574 | static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) |
1572 | { | 1575 | { |
1573 | struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); | 1576 | struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); |
@@ -1594,7 +1597,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | |||
1594 | r8a66597_bclr(r8a66597, ATTCHE, INTENB2); | 1597 | r8a66597_bclr(r8a66597, ATTCHE, INTENB2); |
1595 | 1598 | ||
1596 | /* start usb bus sampling */ | 1599 | /* start usb bus sampling */ |
1597 | start_root_hub_sampling(r8a66597, 1); | 1600 | start_root_hub_sampling(r8a66597, 1, 1); |
1598 | } | 1601 | } |
1599 | if (mask2 & DTCH) { | 1602 | if (mask2 & DTCH) { |
1600 | r8a66597_write(r8a66597, ~DTCH, INTSTS2); | 1603 | r8a66597_write(r8a66597, ~DTCH, INTSTS2); |
@@ -1609,7 +1612,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) | |||
1609 | r8a66597_bclr(r8a66597, ATTCHE, INTENB1); | 1612 | r8a66597_bclr(r8a66597, ATTCHE, INTENB1); |
1610 | 1613 | ||
1611 | /* start usb bus sampling */ | 1614 | /* start usb bus sampling */ |
1612 | start_root_hub_sampling(r8a66597, 0); | 1615 | start_root_hub_sampling(r8a66597, 0, 1); |
1613 | } | 1616 | } |
1614 | if (mask1 & DTCH) { | 1617 | if (mask1 & DTCH) { |
1615 | r8a66597_write(r8a66597, ~DTCH, INTSTS1); | 1618 | r8a66597_write(r8a66597, ~DTCH, INTSTS1); |
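In the r8a66597 changes, start_root_hub_sampling() is moved ahead of its users and gains a connect argument, so the connection and change bits are maintained in one place for both attach and detach: the interrupt handler passes connect = 1 when an ATTCH interrupt fires, and r8a66597_usb_disconnect() now passes connect = 0 instead of open-coding the port-flag updates. Schematically (straight from the hunks above):

	/* attach path (ATTCH interrupt) */
	start_root_hub_sampling(r8a66597, port, 1);	/* sets FEAT_CONNECTION */

	/* detach path (r8a66597_usb_disconnect) */
	start_root_hub_sampling(r8a66597, port, 0);	/* clears FEAT_CONNECTION */

	/* both paths set FEAT_C_CONNECTION and restart root-hub polling */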
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 20ad3c48fcb2..228f2b070f2b 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c | |||
@@ -2934,16 +2934,6 @@ static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num) | |||
2934 | return 0; | 2934 | return 0; |
2935 | } | 2935 | } |
2936 | 2936 | ||
2937 | static void u132_hub_irq_enable(struct usb_hcd *hcd) | ||
2938 | { | ||
2939 | struct u132 *u132 = hcd_to_u132(hcd); | ||
2940 | if (u132->going > 1) { | ||
2941 | dev_err(&u132->platform_dev->dev, "device has been removed %d\n" | ||
2942 | , u132->going); | ||
2943 | } else if (u132->going > 0) | ||
2944 | dev_err(&u132->platform_dev->dev, "device is being removed\n"); | ||
2945 | } | ||
2946 | |||
2947 | 2937 | ||
2948 | #ifdef CONFIG_PM | 2938 | #ifdef CONFIG_PM |
2949 | static int u132_bus_suspend(struct usb_hcd *hcd) | 2939 | static int u132_bus_suspend(struct usb_hcd *hcd) |
@@ -2995,7 +2985,6 @@ static struct hc_driver u132_hc_driver = { | |||
2995 | .bus_suspend = u132_bus_suspend, | 2985 | .bus_suspend = u132_bus_suspend, |
2996 | .bus_resume = u132_bus_resume, | 2986 | .bus_resume = u132_bus_resume, |
2997 | .start_port_reset = u132_start_port_reset, | 2987 | .start_port_reset = u132_start_port_reset, |
2998 | .hub_irq_enable = u132_hub_irq_enable, | ||
2999 | }; | 2988 | }; |
3000 | 2989 | ||
3001 | /* | 2990 | /* |
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 001789c9a11a..4ea50e0abcbb 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig | |||
@@ -42,16 +42,6 @@ config USB_ADUTUX | |||
42 | To compile this driver as a module, choose M here. The module | 42 | To compile this driver as a module, choose M here. The module |
43 | will be called adutux. | 43 | will be called adutux. |
44 | 44 | ||
45 | config USB_AUERSWALD | ||
46 | tristate "USB Auerswald ISDN support" | ||
47 | depends on USB | ||
48 | help | ||
49 | Say Y here if you want to connect an Auerswald USB ISDN Device | ||
50 | to your computer's USB port. | ||
51 | |||
52 | To compile this driver as a module, choose M here: the | ||
53 | module will be called auerswald. | ||
54 | |||
55 | config USB_RIO500 | 45 | config USB_RIO500 |
56 | tristate "USB Diamond Rio500 support" | 46 | tristate "USB Diamond Rio500 support" |
57 | depends on USB | 47 | depends on USB |
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index aba091cb5ec0..45b4e12afb08 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile | |||
@@ -5,7 +5,6 @@ | |||
5 | 5 | ||
6 | obj-$(CONFIG_USB_ADUTUX) += adutux.o | 6 | obj-$(CONFIG_USB_ADUTUX) += adutux.o |
7 | obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o | 7 | obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o |
8 | obj-$(CONFIG_USB_AUERSWALD) += auerswald.o | ||
9 | obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o | 8 | obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o |
10 | obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o | 9 | obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o |
11 | obj-$(CONFIG_USB_CYTHERM) += cytherm.o | 10 | obj-$(CONFIG_USB_CYTHERM) += cytherm.o |
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c deleted file mode 100644 index d2f61d5510e7..000000000000 --- a/drivers/usb/misc/auerswald.c +++ /dev/null | |||
@@ -1,2152 +0,0 @@ | |||
1 | /*****************************************************************************/ | ||
2 | /* | ||
3 | * auerswald.c -- Auerswald PBX/System Telephone usb driver. | ||
4 | * | ||
5 | * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de) | ||
6 | * | ||
7 | * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl) | ||
8 | * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | */ | ||
24 | /*****************************************************************************/ | ||
25 | |||
26 | /* Standard Linux module include files */ | ||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/byteorder.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/usb.h> | ||
34 | #include <linux/mutex.h> | ||
35 | |||
36 | /*-------------------------------------------------------------------*/ | ||
37 | /* Debug support */ | ||
38 | #ifdef DEBUG | ||
39 | #define dump( adr, len) \ | ||
40 | do { \ | ||
41 | unsigned int u; \ | ||
42 | printk (KERN_DEBUG); \ | ||
43 | for (u = 0; u < len; u++) \ | ||
44 | printk (" %02X", adr[u] & 0xFF); \ | ||
45 | printk ("\n"); \ | ||
46 | } while (0) | ||
47 | #else | ||
48 | #define dump( adr, len) | ||
49 | #endif | ||
50 | |||
51 | /*-------------------------------------------------------------------*/ | ||
52 | /* Version Information */ | ||
53 | #define DRIVER_VERSION "0.9.11" | ||
54 | #define DRIVER_AUTHOR "Wolfgang Mües <wolfgang@iksw-muees.de>" | ||
55 | #define DRIVER_DESC "Auerswald PBX/System Telephone usb driver" | ||
56 | |||
57 | /*-------------------------------------------------------------------*/ | ||
58 | /* Private declarations for Auerswald USB driver */ | ||
59 | |||
60 | /* Auerswald Vendor ID */ | ||
61 | #define ID_AUERSWALD 0x09BF | ||
62 | |||
63 | #define AUER_MINOR_BASE 112 /* auerswald driver minor number */ | ||
64 | |||
65 | /* we can have up to this number of device plugged in at once */ | ||
66 | #define AUER_MAX_DEVICES 16 | ||
67 | |||
68 | |||
69 | /* Number of read buffers for each device */ | ||
70 | #define AU_RBUFFERS 10 | ||
71 | |||
72 | /* Number of chain elements for each control chain */ | ||
73 | #define AUCH_ELEMENTS 20 | ||
74 | |||
75 | /* Number of retries in communication */ | ||
76 | #define AU_RETRIES 10 | ||
77 | |||
78 | /*-------------------------------------------------------------------*/ | ||
79 | /* vendor specific protocol */ | ||
80 | /* Header Byte */ | ||
81 | #define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */ | ||
82 | #define AUH_DIRECT 0x00 /* data is for USB device */ | ||
83 | #define AUH_INDIRECT 0x80 /* USB device is relay */ | ||
84 | |||
85 | #define AUH_SPLITMASK 0x40 /* mask for split bit */ | ||
86 | #define AUH_UNSPLIT 0x00 /* data block is full-size */ | ||
87 | #define AUH_SPLIT 0x40 /* data block is part of a larger one, | ||
88 | split-byte follows */ | ||
89 | |||
90 | #define AUH_TYPEMASK 0x3F /* mask for type of data transfer */ | ||
91 | #define AUH_TYPESIZE 0x40 /* different types */ | ||
92 | #define AUH_DCHANNEL 0x00 /* D channel data */ | ||
93 | #define AUH_B1CHANNEL 0x01 /* B1 channel transparent */ | ||
94 | #define AUH_B2CHANNEL 0x02 /* B2 channel transparent */ | ||
95 | /* 0x03..0x0F reserved for driver internal use */ | ||
96 | #define AUH_COMMAND 0x10 /* Command channel */ | ||
97 | #define AUH_BPROT 0x11 /* Configuration block protocol */ | ||
98 | #define AUH_DPROTANA 0x12 /* D channel protocol analyzer */ | ||
99 | #define AUH_TAPI 0x13 /* telephone api data (ATD) */ | ||
100 | /* 0x14..0x3F reserved for other protocols */ | ||
101 | #define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */ | ||
102 | #define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */ | ||
103 | |||
104 | #define AUH_SIZE 1 /* Size of Header Byte */ | ||
105 | |||
106 | /* Split Byte. Only present if split bit in header byte set.*/ | ||
107 | #define AUS_STARTMASK 0x80 /* mask for first block of a split frame */ | ||
108 | #define AUS_FIRST 0x80 /* first block */ | ||
109 | #define AUS_FOLLOW 0x00 /* following block */ | ||
110 | |||
111 | #define AUS_ENDMASK 0x40 /* mask for last block of a split frame */ | ||
112 | #define AUS_END 0x40 /* last block */ | ||
113 | #define AUS_NOEND 0x00 /* not the last block */ | ||
114 | |||
115 | #define AUS_LENMASK 0x3F /* mask for block length information */ | ||
116 | |||
117 | /* Request types */ | ||
118 | #define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */ | ||
119 | #define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */ | ||
120 | |||
121 | /* Vendor Requests */ | ||
122 | #define AUV_GETINFO 0x00 /* GetDeviceInfo */ | ||
123 | #define AUV_WBLOCK 0x01 /* Write Block */ | ||
124 | #define AUV_RBLOCK 0x02 /* Read Block */ | ||
125 | #define AUV_CHANNELCTL 0x03 /* Channel Control */ | ||
126 | #define AUV_DUMMY 0x04 /* Dummy Out for retry */ | ||
127 | |||
128 | /* Device Info Types */ | ||
129 | #define AUDI_NUMBCH 0x0000 /* Number of supported B channels */ | ||
130 | #define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */ | ||
131 | #define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */ | ||
132 | |||
133 | /* Interrupt endpoint definitions */ | ||
134 | #define AU_IRQENDP 1 /* Endpoint number */ | ||
135 | #define AU_IRQCMDID 16 /* Command-block ID */ | ||
136 | #define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */ | ||
137 | #define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */ | ||
138 | |||
139 | /* Device String Descriptors */ | ||
140 | #define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */ | ||
141 | #define AUSI_DEVICE 2 /* Name of the Device */ | ||
142 | #define AUSI_SERIALNR 3 /* Serial Number */ | ||
143 | #define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */ | ||
144 | |||
145 | #define AUSI_DLEN 100 /* Max. Length of Device Description */ | ||
146 | |||
147 | #define AUV_RETRY 0x101 /* First Firmware version which can do control retries */ | ||
148 | |||
149 | /*-------------------------------------------------------------------*/ | ||
150 | /* External data structures / Interface */ | ||
151 | typedef struct | ||
152 | { | ||
153 | char __user *buf; /* return buffer for string contents */ | ||
154 | unsigned int bsize; /* size of return buffer */ | ||
155 | } audevinfo_t,*paudevinfo_t; | ||
156 | |||
157 | /* IO controls */ | ||
158 | #define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */ | ||
159 | #define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */ | ||
160 | #define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */ | ||
161 | #define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */ | ||
162 | #define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */ | ||
163 | #define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */ | ||
164 | #define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmit channel ready to send */ | ||
165 | /* 'U' 0xF7..0xFF reserved */ | ||
166 | |||
167 | /*-------------------------------------------------------------------*/ | ||
168 | /* Internal data structures */ | ||
169 | |||
170 | /* ..................................................................*/ | ||
171 | /* urb chain element */ | ||
172 | struct auerchain; /* forward for circular reference */ | ||
173 | typedef struct | ||
174 | { | ||
175 | struct auerchain *chain; /* pointer to the chain to which this element belongs */ | ||
176 | struct urb * urbp; /* pointer to attached urb */ | ||
177 | void *context; /* saved URB context */ | ||
178 | usb_complete_t complete; /* saved URB completion function */ | ||
179 | struct list_head list; /* to include element into a list */ | ||
180 | } auerchainelement_t,*pauerchainelement_t; | ||
181 | |||
182 | /* urb chain */ | ||
183 | typedef struct auerchain | ||
184 | { | ||
185 | pauerchainelement_t active; /* element which is submitted to urb */ | ||
186 | spinlock_t lock; /* protection against interrupts */ | ||
187 | struct list_head waiting_list; /* list of waiting elements */ | ||
188 | struct list_head free_list; /* list of available elements */ | ||
189 | } auerchain_t,*pauerchain_t; | ||
190 | |||
191 | /* urb blocking completion helper struct */ | ||
192 | typedef struct | ||
193 | { | ||
194 | wait_queue_head_t wqh; /* wait for completion */ | ||
195 | unsigned int done; /* completion flag */ | ||
196 | } auerchain_chs_t,*pauerchain_chs_t; | ||
197 | |||
198 | /* ...................................................................*/ | ||
199 | /* buffer element */ | ||
200 | struct auerbufctl; /* forward */ | ||
201 | typedef struct | ||
202 | { | ||
203 | char *bufp; /* reference to allocated data buffer */ | ||
204 | unsigned int len; /* number of characters in data buffer */ | ||
205 | unsigned int retries; /* for urb retries */ | ||
206 | struct usb_ctrlrequest *dr; /* for setup data in control messages */ | ||
207 | struct urb * urbp; /* USB urb */ | ||
208 | struct auerbufctl *list; /* pointer to list */ | ||
209 | struct list_head buff_list; /* reference to next buffer in list */ | ||
210 | } auerbuf_t,*pauerbuf_t; | ||
211 | |||
212 | /* buffer list control block */ | ||
213 | typedef struct auerbufctl | ||
214 | { | ||
215 | spinlock_t lock; /* protection in interrupt */ | ||
216 | struct list_head free_buff_list;/* free buffers */ | ||
217 | struct list_head rec_buff_list; /* buffers with receive data */ | ||
218 | } auerbufctl_t,*pauerbufctl_t; | ||
219 | |||
220 | /* ...................................................................*/ | ||
221 | /* service context */ | ||
222 | struct auerscon; /* forward */ | ||
223 | typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t); | ||
224 | typedef void (*auer_disconn_t) (struct auerscon*); | ||
225 | typedef struct auerscon | ||
226 | { | ||
227 | unsigned int id; /* protocol service id AUH_xxxx */ | ||
228 | auer_dispatch_t dispatch; /* dispatch read buffer */ | ||
229 | auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */ | ||
230 | } auerscon_t,*pauerscon_t; | ||
231 | |||
232 | /* ...................................................................*/ | ||
233 | /* USB device context */ | ||
234 | typedef struct | ||
235 | { | ||
236 | struct mutex mutex; /* protection in user context */ | ||
237 | char name[20]; /* name of the /dev/usb entry */ | ||
238 | unsigned int dtindex; /* index in the device table */ | ||
239 | struct usb_device * usbdev; /* USB device handle */ | ||
240 | int open_count; /* count the number of open character channels */ | ||
241 | char dev_desc[AUSI_DLEN];/* for storing a textual description */ | ||
242 | unsigned int maxControlLength; /* max. length of control packet (without header) */ | ||
243 | struct urb * inturbp; /* interrupt urb */ | ||
244 | char * intbufp; /* data buffer for interrupt urb */ | ||
245 | unsigned int irqsize; /* size of interrupt endpoint 1 */ | ||
246 | struct auerchain controlchain; /* for chaining of control messages */ | ||
247 | auerbufctl_t bufctl; /* Buffer control for control transfers */ | ||
248 | pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */ | ||
249 | unsigned int version; /* Version of the device */ | ||
250 | wait_queue_head_t bufferwait; /* wait for a control buffer */ | ||
251 | } auerswald_t,*pauerswald_t; | ||
252 | |||
253 | /* ................................................................... */ | ||
254 | /* character device context */ | ||
255 | typedef struct | ||
256 | { | ||
257 | struct mutex mutex; /* protection in user context */ | ||
258 | pauerswald_t auerdev; /* context pointer of assigned device */ | ||
259 | auerbufctl_t bufctl; /* controls the buffer chain */ | ||
260 | auerscon_t scontext; /* service context */ | ||
261 | wait_queue_head_t readwait; /* for synchronous reading */ | ||
262 | struct mutex readmutex; /* protection against multiple reads */ | ||
263 | pauerbuf_t readbuf; /* buffer held for partial reading */ | ||
264 | unsigned int readoffset; /* current offset in readbuf */ | ||
265 | unsigned int removed; /* is != 0 if device is removed */ | ||
266 | } auerchar_t,*pauerchar_t; | ||
267 | |||
268 | |||
269 | /*-------------------------------------------------------------------*/ | ||
270 | /* Forwards */ | ||
271 | static void auerswald_ctrlread_complete (struct urb * urb); | ||
272 | static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp); | ||
273 | static struct usb_driver auerswald_driver; | ||
274 | |||
275 | |||
276 | /*-------------------------------------------------------------------*/ | ||
277 | /* USB chain helper functions */ | ||
278 | /* -------------------------- */ | ||
279 | |||
280 | /* completion function for chained urbs */ | ||
281 | static void auerchain_complete (struct urb * urb) | ||
282 | { | ||
283 | unsigned long flags; | ||
284 | int result; | ||
285 | |||
286 | /* get pointer to element and to chain */ | ||
287 | pauerchainelement_t acep = urb->context; | ||
288 | pauerchain_t acp = acep->chain; | ||
289 | |||
290 | /* restore original entries in urb */ | ||
291 | urb->context = acep->context; | ||
292 | urb->complete = acep->complete; | ||
293 | |||
294 | dbg ("auerchain_complete called"); | ||
295 | |||
296 | /* call original completion function | ||
297 | NOTE: this function may lead to more urbs submitted into the chain. | ||
298 | (no chain lock at calling complete()!) | ||
299 | acp->active != NULL is protecting us against recursion.*/ | ||
300 | urb->complete (urb); | ||
301 | |||
302 | /* detach element from chain data structure */ | ||
303 | spin_lock_irqsave (&acp->lock, flags); | ||
304 | if (acp->active != acep) /* paranoia debug check */ | ||
305 | dbg ("auerchain_complete: completion on non-active element called!"); | ||
306 | else | ||
307 | acp->active = NULL; | ||
308 | |||
309 | /* add the used chain element to the list of free elements */ | ||
310 | list_add_tail (&acep->list, &acp->free_list); | ||
311 | acep = NULL; | ||
312 | |||
313 | /* is there a new element waiting in the chain? */ | ||
314 | if (!acp->active && !list_empty (&acp->waiting_list)) { | ||
315 | /* yes: get the entry */ | ||
316 | struct list_head *tmp = acp->waiting_list.next; | ||
317 | list_del (tmp); | ||
318 | acep = list_entry (tmp, auerchainelement_t, list); | ||
319 | acp->active = acep; | ||
320 | } | ||
321 | spin_unlock_irqrestore (&acp->lock, flags); | ||
322 | |||
323 | /* submit the new urb */ | ||
324 | if (acep) { | ||
325 | urb = acep->urbp; | ||
326 | dbg ("auerchain_complete: submitting next urb from chain"); | ||
327 | urb->status = 0; /* needed! */ | ||
328 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
329 | |||
330 | /* check for submit errors */ | ||
331 | if (result) { | ||
332 | urb->status = result; | ||
333 | dbg("auerchain_complete: usb_submit_urb with error code %d", result); | ||
334 | /* and do error handling via *this* completion function (recursive) */ | ||
335 | auerchain_complete( urb); | ||
336 | } | ||
337 | } else { | ||
338 | /* simple return without submitting a new urb. | ||
339 | The empty chain is detected with acp->active == NULL. */ | ||
340 | }; | ||
341 | } | ||
342 | |||
343 | |||
344 | /* submit function for chained urbs | ||
345 | this function may be called from completion context or from user space! | ||
346 | early = 1 -> submit in front of chain | ||
347 | */ | ||
348 | static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early) | ||
349 | { | ||
350 | int result; | ||
351 | unsigned long flags; | ||
352 | pauerchainelement_t acep = NULL; | ||
353 | |||
354 | dbg ("auerchain_submit_urb called"); | ||
355 | |||
356 | /* try to get a chain element */ | ||
357 | spin_lock_irqsave (&acp->lock, flags); | ||
358 | if (!list_empty (&acp->free_list)) { | ||
359 | /* yes: get the entry */ | ||
360 | struct list_head *tmp = acp->free_list.next; | ||
361 | list_del (tmp); | ||
362 | acep = list_entry (tmp, auerchainelement_t, list); | ||
363 | } | ||
364 | spin_unlock_irqrestore (&acp->lock, flags); | ||
365 | |||
366 | /* if no chain element available: return with error */ | ||
367 | if (!acep) { | ||
368 | return -ENOMEM; | ||
369 | } | ||
370 | |||
371 | /* fill in the new chain element values */ | ||
372 | acep->chain = acp; | ||
373 | acep->context = urb->context; | ||
374 | acep->complete = urb->complete; | ||
375 | acep->urbp = urb; | ||
376 | INIT_LIST_HEAD (&acep->list); | ||
377 | |||
378 | /* modify urb */ | ||
379 | urb->context = acep; | ||
380 | urb->complete = auerchain_complete; | ||
381 | urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */ | ||
382 | |||
383 | /* add element to chain - or start it immediately */ | ||
384 | spin_lock_irqsave (&acp->lock, flags); | ||
385 | if (acp->active) { | ||
386 | /* there is traffic in the chain, simple add element to chain */ | ||
387 | if (early) { | ||
388 | dbg ("adding new urb to head of chain"); | ||
389 | list_add (&acep->list, &acp->waiting_list); | ||
390 | } else { | ||
391 | dbg ("adding new urb to end of chain"); | ||
392 | list_add_tail (&acep->list, &acp->waiting_list); | ||
393 | } | ||
394 | acep = NULL; | ||
395 | } else { | ||
396 | /* the chain is empty. Prepare restart */ | ||
397 | acp->active = acep; | ||
398 | } | ||
399 | /* The spinlock has to be released before usb_submit_urb! */ | ||
400 | spin_unlock_irqrestore (&acp->lock, flags); | ||
401 | |||
402 | /* Submit urb if immediate restart */ | ||
403 | if (acep) { | ||
404 | dbg("submitting urb immediate"); | ||
405 | urb->status = 0; /* needed! */ | ||
406 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
407 | /* check for submit errors */ | ||
408 | if (result) { | ||
409 | urb->status = result; | ||
410 | dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result); | ||
411 | /* and do error handling via completion function */ | ||
412 | auerchain_complete( urb); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | /* submit function for chained urbs | ||
420 | this function may be called from completion context or from user space! | ||
421 | */ | ||
422 | static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb) | ||
423 | { | ||
424 | return auerchain_submit_urb_list (acp, urb, 0); | ||
425 | } | ||
426 | |||
427 | /* cancel an urb which is submitted to the chain | ||
428 | the result is 0 if the urb is cancelled, or -EINPROGRESS if | ||
429 | the function is successfully started. | ||
430 | */ | ||
431 | static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb) | ||
432 | { | ||
433 | unsigned long flags; | ||
434 | struct urb * urbp; | ||
435 | pauerchainelement_t acep; | ||
436 | struct list_head *tmp; | ||
437 | |||
438 | dbg ("auerchain_unlink_urb called"); | ||
439 | |||
440 | /* search the chain of waiting elements */ | ||
441 | spin_lock_irqsave (&acp->lock, flags); | ||
442 | list_for_each (tmp, &acp->waiting_list) { | ||
443 | acep = list_entry (tmp, auerchainelement_t, list); | ||
444 | if (acep->urbp == urb) { | ||
445 | list_del (tmp); | ||
446 | urb->context = acep->context; | ||
447 | urb->complete = acep->complete; | ||
448 | list_add_tail (&acep->list, &acp->free_list); | ||
449 | spin_unlock_irqrestore (&acp->lock, flags); | ||
450 | dbg ("unlink waiting urb"); | ||
451 | urb->status = -ENOENT; | ||
452 | urb->complete (urb); | ||
453 | return 0; | ||
454 | } | ||
455 | } | ||
456 | /* not found. */ | ||
457 | spin_unlock_irqrestore (&acp->lock, flags); | ||
458 | |||
459 | /* get the active urb */ | ||
460 | acep = acp->active; | ||
461 | if (acep) { | ||
462 | urbp = acep->urbp; | ||
463 | |||
464 | /* check if we have to cancel the active urb */ | ||
465 | if (urbp == urb) { | ||
466 | /* note that there is a race condition between the check above | ||
467 | and the unlink() call because of no lock. This race is harmless, | ||
468 | because the usb module will detect the unlink() after completion. | ||
469 | We can't use the acp->lock here because the completion function | ||
470 | wants to grab it. | ||
471 | */ | ||
472 | dbg ("unlink active urb"); | ||
473 | return usb_unlink_urb (urbp); | ||
474 | } | ||
475 | } | ||
476 | |||
477 | /* not found anyway | ||
478 | ... is some kind of success | ||
479 | */ | ||
480 | dbg ("urb to unlink not found in chain"); | ||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | /* cancel all urbs which are in the chain. | ||
485 | this function must not be called from interrupt or completion handler. | ||
486 | */ | ||
487 | static void auerchain_unlink_all (pauerchain_t acp) | ||
488 | { | ||
489 | unsigned long flags; | ||
490 | struct urb * urbp; | ||
491 | pauerchainelement_t acep; | ||
492 | |||
493 | dbg ("auerchain_unlink_all called"); | ||
494 | |||
495 | /* clear the chain of waiting elements */ | ||
496 | spin_lock_irqsave (&acp->lock, flags); | ||
497 | while (!list_empty (&acp->waiting_list)) { | ||
498 | /* get the next entry */ | ||
499 | struct list_head *tmp = acp->waiting_list.next; | ||
500 | list_del (tmp); | ||
501 | acep = list_entry (tmp, auerchainelement_t, list); | ||
502 | urbp = acep->urbp; | ||
503 | urbp->context = acep->context; | ||
504 | urbp->complete = acep->complete; | ||
505 | list_add_tail (&acep->list, &acp->free_list); | ||
506 | spin_unlock_irqrestore (&acp->lock, flags); | ||
507 | dbg ("unlink waiting urb"); | ||
508 | urbp->status = -ENOENT; | ||
509 | urbp->complete (urbp); | ||
510 | spin_lock_irqsave (&acp->lock, flags); | ||
511 | } | ||
512 | spin_unlock_irqrestore (&acp->lock, flags); | ||
513 | |||
514 | /* clear the active urb */ | ||
515 | acep = acp->active; | ||
516 | if (acep) { | ||
517 | urbp = acep->urbp; | ||
518 | dbg ("unlink active urb"); | ||
519 | usb_kill_urb (urbp); | ||
520 | } | ||
521 | } | ||
522 | |||
523 | |||
524 | /* free the chain. | ||
525 | this function must not be called from interrupt or completion handler. | ||
526 | */ | ||
527 | static void auerchain_free (pauerchain_t acp) | ||
528 | { | ||
529 | unsigned long flags; | ||
530 | pauerchainelement_t acep; | ||
531 | |||
532 | dbg ("auerchain_free called"); | ||
533 | |||
534 | /* first, cancel all pending urbs */ | ||
535 | auerchain_unlink_all (acp); | ||
536 | |||
537 | /* free the elements */ | ||
538 | spin_lock_irqsave (&acp->lock, flags); | ||
539 | while (!list_empty (&acp->free_list)) { | ||
540 | /* get the next entry */ | ||
541 | struct list_head *tmp = acp->free_list.next; | ||
542 | list_del (tmp); | ||
543 | spin_unlock_irqrestore (&acp->lock, flags); | ||
544 | acep = list_entry (tmp, auerchainelement_t, list); | ||
545 | kfree (acep); | ||
546 | spin_lock_irqsave (&acp->lock, flags); | ||
547 | } | ||
548 | spin_unlock_irqrestore (&acp->lock, flags); | ||
549 | } | ||
550 | |||
551 | |||
552 | /* Init the chain control structure */ | ||
553 | static void auerchain_init (pauerchain_t acp) | ||
554 | { | ||
555 | /* init the chain data structure */ | ||
556 | acp->active = NULL; | ||
557 | spin_lock_init (&acp->lock); | ||
558 | INIT_LIST_HEAD (&acp->waiting_list); | ||
559 | INIT_LIST_HEAD (&acp->free_list); | ||
560 | } | ||
561 | |||
562 | /* setup a chain. | ||
563 | It is assumed that there is no concurrency while setting up the chain. | ||
564 | Prerequisite: auerchain_init() must have been called. | ||
565 | */ | ||
566 | static int auerchain_setup (pauerchain_t acp, unsigned int numElements) | ||
567 | { | ||
568 | pauerchainelement_t acep; | ||
569 | |||
570 | dbg ("auerchain_setup called with %d elements", numElements); | ||
571 | |||
572 | /* fill the list of free elements */ | ||
573 | for (;numElements; numElements--) { | ||
574 | acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL); | ||
575 | if (!acep) | ||
576 | goto ac_fail; | ||
577 | INIT_LIST_HEAD (&acep->list); | ||
578 | list_add_tail (&acep->list, &acp->free_list); | ||
579 | } | ||
580 | return 0; | ||
581 | |||
582 | ac_fail:/* free the elements */ | ||
583 | while (!list_empty (&acp->free_list)) { | ||
584 | /* get the next entry */ | ||
585 | struct list_head *tmp = acp->free_list.next; | ||
586 | list_del (tmp); | ||
587 | acep = list_entry (tmp, auerchainelement_t, list); | ||
588 | kfree (acep); | ||
589 | } | ||
590 | return -ENOMEM; | ||
591 | } | ||
592 | |||
593 | |||
594 | /* completion handler for synchronous chained URBs */ | ||
595 | static void auerchain_blocking_completion (struct urb *urb) | ||
596 | { | ||
597 | pauerchain_chs_t pchs = urb->context; | ||
598 | pchs->done = 1; | ||
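| /* make the done flag visible before the wake_up(); this pairs with | ||
| the chs.done check in auerchain_start_wait_urb() */ | ||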
599 | wmb(); | ||
600 | wake_up (&pchs->wqh); | ||
601 | } | ||
602 | |||
603 | |||
604 | /* Starts chained urb and waits for completion or timeout */ | ||
605 | static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length) | ||
606 | { | ||
607 | auerchain_chs_t chs; | ||
608 | int status; | ||
609 | |||
610 | dbg ("auerchain_start_wait_urb called"); | ||
611 | init_waitqueue_head (&chs.wqh); | ||
612 | chs.done = 0; | ||
613 | |||
614 | urb->context = &chs; | ||
615 | status = auerchain_submit_urb (acp, urb); | ||
616 | if (status) | ||
617 | /* something went wrong */ | ||
618 | return status; | ||
619 | |||
620 | timeout = wait_event_timeout(chs.wqh, chs.done, timeout); | ||
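| /* wait_event_timeout() returns 0 on timeout; chs.done is checked again | ||
| below to catch a completion that raced with the timeout */ | ||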
621 | |||
622 | if (!timeout && !chs.done) { | ||
623 | if (urb->status != -EINPROGRESS) { /* No callback?!! */ | ||
624 | dbg ("auerchain_start_wait_urb: raced timeout"); | ||
625 | status = urb->status; | ||
626 | } else { | ||
627 | dbg ("auerchain_start_wait_urb: timeout"); | ||
628 | auerchain_unlink_urb (acp, urb); /* remove urb safely */ | ||
629 | status = -ETIMEDOUT; | ||
630 | } | ||
631 | } else | ||
632 | status = urb->status; | ||
633 | |||
634 | if (status >= 0) | ||
635 | *actual_length = urb->actual_length; | ||
636 | |||
637 | return status; | ||
638 | } | ||
639 | |||
640 | |||
641 | /* auerchain_control_msg - Builds a control urb, sends it off and waits for completion | ||
642 | acp: pointer to the auerchain | ||
643 | dev: pointer to the usb device to send the message to | ||
644 | pipe: endpoint "pipe" to send the message to | ||
645 | request: USB message request value | ||
646 | requesttype: USB message request type value | ||
647 | value: USB message value | ||
648 | index: USB message index value | ||
649 | data: pointer to the data to send | ||
650 | size: length in bytes of the data to send | ||
651 | timeout: time to wait for the message to complete before timing out (if 0 the wait is forever) | ||
652 | |||
653 | This function sends a simple control message to a specified endpoint | ||
654 | and waits for the message to complete, or timeout. | ||
655 | |||
656 | If successful, it returns the transferred length, otherwise a negative error number. | ||
657 | |||
658 | Don't use this function from within an interrupt context, like a | ||
659 | bottom half handler. If you need an asynchronous message, or need to send | ||
660 | a message from within interrupt context, use auerchain_submit_urb() | ||
661 | */ | ||
662 | static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, | ||
663 | __u16 value, __u16 index, void *data, __u16 size, int timeout) | ||
664 | { | ||
665 | int ret; | ||
666 | struct usb_ctrlrequest *dr; | ||
667 | struct urb *urb; | ||
668 | int uninitialized_var(length); | ||
669 | |||
670 | dbg ("auerchain_control_msg"); | ||
671 | dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL); | ||
672 | if (!dr) | ||
673 | return -ENOMEM; | ||
674 | urb = usb_alloc_urb (0, GFP_KERNEL); | ||
675 | if (!urb) { | ||
676 | kfree (dr); | ||
677 | return -ENOMEM; | ||
678 | } | ||
679 | |||
680 | dr->bRequestType = requesttype; | ||
681 | dr->bRequest = request; | ||
682 | dr->wValue = cpu_to_le16 (value); | ||
683 | dr->wIndex = cpu_to_le16 (index); | ||
684 | dr->wLength = cpu_to_le16 (size); | ||
685 | |||
686 | usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */ | ||
687 | auerchain_blocking_completion, NULL); | ||
688 | ret = auerchain_start_wait_urb (acp, urb, timeout, &length); | ||
689 | |||
690 | usb_free_urb (urb); | ||
691 | kfree (dr); | ||
692 | |||
693 | if (ret < 0) | ||
694 | return ret; | ||
695 | else | ||
696 | return length; | ||
697 | } | ||
698 | |||
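| /* Usage sketch (illustrative only, modelled on the usb_control_msg() | ||
| call in auerswald_probe(); note the timeout is given in jiffies here): | ||
| ret = auerchain_control_msg (&cp->controlchain, cp->usbdev, | ||
| usb_rcvctrlpipe (cp->usbdev, 0), | ||
| AUV_GETINFO, AUT_RREQ, 0, AUDI_MBCTRANS, | ||
| pbuf, 2, HZ * 2); | ||
| A return value >= 0 is the number of bytes actually transferred. */ | ||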
699 | |||
700 | /*-------------------------------------------------------------------*/ | ||
701 | /* Buffer List helper functions */ | ||
702 | |||
703 | /* free a single auerbuf */ | ||
704 | static void auerbuf_free (pauerbuf_t bp) | ||
705 | { | ||
706 | kfree(bp->bufp); | ||
707 | kfree(bp->dr); | ||
708 | usb_free_urb(bp->urbp); | ||
709 | kfree(bp); | ||
710 | } | ||
711 | |||
712 | /* free the buffers from an auerbuf list */ | ||
713 | static void auerbuf_free_list (struct list_head *q) | ||
714 | { | ||
715 | struct list_head *tmp; | ||
716 | struct list_head *p; | ||
717 | pauerbuf_t bp; | ||
718 | |||
719 | dbg ("auerbuf_free_list"); | ||
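| /* manual safe traversal: the successor is saved before list_del() so | ||
| the current entry can be freed while walking the list */ | ||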
720 | for (p = q->next; p != q;) { | ||
721 | bp = list_entry (p, auerbuf_t, buff_list); | ||
722 | tmp = p->next; | ||
723 | list_del (p); | ||
724 | p = tmp; | ||
725 | auerbuf_free (bp); | ||
726 | } | ||
727 | } | ||
728 | |||
729 | /* init the members of a list control block */ | ||
730 | static void auerbuf_init (pauerbufctl_t bcp) | ||
731 | { | ||
732 | dbg ("auerbuf_init"); | ||
733 | spin_lock_init (&bcp->lock); | ||
734 | INIT_LIST_HEAD (&bcp->free_buff_list); | ||
735 | INIT_LIST_HEAD (&bcp->rec_buff_list); | ||
736 | } | ||
737 | |||
738 | /* free all buffers from an auerbuf chain */ | ||
739 | static void auerbuf_free_buffers (pauerbufctl_t bcp) | ||
740 | { | ||
741 | unsigned long flags; | ||
742 | dbg ("auerbuf_free_buffers"); | ||
743 | |||
744 | spin_lock_irqsave (&bcp->lock, flags); | ||
745 | |||
746 | auerbuf_free_list (&bcp->free_buff_list); | ||
747 | auerbuf_free_list (&bcp->rec_buff_list); | ||
748 | |||
749 | spin_unlock_irqrestore (&bcp->lock, flags); | ||
750 | } | ||
751 | |||
752 | /* setup a list of buffers */ | ||
753 | /* requirement: auerbuf_init() */ | ||
754 | static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize) | ||
755 | { | ||
756 | pauerbuf_t bep = NULL; | ||
757 | |||
758 | dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize); | ||
759 | |||
760 | /* fill the list of free elements */ | ||
761 | for (;numElements; numElements--) { | ||
762 | bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL); | ||
763 | if (!bep) | ||
764 | goto bl_fail; | ||
765 | bep->list = bcp; | ||
766 | INIT_LIST_HEAD (&bep->buff_list); | ||
767 | bep->bufp = kmalloc (bufsize, GFP_KERNEL); | ||
768 | if (!bep->bufp) | ||
769 | goto bl_fail; | ||
770 | bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL); | ||
771 | if (!bep->dr) | ||
772 | goto bl_fail; | ||
773 | bep->urbp = usb_alloc_urb (0, GFP_KERNEL); | ||
774 | if (!bep->urbp) | ||
775 | goto bl_fail; | ||
776 | list_add_tail (&bep->buff_list, &bcp->free_buff_list); | ||
777 | } | ||
778 | return 0; | ||
779 | |||
780 | bl_fail:/* not enough memory. Free allocated elements */ | ||
781 | dbg ("auerbuf_setup: no more memory"); | ||
782 | auerbuf_free(bep); | ||
783 | auerbuf_free_buffers (bcp); | ||
784 | return -ENOMEM; | ||
785 | } | ||
786 | |||
787 | /* insert a used buffer into the free list */ | ||
788 | static void auerbuf_releasebuf( pauerbuf_t bp) | ||
789 | { | ||
790 | unsigned long flags; | ||
791 | pauerbufctl_t bcp = bp->list; | ||
792 | bp->retries = 0; | ||
793 | |||
794 | dbg ("auerbuf_releasebuf called"); | ||
795 | spin_lock_irqsave (&bcp->lock, flags); | ||
796 | list_add_tail (&bp->buff_list, &bcp->free_buff_list); | ||
797 | spin_unlock_irqrestore (&bcp->lock, flags); | ||
798 | } | ||
799 | |||
800 | |||
801 | /*-------------------------------------------------------------------*/ | ||
802 | /* Completion handlers */ | ||
803 | |||
804 | /* Values of urb->status or results of usb_submit_urb(): | ||
805 | 0 Initial, OK | ||
806 | -EINPROGRESS during submission until end | ||
807 | -ENOENT if urb is unlinked | ||
808 | -ETIME Device did not respond | ||
809 | -ENOMEM Memory Overflow | ||
810 | -ENODEV Specified USB-device or bus doesn't exist | ||
811 | -ENXIO URB already queued | ||
812 | -EINVAL a) Invalid transfer type specified (or not supported) | ||
813 | b) Invalid interrupt interval (must satisfy 0 < n < 256) | ||
814 | -EAGAIN a) Specified ISO start frame too early | ||
815 | b) (using ISO-ASAP) Too much scheduled for the future; wait some time and try again. | ||
816 | -EFBIG Too many ISO frames requested (currently uhci: 900) | ||
817 | -EPIPE Specified pipe-handle/Endpoint is already stalled | ||
818 | -EMSGSIZE Endpoint message size is zero, do interface/alternate setting | ||
819 | -EPROTO a) Bitstuff error | ||
820 | b) Unknown USB error | ||
821 | -EILSEQ CRC mismatch | ||
822 | -ENOSR Buffer error | ||
823 | -EREMOTEIO Short packet detected | ||
824 | -EXDEV ISO transfer only partially completed; look at individual frame status for details | ||
825 | -EINVAL ISO madness, if this happens: Log off and go home | ||
826 | -EOVERFLOW babble | ||
827 | */ | ||
828 | |||
829 | /* check if a status code allows a retry */ | ||
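| /* any status not listed below (e.g. -ENOENT, -ENODEV, -ESHUTDOWN) is | ||
| treated as fatal and aborts without a retry */ | ||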
830 | static int auerswald_status_retry (int status) | ||
831 | { | ||
832 | switch (status) { | ||
833 | case 0: | ||
834 | case -ETIME: | ||
835 | case -EOVERFLOW: | ||
836 | case -EAGAIN: | ||
837 | case -EPIPE: | ||
838 | case -EPROTO: | ||
839 | case -EILSEQ: | ||
840 | case -ENOSR: | ||
841 | case -EREMOTEIO: | ||
842 | return 1; /* do a retry */ | ||
843 | } | ||
844 | return 0; /* no retry possible */ | ||
845 | } | ||
846 | |||
847 | /* Completion of asynchronous write block */ | ||
848 | static void auerchar_ctrlwrite_complete (struct urb * urb) | ||
849 | { | ||
850 | pauerbuf_t bp = urb->context; | ||
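| /* recover the device context; this is an open-coded | ||
| container_of(bp->list, auerswald_t, bufctl) */ | ||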
851 | pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); | ||
852 | dbg ("auerchar_ctrlwrite_complete called"); | ||
853 | |||
854 | /* reuse the buffer */ | ||
855 | auerbuf_releasebuf (bp); | ||
856 | /* Wake up all processes waiting for a buffer */ | ||
857 | wake_up (&cp->bufferwait); | ||
858 | } | ||
859 | |||
860 | /* Completion handler for dummy retry packet */ | ||
861 | static void auerswald_ctrlread_wretcomplete (struct urb * urb) | ||
862 | { | ||
863 | pauerbuf_t bp = urb->context; | ||
864 | pauerswald_t cp; | ||
865 | int ret; | ||
866 | int status = urb->status; | ||
867 | |||
868 | dbg ("auerswald_ctrlread_wretcomplete called"); | ||
869 | dbg ("complete with status: %d", status); | ||
870 | cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); | ||
871 | |||
872 | /* check if it is possible to advance */ | ||
873 | if (!auerswald_status_retry(status) || !cp->usbdev) { | ||
874 | /* reuse the buffer */ | ||
875 | err ("control dummy: transmission error %d, can not retry", status); | ||
876 | auerbuf_releasebuf (bp); | ||
877 | /* Wake up all processes waiting for a buffer */ | ||
878 | wake_up (&cp->bufferwait); | ||
879 | return; | ||
880 | } | ||
881 | |||
882 | /* fill the control message */ | ||
883 | bp->dr->bRequestType = AUT_RREQ; | ||
884 | bp->dr->bRequest = AUV_RBLOCK; | ||
885 | bp->dr->wLength = bp->dr->wValue; /* restore the length temporarily stashed in wValue */ | ||
886 | bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */ | ||
887 | /* bp->dr->wIndex = channel id; remains unchanged */ | ||
888 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0), | ||
889 | (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength), | ||
890 | auerswald_ctrlread_complete,bp); | ||
891 | |||
892 | /* submit the control msg as next packet */ | ||
893 | ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1); | ||
894 | if (ret) { | ||
895 | dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret); | ||
896 | bp->urbp->status = ret; | ||
897 | auerswald_ctrlread_complete (bp->urbp); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | /* completion handler for receiving of control messages */ | ||
902 | static void auerswald_ctrlread_complete (struct urb * urb) | ||
903 | { | ||
904 | unsigned int serviceid; | ||
905 | pauerswald_t cp; | ||
906 | pauerscon_t scp; | ||
907 | pauerbuf_t bp = urb->context; | ||
908 | int status = urb->status; | ||
909 | int ret; | ||
910 | |||
911 | dbg ("auerswald_ctrlread_complete called"); | ||
912 | |||
913 | cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); | ||
914 | |||
915 | /* check if there is valid data in this urb */ | ||
916 | if (status) { | ||
917 | dbg ("complete with non-zero status: %d", status); | ||
918 | /* should we do a retry? */ | ||
919 | if (!auerswald_status_retry(status) | ||
920 | || !cp->usbdev | ||
921 | || (cp->version < AUV_RETRY) | ||
922 | || (bp->retries >= AU_RETRIES)) { | ||
923 | /* reuse the buffer */ | ||
924 | err ("control read: transmission error %d, can not retry", status); | ||
925 | auerbuf_releasebuf (bp); | ||
926 | /* Wake up all processes waiting for a buffer */ | ||
927 | wake_up (&cp->bufferwait); | ||
928 | return; | ||
929 | } | ||
930 | bp->retries++; | ||
931 | dbg ("Retry count = %d", bp->retries); | ||
932 | /* send a long dummy control-write-message to allow device firmware to react */ | ||
933 | bp->dr->bRequestType = AUT_WREQ; | ||
934 | bp->dr->bRequest = AUV_DUMMY; | ||
935 | bp->dr->wValue = bp->dr->wLength; /* stash the original length for the retry */ | ||
936 | /* bp->dr->wIndex: channel ID remains unchanged */ | ||
937 | bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */ | ||
938 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0), | ||
939 | (unsigned char*)bp->dr, bp->bufp, 32, | ||
940 | auerswald_ctrlread_wretcomplete,bp); | ||
941 | |||
942 | /* submit the control msg as next packet */ | ||
943 | ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1); | ||
944 | if (ret) { | ||
945 | dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret); | ||
946 | bp->urbp->status = ret; | ||
947 | auerswald_ctrlread_wretcomplete (bp->urbp); | ||
948 | } | ||
949 | return; | ||
950 | } | ||
951 | |||
952 | /* get the actual bytecount (incl. headerbyte) */ | ||
953 | bp->len = urb->actual_length; | ||
954 | serviceid = bp->bufp[0] & AUH_TYPEMASK; | ||
955 | dbg ("Paket with serviceid %d and %d bytes received", serviceid, bp->len); | ||
956 | |||
957 | /* dispatch the packet */ | ||
958 | scp = cp->services[serviceid]; | ||
959 | if (scp) { | ||
960 | /* look, Ma, a listener! */ | ||
961 | scp->dispatch (scp, bp); | ||
962 | } | ||
963 | |||
964 | /* release the packet */ | ||
965 | auerbuf_releasebuf (bp); | ||
966 | /* Wake up all processes waiting for a buffer */ | ||
967 | wake_up (&cp->bufferwait); | ||
968 | } | ||
969 | |||
970 | /*-------------------------------------------------------------------*/ | ||
971 | /* Handling of Interrupt Endpoint */ | ||
972 | /* This interrupt Endpoint is used to inform the host about waiting | ||
973 | messages from the USB device. | ||
974 | */ | ||
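| /* Layout of an interrupt packet, as decoded by auerswald_int_complete(): | ||
| byte 0: AU_IRQCMDID (command code) | ||
| byte 1: AU_BLOCKRDY (a data block is waiting) | ||
| byte 2: service channel id | ||
| bytes 3..4: byte count of the waiting block, little endian */ | ||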
975 | /* int completion handler. */ | ||
976 | static void auerswald_int_complete (struct urb * urb) | ||
977 | { | ||
978 | unsigned long flags; | ||
979 | unsigned int channelid; | ||
980 | unsigned int bytecount; | ||
981 | int ret; | ||
982 | int status = urb->status; | ||
983 | pauerbuf_t bp = NULL; | ||
984 | pauerswald_t cp = urb->context; | ||
985 | |||
986 | dbg ("%s called", __func__); | ||
987 | |||
988 | switch (status) { | ||
989 | case 0: | ||
990 | /* success */ | ||
991 | break; | ||
992 | case -ECONNRESET: | ||
993 | case -ENOENT: | ||
994 | case -ESHUTDOWN: | ||
995 | /* this urb is terminated, clean up */ | ||
996 | dbg("%s - urb shutting down with status: %d", __func__, status); | ||
997 | return; | ||
998 | default: | ||
999 | dbg("%s - nonzero urb status received: %d", __func__, status); | ||
1000 | goto exit; | ||
1001 | } | ||
1002 | |||
1003 | /* check if all needed data was received */ | ||
1004 | if (urb->actual_length < AU_IRQMINSIZE) { | ||
1005 | dbg ("invalid data length received: %d bytes", urb->actual_length); | ||
1006 | goto exit; | ||
1007 | } | ||
1008 | |||
1009 | /* check the command code */ | ||
1010 | if (cp->intbufp[0] != AU_IRQCMDID) { | ||
1011 | dbg ("invalid command received: %d", cp->intbufp[0]); | ||
1012 | goto exit; | ||
1013 | } | ||
1014 | |||
1015 | /* check the command type */ | ||
1016 | if (cp->intbufp[1] != AU_BLOCKRDY) { | ||
1017 | dbg ("invalid command type received: %d", cp->intbufp[1]); | ||
1018 | goto exit; | ||
1019 | } | ||
1020 | |||
1021 | /* now extract the information */ | ||
1022 | channelid = cp->intbufp[2]; | ||
1023 | bytecount = (unsigned char)cp->intbufp[3]; | ||
1024 | bytecount |= (unsigned char)cp->intbufp[4] << 8; | ||
1025 | |||
1026 | /* check the channel id */ | ||
1027 | if (channelid >= AUH_TYPESIZE) { | ||
1028 | dbg ("invalid channel id received: %d", channelid); | ||
1029 | goto exit; | ||
1030 | } | ||
1031 | |||
1032 | /* check the byte count */ | ||
1033 | if (bytecount > (cp->maxControlLength+AUH_SIZE)) { | ||
1034 | dbg ("invalid byte count received: %d", bytecount); | ||
1035 | goto exit; | ||
1036 | } | ||
1037 | dbg ("Service Channel = %d", channelid); | ||
1038 | dbg ("Byte Count = %d", bytecount); | ||
1039 | |||
1040 | /* get a buffer for the next data packet */ | ||
1041 | spin_lock_irqsave (&cp->bufctl.lock, flags); | ||
1042 | if (!list_empty (&cp->bufctl.free_buff_list)) { | ||
1043 | /* yes: get the entry */ | ||
1044 | struct list_head *tmp = cp->bufctl.free_buff_list.next; | ||
1045 | list_del (tmp); | ||
1046 | bp = list_entry (tmp, auerbuf_t, buff_list); | ||
1047 | } | ||
1048 | spin_unlock_irqrestore (&cp->bufctl.lock, flags); | ||
1049 | |||
1050 | /* if no buffer available: skip it */ | ||
1051 | if (!bp) { | ||
1052 | dbg ("auerswald_int_complete: no data buffer available"); | ||
1053 | /* can we do something more? | ||
1054 | This is a big problem: if this int packet is ignored, the | ||
1055 | device will wait forever and not signal any more data. | ||
1056 | The only real solution is: having enough buffers! | ||
1057 | Or perhaps temporarily disabling the int endpoint? | ||
1058 | */ | ||
1059 | goto exit; | ||
1060 | } | ||
1061 | |||
1062 | /* fill the control message */ | ||
1063 | bp->dr->bRequestType = AUT_RREQ; | ||
1064 | bp->dr->bRequest = AUV_RBLOCK; | ||
1065 | bp->dr->wValue = cpu_to_le16 (0); | ||
1066 | bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT); | ||
1067 | bp->dr->wLength = cpu_to_le16 (bytecount); | ||
1068 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0), | ||
1069 | (unsigned char*)bp->dr, bp->bufp, bytecount, | ||
1070 | auerswald_ctrlread_complete,bp); | ||
1071 | |||
1072 | /* submit the control msg */ | ||
1073 | ret = auerchain_submit_urb (&cp->controlchain, bp->urbp); | ||
1074 | if (ret) { | ||
1075 | dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret); | ||
1076 | bp->urbp->status = ret; | ||
1077 | auerswald_ctrlread_complete( bp->urbp); | ||
1078 | /* here applies the same problem as above: device locking! */ | ||
1079 | } | ||
1080 | exit: | ||
1081 | ret = usb_submit_urb (urb, GFP_ATOMIC); | ||
1082 | if (ret) | ||
1083 | err ("%s - usb_submit_urb failed with result %d", | ||
1084 | __func__, ret); | ||
1085 | } | ||
1086 | |||
1087 | /* int memory deallocation | ||
1088 | NOTE: no mutex please! | ||
1089 | */ | ||
1090 | static void auerswald_int_free (pauerswald_t cp) | ||
1091 | { | ||
1092 | if (cp->inturbp) { | ||
1093 | usb_free_urb(cp->inturbp); | ||
1094 | cp->inturbp = NULL; | ||
1095 | } | ||
1096 | kfree(cp->intbufp); | ||
1097 | cp->intbufp = NULL; | ||
1098 | } | ||
1099 | |||
1100 | /* This function is called to activate the interrupt | ||
1101 | endpoint. This function returns 0 if successful or an error code. | ||
1102 | NOTE: no mutex please! | ||
1103 | */ | ||
1104 | static int auerswald_int_open (pauerswald_t cp) | ||
1105 | { | ||
1106 | int ret; | ||
1107 | struct usb_host_endpoint *ep; | ||
1108 | int irqsize; | ||
1109 | dbg ("auerswald_int_open"); | ||
1110 | |||
1111 | ep = cp->usbdev->ep_in[AU_IRQENDP]; | ||
1112 | if (!ep) { | ||
1113 | ret = -EFAULT; | ||
1114 | goto intoend; | ||
1115 | } | ||
1116 | irqsize = le16_to_cpu(ep->desc.wMaxPacketSize); | ||
1117 | cp->irqsize = irqsize; | ||
1118 | |||
1119 | /* allocate the urb and data buffer */ | ||
1120 | if (!cp->inturbp) { | ||
1121 | cp->inturbp = usb_alloc_urb (0, GFP_KERNEL); | ||
1122 | if (!cp->inturbp) { | ||
1123 | ret = -ENOMEM; | ||
1124 | goto intoend; | ||
1125 | } | ||
1126 | } | ||
1127 | if (!cp->intbufp) { | ||
1128 | cp->intbufp = kmalloc (irqsize, GFP_KERNEL); | ||
1129 | if (!cp->intbufp) { | ||
1130 | ret = -ENOMEM; | ||
1131 | goto intoend; | ||
1132 | } | ||
1133 | } | ||
1134 | /* setup urb */ | ||
1135 | usb_fill_int_urb (cp->inturbp, cp->usbdev, | ||
1136 | usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp, | ||
1137 | irqsize, auerswald_int_complete, cp, ep->desc.bInterval); | ||
1138 | /* start the urb */ | ||
1139 | cp->inturbp->status = 0; /* needed! */ | ||
1140 | ret = usb_submit_urb (cp->inturbp, GFP_KERNEL); | ||
1141 | |||
1142 | intoend: | ||
1143 | if (ret < 0) { | ||
1144 | /* activation of interrupt endpoint has failed. Now clean up. */ | ||
1145 | dbg ("auerswald_int_open: activation of int endpoint failed"); | ||
1146 | |||
1147 | /* deallocate memory */ | ||
1148 | auerswald_int_free (cp); | ||
1149 | } | ||
1150 | return ret; | ||
1151 | } | ||
1152 | |||
1153 | /* This function is called to deactivate the interrupt | ||
1154 | endpoint. This function returns 0 if successful or an error code. | ||
1155 | NOTE: no mutex please! | ||
1156 | */ | ||
1157 | static void auerswald_int_release (pauerswald_t cp) | ||
1158 | { | ||
1159 | dbg ("auerswald_int_release"); | ||
1160 | |||
1161 | /* stop the int endpoint */ | ||
1162 | usb_kill_urb (cp->inturbp); | ||
1163 | |||
1164 | /* deallocate memory */ | ||
1165 | auerswald_int_free (cp); | ||
1166 | } | ||
1167 | |||
1168 | /* --------------------------------------------------------------------- */ | ||
1169 | /* Helper functions */ | ||
1170 | |||
1171 | /* wake up waiting readers */ | ||
1172 | static void auerchar_disconnect (pauerscon_t scp) | ||
1173 | { | ||
1174 | pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext))); | ||
1175 | dbg ("auerchar_disconnect called"); | ||
1176 | ccp->removed = 1; | ||
1177 | wake_up (&ccp->readwait); | ||
1178 | } | ||
1179 | |||
1180 | |||
1181 | /* dispatch a read packet to a waiting character device */ | ||
1182 | static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp) | ||
1183 | { | ||
1184 | unsigned long flags; | ||
1185 | pauerchar_t ccp; | ||
1186 | pauerbuf_t newbp = NULL; | ||
1187 | char * charp; | ||
1188 | dbg ("auerchar_ctrlread_dispatch called"); | ||
1189 | ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext))); | ||
1190 | |||
1191 | /* get a read buffer from character device context */ | ||
1192 | spin_lock_irqsave (&ccp->bufctl.lock, flags); | ||
1193 | if (!list_empty (&ccp->bufctl.free_buff_list)) { | ||
1194 | /* yes: get the entry */ | ||
1195 | struct list_head *tmp = ccp->bufctl.free_buff_list.next; | ||
1196 | list_del (tmp); | ||
1197 | newbp = list_entry (tmp, auerbuf_t, buff_list); | ||
1198 | } | ||
1199 | spin_unlock_irqrestore (&ccp->bufctl.lock, flags); | ||
1200 | |||
1201 | if (!newbp) { | ||
1202 | dbg ("No read buffer available, discard paket!"); | ||
1203 | return; /* no buffer, no dispatch */ | ||
1204 | } | ||
1205 | |||
1206 | /* hand the data over to the new buffer element by swapping the | ||
1207 | data pointers (all buffers are allocated with the same length) */ | ||
1208 | charp = newbp->bufp; | ||
1209 | newbp->bufp = bp->bufp; | ||
1210 | bp->bufp = charp; | ||
1211 | newbp->len = bp->len; | ||
1212 | |||
1213 | /* insert new buffer in read list */ | ||
1214 | spin_lock_irqsave (&ccp->bufctl.lock, flags); | ||
1215 | list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list); | ||
1216 | spin_unlock_irqrestore (&ccp->bufctl.lock, flags); | ||
1217 | dbg ("read buffer appended to rec_list"); | ||
1218 | |||
1219 | /* wake up pending synchronous reads */ | ||
1220 | wake_up (&ccp->readwait); | ||
1221 | } | ||
1222 | |||
1223 | |||
1224 | /* Delete an auerswald driver context */ | ||
1225 | static void auerswald_delete( pauerswald_t cp) | ||
1226 | { | ||
1227 | dbg( "auerswald_delete"); | ||
1228 | if (cp == NULL) | ||
1229 | return; | ||
1230 | |||
1231 | /* Wake up all processes waiting for a buffer */ | ||
1232 | wake_up (&cp->bufferwait); | ||
1233 | |||
1234 | /* Cleaning up */ | ||
1235 | auerswald_int_release (cp); | ||
1236 | auerchain_free (&cp->controlchain); | ||
1237 | auerbuf_free_buffers (&cp->bufctl); | ||
1238 | |||
1239 | /* release the memory */ | ||
1240 | kfree( cp); | ||
1241 | } | ||
1242 | |||
1243 | |||
1244 | /* Delete an auerswald character context */ | ||
1245 | static void auerchar_delete( pauerchar_t ccp) | ||
1246 | { | ||
1247 | dbg ("auerchar_delete"); | ||
1248 | if (ccp == NULL) | ||
1249 | return; | ||
1250 | |||
1251 | /* wake up pending synchronous reads */ | ||
1252 | ccp->removed = 1; | ||
1253 | wake_up (&ccp->readwait); | ||
1254 | |||
1255 | /* remove the read buffer */ | ||
1256 | if (ccp->readbuf) { | ||
1257 | auerbuf_releasebuf (ccp->readbuf); | ||
1258 | ccp->readbuf = NULL; | ||
1259 | } | ||
1260 | |||
1261 | /* remove the character buffers */ | ||
1262 | auerbuf_free_buffers (&ccp->bufctl); | ||
1263 | |||
1264 | /* release the memory */ | ||
1265 | kfree( ccp); | ||
1266 | } | ||
1267 | |||
1268 | |||
1269 | /* add a new service to the device | ||
1270 | scp->id must be set! | ||
1271 | return: 0 if OK, else error code | ||
1272 | */ | ||
1273 | static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp) | ||
1274 | { | ||
1275 | int ret; | ||
1276 | |||
1277 | /* is the device available? */ | ||
1278 | if (!cp->usbdev) { | ||
1279 | dbg ("usbdev == NULL"); | ||
1280 | return -EIO; /* no: cannot add a service, sorry */ | ||
1281 | } | ||
1282 | |||
1283 | /* is the service available? */ | ||
1284 | if (cp->services[scp->id]) { | ||
1285 | dbg ("service is busy"); | ||
1286 | return -EBUSY; | ||
1287 | } | ||
1288 | |||
1289 | /* device is available, service is free */ | ||
1290 | cp->services[scp->id] = scp; | ||
1291 | |||
1292 | /* register service in device */ | ||
1293 | ret = auerchain_control_msg( | ||
1294 | &cp->controlchain, /* pointer to control chain */ | ||
1295 | cp->usbdev, /* pointer to device */ | ||
1296 | usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */ | ||
1297 | AUV_CHANNELCTL, /* USB message request value */ | ||
1298 | AUT_WREQ, /* USB message request type value */ | ||
1299 | 0x01, /* open USB message value */ | ||
1300 | scp->id, /* USB message index value */ | ||
1301 | NULL, /* pointer to the data to send */ | ||
1302 | 0, /* length in bytes of the data to send */ | ||
1303 | HZ * 2); /* time to wait for the message to complete before timing out */ | ||
1304 | if (ret < 0) { | ||
1305 | dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret); | ||
1306 | /* undo above actions */ | ||
1307 | cp->services[scp->id] = NULL; | ||
1308 | return ret; | ||
1309 | } | ||
1310 | |||
1311 | dbg ("auerswald_addservice: channel open OK"); | ||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | |||
1316 | /* remove a service from the device | ||
1317 | scp->id must be set! */ | ||
1318 | static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp) | ||
1319 | { | ||
1320 | dbg ("auerswald_removeservice called"); | ||
1321 | |||
1322 | /* check if we have a service allocated */ | ||
1323 | if (scp->id == AUH_UNASSIGNED) | ||
1324 | return; | ||
1325 | |||
1326 | /* If there is a device: close the channel */ | ||
1327 | if (cp->usbdev) { | ||
1328 | /* Close the service channel inside the device */ | ||
1329 | int ret = auerchain_control_msg( | ||
1330 | &cp->controlchain, /* pointer to control chain */ | ||
1331 | cp->usbdev, /* pointer to device */ | ||
1332 | usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */ | ||
1333 | AUV_CHANNELCTL, /* USB message request value */ | ||
1334 | AUT_WREQ, /* USB message request type value */ | ||
1335 | 0x00, /* close USB message value */ | ||
1336 | scp->id, /* USB message index value */ | ||
1337 | NULL, /* pointer to the data to send */ | ||
1338 | 0, /* length in bytes of the data to send */ | ||
1339 | HZ * 2); /* time to wait for the message to complete before timing out */ | ||
1340 | if (ret < 0) { | ||
1341 | dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret); | ||
1342 | } | ||
1343 | else { | ||
1344 | dbg ("auerswald_removeservice: channel close OK"); | ||
1345 | } | ||
1346 | } | ||
1347 | |||
1348 | /* remove the service from the device */ | ||
1349 | cp->services[scp->id] = NULL; | ||
1350 | scp->id = AUH_UNASSIGNED; | ||
1351 | } | ||
1352 | |||
1353 | |||
1354 | /* --------------------------------------------------------------------- */ | ||
1355 | /* Char device functions */ | ||
1356 | |||
1357 | /* Open a new character device */ | ||
1358 | static int auerchar_open (struct inode *inode, struct file *file) | ||
1359 | { | ||
1360 | int dtindex = iminor(inode); | ||
1361 | pauerswald_t cp = NULL; | ||
1362 | pauerchar_t ccp = NULL; | ||
1363 | struct usb_interface *intf; | ||
1364 | int ret; | ||
1365 | |||
1366 | /* minor number in range? */ | ||
1367 | if (dtindex < 0) { | ||
1368 | return -ENODEV; | ||
1369 | } | ||
1370 | intf = usb_find_interface(&auerswald_driver, dtindex); | ||
1371 | if (!intf) { | ||
1372 | return -ENODEV; | ||
1373 | } | ||
1374 | |||
1375 | /* usb device available? */ | ||
1376 | cp = usb_get_intfdata (intf); | ||
1377 | if (cp == NULL) { | ||
1378 | return -ENODEV; | ||
1379 | } | ||
1380 | if (mutex_lock_interruptible(&cp->mutex)) { | ||
1381 | return -ERESTARTSYS; | ||
1382 | } | ||
1383 | |||
1384 | /* we have access to the device. Now lets allocate memory */ | ||
1385 | ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL); | ||
1386 | if (ccp == NULL) { | ||
1387 | err ("out of memory"); | ||
1388 | ret = -ENOMEM; | ||
1389 | goto ofail; | ||
1390 | } | ||
1391 | |||
1392 | /* Initialize device descriptor */ | ||
1393 | mutex_init(&ccp->mutex); | ||
1394 | mutex_init(&ccp->readmutex); | ||
1395 | auerbuf_init (&ccp->bufctl); | ||
1396 | ccp->scontext.id = AUH_UNASSIGNED; | ||
1397 | ccp->scontext.dispatch = auerchar_ctrlread_dispatch; | ||
1398 | ccp->scontext.disconnect = auerchar_disconnect; | ||
1399 | init_waitqueue_head (&ccp->readwait); | ||
1400 | |||
1401 | ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE); | ||
1402 | if (ret) { | ||
1403 | goto ofail; | ||
1404 | } | ||
1405 | |||
1406 | cp->open_count++; | ||
1407 | ccp->auerdev = cp; | ||
1408 | dbg("open %s as /dev/%s", cp->dev_desc, cp->name); | ||
1409 | mutex_unlock(&cp->mutex); | ||
1410 | |||
1411 | /* file IO stuff */ | ||
1412 | file->f_pos = 0; | ||
1413 | file->private_data = ccp; | ||
1414 | return nonseekable_open(inode, file); | ||
1415 | |||
1416 | /* Error exit */ | ||
1417 | ofail: mutex_unlock(&cp->mutex); | ||
1418 | auerchar_delete (ccp); | ||
1419 | return ret; | ||
1420 | } | ||
1421 | |||
1422 | |||
1423 | /* IOCTL functions */ | ||
1424 | static long auerchar_ioctl(struct file *file, unsigned int cmd, | ||
1425 | unsigned long arg) | ||
1426 | { | ||
1427 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
1428 | int ret = 0; | ||
1429 | audevinfo_t devinfo; | ||
1430 | pauerswald_t cp = NULL; | ||
1431 | unsigned int u; | ||
1432 | unsigned int __user *user_arg = (unsigned int __user *)arg; | ||
1433 | |||
1434 | dbg ("ioctl"); | ||
1435 | |||
1436 | /* get the mutexes */ | ||
1437 | if (mutex_lock_interruptible(&ccp->mutex)) { | ||
1438 | return -ERESTARTSYS; | ||
1439 | } | ||
1440 | cp = ccp->auerdev; | ||
1441 | if (!cp) { | ||
1442 | mutex_unlock(&ccp->mutex); | ||
1443 | return -ENODEV; | ||
1444 | } | ||
1445 | if (mutex_lock_interruptible(&cp->mutex)) { | ||
1446 | mutex_unlock(&ccp->mutex); | ||
1447 | return -ERESTARTSYS; | ||
1448 | } | ||
1449 | |||
1450 | /* Check for removal */ | ||
1451 | if (!cp->usbdev) { | ||
1452 | mutex_unlock(&cp->mutex); | ||
1453 | mutex_unlock(&ccp->mutex); | ||
1454 | return -ENODEV; | ||
1455 | } | ||
1456 | lock_kernel(); | ||
1457 | switch (cmd) { | ||
1458 | |||
1459 | /* return != 0 if Transmit channel ready to send */ | ||
1460 | case IOCTL_AU_TXREADY: | ||
1461 | dbg ("IOCTL_AU_TXREADY"); | ||
1462 | u = ccp->auerdev | ||
1463 | && (ccp->scontext.id != AUH_UNASSIGNED) | ||
1464 | && !list_empty (&cp->bufctl.free_buff_list); | ||
1465 | ret = put_user (u, user_arg); | ||
1466 | break; | ||
1467 | |||
1468 | /* return != 0 if connected to a service channel */ | ||
1469 | case IOCTL_AU_CONNECT: | ||
1470 | dbg ("IOCTL_AU_CONNECT"); | ||
1471 | u = (ccp->scontext.id != AUH_UNASSIGNED); | ||
1472 | ret = put_user (u, user_arg); | ||
1473 | break; | ||
1474 | |||
1475 | /* return != 0 if Receive Data available */ | ||
1476 | case IOCTL_AU_RXAVAIL: | ||
1477 | dbg ("IOCTL_AU_RXAVAIL"); | ||
1478 | if (ccp->scontext.id == AUH_UNASSIGNED) { | ||
1479 | ret = -EIO; | ||
1480 | break; | ||
1481 | } | ||
1482 | u = 0; /* no data */ | ||
1483 | if (ccp->readbuf) { | ||
1484 | int restlen = ccp->readbuf->len - ccp->readoffset; | ||
1485 | if (restlen > 0) | ||
1486 | u = 1; | ||
1487 | } | ||
1488 | if (!u) { | ||
1489 | if (!list_empty (&ccp->bufctl.rec_buff_list)) { | ||
1490 | u = 1; | ||
1491 | } | ||
1492 | } | ||
1493 | ret = put_user (u, user_arg); | ||
1494 | break; | ||
1495 | |||
1496 | /* return the max. buffer length for the device */ | ||
1497 | case IOCTL_AU_BUFLEN: | ||
1498 | dbg ("IOCTL_AU_BUFLEN"); | ||
1499 | u = cp->maxControlLength; | ||
1500 | ret = put_user (u, user_arg); | ||
1501 | break; | ||
1502 | |||
1503 | /* requesting a service channel */ | ||
1504 | case IOCTL_AU_SERVREQ: | ||
1505 | dbg ("IOCTL_AU_SERVREQ"); | ||
1506 | /* requesting a service means: release the previous one first */ | ||
1507 | auerswald_removeservice (cp, &ccp->scontext); | ||
1508 | /* get the channel number */ | ||
1509 | ret = get_user (u, user_arg); | ||
1510 | if (ret) { | ||
1511 | break; | ||
1512 | } | ||
1513 | if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) { | ||
1514 | ret = -EIO; | ||
1515 | break; | ||
1516 | } | ||
1517 | dbg ("auerchar service request parameters are ok"); | ||
1518 | ccp->scontext.id = u; | ||
1519 | |||
1520 | /* request the service now */ | ||
1521 | ret = auerswald_addservice (cp, &ccp->scontext); | ||
1522 | if (ret) { | ||
1523 | /* no: revert service entry */ | ||
1524 | ccp->scontext.id = AUH_UNASSIGNED; | ||
1525 | } | ||
1526 | break; | ||
1527 | |||
1528 | /* get a string descriptor for the device */ | ||
1529 | case IOCTL_AU_DEVINFO: | ||
1530 | dbg ("IOCTL_AU_DEVINFO"); | ||
1531 | if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) { | ||
1532 | ret = -EFAULT; | ||
1533 | break; | ||
1534 | } | ||
1535 | u = strlen(cp->dev_desc)+1; | ||
1536 | if (u > devinfo.bsize) { | ||
1537 | u = devinfo.bsize; | ||
1538 | } | ||
1539 | ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0; | ||
1540 | break; | ||
1541 | |||
1542 | /* get the max. string descriptor length */ | ||
1543 | case IOCTL_AU_SLEN: | ||
1544 | dbg ("IOCTL_AU_SLEN"); | ||
1545 | u = AUSI_DLEN; | ||
1546 | ret = put_user (u, user_arg); | ||
1547 | break; | ||
1548 | |||
1549 | default: | ||
1550 | dbg ("IOCTL_AU_UNKNOWN"); | ||
1551 | ret = -ENOTTY; | ||
1552 | break; | ||
1553 | } | ||
1554 | unlock_kernel(); | ||
1555 | /* release the mutexes */ | ||
1556 | mutex_unlock(&cp->mutex); | ||
1557 | mutex_unlock(&ccp->mutex); | ||
1558 | return ret; | ||
1559 | } | ||
1560 | |||
1561 | /* Read data from the device */ | ||
1562 | static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos) | ||
1563 | { | ||
1564 | unsigned long flags; | ||
1565 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
1566 | pauerbuf_t bp = NULL; | ||
1567 | wait_queue_t wait; | ||
1568 | |||
1569 | dbg ("auerchar_read"); | ||
1570 | |||
1571 | /* Error checking */ | ||
1572 | if (!ccp) | ||
1573 | return -EIO; | ||
1574 | if (*ppos) | ||
1575 | return -ESPIPE; | ||
1576 | if (count == 0) | ||
1577 | return 0; | ||
1578 | |||
1579 | /* get the mutex */ | ||
1580 | if (mutex_lock_interruptible(&ccp->mutex)) | ||
1581 | return -ERESTARTSYS; | ||
1582 | |||
1583 | /* Can we expect to read something? */ | ||
1584 | if (ccp->scontext.id == AUH_UNASSIGNED) { | ||
1585 | mutex_unlock(&ccp->mutex); | ||
1586 | return -EIO; | ||
1587 | } | ||
1588 | |||
1589 | /* only one reader per device allowed */ | ||
1590 | if (mutex_lock_interruptible(&ccp->readmutex)) { | ||
1591 | mutex_unlock(&ccp->mutex); | ||
1592 | return -ERESTARTSYS; | ||
1593 | } | ||
1594 | |||
1595 | /* read data from readbuf, if available */ | ||
1596 | doreadbuf: | ||
1597 | bp = ccp->readbuf; | ||
1598 | if (bp) { | ||
1599 | /* read the maximum bytes */ | ||
1600 | int restlen = bp->len - ccp->readoffset; | ||
1601 | if (restlen < 0) | ||
1602 | restlen = 0; | ||
1603 | if (count > restlen) | ||
1604 | count = restlen; | ||
1605 | if (count) { | ||
1606 | if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) { | ||
1607 | dbg ("auerswald_read: copy_to_user failed"); | ||
1608 | mutex_unlock(&ccp->readmutex); | ||
1609 | mutex_unlock(&ccp->mutex); | ||
1610 | return -EFAULT; | ||
1611 | } | ||
1612 | } | ||
1613 | /* advance the read offset */ | ||
1614 | ccp->readoffset += count; | ||
1615 | restlen -= count; | ||
1616 | // reuse the read buffer | ||
1617 | if (restlen <= 0) { | ||
1618 | auerbuf_releasebuf (bp); | ||
1619 | ccp->readbuf = NULL; | ||
1620 | } | ||
1621 | /* return with number of bytes read */ | ||
1622 | if (count) { | ||
1623 | mutex_unlock(&ccp->readmutex); | ||
1624 | mutex_unlock(&ccp->mutex); | ||
1625 | return count; | ||
1626 | } | ||
1627 | } | ||
1628 | |||
1629 | /* a read buffer is not available. Try to get the next data block. */ | ||
1630 | doreadlist: | ||
1631 | /* Preparing for sleep */ | ||
1632 | init_waitqueue_entry (&wait, current); | ||
1633 | set_current_state (TASK_INTERRUPTIBLE); | ||
1634 | add_wait_queue (&ccp->readwait, &wait); | ||
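| /* the task is queued on readwait before rec_buff_list is checked, so a | ||
| wake_up() from the dispatch path between the check and schedule() | ||
| cannot be missed */ | ||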
1635 | |||
1636 | bp = NULL; | ||
1637 | spin_lock_irqsave (&ccp->bufctl.lock, flags); | ||
1638 | if (!list_empty (&ccp->bufctl.rec_buff_list)) { | ||
1639 | /* yes: get the entry */ | ||
1640 | struct list_head *tmp = ccp->bufctl.rec_buff_list.next; | ||
1641 | list_del (tmp); | ||
1642 | bp = list_entry (tmp, auerbuf_t, buff_list); | ||
1643 | } | ||
1644 | spin_unlock_irqrestore (&ccp->bufctl.lock, flags); | ||
1645 | |||
1646 | /* have we got data? */ | ||
1647 | if (bp) { | ||
1648 | ccp->readbuf = bp; | ||
1649 | ccp->readoffset = AUH_SIZE; /* for headerbyte */ | ||
1650 | set_current_state (TASK_RUNNING); | ||
1651 | remove_wait_queue (&ccp->readwait, &wait); | ||
1652 | goto doreadbuf; /* now we can read! */ | ||
1653 | } | ||
1654 | |||
1655 | /* no data available. Should we wait? */ | ||
1656 | if (file->f_flags & O_NONBLOCK) { | ||
1657 | dbg ("No read buffer available, returning -EAGAIN"); | ||
1658 | set_current_state (TASK_RUNNING); | ||
1659 | remove_wait_queue (&ccp->readwait, &wait); | ||
1660 | mutex_unlock(&ccp->readmutex); | ||
1661 | mutex_unlock(&ccp->mutex); | ||
1662 | return -EAGAIN; /* nonblocking, no data available */ | ||
1663 | } | ||
1664 | |||
1665 | /* yes, we should wait! */ | ||
1666 | mutex_unlock(&ccp->mutex); /* allow other operations while we wait */ | ||
1667 | schedule(); | ||
1668 | remove_wait_queue (&ccp->readwait, &wait); | ||
1669 | if (signal_pending (current)) { | ||
1670 | /* woken up by a signal */ | ||
1671 | mutex_unlock(&ccp->readmutex); | ||
1672 | return -ERESTARTSYS; | ||
1673 | } | ||
1674 | |||
1675 | /* Anything left to read? */ | ||
1676 | if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) { | ||
1677 | mutex_unlock(&ccp->readmutex); | ||
1678 | return -EIO; | ||
1679 | } | ||
1680 | |||
1681 | if (mutex_lock_interruptible(&ccp->mutex)) { | ||
1682 | mutex_unlock(&ccp->readmutex); | ||
1683 | return -ERESTARTSYS; | ||
1684 | } | ||
1685 | |||
1686 | /* try to read the incoming data again */ | ||
1687 | goto doreadlist; | ||
1688 | } | ||
1689 | |||
1690 | |||
1691 | /* Write a data block into the right service channel of the device */ | ||
1692 | static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos) | ||
1693 | { | ||
1694 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
1695 | pauerswald_t cp = NULL; | ||
1696 | pauerbuf_t bp; | ||
1697 | unsigned long flags; | ||
1698 | int ret; | ||
1699 | wait_queue_t wait; | ||
1700 | |||
1701 | dbg ("auerchar_write %zd bytes", len); | ||
1702 | |||
1703 | /* Error checking */ | ||
1704 | if (!ccp) | ||
1705 | return -EIO; | ||
1706 | if (*ppos) | ||
1707 | return -ESPIPE; | ||
1708 | if (len == 0) | ||
1709 | return 0; | ||
1710 | |||
1711 | write_again: | ||
1712 | /* get the mutex */ | ||
1713 | if (mutex_lock_interruptible(&ccp->mutex)) | ||
1714 | return -ERESTARTSYS; | ||
1715 | |||
1716 | /* Can we expect to write something? */ | ||
1717 | if (ccp->scontext.id == AUH_UNASSIGNED) { | ||
1718 | mutex_unlock(&ccp->mutex); | ||
1719 | return -EIO; | ||
1720 | } | ||
1721 | |||
1722 | cp = ccp->auerdev; | ||
1723 | if (!cp) { | ||
1724 | mutex_unlock(&ccp->mutex); | ||
1725 | return -ERESTARTSYS; | ||
1726 | } | ||
1727 | if (mutex_lock_interruptible(&cp->mutex)) { | ||
1728 | mutex_unlock(&ccp->mutex); | ||
1729 | return -ERESTARTSYS; | ||
1730 | } | ||
1731 | if (!cp->usbdev) { | ||
1732 | mutex_unlock(&cp->mutex); | ||
1733 | mutex_unlock(&ccp->mutex); | ||
1734 | return -EIO; | ||
1735 | } | ||
1736 | /* Prepare for sleep */ | ||
1737 | init_waitqueue_entry (&wait, current); | ||
1738 | set_current_state (TASK_INTERRUPTIBLE); | ||
1739 | add_wait_queue (&cp->bufferwait, &wait); | ||
1740 | |||
1741 | /* Try to get a buffer from the device pool. | ||
1742 | We can't use a buffer from ccp->bufctl because the write | ||
1743 | command will last beyond a release() */ | ||
1744 | bp = NULL; | ||
1745 | spin_lock_irqsave (&cp->bufctl.lock, flags); | ||
1746 | if (!list_empty (&cp->bufctl.free_buff_list)) { | ||
1747 | /* yes: get the entry */ | ||
1748 | struct list_head *tmp = cp->bufctl.free_buff_list.next; | ||
1749 | list_del (tmp); | ||
1750 | bp = list_entry (tmp, auerbuf_t, buff_list); | ||
1751 | } | ||
1752 | spin_unlock_irqrestore (&cp->bufctl.lock, flags); | ||
1753 | |||
1754 | /* are there any buffers left? */ | ||
1755 | if (!bp) { | ||
1756 | mutex_unlock(&cp->mutex); | ||
1757 | mutex_unlock(&ccp->mutex); | ||
1758 | |||
1759 | /* NONBLOCK: don't wait */ | ||
1760 | if (file->f_flags & O_NONBLOCK) { | ||
1761 | set_current_state (TASK_RUNNING); | ||
1762 | remove_wait_queue (&cp->bufferwait, &wait); | ||
1763 | return -EAGAIN; | ||
1764 | } | ||
1765 | |||
1766 | /* BLOCKING: wait */ | ||
1767 | schedule(); | ||
1768 | remove_wait_queue (&cp->bufferwait, &wait); | ||
1769 | if (signal_pending (current)) { | ||
1770 | /* woken up by a signal */ | ||
1771 | return -ERESTARTSYS; | ||
1772 | } | ||
1773 | goto write_again; | ||
1774 | } else { | ||
1775 | set_current_state (TASK_RUNNING); | ||
1776 | remove_wait_queue (&cp->bufferwait, &wait); | ||
1777 | } | ||
1778 | |||
1779 | /* protect against too big write requests */ | ||
1780 | if (len > cp->maxControlLength) | ||
1781 | len = cp->maxControlLength; | ||
1782 | |||
1783 | /* Fill the buffer */ | ||
1784 | if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) { | ||
1785 | dbg ("copy_from_user failed"); | ||
1786 | auerbuf_releasebuf (bp); | ||
1787 | /* Wake up all processes waiting for a buffer */ | ||
1788 | wake_up (&cp->bufferwait); | ||
1789 | mutex_unlock(&cp->mutex); | ||
1790 | mutex_unlock(&ccp->mutex); | ||
1791 | return -EFAULT; | ||
1792 | } | ||
1793 | |||
1794 | /* set the header byte */ | ||
1795 | *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT; | ||
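| /* the user payload was copied to bufp + AUH_SIZE above, so the transfer | ||
| consists of the AUH_SIZE header byte(s) followed by len payload bytes */ | ||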
1796 | |||
1797 | /* Set the transfer Parameters */ | ||
1798 | bp->len = len+AUH_SIZE; | ||
1799 | bp->dr->bRequestType = AUT_WREQ; | ||
1800 | bp->dr->bRequest = AUV_WBLOCK; | ||
1801 | bp->dr->wValue = cpu_to_le16 (0); | ||
1802 | bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT); | ||
1803 | bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE); | ||
1804 | usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0), | ||
1805 | (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE, | ||
1806 | auerchar_ctrlwrite_complete, bp); | ||
1807 | /* up we go */ | ||
1808 | ret = auerchain_submit_urb (&cp->controlchain, bp->urbp); | ||
1809 | mutex_unlock(&cp->mutex); | ||
1810 | if (ret) { | ||
1811 | dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret); | ||
1812 | auerbuf_releasebuf (bp); | ||
1813 | /* Wake up all processes waiting for a buffer */ | ||
1814 | wake_up (&cp->bufferwait); | ||
1815 | mutex_unlock(&ccp->mutex); | ||
1816 | return -EIO; | ||
1817 | } | ||
1818 | else { | ||
1819 | dbg ("auerchar_write: Write OK"); | ||
1820 | mutex_unlock(&ccp->mutex); | ||
1821 | return len; | ||
1822 | } | ||
1823 | } | ||
1824 | |||
1825 | |||
1826 | /* Close a character device */ | ||
1827 | static int auerchar_release (struct inode *inode, struct file *file) | ||
1828 | { | ||
1829 | pauerchar_t ccp = (pauerchar_t) file->private_data; | ||
1830 | pauerswald_t cp; | ||
1831 | dbg("release"); | ||
1832 | |||
1833 | mutex_lock(&ccp->mutex); | ||
1834 | cp = ccp->auerdev; | ||
1835 | if (cp) { | ||
1836 | mutex_lock(&cp->mutex); | ||
1837 | /* remove an open service */ | ||
1838 | auerswald_removeservice (cp, &ccp->scontext); | ||
1839 | /* detach from device */ | ||
1840 | if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) { | ||
1841 | /* usb device waits for removal */ | ||
1842 | mutex_unlock(&cp->mutex); | ||
1843 | auerswald_delete (cp); | ||
1844 | } else { | ||
1845 | mutex_unlock(&cp->mutex); | ||
1846 | } | ||
1847 | cp = NULL; | ||
1848 | ccp->auerdev = NULL; | ||
1849 | } | ||
1850 | mutex_unlock(&ccp->mutex); | ||
1851 | auerchar_delete (ccp); | ||
1852 | |||
1853 | return 0; | ||
1854 | } | ||
1855 | |||
1856 | |||
1857 | /*----------------------------------------------------------------------*/ | ||
1858 | /* File operation structure */ | ||
1859 | static const struct file_operations auerswald_fops = | ||
1860 | { | ||
1861 | .owner = THIS_MODULE, | ||
1862 | .llseek = no_llseek, | ||
1863 | .read = auerchar_read, | ||
1864 | .write = auerchar_write, | ||
1865 | .unlocked_ioctl = auerchar_ioctl, | ||
1866 | .open = auerchar_open, | ||
1867 | .release = auerchar_release, | ||
1868 | }; | ||
1869 | |||
1870 | static struct usb_class_driver auerswald_class = { | ||
1871 | .name = "auer%d", | ||
1872 | .fops = &auerswald_fops, | ||
1873 | .minor_base = AUER_MINOR_BASE, | ||
1874 | }; | ||
1875 | |||
1876 | |||
1877 | /* --------------------------------------------------------------------- */ | ||
1878 | /* Special USB driver functions */ | ||
1879 | |||
1880 | /* Probe if this driver wants to serve an USB device | ||
1881 | |||
1882 | This entry point is called whenever a new device is attached to the bus. | ||
1883 | Then the device driver has to create a new instance of its internal data | ||
1884 | structures for the new device. | ||
1885 | |||
1886 | The dev argument specifies the device context, which contains pointers | ||
1887 | to all USB descriptors. The interface argument specifies the interface | ||
1888 | number. If a USB driver wants to bind itself to a particular device and | ||
1889 | interface, it has to return a pointer. This pointer normally references | ||
1890 | the device driver's context structure. | ||
1891 | |||
1892 | Probing is normally done by checking the vendor and product identifications | ||
1893 | or the class and subclass definitions. If they match, the interface number | ||
1894 | is compared with the ones supported by the driver. When probing is done | ||
1895 | class-based, it might be necessary to parse some more USB descriptors because | ||
1896 | the device properties can vary over a wide range. | ||
1897 | */ | ||
1898 | static int auerswald_probe (struct usb_interface *intf, | ||
1899 | const struct usb_device_id *id) | ||
1900 | { | ||
1901 | struct usb_device *usbdev = interface_to_usbdev(intf); | ||
1902 | pauerswald_t cp = NULL; | ||
1903 | unsigned int u = 0; | ||
1904 | __le16 *pbuf; | ||
1905 | int ret; | ||
1906 | |||
1907 | dbg ("probe: vendor id 0x%x, device id 0x%x", | ||
1908 | le16_to_cpu(usbdev->descriptor.idVendor), | ||
1909 | le16_to_cpu(usbdev->descriptor.idProduct)); | ||
1910 | |||
1911 | /* we use only the first -and only- interface */ | ||
1912 | if (intf->altsetting->desc.bInterfaceNumber != 0) | ||
1913 | return -ENODEV; | ||
1914 | |||
1915 | /* allocate memory for our device and initialize it */ | ||
1916 | cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL); | ||
1917 | if (cp == NULL) { | ||
1918 | err ("out of memory"); | ||
1919 | goto pfail; | ||
1920 | } | ||
1921 | |||
1922 | /* Initialize device descriptor */ | ||
1923 | mutex_init(&cp->mutex); | ||
1924 | cp->usbdev = usbdev; | ||
1925 | auerchain_init (&cp->controlchain); | ||
1926 | auerbuf_init (&cp->bufctl); | ||
1927 | init_waitqueue_head (&cp->bufferwait); | ||
1928 | |||
1929 | ret = usb_register_dev(intf, &auerswald_class); | ||
1930 | if (ret) { | ||
1931 | err ("Not able to get a minor for this device."); | ||
1932 | goto pfail; | ||
1933 | } | ||
1934 | |||
1935 | /* Give the device a name */ | ||
1936 | sprintf (cp->name, "usb/auer%d", intf->minor); | ||
1937 | |||
1938 | /* Store the index */ | ||
1939 | cp->dtindex = intf->minor; | ||
1940 | |||
1941 | /* Get the usb version of the device */ | ||
1942 | cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice); | ||
1943 | dbg ("Version is %X", cp->version); | ||
1944 | |||
1945 | /* allow the device some time to settle */ | ||
1946 | msleep(334); | ||
1947 | |||
1948 | /* Try to get a suitable textual description of the device */ | ||
1949 | /* Device name:*/ | ||
1950 | ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1); | ||
1951 | if (ret >= 0) { | ||
1952 | u += ret; | ||
1953 | /* Append Serial Number */ | ||
1954 | memcpy(&cp->dev_desc[u], ",Ser# ", 6); | ||
1955 | u += 6; | ||
1956 | ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1); | ||
1957 | if (ret >= 0) { | ||
1958 | u += ret; | ||
1959 | /* Append subscriber number */ | ||
1960 | memcpy(&cp->dev_desc[u], ", ", 2); | ||
1961 | u += 2; | ||
1962 | ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1); | ||
1963 | if (ret >= 0) { | ||
1964 | u += ret; | ||
1965 | } | ||
1966 | } | ||
1967 | } | ||
1968 | cp->dev_desc[u] = '\0'; | ||
1969 | info("device is a %s", cp->dev_desc); | ||
1970 | |||
1971 | /* get the maximum allowed control transfer length */ | ||
1972 | pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */ | ||
1973 | if (!pbuf) { | ||
1974 | err( "out of memory"); | ||
1975 | goto pfail; | ||
1976 | } | ||
1977 | ret = usb_control_msg(cp->usbdev, /* pointer to device */ | ||
1978 | usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */ | ||
1979 | AUV_GETINFO, /* USB message request value */ | ||
1980 | AUT_RREQ, /* USB message request type value */ | ||
1981 | 0, /* USB message value */ | ||
1982 | AUDI_MBCTRANS, /* USB message index value */ | ||
1983 | pbuf, /* pointer to the receive buffer */ | ||
1984 | 2, /* length of the buffer */ | ||
1985 | 2000); /* time to wait for the message to complete before timing out */ | ||
1986 | if (ret == 2) { | ||
1987 | cp->maxControlLength = le16_to_cpup(pbuf); | ||
1988 | kfree(pbuf); | ||
1989 | dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength); | ||
1990 | } else { | ||
1991 | kfree(pbuf); | ||
1992 | err("setup: getting max. allowed control transfer length failed with error %d", ret); | ||
1993 | goto pfail; | ||
1994 | } | ||
1995 | |||
1996 | /* allocate a chain for the control messages */ | ||
1997 | if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) { | ||
1998 | err ("out of memory"); | ||
1999 | goto pfail; | ||
2000 | } | ||
2001 | |||
2002 | /* allocate buffers for control messages */ | ||
2003 | if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) { | ||
2004 | err ("out of memory"); | ||
2005 | goto pfail; | ||
2006 | } | ||
2007 | |||
2008 | /* start the interrupt endpoint */ | ||
2009 | if (auerswald_int_open (cp)) { | ||
2010 | err ("int endpoint failed"); | ||
2011 | goto pfail; | ||
2012 | } | ||
2013 | |||
2014 | /* all OK */ | ||
2015 | usb_set_intfdata (intf, cp); | ||
2016 | return 0; | ||
2017 | |||
2018 | /* Error exit: clean up the memory */ | ||
2019 | pfail: auerswald_delete (cp); | ||
2020 | return -EIO; | ||
2021 | } | ||
2022 | |||
2023 | |||
2024 | /* Disconnect driver from a served device | ||
2025 | |||
2026 | This function is called whenever a device which was served by this driver | ||
2027 | is disconnected. | ||
2028 | |||
2029 | The argument dev specifies the device context and driver_context | ||
2030 | is the pointer to the driver context previously registered by the | ||
2031 | probe function. After returning from the disconnect function the USB | ||
2032 | framework completely deallocates all data structures associated with | ||
2033 | this device. So especially the usb_device structure must not be used | ||
2034 | any longer by the usb driver. | ||
2035 | */ | ||
2036 | static void auerswald_disconnect (struct usb_interface *intf) | ||
2037 | { | ||
2038 | pauerswald_t cp = usb_get_intfdata (intf); | ||
2039 | unsigned int u; | ||
2040 | |||
2041 | usb_set_intfdata (intf, NULL); | ||
2042 | if (!cp) | ||
2043 | return; | ||
2044 | |||
2045 | /* give back our USB minor number */ | ||
2046 | usb_deregister_dev(intf, &auerswald_class); | ||
2047 | |||
2048 | mutex_lock(&cp->mutex); | ||
2049 | info ("device /dev/%s now disconnecting", cp->name); | ||
2050 | |||
2051 | /* Stop the interrupt endpoint */ | ||
2052 | auerswald_int_release (cp); | ||
2053 | |||
2054 | /* remove the control chain allocated in auerswald_probe | ||
2055 | This has the benefit of | ||
2056 | a) all pending (a)synchronous urbs are unlinked | ||
2057 | b) all buffers dealing with urbs are reclaimed | ||
2058 | */ | ||
2059 | auerchain_free (&cp->controlchain); | ||
2060 | |||
2061 | if (cp->open_count == 0) { | ||
2062 | /* nobody is using this device. So we can clean up now */ | ||
2063 | mutex_unlock(&cp->mutex); | ||
2064 | /* mutex_unlock() is possible here because no other task | ||
2065 | can open the device (see above). I don't want | ||
2066 | to kfree() a locked mutex. */ | ||
2067 | |||
2068 | auerswald_delete (cp); | ||
2069 | } else { | ||
2070 | /* device is used. Remove the pointer to the | ||
2071 | usb device (it's not valid any more). The last | ||
2072 | release() will do the clean up */ | ||
2073 | cp->usbdev = NULL; | ||
2074 | mutex_unlock(&cp->mutex); | ||
2075 | /* Terminate waiting writers */ | ||
2076 | wake_up (&cp->bufferwait); | ||
2077 | /* Inform all waiting readers */ | ||
2078 | for ( u = 0; u < AUH_TYPESIZE; u++) { | ||
2079 | pauerscon_t scp = cp->services[u]; | ||
2080 | if (scp) | ||
2081 | scp->disconnect( scp); | ||
2082 | } | ||
2083 | } | ||
2084 | } | ||
2085 | |||
2086 | /* Descriptor for the devices which are served by this driver. | ||
2087 | NOTE: this struct is parsed by the usbmanager install scripts. | ||
2088 | Don't change without caution! | ||
2089 | */ | ||
2090 | static struct usb_device_id auerswald_ids [] = { | ||
2091 | { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */ | ||
2092 | { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */ | ||
2093 | { USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */ | ||
2094 | { USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */ | ||
2095 | { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */ | ||
2096 | { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */ | ||
2097 | { } /* Terminating entry */ | ||
2098 | }; | ||
2099 | |||
2100 | /* Standard module device table */ | ||
2101 | MODULE_DEVICE_TABLE (usb, auerswald_ids); | ||
2102 | |||
2103 | /* Standard usb driver struct */ | ||
2104 | static struct usb_driver auerswald_driver = { | ||
2105 | .name = "auerswald", | ||
2106 | .probe = auerswald_probe, | ||
2107 | .disconnect = auerswald_disconnect, | ||
2108 | .id_table = auerswald_ids, | ||
2109 | }; | ||
2110 | |||
2111 | |||
2112 | /* --------------------------------------------------------------------- */ | ||
2113 | /* Module loading/unloading */ | ||
2114 | |||
2115 | /* Driver initialisation. Called after module loading. | ||
2116 | NOTE: there is no concurrency at _init | ||
2117 | */ | ||
2118 | static int __init auerswald_init (void) | ||
2119 | { | ||
2120 | int result; | ||
2121 | dbg ("init"); | ||
2122 | |||
2123 | /* register driver at the USB subsystem */ | ||
2124 | result = usb_register (&auerswald_driver); | ||
2125 | if (result < 0) { | ||
2126 | err ("driver could not be registered"); | ||
2127 | return -1; | ||
2128 | } | ||
2129 | return 0; | ||
2130 | } | ||
2131 | |||
2132 | /* Driver deinit. Called before module removal. | ||
2133 | NOTE: there is no concurrency at _cleanup | ||
2134 | */ | ||
2135 | static void __exit auerswald_cleanup (void) | ||
2136 | { | ||
2137 | dbg ("cleanup"); | ||
2138 | usb_deregister (&auerswald_driver); | ||
2139 | } | ||
2140 | |||
2141 | /* --------------------------------------------------------------------- */ | ||
2142 | /* Linux device driver module description */ | ||
2143 | |||
2144 | MODULE_AUTHOR (DRIVER_AUTHOR); | ||
2145 | MODULE_DESCRIPTION (DRIVER_DESC); | ||
2146 | MODULE_LICENSE ("GPL"); | ||
2147 | |||
2148 | module_init (auerswald_init); | ||
2149 | module_exit (auerswald_cleanup); | ||
2150 | |||
2151 | /* --------------------------------------------------------------------- */ | ||
2152 | |||
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index e6ca9979e3ae..a4ef77ef917d 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/poll.h> | 21 | #include <linux/poll.h> |
22 | #include <linux/version.h> | ||
23 | #include <linux/usb/iowarrior.h> | 22 | #include <linux/usb/iowarrior.h> |
24 | 23 | ||
25 | /* Version Information */ | 24 | /* Version Information */ |
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c index d94aa7387608..b897f6554ecd 100644 --- a/drivers/usb/misc/isight_firmware.c +++ b/drivers/usb/misc/isight_firmware.c | |||
@@ -48,7 +48,8 @@ static int isight_firmware_load(struct usb_interface *intf, | |||
48 | 48 | ||
49 | if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) { | 49 | if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) { |
50 | printk(KERN_ERR "Unable to load isight firmware\n"); | 50 | printk(KERN_ERR "Unable to load isight firmware\n"); |
51 | return -ENODEV; | 51 | ret = -ENODEV; |
52 | goto out; | ||
52 | } | 53 | } |
53 | 54 | ||
54 | ptr = firmware->data; | 55 | ptr = firmware->data; |
@@ -91,7 +92,6 @@ static int isight_firmware_load(struct usb_interface *intf, | |||
91 | buf, llen, 300) != llen) { | 92 | buf, llen, 300) != llen) { |
92 | printk(KERN_ERR | 93 | printk(KERN_ERR |
93 | "Failed to load isight firmware\n"); | 94 | "Failed to load isight firmware\n"); |
94 | kfree(buf); | ||
95 | ret = -ENODEV; | 95 | ret = -ENODEV; |
96 | goto out; | 96 | goto out; |
97 | } | 97 | } |
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index fbace41a7cba..69c34a58e205 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c | |||
@@ -3270,6 +3270,7 @@ static struct usb_device_id sisusb_table [] = { | |||
3270 | { USB_DEVICE(0x0711, 0x0900) }, | 3270 | { USB_DEVICE(0x0711, 0x0900) }, |
3271 | { USB_DEVICE(0x0711, 0x0901) }, | 3271 | { USB_DEVICE(0x0711, 0x0901) }, |
3272 | { USB_DEVICE(0x0711, 0x0902) }, | 3272 | { USB_DEVICE(0x0711, 0x0902) }, |
3273 | { USB_DEVICE(0x0711, 0x0918) }, | ||
3273 | { USB_DEVICE(0x182d, 0x021c) }, | 3274 | { USB_DEVICE(0x182d, 0x021c) }, |
3274 | { USB_DEVICE(0x182d, 0x0269) }, | 3275 | { USB_DEVICE(0x182d, 0x0269) }, |
3275 | { } | 3276 | { } |
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig new file mode 100644 index 000000000000..a0017486ad4e --- /dev/null +++ b/drivers/usb/musb/Kconfig | |||
@@ -0,0 +1,175 @@ | |||
1 | # | ||
2 | # USB Dual Role (OTG-ready) Controller Drivers | ||
3 | # for silicon based on Mentor Graphics INVENTRA designs | ||
4 | # | ||
5 | |||
6 | comment "Enable Host or Gadget support to see Inventra options" | ||
7 | depends on !USB && USB_GADGET=n | ||
8 | |||
9 | # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller | ||
10 | config USB_MUSB_HDRC | ||
11 | depends on (USB || USB_GADGET) && HAVE_CLK | ||
12 | select TWL4030_USB if MACH_OMAP_3430SDP | ||
13 | tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' | ||
14 | help | ||
15 | Say Y here if your system has a dual role high speed USB | ||
16 | controller based on the Mentor Graphics silicon IP. Then | ||
17 | configure options to match your silicon and the board | ||
18 | it's being used with, including the USB peripheral role, | ||
19 | or the USB host role, or both. | ||
20 | |||
21 | Texas Instruments parts using this IP include DaVinci 644x, | ||
22 | OMAP 243x, OMAP 343x, and TUSB 6010. | ||
23 | |||
24 | If you do not know what this is, please say N. | ||
25 | |||
26 | To compile this driver as a module, choose M here; the | ||
27 | module will be called "musb_hdrc". | ||
28 | |||
29 | config USB_MUSB_SOC | ||
30 | boolean | ||
31 | depends on USB_MUSB_HDRC | ||
32 | default y if ARCH_DAVINCI | ||
33 | default y if ARCH_OMAP2430 | ||
34 | default y if ARCH_OMAP34XX | ||
35 | help | ||
36 | Use a static <asm/arch/hdrc_cnf.h> file to describe how the | ||
37 | controller is configured (endpoints, mechanisms, etc) on the | ||
38 | current iteration of a given system-on-chip. | ||
39 | |||
40 | comment "DaVinci 644x USB support" | ||
41 | depends on USB_MUSB_HDRC && ARCH_DAVINCI | ||
42 | |||
43 | comment "OMAP 243x high speed USB support" | ||
44 | depends on USB_MUSB_HDRC && ARCH_OMAP2430 | ||
45 | |||
46 | comment "OMAP 343x high speed USB support" | ||
47 | depends on USB_MUSB_HDRC && ARCH_OMAP34XX | ||
48 | |||
49 | config USB_TUSB6010 | ||
50 | boolean "TUSB 6010 support" | ||
51 | depends on USB_MUSB_HDRC && !USB_MUSB_SOC | ||
52 | default y | ||
53 | help | ||
54 | The TUSB 6010 chip, from Texas Instruments, connects a discrete | ||
55 | HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ | ||
56 | (a high speed serial link). It can use system-specific external | ||
57 | DMA controllers. | ||
58 | |||
59 | choice | ||
60 | prompt "Driver Mode" | ||
61 | depends on USB_MUSB_HDRC | ||
62 | help | ||
63 | Dual-Role devices can support both host and peripheral roles, | ||
64 | as well as the special "OTG Device" role which can switch | ||
65 | between both roles as needed. | ||
66 | |||
67 | # use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support; | ||
68 | # OTG needs both roles, not just USB_MUSB_HOST. | ||
69 | config USB_MUSB_HOST | ||
70 | depends on USB | ||
71 | bool "USB Host" | ||
72 | help | ||
73 | Say Y here if your system supports the USB host role. | ||
74 | If it has a USB "A" (rectangular), "Mini-A" (uncommon), | ||
75 | or "Mini-AB" connector, it supports the host role. | ||
76 | (With a "Mini-AB" connector, you should enable USB OTG.) | ||
77 | |||
78 | # use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral | ||
79 | # side support ... OTG needs both roles | ||
80 | config USB_MUSB_PERIPHERAL | ||
81 | depends on USB_GADGET | ||
82 | bool "USB Peripheral (gadget stack)" | ||
83 | select USB_GADGET_MUSB_HDRC | ||
84 | help | ||
85 | Say Y here if your system supports the USB peripheral role. | ||
86 | If it has a USB "B" (squarish), "Mini-B", or "Mini-AB" | ||
87 | connector, it supports the peripheral role. | ||
88 | (With a "Mini-AB" connector, you should enable USB OTG.) | ||
89 | |||
90 | config USB_MUSB_OTG | ||
91 | depends on USB && USB_GADGET && PM && EXPERIMENTAL | ||
92 | bool "Both host and peripheral: USB OTG (On The Go) Device" | ||
93 | select USB_GADGET_MUSB_HDRC | ||
94 | select USB_OTG | ||
95 | help | ||
96 | The most notable feature of USB OTG is support for a | ||
97 | "Dual-Role" device, which can act as either a device | ||
98 | or a host. The initial role choice can be changed | ||
99 | later, when two dual-role devices talk to each other. | ||
100 | |||
101 | At this writing, the OTG support in this driver is incomplete, | ||
102 | omitting the mandatory HNP or SRP protocols. However, some | ||
103 | of the cable based role switching works. (That is, grounding | ||
104 | the ID pin switches the controller to host mode, while leaving | ||
105 | it floating leaves it in peripheral mode.) | ||
106 | |||
107 | Select this if your system has a Mini-AB connector, or | ||
108 | to simplify certain kinds of configuration. | ||
109 | |||
110 | To implement your OTG Targeted Peripherals List (TPL), enable | ||
111 | USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h" | ||
112 | to match your requirements. | ||
113 | |||
114 | endchoice | ||
115 | |||
116 | # enable peripheral support (including with OTG) | ||
117 | config USB_GADGET_MUSB_HDRC | ||
118 | bool | ||
119 | depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) | ||
120 | # default y | ||
121 | # select USB_GADGET_DUALSPEED | ||
122 | # select USB_GADGET_SELECTED | ||
123 | |||
124 | # enables host support (including with OTG) | ||
125 | config USB_MUSB_HDRC_HCD | ||
126 | bool | ||
127 | depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG) | ||
128 | select USB_OTG if USB_GADGET_MUSB_HDRC | ||
129 | default y | ||
130 | |||
131 | |||
132 | config MUSB_PIO_ONLY | ||
133 | bool 'Disable DMA (always use PIO)' | ||
134 | depends on USB_MUSB_HDRC | ||
135 | default y if USB_TUSB6010 | ||
136 | help | ||
137 | All data is copied between memory and FIFO by the CPU. | ||
138 | DMA controllers are ignored. | ||
139 | |||
140 | Do not select 'n' here unless DMA support for your SOC or board | ||
141 | is unavailable (or unstable). When DMA is enabled at compile time, | ||
142 | you can still disable it at run time using the "use_dma=n" module | ||
143 | parameter. | ||
144 | |||
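The "use_dma=n" escape hatch mentioned in the help text above is an ordinary module parameter. A minimal sketch of how such a runtime switch is typically wired up; the real declaration lives in musb_core.c, which is not part of this hunk, so treat the exact names here as illustrative only:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

/* illustrative only: DMA support compiled in, optionally disabled at load time */
static bool use_dma = true;
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");

Loading the module with use_dma=n then leaves the CPPI/Inventra/OMAP DMA paths unused and all transfers fall back to PIO, as the help text describes.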
145 | config USB_INVENTRA_DMA | ||
146 | bool | ||
147 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | ||
148 | default ARCH_OMAP2430 || ARCH_OMAP34XX | ||
149 | help | ||
150 | Enable DMA transfers using Mentor's engine. | ||
151 | |||
152 | config USB_TI_CPPI_DMA | ||
153 | bool | ||
154 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | ||
155 | default ARCH_DAVINCI | ||
156 | help | ||
157 | Enable DMA transfers when TI CPPI DMA is available. | ||
158 | |||
159 | config USB_TUSB_OMAP_DMA | ||
160 | bool | ||
161 | depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY | ||
162 | depends on USB_TUSB6010 | ||
163 | depends on ARCH_OMAP | ||
164 | default y | ||
165 | help | ||
166 | Enable DMA transfers on TUSB 6010 when OMAP DMA is available. | ||
167 | |||
168 | config USB_MUSB_DEBUG | ||
169 | depends on USB_MUSB_HDRC | ||
170 | bool "Enable debugging messages" | ||
171 | default n | ||
172 | help | ||
173 | This enables musb debugging. To set the logging level use the debug | ||
174 | module parameter. Starting at level 3, per-transfer (urb, usb_request, | ||
175 | packet, or dma transfer) tracing may kick in. | ||
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile new file mode 100644 index 000000000000..b6af0d687a73 --- /dev/null +++ b/drivers/usb/musb/Makefile | |||
@@ -0,0 +1,69 @@ | |||
1 | # | ||
2 | # for USB OTG silicon based on Mentor Graphics INVENTRA designs | ||
3 | # | ||
4 | |||
5 | musb_hdrc-objs := musb_core.o | ||
6 | |||
7 | obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o | ||
8 | |||
9 | ifeq ($(CONFIG_ARCH_DAVINCI),y) | ||
10 | musb_hdrc-objs += davinci.o | ||
11 | endif | ||
12 | |||
13 | ifeq ($(CONFIG_USB_TUSB6010),y) | ||
14 | musb_hdrc-objs += tusb6010.o | ||
15 | endif | ||
16 | |||
17 | ifeq ($(CONFIG_ARCH_OMAP2430),y) | ||
18 | musb_hdrc-objs += omap2430.o | ||
19 | endif | ||
20 | |||
21 | ifeq ($(CONFIG_ARCH_OMAP3430),y) | ||
22 | musb_hdrc-objs += omap2430.o | ||
23 | endif | ||
24 | |||
25 | ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y) | ||
26 | musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o | ||
27 | endif | ||
28 | |||
29 | ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y) | ||
30 | musb_hdrc-objs += musb_virthub.o musb_host.o | ||
31 | endif | ||
32 | |||
33 | # the kconfig must guarantee that only one of the | ||
34 | # possible I/O schemes will be enabled at a time ... | ||
35 | # PIO only, or DMA (several potential schemes). | ||
36 | # though PIO is always there to back up DMA, and for ep0 | ||
37 | |||
38 | ifneq ($(CONFIG_MUSB_PIO_ONLY),y) | ||
39 | |||
40 | ifeq ($(CONFIG_USB_INVENTRA_DMA),y) | ||
41 | musb_hdrc-objs += musbhsdma.o | ||
42 | |||
43 | else | ||
44 | ifeq ($(CONFIG_USB_TI_CPPI_DMA),y) | ||
45 | musb_hdrc-objs += cppi_dma.o | ||
46 | |||
47 | else | ||
48 | ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y) | ||
49 | musb_hdrc-objs += tusb6010_omap.o | ||
50 | |||
51 | endif | ||
52 | endif | ||
53 | endif | ||
54 | endif | ||
55 | |||
56 | |||
57 | ################################################################################ | ||
58 | |||
59 | # FIXME remove all these extra "-DMUSB_*" things, stick to CONFIG_* | ||
60 | |||
61 | ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y) | ||
62 | EXTRA_CFLAGS += -DMUSB_AHB_ID | ||
63 | endif | ||
64 | |||
65 | # Debugging | ||
66 | |||
67 | ifeq ($(CONFIG_USB_MUSB_DEBUG),y) | ||
68 | EXTRA_CFLAGS += -DDEBUG | ||
69 | endif | ||
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c new file mode 100644 index 000000000000..5ad6d0893cbe --- /dev/null +++ b/drivers/usb/musb/cppi_dma.c | |||
@@ -0,0 +1,1540 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
3 | * | ||
4 | * This file implements a DMA interface using TI's CPPI DMA. | ||
5 | * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB. | ||
6 | * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci. | ||
7 | */ | ||
8 | |||
9 | #include <linux/usb.h> | ||
10 | |||
11 | #include "musb_core.h" | ||
12 | #include "cppi_dma.h" | ||
13 | |||
14 | |||
15 | /* CPPI DMA status 7-mar-2006: | ||
16 | * | ||
17 | * - See musb_{host,gadget}.c for more info | ||
18 | * | ||
19 | * - Correct RX DMA generally forces the engine into irq-per-packet mode, | ||
20 | * which can easily saturate the CPU under non-mass-storage loads. | ||
21 | * | ||
22 | * NOTES 24-aug-2006 (2.6.18-rc4): | ||
23 | * | ||
24 | * - peripheral RXDMA wedged in a test with packets of length 512/512/1. | ||
25 | * evidently after the 1 byte packet was received and acked, the queue | ||
26 | * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003, | ||
27 | * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401 | ||
28 | * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx | ||
29 | * of its next (512 byte) packet. IRQ issues? | ||
30 | * | ||
31 | * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will | ||
32 | * evidently also directly update the RX and TX CSRs ... so audit all | ||
33 | * host and peripheral side DMA code to avoid CSR access after DMA has | ||
34 | * been started. | ||
35 | */ | ||
36 | |||
37 | /* REVISIT now we can avoid preallocating these descriptors; or | ||
38 | * more simply, switch to a global freelist not per-channel ones. | ||
39 | * Note: at full speed, 64 descriptors == 4K bulk data. | ||
40 | */ | ||
41 | #define NUM_TXCHAN_BD 64 | ||
42 | #define NUM_RXCHAN_BD 64 | ||
43 | |||
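The "64 descriptors == 4K bulk data" remark above assumes the full-speed bulk maxpacket of 64 bytes; spelled out as arithmetic (illustrative only, not driver code):

/* 64 BDs, each describing one full-speed bulk packet of at most 64 bytes */
enum { FS_BULK_MAXPACKET = 64 };
/* NUM_TXCHAN_BD * FS_BULK_MAXPACKET == 64 * 64 == 4096 bytes == 4K */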
44 | static inline void cpu_drain_writebuffer(void) | ||
45 | { | ||
46 | wmb(); | ||
47 | #ifdef CONFIG_CPU_ARM926T | ||
48 | /* REVISIT this "should not be needed", | ||
49 | * but lack of it sure seemed to hurt ... | ||
50 | */ | ||
51 | asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n"); | ||
52 | #endif | ||
53 | } | ||
54 | |||
55 | static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) | ||
56 | { | ||
57 | struct cppi_descriptor *bd = c->freelist; | ||
58 | |||
59 | if (bd) | ||
60 | c->freelist = bd->next; | ||
61 | return bd; | ||
62 | } | ||
63 | |||
64 | static inline void | ||
65 | cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) | ||
66 | { | ||
67 | if (!bd) | ||
68 | return; | ||
69 | bd->next = c->freelist; | ||
70 | c->freelist = bd; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Start DMA controller | ||
75 | * | ||
76 | * Initialize the DMA controller as necessary. | ||
77 | */ | ||
78 | |||
79 | /* zero out entire rx state RAM entry for the channel */ | ||
80 | static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx) | ||
81 | { | ||
82 | musb_writel(&rx->rx_skipbytes, 0, 0); | ||
83 | musb_writel(&rx->rx_head, 0, 0); | ||
84 | musb_writel(&rx->rx_sop, 0, 0); | ||
85 | musb_writel(&rx->rx_current, 0, 0); | ||
86 | musb_writel(&rx->rx_buf_current, 0, 0); | ||
87 | musb_writel(&rx->rx_len_len, 0, 0); | ||
88 | musb_writel(&rx->rx_cnt_cnt, 0, 0); | ||
89 | } | ||
90 | |||
91 | /* zero out entire tx state RAM entry for the channel */ | ||
92 | static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr) | ||
93 | { | ||
94 | musb_writel(&tx->tx_head, 0, 0); | ||
95 | musb_writel(&tx->tx_buf, 0, 0); | ||
96 | musb_writel(&tx->tx_current, 0, 0); | ||
97 | musb_writel(&tx->tx_buf_current, 0, 0); | ||
98 | musb_writel(&tx->tx_info, 0, 0); | ||
99 | musb_writel(&tx->tx_rem_len, 0, 0); | ||
100 | /* musb_writel(&tx->tx_dummy, 0, 0); */ | ||
101 | musb_writel(&tx->tx_complete, 0, ptr); | ||
102 | } | ||
103 | |||
104 | static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) | ||
105 | { | ||
106 | int j; | ||
107 | |||
108 | /* initialize channel fields */ | ||
109 | c->head = NULL; | ||
110 | c->tail = NULL; | ||
111 | c->last_processed = NULL; | ||
112 | c->channel.status = MUSB_DMA_STATUS_UNKNOWN; | ||
113 | c->controller = cppi; | ||
114 | c->is_rndis = 0; | ||
115 | c->freelist = NULL; | ||
116 | |||
117 | /* build the BD Free list for the channel */ | ||
118 | for (j = 0; j < NUM_TXCHAN_BD + 1; j++) { | ||
119 | struct cppi_descriptor *bd; | ||
120 | dma_addr_t dma; | ||
121 | |||
122 | bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); | ||
123 | bd->dma = dma; | ||
124 | cppi_bd_free(c, bd); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static int cppi_channel_abort(struct dma_channel *); | ||
129 | |||
130 | static void cppi_pool_free(struct cppi_channel *c) | ||
131 | { | ||
132 | struct cppi *cppi = c->controller; | ||
133 | struct cppi_descriptor *bd; | ||
134 | |||
135 | (void) cppi_channel_abort(&c->channel); | ||
136 | c->channel.status = MUSB_DMA_STATUS_UNKNOWN; | ||
137 | c->controller = NULL; | ||
138 | |||
139 | /* free all its bds */ | ||
140 | bd = c->last_processed; | ||
141 | do { | ||
142 | if (bd) | ||
143 | dma_pool_free(cppi->pool, bd, bd->dma); | ||
144 | bd = cppi_bd_alloc(c); | ||
145 | } while (bd); | ||
146 | c->last_processed = NULL; | ||
147 | } | ||
148 | |||
149 | static int __init cppi_controller_start(struct dma_controller *c) | ||
150 | { | ||
151 | struct cppi *controller; | ||
152 | void __iomem *tibase; | ||
153 | int i; | ||
154 | |||
155 | controller = container_of(c, struct cppi, controller); | ||
156 | |||
157 | /* do whatever is necessary to start controller */ | ||
158 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { | ||
159 | controller->tx[i].transmit = true; | ||
160 | controller->tx[i].index = i; | ||
161 | } | ||
162 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { | ||
163 | controller->rx[i].transmit = false; | ||
164 | controller->rx[i].index = i; | ||
165 | } | ||
166 | |||
167 | /* setup BD list on a per channel basis */ | ||
168 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) | ||
169 | cppi_pool_init(controller, controller->tx + i); | ||
170 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) | ||
171 | cppi_pool_init(controller, controller->rx + i); | ||
172 | |||
173 | tibase = controller->tibase; | ||
174 | INIT_LIST_HEAD(&controller->tx_complete); | ||
175 | |||
176 | /* initialise tx/rx channel head pointers to zero */ | ||
177 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { | ||
178 | struct cppi_channel *tx_ch = controller->tx + i; | ||
179 | struct cppi_tx_stateram __iomem *tx; | ||
180 | |||
181 | INIT_LIST_HEAD(&tx_ch->tx_complete); | ||
182 | |||
183 | tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); | ||
184 | tx_ch->state_ram = tx; | ||
185 | cppi_reset_tx(tx, 0); | ||
186 | } | ||
187 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { | ||
188 | struct cppi_channel *rx_ch = controller->rx + i; | ||
189 | struct cppi_rx_stateram __iomem *rx; | ||
190 | |||
191 | INIT_LIST_HEAD(&rx_ch->tx_complete); | ||
192 | |||
193 | rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i); | ||
194 | rx_ch->state_ram = rx; | ||
195 | cppi_reset_rx(rx); | ||
196 | } | ||
197 | |||
198 | /* enable individual cppi channels */ | ||
199 | musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, | ||
200 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
201 | musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG, | ||
202 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
203 | |||
204 | /* enable tx/rx CPPI control */ | ||
205 | musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); | ||
206 | musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); | ||
207 | |||
208 | /* disable RNDIS mode, also host rx RNDIS autorequest */ | ||
209 | musb_writel(tibase, DAVINCI_RNDIS_REG, 0); | ||
210 | musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * Stop DMA controller | ||
217 | * | ||
218 | * De-Init the DMA controller as necessary. | ||
219 | */ | ||
220 | |||
221 | static int cppi_controller_stop(struct dma_controller *c) | ||
222 | { | ||
223 | struct cppi *controller; | ||
224 | void __iomem *tibase; | ||
225 | int i; | ||
226 | |||
227 | controller = container_of(c, struct cppi, controller); | ||
228 | |||
229 | tibase = controller->tibase; | ||
230 | /* DISABLE INDIVIDUAL CHANNEL Interrupts */ | ||
231 | musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, | ||
232 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
233 | musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, | ||
234 | DAVINCI_DMA_ALL_CHANNELS_ENABLE); | ||
235 | |||
236 | DBG(1, "Tearing down RX and TX Channels\n"); | ||
237 | for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { | ||
238 | /* FIXME restructure of txdma to use bds like rxdma */ | ||
239 | controller->tx[i].last_processed = NULL; | ||
240 | cppi_pool_free(controller->tx + i); | ||
241 | } | ||
242 | for (i = 0; i < ARRAY_SIZE(controller->rx); i++) | ||
243 | cppi_pool_free(controller->rx + i); | ||
244 | |||
245 | /* In the TX case proper teardown is supported. We resort to disabling | ||
246 | * TX/RX CPPI only after cleanup of the TX channels, because TX CPPI | ||
247 | * cannot be disabled before TX teardown is complete. | ||
248 | */ | ||
249 | /* disable tx/rx cppi */ | ||
250 | musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); | ||
251 | musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); | ||
252 | |||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | /* While dma channel is allocated, we only want the core irqs active | ||
257 | * for fault reports, otherwise we'd get irqs that we don't care about. | ||
258 | * Except for TX irqs, where dma done != fifo empty and reusable ... | ||
259 | * | ||
260 | * NOTE: docs don't say either way, but irq masking **enables** irqs. | ||
261 | * | ||
262 | * REVISIT same issue applies to pure PIO usage too, and non-cppi dma... | ||
263 | */ | ||
264 | static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum) | ||
265 | { | ||
266 | musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8)); | ||
267 | } | ||
268 | |||
269 | static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum) | ||
270 | { | ||
271 | musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8)); | ||
272 | } | ||
273 | |||
274 | |||
275 | /* | ||
276 | * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to | ||
277 | * each transfer direction of a non-control endpoint, so allocating | ||
278 | * (and deallocating) is mostly a way to notice bad housekeeping on | ||
279 | * the software side. We assume the irqs are always active. | ||
280 | */ | ||
281 | static struct dma_channel * | ||
282 | cppi_channel_allocate(struct dma_controller *c, | ||
283 | struct musb_hw_ep *ep, u8 transmit) | ||
284 | { | ||
285 | struct cppi *controller; | ||
286 | u8 index; | ||
287 | struct cppi_channel *cppi_ch; | ||
288 | void __iomem *tibase; | ||
289 | |||
290 | controller = container_of(c, struct cppi, controller); | ||
291 | tibase = controller->tibase; | ||
292 | |||
293 | /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ | ||
294 | index = ep->epnum - 1; | ||
295 | |||
296 | /* return the corresponding CPPI Channel Handle, and | ||
297 | * probably disable the non-CPPI irq until we need it. | ||
298 | */ | ||
299 | if (transmit) { | ||
300 | if (index >= ARRAY_SIZE(controller->tx)) { | ||
301 | DBG(1, "no %cX%d CPPI channel\n", 'T', index); | ||
302 | return NULL; | ||
303 | } | ||
304 | cppi_ch = controller->tx + index; | ||
305 | } else { | ||
306 | if (index >= ARRAY_SIZE(controller->rx)) { | ||
307 | DBG(1, "no %cX%d CPPI channel\n", 'R', index); | ||
308 | return NULL; | ||
309 | } | ||
310 | cppi_ch = controller->rx + index; | ||
311 | core_rxirq_disable(tibase, ep->epnum); | ||
312 | } | ||
313 | |||
314 | /* REVISIT make this an error later once the same driver code works | ||
315 | * with the other DMA engine too | ||
316 | */ | ||
317 | if (cppi_ch->hw_ep) | ||
318 | DBG(1, "re-allocating DMA%d %cX channel %p\n", | ||
319 | index, transmit ? 'T' : 'R', cppi_ch); | ||
320 | cppi_ch->hw_ep = ep; | ||
321 | cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; | ||
322 | |||
323 | DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); | ||
324 | return &cppi_ch->channel; | ||
325 | } | ||
326 | |||
327 | /* Release a CPPI Channel. */ | ||
328 | static void cppi_channel_release(struct dma_channel *channel) | ||
329 | { | ||
330 | struct cppi_channel *c; | ||
331 | void __iomem *tibase; | ||
332 | |||
333 | /* REVISIT: for paranoia, check state and abort if needed... */ | ||
334 | |||
335 | c = container_of(channel, struct cppi_channel, channel); | ||
336 | tibase = c->controller->tibase; | ||
337 | if (!c->hw_ep) | ||
338 | DBG(1, "releasing idle DMA channel %p\n", c); | ||
339 | else if (!c->transmit) | ||
340 | core_rxirq_enable(tibase, c->index + 1); | ||
341 | |||
342 | /* for now, leave its cppi IRQ enabled (we won't trigger it) */ | ||
343 | c->hw_ep = NULL; | ||
344 | channel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
345 | } | ||
346 | |||
347 | /* Context: controller irqlocked */ | ||
348 | static void | ||
349 | cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) | ||
350 | { | ||
351 | void __iomem *base = c->controller->mregs; | ||
352 | struct cppi_rx_stateram __iomem *rx = c->state_ram; | ||
353 | |||
354 | musb_ep_select(base, c->index + 1); | ||
355 | |||
356 | DBG(level, "RX DMA%d%s: %d left, csr %04x, " | ||
357 | "%08x H%08x S%08x C%08x, " | ||
358 | "B%08x L%08x %08x .. %08x" | ||
359 | "\n", | ||
360 | c->index, tag, | ||
361 | musb_readl(c->controller->tibase, | ||
362 | DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), | ||
363 | musb_readw(c->hw_ep->regs, MUSB_RXCSR), | ||
364 | |||
365 | musb_readl(&rx->rx_skipbytes, 0), | ||
366 | musb_readl(&rx->rx_head, 0), | ||
367 | musb_readl(&rx->rx_sop, 0), | ||
368 | musb_readl(&rx->rx_current, 0), | ||
369 | |||
370 | musb_readl(&rx->rx_buf_current, 0), | ||
371 | musb_readl(&rx->rx_len_len, 0), | ||
372 | musb_readl(&rx->rx_cnt_cnt, 0), | ||
373 | musb_readl(&rx->rx_complete, 0) | ||
374 | ); | ||
375 | } | ||
376 | |||
377 | /* Context: controller irqlocked */ | ||
378 | static void | ||
379 | cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) | ||
380 | { | ||
381 | void __iomem *base = c->controller->mregs; | ||
382 | struct cppi_tx_stateram __iomem *tx = c->state_ram; | ||
383 | |||
384 | musb_ep_select(base, c->index + 1); | ||
385 | |||
386 | DBG(level, "TX DMA%d%s: csr %04x, " | ||
387 | "H%08x S%08x C%08x %08x, " | ||
388 | "F%08x L%08x .. %08x" | ||
389 | "\n", | ||
390 | c->index, tag, | ||
391 | musb_readw(c->hw_ep->regs, MUSB_TXCSR), | ||
392 | |||
393 | musb_readl(&tx->tx_head, 0), | ||
394 | musb_readl(&tx->tx_buf, 0), | ||
395 | musb_readl(&tx->tx_current, 0), | ||
396 | musb_readl(&tx->tx_buf_current, 0), | ||
397 | |||
398 | musb_readl(&tx->tx_info, 0), | ||
399 | musb_readl(&tx->tx_rem_len, 0), | ||
400 | /* dummy/unused word 6 */ | ||
401 | musb_readl(&tx->tx_complete, 0) | ||
402 | ); | ||
403 | } | ||
404 | |||
405 | /* Context: controller irqlocked */ | ||
406 | static inline void | ||
407 | cppi_rndis_update(struct cppi_channel *c, int is_rx, | ||
408 | void __iomem *tibase, int is_rndis) | ||
409 | { | ||
410 | /* we may need to change the rndis flag for this cppi channel */ | ||
411 | if (c->is_rndis != is_rndis) { | ||
412 | u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG); | ||
413 | u32 temp = 1 << (c->index); | ||
414 | |||
415 | if (is_rx) | ||
416 | temp <<= 16; | ||
417 | if (is_rndis) | ||
418 | value |= temp; | ||
419 | else | ||
420 | value &= ~temp; | ||
421 | musb_writel(tibase, DAVINCI_RNDIS_REG, value); | ||
422 | c->is_rndis = is_rndis; | ||
423 | } | ||
424 | } | ||
425 | |||
426 | static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) | ||
427 | { | ||
428 | pr_debug("RXBD/%s %08x: " | ||
429 | "nxt %08x buf %08x off.blen %08x opt.plen %08x\n", | ||
430 | tag, bd->dma, | ||
431 | bd->hw_next, bd->hw_bufp, bd->hw_off_len, | ||
432 | bd->hw_options); | ||
433 | } | ||
434 | |||
435 | static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) | ||
436 | { | ||
437 | #if MUSB_DEBUG > 0 | ||
438 | struct cppi_descriptor *bd; | ||
439 | |||
440 | if (!_dbg_level(level)) | ||
441 | return; | ||
442 | cppi_dump_rx(level, rx, tag); | ||
443 | if (rx->last_processed) | ||
444 | cppi_dump_rxbd("last", rx->last_processed); | ||
445 | for (bd = rx->head; bd; bd = bd->next) | ||
446 | cppi_dump_rxbd("active", bd); | ||
447 | #endif | ||
448 | } | ||
449 | |||
450 | |||
451 | /* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX; | ||
452 | * so we won't ever use it (see "CPPI RX Woes" below). | ||
453 | */ | ||
454 | static inline int cppi_autoreq_update(struct cppi_channel *rx, | ||
455 | void __iomem *tibase, int onepacket, unsigned n_bds) | ||
456 | { | ||
457 | u32 val; | ||
458 | |||
459 | #ifdef RNDIS_RX_IS_USABLE | ||
460 | u32 tmp; | ||
461 | /* assert(is_host_active(musb)) */ | ||
462 | |||
463 | /* start from "AutoReq never" */ | ||
464 | tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); | ||
465 | val = tmp & ~((0x3) << (rx->index * 2)); | ||
466 | |||
467 | /* HCD arranged reqpkt for packet #1. we arrange int | ||
468 | * for all but the last one, maybe in two segments. | ||
469 | */ | ||
470 | if (!onepacket) { | ||
471 | #if 0 | ||
472 | /* use two segments, autoreq "all" then the last "never" */ | ||
473 | val |= ((0x3) << (rx->index * 2)); | ||
474 | n_bds--; | ||
475 | #else | ||
476 | /* one segment, autoreq "all-but-last" */ | ||
477 | val |= ((0x1) << (rx->index * 2)); | ||
478 | #endif | ||
479 | } | ||
480 | |||
481 | if (val != tmp) { | ||
482 | int n = 100; | ||
483 | |||
484 | /* make sure that autoreq is updated before continuing */ | ||
485 | musb_writel(tibase, DAVINCI_AUTOREQ_REG, val); | ||
486 | do { | ||
487 | tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); | ||
488 | if (tmp == val) | ||
489 | break; | ||
490 | cpu_relax(); | ||
491 | } while (n-- > 0); | ||
492 | } | ||
493 | #endif | ||
494 | |||
495 | /* REQPKT is turned off after each segment */ | ||
496 | if (n_bds && rx->channel.actual_len) { | ||
497 | void __iomem *regs = rx->hw_ep->regs; | ||
498 | |||
499 | val = musb_readw(regs, MUSB_RXCSR); | ||
500 | if (!(val & MUSB_RXCSR_H_REQPKT)) { | ||
501 | val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS; | ||
502 | musb_writew(regs, MUSB_RXCSR, val); | ||
503 | /* flush write buffer */ | ||
504 | val = musb_readw(regs, MUSB_RXCSR); | ||
505 | } | ||
506 | } | ||
507 | return n_bds; | ||
508 | } | ||
509 | |||
510 | |||
511 | /* Buffer enqueuing Logic: | ||
512 | * | ||
513 | * - RX builds new queues each time, to help handle routine "early | ||
514 | * termination" cases (faults, including errors and short reads) | ||
515 | * more correctly. | ||
516 | * | ||
517 | * - for now, TX reuses the same queue of BDs every time | ||
518 | * | ||
519 | * REVISIT long term, we want a normal dynamic model. | ||
520 | * ... the goal will be to append to the | ||
521 | * existing queue, processing completed "dma buffers" (segments) on the fly. | ||
522 | * | ||
523 | * Otherwise we force an IRQ latency between requests, which slows us a lot | ||
524 | * (especially in "transparent" dma). Unfortunately that model seems to be | ||
525 | * inherent in the DMA model from the Mentor code, except in the rare case | ||
526 | * of transfers big enough (~128+ KB) that we could append "middle" segments | ||
527 | * in the TX paths. (RX can't do this, see below.) | ||
528 | * | ||
529 | * That's true even in the CPPI-friendly iso case, where most urbs have | ||
530 | * several small segments provided in a group and where the "packet at a time" | ||
531 | * "transparent" DMA model is always correct, even on the RX side. | ||
532 | */ | ||
533 | |||
534 | /* | ||
535 | * CPPI TX: | ||
536 | * ======== | ||
537 | * TX is a lot more reasonable than RX; it doesn't need to run in | ||
538 | * irq-per-packet mode very often. RNDIS mode seems to behave too | ||
539 | * (except how it handles the exactly-N-packets case). Building a | ||
540 | * txdma queue with multiple requests (urb or usb_request) looks | ||
541 | * like it would work ... but fault handling would need much testing. | ||
542 | * | ||
543 | * The main issue with TX mode RNDIS relates to transfer lengths that | ||
544 | * are an exact multiple of the packet length. It appears that there's | ||
545 | * a hiccup in that case (maybe the DMA completes before the ZLP gets | ||
546 | * written?) boiling down to not being able to rely on CPPI writing any | ||
547 | * terminating zero length packet before the next transfer is written. | ||
548 | * So that's punted to PIO; better yet, gadget drivers can avoid it. | ||
549 | * | ||
550 | * Plus, there's allegedly an undocumented constraint that rndis transfer | ||
551 | * length be a multiple of 64 bytes ... but the chip doesn't act that | ||
552 | * way, and we really don't _want_ that behavior anyway. | ||
553 | * | ||
554 | * On TX, "transparent" mode works ... although experiments have shown | ||
555 | * problems trying to use the SOP/EOP bits in different USB packets. | ||
556 | * | ||
557 | * REVISIT try to handle terminating zero length packets using CPPI | ||
558 | * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet | ||
559 | * links avoid that issue by forcing them to avoid zlps.) | ||
560 | */ | ||
561 | static void | ||
562 | cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) | ||
563 | { | ||
564 | unsigned maxpacket = tx->maxpacket; | ||
565 | dma_addr_t addr = tx->buf_dma + tx->offset; | ||
566 | size_t length = tx->buf_len - tx->offset; | ||
567 | struct cppi_descriptor *bd; | ||
568 | unsigned n_bds; | ||
569 | unsigned i; | ||
570 | struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram; | ||
571 | int rndis; | ||
572 | |||
573 | /* TX can use the CPPI "rndis" mode, where we can probably fit this | ||
574 | * transfer in one BD and one IRQ. The only time we would NOT want | ||
575 | * to use it is when hardware constraints prevent it, or if we'd | ||
576 | * trigger the "send a ZLP?" confusion. | ||
577 | */ | ||
578 | rndis = (maxpacket & 0x3f) == 0 | ||
579 | && length < 0xffff | ||
580 | && (length % maxpacket) != 0; | ||
581 | |||
582 | if (rndis) { | ||
583 | maxpacket = length; | ||
584 | n_bds = 1; | ||
585 | } else { | ||
586 | n_bds = length / maxpacket; | ||
587 | if (!length || (length % maxpacket)) | ||
588 | n_bds++; | ||
589 | n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD); | ||
590 | length = min(n_bds * maxpacket, length); | ||
591 | } | ||
592 | |||
593 | DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", | ||
594 | tx->index, | ||
595 | maxpacket, | ||
596 | rndis ? "rndis" : "transparent", | ||
597 | n_bds, | ||
598 | addr, length); | ||
599 | |||
600 | cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); | ||
601 | |||
602 | /* assuming here that channel_program is called during | ||
603 | * transfer initiation ... current code maintains state | ||
604 | * for one outstanding request only (no queues, not even | ||
605 | * the implicit ones of an iso urb). | ||
606 | */ | ||
607 | |||
608 | bd = tx->freelist; | ||
609 | tx->head = bd; | ||
610 | tx->last_processed = NULL; | ||
611 | |||
612 | /* FIXME use BD pool like RX side does, and just queue | ||
613 | * the minimum number for this request. | ||
614 | */ | ||
615 | |||
616 | /* Prepare queue of BDs first, then hand it to hardware. | ||
617 | * All BDs except maybe the last should be of full packet | ||
618 | * size; for RNDIS there _is_ only that last packet. | ||
619 | */ | ||
620 | for (i = 0; i < n_bds; ) { | ||
621 | if (++i < n_bds && bd->next) | ||
622 | bd->hw_next = bd->next->dma; | ||
623 | else | ||
624 | bd->hw_next = 0; | ||
625 | |||
626 | bd->hw_bufp = tx->buf_dma + tx->offset; | ||
627 | |||
628 | /* FIXME set EOP only on the last packet, | ||
629 | * SOP only on the first ... avoid IRQs | ||
630 | */ | ||
631 | if ((tx->offset + maxpacket) <= tx->buf_len) { | ||
632 | tx->offset += maxpacket; | ||
633 | bd->hw_off_len = maxpacket; | ||
634 | bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET | ||
635 | | CPPI_OWN_SET | maxpacket; | ||
636 | } else { | ||
637 | /* only this one may be a partial USB Packet */ | ||
638 | u32 partial_len; | ||
639 | |||
640 | partial_len = tx->buf_len - tx->offset; | ||
641 | tx->offset = tx->buf_len; | ||
642 | bd->hw_off_len = partial_len; | ||
643 | |||
644 | bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET | ||
645 | | CPPI_OWN_SET | partial_len; | ||
646 | if (partial_len == 0) | ||
647 | bd->hw_options |= CPPI_ZERO_SET; | ||
648 | } | ||
649 | |||
650 | DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n", | ||
651 | bd, bd->hw_next, bd->hw_bufp, | ||
652 | bd->hw_off_len, bd->hw_options); | ||
653 | |||
654 | /* update the last BD enqueued to the list */ | ||
655 | tx->tail = bd; | ||
656 | bd = bd->next; | ||
657 | } | ||
658 | |||
659 | /* BDs live in DMA-coherent memory, but writes might be pending */ | ||
660 | cpu_drain_writebuffer(); | ||
661 | |||
662 | /* Write to the HeadPtr in state RAM to trigger */ | ||
663 | musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma); | ||
664 | |||
665 | cppi_dump_tx(5, tx, "/S"); | ||
666 | } | ||
667 | |||
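As a reading aid, the TX "rndis" eligibility test computed at the top of cppi_next_tx_segment() can be restated as a standalone predicate. This is only a sketch of the same three checks, not code from the driver:

#include <stdbool.h>
#include <stddef.h>

/* sketch only: mirrors the "rndis = ..." expression in cppi_next_tx_segment() */
static bool tx_can_use_rndis_mode(unsigned maxpacket, size_t length)
{
	return (maxpacket & 0x3f) == 0		/* maxpacket is a multiple of 64 */
		&& length < 0xffff		/* fits one BD's 16-bit length */
		&& (length % maxpacket) != 0;	/* ends short, so no ZLP needed */
}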
668 | /* | ||
669 | * CPPI RX Woes: | ||
670 | * ============= | ||
671 | * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte | ||
672 | * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back. | ||
673 | * (Full speed transfers have similar scenarios.) | ||
674 | * | ||
675 | * The correct behavior for Linux is that (a) fills the buffer with 300 bytes, | ||
676 | * and the next packet goes into a buffer that's queued later; while (b) fills | ||
677 | * the buffer with 1024 bytes. How to do that with CPPI? | ||
678 | * | ||
679 | * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but | ||
680 | * (b) loses **BADLY** because nothing (!) happens when that second packet | ||
681 | * fills the buffer, much less when a third one arrives. (Which makes this | ||
682 | * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination | ||
683 | * is optional, and it's fine if peripherals -- not hosts! -- pad messages | ||
684 | * out to end-of-buffer. Standard PCI host controller DMA descriptors | ||
685 | * implement that mode by default ... which is no accident.) | ||
686 | * | ||
687 | * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have | ||
688 | * converse problems: (b) is handled right, but (a) loses badly. CPPI RX | ||
689 | * ignores SOP/EOP markings and processes both of those BDs; so both packets | ||
690 | * are loaded into the buffer (with a 212 byte gap between them), and the next | ||
691 | * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP | ||
692 | * are intended as outputs for RX queues, not inputs...) | ||
693 | * | ||
694 | * - A variant of "transparent" mode -- one BD at a time -- is the only way to | ||
695 | * reliably make both cases work, with software handling both cases correctly | ||
696 | * and at the significant penalty of needing an IRQ per packet. (The lack of | ||
697 | * I/O overlap can be slightly ameliorated by enabling double buffering.) | ||
698 | * | ||
699 | * So how to get rid of IRQ-per-packet? The transparent multi-BD case could | ||
700 | * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK | ||
701 | * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors | ||
702 | * with guaranteed driver level fault recovery and scrubbing out what's left | ||
703 | * of that garbaged datastream. | ||
704 | * | ||
705 | * But there seems to be no way to identify the cases where CPPI RNDIS mode | ||
706 | * is appropriate -- which do NOT include RNDIS host drivers, but do include | ||
707 | * the CDC Ethernet driver! -- and the documentation is incomplete/wrong. | ||
708 | * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic | ||
709 | * that applies best on the peripheral side (and which could fail rudely). | ||
710 | * | ||
711 | * Leaving only "transparent" mode; we avoid multi-bd modes in almost all | ||
712 | * cases other than mass storage class. Otherwise we're correct but slow, | ||
713 | * since CPPI penalizes our need for a "true RNDIS" default mode. | ||
714 | */ | ||
715 | |||
716 | |||
717 | /* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY | ||
718 | * | ||
719 | * IFF | ||
720 | * (a) peripheral mode ... since rndis peripherals could pad their | ||
721 | * writes to hosts, causing i/o failure; or we'd have to cope with | ||
722 | * a largely unknowable variety of host side protocol variants | ||
723 | * (b) and short reads are NOT errors ... since full reads would | ||
724 | * cause those same i/o failures | ||
725 | * (c) and read length is | ||
726 | * - less than 64KB (max per cppi descriptor) | ||
727 | * - not a multiple of 4096 (g_zero default, full reads typical) | ||
728 | * - N (>1) packets long, ditto (full reads not EXPECTED) | ||
729 | * THEN | ||
730 | * try rx rndis mode | ||
731 | * | ||
732 | * Cost of heuristic failing: RXDMA wedges at the end of transfers that | ||
733 | * fill out the whole buffer. Buggy host side usb network drivers could | ||
734 | * trigger that, but "in the field" such bugs seem to be all but unknown. | ||
735 | * | ||
736 | * So this module parameter lets the heuristic be disabled. When using | ||
737 | * gadgetfs, the heuristic will probably need to be disabled. | ||
738 | */ | ||
739 | static int cppi_rx_rndis = 1; | ||
740 | |||
741 | module_param(cppi_rx_rndis, bool, 0); | ||
742 | MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic"); | ||
743 | |||
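The conditions (a)-(c) above map directly onto the checks made later in cppi_next_rx_segment(); condition (b), short reads not being errors, corresponds to the onepacket flag passed in by the caller. Restated as a standalone predicate for readability (a sketch, not driver code; maxpacket is assumed to be a power of two, as the driver's bit-masking also assumes):

#include <stdbool.h>
#include <stddef.h>

/* sketch only: the RX RNDIS heuristic, as applied in cppi_next_rx_segment() */
static bool rx_heuristic_allows_rndis(bool peripheral_active,
				      unsigned maxpacket, size_t length)
{
	return peripheral_active			/* (a) peripheral mode */
		&& length > maxpacket			/* (c) more than one packet */
		&& (length & ~0xffff) == 0		/* (c) less than 64KB */
		&& (length & 0x0fff) != 0		/* (c) not a multiple of 4096 */
		&& (length & (maxpacket - 1)) == 0;	/* (c) whole packets only */
}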
744 | |||
745 | /** | ||
746 | * cppi_next_rx_segment - dma read for the next chunk of a buffer | ||
747 | * @musb: the controller | ||
748 | * @rx: dma channel | ||
749 | * @onepacket: true unless caller treats short reads as errors, and | ||
750 | * performs fault recovery above usbcore. | ||
751 | * Context: controller irqlocked | ||
752 | * | ||
753 | * See above notes about why we can't use multi-BD RX queues except in | ||
754 | * rare cases (mass storage class), and can never use the hardware "rndis" | ||
755 | * mode (since it's not a "true" RNDIS mode) with complete safety. | ||
756 | * | ||
757 | * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in | ||
758 | * code to recover from corrupted datastreams after each short transfer. | ||
759 | */ | ||
760 | static void | ||
761 | cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) | ||
762 | { | ||
763 | unsigned maxpacket = rx->maxpacket; | ||
764 | dma_addr_t addr = rx->buf_dma + rx->offset; | ||
765 | size_t length = rx->buf_len - rx->offset; | ||
766 | struct cppi_descriptor *bd, *tail; | ||
767 | unsigned n_bds; | ||
768 | unsigned i; | ||
769 | void __iomem *tibase = musb->ctrl_base; | ||
770 | int is_rndis = 0; | ||
771 | struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram; | ||
772 | |||
773 | if (onepacket) { | ||
774 | /* almost every USB driver, host or peripheral side */ | ||
775 | n_bds = 1; | ||
776 | |||
777 | /* maybe apply the heuristic above */ | ||
778 | if (cppi_rx_rndis | ||
779 | && is_peripheral_active(musb) | ||
780 | && length > maxpacket | ||
781 | && (length & ~0xffff) == 0 | ||
782 | && (length & 0x0fff) != 0 | ||
783 | && (length & (maxpacket - 1)) == 0) { | ||
784 | maxpacket = length; | ||
785 | is_rndis = 1; | ||
786 | } | ||
787 | } else { | ||
788 | /* virtually nothing except mass storage class */ | ||
789 | if (length > 0xffff) { | ||
790 | n_bds = 0xffff / maxpacket; | ||
791 | length = n_bds * maxpacket; | ||
792 | } else { | ||
793 | n_bds = length / maxpacket; | ||
794 | if (length % maxpacket) | ||
795 | n_bds++; | ||
796 | } | ||
797 | if (n_bds == 1) | ||
798 | onepacket = 1; | ||
799 | else | ||
800 | n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD); | ||
801 | } | ||
802 | |||
803 | /* In host mode, autorequest logic can generate some IN tokens; it's | ||
804 | * tricky since we can't leave REQPKT set in RXCSR after the transfer | ||
805 | * finishes. So: multipacket transfers involve two or more segments. | ||
806 | * And always at least two IRQs ... RNDIS mode is not an option. | ||
807 | */ | ||
808 | if (is_host_active(musb)) | ||
809 | n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds); | ||
810 | |||
811 | cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis); | ||
812 | |||
813 | length = min(n_bds * maxpacket, length); | ||
814 | |||
815 | DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " | ||
816 | "dma 0x%x len %u %u/%u\n", | ||
817 | rx->index, maxpacket, | ||
818 | onepacket | ||
819 | ? (is_rndis ? "rndis" : "onepacket") | ||
820 | : "multipacket", | ||
821 | n_bds, | ||
822 | musb_readl(tibase, | ||
823 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | ||
824 | & 0xffff, | ||
825 | addr, length, rx->channel.actual_len, rx->buf_len); | ||
826 | |||
827 | /* only queue one segment at a time, since the hardware prevents | ||
828 | * correct queue shutdown after unexpected short packets | ||
829 | */ | ||
830 | bd = cppi_bd_alloc(rx); | ||
831 | rx->head = bd; | ||
832 | |||
833 | /* Build BDs for all packets in this segment */ | ||
834 | for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { | ||
835 | u32 bd_len; | ||
836 | |||
837 | if (i) { | ||
838 | bd = cppi_bd_alloc(rx); | ||
839 | if (!bd) | ||
840 | break; | ||
841 | tail->next = bd; | ||
842 | tail->hw_next = bd->dma; | ||
843 | } | ||
844 | bd->hw_next = 0; | ||
845 | |||
846 | /* all but the last packet will be maxpacket size */ | ||
847 | if (maxpacket < length) | ||
848 | bd_len = maxpacket; | ||
849 | else | ||
850 | bd_len = length; | ||
851 | |||
852 | bd->hw_bufp = addr; | ||
853 | addr += bd_len; | ||
854 | rx->offset += bd_len; | ||
855 | |||
856 | bd->hw_off_len = (0 /*offset*/ << 16) + bd_len; | ||
857 | bd->buflen = bd_len; | ||
858 | |||
859 | bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0); | ||
860 | length -= bd_len; | ||
861 | } | ||
862 | |||
863 | /* we always expect at least one reusable BD! */ | ||
864 | if (!tail) { | ||
865 | WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds); | ||
866 | return; | ||
867 | } else if (i < n_bds) | ||
868 | WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds); | ||
869 | |||
870 | tail->next = NULL; | ||
871 | tail->hw_next = 0; | ||
872 | |||
873 | bd = rx->head; | ||
874 | rx->tail = tail; | ||
875 | |||
876 | /* short reads and other faults should terminate this entire | ||
877 | * dma segment. we want one "dma packet" per dma segment, not | ||
878 | * one per USB packet, terminating the whole queue at once... | ||
879 | * NOTE that current hardware seems to ignore SOP and EOP. | ||
880 | */ | ||
881 | bd->hw_options |= CPPI_SOP_SET; | ||
882 | tail->hw_options |= CPPI_EOP_SET; | ||
883 | |||
884 | if (debug >= 5) { | ||
885 | struct cppi_descriptor *d; | ||
886 | |||
887 | for (d = rx->head; d; d = d->next) | ||
888 | cppi_dump_rxbd("S", d); | ||
889 | } | ||
890 | |||
891 | /* in case the preceding transfer left some state... */ | ||
892 | tail = rx->last_processed; | ||
893 | if (tail) { | ||
894 | tail->next = bd; | ||
895 | tail->hw_next = bd->dma; | ||
896 | } | ||
897 | |||
898 | core_rxirq_enable(tibase, rx->index + 1); | ||
899 | |||
900 | /* BDs live in DMA-coherent memory, but writes might be pending */ | ||
901 | cpu_drain_writebuffer(); | ||
902 | |||
903 | /* REVISIT specs say to write this AFTER the BUFCNT register | ||
904 | * below ... but that loses badly. | ||
905 | */ | ||
906 | musb_writel(&rx_ram->rx_head, 0, bd->dma); | ||
907 | |||
908 | /* bufferCount must be at least 3, and zeroes on completion | ||
909 | * unless it underflows below zero, or stops at two, or keeps | ||
910 | * growing ... grr. | ||
911 | */ | ||
912 | i = musb_readl(tibase, | ||
913 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | ||
914 | & 0xffff; | ||
915 | |||
916 | if (!i) | ||
917 | musb_writel(tibase, | ||
918 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), | ||
919 | n_bds + 2); | ||
920 | else if (n_bds > (i - 3)) | ||
921 | musb_writel(tibase, | ||
922 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), | ||
923 | n_bds - (i - 3)); | ||
924 | |||
925 | i = musb_readl(tibase, | ||
926 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) | ||
927 | & 0xffff; | ||
928 | if (i < (2 + n_bds)) { | ||
929 | DBG(2, "bufcnt%d underrun - %d (for %d)\n", | ||
930 | rx->index, i, n_bds); | ||
931 | musb_writel(tibase, | ||
932 | DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), | ||
933 | n_bds + 2); | ||
934 | } | ||
935 | |||
936 | cppi_dump_rx(4, rx, "/S"); | ||
937 | } | ||
938 | |||
939 | /** | ||
940 | * cppi_channel_program - program channel for data transfer | ||
941 | * @ch: the channel | ||
942 | * @maxpacket: max packet size | ||
943 | * @mode: For RX, 1 unless the usb protocol driver promised to treat | ||
944 | * all short reads as errors and kick in high level fault recovery. | ||
945 | * For TX, ignored because of RNDIS mode races/glitches. | ||
946 | * @dma_addr: dma address of buffer | ||
947 | * @len: length of buffer | ||
948 | * Context: controller irqlocked | ||
949 | */ | ||
950 | static int cppi_channel_program(struct dma_channel *ch, | ||
951 | u16 maxpacket, u8 mode, | ||
952 | dma_addr_t dma_addr, u32 len) | ||
953 | { | ||
954 | struct cppi_channel *cppi_ch; | ||
955 | struct cppi *controller; | ||
956 | struct musb *musb; | ||
957 | |||
958 | cppi_ch = container_of(ch, struct cppi_channel, channel); | ||
959 | controller = cppi_ch->controller; | ||
960 | musb = controller->musb; | ||
961 | |||
962 | switch (ch->status) { | ||
963 | case MUSB_DMA_STATUS_BUS_ABORT: | ||
964 | case MUSB_DMA_STATUS_CORE_ABORT: | ||
965 | /* fault irq handler should have handled cleanup */ | ||
966 | WARNING("%cX DMA%d not cleaned up after abort!\n", | ||
967 | cppi_ch->transmit ? 'T' : 'R', | ||
968 | cppi_ch->index); | ||
969 | /* WARN_ON(1); */ | ||
970 | break; | ||
971 | case MUSB_DMA_STATUS_BUSY: | ||
972 | WARNING("program active channel? %cX DMA%d\n", | ||
973 | cppi_ch->transmit ? 'T' : 'R', | ||
974 | cppi_ch->index); | ||
975 | /* WARN_ON(1); */ | ||
976 | break; | ||
977 | case MUSB_DMA_STATUS_UNKNOWN: | ||
978 | DBG(1, "%cX DMA%d not allocated!\n", | ||
979 | cppi_ch->transmit ? 'T' : 'R', | ||
980 | cppi_ch->index); | ||
981 | /* FALLTHROUGH */ | ||
982 | case MUSB_DMA_STATUS_FREE: | ||
983 | break; | ||
984 | } | ||
985 | |||
986 | ch->status = MUSB_DMA_STATUS_BUSY; | ||
987 | |||
988 | /* set transfer parameters, then queue up its first segment */ | ||
989 | cppi_ch->buf_dma = dma_addr; | ||
990 | cppi_ch->offset = 0; | ||
991 | cppi_ch->maxpacket = maxpacket; | ||
992 | cppi_ch->buf_len = len; | ||
993 | |||
994 | /* TX channel? or RX? */ | ||
995 | if (cppi_ch->transmit) | ||
996 | cppi_next_tx_segment(musb, cppi_ch); | ||
997 | else | ||
998 | cppi_next_rx_segment(musb, cppi_ch, mode); | ||
999 | |||
1000 | return true; | ||
1001 | } | ||
1002 | |||
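For context, cppi_channel_program() is reached through musb's generic dma_controller hooks; the real call sites live in musb_host.c and musb_gadget.c, which are not part of this hunk. The sequence below is only a sketch of the expected usage (names taken from that abstraction, kernel headers assumed, error handling illustrative):

/* sketch only: allocate a TX channel, queue one buffer, release on failure */
static int example_queue_tx(struct dma_controller *dc, struct musb_hw_ep *ep,
			    u16 maxpacket, dma_addr_t buf, u32 len)
{
	struct dma_channel *ch;

	ch = dc->channel_alloc(dc, ep, 1);	/* 1 == transmit */
	if (!ch)
		return -EBUSY;

	/* mode is ignored for TX; RX callers pass 1 unless short reads are errors */
	if (!dc->channel_program(ch, maxpacket, 0, buf, len)) {
		dc->channel_release(ch);
		return -EIO;
	}

	/* completion is reported later, via cppi_completion() from the ISR */
	return 0;
}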
1003 | static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) | ||
1004 | { | ||
1005 | struct cppi_channel *rx = &cppi->rx[ch]; | ||
1006 | struct cppi_rx_stateram __iomem *state = rx->state_ram; | ||
1007 | struct cppi_descriptor *bd; | ||
1008 | struct cppi_descriptor *last = rx->last_processed; | ||
1009 | bool completed = false; | ||
1010 | bool acked = false; | ||
1011 | int i; | ||
1012 | dma_addr_t safe2ack; | ||
1013 | void __iomem *regs = rx->hw_ep->regs; | ||
1014 | |||
1015 | cppi_dump_rx(6, rx, "/K"); | ||
1016 | |||
1017 | bd = last ? last->next : rx->head; | ||
1018 | if (!bd) | ||
1019 | return false; | ||
1020 | |||
1021 | /* run through all completed BDs */ | ||
1022 | for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0); | ||
1023 | (safe2ack || completed) && bd && i < NUM_RXCHAN_BD; | ||
1024 | i++, bd = bd->next) { | ||
1025 | u16 len; | ||
1026 | |||
1027 | /* catch latest BD writes from CPPI */ | ||
1028 | rmb(); | ||
1029 | if (!completed && (bd->hw_options & CPPI_OWN_SET)) | ||
1030 | break; | ||
1031 | |||
1032 | DBG(5, "C/RXBD %08x: nxt %08x buf %08x " | ||
1033 | "off.len %08x opt.len %08x (%d)\n", | ||
1034 | bd->dma, bd->hw_next, bd->hw_bufp, | ||
1035 | bd->hw_off_len, bd->hw_options, | ||
1036 | rx->channel.actual_len); | ||
1037 | |||
1038 | /* actual packet received length */ | ||
1039 | if ((bd->hw_options & CPPI_SOP_SET) && !completed) | ||
1040 | len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK; | ||
1041 | else | ||
1042 | len = 0; | ||
1043 | |||
1044 | if (bd->hw_options & CPPI_EOQ_MASK) | ||
1045 | completed = true; | ||
1046 | |||
1047 | if (!completed && len < bd->buflen) { | ||
1048 | /* NOTE: when we get a short packet, RXCSR_H_REQPKT | ||
1049 | * must have been cleared, and no more DMA packets may | ||
1050 | * be active in the queue... TI docs didn't say, but | ||
1051 | * CPPI ignores those BDs even though OWN is still set. | ||
1052 | */ | ||
1053 | completed = true; | ||
1054 | DBG(3, "rx short %d/%d (%d)\n", | ||
1055 | len, bd->buflen, | ||
1056 | rx->channel.actual_len); | ||
1057 | } | ||
1058 | |||
1059 | /* If we got here, we expect to ack at least one BD; meanwhile | ||
1060 | * CPPI may be completing other BDs while we scan this list... | ||
1061 | * | ||
1062 | * RACE: we can notice OWN cleared before CPPI raises the | ||
1063 | * matching irq by writing that BD as the completion pointer. | ||
1064 | * In such cases, stop scanning and wait for the irq, avoiding | ||
1065 | * lost acks and states where BD ownership is unclear. | ||
1066 | */ | ||
1067 | if (bd->dma == safe2ack) { | ||
1068 | musb_writel(&state->rx_complete, 0, safe2ack); | ||
1069 | safe2ack = musb_readl(&state->rx_complete, 0); | ||
1070 | acked = true; | ||
1071 | if (bd->dma == safe2ack) | ||
1072 | safe2ack = 0; | ||
1073 | } | ||
1074 | |||
1075 | rx->channel.actual_len += len; | ||
1076 | |||
1077 | cppi_bd_free(rx, last); | ||
1078 | last = bd; | ||
1079 | |||
1080 | /* stop scanning on end-of-segment */ | ||
1081 | if (bd->hw_next == 0) | ||
1082 | completed = true; | ||
1083 | } | ||
1084 | rx->last_processed = last; | ||
1085 | |||
1086 | /* dma abort, lost ack, or ... */ | ||
1087 | if (!acked && last) { | ||
1088 | int csr; | ||
1089 | |||
1090 | if (safe2ack == 0 || safe2ack == rx->last_processed->dma) | ||
1091 | musb_writel(&state->rx_complete, 0, safe2ack); | ||
1092 | if (safe2ack == 0) { | ||
1093 | cppi_bd_free(rx, last); | ||
1094 | rx->last_processed = NULL; | ||
1095 | |||
1096 | /* if we land here on the host side, H_REQPKT will | ||
1097 | * be clear and we need to restart the queue... | ||
1098 | */ | ||
1099 | WARN_ON(rx->head); | ||
1100 | } | ||
1101 | musb_ep_select(cppi->mregs, rx->index + 1); | ||
1102 | csr = musb_readw(regs, MUSB_RXCSR); | ||
1103 | if (csr & MUSB_RXCSR_DMAENAB) { | ||
1104 | DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", | ||
1105 | rx->index, | ||
1106 | rx->head, rx->tail, | ||
1107 | rx->last_processed | ||
1108 | ? rx->last_processed->dma | ||
1109 | : 0, | ||
1110 | completed ? ", completed" : "", | ||
1111 | csr); | ||
1112 | cppi_dump_rxq(4, "/what?", rx); | ||
1113 | } | ||
1114 | } | ||
1115 | if (!completed) { | ||
1116 | int csr; | ||
1117 | |||
1118 | rx->head = bd; | ||
1119 | |||
1120 | /* REVISIT seems like "autoreq all but EOP" doesn't... | ||
1121 | * setting it here "should" be racy, but seems to work | ||
1122 | */ | ||
1123 | csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); | ||
1124 | if (is_host_active(cppi->musb) | ||
1125 | && bd | ||
1126 | && !(csr & MUSB_RXCSR_H_REQPKT)) { | ||
1127 | csr |= MUSB_RXCSR_H_REQPKT; | ||
1128 | musb_writew(regs, MUSB_RXCSR, | ||
1129 | MUSB_RXCSR_H_WZC_BITS | csr); | ||
1130 | csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); | ||
1131 | } | ||
1132 | } else { | ||
1133 | rx->head = NULL; | ||
1134 | rx->tail = NULL; | ||
1135 | } | ||
1136 | |||
1137 | cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned"); | ||
1138 | return completed; | ||
1139 | } | ||
1140 | |||
1141 | void cppi_completion(struct musb *musb, u32 rx, u32 tx) | ||
1142 | { | ||
1143 | void __iomem *tibase; | ||
1144 | int i, index; | ||
1145 | struct cppi *cppi; | ||
1146 | struct musb_hw_ep *hw_ep = NULL; | ||
1147 | |||
1148 | cppi = container_of(musb->dma_controller, struct cppi, controller); | ||
1149 | |||
1150 | tibase = musb->ctrl_base; | ||
1151 | |||
1152 | /* process TX channels */ | ||
1153 | for (index = 0; tx; tx = tx >> 1, index++) { | ||
1154 | struct cppi_channel *tx_ch; | ||
1155 | struct cppi_tx_stateram __iomem *tx_ram; | ||
1156 | bool completed = false; | ||
1157 | struct cppi_descriptor *bd; | ||
1158 | |||
1159 | if (!(tx & 1)) | ||
1160 | continue; | ||
1161 | |||
1162 | tx_ch = cppi->tx + index; | ||
1163 | tx_ram = tx_ch->state_ram; | ||
1164 | |||
1165 | /* FIXME need a cppi_tx_scan() routine, which | ||
1166 | * can also be called from abort code | ||
1167 | */ | ||
1168 | |||
1169 | cppi_dump_tx(5, tx_ch, "/E"); | ||
1170 | |||
1171 | bd = tx_ch->head; | ||
1172 | |||
1173 | if (NULL == bd) { | ||
1174 | DBG(1, "null BD\n"); | ||
1175 | continue; | ||
1176 | } | ||
1177 | |||
1178 | /* run through all completed BDs */ | ||
1179 | for (i = 0; !completed && bd && i < NUM_TXCHAN_BD; | ||
1180 | i++, bd = bd->next) { | ||
1181 | u16 len; | ||
1182 | |||
1183 | /* catch latest BD writes from CPPI */ | ||
1184 | rmb(); | ||
1185 | if (bd->hw_options & CPPI_OWN_SET) | ||
1186 | break; | ||
1187 | |||
1188 | DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n", | ||
1189 | bd, bd->hw_next, bd->hw_bufp, | ||
1190 | bd->hw_off_len, bd->hw_options); | ||
1191 | |||
1192 | len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK; | ||
1193 | tx_ch->channel.actual_len += len; | ||
1194 | |||
1195 | tx_ch->last_processed = bd; | ||
1196 | |||
1197 | /* write completion register to acknowledge | ||
1198 | * processing of completed BDs, and possibly | ||
1199 | * release the IRQ; EOQ might not be set ... | ||
1200 | * | ||
1201 | * REVISIT use the same ack strategy as rx | ||
1202 | * | ||
1203 | * REVISIT have observed bit 18 set; huh?? | ||
1204 | */ | ||
1205 | /* if ((bd->hw_options & CPPI_EOQ_MASK)) */ | ||
1206 | musb_writel(&tx_ram->tx_complete, 0, bd->dma); | ||
1207 | |||
1208 | /* stop scanning on end-of-segment */ | ||
1209 | if (bd->hw_next == 0) | ||
1210 | completed = true; | ||
1211 | } | ||
1212 | |||
1213 | /* on end of segment, maybe go to next one */ | ||
1214 | if (completed) { | ||
1215 | /* cppi_dump_tx(4, tx_ch, "/complete"); */ | ||
1216 | |||
1217 | /* transfer more, or report completion */ | ||
1218 | if (tx_ch->offset >= tx_ch->buf_len) { | ||
1219 | tx_ch->head = NULL; | ||
1220 | tx_ch->tail = NULL; | ||
1221 | tx_ch->channel.status = MUSB_DMA_STATUS_FREE; | ||
1222 | |||
1223 | hw_ep = tx_ch->hw_ep; | ||
1224 | |||
1225 | /* Peripheral role never repurposes the | ||
1226 | * endpoint, so immediate completion is | ||
1227 | * safe. Host role waits for the fifo | ||
1228 | * to empty (TXPKTRDY irq) before going | ||
1229 | * to the next queued bulk transfer. | ||
1230 | */ | ||
1231 | if (is_host_active(cppi->musb)) { | ||
1232 | #if 0 | ||
1233 | /* WORKAROUND because we may | ||
1234 | * not always get TXPKTRDY ... | ||
1235 | */ | ||
1236 | int csr; | ||
1237 | |||
1238 | csr = musb_readw(hw_ep->regs, | ||
1239 | MUSB_TXCSR); | ||
1240 | if (csr & MUSB_TXCSR_TXPKTRDY) | ||
1241 | #endif | ||
1242 | completed = false; | ||
1243 | } | ||
1244 | if (completed) | ||
1245 | musb_dma_completion(musb, index + 1, 1); | ||
1246 | |||
1247 | } else { | ||
1248 | /* Bigger transfer than we could fit in | ||
1249 | * that first batch of descriptors... | ||
1250 | */ | ||
1251 | cppi_next_tx_segment(musb, tx_ch); | ||
1252 | } | ||
1253 | } else | ||
1254 | tx_ch->head = bd; | ||
1255 | } | ||
1256 | |||
1257 | /* Start processing the RX block */ | ||
1258 | for (index = 0; rx; rx = rx >> 1, index++) { | ||
1259 | |||
1260 | if (rx & 1) { | ||
1261 | struct cppi_channel *rx_ch; | ||
1262 | |||
1263 | rx_ch = cppi->rx + index; | ||
1264 | |||
1265 | /* let incomplete dma segments finish */ | ||
1266 | if (!cppi_rx_scan(cppi, index)) | ||
1267 | continue; | ||
1268 | |||
1269 | /* start another dma segment if needed */ | ||
1270 | if (rx_ch->channel.actual_len != rx_ch->buf_len | ||
1271 | && rx_ch->channel.actual_len | ||
1272 | == rx_ch->offset) { | ||
1273 | cppi_next_rx_segment(musb, rx_ch, 1); | ||
1274 | continue; | ||
1275 | } | ||
1276 | |||
1277 | /* all segments completed! */ | ||
1278 | rx_ch->channel.status = MUSB_DMA_STATUS_FREE; | ||
1279 | |||
1280 | hw_ep = rx_ch->hw_ep; | ||
1281 | |||
1282 | core_rxirq_disable(tibase, index + 1); | ||
1283 | musb_dma_completion(musb, index + 1, 0); | ||
1284 | } | ||
1285 | } | ||
1286 | |||
1287 | /* write to CPPI EOI register to re-enable interrupts */ | ||
1288 | musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); | ||
1289 | } | ||
1290 | |||
1291 | /* Instantiate a software object representing a DMA controller. */ | ||
1292 | struct dma_controller *__init | ||
1293 | dma_controller_create(struct musb *musb, void __iomem *mregs) | ||
1294 | { | ||
1295 | struct cppi *controller; | ||
1296 | |||
1297 | controller = kzalloc(sizeof *controller, GFP_KERNEL); | ||
1298 | if (!controller) | ||
1299 | return NULL; | ||
1300 | |||
1301 | controller->mregs = mregs; | ||
1302 | controller->tibase = mregs - DAVINCI_BASE_OFFSET; | ||
1303 | |||
1304 | controller->musb = musb; | ||
1305 | controller->controller.start = cppi_controller_start; | ||
1306 | controller->controller.stop = cppi_controller_stop; | ||
1307 | controller->controller.channel_alloc = cppi_channel_allocate; | ||
1308 | controller->controller.channel_release = cppi_channel_release; | ||
1309 | controller->controller.channel_program = cppi_channel_program; | ||
1310 | controller->controller.channel_abort = cppi_channel_abort; | ||
1311 | |||
1312 | /* NOTE: allocating from on-chip SRAM would give the least | ||
1313 | * contention for memory access, if that ever matters here. | ||
1314 | */ | ||
1315 | |||
1316 | /* setup BufferPool */ | ||
1317 | controller->pool = dma_pool_create("cppi", | ||
1318 | controller->musb->controller, | ||
1319 | sizeof(struct cppi_descriptor), | ||
1320 | CPPI_DESCRIPTOR_ALIGN, 0); | ||
1321 | if (!controller->pool) { | ||
1322 | kfree(controller); | ||
1323 | return NULL; | ||
1324 | } | ||
1325 | |||
1326 | return &controller->controller; | ||
1327 | } | ||
1328 | |||
1329 | /* | ||
1330 | * Destroy a previously-instantiated DMA controller. | ||
1331 | */ | ||
1332 | void dma_controller_destroy(struct dma_controller *c) | ||
1333 | { | ||
1334 | struct cppi *cppi; | ||
1335 | |||
1336 | cppi = container_of(c, struct cppi, controller); | ||
1337 | |||
1338 | /* assert: caller stopped the controller first */ | ||
1339 | dma_pool_destroy(cppi->pool); | ||
1340 | |||
1341 | kfree(cppi); | ||
1342 | } | ||
1343 | |||
1344 | /* | ||
1345 | * Context: controller irqlocked, endpoint selected | ||
1346 | */ | ||
1347 | static int cppi_channel_abort(struct dma_channel *channel) | ||
1348 | { | ||
1349 | struct cppi_channel *cppi_ch; | ||
1350 | struct cppi *controller; | ||
1351 | void __iomem *mbase; | ||
1352 | void __iomem *tibase; | ||
1353 | void __iomem *regs; | ||
1354 | u32 value; | ||
1355 | struct cppi_descriptor *queue; | ||
1356 | |||
1357 | cppi_ch = container_of(channel, struct cppi_channel, channel); | ||
1358 | |||
1359 | controller = cppi_ch->controller; | ||
1360 | |||
1361 | switch (channel->status) { | ||
1362 | case MUSB_DMA_STATUS_BUS_ABORT: | ||
1363 | case MUSB_DMA_STATUS_CORE_ABORT: | ||
1364 | /* from RX or TX fault irq handler */ | ||
1365 | case MUSB_DMA_STATUS_BUSY: | ||
1366 | /* the hardware needs shutting down */ | ||
1367 | regs = cppi_ch->hw_ep->regs; | ||
1368 | break; | ||
1369 | case MUSB_DMA_STATUS_UNKNOWN: | ||
1370 | case MUSB_DMA_STATUS_FREE: | ||
1371 | return 0; | ||
1372 | default: | ||
1373 | return -EINVAL; | ||
1374 | } | ||
1375 | |||
1376 | if (!cppi_ch->transmit && cppi_ch->head) | ||
1377 | cppi_dump_rxq(3, "/abort", cppi_ch); | ||
1378 | |||
1379 | mbase = controller->mregs; | ||
1380 | tibase = controller->tibase; | ||
1381 | |||
1382 | queue = cppi_ch->head; | ||
1383 | cppi_ch->head = NULL; | ||
1384 | cppi_ch->tail = NULL; | ||
1385 | |||
1386 | /* REVISIT should rely on caller having done this, | ||
1387 | * and caller should rely on us not changing it. | ||
1388 | * peripheral code is safe ... check host too. | ||
1389 | */ | ||
1390 | musb_ep_select(mbase, cppi_ch->index + 1); | ||
1391 | |||
1392 | if (cppi_ch->transmit) { | ||
1393 | struct cppi_tx_stateram __iomem *tx_ram; | ||
1394 | int enabled; | ||
1395 | |||
1396 | /* mask interrupts raised to signal teardown complete. */ | ||
1397 | enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG) | ||
1398 | & (1 << cppi_ch->index); | ||
1399 | if (enabled) | ||
1400 | musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, | ||
1401 | (1 << cppi_ch->index)); | ||
1402 | |||
1403 | /* REVISIT put timeouts on these controller handshakes */ | ||
1404 | |||
1405 | cppi_dump_tx(6, cppi_ch, " (teardown)"); | ||
1406 | |||
1407 | /* teardown DMA engine then usb core */ | ||
1408 | do { | ||
1409 | value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG); | ||
1410 | } while (!(value & CPPI_TEAR_READY)); | ||
1411 | musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index); | ||
1412 | |||
1413 | tx_ram = cppi_ch->state_ram; | ||
1414 | do { | ||
1415 | value = musb_readl(&tx_ram->tx_complete, 0); | ||
1416 | } while (0xFFFFFFFC != value); | ||
1417 | musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC); | ||
1418 | |||
1419 | /* FIXME clean up the transfer state ... here? | ||
1420 | * the completion routine should get called with | ||
1421 | * an appropriate status code. | ||
1422 | */ | ||
1423 | |||
1424 | value = musb_readw(regs, MUSB_TXCSR); | ||
1425 | value &= ~MUSB_TXCSR_DMAENAB; | ||
1426 | value |= MUSB_TXCSR_FLUSHFIFO; | ||
1427 | musb_writew(regs, MUSB_TXCSR, value); | ||
1428 | musb_writew(regs, MUSB_TXCSR, value); | ||
1429 | |||
1430 | /* re-enable interrupt */ | ||
1431 | if (enabled) | ||
1432 | musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, | ||
1433 | (1 << cppi_ch->index)); | ||
1434 | |||
1435 | /* While we scrub the TX state RAM, ensure that we clean | ||
1436 | * up any interrupt that's currently asserted: | ||
1437 | * 1. Write to completion Ptr value 0x1(bit 0 set) | ||
1438 | * (write back mode) | ||
1439 | * 2. Write to completion Ptr value 0x0(bit 0 cleared) | ||
1440 | * (compare mode) | ||
1441 | * Value written is compared (for bits 31:2) and when | ||
1442 | * equal, interrupt is deasserted. | ||
1443 | */ | ||
1444 | cppi_reset_tx(tx_ram, 1); | ||
1445 | musb_writel(&tx_ram->tx_complete, 0, 0); | ||
1446 | |||
1447 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); | ||
1448 | |||
1449 | /* REVISIT tx side _should_ clean up the same way | ||
1450 | * as the RX side ... this does no cleanup at all! | ||
1451 | */ | ||
1452 | |||
1453 | } else /* RX */ { | ||
1454 | u16 csr; | ||
1455 | |||
1456 | /* NOTE: docs don't guarantee any of this works ... we | ||
1457 | * expect that if the usb core stops telling the cppi core | ||
1458 | * to pull more data from it, then it'll be safe to flush | ||
1459 | * current RX DMA state iff any pending fifo transfer is done. | ||
1460 | */ | ||
1461 | |||
1462 | core_rxirq_disable(tibase, cppi_ch->index + 1); | ||
1463 | |||
1464 | /* for host, ensure ReqPkt is never set again */ | ||
1465 | if (is_host_active(cppi_ch->controller->musb)) { | ||
1466 | value = musb_readl(tibase, DAVINCI_AUTOREQ_REG); | ||
1467 | value &= ~((0x3) << (cppi_ch->index * 2)); | ||
1468 | musb_writel(tibase, DAVINCI_AUTOREQ_REG, value); | ||
1469 | } | ||
1470 | |||
1471 | csr = musb_readw(regs, MUSB_RXCSR); | ||
1472 | |||
1473 | /* for host, clear (just) ReqPkt at end of current packet(s) */ | ||
1474 | if (is_host_active(cppi_ch->controller->musb)) { | ||
1475 | csr |= MUSB_RXCSR_H_WZC_BITS; | ||
1476 | csr &= ~MUSB_RXCSR_H_REQPKT; | ||
1477 | } else | ||
1478 | csr |= MUSB_RXCSR_P_WZC_BITS; | ||
1479 | |||
1480 | /* clear dma enable */ | ||
1481 | csr &= ~(MUSB_RXCSR_DMAENAB); | ||
1482 | musb_writew(regs, MUSB_RXCSR, csr); | ||
1483 | csr = musb_readw(regs, MUSB_RXCSR); | ||
1484 | |||
1485 | /* Quiesce: wait for current dma to finish (if not cleanup). | ||
1486 | * We can't use bit zero of stateram->rx_sop, since that | ||
1487 | * refers to an entire "DMA packet" not just emptying the | ||
1488 | * current fifo. Most segments need multiple usb packets. | ||
1489 | */ | ||
1490 | if (channel->status == MUSB_DMA_STATUS_BUSY) | ||
1491 | udelay(50); | ||
1492 | |||
1493 | /* scan the current list, reporting any data that was | ||
1494 | * transferred and acking any IRQ | ||
1495 | */ | ||
1496 | cppi_rx_scan(controller, cppi_ch->index); | ||
1497 | |||
1498 | /* clobber the existing state once it's idle | ||
1499 | * | ||
1500 | * NOTE: arguably, we should also wait for all the other | ||
1501 | * RX channels to quiesce (how??) and then temporarily | ||
1502 | * disable RXCPPI_CTRL_REG ... but it seems that we can | ||
1503 | * rely on the controller restarting from state ram, with | ||
1504 | * only RXCPPI_BUFCNT state being bogus. BUFCNT will | ||
1505 | * correct itself after the next DMA transfer though. | ||
1506 | * | ||
1507 | * REVISIT does using rndis mode change that? | ||
1508 | */ | ||
1509 | cppi_reset_rx(cppi_ch->state_ram); | ||
1510 | |||
1511 | /* next DMA request _should_ load cppi head ptr */ | ||
1512 | |||
1513 | /* ... we don't "free" that list, only mutate it in place. */ | ||
1514 | cppi_dump_rx(5, cppi_ch, " (done abort)"); | ||
1515 | |||
1516 | /* clean up previously pending bds */ | ||
1517 | cppi_bd_free(cppi_ch, cppi_ch->last_processed); | ||
1518 | cppi_ch->last_processed = NULL; | ||
1519 | |||
1520 | while (queue) { | ||
1521 | struct cppi_descriptor *tmp = queue->next; | ||
1522 | |||
1523 | cppi_bd_free(cppi_ch, queue); | ||
1524 | queue = tmp; | ||
1525 | } | ||
1526 | } | ||
1527 | |||
1528 | channel->status = MUSB_DMA_STATUS_FREE; | ||
1529 | cppi_ch->buf_dma = 0; | ||
1530 | cppi_ch->offset = 0; | ||
1531 | cppi_ch->buf_len = 0; | ||
1532 | cppi_ch->maxpacket = 0; | ||
1533 | return 0; | ||
1534 | } | ||
1535 | |||
1536 | /* TBD Queries: | ||
1537 | * | ||
1538 | * Power Management ... probably turn off cppi during suspend, restart; | ||
1539 | * check state ram? Clocking is presumably shared with usb core. | ||
1540 | */ | ||
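Editor's note: the block below is a minimal usage sketch, not part of the patch. It shows how the musb glue is expected to drive the hooks wired up in dma_controller_create() above, assuming the musb_dma.h hook signatures of this era (channel_alloc/channel_program/channel_release); the wrapper function name and the fall-back-to-PIO policy are invented for illustration.

	/* sketch only: claim a TX channel and queue one transfer on it */
	static bool start_tx_dma_sketch(struct musb *musb, struct musb_hw_ep *hw_ep,
					dma_addr_t buf, u32 length, u16 maxpacket)
	{
		struct dma_controller *c = musb->dma_controller;
		struct dma_channel *ch;

		/* claim the TX side of this endpoint (is_tx = 1) */
		ch = c->channel_alloc(c, hw_ep, 1);
		if (!ch)
			return false;	/* no channel: fall back to PIO */

		/* mode 0 = "transparent" DMA; cppi_channel_program() marks the
		 * channel busy and queues its first segment
		 */
		if (!c->channel_program(ch, maxpacket, 0, buf, length)) {
			c->channel_release(ch);
			return false;
		}

		/* completion is reported later through cppi_completion() */
		return true;
	}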
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h new file mode 100644 index 000000000000..fc5216b5d2c5 --- /dev/null +++ b/drivers/usb/musb/cppi_dma.h | |||
@@ -0,0 +1,133 @@ | |||
1 | /* Copyright (C) 2005-2006 by Texas Instruments */ | ||
2 | |||
3 | #ifndef _CPPI_DMA_H_ | ||
4 | #define _CPPI_DMA_H_ | ||
5 | |||
6 | #include <linux/slab.h> | ||
7 | #include <linux/list.h> | ||
8 | #include <linux/smp_lock.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/dmapool.h> | ||
11 | |||
12 | #include "musb_dma.h" | ||
13 | #include "musb_core.h" | ||
14 | |||
15 | |||
16 | /* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers | ||
17 | * would seem to be shared with the TUSB6020 (over VLYNQ). | ||
18 | */ | ||
19 | |||
20 | #include "davinci.h" | ||
21 | |||
22 | |||
23 | /* CPPI RX/TX state RAM */ | ||
24 | |||
25 | struct cppi_tx_stateram { | ||
26 | u32 tx_head; /* "DMA packet" head descriptor */ | ||
27 | u32 tx_buf; | ||
28 | u32 tx_current; /* current descriptor */ | ||
29 | u32 tx_buf_current; | ||
30 | u32 tx_info; /* flags, remaining buflen */ | ||
31 | u32 tx_rem_len; | ||
32 | u32 tx_dummy; /* unused */ | ||
33 | u32 tx_complete; | ||
34 | }; | ||
35 | |||
36 | struct cppi_rx_stateram { | ||
37 | u32 rx_skipbytes; | ||
38 | u32 rx_head; | ||
39 | u32 rx_sop; /* "DMA packet" head descriptor */ | ||
40 | u32 rx_current; /* current descriptor */ | ||
41 | u32 rx_buf_current; | ||
42 | u32 rx_len_len; | ||
43 | u32 rx_cnt_cnt; | ||
44 | u32 rx_complete; | ||
45 | }; | ||
46 | |||
47 | /* hw_options bits in CPPI buffer descriptors */ | ||
48 | #define CPPI_SOP_SET ((u32)(1 << 31)) | ||
49 | #define CPPI_EOP_SET ((u32)(1 << 30)) | ||
50 | #define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */ | ||
51 | #define CPPI_EOQ_MASK ((u32)(1 << 28)) | ||
52 | #define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */ | ||
53 | #define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */ | ||
54 | |||
55 | #define CPPI_RECV_PKTLEN_MASK 0xFFFF | ||
56 | #define CPPI_BUFFER_LEN_MASK 0xFFFF | ||
57 | |||
58 | #define CPPI_TEAR_READY ((u32)(1 << 31)) | ||
59 | |||
60 | /* CPPI data structure definitions */ | ||
61 | |||
62 | #define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */ | ||
63 | |||
64 | struct cppi_descriptor { | ||
65 | /* hardware overlay */ | ||
66 | u32 hw_next; /* next buffer descriptor Pointer */ | ||
67 | u32 hw_bufp; /* i/o buffer pointer */ | ||
68 | u32 hw_off_len; /* buffer_offset16, buffer_length16 */ | ||
69 | u32 hw_options; /* flags: SOP, EOP etc*/ | ||
70 | |||
71 | struct cppi_descriptor *next; | ||
72 | dma_addr_t dma; /* address of this descriptor */ | ||
73 | u32 buflen; /* for RX: original buffer length */ | ||
74 | } __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN))); | ||
75 | |||
76 | |||
77 | struct cppi; | ||
78 | |||
79 | /* CPPI Channel Control structure */ | ||
80 | struct cppi_channel { | ||
81 | struct dma_channel channel; | ||
82 | |||
83 | /* back pointer to the DMA controller structure */ | ||
84 | struct cppi *controller; | ||
85 | |||
86 | /* which direction of which endpoint? */ | ||
87 | struct musb_hw_ep *hw_ep; | ||
88 | bool transmit; | ||
89 | u8 index; | ||
90 | |||
91 | /* DMA modes: RNDIS or "transparent" */ | ||
92 | u8 is_rndis; | ||
93 | |||
94 | /* book keeping for current transfer request */ | ||
95 | dma_addr_t buf_dma; | ||
96 | u32 buf_len; | ||
97 | u32 maxpacket; | ||
98 | u32 offset; /* dma requested */ | ||
99 | |||
100 | void __iomem *state_ram; /* CPPI state */ | ||
101 | |||
102 | struct cppi_descriptor *freelist; | ||
103 | |||
104 | /* BD management fields */ | ||
105 | struct cppi_descriptor *head; | ||
106 | struct cppi_descriptor *tail; | ||
107 | struct cppi_descriptor *last_processed; | ||
108 | |||
109 | /* use tx_complete in host role to track endpoints waiting for | ||
110 | * FIFONOTEMPTY to clear. | ||
111 | */ | ||
112 | struct list_head tx_complete; | ||
113 | }; | ||
114 | |||
115 | /* CPPI DMA controller object */ | ||
116 | struct cppi { | ||
117 | struct dma_controller controller; | ||
118 | struct musb *musb; | ||
119 | void __iomem *mregs; /* Mentor regs */ | ||
120 | void __iomem *tibase; /* TI/CPPI regs */ | ||
121 | |||
122 | struct cppi_channel tx[MUSB_C_NUM_EPT - 1]; | ||
123 | struct cppi_channel rx[MUSB_C_NUM_EPR - 1]; | ||
124 | |||
125 | struct dma_pool *pool; | ||
126 | |||
127 | struct list_head tx_complete; | ||
128 | }; | ||
129 | |||
130 | /* irq handling hook */ | ||
131 | extern void cppi_completion(struct musb *, u32 rx, u32 tx); | ||
132 | |||
133 | #endif /* end of ifndef _CPPI_DMA_H_ */ | ||
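Editor's note: a short hedged sketch, not part of the patch, of how the hw_options bits defined above are read back from a completed RX descriptor; it mirrors the checks made in cppi_rx_scan() in cppi_dma.c, and the helper name is invented for illustration.

	/* report the bytes a completed RX descriptor accounts for, and whether
	 * the scan should stop at it (sketch of the cppi_rx_scan() tests)
	 */
	static u16 rx_bd_bytes_sketch(const struct cppi_descriptor *bd, bool *stop)
	{
		/* CPPI still owns this BD: nothing completed yet */
		if (bd->hw_options & CPPI_OWN_SET) {
			*stop = true;
			return 0;
		}

		/* end-of-queue, or a NULL hw_next link, ends the segment */
		*stop = (bd->hw_options & CPPI_EOQ_MASK) || bd->hw_next == 0;

		/* only the SOP descriptor carries the received packet length */
		if (bd->hw_options & CPPI_SOP_SET)
			return bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		return 0;
	}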
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c new file mode 100644 index 000000000000..75baf181a8cd --- /dev/null +++ b/drivers/usb/musb/davinci.c | |||
@@ -0,0 +1,462 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
3 | * | ||
4 | * This file is part of the Inventra Controller Driver for Linux. | ||
5 | * | ||
6 | * The Inventra Controller Driver for Linux is free software; you | ||
7 | * can redistribute it and/or modify it under the terms of the GNU | ||
8 | * General Public License version 2 as published by the Free Software | ||
9 | * Foundation. | ||
10 | * | ||
11 | * The Inventra Controller Driver for Linux is distributed in | ||
12 | * the hope that it will be useful, but WITHOUT ANY WARRANTY; | ||
13 | * without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | ||
15 | * License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with The Inventra Controller Driver for Linux ; if not, | ||
19 | * write to the Free Software Foundation, Inc., 59 Temple Place, | ||
20 | * Suite 330, Boston, MA 02111-1307 USA | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/list.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/clk.h> | ||
32 | #include <linux/io.h> | ||
33 | |||
34 | #include <asm/arch/hardware.h> | ||
35 | #include <asm/arch/memory.h> | ||
36 | #include <asm/arch/gpio.h> | ||
37 | #include <asm/mach-types.h> | ||
38 | |||
39 | #include "musb_core.h" | ||
40 | |||
41 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
42 | #include <asm/arch/i2c-client.h> | ||
43 | #endif | ||
44 | |||
45 | #include "davinci.h" | ||
46 | #include "cppi_dma.h" | ||
47 | |||
48 | |||
49 | /* REVISIT (PM) we should be able to keep the PHY in low power mode most | ||
50 | * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 | ||
51 | * and, when in host mode, autosuspending idle root ports... PHYPLLON | ||
52 | * (overriding SUSPENDM?) then likely needs to stay off. | ||
53 | */ | ||
54 | |||
55 | static inline void phy_on(void) | ||
56 | { | ||
57 | /* start the on-chip PHY and its PLL */ | ||
58 | __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON, | ||
59 | (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR)); | ||
60 | while ((__raw_readl((void __force __iomem *) | ||
61 | IO_ADDRESS(USBPHY_CTL_PADDR)) | ||
62 | & USBPHY_PHYCLKGD) == 0) | ||
63 | cpu_relax(); | ||
64 | } | ||
65 | |||
66 | static inline void phy_off(void) | ||
67 | { | ||
68 | /* powerdown the on-chip PHY and its oscillator */ | ||
69 | __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *) | ||
70 | IO_ADDRESS(USBPHY_CTL_PADDR)); | ||
71 | } | ||
72 | |||
73 | static int dma_off = 1; | ||
74 | |||
75 | void musb_platform_enable(struct musb *musb) | ||
76 | { | ||
77 | u32 tmp, old, val; | ||
78 | |||
79 | /* workaround: setup irqs through both register sets */ | ||
80 | tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK) | ||
81 | << DAVINCI_USB_TXINT_SHIFT; | ||
82 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); | ||
83 | old = tmp; | ||
84 | tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)) | ||
85 | << DAVINCI_USB_RXINT_SHIFT; | ||
86 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); | ||
87 | tmp |= old; | ||
88 | |||
89 | val = ~MUSB_INTR_SOF; | ||
90 | tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT); | ||
91 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); | ||
92 | |||
93 | if (is_dma_capable() && !dma_off) | ||
94 | printk(KERN_WARNING "%s %s: dma not reactivated\n", | ||
95 | __FILE__, __func__); | ||
96 | else | ||
97 | dma_off = 0; | ||
98 | |||
99 | /* force a DRVVBUS irq so we can start polling for ID change */ | ||
100 | if (is_otg_enabled(musb)) | ||
101 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, | ||
102 | DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Disable the HDRC and flush interrupts | ||
107 | */ | ||
108 | void musb_platform_disable(struct musb *musb) | ||
109 | { | ||
110 | /* because we don't set CTRLR.UINT, "important" to: | ||
111 | * - not read/write INTRUSB/INTRUSBE | ||
112 | * - (except during initial setup, as workaround) | ||
113 | * - use INTSETR/INTCLRR instead | ||
114 | */ | ||
115 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG, | ||
116 | DAVINCI_USB_USBINT_MASK | ||
117 | | DAVINCI_USB_TXINT_MASK | ||
118 | | DAVINCI_USB_RXINT_MASK); | ||
119 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
120 | musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0); | ||
121 | |||
122 | if (is_dma_capable() && !dma_off) | ||
123 | WARNING("dma still active\n"); | ||
124 | } | ||
125 | |||
126 | |||
127 | /* REVISIT it's not clear whether DaVinci can support full OTG. */ | ||
128 | |||
129 | static int vbus_state = -1; | ||
130 | |||
131 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
132 | #define portstate(stmt) stmt | ||
133 | #else | ||
134 | #define portstate(stmt) | ||
135 | #endif | ||
136 | |||
137 | |||
138 | /* VBUS SWITCHING IS BOARD-SPECIFIC */ | ||
139 | |||
140 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
141 | #ifndef CONFIG_MACH_DAVINCI_EVM_OTG | ||
142 | |||
143 | /* I2C operations are always synchronous, and require a task context. | ||
144 | * With unloaded systems, using the shared workqueue seems to suffice | ||
145 | * to satisfy the 100msec A_WAIT_VRISE timeout... | ||
146 | */ | ||
147 | static void evm_deferred_drvvbus(struct work_struct *ignored) | ||
148 | { | ||
149 | davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state); | ||
150 | vbus_state = !vbus_state; | ||
151 | } | ||
152 | static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); | ||
153 | |||
154 | #endif /* modified board */ | ||
155 | #endif /* EVM */ | ||
156 | |||
157 | static void davinci_source_power(struct musb *musb, int is_on, int immediate) | ||
158 | { | ||
159 | if (is_on) | ||
160 | is_on = 1; | ||
161 | |||
162 | if (vbus_state == is_on) | ||
163 | return; | ||
164 | vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ | ||
165 | |||
166 | #ifdef CONFIG_MACH_DAVINCI_EVM | ||
167 | if (machine_is_davinci_evm()) { | ||
168 | #ifdef CONFIG_MACH_DAVINCI_EVM_OTG | ||
169 | /* modified EVM board switching VBUS with GPIO(6) not I2C | ||
170 | * NOTE: PINMUX0.RGB888 (bit23) must be clear | ||
171 | */ | ||
172 | if (is_on) | ||
173 | gpio_set(GPIO(6)); | ||
174 | else | ||
175 | gpio_clear(GPIO(6)); | ||
176 | immediate = 1; | ||
177 | #else | ||
178 | if (immediate) | ||
179 | davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on); | ||
180 | else | ||
181 | schedule_work(&evm_vbus_work); | ||
182 | #endif | ||
183 | } | ||
184 | #endif | ||
185 | if (immediate) | ||
186 | vbus_state = is_on; | ||
187 | } | ||
188 | |||
189 | static void davinci_set_vbus(struct musb *musb, int is_on) | ||
190 | { | ||
191 | WARN_ON(is_on && is_peripheral_active(musb)); | ||
192 | davinci_source_power(musb, is_on, 0); | ||
193 | } | ||
194 | |||
195 | |||
196 | #define POLL_SECONDS 2 | ||
197 | |||
198 | static struct timer_list otg_workaround; | ||
199 | |||
200 | static void otg_timer(unsigned long _musb) | ||
201 | { | ||
202 | struct musb *musb = (void *)_musb; | ||
203 | void __iomem *mregs = musb->mregs; | ||
204 | u8 devctl; | ||
205 | unsigned long flags; | ||
206 | |||
207 | /* We poll because DaVinci won't expose several OTG-critical | ||
208 | * status change events (from the transceiver) otherwise. | ||
209 | */ | ||
210 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
211 | DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); | ||
212 | |||
213 | spin_lock_irqsave(&musb->lock, flags); | ||
214 | switch (musb->xceiv.state) { | ||
215 | case OTG_STATE_A_WAIT_VFALL: | ||
216 | /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL | ||
217 | * seems to mis-handle session "start" otherwise (or in our | ||
218 | * case "recover"), in routine "VBUS was valid by the time | ||
219 | * VBUSERR got reported during enumeration" cases. | ||
220 | */ | ||
221 | if (devctl & MUSB_DEVCTL_VBUS) { | ||
222 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
223 | break; | ||
224 | } | ||
225 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
226 | musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, | ||
227 | MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); | ||
228 | break; | ||
229 | case OTG_STATE_B_IDLE: | ||
230 | if (!is_peripheral_enabled(musb)) | ||
231 | break; | ||
232 | |||
233 | /* There's no ID-changed IRQ, so we have no good way to tell | ||
234 | * when to switch to the A-Default state machine (by setting | ||
235 | * the DEVCTL.SESSION flag). | ||
236 | * | ||
237 | * Workaround: whenever we're in B_IDLE, try setting the | ||
238 | * session flag every few seconds. If it works, ID was | ||
239 | * grounded and we're now in the A-Default state machine. | ||
240 | * | ||
241 | * NOTE setting the session flag is _supposed_ to trigger | ||
242 | * SRP, but clearly it doesn't. | ||
243 | */ | ||
244 | musb_writeb(mregs, MUSB_DEVCTL, | ||
245 | devctl | MUSB_DEVCTL_SESSION); | ||
246 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
247 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
248 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
249 | else | ||
250 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
251 | break; | ||
252 | default: | ||
253 | break; | ||
254 | } | ||
255 | spin_unlock_irqrestore(&musb->lock, flags); | ||
256 | } | ||
257 | |||
258 | static irqreturn_t davinci_interrupt(int irq, void *__hci) | ||
259 | { | ||
260 | unsigned long flags; | ||
261 | irqreturn_t retval = IRQ_NONE; | ||
262 | struct musb *musb = __hci; | ||
263 | void __iomem *tibase = musb->ctrl_base; | ||
264 | u32 tmp; | ||
265 | |||
266 | spin_lock_irqsave(&musb->lock, flags); | ||
267 | |||
268 | /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through | ||
269 | * the Mentor registers (except for setup), use the TI ones and EOI. | ||
270 | * | ||
271 | * Docs describe irq "vector" registers associated with the CPPI and | ||
272 | * USB EOI registers. These hold a bitmask corresponding to the | ||
273 | * current IRQ, not an irq handler address. Would using those bits | ||
274 | * resolve some of the races observed in this dispatch code?? | ||
275 | */ | ||
276 | |||
277 | /* CPPI interrupts share the same IRQ line, but have their own | ||
278 | * mask, state, "vector", and EOI registers. | ||
279 | */ | ||
280 | if (is_cppi_enabled()) { | ||
281 | u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); | ||
282 | u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); | ||
283 | |||
284 | if (cppi_tx || cppi_rx) { | ||
285 | DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx); | ||
286 | cppi_completion(musb, cppi_rx, cppi_tx); | ||
287 | retval = IRQ_HANDLED; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | /* ack and handle non-CPPI interrupts */ | ||
292 | tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); | ||
293 | musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); | ||
294 | DBG(4, "IRQ %08x\n", tmp); | ||
295 | |||
296 | musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) | ||
297 | >> DAVINCI_USB_RXINT_SHIFT; | ||
298 | musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK) | ||
299 | >> DAVINCI_USB_TXINT_SHIFT; | ||
300 | musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK) | ||
301 | >> DAVINCI_USB_USBINT_SHIFT; | ||
302 | |||
303 | /* DRVVBUS irqs are the only proxy we have (a very poor one!) for | ||
304 | * DaVinci's missing ID change IRQ. We need an ID change IRQ to | ||
305 | * switch appropriately between halves of the OTG state machine. | ||
306 | * Managing DEVCTL.SESSION per Mentor docs requires we know its | ||
307 | * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. | ||
308 | * Also, DRVVBUS pulses for SRP (but not at 5V) ... | ||
309 | */ | ||
310 | if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) { | ||
311 | int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG); | ||
312 | void __iomem *mregs = musb->mregs; | ||
313 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
314 | int err = musb->int_usb & MUSB_INTR_VBUSERROR; | ||
315 | |||
316 | err = is_host_enabled(musb) | ||
317 | && (musb->int_usb & MUSB_INTR_VBUSERROR); | ||
318 | if (err) { | ||
319 | /* The Mentor core doesn't debounce VBUS as needed | ||
320 | * to cope with device connect current spikes. This | ||
321 | * means it's not uncommon for bus-powered devices | ||
322 | * to get VBUS errors during enumeration. | ||
323 | * | ||
324 | * This is a workaround, but newer RTL from Mentor | ||
325 | * seems to allow a better one: "re"starting sessions | ||
326 | * without waiting (on EVM, a **long** time) for VBUS | ||
327 | * to stop registering in devctl. | ||
328 | */ | ||
329 | musb->int_usb &= ~MUSB_INTR_VBUSERROR; | ||
330 | musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; | ||
331 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
332 | WARNING("VBUS error workaround (delay coming)\n"); | ||
333 | } else if (is_host_enabled(musb) && drvvbus) { | ||
334 | musb->is_active = 1; | ||
335 | MUSB_HST_MODE(musb); | ||
336 | musb->xceiv.default_a = 1; | ||
337 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
338 | portstate(musb->port1_status |= USB_PORT_STAT_POWER); | ||
339 | del_timer(&otg_workaround); | ||
340 | } else { | ||
341 | musb->is_active = 0; | ||
342 | MUSB_DEV_MODE(musb); | ||
343 | musb->xceiv.default_a = 0; | ||
344 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
345 | portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); | ||
346 | } | ||
347 | |||
348 | /* NOTE: this must complete poweron within 100 msec */ | ||
349 | davinci_source_power(musb, drvvbus, 0); | ||
350 | DBG(2, "VBUS %s (%s)%s, devctl %02x\n", | ||
351 | drvvbus ? "on" : "off", | ||
352 | otg_state_string(musb), | ||
353 | err ? " ERROR" : "", | ||
354 | devctl); | ||
355 | retval = IRQ_HANDLED; | ||
356 | } | ||
357 | |||
358 | if (musb->int_tx || musb->int_rx || musb->int_usb) | ||
359 | retval |= musb_interrupt(musb); | ||
360 | |||
361 | /* irq stays asserted until EOI is written */ | ||
362 | musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); | ||
363 | |||
364 | /* poll for ID change */ | ||
365 | if (is_otg_enabled(musb) | ||
366 | && musb->xceiv.state == OTG_STATE_B_IDLE) | ||
367 | mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); | ||
368 | |||
369 | spin_unlock_irqrestore(&musb->lock, flags); | ||
370 | |||
371 | /* REVISIT we sometimes get unhandled IRQs | ||
372 | * (e.g. ep0). not clear why... | ||
373 | */ | ||
374 | if (retval != IRQ_HANDLED) | ||
375 | DBG(5, "unhandled? %08x\n", tmp); | ||
376 | return IRQ_HANDLED; | ||
377 | } | ||
378 | |||
379 | int __init musb_platform_init(struct musb *musb) | ||
380 | { | ||
381 | void __iomem *tibase = musb->ctrl_base; | ||
382 | u32 revision; | ||
383 | |||
384 | musb->mregs += DAVINCI_BASE_OFFSET; | ||
385 | #if 0 | ||
386 | /* REVISIT there's something odd about clocking, this | ||
387 | * didn't appear to do the job ... | ||
388 | */ | ||
389 | musb->clock = clk_get(pDevice, "usb"); | ||
390 | if (IS_ERR(musb->clock)) | ||
391 | return PTR_ERR(musb->clock); | ||
392 | |||
393 | status = clk_enable(musb->clock); | ||
394 | if (status < 0) | ||
395 | return -ENODEV; | ||
396 | #endif | ||
397 | |||
398 | /* returns zero if e.g. not clocked */ | ||
399 | revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); | ||
400 | if (revision == 0) | ||
401 | return -ENODEV; | ||
402 | |||
403 | if (is_host_enabled(musb)) | ||
404 | setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); | ||
405 | |||
406 | musb->board_set_vbus = davinci_set_vbus; | ||
407 | davinci_source_power(musb, 0, 1); | ||
408 | |||
409 | /* reset the controller */ | ||
410 | musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); | ||
411 | |||
412 | /* start the on-chip PHY and its PLL */ | ||
413 | phy_on(); | ||
414 | |||
415 | msleep(5); | ||
416 | |||
417 | /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ | ||
418 | pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", | ||
419 | revision, __raw_readl((void __force __iomem *) | ||
420 | IO_ADDRESS(USBPHY_CTL_PADDR)), | ||
421 | musb_readb(tibase, DAVINCI_USB_CTRL_REG)); | ||
422 | |||
423 | musb->isr = davinci_interrupt; | ||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | int musb_platform_exit(struct musb *musb) | ||
428 | { | ||
429 | if (is_host_enabled(musb)) | ||
430 | del_timer_sync(&otg_workaround); | ||
431 | |||
432 | davinci_source_power(musb, 0 /*off*/, 1); | ||
433 | |||
434 | /* delay, to avoid problems with module reload */ | ||
435 | if (is_host_enabled(musb) && musb->xceiv.default_a) { | ||
436 | int maxdelay = 30; | ||
437 | u8 devctl, warn = 0; | ||
438 | |||
439 | /* if there's no peripheral connected, this can take a | ||
440 | * long time to fall, especially on EVM with huge C133. | ||
441 | */ | ||
442 | do { | ||
443 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
444 | if (!(devctl & MUSB_DEVCTL_VBUS)) | ||
445 | break; | ||
446 | if ((devctl & MUSB_DEVCTL_VBUS) != warn) { | ||
447 | warn = devctl & MUSB_DEVCTL_VBUS; | ||
448 | DBG(1, "VBUS %d\n", | ||
449 | warn >> MUSB_DEVCTL_VBUS_SHIFT); | ||
450 | } | ||
451 | msleep(1000); | ||
452 | maxdelay--; | ||
453 | } while (maxdelay > 0); | ||
454 | |||
455 | /* in OTG mode, another host might be connected */ | ||
456 | if (devctl & MUSB_DEVCTL_VBUS) | ||
457 | DBG(1, "VBUS off timeout (devctl %02x)\n", devctl); | ||
458 | } | ||
459 | |||
460 | phy_off(); | ||
461 | return 0; | ||
462 | } | ||
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h new file mode 100644 index 000000000000..7fb6238e270f --- /dev/null +++ b/drivers/usb/musb/davinci.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
3 | * | ||
4 | * The Inventra Controller Driver for Linux is free software; you | ||
5 | * can redistribute it and/or modify it under the terms of the GNU | ||
6 | * General Public License version 2 as published by the Free Software | ||
7 | * Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef __MUSB_HDRDF_H__ | ||
11 | #define __MUSB_HDRDF_H__ | ||
12 | |||
13 | /* | ||
14 | * DaVinci-specific definitions | ||
15 | */ | ||
16 | |||
17 | /* Integrated highspeed/otg PHY */ | ||
18 | #define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) | ||
19 | #define USBPHY_PHYCLKGD (1 << 8) | ||
20 | #define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */ | ||
21 | #define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */ | ||
22 | #define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */ | ||
23 | #define USBPHY_CLKO1SEL (1 << 3) | ||
24 | #define USBPHY_OSCPDWN (1 << 2) | ||
25 | #define USBPHY_PHYPDWN (1 << 0) | ||
26 | |||
27 | /* For now include usb OTG module registers here */ | ||
28 | #define DAVINCI_USB_VERSION_REG 0x00 | ||
29 | #define DAVINCI_USB_CTRL_REG 0x04 | ||
30 | #define DAVINCI_USB_STAT_REG 0x08 | ||
31 | #define DAVINCI_RNDIS_REG 0x10 | ||
32 | #define DAVINCI_AUTOREQ_REG 0x14 | ||
33 | #define DAVINCI_USB_INT_SOURCE_REG 0x20 | ||
34 | #define DAVINCI_USB_INT_SET_REG 0x24 | ||
35 | #define DAVINCI_USB_INT_SRC_CLR_REG 0x28 | ||
36 | #define DAVINCI_USB_INT_MASK_REG 0x2c | ||
37 | #define DAVINCI_USB_INT_MASK_SET_REG 0x30 | ||
38 | #define DAVINCI_USB_INT_MASK_CLR_REG 0x34 | ||
39 | #define DAVINCI_USB_INT_SRC_MASKED_REG 0x38 | ||
40 | #define DAVINCI_USB_EOI_REG 0x3c | ||
41 | #define DAVINCI_USB_EOI_INTVEC 0x40 | ||
42 | |||
43 | /* BEGIN CPPI-generic (?) */ | ||
44 | |||
45 | /* CPPI related registers */ | ||
46 | #define DAVINCI_TXCPPI_CTRL_REG 0x80 | ||
47 | #define DAVINCI_TXCPPI_TEAR_REG 0x84 | ||
48 | #define DAVINCI_CPPI_EOI_REG 0x88 | ||
49 | #define DAVINCI_CPPI_INTVEC_REG 0x8c | ||
50 | #define DAVINCI_TXCPPI_MASKED_REG 0x90 | ||
51 | #define DAVINCI_TXCPPI_RAW_REG 0x94 | ||
52 | #define DAVINCI_TXCPPI_INTENAB_REG 0x98 | ||
53 | #define DAVINCI_TXCPPI_INTCLR_REG 0x9c | ||
54 | |||
55 | #define DAVINCI_RXCPPI_CTRL_REG 0xC0 | ||
56 | #define DAVINCI_RXCPPI_MASKED_REG 0xD0 | ||
57 | #define DAVINCI_RXCPPI_RAW_REG 0xD4 | ||
58 | #define DAVINCI_RXCPPI_INTENAB_REG 0xD8 | ||
59 | #define DAVINCI_RXCPPI_INTCLR_REG 0xDC | ||
60 | |||
61 | #define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0 | ||
62 | #define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4 | ||
63 | #define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8 | ||
64 | #define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC | ||
65 | |||
66 | /* CPPI state RAM entries */ | ||
67 | #define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100 | ||
68 | |||
69 | #define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \ | ||
70 | (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40)) | ||
71 | #define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \ | ||
72 | (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40)) | ||
73 | |||
74 | /* CPPI masks */ | ||
75 | #define DAVINCI_DMA_CTRL_ENABLE 1 | ||
76 | #define DAVINCI_DMA_CTRL_DISABLE 0 | ||
77 | |||
78 | #define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF | ||
79 | #define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF | ||
80 | |||
81 | /* END CPPI-generic (?) */ | ||
82 | |||
83 | #define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */ | ||
84 | #define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */ | ||
85 | |||
86 | #define DAVINCI_USB_USBINT_SHIFT 16 | ||
87 | #define DAVINCI_USB_TXINT_SHIFT 0 | ||
88 | #define DAVINCI_USB_RXINT_SHIFT 8 | ||
89 | |||
90 | #define DAVINCI_INTR_DRVVBUS 0x0100 | ||
91 | |||
92 | #define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */ | ||
93 | #define DAVINCI_USB_TXINT_MASK \ | ||
94 | (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT) | ||
95 | #define DAVINCI_USB_RXINT_MASK \ | ||
96 | (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT) | ||
97 | |||
98 | #define DAVINCI_BASE_OFFSET 0x400 | ||
99 | |||
100 | #endif /* __MUSB_HDRDF_H__ */ | ||
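Editor's note: a hedged sketch, not part of the patch, showing how the shift/mask macros above combine into the per-endpoint interrupt enable value that musb_platform_enable() in davinci.c writes to DAVINCI_USB_INT_MASK_SET_REG; MUSB_INTR_SOF is assumed to come from the core headers and the helper name is invented.

	/* sketch only: build the DaVinci interrupt enable mask from epmask */
	static u32 davinci_irq_mask_sketch(u16 epmask)
	{
		u32 mask;

		/* TX endpoints (ep0 + 4 tx) occupy bits 0..4 */
		mask = (epmask & DAVINCI_USB_TX_ENDPTS_MASK)
				<< DAVINCI_USB_TXINT_SHIFT;

		/* RX endpoints (no ep0) occupy bits 9..12 */
		mask |= (epmask & 0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)
				<< DAVINCI_USB_RXINT_SHIFT;

		/* Mentor core interrupts, minus SOF, occupy bits 16..24 */
		mask |= ((~MUSB_INTR_SOF & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);

		return mask;
	}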
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c new file mode 100644 index 000000000000..c5b8f0296fcf --- /dev/null +++ b/drivers/usb/musb/musb_core.c | |||
@@ -0,0 +1,2253 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver core code | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * Inventra (Multipoint) Dual-Role Controller Driver for Linux. | ||
37 | * | ||
38 | * This consists of a Host Controller Driver (HCD) and a peripheral | ||
39 | * controller driver implementing the "Gadget" API; OTG support is | ||
40 | * in the works. These are normal Linux-USB controller drivers which | ||
41 | * use IRQs and have no dedicated thread. | ||
42 | * | ||
43 | * This version of the driver has only been used with products from | ||
44 | * Texas Instruments. Those products integrate the Inventra logic | ||
45 | * with other DMA, IRQ, and bus modules, as well as other logic that | ||
46 | * needs to be reflected in this driver. | ||
47 | * | ||
48 | * | ||
49 | * NOTE: the original Mentor code here was pretty much a collection | ||
50 | * of mechanisms that don't seem to have been fully integrated/working | ||
51 | * for *any* Linux kernel version. This version aims at Linux 2.6.now. | ||
52 | * Key open issues include: | ||
53 | * | ||
54 | * - Lack of host-side transaction scheduling, for all transfer types. | ||
55 | * The hardware doesn't do it; instead, software must. | ||
56 | * | ||
57 | * This is not an issue for OTG devices that don't support external | ||
58 | * hubs, but for more "normal" USB hosts it's a user issue that the | ||
59 | * "multipoint" support doesn't scale in the expected ways. That | ||
60 | * includes DaVinci EVM in a common non-OTG mode. | ||
61 | * | ||
62 | * * Control and bulk use dedicated endpoints, and there's as | ||
63 | * yet no mechanism to either (a) reclaim the hardware when | ||
64 | * peripherals are NAKing, which gets complicated with bulk | ||
65 | * endpoints, or (b) use more than a single bulk endpoint in | ||
66 | * each direction. | ||
67 | * | ||
68 | * RESULT: one device may be perceived as blocking another one. | ||
69 | * | ||
70 | * * Interrupt and isochronous will dynamically allocate endpoint | ||
71 | * hardware, but (a) there's no record keeping for bandwidth; | ||
72 | * (b) in the common case that few endpoints are available, there | ||
73 | * is no mechanism to reuse endpoints to talk to multiple devices. | ||
74 | * | ||
75 | * RESULT: At one extreme, bandwidth can be overcommitted in | ||
76 | * some hardware configurations, and no faults will be reported. | ||
77 | * At the other extreme, the bandwidth capabilities which do | ||
78 | * exist tend to be severely undercommitted. You can't yet hook | ||
79 | * up both a keyboard and a mouse to an external USB hub. | ||
80 | */ | ||
81 | |||
82 | /* | ||
83 | * This gets many kinds of configuration information: | ||
84 | * - Kconfig for everything user-configurable | ||
85 | * - <asm/arch/hdrc_cnf.h> for SOC or family details | ||
86 | * - platform_device for addressing, irq, and platform_data | ||
87 | * - platform_data is mostly for board-specific information | ||
88 | * | ||
89 | * Most of the conditional compilation will (someday) vanish. | ||
90 | */ | ||
91 | |||
92 | #include <linux/module.h> | ||
93 | #include <linux/kernel.h> | ||
94 | #include <linux/sched.h> | ||
95 | #include <linux/slab.h> | ||
96 | #include <linux/init.h> | ||
97 | #include <linux/list.h> | ||
98 | #include <linux/kobject.h> | ||
99 | #include <linux/platform_device.h> | ||
100 | #include <linux/io.h> | ||
101 | |||
102 | #ifdef CONFIG_ARM | ||
103 | #include <asm/arch/hardware.h> | ||
104 | #include <asm/arch/memory.h> | ||
105 | #include <asm/mach-types.h> | ||
106 | #endif | ||
107 | |||
108 | #include "musb_core.h" | ||
109 | |||
110 | |||
111 | #ifdef CONFIG_ARCH_DAVINCI | ||
112 | #include "davinci.h" | ||
113 | #endif | ||
114 | |||
115 | |||
116 | |||
117 | unsigned debug; | ||
118 | module_param(debug, uint, S_IRUGO | S_IWUSR); | ||
119 | MODULE_PARM_DESC(debug, "Debug message level. Default = 0"); | ||
120 | |||
121 | #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" | ||
122 | #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" | ||
123 | |||
124 | #define MUSB_VERSION "6.0" | ||
125 | |||
126 | #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION | ||
127 | |||
128 | #define MUSB_DRIVER_NAME "musb_hdrc" | ||
129 | const char musb_driver_name[] = MUSB_DRIVER_NAME; | ||
130 | |||
131 | MODULE_DESCRIPTION(DRIVER_INFO); | ||
132 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
133 | MODULE_LICENSE("GPL"); | ||
134 | MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); | ||
135 | |||
136 | |||
137 | /*-------------------------------------------------------------------------*/ | ||
138 | |||
139 | static inline struct musb *dev_to_musb(struct device *dev) | ||
140 | { | ||
141 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
142 | /* usbcore insists dev->driver_data is a "struct hcd *" */ | ||
143 | return hcd_to_musb(dev_get_drvdata(dev)); | ||
144 | #else | ||
145 | return dev_get_drvdata(dev); | ||
146 | #endif | ||
147 | } | ||
148 | |||
149 | /*-------------------------------------------------------------------------*/ | ||
150 | |||
151 | #ifndef CONFIG_USB_TUSB6010 | ||
152 | /* | ||
153 | * Load an endpoint's FIFO | ||
154 | */ | ||
155 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) | ||
156 | { | ||
157 | void __iomem *fifo = hw_ep->fifo; | ||
158 | |||
159 | prefetch((u8 *)src); | ||
160 | |||
161 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
162 | 'T', hw_ep->epnum, fifo, len, src); | ||
163 | |||
164 | /* we can't assume unaligned reads work */ | ||
165 | if (likely((0x01 & (unsigned long) src) == 0)) { | ||
166 | u16 index = 0; | ||
167 | |||
168 | /* best case is 32bit-aligned source address */ | ||
169 | if ((0x02 & (unsigned long) src) == 0) { | ||
170 | if (len >= 4) { | ||
171 | writesl(fifo, src + index, len >> 2); | ||
172 | index += len & ~0x03; | ||
173 | } | ||
174 | if (len & 0x02) { | ||
175 | musb_writew(fifo, 0, *(u16 *)&src[index]); | ||
176 | index += 2; | ||
177 | } | ||
178 | } else { | ||
179 | if (len >= 2) { | ||
180 | writesw(fifo, src + index, len >> 1); | ||
181 | index += len & ~0x01; | ||
182 | } | ||
183 | } | ||
184 | if (len & 0x01) | ||
185 | musb_writeb(fifo, 0, src[index]); | ||
186 | } else { | ||
187 | /* byte aligned */ | ||
188 | writesb(fifo, src, len); | ||
189 | } | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * Unload an endpoint's FIFO | ||
194 | */ | ||
195 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) | ||
196 | { | ||
197 | void __iomem *fifo = hw_ep->fifo; | ||
198 | |||
199 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
200 | 'R', hw_ep->epnum, fifo, len, dst); | ||
201 | |||
202 | /* we can't assume unaligned writes work */ | ||
203 | if (likely((0x01 & (unsigned long) dst) == 0)) { | ||
204 | u16 index = 0; | ||
205 | |||
206 | /* best case is 32bit-aligned destination address */ | ||
207 | if ((0x02 & (unsigned long) dst) == 0) { | ||
208 | if (len >= 4) { | ||
209 | readsl(fifo, dst, len >> 2); | ||
210 | index = len & ~0x03; | ||
211 | } | ||
212 | if (len & 0x02) { | ||
213 | *(u16 *)&dst[index] = musb_readw(fifo, 0); | ||
214 | index += 2; | ||
215 | } | ||
216 | } else { | ||
217 | if (len >= 2) { | ||
218 | readsw(fifo, dst, len >> 1); | ||
219 | index = len & ~0x01; | ||
220 | } | ||
221 | } | ||
222 | if (len & 0x01) | ||
223 | dst[index] = musb_readb(fifo, 0); | ||
224 | } else { | ||
225 | /* byte aligned */ | ||
226 | readsb(fifo, dst, len); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | #endif /* normal PIO */ | ||
231 | |||
232 | |||
233 | /*-------------------------------------------------------------------------*/ | ||
234 | |||
235 | /* for high speed test mode; see USB 2.0 spec 7.1.20 */ | ||
236 | static const u8 musb_test_packet[53] = { | ||
237 | /* implicit SYNC then DATA0 to start */ | ||
238 | |||
239 | /* JKJKJKJK x9 */ | ||
240 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
241 | /* JJKKJJKK x8 */ | ||
242 | 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, | ||
243 | /* JJJJKKKK x8 */ | ||
244 | 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, | ||
245 | /* JJJJJJJKKKKKKK x8 */ | ||
246 | 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
247 | /* JJJJJJJK x8 */ | ||
248 | 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, | ||
249 | /* JKKKKKKK x10, JK */ | ||
250 | 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e | ||
251 | |||
252 | /* implicit CRC16 then EOP to end */ | ||
253 | }; | ||
254 | |||
255 | void musb_load_testpacket(struct musb *musb) | ||
256 | { | ||
257 | void __iomem *regs = musb->endpoints[0].regs; | ||
258 | |||
259 | musb_ep_select(musb->mregs, 0); | ||
260 | musb_write_fifo(musb->control_ep, | ||
261 | sizeof(musb_test_packet), musb_test_packet); | ||
262 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY); | ||
263 | } | ||
264 | |||
265 | /*-------------------------------------------------------------------------*/ | ||
266 | |||
267 | const char *otg_state_string(struct musb *musb) | ||
268 | { | ||
269 | switch (musb->xceiv.state) { | ||
270 | case OTG_STATE_A_IDLE: return "a_idle"; | ||
271 | case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; | ||
272 | case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; | ||
273 | case OTG_STATE_A_HOST: return "a_host"; | ||
274 | case OTG_STATE_A_SUSPEND: return "a_suspend"; | ||
275 | case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; | ||
276 | case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; | ||
277 | case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; | ||
278 | case OTG_STATE_B_IDLE: return "b_idle"; | ||
279 | case OTG_STATE_B_SRP_INIT: return "b_srp_init"; | ||
280 | case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; | ||
281 | case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; | ||
282 | case OTG_STATE_B_HOST: return "b_host"; | ||
283 | default: return "UNDEFINED"; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | #ifdef CONFIG_USB_MUSB_OTG | ||
288 | |||
289 | /* | ||
290 | * See also USB_OTG_1-3.pdf 6.6.5 Timers | ||
291 | * REVISIT: Are the other timers done in the hardware? | ||
292 | */ | ||
293 | #define TB_ASE0_BRST 100 /* Min 3.125 ms */ | ||
294 | |||
295 | /* | ||
296 | * Handles OTG hnp timeouts, such as b_ase0_brst | ||
297 | */ | ||
298 | void musb_otg_timer_func(unsigned long data) | ||
299 | { | ||
300 | struct musb *musb = (struct musb *)data; | ||
301 | unsigned long flags; | ||
302 | |||
303 | spin_lock_irqsave(&musb->lock, flags); | ||
304 | switch (musb->xceiv.state) { | ||
305 | case OTG_STATE_B_WAIT_ACON: | ||
306 | DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n"); | ||
307 | musb_g_disconnect(musb); | ||
308 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
309 | musb->is_active = 0; | ||
310 | break; | ||
311 | case OTG_STATE_A_WAIT_BCON: | ||
312 | DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n"); | ||
313 | musb_hnp_stop(musb); | ||
314 | break; | ||
315 | default: | ||
316 | DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb)); | ||
317 | } | ||
318 | musb->ignore_disconnect = 0; | ||
319 | spin_unlock_irqrestore(&musb->lock, flags); | ||
320 | } | ||
321 | |||
322 | static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0); | ||
323 | |||
324 | /* | ||
325 | * Stops the B-device HNP state. Caller must take care of locking. | ||
326 | */ | ||
327 | void musb_hnp_stop(struct musb *musb) | ||
328 | { | ||
329 | struct usb_hcd *hcd = musb_to_hcd(musb); | ||
330 | void __iomem *mbase = musb->mregs; | ||
331 | u8 reg; | ||
332 | |||
333 | switch (musb->xceiv.state) { | ||
334 | case OTG_STATE_A_PERIPHERAL: | ||
335 | case OTG_STATE_A_WAIT_VFALL: | ||
336 | case OTG_STATE_A_WAIT_BCON: | ||
337 | DBG(1, "HNP: Switching back to A-host\n"); | ||
338 | musb_g_disconnect(musb); | ||
339 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
340 | MUSB_HST_MODE(musb); | ||
341 | musb->is_active = 0; | ||
342 | break; | ||
343 | case OTG_STATE_B_HOST: | ||
344 | DBG(1, "HNP: Disabling HR\n"); | ||
345 | hcd->self.is_b_host = 0; | ||
346 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
347 | MUSB_DEV_MODE(musb); | ||
348 | reg = musb_readb(mbase, MUSB_POWER); | ||
349 | reg |= MUSB_POWER_SUSPENDM; | ||
350 | musb_writeb(mbase, MUSB_POWER, reg); | ||
351 | /* REVISIT: Start SESSION_REQUEST here? */ | ||
352 | break; | ||
353 | default: | ||
354 | DBG(1, "HNP: Stopping in unknown state %s\n", | ||
355 | otg_state_string(musb)); | ||
356 | } | ||
357 | |||
358 | /* | ||
359 | * When returning to A state after HNP, avoid hub_port_rebounce(), | ||
360 | * which causes occasional OPT A "Did not receive reset after connect" | ||
361 | * errors. | ||
362 | */ | ||
363 | musb->port1_status &= | ||
364 | ~(1 << USB_PORT_FEAT_C_CONNECTION); | ||
365 | } | ||
366 | |||
367 | #endif | ||
368 | |||
369 | /* | ||
370 | * Interrupt Service Routine to record USB "global" interrupts. | ||
371 | * Since these do not happen often and signify things of | ||
372 | * paramount importance, it seems OK to check them individually; | ||
373 | * the order of the tests is specified in the manual | ||
374 | * | ||
375 | * @param musb instance pointer | ||
376 | * @param int_usb register contents | ||
377 | * @param devctl | ||
378 | * @param power | ||
379 | */ | ||
380 | |||
381 | #define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \ | ||
382 | | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \ | ||
383 | | MUSB_INTR_RESET) | ||
384 | |||
385 | static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | ||
386 | u8 devctl, u8 power) | ||
387 | { | ||
388 | irqreturn_t handled = IRQ_NONE; | ||
389 | void __iomem *mbase = musb->mregs; | ||
390 | |||
391 | DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, | ||
392 | int_usb); | ||
393 | |||
394 | /* in host mode, the peripheral may issue remote wakeup. | ||
395 | * in peripheral mode, the host may resume the link. | ||
396 | * spurious RESUME irqs happen too, paired with SUSPEND. | ||
397 | */ | ||
398 | if (int_usb & MUSB_INTR_RESUME) { | ||
399 | handled = IRQ_HANDLED; | ||
400 | DBG(3, "RESUME (%s)\n", otg_state_string(musb)); | ||
401 | |||
402 | if (devctl & MUSB_DEVCTL_HM) { | ||
403 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
404 | switch (musb->xceiv.state) { | ||
405 | case OTG_STATE_A_SUSPEND: | ||
406 | /* remote wakeup? later, GetPortStatus | ||
407 | * will stop RESUME signaling | ||
408 | */ | ||
409 | |||
410 | if (power & MUSB_POWER_SUSPENDM) { | ||
411 | /* spurious */ | ||
412 | musb->int_usb &= ~MUSB_INTR_SUSPEND; | ||
413 | DBG(2, "Spurious SUSPENDM\n"); | ||
414 | break; | ||
415 | } | ||
416 | |||
417 | power &= ~MUSB_POWER_SUSPENDM; | ||
418 | musb_writeb(mbase, MUSB_POWER, | ||
419 | power | MUSB_POWER_RESUME); | ||
420 | |||
421 | musb->port1_status |= | ||
422 | (USB_PORT_STAT_C_SUSPEND << 16) | ||
423 | | MUSB_PORT_STAT_RESUME; | ||
424 | musb->rh_timer = jiffies | ||
425 | + msecs_to_jiffies(20); | ||
426 | |||
427 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
428 | musb->is_active = 1; | ||
429 | usb_hcd_resume_root_hub(musb_to_hcd(musb)); | ||
430 | break; | ||
431 | case OTG_STATE_B_WAIT_ACON: | ||
432 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
433 | musb->is_active = 1; | ||
434 | MUSB_DEV_MODE(musb); | ||
435 | break; | ||
436 | default: | ||
437 | WARNING("bogus %s RESUME (%s)\n", | ||
438 | "host", | ||
439 | otg_state_string(musb)); | ||
440 | } | ||
441 | #endif | ||
442 | } else { | ||
443 | switch (musb->xceiv.state) { | ||
444 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
445 | case OTG_STATE_A_SUSPEND: | ||
446 | /* possibly DISCONNECT is upcoming */ | ||
447 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
448 | usb_hcd_resume_root_hub(musb_to_hcd(musb)); | ||
449 | break; | ||
450 | #endif | ||
451 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
452 | case OTG_STATE_B_WAIT_ACON: | ||
453 | case OTG_STATE_B_PERIPHERAL: | ||
454 | /* disconnect while suspended? we may | ||
455 | * not get a disconnect irq... | ||
456 | */ | ||
457 | if ((devctl & MUSB_DEVCTL_VBUS) | ||
458 | != (3 << MUSB_DEVCTL_VBUS_SHIFT) | ||
459 | ) { | ||
460 | musb->int_usb |= MUSB_INTR_DISCONNECT; | ||
461 | musb->int_usb &= ~MUSB_INTR_SUSPEND; | ||
462 | break; | ||
463 | } | ||
464 | musb_g_resume(musb); | ||
465 | break; | ||
466 | case OTG_STATE_B_IDLE: | ||
467 | musb->int_usb &= ~MUSB_INTR_SUSPEND; | ||
468 | break; | ||
469 | #endif | ||
470 | default: | ||
471 | WARNING("bogus %s RESUME (%s)\n", | ||
472 | "peripheral", | ||
473 | otg_state_string(musb)); | ||
474 | } | ||
475 | } | ||
476 | } | ||
477 | |||
478 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
479 | /* see manual for the order of the tests */ | ||
480 | if (int_usb & MUSB_INTR_SESSREQ) { | ||
481 | DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); | ||
482 | |||
483 | /* IRQ arrives from ID pin sense or (later, if VBUS power | ||
484 | * is removed) SRP. Responses are time critical: | ||
485 | * - turn on VBUS (with silicon-specific mechanism) | ||
486 | * - go through A_WAIT_VRISE | ||
487 | * - ... to A_WAIT_BCON. | ||
488 | * a_wait_vrise_tmout triggers VBUS_ERROR transitions | ||
489 | */ | ||
490 | musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); | ||
491 | musb->ep0_stage = MUSB_EP0_START; | ||
492 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
493 | MUSB_HST_MODE(musb); | ||
494 | musb_set_vbus(musb, 1); | ||
495 | |||
496 | handled = IRQ_HANDLED; | ||
497 | } | ||
498 | |||
499 | if (int_usb & MUSB_INTR_VBUSERROR) { | ||
500 | int ignore = 0; | ||
501 | |||
502 | /* During connection as an A-Device, we may see short | ||
503 | * current spikes causing a voltage drop, because of cable | ||
504 | * and peripheral capacitance combined with vbus draw. | ||
505 | * (So: less common with truly self-powered devices, where | ||
506 | * vbus doesn't act like a power supply.) | ||
507 | * | ||
508 | * Such spikes are short; usually less than ~500 usec, max | ||
509 | * of ~2 msec. That is, they're not sustained overcurrent | ||
510 | * errors, though they're reported using VBUSERROR irqs. | ||
511 | * | ||
512 | * Workarounds: (a) hardware: use self powered devices. | ||
513 | * (b) software: ignore non-repeated VBUS errors. | ||
514 | * | ||
515 | * REVISIT: do delays from lots of DEBUG_KERNEL checks | ||
516 | * make trouble here, keeping VBUS < 4.4V ? | ||
517 | */ | ||
518 | switch (musb->xceiv.state) { | ||
519 | case OTG_STATE_A_HOST: | ||
520 | /* recovery is dicey once we've gotten past the | ||
521 | * initial stages of enumeration, but if VBUS | ||
522 | * stayed ok at the other end of the link, and | ||
523 | * another reset is due (at least for high speed, | ||
524 | * to redo the chirp etc), it might work OK... | ||
525 | */ | ||
526 | case OTG_STATE_A_WAIT_BCON: | ||
527 | case OTG_STATE_A_WAIT_VRISE: | ||
528 | if (musb->vbuserr_retry) { | ||
529 | musb->vbuserr_retry--; | ||
530 | ignore = 1; | ||
531 | devctl |= MUSB_DEVCTL_SESSION; | ||
532 | musb_writeb(mbase, MUSB_DEVCTL, devctl); | ||
533 | } else { | ||
534 | musb->port1_status |= | ||
535 | (1 << USB_PORT_FEAT_OVER_CURRENT) | ||
536 | | (1 << USB_PORT_FEAT_C_OVER_CURRENT); | ||
537 | } | ||
538 | break; | ||
539 | default: | ||
540 | break; | ||
541 | } | ||
542 | |||
543 | DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", | ||
544 | otg_state_string(musb), | ||
545 | devctl, | ||
546 | ({ char *s; | ||
547 | switch (devctl & MUSB_DEVCTL_VBUS) { | ||
548 | case 0 << MUSB_DEVCTL_VBUS_SHIFT: | ||
549 | s = "<SessEnd"; break; | ||
550 | case 1 << MUSB_DEVCTL_VBUS_SHIFT: | ||
551 | s = "<AValid"; break; | ||
552 | case 2 << MUSB_DEVCTL_VBUS_SHIFT: | ||
553 | s = "<VBusValid"; break; | ||
554 | /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */ | ||
555 | default: | ||
556 | s = "VALID"; break; | ||
557 | }; s; }), | ||
558 | VBUSERR_RETRY_COUNT - musb->vbuserr_retry, | ||
559 | musb->port1_status); | ||
560 | |||
561 | /* go through A_WAIT_VFALL then start a new session */ | ||
562 | if (!ignore) | ||
563 | musb_set_vbus(musb, 0); | ||
564 | handled = IRQ_HANDLED; | ||
565 | } | ||
566 | |||
567 | if (int_usb & MUSB_INTR_CONNECT) { | ||
568 | struct usb_hcd *hcd = musb_to_hcd(musb); | ||
569 | |||
570 | handled = IRQ_HANDLED; | ||
571 | musb->is_active = 1; | ||
572 | set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); | ||
573 | |||
574 | musb->ep0_stage = MUSB_EP0_START; | ||
575 | |||
576 | #ifdef CONFIG_USB_MUSB_OTG | ||
577 | /* flush endpoints when transitioning from Device Mode */ | ||
578 | if (is_peripheral_active(musb)) { | ||
579 | /* REVISIT HNP; just force disconnect */ | ||
580 | } | ||
581 | musb_writew(mbase, MUSB_INTRTXE, musb->epmask); | ||
582 | musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe); | ||
583 | musb_writeb(mbase, MUSB_INTRUSBE, 0xf7); | ||
584 | #endif | ||
585 | musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED | ||
586 | |USB_PORT_STAT_HIGH_SPEED | ||
587 | |USB_PORT_STAT_ENABLE | ||
588 | ); | ||
589 | musb->port1_status |= USB_PORT_STAT_CONNECTION | ||
590 | |(USB_PORT_STAT_C_CONNECTION << 16); | ||
591 | |||
592 | /* high vs full speed is just a guess until after reset */ | ||
593 | if (devctl & MUSB_DEVCTL_LSDEV) | ||
594 | musb->port1_status |= USB_PORT_STAT_LOW_SPEED; | ||
595 | |||
596 | if (hcd->status_urb) | ||
597 | usb_hcd_poll_rh_status(hcd); | ||
598 | else | ||
599 | usb_hcd_resume_root_hub(hcd); | ||
600 | |||
601 | MUSB_HST_MODE(musb); | ||
602 | |||
603 | /* indicate new connection to OTG machine */ | ||
604 | switch (musb->xceiv.state) { | ||
605 | case OTG_STATE_B_PERIPHERAL: | ||
606 | if (int_usb & MUSB_INTR_SUSPEND) { | ||
607 | DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n"); | ||
608 | musb->xceiv.state = OTG_STATE_B_HOST; | ||
609 | hcd->self.is_b_host = 1; | ||
610 | int_usb &= ~MUSB_INTR_SUSPEND; | ||
611 | } else | ||
612 | DBG(1, "CONNECT as b_peripheral???\n"); | ||
613 | break; | ||
614 | case OTG_STATE_B_WAIT_ACON: | ||
615 | DBG(1, "HNP: Waiting to switch to b_host state\n"); | ||
616 | musb->xceiv.state = OTG_STATE_B_HOST; | ||
617 | hcd->self.is_b_host = 1; | ||
618 | break; | ||
619 | default: | ||
620 | if ((devctl & MUSB_DEVCTL_VBUS) | ||
621 | == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { | ||
622 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
623 | hcd->self.is_b_host = 0; | ||
624 | } | ||
625 | break; | ||
626 | } | ||
627 | DBG(1, "CONNECT (%s) devctl %02x\n", | ||
628 | otg_state_string(musb), devctl); | ||
629 | } | ||
630 | #endif /* CONFIG_USB_MUSB_HDRC_HCD */ | ||
631 | |||
632 | /* mentor saves a bit: bus reset and babble share the same irq. | ||
633 | * only host sees babble; only peripheral sees bus reset. | ||
634 | */ | ||
635 | if (int_usb & MUSB_INTR_RESET) { | ||
636 | if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { | ||
637 | /* | ||
638 | * Looks like non-HS BABBLE can be ignored, but | ||
639 | * HS BABBLE is an error condition. For HS the solution | ||
640 | * is to avoid babble in the first place and fix what | ||
641 | * caused BABBLE. When HS BABBLE happens we can only | ||
642 | * stop the session. | ||
643 | */ | ||
644 | if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV)) | ||
645 | DBG(1, "BABBLE devctl: %02x\n", devctl); | ||
646 | else { | ||
647 | ERR("Stopping host session -- babble\n"); | ||
648 | musb_writeb(mbase, MUSB_DEVCTL, 0); | ||
649 | } | ||
650 | } else if (is_peripheral_capable()) { | ||
651 | DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); | ||
652 | switch (musb->xceiv.state) { | ||
653 | #ifdef CONFIG_USB_OTG | ||
654 | case OTG_STATE_A_SUSPEND: | ||
655 | /* We need to ignore disconnect on suspend | ||
656 | * otherwise tusb 2.0 won't reconnect after a | ||
657 | * power cycle, which breaks otg compliance. | ||
658 | */ | ||
659 | musb->ignore_disconnect = 1; | ||
660 | musb_g_reset(musb); | ||
661 | /* FALLTHROUGH */ | ||
662 | case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ | ||
663 | DBG(1, "HNP: Setting timer as %s\n", | ||
664 | otg_state_string(musb)); | ||
665 | musb_otg_timer.data = (unsigned long)musb; | ||
666 | mod_timer(&musb_otg_timer, jiffies | ||
667 | + msecs_to_jiffies(100)); | ||
668 | break; | ||
669 | case OTG_STATE_A_PERIPHERAL: | ||
670 | musb_hnp_stop(musb); | ||
671 | break; | ||
672 | case OTG_STATE_B_WAIT_ACON: | ||
673 | DBG(1, "HNP: RESET (%s), to b_peripheral\n", | ||
674 | otg_state_string(musb)); | ||
675 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
676 | musb_g_reset(musb); | ||
677 | break; | ||
678 | #endif | ||
679 | case OTG_STATE_B_IDLE: | ||
680 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
681 | /* FALLTHROUGH */ | ||
682 | case OTG_STATE_B_PERIPHERAL: | ||
683 | musb_g_reset(musb); | ||
684 | break; | ||
685 | default: | ||
686 | DBG(1, "Unhandled BUS RESET as %s\n", | ||
687 | otg_state_string(musb)); | ||
688 | } | ||
689 | } | ||
690 | |||
691 | handled = IRQ_HANDLED; | ||
692 | } | ||
693 | schedule_work(&musb->irq_work); | ||
694 | |||
695 | return handled; | ||
696 | } | ||
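Several branches above compare the two-bit DEVCTL VBUS field against 3 << MUSB_DEVCTL_VBUS_SHIFT ("above VBusValid"), and the debug print names the four levels. The sketch below decodes that field; the shift and mask values are assumptions for illustration only (the driver's real definitions live in its register header).

/* Sketch of decoding the two-bit DEVCTL.VBUS field used above.
 * DEVCTL_VBUS_SHIFT/MASK are assumed values for illustration.
 */
#include <stdio.h>

#define DEVCTL_VBUS_SHIFT	3
#define DEVCTL_VBUS_MASK	(3 << DEVCTL_VBUS_SHIFT)

static const char *vbus_level(unsigned char devctl)
{
	switch ((devctl & DEVCTL_VBUS_MASK) >> DEVCTL_VBUS_SHIFT) {
	case 0: return "below SessionEnd";
	case 1: return "above SessionEnd, below AValid";
	case 2: return "above AValid, below VBusValid";
	default: return "above VBusValid";	/* the only level a host should trust */
	}
}

int main(void)
{
	printf("devctl 0x19: %s\n", vbus_level(0x19));
	return 0;
}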
697 | |||
698 | /* | ||
699 | * Second stage of the "global" interrupt handling: disconnect and | ||
700 | * suspend (and, when enabled, SOF) are processed here, after the | ||
701 | * endpoint FIFO interrupts have been serviced; the order of the | ||
702 | * tests follows the manual | ||
703 | * | ||
704 | * @param musb instance pointer | ||
705 | * @param int_usb register contents | ||
706 | * @param devctl | ||
707 | * @param power | ||
708 | */ | ||
709 | static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, | ||
710 | u8 devctl, u8 power) | ||
711 | { | ||
712 | irqreturn_t handled = IRQ_NONE; | ||
713 | |||
714 | #if 0 | ||
715 | /* REVISIT ... this would be for multiplexing periodic endpoints, or | ||
716 | * supporting transfer phasing to prevent exceeding ISO bandwidth | ||
717 | * limits of a given frame or microframe. | ||
718 | * | ||
719 | * It's not needed for peripheral side, which dedicates endpoints; | ||
720 | * though it _might_ use SOF irqs for other purposes. | ||
721 | * | ||
722 | * And it's not currently needed for host side, which also dedicates | ||
723 | * endpoints, relies on TX/RX interval registers, and isn't claimed | ||
724 | * to support ISO transfers yet. | ||
725 | */ | ||
726 | if (int_usb & MUSB_INTR_SOF) { | ||
727 | void __iomem *mbase = musb->mregs; | ||
728 | struct musb_hw_ep *ep; | ||
729 | u8 epnum; | ||
730 | u16 frame; | ||
731 | |||
732 | DBG(6, "START_OF_FRAME\n"); | ||
733 | handled = IRQ_HANDLED; | ||
734 | |||
735 | /* start any periodic Tx transfers waiting for current frame */ | ||
736 | frame = musb_readw(mbase, MUSB_FRAME); | ||
737 | ep = musb->endpoints; | ||
738 | for (epnum = 1; (epnum < musb->nr_endpoints) | ||
739 | && (musb->epmask >= (1 << epnum)); | ||
740 | epnum++, ep++) { | ||
741 | /* | ||
742 | * FIXME handle framecounter wraps (12 bits) | ||
743 | * eliminate duplicated StartUrb logic | ||
744 | */ | ||
745 | if (ep->dwWaitFrame >= frame) { | ||
746 | ep->dwWaitFrame = 0; | ||
747 | pr_debug("SOF --> periodic TX%s on %d\n", | ||
748 | ep->tx_channel ? " DMA" : "", | ||
749 | epnum); | ||
750 | if (!ep->tx_channel) | ||
751 | musb_h_tx_start(musb, epnum); | ||
752 | else | ||
753 | cppi_hostdma_start(musb, epnum); | ||
754 | } | ||
755 | } /* end of for loop */ | ||
756 | } | ||
757 | #endif | ||
758 | |||
759 | if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { | ||
760 | DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", | ||
761 | otg_state_string(musb), | ||
762 | MUSB_MODE(musb), devctl); | ||
763 | handled = IRQ_HANDLED; | ||
764 | |||
765 | switch (musb->xceiv.state) { | ||
766 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
767 | case OTG_STATE_A_HOST: | ||
768 | case OTG_STATE_A_SUSPEND: | ||
769 | musb_root_disconnect(musb); | ||
770 | if (musb->a_wait_bcon != 0) | ||
771 | musb_platform_try_idle(musb, jiffies | ||
772 | + msecs_to_jiffies(musb->a_wait_bcon)); | ||
773 | break; | ||
774 | #endif /* HOST */ | ||
775 | #ifdef CONFIG_USB_MUSB_OTG | ||
776 | case OTG_STATE_B_HOST: | ||
777 | musb_hnp_stop(musb); | ||
778 | break; | ||
779 | case OTG_STATE_A_PERIPHERAL: | ||
780 | musb_hnp_stop(musb); | ||
781 | musb_root_disconnect(musb); | ||
782 | /* FALLTHROUGH */ | ||
783 | case OTG_STATE_B_WAIT_ACON: | ||
784 | /* FALLTHROUGH */ | ||
785 | #endif /* OTG */ | ||
786 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
787 | case OTG_STATE_B_PERIPHERAL: | ||
788 | case OTG_STATE_B_IDLE: | ||
789 | musb_g_disconnect(musb); | ||
790 | break; | ||
791 | #endif /* GADGET */ | ||
792 | default: | ||
793 | WARNING("unhandled DISCONNECT transition (%s)\n", | ||
794 | otg_state_string(musb)); | ||
795 | break; | ||
796 | } | ||
797 | |||
798 | schedule_work(&musb->irq_work); | ||
799 | } | ||
800 | |||
801 | if (int_usb & MUSB_INTR_SUSPEND) { | ||
802 | DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", | ||
803 | otg_state_string(musb), devctl, power); | ||
804 | handled = IRQ_HANDLED; | ||
805 | |||
806 | switch (musb->xceiv.state) { | ||
807 | #ifdef CONFIG_USB_MUSB_OTG | ||
808 | case OTG_STATE_A_PERIPHERAL: | ||
809 | /* | ||
810 | * We cannot stop HNP here, devctl BDEVICE might be | ||
811 | * still set. | ||
812 | */ | ||
813 | break; | ||
814 | #endif | ||
815 | case OTG_STATE_B_PERIPHERAL: | ||
816 | musb_g_suspend(musb); | ||
817 | musb->is_active = is_otg_enabled(musb) | ||
818 | && musb->xceiv.gadget->b_hnp_enable; | ||
819 | if (musb->is_active) { | ||
820 | #ifdef CONFIG_USB_MUSB_OTG | ||
821 | musb->xceiv.state = OTG_STATE_B_WAIT_ACON; | ||
822 | DBG(1, "HNP: Setting timer for b_ase0_brst\n"); | ||
823 | musb_otg_timer.data = (unsigned long)musb; | ||
824 | mod_timer(&musb_otg_timer, jiffies | ||
825 | + msecs_to_jiffies(TB_ASE0_BRST)); | ||
826 | #endif | ||
827 | } | ||
828 | break; | ||
829 | case OTG_STATE_A_WAIT_BCON: | ||
830 | if (musb->a_wait_bcon != 0) | ||
831 | musb_platform_try_idle(musb, jiffies | ||
832 | + msecs_to_jiffies(musb->a_wait_bcon)); | ||
833 | break; | ||
834 | case OTG_STATE_A_HOST: | ||
835 | musb->xceiv.state = OTG_STATE_A_SUSPEND; | ||
836 | musb->is_active = is_otg_enabled(musb) | ||
837 | && musb->xceiv.host->b_hnp_enable; | ||
838 | break; | ||
839 | case OTG_STATE_B_HOST: | ||
840 | /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ | ||
841 | DBG(1, "REVISIT: SUSPEND as B_HOST\n"); | ||
842 | break; | ||
843 | default: | ||
844 | /* "should not happen" */ | ||
845 | musb->is_active = 0; | ||
846 | break; | ||
847 | } | ||
848 | schedule_work(&musb->irq_work); | ||
849 | } | ||
850 | |||
851 | |||
852 | return handled; | ||
853 | } | ||
854 | |||
855 | /*-------------------------------------------------------------------------*/ | ||
856 | |||
857 | /* | ||
858 | * Program the HDRC to start (enable interrupts, dma, etc.). | ||
859 | */ | ||
860 | void musb_start(struct musb *musb) | ||
861 | { | ||
862 | void __iomem *regs = musb->mregs; | ||
863 | u8 devctl = musb_readb(regs, MUSB_DEVCTL); | ||
864 | |||
865 | DBG(2, "<== devctl %02x\n", devctl); | ||
866 | |||
867 | /* Set INT enable registers, enable interrupts */ | ||
868 | musb_writew(regs, MUSB_INTRTXE, musb->epmask); | ||
869 | musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe); | ||
870 | musb_writeb(regs, MUSB_INTRUSBE, 0xf7); | ||
871 | |||
872 | musb_writeb(regs, MUSB_TESTMODE, 0); | ||
873 | |||
874 | /* put into basic highspeed mode and start session */ | ||
875 | musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE | ||
876 | | MUSB_POWER_SOFTCONN | ||
877 | | MUSB_POWER_HSENAB | ||
878 | /* ENSUSPEND wedges tusb */ | ||
879 | /* | MUSB_POWER_ENSUSPEND */ | ||
880 | ); | ||
881 | |||
882 | musb->is_active = 0; | ||
883 | devctl = musb_readb(regs, MUSB_DEVCTL); | ||
884 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
885 | |||
886 | if (is_otg_enabled(musb)) { | ||
887 | /* session started after: | ||
888 | * (a) ID-grounded irq, host mode; | ||
889 | * (b) vbus present/connect IRQ, peripheral mode; | ||
890 | * (c) peripheral initiates, using SRP | ||
891 | */ | ||
892 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | ||
893 | musb->is_active = 1; | ||
894 | else | ||
895 | devctl |= MUSB_DEVCTL_SESSION; | ||
896 | |||
897 | } else if (is_host_enabled(musb)) { | ||
898 | /* assume ID pin is hard-wired to ground */ | ||
899 | devctl |= MUSB_DEVCTL_SESSION; | ||
900 | |||
901 | } else /* peripheral is enabled */ { | ||
902 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | ||
903 | musb->is_active = 1; | ||
904 | } | ||
905 | musb_platform_enable(musb); | ||
906 | musb_writeb(regs, MUSB_DEVCTL, devctl); | ||
907 | } | ||
908 | |||
909 | |||
910 | static void musb_generic_disable(struct musb *musb) | ||
911 | { | ||
912 | void __iomem *mbase = musb->mregs; | ||
913 | u16 temp; | ||
914 | |||
915 | /* disable interrupts */ | ||
916 | musb_writeb(mbase, MUSB_INTRUSBE, 0); | ||
917 | musb_writew(mbase, MUSB_INTRTXE, 0); | ||
918 | musb_writew(mbase, MUSB_INTRRXE, 0); | ||
919 | |||
920 | /* off */ | ||
921 | musb_writeb(mbase, MUSB_DEVCTL, 0); | ||
922 | |||
923 | /* flush pending interrupts */ | ||
924 | temp = musb_readb(mbase, MUSB_INTRUSB); | ||
925 | temp = musb_readw(mbase, MUSB_INTRTX); | ||
926 | temp = musb_readw(mbase, MUSB_INTRRX); | ||
927 | |||
928 | } | ||
929 | |||
930 | /* | ||
931 | * Make the HDRC stop (disable interrupts, etc.); | ||
932 | * reversible by musb_start | ||
933 | * called on gadget driver unregister | ||
934 | * with controller locked, irqs blocked | ||
935 | * acts as a NOP unless some role activated the hardware | ||
936 | */ | ||
937 | void musb_stop(struct musb *musb) | ||
938 | { | ||
939 | /* stop IRQs, timers, ... */ | ||
940 | musb_platform_disable(musb); | ||
941 | musb_generic_disable(musb); | ||
942 | DBG(3, "HDRC disabled\n"); | ||
943 | |||
944 | /* FIXME | ||
945 | * - mark host and/or peripheral drivers unusable/inactive | ||
946 | * - disable DMA (and enable it in HdrcStart) | ||
947 | * - make sure we can musb_start() after musb_stop(); with | ||
948 | * OTG mode, gadget driver module rmmod/modprobe cycles that | ||
949 | * - ... | ||
950 | */ | ||
951 | musb_platform_try_idle(musb, 0); | ||
952 | } | ||
953 | |||
954 | static void musb_shutdown(struct platform_device *pdev) | ||
955 | { | ||
956 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
957 | unsigned long flags; | ||
958 | |||
959 | spin_lock_irqsave(&musb->lock, flags); | ||
960 | musb_platform_disable(musb); | ||
961 | musb_generic_disable(musb); | ||
962 | if (musb->clock) { | ||
963 | clk_put(musb->clock); | ||
964 | musb->clock = NULL; | ||
965 | } | ||
966 | spin_unlock_irqrestore(&musb->lock, flags); | ||
967 | |||
968 | /* FIXME power down */ | ||
969 | } | ||
970 | |||
971 | |||
972 | /*-------------------------------------------------------------------------*/ | ||
973 | |||
974 | /* | ||
975 | * The silicon either has hard-wired endpoint configurations, or else | ||
976 | * "dynamic fifo" sizing. The driver has support for both, though at this | ||
977 | * writing only the dynamic sizing is very well tested. We use normal | ||
978 | * idioms so both modes are compile-tested, but dead code elimination | ||
979 | * leaves only the relevant one in the object file. | ||
980 | * | ||
981 | * We don't currently use dynamic fifo setup capability to do anything | ||
982 | * more than selecting one of a bunch of predefined configurations. | ||
983 | */ | ||
984 | #if defined(CONFIG_USB_TUSB6010) || \ | ||
985 | defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) | ||
986 | static ushort __initdata fifo_mode = 4; | ||
987 | #else | ||
988 | static ushort __initdata fifo_mode = 2; | ||
989 | #endif | ||
990 | |||
991 | /* "modprobe ... fifo_mode=1" etc */ | ||
992 | module_param(fifo_mode, ushort, 0); | ||
993 | MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration"); | ||
994 | |||
995 | |||
996 | enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed)); | ||
997 | enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed)); | ||
998 | |||
999 | struct fifo_cfg { | ||
1000 | u8 hw_ep_num; | ||
1001 | enum fifo_style style; | ||
1002 | enum buf_mode mode; | ||
1003 | u16 maxpacket; | ||
1004 | }; | ||
1005 | |||
1006 | /* | ||
1007 | * tables defining fifo_mode values. define more if you like. | ||
1008 | * for host side, make sure both halves of ep1 are set up. | ||
1009 | */ | ||
1010 | |||
1011 | /* mode 0 - fits in 2KB */ | ||
1012 | static struct fifo_cfg __initdata mode_0_cfg[] = { | ||
1013 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, | ||
1014 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, | ||
1015 | { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, }, | ||
1016 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1017 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1018 | }; | ||
1019 | |||
1020 | /* mode 1 - fits in 4KB */ | ||
1021 | static struct fifo_cfg __initdata mode_1_cfg[] = { | ||
1022 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
1023 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
1024 | { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
1025 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1026 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1027 | }; | ||
1028 | |||
1029 | /* mode 2 - fits in 4KB */ | ||
1030 | static struct fifo_cfg __initdata mode_2_cfg[] = { | ||
1031 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, | ||
1032 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, | ||
1033 | { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, | ||
1034 | { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, | ||
1035 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1036 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1037 | }; | ||
1038 | |||
1039 | /* mode 3 - fits in 4KB */ | ||
1040 | static struct fifo_cfg __initdata mode_3_cfg[] = { | ||
1041 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
1042 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, | ||
1043 | { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, | ||
1044 | { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, | ||
1045 | { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1046 | { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, | ||
1047 | }; | ||
1048 | |||
1049 | /* mode 4 - fits in 16KB */ | ||
1050 | static struct fifo_cfg __initdata mode_4_cfg[] = { | ||
1051 | { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, | ||
1052 | { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, | ||
1053 | { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, | ||
1054 | { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, | ||
1055 | { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, | ||
1056 | { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, | ||
1057 | { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, | ||
1058 | { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, | ||
1059 | { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, | ||
1060 | { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, | ||
1061 | { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, }, | ||
1062 | { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, }, | ||
1063 | { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, }, | ||
1064 | { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, | ||
1065 | { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, }, | ||
1066 | { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, | ||
1067 | { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, | ||
1068 | { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, | ||
1069 | { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, }, | ||
1070 | { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, }, | ||
1071 | { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, }, | ||
1072 | { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, }, | ||
1073 | { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, }, | ||
1074 | { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, }, | ||
1075 | { .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, }, | ||
1076 | { .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, }, | ||
1077 | { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, | ||
1078 | { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, | ||
1079 | }; | ||
1080 | |||
1081 | |||
1082 | /* | ||
1083 | * configure a fifo; for non-shared endpoints, this may be called | ||
1084 | * once for a tx fifo and once for an rx fifo. | ||
1085 | * | ||
1086 | * returns negative errno or offset for next fifo. | ||
1087 | */ | ||
1088 | static int __init | ||
1089 | fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep, | ||
1090 | const struct fifo_cfg *cfg, u16 offset) | ||
1091 | { | ||
1092 | void __iomem *mbase = musb->mregs; | ||
1093 | int size = 0; | ||
1094 | u16 maxpacket = cfg->maxpacket; | ||
1095 | u16 c_off = offset >> 3; | ||
1096 | u8 c_size; | ||
1097 | |||
1098 | /* expect hw_ep has already been zero-initialized */ | ||
1099 | |||
1100 | size = ffs(max(maxpacket, (u16) 8)) - 1; | ||
1101 | maxpacket = 1 << size; | ||
1102 | |||
1103 | c_size = size - 3; | ||
1104 | if (cfg->mode == BUF_DOUBLE) { | ||
1105 | if ((offset + (maxpacket << 1)) > | ||
1106 | (1 << (musb->config->ram_bits + 2))) | ||
1107 | return -EMSGSIZE; | ||
1108 | c_size |= MUSB_FIFOSZ_DPB; | ||
1109 | } else { | ||
1110 | if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2))) | ||
1111 | return -EMSGSIZE; | ||
1112 | } | ||
1113 | |||
1114 | /* configure the FIFO */ | ||
1115 | musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum); | ||
1116 | |||
1117 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1118 | /* EP0 reserved endpoint for control, bidirectional; | ||
1119 | * EP1 reserved for bulk, two unidirectional halves. | ||
1120 | */ | ||
1121 | if (hw_ep->epnum == 1) | ||
1122 | musb->bulk_ep = hw_ep; | ||
1123 | /* REVISIT error check: be sure ep0 can both rx and tx ... */ | ||
1124 | #endif | ||
1125 | switch (cfg->style) { | ||
1126 | case FIFO_TX: | ||
1127 | musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); | ||
1128 | musb_writew(mbase, MUSB_TXFIFOADD, c_off); | ||
1129 | hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); | ||
1130 | hw_ep->max_packet_sz_tx = maxpacket; | ||
1131 | break; | ||
1132 | case FIFO_RX: | ||
1133 | musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); | ||
1134 | musb_writew(mbase, MUSB_RXFIFOADD, c_off); | ||
1135 | hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); | ||
1136 | hw_ep->max_packet_sz_rx = maxpacket; | ||
1137 | break; | ||
1138 | case FIFO_RXTX: | ||
1139 | musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); | ||
1140 | musb_writew(mbase, MUSB_TXFIFOADD, c_off); | ||
1141 | hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); | ||
1142 | hw_ep->max_packet_sz_rx = maxpacket; | ||
1143 | |||
1144 | musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); | ||
1145 | musb_writew(mbase, MUSB_RXFIFOADD, c_off); | ||
1146 | hw_ep->tx_double_buffered = hw_ep->rx_double_buffered; | ||
1147 | hw_ep->max_packet_sz_tx = maxpacket; | ||
1148 | |||
1149 | hw_ep->is_shared_fifo = true; | ||
1150 | break; | ||
1151 | } | ||
1152 | |||
1153 | /* NOTE rx and tx endpoint irqs aren't managed separately, | ||
1154 | * which happens to be ok | ||
1155 | */ | ||
1156 | musb->epmask |= (1 << hw_ep->epnum); | ||
1157 | |||
1158 | return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0)); | ||
1159 | } | ||
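fifo_setup() above rounds each maxpacket up to a power of two (minimum 8 bytes), encodes log2(size) - 3 into the FIFOSZ register, and doubles the allocation for double-buffered endpoints, failing once the running offset would exceed the 1 << (ram_bits + 2) bytes of FIFO RAM. A standalone sketch of that budgeting for a mode-1-style table, with an assumed ram_bits of 10 (4 KB):

/* Sketch of the FIFO RAM budgeting done by fifo_setup(): round each
 * maxpacket up to a power of two, double it when double-buffered, and
 * fail once the running offset exceeds 1 << (ram_bits + 2) bytes.
 * Standalone illustration; the table and ram_bits are assumed values.
 */
#include <stdio.h>

struct cfg { unsigned maxpacket; int double_buffered; };

static unsigned roundup_pow2(unsigned v)
{
	unsigned p = 8;			/* hardware minimum FIFO size */
	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	const struct cfg table[] = {	/* roughly the "mode 1" layout above */
		{ 512, 1 }, { 512, 1 }, { 512, 1 }, { 256, 0 }, { 256, 0 },
	};
	const unsigned ram_bits = 10;	/* assumed: 4 KB of FIFO RAM */
	unsigned ram = 1u << (ram_bits + 2);
	unsigned offset = 64;		/* ep0 FIFO comes first */

	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		unsigned sz = roundup_pow2(table[i].maxpacket);

		if (table[i].double_buffered)
			sz <<= 1;
		if (offset + sz > ram) {
			printf("entry %u overruns FIFO RAM\n", i);
			return 1;
		}
		offset += sz;
	}
	printf("used %u of %u bytes\n", offset, ram);
	return 0;
}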
1160 | |||
1161 | static struct fifo_cfg __initdata ep0_cfg = { | ||
1162 | .style = FIFO_RXTX, .maxpacket = 64, | ||
1163 | }; | ||
1164 | |||
1165 | static int __init ep_config_from_table(struct musb *musb) | ||
1166 | { | ||
1167 | const struct fifo_cfg *cfg; | ||
1168 | unsigned i, n; | ||
1169 | int offset; | ||
1170 | struct musb_hw_ep *hw_ep = musb->endpoints; | ||
1171 | |||
1172 | switch (fifo_mode) { | ||
1173 | default: | ||
1174 | fifo_mode = 0; | ||
1175 | /* FALLTHROUGH */ | ||
1176 | case 0: | ||
1177 | cfg = mode_0_cfg; | ||
1178 | n = ARRAY_SIZE(mode_0_cfg); | ||
1179 | break; | ||
1180 | case 1: | ||
1181 | cfg = mode_1_cfg; | ||
1182 | n = ARRAY_SIZE(mode_1_cfg); | ||
1183 | break; | ||
1184 | case 2: | ||
1185 | cfg = mode_2_cfg; | ||
1186 | n = ARRAY_SIZE(mode_2_cfg); | ||
1187 | break; | ||
1188 | case 3: | ||
1189 | cfg = mode_3_cfg; | ||
1190 | n = ARRAY_SIZE(mode_3_cfg); | ||
1191 | break; | ||
1192 | case 4: | ||
1193 | cfg = mode_4_cfg; | ||
1194 | n = ARRAY_SIZE(mode_4_cfg); | ||
1195 | break; | ||
1196 | } | ||
1197 | |||
1198 | printk(KERN_DEBUG "%s: setup fifo_mode %d\n", | ||
1199 | musb_driver_name, fifo_mode); | ||
1200 | |||
1201 | |||
1202 | offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0); | ||
1203 | /* assert(offset > 0) */ | ||
1204 | |||
1205 | /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would | ||
1206 | * be better than static musb->config->num_eps and DYN_FIFO_SIZE... | ||
1207 | */ | ||
1208 | |||
1209 | for (i = 0; i < n; i++) { | ||
1210 | u8 epn = cfg->hw_ep_num; | ||
1211 | |||
1212 | if (epn >= musb->config->num_eps) { | ||
1213 | pr_debug("%s: invalid ep %d\n", | ||
1214 | musb_driver_name, epn); | ||
1215 | continue; | ||
1216 | } | ||
1217 | offset = fifo_setup(musb, hw_ep + epn, cfg++, offset); | ||
1218 | if (offset < 0) { | ||
1219 | pr_debug("%s: mem overrun, ep %d\n", | ||
1220 | musb_driver_name, epn); | ||
1221 | return -EINVAL; | ||
1222 | } | ||
1223 | epn++; | ||
1224 | musb->nr_endpoints = max(epn, musb->nr_endpoints); | ||
1225 | } | ||
1226 | |||
1227 | printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", | ||
1228 | musb_driver_name, | ||
1229 | n + 1, musb->config->num_eps * 2 - 1, | ||
1230 | offset, (1 << (musb->config->ram_bits + 2))); | ||
1231 | |||
1232 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1233 | if (!musb->bulk_ep) { | ||
1234 | pr_debug("%s: missing bulk\n", musb_driver_name); | ||
1235 | return -EINVAL; | ||
1236 | } | ||
1237 | #endif | ||
1238 | |||
1239 | return 0; | ||
1240 | } | ||
1241 | |||
1242 | |||
1243 | /* | ||
1244 | * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false | ||
1245 | * @param musb the controller | ||
1246 | */ | ||
1247 | static int __init ep_config_from_hw(struct musb *musb) | ||
1248 | { | ||
1249 | u8 epnum = 0, reg; | ||
1250 | struct musb_hw_ep *hw_ep; | ||
1251 | void *mbase = musb->mregs; | ||
1252 | |||
1253 | DBG(2, "<== static silicon ep config\n"); | ||
1254 | |||
1255 | /* FIXME pick up ep0 maxpacket size */ | ||
1256 | |||
1257 | for (epnum = 1; epnum < musb->config->num_eps; epnum++) { | ||
1258 | musb_ep_select(mbase, epnum); | ||
1259 | hw_ep = musb->endpoints + epnum; | ||
1260 | |||
1261 | /* read from core using indexed model */ | ||
1262 | reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE); | ||
1263 | if (!reg) { | ||
1264 | /* 0's returned when no more endpoints */ | ||
1265 | break; | ||
1266 | } | ||
1267 | musb->nr_endpoints++; | ||
1268 | musb->epmask |= (1 << epnum); | ||
1269 | |||
1270 | hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f); | ||
1271 | |||
1272 | /* shared TX/RX FIFO? */ | ||
1273 | if ((reg & 0xf0) == 0xf0) { | ||
1274 | hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx; | ||
1275 | hw_ep->is_shared_fifo = true; | ||
1276 | continue; | ||
1277 | } else { | ||
1278 | hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4); | ||
1279 | hw_ep->is_shared_fifo = false; | ||
1280 | } | ||
1281 | |||
1282 | /* FIXME set up hw_ep->{rx,tx}_double_buffered */ | ||
1283 | |||
1284 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1285 | /* pick an RX/TX endpoint for bulk */ | ||
1286 | if (hw_ep->max_packet_sz_tx < 512 | ||
1287 | || hw_ep->max_packet_sz_rx < 512) | ||
1288 | continue; | ||
1289 | |||
1290 | /* REVISIT: this algorithm is lazy, we should at least | ||
1291 | * try to pick a double buffered endpoint. | ||
1292 | */ | ||
1293 | if (musb->bulk_ep) | ||
1294 | continue; | ||
1295 | musb->bulk_ep = hw_ep; | ||
1296 | #endif | ||
1297 | } | ||
1298 | |||
1299 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1300 | if (!musb->bulk_ep) { | ||
1301 | pr_debug("%s: missing bulk\n", musb_driver_name); | ||
1302 | return -EINVAL; | ||
1303 | } | ||
1304 | #endif | ||
1305 | |||
1306 | return 0; | ||
1307 | } | ||
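ep_config_from_hw() above decodes the per-endpoint FIFOSIZE register: the low nibble is log2 of the TX FIFO size, the high nibble is log2 of the RX FIFO size, and a high nibble of 0xF marks one FIFO shared between both directions. A standalone sketch of that decode:

/* Sketch of the FIFOSIZE decode used above.  Standalone illustration. */
#include <stdbool.h>
#include <stdio.h>

static void decode_fifosize(unsigned char reg)
{
	unsigned tx = 1u << (reg & 0x0f);
	bool shared = (reg & 0xf0) == 0xf0;
	unsigned rx = shared ? tx : 1u << ((reg & 0xf0) >> 4);

	printf("tx %u bytes, rx %u bytes%s\n", tx, rx, shared ? " (shared)" : "");
}

int main(void)
{
	decode_fifosize(0x99);	/* 512-byte TX and RX FIFOs */
	decode_fifosize(0xf9);	/* one 512-byte FIFO shared by TX and RX */
	return 0;
}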
1308 | |||
1309 | enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, }; | ||
1310 | |||
1311 | /* Initialize MUSB (M)HDRC part of the USB hardware subsystem; | ||
1312 | * configure endpoints, or take their config from silicon | ||
1313 | */ | ||
1314 | static int __init musb_core_init(u16 musb_type, struct musb *musb) | ||
1315 | { | ||
1316 | #ifdef MUSB_AHB_ID | ||
1317 | u32 data; | ||
1318 | #endif | ||
1319 | u8 reg; | ||
1320 | char *type; | ||
1321 | u16 hwvers, rev_major, rev_minor; | ||
1322 | char aInfo[78], aRevision[32], aDate[12]; | ||
1323 | void __iomem *mbase = musb->mregs; | ||
1324 | int status = 0; | ||
1325 | int i; | ||
1326 | |||
1327 | /* log core options (read using indexed model) */ | ||
1328 | musb_ep_select(mbase, 0); | ||
1329 | reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); | ||
1330 | |||
1331 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); | ||
1332 | if (reg & MUSB_CONFIGDATA_DYNFIFO) | ||
1333 | strcat(aInfo, ", dyn FIFOs"); | ||
1334 | if (reg & MUSB_CONFIGDATA_MPRXE) { | ||
1335 | strcat(aInfo, ", bulk combine"); | ||
1336 | #ifdef C_MP_RX | ||
1337 | musb->bulk_combine = true; | ||
1338 | #else | ||
1339 | strcat(aInfo, " (X)"); /* no driver support */ | ||
1340 | #endif | ||
1341 | } | ||
1342 | if (reg & MUSB_CONFIGDATA_MPTXE) { | ||
1343 | strcat(aInfo, ", bulk split"); | ||
1344 | #ifdef C_MP_TX | ||
1345 | musb->bulk_split = true; | ||
1346 | #else | ||
1347 | strcat(aInfo, " (X)"); /* no driver support */ | ||
1348 | #endif | ||
1349 | } | ||
1350 | if (reg & MUSB_CONFIGDATA_HBRXE) { | ||
1351 | strcat(aInfo, ", HB-ISO Rx"); | ||
1352 | strcat(aInfo, " (X)"); /* no driver support */ | ||
1353 | } | ||
1354 | if (reg & MUSB_CONFIGDATA_HBTXE) { | ||
1355 | strcat(aInfo, ", HB-ISO Tx"); | ||
1356 | strcat(aInfo, " (X)"); /* no driver support */ | ||
1357 | } | ||
1358 | if (reg & MUSB_CONFIGDATA_SOFTCONE) | ||
1359 | strcat(aInfo, ", SoftConn"); | ||
1360 | |||
1361 | printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", | ||
1362 | musb_driver_name, reg, aInfo); | ||
1363 | |||
1364 | #ifdef MUSB_AHB_ID | ||
1365 | data = musb_readl(mbase, 0x404); | ||
1366 | sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff), | ||
1367 | (data >> 16) & 0xff, (data >> 24) & 0xff); | ||
1368 | /* FIXME ID2 and ID3 are unused */ | ||
1369 | data = musb_readl(mbase, 0x408); | ||
1370 | printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data); | ||
1371 | data = musb_readl(mbase, 0x40c); | ||
1372 | printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data); | ||
1373 | reg = musb_readb(mbase, 0x400); | ||
1374 | musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC; | ||
1375 | #else | ||
1376 | aDate[0] = 0; | ||
1377 | #endif | ||
1378 | if (MUSB_CONTROLLER_MHDRC == musb_type) { | ||
1379 | musb->is_multipoint = 1; | ||
1380 | type = "M"; | ||
1381 | } else { | ||
1382 | musb->is_multipoint = 0; | ||
1383 | type = ""; | ||
1384 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1385 | #ifndef CONFIG_USB_OTG_BLACKLIST_HUB | ||
1386 | printk(KERN_ERR | ||
1387 | "%s: kernel must blacklist external hubs\n", | ||
1388 | musb_driver_name); | ||
1389 | #endif | ||
1390 | #endif | ||
1391 | } | ||
1392 | |||
1393 | /* log release info */ | ||
1394 | hwvers = musb_readw(mbase, MUSB_HWVERS); | ||
1395 | rev_major = (hwvers >> 10) & 0x1f; | ||
1396 | rev_minor = hwvers & 0x3ff; | ||
1397 | snprintf(aRevision, 32, "%d.%d%s", rev_major, | ||
1398 | rev_minor, (hwvers & 0x8000) ? "RC" : ""); | ||
1399 | printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", | ||
1400 | musb_driver_name, type, aRevision, aDate); | ||
1401 | |||
1402 | /* configure ep0 */ | ||
1403 | musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; | ||
1404 | musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; | ||
1405 | |||
1406 | /* discover endpoint configuration */ | ||
1407 | musb->nr_endpoints = 1; | ||
1408 | musb->epmask = 1; | ||
1409 | |||
1410 | if (reg & MUSB_CONFIGDATA_DYNFIFO) { | ||
1411 | if (musb->config->dyn_fifo) | ||
1412 | status = ep_config_from_table(musb); | ||
1413 | else { | ||
1414 | ERR("reconfigure software for Dynamic FIFOs\n"); | ||
1415 | status = -ENODEV; | ||
1416 | } | ||
1417 | } else { | ||
1418 | if (!musb->config->dyn_fifo) | ||
1419 | status = ep_config_from_hw(musb); | ||
1420 | else { | ||
1421 | ERR("reconfigure software for static FIFOs\n"); | ||
1422 | return -ENODEV; | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | if (status < 0) | ||
1427 | return status; | ||
1428 | |||
1429 | /* finish init, and print endpoint config */ | ||
1430 | for (i = 0; i < musb->nr_endpoints; i++) { | ||
1431 | struct musb_hw_ep *hw_ep = musb->endpoints + i; | ||
1432 | |||
1433 | hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; | ||
1434 | #ifdef CONFIG_USB_TUSB6010 | ||
1435 | hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); | ||
1436 | hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); | ||
1437 | hw_ep->fifo_sync_va = | ||
1438 | musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i); | ||
1439 | |||
1440 | if (i == 0) | ||
1441 | hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF; | ||
1442 | else | ||
1443 | hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2); | ||
1444 | #endif | ||
1445 | |||
1446 | hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase; | ||
1447 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1448 | hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase; | ||
1449 | hw_ep->rx_reinit = 1; | ||
1450 | hw_ep->tx_reinit = 1; | ||
1451 | #endif | ||
1452 | |||
1453 | if (hw_ep->max_packet_sz_tx) { | ||
1454 | printk(KERN_DEBUG | ||
1455 | "%s: hw_ep %d%s, %smax %d\n", | ||
1456 | musb_driver_name, i, | ||
1457 | hw_ep->is_shared_fifo ? "shared" : "tx", | ||
1458 | hw_ep->tx_double_buffered | ||
1459 | ? "doublebuffer, " : "", | ||
1460 | hw_ep->max_packet_sz_tx); | ||
1461 | } | ||
1462 | if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { | ||
1463 | printk(KERN_DEBUG | ||
1464 | "%s: hw_ep %d%s, %smax %d\n", | ||
1465 | musb_driver_name, i, | ||
1466 | "rx", | ||
1467 | hw_ep->rx_double_buffered | ||
1468 | ? "doublebuffer, " : "", | ||
1469 | hw_ep->max_packet_sz_rx); | ||
1470 | } | ||
1471 | if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) | ||
1472 | DBG(1, "hw_ep %d not configured\n", i); | ||
1473 | } | ||
1474 | |||
1475 | return 0; | ||
1476 | } | ||
1477 | |||
1478 | /*-------------------------------------------------------------------------*/ | ||
1479 | |||
1480 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | ||
1481 | |||
1482 | static irqreturn_t generic_interrupt(int irq, void *__hci) | ||
1483 | { | ||
1484 | unsigned long flags; | ||
1485 | irqreturn_t retval = IRQ_NONE; | ||
1486 | struct musb *musb = __hci; | ||
1487 | |||
1488 | spin_lock_irqsave(&musb->lock, flags); | ||
1489 | |||
1490 | musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); | ||
1491 | musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); | ||
1492 | musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); | ||
1493 | |||
1494 | if (musb->int_usb || musb->int_tx || musb->int_rx) | ||
1495 | retval = musb_interrupt(musb); | ||
1496 | |||
1497 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1498 | |||
1499 | /* REVISIT we sometimes get spurious IRQs on g_ep0 | ||
1500 | * not clear why... | ||
1501 | */ | ||
1502 | if (retval != IRQ_HANDLED) | ||
1503 | DBG(5, "spurious?\n"); | ||
1504 | |||
1505 | return IRQ_HANDLED; | ||
1506 | } | ||
1507 | |||
1508 | #else | ||
1509 | #define generic_interrupt NULL | ||
1510 | #endif | ||
1511 | |||
1512 | /* | ||
1513 | * handle all the irqs defined by the HDRC core. for now we expect: other | ||
1514 | * irq sources (phy, dma, etc) will be handled first, musb->int_* values | ||
1515 | * will be assigned, and the irq will already have been acked. | ||
1516 | * | ||
1517 | * called in irq context with spinlock held, irqs blocked | ||
1518 | */ | ||
1519 | irqreturn_t musb_interrupt(struct musb *musb) | ||
1520 | { | ||
1521 | irqreturn_t retval = IRQ_NONE; | ||
1522 | u8 devctl, power; | ||
1523 | int ep_num; | ||
1524 | u32 reg; | ||
1525 | |||
1526 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
1527 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
1528 | |||
1529 | DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n", | ||
1530 | (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral", | ||
1531 | musb->int_usb, musb->int_tx, musb->int_rx); | ||
1532 | |||
1533 | /* the core can interrupt us for multiple reasons; docs have | ||
1534 | * a generic interrupt flowchart to follow | ||
1535 | */ | ||
1536 | if (musb->int_usb & STAGE0_MASK) | ||
1537 | retval |= musb_stage0_irq(musb, musb->int_usb, | ||
1538 | devctl, power); | ||
1539 | |||
1540 | /* "stage 1" is handling endpoint irqs */ | ||
1541 | |||
1542 | /* handle endpoint 0 first */ | ||
1543 | if (musb->int_tx & 1) { | ||
1544 | if (devctl & MUSB_DEVCTL_HM) | ||
1545 | retval |= musb_h_ep0_irq(musb); | ||
1546 | else | ||
1547 | retval |= musb_g_ep0_irq(musb); | ||
1548 | } | ||
1549 | |||
1550 | /* RX on endpoints 1-15 */ | ||
1551 | reg = musb->int_rx >> 1; | ||
1552 | ep_num = 1; | ||
1553 | while (reg) { | ||
1554 | if (reg & 1) { | ||
1555 | /* musb_ep_select(musb->mregs, ep_num); */ | ||
1556 | /* REVISIT just retval = ep->rx_irq(...) */ | ||
1557 | retval = IRQ_HANDLED; | ||
1558 | if (devctl & MUSB_DEVCTL_HM) { | ||
1559 | if (is_host_capable()) | ||
1560 | musb_host_rx(musb, ep_num); | ||
1561 | } else { | ||
1562 | if (is_peripheral_capable()) | ||
1563 | musb_g_rx(musb, ep_num); | ||
1564 | } | ||
1565 | } | ||
1566 | |||
1567 | reg >>= 1; | ||
1568 | ep_num++; | ||
1569 | } | ||
1570 | |||
1571 | /* TX on endpoints 1-15 */ | ||
1572 | reg = musb->int_tx >> 1; | ||
1573 | ep_num = 1; | ||
1574 | while (reg) { | ||
1575 | if (reg & 1) { | ||
1576 | /* musb_ep_select(musb->mregs, ep_num); */ | ||
1577 | /* REVISIT just retval |= ep->tx_irq(...) */ | ||
1578 | retval = IRQ_HANDLED; | ||
1579 | if (devctl & MUSB_DEVCTL_HM) { | ||
1580 | if (is_host_capable()) | ||
1581 | musb_host_tx(musb, ep_num); | ||
1582 | } else { | ||
1583 | if (is_peripheral_capable()) | ||
1584 | musb_g_tx(musb, ep_num); | ||
1585 | } | ||
1586 | } | ||
1587 | reg >>= 1; | ||
1588 | ep_num++; | ||
1589 | } | ||
1590 | |||
1591 | /* finish handling "global" interrupts after handling fifos */ | ||
1592 | if (musb->int_usb) | ||
1593 | retval |= musb_stage2_irq(musb, | ||
1594 | musb->int_usb, devctl, power); | ||
1595 | |||
1596 | return retval; | ||
1597 | } | ||
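musb_interrupt() walks the TX and RX interrupt words the same way: bit 0 belongs to endpoint 0 (handled separately), so the word is shifted right once and then scanned bit by bit for endpoints 1-15. A standalone sketch of that walk, with dispatch() standing in for musb_host_rx()/musb_g_rx() and friends:

/* Sketch of the per-endpoint interrupt bit-walk above.
 * dispatch() is a placeholder for the real endpoint handlers.
 */
#include <stdint.h>
#include <stdio.h>

static void dispatch(int ep_num)
{
	printf("servicing endpoint %d\n", ep_num);
}

static void walk_ep_irqs(uint16_t int_reg)
{
	uint16_t reg = int_reg >> 1;	/* skip endpoint 0, handled separately */
	int ep_num = 1;

	while (reg) {
		if (reg & 1)
			dispatch(ep_num);
		reg >>= 1;
		ep_num++;
	}
}

int main(void)
{
	walk_ep_irqs(0x0012);	/* endpoints 1 and 4 pending */
	return 0;
}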
1598 | |||
1599 | |||
1600 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
1601 | static int __initdata use_dma = 1; | ||
1602 | |||
1603 | /* "modprobe ... use_dma=0" etc */ | ||
1604 | module_param(use_dma, bool, 0); | ||
1605 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); | ||
1606 | |||
1607 | void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) | ||
1608 | { | ||
1609 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
1610 | |||
1611 | /* called with controller lock already held */ | ||
1612 | |||
1613 | if (!epnum) { | ||
1614 | #ifndef CONFIG_USB_TUSB_OMAP_DMA | ||
1615 | if (!is_cppi_enabled()) { | ||
1616 | /* endpoint 0 */ | ||
1617 | if (devctl & MUSB_DEVCTL_HM) | ||
1618 | musb_h_ep0_irq(musb); | ||
1619 | else | ||
1620 | musb_g_ep0_irq(musb); | ||
1621 | } | ||
1622 | #endif | ||
1623 | } else { | ||
1624 | /* endpoints 1..15 */ | ||
1625 | if (transmit) { | ||
1626 | if (devctl & MUSB_DEVCTL_HM) { | ||
1627 | if (is_host_capable()) | ||
1628 | musb_host_tx(musb, epnum); | ||
1629 | } else { | ||
1630 | if (is_peripheral_capable()) | ||
1631 | musb_g_tx(musb, epnum); | ||
1632 | } | ||
1633 | } else { | ||
1634 | /* receive */ | ||
1635 | if (devctl & MUSB_DEVCTL_HM) { | ||
1636 | if (is_host_capable()) | ||
1637 | musb_host_rx(musb, epnum); | ||
1638 | } else { | ||
1639 | if (is_peripheral_capable()) | ||
1640 | musb_g_rx(musb, epnum); | ||
1641 | } | ||
1642 | } | ||
1643 | } | ||
1644 | } | ||
1645 | |||
1646 | #else | ||
1647 | #define use_dma 0 | ||
1648 | #endif | ||
1649 | |||
1650 | /*-------------------------------------------------------------------------*/ | ||
1651 | |||
1652 | #ifdef CONFIG_SYSFS | ||
1653 | |||
1654 | static ssize_t | ||
1655 | musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1656 | { | ||
1657 | struct musb *musb = dev_to_musb(dev); | ||
1658 | unsigned long flags; | ||
1659 | int ret = -EINVAL; | ||
1660 | |||
1661 | spin_lock_irqsave(&musb->lock, flags); | ||
1662 | ret = sprintf(buf, "%s\n", otg_state_string(musb)); | ||
1663 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1664 | |||
1665 | return ret; | ||
1666 | } | ||
1667 | |||
1668 | static ssize_t | ||
1669 | musb_mode_store(struct device *dev, struct device_attribute *attr, | ||
1670 | const char *buf, size_t n) | ||
1671 | { | ||
1672 | struct musb *musb = dev_to_musb(dev); | ||
1673 | unsigned long flags; | ||
1674 | |||
1675 | spin_lock_irqsave(&musb->lock, flags); | ||
1676 | if (!strncmp(buf, "host", 4)) | ||
1677 | musb_platform_set_mode(musb, MUSB_HOST); | ||
1678 | if (!strncmp(buf, "peripheral", 10)) | ||
1679 | musb_platform_set_mode(musb, MUSB_PERIPHERAL); | ||
1680 | if (!strncmp(buf, "otg", 3)) | ||
1681 | musb_platform_set_mode(musb, MUSB_OTG); | ||
1682 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1683 | |||
1684 | return n; | ||
1685 | } | ||
1686 | static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store); | ||
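The "mode" attribute above accepts "host", "peripheral" or "otg". Below is a hypothetical user-space sketch that writes it; the sysfs path is an example only and depends on how the controller device is named on the platform bus.

/* Hypothetical user of the "mode" sysfs attribute above.  The path is
 * an example; the real one is platform dependent.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/devices/platform/musb_hdrc/mode";	/* example path */
	const char *mode = "host";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mode, strlen(mode)) < 0)
		perror("write");
	close(fd);
	return 0;
}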
1687 | |||
1688 | static ssize_t | ||
1689 | musb_vbus_store(struct device *dev, struct device_attribute *attr, | ||
1690 | const char *buf, size_t n) | ||
1691 | { | ||
1692 | struct musb *musb = dev_to_musb(dev); | ||
1693 | unsigned long flags; | ||
1694 | unsigned long val; | ||
1695 | |||
1696 | if (sscanf(buf, "%lu", &val) < 1) { | ||
1697 | printk(KERN_ERR "Invalid VBUS timeout ms value\n"); | ||
1698 | return -EINVAL; | ||
1699 | } | ||
1700 | |||
1701 | spin_lock_irqsave(&musb->lock, flags); | ||
1702 | musb->a_wait_bcon = val; | ||
1703 | if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON) | ||
1704 | musb->is_active = 0; | ||
1705 | musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); | ||
1706 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1707 | |||
1708 | return n; | ||
1709 | } | ||
1710 | |||
1711 | static ssize_t | ||
1712 | musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1713 | { | ||
1714 | struct musb *musb = dev_to_musb(dev); | ||
1715 | unsigned long flags; | ||
1716 | unsigned long val; | ||
1717 | int vbus; | ||
1718 | |||
1719 | spin_lock_irqsave(&musb->lock, flags); | ||
1720 | val = musb->a_wait_bcon; | ||
1721 | vbus = musb_platform_get_vbus_status(musb); | ||
1722 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1723 | |||
1724 | return sprintf(buf, "Vbus %s, timeout %lu\n", | ||
1725 | vbus ? "on" : "off", val); | ||
1726 | } | ||
1727 | static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store); | ||
1728 | |||
1729 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
1730 | |||
1731 | /* Gadget drivers can't know that a host is connected so they might want | ||
1732 | * to start SRP, but users can. This allows userspace to trigger SRP. | ||
1733 | */ | ||
1734 | static ssize_t | ||
1735 | musb_srp_store(struct device *dev, struct device_attribute *attr, | ||
1736 | const char *buf, size_t n) | ||
1737 | { | ||
1738 | struct musb *musb = dev_to_musb(dev); | ||
1739 | unsigned short srp; | ||
1740 | |||
1741 | if (sscanf(buf, "%hu", &srp) != 1 | ||
1742 | || (srp != 1)) { | ||
1743 | printk(KERN_ERR "SRP: Value must be 1\n"); | ||
1744 | return -EINVAL; | ||
1745 | } | ||
1746 | |||
1747 | if (srp == 1) | ||
1748 | musb_g_wakeup(musb); | ||
1749 | |||
1750 | return n; | ||
1751 | } | ||
1752 | static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); | ||
1753 | |||
1754 | #endif /* CONFIG_USB_GADGET_MUSB_HDRC */ | ||
1755 | |||
1756 | #endif /* sysfs */ | ||
1757 | |||
1758 | /* Only used to provide driver mode change events */ | ||
1759 | static void musb_irq_work(struct work_struct *data) | ||
1760 | { | ||
1761 | struct musb *musb = container_of(data, struct musb, irq_work); | ||
1762 | static int old_state; | ||
1763 | |||
1764 | if (musb->xceiv.state != old_state) { | ||
1765 | old_state = musb->xceiv.state; | ||
1766 | sysfs_notify(&musb->controller->kobj, NULL, "mode"); | ||
1767 | } | ||
1768 | } | ||
1769 | |||
1770 | /* -------------------------------------------------------------------------- | ||
1771 | * Init support | ||
1772 | */ | ||
1773 | |||
1774 | static struct musb *__init | ||
1775 | allocate_instance(struct device *dev, | ||
1776 | struct musb_hdrc_config *config, void __iomem *mbase) | ||
1777 | { | ||
1778 | struct musb *musb; | ||
1779 | struct musb_hw_ep *ep; | ||
1780 | int epnum; | ||
1781 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1782 | struct usb_hcd *hcd; | ||
1783 | |||
1784 | hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id); | ||
1785 | if (!hcd) | ||
1786 | return NULL; | ||
1787 | /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ | ||
1788 | |||
1789 | musb = hcd_to_musb(hcd); | ||
1790 | INIT_LIST_HEAD(&musb->control); | ||
1791 | INIT_LIST_HEAD(&musb->in_bulk); | ||
1792 | INIT_LIST_HEAD(&musb->out_bulk); | ||
1793 | |||
1794 | hcd->uses_new_polling = 1; | ||
1795 | |||
1796 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | ||
1797 | #else | ||
1798 | musb = kzalloc(sizeof *musb, GFP_KERNEL); | ||
1799 | if (!musb) | ||
1800 | return NULL; | ||
1801 | dev_set_drvdata(dev, musb); | ||
1802 | |||
1803 | #endif | ||
1804 | |||
1805 | musb->mregs = mbase; | ||
1806 | musb->ctrl_base = mbase; | ||
1807 | musb->nIrq = -ENODEV; | ||
1808 | musb->config = config; | ||
1809 | for (epnum = 0, ep = musb->endpoints; | ||
1810 | epnum < musb->config->num_eps; | ||
1811 | epnum++, ep++) { | ||
1812 | |||
1813 | ep->musb = musb; | ||
1814 | ep->epnum = epnum; | ||
1815 | } | ||
1816 | |||
1817 | musb->controller = dev; | ||
1818 | return musb; | ||
1819 | } | ||
1820 | |||
1821 | static void musb_free(struct musb *musb) | ||
1822 | { | ||
1823 | /* this has multiple entry modes. it handles fault cleanup after | ||
1824 | * probe(), where things may be partially set up, as well as rmmod | ||
1825 | * cleanup after everything's been de-activated. | ||
1826 | */ | ||
1827 | |||
1828 | #ifdef CONFIG_SYSFS | ||
1829 | device_remove_file(musb->controller, &dev_attr_mode); | ||
1830 | device_remove_file(musb->controller, &dev_attr_vbus); | ||
1831 | #ifdef CONFIG_USB_MUSB_OTG | ||
1832 | device_remove_file(musb->controller, &dev_attr_srp); | ||
1833 | #endif | ||
1834 | #endif | ||
1835 | |||
1836 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
1837 | musb_gadget_cleanup(musb); | ||
1838 | #endif | ||
1839 | |||
1840 | if (musb->nIrq >= 0) { | ||
1841 | disable_irq_wake(musb->nIrq); | ||
1842 | free_irq(musb->nIrq, musb); | ||
1843 | } | ||
1844 | if (is_dma_capable() && musb->dma_controller) { | ||
1845 | struct dma_controller *c = musb->dma_controller; | ||
1846 | |||
1847 | (void) c->stop(c); | ||
1848 | dma_controller_destroy(c); | ||
1849 | } | ||
1850 | |||
1851 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
1852 | musb_platform_exit(musb); | ||
1853 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
1854 | |||
1855 | if (musb->clock) { | ||
1856 | clk_disable(musb->clock); | ||
1857 | clk_put(musb->clock); | ||
1858 | } | ||
1859 | |||
1860 | #ifdef CONFIG_USB_MUSB_OTG | ||
1861 | put_device(musb->xceiv.dev); | ||
1862 | #endif | ||
1863 | |||
1864 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1865 | usb_put_hcd(musb_to_hcd(musb)); | ||
1866 | #else | ||
1867 | kfree(musb); | ||
1868 | #endif | ||
1869 | } | ||
1870 | |||
1871 | /* | ||
1872 | * Perform generic per-controller initialization. | ||
1873 | * | ||
1874 | * @pDevice: the controller (already clocked, etc) | ||
1875 | * @nIrq: irq | ||
1876 | * @mregs: virtual address of controller registers, | ||
1877 | * not yet corrected for platform-specific offsets | ||
1878 | */ | ||
1879 | static int __init | ||
1880 | musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | ||
1881 | { | ||
1882 | int status; | ||
1883 | struct musb *musb; | ||
1884 | struct musb_hdrc_platform_data *plat = dev->platform_data; | ||
1885 | |||
1886 | /* The driver might handle more features than the board; OK. | ||
1887 | * Fail when the board needs a feature that's not enabled. | ||
1888 | */ | ||
1889 | if (!plat) { | ||
1890 | dev_dbg(dev, "no platform_data?\n"); | ||
1891 | return -ENODEV; | ||
1892 | } | ||
1893 | switch (plat->mode) { | ||
1894 | case MUSB_HOST: | ||
1895 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
1896 | break; | ||
1897 | #else | ||
1898 | goto bad_config; | ||
1899 | #endif | ||
1900 | case MUSB_PERIPHERAL: | ||
1901 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
1902 | break; | ||
1903 | #else | ||
1904 | goto bad_config; | ||
1905 | #endif | ||
1906 | case MUSB_OTG: | ||
1907 | #ifdef CONFIG_USB_MUSB_OTG | ||
1908 | break; | ||
1909 | #else | ||
1910 | bad_config: | ||
1911 | #endif | ||
1912 | default: | ||
1913 | dev_err(dev, "incompatible Kconfig role setting\n"); | ||
1914 | return -EINVAL; | ||
1915 | } | ||
1916 | |||
1917 | /* allocate */ | ||
1918 | musb = allocate_instance(dev, plat->config, ctrl); | ||
1919 | if (!musb) | ||
1920 | return -ENOMEM; | ||
1921 | |||
1922 | spin_lock_init(&musb->lock); | ||
1923 | musb->board_mode = plat->mode; | ||
1924 | musb->board_set_power = plat->set_power; | ||
1925 | musb->set_clock = plat->set_clock; | ||
1926 | musb->min_power = plat->min_power; | ||
1927 | |||
1928 | /* Clock usage is chip-specific ... functional clock (DaVinci, | ||
1929 | * OMAP2430), or PHY ref (some TUSB6010 boards). All this core | ||
1930 | * code does is make sure a clock handle is available; platform | ||
1931 | * code manages it during start/stop and suspend/resume. | ||
1932 | */ | ||
1933 | if (plat->clock) { | ||
1934 | musb->clock = clk_get(dev, plat->clock); | ||
1935 | if (IS_ERR(musb->clock)) { | ||
1936 | status = PTR_ERR(musb->clock); | ||
1937 | musb->clock = NULL; | ||
1938 | goto fail; | ||
1939 | } | ||
1940 | } | ||
1941 | |||
1942 | /* assume vbus is off */ | ||
1943 | |||
1944 | /* platform adjusts musb->mregs and musb->isr if needed, | ||
1945 | * and activates clocks | ||
1946 | */ | ||
1947 | musb->isr = generic_interrupt; | ||
1948 | status = musb_platform_init(musb); | ||
1949 | |||
1950 | if (status < 0) | ||
1951 | goto fail; | ||
1952 | if (!musb->isr) { | ||
1953 | status = -ENODEV; | ||
1954 | goto fail2; | ||
1955 | } | ||
1956 | |||
1957 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
1958 | if (use_dma && dev->dma_mask) { | ||
1959 | struct dma_controller *c; | ||
1960 | |||
1961 | c = dma_controller_create(musb, musb->mregs); | ||
1962 | musb->dma_controller = c; | ||
1963 | if (c) | ||
1964 | (void) c->start(c); | ||
1965 | } | ||
1966 | #endif | ||
1967 | /* ideally this would be abstracted in platform setup */ | ||
1968 | if (!is_dma_capable() || !musb->dma_controller) | ||
1969 | dev->dma_mask = NULL; | ||
1970 | |||
1971 | /* be sure interrupts are disabled before connecting ISR */ | ||
1972 | musb_platform_disable(musb); | ||
1973 | musb_generic_disable(musb); | ||
1974 | |||
1975 | /* setup musb parts of the core (especially endpoints) */ | ||
1976 | status = musb_core_init(plat->config->multipoint | ||
1977 | ? MUSB_CONTROLLER_MHDRC | ||
1978 | : MUSB_CONTROLLER_HDRC, musb); | ||
1979 | if (status < 0) | ||
1980 | goto fail2; | ||
1981 | |||
1982 | /* Init IRQ work item before request_irq */ | ||
1983 | INIT_WORK(&musb->irq_work, musb_irq_work); | ||
1984 | |||
1985 | /* attach to the IRQ */ | ||
1986 | if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) { | ||
1987 | dev_err(dev, "request_irq %d failed!\n", nIrq); | ||
1988 | status = -ENODEV; | ||
1989 | goto fail2; | ||
1990 | } | ||
1991 | musb->nIrq = nIrq; | ||
1992 | /* FIXME this handles wakeup irqs wrong */ | ||
1993 | if (enable_irq_wake(nIrq) == 0) | ||
1994 | device_init_wakeup(dev, 1); | ||
1995 | |||
1996 | pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n", | ||
1997 | musb_driver_name, | ||
1998 | ({char *s; | ||
1999 | switch (musb->board_mode) { | ||
2000 | case MUSB_HOST: s = "Host"; break; | ||
2001 | case MUSB_PERIPHERAL: s = "Peripheral"; break; | ||
2002 | default: s = "OTG"; break; | ||
2003 | }; s; }), | ||
2004 | ctrl, | ||
2005 | (is_dma_capable() && musb->dma_controller) | ||
2006 | ? "DMA" : "PIO", | ||
2007 | musb->nIrq); | ||
2008 | |||
2009 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
2010 | /* host side needs more setup, except for no-host modes */ | ||
2011 | if (musb->board_mode != MUSB_PERIPHERAL) { | ||
2012 | struct usb_hcd *hcd = musb_to_hcd(musb); | ||
2013 | |||
2014 | if (musb->board_mode == MUSB_OTG) | ||
2015 | hcd->self.otg_port = 1; | ||
2016 | musb->xceiv.host = &hcd->self; | ||
2017 | hcd->power_budget = 2 * (plat->power ? : 250); | ||
2018 | } | ||
2019 | #endif /* CONFIG_USB_MUSB_HDRC_HCD */ | ||
2020 | |||
2021 | /* For the host-only role, we can activate right away. | ||
2022 | * (We expect the ID pin to be forcibly grounded!!) | ||
2023 | * Otherwise, wait till the gadget driver hooks up. | ||
2024 | */ | ||
2025 | if (!is_otg_enabled(musb) && is_host_enabled(musb)) { | ||
2026 | MUSB_HST_MODE(musb); | ||
2027 | musb->xceiv.default_a = 1; | ||
2028 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
2029 | |||
2030 | status = usb_add_hcd(musb_to_hcd(musb), -1, 0); | ||
2031 | if (status) | ||
2032 | goto fail; | ||
2033 | |||
2034 | DBG(1, "%s mode, status %d, devctl %02x %c\n", | ||
2035 | "HOST", status, | ||
2036 | musb_readb(musb->mregs, MUSB_DEVCTL), | ||
2037 | (musb_readb(musb->mregs, MUSB_DEVCTL) | ||
2038 | & MUSB_DEVCTL_BDEVICE | ||
2039 | ? 'B' : 'A')); | ||
2040 | |||
2041 | } else /* peripheral is enabled */ { | ||
2042 | MUSB_DEV_MODE(musb); | ||
2043 | musb->xceiv.default_a = 0; | ||
2044 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
2045 | |||
2046 | status = musb_gadget_setup(musb); | ||
2047 | if (status) | ||
2048 | goto fail; | ||
2049 | |||
2050 | DBG(1, "%s mode, status %d, dev%02x\n", | ||
2051 | is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", | ||
2052 | status, | ||
2053 | musb_readb(musb->mregs, MUSB_DEVCTL)); | ||
2054 | |||
2055 | } | ||
2056 | |||
2057 | return 0; | ||
2058 | |||
2059 | fail: | ||
2060 | if (musb->clock) | ||
2061 | clk_put(musb->clock); | ||
2062 | device_init_wakeup(dev, 0); | ||
2063 | musb_free(musb); | ||
2064 | return status; | ||
2065 | |||
2066 | #ifdef CONFIG_SYSFS | ||
2067 | status = device_create_file(dev, &dev_attr_mode); | ||
2068 | status = device_create_file(dev, &dev_attr_vbus); | ||
2069 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
2070 | status = device_create_file(dev, &dev_attr_srp); | ||
2071 | #endif /* CONFIG_USB_GADGET_MUSB_HDRC */ | ||
2072 | status = 0; | ||
2073 | #endif | ||
2074 | |||
2075 | return status; | ||
2076 | |||
2077 | fail2: | ||
2078 | musb_platform_exit(musb); | ||
2079 | goto fail; | ||
2080 | } | ||
2081 | |||
2082 | /*-------------------------------------------------------------------------*/ | ||
2083 | |||
2084 | /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just | ||
2085 | * bridge to a platform device; this driver then suffices. | ||
2086 | */ | ||
2087 | |||
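As a concrete reading of that comment, a board or glue file registers a platform device carrying a struct musb_hdrc_platform_data plus MEM/IRQ resources; musb_probe() below then picks those up with platform_get_resource() and platform_get_irq(). A hedged sketch follows -- the device name, register address, IRQ number and field values are illustrative assumptions; only the field names come from the structures used in musb_init_controller():

	/* Board-file sketch, assumptions only: "musb_hdrc" is the presumed
	 * value of musb_driver_name, and the address/IRQ are placeholders.
	 */
	static struct musb_hdrc_config board_musb_config = {
		.multipoint	= 1,	/* selects MUSB_CONTROLLER_MHDRC above */
		.num_eps	= 16,
	};

	static u64 board_musb_dmamask = 0xffffffff;

	static struct musb_hdrc_platform_data board_musb_plat = {
		.mode		= MUSB_OTG,	/* must match the Kconfig role */
		.config		= &board_musb_config,
		.clock		= "usb",	/* name handed to clk_get() */
		.min_power	= 50,
	};

	static struct resource board_musb_resources[] = {
		{
			.start	= 0x480ab000,	/* controller register window */
			.end	= 0x480abfff,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 92,		/* controller interrupt */
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device board_musb_device = {
		.name		= "musb_hdrc",
		.id		= -1,
		.dev = {
			.platform_data		= &board_musb_plat,
			.dma_mask		= &board_musb_dmamask,
			.coherent_dma_mask	= 0xffffffff,
		},
		.num_resources	= ARRAY_SIZE(board_musb_resources),
		.resource	= board_musb_resources,
	};

Registering this with platform_device_register() from board init code is what lets platform_driver_probe() below bind musb_probe() to it.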
2088 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
2089 | static u64 *orig_dma_mask; | ||
2090 | #endif | ||
2091 | |||
2092 | static int __init musb_probe(struct platform_device *pdev) | ||
2093 | { | ||
2094 | struct device *dev = &pdev->dev; | ||
2095 | int irq = platform_get_irq(pdev, 0); | ||
2096 | struct resource *iomem; | ||
2097 | void __iomem *base; | ||
2098 | |||
2099 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2100 | if (!iomem || irq == 0) | ||
2101 | return -ENODEV; | ||
2102 | |||
2103 | base = ioremap(iomem->start, iomem->end - iomem->start + 1); | ||
2104 | if (!base) { | ||
2105 | dev_err(dev, "ioremap failed\n"); | ||
2106 | return -ENOMEM; | ||
2107 | } | ||
2108 | |||
2109 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
2110 | /* clobbered by use_dma=n */ | ||
2111 | orig_dma_mask = dev->dma_mask; | ||
2112 | #endif | ||
2113 | return musb_init_controller(dev, irq, base); | ||
2114 | } | ||
2115 | |||
2116 | static int __devexit musb_remove(struct platform_device *pdev) | ||
2117 | { | ||
2118 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
2119 | void __iomem *ctrl_base = musb->ctrl_base; | ||
2120 | |||
2121 | /* this gets called on rmmod. | ||
2122 | * - Host mode: host may still be active | ||
2123 | * - Peripheral mode: peripheral is deactivated (or never-activated) | ||
2124 | * - OTG mode: both roles are deactivated (or never-activated) | ||
2125 | */ | ||
2126 | musb_shutdown(pdev); | ||
2127 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
2128 | if (musb->board_mode == MUSB_HOST) | ||
2129 | usb_remove_hcd(musb_to_hcd(musb)); | ||
2130 | #endif | ||
2131 | musb_free(musb); | ||
2132 | iounmap(ctrl_base); | ||
2133 | device_init_wakeup(&pdev->dev, 0); | ||
2134 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
2135 | pdev->dev.dma_mask = orig_dma_mask; | ||
2136 | #endif | ||
2137 | return 0; | ||
2138 | } | ||
2139 | |||
2140 | #ifdef CONFIG_PM | ||
2141 | |||
2142 | static int musb_suspend(struct platform_device *pdev, pm_message_t message) | ||
2143 | { | ||
2144 | unsigned long flags; | ||
2145 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
2146 | |||
2147 | if (!musb->clock) | ||
2148 | return 0; | ||
2149 | |||
2150 | spin_lock_irqsave(&musb->lock, flags); | ||
2151 | |||
2152 | if (is_peripheral_active(musb)) { | ||
2153 | /* FIXME force disconnect unless we know USB will wake | ||
2154 | * the system up quickly enough to respond ... | ||
2155 | */ | ||
2156 | } else if (is_host_active(musb)) { | ||
2157 | /* we know all the children are suspended; sometimes | ||
2158 | * they will even be wakeup-enabled. | ||
2159 | */ | ||
2160 | } | ||
2161 | |||
2162 | if (musb->set_clock) | ||
2163 | musb->set_clock(musb->clock, 0); | ||
2164 | else | ||
2165 | clk_disable(musb->clock); | ||
2166 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2167 | return 0; | ||
2168 | } | ||
2169 | |||
2170 | static int musb_resume(struct platform_device *pdev) | ||
2171 | { | ||
2172 | unsigned long flags; | ||
2173 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
2174 | |||
2175 | if (!musb->clock) | ||
2176 | return 0; | ||
2177 | |||
2178 | spin_lock_irqsave(&musb->lock, flags); | ||
2179 | |||
2180 | if (musb->set_clock) | ||
2181 | musb->set_clock(musb->clock, 1); | ||
2182 | else | ||
2183 | clk_enable(musb->clock); | ||
2184 | |||
2185 | /* for static cmos like DaVinci, register values were preserved | ||
2186 | * unless for some reason the whole soc powered down and we're | ||
2187 | * not treating that as a whole-system restart (e.g. swsusp) | ||
2188 | */ | ||
2189 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2190 | return 0; | ||
2191 | } | ||
2192 | |||
2193 | #else | ||
2194 | #define musb_suspend NULL | ||
2195 | #define musb_resume NULL | ||
2196 | #endif | ||
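The set_clock hook used in both paths above lets the board override plain clk_enable()/clk_disable(), e.g. to gate additional clocks. A minimal hedged sketch matching the int (*set_clock)(struct clk *, int) signature (the function name is invented; a body this trivial is equivalent to not installing the hook at all):

	/* Illustrative only: simplest possible board set_clock implementation. */
	static int board_musb_set_clock(struct clk *clk, int is_active)
	{
		if (is_active)
			clk_enable(clk);
		else
			clk_disable(clk);
		return 0;
	}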
2197 | |||
2198 | static struct platform_driver musb_driver = { | ||
2199 | .driver = { | ||
2200 | .name = (char *)musb_driver_name, | ||
2201 | .bus = &platform_bus_type, | ||
2202 | .owner = THIS_MODULE, | ||
2203 | }, | ||
2204 | .remove = __devexit_p(musb_remove), | ||
2205 | .shutdown = musb_shutdown, | ||
2206 | .suspend = musb_suspend, | ||
2207 | .resume = musb_resume, | ||
2208 | }; | ||
2209 | |||
2210 | /*-------------------------------------------------------------------------*/ | ||
2211 | |||
2212 | static int __init musb_init(void) | ||
2213 | { | ||
2214 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
2215 | if (usb_disabled()) | ||
2216 | return 0; | ||
2217 | #endif | ||
2218 | |||
2219 | pr_info("%s: version " MUSB_VERSION ", " | ||
2220 | #ifdef CONFIG_MUSB_PIO_ONLY | ||
2221 | "pio" | ||
2222 | #elif defined(CONFIG_USB_TI_CPPI_DMA) | ||
2223 | "cppi-dma" | ||
2224 | #elif defined(CONFIG_USB_INVENTRA_DMA) | ||
2225 | "musb-dma" | ||
2226 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) | ||
2227 | "tusb-omap-dma" | ||
2228 | #else | ||
2229 | "?dma?" | ||
2230 | #endif | ||
2231 | ", " | ||
2232 | #ifdef CONFIG_USB_MUSB_OTG | ||
2233 | "otg (peripheral+host)" | ||
2234 | #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) | ||
2235 | "peripheral" | ||
2236 | #elif defined(CONFIG_USB_MUSB_HDRC_HCD) | ||
2237 | "host" | ||
2238 | #endif | ||
2239 | ", debug=%d\n", | ||
2240 | musb_driver_name, debug); | ||
2241 | return platform_driver_probe(&musb_driver, musb_probe); | ||
2242 | } | ||
2243 | |||
2244 | /* make us init after usbcore and before usb | ||
2245 | * gadget and host-side drivers start to register | ||
2246 | */ | ||
2247 | subsys_initcall(musb_init); | ||
2248 | |||
2249 | static void __exit musb_cleanup(void) | ||
2250 | { | ||
2251 | platform_driver_unregister(&musb_driver); | ||
2252 | } | ||
2253 | module_exit(musb_cleanup); | ||
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h new file mode 100644 index 000000000000..82227251931b --- /dev/null +++ b/drivers/usb/musb/musb_core.h | |||
@@ -0,0 +1,488 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver defines | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef __MUSB_CORE_H__ | ||
36 | #define __MUSB_CORE_H__ | ||
37 | |||
38 | #include <linux/slab.h> | ||
39 | #include <linux/list.h> | ||
40 | #include <linux/interrupt.h> | ||
41 | #include <linux/smp_lock.h> | ||
42 | #include <linux/errno.h> | ||
43 | #include <linux/clk.h> | ||
44 | #include <linux/device.h> | ||
45 | #include <linux/usb/ch9.h> | ||
46 | #include <linux/usb/gadget.h> | ||
47 | #include <linux/usb.h> | ||
48 | #include <linux/usb/otg.h> | ||
49 | #include <linux/usb/musb.h> | ||
50 | |||
51 | struct musb; | ||
52 | struct musb_hw_ep; | ||
53 | struct musb_ep; | ||
54 | |||
55 | |||
56 | #include "musb_debug.h" | ||
57 | #include "musb_dma.h" | ||
58 | |||
59 | #include "musb_io.h" | ||
60 | #include "musb_regs.h" | ||
61 | |||
62 | #include "musb_gadget.h" | ||
63 | #include "../core/hcd.h" | ||
64 | #include "musb_host.h" | ||
65 | |||
66 | |||
67 | |||
68 | #ifdef CONFIG_USB_MUSB_OTG | ||
69 | |||
70 | #define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST) | ||
71 | #define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL) | ||
72 | #define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG) | ||
73 | |||
74 | /* NOTE: otg and peripheral-only state machines start at B_IDLE. | ||
75 | * OTG or host-only go to A_IDLE when ID is sensed. | ||
76 | */ | ||
77 | #define is_peripheral_active(m) (!(m)->is_host) | ||
78 | #define is_host_active(m) ((m)->is_host) | ||
79 | |||
80 | #else | ||
81 | #define is_peripheral_enabled(musb) is_peripheral_capable() | ||
82 | #define is_host_enabled(musb) is_host_capable() | ||
83 | #define is_otg_enabled(musb) 0 | ||
84 | |||
85 | #define is_peripheral_active(musb) is_peripheral_capable() | ||
86 | #define is_host_active(musb) is_host_capable() | ||
87 | #endif | ||
88 | |||
89 | #if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL) | ||
90 | /* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always | ||
91 | * override that choice selection (often USB_GADGET_DUMMY_HCD). | ||
92 | */ | ||
93 | #ifndef CONFIG_USB_GADGET_MUSB_HDRC | ||
94 | #error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC | ||
95 | #endif | ||
96 | #endif /* need MUSB gadget selection */ | ||
97 | |||
98 | |||
99 | #ifdef CONFIG_PROC_FS | ||
100 | #include <linux/fs.h> | ||
101 | #define MUSB_CONFIG_PROC_FS | ||
102 | #endif | ||
103 | |||
104 | /****************************** PERIPHERAL ROLE *****************************/ | ||
105 | |||
106 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
107 | |||
108 | #define is_peripheral_capable() (1) | ||
109 | |||
110 | extern irqreturn_t musb_g_ep0_irq(struct musb *); | ||
111 | extern void musb_g_tx(struct musb *, u8); | ||
112 | extern void musb_g_rx(struct musb *, u8); | ||
113 | extern void musb_g_reset(struct musb *); | ||
114 | extern void musb_g_suspend(struct musb *); | ||
115 | extern void musb_g_resume(struct musb *); | ||
116 | extern void musb_g_wakeup(struct musb *); | ||
117 | extern void musb_g_disconnect(struct musb *); | ||
118 | |||
119 | #else | ||
120 | |||
121 | #define is_peripheral_capable() (0) | ||
122 | |||
123 | static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; } | ||
124 | static inline void musb_g_reset(struct musb *m) {} | ||
125 | static inline void musb_g_suspend(struct musb *m) {} | ||
126 | static inline void musb_g_resume(struct musb *m) {} | ||
127 | static inline void musb_g_wakeup(struct musb *m) {} | ||
128 | static inline void musb_g_disconnect(struct musb *m) {} | ||
129 | |||
130 | #endif | ||
131 | |||
132 | /****************************** HOST ROLE ***********************************/ | ||
133 | |||
134 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
135 | |||
136 | #define is_host_capable() (1) | ||
137 | |||
138 | extern irqreturn_t musb_h_ep0_irq(struct musb *); | ||
139 | extern void musb_host_tx(struct musb *, u8); | ||
140 | extern void musb_host_rx(struct musb *, u8); | ||
141 | |||
142 | #else | ||
143 | |||
144 | #define is_host_capable() (0) | ||
145 | |||
146 | static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; } | ||
147 | static inline void musb_host_tx(struct musb *m, u8 e) {} | ||
148 | static inline void musb_host_rx(struct musb *m, u8 e) {} | ||
149 | |||
150 | #endif | ||
151 | |||
152 | |||
153 | /****************************** CONSTANTS ********************************/ | ||
154 | |||
155 | #ifndef MUSB_C_NUM_EPS | ||
156 | #define MUSB_C_NUM_EPS ((u8)16) | ||
157 | #endif | ||
158 | |||
159 | #ifndef MUSB_MAX_END0_PACKET | ||
160 | #define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE) | ||
161 | #endif | ||
162 | |||
163 | /* host side ep0 states */ | ||
164 | enum musb_h_ep0_state { | ||
165 | MUSB_EP0_IDLE, | ||
166 | MUSB_EP0_START, /* expect ack of setup */ | ||
167 | MUSB_EP0_IN, /* expect IN DATA */ | ||
168 | MUSB_EP0_OUT, /* expect ack of OUT DATA */ | ||
169 | MUSB_EP0_STATUS, /* expect ack of STATUS */ | ||
170 | } __attribute__ ((packed)); | ||
171 | |||
172 | /* peripheral side ep0 states */ | ||
173 | enum musb_g_ep0_state { | ||
174 | MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */ | ||
175 | MUSB_EP0_STAGE_TX, /* IN data */ | ||
176 | MUSB_EP0_STAGE_RX, /* OUT data */ | ||
177 | MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */ | ||
178 | MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */ | ||
179 | MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */ | ||
180 | } __attribute__ ((packed)); | ||
181 | |||
182 | /* OTG protocol constants */ | ||
183 | #define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */ | ||
184 | #define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */ | ||
185 | #define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */ | ||
186 | |||
187 | /*************************** REGISTER ACCESS ********************************/ | ||
188 | |||
189 | /* Endpoint registers (other than dynfifo setup) can be accessed either | ||
190 | * directly with the "flat" model, or after setting up an index register. | ||
191 | */ | ||
192 | |||
193 | #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \ | ||
194 | || defined(CONFIG_ARCH_OMAP3430) | ||
195 | /* REVISIT indexed access seemed to | ||
196 | * misbehave (on DaVinci) for at least peripheral IN ... | ||
197 | */ | ||
198 | #define MUSB_FLAT_REG | ||
199 | #endif | ||
200 | |||
201 | /* TUSB mapping: "flat" plus ep0 special cases */ | ||
202 | #if defined(CONFIG_USB_TUSB6010) | ||
203 | #define musb_ep_select(_mbase, _epnum) \ | ||
204 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) | ||
205 | #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET | ||
206 | |||
207 | /* "flat" mapping: each endpoint has its own i/o address */ | ||
208 | #elif defined(MUSB_FLAT_REG) | ||
209 | #define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum))) | ||
210 | #define MUSB_EP_OFFSET MUSB_FLAT_OFFSET | ||
211 | |||
212 | /* "indexed" mapping: INDEX register controls register bank select */ | ||
213 | #else | ||
214 | #define musb_ep_select(_mbase, _epnum) \ | ||
215 | musb_writeb((_mbase), MUSB_INDEX, (_epnum)) | ||
216 | #define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET | ||
217 | #endif | ||
218 | |||
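Whichever mapping is compiled in, callers use the same select-then-access pattern (as musb_g_tx() does later in this patch). A minimal sketch using those macros -- the helper name is invented for illustration, and musb_readw()/MUSB_TXCSR come from musb_io.h/musb_regs.h included above:

	/* Sketch only: read one endpoint's TXCSR under the mapping chosen above.
	 * Caller holds musb->lock with IRQs blocked, as the driver does elsewhere.
	 */
	static inline u16 example_read_txcsr(struct musb *musb, u8 epnum)
	{
		void __iomem *epio = musb->endpoints[epnum].regs;

		musb_ep_select(musb->mregs, epnum);	/* no-op for MUSB_FLAT_REG */
		return musb_readw(epio, MUSB_TXCSR);
	}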
219 | /****************************** FUNCTIONS ********************************/ | ||
220 | |||
221 | #define MUSB_HST_MODE(_musb)\ | ||
222 | { (_musb)->is_host = true; } | ||
223 | #define MUSB_DEV_MODE(_musb) \ | ||
224 | { (_musb)->is_host = false; } | ||
225 | |||
226 | #define test_devctl_hst_mode(_x) \ | ||
227 | (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM) | ||
228 | |||
229 | #define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral") | ||
230 | |||
231 | /******************************** TYPES *************************************/ | ||
232 | |||
233 | /* | ||
234 | * struct musb_hw_ep - endpoint hardware (bidirectional) | ||
235 | * | ||
236 | * Ordered slightly for better cacheline locality. | ||
237 | */ | ||
238 | struct musb_hw_ep { | ||
239 | struct musb *musb; | ||
240 | void __iomem *fifo; | ||
241 | void __iomem *regs; | ||
242 | |||
243 | #ifdef CONFIG_USB_TUSB6010 | ||
244 | void __iomem *conf; | ||
245 | #endif | ||
246 | |||
247 | /* index in musb->endpoints[] */ | ||
248 | u8 epnum; | ||
249 | |||
250 | /* hardware configuration, possibly dynamic */ | ||
251 | bool is_shared_fifo; | ||
252 | bool tx_double_buffered; | ||
253 | bool rx_double_buffered; | ||
254 | u16 max_packet_sz_tx; | ||
255 | u16 max_packet_sz_rx; | ||
256 | |||
257 | struct dma_channel *tx_channel; | ||
258 | struct dma_channel *rx_channel; | ||
259 | |||
260 | #ifdef CONFIG_USB_TUSB6010 | ||
261 | /* TUSB has "asynchronous" and "synchronous" dma modes */ | ||
262 | dma_addr_t fifo_async; | ||
263 | dma_addr_t fifo_sync; | ||
264 | void __iomem *fifo_sync_va; | ||
265 | #endif | ||
266 | |||
267 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
268 | void __iomem *target_regs; | ||
269 | |||
270 | /* currently scheduled peripheral endpoint */ | ||
271 | struct musb_qh *in_qh; | ||
272 | struct musb_qh *out_qh; | ||
273 | |||
274 | u8 rx_reinit; | ||
275 | u8 tx_reinit; | ||
276 | #endif | ||
277 | |||
278 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
279 | /* peripheral side */ | ||
280 | struct musb_ep ep_in; /* TX */ | ||
281 | struct musb_ep ep_out; /* RX */ | ||
282 | #endif | ||
283 | }; | ||
284 | |||
285 | static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep) | ||
286 | { | ||
287 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
288 | return next_request(&hw_ep->ep_in); | ||
289 | #else | ||
290 | return NULL; | ||
291 | #endif | ||
292 | } | ||
293 | |||
294 | static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep) | ||
295 | { | ||
296 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
297 | return next_request(&hw_ep->ep_out); | ||
298 | #else | ||
299 | return NULL; | ||
300 | #endif | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * struct musb - Driver instance data. | ||
305 | */ | ||
306 | struct musb { | ||
307 | /* device lock */ | ||
308 | spinlock_t lock; | ||
309 | struct clk *clock; | ||
310 | irqreturn_t (*isr)(int, void *); | ||
311 | struct work_struct irq_work; | ||
312 | |||
313 | /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ | ||
314 | #define MUSB_PORT_STAT_RESUME (1 << 31) | ||
315 | |||
316 | u32 port1_status; | ||
317 | |||
318 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
319 | unsigned long rh_timer; | ||
320 | |||
321 | enum musb_h_ep0_state ep0_stage; | ||
322 | |||
323 | /* bulk traffic normally dedicates endpoint hardware, and each | ||
324 | * direction has its own ring of host side endpoints. | ||
325 | * we try to progress the transfer at the head of each endpoint's | ||
326 | * queue until it completes or NAKs too much; then we try the next | ||
327 | * endpoint. | ||
328 | */ | ||
329 | struct musb_hw_ep *bulk_ep; | ||
330 | |||
331 | struct list_head control; /* of musb_qh */ | ||
332 | struct list_head in_bulk; /* of musb_qh */ | ||
333 | struct list_head out_bulk; /* of musb_qh */ | ||
334 | struct musb_qh *periodic[32]; /* tree of interrupt+iso */ | ||
335 | #endif | ||
336 | |||
337 | /* called with IRQs blocked; ON/nonzero implies starting a session, | ||
338 | * and waiting at least a_wait_vrise_tmout. | ||
339 | */ | ||
340 | void (*board_set_vbus)(struct musb *, int is_on); | ||
341 | |||
342 | struct dma_controller *dma_controller; | ||
343 | |||
344 | struct device *controller; | ||
345 | void __iomem *ctrl_base; | ||
346 | void __iomem *mregs; | ||
347 | |||
348 | #ifdef CONFIG_USB_TUSB6010 | ||
349 | dma_addr_t async; | ||
350 | dma_addr_t sync; | ||
351 | void __iomem *sync_va; | ||
352 | #endif | ||
353 | |||
354 | /* passed down from chip/board specific irq handlers */ | ||
355 | u8 int_usb; | ||
356 | u16 int_rx; | ||
357 | u16 int_tx; | ||
358 | |||
359 | struct otg_transceiver xceiv; | ||
360 | |||
361 | int nIrq; | ||
362 | |||
363 | struct musb_hw_ep endpoints[MUSB_C_NUM_EPS]; | ||
364 | #define control_ep endpoints | ||
365 | |||
366 | #define VBUSERR_RETRY_COUNT 3 | ||
367 | u16 vbuserr_retry; | ||
368 | u16 epmask; | ||
369 | u8 nr_endpoints; | ||
370 | |||
371 | u8 board_mode; /* enum musb_mode */ | ||
372 | int (*board_set_power)(int state); | ||
373 | |||
374 | int (*set_clock)(struct clk *clk, int is_active); | ||
375 | |||
376 | u8 min_power; /* vbus for periph, in mA/2 */ | ||
377 | |||
378 | bool is_host; | ||
379 | |||
380 | int a_wait_bcon; /* VBUS timeout in msecs */ | ||
381 | unsigned long idle_timeout; /* Next timeout in jiffies */ | ||
382 | |||
383 | /* active means connected and not suspended */ | ||
384 | unsigned is_active:1; | ||
385 | |||
386 | unsigned is_multipoint:1; | ||
387 | unsigned ignore_disconnect:1; /* during bus resets */ | ||
388 | |||
389 | #ifdef C_MP_TX | ||
390 | unsigned bulk_split:1; | ||
391 | #define can_bulk_split(musb,type) \ | ||
392 | (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) | ||
393 | #else | ||
394 | #define can_bulk_split(musb, type) 0 | ||
395 | #endif | ||
396 | |||
397 | #ifdef C_MP_RX | ||
398 | unsigned bulk_combine:1; | ||
399 | #define can_bulk_combine(musb,type) \ | ||
400 | (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) | ||
401 | #else | ||
402 | #define can_bulk_combine(musb, type) 0 | ||
403 | #endif | ||
404 | |||
405 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
406 | /* is_suspended means USB B_PERIPHERAL suspend */ | ||
407 | unsigned is_suspended:1; | ||
408 | |||
409 | /* may_wakeup means remote wakeup is enabled */ | ||
410 | unsigned may_wakeup:1; | ||
411 | |||
412 | /* is_self_powered is reported in device status and the | ||
413 | * config descriptor. is_bus_powered means B_PERIPHERAL | ||
414 | * draws some VBUS current; both can be true. | ||
415 | */ | ||
416 | unsigned is_self_powered:1; | ||
417 | unsigned is_bus_powered:1; | ||
418 | |||
419 | unsigned set_address:1; | ||
420 | unsigned test_mode:1; | ||
421 | unsigned softconnect:1; | ||
422 | |||
423 | u8 address; | ||
424 | u8 test_mode_nr; | ||
425 | u16 ackpend; /* ep0 */ | ||
426 | enum musb_g_ep0_state ep0_state; | ||
427 | struct usb_gadget g; /* the gadget */ | ||
428 | struct usb_gadget_driver *gadget_driver; /* its driver */ | ||
429 | #endif | ||
430 | |||
431 | struct musb_hdrc_config *config; | ||
432 | |||
433 | #ifdef MUSB_CONFIG_PROC_FS | ||
434 | struct proc_dir_entry *proc_entry; | ||
435 | #endif | ||
436 | }; | ||
437 | |||
438 | static inline void musb_set_vbus(struct musb *musb, int is_on) | ||
439 | { | ||
440 | musb->board_set_vbus(musb, is_on); | ||
441 | } | ||
442 | |||
443 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
444 | static inline struct musb *gadget_to_musb(struct usb_gadget *g) | ||
445 | { | ||
446 | return container_of(g, struct musb, g); | ||
447 | } | ||
448 | #endif | ||
449 | |||
450 | |||
451 | /***************************** Glue it together *****************************/ | ||
452 | |||
453 | extern const char musb_driver_name[]; | ||
454 | |||
455 | extern void musb_start(struct musb *musb); | ||
456 | extern void musb_stop(struct musb *musb); | ||
457 | |||
458 | extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); | ||
459 | extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); | ||
460 | |||
461 | extern void musb_load_testpacket(struct musb *); | ||
462 | |||
463 | extern irqreturn_t musb_interrupt(struct musb *); | ||
464 | |||
465 | extern void musb_platform_enable(struct musb *musb); | ||
466 | extern void musb_platform_disable(struct musb *musb); | ||
467 | |||
468 | extern void musb_hnp_stop(struct musb *musb); | ||
469 | |||
470 | extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode); | ||
471 | |||
472 | #if defined(CONFIG_USB_TUSB6010) || \ | ||
473 | defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) | ||
474 | extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout); | ||
475 | #else | ||
476 | #define musb_platform_try_idle(x, y) do {} while (0) | ||
477 | #endif | ||
478 | |||
479 | #ifdef CONFIG_USB_TUSB6010 | ||
480 | extern int musb_platform_get_vbus_status(struct musb *musb); | ||
481 | #else | ||
482 | #define musb_platform_get_vbus_status(x) 0 | ||
483 | #endif | ||
484 | |||
485 | extern int __init musb_platform_init(struct musb *musb); | ||
486 | extern int musb_platform_exit(struct musb *musb); | ||
487 | |||
488 | #endif /* __MUSB_CORE_H__ */ | ||
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h new file mode 100644 index 000000000000..4d2794441b15 --- /dev/null +++ b/drivers/usb/musb/musb_debug.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver debug defines | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef __MUSB_LINUX_DEBUG_H__ | ||
36 | #define __MUSB_LINUX_DEBUG_H__ | ||
37 | |||
38 | #define yprintk(facility, format, args...) \ | ||
39 | do { printk(facility "%s %d: " format , \ | ||
40 | __func__, __LINE__ , ## args); } while (0) | ||
41 | #define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args) | ||
42 | #define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) | ||
43 | #define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) | ||
44 | |||
45 | #define xprintk(level, facility, format, args...) do { \ | ||
46 | if (_dbg_level(level)) { \ | ||
47 | printk(facility "%s %d: " format , \ | ||
48 | __func__, __LINE__ , ## args); \ | ||
49 | } } while (0) | ||
50 | |||
51 | extern unsigned debug; | ||
52 | |||
53 | static inline int _dbg_level(unsigned l) | ||
54 | { | ||
55 | return debug >= l; | ||
56 | } | ||
57 | |||
58 | #define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args) | ||
59 | |||
60 | extern const char *otg_state_string(struct musb *); | ||
61 | |||
62 | #endif /* __MUSB_LINUX_DEBUG_H__ */ | ||
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h new file mode 100644 index 000000000000..0a2c4e3602c1 --- /dev/null +++ b/drivers/usb/musb/musb_dma.h | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver DMA controller abstraction | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef __MUSB_DMA_H__ | ||
36 | #define __MUSB_DMA_H__ | ||
37 | |||
38 | struct musb_hw_ep; | ||
39 | |||
40 | /* | ||
41 | * DMA Controller Abstraction | ||
42 | * | ||
43 | * DMA Controllers are abstracted to allow use of a variety of different | ||
44 | * implementations of DMA, as allowed by the Inventra USB cores. On the | ||
45 | * host side, usbcore sets up the DMA mappings and flushes caches; on the | ||
46 | * peripheral side, the gadget controller driver does. Responsibilities | ||
47 | * of a DMA controller driver include: | ||
48 | * | ||
49 | * - Handling the details of moving multiple USB packets | ||
50 | * in cooperation with the Inventra USB core, including especially | ||
51 | * the correct RX side treatment of short packets and buffer-full | ||
52 | * states (both of which terminate transfers). | ||
53 | * | ||
54 | * - Knowing the correlation between dma channels and the | ||
55 | * Inventra core's local endpoint resources and data direction. | ||
56 | * | ||
57 | * - Maintaining a list of allocated/available channels. | ||
58 | * | ||
59 | * - Updating channel status on interrupts, | ||
60 | * whether shared with the Inventra core or separate. | ||
61 | */ | ||
62 | |||
63 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) | ||
64 | |||
65 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
66 | #define is_dma_capable() (1) | ||
67 | #else | ||
68 | #define is_dma_capable() (0) | ||
69 | #endif | ||
70 | |||
71 | #ifdef CONFIG_USB_TI_CPPI_DMA | ||
72 | #define is_cppi_enabled() 1 | ||
73 | #else | ||
74 | #define is_cppi_enabled() 0 | ||
75 | #endif | ||
76 | |||
77 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | ||
78 | #define tusb_dma_omap() 1 | ||
79 | #else | ||
80 | #define tusb_dma_omap() 0 | ||
81 | #endif | ||
82 | |||
83 | /* | ||
84 | * DMA channel status ... updated by the dma controller driver whenever that | ||
85 | * status changes, and protected by the overall controller spinlock. | ||
86 | */ | ||
87 | enum dma_channel_status { | ||
88 | /* unallocated */ | ||
89 | MUSB_DMA_STATUS_UNKNOWN, | ||
90 | /* allocated ... but not busy, no errors */ | ||
91 | MUSB_DMA_STATUS_FREE, | ||
92 | /* busy ... transactions are active */ | ||
93 | MUSB_DMA_STATUS_BUSY, | ||
94 | /* transaction(s) aborted due to ... dma or memory bus error */ | ||
95 | MUSB_DMA_STATUS_BUS_ABORT, | ||
96 | /* transaction(s) aborted due to ... core error or USB fault */ | ||
97 | MUSB_DMA_STATUS_CORE_ABORT | ||
98 | }; | ||
99 | |||
100 | struct dma_controller; | ||
101 | |||
102 | /** | ||
103 | * struct dma_channel - A DMA channel. | ||
104 | * @private_data: channel-private data | ||
105 | * @max_len: the maximum number of bytes the channel can move in one | ||
106 | * transaction (typically representing many USB maximum-sized packets) | ||
107 | * @actual_len: how many bytes have been transferred | ||
108 | * @status: current channel status (updated e.g. on interrupt) | ||
109 | * @desired_mode: true if mode 1 is desired; false if mode 0 is desired | ||
110 | * | ||
111 | * channels are associated with an endpoint for the duration of at least | ||
112 | * one usb transfer. | ||
113 | */ | ||
114 | struct dma_channel { | ||
115 | void *private_data; | ||
116 | /* FIXME not void* private_data, but a dma_controller * */ | ||
117 | size_t max_len; | ||
118 | size_t actual_len; | ||
119 | enum dma_channel_status status; | ||
120 | bool desired_mode; | ||
121 | }; | ||
122 | |||
123 | /* | ||
124 | * dma_channel_status - return status of dma channel | ||
125 | * @c: the channel | ||
126 | * | ||
127 | * Returns the software's view of the channel status. If that status is BUSY | ||
128 | * then it's possible that the hardware has completed (or aborted) a transfer, | ||
129 | * so the driver needs to update that status. | ||
130 | */ | ||
131 | static inline enum dma_channel_status | ||
132 | dma_channel_status(struct dma_channel *c) | ||
133 | { | ||
134 | return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN; | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * struct dma_controller - A DMA Controller. | ||
139 | * @start: call this to start a DMA controller; | ||
140 | * return 0 on success, else negative errno | ||
141 | * @stop: call this to stop a DMA controller | ||
142 | * return 0 on success, else negative errno | ||
143 | * @channel_alloc: call this to allocate a DMA channel | ||
144 | * @channel_release: call this to release a DMA channel | ||
145 | * @channel_abort: call this to abort a pending DMA transaction, | ||
146 | * returning it to FREE (but allocated) state | ||
147 | * | ||
148 | * Controllers manage dma channels. | ||
149 | */ | ||
150 | struct dma_controller { | ||
151 | int (*start)(struct dma_controller *); | ||
152 | int (*stop)(struct dma_controller *); | ||
153 | struct dma_channel *(*channel_alloc)(struct dma_controller *, | ||
154 | struct musb_hw_ep *, u8 is_tx); | ||
155 | void (*channel_release)(struct dma_channel *); | ||
156 | int (*channel_program)(struct dma_channel *channel, | ||
157 | u16 maxpacket, u8 mode, | ||
158 | dma_addr_t dma_addr, | ||
159 | u32 length); | ||
160 | int (*channel_abort)(struct dma_channel *); | ||
161 | }; | ||
162 | |||
163 | /* called after channel_program(), may indicate a fault */ | ||
164 | extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit); | ||
165 | |||
166 | |||
167 | extern struct dma_controller *__init | ||
168 | dma_controller_create(struct musb *, void __iomem *); | ||
169 | |||
170 | extern void dma_controller_destroy(struct dma_controller *); | ||
171 | |||
172 | #endif /* __MUSB_DMA_H__ */ | ||
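To make those hooks concrete, here is a hedged skeleton of a dma_controller provider; every example_* name is invented, and the bodies show only the bookkeeping implied by the comments above -- the real engines (Inventra, CPPI, TUSB OMAP) do the hardware work in their own files:

	/* Illustrative skeleton of a struct dma_controller provider. */
	static int example_dma_start(struct dma_controller *c)
	{
		/* enable the DMA engine and its interrupts */
		return 0;
	}

	static int example_dma_stop(struct dma_controller *c)
	{
		return 0;
	}

	static struct dma_channel *example_channel_alloc(struct dma_controller *c,
			struct musb_hw_ep *hw_ep, u8 is_tx)
	{
		/* bind a free hardware channel to hw_ep/is_tx and return its
		 * struct dma_channel with status = MUSB_DMA_STATUS_FREE;
		 * returning NULL means "no DMA for this endpoint" (PIO is used).
		 */
		return NULL;
	}

	static void example_channel_release(struct dma_channel *channel)
	{
		channel->status = MUSB_DMA_STATUS_UNKNOWN;
	}

	static int example_channel_program(struct dma_channel *channel,
			u16 maxpacket, u8 mode, dma_addr_t dma_addr, u32 length)
	{
		channel->actual_len = 0;
		channel->status = MUSB_DMA_STATUS_BUSY;
		/* start the transfer; the completion path later calls
		 * musb_dma_completion(musb, epnum, is_tx)
		 */
		return 1;	/* nonzero: the channel accepted the request */
	}

	static int example_channel_abort(struct dma_channel *channel)
	{
		channel->status = MUSB_DMA_STATUS_FREE;
		return 0;
	}

	static struct dma_controller example_controller = {
		.start			= example_dma_start,
		.stop			= example_dma_stop,
		.channel_alloc		= example_channel_alloc,
		.channel_release	= example_channel_release,
		.channel_program	= example_channel_program,
		.channel_abort		= example_channel_abort,
	};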
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c new file mode 100644 index 000000000000..d6a802c224fa --- /dev/null +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -0,0 +1,2031 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver peripheral support | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/list.h> | ||
37 | #include <linux/timer.h> | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/smp.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/moduleparam.h> | ||
43 | #include <linux/stat.h> | ||
44 | #include <linux/dma-mapping.h> | ||
45 | |||
46 | #include "musb_core.h" | ||
47 | |||
48 | |||
49 | /* MUSB PERIPHERAL status 3-mar-2006: | ||
50 | * | ||
51 | * - EP0 seems solid. It passes both USBCV and usbtest control cases. | ||
52 | * Minor glitches: | ||
53 | * | ||
54 | * + remote wakeup to Linux hosts works, but USBCV failures were | ||
55 | * seen in one test run (operator error?) | ||
56 | * + endpoint halt tests -- in both usbtest and usbcv -- seem | ||
57 | * to break when dma is enabled ... is something wrongly | ||
58 | * clearing SENDSTALL? | ||
59 | * | ||
60 | * - Mass storage behaved ok when last tested. Network traffic patterns | ||
61 | * (with lots of short transfers etc) need retesting; they turn up the | ||
62 | * worst cases of the DMA, since short packets are typical but are not | ||
63 | * required. | ||
64 | * | ||
65 | * - TX/IN | ||
66 | * + both pio and dma behave well with network and g_zero tests | ||
67 | * + no cppi throughput issues other than no-hw-queueing | ||
68 | * + failed with FLAT_REG (DaVinci) | ||
69 | * + seems to behave with double buffering, PIO -and- CPPI | ||
70 | * + with gadgetfs + AIO, requests got lost? | ||
71 | * | ||
72 | * - RX/OUT | ||
73 | * + both pio and dma behave well with network and g_zero tests | ||
74 | * + dma is slow in typical case (short_not_ok is clear) | ||
75 | * + double buffering ok with PIO | ||
76 | * + double buffering *FAILS* with CPPI, wrong data bytes sometimes | ||
77 | * + request lossage observed with gadgetfs | ||
78 | * | ||
79 | * - ISO not tested ... might work, but only weakly isochronous | ||
80 | * | ||
81 | * - Gadget driver disabling of softconnect during bind() is ignored; so | ||
82 | * drivers can't hold off host requests until userspace is ready. | ||
83 | * (Workaround: they can turn it off later.) | ||
84 | * | ||
85 | * - PORTABILITY (assumes PIO works): | ||
86 | * + DaVinci, basically works with cppi dma | ||
87 | * + OMAP 2430, ditto with mentor dma | ||
88 | * + TUSB 6010, platform-specific dma in the works | ||
89 | */ | ||
90 | |||
91 | /* ----------------------------------------------------------------------- */ | ||
92 | |||
93 | /* | ||
94 | * Immediately complete a request. | ||
95 | * | ||
96 | * @param request the request to complete | ||
97 | * @param status the status to complete the request with | ||
98 | * Context: controller locked, IRQs blocked. | ||
99 | */ | ||
100 | void musb_g_giveback( | ||
101 | struct musb_ep *ep, | ||
102 | struct usb_request *request, | ||
103 | int status) | ||
104 | __releases(ep->musb->lock) | ||
105 | __acquires(ep->musb->lock) | ||
106 | { | ||
107 | struct musb_request *req; | ||
108 | struct musb *musb; | ||
109 | int busy = ep->busy; | ||
110 | |||
111 | req = to_musb_request(request); | ||
112 | |||
113 | list_del(&request->list); | ||
114 | if (req->request.status == -EINPROGRESS) | ||
115 | req->request.status = status; | ||
116 | musb = req->musb; | ||
117 | |||
118 | ep->busy = 1; | ||
119 | spin_unlock(&musb->lock); | ||
120 | if (is_dma_capable()) { | ||
121 | if (req->mapped) { | ||
122 | dma_unmap_single(musb->controller, | ||
123 | req->request.dma, | ||
124 | req->request.length, | ||
125 | req->tx | ||
126 | ? DMA_TO_DEVICE | ||
127 | : DMA_FROM_DEVICE); | ||
128 | req->request.dma = DMA_ADDR_INVALID; | ||
129 | req->mapped = 0; | ||
130 | } else if (req->request.dma != DMA_ADDR_INVALID) | ||
131 | dma_sync_single_for_cpu(musb->controller, | ||
132 | req->request.dma, | ||
133 | req->request.length, | ||
134 | req->tx | ||
135 | ? DMA_TO_DEVICE | ||
136 | : DMA_FROM_DEVICE); | ||
137 | } | ||
138 | if (request->status == 0) | ||
139 | DBG(5, "%s done request %p, %d/%d\n", | ||
140 | ep->end_point.name, request, | ||
141 | req->request.actual, req->request.length); | ||
142 | else | ||
143 | DBG(2, "%s request %p, %d/%d fault %d\n", | ||
144 | ep->end_point.name, request, | ||
145 | req->request.actual, req->request.length, | ||
146 | request->status); | ||
147 | req->request.complete(&req->ep->end_point, &req->request); | ||
148 | spin_lock(&musb->lock); | ||
149 | ep->busy = busy; | ||
150 | } | ||
151 | |||
152 | /* ----------------------------------------------------------------------- */ | ||
153 | |||
154 | /* | ||
155 | * Abort requests queued to an endpoint using the status. Synchronous. | ||
156 | * caller locked controller and blocked irqs, and selected this ep. | ||
157 | */ | ||
158 | static void nuke(struct musb_ep *ep, const int status) | ||
159 | { | ||
160 | struct musb_request *req = NULL; | ||
161 | void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; | ||
162 | |||
163 | ep->busy = 1; | ||
164 | |||
165 | if (is_dma_capable() && ep->dma) { | ||
166 | struct dma_controller *c = ep->musb->dma_controller; | ||
167 | int value; | ||
168 | if (ep->is_in) { | ||
169 | musb_writew(epio, MUSB_TXCSR, | ||
170 | 0 | MUSB_TXCSR_FLUSHFIFO); | ||
171 | musb_writew(epio, MUSB_TXCSR, | ||
172 | 0 | MUSB_TXCSR_FLUSHFIFO); | ||
173 | } else { | ||
174 | musb_writew(epio, MUSB_RXCSR, | ||
175 | 0 | MUSB_RXCSR_FLUSHFIFO); | ||
176 | musb_writew(epio, MUSB_RXCSR, | ||
177 | 0 | MUSB_RXCSR_FLUSHFIFO); | ||
178 | } | ||
179 | |||
180 | value = c->channel_abort(ep->dma); | ||
181 | DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value); | ||
182 | c->channel_release(ep->dma); | ||
183 | ep->dma = NULL; | ||
184 | } | ||
185 | |||
186 | while (!list_empty(&(ep->req_list))) { | ||
187 | req = container_of(ep->req_list.next, struct musb_request, | ||
188 | request.list); | ||
189 | musb_g_giveback(ep, &req->request, status); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* ----------------------------------------------------------------------- */ | ||
194 | |||
195 | /* Data transfers - pure PIO, pure DMA, or mixed mode */ | ||
196 | |||
197 | /* | ||
198 | * This assumes the separate CPPI engine is responding to DMA requests | ||
199 | * from the usb core ... sequenced a bit differently from mentor dma. | ||
200 | */ | ||
201 | |||
202 | static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) | ||
203 | { | ||
204 | if (can_bulk_split(musb, ep->type)) | ||
205 | return ep->hw_ep->max_packet_sz_tx; | ||
206 | else | ||
207 | return ep->packet_sz; | ||
208 | } | ||
209 | |||
210 | |||
211 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
212 | |||
213 | /* Peripheral tx (IN) using Mentor DMA works as follows: | ||
214 | Only mode 0 is used for transfers <= wPktSize, | ||
215 | mode 1 is used for larger transfers, | ||
216 | |||
217 | One of the following happens: | ||
218 | - Host sends IN token which causes an endpoint interrupt | ||
219 | -> TxAvail | ||
220 | -> if DMA is currently busy, exit. | ||
221 | -> if queue is non-empty, txstate(). | ||
222 | |||
223 | - Request is queued by the gadget driver. | ||
224 | -> if queue was previously empty, txstate() | ||
225 | |||
226 | txstate() | ||
227 | -> start | ||
228 | /\ -> setup DMA | ||
229 | | (data is transferred to the FIFO, then sent out when | ||
230 | | IN token(s) are received from Host.) | ||
231 | | -> DMA interrupt on completion | ||
232 | | calls TxAvail. | ||
233 | | -> stop DMA, ~DmaEnab, | ||
234 | | -> set TxPktRdy for last short pkt or zlp | ||
235 | | -> Complete Request | ||
236 | | -> Continue next request (call txstate) | ||
237 | |___________________________________| | ||
238 | |||
239 | * Non-Mentor DMA engines can of course work differently, such as by | ||
240 | * upleveling from irq-per-packet to irq-per-buffer. | ||
241 | */ | ||
242 | |||
243 | #endif | ||
244 | |||
245 | /* | ||
246 | * An endpoint is transmitting data. This can be called either from | ||
247 | * the IRQ routine or from ep.queue() to kickstart a request on an | ||
248 | * endpoint. | ||
249 | * | ||
250 | * Context: controller locked, IRQs blocked, endpoint selected | ||
251 | */ | ||
252 | static void txstate(struct musb *musb, struct musb_request *req) | ||
253 | { | ||
254 | u8 epnum = req->epnum; | ||
255 | struct musb_ep *musb_ep; | ||
256 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
257 | struct usb_request *request; | ||
258 | u16 fifo_count = 0, csr; | ||
259 | int use_dma = 0; | ||
260 | |||
261 | musb_ep = req->ep; | ||
262 | |||
263 | /* we shouldn't get here while DMA is active ... but we do ... */ | ||
264 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | ||
265 | DBG(4, "dma pending...\n"); | ||
266 | return; | ||
267 | } | ||
268 | |||
269 | /* read TXCSR before */ | ||
270 | csr = musb_readw(epio, MUSB_TXCSR); | ||
271 | |||
272 | request = &req->request; | ||
273 | fifo_count = min(max_ep_writesize(musb, musb_ep), | ||
274 | (int)(request->length - request->actual)); | ||
275 | |||
276 | if (csr & MUSB_TXCSR_TXPKTRDY) { | ||
277 | DBG(5, "%s old packet still ready, txcsr %03x\n", | ||
278 | musb_ep->end_point.name, csr); | ||
279 | return; | ||
280 | } | ||
281 | |||
282 | if (csr & MUSB_TXCSR_P_SENDSTALL) { | ||
283 | DBG(5, "%s stalling, txcsr %03x\n", | ||
284 | musb_ep->end_point.name, csr); | ||
285 | return; | ||
286 | } | ||
287 | |||
288 | DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", | ||
289 | epnum, musb_ep->packet_sz, fifo_count, | ||
290 | csr); | ||
291 | |||
292 | #ifndef CONFIG_MUSB_PIO_ONLY | ||
293 | if (is_dma_capable() && musb_ep->dma) { | ||
294 | struct dma_controller *c = musb->dma_controller; | ||
295 | |||
296 | use_dma = (request->dma != DMA_ADDR_INVALID); | ||
297 | |||
298 | /* MUSB_TXCSR_P_ISO is still set correctly */ | ||
299 | |||
300 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
301 | { | ||
302 | size_t request_size; | ||
303 | |||
304 | /* setup DMA, then program endpoint CSR */ | ||
305 | request_size = min(request->length, | ||
306 | musb_ep->dma->max_len); | ||
307 | if (request_size <= musb_ep->packet_sz) | ||
308 | musb_ep->dma->desired_mode = 0; | ||
309 | else | ||
310 | musb_ep->dma->desired_mode = 1; | ||
311 | |||
312 | use_dma = use_dma && c->channel_program( | ||
313 | musb_ep->dma, musb_ep->packet_sz, | ||
314 | musb_ep->dma->desired_mode, | ||
315 | request->dma, request_size); | ||
316 | if (use_dma) { | ||
317 | if (musb_ep->dma->desired_mode == 0) { | ||
318 | /* ASSERT: DMAENAB is clear */ | ||
319 | csr &= ~(MUSB_TXCSR_AUTOSET | | ||
320 | MUSB_TXCSR_DMAMODE); | ||
321 | csr |= (MUSB_TXCSR_DMAENAB | | ||
322 | MUSB_TXCSR_MODE); | ||
323 | /* against programming guide */ | ||
324 | } else | ||
325 | csr |= (MUSB_TXCSR_AUTOSET | ||
326 | | MUSB_TXCSR_DMAENAB | ||
327 | | MUSB_TXCSR_DMAMODE | ||
328 | | MUSB_TXCSR_MODE); | ||
329 | |||
330 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | ||
331 | musb_writew(epio, MUSB_TXCSR, csr); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | #elif defined(CONFIG_USB_TI_CPPI_DMA) | ||
336 | /* program endpoint CSR first, then setup DMA */ | ||
337 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
338 | | MUSB_TXCSR_DMAMODE | ||
339 | | MUSB_TXCSR_P_UNDERRUN | ||
340 | | MUSB_TXCSR_TXPKTRDY); | ||
341 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; | ||
342 | musb_writew(epio, MUSB_TXCSR, | ||
343 | (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) | ||
344 | | csr); | ||
345 | |||
346 | /* ensure writebuffer is empty */ | ||
347 | csr = musb_readw(epio, MUSB_TXCSR); | ||
348 | |||
349 | /* NOTE host side sets DMAENAB later than this; both are | ||
350 | * OK since the transfer dma glue (between CPPI and Mentor | ||
351 | * fifos) just tells CPPI it could start. Data only moves | ||
352 | * to the USB TX fifo when both fifos are ready. | ||
353 | */ | ||
354 | |||
355 | /* "mode" is irrelevant here; handle terminating ZLPs like | ||
356 | * PIO does, since the hardware RNDIS mode seems unreliable | ||
357 | * except for the last-packet-is-already-short case. | ||
358 | */ | ||
359 | use_dma = use_dma && c->channel_program( | ||
360 | musb_ep->dma, musb_ep->packet_sz, | ||
361 | 0, | ||
362 | request->dma, | ||
363 | request->length); | ||
364 | if (!use_dma) { | ||
365 | c->channel_release(musb_ep->dma); | ||
366 | musb_ep->dma = NULL; | ||
367 | /* ASSERT: DMAENAB clear */ | ||
368 | csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); | ||
369 | /* invariant: request->buf is non-null */ | ||
370 | } | ||
371 | #elif defined(CONFIG_USB_TUSB_OMAP_DMA) | ||
372 | use_dma = use_dma && c->channel_program( | ||
373 | musb_ep->dma, musb_ep->packet_sz, | ||
374 | request->zero, | ||
375 | request->dma, | ||
376 | request->length); | ||
377 | #endif | ||
378 | } | ||
379 | #endif | ||
380 | |||
381 | if (!use_dma) { | ||
382 | musb_write_fifo(musb_ep->hw_ep, fifo_count, | ||
383 | (u8 *) (request->buf + request->actual)); | ||
384 | request->actual += fifo_count; | ||
385 | csr |= MUSB_TXCSR_TXPKTRDY; | ||
386 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | ||
387 | musb_writew(epio, MUSB_TXCSR, csr); | ||
388 | } | ||
389 | |||
390 | /* host may already have the data when this message shows... */ | ||
391 | DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", | ||
392 | musb_ep->end_point.name, use_dma ? "dma" : "pio", | ||
393 | request->actual, request->length, | ||
394 | musb_readw(epio, MUSB_TXCSR), | ||
395 | fifo_count, | ||
396 | musb_readw(epio, MUSB_TXMAXP)); | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * FIFO state update (e.g. data ready). | ||
401 | * Called from IRQ, with controller locked. | ||
402 | */ | ||
403 | void musb_g_tx(struct musb *musb, u8 epnum) | ||
404 | { | ||
405 | u16 csr; | ||
406 | struct usb_request *request; | ||
407 | u8 __iomem *mbase = musb->mregs; | ||
408 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; | ||
409 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
410 | struct dma_channel *dma; | ||
411 | |||
412 | musb_ep_select(mbase, epnum); | ||
413 | request = next_request(musb_ep); | ||
414 | |||
415 | csr = musb_readw(epio, MUSB_TXCSR); | ||
416 | DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); | ||
417 | |||
418 | dma = is_dma_capable() ? musb_ep->dma : NULL; | ||
419 | do { | ||
420 | /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX | ||
421 | * probably rates reporting as a host error | ||
422 | */ | ||
423 | if (csr & MUSB_TXCSR_P_SENTSTALL) { | ||
424 | csr |= MUSB_TXCSR_P_WZC_BITS; | ||
425 | csr &= ~MUSB_TXCSR_P_SENTSTALL; | ||
426 | musb_writew(epio, MUSB_TXCSR, csr); | ||
427 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
428 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
429 | musb->dma_controller->channel_abort(dma); | ||
430 | } | ||
431 | |||
432 | if (request) | ||
433 | musb_g_giveback(musb_ep, request, -EPIPE); | ||
434 | |||
435 | break; | ||
436 | } | ||
437 | |||
438 | if (csr & MUSB_TXCSR_P_UNDERRUN) { | ||
439 | /* we NAKed, no big deal ... little reason to care */ | ||
440 | csr |= MUSB_TXCSR_P_WZC_BITS; | ||
441 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | ||
442 | | MUSB_TXCSR_TXPKTRDY); | ||
443 | musb_writew(epio, MUSB_TXCSR, csr); | ||
444 | DBG(20, "underrun on ep%d, req %p\n", epnum, request); | ||
445 | } | ||
446 | |||
447 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
448 | /* SHOULD NOT HAPPEN ... has with cppi though, after | ||
449 | * changing SENDSTALL (and other cases); harmless? | ||
450 | */ | ||
451 | DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); | ||
452 | break; | ||
453 | } | ||
454 | |||
455 | if (request) { | ||
456 | u8 is_dma = 0; | ||
457 | |||
458 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { | ||
459 | is_dma = 1; | ||
460 | csr |= MUSB_TXCSR_P_WZC_BITS; | ||
461 | csr &= ~(MUSB_TXCSR_DMAENAB | ||
462 | | MUSB_TXCSR_P_UNDERRUN | ||
463 | | MUSB_TXCSR_TXPKTRDY); | ||
464 | musb_writew(epio, MUSB_TXCSR, csr); | ||
465 | /* ensure writebuffer is empty */ | ||
466 | csr = musb_readw(epio, MUSB_TXCSR); | ||
467 | request->actual += musb_ep->dma->actual_len; | ||
468 | DBG(4, "TXCSR%d %04x, dma off, " | ||
469 | "len %zu, req %p\n", | ||
470 | epnum, csr, | ||
471 | musb_ep->dma->actual_len, | ||
472 | request); | ||
473 | } | ||
474 | |||
475 | if (is_dma || request->actual == request->length) { | ||
476 | |||
477 | /* First, maybe a terminating short packet. | ||
478 | * Some DMA engines might handle this by | ||
479 | * themselves. | ||
480 | */ | ||
481 | if ((request->zero | ||
482 | && request->length | ||
483 | && (request->length | ||
484 | % musb_ep->packet_sz) | ||
485 | == 0) | ||
486 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
487 | || (is_dma && | ||
488 | ((!dma->desired_mode) || | ||
489 | (request->actual & | ||
490 | (musb_ep->packet_sz - 1)))) | ||
491 | #endif | ||
492 | ) { | ||
493 | /* on dma completion, fifo may not | ||
494 | * be available yet ... | ||
495 | */ | ||
496 | if (csr & MUSB_TXCSR_TXPKTRDY) | ||
497 | break; | ||
498 | |||
499 | DBG(4, "sending zero pkt\n"); | ||
500 | musb_writew(epio, MUSB_TXCSR, | ||
501 | MUSB_TXCSR_MODE | ||
502 | | MUSB_TXCSR_TXPKTRDY); | ||
503 | request->zero = 0; | ||
504 | } | ||
505 | |||
506 | /* ... or if not, then complete it */ | ||
507 | musb_g_giveback(musb_ep, request, 0); | ||
508 | |||
509 | /* kickstart next transfer if appropriate; | ||
510 | * the packet that just completed might not | ||
511 | * be transmitted for hours or days. | ||
512 | * REVISIT for double buffering... | ||
513 | * FIXME revisit for stalls too... | ||
514 | */ | ||
515 | musb_ep_select(mbase, epnum); | ||
516 | csr = musb_readw(epio, MUSB_TXCSR); | ||
517 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
518 | break; | ||
519 | request = musb_ep->desc | ||
520 | ? next_request(musb_ep) | ||
521 | : NULL; | ||
522 | if (!request) { | ||
523 | DBG(4, "%s idle now\n", | ||
524 | musb_ep->end_point.name); | ||
525 | break; | ||
526 | } | ||
527 | } | ||
528 | |||
529 | txstate(musb, to_musb_request(request)); | ||
530 | } | ||
531 | |||
532 | } while (0); | ||
533 | } | ||
534 | |||
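The zero-length-packet handling above is driven by the request's "zero" flag, set by the gadget function driver before queueing. As a rough illustration of the caller's side (not part of this patch; the endpoint, buffer and callback names are hypothetical), an IN transfer whose length is an exact multiple of maxpacket would be queued roughly like this:

#include <linux/kernel.h>
#include <linux/usb/gadget.h>

/* Hypothetical completion callback; req->status is filled in by musb_g_giveback(). */
static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	pr_debug("IN transfer done, status %d, sent %u of %u\n",
			req->status, req->actual, req->length);
}

/*
 * Sketch only: queue a bulk IN transfer whose length is a multiple of
 * maxpacket; req->zero asks musb_g_tx() above to append the terminating
 * ZLP so the host sees the end of the transfer.
 */
static int queue_in_transfer(struct usb_ep *in_ep, void *buf, unsigned len)
{
	struct usb_request *req = usb_ep_alloc_request(in_ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->zero = (len % in_ep->maxpacket) == 0;
	req->complete = bulk_in_complete;

	return usb_ep_queue(in_ep, req, GFP_ATOMIC);
}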
535 | /* ------------------------------------------------------------ */ | ||
536 | |||
537 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
538 | |||
539 | /* Peripheral rx (OUT) using Mentor DMA works as follows: | ||
540 | - Only mode 0 is used. | ||
541 | |||
542 | - Request is queued by the gadget class driver. | ||
543 | -> if queue was previously empty, rxstate() | ||
544 | |||
545 | - Host sends OUT token which causes an endpoint interrupt | ||
546 | /\ -> RxReady | ||
547 | | -> if request queued, call rxstate | ||
548 | | /\ -> setup DMA | ||
549 | | | -> DMA interrupt on completion | ||
550 | | | -> RxReady | ||
551 | | | -> stop DMA | ||
552 | | | -> ack the read | ||
553 | | | -> if data recd = max expected | ||
554 | | | by the request, or host | ||
555 | | | sent a short packet, | ||
556 | | | complete the request, | ||
557 | | | and start the next one. | ||
558 | | |_____________________________________| | ||
559 | | else just wait for the host | ||
560 | | to send the next OUT token. | ||
561 | |__________________________________________________| | ||
562 | |||
563 | * Non-Mentor DMA engines can of course work differently. | ||
564 | */ | ||
565 | |||
566 | #endif | ||
567 | |||
568 | /* | ||
569 | * Context: controller locked, IRQs blocked, endpoint selected | ||
570 | */ | ||
571 | static void rxstate(struct musb *musb, struct musb_request *req) | ||
572 | { | ||
573 | u16 csr = 0; | ||
574 | const u8 epnum = req->epnum; | ||
575 | struct usb_request *request = &req->request; | ||
576 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | ||
577 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
578 | u16 fifo_count = 0; | ||
579 | u16 len = musb_ep->packet_sz; | ||
580 | |||
581 | csr = musb_readw(epio, MUSB_RXCSR); | ||
582 | |||
583 | if (is_cppi_enabled() && musb_ep->dma) { | ||
584 | struct dma_controller *c = musb->dma_controller; | ||
585 | struct dma_channel *channel = musb_ep->dma; | ||
586 | |||
587 | /* NOTE: CPPI won't actually stop advancing the DMA | ||
588 | * queue after short packet transfers, so this is almost | ||
589 | * always going to run as IRQ-per-packet DMA so that | ||
590 | * faults will be handled correctly. | ||
591 | */ | ||
592 | if (c->channel_program(channel, | ||
593 | musb_ep->packet_sz, | ||
594 | !request->short_not_ok, | ||
595 | request->dma + request->actual, | ||
596 | request->length - request->actual)) { | ||
597 | |||
598 | /* make sure that if an rxpkt arrived after the irq, | ||
599 | * the cppi engine will be ready to take it as soon | ||
600 | * as DMA is enabled | ||
601 | */ | ||
602 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | ||
603 | | MUSB_RXCSR_DMAMODE); | ||
604 | csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; | ||
605 | musb_writew(epio, MUSB_RXCSR, csr); | ||
606 | return; | ||
607 | } | ||
608 | } | ||
609 | |||
610 | if (csr & MUSB_RXCSR_RXPKTRDY) { | ||
611 | len = musb_readw(epio, MUSB_RXCOUNT); | ||
612 | if (request->actual < request->length) { | ||
613 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
614 | if (is_dma_capable() && musb_ep->dma) { | ||
615 | struct dma_controller *c; | ||
616 | struct dma_channel *channel; | ||
617 | int use_dma = 0; | ||
618 | |||
619 | c = musb->dma_controller; | ||
620 | channel = musb_ep->dma; | ||
621 | |||
622 | /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in | ||
623 | * mode 0 only. So we do not get endpoint interrupts due to DMA | ||
624 | * completion. We only get interrupts from DMA controller. | ||
625 | * | ||
626 | * We could operate in DMA mode 1 if we knew the size of the transfer | ||
627 | * in advance. For mass storage class, request->length = what the host | ||
628 | * sends, so that'd work. But for pretty much everything else, | ||
629 | * request->length is routinely more than what the host sends. For most | ||
630 | * of these gadgets, the end of the transfer is signified either by a | ||
631 | * short packet, or by filling the last byte of the buffer. (Sending | ||
632 | * extra data in that last packet should trigger an overflow fault.) | ||
633 | * But in mode 1, we don't get a DMA completion interrupt for short packets. | ||
634 | * | ||
635 | * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), | ||
636 | * to get endpoint interrupt on every DMA req, but that didn't seem | ||
637 | * to work reliably. | ||
638 | * | ||
639 | * REVISIT an updated g_file_storage can set req->short_not_ok, which | ||
640 | * then becomes usable as a runtime "use mode 1" hint... | ||
641 | */ | ||
642 | |||
643 | csr |= MUSB_RXCSR_DMAENAB; | ||
644 | #ifdef USE_MODE1 | ||
645 | csr |= MUSB_RXCSR_AUTOCLEAR; | ||
646 | /* csr |= MUSB_RXCSR_DMAMODE; */ | ||
647 | |||
648 | /* this special sequence (enabling and then | ||
649 | * disabling MUSB_RXCSR_DMAMODE) is required | ||
650 | * to get DMAReq to activate | ||
651 | */ | ||
652 | musb_writew(epio, MUSB_RXCSR, | ||
653 | csr | MUSB_RXCSR_DMAMODE); | ||
654 | #endif | ||
655 | musb_writew(epio, MUSB_RXCSR, csr); | ||
656 | |||
657 | if (request->actual < request->length) { | ||
658 | int transfer_size = 0; | ||
659 | #ifdef USE_MODE1 | ||
660 | transfer_size = min(request->length, | ||
661 | channel->max_len); | ||
662 | #else | ||
663 | transfer_size = len; | ||
664 | #endif | ||
665 | if (transfer_size <= musb_ep->packet_sz) | ||
666 | musb_ep->dma->desired_mode = 0; | ||
667 | else | ||
668 | musb_ep->dma->desired_mode = 1; | ||
669 | |||
670 | use_dma = c->channel_program( | ||
671 | channel, | ||
672 | musb_ep->packet_sz, | ||
673 | channel->desired_mode, | ||
674 | request->dma | ||
675 | + request->actual, | ||
676 | transfer_size); | ||
677 | } | ||
678 | |||
679 | if (use_dma) | ||
680 | return; | ||
681 | } | ||
682 | #endif /* Mentor's DMA */ | ||
683 | |||
684 | fifo_count = request->length - request->actual; | ||
685 | DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", | ||
686 | musb_ep->end_point.name, | ||
687 | len, fifo_count, | ||
688 | musb_ep->packet_sz); | ||
689 | |||
690 | fifo_count = min(len, fifo_count); | ||
691 | |||
692 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | ||
693 | if (tusb_dma_omap() && musb_ep->dma) { | ||
694 | struct dma_controller *c = musb->dma_controller; | ||
695 | struct dma_channel *channel = musb_ep->dma; | ||
696 | u32 dma_addr = request->dma + request->actual; | ||
697 | int ret; | ||
698 | |||
699 | ret = c->channel_program(channel, | ||
700 | musb_ep->packet_sz, | ||
701 | channel->desired_mode, | ||
702 | dma_addr, | ||
703 | fifo_count); | ||
704 | if (ret) | ||
705 | return; | ||
706 | } | ||
707 | #endif | ||
708 | |||
709 | musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) | ||
710 | (request->buf + request->actual)); | ||
711 | request->actual += fifo_count; | ||
712 | |||
713 | /* REVISIT if we left anything in the fifo, flush | ||
714 | * it and report -EOVERFLOW | ||
715 | */ | ||
716 | |||
717 | /* ack the read! */ | ||
718 | csr |= MUSB_RXCSR_P_WZC_BITS; | ||
719 | csr &= ~MUSB_RXCSR_RXPKTRDY; | ||
720 | musb_writew(epio, MUSB_RXCSR, csr); | ||
721 | } | ||
722 | } | ||
723 | |||
724 | /* reached the end, or a short packet was detected */ | ||
725 | if (request->actual == request->length || len < musb_ep->packet_sz) | ||
726 | musb_g_giveback(musb_ep, request, 0); | ||
727 | } | ||
728 | |||
729 | /* | ||
730 | * Data ready for a request; called from IRQ | ||
731 | */ | ||
732 | void musb_g_rx(struct musb *musb, u8 epnum) | ||
733 | { | ||
734 | u16 csr; | ||
735 | struct usb_request *request; | ||
736 | void __iomem *mbase = musb->mregs; | ||
737 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | ||
738 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
739 | struct dma_channel *dma; | ||
740 | |||
741 | musb_ep_select(mbase, epnum); | ||
742 | |||
743 | request = next_request(musb_ep); | ||
744 | |||
745 | csr = musb_readw(epio, MUSB_RXCSR); | ||
746 | dma = is_dma_capable() ? musb_ep->dma : NULL; | ||
747 | |||
748 | DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, | ||
749 | csr, dma ? " (dma)" : "", request); | ||
750 | |||
751 | if (csr & MUSB_RXCSR_P_SENTSTALL) { | ||
752 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
753 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
754 | (void) musb->dma_controller->channel_abort(dma); | ||
755 | request->actual += musb_ep->dma->actual_len; | ||
756 | } | ||
757 | |||
758 | csr |= MUSB_RXCSR_P_WZC_BITS; | ||
759 | csr &= ~MUSB_RXCSR_P_SENTSTALL; | ||
760 | musb_writew(epio, MUSB_RXCSR, csr); | ||
761 | |||
762 | if (request) | ||
763 | musb_g_giveback(musb_ep, request, -EPIPE); | ||
764 | goto done; | ||
765 | } | ||
766 | |||
767 | if (csr & MUSB_RXCSR_P_OVERRUN) { | ||
768 | /* csr |= MUSB_RXCSR_P_WZC_BITS; */ | ||
769 | csr &= ~MUSB_RXCSR_P_OVERRUN; | ||
770 | musb_writew(epio, MUSB_RXCSR, csr); | ||
771 | |||
772 | DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); | ||
773 | if (request && request->status == -EINPROGRESS) | ||
774 | request->status = -EOVERFLOW; | ||
775 | } | ||
776 | if (csr & MUSB_RXCSR_INCOMPRX) { | ||
777 | /* REVISIT not necessarily an error */ | ||
778 | DBG(4, "%s, incomprx\n", musb_ep->end_point.name); | ||
779 | } | ||
780 | |||
781 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
782 | /* "should not happen"; likely RXPKTRDY pending for DMA */ | ||
783 | DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, | ||
784 | "%s busy, csr %04x\n", | ||
785 | musb_ep->end_point.name, csr); | ||
786 | goto done; | ||
787 | } | ||
788 | |||
789 | if (dma && (csr & MUSB_RXCSR_DMAENAB)) { | ||
790 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | ||
791 | | MUSB_RXCSR_DMAENAB | ||
792 | | MUSB_RXCSR_DMAMODE); | ||
793 | musb_writew(epio, MUSB_RXCSR, | ||
794 | MUSB_RXCSR_P_WZC_BITS | csr); | ||
795 | |||
796 | request->actual += musb_ep->dma->actual_len; | ||
797 | |||
798 | DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", | ||
799 | epnum, csr, | ||
800 | musb_readw(epio, MUSB_RXCSR), | ||
801 | musb_ep->dma->actual_len, request); | ||
802 | |||
803 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) | ||
804 | /* Autoclear doesn't clear RxPktRdy for short packets */ | ||
805 | if ((dma->desired_mode == 0) | ||
806 | || (dma->actual_len | ||
807 | & (musb_ep->packet_sz - 1))) { | ||
808 | /* ack the read! */ | ||
809 | csr &= ~MUSB_RXCSR_RXPKTRDY; | ||
810 | musb_writew(epio, MUSB_RXCSR, csr); | ||
811 | } | ||
812 | |||
813 | /* incomplete, and not short? wait for next OUT packet */ | ||
814 | if ((request->actual < request->length) | ||
815 | && (musb_ep->dma->actual_len | ||
816 | == musb_ep->packet_sz)) | ||
817 | goto done; | ||
818 | #endif | ||
819 | musb_g_giveback(musb_ep, request, 0); | ||
820 | |||
821 | request = next_request(musb_ep); | ||
822 | if (!request) | ||
823 | goto done; | ||
824 | |||
825 | /* don't start more i/o till the stall clears */ | ||
826 | musb_ep_select(mbase, epnum); | ||
827 | csr = musb_readw(epio, MUSB_RXCSR); | ||
828 | if (csr & MUSB_RXCSR_P_SENDSTALL) | ||
829 | goto done; | ||
830 | } | ||
831 | |||
832 | |||
833 | /* analyze request if the ep is hot */ | ||
834 | if (request) | ||
835 | rxstate(musb, to_musb_request(request)); | ||
836 | else | ||
837 | DBG(3, "packet waiting for %s%s request\n", | ||
838 | musb_ep->desc ? "" : "inactive ", | ||
839 | musb_ep->end_point.name); | ||
840 | |||
841 | done: | ||
842 | return; | ||
843 | } | ||
844 | |||
845 | /* ------------------------------------------------------------ */ | ||
846 | |||
847 | static int musb_gadget_enable(struct usb_ep *ep, | ||
848 | const struct usb_endpoint_descriptor *desc) | ||
849 | { | ||
850 | unsigned long flags; | ||
851 | struct musb_ep *musb_ep; | ||
852 | struct musb_hw_ep *hw_ep; | ||
853 | void __iomem *regs; | ||
854 | struct musb *musb; | ||
855 | void __iomem *mbase; | ||
856 | u8 epnum; | ||
857 | u16 csr; | ||
858 | unsigned tmp; | ||
859 | int status = -EINVAL; | ||
860 | |||
861 | if (!ep || !desc) | ||
862 | return -EINVAL; | ||
863 | |||
864 | musb_ep = to_musb_ep(ep); | ||
865 | hw_ep = musb_ep->hw_ep; | ||
866 | regs = hw_ep->regs; | ||
867 | musb = musb_ep->musb; | ||
868 | mbase = musb->mregs; | ||
869 | epnum = musb_ep->current_epnum; | ||
870 | |||
871 | spin_lock_irqsave(&musb->lock, flags); | ||
872 | |||
873 | if (musb_ep->desc) { | ||
874 | status = -EBUSY; | ||
875 | goto fail; | ||
876 | } | ||
877 | musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; | ||
878 | |||
879 | /* check direction and (later) maxpacket size against endpoint */ | ||
880 | if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum) | ||
881 | goto fail; | ||
882 | |||
883 | /* REVISIT this rules out high bandwidth periodic transfers */ | ||
884 | tmp = le16_to_cpu(desc->wMaxPacketSize); | ||
885 | if (tmp & ~0x07ff) | ||
886 | goto fail; | ||
887 | musb_ep->packet_sz = tmp; | ||
888 | |||
889 | /* enable the interrupts for the endpoint, set the endpoint | ||
890 | * packet size (or fail), set the mode, clear the fifo | ||
891 | */ | ||
892 | musb_ep_select(mbase, epnum); | ||
893 | if (desc->bEndpointAddress & USB_DIR_IN) { | ||
894 | u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); | ||
895 | |||
896 | if (hw_ep->is_shared_fifo) | ||
897 | musb_ep->is_in = 1; | ||
898 | if (!musb_ep->is_in) | ||
899 | goto fail; | ||
900 | if (tmp > hw_ep->max_packet_sz_tx) | ||
901 | goto fail; | ||
902 | |||
903 | int_txe |= (1 << epnum); | ||
904 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | ||
905 | |||
906 | /* REVISIT if can_bulk_split(), use by updating "tmp"; | ||
907 | * likewise high bandwidth periodic tx | ||
908 | */ | ||
909 | musb_writew(regs, MUSB_TXMAXP, tmp); | ||
910 | |||
911 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; | ||
912 | if (musb_readw(regs, MUSB_TXCSR) | ||
913 | & MUSB_TXCSR_FIFONOTEMPTY) | ||
914 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
915 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) | ||
916 | csr |= MUSB_TXCSR_P_ISO; | ||
917 | |||
918 | /* set twice in case of double buffering */ | ||
919 | musb_writew(regs, MUSB_TXCSR, csr); | ||
920 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ | ||
921 | musb_writew(regs, MUSB_TXCSR, csr); | ||
922 | |||
923 | } else { | ||
924 | u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); | ||
925 | |||
926 | if (hw_ep->is_shared_fifo) | ||
927 | musb_ep->is_in = 0; | ||
928 | if (musb_ep->is_in) | ||
929 | goto fail; | ||
930 | if (tmp > hw_ep->max_packet_sz_rx) | ||
931 | goto fail; | ||
932 | |||
933 | int_rxe |= (1 << epnum); | ||
934 | musb_writew(mbase, MUSB_INTRRXE, int_rxe); | ||
935 | |||
936 | /* REVISIT if can_bulk_combine() use by updating "tmp" | ||
937 | * likewise high bandwidth periodic rx | ||
938 | */ | ||
939 | musb_writew(regs, MUSB_RXMAXP, tmp); | ||
940 | |||
941 | /* force shared fifo to OUT-only mode */ | ||
942 | if (hw_ep->is_shared_fifo) { | ||
943 | csr = musb_readw(regs, MUSB_TXCSR); | ||
944 | csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); | ||
945 | musb_writew(regs, MUSB_TXCSR, csr); | ||
946 | } | ||
947 | |||
948 | csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; | ||
949 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) | ||
950 | csr |= MUSB_RXCSR_P_ISO; | ||
951 | else if (musb_ep->type == USB_ENDPOINT_XFER_INT) | ||
952 | csr |= MUSB_RXCSR_DISNYET; | ||
953 | |||
954 | /* set twice in case of double buffering */ | ||
955 | musb_writew(regs, MUSB_RXCSR, csr); | ||
956 | musb_writew(regs, MUSB_RXCSR, csr); | ||
957 | } | ||
958 | |||
959 | /* NOTE: all the I/O code _should_ work fine without DMA, in case | ||
960 | * for some reason you run out of channels here. | ||
961 | */ | ||
962 | if (is_dma_capable() && musb->dma_controller) { | ||
963 | struct dma_controller *c = musb->dma_controller; | ||
964 | |||
965 | musb_ep->dma = c->channel_alloc(c, hw_ep, | ||
966 | (desc->bEndpointAddress & USB_DIR_IN)); | ||
967 | } else | ||
968 | musb_ep->dma = NULL; | ||
969 | |||
970 | musb_ep->desc = desc; | ||
971 | musb_ep->busy = 0; | ||
972 | status = 0; | ||
973 | |||
974 | pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", | ||
975 | musb_driver_name, musb_ep->end_point.name, | ||
976 | ({ char *s; switch (musb_ep->type) { | ||
977 | case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; | ||
978 | case USB_ENDPOINT_XFER_INT: s = "int"; break; | ||
979 | default: s = "iso"; break; | ||
980 | }; s; }), | ||
981 | musb_ep->is_in ? "IN" : "OUT", | ||
982 | musb_ep->dma ? "dma, " : "", | ||
983 | musb_ep->packet_sz); | ||
984 | |||
985 | schedule_work(&musb->irq_work); | ||
986 | |||
987 | fail: | ||
988 | spin_unlock_irqrestore(&musb->lock, flags); | ||
989 | return status; | ||
990 | } | ||
991 | |||
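For context, this enable path is reached through the generic usb_ep_enable() wrapper. A hedged sketch of what a function driver passes in (the descriptor values and the "in_ep" pointer are illustrative, not identifiers from this patch):

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/*
 * Hypothetical high-speed bulk IN descriptor; the endpoint number would
 * normally be filled in by usb_ep_autoconfig() at bind time.
 */
static struct usb_endpoint_descriptor hs_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= __constant_cpu_to_le16(512),
};

static int demo_enable_eps(struct usb_ep *in_ep)
{
	/* lands in musb_gadget_enable(): IRQ unmasked, TXMAXP set, FIFO flushed */
	return usb_ep_enable(in_ep, &hs_bulk_in_desc);
}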
992 | /* | ||
993 | * Disable an endpoint flushing all requests queued. | ||
994 | */ | ||
995 | static int musb_gadget_disable(struct usb_ep *ep) | ||
996 | { | ||
997 | unsigned long flags; | ||
998 | struct musb *musb; | ||
999 | u8 epnum; | ||
1000 | struct musb_ep *musb_ep; | ||
1001 | void __iomem *epio; | ||
1002 | int status = 0; | ||
1003 | |||
1004 | musb_ep = to_musb_ep(ep); | ||
1005 | musb = musb_ep->musb; | ||
1006 | epnum = musb_ep->current_epnum; | ||
1007 | epio = musb->endpoints[epnum].regs; | ||
1008 | |||
1009 | spin_lock_irqsave(&musb->lock, flags); | ||
1010 | musb_ep_select(musb->mregs, epnum); | ||
1011 | |||
1012 | /* zero the endpoint sizes */ | ||
1013 | if (musb_ep->is_in) { | ||
1014 | u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); | ||
1015 | int_txe &= ~(1 << epnum); | ||
1016 | musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); | ||
1017 | musb_writew(epio, MUSB_TXMAXP, 0); | ||
1018 | } else { | ||
1019 | u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); | ||
1020 | int_rxe &= ~(1 << epnum); | ||
1021 | musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); | ||
1022 | musb_writew(epio, MUSB_RXMAXP, 0); | ||
1023 | } | ||
1024 | |||
1025 | musb_ep->desc = NULL; | ||
1026 | |||
1027 | /* abort all pending DMA and requests */ | ||
1028 | nuke(musb_ep, -ESHUTDOWN); | ||
1029 | |||
1030 | schedule_work(&musb->irq_work); | ||
1031 | |||
1032 | spin_unlock_irqrestore(&(musb->lock), flags); | ||
1033 | |||
1034 | DBG(2, "%s\n", musb_ep->end_point.name); | ||
1035 | |||
1036 | return status; | ||
1037 | } | ||
1038 | |||
1039 | /* | ||
1040 | * Allocate a request for an endpoint. | ||
1041 | * Reused by ep0 code. | ||
1042 | */ | ||
1043 | struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) | ||
1044 | { | ||
1045 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
1046 | struct musb_request *request = NULL; | ||
1047 | |||
1048 | request = kzalloc(sizeof *request, gfp_flags); | ||
1049 | if (request) { | ||
1050 | INIT_LIST_HEAD(&request->request.list); | ||
1051 | request->request.dma = DMA_ADDR_INVALID; | ||
1052 | request->epnum = musb_ep->current_epnum; | ||
1053 | request->ep = musb_ep; | ||
1054 | } | ||
1055 | |||
1056 | return &request->request; | ||
1057 | } | ||
1058 | |||
1059 | /* | ||
1060 | * Free a request | ||
1061 | * Reused by ep0 code. | ||
1062 | */ | ||
1063 | void musb_free_request(struct usb_ep *ep, struct usb_request *req) | ||
1064 | { | ||
1065 | kfree(to_musb_request(req)); | ||
1066 | } | ||
1067 | |||
1068 | static LIST_HEAD(buffers); | ||
1069 | |||
1070 | struct free_record { | ||
1071 | struct list_head list; | ||
1072 | struct device *dev; | ||
1073 | unsigned bytes; | ||
1074 | dma_addr_t dma; | ||
1075 | }; | ||
1076 | |||
1077 | /* | ||
1078 | * Context: controller locked, IRQs blocked. | ||
1079 | */ | ||
1080 | static void musb_ep_restart(struct musb *musb, struct musb_request *req) | ||
1081 | { | ||
1082 | DBG(3, "<== %s request %p len %u on hw_ep%d\n", | ||
1083 | req->tx ? "TX/IN" : "RX/OUT", | ||
1084 | &req->request, req->request.length, req->epnum); | ||
1085 | |||
1086 | musb_ep_select(musb->mregs, req->epnum); | ||
1087 | if (req->tx) | ||
1088 | txstate(musb, req); | ||
1089 | else | ||
1090 | rxstate(musb, req); | ||
1091 | } | ||
1092 | |||
1093 | static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | ||
1094 | gfp_t gfp_flags) | ||
1095 | { | ||
1096 | struct musb_ep *musb_ep; | ||
1097 | struct musb_request *request; | ||
1098 | struct musb *musb; | ||
1099 | int status = 0; | ||
1100 | unsigned long lockflags; | ||
1101 | |||
1102 | if (!ep || !req) | ||
1103 | return -EINVAL; | ||
1104 | if (!req->buf) | ||
1105 | return -ENODATA; | ||
1106 | |||
1107 | musb_ep = to_musb_ep(ep); | ||
1108 | musb = musb_ep->musb; | ||
1109 | |||
1110 | request = to_musb_request(req); | ||
1111 | request->musb = musb; | ||
1112 | |||
1113 | if (request->ep != musb_ep) | ||
1114 | return -EINVAL; | ||
1115 | |||
1116 | DBG(4, "<== to %s request=%p\n", ep->name, req); | ||
1117 | |||
1118 | /* request is mine now... */ | ||
1119 | request->request.actual = 0; | ||
1120 | request->request.status = -EINPROGRESS; | ||
1121 | request->epnum = musb_ep->current_epnum; | ||
1122 | request->tx = musb_ep->is_in; | ||
1123 | |||
1124 | if (is_dma_capable() && musb_ep->dma) { | ||
1125 | if (request->request.dma == DMA_ADDR_INVALID) { | ||
1126 | request->request.dma = dma_map_single( | ||
1127 | musb->controller, | ||
1128 | request->request.buf, | ||
1129 | request->request.length, | ||
1130 | request->tx | ||
1131 | ? DMA_TO_DEVICE | ||
1132 | : DMA_FROM_DEVICE); | ||
1133 | request->mapped = 1; | ||
1134 | } else { | ||
1135 | dma_sync_single_for_device(musb->controller, | ||
1136 | request->request.dma, | ||
1137 | request->request.length, | ||
1138 | request->tx | ||
1139 | ? DMA_TO_DEVICE | ||
1140 | : DMA_FROM_DEVICE); | ||
1141 | request->mapped = 0; | ||
1142 | } | ||
1143 | } else if (!req->buf) { | ||
1144 | return -ENODATA; | ||
1145 | } else | ||
1146 | request->mapped = 0; | ||
1147 | |||
1148 | spin_lock_irqsave(&musb->lock, lockflags); | ||
1149 | |||
1150 | /* don't queue if the ep is down */ | ||
1151 | if (!musb_ep->desc) { | ||
1152 | DBG(4, "req %p queued to %s while ep %s\n", | ||
1153 | req, ep->name, "disabled"); | ||
1154 | status = -ESHUTDOWN; | ||
1155 | goto cleanup; | ||
1156 | } | ||
1157 | |||
1158 | /* add request to the list */ | ||
1159 | list_add_tail(&(request->request.list), &(musb_ep->req_list)); | ||
1160 | |||
1161 | /* if this is the head of the queue, start i/o ... */ | ||
1162 | if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) | ||
1163 | musb_ep_restart(musb, request); | ||
1164 | |||
1165 | cleanup: | ||
1166 | spin_unlock_irqrestore(&musb->lock, lockflags); | ||
1167 | return status; | ||
1168 | } | ||
1169 | |||
1170 | static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) | ||
1171 | { | ||
1172 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
1173 | struct usb_request *r; | ||
1174 | unsigned long flags; | ||
1175 | int status = 0; | ||
1176 | struct musb *musb = musb_ep->musb; | ||
1177 | |||
1178 | if (!ep || !request || to_musb_request(request)->ep != musb_ep) | ||
1179 | return -EINVAL; | ||
1180 | |||
1181 | spin_lock_irqsave(&musb->lock, flags); | ||
1182 | |||
1183 | list_for_each_entry(r, &musb_ep->req_list, list) { | ||
1184 | if (r == request) | ||
1185 | break; | ||
1186 | } | ||
1187 | if (r != request) { | ||
1188 | DBG(3, "request %p not queued to %s\n", request, ep->name); | ||
1189 | status = -EINVAL; | ||
1190 | goto done; | ||
1191 | } | ||
1192 | |||
1193 | /* if the hardware doesn't have the request, easy ... */ | ||
1194 | if (musb_ep->req_list.next != &request->list || musb_ep->busy) | ||
1195 | musb_g_giveback(musb_ep, request, -ECONNRESET); | ||
1196 | |||
1197 | /* ... else abort the dma transfer ... */ | ||
1198 | else if (is_dma_capable() && musb_ep->dma) { | ||
1199 | struct dma_controller *c = musb->dma_controller; | ||
1200 | |||
1201 | musb_ep_select(musb->mregs, musb_ep->current_epnum); | ||
1202 | if (c->channel_abort) | ||
1203 | status = c->channel_abort(musb_ep->dma); | ||
1204 | else | ||
1205 | status = -EBUSY; | ||
1206 | if (status == 0) | ||
1207 | musb_g_giveback(musb_ep, request, -ECONNRESET); | ||
1208 | } else { | ||
1209 | /* NOTE: by sticking to easily tested hardware/driver states, | ||
1210 | * we leave counting of in-flight packets imprecise. | ||
1211 | */ | ||
1212 | musb_g_giveback(musb_ep, request, -ECONNRESET); | ||
1213 | } | ||
1214 | |||
1215 | done: | ||
1216 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1217 | return status; | ||
1218 | } | ||
1219 | |||
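Taken together, the queue/dequeue paths and the giveback calls above report a request's fate through req->status: 0 on success, -ECONNRESET when dequeued, -ESHUTDOWN when the endpoint goes down, -EPIPE after a stall. A hedged sketch of a function-driver completion handler reacting to those codes (all names here are hypothetical):

#include <linux/kernel.h>
#include <linux/usb/gadget.h>

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	switch (req->status) {
	case 0:
		/* req->actual bytes landed in req->buf; consume them here */
		pr_debug("rx %u bytes\n", req->actual);
		break;
	case -ECONNRESET:		/* request was dequeued */
	case -ESHUTDOWN:		/* endpoint disabled or driver unbound */
		usb_ep_free_request(ep, req);
		return;
	default:			/* -EPIPE and friends: drop the data */
		break;
	}

	/* sketch: resubmit so the OUT endpoint always has a request queued */
	usb_ep_queue(ep, req, GFP_ATOMIC);
}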
1220 | /* | ||
1221 | * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any | ||
1222 | * data but will queue requests. | ||
1223 | * | ||
1224 | * exported to ep0 code | ||
1225 | */ | ||
1226 | int musb_gadget_set_halt(struct usb_ep *ep, int value) | ||
1227 | { | ||
1228 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
1229 | u8 epnum = musb_ep->current_epnum; | ||
1230 | struct musb *musb = musb_ep->musb; | ||
1231 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
1232 | void __iomem *mbase; | ||
1233 | unsigned long flags; | ||
1234 | u16 csr; | ||
1235 | struct musb_request *request = NULL; | ||
1236 | int status = 0; | ||
1237 | |||
1238 | if (!ep) | ||
1239 | return -EINVAL; | ||
1240 | mbase = musb->mregs; | ||
1241 | |||
1242 | spin_lock_irqsave(&musb->lock, flags); | ||
1243 | |||
1244 | if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { | ||
1245 | status = -EINVAL; | ||
1246 | goto done; | ||
1247 | } | ||
1248 | |||
1249 | musb_ep_select(mbase, epnum); | ||
1250 | |||
1251 | /* cannot portably stall with non-empty FIFO */ | ||
1252 | request = to_musb_request(next_request(musb_ep)); | ||
1253 | if (value && musb_ep->is_in) { | ||
1254 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1255 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
1256 | DBG(3, "%s fifo busy, cannot halt\n", ep->name); | ||
1257 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1258 | return -EAGAIN; | ||
1259 | } | ||
1260 | |||
1261 | } | ||
1262 | |||
1263 | /* set/clear the stall and toggle bits */ | ||
1264 | DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); | ||
1265 | if (musb_ep->is_in) { | ||
1266 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1267 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
1268 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
1269 | csr |= MUSB_TXCSR_P_WZC_BITS | ||
1270 | | MUSB_TXCSR_CLRDATATOG; | ||
1271 | if (value) | ||
1272 | csr |= MUSB_TXCSR_P_SENDSTALL; | ||
1273 | else | ||
1274 | csr &= ~(MUSB_TXCSR_P_SENDSTALL | ||
1275 | | MUSB_TXCSR_P_SENTSTALL); | ||
1276 | csr &= ~MUSB_TXCSR_TXPKTRDY; | ||
1277 | musb_writew(epio, MUSB_TXCSR, csr); | ||
1278 | } else { | ||
1279 | csr = musb_readw(epio, MUSB_RXCSR); | ||
1280 | csr |= MUSB_RXCSR_P_WZC_BITS | ||
1281 | | MUSB_RXCSR_FLUSHFIFO | ||
1282 | | MUSB_RXCSR_CLRDATATOG; | ||
1283 | if (value) | ||
1284 | csr |= MUSB_RXCSR_P_SENDSTALL; | ||
1285 | else | ||
1286 | csr &= ~(MUSB_RXCSR_P_SENDSTALL | ||
1287 | | MUSB_RXCSR_P_SENTSTALL); | ||
1288 | musb_writew(epio, MUSB_RXCSR, csr); | ||
1289 | } | ||
1290 | |||
1291 | done: | ||
1292 | |||
1293 | /* maybe start the first request in the queue */ | ||
1294 | if (!musb_ep->busy && !value && request) { | ||
1295 | DBG(3, "restarting the request\n"); | ||
1296 | musb_ep_restart(musb, request); | ||
1297 | } | ||
1298 | |||
1299 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1300 | return status; | ||
1301 | } | ||
1302 | |||
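The -EAGAIN return above surfaces directly to a function driver that tries to stall an IN endpoint while data is still in the FIFO. An illustrative caller (the endpoint and function names are hypothetical, not from this patch):

#include <linux/kernel.h>
#include <linux/usb/gadget.h>

static void demo_reject_command(struct usb_ep *ep)
{
	/* routed through the ep ops to musb_gadget_set_halt(ep, 1) */
	int ret = usb_ep_set_halt(ep);

	if (ret == -EAGAIN)
		pr_debug("%s: FIFO not empty, stall deferred\n", ep->name);
}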
1303 | static int musb_gadget_fifo_status(struct usb_ep *ep) | ||
1304 | { | ||
1305 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
1306 | void __iomem *epio = musb_ep->hw_ep->regs; | ||
1307 | int retval = -EINVAL; | ||
1308 | |||
1309 | if (musb_ep->desc && !musb_ep->is_in) { | ||
1310 | struct musb *musb = musb_ep->musb; | ||
1311 | int epnum = musb_ep->current_epnum; | ||
1312 | void __iomem *mbase = musb->mregs; | ||
1313 | unsigned long flags; | ||
1314 | |||
1315 | spin_lock_irqsave(&musb->lock, flags); | ||
1316 | |||
1317 | musb_ep_select(mbase, epnum); | ||
1318 | /* FIXME return zero unless RXPKTRDY is set */ | ||
1319 | retval = musb_readw(epio, MUSB_RXCOUNT); | ||
1320 | |||
1321 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1322 | } | ||
1323 | return retval; | ||
1324 | } | ||
1325 | |||
1326 | static void musb_gadget_fifo_flush(struct usb_ep *ep) | ||
1327 | { | ||
1328 | struct musb_ep *musb_ep = to_musb_ep(ep); | ||
1329 | struct musb *musb = musb_ep->musb; | ||
1330 | u8 epnum = musb_ep->current_epnum; | ||
1331 | void __iomem *epio = musb->endpoints[epnum].regs; | ||
1332 | void __iomem *mbase; | ||
1333 | unsigned long flags; | ||
1334 | u16 csr, int_txe; | ||
1335 | |||
1336 | mbase = musb->mregs; | ||
1337 | |||
1338 | spin_lock_irqsave(&musb->lock, flags); | ||
1339 | musb_ep_select(mbase, (u8) epnum); | ||
1340 | |||
1341 | /* disable interrupts */ | ||
1342 | int_txe = musb_readw(mbase, MUSB_INTRTXE); | ||
1343 | musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); | ||
1344 | |||
1345 | if (musb_ep->is_in) { | ||
1346 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1347 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
1348 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; | ||
1349 | musb_writew(epio, MUSB_TXCSR, csr); | ||
1350 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ | ||
1351 | musb_writew(epio, MUSB_TXCSR, csr); | ||
1352 | } | ||
1353 | } else { | ||
1354 | csr = musb_readw(epio, MUSB_RXCSR); | ||
1355 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; | ||
1356 | musb_writew(epio, MUSB_RXCSR, csr); | ||
1357 | musb_writew(epio, MUSB_RXCSR, csr); | ||
1358 | } | ||
1359 | |||
1360 | /* re-enable interrupt */ | ||
1361 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | ||
1362 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1363 | } | ||
1364 | |||
1365 | static const struct usb_ep_ops musb_ep_ops = { | ||
1366 | .enable = musb_gadget_enable, | ||
1367 | .disable = musb_gadget_disable, | ||
1368 | .alloc_request = musb_alloc_request, | ||
1369 | .free_request = musb_free_request, | ||
1370 | .queue = musb_gadget_queue, | ||
1371 | .dequeue = musb_gadget_dequeue, | ||
1372 | .set_halt = musb_gadget_set_halt, | ||
1373 | .fifo_status = musb_gadget_fifo_status, | ||
1374 | .fifo_flush = musb_gadget_fifo_flush | ||
1375 | }; | ||
1376 | |||
1377 | /* ----------------------------------------------------------------------- */ | ||
1378 | |||
1379 | static int musb_gadget_get_frame(struct usb_gadget *gadget) | ||
1380 | { | ||
1381 | struct musb *musb = gadget_to_musb(gadget); | ||
1382 | |||
1383 | return (int)musb_readw(musb->mregs, MUSB_FRAME); | ||
1384 | } | ||
1385 | |||
1386 | static int musb_gadget_wakeup(struct usb_gadget *gadget) | ||
1387 | { | ||
1388 | struct musb *musb = gadget_to_musb(gadget); | ||
1389 | void __iomem *mregs = musb->mregs; | ||
1390 | unsigned long flags; | ||
1391 | int status = -EINVAL; | ||
1392 | u8 power, devctl; | ||
1393 | int retries; | ||
1394 | |||
1395 | spin_lock_irqsave(&musb->lock, flags); | ||
1396 | |||
1397 | switch (musb->xceiv.state) { | ||
1398 | case OTG_STATE_B_PERIPHERAL: | ||
1399 | /* NOTE: OTG state machine doesn't include B_SUSPENDED; | ||
1400 | * that's part of the standard usb 1.1 state machine, and | ||
1401 | * doesn't affect OTG transitions. | ||
1402 | */ | ||
1403 | if (musb->may_wakeup && musb->is_suspended) | ||
1404 | break; | ||
1405 | goto done; | ||
1406 | case OTG_STATE_B_IDLE: | ||
1407 | /* Start SRP ... OTG not required. */ | ||
1408 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
1409 | DBG(2, "Sending SRP: devctl: %02x\n", devctl); | ||
1410 | devctl |= MUSB_DEVCTL_SESSION; | ||
1411 | musb_writeb(mregs, MUSB_DEVCTL, devctl); | ||
1412 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
1413 | retries = 100; | ||
1414 | while (!(devctl & MUSB_DEVCTL_SESSION)) { | ||
1415 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
1416 | if (retries-- < 1) | ||
1417 | break; | ||
1418 | } | ||
1419 | retries = 10000; | ||
1420 | while (devctl & MUSB_DEVCTL_SESSION) { | ||
1421 | devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
1422 | if (retries-- < 1) | ||
1423 | break; | ||
1424 | } | ||
1425 | |||
1426 | /* Block idling for at least 1s */ | ||
1427 | musb_platform_try_idle(musb, | ||
1428 | jiffies + msecs_to_jiffies(1000)); | ||
1429 | |||
1430 | status = 0; | ||
1431 | goto done; | ||
1432 | default: | ||
1433 | DBG(2, "Unhandled wake: %s\n", otg_state_string(musb)); | ||
1434 | goto done; | ||
1435 | } | ||
1436 | |||
1437 | status = 0; | ||
1438 | |||
1439 | power = musb_readb(mregs, MUSB_POWER); | ||
1440 | power |= MUSB_POWER_RESUME; | ||
1441 | musb_writeb(mregs, MUSB_POWER, power); | ||
1442 | DBG(2, "issue wakeup\n"); | ||
1443 | |||
1444 | /* FIXME do this next chunk in a timer callback, no udelay */ | ||
1445 | mdelay(2); | ||
1446 | |||
1447 | power = musb_readb(mregs, MUSB_POWER); | ||
1448 | power &= ~MUSB_POWER_RESUME; | ||
1449 | musb_writeb(mregs, MUSB_POWER, power); | ||
1450 | done: | ||
1451 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1452 | return status; | ||
1453 | } | ||
1454 | |||
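Remote wakeup is requested from a function driver through the usb_gadget_wakeup() wrapper, which ends up in the handler above. A minimal hedged sketch (the gadget pointer is assumed to come from the driver's bind() context):

#include <linux/usb/gadget.h>

static int demo_remote_wakeup(struct usb_gadget *gadget)
{
	/* only useful if the host enabled remote wakeup and the link is suspended */
	return usb_gadget_wakeup(gadget);
}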
1455 | static int | ||
1456 | musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) | ||
1457 | { | ||
1458 | struct musb *musb = gadget_to_musb(gadget); | ||
1459 | |||
1460 | musb->is_self_powered = !!is_selfpowered; | ||
1461 | return 0; | ||
1462 | } | ||
1463 | |||
1464 | static void musb_pullup(struct musb *musb, int is_on) | ||
1465 | { | ||
1466 | u8 power; | ||
1467 | |||
1468 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
1469 | if (is_on) | ||
1470 | power |= MUSB_POWER_SOFTCONN; | ||
1471 | else | ||
1472 | power &= ~MUSB_POWER_SOFTCONN; | ||
1473 | |||
1474 | /* FIXME if on, HdrcStart; if off, HdrcStop */ | ||
1475 | |||
1476 | DBG(3, "gadget %s D+ pullup %s\n", | ||
1477 | musb->gadget_driver->function, is_on ? "on" : "off"); | ||
1478 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
1479 | } | ||
1480 | |||
1481 | #if 0 | ||
1482 | static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) | ||
1483 | { | ||
1484 | DBG(2, "<= %s =>\n", __func__); | ||
1485 | |||
1486 | /* | ||
1487 | * FIXME iff driver's softconnect flag is set (as it is during probe, | ||
1488 | * though that can clear it), just musb_pullup(). | ||
1489 | */ | ||
1490 | |||
1491 | return -EINVAL; | ||
1492 | } | ||
1493 | #endif | ||
1494 | |||
1495 | static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) | ||
1496 | { | ||
1497 | struct musb *musb = gadget_to_musb(gadget); | ||
1498 | |||
1499 | if (!musb->xceiv.set_power) | ||
1500 | return -EOPNOTSUPP; | ||
1501 | return otg_set_power(&musb->xceiv, mA); | ||
1502 | } | ||
1503 | |||
1504 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | ||
1505 | { | ||
1506 | struct musb *musb = gadget_to_musb(gadget); | ||
1507 | unsigned long flags; | ||
1508 | |||
1509 | is_on = !!is_on; | ||
1510 | |||
1511 | /* NOTE: this assumes we are sensing vbus; we'd rather | ||
1512 | * not pullup unless the B-session is active. | ||
1513 | */ | ||
1514 | spin_lock_irqsave(&musb->lock, flags); | ||
1515 | if (is_on != musb->softconnect) { | ||
1516 | musb->softconnect = is_on; | ||
1517 | musb_pullup(musb, is_on); | ||
1518 | } | ||
1519 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1520 | return 0; | ||
1521 | } | ||
1522 | |||
1523 | static const struct usb_gadget_ops musb_gadget_operations = { | ||
1524 | .get_frame = musb_gadget_get_frame, | ||
1525 | .wakeup = musb_gadget_wakeup, | ||
1526 | .set_selfpowered = musb_gadget_set_self_powered, | ||
1527 | /* .vbus_session = musb_gadget_vbus_session, */ | ||
1528 | .vbus_draw = musb_gadget_vbus_draw, | ||
1529 | .pullup = musb_gadget_pullup, | ||
1530 | }; | ||
1531 | |||
1532 | /* ----------------------------------------------------------------------- */ | ||
1533 | |||
1534 | /* Registration */ | ||
1535 | |||
1536 | /* Only this registration code "knows" the rule (from USB standards) | ||
1537 | * about there being only one external upstream port. It assumes | ||
1538 | * all peripheral ports are external... | ||
1539 | */ | ||
1540 | static struct musb *the_gadget; | ||
1541 | |||
1542 | static void musb_gadget_release(struct device *dev) | ||
1543 | { | ||
1544 | /* kref_put(WHAT) */ | ||
1545 | dev_dbg(dev, "%s\n", __func__); | ||
1546 | } | ||
1547 | |||
1548 | |||
1549 | static void __init | ||
1550 | init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) | ||
1551 | { | ||
1552 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
1553 | |||
1554 | memset(ep, 0, sizeof *ep); | ||
1555 | |||
1556 | ep->current_epnum = epnum; | ||
1557 | ep->musb = musb; | ||
1558 | ep->hw_ep = hw_ep; | ||
1559 | ep->is_in = is_in; | ||
1560 | |||
1561 | INIT_LIST_HEAD(&ep->req_list); | ||
1562 | |||
1563 | sprintf(ep->name, "ep%d%s", epnum, | ||
1564 | (!epnum || hw_ep->is_shared_fifo) ? "" : ( | ||
1565 | is_in ? "in" : "out")); | ||
1566 | ep->end_point.name = ep->name; | ||
1567 | INIT_LIST_HEAD(&ep->end_point.ep_list); | ||
1568 | if (!epnum) { | ||
1569 | ep->end_point.maxpacket = 64; | ||
1570 | ep->end_point.ops = &musb_g_ep0_ops; | ||
1571 | musb->g.ep0 = &ep->end_point; | ||
1572 | } else { | ||
1573 | if (is_in) | ||
1574 | ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; | ||
1575 | else | ||
1576 | ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; | ||
1577 | ep->end_point.ops = &musb_ep_ops; | ||
1578 | list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); | ||
1579 | } | ||
1580 | } | ||
1581 | |||
1582 | /* | ||
1583 | * Initialize the endpoints exposed to peripheral drivers, with backlinks | ||
1584 | * to the rest of the driver state. | ||
1585 | */ | ||
1586 | static inline void __init musb_g_init_endpoints(struct musb *musb) | ||
1587 | { | ||
1588 | u8 epnum; | ||
1589 | struct musb_hw_ep *hw_ep; | ||
1590 | unsigned count = 0; | ||
1591 | |||
1592 | /* initialize endpoint list just once */ | ||
1593 | INIT_LIST_HEAD(&(musb->g.ep_list)); | ||
1594 | |||
1595 | for (epnum = 0, hw_ep = musb->endpoints; | ||
1596 | epnum < musb->nr_endpoints; | ||
1597 | epnum++, hw_ep++) { | ||
1598 | if (hw_ep->is_shared_fifo /* || !epnum */) { | ||
1599 | init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); | ||
1600 | count++; | ||
1601 | } else { | ||
1602 | if (hw_ep->max_packet_sz_tx) { | ||
1603 | init_peripheral_ep(musb, &hw_ep->ep_in, | ||
1604 | epnum, 1); | ||
1605 | count++; | ||
1606 | } | ||
1607 | if (hw_ep->max_packet_sz_rx) { | ||
1608 | init_peripheral_ep(musb, &hw_ep->ep_out, | ||
1609 | epnum, 0); | ||
1610 | count++; | ||
1611 | } | ||
1612 | } | ||
1613 | } | ||
1614 | } | ||
1615 | |||
1616 | /* called once during driver setup to initialize and link into | ||
1617 | * the driver model; memory is zeroed. | ||
1618 | */ | ||
1619 | int __init musb_gadget_setup(struct musb *musb) | ||
1620 | { | ||
1621 | int status; | ||
1622 | |||
1623 | /* REVISIT minor race: if (erroneously) setting up two | ||
1624 | * musb peripherals at the same time, only the bus lock | ||
1625 | * is probably held. | ||
1626 | */ | ||
1627 | if (the_gadget) | ||
1628 | return -EBUSY; | ||
1629 | the_gadget = musb; | ||
1630 | |||
1631 | musb->g.ops = &musb_gadget_operations; | ||
1632 | musb->g.is_dualspeed = 1; | ||
1633 | musb->g.speed = USB_SPEED_UNKNOWN; | ||
1634 | |||
1635 | /* this "gadget" abstracts/virtualizes the controller */ | ||
1636 | strcpy(musb->g.dev.bus_id, "gadget"); | ||
1637 | musb->g.dev.parent = musb->controller; | ||
1638 | musb->g.dev.dma_mask = musb->controller->dma_mask; | ||
1639 | musb->g.dev.release = musb_gadget_release; | ||
1640 | musb->g.name = musb_driver_name; | ||
1641 | |||
1642 | if (is_otg_enabled(musb)) | ||
1643 | musb->g.is_otg = 1; | ||
1644 | |||
1645 | musb_g_init_endpoints(musb); | ||
1646 | |||
1647 | musb->is_active = 0; | ||
1648 | musb_platform_try_idle(musb, 0); | ||
1649 | |||
1650 | status = device_register(&musb->g.dev); | ||
1651 | if (status != 0) | ||
1652 | the_gadget = NULL; | ||
1653 | return status; | ||
1654 | } | ||
1655 | |||
1656 | void musb_gadget_cleanup(struct musb *musb) | ||
1657 | { | ||
1658 | if (musb != the_gadget) | ||
1659 | return; | ||
1660 | |||
1661 | device_unregister(&musb->g.dev); | ||
1662 | the_gadget = NULL; | ||
1663 | } | ||
1664 | |||
1665 | /* | ||
1666 | * Register the gadget driver. Used by gadget drivers when | ||
1667 | * registering themselves with the controller. | ||
1668 | * | ||
1669 | * -EINVAL something went wrong (not driver) | ||
1670 | * -EBUSY another gadget is already using the controller | ||
1671 | * -ENOMEM no memory to perform the operation | ||
1672 | * | ||
1673 | * @param driver the gadget driver | ||
1674 | * @return <0 if error, 0 if everything is fine | ||
1675 | */ | ||
1676 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | ||
1677 | { | ||
1678 | int retval; | ||
1679 | unsigned long flags; | ||
1680 | struct musb *musb = the_gadget; | ||
1681 | |||
1682 | if (!driver | ||
1683 | || driver->speed != USB_SPEED_HIGH | ||
1684 | || !driver->bind | ||
1685 | || !driver->setup) | ||
1686 | return -EINVAL; | ||
1687 | |||
1688 | /* driver must be initialized to support peripheral mode */ | ||
1689 | if (!musb || !(musb->board_mode == MUSB_OTG | ||
1690 | || musb->board_mode == MUSB_PERIPHERAL)) { | ||
1691 | DBG(1, "%s, no dev??\n", __func__); | ||
1692 | return -ENODEV; | ||
1693 | } | ||
1694 | |||
1695 | DBG(3, "registering driver %s\n", driver->function); | ||
1696 | spin_lock_irqsave(&musb->lock, flags); | ||
1697 | |||
1698 | if (musb->gadget_driver) { | ||
1699 | DBG(1, "%s is already bound to %s\n", | ||
1700 | musb_driver_name, | ||
1701 | musb->gadget_driver->driver.name); | ||
1702 | retval = -EBUSY; | ||
1703 | } else { | ||
1704 | musb->gadget_driver = driver; | ||
1705 | musb->g.dev.driver = &driver->driver; | ||
1706 | driver->driver.bus = NULL; | ||
1707 | musb->softconnect = 1; | ||
1708 | retval = 0; | ||
1709 | } | ||
1710 | |||
1711 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1712 | |||
1713 | if (retval == 0) { | ||
1714 | retval = driver->bind(&musb->g); | ||
1715 | if (retval != 0) { | ||
1716 | DBG(3, "bind to driver %s failed --> %d\n", | ||
1717 | driver->driver.name, retval); | ||
1718 | musb->gadget_driver = NULL; | ||
1719 | musb->g.dev.driver = NULL; | ||
1720 | } | ||
1721 | |||
1722 | spin_lock_irqsave(&musb->lock, flags); | ||
1723 | |||
1724 | /* REVISIT always use otg_set_peripheral(), handling | ||
1725 | * issues including the root hub one below ... | ||
1726 | */ | ||
1727 | musb->xceiv.gadget = &musb->g; | ||
1728 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
1729 | musb->is_active = 1; | ||
1730 | |||
1731 | /* FIXME this ignores the softconnect flag. Drivers are | ||
1732 | * allowed to hold the peripheral inactive until for example | ||
1733 | * userspace hooks up printer hardware or DSP codecs, so | ||
1734 | * hosts only see fully functional devices. | ||
1735 | */ | ||
1736 | |||
1737 | if (!is_otg_enabled(musb)) | ||
1738 | musb_start(musb); | ||
1739 | |||
1740 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1741 | |||
1742 | if (is_otg_enabled(musb)) { | ||
1743 | DBG(3, "OTG startup...\n"); | ||
1744 | |||
1745 | /* REVISIT: funcall to other code, which also | ||
1746 | * handles power budgeting ... this way also | ||
1747 | * ensures HdrcStart is indirectly called. | ||
1748 | */ | ||
1749 | retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); | ||
1750 | if (retval < 0) { | ||
1751 | DBG(1, "add_hcd failed, %d\n", retval); | ||
1752 | spin_lock_irqsave(&musb->lock, flags); | ||
1753 | musb->xceiv.gadget = NULL; | ||
1754 | musb->xceiv.state = OTG_STATE_UNDEFINED; | ||
1755 | musb->gadget_driver = NULL; | ||
1756 | musb->g.dev.driver = NULL; | ||
1757 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1758 | } | ||
1759 | } | ||
1760 | } | ||
1761 | |||
1762 | return retval; | ||
1763 | } | ||
1764 | EXPORT_SYMBOL(usb_gadget_register_driver); | ||
1765 | |||
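The checks at the top of usb_gadget_register_driver() define the binding contract: the driver must claim USB_SPEED_HIGH and provide bind() and setup(). A hedged skeleton of a driver that satisfies it (every name below is a placeholder, not code from this patch):

#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

static int demo_bind(struct usb_gadget *gadget)
{
	/* allocate per-device state, autoconfigure endpoints, etc. */
	return 0;
}

static void demo_unbind(struct usb_gadget *gadget)
{
}

static int demo_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* handle class/vendor control requests; -EOPNOTSUPP stalls ep0 */
	return -EOPNOTSUPP;
}

static void demo_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver demo_driver = {
	.function	= "demo",
	.speed		= USB_SPEED_HIGH,	/* anything else is rejected above */
	.bind		= demo_bind,
	.unbind		= demo_unbind,
	.setup		= demo_setup,
	.disconnect	= demo_disconnect,
	.driver		= {
		.name	= "demo",
	},
};

static int __init demo_init(void)
{
	return usb_gadget_register_driver(&demo_driver);
}
module_init(demo_init);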
1766 | static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) | ||
1767 | { | ||
1768 | int i; | ||
1769 | struct musb_hw_ep *hw_ep; | ||
1770 | |||
1771 | /* don't disconnect if it's not connected */ | ||
1772 | if (musb->g.speed == USB_SPEED_UNKNOWN) | ||
1773 | driver = NULL; | ||
1774 | else | ||
1775 | musb->g.speed = USB_SPEED_UNKNOWN; | ||
1776 | |||
1777 | /* deactivate the hardware */ | ||
1778 | if (musb->softconnect) { | ||
1779 | musb->softconnect = 0; | ||
1780 | musb_pullup(musb, 0); | ||
1781 | } | ||
1782 | musb_stop(musb); | ||
1783 | |||
1784 | /* killing any outstanding requests will quiesce the driver; | ||
1785 | * then report disconnect | ||
1786 | */ | ||
1787 | if (driver) { | ||
1788 | for (i = 0, hw_ep = musb->endpoints; | ||
1789 | i < musb->nr_endpoints; | ||
1790 | i++, hw_ep++) { | ||
1791 | musb_ep_select(musb->mregs, i); | ||
1792 | if (hw_ep->is_shared_fifo /* || !epnum */) { | ||
1793 | nuke(&hw_ep->ep_in, -ESHUTDOWN); | ||
1794 | } else { | ||
1795 | if (hw_ep->max_packet_sz_tx) | ||
1796 | nuke(&hw_ep->ep_in, -ESHUTDOWN); | ||
1797 | if (hw_ep->max_packet_sz_rx) | ||
1798 | nuke(&hw_ep->ep_out, -ESHUTDOWN); | ||
1799 | } | ||
1800 | } | ||
1801 | |||
1802 | spin_unlock(&musb->lock); | ||
1803 | driver->disconnect(&musb->g); | ||
1804 | spin_lock(&musb->lock); | ||
1805 | } | ||
1806 | } | ||
1807 | |||
1808 | /* | ||
1809 | * Unregister the gadget driver. Used by gadget drivers when | ||
1810 | * unregistering themselves from the controller. | ||
1811 | * | ||
1812 | * @param driver the gadget driver to unregister | ||
1813 | */ | ||
1814 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | ||
1815 | { | ||
1816 | unsigned long flags; | ||
1817 | int retval = 0; | ||
1818 | struct musb *musb = the_gadget; | ||
1819 | |||
1820 | if (!driver || !driver->unbind || !musb) | ||
1821 | return -EINVAL; | ||
1822 | |||
1823 | /* REVISIT always use otg_set_peripheral() here too; | ||
1824 | * this needs to shut down the OTG engine. | ||
1825 | */ | ||
1826 | |||
1827 | spin_lock_irqsave(&musb->lock, flags); | ||
1828 | |||
1829 | #ifdef CONFIG_USB_MUSB_OTG | ||
1830 | musb_hnp_stop(musb); | ||
1831 | #endif | ||
1832 | |||
1833 | if (musb->gadget_driver == driver) { | ||
1834 | |||
1835 | (void) musb_gadget_vbus_draw(&musb->g, 0); | ||
1836 | |||
1837 | musb->xceiv.state = OTG_STATE_UNDEFINED; | ||
1838 | stop_activity(musb, driver); | ||
1839 | |||
1840 | DBG(3, "unregistering driver %s\n", driver->function); | ||
1841 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1842 | driver->unbind(&musb->g); | ||
1843 | spin_lock_irqsave(&musb->lock, flags); | ||
1844 | |||
1845 | musb->gadget_driver = NULL; | ||
1846 | musb->g.dev.driver = NULL; | ||
1847 | |||
1848 | musb->is_active = 0; | ||
1849 | musb_platform_try_idle(musb, 0); | ||
1850 | } else | ||
1851 | retval = -EINVAL; | ||
1852 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1853 | |||
1854 | if (is_otg_enabled(musb) && retval == 0) { | ||
1855 | usb_remove_hcd(musb_to_hcd(musb)); | ||
1856 | /* FIXME we need to be able to register another | ||
1857 | * gadget driver here and have everything work; | ||
1858 | * that currently misbehaves. | ||
1859 | */ | ||
1860 | } | ||
1861 | |||
1862 | return retval; | ||
1863 | } | ||
1864 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | ||
1865 | |||
1866 | |||
1867 | /* ----------------------------------------------------------------------- */ | ||
1868 | |||
1869 | /* lifecycle operations called through plat_uds.c */ | ||
1870 | |||
1871 | void musb_g_resume(struct musb *musb) | ||
1872 | { | ||
1873 | musb->is_suspended = 0; | ||
1874 | switch (musb->xceiv.state) { | ||
1875 | case OTG_STATE_B_IDLE: | ||
1876 | break; | ||
1877 | case OTG_STATE_B_WAIT_ACON: | ||
1878 | case OTG_STATE_B_PERIPHERAL: | ||
1879 | musb->is_active = 1; | ||
1880 | if (musb->gadget_driver && musb->gadget_driver->resume) { | ||
1881 | spin_unlock(&musb->lock); | ||
1882 | musb->gadget_driver->resume(&musb->g); | ||
1883 | spin_lock(&musb->lock); | ||
1884 | } | ||
1885 | break; | ||
1886 | default: | ||
1887 | WARNING("unhandled RESUME transition (%s)\n", | ||
1888 | otg_state_string(musb)); | ||
1889 | } | ||
1890 | } | ||
1891 | |||
1892 | /* called when SOF packets stop for 3+ msec */ | ||
1893 | void musb_g_suspend(struct musb *musb) | ||
1894 | { | ||
1895 | u8 devctl; | ||
1896 | |||
1897 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
1898 | DBG(3, "devctl %02x\n", devctl); | ||
1899 | |||
1900 | switch (musb->xceiv.state) { | ||
1901 | case OTG_STATE_B_IDLE: | ||
1902 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | ||
1903 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
1904 | break; | ||
1905 | case OTG_STATE_B_PERIPHERAL: | ||
1906 | musb->is_suspended = 1; | ||
1907 | if (musb->gadget_driver && musb->gadget_driver->suspend) { | ||
1908 | spin_unlock(&musb->lock); | ||
1909 | musb->gadget_driver->suspend(&musb->g); | ||
1910 | spin_lock(&musb->lock); | ||
1911 | } | ||
1912 | break; | ||
1913 | default: | ||
1914 | /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; | ||
1915 | * A_PERIPHERAL may need care too | ||
1916 | */ | ||
1917 | WARNING("unhandled SUSPEND transition (%s)\n", | ||
1918 | otg_state_string(musb)); | ||
1919 | } | ||
1920 | } | ||
1921 | |||
1922 | /* Called during SRP */ | ||
1923 | void musb_g_wakeup(struct musb *musb) | ||
1924 | { | ||
1925 | musb_gadget_wakeup(&musb->g); | ||
1926 | } | ||
1927 | |||
1928 | /* called when VBUS drops below session threshold, and in other cases */ | ||
1929 | void musb_g_disconnect(struct musb *musb) | ||
1930 | { | ||
1931 | void __iomem *mregs = musb->mregs; | ||
1932 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | ||
1933 | |||
1934 | DBG(3, "devctl %02x\n", devctl); | ||
1935 | |||
1936 | /* clear HR */ | ||
1937 | musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); | ||
1938 | |||
1939 | /* don't draw vbus until new b-default session */ | ||
1940 | (void) musb_gadget_vbus_draw(&musb->g, 0); | ||
1941 | |||
1942 | musb->g.speed = USB_SPEED_UNKNOWN; | ||
1943 | if (musb->gadget_driver && musb->gadget_driver->disconnect) { | ||
1944 | spin_unlock(&musb->lock); | ||
1945 | musb->gadget_driver->disconnect(&musb->g); | ||
1946 | spin_lock(&musb->lock); | ||
1947 | } | ||
1948 | |||
1949 | switch (musb->xceiv.state) { | ||
1950 | default: | ||
1951 | #ifdef CONFIG_USB_MUSB_OTG | ||
1952 | DBG(2, "Unhandled disconnect %s, setting a_idle\n", | ||
1953 | otg_state_string(musb)); | ||
1954 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
1955 | break; | ||
1956 | case OTG_STATE_A_PERIPHERAL: | ||
1957 | musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; | ||
1958 | break; | ||
1959 | case OTG_STATE_B_WAIT_ACON: | ||
1960 | case OTG_STATE_B_HOST: | ||
1961 | #endif | ||
1962 | case OTG_STATE_B_PERIPHERAL: | ||
1963 | case OTG_STATE_B_IDLE: | ||
1964 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
1965 | break; | ||
1966 | case OTG_STATE_B_SRP_INIT: | ||
1967 | break; | ||
1968 | } | ||
1969 | |||
1970 | musb->is_active = 0; | ||
1971 | } | ||
1972 | |||
1973 | void musb_g_reset(struct musb *musb) | ||
1974 | __releases(musb->lock) | ||
1975 | __acquires(musb->lock) | ||
1976 | { | ||
1977 | void __iomem *mbase = musb->mregs; | ||
1978 | u8 devctl = musb_readb(mbase, MUSB_DEVCTL); | ||
1979 | u8 power; | ||
1980 | |||
1981 | DBG(3, "<== %s addr=%x driver '%s'\n", | ||
1982 | (devctl & MUSB_DEVCTL_BDEVICE) | ||
1983 | ? "B-Device" : "A-Device", | ||
1984 | musb_readb(mbase, MUSB_FADDR), | ||
1985 | musb->gadget_driver | ||
1986 | ? musb->gadget_driver->driver.name | ||
1987 | : NULL | ||
1988 | ); | ||
1989 | |||
1990 | /* report disconnect, if we didn't already (flushing EP state) */ | ||
1991 | if (musb->g.speed != USB_SPEED_UNKNOWN) | ||
1992 | musb_g_disconnect(musb); | ||
1993 | |||
1994 | /* clear HR */ | ||
1995 | else if (devctl & MUSB_DEVCTL_HR) | ||
1996 | musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); | ||
1997 | |||
1998 | |||
1999 | /* what speed did we negotiate? */ | ||
2000 | power = musb_readb(mbase, MUSB_POWER); | ||
2001 | musb->g.speed = (power & MUSB_POWER_HSMODE) | ||
2002 | ? USB_SPEED_HIGH : USB_SPEED_FULL; | ||
2003 | |||
2004 | /* start in USB_STATE_DEFAULT */ | ||
2005 | musb->is_active = 1; | ||
2006 | musb->is_suspended = 0; | ||
2007 | MUSB_DEV_MODE(musb); | ||
2008 | musb->address = 0; | ||
2009 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
2010 | |||
2011 | musb->may_wakeup = 0; | ||
2012 | musb->g.b_hnp_enable = 0; | ||
2013 | musb->g.a_alt_hnp_support = 0; | ||
2014 | musb->g.a_hnp_support = 0; | ||
2015 | |||
2016 | /* Normal reset, as B-Device; | ||
2017 | * or else after HNP, as A-Device | ||
2018 | */ | ||
2019 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
2020 | musb->xceiv.state = OTG_STATE_B_PERIPHERAL; | ||
2021 | musb->g.is_a_peripheral = 0; | ||
2022 | } else if (is_otg_enabled(musb)) { | ||
2023 | musb->xceiv.state = OTG_STATE_A_PERIPHERAL; | ||
2024 | musb->g.is_a_peripheral = 1; | ||
2025 | } else | ||
2026 | WARN_ON(1); | ||
2027 | |||
2028 | /* start with default limits on VBUS power draw */ | ||
2029 | (void) musb_gadget_vbus_draw(&musb->g, | ||
2030 | is_otg_enabled(musb) ? 8 : 100); | ||
2031 | } | ||
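The lifecycle hooks above (resume, suspend, disconnect, reset) drop musb->lock around every upcall into the bound gadget driver. As a hedged illustration only — not part of this patch, with hypothetical names — the gadget-driver side those upcalls land in could look like this:

/* Illustrative sketch, not part of this patch: a hypothetical gadget
 * driver whose callbacks are reached from musb_g_suspend(),
 * musb_g_resume() and musb_g_disconnect() above.  musb drops its lock
 * around each upcall, so these only touch the driver's own state.
 */
struct sketch_dev {
	unsigned	suspended:1;
	unsigned	configured:1;
};

static void sketch_suspend(struct usb_gadget *gadget)
{
	struct sketch_dev *dev = get_gadget_data(gadget);

	dev->suspended = 1;		/* stop queuing new I/O */
}

static void sketch_resume(struct usb_gadget *gadget)
{
	struct sketch_dev *dev = get_gadget_data(gadget);

	dev->suspended = 0;
}

static void sketch_disconnect(struct usb_gadget *gadget)
{
	struct sketch_dev *dev = get_gadget_data(gadget);

	/* any requests still queued come back with -ESHUTDOWN */
	dev->configured = 0;
}

static struct usb_gadget_driver sketch_driver = {
	.speed		= USB_SPEED_HIGH,
	.suspend	= sketch_suspend,
	.resume		= sketch_resume,
	.disconnect	= sketch_disconnect,
	/* .bind, .unbind and .setup omitted for brevity */
};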
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h new file mode 100644 index 000000000000..59502da9f739 --- /dev/null +++ b/drivers/usb/musb/musb_gadget.h | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver peripheral defines | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef __MUSB_GADGET_H | ||
36 | #define __MUSB_GADGET_H | ||
37 | |||
38 | struct musb_request { | ||
39 | struct usb_request request; | ||
40 | struct musb_ep *ep; | ||
41 | struct musb *musb; | ||
42 | u8 tx; /* endpoint direction */ | ||
43 | u8 epnum; | ||
44 | u8 mapped; | ||
45 | }; | ||
46 | |||
47 | static inline struct musb_request *to_musb_request(struct usb_request *req) | ||
48 | { | ||
49 | return req ? container_of(req, struct musb_request, request) : NULL; | ||
50 | } | ||
51 | |||
52 | extern struct usb_request * | ||
53 | musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); | ||
54 | extern void musb_free_request(struct usb_ep *ep, struct usb_request *req); | ||
55 | |||
56 | |||
57 | /* | ||
58 | * struct musb_ep - peripheral side view of endpoint rx or tx side | ||
59 | */ | ||
60 | struct musb_ep { | ||
61 | /* stuff towards the head is basically write-once. */ | ||
62 | struct usb_ep end_point; | ||
63 | char name[12]; | ||
64 | struct musb_hw_ep *hw_ep; | ||
65 | struct musb *musb; | ||
66 | u8 current_epnum; | ||
67 | |||
68 | /* ... when enabled/disabled ... */ | ||
69 | u8 type; | ||
70 | u8 is_in; | ||
71 | u16 packet_sz; | ||
72 | const struct usb_endpoint_descriptor *desc; | ||
73 | struct dma_channel *dma; | ||
74 | |||
75 | /* later things are modified based on usage */ | ||
76 | struct list_head req_list; | ||
77 | |||
78 | /* true if lock must be dropped but req_list may not be advanced */ | ||
79 | u8 busy; | ||
80 | }; | ||
81 | |||
82 | static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) | ||
83 | { | ||
84 | return ep ? container_of(ep, struct musb_ep, end_point) : NULL; | ||
85 | } | ||
86 | |||
87 | static inline struct usb_request *next_request(struct musb_ep *ep) | ||
88 | { | ||
89 | struct list_head *queue = &ep->req_list; | ||
90 | |||
91 | if (list_empty(queue)) | ||
92 | return NULL; | ||
93 | return container_of(queue->next, struct usb_request, list); | ||
94 | } | ||
95 | |||
96 | extern void musb_g_tx(struct musb *musb, u8 epnum); | ||
97 | extern void musb_g_rx(struct musb *musb, u8 epnum); | ||
98 | |||
99 | extern const struct usb_ep_ops musb_g_ep0_ops; | ||
100 | |||
101 | extern int musb_gadget_setup(struct musb *); | ||
102 | extern void musb_gadget_cleanup(struct musb *); | ||
103 | |||
104 | extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); | ||
105 | |||
106 | extern int musb_gadget_set_halt(struct usb_ep *ep, int value); | ||
107 | |||
108 | #endif /* __MUSB_GADGET_H */ | ||
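to_musb_ep() and to_musb_request() recover the driver-private state from the generic objects the gadget core hands to every usb_ep_ops entry point. A minimal sketch of that pattern follows (hypothetical function name, simplified from what the real queue path in musb_gadget_ep0.c does):

/* Sketch only: how an endpoint-ops entry point recovers driver state
 * through the container_of() helpers declared above.
 */
static int sketch_ep_queue(struct usb_ep *ep, struct usb_request *req,
		gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*request = to_musb_request(req);
	struct musb		*musb;
	unsigned long		flags;

	if (!musb_ep || !request)
		return -EINVAL;

	musb = musb_ep->musb;
	request->musb = musb;
	request->ep = musb_ep;

	spin_lock_irqsave(&musb->lock, flags);
	list_add_tail(&request->request.list, &musb_ep->req_list);
	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;
}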
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c new file mode 100644 index 000000000000..a57652fff39c --- /dev/null +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -0,0 +1,983 @@ | |||
1 | /* | ||
2 | * MUSB OTG peripheral driver ep0 handling | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/list.h> | ||
37 | #include <linux/timer.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <linux/interrupt.h> | ||
42 | |||
43 | #include "musb_core.h" | ||
44 | |||
45 | /* ep0 is always musb->endpoints[0].ep_in */ | ||
46 | #define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) | ||
47 | |||
48 | /* | ||
49 | * locking note: we use only the controller lock, for simpler correctness. | ||
50 | * It's always held with IRQs blocked. | ||
51 | * | ||
52 | * It protects the ep0 request queue as well as ep0_state, not just the | ||
53 | * controller and indexed registers. And that lock stays held unless it | ||
54 | * needs to be dropped to allow reentering this driver ... like upcalls to | ||
55 | * the gadget driver, or adjusting endpoint halt status. | ||
56 | */ | ||
57 | |||
58 | static char *decode_ep0stage(u8 stage) | ||
59 | { | ||
60 | switch (stage) { | ||
61 | case MUSB_EP0_STAGE_SETUP: return "idle"; | ||
62 | case MUSB_EP0_STAGE_TX: return "in"; | ||
63 | case MUSB_EP0_STAGE_RX: return "out"; | ||
64 | case MUSB_EP0_STAGE_ACKWAIT: return "wait"; | ||
65 | case MUSB_EP0_STAGE_STATUSIN: return "in/status"; | ||
66 | case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; | ||
67 | default: return "?"; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* handle a standard GET_STATUS request | ||
72 | * Context: caller holds controller lock | ||
73 | */ | ||
74 | static int service_tx_status_request( | ||
75 | struct musb *musb, | ||
76 | const struct usb_ctrlrequest *ctrlrequest) | ||
77 | { | ||
78 | void __iomem *mbase = musb->mregs; | ||
79 | int handled = 1; | ||
80 | u8 result[2], epnum = 0; | ||
81 | const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; | ||
82 | |||
83 | result[1] = 0; | ||
84 | |||
85 | switch (recip) { | ||
86 | case USB_RECIP_DEVICE: | ||
87 | result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; | ||
88 | result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; | ||
89 | #ifdef CONFIG_USB_MUSB_OTG | ||
90 | if (musb->g.is_otg) { | ||
91 | result[0] |= musb->g.b_hnp_enable | ||
92 | << USB_DEVICE_B_HNP_ENABLE; | ||
93 | result[0] |= musb->g.a_alt_hnp_support | ||
94 | << USB_DEVICE_A_ALT_HNP_SUPPORT; | ||
95 | result[0] |= musb->g.a_hnp_support | ||
96 | << USB_DEVICE_A_HNP_SUPPORT; | ||
97 | } | ||
98 | #endif | ||
99 | break; | ||
100 | |||
101 | case USB_RECIP_INTERFACE: | ||
102 | result[0] = 0; | ||
103 | break; | ||
104 | |||
105 | case USB_RECIP_ENDPOINT: { | ||
106 | int is_in; | ||
107 | struct musb_ep *ep; | ||
108 | u16 tmp; | ||
109 | void __iomem *regs; | ||
110 | |||
111 | epnum = (u8) ctrlrequest->wIndex; | ||
112 | if (!epnum) { | ||
113 | result[0] = 0; | ||
114 | break; | ||
115 | } | ||
116 | |||
117 | is_in = epnum & USB_DIR_IN; | ||
118 | if (is_in) { | ||
119 | epnum &= 0x0f; | ||
120 | ep = &musb->endpoints[epnum].ep_in; | ||
121 | } else { | ||
122 | ep = &musb->endpoints[epnum].ep_out; | ||
123 | } | ||
124 | regs = musb->endpoints[epnum].regs; | ||
125 | |||
126 | if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { | ||
127 | handled = -EINVAL; | ||
128 | break; | ||
129 | } | ||
130 | |||
131 | musb_ep_select(mbase, epnum); | ||
132 | if (is_in) | ||
133 | tmp = musb_readw(regs, MUSB_TXCSR) | ||
134 | & MUSB_TXCSR_P_SENDSTALL; | ||
135 | else | ||
136 | tmp = musb_readw(regs, MUSB_RXCSR) | ||
137 | & MUSB_RXCSR_P_SENDSTALL; | ||
138 | musb_ep_select(mbase, 0); | ||
139 | |||
140 | result[0] = tmp ? 1 : 0; | ||
141 | } break; | ||
142 | |||
143 | default: | ||
144 | /* class, vendor, etc ... delegate */ | ||
145 | handled = 0; | ||
146 | break; | ||
147 | } | ||
148 | |||
149 | /* fill up the fifo; caller updates csr0 */ | ||
150 | if (handled > 0) { | ||
151 | u16 len = le16_to_cpu(ctrlrequest->wLength); | ||
152 | |||
153 | if (len > 2) | ||
154 | len = 2; | ||
155 | musb_write_fifo(&musb->endpoints[0], len, result); | ||
156 | } | ||
157 | |||
158 | return handled; | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * handle a control-IN request: the ep0 buffer contains the current | ||
163 | * request, which is expected to be a standard control request. Assumes | ||
164 | * the fifo is at least 2 bytes long. | ||
165 | * | ||
166 | * @return 0 if the request was NOT HANDLED, | ||
167 | * < 0 when error | ||
168 | * > 0 when the request is processed | ||
169 | * | ||
170 | * Context: caller holds controller lock | ||
171 | */ | ||
172 | static int | ||
173 | service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) | ||
174 | { | ||
175 | int handled = 0; /* not handled */ | ||
176 | |||
177 | if ((ctrlrequest->bRequestType & USB_TYPE_MASK) | ||
178 | == USB_TYPE_STANDARD) { | ||
179 | switch (ctrlrequest->bRequest) { | ||
180 | case USB_REQ_GET_STATUS: | ||
181 | handled = service_tx_status_request(musb, | ||
182 | ctrlrequest); | ||
183 | break; | ||
184 | |||
185 | /* case USB_REQ_SYNC_FRAME: */ | ||
186 | |||
187 | default: | ||
188 | break; | ||
189 | } | ||
190 | } | ||
191 | return handled; | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * Context: caller holds controller lock | ||
196 | */ | ||
197 | static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) | ||
198 | { | ||
199 | musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); | ||
200 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Tries to start B-device HNP negotiation if enabled via sysfs | ||
205 | */ | ||
206 | static inline void musb_try_b_hnp_enable(struct musb *musb) | ||
207 | { | ||
208 | void __iomem *mbase = musb->mregs; | ||
209 | u8 devctl; | ||
210 | |||
211 | DBG(1, "HNP: Setting HR\n"); | ||
212 | devctl = musb_readb(mbase, MUSB_DEVCTL); | ||
213 | musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Handle all control requests with no DATA stage, including standard | ||
218 | * requests such as: | ||
219 | * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized | ||
220 | * always delegated to the gadget driver | ||
221 | * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE | ||
222 | * always handled here, except for class/vendor/... features | ||
223 | * | ||
224 | * Context: caller holds controller lock | ||
225 | */ | ||
226 | static int | ||
227 | service_zero_data_request(struct musb *musb, | ||
228 | struct usb_ctrlrequest *ctrlrequest) | ||
229 | __releases(musb->lock) | ||
230 | __acquires(musb->lock) | ||
231 | { | ||
232 | int handled = -EINVAL; | ||
233 | void __iomem *mbase = musb->mregs; | ||
234 | const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; | ||
235 | |||
236 | /* the gadget driver handles everything except what we MUST handle */ | ||
237 | if ((ctrlrequest->bRequestType & USB_TYPE_MASK) | ||
238 | == USB_TYPE_STANDARD) { | ||
239 | switch (ctrlrequest->bRequest) { | ||
240 | case USB_REQ_SET_ADDRESS: | ||
241 | /* change it after the status stage */ | ||
242 | musb->set_address = true; | ||
243 | musb->address = (u8) (ctrlrequest->wValue & 0x7f); | ||
244 | handled = 1; | ||
245 | break; | ||
246 | |||
247 | case USB_REQ_CLEAR_FEATURE: | ||
248 | switch (recip) { | ||
249 | case USB_RECIP_DEVICE: | ||
250 | if (ctrlrequest->wValue | ||
251 | != USB_DEVICE_REMOTE_WAKEUP) | ||
252 | break; | ||
253 | musb->may_wakeup = 0; | ||
254 | handled = 1; | ||
255 | break; | ||
256 | case USB_RECIP_INTERFACE: | ||
257 | break; | ||
258 | case USB_RECIP_ENDPOINT:{ | ||
259 | const u8 num = ctrlrequest->wIndex & 0x0f; | ||
260 | struct musb_ep *musb_ep; | ||
261 | |||
262 | if (num == 0 | ||
263 | || num >= MUSB_C_NUM_EPS | ||
264 | || ctrlrequest->wValue | ||
265 | != USB_ENDPOINT_HALT) | ||
266 | break; | ||
267 | |||
268 | if (ctrlrequest->wIndex & USB_DIR_IN) | ||
269 | musb_ep = &musb->endpoints[num].ep_in; | ||
270 | else | ||
271 | musb_ep = &musb->endpoints[num].ep_out; | ||
272 | if (!musb_ep->desc) | ||
273 | break; | ||
274 | |||
275 | /* REVISIT do it directly, no locking games */ | ||
276 | spin_unlock(&musb->lock); | ||
277 | musb_gadget_set_halt(&musb_ep->end_point, 0); | ||
278 | spin_lock(&musb->lock); | ||
279 | |||
280 | /* select ep0 again */ | ||
281 | musb_ep_select(mbase, 0); | ||
282 | handled = 1; | ||
283 | } break; | ||
284 | default: | ||
285 | /* class, vendor, etc ... delegate */ | ||
286 | handled = 0; | ||
287 | break; | ||
288 | } | ||
289 | break; | ||
290 | |||
291 | case USB_REQ_SET_FEATURE: | ||
292 | switch (recip) { | ||
293 | case USB_RECIP_DEVICE: | ||
294 | handled = 1; | ||
295 | switch (ctrlrequest->wValue) { | ||
296 | case USB_DEVICE_REMOTE_WAKEUP: | ||
297 | musb->may_wakeup = 1; | ||
298 | break; | ||
299 | case USB_DEVICE_TEST_MODE: | ||
300 | if (musb->g.speed != USB_SPEED_HIGH) | ||
301 | goto stall; | ||
302 | if (ctrlrequest->wIndex & 0xff) | ||
303 | goto stall; | ||
304 | |||
305 | switch (ctrlrequest->wIndex >> 8) { | ||
306 | case 1: | ||
307 | pr_debug("TEST_J\n"); | ||
308 | /* TEST_J */ | ||
309 | musb->test_mode_nr = | ||
310 | MUSB_TEST_J; | ||
311 | break; | ||
312 | case 2: | ||
313 | /* TEST_K */ | ||
314 | pr_debug("TEST_K\n"); | ||
315 | musb->test_mode_nr = | ||
316 | MUSB_TEST_K; | ||
317 | break; | ||
318 | case 3: | ||
319 | /* TEST_SE0_NAK */ | ||
320 | pr_debug("TEST_SE0_NAK\n"); | ||
321 | musb->test_mode_nr = | ||
322 | MUSB_TEST_SE0_NAK; | ||
323 | break; | ||
324 | case 4: | ||
325 | /* TEST_PACKET */ | ||
326 | pr_debug("TEST_PACKET\n"); | ||
327 | musb->test_mode_nr = | ||
328 | MUSB_TEST_PACKET; | ||
329 | break; | ||
330 | default: | ||
331 | goto stall; | ||
332 | } | ||
333 | |||
334 | /* enter test mode after irq */ | ||
335 | if (handled > 0) | ||
336 | musb->test_mode = true; | ||
337 | break; | ||
338 | #ifdef CONFIG_USB_MUSB_OTG | ||
339 | case USB_DEVICE_B_HNP_ENABLE: | ||
340 | if (!musb->g.is_otg) | ||
341 | goto stall; | ||
342 | musb->g.b_hnp_enable = 1; | ||
343 | musb_try_b_hnp_enable(musb); | ||
344 | break; | ||
345 | case USB_DEVICE_A_HNP_SUPPORT: | ||
346 | if (!musb->g.is_otg) | ||
347 | goto stall; | ||
348 | musb->g.a_hnp_support = 1; | ||
349 | break; | ||
350 | case USB_DEVICE_A_ALT_HNP_SUPPORT: | ||
351 | if (!musb->g.is_otg) | ||
352 | goto stall; | ||
353 | musb->g.a_alt_hnp_support = 1; | ||
354 | break; | ||
355 | #endif | ||
356 | stall: | ||
357 | default: | ||
358 | handled = -EINVAL; | ||
359 | break; | ||
360 | } | ||
361 | break; | ||
362 | |||
363 | case USB_RECIP_INTERFACE: | ||
364 | break; | ||
365 | |||
366 | case USB_RECIP_ENDPOINT:{ | ||
367 | const u8 epnum = | ||
368 | ctrlrequest->wIndex & 0x0f; | ||
369 | struct musb_ep *musb_ep; | ||
370 | struct musb_hw_ep *ep; | ||
371 | void __iomem *regs; | ||
372 | int is_in; | ||
373 | u16 csr; | ||
374 | |||
375 | if (epnum == 0 | ||
376 | || epnum >= MUSB_C_NUM_EPS | ||
377 | || ctrlrequest->wValue | ||
378 | != USB_ENDPOINT_HALT) | ||
379 | break; | ||
380 | |||
381 | ep = musb->endpoints + epnum; | ||
382 | regs = ep->regs; | ||
383 | is_in = ctrlrequest->wIndex & USB_DIR_IN; | ||
384 | if (is_in) | ||
385 | musb_ep = &ep->ep_in; | ||
386 | else | ||
387 | musb_ep = &ep->ep_out; | ||
388 | if (!musb_ep->desc) | ||
389 | break; | ||
390 | |||
391 | musb_ep_select(mbase, epnum); | ||
392 | if (is_in) { | ||
393 | csr = musb_readw(regs, | ||
394 | MUSB_TXCSR); | ||
395 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
396 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
397 | csr |= MUSB_TXCSR_P_SENDSTALL | ||
398 | | MUSB_TXCSR_CLRDATATOG | ||
399 | | MUSB_TXCSR_P_WZC_BITS; | ||
400 | musb_writew(regs, MUSB_TXCSR, | ||
401 | csr); | ||
402 | } else { | ||
403 | csr = musb_readw(regs, | ||
404 | MUSB_RXCSR); | ||
405 | csr |= MUSB_RXCSR_P_SENDSTALL | ||
406 | | MUSB_RXCSR_FLUSHFIFO | ||
407 | | MUSB_RXCSR_CLRDATATOG | ||
408 | | MUSB_TXCSR_P_WZC_BITS; | ||
409 | musb_writew(regs, MUSB_RXCSR, | ||
410 | csr); | ||
411 | } | ||
412 | |||
413 | /* select ep0 again */ | ||
414 | musb_ep_select(mbase, 0); | ||
415 | handled = 1; | ||
416 | } break; | ||
417 | |||
418 | default: | ||
419 | /* class, vendor, etc ... delegate */ | ||
420 | handled = 0; | ||
421 | break; | ||
422 | } | ||
423 | break; | ||
424 | default: | ||
425 | /* delegate SET_CONFIGURATION, etc */ | ||
426 | handled = 0; | ||
427 | } | ||
428 | } else | ||
429 | handled = 0; | ||
430 | return handled; | ||
431 | } | ||
432 | |||
433 | /* we have an ep0out data packet | ||
434 | * Context: caller holds controller lock | ||
435 | */ | ||
436 | static void ep0_rxstate(struct musb *musb) | ||
437 | { | ||
438 | void __iomem *regs = musb->control_ep->regs; | ||
439 | struct usb_request *req; | ||
440 | u16 tmp; | ||
441 | |||
442 | req = next_ep0_request(musb); | ||
443 | |||
444 | /* read packet and ack; or stall because of gadget driver bug: | ||
445 | * should have provided the rx buffer before setup() returned. | ||
446 | */ | ||
447 | if (req) { | ||
448 | void *buf = req->buf + req->actual; | ||
449 | unsigned len = req->length - req->actual; | ||
450 | |||
451 | /* read the buffer */ | ||
452 | tmp = musb_readb(regs, MUSB_COUNT0); | ||
453 | if (tmp > len) { | ||
454 | req->status = -EOVERFLOW; | ||
455 | tmp = len; | ||
456 | } | ||
457 | musb_read_fifo(&musb->endpoints[0], tmp, buf); | ||
458 | req->actual += tmp; | ||
459 | tmp = MUSB_CSR0_P_SVDRXPKTRDY; | ||
460 | if (tmp < 64 || req->actual == req->length) { | ||
461 | musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; | ||
462 | tmp |= MUSB_CSR0_P_DATAEND; | ||
463 | } else | ||
464 | req = NULL; | ||
465 | } else | ||
466 | tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL; | ||
467 | |||
468 | |||
469 | /* Completion handler may choose to stall, e.g. because the | ||
470 | * message just received holds invalid data. | ||
471 | */ | ||
472 | if (req) { | ||
473 | musb->ackpend = tmp; | ||
474 | musb_g_ep0_giveback(musb, req); | ||
475 | if (!musb->ackpend) | ||
476 | return; | ||
477 | musb->ackpend = 0; | ||
478 | } | ||
479 | musb_ep_select(musb->mregs, 0); | ||
480 | musb_writew(regs, MUSB_CSR0, tmp); | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * transmitting to the host (IN); this code might be called from IRQ | ||
485 | * context as well as from a kernel thread. | ||
486 | * | ||
487 | * Context: caller holds controller lock | ||
488 | */ | ||
489 | static void ep0_txstate(struct musb *musb) | ||
490 | { | ||
491 | void __iomem *regs = musb->control_ep->regs; | ||
492 | struct usb_request *request = next_ep0_request(musb); | ||
493 | u16 csr = MUSB_CSR0_TXPKTRDY; | ||
494 | u8 *fifo_src; | ||
495 | u8 fifo_count; | ||
496 | |||
497 | if (!request) { | ||
498 | /* WARN_ON(1); */ | ||
499 | DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); | ||
500 | return; | ||
501 | } | ||
502 | |||
503 | /* load the data */ | ||
504 | fifo_src = (u8 *) request->buf + request->actual; | ||
505 | fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, | ||
506 | request->length - request->actual); | ||
507 | musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src); | ||
508 | request->actual += fifo_count; | ||
509 | |||
510 | /* update the flags */ | ||
511 | if (fifo_count < MUSB_MAX_END0_PACKET | ||
512 | || request->actual == request->length) { | ||
513 | musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; | ||
514 | csr |= MUSB_CSR0_P_DATAEND; | ||
515 | } else | ||
516 | request = NULL; | ||
517 | |||
518 | /* report completions as soon as the fifo's loaded; there's no | ||
519 | * win in waiting till this last packet gets acked. (other than | ||
520 | * very precise fault reporting, needed by USB TMC; possible with | ||
521 | * this hardware, but not usable from portable gadget drivers.) | ||
522 | */ | ||
523 | if (request) { | ||
524 | musb->ackpend = csr; | ||
525 | musb_g_ep0_giveback(musb, request); | ||
526 | if (!musb->ackpend) | ||
527 | return; | ||
528 | musb->ackpend = 0; | ||
529 | } | ||
530 | |||
531 | /* send it out, triggering a "txpktrdy cleared" irq */ | ||
532 | musb_ep_select(musb->mregs, 0); | ||
533 | musb_writew(regs, MUSB_CSR0, csr); | ||
534 | } | ||
535 | |||
536 | /* | ||
537 | * Read a SETUP packet (struct usb_ctrlrequest) from the hardware. | ||
538 | * Fields are left in USB byte-order. | ||
539 | * | ||
540 | * Context: caller holds controller lock. | ||
541 | */ | ||
542 | static void | ||
543 | musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) | ||
544 | { | ||
545 | struct usb_request *r; | ||
546 | void __iomem *regs = musb->control_ep->regs; | ||
547 | |||
548 | musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); | ||
549 | |||
550 | /* NOTE: earlier 2.6 versions changed setup packets to host | ||
551 | * order, but now USB packets always stay in USB byte order. | ||
552 | */ | ||
553 | DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n", | ||
554 | req->bRequestType, | ||
555 | req->bRequest, | ||
556 | le16_to_cpu(req->wValue), | ||
557 | le16_to_cpu(req->wIndex), | ||
558 | le16_to_cpu(req->wLength)); | ||
559 | |||
560 | /* clean up any leftover transfers */ | ||
561 | r = next_ep0_request(musb); | ||
562 | if (r) | ||
563 | musb_g_ep0_giveback(musb, r); | ||
564 | |||
565 | /* For zero-data requests we want to delay the STATUS stage to | ||
566 | * avoid SETUPEND errors. If we read data (OUT), delay accepting | ||
567 | * packets until there's a buffer to store them in. | ||
568 | * | ||
569 | * If we write data, the controller acts happier if we enable | ||
570 | * the TX FIFO right away, and give the controller a moment | ||
571 | * to switch modes... | ||
572 | */ | ||
573 | musb->set_address = false; | ||
574 | musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY; | ||
575 | if (req->wLength == 0) { | ||
576 | if (req->bRequestType & USB_DIR_IN) | ||
577 | musb->ackpend |= MUSB_CSR0_TXPKTRDY; | ||
578 | musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT; | ||
579 | } else if (req->bRequestType & USB_DIR_IN) { | ||
580 | musb->ep0_state = MUSB_EP0_STAGE_TX; | ||
581 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY); | ||
582 | while ((musb_readw(regs, MUSB_CSR0) | ||
583 | & MUSB_CSR0_RXPKTRDY) != 0) | ||
584 | cpu_relax(); | ||
585 | musb->ackpend = 0; | ||
586 | } else | ||
587 | musb->ep0_state = MUSB_EP0_STAGE_RX; | ||
588 | } | ||
589 | |||
590 | static int | ||
591 | forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) | ||
592 | __releases(musb->lock) | ||
593 | __acquires(musb->lock) | ||
594 | { | ||
595 | int retval; | ||
596 | if (!musb->gadget_driver) | ||
597 | return -EOPNOTSUPP; | ||
598 | spin_unlock(&musb->lock); | ||
599 | retval = musb->gadget_driver->setup(&musb->g, ctrlrequest); | ||
600 | spin_lock(&musb->lock); | ||
601 | return retval; | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Handle peripheral ep0 interrupt | ||
606 | * | ||
607 | * Context: irq handler; we won't re-enter the driver that way. | ||
608 | */ | ||
609 | irqreturn_t musb_g_ep0_irq(struct musb *musb) | ||
610 | { | ||
611 | u16 csr; | ||
612 | u16 len; | ||
613 | void __iomem *mbase = musb->mregs; | ||
614 | void __iomem *regs = musb->endpoints[0].regs; | ||
615 | irqreturn_t retval = IRQ_NONE; | ||
616 | |||
617 | musb_ep_select(mbase, 0); /* select ep0 */ | ||
618 | csr = musb_readw(regs, MUSB_CSR0); | ||
619 | len = musb_readb(regs, MUSB_COUNT0); | ||
620 | |||
621 | DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n", | ||
622 | csr, len, | ||
623 | musb_readb(mbase, MUSB_FADDR), | ||
624 | decode_ep0stage(musb->ep0_state)); | ||
625 | |||
626 | /* I sent a stall ... need to acknowledge it now */ | ||
627 | if (csr & MUSB_CSR0_P_SENTSTALL) { | ||
628 | musb_writew(regs, MUSB_CSR0, | ||
629 | csr & ~MUSB_CSR0_P_SENTSTALL); | ||
630 | retval = IRQ_HANDLED; | ||
631 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
632 | csr = musb_readw(regs, MUSB_CSR0); | ||
633 | } | ||
634 | |||
635 | /* request ended "early" */ | ||
636 | if (csr & MUSB_CSR0_P_SETUPEND) { | ||
637 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND); | ||
638 | retval = IRQ_HANDLED; | ||
639 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
640 | csr = musb_readw(regs, MUSB_CSR0); | ||
641 | /* NOTE: request may need completion */ | ||
642 | } | ||
643 | |||
644 | /* docs from Mentor only describe tx, rx, and idle/setup states. | ||
645 | * we need to handle nuances around status stages, and also the | ||
646 | * case where status and setup stages come back-to-back ... | ||
647 | */ | ||
648 | switch (musb->ep0_state) { | ||
649 | |||
650 | case MUSB_EP0_STAGE_TX: | ||
651 | /* irq on clearing txpktrdy */ | ||
652 | if ((csr & MUSB_CSR0_TXPKTRDY) == 0) { | ||
653 | ep0_txstate(musb); | ||
654 | retval = IRQ_HANDLED; | ||
655 | } | ||
656 | break; | ||
657 | |||
658 | case MUSB_EP0_STAGE_RX: | ||
659 | /* irq on set rxpktrdy */ | ||
660 | if (csr & MUSB_CSR0_RXPKTRDY) { | ||
661 | ep0_rxstate(musb); | ||
662 | retval = IRQ_HANDLED; | ||
663 | } | ||
664 | break; | ||
665 | |||
666 | case MUSB_EP0_STAGE_STATUSIN: | ||
667 | /* end of sequence #2 (OUT/RX state) or #3 (no data) */ | ||
668 | |||
669 | /* update address (if needed) only @ the end of the | ||
670 | * status phase per usb spec, which also guarantees | ||
671 | * we get 10 msec to receive this irq... until this | ||
672 | * is done we won't see the next packet. | ||
673 | */ | ||
674 | if (musb->set_address) { | ||
675 | musb->set_address = false; | ||
676 | musb_writeb(mbase, MUSB_FADDR, musb->address); | ||
677 | } | ||
678 | |||
679 | /* enter test mode if needed (exit by reset) */ | ||
680 | else if (musb->test_mode) { | ||
681 | DBG(1, "entering TESTMODE\n"); | ||
682 | |||
683 | if (MUSB_TEST_PACKET == musb->test_mode_nr) | ||
684 | musb_load_testpacket(musb); | ||
685 | |||
686 | musb_writeb(mbase, MUSB_TESTMODE, | ||
687 | musb->test_mode_nr); | ||
688 | } | ||
689 | /* FALLTHROUGH */ | ||
690 | |||
691 | case MUSB_EP0_STAGE_STATUSOUT: | ||
692 | /* end of sequence #1: write to host (TX state) */ | ||
693 | { | ||
694 | struct usb_request *req; | ||
695 | |||
696 | req = next_ep0_request(musb); | ||
697 | if (req) | ||
698 | musb_g_ep0_giveback(musb, req); | ||
699 | } | ||
700 | retval = IRQ_HANDLED; | ||
701 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
702 | /* FALLTHROUGH */ | ||
703 | |||
704 | case MUSB_EP0_STAGE_SETUP: | ||
705 | if (csr & MUSB_CSR0_RXPKTRDY) { | ||
706 | struct usb_ctrlrequest setup; | ||
707 | int handled = 0; | ||
708 | |||
709 | if (len != 8) { | ||
710 | ERR("SETUP packet len %d != 8 ?\n", len); | ||
711 | break; | ||
712 | } | ||
713 | musb_read_setup(musb, &setup); | ||
714 | retval = IRQ_HANDLED; | ||
715 | |||
716 | /* sometimes the RESET won't be reported */ | ||
717 | if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) { | ||
718 | u8 power; | ||
719 | |||
720 | printk(KERN_NOTICE "%s: peripheral reset " | ||
721 | "irq lost!\n", | ||
722 | musb_driver_name); | ||
723 | power = musb_readb(mbase, MUSB_POWER); | ||
724 | musb->g.speed = (power & MUSB_POWER_HSMODE) | ||
725 | ? USB_SPEED_HIGH : USB_SPEED_FULL; | ||
726 | |||
727 | } | ||
728 | |||
729 | switch (musb->ep0_state) { | ||
730 | |||
731 | /* sequence #3 (no data stage), includes requests | ||
732 | * we can't forward (notably SET_ADDRESS and the | ||
733 | * device/endpoint feature set/clear operations) | ||
734 | * plus SET_CONFIGURATION and others we must | ||
735 | */ | ||
736 | case MUSB_EP0_STAGE_ACKWAIT: | ||
737 | handled = service_zero_data_request( | ||
738 | musb, &setup); | ||
739 | |||
740 | /* status stage might be immediate */ | ||
741 | if (handled > 0) { | ||
742 | musb->ackpend |= MUSB_CSR0_P_DATAEND; | ||
743 | musb->ep0_state = | ||
744 | MUSB_EP0_STAGE_STATUSIN; | ||
745 | } | ||
746 | break; | ||
747 | |||
748 | /* sequence #1 (IN to host), includes GET_STATUS | ||
749 | * requests that we can't forward, GET_DESCRIPTOR | ||
750 | * and others that we must | ||
751 | */ | ||
752 | case MUSB_EP0_STAGE_TX: | ||
753 | handled = service_in_request(musb, &setup); | ||
754 | if (handled > 0) { | ||
755 | musb->ackpend = MUSB_CSR0_TXPKTRDY | ||
756 | | MUSB_CSR0_P_DATAEND; | ||
757 | musb->ep0_state = | ||
758 | MUSB_EP0_STAGE_STATUSOUT; | ||
759 | } | ||
760 | break; | ||
761 | |||
762 | /* sequence #2 (OUT from host), always forward */ | ||
763 | default: /* MUSB_EP0_STAGE_RX */ | ||
764 | break; | ||
765 | } | ||
766 | |||
767 | DBG(3, "handled %d, csr %04x, ep0stage %s\n", | ||
768 | handled, csr, | ||
769 | decode_ep0stage(musb->ep0_state)); | ||
770 | |||
771 | /* unless we need to delegate this to the gadget | ||
772 | * driver, we know how to wrap this up: csr0 has | ||
773 | * not yet been written. | ||
774 | */ | ||
775 | if (handled < 0) | ||
776 | goto stall; | ||
777 | else if (handled > 0) | ||
778 | goto finish; | ||
779 | |||
780 | handled = forward_to_driver(musb, &setup); | ||
781 | if (handled < 0) { | ||
782 | musb_ep_select(mbase, 0); | ||
783 | stall: | ||
784 | DBG(3, "stall (%d)\n", handled); | ||
785 | musb->ackpend |= MUSB_CSR0_P_SENDSTALL; | ||
786 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
787 | finish: | ||
788 | musb_writew(regs, MUSB_CSR0, | ||
789 | musb->ackpend); | ||
790 | musb->ackpend = 0; | ||
791 | } | ||
792 | } | ||
793 | break; | ||
794 | |||
795 | case MUSB_EP0_STAGE_ACKWAIT: | ||
796 | /* This should not happen, but it does with tusb6010 running | ||
797 | * g_file_storage at high speed. Do nothing. | ||
798 | */ | ||
799 | retval = IRQ_HANDLED; | ||
800 | break; | ||
801 | |||
802 | default: | ||
803 | /* "can't happen" */ | ||
804 | WARN_ON(1); | ||
805 | musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); | ||
806 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
807 | break; | ||
808 | } | ||
809 | |||
810 | return retval; | ||
811 | } | ||
812 | |||
813 | |||
814 | static int | ||
815 | musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) | ||
816 | { | ||
817 | /* always enabled */ | ||
818 | return -EINVAL; | ||
819 | } | ||
820 | |||
821 | static int musb_g_ep0_disable(struct usb_ep *e) | ||
822 | { | ||
823 | /* always enabled */ | ||
824 | return -EINVAL; | ||
825 | } | ||
826 | |||
827 | static int | ||
828 | musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) | ||
829 | { | ||
830 | struct musb_ep *ep; | ||
831 | struct musb_request *req; | ||
832 | struct musb *musb; | ||
833 | int status; | ||
834 | unsigned long lockflags; | ||
835 | void __iomem *regs; | ||
836 | |||
837 | if (!e || !r) | ||
838 | return -EINVAL; | ||
839 | |||
840 | ep = to_musb_ep(e); | ||
841 | musb = ep->musb; | ||
842 | regs = musb->control_ep->regs; | ||
843 | |||
844 | req = to_musb_request(r); | ||
845 | req->musb = musb; | ||
846 | req->request.actual = 0; | ||
847 | req->request.status = -EINPROGRESS; | ||
848 | req->tx = ep->is_in; | ||
849 | |||
850 | spin_lock_irqsave(&musb->lock, lockflags); | ||
851 | |||
852 | if (!list_empty(&ep->req_list)) { | ||
853 | status = -EBUSY; | ||
854 | goto cleanup; | ||
855 | } | ||
856 | |||
857 | switch (musb->ep0_state) { | ||
858 | case MUSB_EP0_STAGE_RX: /* control-OUT data */ | ||
859 | case MUSB_EP0_STAGE_TX: /* control-IN data */ | ||
860 | case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */ | ||
861 | status = 0; | ||
862 | break; | ||
863 | default: | ||
864 | DBG(1, "ep0 request queued in state %d\n", | ||
865 | musb->ep0_state); | ||
866 | status = -EINVAL; | ||
867 | goto cleanup; | ||
868 | } | ||
869 | |||
870 | /* add request to the list */ | ||
871 | list_add_tail(&(req->request.list), &(ep->req_list)); | ||
872 | |||
873 | DBG(3, "queue to %s (%s), length=%d\n", | ||
874 | ep->name, ep->is_in ? "IN/TX" : "OUT/RX", | ||
875 | req->request.length); | ||
876 | |||
877 | musb_ep_select(musb->mregs, 0); | ||
878 | |||
879 | /* sequence #1, IN ... start writing the data */ | ||
880 | if (musb->ep0_state == MUSB_EP0_STAGE_TX) | ||
881 | ep0_txstate(musb); | ||
882 | |||
883 | /* sequence #3, no-data ... issue IN status */ | ||
884 | else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) { | ||
885 | if (req->request.length) | ||
886 | status = -EINVAL; | ||
887 | else { | ||
888 | musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; | ||
889 | musb_writew(regs, MUSB_CSR0, | ||
890 | musb->ackpend | MUSB_CSR0_P_DATAEND); | ||
891 | musb->ackpend = 0; | ||
892 | musb_g_ep0_giveback(ep->musb, r); | ||
893 | } | ||
894 | |||
895 | /* else for sequence #2 (OUT), caller provides a buffer | ||
896 | * before the next packet arrives. deferred responses | ||
897 | * (after SETUP is acked) are racy. | ||
898 | */ | ||
899 | } else if (musb->ackpend) { | ||
900 | musb_writew(regs, MUSB_CSR0, musb->ackpend); | ||
901 | musb->ackpend = 0; | ||
902 | } | ||
903 | |||
904 | cleanup: | ||
905 | spin_unlock_irqrestore(&musb->lock, lockflags); | ||
906 | return status; | ||
907 | } | ||
908 | |||
909 | static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) | ||
910 | { | ||
911 | /* we just won't support this */ | ||
912 | return -EINVAL; | ||
913 | } | ||
914 | |||
915 | static int musb_g_ep0_halt(struct usb_ep *e, int value) | ||
916 | { | ||
917 | struct musb_ep *ep; | ||
918 | struct musb *musb; | ||
919 | void __iomem *base, *regs; | ||
920 | unsigned long flags; | ||
921 | int status; | ||
922 | u16 csr; | ||
923 | |||
924 | if (!e || !value) | ||
925 | return -EINVAL; | ||
926 | |||
927 | ep = to_musb_ep(e); | ||
928 | musb = ep->musb; | ||
929 | base = musb->mregs; | ||
930 | regs = musb->control_ep->regs; | ||
931 | status = 0; | ||
932 | |||
933 | spin_lock_irqsave(&musb->lock, flags); | ||
934 | |||
935 | if (!list_empty(&ep->req_list)) { | ||
936 | status = -EBUSY; | ||
937 | goto cleanup; | ||
938 | } | ||
939 | |||
940 | musb_ep_select(base, 0); | ||
941 | csr = musb->ackpend; | ||
942 | |||
943 | switch (musb->ep0_state) { | ||
944 | |||
945 | /* Stalls are usually issued after parsing SETUP packet, either | ||
946 | * directly in irq context from setup() or else later. | ||
947 | */ | ||
948 | case MUSB_EP0_STAGE_TX: /* control-IN data */ | ||
949 | case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */ | ||
950 | case MUSB_EP0_STAGE_RX: /* control-OUT data */ | ||
951 | csr = musb_readw(regs, MUSB_CSR0); | ||
952 | /* FALLTHROUGH */ | ||
953 | |||
954 | /* It's also OK to issue stalls during callbacks when a non-empty | ||
955 | * DATA stage buffer has been read (or even written). | ||
956 | */ | ||
957 | case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */ | ||
958 | case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */ | ||
959 | |||
960 | csr |= MUSB_CSR0_P_SENDSTALL; | ||
961 | musb_writew(regs, MUSB_CSR0, csr); | ||
962 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
963 | musb->ackpend = 0; | ||
964 | break; | ||
965 | default: | ||
966 | DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state); | ||
967 | status = -EINVAL; | ||
968 | } | ||
969 | |||
970 | cleanup: | ||
971 | spin_unlock_irqrestore(&musb->lock, flags); | ||
972 | return status; | ||
973 | } | ||
974 | |||
975 | const struct usb_ep_ops musb_g_ep0_ops = { | ||
976 | .enable = musb_g_ep0_enable, | ||
977 | .disable = musb_g_ep0_disable, | ||
978 | .alloc_request = musb_alloc_request, | ||
979 | .free_request = musb_free_request, | ||
980 | .queue = musb_g_ep0_queue, | ||
981 | .dequeue = musb_g_ep0_dequeue, | ||
982 | .set_halt = musb_g_ep0_halt, | ||
983 | }; | ||
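musb_g_ep0_queue() above accepts a single request at a time, and only while ep0 sits in the TX, RX or ACKWAIT stage; a negative return from the driver's setup() makes the irq handler stall ep0. As a hedged sketch (hypothetical request name, device structure and vendor request, not from this patch), the gadget-driver side of a control-IN transfer could look like:

/* Illustrative setup() fragment: answer a control-IN request by queuing
 * one ep0 request; musb_g_ep0_queue() rejects a second one with -EBUSY,
 * and a negative return here causes musb to stall ep0.
 */
#define SKETCH_GET_CONFIG	0x01	/* made-up vendor request */

struct sketch_ctrl_dev {
	struct usb_request	*ep0_req;	/* preallocated on ep0 */
	u8			config;
};

static int sketch_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	struct sketch_ctrl_dev *dev = get_gadget_data(gadget);
	struct usb_request *req = dev->ep0_req;
	u16 len = le16_to_cpu(ctrl->wLength);

	if (ctrl->bRequest != SKETCH_GET_CONFIG)
		return -EOPNOTSUPP;		/* musb stalls ep0 for us */

	/* queue the answer while ep0 is still in the data (TX) stage */
	req->length = min_t(u16, len, sizeof(dev->config));
	memcpy(req->buf, &dev->config, req->length);
	req->zero = 0;

	return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
}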
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c new file mode 100644 index 000000000000..8b4be012669a --- /dev/null +++ b/drivers/usb/musb/musb_host.c | |||
@@ -0,0 +1,2170 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver host support | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include <linux/module.h> | ||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/sched.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/errno.h> | ||
41 | #include <linux/init.h> | ||
42 | #include <linux/list.h> | ||
43 | |||
44 | #include "musb_core.h" | ||
45 | #include "musb_host.h" | ||
46 | |||
47 | |||
48 | /* MUSB HOST status 22-mar-2006 | ||
49 | * | ||
50 | * - There's still lots of partial code duplication for fault paths, so | ||
51 | * they aren't handled as consistently as they need to be. | ||
52 | * | ||
53 | * - PIO mostly behaved when last tested. | ||
54 | * + including ep0, with all usbtest cases 9, 10 | ||
55 | * + usbtest 14 (ep0out) doesn't seem to run at all | ||
56 | * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest | ||
57 | * configurations, but otherwise double buffering passes basic tests. | ||
58 | * + for 2.6.N, for N > ~10, needs API changes for hcd framework. | ||
59 | * | ||
60 | * - DMA (CPPI) ... partially behaves, not currently recommended | ||
61 | * + about 1/15 the speed of typical EHCI implementations (PCI) | ||
62 | * + RX, all too often reqpkt seems to misbehave after tx | ||
63 | * + TX, no known issues (other than evident silicon issue) | ||
64 | * | ||
65 | * - DMA (Mentor/OMAP) ...has at least toggle update problems | ||
66 | * | ||
67 | * - Still no traffic scheduling code to make NAKing for bulk or control | ||
68 | * transfers unable to starve other requests; or to make efficient use | ||
69 | * of hardware with periodic transfers. (Note that network drivers | ||
70 | * commonly post bulk reads that stay pending for a long time; these | ||
71 | * would make very visible trouble.) | ||
72 | * | ||
73 | * - Not tested with HNP, but some SRP paths seem to behave. | ||
74 | * | ||
75 | * NOTE 24-August-2006: | ||
76 | * | ||
77 | * - Bulk traffic finally uses both sides of hardware ep1, freeing up an | ||
78 | * extra endpoint for periodic use enabling hub + keybd + mouse. That | ||
79 | * mostly works, except that with "usbnet" it's easy to trigger cases | ||
80 | * with "ping" where RX loses. (a) ping to davinci, even "ping -f", | ||
81 | * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses | ||
82 | * although ARP RX wins. (That test was done with a full speed link.) | ||
83 | */ | ||
84 | |||
85 | |||
86 | /* | ||
87 | * NOTE on endpoint usage: | ||
88 | * | ||
89 | * CONTROL transfers all go through ep0. BULK ones go through dedicated IN | ||
90 | * and OUT endpoints ... hardware is dedicated for those "async" queue(s). | ||
91 | * | ||
92 | * (Yes, bulk _could_ use more of the endpoints than that, and would even | ||
93 | * benefit from it ... one remote device may easily be NAKing while others | ||
94 | * need to perform transfers in that same direction. The same thing could | ||
95 | * be done in software though, assuming dma cooperates.) | ||
96 | * | ||
97 | * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints. | ||
98 | * So far that scheduling is both dumb and optimistic: the endpoint will be | ||
99 | * "claimed" until its software queue is no longer refilled. No multiplexing | ||
100 | * of transfers between endpoints, or anything clever. | ||
101 | */ | ||
102 | |||
103 | |||
104 | static void musb_ep_program(struct musb *musb, u8 epnum, | ||
105 | struct urb *urb, unsigned int nOut, | ||
106 | u8 *buf, u32 len); | ||
107 | |||
108 | /* | ||
109 | * Clear TX fifo. Needed to avoid BABBLE errors. | ||
110 | */ | ||
111 | static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | ||
112 | { | ||
113 | void __iomem *epio = ep->regs; | ||
114 | u16 csr; | ||
115 | int retries = 1000; | ||
116 | |||
117 | csr = musb_readw(epio, MUSB_TXCSR); | ||
118 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
119 | DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr); | ||
120 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
121 | musb_writew(epio, MUSB_TXCSR, csr); | ||
122 | csr = musb_readw(epio, MUSB_TXCSR); | ||
123 | if (retries-- < 1) { | ||
124 | ERR("Could not flush host TX fifo: csr: %04x\n", csr); | ||
125 | return; | ||
126 | } | ||
127 | mdelay(1); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Start transmit. Caller is responsible for locking shared resources. | ||
133 | * musb must be locked. | ||
134 | */ | ||
135 | static inline void musb_h_tx_start(struct musb_hw_ep *ep) | ||
136 | { | ||
137 | u16 txcsr; | ||
138 | |||
139 | /* NOTE: no locks here; caller should lock and select EP */ | ||
140 | if (ep->epnum) { | ||
141 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | ||
142 | txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; | ||
143 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | ||
144 | } else { | ||
145 | txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; | ||
146 | musb_writew(ep->regs, MUSB_CSR0, txcsr); | ||
147 | } | ||
148 | |||
149 | } | ||
150 | |||
151 | static inline void cppi_host_txdma_start(struct musb_hw_ep *ep) | ||
152 | { | ||
153 | u16 txcsr; | ||
154 | |||
155 | /* NOTE: no locks here; caller should lock and select EP */ | ||
156 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | ||
157 | txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; | ||
158 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Start the URB at the front of an endpoint's queue | ||
163 | * end must be claimed from the caller. | ||
164 | * | ||
165 | * Context: controller locked, irqs blocked | ||
166 | */ | ||
167 | static void | ||
168 | musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | ||
169 | { | ||
170 | u16 frame; | ||
171 | u32 len; | ||
172 | void *buf; | ||
173 | void __iomem *mbase = musb->mregs; | ||
174 | struct urb *urb = next_urb(qh); | ||
175 | struct musb_hw_ep *hw_ep = qh->hw_ep; | ||
176 | unsigned pipe = urb->pipe; | ||
177 | u8 address = usb_pipedevice(pipe); | ||
178 | int epnum = hw_ep->epnum; | ||
179 | |||
180 | /* initialize software qh state */ | ||
181 | qh->offset = 0; | ||
182 | qh->segsize = 0; | ||
183 | |||
184 | /* gather right source of data */ | ||
185 | switch (qh->type) { | ||
186 | case USB_ENDPOINT_XFER_CONTROL: | ||
187 | /* control transfers always start with SETUP */ | ||
188 | is_in = 0; | ||
189 | hw_ep->out_qh = qh; | ||
190 | musb->ep0_stage = MUSB_EP0_START; | ||
191 | buf = urb->setup_packet; | ||
192 | len = 8; | ||
193 | break; | ||
194 | case USB_ENDPOINT_XFER_ISOC: | ||
195 | qh->iso_idx = 0; | ||
196 | qh->frame = 0; | ||
197 | buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset; | ||
198 | len = urb->iso_frame_desc[0].length; | ||
199 | break; | ||
200 | default: /* bulk, interrupt */ | ||
201 | buf = urb->transfer_buffer; | ||
202 | len = urb->transfer_buffer_length; | ||
203 | } | ||
204 | |||
205 | DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", | ||
206 | qh, urb, address, qh->epnum, | ||
207 | is_in ? "in" : "out", | ||
208 | ({char *s; switch (qh->type) { | ||
209 | case USB_ENDPOINT_XFER_CONTROL: s = ""; break; | ||
210 | case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; | ||
211 | case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; | ||
212 | default: s = "-intr"; break; | ||
213 | }; s; }), | ||
214 | epnum, buf, len); | ||
215 | |||
216 | /* Configure endpoint */ | ||
217 | if (is_in || hw_ep->is_shared_fifo) | ||
218 | hw_ep->in_qh = qh; | ||
219 | else | ||
220 | hw_ep->out_qh = qh; | ||
221 | musb_ep_program(musb, epnum, urb, !is_in, buf, len); | ||
222 | |||
223 | /* transmit may have more work: start it when it is time */ | ||
224 | if (is_in) | ||
225 | return; | ||
226 | |||
227 | /* determine if the time is right for a periodic transfer */ | ||
228 | switch (qh->type) { | ||
229 | case USB_ENDPOINT_XFER_ISOC: | ||
230 | case USB_ENDPOINT_XFER_INT: | ||
231 | DBG(3, "check whether there's still time for periodic Tx\n"); | ||
232 | qh->iso_idx = 0; | ||
233 | frame = musb_readw(mbase, MUSB_FRAME); | ||
234 | /* FIXME this doesn't implement that scheduling policy ... | ||
235 | * or handle framecounter wrapping | ||
236 | */ | ||
237 | if ((urb->transfer_flags & URB_ISO_ASAP) | ||
238 | || (frame >= urb->start_frame)) { | ||
239 | /* REVISIT the SOF irq handler shouldn't duplicate | ||
240 | * this code; and we don't init urb->start_frame... | ||
241 | */ | ||
242 | qh->frame = 0; | ||
243 | goto start; | ||
244 | } else { | ||
245 | qh->frame = urb->start_frame; | ||
246 | /* enable SOF interrupt so we can count down */ | ||
247 | DBG(1, "SOF for %d\n", epnum); | ||
248 | #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ | ||
249 | musb_writeb(mbase, MUSB_INTRUSBE, 0xff); | ||
250 | #endif | ||
251 | } | ||
252 | break; | ||
253 | default: | ||
254 | start: | ||
255 | DBG(4, "Start TX%d %s\n", epnum, | ||
256 | hw_ep->tx_channel ? "dma" : "pio"); | ||
257 | |||
258 | if (!hw_ep->tx_channel) | ||
259 | musb_h_tx_start(hw_ep); | ||
260 | else if (is_cppi_enabled() || tusb_dma_omap()) | ||
261 | cppi_host_txdma_start(hw_ep); | ||
262 | } | ||
263 | } | ||
264 | |||
265 | /* caller owns controller lock, irqs are blocked */ | ||
266 | static void | ||
267 | __musb_giveback(struct musb *musb, struct urb *urb, int status) | ||
268 | __releases(musb->lock) | ||
269 | __acquires(musb->lock) | ||
270 | { | ||
271 | DBG(({ int level; switch (urb->status) { | ||
272 | case 0: | ||
273 | level = 4; | ||
274 | break; | ||
275 | /* common/boring faults */ | ||
276 | case -EREMOTEIO: | ||
277 | case -ESHUTDOWN: | ||
278 | case -ECONNRESET: | ||
279 | case -EPIPE: | ||
280 | level = 3; | ||
281 | break; | ||
282 | default: | ||
283 | level = 2; | ||
284 | break; | ||
285 | }; level; }), | ||
286 | "complete %p (%d), dev%d ep%d%s, %d/%d\n", | ||
287 | urb, urb->status, | ||
288 | usb_pipedevice(urb->pipe), | ||
289 | usb_pipeendpoint(urb->pipe), | ||
290 | usb_pipein(urb->pipe) ? "in" : "out", | ||
291 | urb->actual_length, urb->transfer_buffer_length | ||
292 | ); | ||
293 | |||
294 | spin_unlock(&musb->lock); | ||
295 | usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); | ||
296 | spin_lock(&musb->lock); | ||
297 | } | ||
298 | |||
299 | /* for bulk/interrupt endpoints only */ | ||
300 | static inline void | ||
301 | musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb) | ||
302 | { | ||
303 | struct usb_device *udev = urb->dev; | ||
304 | u16 csr; | ||
305 | void __iomem *epio = ep->regs; | ||
306 | struct musb_qh *qh; | ||
307 | |||
308 | /* FIXME: the current Mentor DMA code seems to have | ||
309 | * problems getting toggle correct. | ||
310 | */ | ||
311 | |||
312 | if (is_in || ep->is_shared_fifo) | ||
313 | qh = ep->in_qh; | ||
314 | else | ||
315 | qh = ep->out_qh; | ||
316 | |||
317 | if (!is_in) { | ||
318 | csr = musb_readw(epio, MUSB_TXCSR); | ||
319 | usb_settoggle(udev, qh->epnum, 1, | ||
320 | (csr & MUSB_TXCSR_H_DATATOGGLE) | ||
321 | ? 1 : 0); | ||
322 | } else { | ||
323 | csr = musb_readw(epio, MUSB_RXCSR); | ||
324 | usb_settoggle(udev, qh->epnum, 0, | ||
325 | (csr & MUSB_RXCSR_H_DATATOGGLE) | ||
326 | ? 1 : 0); | ||
327 | } | ||
328 | } | ||
329 | |||
330 | /* caller owns controller lock, irqs are blocked */ | ||
331 | static struct musb_qh * | ||
332 | musb_giveback(struct musb_qh *qh, struct urb *urb, int status) | ||
333 | { | ||
334 | int is_in; | ||
335 | struct musb_hw_ep *ep = qh->hw_ep; | ||
336 | struct musb *musb = ep->musb; | ||
337 | int ready = qh->is_ready; | ||
338 | |||
339 | if (ep->is_shared_fifo) | ||
340 | is_in = 1; | ||
341 | else | ||
342 | is_in = usb_pipein(urb->pipe); | ||
343 | |||
344 | /* save toggle eagerly, for paranoia */ | ||
345 | switch (qh->type) { | ||
346 | case USB_ENDPOINT_XFER_BULK: | ||
347 | case USB_ENDPOINT_XFER_INT: | ||
348 | musb_save_toggle(ep, is_in, urb); | ||
349 | break; | ||
350 | case USB_ENDPOINT_XFER_ISOC: | ||
351 | if (status == 0 && urb->error_count) | ||
352 | status = -EXDEV; | ||
353 | break; | ||
354 | } | ||
355 | |||
356 | usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); | ||
357 | |||
358 | qh->is_ready = 0; | ||
359 | __musb_giveback(musb, urb, status); | ||
360 | qh->is_ready = ready; | ||
361 | |||
362 | /* reclaim resources (and bandwidth) ASAP; deschedule it, and | ||
363 | * invalidate qh as soon as list_empty(&hep->urb_list) | ||
364 | */ | ||
365 | if (list_empty(&qh->hep->urb_list)) { | ||
366 | struct list_head *head; | ||
367 | |||
368 | if (is_in) | ||
369 | ep->rx_reinit = 1; | ||
370 | else | ||
371 | ep->tx_reinit = 1; | ||
372 | |||
373 | /* clobber old pointers to this qh */ | ||
374 | if (is_in || ep->is_shared_fifo) | ||
375 | ep->in_qh = NULL; | ||
376 | else | ||
377 | ep->out_qh = NULL; | ||
378 | qh->hep->hcpriv = NULL; | ||
379 | |||
380 | switch (qh->type) { | ||
381 | |||
382 | case USB_ENDPOINT_XFER_ISOC: | ||
383 | case USB_ENDPOINT_XFER_INT: | ||
384 | /* this is where periodic bandwidth should be | ||
385 | * de-allocated if it's tracked and allocated; | ||
386 | * and where we'd update the schedule tree... | ||
387 | */ | ||
388 | musb->periodic[ep->epnum] = NULL; | ||
389 | kfree(qh); | ||
390 | qh = NULL; | ||
391 | break; | ||
392 | |||
393 | case USB_ENDPOINT_XFER_CONTROL: | ||
394 | case USB_ENDPOINT_XFER_BULK: | ||
395 | /* fifo policy for these lists, except that NAKing | ||
396 | * should rotate a qh to the end (for fairness). | ||
397 | */ | ||
398 | head = qh->ring.prev; | ||
399 | list_del(&qh->ring); | ||
400 | kfree(qh); | ||
401 | qh = first_qh(head); | ||
402 | break; | ||
403 | } | ||
404 | } | ||
405 | return qh; | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * Advance this hardware endpoint's queue, completing the specified urb and | ||
410 | * advancing to either the next urb queued to that qh, or else invalidating | ||
411 | * that qh and advancing to the next qh scheduled after the current one. | ||
412 | * | ||
413 | * Context: caller owns controller lock, irqs are blocked | ||
414 | */ | ||
415 | static void | ||
416 | musb_advance_schedule(struct musb *musb, struct urb *urb, | ||
417 | struct musb_hw_ep *hw_ep, int is_in) | ||
418 | { | ||
419 | struct musb_qh *qh; | ||
420 | |||
421 | if (is_in || hw_ep->is_shared_fifo) | ||
422 | qh = hw_ep->in_qh; | ||
423 | else | ||
424 | qh = hw_ep->out_qh; | ||
425 | |||
426 | if (urb->status == -EINPROGRESS) | ||
427 | qh = musb_giveback(qh, urb, 0); | ||
428 | else | ||
429 | qh = musb_giveback(qh, urb, urb->status); | ||
430 | |||
431 | if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) { | ||
432 | DBG(4, "... next ep%d %cX urb %p\n", | ||
433 | hw_ep->epnum, is_in ? 'R' : 'T', | ||
434 | next_urb(qh)); | ||
435 | musb_start_urb(musb, is_in, qh); | ||
436 | } | ||
437 | } | ||
438 | |||
439 | static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) | ||
440 | { | ||
441 | /* we don't want fifo to fill itself again; | ||
442 | * ignore dma (various models), | ||
443 | * leave toggle alone (may not have been saved yet) | ||
444 | */ | ||
445 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; | ||
446 | csr &= ~(MUSB_RXCSR_H_REQPKT | ||
447 | | MUSB_RXCSR_H_AUTOREQ | ||
448 | | MUSB_RXCSR_AUTOCLEAR); | ||
449 | |||
450 | /* write 2x to allow double buffering */ | ||
451 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
452 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
453 | |||
454 | /* flush writebuffer */ | ||
455 | return musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * PIO RX for a packet (or part of it). | ||
460 | */ | ||
461 | static bool | ||
462 | musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) | ||
463 | { | ||
464 | u16 rx_count; | ||
465 | u8 *buf; | ||
466 | u16 csr; | ||
467 | bool done = false; | ||
468 | u32 length; | ||
469 | int do_flush = 0; | ||
470 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
471 | void __iomem *epio = hw_ep->regs; | ||
472 | struct musb_qh *qh = hw_ep->in_qh; | ||
473 | int pipe = urb->pipe; | ||
474 | void *buffer = urb->transfer_buffer; | ||
475 | |||
476 | /* musb_ep_select(mbase, epnum); */ | ||
477 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | ||
478 | DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, | ||
479 | urb->transfer_buffer, qh->offset, | ||
480 | urb->transfer_buffer_length); | ||
481 | |||
482 | /* unload FIFO */ | ||
483 | if (usb_pipeisoc(pipe)) { | ||
484 | int status = 0; | ||
485 | struct usb_iso_packet_descriptor *d; | ||
486 | |||
487 | if (iso_err) { | ||
488 | status = -EILSEQ; | ||
489 | urb->error_count++; | ||
490 | } | ||
491 | |||
492 | d = urb->iso_frame_desc + qh->iso_idx; | ||
493 | buf = buffer + d->offset; | ||
494 | length = d->length; | ||
495 | if (rx_count > length) { | ||
496 | if (status == 0) { | ||
497 | status = -EOVERFLOW; | ||
498 | urb->error_count++; | ||
499 | } | ||
500 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | ||
501 | do_flush = 1; | ||
502 | } else | ||
503 | length = rx_count; | ||
504 | urb->actual_length += length; | ||
505 | d->actual_length = length; | ||
506 | |||
507 | d->status = status; | ||
508 | |||
509 | /* see if we are done */ | ||
510 | done = (++qh->iso_idx >= urb->number_of_packets); | ||
511 | } else { | ||
512 | /* non-isoch */ | ||
513 | buf = buffer + qh->offset; | ||
514 | length = urb->transfer_buffer_length - qh->offset; | ||
515 | if (rx_count > length) { | ||
516 | if (urb->status == -EINPROGRESS) | ||
517 | urb->status = -EOVERFLOW; | ||
518 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | ||
519 | do_flush = 1; | ||
520 | } else | ||
521 | length = rx_count; | ||
522 | urb->actual_length += length; | ||
523 | qh->offset += length; | ||
524 | |||
525 | /* see if we are done */ | ||
526 | done = (urb->actual_length == urb->transfer_buffer_length) | ||
527 | || (rx_count < qh->maxpacket) | ||
528 | || (urb->status != -EINPROGRESS); | ||
529 | if (done | ||
530 | && (urb->status == -EINPROGRESS) | ||
531 | && (urb->transfer_flags & URB_SHORT_NOT_OK) | ||
532 | && (urb->actual_length | ||
533 | < urb->transfer_buffer_length)) | ||
534 | urb->status = -EREMOTEIO; | ||
535 | } | ||
536 | |||
537 | musb_read_fifo(hw_ep, length, buf); | ||
538 | |||
539 | csr = musb_readw(epio, MUSB_RXCSR); | ||
540 | csr |= MUSB_RXCSR_H_WZC_BITS; | ||
541 | if (unlikely(do_flush)) | ||
542 | musb_h_flush_rxfifo(hw_ep, csr); | ||
543 | else { | ||
544 | /* REVISIT this assumes AUTOCLEAR is never set */ | ||
545 | csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); | ||
546 | if (!done) | ||
547 | csr |= MUSB_RXCSR_H_REQPKT; | ||
548 | musb_writew(epio, MUSB_RXCSR, csr); | ||
549 | } | ||
550 | |||
551 | return done; | ||
552 | } | ||
553 | |||
554 | /* we don't always need to reinit a given side of an endpoint... | ||
555 | * when we do, use tx/rx reinit routine and then construct a new CSR | ||
556 | * to address data toggle, NYET, and DMA or PIO. | ||
557 | * | ||
558 | * it's possible that driver bugs (especially for DMA) or aborting a | ||
559 | * transfer might have left the endpoint busier than it should be. | ||
560 | * the busy/not-empty tests are basically paranoia. | ||
561 | */ | ||
562 | static void | ||
563 | musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | ||
564 | { | ||
565 | u16 csr; | ||
566 | |||
567 | /* NOTE: we know the "rx" fifo reinit never triggers for ep0. | ||
568 | * That always uses tx_reinit since ep0 repurposes TX register | ||
569 | * offsets; the initial SETUP packet is also a kind of OUT. | ||
570 | */ | ||
571 | |||
572 | /* if programmed for Tx, put it in RX mode */ | ||
573 | if (ep->is_shared_fifo) { | ||
574 | csr = musb_readw(ep->regs, MUSB_TXCSR); | ||
575 | if (csr & MUSB_TXCSR_MODE) { | ||
576 | musb_h_tx_flush_fifo(ep); | ||
577 | musb_writew(ep->regs, MUSB_TXCSR, | ||
578 | MUSB_TXCSR_FRCDATATOG); | ||
579 | } | ||
580 | /* clear mode (and everything else) to enable Rx */ | ||
581 | musb_writew(ep->regs, MUSB_TXCSR, 0); | ||
582 | |||
583 | /* scrub all previous state, clearing toggle */ | ||
584 | } else { | ||
585 | csr = musb_readw(ep->regs, MUSB_RXCSR); | ||
586 | if (csr & MUSB_RXCSR_RXPKTRDY) | ||
587 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | ||
588 | musb_readw(ep->regs, MUSB_RXCOUNT)); | ||
589 | |||
590 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | ||
591 | } | ||
592 | |||
593 | /* target addr and (for multipoint) hub addr/port */ | ||
594 | if (musb->is_multipoint) { | ||
595 | musb_writeb(ep->target_regs, MUSB_RXFUNCADDR, | ||
596 | qh->addr_reg); | ||
597 | musb_writeb(ep->target_regs, MUSB_RXHUBADDR, | ||
598 | qh->h_addr_reg); | ||
599 | musb_writeb(ep->target_regs, MUSB_RXHUBPORT, | ||
600 | qh->h_port_reg); | ||
601 | } else | ||
602 | musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); | ||
603 | |||
604 | /* protocol/endpoint, interval/NAKlimit, i/o size */ | ||
605 | musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); | ||
606 | musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); | ||
607 | /* NOTE: bulk combining rewrites high bits of maxpacket */ | ||
608 | musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket); | ||
609 | |||
610 | ep->rx_reinit = 0; | ||
611 | } | ||
612 | |||
613 | |||
614 | /* | ||
615 | * Program an HDRC endpoint as per the given URB | ||
616 | * Context: irqs blocked, controller lock held | ||
617 | */ | ||
618 | static void musb_ep_program(struct musb *musb, u8 epnum, | ||
619 | struct urb *urb, unsigned int is_out, | ||
620 | u8 *buf, u32 len) | ||
621 | { | ||
622 | struct dma_controller *dma_controller; | ||
623 | struct dma_channel *dma_channel; | ||
624 | u8 dma_ok; | ||
625 | void __iomem *mbase = musb->mregs; | ||
626 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
627 | void __iomem *epio = hw_ep->regs; | ||
628 | struct musb_qh *qh; | ||
629 | u16 packet_sz; | ||
630 | |||
631 | if (!is_out || hw_ep->is_shared_fifo) | ||
632 | qh = hw_ep->in_qh; | ||
633 | else | ||
634 | qh = hw_ep->out_qh; | ||
635 | |||
636 | packet_sz = qh->maxpacket; | ||
637 | |||
638 | DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " | ||
639 | "h_addr%02x h_port%02x bytes %d\n", | ||
640 | is_out ? "-->" : "<--", | ||
641 | epnum, urb, urb->dev->speed, | ||
642 | qh->addr_reg, qh->epnum, is_out ? "out" : "in", | ||
643 | qh->h_addr_reg, qh->h_port_reg, | ||
644 | len); | ||
645 | |||
646 | musb_ep_select(mbase, epnum); | ||
647 | |||
648 | /* candidate for DMA? */ | ||
649 | dma_controller = musb->dma_controller; | ||
650 | if (is_dma_capable() && epnum && dma_controller) { | ||
651 | dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; | ||
652 | if (!dma_channel) { | ||
653 | dma_channel = dma_controller->channel_alloc( | ||
654 | dma_controller, hw_ep, is_out); | ||
655 | if (is_out) | ||
656 | hw_ep->tx_channel = dma_channel; | ||
657 | else | ||
658 | hw_ep->rx_channel = dma_channel; | ||
659 | } | ||
660 | } else | ||
661 | dma_channel = NULL; | ||
662 | |||
663 | /* make sure we clear DMAEnab, autoSet bits from previous run */ | ||
664 | |||
665 | /* OUT/transmit/EP0 or IN/receive? */ | ||
666 | if (is_out) { | ||
667 | u16 csr; | ||
668 | u16 int_txe; | ||
669 | u16 load_count; | ||
670 | |||
671 | csr = musb_readw(epio, MUSB_TXCSR); | ||
672 | |||
673 | /* disable interrupt in case we flush */ | ||
674 | int_txe = musb_readw(mbase, MUSB_INTRTXE); | ||
675 | musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); | ||
676 | |||
677 | /* general endpoint setup */ | ||
678 | if (epnum) { | ||
679 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
680 | |||
681 | /* flush all old state, set default */ | ||
682 | musb_h_tx_flush_fifo(hw_ep); | ||
683 | csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT | ||
684 | | MUSB_TXCSR_DMAMODE | ||
685 | | MUSB_TXCSR_FRCDATATOG | ||
686 | | MUSB_TXCSR_H_RXSTALL | ||
687 | | MUSB_TXCSR_H_ERROR | ||
688 | | MUSB_TXCSR_TXPKTRDY | ||
689 | ); | ||
690 | csr |= MUSB_TXCSR_MODE; | ||
691 | |||
692 | if (usb_gettoggle(urb->dev, | ||
693 | qh->epnum, 1)) | ||
694 | csr |= MUSB_TXCSR_H_WR_DATATOGGLE | ||
695 | | MUSB_TXCSR_H_DATATOGGLE; | ||
696 | else | ||
697 | csr |= MUSB_TXCSR_CLRDATATOG; | ||
698 | |||
699 | /* twice in case of double packet buffering */ | ||
700 | musb_writew(epio, MUSB_TXCSR, csr); | ||
701 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
702 | musb_writew(epio, MUSB_TXCSR, csr); | ||
703 | csr = musb_readw(epio, MUSB_TXCSR); | ||
704 | } else { | ||
705 | /* endpoint 0: just flush */ | ||
706 | musb_writew(epio, MUSB_CSR0, | ||
707 | csr | MUSB_CSR0_FLUSHFIFO); | ||
708 | musb_writew(epio, MUSB_CSR0, | ||
709 | csr | MUSB_CSR0_FLUSHFIFO); | ||
710 | } | ||
711 | |||
712 | /* target addr and (for multipoint) hub addr/port */ | ||
713 | if (musb->is_multipoint) { | ||
714 | musb_writeb(mbase, | ||
715 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR), | ||
716 | qh->addr_reg); | ||
717 | musb_writeb(mbase, | ||
718 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR), | ||
719 | qh->h_addr_reg); | ||
720 | musb_writeb(mbase, | ||
721 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT), | ||
722 | qh->h_port_reg); | ||
723 | /* FIXME if !epnum, do the same for RX ... */ | ||
724 | } else | ||
725 | musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); | ||
726 | |||
727 | /* protocol/endpoint/interval/NAKlimit */ | ||
728 | if (epnum) { | ||
729 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); | ||
730 | if (can_bulk_split(musb, qh->type)) | ||
731 | musb_writew(epio, MUSB_TXMAXP, | ||
732 | packet_sz | ||
733 | | ((hw_ep->max_packet_sz_tx / | ||
734 | packet_sz) - 1) << 11); | ||
735 | else | ||
736 | musb_writew(epio, MUSB_TXMAXP, | ||
737 | packet_sz); | ||
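			/* Worked example for the bulk-combining case above
			 * (illustrative numbers only): with packet_sz == 512
			 * and max_packet_sz_tx == 4096, the count field is
			 * (4096/512 - 1) == 7, so TXMAXP is written as
			 * 0x200 | (7 << 11) == 0x3a00.
			 */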
738 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); | ||
739 | } else { | ||
740 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); | ||
741 | if (musb->is_multipoint) | ||
742 | musb_writeb(epio, MUSB_TYPE0, | ||
743 | qh->type_reg); | ||
744 | } | ||
745 | |||
746 | if (can_bulk_split(musb, qh->type)) | ||
747 | load_count = min((u32) hw_ep->max_packet_sz_tx, | ||
748 | len); | ||
749 | else | ||
750 | load_count = min((u32) packet_sz, len); | ||
751 | |||
752 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
753 | if (dma_channel) { | ||
754 | |||
755 | /* clear previous state */ | ||
756 | csr = musb_readw(epio, MUSB_TXCSR); | ||
757 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
758 | | MUSB_TXCSR_DMAMODE | ||
759 | | MUSB_TXCSR_DMAENAB); | ||
760 | csr |= MUSB_TXCSR_MODE; | ||
761 | musb_writew(epio, MUSB_TXCSR, | ||
762 | csr | MUSB_TXCSR_MODE); | ||
763 | |||
764 | qh->segsize = min(len, dma_channel->max_len); | ||
765 | |||
766 | if (qh->segsize <= packet_sz) | ||
767 | dma_channel->desired_mode = 0; | ||
768 | else | ||
769 | dma_channel->desired_mode = 1; | ||
770 | |||
771 | |||
772 | if (dma_channel->desired_mode == 0) { | ||
773 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
774 | | MUSB_TXCSR_DMAMODE); | ||
775 | csr |= (MUSB_TXCSR_DMAENAB); | ||
776 | /* against programming guide */ | ||
777 | } else | ||
778 | csr |= (MUSB_TXCSR_AUTOSET | ||
779 | | MUSB_TXCSR_DMAENAB | ||
780 | | MUSB_TXCSR_DMAMODE); | ||
781 | |||
782 | musb_writew(epio, MUSB_TXCSR, csr); | ||
783 | |||
784 | dma_ok = dma_controller->channel_program( | ||
785 | dma_channel, packet_sz, | ||
786 | dma_channel->desired_mode, | ||
787 | urb->transfer_dma, | ||
788 | qh->segsize); | ||
789 | if (dma_ok) { | ||
790 | load_count = 0; | ||
791 | } else { | ||
792 | dma_controller->channel_release(dma_channel); | ||
793 | if (is_out) | ||
794 | hw_ep->tx_channel = NULL; | ||
795 | else | ||
796 | hw_ep->rx_channel = NULL; | ||
797 | dma_channel = NULL; | ||
798 | } | ||
799 | } | ||
800 | #endif | ||
801 | |||
802 | /* candidate for DMA */ | ||
803 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | ||
804 | |||
805 | /* program endpoint CSRs first, then setup DMA. | ||
806 | * assume CPPI setup succeeds. | ||
807 | * defer enabling dma. | ||
808 | */ | ||
809 | csr = musb_readw(epio, MUSB_TXCSR); | ||
810 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
811 | | MUSB_TXCSR_DMAMODE | ||
812 | | MUSB_TXCSR_DMAENAB); | ||
813 | csr |= MUSB_TXCSR_MODE; | ||
814 | musb_writew(epio, MUSB_TXCSR, | ||
815 | csr | MUSB_TXCSR_MODE); | ||
816 | |||
817 | dma_channel->actual_len = 0L; | ||
818 | qh->segsize = len; | ||
819 | |||
820 | /* TX uses "rndis" mode automatically, but needs help | ||
821 | * to identify the zero-length-final-packet case. | ||
822 | */ | ||
823 | dma_ok = dma_controller->channel_program( | ||
824 | dma_channel, packet_sz, | ||
825 | (urb->transfer_flags | ||
826 | & URB_ZERO_PACKET) | ||
827 | == URB_ZERO_PACKET, | ||
828 | urb->transfer_dma, | ||
829 | qh->segsize); | ||
830 | if (dma_ok) { | ||
831 | load_count = 0; | ||
832 | } else { | ||
833 | dma_controller->channel_release(dma_channel); | ||
834 | hw_ep->tx_channel = NULL; | ||
835 | dma_channel = NULL; | ||
836 | |||
837 | /* REVISIT there's an error path here that | ||
838 | * needs handling: can't do dma, but | ||
839 | * there's no pio buffer address... | ||
840 | */ | ||
841 | } | ||
842 | } | ||
843 | |||
844 | if (load_count) { | ||
845 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
846 | |||
847 | /* PIO to load FIFO */ | ||
848 | qh->segsize = load_count; | ||
849 | musb_write_fifo(hw_ep, load_count, buf); | ||
850 | csr = musb_readw(epio, MUSB_TXCSR); | ||
851 | csr &= ~(MUSB_TXCSR_DMAENAB | ||
852 | | MUSB_TXCSR_DMAMODE | ||
853 | | MUSB_TXCSR_AUTOSET); | ||
854 | /* write CSR */ | ||
855 | csr |= MUSB_TXCSR_MODE; | ||
856 | |||
857 | if (epnum) | ||
858 | musb_writew(epio, MUSB_TXCSR, csr); | ||
859 | } | ||
860 | |||
861 | /* re-enable interrupt */ | ||
862 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | ||
863 | |||
864 | /* IN/receive */ | ||
865 | } else { | ||
866 | u16 csr; | ||
867 | |||
868 | if (hw_ep->rx_reinit) { | ||
869 | musb_rx_reinit(musb, qh, hw_ep); | ||
870 | |||
871 | /* init new state: toggle and NYET, maybe DMA later */ | ||
872 | if (usb_gettoggle(urb->dev, qh->epnum, 0)) | ||
873 | csr = MUSB_RXCSR_H_WR_DATATOGGLE | ||
874 | | MUSB_RXCSR_H_DATATOGGLE; | ||
875 | else | ||
876 | csr = 0; | ||
877 | if (qh->type == USB_ENDPOINT_XFER_INT) | ||
878 | csr |= MUSB_RXCSR_DISNYET; | ||
879 | |||
880 | } else { | ||
881 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
882 | |||
883 | if (csr & (MUSB_RXCSR_RXPKTRDY | ||
884 | | MUSB_RXCSR_DMAENAB | ||
885 | | MUSB_RXCSR_H_REQPKT)) | ||
886 | ERR("broken !rx_reinit, ep%d csr %04x\n", | ||
887 | hw_ep->epnum, csr); | ||
888 | |||
889 | /* scrub any stale state, leaving toggle alone */ | ||
890 | csr &= MUSB_RXCSR_DISNYET; | ||
891 | } | ||
892 | |||
893 | /* kick things off */ | ||
894 | |||
895 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | ||
896 | /* candidate for DMA */ | ||
897 | if (dma_channel) { | ||
898 | dma_channel->actual_len = 0L; | ||
899 | qh->segsize = len; | ||
900 | |||
901 | /* AUTOREQ is in a DMA register */ | ||
902 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
903 | csr = musb_readw(hw_ep->regs, | ||
904 | MUSB_RXCSR); | ||
905 | |||
906 | /* unless caller treats short rx transfers as | ||
907 | * errors, we dare not queue multiple transfers. | ||
908 | */ | ||
909 | dma_ok = dma_controller->channel_program( | ||
910 | dma_channel, packet_sz, | ||
911 | !(urb->transfer_flags | ||
912 | & URB_SHORT_NOT_OK), | ||
913 | urb->transfer_dma, | ||
914 | qh->segsize); | ||
915 | if (!dma_ok) { | ||
916 | dma_controller->channel_release( | ||
917 | dma_channel); | ||
918 | hw_ep->rx_channel = NULL; | ||
919 | dma_channel = NULL; | ||
920 | } else | ||
921 | csr |= MUSB_RXCSR_DMAENAB; | ||
922 | } | ||
923 | } | ||
924 | |||
925 | csr |= MUSB_RXCSR_H_REQPKT; | ||
926 | DBG(7, "RXCSR%d := %04x\n", epnum, csr); | ||
927 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
928 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
929 | } | ||
930 | } | ||
931 | |||
932 | |||
933 | /* | ||
934 | * Service the default endpoint (ep0) as host. | ||
935 | * Return true until it's time to start the status stage. | ||
936 | */ | ||
937 | static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) | ||
938 | { | ||
939 | bool more = false; | ||
940 | u8 *fifo_dest = NULL; | ||
941 | u16 fifo_count = 0; | ||
942 | struct musb_hw_ep *hw_ep = musb->control_ep; | ||
943 | struct musb_qh *qh = hw_ep->in_qh; | ||
944 | struct usb_ctrlrequest *request; | ||
945 | |||
946 | switch (musb->ep0_stage) { | ||
947 | case MUSB_EP0_IN: | ||
948 | fifo_dest = urb->transfer_buffer + urb->actual_length; | ||
949 | fifo_count = min(len, ((u16) (urb->transfer_buffer_length | ||
950 | - urb->actual_length))); | ||
951 | if (fifo_count < len) | ||
952 | urb->status = -EOVERFLOW; | ||
953 | |||
954 | musb_read_fifo(hw_ep, fifo_count, fifo_dest); | ||
955 | |||
956 | urb->actual_length += fifo_count; | ||
957 | if (len < qh->maxpacket) { | ||
958 | /* always terminate on short read; it's | ||
959 | * rarely reported as an error. | ||
960 | */ | ||
961 | } else if (urb->actual_length < | ||
962 | urb->transfer_buffer_length) | ||
963 | more = true; | ||
964 | break; | ||
965 | case MUSB_EP0_START: | ||
966 | request = (struct usb_ctrlrequest *) urb->setup_packet; | ||
967 | |||
968 | if (!request->wLength) { | ||
969 | DBG(4, "start no-DATA\n"); | ||
970 | break; | ||
971 | } else if (request->bRequestType & USB_DIR_IN) { | ||
972 | DBG(4, "start IN-DATA\n"); | ||
973 | musb->ep0_stage = MUSB_EP0_IN; | ||
974 | more = true; | ||
975 | break; | ||
976 | } else { | ||
977 | DBG(4, "start OUT-DATA\n"); | ||
978 | musb->ep0_stage = MUSB_EP0_OUT; | ||
979 | more = true; | ||
980 | } | ||
981 | /* FALLTHROUGH */ | ||
982 | case MUSB_EP0_OUT: | ||
983 | fifo_count = min(qh->maxpacket, ((u16) | ||
984 | (urb->transfer_buffer_length | ||
985 | - urb->actual_length))); | ||
986 | |||
987 | if (fifo_count) { | ||
988 | fifo_dest = (u8 *) (urb->transfer_buffer | ||
989 | + urb->actual_length); | ||
990 | DBG(3, "Sending %d bytes to %p\n", | ||
991 | fifo_count, fifo_dest); | ||
992 | musb_write_fifo(hw_ep, fifo_count, fifo_dest); | ||
993 | |||
994 | urb->actual_length += fifo_count; | ||
995 | more = true; | ||
996 | } | ||
997 | break; | ||
998 | default: | ||
999 | ERR("bogus ep0 stage %d\n", musb->ep0_stage); | ||
1000 | break; | ||
1001 | } | ||
1002 | |||
1003 | return more; | ||
1004 | } | ||
1005 | |||
1006 | /* | ||
1007 | * Handle default endpoint interrupt as host. Only called at IRQ time | ||
1008 | * from the LinuxIsr() interrupt service routine. | ||
1009 | * | ||
1010 | * called with controller irqlocked | ||
1011 | */ | ||
1012 | irqreturn_t musb_h_ep0_irq(struct musb *musb) | ||
1013 | { | ||
1014 | struct urb *urb; | ||
1015 | u16 csr, len; | ||
1016 | int status = 0; | ||
1017 | void __iomem *mbase = musb->mregs; | ||
1018 | struct musb_hw_ep *hw_ep = musb->control_ep; | ||
1019 | void __iomem *epio = hw_ep->regs; | ||
1020 | struct musb_qh *qh = hw_ep->in_qh; | ||
1021 | bool complete = false; | ||
1022 | irqreturn_t retval = IRQ_NONE; | ||
1023 | |||
1024 | /* ep0 only has one queue, "in" */ | ||
1025 | urb = next_urb(qh); | ||
1026 | |||
1027 | musb_ep_select(mbase, 0); | ||
1028 | csr = musb_readw(epio, MUSB_CSR0); | ||
1029 | len = (csr & MUSB_CSR0_RXPKTRDY) | ||
1030 | ? musb_readb(epio, MUSB_COUNT0) | ||
1031 | : 0; | ||
1032 | |||
1033 | DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", | ||
1034 | csr, qh, len, urb, musb->ep0_stage); | ||
1035 | |||
1036 | /* if we just did status stage, we are done */ | ||
1037 | if (MUSB_EP0_STATUS == musb->ep0_stage) { | ||
1038 | retval = IRQ_HANDLED; | ||
1039 | complete = true; | ||
1040 | } | ||
1041 | |||
1042 | /* prepare status */ | ||
1043 | if (csr & MUSB_CSR0_H_RXSTALL) { | ||
1044 | DBG(6, "STALLING ENDPOINT\n"); | ||
1045 | status = -EPIPE; | ||
1046 | |||
1047 | } else if (csr & MUSB_CSR0_H_ERROR) { | ||
1048 | DBG(2, "no response, csr0 %04x\n", csr); | ||
1049 | status = -EPROTO; | ||
1050 | |||
1051 | } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { | ||
1052 | DBG(2, "control NAK timeout\n"); | ||
1053 | |||
1054 | /* NOTE: this code path would be a good place to PAUSE a | ||
1055 | * control transfer, if another one is queued, so that | ||
1056 | * ep0 is more likely to stay busy. | ||
1057 | * | ||
1058 | * if (qh->ring.next != &musb->control), then | ||
1059 | * we have a candidate... NAKing is *NOT* an error | ||
1060 | */ | ||
1061 | musb_writew(epio, MUSB_CSR0, 0); | ||
1062 | retval = IRQ_HANDLED; | ||
1063 | } | ||
1064 | |||
1065 | if (status) { | ||
1066 | DBG(6, "aborting\n"); | ||
1067 | retval = IRQ_HANDLED; | ||
1068 | if (urb) | ||
1069 | urb->status = status; | ||
1070 | complete = true; | ||
1071 | |||
1072 | /* use the proper sequence to abort the transfer */ | ||
1073 | if (csr & MUSB_CSR0_H_REQPKT) { | ||
1074 | csr &= ~MUSB_CSR0_H_REQPKT; | ||
1075 | musb_writew(epio, MUSB_CSR0, csr); | ||
1076 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | ||
1077 | musb_writew(epio, MUSB_CSR0, csr); | ||
1078 | } else { | ||
1079 | csr |= MUSB_CSR0_FLUSHFIFO; | ||
1080 | musb_writew(epio, MUSB_CSR0, csr); | ||
1081 | musb_writew(epio, MUSB_CSR0, csr); | ||
1082 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | ||
1083 | musb_writew(epio, MUSB_CSR0, csr); | ||
1084 | } | ||
1085 | |||
1086 | musb_writeb(epio, MUSB_NAKLIMIT0, 0); | ||
1087 | |||
1088 | /* clear it */ | ||
1089 | musb_writew(epio, MUSB_CSR0, 0); | ||
1090 | } | ||
1091 | |||
1092 | if (unlikely(!urb)) { | ||
1093 | /* stop endpoint since we have no place for its data; this | ||
1094 | * SHOULD NEVER HAPPEN! */ | ||
1095 | ERR("no URB for end 0\n"); | ||
1096 | |||
1097 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | ||
1098 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | ||
1099 | musb_writew(epio, MUSB_CSR0, 0); | ||
1100 | |||
1101 | goto done; | ||
1102 | } | ||
1103 | |||
1104 | if (!complete) { | ||
1105 | /* call common logic and prepare response */ | ||
1106 | if (musb_h_ep0_continue(musb, len, urb)) { | ||
1107 | /* more packets required */ | ||
1108 | csr = (MUSB_EP0_IN == musb->ep0_stage) | ||
1109 | ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; | ||
1110 | } else { | ||
1111 | /* data transfer complete; perform status phase */ | ||
1112 | if (usb_pipeout(urb->pipe) | ||
1113 | || !urb->transfer_buffer_length) | ||
1114 | csr = MUSB_CSR0_H_STATUSPKT | ||
1115 | | MUSB_CSR0_H_REQPKT; | ||
1116 | else | ||
1117 | csr = MUSB_CSR0_H_STATUSPKT | ||
1118 | | MUSB_CSR0_TXPKTRDY; | ||
1119 | |||
1120 | /* flag status stage */ | ||
1121 | musb->ep0_stage = MUSB_EP0_STATUS; | ||
1122 | |||
1123 | DBG(5, "ep0 STATUS, csr %04x\n", csr); | ||
1124 | |||
1125 | } | ||
1126 | musb_writew(epio, MUSB_CSR0, csr); | ||
1127 | retval = IRQ_HANDLED; | ||
1128 | } else | ||
1129 | musb->ep0_stage = MUSB_EP0_IDLE; | ||
1130 | |||
1131 | /* call completion handler if done */ | ||
1132 | if (complete) | ||
1133 | musb_advance_schedule(musb, urb, hw_ep, 1); | ||
1134 | done: | ||
1135 | return retval; | ||
1136 | } | ||
1137 | |||
1138 | |||
1139 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1140 | |||
1141 | /* Host side TX (OUT) using Mentor DMA works as follows: | ||
1142 | submit_urb -> | ||
1143 | - if queue was empty, Program Endpoint | ||
1144 | - ... which starts DMA to fifo in mode 1 or 0 | ||
1145 | |||
1146 | DMA Isr (transfer complete) -> TxAvail() | ||
1147 | - Stop DMA (~DmaEnab) (<--- Alert ... currently happens | ||
1148 | only in musb_cleanup_urb) | ||
1149 | - TxPktRdy has to be set in mode 0 or for | ||
1150 | short packets in mode 1. | ||
1151 | */ | ||
1152 | |||
1153 | #endif | ||
1154 | |||
1155 | /* Service a Tx-Available or dma completion irq for the endpoint */ | ||
1156 | void musb_host_tx(struct musb *musb, u8 epnum) | ||
1157 | { | ||
1158 | int pipe; | ||
1159 | bool done = false; | ||
1160 | u16 tx_csr; | ||
1161 | size_t wLength = 0; | ||
1162 | u8 *buf = NULL; | ||
1163 | struct urb *urb; | ||
1164 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
1165 | void __iomem *epio = hw_ep->regs; | ||
1166 | struct musb_qh *qh = hw_ep->out_qh; | ||
1167 | u32 status = 0; | ||
1168 | void __iomem *mbase = musb->mregs; | ||
1169 | struct dma_channel *dma; | ||
1170 | |||
1171 | urb = next_urb(qh); | ||
1172 | |||
1173 | musb_ep_select(mbase, epnum); | ||
1174 | tx_csr = musb_readw(epio, MUSB_TXCSR); | ||
1175 | |||
1176 | /* with CPPI, DMA sometimes triggers "extra" irqs */ | ||
1177 | if (!urb) { | ||
1178 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | ||
1179 | goto finish; | ||
1180 | } | ||
1181 | |||
1182 | pipe = urb->pipe; | ||
1183 | dma = is_dma_capable() ? hw_ep->tx_channel : NULL; | ||
1184 | DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, | ||
1185 | dma ? ", dma" : ""); | ||
1186 | |||
1187 | /* check for errors */ | ||
1188 | if (tx_csr & MUSB_TXCSR_H_RXSTALL) { | ||
1189 | /* dma was disabled, fifo flushed */ | ||
1190 | DBG(3, "TX end %d stall\n", epnum); | ||
1191 | |||
1192 | /* stall; record URB status */ | ||
1193 | status = -EPIPE; | ||
1194 | |||
1195 | } else if (tx_csr & MUSB_TXCSR_H_ERROR) { | ||
1196 | /* (NON-ISO) dma was disabled, fifo flushed */ | ||
1197 | DBG(3, "TX 3strikes on ep=%d\n", epnum); | ||
1198 | |||
1199 | status = -ETIMEDOUT; | ||
1200 | |||
1201 | } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { | ||
1202 | DBG(6, "TX end=%d device not responding\n", epnum); | ||
1203 | |||
1204 | /* NOTE: this code path would be a good place to PAUSE a | ||
1205 | * transfer, if there's some other (nonperiodic) tx urb | ||
1206 | * that could use this fifo. (dma complicates it...) | ||
1207 | * | ||
1208 | * if (bulk && qh->ring.next != &musb->out_bulk), then | ||
1209 | * we have a candidate... NAKing is *NOT* an error | ||
1210 | */ | ||
1211 | musb_ep_select(mbase, epnum); | ||
1212 | musb_writew(epio, MUSB_TXCSR, | ||
1213 | MUSB_TXCSR_H_WZC_BITS | ||
1214 | | MUSB_TXCSR_TXPKTRDY); | ||
1215 | goto finish; | ||
1216 | } | ||
1217 | |||
1218 | if (status) { | ||
1219 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1220 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
1221 | (void) musb->dma_controller->channel_abort(dma); | ||
1222 | } | ||
1223 | |||
1224 | /* do the proper sequence to abort the transfer in the | ||
1225 | * usb core; the dma engine should already be stopped. | ||
1226 | */ | ||
1227 | musb_h_tx_flush_fifo(hw_ep); | ||
1228 | tx_csr &= ~(MUSB_TXCSR_AUTOSET | ||
1229 | | MUSB_TXCSR_DMAENAB | ||
1230 | | MUSB_TXCSR_H_ERROR | ||
1231 | | MUSB_TXCSR_H_RXSTALL | ||
1232 | | MUSB_TXCSR_H_NAKTIMEOUT | ||
1233 | ); | ||
1234 | |||
1235 | musb_ep_select(mbase, epnum); | ||
1236 | musb_writew(epio, MUSB_TXCSR, tx_csr); | ||
1237 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
1238 | musb_writew(epio, MUSB_TXCSR, tx_csr); | ||
1239 | musb_writeb(epio, MUSB_TXINTERVAL, 0); | ||
1240 | |||
1241 | done = true; | ||
1242 | } | ||
1243 | |||
1244 | /* second cppi case */ | ||
1245 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1246 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | ||
1247 | goto finish; | ||
1248 | |||
1249 | } | ||
1250 | |||
1251 | /* REVISIT this looks wrong... */ | ||
1252 | if (!status || dma || usb_pipeisoc(pipe)) { | ||
1253 | if (dma) | ||
1254 | wLength = dma->actual_len; | ||
1255 | else | ||
1256 | wLength = qh->segsize; | ||
1257 | qh->offset += wLength; | ||
1258 | |||
1259 | if (usb_pipeisoc(pipe)) { | ||
1260 | struct usb_iso_packet_descriptor *d; | ||
1261 | |||
1262 | d = urb->iso_frame_desc + qh->iso_idx; | ||
1263 | d->actual_length = qh->segsize; | ||
1264 | if (++qh->iso_idx >= urb->number_of_packets) { | ||
1265 | done = true; | ||
1266 | } else { | ||
1267 | d++; | ||
1268 | buf = urb->transfer_buffer + d->offset; | ||
1269 | wLength = d->length; | ||
1270 | } | ||
1271 | } else if (dma) { | ||
1272 | done = true; | ||
1273 | } else { | ||
1274 | /* see if we need to send more data, or ZLP */ | ||
1275 | if (qh->segsize < qh->maxpacket) | ||
1276 | done = true; | ||
1277 | else if (qh->offset == urb->transfer_buffer_length | ||
1278 | && !(urb->transfer_flags | ||
1279 | & URB_ZERO_PACKET)) | ||
1280 | done = true; | ||
1281 | if (!done) { | ||
1282 | buf = urb->transfer_buffer | ||
1283 | + qh->offset; | ||
1284 | wLength = urb->transfer_buffer_length | ||
1285 | - qh->offset; | ||
1286 | } | ||
1287 | } | ||
1288 | } | ||
1289 | |||
1290 | /* urb->status != -EINPROGRESS means request has been faulted, | ||
1291 | * so we must abort this transfer after cleanup | ||
1292 | */ | ||
1293 | if (urb->status != -EINPROGRESS) { | ||
1294 | done = true; | ||
1295 | if (status == 0) | ||
1296 | status = urb->status; | ||
1297 | } | ||
1298 | |||
1299 | if (done) { | ||
1300 | /* set status */ | ||
1301 | urb->status = status; | ||
1302 | urb->actual_length = qh->offset; | ||
1303 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); | ||
1304 | |||
1305 | } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) { | ||
1306 | /* WARN_ON(!buf); */ | ||
1307 | |||
1308 | /* REVISIT: some docs say that when hw_ep->tx_double_buffered, | ||
1309 | * (and presumably, fifo is not half-full) we should write TWO | ||
1310 | * packets before updating TXCSR ... other docs disagree ... | ||
1311 | */ | ||
1312 | /* PIO: start next packet in this URB */ | ||
1313 | wLength = min(qh->maxpacket, (u16) wLength); | ||
1314 | musb_write_fifo(hw_ep, wLength, buf); | ||
1315 | qh->segsize = wLength; | ||
1316 | |||
1317 | musb_ep_select(mbase, epnum); | ||
1318 | musb_writew(epio, MUSB_TXCSR, | ||
1319 | MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); | ||
1320 | } else | ||
1321 | DBG(1, "not complete, but dma enabled?\n"); | ||
1322 | |||
1323 | finish: | ||
1324 | return; | ||
1325 | } | ||
1326 | |||
1327 | |||
1328 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1329 | |||
1330 | /* Host side RX (IN) using Mentor DMA works as follows: | ||
1331 | submit_urb -> | ||
1332 | - if queue was empty, ProgramEndpoint | ||
1333 | - first IN token is sent out (by setting ReqPkt) | ||
1334 | LinuxIsr -> RxReady() | ||
1335 | /\ => first packet is received | ||
1336 | | - Set in mode 0 (DmaEnab, ~ReqPkt) | ||
1337 | | -> DMA Isr (transfer complete) -> RxReady() | ||
1338 | | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) | ||
1339 | | - if urb not complete, send next IN token (ReqPkt) | ||
1340 | | | else complete urb. | ||
1341 | | | | ||
1342 | --------------------------- | ||
1343 | * | ||
1344 | * Nuances of mode 1: | ||
1345 | * For short packets, no ack (+RxPktRdy) is sent automatically | ||
1346 | * (even if AutoClear is ON) | ||
1347 | * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent | ||
1348 | * automatically => major problem, as collecting the next packet becomes | ||
1349 | * difficult. Hence mode 1 is not used. | ||
1350 | * | ||
1351 | * REVISIT | ||
1352 | * All we care about at this driver level is that | ||
1353 | * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; | ||
1354 | * (b) termination conditions are: short RX, or buffer full; | ||
1355 | * (c) fault modes include | ||
1356 | * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. | ||
1357 | * (and that endpoint's dma queue stops immediately) | ||
1358 | * - overflow (full, PLUS more bytes in the terminal packet) | ||
1359 | * | ||
1360 | * So for example, usb-storage sets URB_SHORT_NOT_OK, and would | ||
1361 | * thus be a great candidate for using mode 1 ... for all but the | ||
1362 | * last packet of one URB's transfer. | ||
1363 | */ | ||
1364 | |||
1365 | #endif | ||
1366 | |||
1367 | /* | ||
1368 | * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, | ||
1369 | * and high-bandwidth IN transfer cases. | ||
1370 | */ | ||
1371 | void musb_host_rx(struct musb *musb, u8 epnum) | ||
1372 | { | ||
1373 | struct urb *urb; | ||
1374 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
1375 | void __iomem *epio = hw_ep->regs; | ||
1376 | struct musb_qh *qh = hw_ep->in_qh; | ||
1377 | size_t xfer_len; | ||
1378 | void __iomem *mbase = musb->mregs; | ||
1379 | int pipe; | ||
1380 | u16 rx_csr, val; | ||
1381 | bool iso_err = false; | ||
1382 | bool done = false; | ||
1383 | u32 status; | ||
1384 | struct dma_channel *dma; | ||
1385 | |||
1386 | musb_ep_select(mbase, epnum); | ||
1387 | |||
1388 | urb = next_urb(qh); | ||
1389 | dma = is_dma_capable() ? hw_ep->rx_channel : NULL; | ||
1390 | status = 0; | ||
1391 | xfer_len = 0; | ||
1392 | |||
1393 | rx_csr = musb_readw(epio, MUSB_RXCSR); | ||
1394 | val = rx_csr; | ||
1395 | |||
1396 | if (unlikely(!urb)) { | ||
1397 | /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least | ||
1398 | * usbtest #11 (unlinks) triggers it regularly, sometimes | ||
1399 | * with fifo full. (Only with DMA??) | ||
1400 | */ | ||
1401 | DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, | ||
1402 | musb_readw(epio, MUSB_RXCOUNT)); | ||
1403 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | ||
1404 | return; | ||
1405 | } | ||
1406 | |||
1407 | pipe = urb->pipe; | ||
1408 | |||
1409 | DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", | ||
1410 | epnum, rx_csr, urb->actual_length, | ||
1411 | dma ? dma->actual_len : 0); | ||
1412 | |||
1413 | /* check for errors, concurrent stall & unlink is not really | ||
1414 | * handled yet! */ | ||
1415 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { | ||
1416 | DBG(3, "RX end %d STALL\n", epnum); | ||
1417 | |||
1418 | /* stall; record URB status */ | ||
1419 | status = -EPIPE; | ||
1420 | |||
1421 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { | ||
1422 | DBG(3, "end %d RX proto error\n", epnum); | ||
1423 | |||
1424 | status = -EPROTO; | ||
1425 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | ||
1426 | |||
1427 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | ||
1428 | |||
1429 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | ||
1430 | /* NOTE this code path would be a good place to PAUSE a | ||
1431 | * transfer, if there's some other (nonperiodic) rx urb | ||
1432 | * that could use this fifo. (dma complicates it...) | ||
1433 | * | ||
1434 | * if (bulk && qh->ring.next != &musb->in_bulk), then | ||
1435 | * we have a candidate... NAKing is *NOT* an error | ||
1436 | */ | ||
1437 | DBG(6, "RX end %d NAK timeout\n", epnum); | ||
1438 | musb_ep_select(mbase, epnum); | ||
1439 | musb_writew(epio, MUSB_RXCSR, | ||
1440 | MUSB_RXCSR_H_WZC_BITS | ||
1441 | | MUSB_RXCSR_H_REQPKT); | ||
1442 | |||
1443 | goto finish; | ||
1444 | } else { | ||
1445 | DBG(4, "RX end %d ISO data error\n", epnum); | ||
1446 | /* packet error reported later */ | ||
1447 | iso_err = true; | ||
1448 | } | ||
1449 | } | ||
1450 | |||
1451 | /* faults abort the transfer */ | ||
1452 | if (status) { | ||
1453 | /* clean up dma and collect transfer count */ | ||
1454 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1455 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
1456 | (void) musb->dma_controller->channel_abort(dma); | ||
1457 | xfer_len = dma->actual_len; | ||
1458 | } | ||
1459 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | ||
1460 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | ||
1461 | done = true; | ||
1462 | goto finish; | ||
1463 | } | ||
1464 | |||
1465 | if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { | ||
1466 | /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ | ||
1467 | ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); | ||
1468 | goto finish; | ||
1469 | } | ||
1470 | |||
1471 | /* thorough shutdown for now ... given more precise fault handling | ||
1472 | * and better queueing support, we might keep a DMA pipeline going | ||
1473 | * while processing this irq for earlier completions. | ||
1474 | */ | ||
1475 | |||
1476 | /* FIXME this is _way_ too much in-line logic for Mentor DMA */ | ||
1477 | |||
1478 | #ifndef CONFIG_USB_INVENTRA_DMA | ||
1479 | if (rx_csr & MUSB_RXCSR_H_REQPKT) { | ||
1480 | /* REVISIT this happened for a while on some short reads... | ||
1481 | * the cleanup still needs investigation... looks bad... | ||
1482 | * and also duplicates dma cleanup code above ... plus, | ||
1483 | * shouldn't this be the "half full" double buffer case? | ||
1484 | */ | ||
1485 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1486 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
1487 | (void) musb->dma_controller->channel_abort(dma); | ||
1488 | xfer_len = dma->actual_len; | ||
1489 | done = true; | ||
1490 | } | ||
1491 | |||
1492 | DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, | ||
1493 | xfer_len, dma ? ", dma" : ""); | ||
1494 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | ||
1495 | |||
1496 | musb_ep_select(mbase, epnum); | ||
1497 | musb_writew(epio, MUSB_RXCSR, | ||
1498 | MUSB_RXCSR_H_WZC_BITS | rx_csr); | ||
1499 | } | ||
1500 | #endif | ||
1501 | if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { | ||
1502 | xfer_len = dma->actual_len; | ||
1503 | |||
1504 | val &= ~(MUSB_RXCSR_DMAENAB | ||
1505 | | MUSB_RXCSR_H_AUTOREQ | ||
1506 | | MUSB_RXCSR_AUTOCLEAR | ||
1507 | | MUSB_RXCSR_RXPKTRDY); | ||
1508 | musb_writew(hw_ep->regs, MUSB_RXCSR, val); | ||
1509 | |||
1510 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1511 | /* done if urb buffer is full or a short packet is received */ | ||
1512 | done = (urb->actual_length + xfer_len >= | ||
1513 | urb->transfer_buffer_length | ||
1514 | || dma->actual_len < qh->maxpacket); | ||
1515 | |||
1516 | /* send IN token for next packet, without AUTOREQ */ | ||
1517 | if (!done) { | ||
1518 | val |= MUSB_RXCSR_H_REQPKT; | ||
1519 | musb_writew(epio, MUSB_RXCSR, | ||
1520 | MUSB_RXCSR_H_WZC_BITS | val); | ||
1521 | } | ||
1522 | |||
1523 | DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, | ||
1524 | done ? "off" : "reset", | ||
1525 | musb_readw(epio, MUSB_RXCSR), | ||
1526 | musb_readw(epio, MUSB_RXCOUNT)); | ||
1527 | #else | ||
1528 | done = true; | ||
1529 | #endif | ||
1530 | } else if (urb->status == -EINPROGRESS) { | ||
1531 | /* if no errors, be sure a packet is ready for unloading */ | ||
1532 | if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { | ||
1533 | status = -EPROTO; | ||
1534 | ERR("Rx interrupt with no errors or packet!\n"); | ||
1535 | |||
1536 | /* FIXME this is another "SHOULD NEVER HAPPEN" */ | ||
1537 | |||
1538 | /* SCRUB (RX) */ | ||
1539 | /* do the proper sequence to abort the transfer */ | ||
1540 | musb_ep_select(mbase, epnum); | ||
1541 | val &= ~MUSB_RXCSR_H_REQPKT; | ||
1542 | musb_writew(epio, MUSB_RXCSR, val); | ||
1543 | goto finish; | ||
1544 | } | ||
1545 | |||
1546 | /* we are expecting IN packets */ | ||
1547 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1548 | if (dma) { | ||
1549 | struct dma_controller *c; | ||
1550 | u16 rx_count; | ||
1551 | int ret; | ||
1552 | |||
1553 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | ||
1554 | |||
1555 | DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", | ||
1556 | epnum, rx_count, | ||
1557 | urb->transfer_dma | ||
1558 | + urb->actual_length, | ||
1559 | qh->offset, | ||
1560 | urb->transfer_buffer_length); | ||
1561 | |||
1562 | c = musb->dma_controller; | ||
1563 | |||
1564 | dma->desired_mode = 0; | ||
1565 | #ifdef USE_MODE1 | ||
1566 | /* because of the issue below, mode 1 will | ||
1567 | * only rarely behave with correct semantics. | ||
1568 | */ | ||
1569 | if ((urb->transfer_flags & | ||
1570 | URB_SHORT_NOT_OK) | ||
1571 | && (urb->transfer_buffer_length - | ||
1572 | urb->actual_length) | ||
1573 | > qh->maxpacket) | ||
1574 | dma->desired_mode = 1; | ||
1575 | #endif | ||
1576 | |||
1577 | /* Disadvantage of using mode 1: | ||
1578 | * It's basically usable only for mass storage class; essentially all | ||
1579 | * other protocols also terminate transfers on short packets. | ||
1580 | * | ||
1581 | * Details: | ||
1582 | * An extra IN token is sent at the end of the transfer (due to AUTOREQ). | ||
1583 | * If you try to use mode 1 for (transfer_buffer_length - 512), and try | ||
1584 | * to use the extra IN token to grab the last packet using mode 0, then | ||
1585 | * the problem is that you cannot be sure when the device will send the | ||
1586 | * last packet and set RxPktRdy. Sometimes the packet is received too | ||
1587 | * soon and gets lost when RxCSR is re-set at the end of the mode 1 | ||
1588 | * transfer; sometimes it is received just a little late, so that if you | ||
1589 | * try to configure for mode 0 soon after the mode 1 transfer has | ||
1590 | * completed, you will find rxcount 0. You might think: why not wait | ||
1591 | * for an interrupt when the packet is received? Well, you won't get one! | ||
1592 | */ | ||
1593 | |||
1594 | val = musb_readw(epio, MUSB_RXCSR); | ||
1595 | val &= ~MUSB_RXCSR_H_REQPKT; | ||
1596 | |||
1597 | if (dma->desired_mode == 0) | ||
1598 | val &= ~MUSB_RXCSR_H_AUTOREQ; | ||
1599 | else | ||
1600 | val |= MUSB_RXCSR_H_AUTOREQ; | ||
1601 | val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; | ||
1602 | |||
1603 | musb_writew(epio, MUSB_RXCSR, | ||
1604 | MUSB_RXCSR_H_WZC_BITS | val); | ||
1605 | |||
1606 | /* REVISIT: if actual_length != 0, | ||
1607 | * transfer_buffer_length may need to be | ||
1608 | * adjusted first... | ||
1609 | */ | ||
1610 | ret = c->channel_program( | ||
1611 | dma, qh->maxpacket, | ||
1612 | dma->desired_mode, | ||
1613 | urb->transfer_dma | ||
1614 | + urb->actual_length, | ||
1615 | (dma->desired_mode == 0) | ||
1616 | ? rx_count | ||
1617 | : urb->transfer_buffer_length); | ||
1618 | |||
1619 | if (!ret) { | ||
1620 | c->channel_release(dma); | ||
1621 | hw_ep->rx_channel = NULL; | ||
1622 | dma = NULL; | ||
1623 | /* REVISIT reset CSR */ | ||
1624 | } | ||
1625 | } | ||
1626 | #endif /* Mentor DMA */ | ||
1627 | |||
1628 | if (!dma) { | ||
1629 | done = musb_host_packet_rx(musb, urb, | ||
1630 | epnum, iso_err); | ||
1631 | DBG(6, "read %spacket\n", done ? "last " : ""); | ||
1632 | } | ||
1633 | } | ||
1634 | |||
1635 | if (dma && usb_pipeisoc(pipe)) { | ||
1636 | struct usb_iso_packet_descriptor *d; | ||
1637 | int iso_stat = status; | ||
1638 | |||
1639 | d = urb->iso_frame_desc + qh->iso_idx; | ||
1640 | d->actual_length += xfer_len; | ||
1641 | if (iso_err) { | ||
1642 | iso_stat = -EILSEQ; | ||
1643 | urb->error_count++; | ||
1644 | } | ||
1645 | d->status = iso_stat; | ||
1646 | } | ||
1647 | |||
1648 | finish: | ||
1649 | urb->actual_length += xfer_len; | ||
1650 | qh->offset += xfer_len; | ||
1651 | if (done) { | ||
1652 | if (urb->status == -EINPROGRESS) | ||
1653 | urb->status = status; | ||
1654 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); | ||
1655 | } | ||
1656 | } | ||
1657 | |||
1658 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. | ||
1659 | * the software schedule associates multiple such nodes with a given | ||
1660 | * host side hardware endpoint + direction; scheduling may activate | ||
1661 | * that hardware endpoint. | ||
1662 | */ | ||
1663 | static int musb_schedule( | ||
1664 | struct musb *musb, | ||
1665 | struct musb_qh *qh, | ||
1666 | int is_in) | ||
1667 | { | ||
1668 | int idle; | ||
1669 | int best_diff; | ||
1670 | int best_end, epnum; | ||
1671 | struct musb_hw_ep *hw_ep = NULL; | ||
1672 | struct list_head *head = NULL; | ||
1673 | |||
1674 | /* use fixed hardware for control and bulk */ | ||
1675 | switch (qh->type) { | ||
1676 | case USB_ENDPOINT_XFER_CONTROL: | ||
1677 | head = &musb->control; | ||
1678 | hw_ep = musb->control_ep; | ||
1679 | break; | ||
1680 | case USB_ENDPOINT_XFER_BULK: | ||
1681 | hw_ep = musb->bulk_ep; | ||
1682 | if (is_in) | ||
1683 | head = &musb->in_bulk; | ||
1684 | else | ||
1685 | head = &musb->out_bulk; | ||
1686 | break; | ||
1687 | } | ||
1688 | if (head) { | ||
1689 | idle = list_empty(head); | ||
1690 | list_add_tail(&qh->ring, head); | ||
1691 | goto success; | ||
1692 | } | ||
1693 | |||
1694 | /* else, periodic transfers get muxed to other endpoints */ | ||
1695 | |||
1696 | /* FIXME this doesn't consider direction, so it can only | ||
1697 | * work for one half of the endpoint hardware, and assumes | ||
1698 | * the previous cases handled all non-shared endpoints... | ||
1699 | */ | ||
1700 | |||
1701 | /* we know this qh hasn't been scheduled, so all we need to do | ||
1702 | * is choose which hardware endpoint to put it on ... | ||
1703 | * | ||
1704 | * REVISIT what we really want here is a regular schedule tree | ||
1705 | * like e.g. OHCI uses, but for now musb->periodic is just an | ||
1706 | * array of the _single_ logical endpoint associated with a | ||
1707 | * given physical one (identity mapping logical->physical). | ||
1708 | * | ||
1709 | * that simplistic approach makes TT scheduling a lot simpler; | ||
1710 | * there is none, and thus none of its complexity... | ||
1711 | */ | ||
1712 | best_diff = 4096; | ||
1713 | best_end = -1; | ||
1714 | |||
1715 | for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { | ||
1716 | int diff; | ||
1717 | |||
1718 | if (musb->periodic[epnum]) | ||
1719 | continue; | ||
1720 | hw_ep = &musb->endpoints[epnum]; | ||
1721 | if (hw_ep == musb->bulk_ep) | ||
1722 | continue; | ||
1723 | |||
1724 | if (is_in) | ||
1725 | diff = hw_ep->max_packet_sz_rx - qh->maxpacket; | ||
1726 | else | ||
1727 | diff = hw_ep->max_packet_sz_tx - qh->maxpacket; | ||
1728 | |||
1729 | if (diff > 0 && best_diff > diff) { | ||
1730 | best_diff = diff; | ||
1731 | best_end = epnum; | ||
1732 | } | ||
1733 | } | ||
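	/* At this point best_end, if set, is the free periodic endpoint
	 * whose FIFO size exceeds qh->maxpacket by the smallest margin:
	 * a simple best-fit search over the hardware endpoints.
	 */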
1734 | if (best_end < 0) | ||
1735 | return -ENOSPC; | ||
1736 | |||
1737 | idle = 1; | ||
1738 | hw_ep = musb->endpoints + best_end; | ||
1739 | musb->periodic[best_end] = qh; | ||
1740 | DBG(4, "qh %p periodic slot %d\n", qh, best_end); | ||
1741 | success: | ||
1742 | qh->hw_ep = hw_ep; | ||
1743 | qh->hep->hcpriv = qh; | ||
1744 | if (idle) | ||
1745 | musb_start_urb(musb, is_in, qh); | ||
1746 | return 0; | ||
1747 | } | ||
1748 | |||
1749 | static int musb_urb_enqueue( | ||
1750 | struct usb_hcd *hcd, | ||
1751 | struct urb *urb, | ||
1752 | gfp_t mem_flags) | ||
1753 | { | ||
1754 | unsigned long flags; | ||
1755 | struct musb *musb = hcd_to_musb(hcd); | ||
1756 | struct usb_host_endpoint *hep = urb->ep; | ||
1757 | struct musb_qh *qh = hep->hcpriv; | ||
1758 | struct usb_endpoint_descriptor *epd = &hep->desc; | ||
1759 | int ret; | ||
1760 | unsigned type_reg; | ||
1761 | unsigned interval; | ||
1762 | |||
1763 | /* host role must be active */ | ||
1764 | if (!is_host_active(musb) || !musb->is_active) | ||
1765 | return -ENODEV; | ||
1766 | |||
1767 | spin_lock_irqsave(&musb->lock, flags); | ||
1768 | ret = usb_hcd_link_urb_to_ep(hcd, urb); | ||
1769 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1770 | if (ret) | ||
1771 | return ret; | ||
1772 | |||
1773 | /* DMA mapping was already done, if needed, and this urb is on | ||
1774 | * hep->urb_list ... so there's little to do unless hep wasn't | ||
1775 | * yet scheduled onto a live qh. | ||
1776 | * | ||
1777 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets | ||
1778 | * disabled, testing for empty qh->ring and avoiding qh setup costs | ||
1779 | * except for the first urb queued after a config change. | ||
1780 | */ | ||
1781 | if (qh) { | ||
1782 | urb->hcpriv = qh; | ||
1783 | return 0; | ||
1784 | } | ||
1785 | |||
1786 | /* Allocate and initialize qh, minimizing the work done each time | ||
1787 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. | ||
1788 | * | ||
1789 | * REVISIT consider a dedicated qh kmem_cache, so it's harder | ||
1790 | * for bugs in other kernel code to break this driver... | ||
1791 | */ | ||
1792 | qh = kzalloc(sizeof *qh, mem_flags); | ||
1793 | if (!qh) { | ||
1794 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
1795 | return -ENOMEM; | ||
1796 | } | ||
1797 | |||
1798 | qh->hep = hep; | ||
1799 | qh->dev = urb->dev; | ||
1800 | INIT_LIST_HEAD(&qh->ring); | ||
1801 | qh->is_ready = 1; | ||
1802 | |||
1803 | qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); | ||
1804 | |||
1805 | /* no high bandwidth support yet */ | ||
1806 | if (qh->maxpacket & ~0x7ff) { | ||
1807 | ret = -EMSGSIZE; | ||
1808 | goto done; | ||
1809 | } | ||
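	/* Note (USB 2.0 wMaxPacketSize encoding): bits 12:11 carry the
	 * high-bandwidth transaction multiplier, which is why any value
	 * with bits above 10 set is rejected just above.
	 */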
1810 | |||
1811 | qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | ||
1812 | qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; | ||
1813 | |||
1814 | /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ | ||
1815 | qh->addr_reg = (u8) usb_pipedevice(urb->pipe); | ||
1816 | |||
1817 | /* precompute rxtype/txtype/type0 register */ | ||
1818 | type_reg = (qh->type << 4) | qh->epnum; | ||
1819 | switch (urb->dev->speed) { | ||
1820 | case USB_SPEED_LOW: | ||
1821 | type_reg |= 0xc0; | ||
1822 | break; | ||
1823 | case USB_SPEED_FULL: | ||
1824 | type_reg |= 0x80; | ||
1825 | break; | ||
1826 | default: | ||
1827 | type_reg |= 0x40; | ||
1828 | } | ||
1829 | qh->type_reg = type_reg; | ||
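	/* For example (illustrative, assuming USB_ENDPOINT_XFER_BULK == 2):
	 * a bulk endpoint 2 on a high-speed device gives
	 * type_reg = (2 << 4) | 2 | 0x40 == 0x62.
	 */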
1830 | |||
1831 | /* precompute rxinterval/txinterval register */ | ||
1832 | interval = min((u8)16, epd->bInterval); /* log encoding */ | ||
1833 | switch (qh->type) { | ||
1834 | case USB_ENDPOINT_XFER_INT: | ||
1835 | /* fullspeed uses linear encoding */ | ||
1836 | if (USB_SPEED_FULL == urb->dev->speed) { | ||
1837 | interval = epd->bInterval; | ||
1838 | if (!interval) | ||
1839 | interval = 1; | ||
1840 | } | ||
1841 | /* FALLTHROUGH */ | ||
1842 | case USB_ENDPOINT_XFER_ISOC: | ||
1843 | /* iso always uses log encoding */ | ||
1844 | break; | ||
1845 | default: | ||
1846 | /* REVISIT we actually want to use NAK limits, hinting to the | ||
1847 | * transfer scheduling logic to try some other qh, e.g. try | ||
1848 | * for 2 msec first: | ||
1849 | * | ||
1850 | * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; | ||
1851 | * | ||
1852 | * The downside of disabling this is that transfer scheduling | ||
1853 | * gets VERY unfair for nonperiodic transfers; a misbehaving | ||
1854 | * peripheral could make that hurt. Or for reads, one that's | ||
1855 | * perfectly normal: network and other drivers keep reads | ||
1856 | * posted at all times; having one pending for a week should | ||
1857 | * be perfectly safe. | ||
1858 | * | ||
1859 | * The upside of disabling it is avoiding the need for the | ||
1860 | * transfer scheduling code to set this aside for a while. | ||
1861 | */ | ||
1862 | interval = 0; | ||
1863 | } | ||
1864 | qh->intv_reg = interval; | ||
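	/* Illustrative example, assuming the standard USB log encoding
	 * (an encoded value n means a period of 2^(n-1) (micro)frames):
	 * a high-speed interrupt endpoint with bInterval 4 is polled
	 * every 2^3 == 8 microframes.
	 */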
1865 | |||
1866 | /* precompute addressing for external hub/tt ports */ | ||
1867 | if (musb->is_multipoint) { | ||
1868 | struct usb_device *parent = urb->dev->parent; | ||
1869 | |||
1870 | if (parent != hcd->self.root_hub) { | ||
1871 | qh->h_addr_reg = (u8) parent->devnum; | ||
1872 | |||
1873 | /* set up tt info if needed */ | ||
1874 | if (urb->dev->tt) { | ||
1875 | qh->h_port_reg = (u8) urb->dev->ttport; | ||
1876 | qh->h_addr_reg |= 0x80; | ||
1877 | } | ||
1878 | } | ||
1879 | } | ||
1880 | |||
1881 | /* invariant: hep->hcpriv is null OR the qh that's already scheduled. | ||
1882 | * until we get real dma queues (with an entry for each urb/buffer), | ||
1883 | * we only have work to do in the former case. | ||
1884 | */ | ||
1885 | spin_lock_irqsave(&musb->lock, flags); | ||
1886 | if (hep->hcpriv) { | ||
1887 | /* some concurrent activity submitted another urb to hep... | ||
1888 | * odd, rare, error prone, but legal. | ||
1889 | */ | ||
1890 | kfree(qh); | ||
1891 | ret = 0; | ||
1892 | } else | ||
1893 | ret = musb_schedule(musb, qh, | ||
1894 | epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); | ||
1895 | |||
1896 | if (ret == 0) { | ||
1897 | urb->hcpriv = qh; | ||
1898 | /* FIXME set urb->start_frame for iso/intr, it's tested in | ||
1899 | * musb_start_urb(), but otherwise only konicawc cares ... | ||
1900 | */ | ||
1901 | } | ||
1902 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1903 | |||
1904 | done: | ||
1905 | if (ret != 0) { | ||
1906 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
1907 | kfree(qh); | ||
1908 | } | ||
1909 | return ret; | ||
1910 | } | ||
1911 | |||
1912 | |||
1913 | /* | ||
1914 | * abort a transfer that's at the head of a hardware queue. | ||
1915 | * called with controller locked, irqs blocked | ||
1916 | * that hardware queue advances to the next transfer, unless prevented | ||
1917 | */ | ||
1918 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) | ||
1919 | { | ||
1920 | struct musb_hw_ep *ep = qh->hw_ep; | ||
1921 | void __iomem *epio = ep->regs; | ||
1922 | unsigned hw_end = ep->epnum; | ||
1923 | void __iomem *regs = ep->musb->mregs; | ||
1924 | u16 csr; | ||
1925 | int status = 0; | ||
1926 | |||
1927 | musb_ep_select(regs, hw_end); | ||
1928 | |||
1929 | if (is_dma_capable()) { | ||
1930 | struct dma_channel *dma; | ||
1931 | |||
1932 | dma = is_in ? ep->rx_channel : ep->tx_channel; | ||
1933 | if (dma) { | ||
1934 | status = ep->musb->dma_controller->channel_abort(dma); | ||
1935 | DBG(status ? 1 : 3, | ||
1936 | "abort %cX%d DMA for urb %p --> %d\n", | ||
1937 | is_in ? 'R' : 'T', ep->epnum, | ||
1938 | urb, status); | ||
1939 | urb->actual_length += dma->actual_len; | ||
1940 | } | ||
1941 | } | ||
1942 | |||
1943 | /* turn off DMA requests, discard state, stop polling ... */ | ||
1944 | if (is_in) { | ||
1945 | /* giveback saves bulk toggle */ | ||
1946 | csr = musb_h_flush_rxfifo(ep, 0); | ||
1947 | |||
1948 | /* REVISIT we still get an irq; should likely clear the | ||
1949 | * endpoint's irq status here to avoid bogus irqs. | ||
1950 | * clearing that status is platform-specific... | ||
1951 | */ | ||
1952 | } else { | ||
1953 | musb_h_tx_flush_fifo(ep); | ||
1954 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1955 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
1956 | | MUSB_TXCSR_DMAENAB | ||
1957 | | MUSB_TXCSR_H_RXSTALL | ||
1958 | | MUSB_TXCSR_H_NAKTIMEOUT | ||
1959 | | MUSB_TXCSR_H_ERROR | ||
1960 | | MUSB_TXCSR_TXPKTRDY); | ||
1961 | musb_writew(epio, MUSB_TXCSR, csr); | ||
1962 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
1963 | musb_writew(epio, MUSB_TXCSR, csr); | ||
1964 | /* flush cpu writebuffer */ | ||
1965 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1966 | } | ||
1967 | if (status == 0) | ||
1968 | musb_advance_schedule(ep->musb, urb, ep, is_in); | ||
1969 | return status; | ||
1970 | } | ||
1971 | |||
1972 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | ||
1973 | { | ||
1974 | struct musb *musb = hcd_to_musb(hcd); | ||
1975 | struct musb_qh *qh; | ||
1976 | struct list_head *sched; | ||
1977 | unsigned long flags; | ||
1978 | int ret; | ||
1979 | |||
1980 | DBG(4, "urb=%p, dev%d ep%d%s\n", urb, | ||
1981 | usb_pipedevice(urb->pipe), | ||
1982 | usb_pipeendpoint(urb->pipe), | ||
1983 | usb_pipein(urb->pipe) ? "in" : "out"); | ||
1984 | |||
1985 | spin_lock_irqsave(&musb->lock, flags); | ||
1986 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
1987 | if (ret) | ||
1988 | goto done; | ||
1989 | |||
1990 | qh = urb->hcpriv; | ||
1991 | if (!qh) | ||
1992 | goto done; | ||
1993 | |||
1994 | /* Any URB not actively programmed into endpoint hardware can be | ||
1995 | * immediately given back. Such an URB must be at the head of its | ||
1996 | * endpoint queue, unless someday we get real DMA queues. And even | ||
1997 | * then, it might not be known to the hardware... | ||
1998 | * | ||
1999 | * Otherwise abort current transfer, pending dma, etc.; urb->status | ||
2000 | * has already been updated. This is a synchronous abort; it'd be | ||
2001 | * OK to hold off until after some IRQ, though. | ||
2002 | */ | ||
2003 | if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) | ||
2004 | ret = -EINPROGRESS; | ||
2005 | else { | ||
2006 | switch (qh->type) { | ||
2007 | case USB_ENDPOINT_XFER_CONTROL: | ||
2008 | sched = &musb->control; | ||
2009 | break; | ||
2010 | case USB_ENDPOINT_XFER_BULK: | ||
2011 | if (usb_pipein(urb->pipe)) | ||
2012 | sched = &musb->in_bulk; | ||
2013 | else | ||
2014 | sched = &musb->out_bulk; | ||
2015 | break; | ||
2016 | default: | ||
2017 | /* REVISIT when we get a schedule tree, periodic | ||
2018 | * transfers won't always be at the head of a | ||
2019 | * singleton queue... | ||
2020 | */ | ||
2021 | sched = NULL; | ||
2022 | break; | ||
2023 | } | ||
2024 | } | ||
2025 | |||
2026 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | ||
2027 | if (ret < 0 || (sched && qh != first_qh(sched))) { | ||
2028 | int ready = qh->is_ready; | ||
2029 | |||
2030 | ret = 0; | ||
2031 | qh->is_ready = 0; | ||
2032 | __musb_giveback(musb, urb, 0); | ||
2033 | qh->is_ready = ready; | ||
2034 | } else | ||
2035 | ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | ||
2036 | done: | ||
2037 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2038 | return ret; | ||
2039 | } | ||
2040 | |||
2041 | /* disable an endpoint */ | ||
2042 | static void | ||
2043 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) | ||
2044 | { | ||
2045 | u8 epnum = hep->desc.bEndpointAddress; | ||
2046 | unsigned long flags; | ||
2047 | struct musb *musb = hcd_to_musb(hcd); | ||
2048 | u8 is_in = epnum & USB_DIR_IN; | ||
2049 | struct musb_qh *qh = hep->hcpriv; | ||
2050 | struct urb *urb, *tmp; | ||
2051 | struct list_head *sched; | ||
2052 | |||
2053 | if (!qh) | ||
2054 | return; | ||
2055 | |||
2056 | spin_lock_irqsave(&musb->lock, flags); | ||
2057 | |||
2058 | switch (qh->type) { | ||
2059 | case USB_ENDPOINT_XFER_CONTROL: | ||
2060 | sched = &musb->control; | ||
2061 | break; | ||
2062 | case USB_ENDPOINT_XFER_BULK: | ||
2063 | if (is_in) | ||
2064 | sched = &musb->in_bulk; | ||
2065 | else | ||
2066 | sched = &musb->out_bulk; | ||
2067 | break; | ||
2068 | default: | ||
2069 | /* REVISIT when we get a schedule tree, periodic transfers | ||
2070 | * won't always be at the head of a singleton queue... | ||
2071 | */ | ||
2072 | sched = NULL; | ||
2073 | break; | ||
2074 | } | ||
2075 | |||
2076 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | ||
2077 | |||
2078 | /* kick first urb off the hardware, if needed */ | ||
2079 | qh->is_ready = 0; | ||
2080 | if (!sched || qh == first_qh(sched)) { | ||
2081 | urb = next_urb(qh); | ||
2082 | |||
2083 | /* make software (then hardware) stop ASAP */ | ||
2084 | if (!urb->unlinked) | ||
2085 | urb->status = -ESHUTDOWN; | ||
2086 | |||
2087 | /* cleanup */ | ||
2088 | musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | ||
2089 | } else | ||
2090 | urb = NULL; | ||
2091 | |||
2092 | /* then just nuke all the others */ | ||
2093 | list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) | ||
2094 | musb_giveback(qh, urb, -ESHUTDOWN); | ||
2095 | |||
2096 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2097 | } | ||
2098 | |||
2099 | static int musb_h_get_frame_number(struct usb_hcd *hcd) | ||
2100 | { | ||
2101 | struct musb *musb = hcd_to_musb(hcd); | ||
2102 | |||
2103 | return musb_readw(musb->mregs, MUSB_FRAME); | ||
2104 | } | ||
2105 | |||
2106 | static int musb_h_start(struct usb_hcd *hcd) | ||
2107 | { | ||
2108 | struct musb *musb = hcd_to_musb(hcd); | ||
2109 | |||
2110 | /* NOTE: musb_start() is called when the hub driver turns | ||
2111 | * on port power, or when (OTG) peripheral starts. | ||
2112 | */ | ||
2113 | hcd->state = HC_STATE_RUNNING; | ||
2114 | musb->port1_status = 0; | ||
2115 | return 0; | ||
2116 | } | ||
2117 | |||
2118 | static void musb_h_stop(struct usb_hcd *hcd) | ||
2119 | { | ||
2120 | musb_stop(hcd_to_musb(hcd)); | ||
2121 | hcd->state = HC_STATE_HALT; | ||
2122 | } | ||
2123 | |||
2124 | static int musb_bus_suspend(struct usb_hcd *hcd) | ||
2125 | { | ||
2126 | struct musb *musb = hcd_to_musb(hcd); | ||
2127 | |||
2128 | if (musb->xceiv.state == OTG_STATE_A_SUSPEND) | ||
2129 | return 0; | ||
2130 | |||
2131 | if (is_host_active(musb) && musb->is_active) { | ||
2132 | WARNING("trying to suspend as %s is_active=%i\n", | ||
2133 | otg_state_string(musb), musb->is_active); | ||
2134 | return -EBUSY; | ||
2135 | } else | ||
2136 | return 0; | ||
2137 | } | ||
2138 | |||
2139 | static int musb_bus_resume(struct usb_hcd *hcd) | ||
2140 | { | ||
2141 | /* resuming child port does the work */ | ||
2142 | return 0; | ||
2143 | } | ||
2144 | |||
2145 | const struct hc_driver musb_hc_driver = { | ||
2146 | .description = "musb-hcd", | ||
2147 | .product_desc = "MUSB HDRC host driver", | ||
2148 | .hcd_priv_size = sizeof(struct musb), | ||
2149 | .flags = HCD_USB2 | HCD_MEMORY, | ||
2150 | |||
2151 | /* not using irq handler or reset hooks from usbcore, since | ||
2152 | * those must be shared with peripheral code for OTG configs | ||
2153 | */ | ||
2154 | |||
2155 | .start = musb_h_start, | ||
2156 | .stop = musb_h_stop, | ||
2157 | |||
2158 | .get_frame_number = musb_h_get_frame_number, | ||
2159 | |||
2160 | .urb_enqueue = musb_urb_enqueue, | ||
2161 | .urb_dequeue = musb_urb_dequeue, | ||
2162 | .endpoint_disable = musb_h_disable, | ||
2163 | |||
2164 | .hub_status_data = musb_hub_status_data, | ||
2165 | .hub_control = musb_hub_control, | ||
2166 | .bus_suspend = musb_bus_suspend, | ||
2167 | .bus_resume = musb_bus_resume, | ||
2168 | /* .start_port_reset = NULL, */ | ||
2169 | /* .hub_irq_enable = NULL, */ | ||
2170 | }; | ||
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h new file mode 100644 index 000000000000..77bcdb9d5b32 --- /dev/null +++ b/drivers/usb/musb/musb_host.h | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver host defines | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef _MUSB_HOST_H | ||
36 | #define _MUSB_HOST_H | ||
37 | |||
38 | static inline struct usb_hcd *musb_to_hcd(struct musb *musb) | ||
39 | { | ||
40 | return container_of((void *) musb, struct usb_hcd, hcd_priv); | ||
41 | } | ||
42 | |||
43 | static inline struct musb *hcd_to_musb(struct usb_hcd *hcd) | ||
44 | { | ||
45 | return (struct musb *) (hcd->hcd_priv); | ||
46 | } | ||
47 | |||
48 | /* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */ | ||
49 | struct musb_qh { | ||
50 | struct usb_host_endpoint *hep; /* usbcore info */ | ||
51 | struct usb_device *dev; | ||
52 | struct musb_hw_ep *hw_ep; /* current binding */ | ||
53 | |||
54 | struct list_head ring; /* of musb_qh */ | ||
55 | /* struct musb_qh *next; */ /* for periodic tree */ | ||
56 | |||
57 | unsigned offset; /* in urb->transfer_buffer */ | ||
58 | unsigned segsize; /* current xfer fragment */ | ||
59 | |||
60 | u8 type_reg; /* {rx,tx} type register */ | ||
61 | u8 intv_reg; /* {rx,tx} interval register */ | ||
62 | u8 addr_reg; /* device address register */ | ||
63 | u8 h_addr_reg; /* hub address register */ | ||
64 | u8 h_port_reg; /* hub port register */ | ||
65 | |||
66 | u8 is_ready; /* safe to modify hw_ep */ | ||
67 | u8 type; /* XFERTYPE_* */ | ||
68 | u8 epnum; | ||
69 | u16 maxpacket; | ||
70 | u16 frame; /* for periodic schedule */ | ||
71 | unsigned iso_idx; /* in urb->iso_frame_desc[] */ | ||
72 | }; | ||
73 | |||
74 | /* map from control or bulk queue head to the first qh on that ring */ | ||
75 | static inline struct musb_qh *first_qh(struct list_head *q) | ||
76 | { | ||
77 | if (list_empty(q)) | ||
78 | return NULL; | ||
79 | return list_entry(q->next, struct musb_qh, ring); | ||
80 | } | ||
81 | |||
82 | |||
83 | extern void musb_root_disconnect(struct musb *musb); | ||
84 | |||
85 | struct usb_hcd; | ||
86 | |||
87 | extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf); | ||
88 | extern int musb_hub_control(struct usb_hcd *hcd, | ||
89 | u16 typeReq, u16 wValue, u16 wIndex, | ||
90 | char *buf, u16 wLength); | ||
91 | |||
92 | extern const struct hc_driver musb_hc_driver; | ||
93 | |||
94 | static inline struct urb *next_urb(struct musb_qh *qh) | ||
95 | { | ||
96 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
97 | struct list_head *queue; | ||
98 | |||
99 | if (!qh) | ||
100 | return NULL; | ||
101 | queue = &qh->hep->urb_list; | ||
102 | if (list_empty(queue)) | ||
103 | return NULL; | ||
104 | return list_entry(queue->next, struct urb, urb_list); | ||
105 | #else | ||
106 | return NULL; | ||
107 | #endif | ||
108 | } | ||
109 | |||
110 | #endif /* _MUSB_HOST_H */ | ||
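The first_qh() and next_urb() helpers above are what the host code uses to locate the transfer currently owned by the hardware. A minimal sketch of how they combine (illustrative only, not part of the patch; musb_first_active_urb is a hypothetical name, and musb->lock is assumed held as in musb_urb_dequeue above):

static struct urb *musb_first_active_urb(struct musb *musb)
{
	/* head of the control schedule, or NULL if nothing is queued */
	struct musb_qh *qh = first_qh(&musb->control);

	/* next_urb() also returns NULL for a NULL qh or an empty queue */
	return next_urb(qh);
}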
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h new file mode 100644 index 000000000000..6bbedae83af8 --- /dev/null +++ b/drivers/usb/musb/musb_io.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver register I/O | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef __MUSB_LINUX_PLATFORM_ARCH_H__ | ||
36 | #define __MUSB_LINUX_PLATFORM_ARCH_H__ | ||
37 | |||
38 | #include <linux/io.h> | ||
39 | |||
40 | #ifndef CONFIG_ARM | ||
41 | static inline void readsl(const void __iomem *addr, void *buf, int len) | ||
42 | { insl((unsigned long)addr, buf, len); } | ||
43 | static inline void readsw(const void __iomem *addr, void *buf, int len) | ||
44 | { insw((unsigned long)addr, buf, len); } | ||
45 | static inline void readsb(const void __iomem *addr, void *buf, int len) | ||
46 | { insb((unsigned long)addr, buf, len); } | ||
47 | |||
48 | static inline void writesl(const void __iomem *addr, const void *buf, int len) | ||
49 | { outsl((unsigned long)addr, buf, len); } | ||
50 | static inline void writesw(const void __iomem *addr, const void *buf, int len) | ||
51 | { outsw((unsigned long)addr, buf, len); } | ||
52 | static inline void writesb(const void __iomem *addr, const void *buf, int len) | ||
53 | { outsb((unsigned long)addr, buf, len); } | ||
54 | |||
55 | #endif | ||
56 | |||
57 | /* NOTE: these offsets are all in bytes */ | ||
58 | |||
59 | static inline u16 musb_readw(const void __iomem *addr, unsigned offset) | ||
60 | { return __raw_readw(addr + offset); } | ||
61 | |||
62 | static inline u32 musb_readl(const void __iomem *addr, unsigned offset) | ||
63 | { return __raw_readl(addr + offset); } | ||
64 | |||
65 | |||
66 | static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data) | ||
67 | { __raw_writew(data, addr + offset); } | ||
68 | |||
69 | static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data) | ||
70 | { __raw_writel(data, addr + offset); } | ||
71 | |||
72 | |||
73 | #ifdef CONFIG_USB_TUSB6010 | ||
74 | |||
75 | /* | ||
76 | * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. | ||
77 | */ | ||
78 | static inline u8 musb_readb(const void __iomem *addr, unsigned offset) | ||
79 | { | ||
80 | u16 tmp; | ||
81 | u8 val; | ||
82 | |||
83 | tmp = __raw_readw(addr + (offset & ~1)); | ||
84 | if (offset & 1) | ||
85 | val = (tmp >> 8); | ||
86 | else | ||
87 | val = tmp & 0xff; | ||
88 | |||
89 | return val; | ||
90 | } | ||
91 | |||
92 | static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) | ||
93 | { | ||
94 | u16 tmp; | ||
95 | |||
96 | tmp = __raw_readw(addr + (offset & ~1)); | ||
97 | if (offset & 1) | ||
98 | tmp = (data << 8) | (tmp & 0xff); | ||
99 | else | ||
100 | tmp = (tmp & 0xff00) | data; | ||
101 | |||
102 | __raw_writew(tmp, addr + (offset & ~1)); | ||
103 | } | ||
104 | |||
105 | #else | ||
106 | |||
107 | static inline u8 musb_readb(const void __iomem *addr, unsigned offset) | ||
108 | { return __raw_readb(addr + offset); } | ||
109 | |||
110 | static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) | ||
111 | { __raw_writeb(data, addr + offset); } | ||
112 | |||
113 | #endif /* CONFIG_USB_TUSB6010 */ | ||
114 | |||
115 | #endif | ||
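These accessors take a base pointer plus a byte offset, so register access elsewhere in the patch is a plain read-modify-write. A sketch of that idiom, following the pattern musb_port_suspend() uses in musb_virthub.c below (illustrative, not new driver code):

	u8 power;

	/* suspend the root port: read POWER, set SUSPENDM, write back */
	power = musb_readb(musb->mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	power |= MUSB_POWER_SUSPENDM;
	musb_writeb(musb->mregs, MUSB_POWER, power);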
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h new file mode 100644 index 000000000000..9c228661aa5a --- /dev/null +++ b/drivers/usb/musb/musb_regs.h | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver register defines | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef __MUSB_REGS_H__ | ||
36 | #define __MUSB_REGS_H__ | ||
37 | |||
38 | #define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */ | ||
39 | |||
40 | /* | ||
41 | * Common USB registers | ||
42 | */ | ||
43 | |||
44 | #define MUSB_FADDR 0x00 /* 8-bit */ | ||
45 | #define MUSB_POWER 0x01 /* 8-bit */ | ||
46 | |||
47 | #define MUSB_INTRTX 0x02 /* 16-bit */ | ||
48 | #define MUSB_INTRRX 0x04 | ||
49 | #define MUSB_INTRTXE 0x06 | ||
50 | #define MUSB_INTRRXE 0x08 | ||
51 | #define MUSB_INTRUSB 0x0A /* 8 bit */ | ||
52 | #define MUSB_INTRUSBE 0x0B /* 8 bit */ | ||
53 | #define MUSB_FRAME 0x0C | ||
54 | #define MUSB_INDEX 0x0E /* 8 bit */ | ||
55 | #define MUSB_TESTMODE 0x0F /* 8 bit */ | ||
56 | |||
57 | /* Get offset for a given FIFO from musb->mregs */ | ||
58 | #ifdef CONFIG_USB_TUSB6010 | ||
59 | #define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) | ||
60 | #else | ||
61 | #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) | ||
62 | #endif | ||
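So on most cores each endpoint's FIFO window sits at a 4-byte stride from offset 0x20, while TUSB6010 spaces them 0x20 bytes apart starting at 0x200. A hedged sketch of forming the window address from musb->mregs, per the comment above (illustrative; the FIFO setup code itself is not part of this hunk):

	/* byte address of endpoint epnum's FIFO window */
	void __iomem *fifo = musb->mregs + MUSB_FIFO_OFFSET(epnum);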
63 | |||
64 | /* | ||
65 | * Additional Control Registers | ||
66 | */ | ||
67 | |||
68 | #define MUSB_DEVCTL 0x60 /* 8 bit */ | ||
69 | |||
70 | /* These are always controlled through the INDEX register */ | ||
71 | #define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */ | ||
72 | #define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */ | ||
73 | #define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */ | ||
74 | #define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */ | ||
75 | |||
76 | /* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ | ||
77 | #define MUSB_HWVERS 0x6C /* 8 bit */ | ||
78 | |||
79 | #define MUSB_EPINFO 0x78 /* 8 bit */ | ||
80 | #define MUSB_RAMINFO 0x79 /* 8 bit */ | ||
81 | #define MUSB_LINKINFO 0x7a /* 8 bit */ | ||
82 | #define MUSB_VPLEN 0x7b /* 8 bit */ | ||
83 | #define MUSB_HS_EOF1 0x7c /* 8 bit */ | ||
84 | #define MUSB_FS_EOF1 0x7d /* 8 bit */ | ||
85 | #define MUSB_LS_EOF1 0x7e /* 8 bit */ | ||
86 | |||
87 | /* Offsets to endpoint registers */ | ||
88 | #define MUSB_TXMAXP 0x00 | ||
89 | #define MUSB_TXCSR 0x02 | ||
90 | #define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */ | ||
91 | #define MUSB_RXMAXP 0x04 | ||
92 | #define MUSB_RXCSR 0x06 | ||
93 | #define MUSB_RXCOUNT 0x08 | ||
94 | #define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */ | ||
95 | #define MUSB_TXTYPE 0x0A | ||
96 | #define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */ | ||
97 | #define MUSB_TXINTERVAL 0x0B | ||
98 | #define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */ | ||
99 | #define MUSB_RXTYPE 0x0C | ||
100 | #define MUSB_RXINTERVAL 0x0D | ||
101 | #define MUSB_FIFOSIZE 0x0F | ||
102 | #define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */ | ||
103 | |||
104 | /* Offsets to endpoint registers in indexed model (using INDEX register) */ | ||
105 | #define MUSB_INDEXED_OFFSET(_epnum, _offset) \ | ||
106 | (0x10 + (_offset)) | ||
107 | |||
108 | /* Offsets to endpoint registers in flat models */ | ||
109 | #define MUSB_FLAT_OFFSET(_epnum, _offset) \ | ||
110 | (0x100 + (0x10*(_epnum)) + (_offset)) | ||
111 | |||
112 | #ifdef CONFIG_USB_TUSB6010 | ||
113 | /* TUSB6010 EP0 configuration register is special */ | ||
114 | #define MUSB_TUSB_OFFSET(_epnum, _offset) \ | ||
115 | (0x10 + _offset) | ||
116 | #include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */ | ||
117 | #endif | ||
118 | |||
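The indexed model reuses one 0x10..0x1F register window for whichever endpoint was last selected, while the flat model gives every endpoint its own 0x10-byte block at 0x100. A sketch of an indexed-model TXCSR read (hypothetical helper, not from the patch; it assumes that writing the endpoint number to MUSB_INDEX selects the window, which is what musb_ep_select() is expected to do on indexed cores):

static u16 musb_indexed_read_txcsr(struct musb *musb, u8 epnum)
{
	/* select the endpoint, then use the shared register window */
	musb_writeb(musb->mregs, MUSB_INDEX, epnum);
	return musb_readw(musb->mregs,
			MUSB_INDEXED_OFFSET(epnum, MUSB_TXCSR));
}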
119 | /* "bus control"/target registers, for host side multipoint (external hubs) */ | ||
120 | #define MUSB_TXFUNCADDR 0x00 | ||
121 | #define MUSB_TXHUBADDR 0x02 | ||
122 | #define MUSB_TXHUBPORT 0x03 | ||
123 | |||
124 | #define MUSB_RXFUNCADDR 0x04 | ||
125 | #define MUSB_RXHUBADDR 0x06 | ||
126 | #define MUSB_RXHUBPORT 0x07 | ||
127 | |||
128 | #define MUSB_BUSCTL_OFFSET(_epnum, _offset) \ | ||
129 | (0x80 + (8*(_epnum)) + (_offset)) | ||
130 | |||
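These per-endpoint target registers carry the qh->addr_reg / h_addr_reg / h_port_reg values declared in musb_host.h above. A hedged sketch of programming the TX side for one endpoint (musb_set_tx_target is a hypothetical name, not taken from the patch):

static void musb_set_tx_target(struct musb *musb, u8 epnum,
				struct musb_qh *qh)
{
	void __iomem *mbase = musb->mregs;

	musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
			qh->addr_reg);
	musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
			qh->h_addr_reg);
	musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
			qh->h_port_reg);
}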
131 | /* | ||
132 | * MUSB Register bits | ||
133 | */ | ||
134 | |||
135 | /* POWER */ | ||
136 | #define MUSB_POWER_ISOUPDATE 0x80 | ||
137 | #define MUSB_POWER_SOFTCONN 0x40 | ||
138 | #define MUSB_POWER_HSENAB 0x20 | ||
139 | #define MUSB_POWER_HSMODE 0x10 | ||
140 | #define MUSB_POWER_RESET 0x08 | ||
141 | #define MUSB_POWER_RESUME 0x04 | ||
142 | #define MUSB_POWER_SUSPENDM 0x02 | ||
143 | #define MUSB_POWER_ENSUSPEND 0x01 | ||
144 | |||
145 | /* INTRUSB */ | ||
146 | #define MUSB_INTR_SUSPEND 0x01 | ||
147 | #define MUSB_INTR_RESUME 0x02 | ||
148 | #define MUSB_INTR_RESET 0x04 | ||
149 | #define MUSB_INTR_BABBLE 0x04 | ||
150 | #define MUSB_INTR_SOF 0x08 | ||
151 | #define MUSB_INTR_CONNECT 0x10 | ||
152 | #define MUSB_INTR_DISCONNECT 0x20 | ||
153 | #define MUSB_INTR_SESSREQ 0x40 | ||
154 | #define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */ | ||
155 | |||
156 | /* DEVCTL */ | ||
157 | #define MUSB_DEVCTL_BDEVICE 0x80 | ||
158 | #define MUSB_DEVCTL_FSDEV 0x40 | ||
159 | #define MUSB_DEVCTL_LSDEV 0x20 | ||
160 | #define MUSB_DEVCTL_VBUS 0x18 | ||
161 | #define MUSB_DEVCTL_VBUS_SHIFT 3 | ||
162 | #define MUSB_DEVCTL_HM 0x04 | ||
163 | #define MUSB_DEVCTL_HR 0x02 | ||
164 | #define MUSB_DEVCTL_SESSION 0x01 | ||
165 | |||
166 | /* TESTMODE */ | ||
167 | #define MUSB_TEST_FORCE_HOST 0x80 | ||
168 | #define MUSB_TEST_FIFO_ACCESS 0x40 | ||
169 | #define MUSB_TEST_FORCE_FS 0x20 | ||
170 | #define MUSB_TEST_FORCE_HS 0x10 | ||
171 | #define MUSB_TEST_PACKET 0x08 | ||
172 | #define MUSB_TEST_K 0x04 | ||
173 | #define MUSB_TEST_J 0x02 | ||
174 | #define MUSB_TEST_SE0_NAK 0x01 | ||
175 | |||
176 | /* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */ | ||
177 | #define MUSB_FIFOSZ_DPB 0x10 | ||
178 | /* Allocation size (8, 16, 32, ... 4096) */ | ||
179 | #define MUSB_FIFOSZ_SIZE 0x0f | ||
180 | |||
181 | /* CSR0 */ | ||
182 | #define MUSB_CSR0_FLUSHFIFO 0x0100 | ||
183 | #define MUSB_CSR0_TXPKTRDY 0x0002 | ||
184 | #define MUSB_CSR0_RXPKTRDY 0x0001 | ||
185 | |||
186 | /* CSR0 in Peripheral mode */ | ||
187 | #define MUSB_CSR0_P_SVDSETUPEND 0x0080 | ||
188 | #define MUSB_CSR0_P_SVDRXPKTRDY 0x0040 | ||
189 | #define MUSB_CSR0_P_SENDSTALL 0x0020 | ||
190 | #define MUSB_CSR0_P_SETUPEND 0x0010 | ||
191 | #define MUSB_CSR0_P_DATAEND 0x0008 | ||
192 | #define MUSB_CSR0_P_SENTSTALL 0x0004 | ||
193 | |||
194 | /* CSR0 in Host mode */ | ||
195 | #define MUSB_CSR0_H_DIS_PING 0x0800 | ||
196 | #define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */ | ||
197 | #define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */ | ||
198 | #define MUSB_CSR0_H_NAKTIMEOUT 0x0080 | ||
199 | #define MUSB_CSR0_H_STATUSPKT 0x0040 | ||
200 | #define MUSB_CSR0_H_REQPKT 0x0020 | ||
201 | #define MUSB_CSR0_H_ERROR 0x0010 | ||
202 | #define MUSB_CSR0_H_SETUPPKT 0x0008 | ||
203 | #define MUSB_CSR0_H_RXSTALL 0x0004 | ||
204 | |||
205 | /* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */ | ||
206 | #define MUSB_CSR0_P_WZC_BITS \ | ||
207 | (MUSB_CSR0_P_SENTSTALL) | ||
208 | #define MUSB_CSR0_H_WZC_BITS \ | ||
209 | (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \ | ||
210 | | MUSB_CSR0_RXPKTRDY) | ||
211 | |||
212 | /* TxType/RxType */ | ||
213 | #define MUSB_TYPE_SPEED 0xc0 | ||
214 | #define MUSB_TYPE_SPEED_SHIFT 6 | ||
215 | #define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */ | ||
216 | #define MUSB_TYPE_PROTO_SHIFT 4 | ||
217 | #define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */ | ||
218 | |||
219 | /* CONFIGDATA */ | ||
220 | #define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */ | ||
221 | #define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */ | ||
222 | #define MUSB_CONFIGDATA_BIGENDIAN 0x20 | ||
223 | #define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ | ||
224 | #define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ | ||
225 | #define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */ | ||
226 | #define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ | ||
227 | #define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */ | ||
228 | |||
229 | /* TXCSR in Peripheral and Host mode */ | ||
230 | #define MUSB_TXCSR_AUTOSET 0x8000 | ||
231 | #define MUSB_TXCSR_MODE 0x2000 | ||
232 | #define MUSB_TXCSR_DMAENAB 0x1000 | ||
233 | #define MUSB_TXCSR_FRCDATATOG 0x0800 | ||
234 | #define MUSB_TXCSR_DMAMODE 0x0400 | ||
235 | #define MUSB_TXCSR_CLRDATATOG 0x0040 | ||
236 | #define MUSB_TXCSR_FLUSHFIFO 0x0008 | ||
237 | #define MUSB_TXCSR_FIFONOTEMPTY 0x0002 | ||
238 | #define MUSB_TXCSR_TXPKTRDY 0x0001 | ||
239 | |||
240 | /* TXCSR in Peripheral mode */ | ||
241 | #define MUSB_TXCSR_P_ISO 0x4000 | ||
242 | #define MUSB_TXCSR_P_INCOMPTX 0x0080 | ||
243 | #define MUSB_TXCSR_P_SENTSTALL 0x0020 | ||
244 | #define MUSB_TXCSR_P_SENDSTALL 0x0010 | ||
245 | #define MUSB_TXCSR_P_UNDERRUN 0x0004 | ||
246 | |||
247 | /* TXCSR in Host mode */ | ||
248 | #define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200 | ||
249 | #define MUSB_TXCSR_H_DATATOGGLE 0x0100 | ||
250 | #define MUSB_TXCSR_H_NAKTIMEOUT 0x0080 | ||
251 | #define MUSB_TXCSR_H_RXSTALL 0x0020 | ||
252 | #define MUSB_TXCSR_H_ERROR 0x0004 | ||
253 | |||
254 | /* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ | ||
255 | #define MUSB_TXCSR_P_WZC_BITS \ | ||
256 | (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \ | ||
257 | | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY) | ||
258 | #define MUSB_TXCSR_H_WZC_BITS \ | ||
259 | (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \ | ||
260 | | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY) | ||
261 | |||
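The WZC ("write zero clears, write one is ignored") masks exist so a read-modify-write can acknowledge exactly one flag without wiping the others. A sketch of the intended pattern (illustrative; assumes epio points at the endpoint registers as in musb_cleanup_urb earlier): set the WZC bits to 1 so they are left alone, then write 0 only to the flag being cleared.

	csr = musb_readw(epio, MUSB_TXCSR);
	csr |= MUSB_TXCSR_H_WZC_BITS;	/* writing 1 leaves these bits alone */
	csr &= ~MUSB_TXCSR_H_RXSTALL;	/* writing 0 clears only the stall */
	musb_writew(epio, MUSB_TXCSR, csr);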
262 | /* RXCSR in Peripheral and Host mode */ | ||
263 | #define MUSB_RXCSR_AUTOCLEAR 0x8000 | ||
264 | #define MUSB_RXCSR_DMAENAB 0x2000 | ||
265 | #define MUSB_RXCSR_DISNYET 0x1000 | ||
266 | #define MUSB_RXCSR_PID_ERR 0x1000 | ||
267 | #define MUSB_RXCSR_DMAMODE 0x0800 | ||
268 | #define MUSB_RXCSR_INCOMPRX 0x0100 | ||
269 | #define MUSB_RXCSR_CLRDATATOG 0x0080 | ||
270 | #define MUSB_RXCSR_FLUSHFIFO 0x0010 | ||
271 | #define MUSB_RXCSR_DATAERROR 0x0008 | ||
272 | #define MUSB_RXCSR_FIFOFULL 0x0002 | ||
273 | #define MUSB_RXCSR_RXPKTRDY 0x0001 | ||
274 | |||
275 | /* RXCSR in Peripheral mode */ | ||
276 | #define MUSB_RXCSR_P_ISO 0x4000 | ||
277 | #define MUSB_RXCSR_P_SENTSTALL 0x0040 | ||
278 | #define MUSB_RXCSR_P_SENDSTALL 0x0020 | ||
279 | #define MUSB_RXCSR_P_OVERRUN 0x0004 | ||
280 | |||
281 | /* RXCSR in Host mode */ | ||
282 | #define MUSB_RXCSR_H_AUTOREQ 0x4000 | ||
283 | #define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400 | ||
284 | #define MUSB_RXCSR_H_DATATOGGLE 0x0200 | ||
285 | #define MUSB_RXCSR_H_RXSTALL 0x0040 | ||
286 | #define MUSB_RXCSR_H_REQPKT 0x0020 | ||
287 | #define MUSB_RXCSR_H_ERROR 0x0004 | ||
288 | |||
289 | /* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ | ||
290 | #define MUSB_RXCSR_P_WZC_BITS \ | ||
291 | (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \ | ||
292 | | MUSB_RXCSR_RXPKTRDY) | ||
293 | #define MUSB_RXCSR_H_WZC_BITS \ | ||
294 | (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \ | ||
295 | | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY) | ||
296 | |||
297 | /* HUBADDR */ | ||
298 | #define MUSB_HUBADDR_MULTI_TT 0x80 | ||
299 | |||
300 | #endif /* __MUSB_REGS_H__ */ | ||
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c new file mode 100644 index 000000000000..e0e9ce584175 --- /dev/null +++ b/drivers/usb/musb/musb_virthub.c | |||
@@ -0,0 +1,425 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver virtual root hub support | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include <linux/module.h> | ||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/sched.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/errno.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/time.h> | ||
42 | #include <linux/timer.h> | ||
43 | |||
44 | #include <asm/unaligned.h> | ||
45 | |||
46 | #include "musb_core.h" | ||
47 | |||
48 | |||
49 | static void musb_port_suspend(struct musb *musb, bool do_suspend) | ||
50 | { | ||
51 | u8 power; | ||
52 | void __iomem *mbase = musb->mregs; | ||
53 | |||
54 | if (!is_host_active(musb)) | ||
55 | return; | ||
56 | |||
57 | /* NOTE: this doesn't necessarily put PHY into low power mode, | ||
58 | * turning off its clock; that's a function of PHY integration and | ||
59 | * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect | ||
60 | * SE0 changing to connect (J) or wakeup (K) states. | ||
61 | */ | ||
62 | power = musb_readb(mbase, MUSB_POWER); | ||
63 | if (do_suspend) { | ||
64 | int retries = 10000; | ||
65 | |||
66 | power &= ~MUSB_POWER_RESUME; | ||
67 | power |= MUSB_POWER_SUSPENDM; | ||
68 | musb_writeb(mbase, MUSB_POWER, power); | ||
69 | |||
70 | /* Needed for OPT A tests */ | ||
71 | power = musb_readb(mbase, MUSB_POWER); | ||
72 | while (power & MUSB_POWER_SUSPENDM) { | ||
73 | power = musb_readb(mbase, MUSB_POWER); | ||
74 | if (retries-- < 1) | ||
75 | break; | ||
76 | } | ||
77 | |||
78 | DBG(3, "Root port suspended, power %02x\n", power); | ||
79 | |||
80 | musb->port1_status |= USB_PORT_STAT_SUSPEND; | ||
81 | switch (musb->xceiv.state) { | ||
82 | case OTG_STATE_A_HOST: | ||
83 | musb->xceiv.state = OTG_STATE_A_SUSPEND; | ||
84 | musb->is_active = is_otg_enabled(musb) | ||
85 | && musb->xceiv.host->b_hnp_enable; | ||
86 | musb_platform_try_idle(musb, 0); | ||
87 | break; | ||
88 | #ifdef CONFIG_USB_MUSB_OTG | ||
89 | case OTG_STATE_B_HOST: | ||
90 | musb->xceiv.state = OTG_STATE_B_WAIT_ACON; | ||
91 | musb->is_active = is_otg_enabled(musb) | ||
92 | && musb->xceiv.host->b_hnp_enable; | ||
93 | musb_platform_try_idle(musb, 0); | ||
94 | break; | ||
95 | #endif | ||
96 | default: | ||
97 | DBG(1, "bogus rh suspend? %s\n", | ||
98 | otg_state_string(musb)); | ||
99 | } | ||
100 | } else if (power & MUSB_POWER_SUSPENDM) { | ||
101 | power &= ~MUSB_POWER_SUSPENDM; | ||
102 | power |= MUSB_POWER_RESUME; | ||
103 | musb_writeb(mbase, MUSB_POWER, power); | ||
104 | |||
105 | DBG(3, "Root port resuming, power %02x\n", power); | ||
106 | |||
107 | /* later, GetPortStatus will stop RESUME signaling */ | ||
108 | musb->port1_status |= MUSB_PORT_STAT_RESUME; | ||
109 | musb->rh_timer = jiffies + msecs_to_jiffies(20); | ||
110 | } | ||
111 | } | ||
112 | |||
113 | static void musb_port_reset(struct musb *musb, bool do_reset) | ||
114 | { | ||
115 | u8 power; | ||
116 | void __iomem *mbase = musb->mregs; | ||
117 | |||
118 | #ifdef CONFIG_USB_MUSB_OTG | ||
119 | if (musb->xceiv.state == OTG_STATE_B_IDLE) { | ||
120 | DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n"); | ||
121 | musb->port1_status &= ~USB_PORT_STAT_RESET; | ||
122 | return; | ||
123 | } | ||
124 | #endif | ||
125 | |||
126 | if (!is_host_active(musb)) | ||
127 | return; | ||
128 | |||
129 | /* NOTE: caller guarantees it will turn off the reset when | ||
130 | * the appropriate amount of time has passed | ||
131 | */ | ||
132 | power = musb_readb(mbase, MUSB_POWER); | ||
133 | if (do_reset) { | ||
134 | |||
135 | /* | ||
136 | * If RESUME is set, we must make sure it stays minimum 20 ms. | ||
137 | * Then we must clear RESUME and wait a bit to let musb start | ||
138 | * generating SOFs. If we don't do this, OPT HS A 6.8 tests | ||
139 | * fail with "Error! Did not receive an SOF before suspend | ||
140 | * detected". | ||
141 | */ | ||
142 | if (power & MUSB_POWER_RESUME) { | ||
143 | while (time_before(jiffies, musb->rh_timer)) | ||
144 | msleep(1); | ||
145 | musb_writeb(mbase, MUSB_POWER, | ||
146 | power & ~MUSB_POWER_RESUME); | ||
147 | msleep(1); | ||
148 | } | ||
149 | |||
150 | musb->ignore_disconnect = true; | ||
151 | power &= 0xf0; | ||
152 | musb_writeb(mbase, MUSB_POWER, | ||
153 | power | MUSB_POWER_RESET); | ||
154 | |||
155 | musb->port1_status |= USB_PORT_STAT_RESET; | ||
156 | musb->port1_status &= ~USB_PORT_STAT_ENABLE; | ||
157 | musb->rh_timer = jiffies + msecs_to_jiffies(50); | ||
158 | } else { | ||
159 | DBG(4, "root port reset stopped\n"); | ||
160 | musb_writeb(mbase, MUSB_POWER, | ||
161 | power & ~MUSB_POWER_RESET); | ||
162 | |||
163 | musb->ignore_disconnect = false; | ||
164 | |||
165 | power = musb_readb(mbase, MUSB_POWER); | ||
166 | if (power & MUSB_POWER_HSMODE) { | ||
167 | DBG(4, "high-speed device connected\n"); | ||
168 | musb->port1_status |= USB_PORT_STAT_HIGH_SPEED; | ||
169 | } | ||
170 | |||
171 | musb->port1_status &= ~USB_PORT_STAT_RESET; | ||
172 | musb->port1_status |= USB_PORT_STAT_ENABLE | ||
173 | | (USB_PORT_STAT_C_RESET << 16) | ||
174 | | (USB_PORT_STAT_C_ENABLE << 16); | ||
175 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
176 | |||
177 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | ||
178 | } | ||
179 | } | ||
180 | |||
181 | void musb_root_disconnect(struct musb *musb) | ||
182 | { | ||
183 | musb->port1_status = (1 << USB_PORT_FEAT_POWER) | ||
184 | | (1 << USB_PORT_FEAT_C_CONNECTION); | ||
185 | |||
186 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
187 | musb->is_active = 0; | ||
188 | |||
189 | switch (musb->xceiv.state) { | ||
190 | case OTG_STATE_A_HOST: | ||
191 | case OTG_STATE_A_SUSPEND: | ||
192 | musb->xceiv.state = OTG_STATE_A_WAIT_BCON; | ||
193 | musb->is_active = 0; | ||
194 | break; | ||
195 | case OTG_STATE_A_WAIT_VFALL: | ||
196 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
197 | break; | ||
198 | default: | ||
199 | DBG(1, "host disconnect (%s)\n", otg_state_string(musb)); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | |||
204 | /*---------------------------------------------------------------------*/ | ||
205 | |||
206 | /* Caller may or may not hold musb->lock */ | ||
207 | int musb_hub_status_data(struct usb_hcd *hcd, char *buf) | ||
208 | { | ||
209 | struct musb *musb = hcd_to_musb(hcd); | ||
210 | int retval = 0; | ||
211 | |||
212 | /* called in_irq() via usb_hcd_poll_rh_status() */ | ||
213 | if (musb->port1_status & 0xffff0000) { | ||
214 | *buf = 0x02; | ||
215 | retval = 1; | ||
216 | } | ||
217 | return retval; | ||
218 | } | ||
219 | |||
220 | int musb_hub_control( | ||
221 | struct usb_hcd *hcd, | ||
222 | u16 typeReq, | ||
223 | u16 wValue, | ||
224 | u16 wIndex, | ||
225 | char *buf, | ||
226 | u16 wLength) | ||
227 | { | ||
228 | struct musb *musb = hcd_to_musb(hcd); | ||
229 | u32 temp; | ||
230 | int retval = 0; | ||
231 | unsigned long flags; | ||
232 | |||
233 | spin_lock_irqsave(&musb->lock, flags); | ||
234 | |||
235 | if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) { | ||
236 | spin_unlock_irqrestore(&musb->lock, flags); | ||
237 | return -ESHUTDOWN; | ||
238 | } | ||
239 | |||
240 | /* hub features: always zero, setting is a NOP | ||
241 | * port features: reported, sometimes updated when host is active | ||
242 | * no indicators | ||
243 | */ | ||
244 | switch (typeReq) { | ||
245 | case ClearHubFeature: | ||
246 | case SetHubFeature: | ||
247 | switch (wValue) { | ||
248 | case C_HUB_OVER_CURRENT: | ||
249 | case C_HUB_LOCAL_POWER: | ||
250 | break; | ||
251 | default: | ||
252 | goto error; | ||
253 | } | ||
254 | break; | ||
255 | case ClearPortFeature: | ||
256 | if ((wIndex & 0xff) != 1) | ||
257 | goto error; | ||
258 | |||
259 | switch (wValue) { | ||
260 | case USB_PORT_FEAT_ENABLE: | ||
261 | break; | ||
262 | case USB_PORT_FEAT_SUSPEND: | ||
263 | musb_port_suspend(musb, false); | ||
264 | break; | ||
265 | case USB_PORT_FEAT_POWER: | ||
266 | if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) | ||
267 | musb_set_vbus(musb, 0); | ||
268 | break; | ||
269 | case USB_PORT_FEAT_C_CONNECTION: | ||
270 | case USB_PORT_FEAT_C_ENABLE: | ||
271 | case USB_PORT_FEAT_C_OVER_CURRENT: | ||
272 | case USB_PORT_FEAT_C_RESET: | ||
273 | case USB_PORT_FEAT_C_SUSPEND: | ||
274 | break; | ||
275 | default: | ||
276 | goto error; | ||
277 | } | ||
278 | DBG(5, "clear feature %d\n", wValue); | ||
279 | musb->port1_status &= ~(1 << wValue); | ||
280 | break; | ||
281 | case GetHubDescriptor: | ||
282 | { | ||
283 | struct usb_hub_descriptor *desc = (void *)buf; | ||
284 | |||
285 | desc->bDescLength = 9; | ||
286 | desc->bDescriptorType = 0x29; | ||
287 | desc->bNbrPorts = 1; | ||
288 | desc->wHubCharacteristics = __constant_cpu_to_le16( | ||
289 | 0x0001 /* per-port power switching */ | ||
290 | | 0x0010 /* no overcurrent reporting */ | ||
291 | ); | ||
292 | desc->bPwrOn2PwrGood = 5; /* msec/2 */ | ||
293 | desc->bHubContrCurrent = 0; | ||
294 | |||
295 | /* workaround bogus struct definition */ | ||
296 | desc->DeviceRemovable[0] = 0x02; /* port 1 */ | ||
297 | desc->DeviceRemovable[1] = 0xff; | ||
298 | } | ||
299 | break; | ||
300 | case GetHubStatus: | ||
301 | temp = 0; | ||
302 | *(__le32 *) buf = cpu_to_le32(temp); | ||
303 | break; | ||
304 | case GetPortStatus: | ||
305 | if (wIndex != 1) | ||
306 | goto error; | ||
307 | |||
308 | /* finish RESET signaling? */ | ||
309 | if ((musb->port1_status & USB_PORT_STAT_RESET) | ||
310 | && time_after_eq(jiffies, musb->rh_timer)) | ||
311 | musb_port_reset(musb, false); | ||
312 | |||
313 | /* finish RESUME signaling? */ | ||
314 | if ((musb->port1_status & MUSB_PORT_STAT_RESUME) | ||
315 | && time_after_eq(jiffies, musb->rh_timer)) { | ||
316 | u8 power; | ||
317 | |||
318 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
319 | power &= ~MUSB_POWER_RESUME; | ||
320 | DBG(4, "root port resume stopped, power %02x\n", | ||
321 | power); | ||
322 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
323 | |||
324 | /* ISSUE: DaVinci (RTL 1.300) disconnects after | ||
325 | * resume of high speed peripherals (but not full | ||
326 | * speed ones). | ||
327 | */ | ||
328 | |||
329 | musb->is_active = 1; | ||
330 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | ||
331 | | MUSB_PORT_STAT_RESUME); | ||
332 | musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; | ||
333 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
334 | /* NOTE: it might really be A_WAIT_BCON ... */ | ||
335 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
336 | } | ||
337 | |||
338 | put_unaligned(cpu_to_le32(musb->port1_status | ||
339 | & ~MUSB_PORT_STAT_RESUME), | ||
340 | (__le32 *) buf); | ||
341 | |||
342 | /* port change status is more interesting */ | ||
343 | DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n", | ||
344 | musb->port1_status); | ||
345 | break; | ||
346 | case SetPortFeature: | ||
347 | if ((wIndex & 0xff) != 1) | ||
348 | goto error; | ||
349 | |||
350 | switch (wValue) { | ||
351 | case USB_PORT_FEAT_POWER: | ||
352 | /* NOTE: this controller has a strange state machine | ||
353 | * that involves "requesting sessions" according to | ||
354 | * magic side effects from incompletely-described | ||
355 | * rules about startup... | ||
356 | * | ||
357 | * This call is what really starts the host mode; be | ||
358 | * very careful about side effects if you reorder any | ||
359 | * initialization logic, e.g. for OTG, or change any | ||
360 | * logic relating to VBUS power-up. | ||
361 | */ | ||
362 | if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) | ||
363 | musb_start(musb); | ||
364 | break; | ||
365 | case USB_PORT_FEAT_RESET: | ||
366 | musb_port_reset(musb, true); | ||
367 | break; | ||
368 | case USB_PORT_FEAT_SUSPEND: | ||
369 | musb_port_suspend(musb, true); | ||
370 | break; | ||
371 | case USB_PORT_FEAT_TEST: | ||
372 | if (unlikely(is_host_active(musb))) | ||
373 | goto error; | ||
374 | |||
375 | wIndex >>= 8; | ||
376 | switch (wIndex) { | ||
377 | case 1: | ||
378 | pr_debug("TEST_J\n"); | ||
379 | temp = MUSB_TEST_J; | ||
380 | break; | ||
381 | case 2: | ||
382 | pr_debug("TEST_K\n"); | ||
383 | temp = MUSB_TEST_K; | ||
384 | break; | ||
385 | case 3: | ||
386 | pr_debug("TEST_SE0_NAK\n"); | ||
387 | temp = MUSB_TEST_SE0_NAK; | ||
388 | break; | ||
389 | case 4: | ||
390 | pr_debug("TEST_PACKET\n"); | ||
391 | temp = MUSB_TEST_PACKET; | ||
392 | musb_load_testpacket(musb); | ||
393 | break; | ||
394 | case 5: | ||
395 | pr_debug("TEST_FORCE_ENABLE\n"); | ||
396 | temp = MUSB_TEST_FORCE_HOST | ||
397 | | MUSB_TEST_FORCE_HS; | ||
398 | |||
399 | musb_writeb(musb->mregs, MUSB_DEVCTL, | ||
400 | MUSB_DEVCTL_SESSION); | ||
401 | break; | ||
402 | case 6: | ||
403 | pr_debug("TEST_FIFO_ACCESS\n"); | ||
404 | temp = MUSB_TEST_FIFO_ACCESS; | ||
405 | break; | ||
406 | default: | ||
407 | goto error; | ||
408 | } | ||
409 | musb_writeb(musb->mregs, MUSB_TESTMODE, temp); | ||
410 | break; | ||
411 | default: | ||
412 | goto error; | ||
413 | } | ||
414 | DBG(5, "set feature %d\n", wValue); | ||
415 | musb->port1_status |= 1 << wValue; | ||
416 | break; | ||
417 | |||
418 | default: | ||
419 | error: | ||
420 | /* "protocol stall" on error */ | ||
421 | retval = -EPIPE; | ||
422 | } | ||
423 | spin_unlock_irqrestore(&musb->lock, flags); | ||
424 | return retval; | ||
425 | } | ||
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c new file mode 100644 index 000000000000..9ba8fb7fcd24 --- /dev/null +++ b/drivers/usb/musb/musbhsdma.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* | ||
2 | * MUSB OTG driver - support for Mentor's DMA controller | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2007 by Texas Instruments | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
19 | * 02110-1301 USA | ||
20 | * | ||
21 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
22 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
23 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
24 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
25 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
26 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
27 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
28 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
30 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include "musb_core.h" | ||
37 | |||
38 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | ||
39 | #include "omap2430.h" | ||
40 | #endif | ||
41 | |||
42 | #define MUSB_HSDMA_BASE 0x200 | ||
43 | #define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0) | ||
44 | #define MUSB_HSDMA_CONTROL 0x4 | ||
45 | #define MUSB_HSDMA_ADDRESS 0x8 | ||
46 | #define MUSB_HSDMA_COUNT 0xc | ||
47 | |||
48 | #define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \ | ||
49 | (MUSB_HSDMA_BASE + (_bChannel << 4) + _offset) | ||
50 | |||
51 | /* control register (16-bit): */ | ||
52 | #define MUSB_HSDMA_ENABLE_SHIFT 0 | ||
53 | #define MUSB_HSDMA_TRANSMIT_SHIFT 1 | ||
54 | #define MUSB_HSDMA_MODE1_SHIFT 2 | ||
55 | #define MUSB_HSDMA_IRQENABLE_SHIFT 3 | ||
56 | #define MUSB_HSDMA_ENDPOINT_SHIFT 4 | ||
57 | #define MUSB_HSDMA_BUSERROR_SHIFT 8 | ||
58 | #define MUSB_HSDMA_BURSTMODE_SHIFT 9 | ||
59 | #define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT) | ||
60 | #define MUSB_HSDMA_BURSTMODE_UNSPEC 0 | ||
61 | #define MUSB_HSDMA_BURSTMODE_INCR4 1 | ||
62 | #define MUSB_HSDMA_BURSTMODE_INCR8 2 | ||
63 | #define MUSB_HSDMA_BURSTMODE_INCR16 3 | ||
64 | |||
65 | #define MUSB_HSDMA_CHANNELS 8 | ||
66 | |||
67 | struct musb_dma_controller; | ||
68 | |||
69 | struct musb_dma_channel { | ||
70 | struct dma_channel Channel; | ||
71 | struct musb_dma_controller *controller; | ||
72 | u32 dwStartAddress; | ||
73 | u32 len; | ||
74 | u16 wMaxPacketSize; | ||
75 | u8 bIndex; | ||
76 | u8 epnum; | ||
77 | u8 transmit; | ||
78 | }; | ||
79 | |||
80 | struct musb_dma_controller { | ||
81 | struct dma_controller Controller; | ||
82 | struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS]; | ||
83 | void *pDmaPrivate; | ||
84 | void __iomem *pCoreBase; | ||
85 | u8 bChannelCount; | ||
86 | u8 bmUsedChannels; | ||
87 | u8 irq; | ||
88 | }; | ||
89 | |||
90 | static int dma_controller_start(struct dma_controller *c) | ||
91 | { | ||
92 | /* nothing to do */ | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static void dma_channel_release(struct dma_channel *pChannel); | ||
97 | |||
98 | static int dma_controller_stop(struct dma_controller *c) | ||
99 | { | ||
100 | struct musb_dma_controller *controller = | ||
101 | container_of(c, struct musb_dma_controller, Controller); | ||
102 | struct musb *musb = (struct musb *) controller->pDmaPrivate; | ||
103 | struct dma_channel *pChannel; | ||
104 | u8 bBit; | ||
105 | |||
106 | if (controller->bmUsedChannels != 0) { | ||
107 | dev_err(musb->controller, | ||
108 | "Stopping DMA controller while channel active\n"); | ||
109 | |||
110 | for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { | ||
111 | if (controller->bmUsedChannels & (1 << bBit)) { | ||
112 | pChannel = &controller->aChannel[bBit].Channel; | ||
113 | dma_channel_release(pChannel); | ||
114 | |||
115 | if (!controller->bmUsedChannels) | ||
116 | break; | ||
117 | } | ||
118 | } | ||
119 | } | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static struct dma_channel *dma_channel_allocate(struct dma_controller *c, | ||
124 | struct musb_hw_ep *hw_ep, u8 transmit) | ||
125 | { | ||
126 | u8 bBit; | ||
127 | struct dma_channel *pChannel = NULL; | ||
128 | struct musb_dma_channel *pImplChannel = NULL; | ||
129 | struct musb_dma_controller *controller = | ||
130 | container_of(c, struct musb_dma_controller, Controller); | ||
131 | |||
132 | for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { | ||
133 | if (!(controller->bmUsedChannels & (1 << bBit))) { | ||
134 | controller->bmUsedChannels |= (1 << bBit); | ||
135 | pImplChannel = &(controller->aChannel[bBit]); | ||
136 | pImplChannel->controller = controller; | ||
137 | pImplChannel->bIndex = bBit; | ||
138 | pImplChannel->epnum = hw_ep->epnum; | ||
139 | pImplChannel->transmit = transmit; | ||
140 | pChannel = &(pImplChannel->Channel); | ||
141 | pChannel->private_data = pImplChannel; | ||
142 | pChannel->status = MUSB_DMA_STATUS_FREE; | ||
143 | pChannel->max_len = 0x10000; | ||
144 | /* Tx => mode 1; Rx => mode 0 */ | ||
145 | pChannel->desired_mode = transmit; | ||
146 | pChannel->actual_len = 0; | ||
147 | break; | ||
148 | } | ||
149 | } | ||
150 | return pChannel; | ||
151 | } | ||
152 | |||
153 | static void dma_channel_release(struct dma_channel *pChannel) | ||
154 | { | ||
155 | struct musb_dma_channel *pImplChannel = | ||
156 | (struct musb_dma_channel *) pChannel->private_data; | ||
157 | |||
158 | pChannel->actual_len = 0; | ||
159 | pImplChannel->dwStartAddress = 0; | ||
160 | pImplChannel->len = 0; | ||
161 | |||
162 | pImplChannel->controller->bmUsedChannels &= | ||
163 | ~(1 << pImplChannel->bIndex); | ||
164 | |||
165 | pChannel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
166 | } | ||
167 | |||
168 | static void configure_channel(struct dma_channel *pChannel, | ||
169 | u16 packet_sz, u8 mode, | ||
170 | dma_addr_t dma_addr, u32 len) | ||
171 | { | ||
172 | struct musb_dma_channel *pImplChannel = | ||
173 | (struct musb_dma_channel *) pChannel->private_data; | ||
174 | struct musb_dma_controller *controller = pImplChannel->controller; | ||
175 | void __iomem *mbase = controller->pCoreBase; | ||
176 | u8 bChannel = pImplChannel->bIndex; | ||
177 | u16 csr = 0; | ||
178 | |||
179 | DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", | ||
180 | pChannel, packet_sz, dma_addr, len, mode); | ||
181 | |||
182 | if (mode) { | ||
183 | csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; | ||
184 | BUG_ON(len < packet_sz); | ||
185 | |||
186 | if (packet_sz >= 64) { | ||
187 | csr |= MUSB_HSDMA_BURSTMODE_INCR16 | ||
188 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
189 | } else if (packet_sz >= 32) { | ||
190 | csr |= MUSB_HSDMA_BURSTMODE_INCR8 | ||
191 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
192 | } else if (packet_sz >= 16) { | ||
193 | csr |= MUSB_HSDMA_BURSTMODE_INCR4 | ||
194 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) | ||
199 | | (1 << MUSB_HSDMA_ENABLE_SHIFT) | ||
200 | | (1 << MUSB_HSDMA_IRQENABLE_SHIFT) | ||
201 | | (pImplChannel->transmit | ||
202 | ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT) | ||
203 | : 0); | ||
204 | |||
205 | /* address/count */ | ||
206 | musb_writel(mbase, | ||
207 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), | ||
208 | dma_addr); | ||
209 | musb_writel(mbase, | ||
210 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), | ||
211 | len); | ||
212 | |||
213 | /* control (this should start things) */ | ||
214 | musb_writew(mbase, | ||
215 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), | ||
216 | csr); | ||
217 | } | ||
218 | |||
219 | static int dma_channel_program(struct dma_channel *pChannel, | ||
220 | u16 packet_sz, u8 mode, | ||
221 | dma_addr_t dma_addr, u32 len) | ||
222 | { | ||
223 | struct musb_dma_channel *pImplChannel = | ||
224 | (struct musb_dma_channel *) pChannel->private_data; | ||
225 | |||
226 | DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", | ||
227 | pImplChannel->epnum, | ||
228 | pImplChannel->transmit ? "Tx" : "Rx", | ||
229 | packet_sz, dma_addr, len, mode); | ||
230 | |||
231 | BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN || | ||
232 | pChannel->status == MUSB_DMA_STATUS_BUSY); | ||
233 | |||
234 | pChannel->actual_len = 0; | ||
235 | pImplChannel->dwStartAddress = dma_addr; | ||
236 | pImplChannel->len = len; | ||
237 | pImplChannel->wMaxPacketSize = packet_sz; | ||
238 | pChannel->status = MUSB_DMA_STATUS_BUSY; | ||
239 | |||
240 | if ((mode == 1) && (len >= packet_sz)) | ||
241 | configure_channel(pChannel, packet_sz, 1, dma_addr, len); | ||
242 | else | ||
243 | configure_channel(pChannel, packet_sz, 0, dma_addr, len); | ||
244 | |||
245 | return true; | ||
246 | } | ||
247 | |||
248 | static int dma_channel_abort(struct dma_channel *pChannel) | ||
249 | { | ||
250 | struct musb_dma_channel *pImplChannel = | ||
251 | (struct musb_dma_channel *) pChannel->private_data; | ||
252 | u8 bChannel = pImplChannel->bIndex; | ||
253 | void __iomem *mbase = pImplChannel->controller->pCoreBase; | ||
254 | u16 csr; | ||
255 | |||
256 | if (pChannel->status == MUSB_DMA_STATUS_BUSY) { | ||
257 | if (pImplChannel->transmit) { | ||
258 | |||
259 | csr = musb_readw(mbase, | ||
260 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
261 | MUSB_TXCSR)); | ||
262 | csr &= ~(MUSB_TXCSR_AUTOSET | | ||
263 | MUSB_TXCSR_DMAENAB | | ||
264 | MUSB_TXCSR_DMAMODE); | ||
265 | musb_writew(mbase, | ||
266 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
267 | MUSB_TXCSR), | ||
268 | csr); | ||
269 | } else { | ||
270 | csr = musb_readw(mbase, | ||
271 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
272 | MUSB_RXCSR)); | ||
273 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | | ||
274 | MUSB_RXCSR_DMAENAB | | ||
275 | MUSB_RXCSR_DMAMODE); | ||
276 | musb_writew(mbase, | ||
277 | MUSB_EP_OFFSET(pImplChannel->epnum, | ||
278 | MUSB_RXCSR), | ||
279 | csr); | ||
280 | } | ||
281 | |||
282 | musb_writew(mbase, | ||
283 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), | ||
284 | 0); | ||
285 | musb_writel(mbase, | ||
286 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), | ||
287 | 0); | ||
288 | musb_writel(mbase, | ||
289 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), | ||
290 | 0); | ||
291 | |||
292 | pChannel->status = MUSB_DMA_STATUS_FREE; | ||
293 | } | ||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | static irqreturn_t dma_controller_irq(int irq, void *private_data) | ||
298 | { | ||
299 | struct musb_dma_controller *controller = | ||
300 | (struct musb_dma_controller *)private_data; | ||
301 | struct musb_dma_channel *pImplChannel; | ||
302 | struct musb *musb = controller->pDmaPrivate; | ||
303 | void __iomem *mbase = controller->pCoreBase; | ||
304 | struct dma_channel *pChannel; | ||
305 | u8 bChannel; | ||
306 | u16 csr; | ||
307 | u32 dwAddress; | ||
308 | u8 int_hsdma; | ||
309 | irqreturn_t retval = IRQ_NONE; | ||
310 | unsigned long flags; | ||
311 | |||
312 | spin_lock_irqsave(&musb->lock, flags); | ||
313 | |||
314 | int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); | ||
315 | if (!int_hsdma) | ||
316 | goto done; | ||
317 | |||
318 | for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) { | ||
319 | if (int_hsdma & (1 << bChannel)) { | ||
320 | pImplChannel = (struct musb_dma_channel *) | ||
321 | &(controller->aChannel[bChannel]); | ||
322 | pChannel = &pImplChannel->Channel; | ||
323 | |||
324 | csr = musb_readw(mbase, | ||
325 | MUSB_HSDMA_CHANNEL_OFFSET(bChannel, | ||
326 | MUSB_HSDMA_CONTROL)); | ||
327 | |||
328 | if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) | ||
329 | pImplChannel->Channel.status = | ||
330 | MUSB_DMA_STATUS_BUS_ABORT; | ||
331 | else { | ||
332 | u8 devctl; | ||
333 | |||
334 | dwAddress = musb_readl(mbase, | ||
335 | MUSB_HSDMA_CHANNEL_OFFSET( | ||
336 | bChannel, | ||
337 | MUSB_HSDMA_ADDRESS)); | ||
338 | pChannel->actual_len = dwAddress | ||
339 | - pImplChannel->dwStartAddress; | ||
340 | |||
341 | DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n", | ||
342 | pChannel, pImplChannel->dwStartAddress, | ||
343 | dwAddress, pChannel->actual_len, | ||
344 | pImplChannel->len, | ||
345 | (pChannel->actual_len | ||
346 | < pImplChannel->len) ? | ||
347 | "=> reconfig 0" : "=> complete"); | ||
348 | |||
349 | devctl = musb_readb(mbase, MUSB_DEVCTL); | ||
350 | |||
351 | pChannel->status = MUSB_DMA_STATUS_FREE; | ||
352 | |||
353 | /* completed */ | ||
354 | if ((devctl & MUSB_DEVCTL_HM) | ||
355 | && (pImplChannel->transmit) | ||
356 | && ((pChannel->desired_mode == 0) | ||
357 | || (pChannel->actual_len & | ||
358 | (pImplChannel->wMaxPacketSize - 1))) | ||
359 | ) { | ||
360 | /* Send out the packet */ | ||
361 | musb_ep_select(mbase, | ||
362 | pImplChannel->epnum); | ||
363 | musb_writew(mbase, MUSB_EP_OFFSET( | ||
364 | pImplChannel->epnum, | ||
365 | MUSB_TXCSR), | ||
366 | MUSB_TXCSR_TXPKTRDY); | ||
367 | } else | ||
368 | musb_dma_completion( | ||
369 | musb, | ||
370 | pImplChannel->epnum, | ||
371 | pImplChannel->transmit); | ||
372 | } | ||
373 | } | ||
374 | } | ||
375 | retval = IRQ_HANDLED; | ||
376 | done: | ||
377 | spin_unlock_irqrestore(&musb->lock, flags); | ||
378 | return retval; | ||
379 | } | ||
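In the host-side TX completion branch above, pChannel->actual_len & (pImplChannel->wMaxPacketSize - 1) is a power-of-two modulo: it is non-zero exactly when the DMA transfer did not end on a packet boundary, so the short final packet still needs TXPKTRDY written by hand. A small standalone sketch of that arithmetic (userspace C, illustrative values only, not driver code):

    /* Sketch: power-of-two modulo used to detect a short final packet. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int maxp = 512;        /* e.g. high-speed bulk maxpacket */
            unsigned int lens[] = { 512, 1024, 1000, 13 };
            unsigned int i;

            for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
                    printf("len %4u -> remainder %3u (%s)\n",
                           lens[i], lens[i] & (maxp - 1),
                           (lens[i] & (maxp - 1)) ? "short packet" : "packet-aligned");
            return 0;
    }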
380 | |||
381 | void dma_controller_destroy(struct dma_controller *c) | ||
382 | { | ||
383 | struct musb_dma_controller *controller; | ||
384 | |||
385 | controller = container_of(c, struct musb_dma_controller, Controller); | ||
386 | if (!controller) | ||
387 | return; | ||
388 | |||
389 | if (controller->irq) | ||
390 | free_irq(controller->irq, c); | ||
391 | |||
392 | kfree(controller); | ||
393 | } | ||
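dma_controller_destroy() recovers the wrapping musb_dma_controller from the embedded dma_controller that callers hold, via container_of(). A minimal userspace sketch of that pattern, with made-up structure names rather than the driver's own:

    /* Sketch of the container_of pattern: recover the outer object from
     * a pointer to an embedded member. Names here are illustrative only.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner_ops {
            int dummy;
    };

    struct outer_dma {
            int irq;
            struct inner_ops ops;   /* embedded, handed out to callers */
    };

    int main(void)
    {
            struct outer_dma dma = { .irq = 42 };
            struct inner_ops *c = &dma.ops;  /* what a caller would hold */
            struct outer_dma *back = container_of(c, struct outer_dma, ops);

            printf("irq = %d (same object: %d)\n", back->irq, back == &dma);
            return 0;
    }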
394 | |||
395 | struct dma_controller *__init | ||
396 | dma_controller_create(struct musb *musb, void __iomem *pCoreBase) | ||
397 | { | ||
398 | struct musb_dma_controller *controller; | ||
399 | struct device *dev = musb->controller; | ||
400 | struct platform_device *pdev = to_platform_device(dev); | ||
401 | int irq = platform_get_irq(pdev, 1); | ||
402 | |||
403 | if (irq == 0) { | ||
404 | dev_err(dev, "No DMA interrupt line!\n"); | ||
405 | return NULL; | ||
406 | } | ||
407 | |||
408 | controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL); | ||
409 | if (!controller) | ||
410 | return NULL; | ||
411 | |||
412 | controller->bChannelCount = MUSB_HSDMA_CHANNELS; | ||
413 | controller->pDmaPrivate = musb; | ||
414 | controller->pCoreBase = pCoreBase; | ||
415 | |||
416 | controller->Controller.start = dma_controller_start; | ||
417 | controller->Controller.stop = dma_controller_stop; | ||
418 | controller->Controller.channel_alloc = dma_channel_allocate; | ||
419 | controller->Controller.channel_release = dma_channel_release; | ||
420 | controller->Controller.channel_program = dma_channel_program; | ||
421 | controller->Controller.channel_abort = dma_channel_abort; | ||
422 | |||
423 | if (request_irq(irq, dma_controller_irq, IRQF_DISABLED, | ||
424 | musb->controller->bus_id, &controller->Controller)) { | ||
425 | dev_err(dev, "request_irq %d failed!\n", irq); | ||
426 | dma_controller_destroy(&controller->Controller); | ||
427 | return NULL; | ||
428 | } | ||
429 | |||
430 | controller->irq = irq; | ||
431 | |||
432 | return &controller->Controller; | ||
433 | } | ||
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c new file mode 100644 index 000000000000..298b22e6ad0d --- /dev/null +++ b/drivers/usb/musb/omap2430.c | |||
@@ -0,0 +1,324 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2007 by Texas Instruments | ||
3 | * Some code has been taken from tusb6010.c | ||
4 | * Copyrights for that are attributable to: | ||
5 | * Copyright (C) 2006 Nokia Corporation | ||
6 | * Jarkko Nikula <jarkko.nikula@nokia.com> | ||
7 | * Tony Lindgren <tony@atomide.com> | ||
8 | * | ||
9 | * This file is part of the Inventra Controller Driver for Linux. | ||
10 | * | ||
11 | * The Inventra Controller Driver for Linux is free software; you | ||
12 | * can redistribute it and/or modify it under the terms of the GNU | ||
13 | * General Public License version 2 as published by the Free Software | ||
14 | * Foundation. | ||
15 | * | ||
16 | * The Inventra Controller Driver for Linux is distributed in | ||
17 | * the hope that it will be useful, but WITHOUT ANY WARRANTY; | ||
18 | * without even the implied warranty of MERCHANTABILITY or | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | ||
20 | * License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with The Inventra Controller Driver for Linux; if not, | ||
24 | * write to the Free Software Foundation, Inc., 59 Temple Place, | ||
25 | * Suite 330, Boston, MA 02111-1307 USA | ||
26 | * | ||
27 | */ | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/list.h> | ||
34 | #include <linux/clk.h> | ||
35 | #include <linux/io.h> | ||
36 | |||
37 | #include <asm/mach-types.h> | ||
38 | #include <asm/arch/hardware.h> | ||
39 | #include <asm/arch/mux.h> | ||
40 | |||
41 | #include "musb_core.h" | ||
42 | #include "omap2430.h" | ||
43 | |||
44 | #ifdef CONFIG_ARCH_OMAP3430 | ||
45 | #define get_cpu_rev() 2 | ||
46 | #endif | ||
47 | |||
48 | #define MUSB_TIMEOUT_A_WAIT_BCON 1100 | ||
49 | |||
50 | static struct timer_list musb_idle_timer; | ||
51 | |||
52 | static void musb_do_idle(unsigned long _musb) | ||
53 | { | ||
54 | struct musb *musb = (void *)_musb; | ||
55 | unsigned long flags; | ||
56 | u8 power; | ||
57 | u8 devctl; | ||
58 | |||
59 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
60 | |||
61 | spin_lock_irqsave(&musb->lock, flags); | ||
62 | |||
63 | switch (musb->xceiv.state) { | ||
64 | case OTG_STATE_A_WAIT_BCON: | ||
65 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
66 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
67 | |||
68 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
69 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
70 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
71 | MUSB_DEV_MODE(musb); | ||
72 | } else { | ||
73 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
74 | MUSB_HST_MODE(musb); | ||
75 | } | ||
76 | break; | ||
77 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
78 | case OTG_STATE_A_SUSPEND: | ||
79 | /* finish RESUME signaling? */ | ||
80 | if (musb->port1_status & MUSB_PORT_STAT_RESUME) { | ||
81 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
82 | power &= ~MUSB_POWER_RESUME; | ||
83 | DBG(1, "root port resume stopped, power %02x\n", power); | ||
84 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
85 | musb->is_active = 1; | ||
86 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | ||
87 | | MUSB_PORT_STAT_RESUME); | ||
88 | musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; | ||
89 | usb_hcd_poll_rh_status(musb_to_hcd(musb)); | ||
90 | /* NOTE: it might really be A_WAIT_BCON ... */ | ||
91 | musb->xceiv.state = OTG_STATE_A_HOST; | ||
92 | } | ||
93 | break; | ||
94 | #endif | ||
95 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
96 | case OTG_STATE_A_HOST: | ||
97 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
98 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
99 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
100 | else | ||
101 | musb->xceiv.state = OTG_STATE_A_WAIT_BCON; | ||
102 | #endif | ||
103 | default: | ||
104 | break; | ||
105 | } | ||
106 | spin_unlock_irqrestore(&musb->lock, flags); | ||
107 | } | ||
108 | |||
109 | |||
110 | void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | ||
111 | { | ||
112 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | ||
113 | static unsigned long last_timer; | ||
114 | |||
115 | if (timeout == 0) | ||
116 | timeout = default_timeout; | ||
117 | |||
118 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
119 | if (musb->is_active || ((musb->a_wait_bcon == 0) | ||
120 | && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { | ||
121 | DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); | ||
122 | del_timer(&musb_idle_timer); | ||
123 | last_timer = jiffies; | ||
124 | return; | ||
125 | } | ||
126 | |||
127 | if (time_after(last_timer, timeout)) { | ||
128 | if (!timer_pending(&musb_idle_timer)) | ||
129 | last_timer = timeout; | ||
130 | else { | ||
131 | DBG(4, "Longer idle timer already pending, ignoring\n"); | ||
132 | return; | ||
133 | } | ||
134 | } | ||
135 | last_timer = timeout; | ||
136 | |||
137 | DBG(4, "%s inactive, starting idle timer for %lu ms\n", | ||
138 | otg_state_string(musb), | ||
139 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | ||
140 | mod_timer(&musb_idle_timer, timeout); | ||
141 | } | ||
142 | |||
143 | void musb_platform_enable(struct musb *musb) | ||
144 | { | ||
145 | } | ||
146 | void musb_platform_disable(struct musb *musb) | ||
147 | { | ||
148 | } | ||
149 | static void omap_vbus_power(struct musb *musb, int is_on, int sleeping) | ||
150 | { | ||
151 | } | ||
152 | |||
153 | static void omap_set_vbus(struct musb *musb, int is_on) | ||
154 | { | ||
155 | u8 devctl; | ||
156 | /* HDRC controls CPEN, but beware current surges during device | ||
157 | * connect. They can trigger transient overcurrent conditions | ||
158 | * that must be ignored. | ||
159 | */ | ||
160 | |||
161 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
162 | |||
163 | if (is_on) { | ||
164 | musb->is_active = 1; | ||
165 | musb->xceiv.default_a = 1; | ||
166 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
167 | devctl |= MUSB_DEVCTL_SESSION; | ||
168 | |||
169 | MUSB_HST_MODE(musb); | ||
170 | } else { | ||
171 | musb->is_active = 0; | ||
172 | |||
173 | /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and | ||
174 | * jumping right to B_IDLE... | ||
175 | */ | ||
176 | |||
177 | musb->xceiv.default_a = 0; | ||
178 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
179 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
180 | |||
181 | MUSB_DEV_MODE(musb); | ||
182 | } | ||
183 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
184 | |||
185 | DBG(1, "VBUS %s, devctl %02x " | ||
186 | /* otg %3x conf %08x prcm %08x */ "\n", | ||
187 | otg_state_string(musb), | ||
188 | musb_readb(musb->mregs, MUSB_DEVCTL)); | ||
189 | } | ||
190 | static int omap_set_power(struct otg_transceiver *x, unsigned mA) | ||
191 | { | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static int musb_platform_resume(struct musb *musb); | ||
196 | |||
197 | void musb_platform_set_mode(struct musb *musb, u8 musb_mode) | ||
198 | { | ||
199 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
200 | |||
201 | devctl |= MUSB_DEVCTL_SESSION; | ||
202 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
203 | |||
204 | switch (musb_mode) { | ||
205 | case MUSB_HOST: | ||
206 | otg_set_host(&musb->xceiv, musb->xceiv.host); | ||
207 | break; | ||
208 | case MUSB_PERIPHERAL: | ||
209 | otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget); | ||
210 | break; | ||
211 | case MUSB_OTG: | ||
212 | break; | ||
213 | } | ||
214 | } | ||
215 | |||
216 | int __init musb_platform_init(struct musb *musb) | ||
217 | { | ||
218 | u32 l; | ||
219 | |||
220 | #if defined(CONFIG_ARCH_OMAP2430) | ||
221 | omap_cfg_reg(AE5_2430_USB0HS_STP); | ||
222 | #endif | ||
223 | |||
224 | musb_platform_resume(musb); | ||
225 | |||
226 | l = omap_readl(OTG_SYSCONFIG); | ||
227 | l &= ~ENABLEWAKEUP; /* disable wakeup */ | ||
228 | l &= ~NOSTDBY; /* remove possible nostdby */ | ||
229 | l |= SMARTSTDBY; /* enable smart standby */ | ||
230 | l &= ~AUTOIDLE; /* disable auto idle */ | ||
231 | l &= ~NOIDLE; /* remove possible noidle */ | ||
232 | l |= SMARTIDLE; /* enable smart idle */ | ||
233 | l |= AUTOIDLE; /* enable auto idle */ | ||
234 | omap_writel(l, OTG_SYSCONFIG); | ||
235 | |||
236 | l = omap_readl(OTG_INTERFSEL); | ||
237 | l |= ULPI_12PIN; | ||
238 | omap_writel(l, OTG_INTERFSEL); | ||
239 | |||
240 | pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " | ||
241 | "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", | ||
242 | omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG), | ||
243 | omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL), | ||
244 | omap_readl(OTG_SIMENABLE)); | ||
245 | |||
246 | omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1); | ||
247 | |||
248 | if (is_host_enabled(musb)) | ||
249 | musb->board_set_vbus = omap_set_vbus; | ||
250 | if (is_peripheral_enabled(musb)) | ||
251 | musb->xceiv.set_power = omap_set_power; | ||
252 | musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON; | ||
253 | |||
254 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | int musb_platform_suspend(struct musb *musb) | ||
260 | { | ||
261 | u32 l; | ||
262 | |||
263 | if (!musb->clock) | ||
264 | return 0; | ||
265 | |||
266 | /* in any role */ | ||
267 | l = omap_readl(OTG_FORCESTDBY); | ||
268 | l |= ENABLEFORCE; /* enable MSTANDBY */ | ||
269 | omap_writel(l, OTG_FORCESTDBY); | ||
270 | |||
271 | l = omap_readl(OTG_SYSCONFIG); | ||
272 | l |= ENABLEWAKEUP; /* enable wakeup */ | ||
273 | omap_writel(l, OTG_SYSCONFIG); | ||
274 | |||
275 | if (musb->xceiv.set_suspend) | ||
276 | musb->xceiv.set_suspend(&musb->xceiv, 1); | ||
277 | |||
278 | if (musb->set_clock) | ||
279 | musb->set_clock(musb->clock, 0); | ||
280 | else | ||
281 | clk_disable(musb->clock); | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int musb_platform_resume(struct musb *musb) | ||
287 | { | ||
288 | u32 l; | ||
289 | |||
290 | if (!musb->clock) | ||
291 | return 0; | ||
292 | |||
293 | if (musb->xceiv.set_suspend) | ||
294 | musb->xceiv.set_suspend(&musb->xceiv, 0); | ||
295 | |||
296 | if (musb->set_clock) | ||
297 | musb->set_clock(musb->clock, 1); | ||
298 | else | ||
299 | clk_enable(musb->clock); | ||
300 | |||
301 | l = omap_readl(OTG_SYSCONFIG); | ||
302 | l &= ~ENABLEWAKEUP; /* disable wakeup */ | ||
303 | omap_writel(l, OTG_SYSCONFIG); | ||
304 | |||
305 | l = omap_readl(OTG_FORCESTDBY); | ||
306 | l &= ~ENABLEFORCE; /* disable MSTANDBY */ | ||
307 | omap_writel(l, OTG_FORCESTDBY); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | |||
313 | int musb_platform_exit(struct musb *musb) | ||
314 | { | ||
315 | |||
316 | omap_vbus_power(musb, 0 /*off*/, 1); | ||
317 | |||
318 | musb_platform_suspend(musb); | ||
319 | |||
320 | clk_put(musb->clock); | ||
321 | musb->clock = 0; | ||
322 | |||
323 | return 0; | ||
324 | } | ||
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h new file mode 100644 index 000000000000..786a62071f72 --- /dev/null +++ b/drivers/usb/musb/omap2430.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2006 by Texas Instruments | ||
3 | * | ||
4 | * The Inventra Controller Driver for Linux is free software; you | ||
5 | * can redistribute it and/or modify it under the terms of the GNU | ||
6 | * General Public License version 2 as published by the Free Software | ||
7 | * Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef __MUSB_OMAP243X_H__ | ||
11 | #define __MUSB_OMAP243X_H__ | ||
12 | |||
13 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | ||
14 | #include <asm/arch/hardware.h> | ||
15 | #include <asm/arch/usb.h> | ||
16 | |||
17 | /* | ||
18 | * OMAP2430-specific definitions | ||
19 | */ | ||
20 | |||
21 | #define MENTOR_BASE_OFFSET 0 | ||
22 | #if defined(CONFIG_ARCH_OMAP2430) | ||
23 | #define OMAP_HSOTG_BASE (OMAP243X_HS_BASE) | ||
24 | #elif defined(CONFIG_ARCH_OMAP3430) | ||
25 | #define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE) | ||
26 | #endif | ||
27 | #define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset)) | ||
28 | #define OTG_REVISION OMAP_HSOTG(0x0) | ||
29 | #define OTG_SYSCONFIG OMAP_HSOTG(0x4) | ||
30 | # define MIDLEMODE 12 /* bit position */ | ||
31 | # define FORCESTDBY (0 << MIDLEMODE) | ||
32 | # define NOSTDBY (1 << MIDLEMODE) | ||
33 | # define SMARTSTDBY (2 << MIDLEMODE) | ||
34 | # define SIDLEMODE 3 /* bit position */ | ||
35 | # define FORCEIDLE (0 << SIDLEMODE) | ||
36 | # define NOIDLE (1 << SIDLEMODE) | ||
37 | # define SMARTIDLE (2 << SIDLEMODE) | ||
38 | # define ENABLEWAKEUP (1 << 2) | ||
39 | # define SOFTRST (1 << 1) | ||
40 | # define AUTOIDLE (1 << 0) | ||
41 | #define OTG_SYSSTATUS OMAP_HSOTG(0x8) | ||
42 | # define RESETDONE (1 << 0) | ||
43 | #define OTG_INTERFSEL OMAP_HSOTG(0xc) | ||
44 | # define EXTCP (1 << 2) | ||
45 | # define PHYSEL 0 /* bit position */ | ||
46 | # define UTMI_8BIT (0 << PHYSEL) | ||
47 | # define ULPI_12PIN (1 << PHYSEL) | ||
48 | # define ULPI_8PIN (2 << PHYSEL) | ||
49 | #define OTG_SIMENABLE OMAP_HSOTG(0x10) | ||
50 | # define TM1 (1 << 0) | ||
51 | #define OTG_FORCESTDBY OMAP_HSOTG(0x14) | ||
52 | # define ENABLEFORCE (1 << 0) | ||
53 | |||
54 | #endif /* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */ | ||
55 | |||
56 | #endif /* __MUSB_OMAP243X_H__ */ | ||
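The register map above hangs off OMAP_HSOTG(), which adds 0x400 plus the register offset to the module base, and MIDLEMODE/SIDLEMODE are two-bit fields at bit positions 12 and 3. The sketch below (plain C, with a made-up base address purely for illustration) shows how a value such as SMARTSTDBY | SMARTIDLE | AUTOIDLE composes, matching the OTG_SYSCONFIG programming in omap2430.c:

    /* Standalone sketch of the omap2430.h bit-field arithmetic.
     * BASE is an assumed value; only the offsets/fields mirror the header.
     */
    #include <stdio.h>

    #define BASE            0x480AC000u     /* assumption for illustration */
    #define HSOTG(off)      (BASE + 0x400 + (off))
    #define OTG_SYSCONFIG   HSOTG(0x4)

    #define MIDLEMODE       12
    #define SMARTSTDBY      (2u << MIDLEMODE)
    #define SIDLEMODE       3
    #define SMARTIDLE       (2u << SIDLEMODE)
    #define AUTOIDLE        (1u << 0)

    int main(void)
    {
            unsigned int l = SMARTSTDBY | SMARTIDLE | AUTOIDLE;

            /* 0x2011: bits 13:12 = 2, bits 4:3 = 2, bit 0 = 1 */
            printf("OTG_SYSCONFIG @ 0x%08x <- 0x%04x\n", OTG_SYSCONFIG, l);
            return 0;
    }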
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c new file mode 100644 index 000000000000..b73b036f3d77 --- /dev/null +++ b/drivers/usb/musb/tusb6010.c | |||
@@ -0,0 +1,1151 @@ | |||
1 | /* | ||
2 | * TUSB6010 USB 2.0 OTG Dual Role controller | ||
3 | * | ||
4 | * Copyright (C) 2006 Nokia Corporation | ||
5 | * Jarkko Nikula <jarkko.nikula@nokia.com> | ||
6 | * Tony Lindgren <tony@atomide.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Notes: | ||
13 | * - Driver assumes that interface to external host (main CPU) is | ||
14 | * configured for NOR FLASH interface instead of VLYNQ serial | ||
15 | * interface. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/usb.h> | ||
23 | #include <linux/irq.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | #include "musb_core.h" | ||
27 | |||
28 | static void tusb_source_power(struct musb *musb, int is_on); | ||
29 | |||
30 | #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) | ||
31 | #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) | ||
32 | |||
33 | /* | ||
34 | * Checks the revision. We need to use the DMA register as 3.0 does not | ||
35 | * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. | ||
36 | */ | ||
37 | u8 tusb_get_revision(struct musb *musb) | ||
38 | { | ||
39 | void __iomem *tbase = musb->ctrl_base; | ||
40 | u32 die_id; | ||
41 | u8 rev; | ||
42 | |||
43 | rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff; | ||
44 | if (TUSB_REV_MAJOR(rev) == 3) { | ||
45 | die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, | ||
46 | TUSB_DIDR1_HI)); | ||
47 | if (die_id >= TUSB_DIDR1_HI_REV_31) | ||
48 | rev |= 1; | ||
49 | } | ||
50 | |||
51 | return rev; | ||
52 | } | ||
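The revision byte read above packs the major revision in the high nibble and the minor in the low nibble; rev 3.0 silicon whose die ID reports 3.1 or later gets its minor bumped by one. A short worked example of the nibble decoding (standalone; the die-ID handling itself is omitted):

    /* Worked example of the TUSB revision nibble encoding (decode only). */
    #include <stdio.h>

    #define REV_MAJOR(v)    (((v) >> 4) & 0xf)
    #define REV_MINOR(v)    ((v) & 0xf)

    int main(void)
    {
            unsigned int rev = 0x30;        /* as read from TUSB_DMA_CTRL_REV[7:0] */

            printf("tusb rev %u.%u\n", REV_MAJOR(rev), REV_MINOR(rev));      /* 3.0 */
            printf("after die-id bump: %u.%u\n",
                   REV_MAJOR(rev | 1), REV_MINOR(rev | 1));                  /* 3.1 */
            return 0;
    }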
53 | |||
54 | static int __init tusb_print_revision(struct musb *musb) | ||
55 | { | ||
56 | void __iomem *tbase = musb->ctrl_base; | ||
57 | u8 rev; | ||
58 | |||
59 | rev = tusb_get_revision(musb); | ||
60 | |||
61 | pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n", | ||
62 | "prcm", | ||
63 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)), | ||
64 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)), | ||
65 | "int", | ||
66 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), | ||
67 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), | ||
68 | "gpio", | ||
69 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)), | ||
70 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)), | ||
71 | "dma", | ||
72 | TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), | ||
73 | TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), | ||
74 | "dieid", | ||
75 | TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)), | ||
76 | "rev", | ||
77 | TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev)); | ||
78 | |||
79 | return tusb_get_revision(musb); | ||
80 | } | ||
81 | |||
82 | #define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \ | ||
83 | | TUSB_PHY_OTG_CTRL_TESTM0) | ||
84 | |||
85 | /* | ||
86 | * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0. | ||
87 | * Disables power detection in PHY for the duration of idle. | ||
88 | */ | ||
89 | static void tusb_wbus_quirk(struct musb *musb, int enabled) | ||
90 | { | ||
91 | void __iomem *tbase = musb->ctrl_base; | ||
92 | static u32 phy_otg_ctrl, phy_otg_ena; | ||
93 | u32 tmp; | ||
94 | |||
95 | if (enabled) { | ||
96 | phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); | ||
97 | phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); | ||
98 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | ||
99 | | phy_otg_ena | WBUS_QUIRK_MASK; | ||
100 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); | ||
101 | tmp = phy_otg_ena & ~WBUS_QUIRK_MASK; | ||
102 | tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2; | ||
103 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); | ||
104 | DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n", | ||
105 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), | ||
106 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); | ||
107 | } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE) | ||
108 | & TUSB_PHY_OTG_CTRL_TESTM2) { | ||
109 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl; | ||
110 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); | ||
111 | tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena; | ||
112 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); | ||
113 | DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n", | ||
114 | musb_readl(tbase, TUSB_PHY_OTG_CTRL), | ||
115 | musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); | ||
116 | phy_otg_ctrl = 0; | ||
117 | phy_otg_ena = 0; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * TUSB 6010 may use a parallel bus that doesn't support byte ops; | ||
123 | * so both loading and unloading FIFOs need explicit byte counts. | ||
124 | */ | ||
125 | |||
126 | static inline void | ||
127 | tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len) | ||
128 | { | ||
129 | u32 val; | ||
130 | int i; | ||
131 | |||
132 | if (len > 4) { | ||
133 | for (i = 0; i < (len >> 2); i++) { | ||
134 | memcpy(&val, buf, 4); | ||
135 | musb_writel(fifo, 0, val); | ||
136 | buf += 4; | ||
137 | } | ||
138 | len %= 4; | ||
139 | } | ||
140 | if (len > 0) { | ||
141 | /* Write the remaining 1-3 bytes to the FIFO */ | ||
142 | memcpy(&val, buf, len); | ||
143 | musb_writel(fifo, 0, val); | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static inline void tusb_fifo_read_unaligned(void __iomem *fifo, | ||
148 | void __iomem *buf, u16 len) | ||
149 | { | ||
150 | u32 val; | ||
151 | int i; | ||
152 | |||
153 | if (len > 4) { | ||
154 | for (i = 0; i < (len >> 2); i++) { | ||
155 | val = musb_readl(fifo, 0); | ||
156 | memcpy(buf, &val, 4); | ||
157 | buf += 4; | ||
158 | } | ||
159 | len %= 4; | ||
160 | } | ||
161 | if (len > 0) { | ||
162 | /* Read the remaining 1-3 bytes from the FIFO */ | ||
163 | val = musb_readl(fifo, 0); | ||
164 | memcpy(buf, &val, len); | ||
165 | } | ||
166 | } | ||
167 | |||
168 | void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) | ||
169 | { | ||
170 | void __iomem *ep_conf = hw_ep->conf; | ||
171 | void __iomem *fifo = hw_ep->fifo; | ||
172 | u8 epnum = hw_ep->epnum; | ||
173 | |||
174 | prefetch(buf); | ||
175 | |||
176 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
177 | 'T', epnum, fifo, len, buf); | ||
178 | |||
179 | if (epnum) | ||
180 | musb_writel(ep_conf, TUSB_EP_TX_OFFSET, | ||
181 | TUSB_EP_CONFIG_XFR_SIZE(len)); | ||
182 | else | ||
183 | musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX | | ||
184 | TUSB_EP0_CONFIG_XFR_SIZE(len)); | ||
185 | |||
186 | if (likely((0x01 & (unsigned long) buf) == 0)) { | ||
187 | |||
188 | /* Best case is 32bit-aligned destination address */ | ||
189 | if ((0x02 & (unsigned long) buf) == 0) { | ||
190 | if (len >= 4) { | ||
191 | writesl(fifo, buf, len >> 2); | ||
192 | buf += (len & ~0x03); | ||
193 | len &= 0x03; | ||
194 | } | ||
195 | } else { | ||
196 | if (len >= 2) { | ||
197 | u32 val; | ||
198 | int i; | ||
199 | |||
200 | /* Cannot use writesw, fifo is 32-bit */ | ||
201 | for (i = 0; i < (len >> 2); i++) { | ||
202 | val = (u32)(*(u16 *)buf); | ||
203 | buf += 2; | ||
204 | val |= (*(u16 *)buf) << 16; | ||
205 | buf += 2; | ||
206 | musb_writel(fifo, 0, val); | ||
207 | } | ||
208 | len &= 0x03; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | |||
213 | if (len > 0) | ||
214 | tusb_fifo_write_unaligned(fifo, buf, len); | ||
215 | } | ||
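Since the TUSB FIFO only supports 32-bit accesses, the 16-bit-aligned path in musb_write_fifo() packs two halfword loads into one word before writing. On a little-endian CPU this preserves the byte order of the source buffer, as the following sketch (illustrative buffer contents, little-endian assumed, not driver code) shows:

    /* Little-endian packing used by the 16-bit-aligned FIFO path above. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };
            const uint8_t *p = buf;
            uint32_t val;

            val = (uint32_t)(*(const uint16_t *)p);          /* 0x2211 on LE */
            p += 2;
            val |= (uint32_t)(*(const uint16_t *)p) << 16;   /* | 0x4433 << 16 */

            printf("fifo word = 0x%08x\n", val);             /* 0x44332211 */
            return 0;
    }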
216 | |||
217 | void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf) | ||
218 | { | ||
219 | void __iomem *ep_conf = hw_ep->conf; | ||
220 | void __iomem *fifo = hw_ep->fifo; | ||
221 | u8 epnum = hw_ep->epnum; | ||
222 | |||
223 | DBG(4, "%cX ep%d fifo %p count %d buf %p\n", | ||
224 | 'R', epnum, fifo, len, buf); | ||
225 | |||
226 | if (epnum) | ||
227 | musb_writel(ep_conf, TUSB_EP_RX_OFFSET, | ||
228 | TUSB_EP_CONFIG_XFR_SIZE(len)); | ||
229 | else | ||
230 | musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len)); | ||
231 | |||
232 | if (likely((0x01 & (unsigned long) buf) == 0)) { | ||
233 | |||
234 | /* Best case is 32bit-aligned destination address */ | ||
235 | if ((0x02 & (unsigned long) buf) == 0) { | ||
236 | if (len >= 4) { | ||
237 | readsl(fifo, buf, len >> 2); | ||
238 | buf += (len & ~0x03); | ||
239 | len &= 0x03; | ||
240 | } | ||
241 | } else { | ||
242 | if (len >= 2) { | ||
243 | u32 val; | ||
244 | int i; | ||
245 | |||
246 | /* Cannot use readsw, fifo is 32-bit */ | ||
247 | for (i = 0; i < (len >> 2); i++) { | ||
248 | val = musb_readl(fifo, 0); | ||
249 | *(u16 *)buf = (u16)(val & 0xffff); | ||
250 | buf += 2; | ||
251 | *(u16 *)buf = (u16)(val >> 16); | ||
252 | buf += 2; | ||
253 | } | ||
254 | len &= 0x03; | ||
255 | } | ||
256 | } | ||
257 | } | ||
258 | |||
259 | if (len > 0) | ||
260 | tusb_fifo_read_unaligned(fifo, buf, len); | ||
261 | } | ||
262 | |||
263 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
264 | |||
265 | /* This is used by gadget drivers, and OTG transceiver logic, allowing | ||
266 | * at most mA current to be drawn from VBUS during a Default-B session | ||
267 | * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host | ||
268 | * mode), or low power Default-B sessions, something else supplies power. | ||
269 | * Caller must take care of locking. | ||
270 | */ | ||
271 | static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) | ||
272 | { | ||
273 | struct musb *musb = container_of(x, struct musb, xceiv); | ||
274 | void __iomem *tbase = musb->ctrl_base; | ||
275 | u32 reg; | ||
276 | |||
277 | /* | ||
278 | * Keep clock active when enabled. Note that this is not tied to | ||
279 | * drawing VBUS, as with OTG mA can be less than musb->min_power. | ||
280 | */ | ||
281 | if (musb->set_clock) { | ||
282 | if (mA) | ||
283 | musb->set_clock(musb->clock, 1); | ||
284 | else | ||
285 | musb->set_clock(musb->clock, 0); | ||
286 | } | ||
287 | |||
288 | /* tps65030 seems to consume max 100mA, with maybe 60mA available | ||
289 | * (measured on one board) for things other than tps and tusb. | ||
290 | * | ||
291 | * Boards sharing the CPU clock with CLKIN will need to prevent | ||
292 | * certain idle sleep states while the USB link is active. | ||
293 | * | ||
294 | * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }. | ||
295 | * The actual current usage would be very board-specific. For now, | ||
296 | * it's simpler to just use an aggregate (also board-specific). | ||
297 | */ | ||
298 | if (x->default_a || mA < (musb->min_power << 1)) | ||
299 | mA = 0; | ||
300 | |||
301 | reg = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
302 | if (mA) { | ||
303 | musb->is_bus_powered = 1; | ||
304 | reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN; | ||
305 | } else { | ||
306 | musb->is_bus_powered = 0; | ||
307 | reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); | ||
308 | } | ||
309 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); | ||
310 | |||
311 | DBG(2, "draw max %d mA VBUS\n", mA); | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | #else | ||
316 | #define tusb_draw_power NULL | ||
317 | #endif | ||
318 | |||
319 | /* workaround for issue 13: change clock during chip idle | ||
320 | * (to be fixed in rev3 silicon) ... symptoms include disconnect | ||
321 | * or looping suspend/resume cycles | ||
322 | */ | ||
323 | static void tusb_set_clock_source(struct musb *musb, unsigned mode) | ||
324 | { | ||
325 | void __iomem *tbase = musb->ctrl_base; | ||
326 | u32 reg; | ||
327 | |||
328 | reg = musb_readl(tbase, TUSB_PRCM_CONF); | ||
329 | reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3); | ||
330 | |||
331 | /* 0 = refclk (clkin, XI) | ||
332 | * 1 = PHY 60 MHz (internal PLL) | ||
333 | * 2 = not supported | ||
334 | * 3 = what? | ||
335 | */ | ||
336 | if (mode > 0) | ||
337 | reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3); | ||
338 | |||
339 | musb_writel(tbase, TUSB_PRCM_CONF, reg); | ||
340 | |||
341 | /* FIXME tusb6010_platform_retime(mode == 0); */ | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * Idle TUSB6010 until next wake-up event; NOR access always wakes. | ||
346 | * Other code ensures that we idle unless we're connected _and_ the | ||
347 | * USB link is not suspended ... and tells us the relevant wakeup | ||
348 | * events. SW_EN for voltage is handled separately. | ||
349 | */ | ||
350 | void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) | ||
351 | { | ||
352 | void __iomem *tbase = musb->ctrl_base; | ||
353 | u32 reg; | ||
354 | |||
355 | if ((wakeup_enables & TUSB_PRCM_WBUS) | ||
356 | && (tusb_get_revision(musb) == TUSB_REV_30)) | ||
357 | tusb_wbus_quirk(musb, 1); | ||
358 | |||
359 | tusb_set_clock_source(musb, 0); | ||
360 | |||
361 | wakeup_enables |= TUSB_PRCM_WNORCS; | ||
362 | musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables); | ||
363 | |||
364 | /* REVISIT writeup of WID implies that if WID set and ID is grounded, | ||
365 | * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared. | ||
366 | * Presumably that's mostly to save power, hence WID is immaterial ... | ||
367 | */ | ||
368 | |||
369 | reg = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
370 | /* issue 4: when driving vbus, use hipower (vbus_det) comparator */ | ||
371 | if (is_host_active(musb)) { | ||
372 | reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; | ||
373 | reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN; | ||
374 | } else { | ||
375 | reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN; | ||
376 | reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; | ||
377 | } | ||
378 | reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE; | ||
379 | musb_writel(tbase, TUSB_PRCM_MNGMT, reg); | ||
380 | |||
381 | DBG(6, "idle, wake on %02x\n", wakeup_enables); | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * Updates cable VBUS status. Caller must take care of locking. | ||
386 | */ | ||
387 | int musb_platform_get_vbus_status(struct musb *musb) | ||
388 | { | ||
389 | void __iomem *tbase = musb->ctrl_base; | ||
390 | u32 otg_stat, prcm_mngmt; | ||
391 | int ret = 0; | ||
392 | |||
393 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
394 | prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
395 | |||
396 | /* Temporarily enable VBUS detection if it was disabled for | ||
397 |  * suspend mode. Unless it's enabled, otg_stat and devctl will | ||
398 |  * not show the correct VBUS state. | ||
399 | */ | ||
400 | if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) { | ||
401 | u32 tmp = prcm_mngmt; | ||
402 | tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; | ||
403 | musb_writel(tbase, TUSB_PRCM_MNGMT, tmp); | ||
404 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
405 | musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt); | ||
406 | } | ||
407 | |||
408 | if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) | ||
409 | ret = 1; | ||
410 | |||
411 | return ret; | ||
412 | } | ||
413 | |||
414 | static struct timer_list musb_idle_timer; | ||
415 | |||
416 | static void musb_do_idle(unsigned long _musb) | ||
417 | { | ||
418 | struct musb *musb = (void *)_musb; | ||
419 | unsigned long flags; | ||
420 | |||
421 | spin_lock_irqsave(&musb->lock, flags); | ||
422 | |||
423 | switch (musb->xceiv.state) { | ||
424 | case OTG_STATE_A_WAIT_BCON: | ||
425 | if ((musb->a_wait_bcon != 0) | ||
426 | && (musb->idle_timeout == 0 | ||
427 | || time_after(jiffies, musb->idle_timeout))) { | ||
428 | DBG(4, "Nothing connected %s, turning off VBUS\n", | ||
429 | otg_state_string(musb)); | ||
430 | } | ||
431 | /* FALLTHROUGH */ | ||
432 | case OTG_STATE_A_IDLE: | ||
433 | tusb_source_power(musb, 0); | ||
434 | default: | ||
435 | break; | ||
436 | } | ||
437 | |||
438 | if (!musb->is_active) { | ||
439 | u32 wakeups; | ||
440 | |||
441 | /* wait until khubd handles port change status */ | ||
442 | if (is_host_active(musb) && (musb->port1_status >> 16)) | ||
443 | goto done; | ||
444 | |||
445 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
446 | if (is_peripheral_enabled(musb) && !musb->gadget_driver) | ||
447 | wakeups = 0; | ||
448 | else { | ||
449 | wakeups = TUSB_PRCM_WHOSTDISCON | ||
450 | | TUSB_PRCM_WBUS | ||
451 | | TUSB_PRCM_WVBUS; | ||
452 | if (is_otg_enabled(musb)) | ||
453 | wakeups |= TUSB_PRCM_WID; | ||
454 | } | ||
455 | #else | ||
456 | wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS; | ||
457 | #endif | ||
458 | tusb_allow_idle(musb, wakeups); | ||
459 | } | ||
460 | done: | ||
461 | spin_unlock_irqrestore(&musb->lock, flags); | ||
462 | } | ||
463 | |||
464 | /* | ||
465 |  * Maybe put TUSB6010 into idle mode depending on USB link status, | ||
466 | * like "disconnected" or "suspended". We'll be woken out of it by | ||
467 | * connect, resume, or disconnect. | ||
468 | * | ||
469 |  * Must be called as the last function wherever there is register | ||
470 |  * access to TUSB6010, because of NOR flash wake-up. | ||
471 | * Caller should own controller spinlock. | ||
472 | * | ||
473 | * Delay because peripheral enables D+ pullup 3msec after SE0, and | ||
474 | * we don't want to treat that full speed J as a wakeup event. | ||
475 | * ... peripherals must draw only suspend current after 10 msec. | ||
476 | */ | ||
477 | void musb_platform_try_idle(struct musb *musb, unsigned long timeout) | ||
478 | { | ||
479 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | ||
480 | static unsigned long last_timer; | ||
481 | |||
482 | if (timeout == 0) | ||
483 | timeout = default_timeout; | ||
484 | |||
485 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
486 | if (musb->is_active || ((musb->a_wait_bcon == 0) | ||
487 | && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { | ||
488 | DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); | ||
489 | del_timer(&musb_idle_timer); | ||
490 | last_timer = jiffies; | ||
491 | return; | ||
492 | } | ||
493 | |||
494 | if (time_after(last_timer, timeout)) { | ||
495 | if (!timer_pending(&musb_idle_timer)) | ||
496 | last_timer = timeout; | ||
497 | else { | ||
498 | DBG(4, "Longer idle timer already pending, ignoring\n"); | ||
499 | return; | ||
500 | } | ||
501 | } | ||
502 | last_timer = timeout; | ||
503 | |||
504 | DBG(4, "%s inactive, starting idle timer for %lu ms\n", | ||
505 | otg_state_string(musb), | ||
506 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | ||
507 | mod_timer(&musb_idle_timer, timeout); | ||
508 | } | ||
509 | |||
510 | /* ticks of 60 MHz clock */ | ||
511 | #define DEVCLOCK 60000000 | ||
512 | #define OTG_TIMER_MS(msecs) ((msecs) \ | ||
513 | ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \ | ||
514 | | TUSB_DEV_OTG_TIMER_ENABLE) \ | ||
515 | : 0) | ||
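OTG_TIMER_MS() converts a millisecond count into ticks of the 60 MHz clock, (DEVCLOCK / 1000) * msecs, and ORs in the timer enable bit; the register field macros themselves live in tusb6010.h and are not reproduced here. A quick check of the tick arithmetic only:

    /* Arithmetic behind OTG_TIMER_MS(): 60 MHz ticks per millisecond. */
    #include <stdio.h>

    #define DEVCLOCK 60000000UL

    int main(void)
    {
            unsigned long msecs = 100;      /* example duration */
            unsigned long ticks = (DEVCLOCK / 1000) * msecs;

            printf("%lu ms -> %lu ticks\n", msecs, ticks);  /* 6000000 */
            return 0;
    }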
516 | |||
517 | static void tusb_source_power(struct musb *musb, int is_on) | ||
518 | { | ||
519 | void __iomem *tbase = musb->ctrl_base; | ||
520 | u32 conf, prcm, timer; | ||
521 | u8 devctl; | ||
522 | |||
523 | /* HDRC controls CPEN, but beware current surges during device | ||
524 | * connect. They can trigger transient overcurrent conditions | ||
525 | * that must be ignored. | ||
526 | */ | ||
527 | |||
528 | prcm = musb_readl(tbase, TUSB_PRCM_MNGMT); | ||
529 | conf = musb_readl(tbase, TUSB_DEV_CONF); | ||
530 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
531 | |||
532 | if (is_on) { | ||
533 | if (musb->set_clock) | ||
534 | musb->set_clock(musb->clock, 1); | ||
535 | timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); | ||
536 | musb->xceiv.default_a = 1; | ||
537 | musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; | ||
538 | devctl |= MUSB_DEVCTL_SESSION; | ||
539 | |||
540 | conf |= TUSB_DEV_CONF_USB_HOST_MODE; | ||
541 | MUSB_HST_MODE(musb); | ||
542 | } else { | ||
543 | u32 otg_stat; | ||
544 | |||
545 | timer = 0; | ||
546 | |||
547 | /* If ID pin is grounded, we want to be a_idle */ | ||
548 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
549 | if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) { | ||
550 | switch (musb->xceiv.state) { | ||
551 | case OTG_STATE_A_WAIT_VRISE: | ||
552 | case OTG_STATE_A_WAIT_BCON: | ||
553 | musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; | ||
554 | break; | ||
555 | case OTG_STATE_A_WAIT_VFALL: | ||
556 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
557 | break; | ||
558 | default: | ||
559 | musb->xceiv.state = OTG_STATE_A_IDLE; | ||
560 | } | ||
561 | musb->is_active = 0; | ||
562 | musb->xceiv.default_a = 1; | ||
563 | MUSB_HST_MODE(musb); | ||
564 | } else { | ||
565 | musb->is_active = 0; | ||
566 | musb->xceiv.default_a = 0; | ||
567 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
568 | MUSB_DEV_MODE(musb); | ||
569 | } | ||
570 | |||
571 | devctl &= ~MUSB_DEVCTL_SESSION; | ||
572 | conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; | ||
573 | if (musb->set_clock) | ||
574 | musb->set_clock(musb->clock, 0); | ||
575 | } | ||
576 | prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); | ||
577 | |||
578 | musb_writel(tbase, TUSB_PRCM_MNGMT, prcm); | ||
579 | musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer); | ||
580 | musb_writel(tbase, TUSB_DEV_CONF, conf); | ||
581 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
582 | |||
583 | DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n", | ||
584 | otg_state_string(musb), | ||
585 | musb_readb(musb->mregs, MUSB_DEVCTL), | ||
586 | musb_readl(tbase, TUSB_DEV_OTG_STAT), | ||
587 | conf, prcm); | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * Sets the mode to OTG, peripheral or host by changing the ID detection. | ||
592 | * Caller must take care of locking. | ||
593 | * | ||
594 | * Note that if a mini-A cable is plugged in the ID line will stay down as | ||
595 | * the weak ID pull-up is not able to pull the ID up. | ||
596 | * | ||
597 | * REVISIT: It would be possible to add support for changing between host | ||
598 | * and peripheral modes in non-OTG configurations by reconfiguring hardware | ||
599 | * and then setting musb->board_mode. For now, only support OTG mode. | ||
600 | */ | ||
601 | void musb_platform_set_mode(struct musb *musb, u8 musb_mode) | ||
602 | { | ||
603 | void __iomem *tbase = musb->ctrl_base; | ||
604 | u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; | ||
605 | |||
606 | if (musb->board_mode != MUSB_OTG) { | ||
607 | ERR("Changing mode currently only supported in OTG mode\n"); | ||
608 | return; | ||
609 | } | ||
610 | |||
611 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
612 | phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); | ||
613 | phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); | ||
614 | dev_conf = musb_readl(tbase, TUSB_DEV_CONF); | ||
615 | |||
616 | switch (musb_mode) { | ||
617 | |||
618 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
619 | case MUSB_HOST: /* Disable PHY ID detect, ground ID */ | ||
620 | phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
621 | phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
622 | dev_conf |= TUSB_DEV_CONF_ID_SEL; | ||
623 | dev_conf &= ~TUSB_DEV_CONF_SOFT_ID; | ||
624 | break; | ||
625 | #endif | ||
626 | |||
627 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
628 | case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */ | ||
629 | phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
630 | phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
631 | dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); | ||
632 | break; | ||
633 | #endif | ||
634 | |||
635 | #ifdef CONFIG_USB_MUSB_OTG | ||
636 | case MUSB_OTG: /* Use PHY ID detection */ | ||
637 | phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
638 | phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
639 | dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); | ||
640 | break; | ||
641 | #endif | ||
642 | |||
643 | default: | ||
644 | DBG(2, "Trying to set unknown mode %i\n", musb_mode); | ||
645 | } | ||
646 | |||
647 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, | ||
648 | TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl); | ||
649 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, | ||
650 | TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena); | ||
651 | musb_writel(tbase, TUSB_DEV_CONF, dev_conf); | ||
652 | |||
653 | otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
654 | if ((musb_mode == MUSB_PERIPHERAL) && | ||
655 | !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) | ||
656 | INFO("Cannot be peripheral with mini-A cable " | ||
657 | "otg_stat: %08x\n", otg_stat); | ||
658 | } | ||
659 | |||
660 | static inline unsigned long | ||
661 | tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | ||
662 | { | ||
663 | u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); | ||
664 | unsigned long idle_timeout = 0; | ||
665 | |||
666 | /* ID pin */ | ||
667 | if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) { | ||
668 | int default_a; | ||
669 | |||
670 | if (is_otg_enabled(musb)) | ||
671 | default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); | ||
672 | else | ||
673 | default_a = is_host_enabled(musb); | ||
674 | DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); | ||
675 | musb->xceiv.default_a = default_a; | ||
676 | tusb_source_power(musb, default_a); | ||
677 | |||
678 | /* Don't allow idling immediately */ | ||
679 | if (default_a) | ||
680 | idle_timeout = jiffies + (HZ * 3); | ||
681 | } | ||
682 | |||
683 | /* VBUS state change */ | ||
684 | if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { | ||
685 | |||
686 | /* B-dev state machine: no vbus ~= disconnect */ | ||
687 | if ((is_otg_enabled(musb) && !musb->xceiv.default_a) | ||
688 | || !is_host_enabled(musb)) { | ||
689 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
690 | /* ? musb_root_disconnect(musb); */ | ||
691 | musb->port1_status &= | ||
692 | ~(USB_PORT_STAT_CONNECTION | ||
693 | | USB_PORT_STAT_ENABLE | ||
694 | | USB_PORT_STAT_LOW_SPEED | ||
695 | | USB_PORT_STAT_HIGH_SPEED | ||
696 | | USB_PORT_STAT_TEST | ||
697 | ); | ||
698 | #endif | ||
699 | |||
700 | if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { | ||
701 | DBG(1, "Forcing disconnect (no interrupt)\n"); | ||
702 | if (musb->xceiv.state != OTG_STATE_B_IDLE) { | ||
703 | /* INTR_DISCONNECT can hide... */ | ||
704 | musb->xceiv.state = OTG_STATE_B_IDLE; | ||
705 | musb->int_usb |= MUSB_INTR_DISCONNECT; | ||
706 | } | ||
707 | musb->is_active = 0; | ||
708 | } | ||
709 | DBG(2, "vbus change, %s, otg %03x\n", | ||
710 | otg_state_string(musb), otg_stat); | ||
711 | idle_timeout = jiffies + (1 * HZ); | ||
712 | schedule_work(&musb->irq_work); | ||
713 | |||
714 | } else /* A-dev state machine */ { | ||
715 | DBG(2, "vbus change, %s, otg %03x\n", | ||
716 | otg_state_string(musb), otg_stat); | ||
717 | |||
718 | switch (musb->xceiv.state) { | ||
719 | case OTG_STATE_A_IDLE: | ||
720 | DBG(2, "Got SRP, turning on VBUS\n"); | ||
721 | musb_set_vbus(musb, 1); | ||
722 | |||
723 | /* CONNECT can wake if a_wait_bcon is set */ | ||
724 | if (musb->a_wait_bcon != 0) | ||
725 | musb->is_active = 0; | ||
726 | else | ||
727 | musb->is_active = 1; | ||
728 | |||
729 | /* | ||
730 |  * OPT FS A TD.4.6 needs a few seconds for | ||
731 | * A_WAIT_VRISE | ||
732 | */ | ||
733 | idle_timeout = jiffies + (2 * HZ); | ||
734 | |||
735 | break; | ||
736 | case OTG_STATE_A_WAIT_VRISE: | ||
737 | /* ignore; A-session-valid < VBUS_VALID/2, | ||
738 | * we monitor this with the timer | ||
739 | */ | ||
740 | break; | ||
741 | case OTG_STATE_A_WAIT_VFALL: | ||
742 | /* REVISIT this irq triggers during short | ||
743 | * spikes caused by enumeration ... | ||
744 | */ | ||
745 | if (musb->vbuserr_retry) { | ||
746 | musb->vbuserr_retry--; | ||
747 | tusb_source_power(musb, 1); | ||
748 | } else { | ||
749 | musb->vbuserr_retry | ||
750 | = VBUSERR_RETRY_COUNT; | ||
751 | tusb_source_power(musb, 0); | ||
752 | } | ||
753 | break; | ||
754 | default: | ||
755 | break; | ||
756 | } | ||
757 | } | ||
758 | } | ||
759 | |||
760 | /* OTG timer expiration */ | ||
761 | if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) { | ||
762 | u8 devctl; | ||
763 | |||
764 | DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat); | ||
765 | |||
766 | switch (musb->xceiv.state) { | ||
767 | case OTG_STATE_A_WAIT_VRISE: | ||
768 | /* VBUS has probably been valid for a while now, | ||
769 | * but may well have bounced out of range a bit | ||
770 | */ | ||
771 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
772 | if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) { | ||
773 | if ((devctl & MUSB_DEVCTL_VBUS) | ||
774 | != MUSB_DEVCTL_VBUS) { | ||
775 | DBG(2, "devctl %02x\n", devctl); | ||
776 | break; | ||
777 | } | ||
778 | musb->xceiv.state = OTG_STATE_A_WAIT_BCON; | ||
779 | musb->is_active = 0; | ||
780 | idle_timeout = jiffies | ||
781 | + msecs_to_jiffies(musb->a_wait_bcon); | ||
782 | } else { | ||
783 | /* REVISIT report overcurrent to hub? */ | ||
784 | ERR("vbus too slow, devctl %02x\n", devctl); | ||
785 | tusb_source_power(musb, 0); | ||
786 | } | ||
787 | break; | ||
788 | case OTG_STATE_A_WAIT_BCON: | ||
789 | if (musb->a_wait_bcon != 0) | ||
790 | idle_timeout = jiffies | ||
791 | + msecs_to_jiffies(musb->a_wait_bcon); | ||
792 | break; | ||
793 | case OTG_STATE_A_SUSPEND: | ||
794 | break; | ||
795 | case OTG_STATE_B_WAIT_ACON: | ||
796 | break; | ||
797 | default: | ||
798 | break; | ||
799 | } | ||
800 | } | ||
801 | schedule_work(&musb->irq_work); | ||
802 | |||
803 | return idle_timeout; | ||
804 | } | ||
805 | |||
806 | static irqreturn_t tusb_interrupt(int irq, void *__hci) | ||
807 | { | ||
808 | struct musb *musb = __hci; | ||
809 | void __iomem *tbase = musb->ctrl_base; | ||
810 | unsigned long flags, idle_timeout = 0; | ||
811 | u32 int_mask, int_src; | ||
812 | |||
813 | spin_lock_irqsave(&musb->lock, flags); | ||
814 | |||
815 | /* Mask all interrupts to allow using both edge and level GPIO irq */ | ||
816 | int_mask = musb_readl(tbase, TUSB_INT_MASK); | ||
817 | musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); | ||
818 | |||
819 | int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS; | ||
820 | DBG(3, "TUSB IRQ %08x\n", int_src); | ||
821 | |||
822 | musb->int_usb = (u8) int_src; | ||
823 | |||
824 | /* Acknowledge wake-up source interrupts */ | ||
825 | if (int_src & TUSB_INT_SRC_DEV_WAKEUP) { | ||
826 | u32 reg; | ||
827 | u32 i; | ||
828 | |||
829 | if (tusb_get_revision(musb) == TUSB_REV_30) | ||
830 | tusb_wbus_quirk(musb, 0); | ||
831 | |||
832 | /* there are issues re-locking the PLL on wakeup ... */ | ||
833 | |||
834 | /* work around issue 8 */ | ||
835 | for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) { | ||
836 | musb_writel(tbase, TUSB_SCRATCH_PAD, 0); | ||
837 | musb_writel(tbase, TUSB_SCRATCH_PAD, i); | ||
838 | reg = musb_readl(tbase, TUSB_SCRATCH_PAD); | ||
839 | if (reg == i) | ||
840 | break; | ||
841 | DBG(6, "TUSB NOR not ready\n"); | ||
842 | } | ||
843 | |||
844 | /* work around issue 13 (2nd half) */ | ||
845 | tusb_set_clock_source(musb, 1); | ||
846 | |||
847 | reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE); | ||
848 | musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); | ||
849 | if (reg & ~TUSB_PRCM_WNORCS) { | ||
850 | musb->is_active = 1; | ||
851 | schedule_work(&musb->irq_work); | ||
852 | } | ||
853 | DBG(3, "wake %sactive %02x\n", | ||
854 | musb->is_active ? "" : "in", reg); | ||
855 | |||
856 | /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */ | ||
857 | } | ||
858 | |||
859 | if (int_src & TUSB_INT_SRC_USB_IP_CONN) | ||
860 | del_timer(&musb_idle_timer); | ||
861 | |||
862 | /* OTG state change reports (annoyingly) not issued by Mentor core */ | ||
863 | if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG | ||
864 | | TUSB_INT_SRC_OTG_TIMEOUT | ||
865 | | TUSB_INT_SRC_ID_STATUS_CHNG)) | ||
866 | idle_timeout = tusb_otg_ints(musb, int_src, tbase); | ||
867 | |||
868 | /* TX dma callback must be handled here, RX dma callback is | ||
869 | * handled in tusb_omap_dma_cb. | ||
870 | */ | ||
871 | if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) { | ||
872 | u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC); | ||
873 | u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK); | ||
874 | |||
875 | DBG(3, "DMA IRQ %08x\n", dma_src); | ||
876 | real_dma_src = ~real_dma_src & dma_src; | ||
877 | if (tusb_dma_omap() && real_dma_src) { | ||
878 | int tx_source = (real_dma_src & 0xffff); | ||
879 | int i; | ||
880 | |||
881 | for (i = 1; i <= 15; i++) { | ||
882 | if (tx_source & (1 << i)) { | ||
883 | DBG(3, "completing ep%i %s\n", i, "tx"); | ||
884 | musb_dma_completion(musb, i, 1); | ||
885 | } | ||
886 | } | ||
887 | } | ||
888 | musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src); | ||
889 | } | ||
890 | |||
891 | /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */ | ||
892 | if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) { | ||
893 | u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC); | ||
894 | |||
895 | musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src); | ||
896 | musb->int_rx = (((musb_src >> 16) & 0xffff) << 1); | ||
897 | musb->int_tx = (musb_src & 0xffff); | ||
898 | } else { | ||
899 | musb->int_rx = 0; | ||
900 | musb->int_tx = 0; | ||
901 | } | ||
902 | |||
903 | if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff)) | ||
904 | musb_interrupt(musb); | ||
905 | |||
906 | /* Acknowledge TUSB interrupts. Clear only non-reserved bits */ | ||
907 | musb_writel(tbase, TUSB_INT_SRC_CLEAR, | ||
908 | int_src & ~TUSB_INT_MASK_RESERVED_BITS); | ||
909 | |||
910 | musb_platform_try_idle(musb, idle_timeout); | ||
911 | |||
912 | musb_writel(tbase, TUSB_INT_MASK, int_mask); | ||
913 | spin_unlock_irqrestore(&musb->lock, flags); | ||
914 | |||
915 | return IRQ_HANDLED; | ||
916 | } | ||
917 | |||
918 | static int dma_off; | ||
919 | |||
920 | /* | ||
921 | * Enables TUSB6010. Caller must take care of locking. | ||
922 | * REVISIT: | ||
923 | * - Check what is unnecessary in MGC_HdrcStart() | ||
924 | */ | ||
925 | void musb_platform_enable(struct musb *musb) | ||
926 | { | ||
927 | void __iomem *tbase = musb->ctrl_base; | ||
928 | |||
929 | /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF. | ||
930 | * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */ | ||
931 | musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF); | ||
932 | |||
933 | /* Setup TUSB interrupt, disable DMA and GPIO interrupts */ | ||
934 | musb_writel(tbase, TUSB_USBIP_INT_MASK, 0); | ||
935 | musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); | ||
936 | musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); | ||
937 | |||
938 | /* Clear all subsystem interrupts */ | ||
939 | musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff); | ||
940 | musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff); | ||
941 | musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff); | ||
942 | |||
943 | /* Acknowledge pending interrupt(s) */ | ||
944 | musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS); | ||
945 | |||
946 | /* Only 0 clock cycles for minimum interrupt de-assertion time and | ||
947 |  * interrupt polarity active low seem to work reliably here */ | ||
948 | musb_writel(tbase, TUSB_INT_CTRL_CONF, | ||
949 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); | ||
950 | |||
951 | set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); | ||
952 | |||
953 | /* maybe force into the Default-A OTG state machine */ | ||
954 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) | ||
955 | & TUSB_DEV_OTG_STAT_ID_STATUS)) | ||
956 | musb_writel(tbase, TUSB_INT_SRC_SET, | ||
957 | TUSB_INT_SRC_ID_STATUS_CHNG); | ||
958 | |||
959 | if (is_dma_capable() && dma_off) | ||
960 | printk(KERN_WARNING "%s %s: dma not reactivated\n", | ||
961 | __FILE__, __func__); | ||
962 | else | ||
963 | dma_off = 1; | ||
964 | } | ||
965 | |||
966 | /* | ||
967 | * Disables TUSB6010. Caller must take care of locking. | ||
968 | */ | ||
969 | void musb_platform_disable(struct musb *musb) | ||
970 | { | ||
971 | void __iomem *tbase = musb->ctrl_base; | ||
972 | |||
973 | /* FIXME stop DMA, IRQs, timers, ... */ | ||
974 | |||
975 | /* disable all IRQs */ | ||
976 | musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); | ||
977 | musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff); | ||
978 | musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); | ||
979 | musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); | ||
980 | |||
981 | del_timer(&musb_idle_timer); | ||
982 | |||
983 | if (is_dma_capable() && !dma_off) { | ||
984 | printk(KERN_WARNING "%s %s: dma still active\n", | ||
985 | __FILE__, __func__); | ||
986 | dma_off = 1; | ||
987 | } | ||
988 | } | ||
989 | |||
990 | /* | ||
991 | * Sets up TUSB6010 CPU interface specific signals and registers | ||
992 | * Note: Settings optimized for OMAP24xx | ||
993 | */ | ||
994 | static void __init tusb_setup_cpu_interface(struct musb *musb) | ||
995 | { | ||
996 | void __iomem *tbase = musb->ctrl_base; | ||
997 | |||
998 | /* | ||
999 | * Disable GPIO[5:0] pullups (used as output DMA requests) | ||
1000 | * Don't disable GPIO[7:6] as they are needed for wake-up. | ||
1001 | */ | ||
1002 | musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F); | ||
1003 | |||
1004 | /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */ | ||
1005 | musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF); | ||
1006 | |||
1007 | /* Turn GPIO[5:0] to DMAREQ[5:0] signals */ | ||
1008 | musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f)); | ||
1009 | |||
1010 | /* Burst size 16x16 bits, all six DMA requests enabled, DMA request | ||
1011 | * de-assertion time 2 system clocks p 62 */ | ||
1012 | musb_writel(tbase, TUSB_DMA_REQ_CONF, | ||
1013 | TUSB_DMA_REQ_CONF_BURST_SIZE(2) | | ||
1014 | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) | | ||
1015 | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); | ||
1016 | |||
1017 | /* Set 0 wait count for synchronous burst access */ | ||
1018 | musb_writel(tbase, TUSB_WAIT_COUNT, 1); | ||
1019 | } | ||
1020 | |||
1021 | static int __init tusb_start(struct musb *musb) | ||
1022 | { | ||
1023 | void __iomem *tbase = musb->ctrl_base; | ||
1024 | int ret = 0; | ||
1025 | unsigned long flags; | ||
1026 | u32 reg; | ||
1027 | |||
1028 | if (musb->board_set_power) | ||
1029 | ret = musb->board_set_power(1); | ||
1030 | if (ret != 0) { | ||
1031 | printk(KERN_ERR "tusb: Cannot enable TUSB6010\n"); | ||
1032 | return ret; | ||
1033 | } | ||
1034 | |||
1035 | spin_lock_irqsave(&musb->lock, flags); | ||
1036 | |||
1037 | if (musb_readl(tbase, TUSB_PROD_TEST_RESET) != | ||
1038 | TUSB_PROD_TEST_RESET_VAL) { | ||
1039 | printk(KERN_ERR "tusb: Unable to detect TUSB6010\n"); | ||
1040 | goto err; | ||
1041 | } | ||
1042 | |||
1043 | ret = tusb_print_revision(musb); | ||
1044 | if (ret < 2) { | ||
1045 | printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n", | ||
1046 | ret); | ||
1047 | goto err; | ||
1048 | } | ||
1049 | |||
1050 | /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when | ||
1051 | * NOR FLASH interface is used */ | ||
1052 | musb_writel(tbase, TUSB_VLYNQ_CTRL, 8); | ||
1053 | |||
1054 | /* Select PHY free running 60MHz as a system clock */ | ||
1055 | tusb_set_clock_source(musb, 1); | ||
1056 | |||
1057 | /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for | ||
1058 | * power saving, enable VBus detect and session end comparators, | ||
1059 | * enable IDpullup, enable VBus charging */ | ||
1060 | musb_writel(tbase, TUSB_PRCM_MNGMT, | ||
1061 | TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) | | ||
1062 | TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN | | ||
1063 | TUSB_PRCM_MNGMT_OTG_SESS_END_EN | | ||
1064 | TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN | | ||
1065 | TUSB_PRCM_MNGMT_OTG_ID_PULLUP); | ||
1066 | tusb_setup_cpu_interface(musb); | ||
1067 | |||
1068 | /* simplify: always sense/pullup ID pins, as if in OTG mode */ | ||
1069 | reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); | ||
1070 | reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
1071 | musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg); | ||
1072 | |||
1073 | reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL); | ||
1074 | reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; | ||
1075 | musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg); | ||
1076 | |||
1077 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1078 | |||
1079 | return 0; | ||
1080 | |||
1081 | err: | ||
1082 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1083 | |||
1084 | if (musb->board_set_power) | ||
1085 | musb->board_set_power(0); | ||
1086 | |||
1087 | return -ENODEV; | ||
1088 | } | ||
1089 | |||
1090 | int __init musb_platform_init(struct musb *musb) | ||
1091 | { | ||
1092 | struct platform_device *pdev; | ||
1093 | struct resource *mem; | ||
1094 | void __iomem *sync; | ||
1095 | int ret; | ||
1096 | |||
1097 | pdev = to_platform_device(musb->controller); | ||
1098 | |||
1099 | /* dma address for async dma */ | ||
1100 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1101 | musb->async = mem->start; | ||
1102 | |||
1103 | /* dma address for sync dma */ | ||
1104 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1105 | if (!mem) { | ||
1106 | pr_debug("no sync dma resource?\n"); | ||
1107 | return -ENODEV; | ||
1108 | } | ||
1109 | musb->sync = mem->start; | ||
1110 | |||
1111 | sync = ioremap(mem->start, mem->end - mem->start + 1); | ||
1112 | if (!sync) { | ||
1113 | pr_debug("ioremap for sync failed\n"); | ||
1114 | return -ENOMEM; | ||
1115 | } | ||
1116 | musb->sync_va = sync; | ||
1117 | |||
1118 | /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400, | ||
1119 | * FIFOs at 0x600, TUSB at 0x800 | ||
1120 | */ | ||
1121 | musb->mregs += TUSB_BASE_OFFSET; | ||
1122 | |||
1123 | ret = tusb_start(musb); | ||
1124 | if (ret) { | ||
1125 | printk(KERN_ERR "Could not start tusb6010 (%d)\n", | ||
1126 | ret); | ||
1127 | return -ENODEV; | ||
1128 | } | ||
1129 | musb->isr = tusb_interrupt; | ||
1130 | |||
1131 | if (is_host_enabled(musb)) | ||
1132 | musb->board_set_vbus = tusb_source_power; | ||
1133 | if (is_peripheral_enabled(musb)) | ||
1134 | musb->xceiv.set_power = tusb_draw_power; | ||
1135 | |||
1136 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); | ||
1137 | |||
1138 | return ret; | ||
1139 | } | ||
1140 | |||
1141 | int musb_platform_exit(struct musb *musb) | ||
1142 | { | ||
1143 | del_timer_sync(&musb_idle_timer); | ||
1144 | |||
1145 | if (musb->board_set_power) | ||
1146 | musb->board_set_power(0); | ||
1147 | |||
1148 | iounmap(musb->sync_va); | ||
1149 | |||
1150 | return 0; | ||
1151 | } | ||
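The PHY OTG control writes in tusb_start() above always OR TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 in bits 31:24) into the value, which suggests these registers ignore writes that do not carry that key. A minimal sketch of the read-modify-write pattern, using only the musb_readl()/musb_writel() accessors and macros introduced by this patch; the helper name is hypothetical and not part of the driver:

/*
 * Sketch only: set bits in a TUSB PHY OTG control register while always
 * supplying the 0xa5 write-protect key, as tusb_start() does inline.
 */
static inline void tusb_phy_otg_ctrl_set(void __iomem *tbase, u32 offset, u32 bits)
{
	u32 reg = musb_readl(tbase, offset);

	reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | bits;
	musb_writel(tbase, offset, reg);
}

With such a helper, the two back-to-back sequences in tusb_start() would collapse to one call each for TUSB_PHY_OTG_CTRL_ENABLE and TUSB_PHY_OTG_CTRL.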
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h new file mode 100644 index 000000000000..ab8c96286ce6 --- /dev/null +++ b/drivers/usb/musb/tusb6010.h | |||
@@ -0,0 +1,233 @@ | |||
1 | /* | ||
2 | * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller | ||
3 | * | ||
4 | * Copyright (C) 2006 Nokia Corporation | ||
5 | * Jarkko Nikula <jarkko.nikula@nokia.com> | ||
6 | * Tony Lindgren <tony@atomide.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef __TUSB6010_H__ | ||
14 | #define __TUSB6010_H__ | ||
15 | |||
16 | extern u8 tusb_get_revision(struct musb *musb); | ||
17 | |||
18 | #ifdef CONFIG_USB_TUSB6010 | ||
19 | #define musb_in_tusb() 1 | ||
20 | #else | ||
21 | #define musb_in_tusb() 0 | ||
22 | #endif | ||
23 | |||
24 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | ||
25 | #define tusb_dma_omap() 1 | ||
26 | #else | ||
27 | #define tusb_dma_omap() 0 | ||
28 | #endif | ||
29 | |||
30 | /* VLYNQ control register. 32-bit at offset 0x000 */ | ||
31 | #define TUSB_VLYNQ_CTRL 0x004 | ||
32 | |||
33 | /* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */ | ||
34 | #define TUSB_BASE_OFFSET 0x400 | ||
35 | |||
36 | /* FIFO registers 32-bit at offset 0x600 */ | ||
37 | #define TUSB_FIFO_BASE 0x600 | ||
38 | |||
39 | /* Device System & Control registers. 32-bit at offset 0x800 */ | ||
40 | #define TUSB_SYS_REG_BASE 0x800 | ||
41 | |||
42 | #define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000) | ||
43 | #define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16) | ||
44 | #define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15) | ||
45 | #define TUSB_DEV_CONF_SOFT_ID (1 << 1) | ||
46 | #define TUSB_DEV_CONF_ID_SEL (1 << 0) | ||
47 | |||
48 | #define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004) | ||
49 | #define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008) | ||
50 | #define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24) | ||
51 | #define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23) | ||
52 | #define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19) | ||
53 | #define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18) | ||
54 | #define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17) | ||
55 | #define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16) | ||
56 | #define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15) | ||
57 | #define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14) | ||
58 | #define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13) | ||
59 | #define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12) | ||
60 | #define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11) | ||
61 | #define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10) | ||
62 | #define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9) | ||
63 | #define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7) | ||
64 | #define TUSB_PHY_OTG_CTRL_PD (1 << 6) | ||
65 | #define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5) | ||
66 | #define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4) | ||
67 | #define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3) | ||
68 | #define TUSB_PHY_OTG_CTRL_RESET (1 << 2) | ||
69 | #define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1) | ||
70 | #define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0) | ||
71 | |||
72 | /* OTG status register */ | ||
73 | #define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c) | ||
74 | #define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8) | ||
75 | #define TUSB_DEV_OTG_STAT_SESS_END (1 << 7) | ||
76 | #define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6) | ||
77 | #define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5) | ||
78 | #define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4) | ||
79 | #define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3) | ||
80 | #define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2) | ||
81 | #define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0) | ||
82 | #define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1) | ||
83 | #define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0) | ||
84 | |||
85 | #define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010) | ||
86 | # define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31) | ||
87 | # define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff) | ||
88 | #define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014) | ||
89 | |||
90 | /* PRCM configuration register */ | ||
91 | #define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018) | ||
92 | #define TUSB_PRCM_CONF_SFW_CPEN (1 << 24) | ||
93 | #define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16) | ||
94 | |||
95 | /* PRCM management register */ | ||
96 | #define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c) | ||
97 | #define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25) | ||
98 | #define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24) | ||
99 | #define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20) | ||
100 | #define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19) | ||
101 | #define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18) | ||
102 | #define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17) | ||
103 | #define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10) | ||
104 | #define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9) | ||
105 | #define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8) | ||
106 | #define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4) | ||
107 | #define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3) | ||
108 | #define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2) | ||
109 | #define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1) | ||
110 | #define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0) | ||
111 | |||
112 | /* Wake-up source clear and mask registers */ | ||
113 | #define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020) | ||
114 | #define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028) | ||
115 | #define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c) | ||
116 | #define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13) | ||
117 | #define TUSB_PRCM_WGPIO_7 (1 << 12) | ||
118 | #define TUSB_PRCM_WGPIO_6 (1 << 11) | ||
119 | #define TUSB_PRCM_WGPIO_5 (1 << 10) | ||
120 | #define TUSB_PRCM_WGPIO_4 (1 << 9) | ||
121 | #define TUSB_PRCM_WGPIO_3 (1 << 8) | ||
122 | #define TUSB_PRCM_WGPIO_2 (1 << 7) | ||
123 | #define TUSB_PRCM_WGPIO_1 (1 << 6) | ||
124 | #define TUSB_PRCM_WGPIO_0 (1 << 5) | ||
125 | #define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */ | ||
126 | #define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */ | ||
127 | #define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */ | ||
128 | #define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */ | ||
129 | #define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */ | ||
130 | |||
131 | #define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030) | ||
132 | #define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034) | ||
133 | #define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038) | ||
134 | #define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c) | ||
135 | #define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040) | ||
136 | #define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044) | ||
137 | #define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048) | ||
138 | #define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c) | ||
139 | #define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050) | ||
140 | #define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054) | ||
141 | #define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058) | ||
142 | #define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c) | ||
143 | #define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060) | ||
144 | #define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064) | ||
145 | #define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068) | ||
146 | #define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c) | ||
147 | |||
148 | /* NOR flash interrupt source registers */ | ||
149 | #define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070) | ||
150 | #define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074) | ||
151 | #define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078) | ||
152 | #define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c) | ||
153 | #define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24) | ||
154 | #define TUSB_INT_SRC_USB_IP_CORE (1 << 17) | ||
155 | #define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16) | ||
156 | #define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15) | ||
157 | #define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14) | ||
158 | #define TUSB_INT_SRC_DEV_WAKEUP (1 << 13) | ||
159 | #define TUSB_INT_SRC_DEV_READY (1 << 12) | ||
160 | #define TUSB_INT_SRC_USB_IP_TX (1 << 9) | ||
161 | #define TUSB_INT_SRC_USB_IP_RX (1 << 8) | ||
162 | #define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7) | ||
163 | #define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6) | ||
164 | #define TUSB_INT_SRC_USB_IP_DISCON (1 << 5) | ||
165 | #define TUSB_INT_SRC_USB_IP_CONN (1 << 4) | ||
166 | #define TUSB_INT_SRC_USB_IP_SOF (1 << 3) | ||
167 | #define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2) | ||
168 | #define TUSB_INT_SRC_USB_IP_RESUME (1 << 1) | ||
169 | #define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0) | ||
170 | |||
171 | /* NOR flash interrupt registers reserved bits. Must be written as 0 */ | ||
172 | #define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17) | ||
173 | #define TUSB_INT_MASK_RESERVED_13 (1 << 13) | ||
174 | #define TUSB_INT_MASK_RESERVED_8 (0xf << 8) | ||
175 | #define TUSB_INT_SRC_RESERVED_26 (0x1f << 26) | ||
176 | #define TUSB_INT_SRC_RESERVED_18 (0x3f << 18) | ||
177 | #define TUSB_INT_SRC_RESERVED_10 (0x03 << 10) | ||
178 | |||
179 | /* Reserved bits for NOR flash interrupt mask and clear register */ | ||
180 | #define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \ | ||
181 | TUSB_INT_MASK_RESERVED_13 | \ | ||
182 | TUSB_INT_MASK_RESERVED_8) | ||
183 | |||
184 | /* Reserved bits for NOR flash interrupt status register */ | ||
185 | #define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \ | ||
186 | TUSB_INT_SRC_RESERVED_18 | \ | ||
187 | TUSB_INT_SRC_RESERVED_10) | ||
188 | |||
189 | #define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080) | ||
190 | #define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084) | ||
191 | #define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100) | ||
192 | #define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104) | ||
193 | #define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108) | ||
194 | #define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148) | ||
195 | |||
196 | /* Offsets from each ep base register */ | ||
197 | #define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */ | ||
198 | #define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */ | ||
199 | #define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188 | ||
200 | |||
201 | #define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8) | ||
202 | #define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4) | ||
203 | #define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8) | ||
204 | |||
205 | /* Device System & Control register bitfields */ | ||
206 | #define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18) | ||
207 | #define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17) | ||
208 | #define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16) | ||
209 | #define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24) | ||
210 | #define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26) | ||
211 | #define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20) | ||
212 | #define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16) | ||
213 | #define TUSB_EP0_CONFIG_SW_EN (1 << 8) | ||
214 | #define TUSB_EP0_CONFIG_DIR_TX (1 << 7) | ||
215 | #define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f) | ||
216 | #define TUSB_EP_CONFIG_SW_EN (1 << 31) | ||
217 | #define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff) | ||
218 | #define TUSB_PROD_TEST_RESET_VAL 0xa596 | ||
219 | #define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20) | ||
220 | |||
221 | #define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8) | ||
222 | #define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc) | ||
223 | #define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf) | ||
224 | #define TUSB_DIDR1_HI_REV_20 0 | ||
225 | #define TUSB_DIDR1_HI_REV_30 1 | ||
226 | #define TUSB_DIDR1_HI_REV_31 2 | ||
227 | |||
228 | #define TUSB_REV_10 0x10 | ||
229 | #define TUSB_REV_20 0x20 | ||
230 | #define TUSB_REV_30 0x30 | ||
231 | #define TUSB_REV_31 0x31 | ||
232 | |||
233 | #endif /* __TUSB6010_H__ */ | ||
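The parameterized macros above mask their argument and shift it into the field position, so full register values are built by ORing fields together. A short illustration, assuming only the definitions in this header, of the TUSB_DMA_REQ_CONF value that tusb_setup_cpu_interface() and dma_controller_create() program:

/*
 * Illustrative only:
 *   TUSB_DMA_REQ_CONF_BURST_SIZE(2)    = (2 & 3)       << 26 = 0x08000000
 *   TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) = (0x3f & 0x3f) << 20 = 0x03f00000
 *   TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2) = (2 & 0xf)     << 16 = 0x00020000
 *                                                  ORed value = 0x0bf20000
 */
u32 dma_req_conf = TUSB_DMA_REQ_CONF_BURST_SIZE(2)
		 | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
		 | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2);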
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c new file mode 100644 index 000000000000..52f7f29cebda --- /dev/null +++ b/drivers/usb/musb/tusb6010_omap.c | |||
@@ -0,0 +1,719 @@ | |||
1 | /* | ||
2 | * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface | ||
3 | * | ||
4 | * Copyright (C) 2006 Nokia Corporation | ||
5 | * Tony Lindgren <tony@atomide.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/usb.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <asm/arch/dma.h> | ||
19 | #include <asm/arch/mux.h> | ||
20 | |||
21 | #include "musb_core.h" | ||
22 | |||
23 | #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) | ||
24 | |||
25 | #define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */ | ||
26 | |||
27 | struct tusb_omap_dma_ch { | ||
28 | struct musb *musb; | ||
29 | void __iomem *tbase; | ||
30 | unsigned long phys_offset; | ||
31 | int epnum; | ||
32 | u8 tx; | ||
33 | struct musb_hw_ep *hw_ep; | ||
34 | |||
35 | int ch; | ||
36 | s8 dmareq; | ||
37 | s8 sync_dev; | ||
38 | |||
39 | struct tusb_omap_dma *tusb_dma; | ||
40 | |||
41 | void __iomem *dma_addr; | ||
42 | |||
43 | u32 len; | ||
44 | u16 packet_sz; | ||
45 | u16 transfer_packet_sz; | ||
46 | u32 transfer_len; | ||
47 | u32 completed_len; | ||
48 | }; | ||
49 | |||
50 | struct tusb_omap_dma { | ||
51 | struct dma_controller controller; | ||
52 | struct musb *musb; | ||
53 | void __iomem *tbase; | ||
54 | |||
55 | int ch; | ||
56 | s8 dmareq; | ||
57 | s8 sync_dev; | ||
58 | unsigned multichannel:1; | ||
59 | }; | ||
60 | |||
61 | static int tusb_omap_dma_start(struct dma_controller *c) | ||
62 | { | ||
63 | struct tusb_omap_dma *tusb_dma; | ||
64 | |||
65 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
66 | |||
67 | /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int tusb_omap_dma_stop(struct dma_controller *c) | ||
73 | { | ||
74 | struct tusb_omap_dma *tusb_dma; | ||
75 | |||
76 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
77 | |||
78 | /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Allocate dmareq0 to the current channel unless it's already taken | ||
85 | */ | ||
86 | static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) | ||
87 | { | ||
88 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
89 | |||
90 | if (reg != 0) { | ||
91 | DBG(3, "ep%i dmareq0 is busy for ep%i\n", | ||
92 | chdat->epnum, reg & 0xf); | ||
93 | return -EAGAIN; | ||
94 | } | ||
95 | |||
96 | if (chdat->tx) | ||
97 | reg = (1 << 4) | chdat->epnum; | ||
98 | else | ||
99 | reg = chdat->epnum; | ||
100 | |||
101 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); | ||
102 | |||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat) | ||
107 | { | ||
108 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
109 | |||
110 | if ((reg & 0xf) != chdat->epnum) { | ||
111 | printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n", | ||
112 | chdat->epnum, reg & 0xf); | ||
113 | return; | ||
114 | } | ||
115 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0); | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in | ||
120 | * musb_gadget.c. | ||
121 | */ | ||
122 | static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) | ||
123 | { | ||
124 | struct dma_channel *channel = (struct dma_channel *)data; | ||
125 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
126 | struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; | ||
127 | struct musb *musb = chdat->musb; | ||
128 | struct musb_hw_ep *hw_ep = chdat->hw_ep; | ||
129 | void __iomem *ep_conf = hw_ep->conf; | ||
130 | void __iomem *mbase = musb->mregs; | ||
131 | unsigned long remaining, flags, pio; | ||
132 | int ch; | ||
133 | |||
134 | spin_lock_irqsave(&musb->lock, flags); | ||
135 | |||
136 | if (tusb_dma->multichannel) | ||
137 | ch = chdat->ch; | ||
138 | else | ||
139 | ch = tusb_dma->ch; | ||
140 | |||
141 | if (ch_status != OMAP_DMA_BLOCK_IRQ) | ||
142 | printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status); | ||
143 | |||
144 | DBG(3, "ep%i %s dma callback ch: %i status: %x\n", | ||
145 | chdat->epnum, chdat->tx ? "tx" : "rx", | ||
146 | ch, ch_status); | ||
147 | |||
148 | if (chdat->tx) | ||
149 | remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); | ||
150 | else | ||
151 | remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); | ||
152 | |||
153 | remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining); | ||
154 | |||
155 | /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */ | ||
156 | if (unlikely(remaining > chdat->transfer_len)) { | ||
157 | DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", | ||
158 | chdat->tx ? "tx" : "rx", chdat->ch, | ||
159 | remaining); | ||
160 | remaining = 0; | ||
161 | } | ||
162 | |||
163 | channel->actual_len = chdat->transfer_len - remaining; | ||
164 | pio = chdat->len - channel->actual_len; | ||
165 | |||
166 | DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len); | ||
167 | |||
168 | /* Transfer remaining 1 - 31 bytes */ | ||
169 | if (pio > 0 && pio < 32) { | ||
170 | u8 *buf; | ||
171 | |||
172 | DBG(3, "Using PIO for remaining %lu bytes\n", pio); | ||
173 | buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; | ||
174 | if (chdat->tx) { | ||
175 | dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), | ||
176 | chdat->transfer_len, DMA_TO_DEVICE); | ||
177 | musb_write_fifo(hw_ep, pio, buf); | ||
178 | } else { | ||
179 | musb_read_fifo(hw_ep, pio, buf); | ||
180 | dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), | ||
181 | chdat->transfer_len, DMA_FROM_DEVICE); | ||
182 | } | ||
183 | channel->actual_len += pio; | ||
184 | } | ||
185 | |||
186 | if (!tusb_dma->multichannel) | ||
187 | tusb_omap_free_shared_dmareq(chdat); | ||
188 | |||
189 | channel->status = MUSB_DMA_STATUS_FREE; | ||
190 | |||
191 | /* Handle only RX callbacks here. TX callbacks must be handled based | ||
192 | * on the TUSB DMA status interrupt. | ||
193 | * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback | ||
194 | * interrupt for RX and TX. | ||
195 | */ | ||
196 | if (!chdat->tx) | ||
197 | musb_dma_completion(musb, chdat->epnum, chdat->tx); | ||
198 | |||
199 | /* We must terminate short tx transfers manually by setting TXPKTRDY. | ||
200 | * REVISIT: This same problem may occur with other MUSB dma as well. | ||
201 | * Easy to test with g_ether by pinging the MUSB board with ping -s54. | ||
202 | */ | ||
203 | if ((chdat->transfer_len < chdat->packet_sz) | ||
204 | || (chdat->transfer_len % chdat->packet_sz != 0)) { | ||
205 | u16 csr; | ||
206 | |||
207 | if (chdat->tx) { | ||
208 | DBG(3, "terminating short tx packet\n"); | ||
209 | musb_ep_select(mbase, chdat->epnum); | ||
210 | csr = musb_readw(hw_ep->regs, MUSB_TXCSR); | ||
211 | csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY | ||
212 | | MUSB_TXCSR_P_WZC_BITS; | ||
213 | musb_writew(hw_ep->regs, MUSB_TXCSR, csr); | ||
214 | } | ||
215 | } | ||
216 | |||
217 | spin_unlock_irqrestore(&musb->lock, flags); | ||
218 | } | ||
219 | |||
220 | static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, | ||
221 | u8 rndis_mode, dma_addr_t dma_addr, u32 len) | ||
222 | { | ||
223 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
224 | struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; | ||
225 | struct musb *musb = chdat->musb; | ||
226 | struct musb_hw_ep *hw_ep = chdat->hw_ep; | ||
227 | void __iomem *mbase = musb->mregs; | ||
228 | void __iomem *ep_conf = hw_ep->conf; | ||
229 | dma_addr_t fifo = hw_ep->fifo_sync; | ||
230 | struct omap_dma_channel_params dma_params; | ||
231 | u32 dma_remaining; | ||
232 | int src_burst, dst_burst; | ||
233 | u16 csr; | ||
234 | int ch; | ||
235 | s8 dmareq; | ||
236 | s8 sync_dev; | ||
237 | |||
238 | if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz)) | ||
239 | return false; | ||
240 | |||
241 | /* | ||
242 | * HW issue #10: Async dma will eventually corrupt the XFR_SIZE | ||
243 | * register which will cause missed DMA interrupt. We could try to | ||
244 | * use a timer for the callback, but it is unsafe as the XFR_SIZE | ||
245 | * register is corrupt, and we won't know if the DMA worked. | ||
246 | */ | ||
247 | if (dma_addr & 0x2) | ||
248 | return false; | ||
249 | |||
250 | /* | ||
251 | * Because of HW issue #10, it seems like mixing sync DMA and async | ||
252 | * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before | ||
253 | * using the channel for DMA. | ||
254 | */ | ||
255 | if (chdat->tx) | ||
256 | dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); | ||
257 | else | ||
258 | dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); | ||
259 | |||
260 | dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); | ||
261 | if (dma_remaining) { | ||
262 | DBG(2, "Busy %s dma ch%i, not using: %08x\n", | ||
263 | chdat->tx ? "tx" : "rx", chdat->ch, | ||
264 | dma_remaining); | ||
265 | return false; | ||
266 | } | ||
267 | |||
268 | chdat->transfer_len = len & ~0x1f; | ||
269 | |||
270 | if (len < packet_sz) | ||
271 | chdat->transfer_packet_sz = chdat->transfer_len; | ||
272 | else | ||
273 | chdat->transfer_packet_sz = packet_sz; | ||
274 | |||
275 | if (tusb_dma->multichannel) { | ||
276 | ch = chdat->ch; | ||
277 | dmareq = chdat->dmareq; | ||
278 | sync_dev = chdat->sync_dev; | ||
279 | } else { | ||
280 | if (tusb_omap_use_shared_dmareq(chdat) != 0) { | ||
281 | DBG(3, "could not get dma for ep%i\n", chdat->epnum); | ||
282 | return false; | ||
283 | } | ||
284 | if (tusb_dma->ch < 0) { | ||
285 | /* REVISIT: This should get blocked earlier, happens | ||
286 | * with MSC ErrorRecoveryTest | ||
287 | */ | ||
288 | WARN_ON(1); | ||
289 | return false; | ||
290 | } | ||
291 | |||
292 | ch = tusb_dma->ch; | ||
293 | dmareq = tusb_dma->dmareq; | ||
294 | sync_dev = tusb_dma->sync_dev; | ||
295 | omap_set_dma_callback(ch, tusb_omap_dma_cb, channel); | ||
296 | } | ||
297 | |||
298 | chdat->packet_sz = packet_sz; | ||
299 | chdat->len = len; | ||
300 | channel->actual_len = 0; | ||
301 | chdat->dma_addr = (void __iomem *)dma_addr; | ||
302 | channel->status = MUSB_DMA_STATUS_BUSY; | ||
303 | |||
304 | /* Since we're recycling dma areas, we need to clean or invalidate */ | ||
305 | if (chdat->tx) | ||
306 | dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE); | ||
307 | else | ||
308 | dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE); | ||
309 | |||
310 | /* Use 16-bit transfer if dma_addr is not 32-bit aligned */ | ||
311 | if ((dma_addr & 0x3) == 0) { | ||
312 | dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; | ||
313 | dma_params.elem_count = 8; /* Elements in frame */ | ||
314 | } else { | ||
315 | dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; | ||
316 | dma_params.elem_count = 16; /* Elements in frame */ | ||
317 | fifo = hw_ep->fifo_async; | ||
318 | } | ||
319 | |||
320 | dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ | ||
321 | |||
322 | DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", | ||
323 | chdat->epnum, chdat->tx ? "tx" : "rx", | ||
324 | ch, dma_addr, chdat->transfer_len, len, | ||
325 | chdat->transfer_packet_sz, packet_sz); | ||
326 | |||
327 | /* | ||
328 | * Prepare omap DMA for transfer | ||
329 | */ | ||
330 | if (chdat->tx) { | ||
331 | dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; | ||
332 | dma_params.src_start = (unsigned long)dma_addr; | ||
333 | dma_params.src_ei = 0; | ||
334 | dma_params.src_fi = 0; | ||
335 | |||
336 | dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX; | ||
337 | dma_params.dst_start = (unsigned long)fifo; | ||
338 | dma_params.dst_ei = 1; | ||
339 | dma_params.dst_fi = -31; /* Loop 32 byte window */ | ||
340 | |||
341 | dma_params.trigger = sync_dev; | ||
342 | dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; | ||
343 | dma_params.src_or_dst_synch = 0; /* Dest sync */ | ||
344 | |||
345 | src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */ | ||
346 | dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */ | ||
347 | } else { | ||
348 | dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; | ||
349 | dma_params.src_start = (unsigned long)fifo; | ||
350 | dma_params.src_ei = 1; | ||
351 | dma_params.src_fi = -31; /* Loop 32 byte window */ | ||
352 | |||
353 | dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; | ||
354 | dma_params.dst_start = (unsigned long)dma_addr; | ||
355 | dma_params.dst_ei = 0; | ||
356 | dma_params.dst_fi = 0; | ||
357 | |||
358 | dma_params.trigger = sync_dev; | ||
359 | dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; | ||
360 | dma_params.src_or_dst_synch = 1; /* Source sync */ | ||
361 | |||
362 | src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */ | ||
363 | dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ | ||
364 | } | ||
365 | |||
366 | DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", | ||
367 | chdat->epnum, chdat->tx ? "tx" : "rx", | ||
368 | (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, | ||
369 | ((dma_addr & 0x3) == 0) ? "sync" : "async", | ||
370 | dma_params.src_start, dma_params.dst_start); | ||
371 | |||
372 | omap_set_dma_params(ch, &dma_params); | ||
373 | omap_set_dma_src_burst_mode(ch, src_burst); | ||
374 | omap_set_dma_dest_burst_mode(ch, dst_burst); | ||
375 | omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED); | ||
376 | |||
377 | /* | ||
378 | * Prepare MUSB for DMA transfer | ||
379 | */ | ||
380 | if (chdat->tx) { | ||
381 | musb_ep_select(mbase, chdat->epnum); | ||
382 | csr = musb_readw(hw_ep->regs, MUSB_TXCSR); | ||
383 | csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB | ||
384 | | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); | ||
385 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | ||
386 | musb_writew(hw_ep->regs, MUSB_TXCSR, csr); | ||
387 | } else { | ||
388 | musb_ep_select(mbase, chdat->epnum); | ||
389 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
390 | csr |= MUSB_RXCSR_DMAENAB; | ||
391 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); | ||
392 | musb_writew(hw_ep->regs, MUSB_RXCSR, | ||
393 | csr | MUSB_RXCSR_P_WZC_BITS); | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Start DMA transfer | ||
398 | */ | ||
399 | omap_start_dma(ch); | ||
400 | |||
401 | if (chdat->tx) { | ||
402 | /* Send transfer_packet_sz packets at a time */ | ||
403 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, | ||
404 | chdat->transfer_packet_sz); | ||
405 | |||
406 | musb_writel(ep_conf, TUSB_EP_TX_OFFSET, | ||
407 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); | ||
408 | } else { | ||
409 | /* Receive transfer_packet_sz packets at a time */ | ||
410 | musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, | ||
411 | chdat->transfer_packet_sz << 16); | ||
412 | |||
413 | musb_writel(ep_conf, TUSB_EP_RX_OFFSET, | ||
414 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); | ||
415 | } | ||
416 | |||
417 | return true; | ||
418 | } | ||
419 | |||
420 | static int tusb_omap_dma_abort(struct dma_channel *channel) | ||
421 | { | ||
422 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
423 | struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; | ||
424 | |||
425 | if (!tusb_dma->multichannel) { | ||
426 | if (tusb_dma->ch >= 0) { | ||
427 | omap_stop_dma(tusb_dma->ch); | ||
428 | omap_free_dma(tusb_dma->ch); | ||
429 | tusb_dma->ch = -1; | ||
430 | } | ||
431 | |||
432 | tusb_dma->dmareq = -1; | ||
433 | tusb_dma->sync_dev = -1; | ||
434 | } | ||
435 | |||
436 | channel->status = MUSB_DMA_STATUS_FREE; | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat) | ||
442 | { | ||
443 | u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
444 | int i, dmareq_nr = -1; | ||
445 | |||
446 | const int sync_dev[6] = { | ||
447 | OMAP24XX_DMA_EXT_DMAREQ0, | ||
448 | OMAP24XX_DMA_EXT_DMAREQ1, | ||
449 | OMAP242X_DMA_EXT_DMAREQ2, | ||
450 | OMAP242X_DMA_EXT_DMAREQ3, | ||
451 | OMAP242X_DMA_EXT_DMAREQ4, | ||
452 | OMAP242X_DMA_EXT_DMAREQ5, | ||
453 | }; | ||
454 | |||
455 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
456 | int cur = (reg & (0xf << (i * 5))) >> (i * 5); | ||
457 | if (cur == 0) { | ||
458 | dmareq_nr = i; | ||
459 | break; | ||
460 | } | ||
461 | } | ||
462 | |||
463 | if (dmareq_nr == -1) | ||
464 | return -EAGAIN; | ||
465 | |||
466 | reg |= (chdat->epnum << (dmareq_nr * 5)); | ||
467 | if (chdat->tx) | ||
468 | reg |= ((1 << 4) << (dmareq_nr * 5)); | ||
469 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); | ||
470 | |||
471 | chdat->dmareq = dmareq_nr; | ||
472 | chdat->sync_dev = sync_dev[chdat->dmareq]; | ||
473 | |||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat) | ||
478 | { | ||
479 | u32 reg; | ||
480 | |||
481 | if (!chdat || chdat->dmareq < 0) | ||
482 | return; | ||
483 | |||
484 | reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); | ||
485 | reg &= ~(0x1f << (chdat->dmareq * 5)); | ||
486 | musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); | ||
487 | |||
488 | chdat->dmareq = -1; | ||
489 | chdat->sync_dev = -1; | ||
490 | } | ||
491 | |||
492 | static struct dma_channel *dma_channel_pool[MAX_DMAREQ]; | ||
493 | |||
494 | static struct dma_channel * | ||
495 | tusb_omap_dma_allocate(struct dma_controller *c, | ||
496 | struct musb_hw_ep *hw_ep, | ||
497 | u8 tx) | ||
498 | { | ||
499 | int ret, i; | ||
500 | const char *dev_name; | ||
501 | struct tusb_omap_dma *tusb_dma; | ||
502 | struct musb *musb; | ||
503 | void __iomem *tbase; | ||
504 | struct dma_channel *channel = NULL; | ||
505 | struct tusb_omap_dma_ch *chdat = NULL; | ||
506 | u32 reg; | ||
507 | |||
508 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
509 | musb = tusb_dma->musb; | ||
510 | tbase = musb->ctrl_base; | ||
511 | |||
512 | reg = musb_readl(tbase, TUSB_DMA_INT_MASK); | ||
513 | if (tx) | ||
514 | reg &= ~(1 << hw_ep->epnum); | ||
515 | else | ||
516 | reg &= ~(1 << (hw_ep->epnum + 15)); | ||
517 | musb_writel(tbase, TUSB_DMA_INT_MASK, reg); | ||
518 | |||
519 | /* REVISIT: Why does dmareq5 not work? */ | ||
520 | if (hw_ep->epnum == 0) { | ||
521 | DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); | ||
522 | return NULL; | ||
523 | } | ||
524 | |||
525 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
526 | struct dma_channel *ch = dma_channel_pool[i]; | ||
527 | if (ch->status == MUSB_DMA_STATUS_UNKNOWN) { | ||
528 | ch->status = MUSB_DMA_STATUS_FREE; | ||
529 | channel = ch; | ||
530 | chdat = ch->private_data; | ||
531 | break; | ||
532 | } | ||
533 | } | ||
534 | |||
535 | if (!channel) | ||
536 | return NULL; | ||
537 | |||
538 | if (tx) { | ||
539 | chdat->tx = 1; | ||
540 | dev_name = "TUSB transmit"; | ||
541 | } else { | ||
542 | chdat->tx = 0; | ||
543 | dev_name = "TUSB receive"; | ||
544 | } | ||
545 | |||
546 | chdat->musb = tusb_dma->musb; | ||
547 | chdat->tbase = tusb_dma->tbase; | ||
548 | chdat->hw_ep = hw_ep; | ||
549 | chdat->epnum = hw_ep->epnum; | ||
550 | chdat->dmareq = -1; | ||
551 | chdat->completed_len = 0; | ||
552 | chdat->tusb_dma = tusb_dma; | ||
553 | |||
554 | channel->max_len = 0x7fffffff; | ||
555 | channel->desired_mode = 0; | ||
556 | channel->actual_len = 0; | ||
557 | |||
558 | if (tusb_dma->multichannel) { | ||
559 | ret = tusb_omap_dma_allocate_dmareq(chdat); | ||
560 | if (ret != 0) | ||
561 | goto free_dmareq; | ||
562 | |||
563 | ret = omap_request_dma(chdat->sync_dev, dev_name, | ||
564 | tusb_omap_dma_cb, channel, &chdat->ch); | ||
565 | if (ret != 0) | ||
566 | goto free_dmareq; | ||
567 | } else if (tusb_dma->ch == -1) { | ||
568 | tusb_dma->dmareq = 0; | ||
569 | tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0; | ||
570 | |||
571 | /* Callback data gets set later in the shared dmareq case */ | ||
572 | ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared", | ||
573 | tusb_omap_dma_cb, NULL, &tusb_dma->ch); | ||
574 | if (ret != 0) | ||
575 | goto free_dmareq; | ||
576 | |||
577 | chdat->dmareq = -1; | ||
578 | chdat->ch = -1; | ||
579 | } | ||
580 | |||
581 | DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", | ||
582 | chdat->epnum, | ||
583 | chdat->tx ? "tx" : "rx", | ||
584 | chdat->ch >= 0 ? "dedicated" : "shared", | ||
585 | chdat->ch >= 0 ? chdat->ch : tusb_dma->ch, | ||
586 | chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq, | ||
587 | chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev); | ||
588 | |||
589 | return channel; | ||
590 | |||
591 | free_dmareq: | ||
592 | tusb_omap_dma_free_dmareq(chdat); | ||
593 | |||
594 | DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum); | ||
595 | channel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
596 | |||
597 | return NULL; | ||
598 | } | ||
599 | |||
600 | static void tusb_omap_dma_release(struct dma_channel *channel) | ||
601 | { | ||
602 | struct tusb_omap_dma_ch *chdat = to_chdat(channel); | ||
603 | struct musb *musb = chdat->musb; | ||
604 | void __iomem *tbase = musb->ctrl_base; | ||
605 | u32 reg; | ||
606 | |||
607 | DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch); | ||
608 | |||
609 | reg = musb_readl(tbase, TUSB_DMA_INT_MASK); | ||
610 | if (chdat->tx) | ||
611 | reg |= (1 << chdat->epnum); | ||
612 | else | ||
613 | reg |= (1 << (chdat->epnum + 15)); | ||
614 | musb_writel(tbase, TUSB_DMA_INT_MASK, reg); | ||
615 | |||
616 | reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR); | ||
617 | if (chdat->tx) | ||
618 | reg |= (1 << chdat->epnum); | ||
619 | else | ||
620 | reg |= (1 << (chdat->epnum + 15)); | ||
621 | musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg); | ||
622 | |||
623 | channel->status = MUSB_DMA_STATUS_UNKNOWN; | ||
624 | |||
625 | if (chdat->ch >= 0) { | ||
626 | omap_stop_dma(chdat->ch); | ||
627 | omap_free_dma(chdat->ch); | ||
628 | chdat->ch = -1; | ||
629 | } | ||
630 | |||
631 | if (chdat->dmareq >= 0) | ||
632 | tusb_omap_dma_free_dmareq(chdat); | ||
633 | |||
634 | channel = NULL; | ||
635 | } | ||
636 | |||
637 | void dma_controller_destroy(struct dma_controller *c) | ||
638 | { | ||
639 | struct tusb_omap_dma *tusb_dma; | ||
640 | int i; | ||
641 | |||
642 | tusb_dma = container_of(c, struct tusb_omap_dma, controller); | ||
643 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
644 | struct dma_channel *ch = dma_channel_pool[i]; | ||
645 | if (ch) { | ||
646 | kfree(ch->private_data); | ||
647 | kfree(ch); | ||
648 | } | ||
649 | } | ||
650 | |||
651 | if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0) | ||
652 | omap_free_dma(tusb_dma->ch); | ||
653 | |||
654 | kfree(tusb_dma); | ||
655 | } | ||
656 | |||
657 | struct dma_controller *__init | ||
658 | dma_controller_create(struct musb *musb, void __iomem *base) | ||
659 | { | ||
660 | void __iomem *tbase = musb->ctrl_base; | ||
661 | struct tusb_omap_dma *tusb_dma; | ||
662 | int i; | ||
663 | |||
664 | /* REVISIT: Get dmareq lines used from board-*.c */ | ||
665 | |||
666 | musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff); | ||
667 | musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0); | ||
668 | |||
669 | musb_writel(tbase, TUSB_DMA_REQ_CONF, | ||
670 | TUSB_DMA_REQ_CONF_BURST_SIZE(2) | ||
671 | | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) | ||
672 | | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); | ||
673 | |||
674 | tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL); | ||
675 | if (!tusb_dma) | ||
676 | goto cleanup; | ||
677 | |||
678 | tusb_dma->musb = musb; | ||
679 | tusb_dma->tbase = musb->ctrl_base; | ||
680 | |||
681 | tusb_dma->ch = -1; | ||
682 | tusb_dma->dmareq = -1; | ||
683 | tusb_dma->sync_dev = -1; | ||
684 | |||
685 | tusb_dma->controller.start = tusb_omap_dma_start; | ||
686 | tusb_dma->controller.stop = tusb_omap_dma_stop; | ||
687 | tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate; | ||
688 | tusb_dma->controller.channel_release = tusb_omap_dma_release; | ||
689 | tusb_dma->controller.channel_program = tusb_omap_dma_program; | ||
690 | tusb_dma->controller.channel_abort = tusb_omap_dma_abort; | ||
691 | |||
692 | if (tusb_get_revision(musb) >= TUSB_REV_30) | ||
693 | tusb_dma->multichannel = 1; | ||
694 | |||
695 | for (i = 0; i < MAX_DMAREQ; i++) { | ||
696 | struct dma_channel *ch; | ||
697 | struct tusb_omap_dma_ch *chdat; | ||
698 | |||
699 | ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL); | ||
700 | if (!ch) | ||
701 | goto cleanup; | ||
702 | |||
703 | dma_channel_pool[i] = ch; | ||
704 | |||
705 | chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL); | ||
706 | if (!chdat) | ||
707 | goto cleanup; | ||
708 | |||
709 | ch->status = MUSB_DMA_STATUS_UNKNOWN; | ||
710 | ch->private_data = chdat; | ||
711 | } | ||
712 | |||
713 | return &tusb_dma->controller; | ||
714 | |||
715 | cleanup: | ||
716 | dma_controller_destroy(&tusb_dma->controller); | ||
717 | |||
718 | return NULL; | ||
719 | } | ||
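tusb_omap_dma_allocate_dmareq() and tusb_omap_dma_free_dmareq() treat TUSB_DMA_EP_MAP as six 5-bit slots, one per external DMA request line: bits 3:0 of a slot carry the endpoint number, bit 4 marks TX, and a slot whose endpoint field reads back as zero is considered free. A worked example of that encoding with illustrative values (not taken from the patch):

/*
 * Slot i occupies bits [i*5 + 4 : i*5] of TUSB_DMA_EP_MAP.
 */
static u32 ep_map_example(void)
{
	u32 reg = 0x00000003;               /* dmareq0 already maps RX ep3 */

	reg |= ((1 << 4) | 2) << (1 * 5);   /* claim dmareq1 for TX ep2 */
	return reg;                         /* now 0x00000243 */
}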
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 8878c1767fc8..70338f4ec918 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig | |||
@@ -499,9 +499,10 @@ config USB_SERIAL_SAFE_PADDED | |||
499 | config USB_SERIAL_SIERRAWIRELESS | 499 | config USB_SERIAL_SIERRAWIRELESS |
500 | tristate "USB Sierra Wireless Driver" | 500 | tristate "USB Sierra Wireless Driver" |
501 | help | 501 | help |
502 | Say M here if you want to use a Sierra Wireless device (if | 502 | Say M here if you want to use Sierra Wireless devices. |
503 | using an PC 5220 or AC580 please use the Airprime driver | 503 | |
504 | instead). | 504 | Many devices have a feature known as TRU-Install. For those devices |
505 | to work properly, the USB Storage Sierra feature must be enabled. | ||
505 | 506 | ||
506 | To compile this driver as a module, choose M here: the | 507 | To compile this driver as a module, choose M here: the |
507 | module will be called sierra. | 508 | module will be called sierra. |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 838717250145..984f6eff4c47 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -563,6 +563,7 @@ static struct usb_device_id id_table_combined [] = { | |||
563 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 563 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
564 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | 564 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, |
565 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 565 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
566 | { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, | ||
566 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 567 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
567 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 568 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
568 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, | 569 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, |
@@ -637,6 +638,7 @@ static struct usb_device_id id_table_combined [] = { | |||
637 | { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, | 638 | { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, |
638 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, | 639 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, |
639 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, | 640 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, |
641 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, | ||
640 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, | 642 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, |
641 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, | 643 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, |
642 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, | 644 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, |
@@ -646,6 +648,10 @@ static struct usb_device_id id_table_combined [] = { | |||
646 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 648 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
647 | { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), | 649 | { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), |
648 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 650 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
651 | { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID), | ||
652 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
653 | { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID), | ||
654 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
649 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, | 655 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, |
650 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, | 656 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, |
651 | { }, /* Optional parameter entry */ | 657 | { }, /* Optional parameter entry */ |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index a577ea44dcf9..382265bba969 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -524,7 +524,9 @@ | |||
524 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ | 524 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ |
525 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ | 525 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ |
526 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ | 526 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ |
527 | #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ | ||
527 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ | 528 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ |
529 | #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ | ||
528 | 530 | ||
529 | /* | 531 | /* |
530 | * Definitions for ID TECH (www.idt-net.com) devices | 532 | * Definitions for ID TECH (www.idt-net.com) devices |
@@ -815,6 +817,11 @@ | |||
815 | #define OLIMEX_VID 0x15BA | 817 | #define OLIMEX_VID 0x15BA |
816 | #define OLIMEX_ARM_USB_OCD_PID 0x0003 | 818 | #define OLIMEX_ARM_USB_OCD_PID 0x0003 |
817 | 819 | ||
820 | /* Luminary Micro Stellaris Boards, VID = FTDI_VID */ | ||
821 | /* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */ | ||
822 | #define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8 | ||
823 | #define LMI_LM3S_EVAL_BOARD_PID 0xbcd9 | ||
824 | |||
818 | /* www.elsterelectricity.com Elster Unicom III Optical Probe */ | 825 | /* www.elsterelectricity.com Elster Unicom III Optical Probe */ |
819 | #define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ | 826 | #define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ |
820 | 827 | ||
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 2e663f1afd5e..d95382088075 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c | |||
@@ -38,8 +38,6 @@ | |||
38 | #include <linux/usb.h> | 38 | #include <linux/usb.h> |
39 | #include <linux/usb/serial.h> | 39 | #include <linux/usb/serial.h> |
40 | 40 | ||
41 | #include <linux/version.h> | ||
42 | |||
43 | /* the mode to be set when the port is opened */ | 41 | /* the mode to be set when the port is opened */ |
44 | static int initial_mode = 1; | 42 | static int initial_mode = 1; |
45 | 43 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e4eca95f2b0f..9f9cd36455f4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -173,6 +173,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
173 | #define KYOCERA_PRODUCT_KPC680 0x180a | 173 | #define KYOCERA_PRODUCT_KPC680 0x180a |
174 | 174 | ||
175 | #define ANYDATA_VENDOR_ID 0x16d5 | 175 | #define ANYDATA_VENDOR_ID 0x16d5 |
176 | #define ANYDATA_PRODUCT_ADU_620UW 0x6202 | ||
176 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 | 177 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 |
177 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 | 178 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 |
178 | 179 | ||
@@ -186,6 +187,23 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
186 | #define BANDRICH_VENDOR_ID 0x1A8D | 187 | #define BANDRICH_VENDOR_ID 0x1A8D |
187 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 188 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
188 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 189 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
190 | #define BANDRICH_PRODUCT_1004 0x1004 | ||
191 | #define BANDRICH_PRODUCT_1005 0x1005 | ||
192 | #define BANDRICH_PRODUCT_1006 0x1006 | ||
193 | #define BANDRICH_PRODUCT_1007 0x1007 | ||
194 | #define BANDRICH_PRODUCT_1008 0x1008 | ||
195 | #define BANDRICH_PRODUCT_1009 0x1009 | ||
196 | #define BANDRICH_PRODUCT_100A 0x100a | ||
197 | |||
198 | #define BANDRICH_PRODUCT_100B 0x100b | ||
199 | #define BANDRICH_PRODUCT_100C 0x100c | ||
200 | #define BANDRICH_PRODUCT_100D 0x100d | ||
201 | #define BANDRICH_PRODUCT_100E 0x100e | ||
202 | |||
203 | #define BANDRICH_PRODUCT_100F 0x100f | ||
204 | #define BANDRICH_PRODUCT_1010 0x1010 | ||
205 | #define BANDRICH_PRODUCT_1011 0x1011 | ||
206 | #define BANDRICH_PRODUCT_1012 0x1012 | ||
189 | 207 | ||
190 | #define AMOI_VENDOR_ID 0x1614 | 208 | #define AMOI_VENDOR_ID 0x1614 |
191 | #define AMOI_PRODUCT_9508 0x0800 | 209 | #define AMOI_PRODUCT_9508 0x0800 |
@@ -197,6 +215,10 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
197 | #define TELIT_VENDOR_ID 0x1bc7 | 215 | #define TELIT_VENDOR_ID 0x1bc7 |
198 | #define TELIT_PRODUCT_UC864E 0x1003 | 216 | #define TELIT_PRODUCT_UC864E 0x1003 |
199 | 217 | ||
218 | /* ZTE PRODUCTS */ | ||
219 | #define ZTE_VENDOR_ID 0x19d2 | ||
220 | #define ZTE_PRODUCT_MF628 0x0015 | ||
221 | |||
200 | static struct usb_device_id option_ids[] = { | 222 | static struct usb_device_id option_ids[] = { |
201 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 223 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
202 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 224 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -297,17 +319,34 @@ static struct usb_device_id option_ids[] = { | |||
297 | { USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ | 319 | { USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ |
298 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, | 320 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, |
299 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 321 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
322 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, | ||
300 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | 323 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, |
301 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, | 324 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, |
302 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, | 325 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, |
303 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 326 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
304 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 327 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
328 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) }, | ||
329 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) }, | ||
330 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) }, | ||
331 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) }, | ||
332 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) }, | ||
333 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) }, | ||
334 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) }, | ||
335 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) }, | ||
336 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) }, | ||
337 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) }, | ||
338 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) }, | ||
339 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) }, | ||
340 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) }, | ||
341 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) }, | ||
342 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) }, | ||
305 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, | 343 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, |
306 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 344 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
307 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ | 345 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ |
308 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 346 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
309 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 347 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
310 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, | 348 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
349 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | ||
311 | { } /* Terminating entry */ | 350 | { } /* Terminating entry */ |
312 | }; | 351 | }; |
313 | MODULE_DEVICE_TABLE(usb, option_ids); | 352 | MODULE_DEVICE_TABLE(usb, option_ids); |
@@ -346,11 +385,7 @@ static struct usb_serial_driver option_1port_device = { | |||
346 | .read_int_callback = option_instat_callback, | 385 | .read_int_callback = option_instat_callback, |
347 | }; | 386 | }; |
348 | 387 | ||
349 | #ifdef CONFIG_USB_DEBUG | ||
350 | static int debug; | 388 | static int debug; |
351 | #else | ||
352 | #define debug 0 | ||
353 | #endif | ||
354 | 389 | ||
355 | /* per port private data */ | 390 | /* per port private data */ |
356 | 391 | ||
@@ -954,8 +989,5 @@ MODULE_DESCRIPTION(DRIVER_DESC); | |||
954 | MODULE_VERSION(DRIVER_VERSION); | 989 | MODULE_VERSION(DRIVER_VERSION); |
955 | MODULE_LICENSE("GPL"); | 990 | MODULE_LICENSE("GPL"); |
956 | 991 | ||
957 | #ifdef CONFIG_USB_DEBUG | ||
958 | module_param(debug, bool, S_IRUGO | S_IWUSR); | 992 | module_param(debug, bool, S_IRUGO | S_IWUSR); |
959 | MODULE_PARM_DESC(debug, "Debug messages"); | 993 | MODULE_PARM_DESC(debug, "Debug messages"); |
960 | #endif | ||
961 | |||
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 2c9c446ad625..1ede1441cb1b 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -90,7 +90,6 @@ static struct usb_device_id id_table [] = { | |||
90 | { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, | 90 | { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, |
91 | { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, | 91 | { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, |
92 | { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, | 92 | { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, |
93 | { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) }, | ||
94 | { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, | 93 | { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, |
95 | { } /* Terminating entry */ | 94 | { } /* Terminating entry */ |
96 | }; | 95 | }; |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 6ac3bbcf7a22..a3bd039c78e9 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -107,10 +107,6 @@ | |||
107 | #define COREGA_VENDOR_ID 0x07aa | 107 | #define COREGA_VENDOR_ID 0x07aa |
108 | #define COREGA_PRODUCT_ID 0x002a | 108 | #define COREGA_PRODUCT_ID 0x002a |
109 | 109 | ||
110 | /* HL HL-340 (ID: 4348:5523) */ | ||
111 | #define HL340_VENDOR_ID 0x4348 | ||
112 | #define HL340_PRODUCT_ID 0x5523 | ||
113 | |||
114 | /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ | 110 | /* Y.C. Cable U.S.A., Inc - USB to RS-232 */ |
115 | #define YCCABLE_VENDOR_ID 0x05ad | 111 | #define YCCABLE_VENDOR_ID 0x05ad |
116 | #define YCCABLE_PRODUCT_ID 0x0fba | 112 | #define YCCABLE_PRODUCT_ID 0x0fba |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 2f6f1523ec56..706033753adb 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -14,7 +14,7 @@ | |||
14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> | 14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define DRIVER_VERSION "v.1.2.9c" | 17 | #define DRIVER_VERSION "v.1.2.13a" |
18 | #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" | 18 | #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" |
19 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" | 19 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" |
20 | 20 | ||
@@ -31,6 +31,7 @@ | |||
31 | #define SWIMS_USB_REQUEST_SetPower 0x00 | 31 | #define SWIMS_USB_REQUEST_SetPower 0x00 |
32 | #define SWIMS_USB_REQUEST_SetNmea 0x07 | 32 | #define SWIMS_USB_REQUEST_SetNmea 0x07 |
33 | #define SWIMS_USB_REQUEST_SetMode 0x0B | 33 | #define SWIMS_USB_REQUEST_SetMode 0x0B |
34 | #define SWIMS_USB_REQUEST_GetSwocInfo 0x0A | ||
34 | #define SWIMS_SET_MODE_Modem 0x0001 | 35 | #define SWIMS_SET_MODE_Modem 0x0001 |
35 | 36 | ||
36 | /* per port private data */ | 37 | /* per port private data */ |
@@ -40,18 +41,11 @@ | |||
40 | 41 | ||
41 | static int debug; | 42 | static int debug; |
42 | static int nmea; | 43 | static int nmea; |
43 | static int truinstall = 1; | ||
44 | |||
45 | enum devicetype { | ||
46 | DEVICE_3_PORT = 0, | ||
47 | DEVICE_1_PORT = 1, | ||
48 | DEVICE_INSTALLER = 2, | ||
49 | }; | ||
50 | 44 | ||
51 | static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) | 45 | static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) |
52 | { | 46 | { |
53 | int result; | 47 | int result; |
54 | dev_dbg(&udev->dev, "%s", "SET POWER STATE\n"); | 48 | dev_dbg(&udev->dev, "%s", __func__); |
55 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 49 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
56 | SWIMS_USB_REQUEST_SetPower, /* __u8 request */ | 50 | SWIMS_USB_REQUEST_SetPower, /* __u8 request */ |
57 | USB_TYPE_VENDOR, /* __u8 request type */ | 51 | USB_TYPE_VENDOR, /* __u8 request type */ |
@@ -63,25 +57,10 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) | |||
63 | return result; | 57 | return result; |
64 | } | 58 | } |
65 | 59 | ||
66 | static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) | ||
67 | { | ||
68 | int result; | ||
69 | dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n"); | ||
70 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
71 | SWIMS_USB_REQUEST_SetMode, /* __u8 request */ | ||
72 | USB_TYPE_VENDOR, /* __u8 request type */ | ||
73 | eSWocMode, /* __u16 value */ | ||
74 | 0x0000, /* __u16 index */ | ||
75 | NULL, /* void *data */ | ||
76 | 0, /* __u16 size */ | ||
77 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
78 | return result; | ||
79 | } | ||
80 | |||
81 | static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) | 60 | static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) |
82 | { | 61 | { |
83 | int result; | 62 | int result; |
84 | dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n"); | 63 | dev_dbg(&udev->dev, "%s", __func__); |
85 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 64 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
86 | SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ | 65 | SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ |
87 | USB_TYPE_VENDOR, /* __u8 request type */ | 66 | USB_TYPE_VENDOR, /* __u8 request type */ |
@@ -97,6 +76,7 @@ static int sierra_calc_num_ports(struct usb_serial *serial) | |||
97 | { | 76 | { |
98 | int result; | 77 | int result; |
99 | int *num_ports = usb_get_serial_data(serial); | 78 | int *num_ports = usb_get_serial_data(serial); |
79 | dev_dbg(&serial->dev->dev, "%s", __func__); | ||
100 | 80 | ||
101 | result = *num_ports; | 81 | result = *num_ports; |
102 | 82 | ||
@@ -110,22 +90,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial) | |||
110 | 90 | ||
111 | static int sierra_calc_interface(struct usb_serial *serial) | 91 | static int sierra_calc_interface(struct usb_serial *serial) |
112 | { | 92 | { |
113 | int interface; | 93 | int interface; |
114 | struct usb_interface *p_interface; | 94 | struct usb_interface *p_interface; |
115 | struct usb_host_interface *p_host_interface; | 95 | struct usb_host_interface *p_host_interface; |
96 | dev_dbg(&serial->dev->dev, "%s", __func__); | ||
116 | 97 | ||
117 | /* Get the interface structure pointer from the serial struct */ | 98 | /* Get the interface structure pointer from the serial struct */ |
118 | p_interface = serial->interface; | 99 | p_interface = serial->interface; |
119 | 100 | ||
120 | /* Get a pointer to the host interface structure */ | 101 | /* Get a pointer to the host interface structure */ |
121 | p_host_interface = p_interface->cur_altsetting; | 102 | p_host_interface = p_interface->cur_altsetting; |
122 | 103 | ||
123 | /* read the interface descriptor for this active altsetting | 104 | /* read the interface descriptor for this active altsetting |
124 | * to find out the interface number we are on | 105 | * to find out the interface number we are on |
125 | */ | 106 | */ |
126 | interface = p_host_interface->desc.bInterfaceNumber; | 107 | interface = p_host_interface->desc.bInterfaceNumber; |
127 | 108 | ||
128 | return interface; | 109 | return interface; |
129 | } | 110 | } |
130 | 111 | ||
131 | static int sierra_probe(struct usb_serial *serial, | 112 | static int sierra_probe(struct usb_serial *serial, |
@@ -135,43 +116,40 @@ static int sierra_probe(struct usb_serial *serial, | |||
135 | struct usb_device *udev; | 116 | struct usb_device *udev; |
136 | int *num_ports; | 117 | int *num_ports; |
137 | u8 ifnum; | 118 | u8 ifnum; |
119 | u8 numendpoints; | ||
120 | |||
121 | dev_dbg(&serial->dev->dev, "%s", __func__); | ||
138 | 122 | ||
139 | num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL); | 123 | num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL); |
140 | if (!num_ports) | 124 | if (!num_ports) |
141 | return -ENOMEM; | 125 | return -ENOMEM; |
142 | 126 | ||
143 | ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; | 127 | ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; |
128 | numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints; | ||
144 | udev = serial->dev; | 129 | udev = serial->dev; |
145 | 130 | ||
146 | /* Figure out the interface number from the serial structure */ | 131 | /* Figure out the interface number from the serial structure */ |
147 | ifnum = sierra_calc_interface(serial); | 132 | ifnum = sierra_calc_interface(serial); |
148 | |||
149 | /* | ||
150 | * If this interface supports more than 1 alternate | ||
151 | * select the 2nd one | ||
152 | */ | ||
153 | if (serial->interface->num_altsetting == 2) { | ||
154 | dev_dbg(&udev->dev, | ||
155 | "Selecting alt setting for interface %d\n", | ||
156 | ifnum); | ||
157 | 133 | ||
158 | /* We know the alternate setting is 1 for the MC8785 */ | 134 | /* |
159 | usb_set_interface(udev, ifnum, 1); | 135 | * If this interface supports more than 1 alternate |
160 | } | 136 | * select the 2nd one |
137 | */ | ||
138 | if (serial->interface->num_altsetting == 2) { | ||
139 | dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n", | ||
140 | ifnum); | ||
141 | /* We know the alternate setting is 1 for the MC8785 */ | ||
142 | usb_set_interface(udev, ifnum, 1); | ||
143 | } | ||
161 | 144 | ||
162 | /* Check if in installer mode */ | 145 | /* Dummy interface present on some SKUs should be ignored */ |
163 | if (truinstall && id->driver_info == DEVICE_INSTALLER) { | 146 | if (ifnum == 0x99) |
164 | dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n"); | ||
165 | result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); | ||
166 | /* Don't bind to the device when in installer mode */ | ||
167 | kfree(num_ports); | ||
168 | return -EIO; | ||
169 | } else if (id->driver_info == DEVICE_1_PORT) | ||
170 | *num_ports = 1; | ||
171 | else if (ifnum == 0x99) | ||
172 | *num_ports = 0; | 147 | *num_ports = 0; |
148 | else if (numendpoints <= 3) | ||
149 | *num_ports = 1; | ||
173 | else | 150 | else |
174 | *num_ports = 3; | 151 | *num_ports = (numendpoints-1)/2; |
152 | |||
175 | /* | 153 | /* |
176 | * save off our num_ports info so that we can use it in the | 154 | * save off our num_ports info so that we can use it in the |
177 | * calc_num_ports callback | 155 | * calc_num_ports callback |
@@ -187,40 +165,50 @@ static struct usb_device_id id_table [] = { | |||
187 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ | 165 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ |
188 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ | 166 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ |
189 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ | 167 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ |
168 | { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ | ||
190 | { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ | 169 | { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ |
191 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ | 170 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ |
192 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ | 171 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ |
193 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ | 172 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ |
194 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */ | 173 | /* Sierra Wireless C597 */ |
174 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, | ||
175 | /* Sierra Wireless Device */ | ||
176 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, | ||
177 | { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */ | ||
195 | 178 | ||
196 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ | 179 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ |
197 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ | 180 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ |
198 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ | 181 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ |
199 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ | 182 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ |
200 | { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */ | 183 | { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */ |
201 | { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ | 184 | { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ |
202 | { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ | 185 | { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ |
203 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ | 186 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ |
204 | { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ | 187 | { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ |
205 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ | 188 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ |
206 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ | 189 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ |
207 | { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/ | 190 | { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ |
191 | { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */ | ||
192 | { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */ | ||
193 | { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */ | ||
208 | { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ | 194 | { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ |
209 | { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ | 195 | { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ |
210 | { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ | 196 | { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ |
211 | { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ | 197 | { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ |
212 | { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ | 198 | { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ |
213 | { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ | 199 | { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ |
214 | { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ | 200 | { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ |
215 | { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ | 201 | { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ |
216 | 202 | /* Sierra Wireless C885 */ | |
217 | { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */ | 203 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, |
218 | { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */ | 204 | /* Sierra Wireless Device */ |
219 | 205 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, | |
220 | { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ | 206 | /* Sierra Wireless Device */ |
221 | { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ | 207 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, |
208 | |||
209 | { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ | ||
210 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ | ||
222 | 211 | ||
223 | { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER}, | ||
224 | { } | 212 | { } |
225 | }; | 213 | }; |
226 | MODULE_DEVICE_TABLE(usb, id_table); | 214 | MODULE_DEVICE_TABLE(usb, id_table); |
@@ -268,13 +256,19 @@ static int sierra_send_setup(struct tty_struct *tty, | |||
268 | if (portdata->rts_state) | 256 | if (portdata->rts_state) |
269 | val |= 0x02; | 257 | val |= 0x02; |
270 | 258 | ||
271 | /* Determine which port is targeted */ | 259 | /* If composite device then properly report interface */ |
272 | if (port->bulk_out_endpointAddress == 2) | 260 | if (serial->num_ports == 1) |
273 | interface = 0; | 261 | interface = sierra_calc_interface(serial); |
274 | else if (port->bulk_out_endpointAddress == 4) | 262 | |
275 | interface = 1; | 263 | /* Otherwise we need to do non-composite mapping */ |
276 | else if (port->bulk_out_endpointAddress == 5) | 264 | else { |
277 | interface = 2; | 265 | if (port->bulk_out_endpointAddress == 2) |
266 | interface = 0; | ||
267 | else if (port->bulk_out_endpointAddress == 4) | ||
268 | interface = 1; | ||
269 | else if (port->bulk_out_endpointAddress == 5) | ||
270 | interface = 2; | ||
271 | } | ||
278 | 272 | ||
279 | return usb_control_msg(serial->dev, | 273 | return usb_control_msg(serial->dev, |
280 | usb_rcvctrlpipe(serial->dev, 0), | 274 | usb_rcvctrlpipe(serial->dev, 0), |
@@ -713,7 +707,7 @@ static void sierra_shutdown(struct usb_serial *serial) | |||
713 | static struct usb_serial_driver sierra_device = { | 707 | static struct usb_serial_driver sierra_device = { |
714 | .driver = { | 708 | .driver = { |
715 | .owner = THIS_MODULE, | 709 | .owner = THIS_MODULE, |
716 | .name = "sierra1", | 710 | .name = "sierra", |
717 | }, | 711 | }, |
718 | .description = "Sierra USB modem", | 712 | .description = "Sierra USB modem", |
719 | .id_table = id_table, | 713 | .id_table = id_table, |
@@ -769,14 +763,8 @@ MODULE_DESCRIPTION(DRIVER_DESC); | |||
769 | MODULE_VERSION(DRIVER_VERSION); | 763 | MODULE_VERSION(DRIVER_VERSION); |
770 | MODULE_LICENSE("GPL"); | 764 | MODULE_LICENSE("GPL"); |
771 | 765 | ||
772 | module_param(truinstall, bool, 0); | 766 | module_param(nmea, bool, S_IRUGO | S_IWUSR); |
773 | MODULE_PARM_DESC(truinstall, "TRU-Install support"); | ||
774 | |||
775 | module_param(nmea, bool, 0); | ||
776 | MODULE_PARM_DESC(nmea, "NMEA streaming"); | 767 | MODULE_PARM_DESC(nmea, "NMEA streaming"); |
777 | 768 | ||
778 | #ifdef CONFIG_USB_DEBUG | ||
779 | module_param(debug, bool, S_IRUGO | S_IWUSR); | 769 | module_param(debug, bool, S_IRUGO | S_IWUSR); |
780 | MODULE_PARM_DESC(debug, "Debug messages"); | 770 | MODULE_PARM_DESC(debug, "Debug messages"); |
781 | #endif | ||
782 | |||
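
The probe above drops the old DEVICE_1_PORT/DEVICE_3_PORT table annotations and instead derives the port count from the interface's endpoint count. A minimal standalone sketch of that heuristic (not part of the patch; the function name and the worked numbers are illustrative only):

	/* One interrupt endpoint plus one bulk IN/OUT pair per port. */
	static int example_sierra_port_count(u8 ifnum, u8 numendpoints)
	{
		if (ifnum == 0x99)		/* dummy interface on some SKUs */
			return 0;
		if (numendpoints <= 3)		/* e.g. 3 endpoints -> one port */
			return 1;
		return (numendpoints - 1) / 2;	/* e.g. 7 endpoints -> 3 ports */
	}
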
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 8c2d531eedea..b157c48e8b78 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial) | |||
122 | 122 | ||
123 | dbg("%s", __func__); | 123 | dbg("%s", __func__); |
124 | 124 | ||
125 | if (serial == NULL) | ||
126 | return; | ||
127 | |||
128 | for (i = 0; i < serial->num_ports; ++i) | 125 | for (i = 0; i < serial->num_ports; ++i) |
129 | serial_table[serial->minor + i] = NULL; | 126 | serial_table[serial->minor + i] = NULL; |
130 | } | 127 | } |
@@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref) | |||
142 | serial->type->shutdown(serial); | 139 | serial->type->shutdown(serial); |
143 | 140 | ||
144 | /* return the minor range that this device had */ | 141 | /* return the minor range that this device had */ |
145 | return_serial(serial); | 142 | if (serial->minor != SERIAL_TTY_NO_MINOR) |
143 | return_serial(serial); | ||
146 | 144 | ||
147 | for (i = 0; i < serial->num_ports; ++i) | 145 | for (i = 0; i < serial->num_ports; ++i) |
148 | serial->port[i]->port.count = 0; | 146 | serial->port[i]->port.count = 0; |
@@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, | |||
575 | serial->interface = interface; | 573 | serial->interface = interface; |
576 | kref_init(&serial->kref); | 574 | kref_init(&serial->kref); |
577 | mutex_init(&serial->disc_mutex); | 575 | mutex_init(&serial->disc_mutex); |
576 | serial->minor = SERIAL_TTY_NO_MINOR; | ||
578 | 577 | ||
579 | return serial; | 578 | return serial; |
580 | } | 579 | } |
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 3d9249632ae1..c76034672c18 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig | |||
@@ -146,6 +146,18 @@ config USB_STORAGE_KARMA | |||
146 | on the resulting scsi device node returns the Karma to normal | 146 | on the resulting scsi device node returns the Karma to normal |
147 | operation. | 147 | operation. |
148 | 148 | ||
149 | config USB_STORAGE_SIERRA | ||
150 | bool "Sierra Wireless TRU-Install Feature Support" | ||
151 | depends on USB_STORAGE | ||
152 | help | ||
153 | Say Y here to include additional code to support Sierra Wireless | ||
154 | products with the TRU-Install feature (e.g., AC597E, AC881U). | ||
155 | |||
156 | This code switches the Sierra Wireless device from being in | ||
157 | Mass Storage mode to Modem mode. It also has the ability to | ||
158 | support host software upgrades should full Linux support be added | ||
159 | to TRU-Install. | ||
160 | |||
149 | config USB_STORAGE_CYPRESS_ATACB | 161 | config USB_STORAGE_CYPRESS_ATACB |
150 | bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" | 162 | bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" |
151 | depends on USB_STORAGE | 163 | depends on USB_STORAGE |
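
The help text above describes a build-time option rather than a separate module: per the Makefile change that follows, sierra_ms.o is linked into usb-storage itself. A minimal .config fragment, assuming only the symbols shown in this hunk:

	CONFIG_USB_STORAGE=m
	CONFIG_USB_STORAGE_SIERRA=y
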
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile index 4c596c766c53..bc3415b475c9 100644 --- a/drivers/usb/storage/Makefile +++ b/drivers/usb/storage/Makefile | |||
@@ -21,6 +21,7 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o | |||
21 | usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o | 21 | usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o |
22 | usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o | 22 | usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o |
23 | usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o | 23 | usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o |
24 | usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o | ||
24 | usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o | 25 | usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o |
25 | 26 | ||
26 | usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ | 27 | usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ |
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c new file mode 100644 index 000000000000..4359a2cb42df --- /dev/null +++ b/drivers/usb/storage/sierra_ms.c | |||
@@ -0,0 +1,207 @@ | |||
1 | #include <scsi/scsi.h> | ||
2 | #include <scsi/scsi_host.h> | ||
3 | #include <scsi/scsi_cmnd.h> | ||
4 | #include <scsi/scsi_device.h> | ||
5 | #include <linux/usb.h> | ||
6 | |||
7 | #include "usb.h" | ||
8 | #include "transport.h" | ||
9 | #include "protocol.h" | ||
10 | #include "scsiglue.h" | ||
11 | #include "sierra_ms.h" | ||
12 | #include "debug.h" | ||
13 | |||
14 | #define SWIMS_USB_REQUEST_SetSwocMode 0x0B | ||
15 | #define SWIMS_USB_REQUEST_GetSwocInfo 0x0A | ||
16 | #define SWIMS_USB_INDEX_SetMode 0x0000 | ||
17 | #define SWIMS_SET_MODE_Modem 0x0001 | ||
18 | |||
19 | #define TRU_NORMAL 0x01 | ||
20 | #define TRU_FORCE_MS 0x02 | ||
21 | #define TRU_FORCE_MODEM 0x03 | ||
22 | |||
23 | static unsigned int swi_tru_install = 1; | ||
24 | module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR); | ||
25 | MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def)," | ||
26 | " 2=Force CD-Rom, 3=Force Modem)"); | ||
27 | |||
28 | struct swoc_info { | ||
29 | __u8 rev; | ||
30 | __u8 reserved[8]; | ||
31 | __u16 LinuxSKU; | ||
32 | __u16 LinuxVer; | ||
33 | __u8 reserved2[47]; | ||
34 | } __attribute__((__packed__)); | ||
35 | |||
36 | static bool containsFullLinuxPackage(struct swoc_info *swocInfo) | ||
37 | { | ||
38 | if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) || | ||
39 | (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF)) | ||
40 | return true; | ||
41 | else | ||
42 | return false; | ||
43 | } | ||
44 | |||
45 | static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) | ||
46 | { | ||
47 | int result; | ||
48 | US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n"); | ||
49 | result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
50 | SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */ | ||
51 | USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */ | ||
52 | eSWocMode, /* __u16 value */ | ||
53 | 0x0000, /* __u16 index */ | ||
54 | NULL, /* void *data */ | ||
55 | 0, /* __u16 size */ | ||
56 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
57 | return result; | ||
58 | } | ||
59 | |||
60 | |||
61 | static int sierra_get_swoc_info(struct usb_device *udev, | ||
62 | struct swoc_info *swocInfo) | ||
63 | { | ||
64 | int result; | ||
65 | |||
66 | US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n"); | ||
67 | |||
68 | result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
69 | SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */ | ||
70 | USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */ | ||
71 | 0, /* __u16 value */ | ||
72 | 0, /* __u16 index */ | ||
73 | (void *) swocInfo, /* void *data */ | ||
74 | sizeof(struct swoc_info), /* __u16 size */ | ||
75 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
76 | |||
77 | swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU); | ||
78 | swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer); | ||
79 | return result; | ||
80 | } | ||
81 | |||
82 | static void debug_swoc(struct swoc_info *swocInfo) | ||
83 | { | ||
84 | US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev); | ||
85 | US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU); | ||
86 | US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer); | ||
87 | } | ||
88 | |||
89 | |||
90 | static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, | ||
91 | char *buf) | ||
92 | { | ||
93 | struct swoc_info *swocInfo; | ||
94 | struct usb_interface *intf = to_usb_interface(dev); | ||
95 | struct usb_device *udev = interface_to_usbdev(intf); | ||
96 | int result; | ||
97 | if (swi_tru_install == TRU_FORCE_MS) { | ||
98 | result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n"); | ||
99 | } else { | ||
100 | swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL); | ||
101 | if (!swocInfo) { | ||
102 | US_DEBUGP("SWIMS: Allocation failure\n"); | ||
103 | snprintf(buf, PAGE_SIZE, "Error\n"); | ||
104 | return -ENOMEM; | ||
105 | } | ||
106 | result = sierra_get_swoc_info(udev, swocInfo); | ||
107 | if (result < 0) { | ||
108 | US_DEBUGP("SWIMS: failed SWoC query\n"); | ||
109 | kfree(swocInfo); | ||
110 | snprintf(buf, PAGE_SIZE, "Error\n"); | ||
111 | return -EIO; | ||
112 | } | ||
113 | debug_swoc(swocInfo); | ||
114 | result = snprintf(buf, PAGE_SIZE, | ||
115 | "REV=%02d SKU=%04X VER=%04X\n", | ||
116 | swocInfo->rev, | ||
117 | swocInfo->LinuxSKU, | ||
118 | swocInfo->LinuxVer); | ||
119 | kfree(swocInfo); | ||
120 | } | ||
121 | return result; | ||
122 | } | ||
123 | static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); | ||
124 | |||
125 | int sierra_ms_init(struct us_data *us) | ||
126 | { | ||
127 | int result, retries; | ||
128 | signed long delay_t; | ||
129 | struct swoc_info *swocInfo; | ||
130 | struct usb_device *udev; | ||
131 | struct Scsi_Host *sh; | ||
132 | struct scsi_device *sd; | ||
133 | |||
134 | delay_t = 2; | ||
135 | retries = 3; | ||
136 | result = 0; | ||
137 | udev = us->pusb_dev; | ||
138 | |||
139 | sh = us_to_host(us); | ||
140 | sd = scsi_get_host_dev(sh); | ||
141 | |||
142 | US_DEBUGP("SWIMS: sierra_ms_init called\n"); | ||
143 | |||
144 | /* Force Modem mode */ | ||
145 | if (swi_tru_install == TRU_FORCE_MODEM) { | ||
146 | US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n"); | ||
147 | result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); | ||
148 | if (result < 0) | ||
149 | US_DEBUGP("SWIMS: Failed to switch to modem mode.\n"); | ||
150 | return -EIO; | ||
151 | } | ||
152 | /* Force Mass Storage mode (keep CD-Rom) */ | ||
153 | else if (swi_tru_install == TRU_FORCE_MS) { | ||
154 | US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n"); | ||
155 | goto complete; | ||
156 | } | ||
157 | /* Normal TRU-Install Logic */ | ||
158 | else { | ||
159 | US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n"); | ||
160 | |||
161 | swocInfo = kmalloc(sizeof(struct swoc_info), | ||
162 | GFP_KERNEL); | ||
163 | if (!swocInfo) { | ||
164 | US_DEBUGP("SWIMS: %s", "Allocation failure\n"); | ||
165 | return -ENOMEM; | ||
166 | } | ||
167 | |||
168 | retries = 3; | ||
169 | do { | ||
170 | retries--; | ||
171 | result = sierra_get_swoc_info(udev, swocInfo); | ||
172 | if (result < 0) { | ||
173 | US_DEBUGP("SWIMS: %s", "Failed SWoC query\n"); | ||
174 | schedule_timeout_uninterruptible(2*HZ); | ||
175 | } | ||
176 | } while (retries && result < 0); | ||
177 | |||
178 | if (result < 0) { | ||
179 | US_DEBUGP("SWIMS: %s", | ||
180 | "Completely failed SWoC query\n"); | ||
181 | kfree(swocInfo); | ||
182 | return -EIO; | ||
183 | } | ||
184 | |||
185 | debug_swoc(swocInfo); | ||
186 | |||
187 | /* If there is no Linux software on the TRU-Install device | ||
188 | * then switch to modem mode | ||
189 | */ | ||
190 | if (!containsFullLinuxPackage(swocInfo)) { | ||
191 | US_DEBUGP("SWIMS: %s", | ||
192 | "Switching to Modem Mode\n"); | ||
193 | result = sierra_set_ms_mode(udev, | ||
194 | SWIMS_SET_MODE_Modem); | ||
195 | if (result < 0) | ||
196 | US_DEBUGP("SWIMS: Failed to switch modem\n"); | ||
197 | kfree(swocInfo); | ||
198 | return -EIO; | ||
199 | } | ||
200 | kfree(swocInfo); | ||
201 | } | ||
202 | complete: | ||
203 | result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst); | ||
204 | |||
205 | return USB_STOR_TRANSPORT_GOOD; | ||
206 | } | ||
207 | |||
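
Because sierra_ms.o is built into usb-storage, the swi_tru_install parameter defined above is exposed as a usb-storage module parameter. A hedged usage sketch (the command line is illustrative; the mode values come from the TRU_* defines above):

	# 1 = normal SWoC logic (default), 2 = force mass storage, 3 = force modem
	modprobe usb-storage swi_tru_install=3

At runtime the value should also be visible under /sys/module/usb_storage/parameters/, and sierra_ms_init() additionally creates a per-interface 'truinst' attribute that reports the SWoC revision, SKU and version.
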
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h new file mode 100644 index 000000000000..bb48634ac1fc --- /dev/null +++ b/drivers/usb/storage/sierra_ms.h | |||
@@ -0,0 +1,4 @@ | |||
1 | #ifndef _SIERRA_MS_H_ | ||
2 | #define _SIERRA_MS_H_ | ||
3 | extern int sierra_ms_init(struct us_data *us); | ||
4 | #endif | ||
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index fcbbfdb7b2b0..3523a0bfa0ff 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
@@ -1032,8 +1032,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us) | |||
1032 | 1032 | ||
1033 | /* try to compute the actual residue, based on how much data | 1033 | /* try to compute the actual residue, based on how much data |
1034 | * was really transferred and what the device tells us */ | 1034 | * was really transferred and what the device tells us */ |
1035 | if (residue) { | 1035 | if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) { |
1036 | if (!(us->fflags & US_FL_IGNORE_RESIDUE)) { | 1036 | |
1037 | /* Heuristically detect devices that generate bogus residues | ||
1038 | * by seeing what happens with INQUIRY and READ CAPACITY | ||
1039 | * commands. | ||
1040 | */ | ||
1041 | if (bcs->Status == US_BULK_STAT_OK && | ||
1042 | scsi_get_resid(srb) == 0 && | ||
1043 | ((srb->cmnd[0] == INQUIRY && | ||
1044 | transfer_length == 36) || | ||
1045 | (srb->cmnd[0] == READ_CAPACITY && | ||
1046 | transfer_length == 8))) { | ||
1047 | us->fflags |= US_FL_IGNORE_RESIDUE; | ||
1048 | |||
1049 | } else { | ||
1037 | residue = min(residue, transfer_length); | 1050 | residue = min(residue, transfer_length); |
1038 | scsi_set_resid(srb, max(scsi_get_resid(srb), | 1051 | scsi_set_resid(srb, max(scsi_get_resid(srb), |
1039 | (int) residue)); | 1052 | (int) residue)); |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 7ae69f55aa96..ba412e68d474 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -225,6 +225,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370, | |||
225 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 225 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
226 | US_FL_MAX_SECTORS_64 ), | 226 | US_FL_MAX_SECTORS_64 ), |
227 | 227 | ||
228 | /* Reported by Cedric Godin <cedric@belbone.be> */ | ||
229 | UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, | ||
230 | "Nokia", | ||
231 | "5300", | ||
232 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
233 | US_FL_FIX_CAPACITY ), | ||
234 | |||
228 | /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ | 235 | /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ |
229 | UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, | 236 | UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, |
230 | "SMSC", | 237 | "SMSC", |
@@ -356,14 +363,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200, | |||
356 | US_FL_FIX_CAPACITY), | 363 | US_FL_FIX_CAPACITY), |
357 | 364 | ||
358 | /* Reported by Emil Larsson <emil@swip.net> */ | 365 | /* Reported by Emil Larsson <emil@swip.net> */ |
359 | UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110, | 366 | UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111, |
360 | "NIKON", | 367 | "NIKON", |
361 | "NIKON DSC D80", | 368 | "NIKON DSC D80", |
362 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 369 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
363 | US_FL_FIX_CAPACITY), | 370 | US_FL_FIX_CAPACITY), |
364 | 371 | ||
365 | /* Reported by Ortwin Glueck <odi@odi.ch> */ | 372 | /* Reported by Ortwin Glueck <odi@odi.ch> */ |
366 | UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110, | 373 | UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111, |
367 | "NIKON", | 374 | "NIKON", |
368 | "NIKON DSC D40", | 375 | "NIKON DSC D40", |
369 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 376 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
@@ -1185,6 +1192,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff, | |||
1185 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1192 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1186 | US_FL_FIX_INQUIRY ), | 1193 | US_FL_FIX_INQUIRY ), |
1187 | 1194 | ||
1195 | /* Reported by Rauch Wolke <rauchwolke@gmx.net> */ | ||
1196 | UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, | ||
1197 | "Simple Tech/Datafab", | ||
1198 | "CF+SM Reader", | ||
1199 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1200 | US_FL_IGNORE_RESIDUE ), | ||
1201 | |||
1188 | /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant | 1202 | /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant |
1189 | * to the USB storage specification in two ways: | 1203 | * to the USB storage specification in two ways: |
1190 | * - They tell us they are using transport protocol CBI. In reality they | 1204 | * - They tell us they are using transport protocol CBI. In reality they |
@@ -1562,6 +1576,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, | |||
1562 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1576 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1563 | 0), | 1577 | 0), |
1564 | 1578 | ||
1579 | #ifdef CONFIG_USB_STORAGE_SIERRA | ||
1565 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> | 1580 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> |
1566 | * Entry is needed for the initializer function override, | 1581 | * Entry is needed for the initializer function override, |
1567 | * which instructs the device to load as a modem | 1582 | * which instructs the device to load as a modem |
@@ -1570,8 +1585,9 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, | |||
1570 | UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, | 1585 | UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, |
1571 | "Sierra Wireless", | 1586 | "Sierra Wireless", |
1572 | "USB MMC Storage", | 1587 | "USB MMC Storage", |
1573 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1588 | US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, |
1574 | US_FL_IGNORE_DEVICE), | 1589 | 0), |
1590 | #endif | ||
1575 | 1591 | ||
1576 | /* Reported by Jaco Kroon <jaco@kroon.co.za> | 1592 | /* Reported by Jaco Kroon <jaco@kroon.co.za> |
1577 | * The usb-storage module found on the Digitech GNX4 (and supposedly other | 1593 | * The usb-storage module found on the Digitech GNX4 (and supposedly other |
@@ -1743,6 +1759,15 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002, | |||
1743 | US_FL_FIX_CAPACITY), | 1759 | US_FL_FIX_CAPACITY), |
1744 | 1760 | ||
1745 | /* | 1761 | /* |
1762 | * Patch by Jost Diederichs <jost@qdusa.com> | ||
1763 | */ | ||
1764 | UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999, | ||
1765 | "Motorola Inc.", | ||
1766 | "Motorola Phone (RAZRV3xx)", | ||
1767 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1768 | US_FL_FIX_CAPACITY), | ||
1769 | |||
1770 | /* | ||
1746 | * Patch by Constantin Baranov <const@tltsu.ru> | 1771 | * Patch by Constantin Baranov <const@tltsu.ru> |
1747 | * Report by Andreas Koenecke. | 1772 | * Report by Andreas Koenecke. |
1748 | * Motorola ROKR Z6. | 1773 | * Motorola ROKR Z6. |
@@ -1767,6 +1792,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010, | |||
1767 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1792 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1768 | US_FL_FIX_CAPACITY ), | 1793 | US_FL_FIX_CAPACITY ), |
1769 | 1794 | ||
1795 | /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ | ||
1796 | UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, | ||
1797 | "iRiver", | ||
1798 | "MP3 T10", | ||
1799 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1800 | US_FL_IGNORE_RESIDUE ), | ||
1801 | |||
1770 | /* | 1802 | /* |
1771 | * David Härdeman <david@2gen.com> | 1803 | * David Härdeman <david@2gen.com> |
1772 | * The key makes the SCSI stack print confusing (but harmless) messages | 1804 | * The key makes the SCSI stack print confusing (but harmless) messages |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index bfea851be985..73679aa506de 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -102,6 +102,9 @@ | |||
102 | #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB | 102 | #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB |
103 | #include "cypress_atacb.h" | 103 | #include "cypress_atacb.h" |
104 | #endif | 104 | #endif |
105 | #ifdef CONFIG_USB_STORAGE_SIERRA | ||
106 | #include "sierra_ms.h" | ||
107 | #endif | ||
105 | 108 | ||
106 | /* Some informational data */ | 109 | /* Some informational data */ |
107 | MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); | 110 | MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); |
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c index 4bd569e479a7..314d18694b6a 100644 --- a/drivers/video/arkfb.c +++ b/drivers/video/arkfb.c | |||
@@ -11,7 +11,6 @@ | |||
11 | * Code is based on s3fb | 11 | * Code is based on s3fb |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/version.h> | ||
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
16 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
17 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index e7018a2f56af..9c5925927ece 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c | |||
@@ -39,7 +39,9 @@ | |||
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | #if defined(CONFIG_ARCH_AT91) | 41 | #if defined(CONFIG_ARCH_AT91) |
42 | #define ATMEL_LCDFB_FBINFO_DEFAULT FBINFO_DEFAULT | 42 | #define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \ |
43 | | FBINFO_PARTIAL_PAN_OK \ | ||
44 | | FBINFO_HWACCEL_YPAN) | ||
43 | 45 | ||
44 | static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo, | 46 | static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo, |
45 | struct fb_var_screeninfo *var) | 47 | struct fb_var_screeninfo *var) |
@@ -177,7 +179,7 @@ static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = { | |||
177 | .type = FB_TYPE_PACKED_PIXELS, | 179 | .type = FB_TYPE_PACKED_PIXELS, |
178 | .visual = FB_VISUAL_TRUECOLOR, | 180 | .visual = FB_VISUAL_TRUECOLOR, |
179 | .xpanstep = 0, | 181 | .xpanstep = 0, |
180 | .ypanstep = 0, | 182 | .ypanstep = 1, |
181 | .ywrapstep = 0, | 183 | .ywrapstep = 0, |
182 | .accel = FB_ACCEL_NONE, | 184 | .accel = FB_ACCEL_NONE, |
183 | }; | 185 | }; |
@@ -240,9 +242,11 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo) | |||
240 | { | 242 | { |
241 | struct fb_info *info = sinfo->info; | 243 | struct fb_info *info = sinfo->info; |
242 | struct fb_var_screeninfo *var = &info->var; | 244 | struct fb_var_screeninfo *var = &info->var; |
245 | unsigned int smem_len; | ||
243 | 246 | ||
244 | info->fix.smem_len = (var->xres_virtual * var->yres_virtual | 247 | smem_len = (var->xres_virtual * var->yres_virtual |
245 | * ((var->bits_per_pixel + 7) / 8)); | 248 | * ((var->bits_per_pixel + 7) / 8)); |
249 | info->fix.smem_len = max(smem_len, sinfo->smem_len); | ||
246 | 250 | ||
247 | info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, | 251 | info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, |
248 | (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); | 252 | (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); |
@@ -794,6 +798,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) | |||
794 | sinfo->default_monspecs = pdata_sinfo->default_monspecs; | 798 | sinfo->default_monspecs = pdata_sinfo->default_monspecs; |
795 | sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control; | 799 | sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control; |
796 | sinfo->guard_time = pdata_sinfo->guard_time; | 800 | sinfo->guard_time = pdata_sinfo->guard_time; |
801 | sinfo->smem_len = pdata_sinfo->smem_len; | ||
797 | sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight; | 802 | sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight; |
798 | sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode; | 803 | sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode; |
799 | } else { | 804 | } else { |
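
The allocation change above sizes the framebuffer from the virtual resolution and then lets platform data enlarge it, which supports the ypanstep/FBINFO_HWACCEL_YPAN changes earlier in this diff. A worked sketch of the arithmetic (the resolution and depth are illustrative assumptions, not from the patch):

	/* e.g. 800x600 virtual at 16 bpp */
	unsigned int smem_len = 800 * 600 * ((16 + 7) / 8);	/* = 960000 bytes */
	/* the probe then uses max(smem_len, platform-provided smem_len), so a
	 * board can reserve extra memory for panning */
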
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c index 4d13f68436e6..aa95f8350242 100644 --- a/drivers/video/aty/radeon_accel.c +++ b/drivers/video/aty/radeon_accel.c | |||
@@ -55,6 +55,10 @@ static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo, | |||
55 | OUTREG(DP_WRITE_MSK, 0xffffffff); | 55 | OUTREG(DP_WRITE_MSK, 0xffffffff); |
56 | OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM)); | 56 | OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM)); |
57 | 57 | ||
58 | radeon_fifo_wait(2); | ||
59 | OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL); | ||
60 | OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE)); | ||
61 | |||
58 | radeon_fifo_wait(2); | 62 | radeon_fifo_wait(2); |
59 | OUTREG(DST_Y_X, (region->dy << 16) | region->dx); | 63 | OUTREG(DST_Y_X, (region->dy << 16) | region->dx); |
60 | OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height); | 64 | OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height); |
@@ -116,6 +120,10 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo, | |||
116 | OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) | 120 | OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) |
117 | | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0)); | 121 | | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0)); |
118 | 122 | ||
123 | radeon_fifo_wait(2); | ||
124 | OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL); | ||
125 | OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE)); | ||
126 | |||
119 | radeon_fifo_wait(3); | 127 | radeon_fifo_wait(3); |
120 | OUTREG(SRC_Y_X, (sy << 16) | sx); | 128 | OUTREG(SRC_Y_X, (sy << 16) | sx); |
121 | OUTREG(DST_Y_X, (dy << 16) | dx); | 129 | OUTREG(DST_Y_X, (dy << 16) | dx); |
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 940467aed13f..7644ed249564 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c | |||
@@ -58,7 +58,7 @@ | |||
58 | #include <asm/gpio.h> | 58 | #include <asm/gpio.h> |
59 | #include <asm/portmux.h> | 59 | #include <asm/portmux.h> |
60 | 60 | ||
61 | #include <asm/mach/bf54x-lq043.h> | 61 | #include <mach/bf54x-lq043.h> |
62 | 62 | ||
63 | #define NO_BL_SUPPORT | 63 | #define NO_BL_SUPPORT |
64 | 64 | ||
@@ -733,7 +733,6 @@ static int bfin_bf54x_remove(struct platform_device *pdev) | |||
733 | static int bfin_bf54x_suspend(struct platform_device *pdev, pm_message_t state) | 733 | static int bfin_bf54x_suspend(struct platform_device *pdev, pm_message_t state) |
734 | { | 734 | { |
735 | struct fb_info *fbinfo = platform_get_drvdata(pdev); | 735 | struct fb_info *fbinfo = platform_get_drvdata(pdev); |
736 | struct bfin_bf54xfb_info *info = fbinfo->par; | ||
737 | 736 | ||
738 | bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() & ~EPPI_EN); | 737 | bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() & ~EPPI_EN); |
739 | disable_dma(CH_EPPI0); | 738 | disable_dma(CH_EPPI0); |
@@ -747,8 +746,18 @@ static int bfin_bf54x_resume(struct platform_device *pdev) | |||
747 | struct fb_info *fbinfo = platform_get_drvdata(pdev); | 746 | struct fb_info *fbinfo = platform_get_drvdata(pdev); |
748 | struct bfin_bf54xfb_info *info = fbinfo->par; | 747 | struct bfin_bf54xfb_info *info = fbinfo->par; |
749 | 748 | ||
750 | enable_dma(CH_EPPI0); | 749 | if (info->lq043_open_cnt) { |
751 | bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN); | 750 | |
751 | bfin_write_EPPI0_CONTROL(0); | ||
752 | SSYNC(); | ||
753 | |||
754 | config_dma(info); | ||
755 | config_ppi(info); | ||
756 | |||
757 | /* start dma */ | ||
758 | enable_dma(CH_EPPI0); | ||
759 | bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN); | ||
760 | } | ||
752 | 761 | ||
753 | return 0; | 762 | return 0; |
754 | } | 763 | } |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 33859934a8e4..c6299e8a041d 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -2518,7 +2518,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, | |||
2518 | c = vc->vc_video_erase_char; | 2518 | c = vc->vc_video_erase_char; |
2519 | vc->vc_video_erase_char = | 2519 | vc->vc_video_erase_char = |
2520 | ((c & 0xfe00) >> 1) | (c & 0xff); | 2520 | ((c & 0xfe00) >> 1) | (c & 0xff); |
2521 | c = vc->vc_def_color; | 2521 | c = vc->vc_scrl_erase_char; |
2522 | vc->vc_scrl_erase_char = | 2522 | vc->vc_scrl_erase_char = |
2523 | ((c & 0xFE00) >> 1) | (c & 0xFF); | 2523 | ((c & 0xFE00) >> 1) | (c & 0xFF); |
2524 | vc->vc_attr >>= 1; | 2524 | vc->vc_attr >>= 1; |
@@ -2551,7 +2551,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, | |||
2551 | if (vc->vc_can_do_color) { | 2551 | if (vc->vc_can_do_color) { |
2552 | vc->vc_video_erase_char = | 2552 | vc->vc_video_erase_char = |
2553 | ((c & 0xff00) << 1) | (c & 0xff); | 2553 | ((c & 0xff00) << 1) | (c & 0xff); |
2554 | c = vc->vc_def_color; | 2554 | c = vc->vc_scrl_erase_char; |
2555 | vc->vc_scrl_erase_char = | 2555 | vc->vc_scrl_erase_char = |
2556 | ((c & 0xFF00) << 1) | (c & 0xFF); | 2556 | ((c & 0xFF00) << 1) | (c & 0xFF); |
2557 | vc->vc_attr <<= 1; | 2557 | vc->vc_attr <<= 1; |
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 59df132cc375..4835bdc4e9f1 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c | |||
@@ -114,6 +114,17 @@ static struct vm_operations_struct fb_deferred_io_vm_ops = { | |||
114 | .page_mkwrite = fb_deferred_io_mkwrite, | 114 | .page_mkwrite = fb_deferred_io_mkwrite, |
115 | }; | 115 | }; |
116 | 116 | ||
117 | static int fb_deferred_io_set_page_dirty(struct page *page) | ||
118 | { | ||
119 | if (!PageDirty(page)) | ||
120 | SetPageDirty(page); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static const struct address_space_operations fb_deferred_io_aops = { | ||
125 | .set_page_dirty = fb_deferred_io_set_page_dirty, | ||
126 | }; | ||
127 | |||
117 | static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) | 128 | static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) |
118 | { | 129 | { |
119 | vma->vm_ops = &fb_deferred_io_vm_ops; | 130 | vma->vm_ops = &fb_deferred_io_vm_ops; |
@@ -163,6 +174,14 @@ void fb_deferred_io_init(struct fb_info *info) | |||
163 | } | 174 | } |
164 | EXPORT_SYMBOL_GPL(fb_deferred_io_init); | 175 | EXPORT_SYMBOL_GPL(fb_deferred_io_init); |
165 | 176 | ||
177 | void fb_deferred_io_open(struct fb_info *info, | ||
178 | struct inode *inode, | ||
179 | struct file *file) | ||
180 | { | ||
181 | file->f_mapping->a_ops = &fb_deferred_io_aops; | ||
182 | } | ||
183 | EXPORT_SYMBOL_GPL(fb_deferred_io_open); | ||
184 | |||
166 | void fb_deferred_io_cleanup(struct fb_info *info) | 185 | void fb_deferred_io_cleanup(struct fb_info *info) |
167 | { | 186 | { |
168 | void *screen_base = (void __force *) info->screen_base; | 187 | void *screen_base = (void __force *) info->screen_base; |
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 6b487801eeae..98843c2ecf73 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -1344,6 +1344,10 @@ fb_open(struct inode *inode, struct file *file) | |||
1344 | if (res) | 1344 | if (res) |
1345 | module_put(info->fbops->owner); | 1345 | module_put(info->fbops->owner); |
1346 | } | 1346 | } |
1347 | #ifdef CONFIG_FB_DEFERRED_IO | ||
1348 | if (info->fbdefio) | ||
1349 | fb_deferred_io_open(info, inode, file); | ||
1350 | #endif | ||
1347 | out: | 1351 | out: |
1348 | unlock_kernel(); | 1352 | unlock_kernel(); |
1349 | return res; | 1353 | return res; |
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index bd320a2bfb7c..fb51197d1c98 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c | |||
@@ -479,6 +479,10 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var, | |||
479 | base_plane_width = machine_data->fsl_diu_info[0]->var.xres; | 479 | base_plane_width = machine_data->fsl_diu_info[0]->var.xres; |
480 | base_plane_height = machine_data->fsl_diu_info[0]->var.yres; | 480 | base_plane_height = machine_data->fsl_diu_info[0]->var.yres; |
481 | 481 | ||
482 | if (mfbi->x_aoi_d < 0) | ||
483 | mfbi->x_aoi_d = 0; | ||
484 | if (mfbi->y_aoi_d < 0) | ||
485 | mfbi->y_aoi_d = 0; | ||
482 | switch (index) { | 486 | switch (index) { |
483 | case 0: | 487 | case 0: |
484 | if (mfbi->x_aoi_d != 0) | 488 | if (mfbi->x_aoi_d != 0) |
@@ -778,6 +782,22 @@ static void unmap_video_memory(struct fb_info *info) | |||
778 | } | 782 | } |
779 | 783 | ||
780 | /* | 784 | /* |
785 | * Using the fb_var_screeninfo in fb_info we set the aoi of this | ||
786 | * particular framebuffer. It is a light version of fsl_diu_set_par. | ||
787 | */ | ||
788 | static int fsl_diu_set_aoi(struct fb_info *info) | ||
789 | { | ||
790 | struct fb_var_screeninfo *var = &info->var; | ||
791 | struct mfb_info *mfbi = info->par; | ||
792 | struct diu_ad *ad = mfbi->ad; | ||
793 | |||
794 | /* AOI should not be greater than display size */ | ||
795 | ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset); | ||
796 | ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d); | ||
797 | return 0; | ||
798 | } | ||
799 | |||
800 | /* | ||
781 | * Using the fb_var_screeninfo in fb_info we set the resolution of this | 801 | * Using the fb_var_screeninfo in fb_info we set the resolution of this |
782 | * particular framebuffer. This function alters the fb_fix_screeninfo stored | 802 | * particular framebuffer. This function alters the fb_fix_screeninfo stored |
783 | * in fb_info. It does not alter var in fb_info since we are using that | 803 | * in fb_info. It does not alter var in fb_info since we are using that |
@@ -817,11 +837,11 @@ static int fsl_diu_set_par(struct fb_info *info) | |||
817 | diu_ops.get_pixel_format(var->bits_per_pixel, | 837 | diu_ops.get_pixel_format(var->bits_per_pixel, |
818 | machine_data->monitor_port); | 838 | machine_data->monitor_port); |
819 | ad->addr = cpu_to_le32(info->fix.smem_start); | 839 | ad->addr = cpu_to_le32(info->fix.smem_start); |
820 | ad->src_size_g_alpha = cpu_to_le32((var->yres << 12) | | 840 | ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) | |
821 | var->xres) | mfbi->g_alpha; | 841 | var->xres_virtual) | mfbi->g_alpha; |
822 | /* fix me. AOI should not be greater than display size */ | 842 | /* AOI should not be greater than display size */ |
823 | ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres); | 843 | ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres); |
824 | ad->offset_xyi = 0; | 844 | ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset); |
825 | ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d); | 845 | ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d); |
826 | 846 | ||
827 | /* Disable chroma keying function */ | 847 | /* Disable chroma keying function */ |
@@ -921,6 +941,8 @@ static int fsl_diu_pan_display(struct fb_var_screeninfo *var, | |||
921 | else | 941 | else |
922 | info->var.vmode &= ~FB_VMODE_YWRAP; | 942 | info->var.vmode &= ~FB_VMODE_YWRAP; |
923 | 943 | ||
944 | fsl_diu_set_aoi(info); | ||
945 | |||
924 | return 0; | 946 | return 0; |
925 | } | 947 | } |
926 | 948 | ||
@@ -989,7 +1011,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd, | |||
989 | pr_debug("set AOI display offset of index %d to (%d,%d)\n", | 1011 | pr_debug("set AOI display offset of index %d to (%d,%d)\n", |
990 | mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d); | 1012 | mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d); |
991 | fsl_diu_check_var(&info->var, info); | 1013 | fsl_diu_check_var(&info->var, info); |
992 | fsl_diu_set_par(info); | 1014 | fsl_diu_set_aoi(info); |
993 | break; | 1015 | break; |
994 | case MFB_GET_AOID: | 1016 | case MFB_GET_AOID: |
995 | aoi_d.x_aoi_d = mfbi->x_aoi_d; | 1017 | aoi_d.x_aoi_d = mfbi->x_aoi_d; |
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c index 75ee5a12e549..c14e3e2212b3 100644 --- a/drivers/video/matrox/i2c-matroxfb.c +++ b/drivers/video/matrox/i2c-matroxfb.c | |||
@@ -87,13 +87,7 @@ static int matroxfb_gpio_getscl(void* data) { | |||
87 | return (matroxfb_read_gpio(b->minfo) & b->mask.clock) ? 1 : 0; | 87 | return (matroxfb_read_gpio(b->minfo) & b->mask.clock) ? 1 : 0; |
88 | } | 88 | } |
89 | 89 | ||
90 | static struct i2c_adapter matrox_i2c_adapter_template = | 90 | static const struct i2c_algo_bit_data matrox_i2c_algo_template = |
91 | { | ||
92 | .owner = THIS_MODULE, | ||
93 | .id = I2C_HW_B_G400, | ||
94 | }; | ||
95 | |||
96 | static struct i2c_algo_bit_data matrox_i2c_algo_template = | ||
97 | { | 91 | { |
98 | .setsda = matroxfb_gpio_setsda, | 92 | .setsda = matroxfb_gpio_setsda, |
99 | .setscl = matroxfb_gpio_setscl, | 93 | .setscl = matroxfb_gpio_setscl, |
@@ -112,7 +106,7 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo, | |||
112 | b->minfo = minfo; | 106 | b->minfo = minfo; |
113 | b->mask.data = data; | 107 | b->mask.data = data; |
114 | b->mask.clock = clock; | 108 | b->mask.clock = clock; |
115 | b->adapter = matrox_i2c_adapter_template; | 109 | b->adapter.owner = THIS_MODULE; |
116 | snprintf(b->adapter.name, sizeof(b->adapter.name), name, | 110 | snprintf(b->adapter.name, sizeof(b->adapter.name), name, |
117 | minfo->fbcon.node); | 111 | minfo->fbcon.node); |
118 | i2c_set_adapdata(&b->adapter, b); | 112 | i2c_set_adapdata(&b->adapter, b); |
@@ -187,6 +181,17 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) { | |||
187 | MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0); | 181 | MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0); |
188 | if (err) | 182 | if (err) |
189 | printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n"); | 183 | printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n"); |
184 | else { | ||
185 | struct i2c_board_info maven_info = { | ||
186 | I2C_BOARD_INFO("maven", 0x1b), | ||
187 | }; | ||
188 | unsigned short const addr_list[2] = { | ||
189 | 0x1b, I2C_CLIENT_END | ||
190 | }; | ||
191 | |||
192 | i2c_new_probed_device(&m2info->maven.adapter, | ||
193 | &maven_info, addr_list); | ||
194 | } | ||
190 | } | 195 | } |
191 | return m2info; | 196 | return m2info; |
192 | fail_ddc1:; | 197 | fail_ddc1:; |
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c index 89da27bd5c49..042408a8c631 100644 --- a/drivers/video/matrox/matroxfb_maven.c +++ b/drivers/video/matrox/matroxfb_maven.c | |||
@@ -19,8 +19,6 @@ | |||
19 | #include <linux/matroxfb.h> | 19 | #include <linux/matroxfb.h> |
20 | #include <asm/div64.h> | 20 | #include <asm/div64.h> |
21 | 21 | ||
22 | #define MAVEN_I2CID (0x1B) | ||
23 | |||
24 | #define MGATVO_B 1 | 22 | #define MGATVO_B 1 |
25 | #define MGATVO_C 2 | 23 | #define MGATVO_C 2 |
26 | 24 | ||
@@ -128,7 +126,7 @@ static int get_ctrl_id(__u32 v4l2_id) { | |||
128 | 126 | ||
129 | struct maven_data { | 127 | struct maven_data { |
130 | struct matrox_fb_info* primary_head; | 128 | struct matrox_fb_info* primary_head; |
131 | struct i2c_client client; | 129 | struct i2c_client *client; |
132 | int version; | 130 | int version; |
133 | }; | 131 | }; |
134 | 132 | ||
@@ -974,7 +972,7 @@ static inline int maven_compute_timming(struct maven_data* md, | |||
974 | 972 | ||
975 | static int maven_program_timming(struct maven_data* md, | 973 | static int maven_program_timming(struct maven_data* md, |
976 | const struct mavenregs* m) { | 974 | const struct mavenregs* m) { |
977 | struct i2c_client* c = &md->client; | 975 | struct i2c_client *c = md->client; |
978 | 976 | ||
979 | if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) { | 977 | if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) { |
980 | LR(0x80); | 978 | LR(0x80); |
@@ -1011,7 +1009,7 @@ static int maven_program_timming(struct maven_data* md, | |||
1011 | } | 1009 | } |
1012 | 1010 | ||
1013 | static inline int maven_resync(struct maven_data* md) { | 1011 | static inline int maven_resync(struct maven_data* md) { |
1014 | struct i2c_client* c = &md->client; | 1012 | struct i2c_client *c = md->client; |
1015 | maven_set_reg(c, 0x95, 0x20); /* start whole thing */ | 1013 | maven_set_reg(c, 0x95, 0x20); /* start whole thing */ |
1016 | return 0; | 1014 | return 0; |
1017 | } | 1015 | } |
@@ -1069,48 +1067,48 @@ static int maven_set_control (struct maven_data* md, | |||
1069 | maven_compute_bwlevel(md, &blacklevel, &whitelevel); | 1067 | maven_compute_bwlevel(md, &blacklevel, &whitelevel); |
1070 | blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8); | 1068 | blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8); |
1071 | whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8); | 1069 | whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8); |
1072 | maven_set_reg_pair(&md->client, 0x0e, blacklevel); | 1070 | maven_set_reg_pair(md->client, 0x0e, blacklevel); |
1073 | maven_set_reg_pair(&md->client, 0x1e, whitelevel); | 1071 | maven_set_reg_pair(md->client, 0x1e, whitelevel); |
1074 | } | 1072 | } |
1075 | break; | 1073 | break; |
1076 | case V4L2_CID_SATURATION: | 1074 | case V4L2_CID_SATURATION: |
1077 | { | 1075 | { |
1078 | maven_set_reg(&md->client, 0x20, p->value); | 1076 | maven_set_reg(md->client, 0x20, p->value); |
1079 | maven_set_reg(&md->client, 0x22, p->value); | 1077 | maven_set_reg(md->client, 0x22, p->value); |
1080 | } | 1078 | } |
1081 | break; | 1079 | break; |
1082 | case V4L2_CID_HUE: | 1080 | case V4L2_CID_HUE: |
1083 | { | 1081 | { |
1084 | maven_set_reg(&md->client, 0x25, p->value); | 1082 | maven_set_reg(md->client, 0x25, p->value); |
1085 | } | 1083 | } |
1086 | break; | 1084 | break; |
1087 | case V4L2_CID_GAMMA: | 1085 | case V4L2_CID_GAMMA: |
1088 | { | 1086 | { |
1089 | const struct maven_gamma* g; | 1087 | const struct maven_gamma* g; |
1090 | g = maven_compute_gamma(md); | 1088 | g = maven_compute_gamma(md); |
1091 | maven_set_reg(&md->client, 0x83, g->reg83); | 1089 | maven_set_reg(md->client, 0x83, g->reg83); |
1092 | maven_set_reg(&md->client, 0x84, g->reg84); | 1090 | maven_set_reg(md->client, 0x84, g->reg84); |
1093 | maven_set_reg(&md->client, 0x85, g->reg85); | 1091 | maven_set_reg(md->client, 0x85, g->reg85); |
1094 | maven_set_reg(&md->client, 0x86, g->reg86); | 1092 | maven_set_reg(md->client, 0x86, g->reg86); |
1095 | maven_set_reg(&md->client, 0x87, g->reg87); | 1093 | maven_set_reg(md->client, 0x87, g->reg87); |
1096 | maven_set_reg(&md->client, 0x88, g->reg88); | 1094 | maven_set_reg(md->client, 0x88, g->reg88); |
1097 | maven_set_reg(&md->client, 0x89, g->reg89); | 1095 | maven_set_reg(md->client, 0x89, g->reg89); |
1098 | maven_set_reg(&md->client, 0x8a, g->reg8a); | 1096 | maven_set_reg(md->client, 0x8a, g->reg8a); |
1099 | maven_set_reg(&md->client, 0x8b, g->reg8b); | 1097 | maven_set_reg(md->client, 0x8b, g->reg8b); |
1100 | } | 1098 | } |
1101 | break; | 1099 | break; |
1102 | case MATROXFB_CID_TESTOUT: | 1100 | case MATROXFB_CID_TESTOUT: |
1103 | { | 1101 | { |
1104 | unsigned char val | 1102 | unsigned char val |
1105 | = maven_get_reg(&md->client,0x8d); | 1103 | = maven_get_reg(md->client, 0x8d); |
1106 | if (p->value) val |= 0x10; | 1104 | if (p->value) val |= 0x10; |
1107 | else val &= ~0x10; | 1105 | else val &= ~0x10; |
1108 | maven_set_reg(&md->client, 0x8d, val); | 1106 | maven_set_reg(md->client, 0x8d, val); |
1109 | } | 1107 | } |
1110 | break; | 1108 | break; |
1111 | case MATROXFB_CID_DEFLICKER: | 1109 | case MATROXFB_CID_DEFLICKER: |
1112 | { | 1110 | { |
1113 | maven_set_reg(&md->client, 0x93, maven_compute_deflicker(md)); | 1111 | maven_set_reg(md->client, 0x93, maven_compute_deflicker(md)); |
1114 | } | 1112 | } |
1115 | break; | 1113 | break; |
1116 | } | 1114 | } |
@@ -1189,6 +1187,7 @@ static int maven_init_client(struct i2c_client* clnt) { | |||
1189 | MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo); | 1187 | MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo); |
1190 | 1188 | ||
1191 | md->primary_head = MINFO; | 1189 | md->primary_head = MINFO; |
1190 | md->client = clnt; | ||
1192 | down_write(&ACCESS_FBINFO(altout.lock)); | 1191 | down_write(&ACCESS_FBINFO(altout.lock)); |
1193 | ACCESS_FBINFO(outputs[1]).output = &maven_altout; | 1192 | ACCESS_FBINFO(outputs[1]).output = &maven_altout; |
1194 | ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src; | 1193 | ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src; |
@@ -1232,14 +1231,11 @@ static int maven_shutdown_client(struct i2c_client* clnt) { | |||
1232 | return 0; | 1231 | return 0; |
1233 | } | 1232 | } |
1234 | 1233 | ||
1235 | static const unsigned short normal_i2c[] = { MAVEN_I2CID, I2C_CLIENT_END }; | 1234 | static int maven_probe(struct i2c_client *client, |
1236 | I2C_CLIENT_INSMOD; | 1235 | const struct i2c_device_id *id) |
1237 | 1236 | { | |
1238 | static struct i2c_driver maven_driver; | 1237 | struct i2c_adapter *adapter = client->adapter; |
1239 | 1238 | int err = -ENODEV; | |
1240 | static int maven_detect_client(struct i2c_adapter* adapter, int address, int kind) { | ||
1241 | int err = 0; | ||
1242 | struct i2c_client* new_client; | ||
1243 | struct maven_data* data; | 1239 | struct maven_data* data; |
1244 | 1240 | ||
1245 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA | | 1241 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA | |
@@ -1250,50 +1246,37 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin | |||
1250 | err = -ENOMEM; | 1246 | err = -ENOMEM; |
1251 | goto ERROR0; | 1247 | goto ERROR0; |
1252 | } | 1248 | } |
1253 | new_client = &data->client; | 1249 | i2c_set_clientdata(client, data); |
1254 | i2c_set_clientdata(new_client, data); | 1250 | err = maven_init_client(client); |
1255 | new_client->addr = address; | ||
1256 | new_client->adapter = adapter; | ||
1257 | new_client->driver = &maven_driver; | ||
1258 | new_client->flags = 0; | ||
1259 | strlcpy(new_client->name, "maven", I2C_NAME_SIZE); | ||
1260 | if ((err = i2c_attach_client(new_client))) | ||
1261 | goto ERROR3; | ||
1262 | err = maven_init_client(new_client); | ||
1263 | if (err) | 1251 | if (err) |
1264 | goto ERROR4; | 1252 | goto ERROR4; |
1265 | return 0; | 1253 | return 0; |
1266 | ERROR4:; | 1254 | ERROR4:; |
1267 | i2c_detach_client(new_client); | 1255 | kfree(data); |
1268 | ERROR3:; | ||
1269 | kfree(new_client); | ||
1270 | ERROR0:; | 1256 | ERROR0:; |
1271 | return err; | 1257 | return err; |
1272 | } | 1258 | } |
1273 | 1259 | ||
1274 | static int maven_attach_adapter(struct i2c_adapter* adapter) { | 1260 | static int maven_remove(struct i2c_client *client) |
1275 | if (adapter->id == I2C_HW_B_G400) | 1261 | { |
1276 | return i2c_probe(adapter, &addr_data, &maven_detect_client); | ||
1277 | return 0; | ||
1278 | } | ||
1279 | |||
1280 | static int maven_detach_client(struct i2c_client* client) { | ||
1281 | int err; | ||
1282 | |||
1283 | if ((err = i2c_detach_client(client))) | ||
1284 | return err; | ||
1285 | maven_shutdown_client(client); | 1262 | maven_shutdown_client(client); |
1286 | kfree(i2c_get_clientdata(client)); | 1263 | kfree(i2c_get_clientdata(client)); |
1287 | return 0; | 1264 | return 0; |
1288 | } | 1265 | } |
1289 | 1266 | ||
1267 | static const struct i2c_device_id maven_id[] = { | ||
1268 | { "maven", 0 }, | ||
1269 | { } | ||
1270 | }; | ||
1271 | MODULE_DEVICE_TABLE(i2c, maven_id); | ||
1272 | |||
1290 | static struct i2c_driver maven_driver={ | 1273 | static struct i2c_driver maven_driver={ |
1291 | .driver = { | 1274 | .driver = { |
1292 | .name = "maven", | 1275 | .name = "maven", |
1293 | }, | 1276 | }, |
1294 | .id = I2C_DRIVERID_MGATVO, | 1277 | .probe = maven_probe, |
1295 | .attach_adapter = maven_attach_adapter, | 1278 | .remove = maven_remove, |
1296 | .detach_client = maven_detach_client, | 1279 | .id_table = maven_id, |
1297 | }; | 1280 | }; |
1298 | 1281 | ||
1299 | static int __init matroxfb_maven_init(void) | 1282 | static int __init matroxfb_maven_init(void) |
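The matroxfb_maven hunks above convert the driver from the legacy attach_adapter/detach_client i2c binding to the new-style probe/remove model: the client is matched through an i2c_device_id table, and maven_data now holds a pointer to the i2c_client instead of embedding one. A minimal sketch of that binding pattern, using a hypothetical "foo" device rather than the maven code itself:

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct foo_data {
	struct i2c_client *client;	/* pointer to the core-owned client, not a copy */
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct foo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	data->client = client;
	i2c_set_clientdata(client, data);	/* retrieved again in remove() */
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	kfree(i2c_get_clientdata(client));
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
};

The i2c core now instantiates and tears down the client, so the driver no longer calls i2c_attach_client()/i2c_detach_client(); probe() only allocates per-device state and remove() frees it.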
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c index 3f1ca2adda3d..c6dd924976a4 100644 --- a/drivers/video/pm2fb.c +++ b/drivers/video/pm2fb.c | |||
@@ -1746,6 +1746,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev) | |||
1746 | release_mem_region(fix->mmio_start, fix->mmio_len); | 1746 | release_mem_region(fix->mmio_start, fix->mmio_len); |
1747 | 1747 | ||
1748 | pci_set_drvdata(pdev, NULL); | 1748 | pci_set_drvdata(pdev, NULL); |
1749 | fb_dealloc_cmap(&info->cmap); | ||
1749 | kfree(info->pixmap.addr); | 1750 | kfree(info->pixmap.addr); |
1750 | kfree(info); | 1751 | kfree(info); |
1751 | } | 1752 | } |
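The one-line pm2fb change pairs the fb_alloc_cmap() done at probe time with fb_dealloc_cmap() on removal, so the colormap no longer leaks when the PCI device goes away. The general shape of the pairing, as a sketch with error handling elided:

#include <linux/fb.h>

/* probe path: allocate a 256-entry colormap for the fb_info being registered */
static int example_alloc_cmap(struct fb_info *info)
{
	return fb_alloc_cmap(&info->cmap, 256, 0);	/* third argument: no transparency */
}

/* remove path: every successful fb_alloc_cmap() needs this counterpart */
static void example_free_cmap(struct fb_info *info)
{
	fb_dealloc_cmap(&info->cmap);
}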
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index e7aa7ae8fca8..97204497d9f7 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -1031,7 +1031,9 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi) | |||
1031 | pxa_gpio_mode(GPIO74_LCD_FCLK_MD); | 1031 | pxa_gpio_mode(GPIO74_LCD_FCLK_MD); |
1032 | pxa_gpio_mode(GPIO75_LCD_LCLK_MD); | 1032 | pxa_gpio_mode(GPIO75_LCD_LCLK_MD); |
1033 | pxa_gpio_mode(GPIO76_LCD_PCLK_MD); | 1033 | pxa_gpio_mode(GPIO76_LCD_PCLK_MD); |
1034 | pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD); | 1034 | |
1035 | if ((lccr0 & LCCR0_PAS) == 0) | ||
1036 | pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD); | ||
1035 | } | 1037 | } |
1036 | 1038 | ||
1037 | static void pxafb_enable_controller(struct pxafb_info *fbi) | 1039 | static void pxafb_enable_controller(struct pxafb_info *fbi) |
@@ -1400,6 +1402,8 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi, | |||
1400 | if (lcd_conn == LCD_MONO_STN_8BPP) | 1402 | if (lcd_conn == LCD_MONO_STN_8BPP) |
1401 | fbi->lccr0 |= LCCR0_DPD; | 1403 | fbi->lccr0 |= LCCR0_DPD; |
1402 | 1404 | ||
1405 | fbi->lccr0 |= (lcd_conn & LCD_ALTERNATE_MAPPING) ? LCCR0_LDDALT : 0; | ||
1406 | |||
1403 | fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff); | 1407 | fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff); |
1404 | fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0; | 1408 | fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0; |
1405 | fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0; | 1409 | fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0; |
@@ -1673,53 +1677,63 @@ MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)"); | |||
1673 | #define pxafb_setup_options() (0) | 1677 | #define pxafb_setup_options() (0) |
1674 | #endif | 1678 | #endif |
1675 | 1679 | ||
1676 | static int __devinit pxafb_probe(struct platform_device *dev) | ||
1677 | { | ||
1678 | struct pxafb_info *fbi; | ||
1679 | struct pxafb_mach_info *inf; | ||
1680 | struct resource *r; | ||
1681 | int irq, ret; | ||
1682 | |||
1683 | dev_dbg(&dev->dev, "pxafb_probe\n"); | ||
1684 | |||
1685 | inf = dev->dev.platform_data; | ||
1686 | ret = -ENOMEM; | ||
1687 | fbi = NULL; | ||
1688 | if (!inf) | ||
1689 | goto failed; | ||
1690 | |||
1691 | ret = pxafb_parse_options(&dev->dev, g_options); | ||
1692 | if (ret < 0) | ||
1693 | goto failed; | ||
1694 | |||
1695 | #ifdef DEBUG_VAR | 1680 | #ifdef DEBUG_VAR |
1696 | /* Check for various illegal bit-combinations. Currently only | 1681 | /* Check for various illegal bit-combinations. Currently only |
1697 | * a warning is given. */ | 1682 | * a warning is given. */ |
1683 | static void __devinit pxafb_check_options(struct device *dev, | ||
1684 | struct pxafb_mach_info *inf) | ||
1685 | { | ||
1686 | if (inf->lcd_conn) | ||
1687 | return; | ||
1698 | 1688 | ||
1699 | if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) | 1689 | if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) |
1700 | dev_warn(&dev->dev, "machine LCCR0 setting contains " | 1690 | dev_warn(dev, "machine LCCR0 setting contains " |
1701 | "illegal bits: %08x\n", | 1691 | "illegal bits: %08x\n", |
1702 | inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); | 1692 | inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); |
1703 | if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) | 1693 | if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) |
1704 | dev_warn(&dev->dev, "machine LCCR3 setting contains " | 1694 | dev_warn(dev, "machine LCCR3 setting contains " |
1705 | "illegal bits: %08x\n", | 1695 | "illegal bits: %08x\n", |
1706 | inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); | 1696 | inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); |
1707 | if (inf->lccr0 & LCCR0_DPD && | 1697 | if (inf->lccr0 & LCCR0_DPD && |
1708 | ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || | 1698 | ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || |
1709 | (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || | 1699 | (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || |
1710 | (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) | 1700 | (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) |
1711 | dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is " | 1701 | dev_warn(dev, "Double Pixel Data (DPD) mode is " |
1712 | "only valid in passive mono" | 1702 | "only valid in passive mono" |
1713 | " single panel mode\n"); | 1703 | " single panel mode\n"); |
1714 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && | 1704 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && |
1715 | (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) | 1705 | (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) |
1716 | dev_warn(&dev->dev, "Dual panel only valid in passive mode\n"); | 1706 | dev_warn(dev, "Dual panel only valid in passive mode\n"); |
1717 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && | 1707 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && |
1718 | (inf->modes->upper_margin || inf->modes->lower_margin)) | 1708 | (inf->modes->upper_margin || inf->modes->lower_margin)) |
1719 | dev_warn(&dev->dev, "Upper and lower margins must be 0 in " | 1709 | dev_warn(dev, "Upper and lower margins must be 0 in " |
1720 | "passive mode\n"); | 1710 | "passive mode\n"); |
1711 | } | ||
1712 | #else | ||
1713 | #define pxafb_check_options(...) do {} while (0) | ||
1721 | #endif | 1714 | #endif |
1722 | 1715 | ||
1716 | static int __devinit pxafb_probe(struct platform_device *dev) | ||
1717 | { | ||
1718 | struct pxafb_info *fbi; | ||
1719 | struct pxafb_mach_info *inf; | ||
1720 | struct resource *r; | ||
1721 | int irq, ret; | ||
1722 | |||
1723 | dev_dbg(&dev->dev, "pxafb_probe\n"); | ||
1724 | |||
1725 | inf = dev->dev.platform_data; | ||
1726 | ret = -ENOMEM; | ||
1727 | fbi = NULL; | ||
1728 | if (!inf) | ||
1729 | goto failed; | ||
1730 | |||
1731 | ret = pxafb_parse_options(&dev->dev, g_options); | ||
1732 | if (ret < 0) | ||
1733 | goto failed; | ||
1734 | |||
1735 | pxafb_check_options(&dev->dev, inf); | ||
1736 | |||
1723 | dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n", | 1737 | dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n", |
1724 | inf->modes->xres, | 1738 | inf->modes->xres, |
1725 | inf->modes->yres, | 1739 | inf->modes->yres, |
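In the pxafb diff, the DEBUG_VAR-only sanity checks move out of pxafb_probe() into pxafb_check_options(); when DEBUG_VAR is not defined, the call collapses to an empty variadic macro, so the probe path stays free of #ifdefs. A generic sketch of the technique (identifiers and the mask value are illustrative, not the pxafb symbols):

#include <linux/device.h>

#define INVALID_CONFIG_MASK	0xf0000000	/* illustrative value */

#ifdef DEBUG_VAR
static void check_options(struct device *dev, unsigned int flags)
{
	if (flags & INVALID_CONFIG_MASK)
		dev_warn(dev, "illegal bits: %08x\n", flags & INVALID_CONFIG_MASK);
}
#else
/* non-debug build: call sites remain but generate no code */
#define check_options(...)	do { } while (0)
#endif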
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 8361bd0e3df1..4dcec48a1d78 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c | |||
@@ -11,7 +11,6 @@ | |||
11 | * which is based on the code of neofb. | 11 | * which is based on the code of neofb. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/version.h> | ||
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
16 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
17 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index f6ef6cca73cd..4c32c06579a0 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -595,6 +595,8 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
595 | info->fbops = &sh_mobile_lcdc_ops; | 595 | info->fbops = &sh_mobile_lcdc_ops; |
596 | info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres; | 596 | info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres; |
597 | info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres; | 597 | info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres; |
598 | info->var.width = cfg->lcd_size_cfg.width; | ||
599 | info->var.height = cfg->lcd_size_cfg.height; | ||
598 | info->var.activate = FB_ACTIVATE_NOW; | 600 | info->var.activate = FB_ACTIVATE_NOW; |
599 | error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp); | 601 | error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp); |
600 | if (error) | 602 | if (error) |
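sh_mobile_lcdcfb now copies the physical panel size (in millimetres) from the platform data into fb_var_screeninfo, which lets userspace derive the pixel density. A small worked example with made-up numbers, assuming a 320x240 panel reported as 58 mm wide:

/* dots per inch from a pixel count and a physical size in millimetres
 * (25.4 mm per inch, integer arithmetic with rounding) */
static unsigned int fb_dpi(unsigned int pixels, unsigned int mm)
{
	return (pixels * 254 + mm * 5) / (mm * 10);
}

/* fb_dpi(320, 58) == 140, so the illustrative panel is roughly 140 dpi */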
diff --git a/drivers/video/vermilion/vermilion.h b/drivers/video/vermilion/vermilion.h index c4aba59d4809..7491abfcf1fc 100644 --- a/drivers/video/vermilion/vermilion.h +++ b/drivers/video/vermilion/vermilion.h | |||
@@ -30,7 +30,6 @@ | |||
30 | #define _VERMILION_H_ | 30 | #define _VERMILION_H_ |
31 | 31 | ||
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/version.h> | ||
34 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
35 | #include <asm/atomic.h> | 34 | #include <asm/atomic.h> |
36 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c index 34aae7a2a62b..3df17dc8c3d7 100644 --- a/drivers/video/vt8623fb.c +++ b/drivers/video/vt8623fb.c | |||
@@ -12,7 +12,6 @@ | |||
12 | * (http://davesdomain.org.uk/viafb/) | 12 | * (http://davesdomain.org.uk/viafb/) |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/version.h> | ||
16 | #include <linux/module.h> | 15 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
18 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c index 7b3a8423f485..5da3d2423cc0 100644 --- a/drivers/video/xilinxfb.c +++ b/drivers/video/xilinxfb.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/version.h> | ||
28 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
29 | #include <linux/string.h> | 28 | #include <linux/string.h> |
30 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index bfef604160d1..62eab43152d2 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -158,7 +158,7 @@ static inline s64 towards_target(struct virtio_balloon *vb) | |||
158 | vb->vdev->config->get(vb->vdev, | 158 | vb->vdev->config->get(vb->vdev, |
159 | offsetof(struct virtio_balloon_config, num_pages), | 159 | offsetof(struct virtio_balloon_config, num_pages), |
160 | &v, sizeof(v)); | 160 | &v, sizeof(v)); |
161 | return v - vb->num_pages; | 161 | return (s64)v - vb->num_pages; |
162 | } | 162 | } |
163 | 163 | ||
164 | static void update_balloon_size(struct virtio_balloon *vb) | 164 | static void update_balloon_size(struct virtio_balloon *vb) |
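The virtio_balloon fix casts before subtracting: v is a u32 target read from config space, so with the old code a target smaller than vb->num_pages wrapped around in unsigned arithmetic, and the signed return value became a huge positive number instead of a negative "deflate" request. A standalone demonstration with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v = 100;		/* target pages asked for by the host */
	uint32_t num_pages = 150;	/* pages currently in the balloon */

	long long broken = (long long)(v - num_pages);	/* u32 wrap: 4294967246 */
	long long fixed  = (long long)v - num_pages;	/* -50: deflate by 50 pages */

	printf("broken=%lld fixed=%lld\n", broken, fixed);
	return 0;
}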
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 32b9fe153641..c51036716700 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -285,10 +285,11 @@ config ALIM1535_WDT | |||
285 | 285 | ||
286 | config ALIM7101_WDT | 286 | config ALIM7101_WDT |
287 | tristate "ALi M7101 PMU Computer Watchdog" | 287 | tristate "ALi M7101 PMU Computer Watchdog" |
288 | depends on X86 && PCI | 288 | depends on PCI |
289 | help | 289 | help |
290 | This is the driver for the hardware watchdog on the ALi M7101 PMU | 290 | This is the driver for the hardware watchdog on the ALi M7101 PMU |
291 | as used in the x86 Cobalt servers. | 291 | as used in the x86 Cobalt servers and also found in some |
292 | SPARC Netra servers too. | ||
292 | 293 | ||
293 | To compile this driver as a module, choose M here: the | 294 | To compile this driver as a module, choose M here: the |
294 | module will be called alim7101_wdt. | 295 | module will be called alim7101_wdt. |
@@ -464,6 +465,16 @@ config PC87413_WDT | |||
464 | 465 | ||
465 | Most people will say N. | 466 | Most people will say N. |
466 | 467 | ||
468 | config RDC321X_WDT | ||
469 | tristate "RDC R-321x SoC watchdog" | ||
470 | depends on X86_RDC321X | ||
471 | help | ||
472 | This is the driver for the built-in hardware watchdog | ||
473 | in the RDC R-321x SoC. | ||
474 | |||
475 | To compile this driver as a module, choose M here: the | ||
476 | module will be called rdc321x_wdt. | ||
477 | |||
467 | config 60XX_WDT | 478 | config 60XX_WDT |
468 | tristate "SBC-60XX Watchdog Timer" | 479 | tristate "SBC-60XX Watchdog Timer" |
469 | depends on X86 | 480 | depends on X86 |
@@ -632,6 +643,16 @@ config SBC_EPX_C3_WATCHDOG | |||
632 | 643 | ||
633 | # MIPS Architecture | 644 | # MIPS Architecture |
634 | 645 | ||
646 | config RC32434_WDT | ||
647 | tristate "IDT RC32434 SoC Watchdog Timer" | ||
648 | depends on MIKROTIK_RB532 | ||
649 | help | ||
650 | Hardware driver for the IDT RC32434 SoC built-in | ||
651 | watchdog timer. | ||
652 | |||
653 | To compile this driver as a module, choose M here: the | ||
654 | module will be called rc32434_wdt. | ||
655 | |||
635 | config INDYDOG | 656 | config INDYDOG |
636 | tristate "Indy/I2 Hardware Watchdog" | 657 | tristate "Indy/I2 Hardware Watchdog" |
637 | depends on SGI_HAS_INDYDOG | 658 | depends on SGI_HAS_INDYDOG |
@@ -691,10 +712,6 @@ config MPC5200_WDT | |||
691 | tristate "MPC5200 Watchdog Timer" | 712 | tristate "MPC5200 Watchdog Timer" |
692 | depends on PPC_MPC52xx | 713 | depends on PPC_MPC52xx |
693 | 714 | ||
694 | config 8xx_WDT | ||
695 | tristate "MPC8xx Watchdog Timer" | ||
696 | depends on 8xx | ||
697 | |||
698 | config 8xxx_WDT | 715 | config 8xxx_WDT |
699 | tristate "MPC8xxx Platform Watchdog Timer" | 716 | tristate "MPC8xxx Platform Watchdog Timer" |
700 | depends on PPC_8xx || PPC_83xx || PPC_86xx | 717 | depends on PPC_8xx || PPC_83xx || PPC_86xx |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index ca3dc043d786..e0ef123fbdea 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -75,6 +75,7 @@ obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o | |||
75 | obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o | 75 | obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o |
76 | obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o | 76 | obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o |
77 | obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o | 77 | obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o |
78 | obj-$(CONFIG_RDC321X_WDT) += rdc321x_wdt.o | ||
78 | obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o | 79 | obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o |
79 | obj-$(CONFIG_SBC8360_WDT) += sbc8360.o | 80 | obj-$(CONFIG_SBC8360_WDT) += sbc8360.o |
80 | obj-$(CONFIG_SBC7240_WDT) += sbc7240_wdt.o | 81 | obj-$(CONFIG_SBC7240_WDT) += sbc7240_wdt.o |
@@ -94,6 +95,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o | |||
94 | # M68KNOMMU Architecture | 95 | # M68KNOMMU Architecture |
95 | 96 | ||
96 | # MIPS Architecture | 97 | # MIPS Architecture |
98 | obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o | ||
97 | obj-$(CONFIG_INDYDOG) += indydog.o | 99 | obj-$(CONFIG_INDYDOG) += indydog.o |
98 | obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o | 100 | obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o |
99 | obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o | 101 | obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o |
@@ -104,7 +106,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o | |||
104 | # PARISC Architecture | 106 | # PARISC Architecture |
105 | 107 | ||
106 | # POWERPC Architecture | 108 | # POWERPC Architecture |
107 | obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o | ||
108 | obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o | 109 | obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o |
109 | obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o | 110 | obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o |
110 | obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o | 111 | obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o |
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c index d061f0ad2d20..993e5f52afef 100644 --- a/drivers/watchdog/at91rm9200_wdt.c +++ b/drivers/watchdog/at91rm9200_wdt.c | |||
@@ -241,7 +241,7 @@ static int at91wdt_resume(struct platform_device *pdev) | |||
241 | { | 241 | { |
242 | if (at91wdt_busy) | 242 | if (at91wdt_busy) |
243 | at91_wdt_start(); | 243 | at91_wdt_start(); |
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
246 | 246 | ||
247 | #else | 247 | #else |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index d039d5f2fd1c..a3765e0be4a8 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -116,6 +116,7 @@ static unsigned int reload; /* the computed soft_margin */ | |||
116 | static int nowayout = WATCHDOG_NOWAYOUT; | 116 | static int nowayout = WATCHDOG_NOWAYOUT; |
117 | static char expect_release; | 117 | static char expect_release; |
118 | static unsigned long hpwdt_is_open; | 118 | static unsigned long hpwdt_is_open; |
119 | static unsigned int allow_kdump; | ||
119 | 120 | ||
120 | static void __iomem *pci_mem_addr; /* the PCI-memory address */ | 121 | static void __iomem *pci_mem_addr; /* the PCI-memory address */ |
121 | static unsigned long __iomem *hpwdt_timer_reg; | 122 | static unsigned long __iomem *hpwdt_timer_reg; |
@@ -221,19 +222,19 @@ static int __devinit cru_detect(unsigned long map_entry, | |||
221 | 222 | ||
222 | if (cmn_regs.u1.ral != 0) { | 223 | if (cmn_regs.u1.ral != 0) { |
223 | printk(KERN_WARNING | 224 | printk(KERN_WARNING |
224 | "hpwdt: Call succeeded but with an error: 0x%x\n", | 225 | "hpwdt: Call succeeded but with an error: 0x%x\n", |
225 | cmn_regs.u1.ral); | 226 | cmn_regs.u1.ral); |
226 | } else { | 227 | } else { |
227 | physical_bios_base = cmn_regs.u2.rebx; | 228 | physical_bios_base = cmn_regs.u2.rebx; |
228 | physical_bios_offset = cmn_regs.u4.redx; | 229 | physical_bios_offset = cmn_regs.u4.redx; |
229 | cru_length = cmn_regs.u3.recx; | 230 | cru_length = cmn_regs.u3.recx; |
230 | cru_physical_address = | 231 | cru_physical_address = |
231 | physical_bios_base + physical_bios_offset; | 232 | physical_bios_base + physical_bios_offset; |
232 | 233 | ||
233 | /* If the values look OK, then map it in. */ | 234 | /* If the values look OK, then map it in. */ |
234 | if ((physical_bios_base + physical_bios_offset)) { | 235 | if ((physical_bios_base + physical_bios_offset)) { |
235 | cru_rom_addr = | 236 | cru_rom_addr = |
236 | ioremap(cru_physical_address, cru_length); | 237 | ioremap(cru_physical_address, cru_length); |
237 | if (cru_rom_addr) | 238 | if (cru_rom_addr) |
238 | retval = 0; | 239 | retval = 0; |
239 | } | 240 | } |
@@ -356,7 +357,6 @@ asm(".text \n\t" | |||
356 | "call *%r12 \n\t" | 357 | "call *%r12 \n\t" |
357 | "pushfq \n\t" | 358 | "pushfq \n\t" |
358 | "popq %r12 \n\t" | 359 | "popq %r12 \n\t" |
359 | "popfq \n\t" | ||
360 | "movl %eax, (%r9) \n\t" | 360 | "movl %eax, (%r9) \n\t" |
361 | "movl %ebx, 4(%r9) \n\t" | 361 | "movl %ebx, 4(%r9) \n\t" |
362 | "movl %ecx, 8(%r9) \n\t" | 362 | "movl %ecx, 8(%r9) \n\t" |
@@ -390,10 +390,10 @@ static void __devinit dmi_find_cru(const struct dmi_header *dm) | |||
390 | smbios_cru64_ptr = (struct smbios_cru64_info *) dm; | 390 | smbios_cru64_ptr = (struct smbios_cru64_info *) dm; |
391 | if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) { | 391 | if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) { |
392 | cru_physical_address = | 392 | cru_physical_address = |
393 | smbios_cru64_ptr->physical_address + | 393 | smbios_cru64_ptr->physical_address + |
394 | smbios_cru64_ptr->double_offset; | 394 | smbios_cru64_ptr->double_offset; |
395 | cru_rom_addr = ioremap(cru_physical_address, | 395 | cru_rom_addr = ioremap(cru_physical_address, |
396 | smbios_cru64_ptr->double_length); | 396 | smbios_cru64_ptr->double_length); |
397 | } | 397 | } |
398 | } | 398 | } |
399 | } | 399 | } |
@@ -405,7 +405,7 @@ static int __devinit detect_cru_service(void) | |||
405 | dmi_walk(dmi_find_cru); | 405 | dmi_walk(dmi_find_cru); |
406 | 406 | ||
407 | /* if cru_rom_addr has been set then we found a CRU service */ | 407 | /* if cru_rom_addr has been set then we found a CRU service */ |
408 | return ((cru_rom_addr != NULL) ? 0: -ENODEV); | 408 | return ((cru_rom_addr != NULL) ? 0 : -ENODEV); |
409 | } | 409 | } |
410 | 410 | ||
411 | /* ------------------------------------------------------------------------- */ | 411 | /* ------------------------------------------------------------------------- */ |
@@ -413,34 +413,6 @@ static int __devinit detect_cru_service(void) | |||
413 | #endif | 413 | #endif |
414 | 414 | ||
415 | /* | 415 | /* |
416 | * NMI Handler | ||
417 | */ | ||
418 | static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, | ||
419 | void *data) | ||
420 | { | ||
421 | unsigned long rom_pl; | ||
422 | static int die_nmi_called; | ||
423 | |||
424 | if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI) | ||
425 | return NOTIFY_OK; | ||
426 | |||
427 | spin_lock_irqsave(&rom_lock, rom_pl); | ||
428 | if (!die_nmi_called) | ||
429 | asminline_call(&cmn_regs, cru_rom_addr); | ||
430 | die_nmi_called = 1; | ||
431 | spin_unlock_irqrestore(&rom_lock, rom_pl); | ||
432 | if (cmn_regs.u1.ral == 0) { | ||
433 | printk(KERN_WARNING "hpwdt: An NMI occurred, " | ||
434 | "but unable to determine source.\n"); | ||
435 | } else { | ||
436 | panic("An NMI occurred, please see the Integrated " | ||
437 | "Management Log for details.\n"); | ||
438 | } | ||
439 | |||
440 | return NOTIFY_STOP; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * Watchdog operations | 416 | * Watchdog operations |
445 | */ | 417 | */ |
446 | static void hpwdt_start(void) | 418 | static void hpwdt_start(void) |
@@ -484,6 +456,36 @@ static int hpwdt_change_timer(int new_margin) | |||
484 | } | 456 | } |
485 | 457 | ||
486 | /* | 458 | /* |
459 | * NMI Handler | ||
460 | */ | ||
461 | static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, | ||
462 | void *data) | ||
463 | { | ||
464 | unsigned long rom_pl; | ||
465 | static int die_nmi_called; | ||
466 | |||
467 | if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI) | ||
468 | return NOTIFY_OK; | ||
469 | |||
470 | spin_lock_irqsave(&rom_lock, rom_pl); | ||
471 | if (!die_nmi_called) | ||
472 | asminline_call(&cmn_regs, cru_rom_addr); | ||
473 | die_nmi_called = 1; | ||
474 | spin_unlock_irqrestore(&rom_lock, rom_pl); | ||
475 | if (cmn_regs.u1.ral == 0) { | ||
476 | printk(KERN_WARNING "hpwdt: An NMI occurred, " | ||
477 | "but unable to determine source.\n"); | ||
478 | } else { | ||
479 | if (allow_kdump) | ||
480 | hpwdt_stop(); | ||
481 | panic("An NMI occurred, please see the Integrated " | ||
482 | "Management Log for details.\n"); | ||
483 | } | ||
484 | |||
485 | return NOTIFY_STOP; | ||
486 | } | ||
487 | |||
488 | /* | ||
487 | * /dev/watchdog handling | 489 | * /dev/watchdog handling |
488 | */ | 490 | */ |
489 | static int hpwdt_open(struct inode *inode, struct file *file) | 491 | static int hpwdt_open(struct inode *inode, struct file *file) |
@@ -625,17 +627,18 @@ static struct notifier_block die_notifier = { | |||
625 | */ | 627 | */ |
626 | 628 | ||
627 | static int __devinit hpwdt_init_one(struct pci_dev *dev, | 629 | static int __devinit hpwdt_init_one(struct pci_dev *dev, |
628 | const struct pci_device_id *ent) | 630 | const struct pci_device_id *ent) |
629 | { | 631 | { |
630 | int retval; | 632 | int retval; |
631 | 633 | ||
632 | /* | 634 | /* |
633 | * First let's find out if we are on an iLO2 server. We will | 635 | * First let's find out if we are on an iLO2 server. We will |
634 | * not run on a legacy ASM box. | 636 | * not run on a legacy ASM box. |
637 | * So we only support the G5 ProLiant servers and higher. | ||
635 | */ | 638 | */ |
636 | if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) { | 639 | if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) { |
637 | dev_warn(&dev->dev, | 640 | dev_warn(&dev->dev, |
638 | "This server does not have an iLO2 ASIC.\n"); | 641 | "This server does not have an iLO2 ASIC.\n"); |
639 | return -ENODEV; | 642 | return -ENODEV; |
640 | } | 643 | } |
641 | 644 | ||
@@ -669,7 +672,7 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, | |||
669 | retval = detect_cru_service(); | 672 | retval = detect_cru_service(); |
670 | if (retval < 0) { | 673 | if (retval < 0) { |
671 | dev_warn(&dev->dev, | 674 | dev_warn(&dev->dev, |
672 | "Unable to detect the %d Bit CRU Service.\n", | 675 | "Unable to detect the %d Bit CRU Service.\n", |
673 | HPWDT_ARCH); | 676 | HPWDT_ARCH); |
674 | goto error_get_cru; | 677 | goto error_get_cru; |
675 | } | 678 | } |
@@ -684,7 +687,7 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, | |||
684 | retval = register_die_notifier(&die_notifier); | 687 | retval = register_die_notifier(&die_notifier); |
685 | if (retval != 0) { | 688 | if (retval != 0) { |
686 | dev_warn(&dev->dev, | 689 | dev_warn(&dev->dev, |
687 | "Unable to register a die notifier (err=%d).\n", | 690 | "Unable to register a die notifier (err=%d).\n", |
688 | retval); | 691 | retval); |
689 | goto error_die_notifier; | 692 | goto error_die_notifier; |
690 | } | 693 | } |
@@ -699,8 +702,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, | |||
699 | 702 | ||
700 | printk(KERN_INFO | 703 | printk(KERN_INFO |
701 | "hp Watchdog Timer Driver: 1.00" | 704 | "hp Watchdog Timer Driver: 1.00" |
702 | ", timer margin: %d seconds( nowayout=%d).\n", | 705 | ", timer margin: %d seconds (nowayout=%d)" |
703 | soft_margin, nowayout); | 706 | ", allow kernel dump: %s (default = 0/OFF).\n", |
707 | soft_margin, nowayout, (allow_kdump == 0) ? "OFF" : "ON"); | ||
704 | 708 | ||
705 | return 0; | 709 | return 0; |
706 | 710 | ||
@@ -755,6 +759,9 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | |||
755 | module_param(soft_margin, int, 0); | 759 | module_param(soft_margin, int, 0); |
756 | MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds"); | 760 | MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds"); |
757 | 761 | ||
762 | module_param(allow_kdump, int, 0); | ||
763 | MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); | ||
764 | |||
758 | module_param(nowayout, int, 0); | 765 | module_param(nowayout, int, 0); |
759 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" | 766 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" |
760 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | 767 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); |
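The hpwdt changes add an allow_kdump module parameter and move the NMI pretimeout handler below the watchdog operations so it can call hpwdt_stop() before panicking; with the hardware timer stopped, a configured kdump kernel has time to write the crash dump instead of being rebooted by iLO2. A schematic die notifier of the same shape (the helper names here are hypothetical, not the hpwdt symbols):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>

static int allow_kdump;
module_param(allow_kdump, int, 0);

static void example_stop_hw_timer(void) { }	/* hypothetical: quiesce the watchdog */

static int example_pretimeout(struct notifier_block *nb,
			      unsigned long reason, void *data)
{
	if (reason != DIE_NMI)
		return NOTIFY_OK;		/* not our event, keep the chain going */

	if (allow_kdump)
		example_stop_hw_timer();	/* give kdump time before the hw reset */

	panic("watchdog NMI");			/* panic() hands off to kdump if configured */
	return NOTIFY_STOP;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_pretimeout,
};
/* register_die_notifier(&example_die_nb) hooks this into the die chain */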
diff --git a/drivers/watchdog/mpc8xx_wdt.c b/drivers/watchdog/mpc8xx_wdt.c deleted file mode 100644 index 1336425acf20..000000000000 --- a/drivers/watchdog/mpc8xx_wdt.c +++ /dev/null | |||
@@ -1,170 +0,0 @@ | |||
1 | /* | ||
2 | * mpc8xx_wdt.c - MPC8xx watchdog userspace interface | ||
3 | * | ||
4 | * Author: Florian Schirmer <jolt@tuxbox.org> | ||
5 | * | ||
6 | * 2002 (c) Florian Schirmer <jolt@tuxbox.org> This file is licensed under | ||
7 | * the terms of the GNU General Public License version 2. This program | ||
8 | * is licensed "as is" without any warranty of any kind, whether express | ||
9 | * or implied. | ||
10 | */ | ||
11 | |||
12 | #include <linux/fs.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/miscdevice.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/watchdog.h> | ||
18 | #include <asm/8xx_immap.h> | ||
19 | #include <linux/uaccess.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <syslib/m8xx_wdt.h> | ||
22 | |||
23 | static unsigned long wdt_opened; | ||
24 | static int wdt_status; | ||
25 | static spinlock_t wdt_lock; | ||
26 | |||
27 | static void mpc8xx_wdt_handler_disable(void) | ||
28 | { | ||
29 | volatile uint __iomem *piscr; | ||
30 | piscr = (uint *)&((immap_t *)IMAP_ADDR)->im_sit.sit_piscr; | ||
31 | |||
32 | if (!m8xx_has_internal_rtc) | ||
33 | m8xx_wdt_stop_timer(); | ||
34 | else | ||
35 | out_be32(piscr, in_be32(piscr) & ~(PISCR_PIE | PISCR_PTE)); | ||
36 | printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler deactivated\n"); | ||
37 | } | ||
38 | |||
39 | static void mpc8xx_wdt_handler_enable(void) | ||
40 | { | ||
41 | volatile uint __iomem *piscr; | ||
42 | piscr = (uint *)&((immap_t *)IMAP_ADDR)->im_sit.sit_piscr; | ||
43 | |||
44 | if (!m8xx_has_internal_rtc) | ||
45 | m8xx_wdt_install_timer(); | ||
46 | else | ||
47 | out_be32(piscr, in_be32(piscr) | PISCR_PIE | PISCR_PTE); | ||
48 | printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler activated\n"); | ||
49 | } | ||
50 | |||
51 | static int mpc8xx_wdt_open(struct inode *inode, struct file *file) | ||
52 | { | ||
53 | if (test_and_set_bit(0, &wdt_opened)) | ||
54 | return -EBUSY; | ||
55 | m8xx_wdt_reset(); | ||
56 | mpc8xx_wdt_handler_disable(); | ||
57 | return nonseekable_open(inode, file); | ||
58 | } | ||
59 | |||
60 | static int mpc8xx_wdt_release(struct inode *inode, struct file *file) | ||
61 | { | ||
62 | m8xx_wdt_reset(); | ||
63 | #if !defined(CONFIG_WATCHDOG_NOWAYOUT) | ||
64 | mpc8xx_wdt_handler_enable(); | ||
65 | #endif | ||
66 | clear_bit(0, &wdt_opened); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static ssize_t mpc8xx_wdt_write(struct file *file, const char *data, | ||
71 | size_t len, loff_t *ppos) | ||
72 | { | ||
73 | if (len) { | ||
74 | spin_lock(&wdt_lock); | ||
75 | m8xx_wdt_reset(); | ||
76 | spin_unlock(&wdt_lock); | ||
77 | } | ||
78 | return len; | ||
79 | } | ||
80 | |||
81 | static long mpc8xx_wdt_ioctl(struct file *file, | ||
82 | unsigned int cmd, unsigned long arg) | ||
83 | { | ||
84 | int timeout; | ||
85 | static struct watchdog_info info = { | ||
86 | .options = WDIOF_KEEPALIVEPING, | ||
87 | .firmware_version = 0, | ||
88 | .identity = "MPC8xx watchdog", | ||
89 | }; | ||
90 | |||
91 | switch (cmd) { | ||
92 | case WDIOC_GETSUPPORT: | ||
93 | if (copy_to_user((void *)arg, &info, sizeof(info))) | ||
94 | return -EFAULT; | ||
95 | break; | ||
96 | |||
97 | case WDIOC_GETSTATUS: | ||
98 | case WDIOC_GETBOOTSTATUS: | ||
99 | if (put_user(wdt_status, (int *)arg)) | ||
100 | return -EFAULT; | ||
101 | wdt_status &= ~WDIOF_KEEPALIVEPING; | ||
102 | break; | ||
103 | |||
104 | case WDIOC_GETTEMP: | ||
105 | return -EOPNOTSUPP; | ||
106 | |||
107 | case WDIOC_SETOPTIONS: | ||
108 | return -EOPNOTSUPP; | ||
109 | |||
110 | case WDIOC_KEEPALIVE: | ||
111 | spin_lock(&wdt_lock); | ||
112 | m8xx_wdt_reset(); | ||
113 | wdt_status |= WDIOF_KEEPALIVEPING; | ||
114 | spin_unlock(&wdt_lock); | ||
115 | break; | ||
116 | |||
117 | case WDIOC_SETTIMEOUT: | ||
118 | return -EOPNOTSUPP; | ||
119 | |||
120 | case WDIOC_GETTIMEOUT: | ||
121 | spin_lock(&wdt_lock); | ||
122 | timeout = m8xx_wdt_get_timeout(); | ||
123 | spin_unlock(&wdt_lock); | ||
124 | if (put_user(timeout, (int *)arg)) | ||
125 | return -EFAULT; | ||
126 | break; | ||
127 | |||
128 | default: | ||
129 | return -ENOTTY; | ||
130 | } | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static const struct file_operations mpc8xx_wdt_fops = { | ||
136 | .owner = THIS_MODULE, | ||
137 | .llseek = no_llseek, | ||
138 | .write = mpc8xx_wdt_write, | ||
139 | .unlocked_ioctl = mpc8xx_wdt_ioctl, | ||
140 | .open = mpc8xx_wdt_open, | ||
141 | .release = mpc8xx_wdt_release, | ||
142 | }; | ||
143 | |||
144 | static struct miscdevice mpc8xx_wdt_miscdev = { | ||
145 | .minor = WATCHDOG_MINOR, | ||
146 | .name = "watchdog", | ||
147 | .fops = &mpc8xx_wdt_fops, | ||
148 | }; | ||
149 | |||
150 | static int __init mpc8xx_wdt_init(void) | ||
151 | { | ||
152 | spin_lock_init(&wdt_lock); | ||
153 | return misc_register(&mpc8xx_wdt_miscdev); | ||
154 | } | ||
155 | |||
156 | static void __exit mpc8xx_wdt_exit(void) | ||
157 | { | ||
158 | misc_deregister(&mpc8xx_wdt_miscdev); | ||
159 | |||
160 | m8xx_wdt_reset(); | ||
161 | mpc8xx_wdt_handler_enable(); | ||
162 | } | ||
163 | |||
164 | module_init(mpc8xx_wdt_init); | ||
165 | module_exit(mpc8xx_wdt_exit); | ||
166 | |||
167 | MODULE_AUTHOR("Florian Schirmer <jolt@tuxbox.org>"); | ||
168 | MODULE_DESCRIPTION("MPC8xx watchdog driver"); | ||
169 | MODULE_LICENSE("GPL"); | ||
170 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index f2094960e662..38c588ee694f 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c | |||
@@ -48,6 +48,7 @@ struct mpc8xxx_wdt_type { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct mpc8xxx_wdt __iomem *wd_base; | 50 | static struct mpc8xxx_wdt __iomem *wd_base; |
51 | static int mpc8xxx_wdt_init_late(void); | ||
51 | 52 | ||
52 | static u16 timeout = 0xffff; | 53 | static u16 timeout = 0xffff; |
53 | module_param(timeout, ushort, 0); | 54 | module_param(timeout, ushort, 0); |
@@ -213,6 +214,12 @@ static int __devinit mpc8xxx_wdt_probe(struct of_device *ofdev, | |||
213 | else | 214 | else |
214 | timeout_sec = timeout / freq; | 215 | timeout_sec = timeout / freq; |
215 | 216 | ||
217 | #ifdef MODULE | ||
218 | ret = mpc8xxx_wdt_init_late(); | ||
219 | if (ret) | ||
220 | goto err_unmap; | ||
221 | #endif | ||
222 | |||
216 | pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d " | 223 | pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d " |
217 | "(%d seconds)\n", reset ? "reset" : "interrupt", timeout, | 224 | "(%d seconds)\n", reset ? "reset" : "interrupt", timeout, |
218 | timeout_sec); | 225 | timeout_sec); |
@@ -280,7 +287,7 @@ static struct of_platform_driver mpc8xxx_wdt_driver = { | |||
280 | * very early to start pinging the watchdog (misc devices are not yet | 287 | * very early to start pinging the watchdog (misc devices are not yet |
281 | * available), and later module_init() just registers the misc device. | 288 | * available), and later module_init() just registers the misc device. |
282 | */ | 289 | */ |
283 | static int __init mpc8xxx_wdt_init_late(void) | 290 | static int mpc8xxx_wdt_init_late(void) |
284 | { | 291 | { |
285 | int ret; | 292 | int ret; |
286 | 293 | ||
@@ -295,7 +302,9 @@ static int __init mpc8xxx_wdt_init_late(void) | |||
295 | } | 302 | } |
296 | return 0; | 303 | return 0; |
297 | } | 304 | } |
305 | #ifndef MODULE | ||
298 | module_init(mpc8xxx_wdt_init_late); | 306 | module_init(mpc8xxx_wdt_init_late); |
307 | #endif | ||
299 | 308 | ||
300 | static int __init mpc8xxx_wdt_init(void) | 309 | static int __init mpc8xxx_wdt_init(void) |
301 | { | 310 | { |
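The mpc8xxx_wdt change resolves an ordering problem for modular builds: built in, the hardware is pinged from arch code very early and mpc8xxx_wdt_init_late() registers the misc device later via module_init(); built as a module, module_init() is the probe path itself, so the probe now calls the late init directly and the module_init() hook is compiled out. A sketch of the pattern with a hypothetical foo driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_init_late(void)
{
	/* register the userspace interface (misc_register() etc.) */
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	/* ...early hardware setup... */
#ifdef MODULE
	/* modular build: probe already runs at module_init() time, finish up here */
	return foo_init_late();
#else
	return 0;
#endif
}

#ifndef MODULE
/* built-in build: probing happens before misc devices exist, so defer this */
module_init(foo_init_late);
#endif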
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c index e91ada72da1d..484c215e9f3f 100644 --- a/drivers/watchdog/pc87413_wdt.c +++ b/drivers/watchdog/pc87413_wdt.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/moduleparam.h> | 32 | #include <linux/moduleparam.h> |
33 | #include <linux/version.h> | ||
34 | #include <linux/io.h> | 33 | #include <linux/io.h> |
35 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
36 | 35 | ||
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c new file mode 100644 index 000000000000..6756bcb009ed --- /dev/null +++ b/drivers/watchdog/rc32434_wdt.c | |||
@@ -0,0 +1,344 @@ | |||
1 | /* | ||
2 | * IDT Interprise 79RC32434 watchdog driver | ||
3 | * | ||
4 | * Copyright (C) 2006, Ondrej Zajicek <santiago@crfreenet.org> | ||
5 | * Copyright (C) 2008, Florian Fainelli <florian@openwrt.org> | ||
6 | * | ||
7 | * based on | ||
8 | * SoftDog 0.05: A Software Watchdog Device | ||
9 | * | ||
10 | * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/miscdevice.h> | ||
25 | #include <linux/watchdog.h> | ||
26 | #include <linux/reboot.h> | ||
27 | #include <linux/smp_lock.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <linux/uaccess.h> | ||
31 | |||
32 | #include <asm/bootinfo.h> | ||
33 | #include <asm/time.h> | ||
34 | #include <asm/mach-rc32434/integ.h> | ||
35 | |||
36 | #define MAX_TIMEOUT 20 | ||
37 | #define RC32434_WDT_INTERVAL (15 * HZ) | ||
38 | |||
39 | #define VERSION "0.2" | ||
40 | |||
41 | static struct { | ||
42 | struct completion stop; | ||
43 | int running; | ||
44 | struct timer_list timer; | ||
45 | int queue; | ||
46 | int default_ticks; | ||
47 | unsigned long inuse; | ||
48 | } rc32434_wdt_device; | ||
49 | |||
50 | static struct integ __iomem *wdt_reg; | ||
51 | static int ticks = 100 * HZ; | ||
52 | |||
53 | static int expect_close; | ||
54 | static int timeout; | ||
55 | |||
56 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
57 | module_param(nowayout, int, 0); | ||
58 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" | ||
59 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | ||
60 | |||
61 | |||
62 | static void rc32434_wdt_start(void) | ||
63 | { | ||
64 | u32 val; | ||
65 | |||
66 | if (!rc32434_wdt_device.inuse) { | ||
67 | writel(0, &wdt_reg->wtcount); | ||
68 | |||
69 | val = RC32434_ERR_WRE; | ||
70 | writel(readl(&wdt_reg->errcs) | val, &wdt_reg->errcs); | ||
71 | |||
72 | val = RC32434_WTC_EN; | ||
73 | writel(readl(&wdt_reg->wtc) | val, &wdt_reg->wtc); | ||
74 | } | ||
75 | rc32434_wdt_device.running++; | ||
76 | } | ||
77 | |||
78 | static void rc32434_wdt_stop(void) | ||
79 | { | ||
80 | u32 val; | ||
81 | |||
82 | if (rc32434_wdt_device.running) { | ||
83 | |||
84 | val = ~RC32434_WTC_EN; | ||
85 | writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc); | ||
86 | |||
87 | val = ~RC32434_ERR_WRE; | ||
88 | writel(readl(&wdt_reg->errcs) & val, &wdt_reg->errcs); | ||
89 | |||
90 | rc32434_wdt_device.running = 0; | ||
91 | } | ||
92 | } | ||
93 | |||
94 | static void rc32434_wdt_set(int new_timeout) | ||
95 | { | ||
96 | u32 cmp = new_timeout * HZ; | ||
97 | u32 state, val; | ||
98 | |||
99 | timeout = new_timeout; | ||
100 | /* | ||
101 | * store and disable WTC | ||
102 | */ | ||
103 | state = (u32)(readl(&wdt_reg->wtc) & RC32434_WTC_EN); | ||
104 | val = ~RC32434_WTC_EN; | ||
105 | writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc); | ||
106 | |||
107 | writel(0, &wdt_reg->wtcount); | ||
108 | writel(cmp, &wdt_reg->wtcompare); | ||
109 | |||
110 | /* | ||
111 | * restore WTC | ||
112 | */ | ||
113 | |||
114 | writel(readl(&wdt_reg->wtc) | state, &wdt_reg->wtc); | ||
115 | } | ||
116 | |||
117 | static void rc32434_wdt_reset(void) | ||
118 | { | ||
119 | ticks = rc32434_wdt_device.default_ticks; | ||
120 | } | ||
121 | |||
122 | static void rc32434_wdt_update(unsigned long unused) | ||
123 | { | ||
124 | if (rc32434_wdt_device.running) | ||
125 | ticks--; | ||
126 | |||
127 | writel(0, &wdt_reg->wtcount); | ||
128 | |||
129 | if (rc32434_wdt_device.queue && ticks) | ||
130 | mod_timer(&rc32434_wdt_device.timer, | ||
131 | jiffies + RC32434_WDT_INTERVAL); | ||
132 | else | ||
133 | complete(&rc32434_wdt_device.stop); | ||
134 | } | ||
135 | |||
136 | static int rc32434_wdt_open(struct inode *inode, struct file *file) | ||
137 | { | ||
138 | if (test_and_set_bit(0, &rc32434_wdt_device.inuse)) | ||
139 | return -EBUSY; | ||
140 | |||
141 | if (nowayout) | ||
142 | __module_get(THIS_MODULE); | ||
143 | |||
144 | return nonseekable_open(inode, file); | ||
145 | } | ||
146 | |||
147 | static int rc32434_wdt_release(struct inode *inode, struct file *file) | ||
148 | { | ||
149 | if (expect_close && nowayout == 0) { | ||
150 | rc32434_wdt_stop(); | ||
151 | printk(KERN_INFO KBUILD_MODNAME ": disabling watchdog timer\n"); | ||
152 | module_put(THIS_MODULE); | ||
153 | } else | ||
154 | printk(KERN_CRIT KBUILD_MODNAME | ||
155 | ": device closed unexpectedly. WDT will not stop !\n"); | ||
156 | |||
157 | clear_bit(0, &rc32434_wdt_device.inuse); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static ssize_t rc32434_wdt_write(struct file *file, const char *data, | ||
162 | size_t len, loff_t *ppos) | ||
163 | { | ||
164 | if (len) { | ||
165 | if (!nowayout) { | ||
166 | size_t i; | ||
167 | |||
168 | /* In case it was set long ago */ | ||
169 | expect_close = 0; | ||
170 | |||
171 | for (i = 0; i != len; i++) { | ||
172 | char c; | ||
173 | if (get_user(c, data + i)) | ||
174 | return -EFAULT; | ||
175 | if (c == 'V') | ||
176 | expect_close = 1; | ||
177 | } | ||
178 | } | ||
179 | rc32434_wdt_update(0); | ||
180 | return len; | ||
181 | } | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static int rc32434_wdt_ioctl(struct inode *inode, struct file *file, | ||
186 | unsigned int cmd, unsigned long arg) | ||
187 | { | ||
188 | void __user *argp = (void __user *)arg; | ||
189 | int new_timeout; | ||
190 | unsigned int value; | ||
191 | static struct watchdog_info ident = { | ||
192 | .options = WDIOF_SETTIMEOUT | | ||
193 | WDIOF_KEEPALIVEPING | | ||
194 | WDIOF_MAGICCLOSE, | ||
195 | .identity = "RC32434_WDT Watchdog", | ||
196 | }; | ||
197 | switch (cmd) { | ||
198 | case WDIOC_KEEPALIVE: | ||
199 | rc32434_wdt_reset(); | ||
200 | break; | ||
201 | case WDIOC_GETSTATUS: | ||
202 | case WDIOC_GETBOOTSTATUS: | ||
203 | value = readl(&wdt_reg->wtcount); | ||
204 | if (copy_to_user(argp, &value, sizeof(int))) | ||
205 | return -EFAULT; | ||
206 | break; | ||
207 | case WDIOC_GETSUPPORT: | ||
208 | if (copy_to_user(argp, &ident, sizeof(ident))) | ||
209 | return -EFAULT; | ||
210 | break; | ||
211 | case WDIOC_SETOPTIONS: | ||
212 | if (copy_from_user(&value, argp, sizeof(int))) | ||
213 | return -EFAULT; | ||
214 | switch (value) { | ||
215 | case WDIOS_ENABLECARD: | ||
216 | rc32434_wdt_start(); | ||
217 | break; | ||
218 | case WDIOS_DISABLECARD: | ||
219 | rc32434_wdt_stop(); | ||
220 | default: | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | break; | ||
224 | case WDIOC_SETTIMEOUT: | ||
225 | if (copy_from_user(&new_timeout, argp, sizeof(int))) | ||
226 | return -EFAULT; | ||
227 | if (new_timeout < 1) | ||
228 | return -EINVAL; | ||
229 | if (new_timeout > MAX_TIMEOUT) | ||
230 | return -EINVAL; | ||
231 | rc32434_wdt_set(new_timeout); | ||
232 | case WDIOC_GETTIMEOUT: | ||
233 | return copy_to_user(argp, &timeout, sizeof(int)); | ||
234 | default: | ||
235 | return -ENOTTY; | ||
236 | } | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static struct file_operations rc32434_wdt_fops = { | ||
242 | .owner = THIS_MODULE, | ||
243 | .llseek = no_llseek, | ||
244 | .write = rc32434_wdt_write, | ||
245 | .ioctl = rc32434_wdt_ioctl, | ||
246 | .open = rc32434_wdt_open, | ||
247 | .release = rc32434_wdt_release, | ||
248 | }; | ||
249 | |||
250 | static struct miscdevice rc32434_wdt_miscdev = { | ||
251 | .minor = WATCHDOG_MINOR, | ||
252 | .name = "watchdog", | ||
253 | .fops = &rc32434_wdt_fops, | ||
254 | }; | ||
255 | |||
256 | static char banner[] = KERN_INFO KBUILD_MODNAME | ||
257 | ": Watchdog Timer version " VERSION ", timer margin: %d sec\n"; | ||
258 | |||
259 | static int rc32434_wdt_probe(struct platform_device *pdev) | ||
260 | { | ||
261 | int ret; | ||
262 | struct resource *r; | ||
263 | |||
264 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb500_wdt_res"); | ||
265 | if (!r) { | ||
266 | printk(KERN_ERR KBUILD_MODNAME | ||
267 | "failed to retrieve resources\n"); | ||
268 | return -ENODEV; | ||
269 | } | ||
270 | |||
271 | wdt_reg = ioremap_nocache(r->start, r->end - r->start + 1); | ||
272 | if (!wdt_reg) { | ||
273 | printk(KERN_ERR KBUILD_MODNAME | ||
274 | "failed to remap I/O resources\n"); | ||
275 | return -ENXIO; | ||
276 | } | ||
277 | |||
278 | ret = misc_register(&rc32434_wdt_miscdev); | ||
279 | |||
280 | if (ret < 0) { | ||
281 | printk(KERN_ERR KBUILD_MODNAME | ||
282 | "failed to register watchdog device\n"); | ||
283 | goto unmap; | ||
284 | } | ||
285 | |||
286 | init_completion(&rc32434_wdt_device.stop); | ||
287 | rc32434_wdt_device.queue = 0; | ||
288 | |||
289 | clear_bit(0, &rc32434_wdt_device.inuse); | ||
290 | |||
291 | setup_timer(&rc32434_wdt_device.timer, rc32434_wdt_update, 0L); | ||
292 | |||
293 | rc32434_wdt_device.default_ticks = ticks; | ||
294 | |||
295 | rc32434_wdt_start(); | ||
296 | |||
297 | printk(banner, timeout); | ||
298 | |||
299 | return 0; | ||
300 | |||
301 | unmap: | ||
302 | iounmap(wdt_reg); | ||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | static int rc32434_wdt_remove(struct platform_device *pdev) | ||
307 | { | ||
308 | if (rc32434_wdt_device.queue) { | ||
309 | rc32434_wdt_device.queue = 0; | ||
310 | wait_for_completion(&rc32434_wdt_device.stop); | ||
311 | } | ||
312 | misc_deregister(&rc32434_wdt_miscdev); | ||
313 | |||
314 | iounmap(wdt_reg); | ||
315 | |||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | static struct platform_driver rc32434_wdt = { | ||
320 | .probe = rc32434_wdt_probe, | ||
321 | .remove = rc32434_wdt_remove, | ||
322 | .driver = { | ||
323 | .name = "rc32434_wdt", | ||
324 | } | ||
325 | }; | ||
326 | |||
327 | static int __init rc32434_wdt_init(void) | ||
328 | { | ||
329 | return platform_driver_register(&rc32434_wdt); | ||
330 | } | ||
331 | |||
332 | static void __exit rc32434_wdt_exit(void) | ||
333 | { | ||
334 | platform_driver_unregister(&rc32434_wdt); | ||
335 | } | ||
336 | |||
337 | module_init(rc32434_wdt_init); | ||
338 | module_exit(rc32434_wdt_exit); | ||
339 | |||
340 | MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>," | ||
341 | "Florian Fainelli <florian@openwrt.org>"); | ||
342 | MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog"); | ||
343 | MODULE_LICENSE("GPL"); | ||
344 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
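Both new drivers (rc32434_wdt above and rdc321x_wdt below) expose the standard /dev/watchdog character device, so userspace drives them like any other watchdog: set a timeout where the driver supports it, ping periodically, and write the magic 'V' before closing when magic close is honoured and nowayout is off. A minimal user-space client, with illustrative values:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 15;	/* seconds; rc32434_wdt rejects values above MAX_TIMEOUT (20) */
	int i;

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
	for (i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* or write any byte to ping */
		sleep(timeout / 2);
	}
	write(fd, "V", 1);	/* magic close: let the driver stop the timer */
	close(fd);
	return 0;
}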
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c new file mode 100644 index 000000000000..9108efa73e7d --- /dev/null +++ b/drivers/watchdog/rdc321x_wdt.c | |||
@@ -0,0 +1,285 @@ | |||
1 | /* | ||
2 | * RDC321x watchdog driver | ||
3 | * | ||
4 | * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org> | ||
5 | * | ||
6 | * This driver is highly inspired from the cpu5_wdt driver | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/miscdevice.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/ioport.h> | ||
32 | #include <linux/timer.h> | ||
33 | #include <linux/completion.h> | ||
34 | #include <linux/jiffies.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/watchdog.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/uaccess.h> | ||
39 | |||
40 | #include <asm/mach-rdc321x/rdc321x_defs.h> | ||
41 | |||
42 | #define RDC_WDT_MASK 0x80000000 /* Mask */ | ||
43 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ | ||
44 | #define RDC_WDT_WTI 0x00200000 /* Generate CPU reset/NMI/WDT on timeout */ | ||
45 | #define RDC_WDT_RST 0x00100000 /* Reset bit */ | ||
46 | #define RDC_WDT_WIF 0x00040000 /* WDT IRQ Flag */ | ||
47 | #define RDC_WDT_IRT 0x00000100 /* IRQ Routing table */ | ||
48 | #define RDC_WDT_CNT 0x00000001 /* WDT count */ | ||
49 | |||
50 | #define RDC_CLS_TMR 0x80003844 /* Clear timer */ | ||
51 | |||
52 | #define RDC_WDT_INTERVAL (HZ/10+1) | ||
53 | |||
54 | static int ticks = 1000; | ||
55 | |||
56 | /* some device data */ | ||
57 | |||
58 | static struct { | ||
59 | struct completion stop; | ||
60 | int running; | ||
61 | struct timer_list timer; | ||
62 | int queue; | ||
63 | int default_ticks; | ||
64 | unsigned long inuse; | ||
65 | spinlock_t lock; | ||
66 | } rdc321x_wdt_device; | ||
67 | |||
68 | /* generic helper functions */ | ||
69 | |||
70 | static void rdc321x_wdt_trigger(unsigned long unused) | ||
71 | { | ||
72 | unsigned long flags; | ||
73 | |||
74 | if (rdc321x_wdt_device.running) | ||
75 | ticks--; | ||
76 | |||
77 | /* keep watchdog alive */ | ||
78 | spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); | ||
79 | outl(RDC_WDT_EN | inl(RDC3210_CFGREG_DATA), | ||
80 | RDC3210_CFGREG_DATA); | ||
81 | spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); | ||
82 | |||
83 | /* requeue?? */ | ||
84 | if (rdc321x_wdt_device.queue && ticks) | ||
85 | mod_timer(&rdc321x_wdt_device.timer, | ||
86 | jiffies + RDC_WDT_INTERVAL); | ||
87 | else { | ||
88 | /* ticks doesn't matter anyway */ | ||
89 | complete(&rdc321x_wdt_device.stop); | ||
90 | } | ||
91 | |||
92 | } | ||
93 | |||
94 | static void rdc321x_wdt_reset(void) | ||
95 | { | ||
96 | ticks = rdc321x_wdt_device.default_ticks; | ||
97 | } | ||
98 | |||
99 | static void rdc321x_wdt_start(void) | ||
100 | { | ||
101 | unsigned long flags; | ||
102 | |||
103 | if (!rdc321x_wdt_device.queue) { | ||
104 | rdc321x_wdt_device.queue = 1; | ||
105 | |||
106 | /* Clear the timer */ | ||
107 | spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); | ||
108 | outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR); | ||
109 | |||
110 | /* Enable watchdog and set the timeout to 81.92 us */ | ||
111 | outl(RDC_WDT_EN | RDC_WDT_CNT, RDC3210_CFGREG_DATA); | ||
112 | spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); | ||
113 | |||
114 | mod_timer(&rdc321x_wdt_device.timer, | ||
115 | jiffies + RDC_WDT_INTERVAL); | ||
116 | } | ||
117 | |||
118 | /* if process dies, counter is not decremented */ | ||
119 | rdc321x_wdt_device.running++; | ||
120 | } | ||
121 | |||
122 | static int rdc321x_wdt_stop(void) | ||
123 | { | ||
124 | if (rdc321x_wdt_device.running) | ||
125 | rdc321x_wdt_device.running = 0; | ||
126 | |||
127 | ticks = rdc321x_wdt_device.default_ticks; | ||
128 | |||
129 | return -EIO; | ||
130 | } | ||
131 | |||
132 | /* filesystem operations */ | ||
133 | static int rdc321x_wdt_open(struct inode *inode, struct file *file) | ||
134 | { | ||
135 | if (test_and_set_bit(0, &rdc321x_wdt_device.inuse)) | ||
136 | return -EBUSY; | ||
137 | |||
138 | return nonseekable_open(inode, file); | ||
139 | } | ||
140 | |||
141 | static int rdc321x_wdt_release(struct inode *inode, struct file *file) | ||
142 | { | ||
143 | clear_bit(0, &rdc321x_wdt_device.inuse); | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file, | ||
148 | unsigned int cmd, unsigned long arg) | ||
149 | { | ||
150 | void __user *argp = (void __user *)arg; | ||
151 | unsigned int value; | ||
152 | static struct watchdog_info ident = { | ||
153 | .options = WDIOF_CARDRESET, | ||
154 | .identity = "RDC321x WDT", | ||
155 | }; | ||
156 | unsigned long flags; | ||
157 | |||
158 | switch (cmd) { | ||
159 | case WDIOC_KEEPALIVE: | ||
160 | rdc321x_wdt_reset(); | ||
161 | break; | ||
162 | case WDIOC_GETSTATUS: | ||
163 | /* Read the value from the DATA register */ | ||
164 | spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); | ||
165 | value = inl(RDC3210_CFGREG_DATA); | ||
166 | spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); | ||
167 | if (copy_to_user(argp, &value, sizeof(int))) | ||
168 | return -EFAULT; | ||
169 | break; | ||
170 | case WDIOC_GETSUPPORT: | ||
171 | if (copy_to_user(argp, &ident, sizeof(ident))) | ||
172 | return -EFAULT; | ||
173 | break; | ||
174 | case WDIOC_SETOPTIONS: | ||
175 | if (copy_from_user(&value, argp, sizeof(int))) | ||
176 | return -EFAULT; | ||
177 | switch (value) { | ||
178 | case WDIOS_ENABLECARD: | ||
179 | rdc321x_wdt_start(); | ||
180 | break; | ||
181 | case WDIOS_DISABLECARD: | ||
182 | return rdc321x_wdt_stop(); | ||
183 | default: | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | break; | ||
187 | default: | ||
188 | return -ENOTTY; | ||
189 | } | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf, | ||
194 | size_t count, loff_t *ppos) | ||
195 | { | ||
196 | if (!count) | ||
197 | return -EIO; | ||
198 | |||
199 | rdc321x_wdt_reset(); | ||
200 | |||
201 | return count; | ||
202 | } | ||
203 | |||
204 | static const struct file_operations rdc321x_wdt_fops = { | ||
205 | .owner = THIS_MODULE, | ||
206 | .llseek = no_llseek, | ||
207 | .ioctl = rdc321x_wdt_ioctl, | ||
208 | .open = rdc321x_wdt_open, | ||
209 | .write = rdc321x_wdt_write, | ||
210 | .release = rdc321x_wdt_release, | ||
211 | }; | ||
212 | |||
213 | static struct miscdevice rdc321x_wdt_misc = { | ||
214 | .minor = WATCHDOG_MINOR, | ||
215 | .name = "watchdog", | ||
216 | .fops = &rdc321x_wdt_fops, | ||
217 | }; | ||
218 | |||
219 | static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) | ||
220 | { | ||
221 | int err; | ||
222 | |||
223 | err = misc_register(&rdc321x_wdt_misc); | ||
224 | if (err < 0) { | ||
225 | printk(KERN_ERR PFX "watchdog misc_register failed\n"); | ||
226 | return err; | ||
227 | } | ||
228 | |||
229 | spin_lock_init(&rdc321x_wdt_device.lock); | ||
230 | |||
231 | /* Reset the watchdog */ | ||
232 | outl(RDC_WDT_RST, RDC3210_CFGREG_DATA); | ||
233 | |||
234 | init_completion(&rdc321x_wdt_device.stop); | ||
235 | rdc321x_wdt_device.queue = 0; | ||
236 | |||
237 | clear_bit(0, &rdc321x_wdt_device.inuse); | ||
238 | |||
239 | setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0); | ||
240 | |||
241 | rdc321x_wdt_device.default_ticks = ticks; | ||
242 | |||
243 | printk(KERN_INFO PFX "watchdog init success\n"); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int rdc321x_wdt_remove(struct platform_device *pdev) | ||
249 | { | ||
250 | if (rdc321x_wdt_device.queue) { | ||
251 | rdc321x_wdt_device.queue = 0; | ||
252 | wait_for_completion(&rdc321x_wdt_device.stop); | ||
253 | } | ||
254 | |||
255 | misc_deregister(&rdc321x_wdt_misc); | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static struct platform_driver rdc321x_wdt_driver = { | ||
261 | .probe = rdc321x_wdt_probe, | ||
262 | .remove = rdc321x_wdt_remove, | ||
263 | .driver = { | ||
264 | .owner = THIS_MODULE, | ||
265 | .name = "rdc321x-wdt", | ||
266 | }, | ||
267 | }; | ||
268 | |||
269 | static int __init rdc321x_wdt_init(void) | ||
270 | { | ||
271 | return platform_driver_register(&rdc321x_wdt_driver); | ||
272 | } | ||
273 | |||
274 | static void __exit rdc321x_wdt_exit(void) | ||
275 | { | ||
276 | platform_driver_unregister(&rdc321x_wdt_driver); | ||
277 | } | ||
278 | |||
279 | module_init(rdc321x_wdt_init); | ||
280 | module_exit(rdc321x_wdt_exit); | ||
281 | |||
282 | MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); | ||
283 | MODULE_DESCRIPTION("RDC321x watchdog driver"); | ||
284 | MODULE_LICENSE("GPL"); | ||
285 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
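For reference, the misc device registered above exposes the standard Linux watchdog character-device API: userspace opens the node, pets the timer with writes or WDIOC_KEEPALIVE, and may attempt WDIOC_SETOPTIONS. A minimal userspace sketch follows; it is not part of this commit, and the /dev/watchdog path and one-second pet interval are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	/* /dev/watchdog is the usual node for a miscdevice registered
	 * with WATCHDOG_MINOR; adjust the path if it is named differently. */
	int fd = open("/dev/watchdog", O_WRONLY);
	int i, flags;

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	for (i = 0; i < 10; i++) {
		/* Either of these reaches rdc321x_wdt_reset(), which
		 * reloads the driver's software tick counter. */
		if (write(fd, "k", 1) < 0)
			perror("write");
		if (ioctl(fd, WDIOC_KEEPALIVE, 0) < 0)
			perror("WDIOC_KEEPALIVE");
		sleep(1);	/* pet interval is an arbitrary choice */
	}

	/* rdc321x_wdt_stop() always reports -EIO, so expect this
	 * disable request to fail with this particular driver. */
	flags = WDIOS_DISABLECARD;
	if (ioctl(fd, WDIOC_SETOPTIONS, &flags) < 0)
		perror("WDIOC_SETOPTIONS");

	close(fd);
	return 0;
}

Note that the driver does not advertise WDIOF_MAGICCLOSE, so closing the file descriptor does not stop the hardware timer; the keepalive loop must keep running for as long as the watchdog is armed.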
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 3da2b90d2fe6..86d42801de45 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c | |||
@@ -21,18 +21,6 @@ | |||
21 | * You should have received a copy of the GNU General Public License | 21 | * You should have received a copy of the GNU General Public License |
22 | * along with this program; if not, write to the Free Software | 22 | * along with this program; if not, write to the Free Software |
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | * | ||
25 | * Changelog: | ||
26 | * 05-Oct-2004 BJD Added semaphore init to stop crashes on open | ||
27 | * Fixed tmr_count / wdt_count confusion | ||
28 | * Added configurable debug | ||
29 | * | ||
30 | * 11-Jan-2005 BJD Fixed divide-by-2 in timeout code | ||
31 | * | ||
32 | * 25-Jan-2005 DA Added suspend/resume support | ||
33 | * Replaced reboot notifier with .shutdown method | ||
34 | * | ||
35 | * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA | ||
36 | */ | 24 | */ |
37 | 25 | ||
38 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -157,8 +145,6 @@ static void s3c2410wdt_start(void) | |||
157 | writel(wdt_count, wdt_base + S3C2410_WTCNT); | 145 | writel(wdt_count, wdt_base + S3C2410_WTCNT); |
158 | writel(wtcon, wdt_base + S3C2410_WTCON); | 146 | writel(wtcon, wdt_base + S3C2410_WTCON); |
159 | spin_unlock(&wdt_lock); | 147 | spin_unlock(&wdt_lock); |
160 | |||
161 | return 0; | ||
162 | } | 148 | } |
163 | 149 | ||
164 | static int s3c2410wdt_set_heartbeat(int timeout) | 150 | static int s3c2410wdt_set_heartbeat(int timeout) |
@@ -367,7 +353,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev) | |||
367 | return -ENOENT; | 353 | return -ENOENT; |
368 | } | 354 | } |
369 | 355 | ||
370 | size = (res->end-res->start)+1; | 356 | size = (res->end - res->start) + 1; |
371 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 357 | wdt_mem = request_mem_region(res->start, size, pdev->name); |
372 | if (wdt_mem == NULL) { | 358 | if (wdt_mem == NULL) { |
373 | dev_err(dev, "failed to get memory region\n"); | 359 | dev_err(dev, "failed to get memory region\n"); |
@@ -376,7 +362,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev) | |||
376 | } | 362 | } |
377 | 363 | ||
378 | wdt_base = ioremap(res->start, size); | 364 | wdt_base = ioremap(res->start, size); |
379 | if (wdt_base == 0) { | 365 | if (wdt_base == NULL) { |
380 | dev_err(dev, "failed to ioremap() region\n"); | 366 | dev_err(dev, "failed to ioremap() region\n"); |
381 | ret = -EINVAL; | 367 | ret = -EINVAL; |
382 | goto err_req; | 368 | goto err_req; |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index a5bc91ae6ff6..d0e87cbe157c 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -102,7 +102,7 @@ static void do_suspend(void) | |||
102 | /* XXX use normal device tree? */ | 102 | /* XXX use normal device tree? */ |
103 | xenbus_suspend(); | 103 | xenbus_suspend(); |
104 | 104 | ||
105 | err = stop_machine_run(xen_suspend, &cancelled, 0); | 105 | err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0)); |
106 | if (err) { | 106 | if (err) { |
107 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); | 107 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); |
108 | goto out; | 108 | goto out; |