diff options
Diffstat (limited to 'drivers')
298 files changed, 20506 insertions, 5397 deletions
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 0bba148a2c61..4ced54f7a5d9 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -76,12 +76,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node, | |||
76 | * evgpe - GPE handling and dispatch | 76 | * evgpe - GPE handling and dispatch |
77 | */ | 77 | */ |
78 | acpi_status | 78 | acpi_status |
79 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | 79 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info); |
80 | u8 type); | ||
81 | 80 | ||
82 | acpi_status | 81 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
83 | acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, | ||
84 | u8 write_to_hardware); | ||
85 | 82 | ||
86 | acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 83 | acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
87 | 84 | ||
@@ -122,9 +119,6 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, | |||
122 | u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); | 119 | u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); |
123 | 120 | ||
124 | acpi_status | 121 | acpi_status |
125 | acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type); | ||
126 | |||
127 | acpi_status | ||
128 | acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info); | 122 | acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info); |
129 | 123 | ||
130 | acpi_status acpi_ev_gpe_initialize(void); | 124 | acpi_status acpi_ev_gpe_initialize(void); |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 81e64f478679..13cb80caacde 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -426,6 +426,8 @@ struct acpi_gpe_event_info { | |||
426 | struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ | 426 | struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ |
427 | u8 flags; /* Misc info about this GPE */ | 427 | u8 flags; /* Misc info about this GPE */ |
428 | u8 gpe_number; /* This GPE */ | 428 | u8 gpe_number; /* This GPE */ |
429 | u8 runtime_count; | ||
430 | u8 wakeup_count; | ||
429 | }; | 431 | }; |
430 | 432 | ||
431 | /* Information about a GPE register pair, one per each status/enable pair in an array */ | 433 | /* Information about a GPE register pair, one per each status/enable pair in an array */ |
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 64062b1be3ee..07f6e2ea2ee5 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
@@ -287,8 +287,10 @@ struct acpi_object_buffer_field { | |||
287 | 287 | ||
288 | struct acpi_object_notify_handler { | 288 | struct acpi_object_notify_handler { |
289 | ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */ | 289 | ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */ |
290 | u32 handler_type; | ||
290 | acpi_notify_handler handler; | 291 | acpi_notify_handler handler; |
291 | void *context; | 292 | void *context; |
293 | struct acpi_object_notify_handler *next; | ||
292 | }; | 294 | }; |
293 | 295 | ||
294 | struct acpi_object_addr_handler { | 296 | struct acpi_object_addr_handler { |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index afacf4416c73..0b453467a5a0 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -54,54 +54,9 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); | |||
54 | 54 | ||
55 | /******************************************************************************* | 55 | /******************************************************************************* |
56 | * | 56 | * |
57 | * FUNCTION: acpi_ev_set_gpe_type | ||
58 | * | ||
59 | * PARAMETERS: gpe_event_info - GPE to set | ||
60 | * Type - New type | ||
61 | * | ||
62 | * RETURN: Status | ||
63 | * | ||
64 | * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run) | ||
65 | * | ||
66 | ******************************************************************************/ | ||
67 | |||
68 | acpi_status | ||
69 | acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type) | ||
70 | { | ||
71 | acpi_status status; | ||
72 | |||
73 | ACPI_FUNCTION_TRACE(ev_set_gpe_type); | ||
74 | |||
75 | /* Validate type and update register enable masks */ | ||
76 | |||
77 | switch (type) { | ||
78 | case ACPI_GPE_TYPE_WAKE: | ||
79 | case ACPI_GPE_TYPE_RUNTIME: | ||
80 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
81 | break; | ||
82 | |||
83 | default: | ||
84 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
85 | } | ||
86 | |||
87 | /* Disable the GPE if currently enabled */ | ||
88 | |||
89 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
90 | |||
91 | /* Clear the type bits and insert the new Type */ | ||
92 | |||
93 | gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; | ||
94 | gpe_event_info->flags |= type; | ||
95 | return_ACPI_STATUS(status); | ||
96 | } | ||
97 | |||
98 | /******************************************************************************* | ||
99 | * | ||
100 | * FUNCTION: acpi_ev_update_gpe_enable_masks | 57 | * FUNCTION: acpi_ev_update_gpe_enable_masks |
101 | * | 58 | * |
102 | * PARAMETERS: gpe_event_info - GPE to update | 59 | * PARAMETERS: gpe_event_info - GPE to update |
103 | * Type - What to do: ACPI_GPE_DISABLE or | ||
104 | * ACPI_GPE_ENABLE | ||
105 | * | 60 | * |
106 | * RETURN: Status | 61 | * RETURN: Status |
107 | * | 62 | * |
@@ -110,8 +65,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type) | |||
110 | ******************************************************************************/ | 65 | ******************************************************************************/ |
111 | 66 | ||
112 | acpi_status | 67 | acpi_status |
113 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | 68 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) |
114 | u8 type) | ||
115 | { | 69 | { |
116 | struct acpi_gpe_register_info *gpe_register_info; | 70 | struct acpi_gpe_register_info *gpe_register_info; |
117 | u8 register_bit; | 71 | u8 register_bit; |
@@ -127,37 +81,14 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
127 | (1 << | 81 | (1 << |
128 | (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); | 82 | (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); |
129 | 83 | ||
130 | /* 1) Disable case. Simply clear all enable bits */ | 84 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit); |
131 | 85 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | |
132 | if (type == ACPI_GPE_DISABLE) { | ||
133 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
134 | register_bit); | ||
135 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | ||
136 | return_ACPI_STATUS(AE_OK); | ||
137 | } | ||
138 | |||
139 | /* 2) Enable case. Set/Clear the appropriate enable bits */ | ||
140 | 86 | ||
141 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | 87 | if (gpe_event_info->runtime_count) |
142 | case ACPI_GPE_TYPE_WAKE: | ||
143 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); | ||
144 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | ||
145 | break; | ||
146 | |||
147 | case ACPI_GPE_TYPE_RUNTIME: | ||
148 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
149 | register_bit); | ||
150 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); | 88 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); |
151 | break; | ||
152 | 89 | ||
153 | case ACPI_GPE_TYPE_WAKE_RUN: | 90 | if (gpe_event_info->wakeup_count) |
154 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); | 91 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); |
155 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); | ||
156 | break; | ||
157 | |||
158 | default: | ||
159 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
160 | } | ||
161 | 92 | ||
162 | return_ACPI_STATUS(AE_OK); | 93 | return_ACPI_STATUS(AE_OK); |
163 | } | 94 | } |
@@ -167,8 +98,6 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
167 | * FUNCTION: acpi_ev_enable_gpe | 98 | * FUNCTION: acpi_ev_enable_gpe |
168 | * | 99 | * |
169 | * PARAMETERS: gpe_event_info - GPE to enable | 100 | * PARAMETERS: gpe_event_info - GPE to enable |
170 | * write_to_hardware - Enable now, or just mark data structs | ||
171 | * (WAKE GPEs should be deferred) | ||
172 | * | 101 | * |
173 | * RETURN: Status | 102 | * RETURN: Status |
174 | * | 103 | * |
@@ -176,9 +105,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
176 | * | 105 | * |
177 | ******************************************************************************/ | 106 | ******************************************************************************/ |
178 | 107 | ||
179 | acpi_status | 108 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) |
180 | acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, | ||
181 | u8 write_to_hardware) | ||
182 | { | 109 | { |
183 | acpi_status status; | 110 | acpi_status status; |
184 | 111 | ||
@@ -186,47 +113,20 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, | |||
186 | 113 | ||
187 | /* Make sure HW enable masks are updated */ | 114 | /* Make sure HW enable masks are updated */ |
188 | 115 | ||
189 | status = | 116 | status = acpi_ev_update_gpe_enable_masks(gpe_event_info); |
190 | acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE); | 117 | if (ACPI_FAILURE(status)) |
191 | if (ACPI_FAILURE(status)) { | ||
192 | return_ACPI_STATUS(status); | 118 | return_ACPI_STATUS(status); |
193 | } | ||
194 | 119 | ||
195 | /* Mark wake-enabled or HW enable, or both */ | 120 | /* Mark wake-enabled or HW enable, or both */ |
196 | 121 | ||
197 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | 122 | if (gpe_event_info->runtime_count) { |
198 | case ACPI_GPE_TYPE_WAKE: | 123 | /* Clear the GPE (of stale events), then enable it */ |
199 | 124 | status = acpi_hw_clear_gpe(gpe_event_info); | |
200 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | 125 | if (ACPI_FAILURE(status)) |
201 | break; | 126 | return_ACPI_STATUS(status); |
202 | |||
203 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
204 | |||
205 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
206 | |||
207 | /*lint -fallthrough */ | ||
208 | |||
209 | case ACPI_GPE_TYPE_RUNTIME: | ||
210 | |||
211 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); | ||
212 | |||
213 | if (write_to_hardware) { | ||
214 | |||
215 | /* Clear the GPE (of stale events), then enable it */ | ||
216 | |||
217 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
218 | if (ACPI_FAILURE(status)) { | ||
219 | return_ACPI_STATUS(status); | ||
220 | } | ||
221 | |||
222 | /* Enable the requested runtime GPE */ | ||
223 | |||
224 | status = acpi_hw_write_gpe_enable_reg(gpe_event_info); | ||
225 | } | ||
226 | break; | ||
227 | 127 | ||
228 | default: | 128 | /* Enable the requested runtime GPE */ |
229 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 129 | status = acpi_hw_write_gpe_enable_reg(gpe_event_info); |
230 | } | 130 | } |
231 | 131 | ||
232 | return_ACPI_STATUS(AE_OK); | 132 | return_ACPI_STATUS(AE_OK); |
@@ -252,34 +152,9 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
252 | 152 | ||
253 | /* Make sure HW enable masks are updated */ | 153 | /* Make sure HW enable masks are updated */ |
254 | 154 | ||
255 | status = | 155 | status = acpi_ev_update_gpe_enable_masks(gpe_event_info); |
256 | acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE); | 156 | if (ACPI_FAILURE(status)) |
257 | if (ACPI_FAILURE(status)) { | ||
258 | return_ACPI_STATUS(status); | 157 | return_ACPI_STATUS(status); |
259 | } | ||
260 | |||
261 | /* Clear the appropriate enabled flags for this GPE */ | ||
262 | |||
263 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | ||
264 | case ACPI_GPE_TYPE_WAKE: | ||
265 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
266 | break; | ||
267 | |||
268 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
269 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
270 | |||
271 | /* fallthrough */ | ||
272 | |||
273 | case ACPI_GPE_TYPE_RUNTIME: | ||
274 | |||
275 | /* Disable the requested runtime GPE */ | ||
276 | |||
277 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); | ||
278 | break; | ||
279 | |||
280 | default: | ||
281 | break; | ||
282 | } | ||
283 | 158 | ||
284 | /* | 159 | /* |
285 | * Even if we don't know the GPE type, make sure that we always | 160 | * Even if we don't know the GPE type, make sure that we always |
@@ -521,7 +396,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
521 | 396 | ||
522 | /* Set the GPE flags for return to enabled state */ | 397 | /* Set the GPE flags for return to enabled state */ |
523 | 398 | ||
524 | (void)acpi_ev_enable_gpe(gpe_event_info, FALSE); | 399 | (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); |
525 | 400 | ||
526 | /* | 401 | /* |
527 | * Take a snapshot of the GPE info for this level - we copy the info to | 402 | * Take a snapshot of the GPE info for this level - we copy the info to |
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 247920900187..3d4c4aca11cd 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -258,7 +258,6 @@ acpi_ev_save_method_info(acpi_handle obj_handle, | |||
258 | u32 gpe_number; | 258 | u32 gpe_number; |
259 | char name[ACPI_NAME_SIZE + 1]; | 259 | char name[ACPI_NAME_SIZE + 1]; |
260 | u8 type; | 260 | u8 type; |
261 | acpi_status status; | ||
262 | 261 | ||
263 | ACPI_FUNCTION_TRACE(ev_save_method_info); | 262 | ACPI_FUNCTION_TRACE(ev_save_method_info); |
264 | 263 | ||
@@ -325,26 +324,20 @@ acpi_ev_save_method_info(acpi_handle obj_handle, | |||
325 | 324 | ||
326 | /* | 325 | /* |
327 | * Now we can add this information to the gpe_event_info block for use | 326 | * Now we can add this information to the gpe_event_info block for use |
328 | * during dispatch of this GPE. Default type is RUNTIME, although this may | 327 | * during dispatch of this GPE. |
329 | * change when the _PRW methods are executed later. | ||
330 | */ | 328 | */ |
331 | gpe_event_info = | 329 | gpe_event_info = |
332 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; | 330 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; |
333 | 331 | ||
334 | gpe_event_info->flags = (u8) | 332 | gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD); |
335 | (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME); | ||
336 | 333 | ||
337 | gpe_event_info->dispatch.method_node = | 334 | gpe_event_info->dispatch.method_node = |
338 | (struct acpi_namespace_node *)obj_handle; | 335 | (struct acpi_namespace_node *)obj_handle; |
339 | 336 | ||
340 | /* Update enable mask, but don't enable the HW GPE as of yet */ | ||
341 | |||
342 | status = acpi_ev_enable_gpe(gpe_event_info, FALSE); | ||
343 | |||
344 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | 337 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, |
345 | "Registered GPE method %s as GPE number 0x%.2X\n", | 338 | "Registered GPE method %s as GPE number 0x%.2X\n", |
346 | name, gpe_number)); | 339 | name, gpe_number)); |
347 | return_ACPI_STATUS(status); | 340 | return_ACPI_STATUS(AE_OK); |
348 | } | 341 | } |
349 | 342 | ||
350 | /******************************************************************************* | 343 | /******************************************************************************* |
@@ -454,20 +447,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | |||
454 | gpe_block-> | 447 | gpe_block-> |
455 | block_base_number]; | 448 | block_base_number]; |
456 | 449 | ||
457 | /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */ | 450 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; |
458 | |||
459 | gpe_event_info->flags &= | ||
460 | ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED); | ||
461 | |||
462 | status = | ||
463 | acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE); | ||
464 | if (ACPI_FAILURE(status)) { | ||
465 | goto cleanup; | ||
466 | } | ||
467 | |||
468 | status = | ||
469 | acpi_ev_update_gpe_enable_masks(gpe_event_info, | ||
470 | ACPI_GPE_DISABLE); | ||
471 | } | 451 | } |
472 | 452 | ||
473 | cleanup: | 453 | cleanup: |
@@ -989,7 +969,6 @@ acpi_status | |||
989 | acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | 969 | acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, |
990 | struct acpi_gpe_block_info *gpe_block) | 970 | struct acpi_gpe_block_info *gpe_block) |
991 | { | 971 | { |
992 | acpi_status status; | ||
993 | struct acpi_gpe_event_info *gpe_event_info; | 972 | struct acpi_gpe_event_info *gpe_event_info; |
994 | struct acpi_gpe_walk_info gpe_info; | 973 | struct acpi_gpe_walk_info gpe_info; |
995 | u32 wake_gpe_count; | 974 | u32 wake_gpe_count; |
@@ -1019,42 +998,50 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | |||
1019 | gpe_info.gpe_block = gpe_block; | 998 | gpe_info.gpe_block = gpe_block; |
1020 | gpe_info.gpe_device = gpe_device; | 999 | gpe_info.gpe_device = gpe_device; |
1021 | 1000 | ||
1022 | status = | 1001 | acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
1023 | acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
1024 | ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, | 1002 | ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, |
1025 | acpi_ev_match_prw_and_gpe, NULL, | 1003 | acpi_ev_match_prw_and_gpe, NULL, |
1026 | &gpe_info, NULL); | 1004 | &gpe_info, NULL); |
1027 | } | 1005 | } |
1028 | 1006 | ||
1029 | /* | 1007 | /* |
1030 | * Enable all GPEs in this block that have these attributes: | 1008 | * Enable all GPEs that have a corresponding method and aren't |
1031 | * 1) are "runtime" or "run/wake" GPEs, and | 1009 | * capable of generating wakeups. Any other GPEs within this block |
1032 | * 2) have a corresponding _Lxx or _Exx method | 1010 | * must be enabled via the acpi_enable_gpe() interface. |
1033 | * | ||
1034 | * Any other GPEs within this block must be enabled via the | ||
1035 | * acpi_enable_gpe() external interface. | ||
1036 | */ | 1011 | */ |
1037 | wake_gpe_count = 0; | 1012 | wake_gpe_count = 0; |
1038 | gpe_enabled_count = 0; | 1013 | gpe_enabled_count = 0; |
1014 | if (gpe_device == acpi_gbl_fadt_gpe_device) | ||
1015 | gpe_device = NULL; | ||
1039 | 1016 | ||
1040 | for (i = 0; i < gpe_block->register_count; i++) { | 1017 | for (i = 0; i < gpe_block->register_count; i++) { |
1041 | for (j = 0; j < 8; j++) { | 1018 | for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { |
1019 | acpi_status status; | ||
1020 | acpi_size gpe_index; | ||
1021 | int gpe_number; | ||
1042 | 1022 | ||
1043 | /* Get the info block for this particular GPE */ | 1023 | /* Get the info block for this particular GPE */ |
1024 | gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j; | ||
1025 | gpe_event_info = &gpe_block->event_info[gpe_index]; | ||
1044 | 1026 | ||
1045 | gpe_event_info = &gpe_block->event_info[((acpi_size) i * | 1027 | if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) { |
1046 | ACPI_GPE_REGISTER_WIDTH) | ||
1047 | + j]; | ||
1048 | |||
1049 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
1050 | ACPI_GPE_DISPATCH_METHOD) && | ||
1051 | (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) { | ||
1052 | gpe_enabled_count++; | ||
1053 | } | ||
1054 | |||
1055 | if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) { | ||
1056 | wake_gpe_count++; | 1028 | wake_gpe_count++; |
1029 | if (acpi_gbl_leave_wake_gpes_disabled) | ||
1030 | continue; | ||
1057 | } | 1031 | } |
1032 | |||
1033 | if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) | ||
1034 | continue; | ||
1035 | |||
1036 | gpe_number = gpe_index + gpe_block->block_base_number; | ||
1037 | status = acpi_enable_gpe(gpe_device, gpe_number, | ||
1038 | ACPI_GPE_TYPE_RUNTIME); | ||
1039 | if (ACPI_FAILURE(status)) | ||
1040 | ACPI_ERROR((AE_INFO, | ||
1041 | "Failed to enable GPE %02X\n", | ||
1042 | gpe_number)); | ||
1043 | else | ||
1044 | gpe_enabled_count++; | ||
1058 | } | 1045 | } |
1059 | } | 1046 | } |
1060 | 1047 | ||
@@ -1062,15 +1049,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | |||
1062 | "Found %u Wake, Enabled %u Runtime GPEs in this block\n", | 1049 | "Found %u Wake, Enabled %u Runtime GPEs in this block\n", |
1063 | wake_gpe_count, gpe_enabled_count)); | 1050 | wake_gpe_count, gpe_enabled_count)); |
1064 | 1051 | ||
1065 | /* Enable all valid runtime GPEs found above */ | 1052 | return_ACPI_STATUS(AE_OK); |
1066 | |||
1067 | status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL); | ||
1068 | if (ACPI_FAILURE(status)) { | ||
1069 | ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p", | ||
1070 | gpe_block)); | ||
1071 | } | ||
1072 | |||
1073 | return_ACPI_STATUS(status); | ||
1074 | } | 1053 | } |
1075 | 1054 | ||
1076 | /******************************************************************************* | 1055 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index ce224e1eaa89..8f0fac6c4366 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c | |||
@@ -259,9 +259,15 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) | |||
259 | 259 | ||
260 | handler_obj = notify_info->notify.handler_obj; | 260 | handler_obj = notify_info->notify.handler_obj; |
261 | if (handler_obj) { | 261 | if (handler_obj) { |
262 | handler_obj->notify.handler(notify_info->notify.node, | 262 | struct acpi_object_notify_handler *notifier; |
263 | notify_info->notify.value, | 263 | |
264 | handler_obj->notify.context); | 264 | notifier = &handler_obj->notify; |
265 | while (notifier) { | ||
266 | notifier->handler(notify_info->notify.node, | ||
267 | notify_info->notify.value, | ||
268 | notifier->context); | ||
269 | notifier = notifier->next; | ||
270 | } | ||
265 | } | 271 | } |
266 | 272 | ||
267 | /* All done with the info object */ | 273 | /* All done with the info object */ |
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 2fe0809d4eb2..474e2cab603d 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
@@ -218,6 +218,72 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler) | |||
218 | 218 | ||
219 | /******************************************************************************* | 219 | /******************************************************************************* |
220 | * | 220 | * |
221 | * FUNCTION: acpi_populate_handler_object | ||
222 | * | ||
223 | * PARAMETERS: handler_obj - Handler object to populate | ||
224 | * handler_type - The type of handler: | ||
225 | * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) | ||
226 | * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) | ||
227 | * ACPI_ALL_NOTIFY: both system and device | ||
228 | * handler - Address of the handler | ||
229 | * context - Value passed to the handler on each GPE | ||
230 | * next - Address of a handler object to link to | ||
231 | * | ||
232 | * RETURN: None | ||
233 | * | ||
234 | * DESCRIPTION: Populate a handler object. | ||
235 | * | ||
236 | ******************************************************************************/ | ||
237 | static void | ||
238 | acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj, | ||
239 | u32 handler_type, | ||
240 | acpi_notify_handler handler, void *context, | ||
241 | struct acpi_object_notify_handler *next) | ||
242 | { | ||
243 | handler_obj->handler_type = handler_type; | ||
244 | handler_obj->handler = handler; | ||
245 | handler_obj->context = context; | ||
246 | handler_obj->next = next; | ||
247 | } | ||
248 | |||
249 | /******************************************************************************* | ||
250 | * | ||
251 | * FUNCTION: acpi_add_handler_object | ||
252 | * | ||
253 | * PARAMETERS: parent_obj - Parent of the new object | ||
254 | * handler - Address of the handler | ||
255 | * context - Value passed to the handler on each GPE | ||
256 | * | ||
257 | * RETURN: Status | ||
258 | * | ||
259 | * DESCRIPTION: Create a new handler object and populate it. | ||
260 | * | ||
261 | ******************************************************************************/ | ||
262 | static acpi_status | ||
263 | acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj, | ||
264 | acpi_notify_handler handler, void *context) | ||
265 | { | ||
266 | struct acpi_object_notify_handler *handler_obj; | ||
267 | |||
268 | /* The parent must not be a defice notify handler object. */ | ||
269 | if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY) | ||
270 | return AE_BAD_PARAMETER; | ||
271 | |||
272 | handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj)); | ||
273 | if (!handler_obj) | ||
274 | return AE_NO_MEMORY; | ||
275 | |||
276 | acpi_populate_handler_object(handler_obj, | ||
277 | ACPI_SYSTEM_NOTIFY, | ||
278 | handler, context, | ||
279 | parent_obj->next); | ||
280 | parent_obj->next = handler_obj; | ||
281 | |||
282 | return AE_OK; | ||
283 | } | ||
284 | |||
285 | /******************************************************************************* | ||
286 | * | ||
221 | * FUNCTION: acpi_install_notify_handler | 287 | * FUNCTION: acpi_install_notify_handler |
222 | * | 288 | * |
223 | * PARAMETERS: Device - The device for which notifies will be handled | 289 | * PARAMETERS: Device - The device for which notifies will be handled |
@@ -316,15 +382,32 @@ acpi_install_notify_handler(acpi_handle device, | |||
316 | obj_desc = acpi_ns_get_attached_object(node); | 382 | obj_desc = acpi_ns_get_attached_object(node); |
317 | if (obj_desc) { | 383 | if (obj_desc) { |
318 | 384 | ||
319 | /* Object exists - make sure there's no handler */ | 385 | /* Object exists. */ |
320 | 386 | ||
321 | if (((handler_type & ACPI_SYSTEM_NOTIFY) && | 387 | /* For a device notify, make sure there's no handler. */ |
322 | obj_desc->common_notify.system_notify) || | 388 | if ((handler_type & ACPI_DEVICE_NOTIFY) && |
323 | ((handler_type & ACPI_DEVICE_NOTIFY) && | 389 | obj_desc->common_notify.device_notify) { |
324 | obj_desc->common_notify.device_notify)) { | ||
325 | status = AE_ALREADY_EXISTS; | 390 | status = AE_ALREADY_EXISTS; |
326 | goto unlock_and_exit; | 391 | goto unlock_and_exit; |
327 | } | 392 | } |
393 | |||
394 | /* System notifies may have more handlers installed. */ | ||
395 | notify_obj = obj_desc->common_notify.system_notify; | ||
396 | |||
397 | if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) { | ||
398 | struct acpi_object_notify_handler *parent_obj; | ||
399 | |||
400 | if (handler_type & ACPI_DEVICE_NOTIFY) { | ||
401 | status = AE_ALREADY_EXISTS; | ||
402 | goto unlock_and_exit; | ||
403 | } | ||
404 | |||
405 | parent_obj = ¬ify_obj->notify; | ||
406 | status = acpi_add_handler_object(parent_obj, | ||
407 | handler, | ||
408 | context); | ||
409 | goto unlock_and_exit; | ||
410 | } | ||
328 | } else { | 411 | } else { |
329 | /* Create a new object */ | 412 | /* Create a new object */ |
330 | 413 | ||
@@ -356,9 +439,10 @@ acpi_install_notify_handler(acpi_handle device, | |||
356 | goto unlock_and_exit; | 439 | goto unlock_and_exit; |
357 | } | 440 | } |
358 | 441 | ||
359 | notify_obj->notify.node = node; | 442 | acpi_populate_handler_object(¬ify_obj->notify, |
360 | notify_obj->notify.handler = handler; | 443 | handler_type, |
361 | notify_obj->notify.context = context; | 444 | handler, context, |
445 | NULL); | ||
362 | 446 | ||
363 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | 447 | if (handler_type & ACPI_SYSTEM_NOTIFY) { |
364 | obj_desc->common_notify.system_notify = notify_obj; | 448 | obj_desc->common_notify.system_notify = notify_obj; |
@@ -418,6 +502,10 @@ acpi_remove_notify_handler(acpi_handle device, | |||
418 | goto exit; | 502 | goto exit; |
419 | } | 503 | } |
420 | 504 | ||
505 | |||
506 | /* Make sure all deferred tasks are completed */ | ||
507 | acpi_os_wait_events_complete(NULL); | ||
508 | |||
421 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 509 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
422 | if (ACPI_FAILURE(status)) { | 510 | if (ACPI_FAILURE(status)) { |
423 | goto exit; | 511 | goto exit; |
@@ -445,15 +533,6 @@ acpi_remove_notify_handler(acpi_handle device, | |||
445 | goto unlock_and_exit; | 533 | goto unlock_and_exit; |
446 | } | 534 | } |
447 | 535 | ||
448 | /* Make sure all deferred tasks are completed */ | ||
449 | |||
450 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
451 | acpi_os_wait_events_complete(NULL); | ||
452 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
453 | if (ACPI_FAILURE(status)) { | ||
454 | goto exit; | ||
455 | } | ||
456 | |||
457 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | 536 | if (handler_type & ACPI_SYSTEM_NOTIFY) { |
458 | acpi_gbl_system_notify.node = NULL; | 537 | acpi_gbl_system_notify.node = NULL; |
459 | acpi_gbl_system_notify.handler = NULL; | 538 | acpi_gbl_system_notify.handler = NULL; |
@@ -488,28 +567,60 @@ acpi_remove_notify_handler(acpi_handle device, | |||
488 | /* Object exists - make sure there's an existing handler */ | 567 | /* Object exists - make sure there's an existing handler */ |
489 | 568 | ||
490 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | 569 | if (handler_type & ACPI_SYSTEM_NOTIFY) { |
570 | struct acpi_object_notify_handler *handler_obj; | ||
571 | struct acpi_object_notify_handler *parent_obj; | ||
572 | |||
491 | notify_obj = obj_desc->common_notify.system_notify; | 573 | notify_obj = obj_desc->common_notify.system_notify; |
492 | if (!notify_obj) { | 574 | if (!notify_obj) { |
493 | status = AE_NOT_EXIST; | 575 | status = AE_NOT_EXIST; |
494 | goto unlock_and_exit; | 576 | goto unlock_and_exit; |
495 | } | 577 | } |
496 | 578 | ||
497 | if (notify_obj->notify.handler != handler) { | 579 | handler_obj = ¬ify_obj->notify; |
580 | parent_obj = NULL; | ||
581 | while (handler_obj->handler != handler) { | ||
582 | if (handler_obj->next) { | ||
583 | parent_obj = handler_obj; | ||
584 | handler_obj = handler_obj->next; | ||
585 | } else { | ||
586 | break; | ||
587 | } | ||
588 | } | ||
589 | |||
590 | if (handler_obj->handler != handler) { | ||
498 | status = AE_BAD_PARAMETER; | 591 | status = AE_BAD_PARAMETER; |
499 | goto unlock_and_exit; | 592 | goto unlock_and_exit; |
500 | } | 593 | } |
501 | /* Make sure all deferred tasks are completed */ | ||
502 | 594 | ||
503 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 595 | /* |
504 | acpi_os_wait_events_complete(NULL); | 596 | * Remove the handler. There are three possible cases. |
505 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 597 | * First, we may need to remove a non-embedded object. |
506 | if (ACPI_FAILURE(status)) { | 598 | * Second, we may need to remove the embedded object's |
507 | goto exit; | 599 | * handler data, while non-embedded objects exist. |
600 | * Finally, we may need to remove the embedded object | ||
601 | * entirely along with its container. | ||
602 | */ | ||
603 | if (parent_obj) { | ||
604 | /* Non-embedded object is being removed. */ | ||
605 | parent_obj->next = handler_obj->next; | ||
606 | ACPI_FREE(handler_obj); | ||
607 | } else if (notify_obj->notify.next) { | ||
608 | /* | ||
609 | * The handler matches the embedded object, but | ||
610 | * there are more handler objects in the list. | ||
611 | * Replace the embedded object's data with the | ||
612 | * first next object's data and remove that | ||
613 | * object. | ||
614 | */ | ||
615 | parent_obj = ¬ify_obj->notify; | ||
616 | handler_obj = notify_obj->notify.next; | ||
617 | *parent_obj = *handler_obj; | ||
618 | ACPI_FREE(handler_obj); | ||
619 | } else { | ||
620 | /* No more handler objects in the list. */ | ||
621 | obj_desc->common_notify.system_notify = NULL; | ||
622 | acpi_ut_remove_reference(notify_obj); | ||
508 | } | 623 | } |
509 | |||
510 | /* Remove the handler */ | ||
511 | obj_desc->common_notify.system_notify = NULL; | ||
512 | acpi_ut_remove_reference(notify_obj); | ||
513 | } | 624 | } |
514 | 625 | ||
515 | if (handler_type & ACPI_DEVICE_NOTIFY) { | 626 | if (handler_type & ACPI_DEVICE_NOTIFY) { |
@@ -523,14 +634,6 @@ acpi_remove_notify_handler(acpi_handle device, | |||
523 | status = AE_BAD_PARAMETER; | 634 | status = AE_BAD_PARAMETER; |
524 | goto unlock_and_exit; | 635 | goto unlock_and_exit; |
525 | } | 636 | } |
526 | /* Make sure all deferred tasks are completed */ | ||
527 | |||
528 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
529 | acpi_os_wait_events_complete(NULL); | ||
530 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
531 | if (ACPI_FAILURE(status)) { | ||
532 | goto exit; | ||
533 | } | ||
534 | 637 | ||
535 | /* Remove the handler */ | 638 | /* Remove the handler */ |
536 | obj_desc->common_notify.device_notify = NULL; | 639 | obj_desc->common_notify.device_notify = NULL; |
@@ -617,13 +720,6 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
617 | handler->context = context; | 720 | handler->context = context; |
618 | handler->method_node = gpe_event_info->dispatch.method_node; | 721 | handler->method_node = gpe_event_info->dispatch.method_node; |
619 | 722 | ||
620 | /* Disable the GPE before installing the handler */ | ||
621 | |||
622 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
623 | if (ACPI_FAILURE(status)) { | ||
624 | goto unlock_and_exit; | ||
625 | } | ||
626 | |||
627 | /* Install the handler */ | 723 | /* Install the handler */ |
628 | 724 | ||
629 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 725 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
@@ -707,13 +803,6 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
707 | goto unlock_and_exit; | 803 | goto unlock_and_exit; |
708 | } | 804 | } |
709 | 805 | ||
710 | /* Disable the GPE before removing the handler */ | ||
711 | |||
712 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
713 | if (ACPI_FAILURE(status)) { | ||
714 | goto unlock_and_exit; | ||
715 | } | ||
716 | |||
717 | /* Make sure all deferred tasks are completed */ | 806 | /* Make sure all deferred tasks are completed */ |
718 | 807 | ||
719 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 808 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index eed7a38d25f2..124c157215bf 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c | |||
@@ -201,23 +201,27 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event) | |||
201 | 201 | ||
202 | /******************************************************************************* | 202 | /******************************************************************************* |
203 | * | 203 | * |
204 | * FUNCTION: acpi_set_gpe_type | 204 | * FUNCTION: acpi_set_gpe |
205 | * | 205 | * |
206 | * PARAMETERS: gpe_device - Parent GPE Device | 206 | * PARAMETERS: gpe_device - Parent GPE Device |
207 | * gpe_number - GPE level within the GPE block | 207 | * gpe_number - GPE level within the GPE block |
208 | * Type - New GPE type | 208 | * action - Enable or disable |
209 | * Called from ISR or not | ||
209 | * | 210 | * |
210 | * RETURN: Status | 211 | * RETURN: Status |
211 | * | 212 | * |
212 | * DESCRIPTION: Set the type of an individual GPE | 213 | * DESCRIPTION: Enable or disable an ACPI event (general purpose) |
213 | * | 214 | * |
214 | ******************************************************************************/ | 215 | ******************************************************************************/ |
215 | acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type) | 216 | acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) |
216 | { | 217 | { |
217 | acpi_status status = AE_OK; | 218 | acpi_status status = AE_OK; |
219 | acpi_cpu_flags flags; | ||
218 | struct acpi_gpe_event_info *gpe_event_info; | 220 | struct acpi_gpe_event_info *gpe_event_info; |
219 | 221 | ||
220 | ACPI_FUNCTION_TRACE(acpi_set_gpe_type); | 222 | ACPI_FUNCTION_TRACE(acpi_set_gpe); |
223 | |||
224 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
221 | 225 | ||
222 | /* Ensure that we have a valid GPE number */ | 226 | /* Ensure that we have a valid GPE number */ |
223 | 227 | ||
@@ -227,19 +231,29 @@ acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type) | |||
227 | goto unlock_and_exit; | 231 | goto unlock_and_exit; |
228 | } | 232 | } |
229 | 233 | ||
230 | if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) { | 234 | /* Perform the action */ |
231 | return_ACPI_STATUS(AE_OK); | 235 | |
232 | } | 236 | switch (action) { |
237 | case ACPI_GPE_ENABLE: | ||
238 | status = acpi_ev_enable_gpe(gpe_event_info); | ||
239 | break; | ||
233 | 240 | ||
234 | /* Set the new type (will disable GPE if currently enabled) */ | 241 | case ACPI_GPE_DISABLE: |
242 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
243 | break; | ||
235 | 244 | ||
236 | status = acpi_ev_set_gpe_type(gpe_event_info, type); | 245 | default: |
246 | ACPI_ERROR((AE_INFO, "Invalid action\n")); | ||
247 | status = AE_BAD_PARAMETER; | ||
248 | break; | ||
249 | } | ||
237 | 250 | ||
238 | unlock_and_exit: | 251 | unlock_and_exit: |
252 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
239 | return_ACPI_STATUS(status); | 253 | return_ACPI_STATUS(status); |
240 | } | 254 | } |
241 | 255 | ||
242 | ACPI_EXPORT_SYMBOL(acpi_set_gpe_type) | 256 | ACPI_EXPORT_SYMBOL(acpi_set_gpe) |
243 | 257 | ||
244 | /******************************************************************************* | 258 | /******************************************************************************* |
245 | * | 259 | * |
@@ -247,15 +261,14 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe_type) | |||
247 | * | 261 | * |
248 | * PARAMETERS: gpe_device - Parent GPE Device | 262 | * PARAMETERS: gpe_device - Parent GPE Device |
249 | * gpe_number - GPE level within the GPE block | 263 | * gpe_number - GPE level within the GPE block |
250 | * Flags - Just enable, or also wake enable? | 264 | * type - Purpose the GPE will be used for |
251 | * Called from ISR or not | ||
252 | * | 265 | * |
253 | * RETURN: Status | 266 | * RETURN: Status |
254 | * | 267 | * |
255 | * DESCRIPTION: Enable an ACPI event (general purpose) | 268 | * DESCRIPTION: Take a reference to a GPE and enable it if necessary |
256 | * | 269 | * |
257 | ******************************************************************************/ | 270 | ******************************************************************************/ |
258 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | 271 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) |
259 | { | 272 | { |
260 | acpi_status status = AE_OK; | 273 | acpi_status status = AE_OK; |
261 | acpi_cpu_flags flags; | 274 | acpi_cpu_flags flags; |
@@ -263,6 +276,9 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
263 | 276 | ||
264 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); | 277 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); |
265 | 278 | ||
279 | if (type & ~ACPI_GPE_TYPE_WAKE_RUN) | ||
280 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
281 | |||
266 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 282 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
267 | 283 | ||
268 | /* Ensure that we have a valid GPE number */ | 284 | /* Ensure that we have a valid GPE number */ |
@@ -273,15 +289,32 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
273 | goto unlock_and_exit; | 289 | goto unlock_and_exit; |
274 | } | 290 | } |
275 | 291 | ||
276 | /* Perform the enable */ | 292 | if (type & ACPI_GPE_TYPE_RUNTIME) { |
293 | if (++gpe_event_info->runtime_count == 1) { | ||
294 | status = acpi_ev_enable_gpe(gpe_event_info); | ||
295 | if (ACPI_FAILURE(status)) | ||
296 | gpe_event_info->runtime_count--; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | if (type & ACPI_GPE_TYPE_WAKE) { | ||
301 | if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | ||
302 | status = AE_BAD_PARAMETER; | ||
303 | goto unlock_and_exit; | ||
304 | } | ||
277 | 305 | ||
278 | status = acpi_ev_enable_gpe(gpe_event_info, TRUE); | 306 | /* |
307 | * Wake-up GPEs are only enabled right prior to putting the | ||
308 | * system into a sleep state. | ||
309 | */ | ||
310 | if (++gpe_event_info->wakeup_count == 1) | ||
311 | acpi_ev_update_gpe_enable_masks(gpe_event_info); | ||
312 | } | ||
279 | 313 | ||
280 | unlock_and_exit: | 314 | unlock_and_exit: |
281 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 315 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
282 | return_ACPI_STATUS(status); | 316 | return_ACPI_STATUS(status); |
283 | } | 317 | } |
284 | |||
285 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | 318 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) |
286 | 319 | ||
287 | /******************************************************************************* | 320 | /******************************************************************************* |
@@ -290,15 +323,14 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | |||
290 | * | 323 | * |
291 | * PARAMETERS: gpe_device - Parent GPE Device | 324 | * PARAMETERS: gpe_device - Parent GPE Device |
292 | * gpe_number - GPE level within the GPE block | 325 | * gpe_number - GPE level within the GPE block |
293 | * Flags - Just disable, or also wake disable? | 326 | * type - Purpose the GPE won't be used for any more |
294 | * Called from ISR or not | ||
295 | * | 327 | * |
296 | * RETURN: Status | 328 | * RETURN: Status |
297 | * | 329 | * |
298 | * DESCRIPTION: Disable an ACPI event (general purpose) | 330 | * DESCRIPTION: Release a reference to a GPE and disable it if necessary |
299 | * | 331 | * |
300 | ******************************************************************************/ | 332 | ******************************************************************************/ |
301 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | 333 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) |
302 | { | 334 | { |
303 | acpi_status status = AE_OK; | 335 | acpi_status status = AE_OK; |
304 | acpi_cpu_flags flags; | 336 | acpi_cpu_flags flags; |
@@ -306,6 +338,9 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
306 | 338 | ||
307 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); | 339 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); |
308 | 340 | ||
341 | if (type & ~ACPI_GPE_TYPE_WAKE_RUN) | ||
342 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
343 | |||
309 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 344 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
310 | /* Ensure that we have a valid GPE number */ | 345 | /* Ensure that we have a valid GPE number */ |
311 | 346 | ||
@@ -315,13 +350,24 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
315 | goto unlock_and_exit; | 350 | goto unlock_and_exit; |
316 | } | 351 | } |
317 | 352 | ||
318 | status = acpi_ev_disable_gpe(gpe_event_info); | 353 | if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) { |
354 | if (--gpe_event_info->runtime_count == 0) | ||
355 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
356 | } | ||
357 | |||
358 | if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) { | ||
359 | /* | ||
360 | * Wake-up GPEs are not enabled after leaving system sleep | ||
361 | * states, so we don't need to disable them here. | ||
362 | */ | ||
363 | if (--gpe_event_info->wakeup_count == 0) | ||
364 | acpi_ev_update_gpe_enable_masks(gpe_event_info); | ||
365 | } | ||
319 | 366 | ||
320 | unlock_and_exit: | 367 | unlock_and_exit: |
321 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 368 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
322 | return_ACPI_STATUS(status); | 369 | return_ACPI_STATUS(status); |
323 | } | 370 | } |
324 | |||
325 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | 371 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) |
326 | 372 | ||
327 | /******************************************************************************* | 373 | /******************************************************************************* |
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 8a95e8329df7..f53fbe307c9d 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c | |||
@@ -422,11 +422,10 @@ static int acpi_button_add(struct acpi_device *device) | |||
422 | 422 | ||
423 | if (device->wakeup.flags.valid) { | 423 | if (device->wakeup.flags.valid) { |
424 | /* Button's GPE is run-wake GPE */ | 424 | /* Button's GPE is run-wake GPE */ |
425 | acpi_set_gpe_type(device->wakeup.gpe_device, | ||
426 | device->wakeup.gpe_number, | ||
427 | ACPI_GPE_TYPE_WAKE_RUN); | ||
428 | acpi_enable_gpe(device->wakeup.gpe_device, | 425 | acpi_enable_gpe(device->wakeup.gpe_device, |
429 | device->wakeup.gpe_number); | 426 | device->wakeup.gpe_number, |
427 | ACPI_GPE_TYPE_WAKE_RUN); | ||
428 | device->wakeup.run_wake_count++; | ||
430 | device->wakeup.state.enabled = 1; | 429 | device->wakeup.state.enabled = 1; |
431 | } | 430 | } |
432 | 431 | ||
@@ -446,6 +445,14 @@ static int acpi_button_remove(struct acpi_device *device, int type) | |||
446 | { | 445 | { |
447 | struct acpi_button *button = acpi_driver_data(device); | 446 | struct acpi_button *button = acpi_driver_data(device); |
448 | 447 | ||
448 | if (device->wakeup.flags.valid) { | ||
449 | acpi_disable_gpe(device->wakeup.gpe_device, | ||
450 | device->wakeup.gpe_number, | ||
451 | ACPI_GPE_TYPE_WAKE_RUN); | ||
452 | device->wakeup.run_wake_count--; | ||
453 | device->wakeup.state.enabled = 0; | ||
454 | } | ||
455 | |||
449 | acpi_button_remove_fs(device); | 456 | acpi_button_remove_fs(device); |
450 | input_unregister_device(button->input); | 457 | input_unregister_device(button->input); |
451 | kfree(button); | 458 | kfree(button); |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d6471bb6852f..27e0b92b2e39 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -307,7 +307,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
307 | pr_debug(PREFIX "transaction start\n"); | 307 | pr_debug(PREFIX "transaction start\n"); |
308 | /* disable GPE during transaction if storm is detected */ | 308 | /* disable GPE during transaction if storm is detected */ |
309 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | 309 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { |
310 | acpi_disable_gpe(NULL, ec->gpe); | 310 | /* |
311 | * It has to be disabled at the hardware level regardless of the | ||
312 | * GPE reference counting, so that it doesn't trigger. | ||
313 | */ | ||
314 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); | ||
311 | } | 315 | } |
312 | 316 | ||
313 | status = acpi_ec_transaction_unlocked(ec, t); | 317 | status = acpi_ec_transaction_unlocked(ec, t); |
@@ -316,8 +320,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
316 | ec_check_sci_sync(ec, acpi_ec_read_status(ec)); | 320 | ec_check_sci_sync(ec, acpi_ec_read_status(ec)); |
317 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | 321 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { |
318 | msleep(1); | 322 | msleep(1); |
319 | /* it is safe to enable GPE outside of transaction */ | 323 | /* |
320 | acpi_enable_gpe(NULL, ec->gpe); | 324 | * It is safe to enable the GPE outside of the transaction. Use |
325 | * acpi_set_gpe() for that, since we used it to disable the GPE | ||
326 | * above. | ||
327 | */ | ||
328 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); | ||
321 | } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { | 329 | } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { |
322 | pr_info(PREFIX "GPE storm detected, " | 330 | pr_info(PREFIX "GPE storm detected, " |
323 | "transactions will use polling mode\n"); | 331 | "transactions will use polling mode\n"); |
@@ -788,8 +796,8 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
788 | &acpi_ec_gpe_handler, ec); | 796 | &acpi_ec_gpe_handler, ec); |
789 | if (ACPI_FAILURE(status)) | 797 | if (ACPI_FAILURE(status)) |
790 | return -ENODEV; | 798 | return -ENODEV; |
791 | acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); | 799 | |
792 | acpi_enable_gpe(NULL, ec->gpe); | 800 | acpi_enable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); |
793 | status = acpi_install_address_space_handler(ec->handle, | 801 | status = acpi_install_address_space_handler(ec->handle, |
794 | ACPI_ADR_SPACE_EC, | 802 | ACPI_ADR_SPACE_EC, |
795 | &acpi_ec_space_handler, | 803 | &acpi_ec_space_handler, |
@@ -806,6 +814,7 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
806 | } else { | 814 | } else { |
807 | acpi_remove_gpe_handler(NULL, ec->gpe, | 815 | acpi_remove_gpe_handler(NULL, ec->gpe, |
808 | &acpi_ec_gpe_handler); | 816 | &acpi_ec_gpe_handler); |
817 | acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); | ||
809 | return -ENODEV; | 818 | return -ENODEV; |
810 | } | 819 | } |
811 | } | 820 | } |
@@ -816,6 +825,7 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
816 | 825 | ||
817 | static void ec_remove_handlers(struct acpi_ec *ec) | 826 | static void ec_remove_handlers(struct acpi_ec *ec) |
818 | { | 827 | { |
828 | acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); | ||
819 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, | 829 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, |
820 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) | 830 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) |
821 | pr_err(PREFIX "failed to remove space handler\n"); | 831 | pr_err(PREFIX "failed to remove space handler\n"); |
@@ -1057,16 +1067,16 @@ error: | |||
1057 | static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state) | 1067 | static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state) |
1058 | { | 1068 | { |
1059 | struct acpi_ec *ec = acpi_driver_data(device); | 1069 | struct acpi_ec *ec = acpi_driver_data(device); |
1060 | /* Stop using GPE */ | 1070 | /* Stop using the GPE, but keep it reference counted. */ |
1061 | acpi_disable_gpe(NULL, ec->gpe); | 1071 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); |
1062 | return 0; | 1072 | return 0; |
1063 | } | 1073 | } |
1064 | 1074 | ||
1065 | static int acpi_ec_resume(struct acpi_device *device) | 1075 | static int acpi_ec_resume(struct acpi_device *device) |
1066 | { | 1076 | { |
1067 | struct acpi_ec *ec = acpi_driver_data(device); | 1077 | struct acpi_ec *ec = acpi_driver_data(device); |
1068 | /* Enable use of GPE back */ | 1078 | /* Enable the GPE again, but don't reference count it once more. */ |
1069 | acpi_enable_gpe(NULL, ec->gpe); | 1079 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); |
1070 | return 0; | 1080 | return 0; |
1071 | } | 1081 | } |
1072 | 1082 | ||
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index cb28e0502acc..9c4c962e46e3 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -36,8 +36,6 @@ static inline int acpi_debug_init(void) { return 0; } | |||
36 | int acpi_power_init(void); | 36 | int acpi_power_init(void); |
37 | int acpi_device_sleep_wake(struct acpi_device *dev, | 37 | int acpi_device_sleep_wake(struct acpi_device *dev, |
38 | int enable, int sleep_state, int dev_state); | 38 | int enable, int sleep_state, int dev_state); |
39 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state); | ||
40 | int acpi_disable_wakeup_device_power(struct acpi_device *dev); | ||
41 | int acpi_power_get_inferred_state(struct acpi_device *device); | 39 | int acpi_power_get_inferred_state(struct acpi_device *device); |
42 | int acpi_power_transition(struct acpi_device *device, int state); | 40 | int acpi_power_transition(struct acpi_device *device, int state); |
43 | extern int acpi_power_nocheck; | 41 | extern int acpi_power_nocheck; |
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c index a5a77b78a723..2ef04098cc1d 100644 --- a/drivers/acpi/pci_bind.c +++ b/drivers/acpi/pci_bind.c | |||
@@ -26,7 +26,9 @@ | |||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/pci-acpi.h> | ||
29 | #include <linux/acpi.h> | 30 | #include <linux/acpi.h> |
31 | #include <linux/pm_runtime.h> | ||
30 | #include <acpi/acpi_bus.h> | 32 | #include <acpi/acpi_bus.h> |
31 | #include <acpi/acpi_drivers.h> | 33 | #include <acpi/acpi_drivers.h> |
32 | 34 | ||
@@ -38,7 +40,13 @@ static int acpi_pci_unbind(struct acpi_device *device) | |||
38 | struct pci_dev *dev; | 40 | struct pci_dev *dev; |
39 | 41 | ||
40 | dev = acpi_get_pci_dev(device->handle); | 42 | dev = acpi_get_pci_dev(device->handle); |
41 | if (!dev || !dev->subordinate) | 43 | if (!dev) |
44 | goto out; | ||
45 | |||
46 | device_set_run_wake(&dev->dev, false); | ||
47 | pci_acpi_remove_pm_notifier(device); | ||
48 | |||
49 | if (!dev->subordinate) | ||
42 | goto out; | 50 | goto out; |
43 | 51 | ||
44 | acpi_pci_irq_del_prt(dev->subordinate); | 52 | acpi_pci_irq_del_prt(dev->subordinate); |
@@ -62,6 +70,10 @@ static int acpi_pci_bind(struct acpi_device *device) | |||
62 | if (!dev) | 70 | if (!dev) |
63 | return 0; | 71 | return 0; |
64 | 72 | ||
73 | pci_acpi_add_pm_notifier(device, dev); | ||
74 | if (device->wakeup.flags.run_wake) | ||
75 | device_set_run_wake(&dev->dev, true); | ||
76 | |||
65 | /* | 77 | /* |
66 | * Install the 'bind' function to facilitate callbacks for | 78 | * Install the 'bind' function to facilitate callbacks for |
67 | * children of the P2P bridge. | 79 | * children of the P2P bridge. |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 64f55b6db73c..d724736d56c8 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/proc_fs.h> | 30 | #include <linux/proc_fs.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/pm.h> | 32 | #include <linux/pm.h> |
33 | #include <linux/pm_runtime.h> | ||
33 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
34 | #include <linux/pci-acpi.h> | 35 | #include <linux/pci-acpi.h> |
35 | #include <linux/acpi.h> | 36 | #include <linux/acpi.h> |
@@ -528,6 +529,10 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
528 | if (flags != base_flags) | 529 | if (flags != base_flags) |
529 | acpi_pci_osc_support(root, flags); | 530 | acpi_pci_osc_support(root, flags); |
530 | 531 | ||
532 | pci_acpi_add_bus_pm_notifier(device, root->bus); | ||
533 | if (device->wakeup.flags.run_wake) | ||
534 | device_set_run_wake(root->bus->bridge, true); | ||
535 | |||
531 | return 0; | 536 | return 0; |
532 | 537 | ||
533 | end: | 538 | end: |
@@ -549,6 +554,9 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type) | |||
549 | { | 554 | { |
550 | struct acpi_pci_root *root = acpi_driver_data(device); | 555 | struct acpi_pci_root *root = acpi_driver_data(device); |
551 | 556 | ||
557 | device_set_run_wake(root->bus->bridge, false); | ||
558 | pci_acpi_remove_bus_pm_notifier(device); | ||
559 | |||
552 | kfree(root); | 560 | kfree(root); |
553 | return 0; | 561 | return 0; |
554 | } | 562 | } |
@@ -558,6 +566,7 @@ static int __init acpi_pci_root_init(void) | |||
558 | if (acpi_pci_disabled) | 566 | if (acpi_pci_disabled) |
559 | return 0; | 567 | return 0; |
560 | 568 | ||
569 | pci_acpi_crs_quirks(); | ||
561 | if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) | 570 | if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) |
562 | return -ENODEV; | 571 | return -ENODEV; |
563 | 572 | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 3e009674f333..fb7fc24fe727 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -741,19 +741,40 @@ acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device, | |||
741 | return AE_OK; | 741 | return AE_OK; |
742 | } | 742 | } |
743 | 743 | ||
744 | static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | 744 | static void acpi_bus_set_run_wake_flags(struct acpi_device *device) |
745 | { | 745 | { |
746 | acpi_status status = 0; | ||
747 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
748 | union acpi_object *package = NULL; | ||
749 | int psw_error; | ||
750 | |||
751 | struct acpi_device_id button_device_ids[] = { | 746 | struct acpi_device_id button_device_ids[] = { |
752 | {"PNP0C0D", 0}, | 747 | {"PNP0C0D", 0}, |
753 | {"PNP0C0C", 0}, | 748 | {"PNP0C0C", 0}, |
754 | {"PNP0C0E", 0}, | 749 | {"PNP0C0E", 0}, |
755 | {"", 0}, | 750 | {"", 0}, |
756 | }; | 751 | }; |
752 | acpi_status status; | ||
753 | acpi_event_status event_status; | ||
754 | |||
755 | device->wakeup.run_wake_count = 0; | ||
756 | device->wakeup.flags.notifier_present = 0; | ||
757 | |||
758 | /* Power button, Lid switch always enable wakeup */ | ||
759 | if (!acpi_match_device_ids(device, button_device_ids)) { | ||
760 | device->wakeup.flags.run_wake = 1; | ||
761 | device->wakeup.flags.always_enabled = 1; | ||
762 | return; | ||
763 | } | ||
764 | |||
765 | status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number, | ||
766 | ACPI_NOT_ISR, &event_status); | ||
767 | if (status == AE_OK) | ||
768 | device->wakeup.flags.run_wake = | ||
769 | !!(event_status & ACPI_EVENT_FLAG_HANDLE); | ||
770 | } | ||
771 | |||
772 | static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | ||
773 | { | ||
774 | acpi_status status = 0; | ||
775 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
776 | union acpi_object *package = NULL; | ||
777 | int psw_error; | ||
757 | 778 | ||
758 | /* _PRW */ | 779 | /* _PRW */ |
759 | status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); | 780 | status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); |
@@ -773,6 +794,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
773 | 794 | ||
774 | device->wakeup.flags.valid = 1; | 795 | device->wakeup.flags.valid = 1; |
775 | device->wakeup.prepare_count = 0; | 796 | device->wakeup.prepare_count = 0; |
797 | acpi_bus_set_run_wake_flags(device); | ||
776 | /* Call _PSW/_DSW object to disable its ability to wake the sleeping | 798 | /* Call _PSW/_DSW object to disable its ability to wake the sleeping |
777 | * system for the ACPI device with the _PRW object. | 799 | * system for the ACPI device with the _PRW object. |
778 | * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW. | 800 | * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW. |
@@ -784,10 +806,6 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
784 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 806 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
785 | "error in _DSW or _PSW evaluation\n")); | 807 | "error in _DSW or _PSW evaluation\n")); |
786 | 808 | ||
787 | /* Power button, Lid switch always enable wakeup */ | ||
788 | if (!acpi_match_device_ids(device, button_device_ids)) | ||
789 | device->wakeup.flags.run_wake = 1; | ||
790 | |||
791 | end: | 809 | end: |
792 | if (ACPI_FAILURE(status)) | 810 | if (ACPI_FAILURE(status)) |
793 | device->flags.wake_capable = 0; | 811 | device->flags.wake_capable = 0; |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 79d33d908b5a..3bde594a9979 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -745,9 +745,18 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | |||
745 | return -ENODEV; | 745 | return -ENODEV; |
746 | } | 746 | } |
747 | 747 | ||
748 | error = enable ? | 748 | if (enable) { |
749 | acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : | 749 | error = acpi_enable_wakeup_device_power(adev, |
750 | acpi_disable_wakeup_device_power(adev); | 750 | acpi_target_sleep_state); |
751 | if (!error) | ||
752 | acpi_enable_gpe(adev->wakeup.gpe_device, | ||
753 | adev->wakeup.gpe_number, | ||
754 | ACPI_GPE_TYPE_WAKE); | ||
755 | } else { | ||
756 | acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number, | ||
757 | ACPI_GPE_TYPE_WAKE); | ||
758 | error = acpi_disable_wakeup_device_power(adev); | ||
759 | } | ||
751 | if (!error) | 760 | if (!error) |
752 | dev_info(dev, "wake-up capability %s by ACPI\n", | 761 | dev_info(dev, "wake-up capability %s by ACPI\n", |
753 | enable ? "enabled" : "disabled"); | 762 | enable ? "enabled" : "disabled"); |
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index d11282975f35..a206a12da78a 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c | |||
@@ -387,10 +387,10 @@ static ssize_t counter_set(struct kobject *kobj, | |||
387 | if (index < num_gpes) { | 387 | if (index < num_gpes) { |
388 | if (!strcmp(buf, "disable\n") && | 388 | if (!strcmp(buf, "disable\n") && |
389 | (status & ACPI_EVENT_FLAG_ENABLED)) | 389 | (status & ACPI_EVENT_FLAG_ENABLED)) |
390 | result = acpi_disable_gpe(handle, index); | 390 | result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE); |
391 | else if (!strcmp(buf, "enable\n") && | 391 | else if (!strcmp(buf, "enable\n") && |
392 | !(status & ACPI_EVENT_FLAG_ENABLED)) | 392 | !(status & ACPI_EVENT_FLAG_ENABLED)) |
393 | result = acpi_enable_gpe(handle, index); | 393 | result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE); |
394 | else if (!strcmp(buf, "clear\n") && | 394 | else if (!strcmp(buf, "clear\n") && |
395 | (status & ACPI_EVENT_FLAG_SET)) | 395 | (status & ACPI_EVENT_FLAG_SET)) |
396 | result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); | 396 | result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); |
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index e0ee0c036f5a..4b9d339a6e28 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
@@ -21,12 +21,12 @@ | |||
21 | ACPI_MODULE_NAME("wakeup_devices") | 21 | ACPI_MODULE_NAME("wakeup_devices") |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * acpi_enable_wakeup_device_prep - prepare wakeup devices | 24 | * acpi_enable_wakeup_device_prep - Prepare wake-up devices. |
25 | * @sleep_state: ACPI state | 25 | * @sleep_state: ACPI system sleep state. |
26 | * Enable all wakup devices power if the devices' wakeup level | 26 | * |
27 | * is higher than requested sleep level | 27 | * Enable all wake-up devices' power, unless the requested system sleep state is |
28 | * too deep. | ||
28 | */ | 29 | */ |
29 | |||
30 | void acpi_enable_wakeup_device_prep(u8 sleep_state) | 30 | void acpi_enable_wakeup_device_prep(u8 sleep_state) |
31 | { | 31 | { |
32 | struct list_head *node, *next; | 32 | struct list_head *node, *next; |
@@ -36,9 +36,8 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) | |||
36 | struct acpi_device, | 36 | struct acpi_device, |
37 | wakeup_list); | 37 | wakeup_list); |
38 | 38 | ||
39 | if (!dev->wakeup.flags.valid || | 39 | if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled |
40 | !dev->wakeup.state.enabled || | 40 | || (sleep_state > (u32) dev->wakeup.sleep_state)) |
41 | (sleep_state > (u32) dev->wakeup.sleep_state)) | ||
42 | continue; | 41 | continue; |
43 | 42 | ||
44 | acpi_enable_wakeup_device_power(dev, sleep_state); | 43 | acpi_enable_wakeup_device_power(dev, sleep_state); |
@@ -46,9 +45,12 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) | |||
46 | } | 45 | } |
47 | 46 | ||
48 | /** | 47 | /** |
49 | * acpi_enable_wakeup_device - enable wakeup devices | 48 | * acpi_enable_wakeup_device - Enable wake-up device GPEs. |
50 | * @sleep_state: ACPI state | 49 | * @sleep_state: ACPI system sleep state. |
51 | * Enable all wakup devices's GPE | 50 | * |
51 | * Enable all wake-up devices' GPEs, with the assumption that | ||
52 | * acpi_disable_all_gpes() was executed before, so we don't need to disable any | ||
53 | * GPEs here. | ||
52 | */ | 54 | */ |
53 | void acpi_enable_wakeup_device(u8 sleep_state) | 55 | void acpi_enable_wakeup_device(u8 sleep_state) |
54 | { | 56 | { |
@@ -65,29 +67,22 @@ void acpi_enable_wakeup_device(u8 sleep_state) | |||
65 | if (!dev->wakeup.flags.valid) | 67 | if (!dev->wakeup.flags.valid) |
66 | continue; | 68 | continue; |
67 | 69 | ||
68 | /* If users want to disable run-wake GPE, | ||
69 | * we only disable it for wake and leave it for runtime | ||
70 | */ | ||
71 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) | 70 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) |
72 | || sleep_state > (u32) dev->wakeup.sleep_state) { | 71 | || sleep_state > (u32) dev->wakeup.sleep_state) |
73 | if (dev->wakeup.flags.run_wake) { | ||
74 | /* set_gpe_type will disable GPE, leave it like that */ | ||
75 | acpi_set_gpe_type(dev->wakeup.gpe_device, | ||
76 | dev->wakeup.gpe_number, | ||
77 | ACPI_GPE_TYPE_RUNTIME); | ||
78 | } | ||
79 | continue; | 72 | continue; |
80 | } | 73 | |
81 | if (!dev->wakeup.flags.run_wake) | 74 | /* The wake-up power should have been enabled already. */ |
82 | acpi_enable_gpe(dev->wakeup.gpe_device, | 75 | acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
83 | dev->wakeup.gpe_number); | 76 | ACPI_GPE_ENABLE); |
84 | } | 77 | } |
85 | } | 78 | } |
86 | 79 | ||
87 | /** | 80 | /** |
88 | * acpi_disable_wakeup_device - disable devices' wakeup capability | 81 | * acpi_disable_wakeup_device - Disable devices' wakeup capability. |
89 | * @sleep_state: ACPI state | 82 | * @sleep_state: ACPI system sleep state. |
90 | * Disable all wakup devices's GPE and wakeup capability | 83 | * |
84 | * This function only affects devices with wakeup.state.enabled set, which means | ||
85 | * that it reverses the changes made by acpi_enable_wakeup_device_prep(). | ||
91 | */ | 86 | */ |
92 | void acpi_disable_wakeup_device(u8 sleep_state) | 87 | void acpi_disable_wakeup_device(u8 sleep_state) |
93 | { | 88 | { |
@@ -97,30 +92,11 @@ void acpi_disable_wakeup_device(u8 sleep_state) | |||
97 | struct acpi_device *dev = | 92 | struct acpi_device *dev = |
98 | container_of(node, struct acpi_device, wakeup_list); | 93 | container_of(node, struct acpi_device, wakeup_list); |
99 | 94 | ||
100 | if (!dev->wakeup.flags.valid) | 95 | if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled |
101 | continue; | 96 | || (sleep_state > (u32) dev->wakeup.sleep_state)) |
102 | |||
103 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) | ||
104 | || sleep_state > (u32) dev->wakeup.sleep_state) { | ||
105 | if (dev->wakeup.flags.run_wake) { | ||
106 | acpi_set_gpe_type(dev->wakeup.gpe_device, | ||
107 | dev->wakeup.gpe_number, | ||
108 | ACPI_GPE_TYPE_WAKE_RUN); | ||
109 | /* Re-enable it, since set_gpe_type will disable it */ | ||
110 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
111 | dev->wakeup.gpe_number); | ||
112 | } | ||
113 | continue; | 97 | continue; |
114 | } | ||
115 | 98 | ||
116 | acpi_disable_wakeup_device_power(dev); | 99 | acpi_disable_wakeup_device_power(dev); |
117 | /* Never disable run-wake GPE */ | ||
118 | if (!dev->wakeup.flags.run_wake) { | ||
119 | acpi_disable_gpe(dev->wakeup.gpe_device, | ||
120 | dev->wakeup.gpe_number); | ||
121 | acpi_clear_gpe(dev->wakeup.gpe_device, | ||
122 | dev->wakeup.gpe_number, ACPI_NOT_ISR); | ||
123 | } | ||
124 | } | 100 | } |
125 | } | 101 | } |
126 | 102 | ||
@@ -134,13 +110,11 @@ int __init acpi_wakeup_device_init(void) | |||
134 | struct acpi_device, | 110 | struct acpi_device, |
135 | wakeup_list); | 111 | wakeup_list); |
136 | /* In case user doesn't load button driver */ | 112 | /* In case user doesn't load button driver */ |
137 | if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled) | 113 | if (!dev->wakeup.flags.always_enabled || |
114 | dev->wakeup.state.enabled) | ||
138 | continue; | 115 | continue; |
139 | acpi_set_gpe_type(dev->wakeup.gpe_device, | 116 | acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
140 | dev->wakeup.gpe_number, | 117 | ACPI_GPE_TYPE_WAKE); |
141 | ACPI_GPE_TYPE_WAKE_RUN); | ||
142 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
143 | dev->wakeup.gpe_number); | ||
144 | dev->wakeup.state.enabled = 1; | 118 | dev->wakeup.state.enabled = 1; |
145 | } | 119 | } |
146 | mutex_unlock(&acpi_device_lock); | 120 | mutex_unlock(&acpi_device_lock); |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index b34390347c16..a6a736a7dbf2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -3082,8 +3082,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3082 | ahci_save_initial_config(pdev, hpriv); | 3082 | ahci_save_initial_config(pdev, hpriv); |
3083 | 3083 | ||
3084 | /* prepare host */ | 3084 | /* prepare host */ |
3085 | if (hpriv->cap & HOST_CAP_NCQ) | 3085 | if (hpriv->cap & HOST_CAP_NCQ) { |
3086 | pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA; | 3086 | pi.flags |= ATA_FLAG_NCQ; |
3087 | /* Auto-activate optimization is supposed to be supported on | ||
3088 | all AHCI controllers indicating NCQ support, but it seems | ||
3089 | to be broken at least on some NVIDIA MCP79 chipsets. | ||
3090 | Until we get info on which NVIDIA chipsets don't have this | ||
3091 | issue, if any, disable AA on all NVIDIA AHCIs. */ | ||
3092 | if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) | ||
3093 | pi.flags |= ATA_FLAG_FPDMA_AA; | ||
3094 | } | ||
3087 | 3095 | ||
3088 | if (hpriv->cap & HOST_CAP_PMP) | 3096 | if (hpriv->cap & HOST_CAP_PMP) |
3089 | pi.flags |= ATA_FLAG_PMP; | 3097 | pi.flags |= ATA_FLAG_PMP; |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 51042f0ba7e1..7eff828b2117 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -243,10 +243,12 @@ static int index_to_minor(int index) | |||
243 | static int __devinit virtblk_probe(struct virtio_device *vdev) | 243 | static int __devinit virtblk_probe(struct virtio_device *vdev) |
244 | { | 244 | { |
245 | struct virtio_blk *vblk; | 245 | struct virtio_blk *vblk; |
246 | struct request_queue *q; | ||
246 | int err; | 247 | int err; |
247 | u64 cap; | 248 | u64 cap; |
248 | u32 v; | 249 | u32 v, blk_size, sg_elems, opt_io_size; |
249 | u32 blk_size, sg_elems; | 250 | u16 min_io_size; |
251 | u8 physical_block_exp, alignment_offset; | ||
250 | 252 | ||
251 | if (index_to_minor(index) >= 1 << MINORBITS) | 253 | if (index_to_minor(index) >= 1 << MINORBITS) |
252 | return -ENOSPC; | 254 | return -ENOSPC; |
@@ -293,13 +295,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
293 | goto out_mempool; | 295 | goto out_mempool; |
294 | } | 296 | } |
295 | 297 | ||
296 | vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); | 298 | q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); |
297 | if (!vblk->disk->queue) { | 299 | if (!q) { |
298 | err = -ENOMEM; | 300 | err = -ENOMEM; |
299 | goto out_put_disk; | 301 | goto out_put_disk; |
300 | } | 302 | } |
301 | 303 | ||
302 | vblk->disk->queue->queuedata = vblk; | 304 | q->queuedata = vblk; |
303 | 305 | ||
304 | if (index < 26) { | 306 | if (index < 26) { |
305 | sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); | 307 | sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); |
@@ -323,10 +325,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
323 | 325 | ||
324 | /* If barriers are supported, tell block layer that queue is ordered */ | 326 | /* If barriers are supported, tell block layer that queue is ordered */ |
325 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) | 327 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) |
326 | blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH, | 328 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, |
327 | virtblk_prepare_flush); | 329 | virtblk_prepare_flush); |
328 | else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) | 330 | else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) |
329 | blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL); | 331 | blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL); |
330 | 332 | ||
331 | /* If disk is read-only in the host, the guest should obey */ | 333 | /* If disk is read-only in the host, the guest should obey */ |
332 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) | 334 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) |
@@ -345,14 +347,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
345 | set_capacity(vblk->disk, cap); | 347 | set_capacity(vblk->disk, cap); |
346 | 348 | ||
347 | /* We can handle whatever the host told us to handle. */ | 349 | /* We can handle whatever the host told us to handle. */ |
348 | blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2); | 350 | blk_queue_max_phys_segments(q, vblk->sg_elems-2); |
349 | blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2); | 351 | blk_queue_max_hw_segments(q, vblk->sg_elems-2); |
350 | 352 | ||
351 | /* No need to bounce any requests */ | 353 | /* No need to bounce any requests */ |
352 | blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY); | 354 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
353 | 355 | ||
354 | /* No real sector limit. */ | 356 | /* No real sector limit. */ |
355 | blk_queue_max_sectors(vblk->disk->queue, -1U); | 357 | blk_queue_max_sectors(q, -1U); |
356 | 358 | ||
357 | /* Host can optionally specify maximum segment size and number of | 359 | /* Host can optionally specify maximum segment size and number of |
358 | * segments. */ | 360 | * segments. */ |
@@ -360,16 +362,45 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
360 | offsetof(struct virtio_blk_config, size_max), | 362 | offsetof(struct virtio_blk_config, size_max), |
361 | &v); | 363 | &v); |
362 | if (!err) | 364 | if (!err) |
363 | blk_queue_max_segment_size(vblk->disk->queue, v); | 365 | blk_queue_max_segment_size(q, v); |
364 | else | 366 | else |
365 | blk_queue_max_segment_size(vblk->disk->queue, -1U); | 367 | blk_queue_max_segment_size(q, -1U); |
366 | 368 | ||
367 | /* Host can optionally specify the block size of the device */ | 369 | /* Host can optionally specify the block size of the device */ |
368 | err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, | 370 | err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, |
369 | offsetof(struct virtio_blk_config, blk_size), | 371 | offsetof(struct virtio_blk_config, blk_size), |
370 | &blk_size); | 372 | &blk_size); |
371 | if (!err) | 373 | if (!err) |
372 | blk_queue_logical_block_size(vblk->disk->queue, blk_size); | 374 | blk_queue_logical_block_size(q, blk_size); |
375 | else | ||
376 | blk_size = queue_logical_block_size(q); | ||
377 | |||
378 | /* Use topology information if available */ | ||
379 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
380 | offsetof(struct virtio_blk_config, physical_block_exp), | ||
381 | &physical_block_exp); | ||
382 | if (!err && physical_block_exp) | ||
383 | blk_queue_physical_block_size(q, | ||
384 | blk_size * (1 << physical_block_exp)); | ||
385 | |||
386 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
387 | offsetof(struct virtio_blk_config, alignment_offset), | ||
388 | &alignment_offset); | ||
389 | if (!err && alignment_offset) | ||
390 | blk_queue_alignment_offset(q, blk_size * alignment_offset); | ||
391 | |||
392 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
393 | offsetof(struct virtio_blk_config, min_io_size), | ||
394 | &min_io_size); | ||
395 | if (!err && min_io_size) | ||
396 | blk_queue_io_min(q, blk_size * min_io_size); | ||
397 | |||
398 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
399 | offsetof(struct virtio_blk_config, opt_io_size), | ||
400 | &opt_io_size); | ||
401 | if (!err && opt_io_size) | ||
402 | blk_queue_io_opt(q, blk_size * opt_io_size); | ||
403 | |||
373 | 404 | ||
374 | add_disk(vblk->disk); | 405 | add_disk(vblk->disk); |
375 | return 0; | 406 | return 0; |
@@ -412,7 +443,7 @@ static struct virtio_device_id id_table[] = { | |||
412 | static unsigned int features[] = { | 443 | static unsigned int features[] = { |
413 | VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, | 444 | VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, |
414 | VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, | 445 | VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, |
415 | VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH | 446 | VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY |
416 | }; | 447 | }; |
417 | 448 | ||
418 | /* | 449 | /* |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index e023682be2c4..3141dd3b6e53 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -666,6 +666,14 @@ config VIRTIO_CONSOLE | |||
666 | help | 666 | help |
667 | Virtio console for use with lguest and other hypervisors. | 667 | Virtio console for use with lguest and other hypervisors. |
668 | 668 | ||
669 | Also serves as a general-purpose serial device for data | ||
670 | transfer between the guest and host. Character devices at | ||
671 | /dev/vportNpn will be created when corresponding ports are | ||
672 | found, where N is the device number and n is the port number | ||
673 | within that device. If specified by the host, a sysfs | ||
674 | attribute called 'name' will be populated with a name for | ||
675 | the port which can be used by udev scripts to create a | ||
676 | symlink to the device. | ||
669 | 677 | ||
670 | config HVCS | 678 | config HVCS |
671 | tristate "IBM Hypervisor Virtual Console Server support" | 679 | tristate "IBM Hypervisor Virtual Console Server support" |
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c index 0afc8b82212e..5fe4631e2a61 100644 --- a/drivers/char/hvc_beat.c +++ b/drivers/char/hvc_beat.c | |||
@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt) | |||
84 | return cnt; | 84 | return cnt; |
85 | } | 85 | } |
86 | 86 | ||
87 | static struct hv_ops hvc_beat_get_put_ops = { | 87 | static const struct hv_ops hvc_beat_get_put_ops = { |
88 | .get_chars = hvc_beat_get_chars, | 88 | .get_chars = hvc_beat_get_chars, |
89 | .put_chars = hvc_beat_put_chars, | 89 | .put_chars = hvc_beat_put_chars, |
90 | }; | 90 | }; |
@@ -99,7 +99,7 @@ static int hvc_beat_config(char *p) | |||
99 | 99 | ||
100 | static int __init hvc_beat_console_init(void) | 100 | static int __init hvc_beat_console_init(void) |
101 | { | 101 | { |
102 | if (hvc_beat_useit && machine_is_compatible("Beat")) { | 102 | if (hvc_beat_useit && of_machine_is_compatible("Beat")) { |
103 | hvc_instantiate(0, 0, &hvc_beat_get_put_ops); | 103 | hvc_instantiate(0, 0, &hvc_beat_get_put_ops); |
104 | } | 104 | } |
105 | return 0; | 105 | return 0; |
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 416d3423150d..d8dac5820f0e 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index) | |||
125 | * console interfaces but can still be used as a tty device. This has to be | 125 | * console interfaces but can still be used as a tty device. This has to be |
126 | * static because kmalloc will not work during early console init. | 126 | * static because kmalloc will not work during early console init. |
127 | */ | 127 | */ |
128 | static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; | 128 | static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; |
129 | static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = | 129 | static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = |
130 | {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; | 130 | {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; |
131 | 131 | ||
@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kref *kref) | |||
247 | * vty adapters do NOT get an hvc_instantiate() callback since they | 247 | * vty adapters do NOT get an hvc_instantiate() callback since they |
248 | * appear after early console init. | 248 | * appear after early console init. |
249 | */ | 249 | */ |
250 | int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops) | 250 | int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) |
251 | { | 251 | { |
252 | struct hvc_struct *hp; | 252 | struct hvc_struct *hp; |
253 | 253 | ||
@@ -749,7 +749,8 @@ static const struct tty_operations hvc_ops = { | |||
749 | }; | 749 | }; |
750 | 750 | ||
751 | struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, | 751 | struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, |
752 | struct hv_ops *ops, int outbuf_size) | 752 | const struct hv_ops *ops, |
753 | int outbuf_size) | ||
753 | { | 754 | { |
754 | struct hvc_struct *hp; | 755 | struct hvc_struct *hp; |
755 | int i; | 756 | int i; |
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h index 10950ca706d8..52ddf4d3716c 100644 --- a/drivers/char/hvc_console.h +++ b/drivers/char/hvc_console.h | |||
@@ -55,7 +55,7 @@ struct hvc_struct { | |||
55 | int outbuf_size; | 55 | int outbuf_size; |
56 | int n_outbuf; | 56 | int n_outbuf; |
57 | uint32_t vtermno; | 57 | uint32_t vtermno; |
58 | struct hv_ops *ops; | 58 | const struct hv_ops *ops; |
59 | int irq_requested; | 59 | int irq_requested; |
60 | int data; | 60 | int data; |
61 | struct winsize ws; | 61 | struct winsize ws; |
@@ -76,11 +76,12 @@ struct hv_ops { | |||
76 | }; | 76 | }; |
77 | 77 | ||
78 | /* Register a vterm and a slot index for use as a console (console_init) */ | 78 | /* Register a vterm and a slot index for use as a console (console_init) */ |
79 | extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); | 79 | extern int hvc_instantiate(uint32_t vtermno, int index, |
80 | const struct hv_ops *ops); | ||
80 | 81 | ||
81 | /* register a vterm for hvc tty operation (module_init or hotplug add) */ | 82 | /* register a vterm for hvc tty operation (module_init or hotplug add) */ |
82 | extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, | 83 | extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, |
83 | struct hv_ops *ops, int outbuf_size); | 84 | const struct hv_ops *ops, int outbuf_size); |
84 | /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ | 85 | /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ |
85 | extern int hvc_remove(struct hvc_struct *hp); | 86 | extern int hvc_remove(struct hvc_struct *hp); |
86 | 87 | ||
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c index 936d05bf37fa..fd0242676a2a 100644 --- a/drivers/char/hvc_iseries.c +++ b/drivers/char/hvc_iseries.c | |||
@@ -197,7 +197,7 @@ done: | |||
197 | return sent; | 197 | return sent; |
198 | } | 198 | } |
199 | 199 | ||
200 | static struct hv_ops hvc_get_put_ops = { | 200 | static const struct hv_ops hvc_get_put_ops = { |
201 | .get_chars = get_chars, | 201 | .get_chars = get_chars, |
202 | .put_chars = put_chars, | 202 | .put_chars = put_chars, |
203 | .notifier_add = notifier_add_irq, | 203 | .notifier_add = notifier_add_irq, |
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index fe62bd0e17b7..21681a81cc35 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c | |||
@@ -922,7 +922,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev) | |||
922 | 922 | ||
923 | 923 | ||
924 | /* HVC operations */ | 924 | /* HVC operations */ |
925 | static struct hv_ops hvc_iucv_ops = { | 925 | static const struct hv_ops hvc_iucv_ops = { |
926 | .get_chars = hvc_iucv_get_chars, | 926 | .get_chars = hvc_iucv_get_chars, |
927 | .put_chars = hvc_iucv_put_chars, | 927 | .put_chars = hvc_iucv_put_chars, |
928 | .notifier_add = hvc_iucv_notifier_add, | 928 | .notifier_add = hvc_iucv_notifier_add, |
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c index 88590d040046..61c4a61558d9 100644 --- a/drivers/char/hvc_rtas.c +++ b/drivers/char/hvc_rtas.c | |||
@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count) | |||
71 | return i; | 71 | return i; |
72 | } | 72 | } |
73 | 73 | ||
74 | static struct hv_ops hvc_rtas_get_put_ops = { | 74 | static const struct hv_ops hvc_rtas_get_put_ops = { |
75 | .get_chars = hvc_rtas_read_console, | 75 | .get_chars = hvc_rtas_read_console, |
76 | .put_chars = hvc_rtas_write_console, | 76 | .put_chars = hvc_rtas_write_console, |
77 | }; | 77 | }; |
diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c index bd63ba878a56..b0957e61a7be 100644 --- a/drivers/char/hvc_udbg.c +++ b/drivers/char/hvc_udbg.c | |||
@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count) | |||
58 | return i; | 58 | return i; |
59 | } | 59 | } |
60 | 60 | ||
61 | static struct hv_ops hvc_udbg_ops = { | 61 | static const struct hv_ops hvc_udbg_ops = { |
62 | .get_chars = hvc_udbg_get, | 62 | .get_chars = hvc_udbg_get, |
63 | .put_chars = hvc_udbg_put, | 63 | .put_chars = hvc_udbg_put, |
64 | }; | 64 | }; |
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c index 10be343d6ae7..27370e99c66f 100644 --- a/drivers/char/hvc_vio.c +++ b/drivers/char/hvc_vio.c | |||
@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count) | |||
77 | return got; | 77 | return got; |
78 | } | 78 | } |
79 | 79 | ||
80 | static struct hv_ops hvc_get_put_ops = { | 80 | static const struct hv_ops hvc_get_put_ops = { |
81 | .get_chars = filtered_get_chars, | 81 | .get_chars = filtered_get_chars, |
82 | .put_chars = hvc_put_chars, | 82 | .put_chars = hvc_put_chars, |
83 | .notifier_add = notifier_add_irq, | 83 | .notifier_add = notifier_add_irq, |
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c index b1a71638c772..60446f82a3fc 100644 --- a/drivers/char/hvc_xen.c +++ b/drivers/char/hvc_xen.c | |||
@@ -122,7 +122,7 @@ static int read_console(uint32_t vtermno, char *buf, int len) | |||
122 | return recv; | 122 | return recv; |
123 | } | 123 | } |
124 | 124 | ||
125 | static struct hv_ops hvc_ops = { | 125 | static const struct hv_ops hvc_ops = { |
126 | .get_chars = read_console, | 126 | .get_chars = read_console, |
127 | .put_chars = write_console, | 127 | .put_chars = write_console, |
128 | .notifier_add = notifier_add_irq, | 128 | .notifier_add = notifier_add_irq, |
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 87060266ef91..6ea1014697d1 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -186,3 +186,15 @@ config HW_RANDOM_MXC_RNGA | |||
186 | module will be called mxc-rnga. | 186 | module will be called mxc-rnga. |
187 | 187 | ||
188 | If unsure, say Y. | 188 | If unsure, say Y. |
189 | |||
190 | config HW_RANDOM_NOMADIK | ||
191 | tristate "ST-Ericsson Nomadik Random Number Generator support" | ||
192 | depends on HW_RANDOM && PLAT_NOMADIK | ||
193 | ---help--- | ||
194 | This driver provides kernel-side support for the Random Number | ||
195 | Generator hardware found on ST-Ericsson SoCs (8815 and 8500). | ||
196 | |||
197 | To compile this driver as a module, choose M here: the | ||
198 | module will be called nomadik-rng. | ||
199 | |||
200 | If unsure, say Y. | ||
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 5eeb1303f0d0..4273308aa1e3 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile | |||
@@ -18,3 +18,4 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o | |||
18 | obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o | 18 | obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o |
19 | obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o | 19 | obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o |
20 | obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o | 20 | obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o |
21 | obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o | ||
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c new file mode 100644 index 000000000000..a8b4c4010144 --- /dev/null +++ b/drivers/char/hw_random/nomadik-rng.c | |||
@@ -0,0 +1,103 @@ | |||
1 | /* | ||
2 | * Nomadik RNG support | ||
3 | * Copyright 2009 Alessandro Rubini | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/amba/bus.h> | ||
16 | #include <linux/hw_random.h> | ||
17 | #include <linux/io.h> | ||
18 | |||
19 | static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) | ||
20 | { | ||
21 | void __iomem *base = (void __iomem *)rng->priv; | ||
22 | |||
23 | /* | ||
24 | * The register is 32 bits and gives 16 random bits (low half). | ||
25 | * A subsequent read will delay the core for 400ns, so we just read | ||
26 | * once and accept the very unlikely very small delay, even if wait==0. | ||
27 | */ | ||
28 | *(u16 *)data = __raw_readl(base + 8) & 0xffff; | ||
29 | return 2; | ||
30 | } | ||
31 | |||
32 | /* we have at most one RNG per machine, granted */ | ||
33 | static struct hwrng nmk_rng = { | ||
34 | .name = "nomadik", | ||
35 | .read = nmk_rng_read, | ||
36 | }; | ||
37 | |||
38 | static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id) | ||
39 | { | ||
40 | void __iomem *base; | ||
41 | int ret; | ||
42 | |||
43 | ret = amba_request_regions(dev, dev->dev.init_name); | ||
44 | if (ret) | ||
45 | return ret; | ||
46 | ret = -ENOMEM; | ||
47 | base = ioremap(dev->res.start, resource_size(&dev->res)); | ||
48 | if (!base) | ||
49 | goto out_release; | ||
50 | nmk_rng.priv = (unsigned long)base; | ||
51 | ret = hwrng_register(&nmk_rng); | ||
52 | if (ret) | ||
53 | goto out_unmap; | ||
54 | return 0; | ||
55 | |||
56 | out_unmap: | ||
57 | iounmap(base); | ||
58 | out_release: | ||
59 | amba_release_regions(dev); | ||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | static int nmk_rng_remove(struct amba_device *dev) | ||
64 | { | ||
65 | void __iomem *base = (void __iomem *)nmk_rng.priv; | ||
66 | hwrng_unregister(&nmk_rng); | ||
67 | iounmap(base); | ||
68 | amba_release_regions(dev); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static struct amba_id nmk_rng_ids[] = { | ||
73 | { | ||
74 | .id = 0x000805e1, | ||
75 | .mask = 0x000fffff, /* top bits are rev and cfg: accept all */ | ||
76 | }, | ||
77 | {0, 0}, | ||
78 | }; | ||
79 | |||
80 | static struct amba_driver nmk_rng_driver = { | ||
81 | .drv = { | ||
82 | .owner = THIS_MODULE, | ||
83 | .name = "rng", | ||
84 | }, | ||
85 | .probe = nmk_rng_probe, | ||
86 | .remove = nmk_rng_remove, | ||
87 | .id_table = nmk_rng_ids, | ||
88 | }; | ||
89 | |||
90 | static int __init nmk_rng_init(void) | ||
91 | { | ||
92 | return amba_driver_register(&nmk_rng_driver); | ||
93 | } | ||
94 | |||
95 | static void __devexit nmk_rng_exit(void) | ||
96 | { | ||
97 | amba_driver_unregister(&nmk_rng_driver); | ||
98 | } | ||
99 | |||
100 | module_init(nmk_rng_init); | ||
101 | module_exit(nmk_rng_exit); | ||
102 | |||
103 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index a035ae39a359..213373b5f17f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -1,18 +1,6 @@ | |||
1 | /*D:300 | 1 | /* |
2 | * The Guest console driver | 2 | * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation |
3 | * | 3 | * Copyright (C) 2009, 2010 Red Hat, Inc. |
4 | * Writing console drivers is one of the few remaining Dark Arts in Linux. | ||
5 | * Fortunately for us, the path of virtual consoles has been well-trodden by | ||
6 | * the PowerPC folks, who wrote "hvc_console.c" to generically support any | ||
7 | * virtual console. We use that infrastructure which only requires us to write | ||
8 | * the basic put_chars and get_chars functions and call the right register | ||
9 | * functions. | ||
10 | :*/ | ||
11 | |||
12 | /*M:002 The console can be flooded: while the Guest is processing input the | ||
13 | * Host can send more. Buffering in the Host could alleviate this, but it is a | ||
14 | * difficult problem in general. :*/ | ||
15 | /* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation | ||
16 | * | 4 | * |
17 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
18 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -28,142 +16,694 @@ | |||
28 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
29 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
30 | */ | 18 | */ |
19 | #include <linux/cdev.h> | ||
20 | #include <linux/debugfs.h> | ||
21 | #include <linux/device.h> | ||
31 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/fs.h> | ||
32 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/list.h> | ||
26 | #include <linux/poll.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/spinlock.h> | ||
33 | #include <linux/virtio.h> | 29 | #include <linux/virtio.h> |
34 | #include <linux/virtio_console.h> | 30 | #include <linux/virtio_console.h> |
31 | #include <linux/wait.h> | ||
32 | #include <linux/workqueue.h> | ||
35 | #include "hvc_console.h" | 33 | #include "hvc_console.h" |
36 | 34 | ||
37 | /*D:340 These represent our input and output console queues, and the virtio | 35 | /* |
38 | * operations for them. */ | 36 | * This is a global struct for storing common data for all the devices |
39 | static struct virtqueue *in_vq, *out_vq; | 37 | * this driver handles. |
40 | static struct virtio_device *vdev; | 38 | * |
39 | * Mainly, it has a linked list for all the consoles in one place so | ||
40 | * that callbacks from hvc for get_chars(), put_chars() work properly | ||
41 | * across multiple devices and multiple ports per device. | ||
42 | */ | ||
43 | struct ports_driver_data { | ||
44 | /* Used for registering chardevs */ | ||
45 | struct class *class; | ||
46 | |||
47 | /* Used for exporting per-port information to debugfs */ | ||
48 | struct dentry *debugfs_dir; | ||
49 | |||
50 | /* Number of devices this driver is handling */ | ||
51 | unsigned int index; | ||
52 | |||
53 | /* | ||
54 | * This is used to keep track of the number of hvc consoles | ||
55 | * spawned by this driver. This number is given as the first | ||
56 | * argument to hvc_alloc(). To correctly map an initial | ||
57 | * console spawned via hvc_instantiate to the console being | ||
58 | * hooked up via hvc_alloc, we need to pass the same vtermno. | ||
59 | * | ||
60 | * We also just assume the first console being initialised was | ||
61 | * the first one that got used as the initial console. | ||
62 | */ | ||
63 | unsigned int next_vtermno; | ||
64 | |||
65 | /* All the console devices handled by this driver */ | ||
66 | struct list_head consoles; | ||
67 | }; | ||
68 | static struct ports_driver_data pdrvdata; | ||
69 | |||
70 | DEFINE_SPINLOCK(pdrvdata_lock); | ||
71 | |||
72 | /* This struct holds information that's relevant only for console ports */ | ||
73 | struct console { | ||
74 | /* We'll place all consoles in a list in the pdrvdata struct */ | ||
75 | struct list_head list; | ||
76 | |||
77 | /* The hvc device associated with this console port */ | ||
78 | struct hvc_struct *hvc; | ||
79 | |||
80 | /* | ||
81 | * This number identifies the number that we used to register | ||
82 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the | ||
83 | * number passed on by the hvc callbacks to us to | ||
84 | * differentiate between the other console ports handled by | ||
85 | * this driver | ||
86 | */ | ||
87 | u32 vtermno; | ||
88 | }; | ||
89 | |||
90 | struct port_buffer { | ||
91 | char *buf; | ||
92 | |||
93 | /* size of the buffer in *buf above */ | ||
94 | size_t size; | ||
95 | |||
96 | /* used length of the buffer */ | ||
97 | size_t len; | ||
98 | /* offset in the buf from which to consume data */ | ||
99 | size_t offset; | ||
100 | }; | ||
101 | |||
102 | /* | ||
103 | * This is a per-device struct that stores data common to all the | ||
104 | * ports for that device (vdev->priv). | ||
105 | */ | ||
106 | struct ports_device { | ||
107 | /* | ||
108 | * Workqueue handlers where we process deferred work after | ||
109 | * notification | ||
110 | */ | ||
111 | struct work_struct control_work; | ||
112 | struct work_struct config_work; | ||
113 | |||
114 | struct list_head ports; | ||
115 | |||
116 | /* To protect the list of ports */ | ||
117 | spinlock_t ports_lock; | ||
118 | |||
119 | /* To protect the vq operations for the control channel */ | ||
120 | spinlock_t cvq_lock; | ||
121 | |||
122 | /* The current config space is stored here */ | ||
123 | struct virtio_console_config config; | ||
124 | |||
125 | /* The virtio device we're associated with */ | ||
126 | struct virtio_device *vdev; | ||
127 | |||
128 | /* | ||
129 | * A couple of virtqueues for the control channel: one for | ||
130 | * guest->host transfers, one for host->guest transfers | ||
131 | */ | ||
132 | struct virtqueue *c_ivq, *c_ovq; | ||
133 | |||
134 | /* Array of per-port IO virtqueues */ | ||
135 | struct virtqueue **in_vqs, **out_vqs; | ||
136 | |||
137 | /* Used for numbering devices for sysfs and debugfs */ | ||
138 | unsigned int drv_index; | ||
139 | |||
140 | /* Major number for this device. Ports will be created as minors. */ | ||
141 | int chr_major; | ||
142 | }; | ||
143 | |||
144 | /* This struct holds the per-port data */ | ||
145 | struct port { | ||
146 | /* Next port in the list, head is in the ports_device */ | ||
147 | struct list_head list; | ||
148 | |||
149 | /* Pointer to the parent virtio_console device */ | ||
150 | struct ports_device *portdev; | ||
151 | |||
152 | /* The current buffer from which data has to be fed to readers */ | ||
153 | struct port_buffer *inbuf; | ||
154 | |||
155 | /* | ||
156 | * To protect the operations on the in_vq associated with this | ||
157 | * port. Has to be a spinlock because it can be called from | ||
158 | * interrupt context (get_char()). | ||
159 | */ | ||
160 | spinlock_t inbuf_lock; | ||
161 | |||
162 | /* The IO vqs for this port */ | ||
163 | struct virtqueue *in_vq, *out_vq; | ||
164 | |||
165 | /* File in the debugfs directory that exposes this port's information */ | ||
166 | struct dentry *debugfs_file; | ||
167 | |||
168 | /* | ||
169 | * The entries in this struct will be valid if this port is | ||
170 | * hooked up to an hvc console | ||
171 | */ | ||
172 | struct console cons; | ||
173 | |||
174 | /* Each port associates with a separate char device */ | ||
175 | struct cdev cdev; | ||
176 | struct device *dev; | ||
177 | |||
178 | /* A waitqueue for poll() or blocking read operations */ | ||
179 | wait_queue_head_t waitqueue; | ||
180 | |||
181 | /* The 'name' of the port that we expose via sysfs properties */ | ||
182 | char *name; | ||
183 | |||
184 | /* The 'id' to identify the port with the Host */ | ||
185 | u32 id; | ||
186 | |||
187 | /* Is the host device open */ | ||
188 | bool host_connected; | ||
189 | |||
190 | /* We should allow only one process to open a port */ | ||
191 | bool guest_connected; | ||
192 | }; | ||
193 | |||
194 | /* This is the very early arch-specified put chars function. */ | ||
195 | static int (*early_put_chars)(u32, const char *, int); | ||
196 | |||
197 | static struct port *find_port_by_vtermno(u32 vtermno) | ||
198 | { | ||
199 | struct port *port; | ||
200 | struct console *cons; | ||
201 | unsigned long flags; | ||
202 | |||
203 | spin_lock_irqsave(&pdrvdata_lock, flags); | ||
204 | list_for_each_entry(cons, &pdrvdata.consoles, list) { | ||
205 | if (cons->vtermno == vtermno) { | ||
206 | port = container_of(cons, struct port, cons); | ||
207 | goto out; | ||
208 | } | ||
209 | } | ||
210 | port = NULL; | ||
211 | out: | ||
212 | spin_unlock_irqrestore(&pdrvdata_lock, flags); | ||
213 | return port; | ||
214 | } | ||
215 | |||
216 | static struct port *find_port_by_id(struct ports_device *portdev, u32 id) | ||
217 | { | ||
218 | struct port *port; | ||
219 | unsigned long flags; | ||
220 | |||
221 | spin_lock_irqsave(&portdev->ports_lock, flags); | ||
222 | list_for_each_entry(port, &portdev->ports, list) | ||
223 | if (port->id == id) | ||
224 | goto out; | ||
225 | port = NULL; | ||
226 | out: | ||
227 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | ||
228 | |||
229 | return port; | ||
230 | } | ||
231 | |||
232 | static struct port *find_port_by_vq(struct ports_device *portdev, | ||
233 | struct virtqueue *vq) | ||
234 | { | ||
235 | struct port *port; | ||
236 | unsigned long flags; | ||
237 | |||
238 | spin_lock_irqsave(&portdev->ports_lock, flags); | ||
239 | list_for_each_entry(port, &portdev->ports, list) | ||
240 | if (port->in_vq == vq || port->out_vq == vq) | ||
241 | goto out; | ||
242 | port = NULL; | ||
243 | out: | ||
244 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | ||
245 | return port; | ||
246 | } | ||
247 | |||
248 | static bool is_console_port(struct port *port) | ||
249 | { | ||
250 | if (port->cons.hvc) | ||
251 | return true; | ||
252 | return false; | ||
253 | } | ||
254 | |||
255 | static inline bool use_multiport(struct ports_device *portdev) | ||
256 | { | ||
257 | /* | ||
258 | * This condition can be true when put_chars is called from | ||
259 | * early_init | ||
260 | */ | ||
261 | if (!portdev->vdev) | ||
262 | return 0; | ||
263 | return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); | ||
264 | } | ||
41 | 265 | ||
42 | /* This is our input buffer, and how much data is left in it. */ | 266 | static void free_buf(struct port_buffer *buf) |
43 | static unsigned int in_len; | 267 | { |
44 | static char *in, *inbuf; | 268 | kfree(buf->buf); |
269 | kfree(buf); | ||
270 | } | ||
271 | |||
272 | static struct port_buffer *alloc_buf(size_t buf_size) | ||
273 | { | ||
274 | struct port_buffer *buf; | ||
45 | 275 | ||
46 | /* The operations for our console. */ | 276 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); |
47 | static struct hv_ops virtio_cons; | 277 | if (!buf) |
278 | goto fail; | ||
279 | buf->buf = kzalloc(buf_size, GFP_KERNEL); | ||
280 | if (!buf->buf) | ||
281 | goto free_buf; | ||
282 | buf->len = 0; | ||
283 | buf->offset = 0; | ||
284 | buf->size = buf_size; | ||
285 | return buf; | ||
286 | |||
287 | free_buf: | ||
288 | kfree(buf); | ||
289 | fail: | ||
290 | return NULL; | ||
291 | } | ||
292 | |||
293 | /* Callers should take appropriate locks */ | ||
294 | static void *get_inbuf(struct port *port) | ||
295 | { | ||
296 | struct port_buffer *buf; | ||
297 | struct virtqueue *vq; | ||
298 | unsigned int len; | ||
48 | 299 | ||
49 | /* The hvc device */ | 300 | vq = port->in_vq; |
50 | static struct hvc_struct *hvc; | 301 | buf = vq->vq_ops->get_buf(vq, &len); |
302 | if (buf) { | ||
303 | buf->len = len; | ||
304 | buf->offset = 0; | ||
305 | } | ||
306 | return buf; | ||
307 | } | ||
51 | 308 | ||
52 | /*D:310 The put_chars() callback is pretty straightforward. | 309 | /* |
310 | * Create a scatter-gather list representing our input buffer and put | ||
311 | * it in the queue. | ||
53 | * | 312 | * |
54 | * We turn the characters into a scatter-gather list, add it to the output | 313 | * Callers should take appropriate locks. |
55 | * queue and then kick the Host. Then we sit here waiting for it to finish: | 314 | */ |
56 | * inefficient in theory, but in practice implementations will do it | 315 | static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) |
57 | * immediately (lguest's Launcher does). */ | ||
58 | static int put_chars(u32 vtermno, const char *buf, int count) | ||
59 | { | 316 | { |
60 | struct scatterlist sg[1]; | 317 | struct scatterlist sg[1]; |
318 | int ret; | ||
319 | |||
320 | sg_init_one(sg, buf->buf, buf->size); | ||
321 | |||
322 | ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); | ||
323 | vq->vq_ops->kick(vq); | ||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | /* Discard any unread data this port has. Callers lockers. */ | ||
328 | static void discard_port_data(struct port *port) | ||
329 | { | ||
330 | struct port_buffer *buf; | ||
331 | struct virtqueue *vq; | ||
61 | unsigned int len; | 332 | unsigned int len; |
333 | int ret; | ||
62 | 334 | ||
63 | /* This is a convenient routine to initialize a single-elem sg list */ | 335 | vq = port->in_vq; |
64 | sg_init_one(sg, buf, count); | 336 | if (port->inbuf) |
337 | buf = port->inbuf; | ||
338 | else | ||
339 | buf = vq->vq_ops->get_buf(vq, &len); | ||
65 | 340 | ||
66 | /* add_buf wants a token to identify this buffer: we hand it any | 341 | ret = 0; |
67 | * non-NULL pointer, since there's only ever one buffer. */ | 342 | while (buf) { |
68 | if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) { | 343 | if (add_inbuf(vq, buf) < 0) { |
69 | /* Tell Host to go! */ | 344 | ret++; |
70 | out_vq->vq_ops->kick(out_vq); | 345 | free_buf(buf); |
71 | /* Chill out until it's done with the buffer. */ | 346 | } |
72 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | 347 | buf = vq->vq_ops->get_buf(vq, &len); |
73 | cpu_relax(); | ||
74 | } | 348 | } |
349 | port->inbuf = NULL; | ||
350 | if (ret) | ||
351 | dev_warn(port->dev, "Errors adding %d buffers back to vq\n", | ||
352 | ret); | ||
353 | } | ||
75 | 354 | ||
76 | /* We're expected to return the amount of data we wrote: all of it. */ | 355 | static bool port_has_data(struct port *port) |
77 | return count; | 356 | { |
357 | unsigned long flags; | ||
358 | bool ret; | ||
359 | |||
360 | spin_lock_irqsave(&port->inbuf_lock, flags); | ||
361 | if (port->inbuf) { | ||
362 | ret = true; | ||
363 | goto out; | ||
364 | } | ||
365 | port->inbuf = get_inbuf(port); | ||
366 | if (port->inbuf) { | ||
367 | ret = true; | ||
368 | goto out; | ||
369 | } | ||
370 | ret = false; | ||
371 | out: | ||
372 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | ||
373 | return ret; | ||
78 | } | 374 | } |
79 | 375 | ||
80 | /* Create a scatter-gather list representing our input buffer and put it in the | 376 | static ssize_t send_control_msg(struct port *port, unsigned int event, |
81 | * queue. */ | 377 | unsigned int value) |
82 | static void add_inbuf(void) | ||
83 | { | 378 | { |
84 | struct scatterlist sg[1]; | 379 | struct scatterlist sg[1]; |
85 | sg_init_one(sg, inbuf, PAGE_SIZE); | 380 | struct virtio_console_control cpkt; |
381 | struct virtqueue *vq; | ||
382 | int len; | ||
383 | |||
384 | if (!use_multiport(port->portdev)) | ||
385 | return 0; | ||
386 | |||
387 | cpkt.id = port->id; | ||
388 | cpkt.event = event; | ||
389 | cpkt.value = value; | ||
390 | |||
391 | vq = port->portdev->c_ovq; | ||
86 | 392 | ||
87 | /* We should always be able to add one buffer to an empty queue. */ | 393 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
88 | if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0) | 394 | if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { |
89 | BUG(); | 395 | vq->vq_ops->kick(vq); |
90 | in_vq->vq_ops->kick(in_vq); | 396 | while (!vq->vq_ops->get_buf(vq, &len)) |
397 | cpu_relax(); | ||
398 | } | ||
399 | return 0; | ||
91 | } | 400 | } |
92 | 401 | ||
93 | /*D:350 get_chars() is the callback from the hvc_console infrastructure when | 402 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) |
94 | * an interrupt is received. | ||
95 | * | ||
96 | * Most of the code deals with the fact that the hvc_console() infrastructure | ||
97 | * only asks us for 16 bytes at a time. We keep in_offset and in_used fields | ||
98 | * for partially-filled buffers. */ | ||
99 | static int get_chars(u32 vtermno, char *buf, int count) | ||
100 | { | 403 | { |
101 | /* If we don't have an input queue yet, we can't get input. */ | 404 | struct scatterlist sg[1]; |
102 | BUG_ON(!in_vq); | 405 | struct virtqueue *out_vq; |
406 | ssize_t ret; | ||
407 | unsigned int len; | ||
408 | |||
409 | out_vq = port->out_vq; | ||
410 | |||
411 | sg_init_one(sg, in_buf, in_count); | ||
412 | ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); | ||
413 | |||
414 | /* Tell Host to go! */ | ||
415 | out_vq->vq_ops->kick(out_vq); | ||
416 | |||
417 | if (ret < 0) { | ||
418 | len = 0; | ||
419 | goto fail; | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * Wait till the host acknowledges it pushed out the data we | ||
424 | * sent. Also ensure we return to userspace the number of | ||
425 | * bytes that were successfully consumed by the host. | ||
426 | */ | ||
427 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | ||
428 | cpu_relax(); | ||
429 | fail: | ||
430 | /* We're expected to return the amount of data we wrote */ | ||
431 | return len; | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * Give out the data that's requested from the buffer that we have | ||
436 | * queued up. | ||
437 | */ | ||
438 | static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, | ||
439 | bool to_user) | ||
440 | { | ||
441 | struct port_buffer *buf; | ||
442 | unsigned long flags; | ||
443 | |||
444 | if (!out_count || !port_has_data(port)) | ||
445 | return 0; | ||
446 | |||
447 | buf = port->inbuf; | ||
448 | out_count = min(out_count, buf->len - buf->offset); | ||
449 | |||
450 | if (to_user) { | ||
451 | ssize_t ret; | ||
452 | |||
453 | ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); | ||
454 | if (ret) | ||
455 | return -EFAULT; | ||
456 | } else { | ||
457 | memcpy(out_buf, buf->buf + buf->offset, out_count); | ||
458 | } | ||
459 | |||
460 | buf->offset += out_count; | ||
461 | |||
462 | if (buf->offset == buf->len) { | ||
463 | /* | ||
464 | * We're done using all the data in this buffer. | ||
465 | * Re-queue so that the Host can send us more data. | ||
466 | */ | ||
467 | spin_lock_irqsave(&port->inbuf_lock, flags); | ||
468 | port->inbuf = NULL; | ||
469 | |||
470 | if (add_inbuf(port->in_vq, buf) < 0) | ||
471 | dev_warn(port->dev, "failed add_buf\n"); | ||
472 | |||
473 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | ||
474 | } | ||
475 | /* Return the number of bytes actually copied */ | ||
476 | return out_count; | ||
477 | } | ||
103 | 478 | ||
104 | /* No buffer? Try to get one. */ | 479 | /* The condition that must be true for polling to end */ |
105 | if (!in_len) { | 480 | static bool wait_is_over(struct port *port) |
106 | in = in_vq->vq_ops->get_buf(in_vq, &in_len); | 481 | { |
107 | if (!in) | 482 | return port_has_data(port) || !port->host_connected; |
483 | } | ||
484 | |||
485 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | ||
486 | size_t count, loff_t *offp) | ||
487 | { | ||
488 | struct port *port; | ||
489 | ssize_t ret; | ||
490 | |||
491 | port = filp->private_data; | ||
492 | |||
493 | if (!port_has_data(port)) { | ||
494 | /* | ||
495 | * If nothing's connected on the host just return 0 in | ||
496 | * case of list_empty; this tells the userspace app | ||
497 | * that there's no connection | ||
498 | */ | ||
499 | if (!port->host_connected) | ||
108 | return 0; | 500 | return 0; |
501 | if (filp->f_flags & O_NONBLOCK) | ||
502 | return -EAGAIN; | ||
503 | |||
504 | ret = wait_event_interruptible(port->waitqueue, | ||
505 | wait_is_over(port)); | ||
506 | if (ret < 0) | ||
507 | return ret; | ||
508 | } | ||
509 | /* | ||
510 | * We could've received a disconnection message while we were | ||
511 | * waiting for more data. | ||
512 | * | ||
513 | * This check is not clubbed in the if() statement above as we | ||
514 | * might receive some data as well as the host could get | ||
515 | * disconnected after we got woken up from our wait. So we | ||
516 | * really want to give off whatever data we have and only then | ||
517 | * check for host_connected. | ||
518 | */ | ||
519 | if (!port_has_data(port) && !port->host_connected) | ||
520 | return 0; | ||
521 | |||
522 | return fill_readbuf(port, ubuf, count, true); | ||
523 | } | ||
524 | |||
525 | static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | ||
526 | size_t count, loff_t *offp) | ||
527 | { | ||
528 | struct port *port; | ||
529 | char *buf; | ||
530 | ssize_t ret; | ||
531 | |||
532 | port = filp->private_data; | ||
533 | |||
534 | count = min((size_t)(32 * 1024), count); | ||
535 | |||
536 | buf = kmalloc(count, GFP_KERNEL); | ||
537 | if (!buf) | ||
538 | return -ENOMEM; | ||
539 | |||
540 | ret = copy_from_user(buf, ubuf, count); | ||
541 | if (ret) { | ||
542 | ret = -EFAULT; | ||
543 | goto free_buf; | ||
109 | } | 544 | } |
110 | 545 | ||
111 | /* You want more than we have to give? Well, try wanting less! */ | 546 | ret = send_buf(port, buf, count); |
112 | if (in_len < count) | 547 | free_buf: |
113 | count = in_len; | 548 | kfree(buf); |
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | ||
553 | { | ||
554 | struct port *port; | ||
555 | unsigned int ret; | ||
556 | |||
557 | port = filp->private_data; | ||
558 | poll_wait(filp, &port->waitqueue, wait); | ||
559 | |||
560 | ret = 0; | ||
561 | if (port->inbuf) | ||
562 | ret |= POLLIN | POLLRDNORM; | ||
563 | if (port->host_connected) | ||
564 | ret |= POLLOUT; | ||
565 | if (!port->host_connected) | ||
566 | ret |= POLLHUP; | ||
567 | |||
568 | return ret; | ||
569 | } | ||
570 | |||
571 | static int port_fops_release(struct inode *inode, struct file *filp) | ||
572 | { | ||
573 | struct port *port; | ||
574 | |||
575 | port = filp->private_data; | ||
576 | |||
577 | /* Notify host of port being closed */ | ||
578 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | ||
579 | |||
580 | spin_lock_irq(&port->inbuf_lock); | ||
581 | port->guest_connected = false; | ||
582 | |||
583 | discard_port_data(port); | ||
584 | |||
585 | spin_unlock_irq(&port->inbuf_lock); | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int port_fops_open(struct inode *inode, struct file *filp) | ||
591 | { | ||
592 | struct cdev *cdev = inode->i_cdev; | ||
593 | struct port *port; | ||
594 | |||
595 | port = container_of(cdev, struct port, cdev); | ||
596 | filp->private_data = port; | ||
597 | |||
598 | /* | ||
599 | * Don't allow opening of console port devices -- that's done | ||
600 | * via /dev/hvc | ||
601 | */ | ||
602 | if (is_console_port(port)) | ||
603 | return -ENXIO; | ||
604 | |||
605 | /* Allow only one process to open a particular port at a time */ | ||
606 | spin_lock_irq(&port->inbuf_lock); | ||
607 | if (port->guest_connected) { | ||
608 | spin_unlock_irq(&port->inbuf_lock); | ||
609 | return -EMFILE; | ||
610 | } | ||
114 | 611 | ||
115 | /* Copy across to their buffer and increment offset. */ | 612 | port->guest_connected = true; |
116 | memcpy(buf, in, count); | 613 | spin_unlock_irq(&port->inbuf_lock); |
117 | in += count; | ||
118 | in_len -= count; | ||
119 | 614 | ||
120 | /* Finished? Re-register buffer so Host will use it again. */ | 615 | /* Notify host of port being opened */ |
121 | if (in_len == 0) | 616 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); |
122 | add_inbuf(); | ||
123 | 617 | ||
124 | return count; | 618 | return 0; |
125 | } | 619 | } |
126 | /*:*/ | ||
127 | 620 | ||
128 | /*D:320 Console drivers are initialized very early so boot messages can go out, | 621 | /* |
129 | * so we do things slightly differently from the generic virtio initialization | 622 | * The file operations that we support: programs in the guest can open |
130 | * of the net and block drivers. | 623 | * a console device, read from it, write to it, poll for data and |
624 | * close it. The devices are at | ||
625 | * /dev/vport<device number>p<port number> | ||
626 | */ | ||
627 | static const struct file_operations port_fops = { | ||
628 | .owner = THIS_MODULE, | ||
629 | .open = port_fops_open, | ||
630 | .read = port_fops_read, | ||
631 | .write = port_fops_write, | ||
632 | .poll = port_fops_poll, | ||
633 | .release = port_fops_release, | ||
634 | }; | ||
635 | |||
636 | /* | ||
637 | * The put_chars() callback is pretty straightforward. | ||
131 | * | 638 | * |
132 | * At this stage, the console is output-only. It's too early to set up a | 639 | * We turn the characters into a scatter-gather list, add it to the |
133 | * virtqueue, so we let the drivers do some boutique early-output thing. */ | 640 | * output queue and then kick the Host. Then we sit here waiting for |
134 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) | 641 | * it to finish: inefficient in theory, but in practice |
642 | * implementations will do it immediately (lguest's Launcher does). | ||
643 | */ | ||
644 | static int put_chars(u32 vtermno, const char *buf, int count) | ||
135 | { | 645 | { |
136 | virtio_cons.put_chars = put_chars; | 646 | struct port *port; |
137 | return hvc_instantiate(0, 0, &virtio_cons); | 647 | |
648 | port = find_port_by_vtermno(vtermno); | ||
649 | if (!port) | ||
650 | return 0; | ||
651 | |||
652 | if (unlikely(early_put_chars)) | ||
653 | return early_put_chars(vtermno, buf, count); | ||
654 | |||
655 | return send_buf(port, (void *)buf, count); | ||
138 | } | 656 | } |
139 | 657 | ||
140 | /* | 658 | /* |
141 | * virtio console configuration. This supports: | 659 | * get_chars() is the callback from the hvc_console infrastructure |
142 | * - console resize | 660 | * when an interrupt is received. |
661 | * | ||
662 | * We call out to fill_readbuf that gets us the required data from the | ||
663 | * buffers that are queued up. | ||
143 | */ | 664 | */ |
144 | static void virtcons_apply_config(struct virtio_device *dev) | 665 | static int get_chars(u32 vtermno, char *buf, int count) |
145 | { | 666 | { |
667 | struct port *port; | ||
668 | |||
669 | port = find_port_by_vtermno(vtermno); | ||
670 | if (!port) | ||
671 | return 0; | ||
672 | |||
673 | /* If we don't have an input queue yet, we can't get input. */ | ||
674 | BUG_ON(!port->in_vq); | ||
675 | |||
676 | return fill_readbuf(port, buf, count, false); | ||
677 | } | ||
678 | |||
679 | static void resize_console(struct port *port) | ||
680 | { | ||
681 | struct virtio_device *vdev; | ||
146 | struct winsize ws; | 682 | struct winsize ws; |
147 | 683 | ||
148 | if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) { | 684 | vdev = port->portdev->vdev; |
149 | dev->config->get(dev, | 685 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { |
150 | offsetof(struct virtio_console_config, cols), | 686 | vdev->config->get(vdev, |
151 | &ws.ws_col, sizeof(u16)); | 687 | offsetof(struct virtio_console_config, cols), |
152 | dev->config->get(dev, | 688 | &ws.ws_col, sizeof(u16)); |
153 | offsetof(struct virtio_console_config, rows), | 689 | vdev->config->get(vdev, |
154 | &ws.ws_row, sizeof(u16)); | 690 | offsetof(struct virtio_console_config, rows), |
155 | hvc_resize(hvc, ws); | 691 | &ws.ws_row, sizeof(u16)); |
692 | hvc_resize(port->cons.hvc, ws); | ||
156 | } | 693 | } |
157 | } | 694 | } |
158 | 695 | ||
159 | /* | 696 | /* We set the configuration at this point, since we now have a tty */ |
160 | * we support only one console, the hvc struct is a global var | ||
161 | * We set the configuration at this point, since we now have a tty | ||
162 | */ | ||
163 | static int notifier_add_vio(struct hvc_struct *hp, int data) | 697 | static int notifier_add_vio(struct hvc_struct *hp, int data) |
164 | { | 698 | { |
699 | struct port *port; | ||
700 | |||
701 | port = find_port_by_vtermno(hp->vtermno); | ||
702 | if (!port) | ||
703 | return -EINVAL; | ||
704 | |||
165 | hp->irq_requested = 1; | 705 | hp->irq_requested = 1; |
166 | virtcons_apply_config(vdev); | 706 | resize_console(port); |
167 | 707 | ||
168 | return 0; | 708 | return 0; |
169 | } | 709 | } |
@@ -173,79 +713,797 @@ static void notifier_del_vio(struct hvc_struct *hp, int data) | |||
173 | hp->irq_requested = 0; | 713 | hp->irq_requested = 0; |
174 | } | 714 | } |
175 | 715 | ||
176 | static void hvc_handle_input(struct virtqueue *vq) | 716 | /* The operations for console ports. */ |
717 | static const struct hv_ops hv_ops = { | ||
718 | .get_chars = get_chars, | ||
719 | .put_chars = put_chars, | ||
720 | .notifier_add = notifier_add_vio, | ||
721 | .notifier_del = notifier_del_vio, | ||
722 | .notifier_hangup = notifier_del_vio, | ||
723 | }; | ||
724 | |||
725 | /* | ||
726 | * Console drivers are initialized very early so boot messages can go | ||
727 | * out, so we do things slightly differently from the generic virtio | ||
728 | * initialization of the net and block drivers. | ||
729 | * | ||
730 | * At this stage, the console is output-only. It's too early to set | ||
731 | * up a virtqueue, so we let the drivers do some boutique early-output | ||
732 | * thing. | ||
733 | */ | ||
734 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) | ||
177 | { | 735 | { |
178 | if (hvc_poll(hvc)) | 736 | early_put_chars = put_chars; |
737 | return hvc_instantiate(0, 0, &hv_ops); | ||
738 | } | ||
739 | |||
740 | int init_port_console(struct port *port) | ||
741 | { | ||
742 | int ret; | ||
743 | |||
744 | /* | ||
745 | * The Host's telling us this port is a console port. Hook it | ||
746 | * up with an hvc console. | ||
747 | * | ||
748 | * To set up and manage our virtual console, we call | ||
749 | * hvc_alloc(). | ||
750 | * | ||
751 | * The first argument of hvc_alloc() is the virtual console | ||
752 | * number. The second argument is the parameter for the | ||
753 | * notification mechanism (like irq number). We currently | ||
754 | * leave this as zero, virtqueues have implicit notifications. | ||
755 | * | ||
756 | * The third argument is a "struct hv_ops" containing the | ||
757 | * put_chars() get_chars(), notifier_add() and notifier_del() | ||
758 | * pointers. The final argument is the output buffer size: we | ||
759 | * can do any size, so we put PAGE_SIZE here. | ||
760 | */ | ||
761 | port->cons.vtermno = pdrvdata.next_vtermno; | ||
762 | |||
763 | port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); | ||
764 | if (IS_ERR(port->cons.hvc)) { | ||
765 | ret = PTR_ERR(port->cons.hvc); | ||
766 | dev_err(port->dev, | ||
767 | "error %d allocating hvc for port\n", ret); | ||
768 | port->cons.hvc = NULL; | ||
769 | return ret; | ||
770 | } | ||
771 | spin_lock_irq(&pdrvdata_lock); | ||
772 | pdrvdata.next_vtermno++; | ||
773 | list_add_tail(&port->cons.list, &pdrvdata.consoles); | ||
774 | spin_unlock_irq(&pdrvdata_lock); | ||
775 | port->guest_connected = true; | ||
776 | |||
777 | /* Notify host of port being opened */ | ||
778 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); | ||
779 | |||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static ssize_t show_port_name(struct device *dev, | ||
784 | struct device_attribute *attr, char *buffer) | ||
785 | { | ||
786 | struct port *port; | ||
787 | |||
788 | port = dev_get_drvdata(dev); | ||
789 | |||
790 | return sprintf(buffer, "%s\n", port->name); | ||
791 | } | ||
792 | |||
793 | static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); | ||
794 | |||
795 | static struct attribute *port_sysfs_entries[] = { | ||
796 | &dev_attr_name.attr, | ||
797 | NULL | ||
798 | }; | ||
799 | |||
800 | static struct attribute_group port_attribute_group = { | ||
801 | .name = NULL, /* put in device directory */ | ||
802 | .attrs = port_sysfs_entries, | ||
803 | }; | ||
804 | |||
805 | static int debugfs_open(struct inode *inode, struct file *filp) | ||
806 | { | ||
807 | filp->private_data = inode->i_private; | ||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | static ssize_t debugfs_read(struct file *filp, char __user *ubuf, | ||
812 | size_t count, loff_t *offp) | ||
813 | { | ||
814 | struct port *port; | ||
815 | char *buf; | ||
816 | ssize_t ret, out_offset, out_count; | ||
817 | |||
818 | out_count = 1024; | ||
819 | buf = kmalloc(out_count, GFP_KERNEL); | ||
820 | if (!buf) | ||
821 | return -ENOMEM; | ||
822 | |||
823 | port = filp->private_data; | ||
824 | out_offset = 0; | ||
825 | out_offset += snprintf(buf + out_offset, out_count, | ||
826 | "name: %s\n", port->name ? port->name : ""); | ||
827 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
828 | "guest_connected: %d\n", port->guest_connected); | ||
829 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
830 | "host_connected: %d\n", port->host_connected); | ||
831 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
832 | "is_console: %s\n", | ||
833 | is_console_port(port) ? "yes" : "no"); | ||
834 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
835 | "console_vtermno: %u\n", port->cons.vtermno); | ||
836 | |||
837 | ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); | ||
838 | kfree(buf); | ||
839 | return ret; | ||
840 | } | ||
841 | |||
842 | static const struct file_operations port_debugfs_ops = { | ||
843 | .owner = THIS_MODULE, | ||
844 | .open = debugfs_open, | ||
845 | .read = debugfs_read, | ||
846 | }; | ||
847 | |||
848 | /* Remove all port-specific data. */ | ||
849 | static int remove_port(struct port *port) | ||
850 | { | ||
851 | struct port_buffer *buf; | ||
852 | |||
853 | spin_lock_irq(&port->portdev->ports_lock); | ||
854 | list_del(&port->list); | ||
855 | spin_unlock_irq(&port->portdev->ports_lock); | ||
856 | |||
857 | if (is_console_port(port)) { | ||
858 | spin_lock_irq(&pdrvdata_lock); | ||
859 | list_del(&port->cons.list); | ||
860 | spin_unlock_irq(&pdrvdata_lock); | ||
861 | hvc_remove(port->cons.hvc); | ||
862 | } | ||
863 | if (port->guest_connected) | ||
864 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | ||
865 | |||
866 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
867 | device_destroy(pdrvdata.class, port->dev->devt); | ||
868 | cdev_del(&port->cdev); | ||
869 | |||
870 | /* Remove unused data this port might have received. */ | ||
871 | discard_port_data(port); | ||
872 | |||
873 | /* Remove buffers we queued up for the Host to send us data in. */ | ||
874 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | ||
875 | free_buf(buf); | ||
876 | |||
877 | kfree(port->name); | ||
878 | |||
879 | debugfs_remove(port->debugfs_file); | ||
880 | |||
881 | kfree(port); | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | /* Any private messages that the Host and Guest want to share */ | ||
886 | static void handle_control_message(struct ports_device *portdev, | ||
887 | struct port_buffer *buf) | ||
888 | { | ||
889 | struct virtio_console_control *cpkt; | ||
890 | struct port *port; | ||
891 | size_t name_size; | ||
892 | int err; | ||
893 | |||
894 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); | ||
895 | |||
896 | port = find_port_by_id(portdev, cpkt->id); | ||
897 | if (!port) { | ||
898 | /* No valid header at start of buffer. Drop it. */ | ||
899 | dev_dbg(&portdev->vdev->dev, | ||
900 | "Invalid index %u in control packet\n", cpkt->id); | ||
901 | return; | ||
902 | } | ||
903 | |||
904 | switch (cpkt->event) { | ||
905 | case VIRTIO_CONSOLE_CONSOLE_PORT: | ||
906 | if (!cpkt->value) | ||
907 | break; | ||
908 | if (is_console_port(port)) | ||
909 | break; | ||
910 | |||
911 | init_port_console(port); | ||
912 | /* | ||
913 | * Could remove the port here in case init fails - but | ||
914 | * have to notify the host first. | ||
915 | */ | ||
916 | break; | ||
917 | case VIRTIO_CONSOLE_RESIZE: | ||
918 | if (!is_console_port(port)) | ||
919 | break; | ||
920 | port->cons.hvc->irq_requested = 1; | ||
921 | resize_console(port); | ||
922 | break; | ||
923 | case VIRTIO_CONSOLE_PORT_OPEN: | ||
924 | port->host_connected = cpkt->value; | ||
925 | wake_up_interruptible(&port->waitqueue); | ||
926 | break; | ||
927 | case VIRTIO_CONSOLE_PORT_NAME: | ||
928 | /* | ||
929 | * Skip the size of the header and the cpkt to get the size | ||
930 | * of the name that was sent | ||
931 | */ | ||
932 | name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; | ||
933 | |||
934 | port->name = kmalloc(name_size, GFP_KERNEL); | ||
935 | if (!port->name) { | ||
936 | dev_err(port->dev, | ||
937 | "Not enough space to store port name\n"); | ||
938 | break; | ||
939 | } | ||
940 | strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), | ||
941 | name_size - 1); | ||
942 | port->name[name_size - 1] = 0; | ||
943 | |||
944 | /* | ||
945 | * Since we only have one sysfs attribute, 'name', | ||
946 | * create it only if we have a name for the port. | ||
947 | */ | ||
948 | err = sysfs_create_group(&port->dev->kobj, | ||
949 | &port_attribute_group); | ||
950 | if (err) | ||
951 | dev_err(port->dev, | ||
952 | "Error %d creating sysfs device attributes\n", | ||
953 | err); | ||
954 | |||
955 | break; | ||
956 | case VIRTIO_CONSOLE_PORT_REMOVE: | ||
957 | /* | ||
958 | * Hot unplug the port. We don't decrement nr_ports | ||
959 | * since we don't want to deal with extra complexities | ||
960 | * of using the lowest-available port id: We can just | ||
961 | * pick up the nr_ports number as the id and not have | ||
962 | * userspace send it to us. This helps us in two | ||
963 | * ways: | ||
964 | * | ||
965 | * - We don't need to have a 'port_id' field in the | ||
966 | * config space when a port is hot-added. This is a | ||
967 | * good thing as we might queue up multiple hotplug | ||
968 | * requests issued in our workqueue. | ||
969 | * | ||
970 | * - Another way to deal with this would have been to | ||
971 | * use a bitmap of the active ports and select the | ||
972 | * lowest non-active port from that map. That | ||
973 | * bloats the already tight config space and we | ||
974 | * would end up artificially limiting the | ||
975 | * max. number of ports to sizeof(bitmap). Right | ||
976 | * now we can support 2^32 ports (as the port id is | ||
977 | * stored in a u32 type). | ||
978 | * | ||
979 | */ | ||
980 | remove_port(port); | ||
981 | break; | ||
982 | } | ||
983 | } | ||
984 | |||
985 | static void control_work_handler(struct work_struct *work) | ||
986 | { | ||
987 | struct ports_device *portdev; | ||
988 | struct virtqueue *vq; | ||
989 | struct port_buffer *buf; | ||
990 | unsigned int len; | ||
991 | |||
992 | portdev = container_of(work, struct ports_device, control_work); | ||
993 | vq = portdev->c_ivq; | ||
994 | |||
995 | spin_lock(&portdev->cvq_lock); | ||
996 | while ((buf = vq->vq_ops->get_buf(vq, &len))) { | ||
997 | spin_unlock(&portdev->cvq_lock); | ||
998 | |||
999 | buf->len = len; | ||
1000 | buf->offset = 0; | ||
1001 | |||
1002 | handle_control_message(portdev, buf); | ||
1003 | |||
1004 | spin_lock(&portdev->cvq_lock); | ||
1005 | if (add_inbuf(portdev->c_ivq, buf) < 0) { | ||
1006 | dev_warn(&portdev->vdev->dev, | ||
1007 | "Error adding buffer to queue\n"); | ||
1008 | free_buf(buf); | ||
1009 | } | ||
1010 | } | ||
1011 | spin_unlock(&portdev->cvq_lock); | ||
1012 | } | ||
1013 | |||
1014 | static void in_intr(struct virtqueue *vq) | ||
1015 | { | ||
1016 | struct port *port; | ||
1017 | unsigned long flags; | ||
1018 | |||
1019 | port = find_port_by_vq(vq->vdev->priv, vq); | ||
1020 | if (!port) | ||
1021 | return; | ||
1022 | |||
1023 | spin_lock_irqsave(&port->inbuf_lock, flags); | ||
1024 | if (!port->inbuf) | ||
1025 | port->inbuf = get_inbuf(port); | ||
1026 | |||
1027 | /* | ||
1028 | * Don't queue up data when port is closed. This condition | ||
1029 | * can be reached when a console port is not yet connected (no | ||
1030 | * tty is spawned) and the host sends out data to console | ||
1031 | * ports. For generic serial ports, the host won't | ||
1032 | * (shouldn't) send data till the guest is connected. | ||
1033 | */ | ||
1034 | if (!port->guest_connected) | ||
1035 | discard_port_data(port); | ||
1036 | |||
1037 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | ||
1038 | |||
1039 | wake_up_interruptible(&port->waitqueue); | ||
1040 | |||
1041 | if (is_console_port(port) && hvc_poll(port->cons.hvc)) | ||
179 | hvc_kick(); | 1042 | hvc_kick(); |
180 | } | 1043 | } |
181 | 1044 | ||
182 | /*D:370 Once we're further in boot, we get probed like any other virtio device. | 1045 | static void control_intr(struct virtqueue *vq) |
183 | * At this stage we set up the output virtqueue. | 1046 | { |
184 | * | 1047 | struct ports_device *portdev; |
185 | * To set up and manage our virtual console, we call hvc_alloc(). Since we | 1048 | |
186 | * never remove the console device we never need this pointer again. | 1049 | portdev = vq->vdev->priv; |
1050 | schedule_work(&portdev->control_work); | ||
1051 | } | ||
1052 | |||
1053 | static void config_intr(struct virtio_device *vdev) | ||
1054 | { | ||
1055 | struct ports_device *portdev; | ||
1056 | |||
1057 | portdev = vdev->priv; | ||
1058 | if (use_multiport(portdev)) { | ||
1059 | /* Handle port hot-add */ | ||
1060 | schedule_work(&portdev->config_work); | ||
1061 | } | ||
1062 | /* | ||
1063 | * We'll use this way of resizing only for legacy support. | ||
1064 | * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use | ||
1065 | * control messages to indicate console size changes so that | ||
1066 | * it can be done per-port | ||
1067 | */ | ||
1068 | resize_console(find_port_by_id(portdev, 0)); | ||
1069 | } | ||
1070 | |||
1071 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | ||
1072 | { | ||
1073 | struct port_buffer *buf; | ||
1074 | unsigned int ret; | ||
1075 | int err; | ||
1076 | |||
1077 | ret = 0; | ||
1078 | do { | ||
1079 | buf = alloc_buf(PAGE_SIZE); | ||
1080 | if (!buf) | ||
1081 | break; | ||
1082 | |||
1083 | spin_lock_irq(lock); | ||
1084 | err = add_inbuf(vq, buf); | ||
1085 | if (err < 0) { | ||
1086 | spin_unlock_irq(lock); | ||
1087 | free_buf(buf); | ||
1088 | break; | ||
1089 | } | ||
1090 | ret++; | ||
1091 | spin_unlock_irq(lock); | ||
1092 | } while (err > 0); | ||
1093 | |||
1094 | return ret; | ||
1095 | } | ||
1096 | |||
1097 | static int add_port(struct ports_device *portdev, u32 id) | ||
1098 | { | ||
1099 | char debugfs_name[16]; | ||
1100 | struct port *port; | ||
1101 | struct port_buffer *buf; | ||
1102 | dev_t devt; | ||
1103 | int err; | ||
1104 | |||
1105 | port = kmalloc(sizeof(*port), GFP_KERNEL); | ||
1106 | if (!port) { | ||
1107 | err = -ENOMEM; | ||
1108 | goto fail; | ||
1109 | } | ||
1110 | |||
1111 | port->portdev = portdev; | ||
1112 | port->id = id; | ||
1113 | |||
1114 | port->name = NULL; | ||
1115 | port->inbuf = NULL; | ||
1116 | port->cons.hvc = NULL; | ||
1117 | |||
1118 | port->host_connected = port->guest_connected = false; | ||
1119 | |||
1120 | port->in_vq = portdev->in_vqs[port->id]; | ||
1121 | port->out_vq = portdev->out_vqs[port->id]; | ||
1122 | |||
1123 | cdev_init(&port->cdev, &port_fops); | ||
1124 | |||
1125 | devt = MKDEV(portdev->chr_major, id); | ||
1126 | err = cdev_add(&port->cdev, devt, 1); | ||
1127 | if (err < 0) { | ||
1128 | dev_err(&port->portdev->vdev->dev, | ||
1129 | "Error %d adding cdev for port %u\n", err, id); | ||
1130 | goto free_port; | ||
1131 | } | ||
1132 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | ||
1133 | devt, port, "vport%up%u", | ||
1134 | port->portdev->drv_index, id); | ||
1135 | if (IS_ERR(port->dev)) { | ||
1136 | err = PTR_ERR(port->dev); | ||
1137 | dev_err(&port->portdev->vdev->dev, | ||
1138 | "Error %d creating device for port %u\n", | ||
1139 | err, id); | ||
1140 | goto free_cdev; | ||
1141 | } | ||
1142 | |||
1143 | spin_lock_init(&port->inbuf_lock); | ||
1144 | init_waitqueue_head(&port->waitqueue); | ||
1145 | |||
1146 | /* Fill the in_vq with buffers so the host can send us data. */ | ||
1147 | err = fill_queue(port->in_vq, &port->inbuf_lock); | ||
1148 | if (!err) { | ||
1149 | dev_err(port->dev, "Error allocating inbufs\n"); | ||
1150 | err = -ENOMEM; | ||
1151 | goto free_device; | ||
1152 | } | ||
1153 | |||
1154 | /* | ||
1155 | * If we're not using multiport support, this has to be a console port | ||
1156 | */ | ||
1157 | if (!use_multiport(port->portdev)) { | ||
1158 | err = init_port_console(port); | ||
1159 | if (err) | ||
1160 | goto free_inbufs; | ||
1161 | } | ||
1162 | |||
1163 | spin_lock_irq(&portdev->ports_lock); | ||
1164 | list_add_tail(&port->list, &port->portdev->ports); | ||
1165 | spin_unlock_irq(&portdev->ports_lock); | ||
1166 | |||
1167 | /* | ||
1168 | * Tell the Host we're set so that it can send us various | ||
1169 | * configuration parameters for this port (eg, port name, | ||
1170 | * caching, whether this is a console port, etc.) | ||
1171 | */ | ||
1172 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1173 | |||
1174 | if (pdrvdata.debugfs_dir) { | ||
1175 | /* | ||
1176 | * Finally, create the debugfs file that we can use to | ||
1177 | * inspect a port's state at any time | ||
1178 | */ | ||
1179 | sprintf(debugfs_name, "vport%up%u", | ||
1180 | port->portdev->drv_index, id); | ||
1181 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, | ||
1182 | pdrvdata.debugfs_dir, | ||
1183 | port, | ||
1184 | &port_debugfs_ops); | ||
1185 | } | ||
1186 | return 0; | ||
1187 | |||
1188 | free_inbufs: | ||
1189 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | ||
1190 | free_buf(buf); | ||
1191 | free_device: | ||
1192 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1193 | free_cdev: | ||
1194 | cdev_del(&port->cdev); | ||
1195 | free_port: | ||
1196 | kfree(port); | ||
1197 | fail: | ||
1198 | return err; | ||
1199 | } | ||
1200 | |||
1201 | /* | ||
1202 | * The workhandler for config-space updates. | ||
187 | * | 1203 | * |
188 | * Finally we put our input buffer in the input queue, ready to receive. */ | 1204 | * This is called when ports are hot-added. |
189 | static int __devinit virtcons_probe(struct virtio_device *dev) | 1205 | */ |
1206 | static void config_work_handler(struct work_struct *work) | ||
1207 | { | ||
1208 | struct virtio_console_config virtconconf; | ||
1209 | struct ports_device *portdev; | ||
1210 | struct virtio_device *vdev; | ||
1211 | int err; | ||
1212 | |||
1213 | portdev = container_of(work, struct ports_device, config_work); | ||
1214 | |||
1215 | vdev = portdev->vdev; | ||
1216 | vdev->config->get(vdev, | ||
1217 | offsetof(struct virtio_console_config, nr_ports), | ||
1218 | &virtconconf.nr_ports, | ||
1219 | sizeof(virtconconf.nr_ports)); | ||
1220 | |||
1221 | if (portdev->config.nr_ports == virtconconf.nr_ports) { | ||
1222 | /* | ||
1223 | * Port 0 got hot-added. Since we already did all the | ||
1224 | * other initialisation for it, just tell the Host | ||
1225 | * that the port is ready if we find the port. In | ||
1226 | * case the port was hot-removed earlier, we call | ||
1227 | * add_port to add the port. | ||
1228 | */ | ||
1229 | struct port *port; | ||
1230 | |||
1231 | port = find_port_by_id(portdev, 0); | ||
1232 | if (!port) | ||
1233 | add_port(portdev, 0); | ||
1234 | else | ||
1235 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1236 | return; | ||
1237 | } | ||
1238 | if (virtconconf.nr_ports > portdev->config.max_nr_ports) { | ||
1239 | dev_warn(&vdev->dev, | ||
1240 | "More ports specified (%u) than allowed (%u)", | ||
1241 | portdev->config.nr_ports + 1, | ||
1242 | portdev->config.max_nr_ports); | ||
1243 | return; | ||
1244 | } | ||
1245 | if (virtconconf.nr_ports < portdev->config.nr_ports) | ||
1246 | return; | ||
1247 | |||
1248 | /* Hot-add ports */ | ||
1249 | while (virtconconf.nr_ports - portdev->config.nr_ports) { | ||
1250 | err = add_port(portdev, portdev->config.nr_ports); | ||
1251 | if (err) | ||
1252 | break; | ||
1253 | portdev->config.nr_ports++; | ||
1254 | } | ||
1255 | } | ||
1256 | |||
1257 | static int init_vqs(struct ports_device *portdev) | ||
190 | { | 1258 | { |
191 | vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; | 1259 | vq_callback_t **io_callbacks; |
192 | const char *names[] = { "input", "output" }; | 1260 | char **io_names; |
193 | struct virtqueue *vqs[2]; | 1261 | struct virtqueue **vqs; |
1262 | u32 i, j, nr_ports, nr_queues; | ||
194 | int err; | 1263 | int err; |
195 | 1264 | ||
196 | vdev = dev; | 1265 | nr_ports = portdev->config.max_nr_ports; |
1266 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; | ||
197 | 1267 | ||
198 | /* This is the scratch page we use to receive console input */ | 1268 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); |
199 | inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1269 | if (!vqs) { |
200 | if (!inbuf) { | ||
201 | err = -ENOMEM; | 1270 | err = -ENOMEM; |
202 | goto fail; | 1271 | goto fail; |
203 | } | 1272 | } |
1273 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); | ||
1274 | if (!io_callbacks) { | ||
1275 | err = -ENOMEM; | ||
1276 | goto free_vqs; | ||
1277 | } | ||
1278 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); | ||
1279 | if (!io_names) { | ||
1280 | err = -ENOMEM; | ||
1281 | goto free_callbacks; | ||
1282 | } | ||
1283 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | ||
1284 | GFP_KERNEL); | ||
1285 | if (!portdev->in_vqs) { | ||
1286 | err = -ENOMEM; | ||
1287 | goto free_names; | ||
1288 | } | ||
1289 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | ||
1290 | GFP_KERNEL); | ||
1291 | if (!portdev->out_vqs) { | ||
1292 | err = -ENOMEM; | ||
1293 | goto free_invqs; | ||
1294 | } | ||
1295 | |||
1296 | /* | ||
1297 | * For backward compat (newer host but older guest), the host | ||
1298 | * spawns a console port first and also inits the vqs for port | ||
1299 | * 0 before others. | ||
1300 | */ | ||
1301 | j = 0; | ||
1302 | io_callbacks[j] = in_intr; | ||
1303 | io_callbacks[j + 1] = NULL; | ||
1304 | io_names[j] = "input"; | ||
1305 | io_names[j + 1] = "output"; | ||
1306 | j += 2; | ||
204 | 1307 | ||
1308 | if (use_multiport(portdev)) { | ||
1309 | io_callbacks[j] = control_intr; | ||
1310 | io_callbacks[j + 1] = NULL; | ||
1311 | io_names[j] = "control-i"; | ||
1312 | io_names[j + 1] = "control-o"; | ||
1313 | |||
1314 | for (i = 1; i < nr_ports; i++) { | ||
1315 | j += 2; | ||
1316 | io_callbacks[j] = in_intr; | ||
1317 | io_callbacks[j + 1] = NULL; | ||
1318 | io_names[j] = "input"; | ||
1319 | io_names[j + 1] = "output"; | ||
1320 | } | ||
1321 | } | ||
205 | /* Find the queues. */ | 1322 | /* Find the queues. */ |
206 | /* FIXME: This is why we want to wean off hvc: we do nothing | 1323 | err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, |
207 | * when input comes in. */ | 1324 | io_callbacks, |
208 | err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); | 1325 | (const char **)io_names); |
209 | if (err) | 1326 | if (err) |
1327 | goto free_outvqs; | ||
1328 | |||
1329 | j = 0; | ||
1330 | portdev->in_vqs[0] = vqs[0]; | ||
1331 | portdev->out_vqs[0] = vqs[1]; | ||
1332 | j += 2; | ||
1333 | if (use_multiport(portdev)) { | ||
1334 | portdev->c_ivq = vqs[j]; | ||
1335 | portdev->c_ovq = vqs[j + 1]; | ||
1336 | |||
1337 | for (i = 1; i < nr_ports; i++) { | ||
1338 | j += 2; | ||
1339 | portdev->in_vqs[i] = vqs[j]; | ||
1340 | portdev->out_vqs[i] = vqs[j + 1]; | ||
1341 | } | ||
1342 | } | ||
1343 | kfree(io_callbacks); | ||
1344 | kfree(io_names); | ||
1345 | kfree(vqs); | ||
1346 | |||
1347 | return 0; | ||
1348 | |||
1349 | free_names: | ||
1350 | kfree(io_names); | ||
1351 | free_callbacks: | ||
1352 | kfree(io_callbacks); | ||
1353 | free_outvqs: | ||
1354 | kfree(portdev->out_vqs); | ||
1355 | free_invqs: | ||
1356 | kfree(portdev->in_vqs); | ||
1357 | free_vqs: | ||
1358 | kfree(vqs); | ||
1359 | fail: | ||
1360 | return err; | ||
1361 | } | ||
1362 | |||
1363 | static const struct file_operations portdev_fops = { | ||
1364 | .owner = THIS_MODULE, | ||
1365 | }; | ||
1366 | |||
1367 | /* | ||
1368 | * Once we're further in boot, we get probed like any other virtio | ||
1369 | * device. | ||
1370 | * | ||
1371 | * If the host also supports multiple console ports, we check the | ||
1372 | * config space to see how many ports the host has spawned. We | ||
1373 | * initialize each port found. | ||
1374 | */ | ||
1375 | static int __devinit virtcons_probe(struct virtio_device *vdev) | ||
1376 | { | ||
1377 | struct ports_device *portdev; | ||
1378 | u32 i; | ||
1379 | int err; | ||
1380 | bool multiport; | ||
1381 | |||
1382 | portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); | ||
1383 | if (!portdev) { | ||
1384 | err = -ENOMEM; | ||
1385 | goto fail; | ||
1386 | } | ||
1387 | |||
1388 | /* Attach this portdev to this virtio_device, and vice-versa. */ | ||
1389 | portdev->vdev = vdev; | ||
1390 | vdev->priv = portdev; | ||
1391 | |||
1392 | spin_lock_irq(&pdrvdata_lock); | ||
1393 | portdev->drv_index = pdrvdata.index++; | ||
1394 | spin_unlock_irq(&pdrvdata_lock); | ||
1395 | |||
1396 | portdev->chr_major = register_chrdev(0, "virtio-portsdev", | ||
1397 | &portdev_fops); | ||
1398 | if (portdev->chr_major < 0) { | ||
1399 | dev_err(&vdev->dev, | ||
1400 | "Error %d registering chrdev for device %u\n", | ||
1401 | portdev->chr_major, portdev->drv_index); | ||
1402 | err = portdev->chr_major; | ||
210 | goto free; | 1403 | goto free; |
1404 | } | ||
211 | 1405 | ||
212 | in_vq = vqs[0]; | 1406 | multiport = false; |
213 | out_vq = vqs[1]; | 1407 | portdev->config.nr_ports = 1; |
1408 | portdev->config.max_nr_ports = 1; | ||
1409 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { | ||
1410 | multiport = true; | ||
1411 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; | ||
214 | 1412 | ||
215 | /* Start using the new console output. */ | 1413 | vdev->config->get(vdev, offsetof(struct virtio_console_config, |
216 | virtio_cons.get_chars = get_chars; | 1414 | nr_ports), |
217 | virtio_cons.put_chars = put_chars; | 1415 | &portdev->config.nr_ports, |
218 | virtio_cons.notifier_add = notifier_add_vio; | 1416 | sizeof(portdev->config.nr_ports)); |
219 | virtio_cons.notifier_del = notifier_del_vio; | 1417 | vdev->config->get(vdev, offsetof(struct virtio_console_config, |
220 | virtio_cons.notifier_hangup = notifier_del_vio; | 1418 | max_nr_ports), |
221 | 1419 | &portdev->config.max_nr_ports, | |
222 | /* The first argument of hvc_alloc() is the virtual console number, so | 1420 | sizeof(portdev->config.max_nr_ports)); |
223 | * we use zero. The second argument is the parameter for the | 1421 | if (portdev->config.nr_ports > portdev->config.max_nr_ports) { |
224 | * notification mechanism (like irq number). We currently leave this | 1422 | dev_warn(&vdev->dev, |
225 | * as zero, virtqueues have implicit notifications. | 1423 | "More ports (%u) specified than allowed (%u). Will init %u ports.", |
226 | * | 1424 | portdev->config.nr_ports, |
227 | * The third argument is a "struct hv_ops" containing the put_chars() | 1425 | portdev->config.max_nr_ports, |
228 | * get_chars(), notifier_add() and notifier_del() pointers. | 1426 | portdev->config.max_nr_ports); |
229 | * The final argument is the output buffer size: we can do any size, | 1427 | |
230 | * so we put PAGE_SIZE here. */ | 1428 | portdev->config.nr_ports = portdev->config.max_nr_ports; |
231 | hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); | 1429 | } |
232 | if (IS_ERR(hvc)) { | 1430 | } |
233 | err = PTR_ERR(hvc); | 1431 | |
234 | goto free_vqs; | 1432 | /* Let the Host know we support multiple ports.*/ |
1433 | vdev->config->finalize_features(vdev); | ||
1434 | |||
1435 | err = init_vqs(portdev); | ||
1436 | if (err < 0) { | ||
1437 | dev_err(&vdev->dev, "Error %d initializing vqs\n", err); | ||
1438 | goto free_chrdev; | ||
1439 | } | ||
1440 | |||
1441 | spin_lock_init(&portdev->ports_lock); | ||
1442 | INIT_LIST_HEAD(&portdev->ports); | ||
1443 | |||
1444 | if (multiport) { | ||
1445 | spin_lock_init(&portdev->cvq_lock); | ||
1446 | INIT_WORK(&portdev->control_work, &control_work_handler); | ||
1447 | INIT_WORK(&portdev->config_work, &config_work_handler); | ||
1448 | |||
1449 | err = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | ||
1450 | if (!err) { | ||
1451 | dev_err(&vdev->dev, | ||
1452 | "Error allocating buffers for control queue\n"); | ||
1453 | err = -ENOMEM; | ||
1454 | goto free_vqs; | ||
1455 | } | ||
235 | } | 1456 | } |
236 | 1457 | ||
237 | /* Register the input buffer the first time. */ | 1458 | for (i = 0; i < portdev->config.nr_ports; i++) |
238 | add_inbuf(); | 1459 | add_port(portdev, i); |
1460 | |||
1461 | /* Start using the new console output. */ | ||
1462 | early_put_chars = NULL; | ||
239 | return 0; | 1463 | return 0; |
240 | 1464 | ||
241 | free_vqs: | 1465 | free_vqs: |
242 | vdev->config->del_vqs(vdev); | 1466 | vdev->config->del_vqs(vdev); |
1467 | kfree(portdev->in_vqs); | ||
1468 | kfree(portdev->out_vqs); | ||
1469 | free_chrdev: | ||
1470 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | ||
243 | free: | 1471 | free: |
244 | kfree(inbuf); | 1472 | kfree(portdev); |
245 | fail: | 1473 | fail: |
246 | return err; | 1474 | return err; |
247 | } | 1475 | } |
248 | 1476 | ||
1477 | static void virtcons_remove(struct virtio_device *vdev) | ||
1478 | { | ||
1479 | struct ports_device *portdev; | ||
1480 | struct port *port, *port2; | ||
1481 | struct port_buffer *buf; | ||
1482 | unsigned int len; | ||
1483 | |||
1484 | portdev = vdev->priv; | ||
1485 | |||
1486 | cancel_work_sync(&portdev->control_work); | ||
1487 | cancel_work_sync(&portdev->config_work); | ||
1488 | |||
1489 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | ||
1490 | remove_port(port); | ||
1491 | |||
1492 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | ||
1493 | |||
1494 | while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) | ||
1495 | free_buf(buf); | ||
1496 | |||
1497 | while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) | ||
1498 | free_buf(buf); | ||
1499 | |||
1500 | vdev->config->del_vqs(vdev); | ||
1501 | kfree(portdev->in_vqs); | ||
1502 | kfree(portdev->out_vqs); | ||
1503 | |||
1504 | kfree(portdev); | ||
1505 | } | ||
1506 | |||
249 | static struct virtio_device_id id_table[] = { | 1507 | static struct virtio_device_id id_table[] = { |
250 | { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, | 1508 | { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, |
251 | { 0 }, | 1509 | { 0 }, |
@@ -253,6 +1511,7 @@ static struct virtio_device_id id_table[] = { | |||
253 | 1511 | ||
254 | static unsigned int features[] = { | 1512 | static unsigned int features[] = { |
255 | VIRTIO_CONSOLE_F_SIZE, | 1513 | VIRTIO_CONSOLE_F_SIZE, |
1514 | VIRTIO_CONSOLE_F_MULTIPORT, | ||
256 | }; | 1515 | }; |
257 | 1516 | ||
258 | static struct virtio_driver virtio_console = { | 1517 | static struct virtio_driver virtio_console = { |
@@ -262,14 +1521,41 @@ static struct virtio_driver virtio_console = { | |||
262 | .driver.owner = THIS_MODULE, | 1521 | .driver.owner = THIS_MODULE, |
263 | .id_table = id_table, | 1522 | .id_table = id_table, |
264 | .probe = virtcons_probe, | 1523 | .probe = virtcons_probe, |
265 | .config_changed = virtcons_apply_config, | 1524 | .remove = virtcons_remove, |
1525 | .config_changed = config_intr, | ||
266 | }; | 1526 | }; |
267 | 1527 | ||
268 | static int __init init(void) | 1528 | static int __init init(void) |
269 | { | 1529 | { |
1530 | int err; | ||
1531 | |||
1532 | pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); | ||
1533 | if (IS_ERR(pdrvdata.class)) { | ||
1534 | err = PTR_ERR(pdrvdata.class); | ||
1535 | pr_err("Error %d creating virtio-ports class\n", err); | ||
1536 | return err; | ||
1537 | } | ||
1538 | |||
1539 | pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); | ||
1540 | if (!pdrvdata.debugfs_dir) { | ||
1541 | pr_warning("Error %ld creating debugfs dir for virtio-ports\n", | ||
1542 | PTR_ERR(pdrvdata.debugfs_dir)); | ||
1543 | } | ||
1544 | INIT_LIST_HEAD(&pdrvdata.consoles); | ||
1545 | |||
270 | return register_virtio_driver(&virtio_console); | 1546 | return register_virtio_driver(&virtio_console); |
271 | } | 1547 | } |
1548 | |||
1549 | static void __exit fini(void) | ||
1550 | { | ||
1551 | unregister_virtio_driver(&virtio_console); | ||
1552 | |||
1553 | class_destroy(pdrvdata.class); | ||
1554 | if (pdrvdata.debugfs_dir) | ||
1555 | debugfs_remove_recursive(pdrvdata.debugfs_dir); | ||
1556 | } | ||
272 | module_init(init); | 1557 | module_init(init); |
1558 | module_exit(fini); | ||
273 | 1559 | ||
274 | MODULE_DEVICE_TABLE(virtio, id_table); | 1560 | MODULE_DEVICE_TABLE(virtio, id_table); |
275 | MODULE_DESCRIPTION("Virtio console driver"); | 1561 | MODULE_DESCRIPTION("Virtio console driver"); |
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 6b3e0c2f33e2..6fe4f7701188 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -603,18 +603,13 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | |||
603 | p->irqaction.handler = sh_cmt_interrupt; | 603 | p->irqaction.handler = sh_cmt_interrupt; |
604 | p->irqaction.dev_id = p; | 604 | p->irqaction.dev_id = p; |
605 | p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; | 605 | p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; |
606 | ret = setup_irq(irq, &p->irqaction); | ||
607 | if (ret) { | ||
608 | pr_err("sh_cmt: failed to request irq %d\n", irq); | ||
609 | goto err1; | ||
610 | } | ||
611 | 606 | ||
612 | /* get hold of clock */ | 607 | /* get hold of clock */ |
613 | p->clk = clk_get(&p->pdev->dev, cfg->clk); | 608 | p->clk = clk_get(&p->pdev->dev, cfg->clk); |
614 | if (IS_ERR(p->clk)) { | 609 | if (IS_ERR(p->clk)) { |
615 | pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk); | 610 | pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk); |
616 | ret = PTR_ERR(p->clk); | 611 | ret = PTR_ERR(p->clk); |
617 | goto err2; | 612 | goto err1; |
618 | } | 613 | } |
619 | 614 | ||
620 | if (resource_size(res) == 6) { | 615 | if (resource_size(res) == 6) { |
@@ -627,14 +622,25 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | |||
627 | p->clear_bits = ~0xc000; | 622 | p->clear_bits = ~0xc000; |
628 | } | 623 | } |
629 | 624 | ||
630 | return sh_cmt_register(p, cfg->name, | 625 | ret = sh_cmt_register(p, cfg->name, |
631 | cfg->clockevent_rating, | 626 | cfg->clockevent_rating, |
632 | cfg->clocksource_rating); | 627 | cfg->clocksource_rating); |
633 | err2: | 628 | if (ret) { |
634 | remove_irq(irq, &p->irqaction); | 629 | pr_err("sh_cmt: registration failed\n"); |
635 | err1: | 630 | goto err1; |
631 | } | ||
632 | |||
633 | ret = setup_irq(irq, &p->irqaction); | ||
634 | if (ret) { | ||
635 | pr_err("sh_cmt: failed to request irq %d\n", irq); | ||
636 | goto err1; | ||
637 | } | ||
638 | |||
639 | return 0; | ||
640 | |||
641 | err1: | ||
636 | iounmap(p->mapbase); | 642 | iounmap(p->mapbase); |
637 | err0: | 643 | err0: |
638 | return ret; | 644 | return ret; |
639 | } | 645 | } |
640 | 646 | ||
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 973e714d6051..4c8a759e60cd 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c | |||
@@ -221,15 +221,15 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, | |||
221 | ced->cpumask = cpumask_of(0); | 221 | ced->cpumask = cpumask_of(0); |
222 | ced->set_mode = sh_mtu2_clock_event_mode; | 222 | ced->set_mode = sh_mtu2_clock_event_mode; |
223 | 223 | ||
224 | pr_info("sh_mtu2: %s used for clock events\n", ced->name); | ||
225 | clockevents_register_device(ced); | ||
226 | |||
224 | ret = setup_irq(p->irqaction.irq, &p->irqaction); | 227 | ret = setup_irq(p->irqaction.irq, &p->irqaction); |
225 | if (ret) { | 228 | if (ret) { |
226 | pr_err("sh_mtu2: failed to request irq %d\n", | 229 | pr_err("sh_mtu2: failed to request irq %d\n", |
227 | p->irqaction.irq); | 230 | p->irqaction.irq); |
228 | return; | 231 | return; |
229 | } | 232 | } |
230 | |||
231 | pr_info("sh_mtu2: %s used for clock events\n", ced->name); | ||
232 | clockevents_register_device(ced); | ||
233 | } | 233 | } |
234 | 234 | ||
235 | static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name, | 235 | static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name, |
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 93c2322feab7..961f5b5ef6a3 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
@@ -323,15 +323,15 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, | |||
323 | ced->set_next_event = sh_tmu_clock_event_next; | 323 | ced->set_next_event = sh_tmu_clock_event_next; |
324 | ced->set_mode = sh_tmu_clock_event_mode; | 324 | ced->set_mode = sh_tmu_clock_event_mode; |
325 | 325 | ||
326 | pr_info("sh_tmu: %s used for clock events\n", ced->name); | ||
327 | clockevents_register_device(ced); | ||
328 | |||
326 | ret = setup_irq(p->irqaction.irq, &p->irqaction); | 329 | ret = setup_irq(p->irqaction.irq, &p->irqaction); |
327 | if (ret) { | 330 | if (ret) { |
328 | pr_err("sh_tmu: failed to request irq %d\n", | 331 | pr_err("sh_tmu: failed to request irq %d\n", |
329 | p->irqaction.irq); | 332 | p->irqaction.irq); |
330 | return; | 333 | return; |
331 | } | 334 | } |
332 | |||
333 | pr_info("sh_tmu: %s used for clock events\n", ced->name); | ||
334 | clockevents_register_device(ced); | ||
335 | } | 335 | } |
336 | 336 | ||
337 | static int sh_tmu_register(struct sh_tmu_priv *p, char *name, | 337 | static int sh_tmu_register(struct sh_tmu_priv *p, char *name, |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 46e899ac924e..1c3849f6b7a2 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -1274,7 +1274,7 @@ static int __exit crypto4xx_remove(struct of_device *ofdev) | |||
1274 | return 0; | 1274 | return 0; |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | static struct of_device_id crypto4xx_match[] = { | 1277 | static const struct of_device_id crypto4xx_match[] = { |
1278 | { .compatible = "amcc,ppc4xx-crypto",}, | 1278 | { .compatible = "amcc,ppc4xx-crypto",}, |
1279 | { }, | 1279 | { }, |
1280 | }; | 1280 | }; |
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 4801162919d9..c7a5a43ba691 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c | |||
@@ -135,13 +135,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, | |||
135 | /* | 135 | /* |
136 | * The requested key size is not supported by HW, do a fallback | 136 | * The requested key size is not supported by HW, do a fallback |
137 | */ | 137 | */ |
138 | op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 138 | op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
139 | op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); | 139 | op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); |
140 | 140 | ||
141 | ret = crypto_cipher_setkey(op->fallback.cip, key, len); | 141 | ret = crypto_cipher_setkey(op->fallback.cip, key, len); |
142 | if (ret) { | 142 | if (ret) { |
143 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 143 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
144 | tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); | 144 | tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK); |
145 | } | 145 | } |
146 | return ret; | 146 | return ret; |
147 | } | 147 | } |
@@ -263,7 +263,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm) | |||
263 | 263 | ||
264 | if (IS_ERR(op->fallback.cip)) { | 264 | if (IS_ERR(op->fallback.cip)) { |
265 | printk(KERN_ERR "Error allocating fallback algo %s\n", name); | 265 | printk(KERN_ERR "Error allocating fallback algo %s\n", name); |
266 | return PTR_ERR(op->fallback.blk); | 266 | return PTR_ERR(op->fallback.cip); |
267 | } | 267 | } |
268 | 268 | ||
269 | return 0; | 269 | return 0; |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c47ffe8a73ef..fd529d68c5ba 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1958,7 +1958,7 @@ err_out: | |||
1958 | return err; | 1958 | return err; |
1959 | } | 1959 | } |
1960 | 1960 | ||
1961 | static struct of_device_id talitos_match[] = { | 1961 | static const struct of_device_id talitos_match[] = { |
1962 | { | 1962 | { |
1963 | .compatible = "fsl,sec2.0", | 1963 | .compatible = "fsl,sec2.0", |
1964 | }, | 1964 | }, |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index d10cc899c460..b75ce8b84c46 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -48,23 +48,20 @@ enum sh_dmae_desc_status { | |||
48 | */ | 48 | */ |
49 | #define RS_DEFAULT (RS_DUAL) | 49 | #define RS_DEFAULT (RS_DUAL) |
50 | 50 | ||
51 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | ||
52 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; | ||
53 | |||
51 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | 54 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
52 | 55 | ||
53 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) | 56 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) |
54 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 57 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
55 | { | 58 | { |
56 | ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | 59 | ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); |
57 | } | 60 | } |
58 | 61 | ||
59 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | 62 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) |
60 | { | 63 | { |
61 | return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | 64 | return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); |
62 | } | ||
63 | |||
64 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
65 | { | ||
66 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | ||
67 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
68 | } | 65 | } |
69 | 66 | ||
70 | /* | 67 | /* |
@@ -95,27 +92,30 @@ static int sh_dmae_rst(int id) | |||
95 | return 0; | 92 | return 0; |
96 | } | 93 | } |
97 | 94 | ||
98 | static int dmae_is_busy(struct sh_dmae_chan *sh_chan) | 95 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) |
99 | { | 96 | { |
100 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 97 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
101 | if (chcr & CHCR_DE) { | 98 | |
102 | if (!(chcr & CHCR_TE)) | 99 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) |
103 | return -EBUSY; /* working */ | 100 | return true; /* working */ |
104 | } | 101 | |
105 | return 0; /* waiting */ | 102 | return false; /* waiting */ |
106 | } | 103 | } |
107 | 104 | ||
108 | static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) | 105 | static unsigned int ts_shift[] = TS_SHIFT; |
106 | static inline unsigned int calc_xmit_shift(u32 chcr) | ||
109 | { | 107 | { |
110 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 108 | int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | |
111 | return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; | 109 | ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); |
110 | |||
111 | return ts_shift[cnt]; | ||
112 | } | 112 | } |
113 | 113 | ||
114 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | 114 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) |
115 | { | 115 | { |
116 | sh_dmae_writel(sh_chan, hw->sar, SAR); | 116 | sh_dmae_writel(sh_chan, hw->sar, SAR); |
117 | sh_dmae_writel(sh_chan, hw->dar, DAR); | 117 | sh_dmae_writel(sh_chan, hw->dar, DAR); |
118 | sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR); | 118 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); |
119 | } | 119 | } |
120 | 120 | ||
121 | static void dmae_start(struct sh_dmae_chan *sh_chan) | 121 | static void dmae_start(struct sh_dmae_chan *sh_chan) |
@@ -123,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan) | |||
123 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 123 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
124 | 124 | ||
125 | chcr |= CHCR_DE | CHCR_IE; | 125 | chcr |= CHCR_DE | CHCR_IE; |
126 | sh_dmae_writel(sh_chan, chcr, CHCR); | 126 | sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); |
127 | } | 127 | } |
128 | 128 | ||
129 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | 129 | static void dmae_halt(struct sh_dmae_chan *sh_chan) |
@@ -134,55 +134,50 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan) | |||
134 | sh_dmae_writel(sh_chan, chcr, CHCR); | 134 | sh_dmae_writel(sh_chan, chcr, CHCR); |
135 | } | 135 | } |
136 | 136 | ||
137 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
138 | { | ||
139 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | ||
140 | sh_chan->xmit_shift = calc_xmit_shift(chcr); | ||
141 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
142 | } | ||
143 | |||
137 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | 144 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) |
138 | { | 145 | { |
139 | int ret = dmae_is_busy(sh_chan); | ||
140 | /* When DMA was working, can not set data to CHCR */ | 146 | /* When DMA was working, can not set data to CHCR */ |
141 | if (ret) | 147 | if (dmae_is_busy(sh_chan)) |
142 | return ret; | 148 | return -EBUSY; |
143 | 149 | ||
150 | sh_chan->xmit_shift = calc_xmit_shift(val); | ||
144 | sh_dmae_writel(sh_chan, val, CHCR); | 151 | sh_dmae_writel(sh_chan, val, CHCR); |
152 | |||
145 | return 0; | 153 | return 0; |
146 | } | 154 | } |
147 | 155 | ||
148 | #define DMARS1_ADDR 0x04 | 156 | #define DMARS_SHIFT 8 |
149 | #define DMARS2_ADDR 0x08 | 157 | #define DMARS_CHAN_MSK 0x01 |
150 | #define DMARS_SHIFT 8 | ||
151 | #define DMARS_CHAN_MSK 0x01 | ||
152 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 158 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
153 | { | 159 | { |
154 | u32 addr; | 160 | u32 addr; |
155 | int shift = 0; | 161 | int shift = 0; |
156 | int ret = dmae_is_busy(sh_chan); | 162 | |
157 | if (ret) | 163 | if (dmae_is_busy(sh_chan)) |
158 | return ret; | 164 | return -EBUSY; |
159 | 165 | ||
160 | if (sh_chan->id & DMARS_CHAN_MSK) | 166 | if (sh_chan->id & DMARS_CHAN_MSK) |
161 | shift = DMARS_SHIFT; | 167 | shift = DMARS_SHIFT; |
162 | 168 | ||
163 | switch (sh_chan->id) { | 169 | if (sh_chan->id < 6) |
164 | /* DMARS0 */ | 170 | /* DMA0RS0 - DMA0RS2 */ |
165 | case 0: | 171 | addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; |
166 | case 1: | 172 | #ifdef SH_DMARS_BASE1 |
167 | addr = SH_DMARS_BASE; | 173 | else if (sh_chan->id < 12) |
168 | break; | 174 | /* DMA1RS0 - DMA1RS2 */ |
169 | /* DMARS1 */ | 175 | addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; |
170 | case 2: | 176 | #endif |
171 | case 3: | 177 | else |
172 | addr = (SH_DMARS_BASE + DMARS1_ADDR); | ||
173 | break; | ||
174 | /* DMARS2 */ | ||
175 | case 4: | ||
176 | case 5: | ||
177 | addr = (SH_DMARS_BASE + DMARS2_ADDR); | ||
178 | break; | ||
179 | default: | ||
180 | return -EINVAL; | 178 | return -EINVAL; |
181 | } | ||
182 | 179 | ||
183 | ctrl_outw((val << shift) | | 180 | ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); |
184 | (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)), | ||
185 | addr); | ||
186 | 181 | ||
187 | return 0; | 182 | return 0; |
188 | } | 183 | } |
@@ -250,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | |||
250 | return NULL; | 245 | return NULL; |
251 | } | 246 | } |
252 | 247 | ||
248 | static struct sh_dmae_slave_config *sh_dmae_find_slave( | ||
249 | struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) | ||
250 | { | ||
251 | struct dma_device *dma_dev = sh_chan->common.device; | ||
252 | struct sh_dmae_device *shdev = container_of(dma_dev, | ||
253 | struct sh_dmae_device, common); | ||
254 | struct sh_dmae_pdata *pdata = &shdev->pdata; | ||
255 | int i; | ||
256 | |||
257 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) | ||
258 | return NULL; | ||
259 | |||
260 | for (i = 0; i < pdata->config_num; i++) | ||
261 | if (pdata->config[i].slave_id == slave_id) | ||
262 | return pdata->config + i; | ||
263 | |||
264 | return NULL; | ||
265 | } | ||
266 | |||
253 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | 267 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) |
254 | { | 268 | { |
255 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 269 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
256 | struct sh_desc *desc; | 270 | struct sh_desc *desc; |
271 | struct sh_dmae_slave *param = chan->private; | ||
272 | |||
273 | /* | ||
274 | * This relies on the guarantee from dmaengine that alloc_chan_resources | ||
275 | * never runs concurrently with itself or free_chan_resources. | ||
276 | */ | ||
277 | if (param) { | ||
278 | struct sh_dmae_slave_config *cfg; | ||
279 | |||
280 | cfg = sh_dmae_find_slave(sh_chan, param->slave_id); | ||
281 | if (!cfg) | ||
282 | return -EINVAL; | ||
283 | |||
284 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) | ||
285 | return -EBUSY; | ||
286 | |||
287 | param->config = cfg; | ||
288 | |||
289 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
290 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
291 | } else { | ||
292 | if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) | ||
293 | dmae_set_chcr(sh_chan, RS_DEFAULT); | ||
294 | } | ||
257 | 295 | ||
258 | spin_lock_bh(&sh_chan->desc_lock); | 296 | spin_lock_bh(&sh_chan->desc_lock); |
259 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { | 297 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { |
@@ -286,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
286 | struct sh_desc *desc, *_desc; | 324 | struct sh_desc *desc, *_desc; |
287 | LIST_HEAD(list); | 325 | LIST_HEAD(list); |
288 | 326 | ||
327 | dmae_halt(sh_chan); | ||
328 | |||
289 | /* Prepared and not submitted descriptors can still be on the queue */ | 329 | /* Prepared and not submitted descriptors can still be on the queue */ |
290 | if (!list_empty(&sh_chan->ld_queue)) | 330 | if (!list_empty(&sh_chan->ld_queue)) |
291 | sh_dmae_chan_ld_cleanup(sh_chan, true); | 331 | sh_dmae_chan_ld_cleanup(sh_chan, true); |
292 | 332 | ||
333 | if (chan->private) { | ||
334 | /* The caller is holding dma_list_mutex */ | ||
335 | struct sh_dmae_slave *param = chan->private; | ||
336 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
337 | } | ||
338 | |||
293 | spin_lock_bh(&sh_chan->desc_lock); | 339 | spin_lock_bh(&sh_chan->desc_lock); |
294 | 340 | ||
295 | list_splice_init(&sh_chan->ld_free, &list); | 341 | list_splice_init(&sh_chan->ld_free, &list); |
@@ -301,23 +347,97 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
301 | kfree(desc); | 347 | kfree(desc); |
302 | } | 348 | } |
303 | 349 | ||
304 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | 350 | /** |
305 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | 351 | * sh_dmae_add_desc - get, set up and return one transfer descriptor |
306 | size_t len, unsigned long flags) | 352 | * @sh_chan: DMA channel |
353 | * @flags: DMA transfer flags | ||
354 | * @dest: destination DMA address, incremented when direction equals | ||
355 | * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL | ||
356 | * @src: source DMA address, incremented when direction equals | ||
357 | * DMA_TO_DEVICE or DMA_BIDIRECTIONAL | ||
358 | * @len: DMA transfer length | ||
359 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | ||
360 | * @direction: needed for slave DMA to decide which address to keep constant, | ||
361 | * equals DMA_BIDIRECTIONAL for MEMCPY | ||
362 | * Returns 0 or an error | ||
363 | * Locks: called with desc_lock held | ||
364 | */ | ||
365 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | ||
366 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | ||
367 | struct sh_desc **first, enum dma_data_direction direction) | ||
307 | { | 368 | { |
308 | struct sh_dmae_chan *sh_chan; | 369 | struct sh_desc *new; |
309 | struct sh_desc *first = NULL, *prev = NULL, *new; | ||
310 | size_t copy_size; | 370 | size_t copy_size; |
311 | LIST_HEAD(tx_list); | ||
312 | int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1); | ||
313 | 371 | ||
314 | if (!chan) | 372 | if (!*len) |
315 | return NULL; | 373 | return NULL; |
316 | 374 | ||
317 | if (!len) | 375 | /* Allocate the link descriptor from the free list */ |
376 | new = sh_dmae_get_desc(sh_chan); | ||
377 | if (!new) { | ||
378 | dev_err(sh_chan->dev, "No free link descriptor available\n"); | ||
318 | return NULL; | 379 | return NULL; |
380 | } | ||
319 | 381 | ||
320 | sh_chan = to_sh_chan(chan); | 382 | copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); |
383 | |||
384 | new->hw.sar = *src; | ||
385 | new->hw.dar = *dest; | ||
386 | new->hw.tcr = copy_size; | ||
387 | |||
388 | if (!*first) { | ||
389 | /* First desc */ | ||
390 | new->async_tx.cookie = -EBUSY; | ||
391 | *first = new; | ||
392 | } else { | ||
393 | /* Other desc - invisible to the user */ | ||
394 | new->async_tx.cookie = -EINVAL; | ||
395 | } | ||
396 | |||
397 | dev_dbg(sh_chan->dev, | ||
398 | "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", | ||
399 | copy_size, *len, *src, *dest, &new->async_tx, | ||
400 | new->async_tx.cookie, sh_chan->xmit_shift); | ||
401 | |||
402 | new->mark = DESC_PREPARED; | ||
403 | new->async_tx.flags = flags; | ||
404 | new->direction = direction; | ||
405 | |||
406 | *len -= copy_size; | ||
407 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) | ||
408 | *src += copy_size; | ||
409 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) | ||
410 | *dest += copy_size; | ||
411 | |||
412 | return new; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * sh_dmae_prep_sg - prepare transfer descriptors from an SG list | ||
417 | * | ||
418 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | ||
419 | * converted to scatter-gather to guarantee consistent locking and a correct | ||
420 | * list manipulation. For slave DMA direction carries the usual meaning, and, | ||
421 | * logically, the SG list is RAM and the addr variable contains slave address, | ||
422 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL | ||
423 | * and the SG list contains only one element and points at the source buffer. | ||
424 | */ | ||
425 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | ||
426 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | ||
427 | enum dma_data_direction direction, unsigned long flags) | ||
428 | { | ||
429 | struct scatterlist *sg; | ||
430 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | ||
431 | LIST_HEAD(tx_list); | ||
432 | int chunks = 0; | ||
433 | int i; | ||
434 | |||
435 | if (!sg_len) | ||
436 | return NULL; | ||
437 | |||
438 | for_each_sg(sgl, sg, sg_len, i) | ||
439 | chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / | ||
440 | (SH_DMA_TCR_MAX + 1); | ||
321 | 441 | ||
322 | /* Have to lock the whole loop to protect against concurrent release */ | 442 | /* Have to lock the whole loop to protect against concurrent release */ |
323 | spin_lock_bh(&sh_chan->desc_lock); | 443 | spin_lock_bh(&sh_chan->desc_lock); |
@@ -333,49 +453,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
333 | * only during this function, then they are immediately spliced | 453 | * only during this function, then they are immediately spliced |
334 | * back onto the free list in form of a chain | 454 | * back onto the free list in form of a chain |
335 | */ | 455 | */ |
336 | do { | 456 | for_each_sg(sgl, sg, sg_len, i) { |
337 | /* Allocate the link descriptor from the free list */ | 457 | dma_addr_t sg_addr = sg_dma_address(sg); |
338 | new = sh_dmae_get_desc(sh_chan); | 458 | size_t len = sg_dma_len(sg); |
339 | if (!new) { | 459 | |
340 | dev_err(sh_chan->dev, | 460 | if (!len) |
341 | "No free memory for link descriptor\n"); | 461 | goto err_get_desc; |
342 | list_for_each_entry(new, &tx_list, node) | 462 | |
343 | new->mark = DESC_IDLE; | 463 | do { |
344 | list_splice(&tx_list, &sh_chan->ld_free); | 464 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", |
345 | spin_unlock_bh(&sh_chan->desc_lock); | 465 | i, sg, len, (unsigned long long)sg_addr); |
346 | return NULL; | 466 | |
347 | } | 467 | if (direction == DMA_FROM_DEVICE) |
348 | 468 | new = sh_dmae_add_desc(sh_chan, flags, | |
349 | copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1); | 469 | &sg_addr, addr, &len, &first, |
350 | 470 | direction); | |
351 | new->hw.sar = dma_src; | 471 | else |
352 | new->hw.dar = dma_dest; | 472 | new = sh_dmae_add_desc(sh_chan, flags, |
353 | new->hw.tcr = copy_size; | 473 | addr, &sg_addr, &len, &first, |
354 | if (!first) { | 474 | direction); |
355 | /* First desc */ | 475 | if (!new) |
356 | new->async_tx.cookie = -EBUSY; | 476 | goto err_get_desc; |
357 | first = new; | 477 | |
358 | } else { | 478 | new->chunks = chunks--; |
359 | /* Other desc - invisible to the user */ | 479 | list_add_tail(&new->node, &tx_list); |
360 | new->async_tx.cookie = -EINVAL; | 480 | } while (len); |
361 | } | 481 | } |
362 | |||
363 | dev_dbg(sh_chan->dev, | ||
364 | "chaining %u of %u with %p, dst %x, cookie %d\n", | ||
365 | copy_size, len, &new->async_tx, dma_dest, | ||
366 | new->async_tx.cookie); | ||
367 | |||
368 | new->mark = DESC_PREPARED; | ||
369 | new->async_tx.flags = flags; | ||
370 | new->chunks = chunks--; | ||
371 | |||
372 | prev = new; | ||
373 | len -= copy_size; | ||
374 | dma_src += copy_size; | ||
375 | dma_dest += copy_size; | ||
376 | /* Insert the link descriptor to the LD ring */ | ||
377 | list_add_tail(&new->node, &tx_list); | ||
378 | } while (len); | ||
379 | 482 | ||
380 | if (new != first) | 483 | if (new != first) |
381 | new->async_tx.cookie = -ENOSPC; | 484 | new->async_tx.cookie = -ENOSPC; |
@@ -386,6 +489,77 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
386 | spin_unlock_bh(&sh_chan->desc_lock); | 489 | spin_unlock_bh(&sh_chan->desc_lock); |
387 | 490 | ||
388 | return &first->async_tx; | 491 | return &first->async_tx; |
492 | |||
493 | err_get_desc: | ||
494 | list_for_each_entry(new, &tx_list, node) | ||
495 | new->mark = DESC_IDLE; | ||
496 | list_splice(&tx_list, &sh_chan->ld_free); | ||
497 | |||
498 | spin_unlock_bh(&sh_chan->desc_lock); | ||
499 | |||
500 | return NULL; | ||
501 | } | ||
502 | |||
503 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | ||
504 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
505 | size_t len, unsigned long flags) | ||
506 | { | ||
507 | struct sh_dmae_chan *sh_chan; | ||
508 | struct scatterlist sg; | ||
509 | |||
510 | if (!chan || !len) | ||
511 | return NULL; | ||
512 | |||
513 | chan->private = NULL; | ||
514 | |||
515 | sh_chan = to_sh_chan(chan); | ||
516 | |||
517 | sg_init_table(&sg, 1); | ||
518 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | ||
519 | offset_in_page(dma_src)); | ||
520 | sg_dma_address(&sg) = dma_src; | ||
521 | sg_dma_len(&sg) = len; | ||
522 | |||
523 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, | ||
524 | flags); | ||
525 | } | ||
526 | |||
527 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | ||
528 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
529 | enum dma_data_direction direction, unsigned long flags) | ||
530 | { | ||
531 | struct sh_dmae_slave *param; | ||
532 | struct sh_dmae_chan *sh_chan; | ||
533 | |||
534 | if (!chan) | ||
535 | return NULL; | ||
536 | |||
537 | sh_chan = to_sh_chan(chan); | ||
538 | param = chan->private; | ||
539 | |||
540 | /* Someone calling slave DMA on a public channel? */ | ||
541 | if (!param || !sg_len) { | ||
542 | dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", | ||
543 | __func__, param, sg_len, param ? param->slave_id : -1); | ||
544 | return NULL; | ||
545 | } | ||
546 | |||
547 | /* | ||
548 | * if (param != NULL), this is a successfully requested slave channel, | ||
549 | * therefore param->config != NULL too. | ||
550 | */ | ||
551 | return sh_dmae_prep_sg(sh_chan, sgl, sg_len, ¶m->config->addr, | ||
552 | direction, flags); | ||
553 | } | ||
554 | |||
555 | static void sh_dmae_terminate_all(struct dma_chan *chan) | ||
556 | { | ||
557 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
558 | |||
559 | if (!chan) | ||
560 | return; | ||
561 | |||
562 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
389 | } | 563 | } |
390 | 564 | ||
391 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | 565 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) |
@@ -419,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all | |||
419 | cookie = tx->cookie; | 593 | cookie = tx->cookie; |
420 | 594 | ||
421 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | 595 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { |
422 | BUG_ON(sh_chan->completed_cookie != desc->cookie - 1); | 596 | if (sh_chan->completed_cookie != desc->cookie - 1) |
597 | dev_dbg(sh_chan->dev, | ||
598 | "Completing cookie %d, expected %d\n", | ||
599 | desc->cookie, | ||
600 | sh_chan->completed_cookie + 1); | ||
423 | sh_chan->completed_cookie = desc->cookie; | 601 | sh_chan->completed_cookie = desc->cookie; |
424 | } | 602 | } |
425 | 603 | ||
@@ -492,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | |||
492 | return; | 670 | return; |
493 | } | 671 | } |
494 | 672 | ||
495 | /* Find the first un-transfer desciptor */ | 673 | /* Find the first not transferred desciptor */ |
496 | list_for_each_entry(sd, &sh_chan->ld_queue, node) | 674 | list_for_each_entry(sd, &sh_chan->ld_queue, node) |
497 | if (sd->mark == DESC_SUBMITTED) { | 675 | if (sd->mark == DESC_SUBMITTED) { |
498 | /* Get the ld start address from ld_queue */ | 676 | /* Get the ld start address from ld_queue */ |
@@ -559,7 +737,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data) | |||
559 | 737 | ||
560 | /* IRQ Multi */ | 738 | /* IRQ Multi */ |
561 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 739 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { |
562 | int cnt = 0; | 740 | int __maybe_unused cnt = 0; |
563 | switch (irq) { | 741 | switch (irq) { |
564 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 742 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) |
565 | case DMTE6_IRQ: | 743 | case DMTE6_IRQ: |
@@ -596,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data) | |||
596 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 774 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; |
597 | struct sh_desc *desc; | 775 | struct sh_desc *desc; |
598 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | 776 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); |
777 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | ||
599 | 778 | ||
600 | spin_lock(&sh_chan->desc_lock); | 779 | spin_lock(&sh_chan->desc_lock); |
601 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 780 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
602 | if ((desc->hw.sar + desc->hw.tcr) == sar_buf && | 781 | if (desc->mark == DESC_SUBMITTED && |
603 | desc->mark == DESC_SUBMITTED) { | 782 | ((desc->direction == DMA_FROM_DEVICE && |
783 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | ||
784 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | ||
604 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | 785 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", |
605 | desc->async_tx.cookie, &desc->async_tx, | 786 | desc->async_tx.cookie, &desc->async_tx, |
606 | desc->hw.dar); | 787 | desc->hw.dar); |
@@ -673,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | |||
673 | } | 854 | } |
674 | 855 | ||
675 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 856 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
676 | "sh-dmae%d", new_sh_chan->id); | 857 | "sh-dmae%d", new_sh_chan->id); |
677 | 858 | ||
678 | /* set up channel irq */ | 859 | /* set up channel irq */ |
679 | err = request_irq(irq, &sh_dmae_interrupt, irqflags, | 860 | err = request_irq(irq, &sh_dmae_interrupt, irqflags, |
@@ -684,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | |||
684 | goto err_no_irq; | 865 | goto err_no_irq; |
685 | } | 866 | } |
686 | 867 | ||
687 | /* CHCR register control function */ | ||
688 | new_sh_chan->set_chcr = dmae_set_chcr; | ||
689 | /* DMARS register control function */ | ||
690 | new_sh_chan->set_dmars = dmae_set_dmars; | ||
691 | |||
692 | shdev->chan[id] = new_sh_chan; | 868 | shdev->chan[id] = new_sh_chan; |
693 | return 0; | 869 | return 0; |
694 | 870 | ||
@@ -759,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
759 | INIT_LIST_HEAD(&shdev->common.channels); | 935 | INIT_LIST_HEAD(&shdev->common.channels); |
760 | 936 | ||
761 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 937 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); |
938 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | ||
939 | |||
762 | shdev->common.device_alloc_chan_resources | 940 | shdev->common.device_alloc_chan_resources |
763 | = sh_dmae_alloc_chan_resources; | 941 | = sh_dmae_alloc_chan_resources; |
764 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | 942 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; |
765 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | 943 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; |
766 | shdev->common.device_is_tx_complete = sh_dmae_is_complete; | 944 | shdev->common.device_is_tx_complete = sh_dmae_is_complete; |
767 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | 945 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; |
946 | |||
947 | /* Compulsory for DMA_SLAVE fields */ | ||
948 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | ||
949 | shdev->common.device_terminate_all = sh_dmae_terminate_all; | ||
950 | |||
768 | shdev->common.dev = &pdev->dev; | 951 | shdev->common.dev = &pdev->dev; |
769 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | 952 | /* Default transfer size of 32 bytes requires 32-byte alignment */ |
770 | shdev->common.copy_align = 5; | 953 | shdev->common.copy_align = 5; |
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 108f1cffb6f5..7e227f3c87c4 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h | |||
@@ -29,6 +29,7 @@ struct sh_desc { | |||
29 | struct sh_dmae_regs hw; | 29 | struct sh_dmae_regs hw; |
30 | struct list_head node; | 30 | struct list_head node; |
31 | struct dma_async_tx_descriptor async_tx; | 31 | struct dma_async_tx_descriptor async_tx; |
32 | enum dma_data_direction direction; | ||
32 | dma_cookie_t cookie; | 33 | dma_cookie_t cookie; |
33 | int chunks; | 34 | int chunks; |
34 | int mark; | 35 | int mark; |
@@ -45,13 +46,9 @@ struct sh_dmae_chan { | |||
45 | struct device *dev; /* Channel device */ | 46 | struct device *dev; /* Channel device */ |
46 | struct tasklet_struct tasklet; /* Tasklet */ | 47 | struct tasklet_struct tasklet; /* Tasklet */ |
47 | int descs_allocated; /* desc count */ | 48 | int descs_allocated; /* desc count */ |
49 | int xmit_shift; /* log_2(bytes_per_xfer) */ | ||
48 | int id; /* Raw id of this channel */ | 50 | int id; /* Raw id of this channel */ |
49 | char dev_id[16]; /* unique name per DMAC of channel */ | 51 | char dev_id[16]; /* unique name per DMAC of channel */ |
50 | |||
51 | /* Set chcr */ | ||
52 | int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs); | ||
53 | /* Set DMA resource */ | ||
54 | int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res); | ||
55 | }; | 52 | }; |
56 | 53 | ||
57 | struct sh_dmae_device { | 54 | struct sh_dmae_device { |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e7b19440102e..22d476160d52 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -1279,47 +1279,47 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1279 | rdev->mode_info.connector_table = radeon_connector_table; | 1279 | rdev->mode_info.connector_table = radeon_connector_table; |
1280 | if (rdev->mode_info.connector_table == CT_NONE) { | 1280 | if (rdev->mode_info.connector_table == CT_NONE) { |
1281 | #ifdef CONFIG_PPC_PMAC | 1281 | #ifdef CONFIG_PPC_PMAC |
1282 | if (machine_is_compatible("PowerBook3,3")) { | 1282 | if (of_machine_is_compatible("PowerBook3,3")) { |
1283 | /* powerbook with VGA */ | 1283 | /* powerbook with VGA */ |
1284 | rdev->mode_info.connector_table = CT_POWERBOOK_VGA; | 1284 | rdev->mode_info.connector_table = CT_POWERBOOK_VGA; |
1285 | } else if (machine_is_compatible("PowerBook3,4") || | 1285 | } else if (of_machine_is_compatible("PowerBook3,4") || |
1286 | machine_is_compatible("PowerBook3,5")) { | 1286 | of_machine_is_compatible("PowerBook3,5")) { |
1287 | /* powerbook with internal tmds */ | 1287 | /* powerbook with internal tmds */ |
1288 | rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL; | 1288 | rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL; |
1289 | } else if (machine_is_compatible("PowerBook5,1") || | 1289 | } else if (of_machine_is_compatible("PowerBook5,1") || |
1290 | machine_is_compatible("PowerBook5,2") || | 1290 | of_machine_is_compatible("PowerBook5,2") || |
1291 | machine_is_compatible("PowerBook5,3") || | 1291 | of_machine_is_compatible("PowerBook5,3") || |
1292 | machine_is_compatible("PowerBook5,4") || | 1292 | of_machine_is_compatible("PowerBook5,4") || |
1293 | machine_is_compatible("PowerBook5,5")) { | 1293 | of_machine_is_compatible("PowerBook5,5")) { |
1294 | /* powerbook with external single link tmds (sil164) */ | 1294 | /* powerbook with external single link tmds (sil164) */ |
1295 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; | 1295 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; |
1296 | } else if (machine_is_compatible("PowerBook5,6")) { | 1296 | } else if (of_machine_is_compatible("PowerBook5,6")) { |
1297 | /* powerbook with external dual or single link tmds */ | 1297 | /* powerbook with external dual or single link tmds */ |
1298 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; | 1298 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; |
1299 | } else if (machine_is_compatible("PowerBook5,7") || | 1299 | } else if (of_machine_is_compatible("PowerBook5,7") || |
1300 | machine_is_compatible("PowerBook5,8") || | 1300 | of_machine_is_compatible("PowerBook5,8") || |
1301 | machine_is_compatible("PowerBook5,9")) { | 1301 | of_machine_is_compatible("PowerBook5,9")) { |
1302 | /* PowerBook6,2 ? */ | 1302 | /* PowerBook6,2 ? */ |
1303 | /* powerbook with external dual link tmds (sil1178?) */ | 1303 | /* powerbook with external dual link tmds (sil1178?) */ |
1304 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; | 1304 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; |
1305 | } else if (machine_is_compatible("PowerBook4,1") || | 1305 | } else if (of_machine_is_compatible("PowerBook4,1") || |
1306 | machine_is_compatible("PowerBook4,2") || | 1306 | of_machine_is_compatible("PowerBook4,2") || |
1307 | machine_is_compatible("PowerBook4,3") || | 1307 | of_machine_is_compatible("PowerBook4,3") || |
1308 | machine_is_compatible("PowerBook6,3") || | 1308 | of_machine_is_compatible("PowerBook6,3") || |
1309 | machine_is_compatible("PowerBook6,5") || | 1309 | of_machine_is_compatible("PowerBook6,5") || |
1310 | machine_is_compatible("PowerBook6,7")) { | 1310 | of_machine_is_compatible("PowerBook6,7")) { |
1311 | /* ibook */ | 1311 | /* ibook */ |
1312 | rdev->mode_info.connector_table = CT_IBOOK; | 1312 | rdev->mode_info.connector_table = CT_IBOOK; |
1313 | } else if (machine_is_compatible("PowerMac4,4")) { | 1313 | } else if (of_machine_is_compatible("PowerMac4,4")) { |
1314 | /* emac */ | 1314 | /* emac */ |
1315 | rdev->mode_info.connector_table = CT_EMAC; | 1315 | rdev->mode_info.connector_table = CT_EMAC; |
1316 | } else if (machine_is_compatible("PowerMac10,1")) { | 1316 | } else if (of_machine_is_compatible("PowerMac10,1")) { |
1317 | /* mini with internal tmds */ | 1317 | /* mini with internal tmds */ |
1318 | rdev->mode_info.connector_table = CT_MINI_INTERNAL; | 1318 | rdev->mode_info.connector_table = CT_MINI_INTERNAL; |
1319 | } else if (machine_is_compatible("PowerMac10,2")) { | 1319 | } else if (of_machine_is_compatible("PowerMac10,2")) { |
1320 | /* mini with external tmds */ | 1320 | /* mini with external tmds */ |
1321 | rdev->mode_info.connector_table = CT_MINI_EXTERNAL; | 1321 | rdev->mode_info.connector_table = CT_MINI_EXTERNAL; |
1322 | } else if (machine_is_compatible("PowerMac12,1")) { | 1322 | } else if (of_machine_is_compatible("PowerMac12,1")) { |
1323 | /* PowerMac8,1 ? */ | 1323 | /* PowerMac8,1 ? */ |
1324 | /* imac g5 isight */ | 1324 | /* imac g5 isight */ |
1325 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; | 1325 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 24d90ea246ce..71d4c0703629 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -55,6 +55,12 @@ source "drivers/hid/usbhid/Kconfig" | |||
55 | menu "Special HID drivers" | 55 | menu "Special HID drivers" |
56 | depends on HID | 56 | depends on HID |
57 | 57 | ||
58 | config HID_3M_PCT | ||
59 | tristate "3M PCT" | ||
60 | depends on USB_HID | ||
61 | ---help--- | ||
62 | Support for 3M PCT touch screens. | ||
63 | |||
58 | config HID_A4TECH | 64 | config HID_A4TECH |
59 | tristate "A4 tech" if EMBEDDED | 65 | tristate "A4 tech" if EMBEDDED |
60 | depends on USB_HID | 66 | depends on USB_HID |
@@ -183,6 +189,23 @@ config LOGIRUMBLEPAD2_FF | |||
183 | Say Y here if you want to enable force feedback support for Logitech | 189 | Say Y here if you want to enable force feedback support for Logitech |
184 | Rumblepad 2 devices. | 190 | Rumblepad 2 devices. |
185 | 191 | ||
192 | config LOGIG940_FF | ||
193 | bool "Logitech Flight System G940 force feedback support" | ||
194 | depends on HID_LOGITECH | ||
195 | select INPUT_FF_MEMLESS | ||
196 | help | ||
197 | Say Y here if you want to enable force feedback support for Logitech | ||
198 | Flight System G940 devices. | ||
199 | |||
200 | config HID_MAGICMOUSE | ||
201 | tristate "Apple MagicMouse multi-touch support" | ||
202 | depends on BT_HIDP | ||
203 | ---help--- | ||
204 | Support for the Apple Magic Mouse multi-touch. | ||
205 | |||
206 | Say Y here if you want support for the multi-touch features of the | ||
207 | Apple Wireless "Magic" Mouse. | ||
208 | |||
186 | config HID_MICROSOFT | 209 | config HID_MICROSOFT |
187 | tristate "Microsoft" if EMBEDDED | 210 | tristate "Microsoft" if EMBEDDED |
188 | depends on USB_HID | 211 | depends on USB_HID |
@@ -190,6 +213,12 @@ config HID_MICROSOFT | |||
190 | ---help--- | 213 | ---help--- |
191 | Support for Microsoft devices that are not fully compliant with HID standard. | 214 | Support for Microsoft devices that are not fully compliant with HID standard. |
192 | 215 | ||
216 | config HID_MOSART | ||
217 | tristate "MosArt" | ||
218 | depends on USB_HID | ||
219 | ---help--- | ||
220 | Support for MosArt dual-touch panels. | ||
221 | |||
193 | config HID_MONTEREY | 222 | config HID_MONTEREY |
194 | tristate "Monterey" if EMBEDDED | 223 | tristate "Monterey" if EMBEDDED |
195 | depends on USB_HID | 224 | depends on USB_HID |
@@ -198,12 +227,18 @@ config HID_MONTEREY | |||
198 | Support for Monterey Genius KB29E. | 227 | Support for Monterey Genius KB29E. |
199 | 228 | ||
200 | config HID_NTRIG | 229 | config HID_NTRIG |
201 | tristate "NTrig" if EMBEDDED | 230 | tristate "NTrig" |
202 | depends on USB_HID | 231 | depends on USB_HID |
203 | default !EMBEDDED | ||
204 | ---help--- | 232 | ---help--- |
205 | Support for N-Trig touch screen. | 233 | Support for N-Trig touch screen. |
206 | 234 | ||
235 | config HID_ORTEK | ||
236 | tristate "Ortek" if EMBEDDED | ||
237 | depends on USB_HID | ||
238 | default !EMBEDDED | ||
239 | ---help--- | ||
240 | Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. | ||
241 | |||
207 | config HID_PANTHERLORD | 242 | config HID_PANTHERLORD |
208 | tristate "Pantherlord support" if EMBEDDED | 243 | tristate "Pantherlord support" if EMBEDDED |
209 | depends on USB_HID | 244 | depends on USB_HID |
@@ -227,6 +262,12 @@ config HID_PETALYNX | |||
227 | ---help--- | 262 | ---help--- |
228 | Support for Petalynx Maxter remote control. | 263 | Support for Petalynx Maxter remote control. |
229 | 264 | ||
265 | config HID_QUANTA | ||
266 | tristate "Quanta Optical Touch" | ||
267 | depends on USB_HID | ||
268 | ---help--- | ||
269 | Support for Quanta Optical Touch dual-touch panels. | ||
270 | |||
230 | config HID_SAMSUNG | 271 | config HID_SAMSUNG |
231 | tristate "Samsung" if EMBEDDED | 272 | tristate "Samsung" if EMBEDDED |
232 | depends on USB_HID | 273 | depends on USB_HID |
@@ -241,6 +282,12 @@ config HID_SONY | |||
241 | ---help--- | 282 | ---help--- |
242 | Support for Sony PS3 controller. | 283 | Support for Sony PS3 controller. |
243 | 284 | ||
285 | config HID_STANTUM | ||
286 | tristate "Stantum" | ||
287 | depends on USB_HID | ||
288 | ---help--- | ||
289 | Support for Stantum multitouch panel. | ||
290 | |||
244 | config HID_SUNPLUS | 291 | config HID_SUNPLUS |
245 | tristate "Sunplus" if EMBEDDED | 292 | tristate "Sunplus" if EMBEDDED |
246 | depends on USB_HID | 293 | depends on USB_HID |
@@ -305,9 +352,8 @@ config THRUSTMASTER_FF | |||
305 | Rumble Force or Force Feedback Wheel. | 352 | Rumble Force or Force Feedback Wheel. |
306 | 353 | ||
307 | config HID_WACOM | 354 | config HID_WACOM |
308 | tristate "Wacom Bluetooth devices support" if EMBEDDED | 355 | tristate "Wacom Bluetooth devices support" |
309 | depends on BT_HIDP | 356 | depends on BT_HIDP |
310 | default !EMBEDDED | ||
311 | ---help--- | 357 | ---help--- |
312 | Support for Wacom Graphire Bluetooth tablet. | 358 | Support for Wacom Graphire Bluetooth tablet. |
313 | 359 | ||
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 0de2dff5542c..0b2618f092ca 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile | |||
@@ -18,7 +18,11 @@ endif | |||
18 | ifdef CONFIG_LOGIRUMBLEPAD2_FF | 18 | ifdef CONFIG_LOGIRUMBLEPAD2_FF |
19 | hid-logitech-objs += hid-lg2ff.o | 19 | hid-logitech-objs += hid-lg2ff.o |
20 | endif | 20 | endif |
21 | ifdef CONFIG_LOGIG940_FF | ||
22 | hid-logitech-objs += hid-lg3ff.o | ||
23 | endif | ||
21 | 24 | ||
25 | obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o | ||
22 | obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o | 26 | obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o |
23 | obj-$(CONFIG_HID_APPLE) += hid-apple.o | 27 | obj-$(CONFIG_HID_APPLE) += hid-apple.o |
24 | obj-$(CONFIG_HID_BELKIN) += hid-belkin.o | 28 | obj-$(CONFIG_HID_BELKIN) += hid-belkin.o |
@@ -31,14 +35,19 @@ obj-$(CONFIG_HID_GYRATION) += hid-gyration.o | |||
31 | obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o | 35 | obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o |
32 | obj-$(CONFIG_HID_KYE) += hid-kye.o | 36 | obj-$(CONFIG_HID_KYE) += hid-kye.o |
33 | obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o | 37 | obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o |
38 | obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o | ||
34 | obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o | 39 | obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o |
35 | obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o | 40 | obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o |
41 | obj-$(CONFIG_HID_MOSART) += hid-mosart.o | ||
36 | obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o | 42 | obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o |
43 | obj-$(CONFIG_HID_ORTEK) += hid-ortek.o | ||
44 | obj-$(CONFIG_HID_QUANTA) += hid-quanta.o | ||
37 | obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o | 45 | obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o |
38 | obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o | 46 | obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o |
39 | obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o | 47 | obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o |
40 | obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o | 48 | obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o |
41 | obj-$(CONFIG_HID_SONY) += hid-sony.o | 49 | obj-$(CONFIG_HID_SONY) += hid-sony.o |
50 | obj-$(CONFIG_HID_STANTUM) += hid-stantum.o | ||
42 | obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o | 51 | obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o |
43 | obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o | 52 | obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o |
44 | obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o | 53 | obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o |
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c new file mode 100644 index 000000000000..2370aefc86b2 --- /dev/null +++ b/drivers/hid/hid-3m-pct.c | |||
@@ -0,0 +1,290 @@ | |||
1 | /* | ||
2 | * HID driver for 3M PCT multitouch panels | ||
3 | * | ||
4 | * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/hid.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/usb.h> | ||
19 | |||
20 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
21 | MODULE_DESCRIPTION("3M PCT multitouch panels"); | ||
22 | MODULE_LICENSE("GPL"); | ||
23 | |||
24 | #include "hid-ids.h" | ||
25 | |||
26 | struct mmm_finger { | ||
27 | __s32 x, y; | ||
28 | __u8 rank; | ||
29 | bool touch, valid; | ||
30 | }; | ||
31 | |||
32 | struct mmm_data { | ||
33 | struct mmm_finger f[10]; | ||
34 | __u8 curid, num; | ||
35 | bool touch, valid; | ||
36 | }; | ||
37 | |||
38 | static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
39 | struct hid_field *field, struct hid_usage *usage, | ||
40 | unsigned long **bit, int *max) | ||
41 | { | ||
42 | switch (usage->hid & HID_USAGE_PAGE) { | ||
43 | |||
44 | case HID_UP_BUTTON: | ||
45 | return -1; | ||
46 | |||
47 | case HID_UP_GENDESK: | ||
48 | switch (usage->hid) { | ||
49 | case HID_GD_X: | ||
50 | hid_map_usage(hi, usage, bit, max, | ||
51 | EV_ABS, ABS_MT_POSITION_X); | ||
52 | /* touchscreen emulation */ | ||
53 | input_set_abs_params(hi->input, ABS_X, | ||
54 | field->logical_minimum, | ||
55 | field->logical_maximum, 0, 0); | ||
56 | return 1; | ||
57 | case HID_GD_Y: | ||
58 | hid_map_usage(hi, usage, bit, max, | ||
59 | EV_ABS, ABS_MT_POSITION_Y); | ||
60 | /* touchscreen emulation */ | ||
61 | input_set_abs_params(hi->input, ABS_Y, | ||
62 | field->logical_minimum, | ||
63 | field->logical_maximum, 0, 0); | ||
64 | return 1; | ||
65 | } | ||
66 | return 0; | ||
67 | |||
68 | case HID_UP_DIGITIZER: | ||
69 | switch (usage->hid) { | ||
70 | /* we do not want to map these: no input-oriented meaning */ | ||
71 | case 0x14: | ||
72 | case 0x23: | ||
73 | case HID_DG_INPUTMODE: | ||
74 | case HID_DG_DEVICEINDEX: | ||
75 | case HID_DG_CONTACTCOUNT: | ||
76 | case HID_DG_CONTACTMAX: | ||
77 | case HID_DG_INRANGE: | ||
78 | case HID_DG_CONFIDENCE: | ||
79 | return -1; | ||
80 | case HID_DG_TIPSWITCH: | ||
81 | /* touchscreen emulation */ | ||
82 | hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); | ||
83 | return 1; | ||
84 | case HID_DG_CONTACTID: | ||
85 | hid_map_usage(hi, usage, bit, max, | ||
86 | EV_ABS, ABS_MT_TRACKING_ID); | ||
87 | return 1; | ||
88 | } | ||
89 | /* let hid-input decide for the others */ | ||
90 | return 0; | ||
91 | |||
92 | case 0xff000000: | ||
93 | /* we do not want to map these: no input-oriented meaning */ | ||
94 | return -1; | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int mmm_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
101 | struct hid_field *field, struct hid_usage *usage, | ||
102 | unsigned long **bit, int *max) | ||
103 | { | ||
104 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
105 | clear_bit(usage->code, *bit); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * this function is called when a whole packet has been received and processed, | ||
112 | * so that it can decide what to send to the input layer. | ||
113 | */ | ||
114 | static void mmm_filter_event(struct mmm_data *md, struct input_dev *input) | ||
115 | { | ||
116 | struct mmm_finger *oldest = 0; | ||
117 | bool pressed = false, released = false; | ||
118 | int i; | ||
119 | |||
120 | /* | ||
121 | * we need to iterate on all fingers to decide if we have a press | ||
122 | * or a release event in our touchscreen emulation. | ||
123 | */ | ||
124 | for (i = 0; i < 10; ++i) { | ||
125 | struct mmm_finger *f = &md->f[i]; | ||
126 | if (!f->valid) { | ||
127 | /* this finger is just placeholder data, ignore */ | ||
128 | } else if (f->touch) { | ||
129 | /* this finger is on the screen */ | ||
130 | input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i); | ||
131 | input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x); | ||
132 | input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y); | ||
133 | input_mt_sync(input); | ||
134 | /* | ||
135 | * touchscreen emulation: maintain the age rank | ||
136 | * of this finger, decide if we have a press | ||
137 | */ | ||
138 | if (f->rank == 0) { | ||
139 | f->rank = ++(md->num); | ||
140 | if (f->rank == 1) | ||
141 | pressed = true; | ||
142 | } | ||
143 | if (f->rank == 1) | ||
144 | oldest = f; | ||
145 | } else { | ||
146 | /* this finger took off the screen */ | ||
147 | /* touchscreen emulation: maintain age rank of others */ | ||
148 | int j; | ||
149 | |||
150 | for (j = 0; j < 10; ++j) { | ||
151 | struct mmm_finger *g = &md->f[j]; | ||
152 | if (g->rank > f->rank) { | ||
153 | g->rank--; | ||
154 | if (g->rank == 1) | ||
155 | oldest = g; | ||
156 | } | ||
157 | } | ||
158 | f->rank = 0; | ||
159 | --(md->num); | ||
160 | if (md->num == 0) | ||
161 | released = true; | ||
162 | } | ||
163 | f->valid = 0; | ||
164 | } | ||
165 | |||
166 | /* touchscreen emulation */ | ||
167 | if (oldest) { | ||
168 | if (pressed) | ||
169 | input_event(input, EV_KEY, BTN_TOUCH, 1); | ||
170 | input_event(input, EV_ABS, ABS_X, oldest->x); | ||
171 | input_event(input, EV_ABS, ABS_Y, oldest->y); | ||
172 | } else if (released) { | ||
173 | input_event(input, EV_KEY, BTN_TOUCH, 0); | ||
174 | } | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * this function is called upon all reports | ||
179 | * so that we can accumulate contact point information, | ||
180 | * and call input_mt_sync after each point. | ||
181 | */ | ||
182 | static int mmm_event(struct hid_device *hid, struct hid_field *field, | ||
183 | struct hid_usage *usage, __s32 value) | ||
184 | { | ||
185 | struct mmm_data *md = hid_get_drvdata(hid); | ||
186 | /* | ||
187 | * strangely, this function can be called before | ||
188 | * field->hidinput is initialized! | ||
189 | */ | ||
190 | if (hid->claimed & HID_CLAIMED_INPUT) { | ||
191 | struct input_dev *input = field->hidinput->input; | ||
192 | switch (usage->hid) { | ||
193 | case HID_DG_TIPSWITCH: | ||
194 | md->touch = value; | ||
195 | break; | ||
196 | case HID_DG_CONFIDENCE: | ||
197 | md->valid = value; | ||
198 | break; | ||
199 | case HID_DG_CONTACTID: | ||
200 | if (md->valid) { | ||
201 | md->curid = value; | ||
202 | md->f[value].touch = md->touch; | ||
203 | md->f[value].valid = 1; | ||
204 | } | ||
205 | break; | ||
206 | case HID_GD_X: | ||
207 | if (md->valid) | ||
208 | md->f[md->curid].x = value; | ||
209 | break; | ||
210 | case HID_GD_Y: | ||
211 | if (md->valid) | ||
212 | md->f[md->curid].y = value; | ||
213 | break; | ||
214 | case HID_DG_CONTACTCOUNT: | ||
215 | mmm_filter_event(md, input); | ||
216 | break; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | /* we have handled the hidinput part, now remains hiddev */ | ||
221 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | ||
222 | hid->hiddev_hid_event(hid, field, usage, value); | ||
223 | |||
224 | return 1; | ||
225 | } | ||
226 | |||
227 | static int mmm_probe(struct hid_device *hdev, const struct hid_device_id *id) | ||
228 | { | ||
229 | int ret; | ||
230 | struct mmm_data *md; | ||
231 | |||
232 | md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL); | ||
233 | if (!md) { | ||
234 | dev_err(&hdev->dev, "cannot allocate 3M data\n"); | ||
235 | return -ENOMEM; | ||
236 | } | ||
237 | hid_set_drvdata(hdev, md); | ||
238 | |||
239 | ret = hid_parse(hdev); | ||
240 | if (!ret) | ||
241 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
242 | |||
243 | if (ret) | ||
244 | kfree(md); | ||
245 | return ret; | ||
246 | } | ||
247 | |||
248 | static void mmm_remove(struct hid_device *hdev) | ||
249 | { | ||
250 | hid_hw_stop(hdev); | ||
251 | kfree(hid_get_drvdata(hdev)); | ||
252 | hid_set_drvdata(hdev, NULL); | ||
253 | } | ||
254 | |||
255 | static const struct hid_device_id mmm_devices[] = { | ||
256 | { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, | ||
257 | { } | ||
258 | }; | ||
259 | MODULE_DEVICE_TABLE(hid, mmm_devices); | ||
260 | |||
261 | static const struct hid_usage_id mmm_grabbed_usages[] = { | ||
262 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | ||
263 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} | ||
264 | }; | ||
265 | |||
266 | static struct hid_driver mmm_driver = { | ||
267 | .name = "3m-pct", | ||
268 | .id_table = mmm_devices, | ||
269 | .probe = mmm_probe, | ||
270 | .remove = mmm_remove, | ||
271 | .input_mapping = mmm_input_mapping, | ||
272 | .input_mapped = mmm_input_mapped, | ||
273 | .usage_table = mmm_grabbed_usages, | ||
274 | .event = mmm_event, | ||
275 | }; | ||
276 | |||
277 | static int __init mmm_init(void) | ||
278 | { | ||
279 | return hid_register_driver(&mmm_driver); | ||
280 | } | ||
281 | |||
282 | static void __exit mmm_exit(void) | ||
283 | { | ||
284 | hid_unregister_driver(&mmm_driver); | ||
285 | } | ||
286 | |||
287 | module_init(mmm_init); | ||
288 | module_exit(mmm_exit); | ||
289 | MODULE_LICENSE("GPL"); | ||
290 | |||
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 5b4d66dc1a05..78286b184ace 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c | |||
@@ -40,6 +40,11 @@ module_param(fnmode, uint, 0644); | |||
40 | MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, " | 40 | MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, " |
41 | "[1] = fkeyslast, 2 = fkeysfirst)"); | 41 | "[1] = fkeyslast, 2 = fkeysfirst)"); |
42 | 42 | ||
43 | static unsigned int iso_layout = 1; | ||
44 | module_param(iso_layout, uint, 0644); | ||
45 | MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. " | ||
46 | "(0 = disabled, [1] = enabled)"); | ||
47 | |||
43 | struct apple_sc { | 48 | struct apple_sc { |
44 | unsigned long quirks; | 49 | unsigned long quirks; |
45 | unsigned int fn_on; | 50 | unsigned int fn_on; |
@@ -199,11 +204,13 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, | |||
199 | } | 204 | } |
200 | } | 205 | } |
201 | 206 | ||
202 | if (asc->quirks & APPLE_ISO_KEYBOARD) { | 207 | if (iso_layout) { |
203 | trans = apple_find_translation(apple_iso_keyboard, usage->code); | 208 | if (asc->quirks & APPLE_ISO_KEYBOARD) { |
204 | if (trans) { | 209 | trans = apple_find_translation(apple_iso_keyboard, usage->code); |
205 | input_event(input, usage->type, trans->to, value); | 210 | if (trans) { |
206 | return 1; | 211 | input_event(input, usage->type, trans->to, value); |
212 | return 1; | ||
213 | } | ||
207 | } | 214 | } |
208 | } | 215 | } |
209 | 216 | ||
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index eabe5f87c6c1..368fbb0c4ca6 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Copyright (c) 1999 Andreas Gal | 4 | * Copyright (c) 1999 Andreas Gal |
5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> | 5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> |
6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc | 6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc |
7 | * Copyright (c) 2006-2007 Jiri Kosina | 7 | * Copyright (c) 2006-2010 Jiri Kosina |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* | 10 | /* |
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(hid_debug); | |||
51 | * Register a new report for a device. | 51 | * Register a new report for a device. |
52 | */ | 52 | */ |
53 | 53 | ||
54 | static struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id) | 54 | struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id) |
55 | { | 55 | { |
56 | struct hid_report_enum *report_enum = device->report_enum + type; | 56 | struct hid_report_enum *report_enum = device->report_enum + type; |
57 | struct hid_report *report; | 57 | struct hid_report *report; |
@@ -75,6 +75,7 @@ static struct hid_report *hid_register_report(struct hid_device *device, unsigne | |||
75 | 75 | ||
76 | return report; | 76 | return report; |
77 | } | 77 | } |
78 | EXPORT_SYMBOL_GPL(hid_register_report); | ||
78 | 79 | ||
79 | /* | 80 | /* |
80 | * Register a new field for this report. | 81 | * Register a new field for this report. |
@@ -387,7 +388,8 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) | |||
387 | __u32 data; | 388 | __u32 data; |
388 | unsigned n; | 389 | unsigned n; |
389 | 390 | ||
390 | if (item->size == 0) { | 391 | /* Local delimiter could have value 0, which allows size to be 0 */ |
392 | if (item->size == 0 && item->tag != HID_LOCAL_ITEM_TAG_DELIMITER) { | ||
391 | dbg_hid("item data expected for local item\n"); | 393 | dbg_hid("item data expected for local item\n"); |
392 | return -1; | 394 | return -1; |
393 | } | 395 | } |
@@ -1248,11 +1250,13 @@ EXPORT_SYMBOL_GPL(hid_disconnect); | |||
1248 | 1250 | ||
1249 | /* a list of devices for which there is a specialized driver on HID bus */ | 1251 | /* a list of devices for which there is a specialized driver on HID bus */ |
1250 | static const struct hid_device_id hid_blacklist[] = { | 1252 | static const struct hid_device_id hid_blacklist[] = { |
1253 | { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, | ||
1251 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, | 1254 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, |
1252 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, | 1255 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, |
1253 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, | 1256 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, |
1254 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, | 1257 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, |
1255 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, | 1258 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, |
1259 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, | ||
1256 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, | 1260 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, |
1257 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, | 1261 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, |
1258 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, | 1262 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, |
@@ -1324,6 +1328,7 @@ static const struct hid_device_id hid_blacklist[] = { | |||
1324 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, | 1328 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, |
1325 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, | 1329 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, |
1326 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, | 1330 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, |
1331 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) }, | ||
1327 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, | 1332 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, |
1328 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, | 1333 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, |
1329 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, | 1334 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, |
@@ -1337,10 +1342,15 @@ static const struct hid_device_id hid_blacklist[] = { | |||
1337 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, | 1342 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, |
1338 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, | 1343 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, |
1339 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, | 1344 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, |
1345 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | ||
1340 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, | 1346 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, |
1347 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, | ||
1348 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, | ||
1341 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, | 1349 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, |
1342 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | 1350 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, |
1351 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | ||
1343 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, | 1352 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, |
1353 | { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, | ||
1344 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, | 1354 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, |
1345 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, | 1355 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, |
1346 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, | 1356 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, |
@@ -1543,8 +1553,9 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
1543 | { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, | 1553 | { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, |
1544 | { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, | 1554 | { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, |
1545 | { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, | 1555 | { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, |
1546 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)}, | 1556 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, |
1547 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)}, | 1557 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, |
1558 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, | ||
1548 | { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, | 1559 | { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, |
1549 | { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, | 1560 | { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, |
1550 | { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, | 1561 | { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, |
@@ -1661,8 +1672,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
1661 | { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, | 1672 | { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, |
1662 | { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, | 1673 | { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, |
1663 | { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, | 1674 | { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, |
1664 | { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) }, | ||
1665 | { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) }, | ||
1666 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, | 1675 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, |
1667 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, | 1676 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, |
1668 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, | 1677 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, |
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 6abd0369aedb..cd4ece6fdfb9 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c | |||
@@ -864,13 +864,13 @@ static const char **names[EV_MAX + 1] = { | |||
864 | [EV_SND] = sounds, [EV_REP] = repeats, | 864 | [EV_SND] = sounds, [EV_REP] = repeats, |
865 | }; | 865 | }; |
866 | 866 | ||
867 | void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) { | 867 | static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) |
868 | 868 | { | |
869 | seq_printf(f, "%s.%s", events[type] ? events[type] : "?", | 869 | seq_printf(f, "%s.%s", events[type] ? events[type] : "?", |
870 | names[type] ? (names[type][code] ? names[type][code] : "?") : "?"); | 870 | names[type] ? (names[type][code] ? names[type][code] : "?") : "?"); |
871 | } | 871 | } |
872 | 872 | ||
873 | void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) | 873 | static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) |
874 | { | 874 | { |
875 | int i, j, k; | 875 | int i, j, k; |
876 | struct hid_report *report; | 876 | struct hid_report *report; |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 010368e649ed..72c05f90553c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -18,6 +18,9 @@ | |||
18 | #ifndef HID_IDS_H_FILE | 18 | #ifndef HID_IDS_H_FILE |
19 | #define HID_IDS_H_FILE | 19 | #define HID_IDS_H_FILE |
20 | 20 | ||
21 | #define USB_VENDOR_ID_3M 0x0596 | ||
22 | #define USB_DEVICE_ID_3M1968 0x0500 | ||
23 | |||
21 | #define USB_VENDOR_ID_A4TECH 0x09da | 24 | #define USB_VENDOR_ID_A4TECH 0x09da |
22 | #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 | 25 | #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 |
23 | #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a | 26 | #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a |
@@ -56,6 +59,7 @@ | |||
56 | 59 | ||
57 | #define USB_VENDOR_ID_APPLE 0x05ac | 60 | #define USB_VENDOR_ID_APPLE 0x05ac |
58 | #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 | 61 | #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 |
62 | #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d | ||
59 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e | 63 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e |
60 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f | 64 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f |
61 | #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 | 65 | #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 |
@@ -96,9 +100,12 @@ | |||
96 | #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 | 100 | #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 |
97 | #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 | 101 | #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 |
98 | 102 | ||
99 | #define USB_VENDOR_ID_ASUS 0x0b05 | 103 | #define USB_VENDOR_ID_ASUS 0x0486 |
100 | #define USB_DEVICE_ID_ASUS_LCM 0x1726 | 104 | #define USB_DEVICE_ID_ASUS_T91MT 0x0185 |
101 | #define USB_DEVICE_ID_ASUS_LCM2 0x175b | 105 | |
106 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 | ||
107 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 | ||
108 | #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b | ||
102 | 109 | ||
103 | #define USB_VENDOR_ID_ATEN 0x0557 | 110 | #define USB_VENDOR_ID_ATEN 0x0557 |
104 | #define USB_DEVICE_ID_ATEN_UC100KM 0x2004 | 111 | #define USB_DEVICE_ID_ATEN_UC100KM 0x2004 |
@@ -169,6 +176,9 @@ | |||
169 | #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f | 176 | #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f |
170 | #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 | 177 | #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 |
171 | 178 | ||
179 | #define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 | ||
180 | #define USB_DEVICE_ID_ETURBOTOUCH 0x0006 | ||
181 | |||
172 | #define USB_VENDOR_ID_ETT 0x0664 | 182 | #define USB_VENDOR_ID_ETT 0x0664 |
173 | #define USB_DEVICE_ID_TC5UH 0x0309 | 183 | #define USB_DEVICE_ID_TC5UH 0x0309 |
174 | 184 | ||
@@ -303,6 +313,7 @@ | |||
303 | #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 | 313 | #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 |
304 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283 | 314 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283 |
305 | #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286 | 315 | #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286 |
316 | #define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287 | ||
306 | #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 | 317 | #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 |
307 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 | 318 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 |
308 | #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 | 319 | #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 |
@@ -365,6 +376,9 @@ | |||
365 | #define USB_VENDOR_ID_ONTRAK 0x0a07 | 376 | #define USB_VENDOR_ID_ONTRAK 0x0a07 |
366 | #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 | 377 | #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 |
367 | 378 | ||
379 | #define USB_VENDOR_ID_ORTEK 0x05a4 | ||
380 | #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 | ||
381 | |||
368 | #define USB_VENDOR_ID_PANJIT 0x134c | 382 | #define USB_VENDOR_ID_PANJIT 0x134c |
369 | 383 | ||
370 | #define USB_VENDOR_ID_PANTHERLORD 0x0810 | 384 | #define USB_VENDOR_ID_PANTHERLORD 0x0810 |
@@ -382,9 +396,16 @@ | |||
382 | #define USB_VENDOR_ID_POWERCOM 0x0d9f | 396 | #define USB_VENDOR_ID_POWERCOM 0x0d9f |
383 | #define USB_DEVICE_ID_POWERCOM_UPS 0x0002 | 397 | #define USB_DEVICE_ID_POWERCOM_UPS 0x0002 |
384 | 398 | ||
399 | #define USB_VENDOR_ID_PRODIGE 0x05af | ||
400 | #define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062 | ||
401 | |||
385 | #define USB_VENDOR_ID_SAITEK 0x06a3 | 402 | #define USB_VENDOR_ID_SAITEK 0x06a3 |
386 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 | 403 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 |
387 | 404 | ||
405 | #define USB_VENDOR_ID_QUANTA 0x0408 | ||
406 | #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000 | ||
407 | #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001 | ||
408 | |||
388 | #define USB_VENDOR_ID_SAMSUNG 0x0419 | 409 | #define USB_VENDOR_ID_SAMSUNG 0x0419 |
389 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 | 410 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 |
390 | 411 | ||
@@ -396,18 +417,20 @@ | |||
396 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034 | 417 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034 |
397 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046 | 418 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046 |
398 | 419 | ||
420 | #define USB_VENDOR_ID_STANTUM 0x1f87 | ||
421 | #define USB_DEVICE_ID_MTP 0x0002 | ||
422 | |||
399 | #define USB_VENDOR_ID_SUN 0x0430 | 423 | #define USB_VENDOR_ID_SUN 0x0430 |
400 | #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab | 424 | #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab |
401 | 425 | ||
402 | #define USB_VENDOR_ID_SUNPLUS 0x04fc | 426 | #define USB_VENDOR_ID_SUNPLUS 0x04fc |
403 | #define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8 | 427 | #define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8 |
404 | 428 | ||
405 | #define USB_VENDOR_ID_TENX 0x1130 | ||
406 | #define USB_DEVICE_ID_TENX_IBUDDY1 0x0001 | ||
407 | #define USB_DEVICE_ID_TENX_IBUDDY2 0x0002 | ||
408 | |||
409 | #define USB_VENDOR_ID_THRUSTMASTER 0x044f | 429 | #define USB_VENDOR_ID_THRUSTMASTER 0x044f |
410 | 430 | ||
431 | #define USB_VENDOR_ID_TOUCHPACK 0x1bfd | ||
432 | #define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688 | ||
433 | |||
411 | #define USB_VENDOR_ID_TOPMAX 0x0663 | 434 | #define USB_VENDOR_ID_TOPMAX 0x0663 |
412 | #define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103 | 435 | #define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103 |
413 | 436 | ||
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 5862b0f3b55d..79d9edd0bdfa 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2001 Vojtech Pavlik | 2 | * Copyright (c) 2000-2001 Vojtech Pavlik |
3 | * Copyright (c) 2006-2007 Jiri Kosina | 3 | * Copyright (c) 2006-2010 Jiri Kosina |
4 | * | 4 | * |
5 | * HID to Linux Input mapping | 5 | * HID to Linux Input mapping |
6 | */ | 6 | */ |
@@ -193,12 +193,17 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
193 | break; | 193 | break; |
194 | 194 | ||
195 | case HID_UP_BUTTON: | 195 | case HID_UP_BUTTON: |
196 | code = ((usage->hid - 1) & 0xf); | 196 | code = ((usage->hid - 1) & HID_USAGE); |
197 | 197 | ||
198 | switch (field->application) { | 198 | switch (field->application) { |
199 | case HID_GD_MOUSE: | 199 | case HID_GD_MOUSE: |
200 | case HID_GD_POINTER: code += 0x110; break; | 200 | case HID_GD_POINTER: code += 0x110; break; |
201 | case HID_GD_JOYSTICK: code += 0x120; break; | 201 | case HID_GD_JOYSTICK: |
202 | if (code <= 0xf) | ||
203 | code += BTN_JOYSTICK; | ||
204 | else | ||
205 | code += BTN_TRIGGER_HAPPY; | ||
206 | break; | ||
202 | case HID_GD_GAMEPAD: code += 0x130; break; | 207 | case HID_GD_GAMEPAD: code += 0x130; break; |
203 | default: | 208 | default: |
204 | switch (field->physical) { | 209 | switch (field->physical) { |
@@ -400,6 +405,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
400 | case 0x192: map_key_clear(KEY_CALC); break; | 405 | case 0x192: map_key_clear(KEY_CALC); break; |
401 | case 0x194: map_key_clear(KEY_FILE); break; | 406 | case 0x194: map_key_clear(KEY_FILE); break; |
402 | case 0x196: map_key_clear(KEY_WWW); break; | 407 | case 0x196: map_key_clear(KEY_WWW); break; |
408 | case 0x199: map_key_clear(KEY_CHAT); break; | ||
403 | case 0x19c: map_key_clear(KEY_LOGOFF); break; | 409 | case 0x19c: map_key_clear(KEY_LOGOFF); break; |
404 | case 0x19e: map_key_clear(KEY_COFFEE); break; | 410 | case 0x19e: map_key_clear(KEY_COFFEE); break; |
405 | case 0x1a6: map_key_clear(KEY_HELP); break; | 411 | case 0x1a6: map_key_clear(KEY_HELP); break; |
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 9fcd3d017ab3..3677c9037a11 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #define LG_FF 0x200 | 34 | #define LG_FF 0x200 |
35 | #define LG_FF2 0x400 | 35 | #define LG_FF2 0x400 |
36 | #define LG_RDESC_REL_ABS 0x800 | 36 | #define LG_RDESC_REL_ABS 0x800 |
37 | #define LG_FF3 0x1000 | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Certain Logitech keyboards send in report #3 keys which are far | 40 | * Certain Logitech keyboards send in report #3 keys which are far |
@@ -266,7 +267,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
266 | goto err_free; | 267 | goto err_free; |
267 | } | 268 | } |
268 | 269 | ||
269 | if (quirks & (LG_FF | LG_FF2)) | 270 | if (quirks & (LG_FF | LG_FF2 | LG_FF3)) |
270 | connect_mask &= ~HID_CONNECT_FF; | 271 | connect_mask &= ~HID_CONNECT_FF; |
271 | 272 | ||
272 | ret = hid_hw_start(hdev, connect_mask); | 273 | ret = hid_hw_start(hdev, connect_mask); |
@@ -279,6 +280,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
279 | lgff_init(hdev); | 280 | lgff_init(hdev); |
280 | if (quirks & LG_FF2) | 281 | if (quirks & LG_FF2) |
281 | lg2ff_init(hdev); | 282 | lg2ff_init(hdev); |
283 | if (quirks & LG_FF3) | ||
284 | lg3ff_init(hdev); | ||
282 | 285 | ||
283 | return 0; | 286 | return 0; |
284 | err_free: | 287 | err_free: |
@@ -331,6 +334,8 @@ static const struct hid_device_id lg_devices[] = { | |||
331 | .driver_data = LG_FF }, | 334 | .driver_data = LG_FF }, |
332 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), | 335 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), |
333 | .driver_data = LG_FF2 }, | 336 | .driver_data = LG_FF2 }, |
337 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), | ||
338 | .driver_data = LG_FF3 }, | ||
334 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), | 339 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), |
335 | .driver_data = LG_RDESC_REL_ABS }, | 340 | .driver_data = LG_RDESC_REL_ABS }, |
336 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER), | 341 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER), |
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h index bf31592eaf79..ce2ac8672624 100644 --- a/drivers/hid/hid-lg.h +++ b/drivers/hid/hid-lg.h | |||
@@ -13,4 +13,10 @@ int lg2ff_init(struct hid_device *hdev); | |||
13 | static inline int lg2ff_init(struct hid_device *hdev) { return -1; } | 13 | static inline int lg2ff_init(struct hid_device *hdev) { return -1; } |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | #ifdef CONFIG_LOGIG940_FF | ||
17 | int lg3ff_init(struct hid_device *hdev); | ||
18 | #else | ||
19 | static inline int lg3ff_init(struct hid_device *hdev) { return -1; } | ||
20 | #endif | ||
21 | |||
16 | #endif | 22 | #endif |
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c new file mode 100644 index 000000000000..4002832ee4af --- /dev/null +++ b/drivers/hid/hid-lg3ff.c | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * Force feedback support for Logitech Flight System G940 | ||
3 | * | ||
4 | * Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com> | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | */ | ||
22 | |||
23 | |||
24 | #include <linux/input.h> | ||
25 | #include <linux/usb.h> | ||
26 | #include <linux/hid.h> | ||
27 | |||
28 | #include "usbhid/usbhid.h" | ||
29 | #include "hid-lg.h" | ||
30 | |||
31 | /* | ||
32 | * G940 Theory of Operation (from experimentation) | ||
33 | * | ||
34 | * There are 63 fields (only 3 of them currently used) | ||
35 | * 0 - seems to be command field | ||
36 | * 1 - 30 deal with the x axis | ||
37 | * 31 -60 deal with the y axis | ||
38 | * | ||
39 | * Field 1 is x axis constant force | ||
40 | * Field 31 is y axis constant force | ||
41 | * | ||
42 | * other interesting fields 1,2,3,4 on x axis | ||
43 | * (same for 31,32,33,34 on y axis) | ||
44 | * | ||
45 | * 0 0 127 127 makes the joystick autocenter hard | ||
46 | * | ||
47 | * 127 0 127 127 makes the joystick loose on the right, | ||
48 | * but stops all movemnt left | ||
49 | * | ||
50 | * -127 0 -127 -127 makes the joystick loose on the left, | ||
51 | * but stops all movement right | ||
52 | * | ||
53 | * 0 0 -127 -127 makes the joystick rattle very hard | ||
54 | * | ||
55 | * I'm sure these are effects that I don't know enough about them | ||
56 | */ | ||
57 | |||
58 | struct lg3ff_device { | ||
59 | struct hid_report *report; | ||
60 | }; | ||
61 | |||
62 | static int hid_lg3ff_play(struct input_dev *dev, void *data, | ||
63 | struct ff_effect *effect) | ||
64 | { | ||
65 | struct hid_device *hid = input_get_drvdata(dev); | ||
66 | struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; | ||
67 | struct hid_report *report = list_entry(report_list->next, struct hid_report, list); | ||
68 | int x, y; | ||
69 | |||
70 | /* | ||
71 | * Maxusage should always be 63 (maximum fields) | ||
72 | * likely a better way to ensure this data is clean | ||
73 | */ | ||
74 | memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage); | ||
75 | |||
76 | switch (effect->type) { | ||
77 | case FF_CONSTANT: | ||
78 | /* | ||
79 | * Already clamped in ff_memless | ||
80 | * 0 is center (different then other logitech) | ||
81 | */ | ||
82 | x = effect->u.ramp.start_level; | ||
83 | y = effect->u.ramp.end_level; | ||
84 | |||
85 | /* send command byte */ | ||
86 | report->field[0]->value[0] = 0x51; | ||
87 | |||
88 | /* | ||
89 | * Sign backwards from other Force3d pro | ||
90 | * which get recast here in two's complement 8 bits | ||
91 | */ | ||
92 | report->field[0]->value[1] = (unsigned char)(-x); | ||
93 | report->field[0]->value[31] = (unsigned char)(-y); | ||
94 | |||
95 | usbhid_submit_report(hid, report, USB_DIR_OUT); | ||
96 | break; | ||
97 | } | ||
98 | return 0; | ||
99 | } | ||
100 | static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude) | ||
101 | { | ||
102 | struct hid_device *hid = input_get_drvdata(dev); | ||
103 | struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; | ||
104 | struct hid_report *report = list_entry(report_list->next, struct hid_report, list); | ||
105 | |||
106 | /* | ||
107 | * Auto Centering probed from device | ||
108 | * NOTE: deadman's switch on G940 must be covered | ||
109 | * for effects to work | ||
110 | */ | ||
111 | report->field[0]->value[0] = 0x51; | ||
112 | report->field[0]->value[1] = 0x00; | ||
113 | report->field[0]->value[2] = 0x00; | ||
114 | report->field[0]->value[3] = 0x7F; | ||
115 | report->field[0]->value[4] = 0x7F; | ||
116 | report->field[0]->value[31] = 0x00; | ||
117 | report->field[0]->value[32] = 0x00; | ||
118 | report->field[0]->value[33] = 0x7F; | ||
119 | report->field[0]->value[34] = 0x7F; | ||
120 | |||
121 | usbhid_submit_report(hid, report, USB_DIR_OUT); | ||
122 | } | ||
123 | |||
124 | |||
125 | static const signed short ff3_joystick_ac[] = { | ||
126 | FF_CONSTANT, | ||
127 | FF_AUTOCENTER, | ||
128 | -1 | ||
129 | }; | ||
130 | |||
131 | int lg3ff_init(struct hid_device *hid) | ||
132 | { | ||
133 | struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); | ||
134 | struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; | ||
135 | struct input_dev *dev = hidinput->input; | ||
136 | struct hid_report *report; | ||
137 | struct hid_field *field; | ||
138 | const signed short *ff_bits = ff3_joystick_ac; | ||
139 | int error; | ||
140 | int i; | ||
141 | |||
142 | /* Find the report to use */ | ||
143 | if (list_empty(report_list)) { | ||
144 | err_hid("No output report found"); | ||
145 | return -1; | ||
146 | } | ||
147 | |||
148 | /* Check that the report looks ok */ | ||
149 | report = list_entry(report_list->next, struct hid_report, list); | ||
150 | if (!report) { | ||
151 | err_hid("NULL output report"); | ||
152 | return -1; | ||
153 | } | ||
154 | |||
155 | field = report->field[0]; | ||
156 | if (!field) { | ||
157 | err_hid("NULL field"); | ||
158 | return -1; | ||
159 | } | ||
160 | |||
161 | /* Assume single fixed device G940 */ | ||
162 | for (i = 0; ff_bits[i] >= 0; i++) | ||
163 | set_bit(ff_bits[i], dev->ffbit); | ||
164 | |||
165 | error = input_ff_create_memless(dev, NULL, hid_lg3ff_play); | ||
166 | if (error) | ||
167 | return error; | ||
168 | |||
169 | if (test_bit(FF_AUTOCENTER, dev->ffbit)) | ||
170 | dev->ff->set_autocenter = hid_lg3ff_set_autocenter; | ||
171 | |||
172 | dev_info(&hid->dev, "Force feedback for Logitech Flight System G940 by " | ||
173 | "Gary Stein <LordCnidarian@gmail.com>\n"); | ||
174 | return 0; | ||
175 | } | ||
176 | |||
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index 987abebe0829..61142b76a9b1 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c | |||
@@ -67,6 +67,7 @@ static const struct dev_type devices[] = { | |||
67 | { 0x046d, 0xc219, ff_rumble }, | 67 | { 0x046d, 0xc219, ff_rumble }, |
68 | { 0x046d, 0xc283, ff_joystick }, | 68 | { 0x046d, 0xc283, ff_joystick }, |
69 | { 0x046d, 0xc286, ff_joystick_ac }, | 69 | { 0x046d, 0xc286, ff_joystick_ac }, |
70 | { 0x046d, 0xc287, ff_joystick_ac }, | ||
70 | { 0x046d, 0xc293, ff_joystick }, | 71 | { 0x046d, 0xc293, ff_joystick }, |
71 | { 0x046d, 0xc294, ff_wheel }, | 72 | { 0x046d, 0xc294, ff_wheel }, |
72 | { 0x046d, 0xc295, ff_joystick }, | 73 | { 0x046d, 0xc295, ff_joystick }, |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c new file mode 100644 index 000000000000..4a3a94f2b10c --- /dev/null +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | * Apple "Magic" Wireless Mouse driver | ||
3 | * | ||
4 | * Copyright (c) 2010 Michael Poole <mdpoole@troilus.org> | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/hid.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/usb.h> | ||
18 | |||
19 | #include "hid-ids.h" | ||
20 | |||
21 | static bool emulate_3button = true; | ||
22 | module_param(emulate_3button, bool, 0644); | ||
23 | MODULE_PARM_DESC(emulate_3button, "Emulate a middle button"); | ||
24 | |||
25 | static int middle_button_start = -350; | ||
26 | static int middle_button_stop = +350; | ||
27 | |||
28 | static bool emulate_scroll_wheel = true; | ||
29 | module_param(emulate_scroll_wheel, bool, 0644); | ||
30 | MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel"); | ||
31 | |||
32 | static bool report_touches = true; | ||
33 | module_param(report_touches, bool, 0644); | ||
34 | MODULE_PARM_DESC(report_touches, "Emit touch records (otherwise, only use them for emulation)"); | ||
35 | |||
36 | static bool report_undeciphered; | ||
37 | module_param(report_undeciphered, bool, 0644); | ||
38 | MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event"); | ||
39 | |||
40 | #define TOUCH_REPORT_ID 0x29 | ||
41 | /* These definitions are not precise, but they're close enough. (Bits | ||
42 | * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem | ||
43 | * to be some kind of bit mask -- 0x20 may be a near-field reading, | ||
44 | * and 0x40 is actual contact, and 0x10 may be a start/stop or change | ||
45 | * indication.) | ||
46 | */ | ||
47 | #define TOUCH_STATE_MASK 0xf0 | ||
48 | #define TOUCH_STATE_NONE 0x00 | ||
49 | #define TOUCH_STATE_START 0x30 | ||
50 | #define TOUCH_STATE_DRAG 0x40 | ||
51 | |||
52 | /** | ||
53 | * struct magicmouse_sc - Tracks Magic Mouse-specific data. | ||
54 | * @input: Input device through which we report events. | ||
55 | * @quirks: Currently unused. | ||
56 | * @last_timestamp: Timestamp from most recent (18-bit) touch report | ||
57 | * (units of milliseconds over short windows, but seems to | ||
58 | * increase faster when there are no touches). | ||
59 | * @delta_time: 18-bit difference between the two most recent touch | ||
60 | * reports from the mouse. | ||
61 | * @ntouches: Number of touches in most recent touch report. | ||
62 | * @scroll_accel: Number of consecutive scroll motions. | ||
63 | * @scroll_jiffies: Time of last scroll motion. | ||
64 | * @touches: Most recent data for a touch, indexed by tracking ID. | ||
65 | * @tracking_ids: Mapping of current touch input data to @touches. | ||
66 | */ | ||
67 | struct magicmouse_sc { | ||
68 | struct input_dev *input; | ||
69 | unsigned long quirks; | ||
70 | |||
71 | int last_timestamp; | ||
72 | int delta_time; | ||
73 | int ntouches; | ||
74 | int scroll_accel; | ||
75 | unsigned long scroll_jiffies; | ||
76 | |||
77 | struct { | ||
78 | short x; | ||
79 | short y; | ||
80 | short scroll_y; | ||
81 | u8 size; | ||
82 | } touches[16]; | ||
83 | int tracking_ids[16]; | ||
84 | }; | ||
85 | |||
86 | static int magicmouse_firm_touch(struct magicmouse_sc *msc) | ||
87 | { | ||
88 | int touch = -1; | ||
89 | int ii; | ||
90 | |||
91 | /* If there is only one "firm" touch, set touch to its | ||
92 | * tracking ID. | ||
93 | */ | ||
94 | for (ii = 0; ii < msc->ntouches; ii++) { | ||
95 | int idx = msc->tracking_ids[ii]; | ||
96 | if (msc->touches[idx].size < 8) { | ||
97 | /* Ignore this touch. */ | ||
98 | } else if (touch >= 0) { | ||
99 | touch = -1; | ||
100 | break; | ||
101 | } else { | ||
102 | touch = idx; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | return touch; | ||
107 | } | ||
108 | |||
109 | static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state) | ||
110 | { | ||
111 | int last_state = test_bit(BTN_LEFT, msc->input->key) << 0 | | ||
112 | test_bit(BTN_RIGHT, msc->input->key) << 1 | | ||
113 | test_bit(BTN_MIDDLE, msc->input->key) << 2; | ||
114 | |||
115 | if (emulate_3button) { | ||
116 | int id; | ||
117 | |||
118 | /* If some button was pressed before, keep it held | ||
119 | * down. Otherwise, if there's exactly one firm | ||
120 | * touch, use that to override the mouse's guess. | ||
121 | */ | ||
122 | if (state == 0) { | ||
123 | /* The button was released. */ | ||
124 | } else if (last_state != 0) { | ||
125 | state = last_state; | ||
126 | } else if ((id = magicmouse_firm_touch(msc)) >= 0) { | ||
127 | int x = msc->touches[id].x; | ||
128 | if (x < middle_button_start) | ||
129 | state = 1; | ||
130 | else if (x > middle_button_stop) | ||
131 | state = 2; | ||
132 | else | ||
133 | state = 4; | ||
134 | } /* else: we keep the mouse's guess */ | ||
135 | |||
136 | input_report_key(msc->input, BTN_MIDDLE, state & 4); | ||
137 | } | ||
138 | |||
139 | input_report_key(msc->input, BTN_LEFT, state & 1); | ||
140 | input_report_key(msc->input, BTN_RIGHT, state & 2); | ||
141 | |||
142 | if (state != last_state) | ||
143 | msc->scroll_accel = 0; | ||
144 | } | ||
145 | |||
/* Decode one 8-byte touch record, update the per-touch state, emulate a
 * scroll wheel if requested, and emit the multitouch events.
 */
static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata)
{
	struct input_dev *input = msc->input;
	/* Bytes 0-2 pack two 12-bit signed coordinates. */
	__s32 x_y = tdata[0] << 8 | tdata[1] << 16 | tdata[2] << 24;
	int misc = tdata[5] | tdata[6] << 8;
	int id = (misc >> 6) & 15;
	int x = x_y << 12 >> 20;	/* sign-extend the low 12 bits */
	int y = -(x_y >> 20);		/* high 12 bits; device Y is inverted */

	/* Store tracking ID and other fields. */
	msc->tracking_ids[raw_id] = id;
	msc->touches[id].x = x;
	msc->touches[id].y = y;
	msc->touches[id].size = misc & 63;

	/* If requested, emulate a scroll wheel by detecting small
	 * vertical touch motions along the middle of the mouse.
	 */
	if (emulate_scroll_wheel &&
	    middle_button_start < x && x < middle_button_stop) {
		/* Step divisors: repeated touch starts within half a
		 * second raise scroll_accel, selecting a smaller divisor
		 * and thus faster scrolling.
		 */
		static const int accel_profile[] = {
			256, 228, 192, 160, 128, 96, 64, 32,
		};
		unsigned long now = jiffies;
		int step = msc->touches[id].scroll_y - y;

		/* Reset acceleration after half a second. */
		if (time_after(now, msc->scroll_jiffies + HZ / 2))
			msc->scroll_accel = 0;

		/* Calculate and apply the scroll motion. */
		switch (tdata[7] & TOUCH_STATE_MASK) {
		case TOUCH_STATE_START:
			msc->touches[id].scroll_y = y;
			msc->scroll_accel = min_t(int, msc->scroll_accel + 1,
						ARRAY_SIZE(accel_profile) - 1);
			break;
		case TOUCH_STATE_DRAG:
			step = step / accel_profile[msc->scroll_accel];
			if (step != 0) {
				msc->touches[id].scroll_y = y;
				msc->scroll_jiffies = now;
				input_report_rel(input, REL_WHEEL, step);
			}
			break;
		}
	}

	/* Generate the input events for this touch. */
	if (report_touches) {
		/* Bits 10-15 of misc carry the orientation, biased by 32. */
		int orientation = (misc >> 10) - 32;

		input_report_abs(input, ABS_MT_TRACKING_ID, id);
		input_report_abs(input, ABS_MT_TOUCH_MAJOR, tdata[3]);
		input_report_abs(input, ABS_MT_TOUCH_MINOR, tdata[4]);
		input_report_abs(input, ABS_MT_ORIENTATION, orientation);
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);

		if (report_undeciphered)
			input_event(input, EV_MSC, MSC_RAW, tdata[7]);

		input_mt_sync(input);
	}
}
211 | |||
/* Process a raw report.  Report 0x10 carries plain relative motion and
 * clicks; TOUCH_REPORT_ID carries motion, clicks and N*8 bytes of touch
 * records.  Returns 1 when consumed, 0 to let the HID core handle it.
 */
static int magicmouse_raw_event(struct hid_device *hdev,
		struct hid_report *report, u8 *data, int size)
{
	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
	struct input_dev *input = msc->input;
	int x, y, ts, ii, clicks;

	switch (data[0]) {
	case 0x10:
		if (size != 6)
			return 0;
		x = (__s16)(data[2] | data[3] << 8);
		y = (__s16)(data[4] | data[5] << 8);
		clicks = data[1];
		break;
	case TOUCH_REPORT_ID:
		/* Expect six bytes of prefix, and N*8 bytes of touch data. */
		if (size < 6 || ((size - 6) % 8) != 0)
			return 0;
		/* 18-bit hardware timestamp; the delta wraps at 2^18. */
		ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
		msc->delta_time = (ts - msc->last_timestamp) & 0x3ffff;
		msc->last_timestamp = ts;
		msc->ntouches = (size - 6) / 8;
		for (ii = 0; ii < msc->ntouches; ii++)
			magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
		/* When emulating three-button mode, it is important
		 * to have the current touch information before
		 * generating a click event.
		 */
		x = (signed char)data[1];
		y = (signed char)data[2];
		clicks = data[3];
		break;
	case 0x20: /* Theoretically battery status (0-100), but I have
		    * never seen it -- maybe it is only upon request.
		    */
	case 0x60: /* Unknown, maybe laser on/off. */
	case 0x61: /* Laser reflection status change.
		    * data[1]: 0 = spotted, 1 = lost
		    */
	default:
		return 0;
	}

	magicmouse_emit_buttons(msc, clicks & 3);
	input_report_rel(input, REL_X, x);
	input_report_rel(input, REL_Y, y);
	input_sync(input);
	return 1;
}
262 | |||
263 | static int magicmouse_input_open(struct input_dev *dev) | ||
264 | { | ||
265 | struct hid_device *hid = input_get_drvdata(dev); | ||
266 | |||
267 | return hid->ll_driver->open(hid); | ||
268 | } | ||
269 | |||
270 | static void magicmouse_input_close(struct input_dev *dev) | ||
271 | { | ||
272 | struct hid_device *hid = input_get_drvdata(dev); | ||
273 | |||
274 | hid->ll_driver->close(hid); | ||
275 | } | ||
276 | |||
/* Configure the input device: identity copied from the HID device, plus
 * keys, relative axes and (per module parameters) scroll wheel and
 * multitouch axes.
 */
static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
	input_set_drvdata(input, hdev);
	/* Route events and open/close through the HID transport layer. */
	input->event = hdev->ll_driver->hidinput_input_event;
	input->open = magicmouse_input_open;
	input->close = magicmouse_input_close;

	input->name = hdev->name;
	input->phys = hdev->phys;
	input->uniq = hdev->uniq;
	input->id.bustype = hdev->bus;
	input->id.vendor = hdev->vendor;
	input->id.product = hdev->product;
	input->id.version = hdev->version;
	input->dev.parent = hdev->dev.parent;

	__set_bit(EV_KEY, input->evbit);
	__set_bit(BTN_LEFT, input->keybit);
	__set_bit(BTN_RIGHT, input->keybit);
	if (emulate_3button)
		__set_bit(BTN_MIDDLE, input->keybit);
	__set_bit(BTN_TOOL_FINGER, input->keybit);

	__set_bit(EV_REL, input->evbit);
	__set_bit(REL_X, input->relbit);
	__set_bit(REL_Y, input->relbit);
	if (emulate_scroll_wheel)
		__set_bit(REL_WHEEL, input->relbit);

	if (report_touches) {
		__set_bit(EV_ABS, input->evbit);

		input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
		input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
		input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
		input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0);
		input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 1358,
				4, 0);
		/* Note: Touch Y position from the device is inverted relative
		 * to how pointer motion is reported (and relative to how USB
		 * HID recommends the coordinates work). This driver keeps
		 * the origin at the same position, and just uses the additive
		 * inverse of the reported Y.
		 */
		input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 2047,
				4, 0);
	}

	if (report_undeciphered) {
		__set_bit(EV_MSC, input->evbit);
		__set_bit(MSC_RAW, input->mscbit);
	}
}
330 | |||
331 | static int magicmouse_probe(struct hid_device *hdev, | ||
332 | const struct hid_device_id *id) | ||
333 | { | ||
334 | __u8 feature_1[] = { 0xd7, 0x01 }; | ||
335 | __u8 feature_2[] = { 0xf8, 0x01, 0x32 }; | ||
336 | struct input_dev *input; | ||
337 | struct magicmouse_sc *msc; | ||
338 | struct hid_report *report; | ||
339 | int ret; | ||
340 | |||
341 | msc = kzalloc(sizeof(*msc), GFP_KERNEL); | ||
342 | if (msc == NULL) { | ||
343 | dev_err(&hdev->dev, "can't alloc magicmouse descriptor\n"); | ||
344 | return -ENOMEM; | ||
345 | } | ||
346 | |||
347 | msc->quirks = id->driver_data; | ||
348 | hid_set_drvdata(hdev, msc); | ||
349 | |||
350 | ret = hid_parse(hdev); | ||
351 | if (ret) { | ||
352 | dev_err(&hdev->dev, "magicmouse hid parse failed\n"); | ||
353 | goto err_free; | ||
354 | } | ||
355 | |||
356 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
357 | if (ret) { | ||
358 | dev_err(&hdev->dev, "magicmouse hw start failed\n"); | ||
359 | goto err_free; | ||
360 | } | ||
361 | |||
362 | report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID); | ||
363 | if (!report) { | ||
364 | dev_err(&hdev->dev, "unable to register touch report\n"); | ||
365 | ret = -ENOMEM; | ||
366 | goto err_stop_hw; | ||
367 | } | ||
368 | report->size = 6; | ||
369 | |||
370 | ret = hdev->hid_output_raw_report(hdev, feature_1, sizeof(feature_1), | ||
371 | HID_FEATURE_REPORT); | ||
372 | if (ret != sizeof(feature_1)) { | ||
373 | dev_err(&hdev->dev, "unable to request touch data (1:%d)\n", | ||
374 | ret); | ||
375 | goto err_stop_hw; | ||
376 | } | ||
377 | ret = hdev->hid_output_raw_report(hdev, feature_2, | ||
378 | sizeof(feature_2), HID_FEATURE_REPORT); | ||
379 | if (ret != sizeof(feature_2)) { | ||
380 | dev_err(&hdev->dev, "unable to request touch data (2:%d)\n", | ||
381 | ret); | ||
382 | goto err_stop_hw; | ||
383 | } | ||
384 | |||
385 | input = input_allocate_device(); | ||
386 | if (!input) { | ||
387 | dev_err(&hdev->dev, "can't alloc input device\n"); | ||
388 | ret = -ENOMEM; | ||
389 | goto err_stop_hw; | ||
390 | } | ||
391 | magicmouse_setup_input(input, hdev); | ||
392 | |||
393 | ret = input_register_device(input); | ||
394 | if (ret) { | ||
395 | dev_err(&hdev->dev, "input device registration failed\n"); | ||
396 | goto err_input; | ||
397 | } | ||
398 | msc->input = input; | ||
399 | |||
400 | return 0; | ||
401 | err_input: | ||
402 | input_free_device(input); | ||
403 | err_stop_hw: | ||
404 | hid_hw_stop(hdev); | ||
405 | err_free: | ||
406 | kfree(msc); | ||
407 | return ret; | ||
408 | } | ||
409 | |||
/* Undo probe: stop the hardware and release the driver context. */
static void magicmouse_remove(struct hid_device *hdev)
{
	struct magicmouse_sc *msc = hid_get_drvdata(hdev);

	hid_hw_stop(hdev);
	kfree(msc);
}
415 | |||
/* Supported devices: currently only the Bluetooth Magic Mouse. */
static const struct hid_device_id magic_mice[] = {
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE),
		.driver_data = 0 },
	{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);

/* Raw events are handled entirely in magicmouse_raw_event(). */
static struct hid_driver magicmouse_driver = {
	.name = "magicmouse",
	.id_table = magic_mice,
	.probe = magicmouse_probe,
	.remove = magicmouse_remove,
	.raw_event = magicmouse_raw_event,
};
430 | |||
431 | static int __init magicmouse_init(void) | ||
432 | { | ||
433 | int ret; | ||
434 | |||
435 | ret = hid_register_driver(&magicmouse_driver); | ||
436 | if (ret) | ||
437 | printk(KERN_ERR "can't register magicmouse driver\n"); | ||
438 | |||
439 | return ret; | ||
440 | } | ||
441 | |||
/* Module exit point: unregister the HID driver. */
static void __exit magicmouse_exit(void)
{
	hid_unregister_driver(&magicmouse_driver);
}
446 | |||
447 | module_init(magicmouse_init); | ||
448 | module_exit(magicmouse_exit); | ||
449 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c new file mode 100644 index 000000000000..c8718168fe42 --- /dev/null +++ b/drivers/hid/hid-mosart.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * HID driver for the multitouch panel on the ASUS EeePC T91MT | ||
3 | * | ||
4 | * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> | ||
5 | * Copyright (c) 2010 Teemu Tuominen <teemu.tuominen@cybercom.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/hid.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/usb.h> | ||
20 | #include "usbhid/usbhid.h" | ||
21 | |||
22 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
23 | MODULE_DESCRIPTION("MosArt dual-touch panel"); | ||
24 | MODULE_LICENSE("GPL"); | ||
25 | |||
26 | #include "hid-ids.h" | ||
27 | |||
/* Per-device state: the contact currently being parsed, plus the
 * bookkeeping used for single-touch (touchscreen) emulation.
 */
struct mosart_data {
	__u16 x, y;		/* coordinates of the current contact */
	__u8 id;		/* contact ID of the current contact */
	bool valid; /* valid finger data, or just placeholder? */
	bool first; /* is this the first finger in this frame? */
	bool activity_now; /* at least one active finger in this frame? */
	bool activity; /* at least one active finger previously? */
};
36 | |||
/* Map HID usages onto input events: X/Y become multitouch position axes
 * (plus the single-touch ABS_X/ABS_Y emulation ranges), the contact ID
 * becomes the tracking ID, and everything we handle ourselves is
 * suppressed.  Returns 1 when mapped here, -1 to drop the usage, 0 for
 * the default mapping.
 */
static int mosart_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	switch (usage->hid & HID_USAGE_PAGE) {

	case HID_UP_GENDESK:
		switch (usage->hid) {
		case HID_GD_X:
			hid_map_usage(hi, usage, bit, max,
					EV_ABS, ABS_MT_POSITION_X);
			/* touchscreen emulation */
			input_set_abs_params(hi->input, ABS_X,
						field->logical_minimum,
						field->logical_maximum, 0, 0);
			return 1;
		case HID_GD_Y:
			hid_map_usage(hi, usage, bit, max,
					EV_ABS, ABS_MT_POSITION_Y);
			/* touchscreen emulation */
			input_set_abs_params(hi->input, ABS_Y,
						field->logical_minimum,
						field->logical_maximum, 0, 0);
			return 1;
		}
		return 0;

	case HID_UP_DIGITIZER:
		switch (usage->hid) {
		/* These are handled in mosart_event() or ignored. */
		case HID_DG_CONFIDENCE:
		case HID_DG_TIPSWITCH:
		case HID_DG_INPUTMODE:
		case HID_DG_DEVICEINDEX:
		case HID_DG_CONTACTCOUNT:
		case HID_DG_CONTACTMAX:
		case HID_DG_TIPPRESSURE:
		case HID_DG_WIDTH:
		case HID_DG_HEIGHT:
			return -1;
		case HID_DG_INRANGE:
			/* touchscreen emulation */
			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
			return 1;

		case HID_DG_CONTACTID:
			hid_map_usage(hi, usage, bit, max,
					EV_ABS, ABS_MT_TRACKING_ID);
			return 1;

		}
		return 0;

	case 0xff000000:
		/* ignore HID features */
		return -1;
	}

	return 0;
}
96 | |||
97 | static int mosart_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
98 | struct hid_field *field, struct hid_usage *usage, | ||
99 | unsigned long **bit, int *max) | ||
100 | { | ||
101 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
102 | clear_bit(usage->code, *bit); | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
/*
 * Called once a whole finger (contact) has been parsed, so that it can
 * decide what to send to the input layer: multitouch events for valid
 * contacts, plus BTN_TOUCH/ABS_X/ABS_Y single-touch emulation.
 */
static void mosart_filter_event(struct mosart_data *td, struct input_dev *input)
{
	td->first = !td->first; /* touchscreen emulation: toggles per finger */

	if (!td->valid) {
		/*
		 * touchscreen emulation: if no finger in this frame is valid
		 * and there previously was finger activity, this is a release
		 */
		if (!td->first && !td->activity_now && td->activity) {
			input_event(input, EV_KEY, BTN_TOUCH, 0);
			td->activity = false;
		}
		return;
	}

	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
	input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
	input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);

	input_mt_sync(input);
	/* Consume this contact; the next fields start a fresh one. */
	td->valid = false;

	/* touchscreen emulation: if first active finger in this frame... */
	if (!td->activity_now) {
		/* if there was no previous activity, emit touch event */
		if (!td->activity) {
			input_event(input, EV_KEY, BTN_TOUCH, 1);
			td->activity = true;
		}
		td->activity_now = true;
		/* and in any case this is our preferred finger */
		input_event(input, EV_ABS, ABS_X, td->x);
		input_event(input, EV_ABS, ABS_Y, td->y);
	}
}
147 | |||
148 | |||
/* Per-usage event hook: accumulate the fields of one contact and hand
 * the completed contact to mosart_filter_event() when its last field
 * (Y) arrives.  Returns 0 for usages left to generic hidinput handling,
 * 1 for everything handled here.
 */
static int mosart_event(struct hid_device *hid, struct hid_field *field,
				struct hid_usage *usage, __s32 value)
{
	struct mosart_data *td = hid_get_drvdata(hid);

	if (hid->claimed & HID_CLAIMED_INPUT) {
		struct input_dev *input = field->hidinput->input;
		switch (usage->hid) {
		case HID_DG_INRANGE:
			td->valid = !!value;
			break;
		case HID_GD_X:
			td->x = value;
			break;
		case HID_GD_Y:
			/* Y is the last per-contact field in the report. */
			td->y = value;
			mosart_filter_event(td, input);
			break;
		case HID_DG_CONTACTID:
			td->id = value;
			break;
		case HID_DG_CONTACTCOUNT:
			/* touch emulation: this is the last field in a frame */
			td->first = false;
			td->activity_now = false;
			break;
		case HID_DG_CONFIDENCE:
		case HID_DG_TIPSWITCH:
			/* avoid interference from generic hidinput handling */
			break;

		default:
			/* fallback to the generic hidinput handling */
			return 0;
		}
	}

	/* we have handled the hidinput part, now remains hiddev */
	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
		hid->hiddev_hid_event(hid, field, usage, value);

	return 1;
}
192 | |||
193 | static int mosart_probe(struct hid_device *hdev, const struct hid_device_id *id) | ||
194 | { | ||
195 | int ret; | ||
196 | struct mosart_data *td; | ||
197 | |||
198 | |||
199 | td = kmalloc(sizeof(struct mosart_data), GFP_KERNEL); | ||
200 | if (!td) { | ||
201 | dev_err(&hdev->dev, "cannot allocate MosArt data\n"); | ||
202 | return -ENOMEM; | ||
203 | } | ||
204 | td->valid = false; | ||
205 | td->activity = false; | ||
206 | td->activity_now = false; | ||
207 | td->first = false; | ||
208 | hid_set_drvdata(hdev, td); | ||
209 | |||
210 | /* currently, it's better to have one evdev device only */ | ||
211 | #if 0 | ||
212 | hdev->quirks |= HID_QUIRK_MULTI_INPUT; | ||
213 | #endif | ||
214 | |||
215 | ret = hid_parse(hdev); | ||
216 | if (ret == 0) | ||
217 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
218 | |||
219 | if (ret == 0) { | ||
220 | struct hid_report_enum *re = hdev->report_enum | ||
221 | + HID_FEATURE_REPORT; | ||
222 | struct hid_report *r = re->report_id_hash[7]; | ||
223 | |||
224 | r->field[0]->value[0] = 0x02; | ||
225 | usbhid_submit_report(hdev, r, USB_DIR_OUT); | ||
226 | } else | ||
227 | kfree(td); | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static void mosart_remove(struct hid_device *hdev) | ||
233 | { | ||
234 | hid_hw_stop(hdev); | ||
235 | kfree(hid_get_drvdata(hdev)); | ||
236 | hid_set_drvdata(hdev, NULL); | ||
237 | } | ||
238 | |||
/* Supported devices: the MosArt panel in the ASUS EeePC T91MT. */
static const struct hid_device_id mosart_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
	{ }
};
MODULE_DEVICE_TABLE(hid, mosart_devices);

/* Grab every usage so all events are routed through mosart_event(). */
static const struct hid_usage_id mosart_grabbed_usages[] = {
	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
};

static struct hid_driver mosart_driver = {
	.name = "mosart",
	.id_table = mosart_devices,
	.probe = mosart_probe,
	.remove = mosart_remove,
	.input_mapping = mosart_input_mapping,
	.input_mapped = mosart_input_mapped,
	.usage_table = mosart_grabbed_usages,
	.event = mosart_event,
};
260 | |||
/* Module entry point: register the HID driver. */
static int __init mosart_init(void)
{
	return hid_register_driver(&mosart_driver);
}
265 | |||
/* Module exit point: unregister the HID driver. */
static void __exit mosart_exit(void)
{
	hid_unregister_driver(&mosart_driver);
}
270 | |||
271 | module_init(mosart_init); | ||
272 | module_exit(mosart_exit); | ||
273 | |||
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index 49ce69d7bba7..3234c729a895 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c | |||
@@ -25,11 +25,16 @@ | |||
25 | EV_KEY, (c)) | 25 | EV_KEY, (c)) |
26 | 26 | ||
27 | struct ntrig_data { | 27 | struct ntrig_data { |
28 | __s32 x, y, id, w, h; | 28 | /* Incoming raw values for a single contact */ |
29 | char reading_a_point, found_contact_id; | 29 | __u16 x, y, w, h; |
30 | char pen_active; | 30 | __u16 id; |
31 | char finger_active; | 31 | __u8 confidence; |
32 | char inverted; | 32 | |
33 | bool reading_mt; | ||
34 | __u8 first_contact_confidence; | ||
35 | |||
36 | __u8 mt_footer[4]; | ||
37 | __u8 mt_foot_count; | ||
33 | }; | 38 | }; |
34 | 39 | ||
35 | /* | 40 | /* |
@@ -42,8 +47,11 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
42 | struct hid_field *field, struct hid_usage *usage, | 47 | struct hid_field *field, struct hid_usage *usage, |
43 | unsigned long **bit, int *max) | 48 | unsigned long **bit, int *max) |
44 | { | 49 | { |
45 | switch (usage->hid & HID_USAGE_PAGE) { | 50 | /* No special mappings needed for the pen and single touch */ |
51 | if (field->physical) | ||
52 | return 0; | ||
46 | 53 | ||
54 | switch (usage->hid & HID_USAGE_PAGE) { | ||
47 | case HID_UP_GENDESK: | 55 | case HID_UP_GENDESK: |
48 | switch (usage->hid) { | 56 | switch (usage->hid) { |
49 | case HID_GD_X: | 57 | case HID_GD_X: |
@@ -66,18 +74,12 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
66 | case HID_UP_DIGITIZER: | 74 | case HID_UP_DIGITIZER: |
67 | switch (usage->hid) { | 75 | switch (usage->hid) { |
68 | /* we do not want to map these for now */ | 76 | /* we do not want to map these for now */ |
69 | case HID_DG_CONTACTID: /* value is useless */ | 77 | case HID_DG_CONTACTID: /* Not trustworthy, squelch for now */ |
70 | case HID_DG_INPUTMODE: | 78 | case HID_DG_INPUTMODE: |
71 | case HID_DG_DEVICEINDEX: | 79 | case HID_DG_DEVICEINDEX: |
72 | case HID_DG_CONTACTCOUNT: | ||
73 | case HID_DG_CONTACTMAX: | 80 | case HID_DG_CONTACTMAX: |
74 | return -1; | 81 | return -1; |
75 | 82 | ||
76 | /* original mapping by Rafi Rubin */ | ||
77 | case HID_DG_CONFIDENCE: | ||
78 | nt_map_key_clear(BTN_TOOL_DOUBLETAP); | ||
79 | return 1; | ||
80 | |||
81 | /* width/height mapped on TouchMajor/TouchMinor/Orientation */ | 83 | /* width/height mapped on TouchMajor/TouchMinor/Orientation */ |
82 | case HID_DG_WIDTH: | 84 | case HID_DG_WIDTH: |
83 | hid_map_usage(hi, usage, bit, max, | 85 | hid_map_usage(hi, usage, bit, max, |
@@ -104,6 +106,10 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi, | |||
104 | struct hid_field *field, struct hid_usage *usage, | 106 | struct hid_field *field, struct hid_usage *usage, |
105 | unsigned long **bit, int *max) | 107 | unsigned long **bit, int *max) |
106 | { | 108 | { |
109 | /* No special mappings needed for the pen and single touch */ | ||
110 | if (field->physical) | ||
111 | return 0; | ||
112 | |||
107 | if (usage->type == EV_KEY || usage->type == EV_REL | 113 | if (usage->type == EV_KEY || usage->type == EV_REL |
108 | || usage->type == EV_ABS) | 114 | || usage->type == EV_ABS) |
109 | clear_bit(usage->code, *bit); | 115 | clear_bit(usage->code, *bit); |
@@ -123,31 +129,30 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
123 | struct input_dev *input = field->hidinput->input; | 129 | struct input_dev *input = field->hidinput->input; |
124 | struct ntrig_data *nd = hid_get_drvdata(hid); | 130 | struct ntrig_data *nd = hid_get_drvdata(hid); |
125 | 131 | ||
132 | /* No special handling needed for the pen */ | ||
133 | if (field->application == HID_DG_PEN) | ||
134 | return 0; | ||
135 | |||
126 | if (hid->claimed & HID_CLAIMED_INPUT) { | 136 | if (hid->claimed & HID_CLAIMED_INPUT) { |
127 | switch (usage->hid) { | 137 | switch (usage->hid) { |
128 | 138 | case 0xff000001: | |
129 | case HID_DG_INRANGE: | 139 | /* Tag indicating the start of a multitouch group */ |
130 | if (field->application & 0x3) | 140 | nd->reading_mt = 1; |
131 | nd->pen_active = (value != 0); | 141 | nd->first_contact_confidence = 0; |
132 | else | 142 | break; |
133 | nd->finger_active = (value != 0); | 143 | case HID_DG_CONFIDENCE: |
134 | return 0; | 144 | nd->confidence = value; |
135 | 145 | break; | |
136 | case HID_DG_INVERT: | ||
137 | nd->inverted = value; | ||
138 | return 0; | ||
139 | |||
140 | case HID_GD_X: | 146 | case HID_GD_X: |
141 | nd->x = value; | 147 | nd->x = value; |
142 | nd->reading_a_point = 1; | 148 | /* Clear the contact footer */ |
149 | nd->mt_foot_count = 0; | ||
143 | break; | 150 | break; |
144 | case HID_GD_Y: | 151 | case HID_GD_Y: |
145 | nd->y = value; | 152 | nd->y = value; |
146 | break; | 153 | break; |
147 | case HID_DG_CONTACTID: | 154 | case HID_DG_CONTACTID: |
148 | nd->id = value; | 155 | nd->id = value; |
149 | /* we receive this only when in multitouch mode */ | ||
150 | nd->found_contact_id = 1; | ||
151 | break; | 156 | break; |
152 | case HID_DG_WIDTH: | 157 | case HID_DG_WIDTH: |
153 | nd->w = value; | 158 | nd->w = value; |
@@ -159,35 +164,13 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
159 | * report received in a finger event. We want | 164 | * report received in a finger event. We want |
160 | * to emit a normal (X, Y) position | 165 | * to emit a normal (X, Y) position |
161 | */ | 166 | */ |
162 | if (!nd->found_contact_id) { | 167 | if (!nd->reading_mt) { |
163 | if (nd->pen_active && nd->finger_active) { | 168 | input_report_key(input, BTN_TOOL_DOUBLETAP, |
164 | input_report_key(input, BTN_TOOL_DOUBLETAP, 0); | 169 | (nd->confidence != 0)); |
165 | input_report_key(input, BTN_TOOL_DOUBLETAP, 1); | ||
166 | } | ||
167 | input_event(input, EV_ABS, ABS_X, nd->x); | 170 | input_event(input, EV_ABS, ABS_X, nd->x); |
168 | input_event(input, EV_ABS, ABS_Y, nd->y); | 171 | input_event(input, EV_ABS, ABS_Y, nd->y); |
169 | } | 172 | } |
170 | break; | 173 | break; |
171 | case HID_DG_TIPPRESSURE: | ||
172 | /* | ||
173 | * when in single touch mode, this is the last | ||
174 | * report received in a pen event. We want | ||
175 | * to emit a normal (X, Y) position | ||
176 | */ | ||
177 | if (! nd->found_contact_id) { | ||
178 | if (nd->pen_active && nd->finger_active) { | ||
179 | input_report_key(input, | ||
180 | nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN | ||
181 | , 0); | ||
182 | input_report_key(input, | ||
183 | nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN | ||
184 | , 1); | ||
185 | } | ||
186 | input_event(input, EV_ABS, ABS_X, nd->x); | ||
187 | input_event(input, EV_ABS, ABS_Y, nd->y); | ||
188 | input_event(input, EV_ABS, ABS_PRESSURE, value); | ||
189 | } | ||
190 | break; | ||
191 | case 0xff000002: | 174 | case 0xff000002: |
192 | /* | 175 | /* |
193 | * we receive this when the device is in multitouch | 176 | * we receive this when the device is in multitouch |
@@ -195,10 +178,34 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
195 | * this usage tells if the contact point is real | 178 | * this usage tells if the contact point is real |
196 | * or a placeholder | 179 | * or a placeholder |
197 | */ | 180 | */ |
198 | if (!nd->reading_a_point || value != 1) | 181 | |
182 | /* Shouldn't get more than 4 footer packets, so skip */ | ||
183 | if (nd->mt_foot_count >= 4) | ||
199 | break; | 184 | break; |
185 | |||
186 | nd->mt_footer[nd->mt_foot_count++] = value; | ||
187 | |||
188 | /* if the footer isn't complete break */ | ||
189 | if (nd->mt_foot_count != 4) | ||
190 | break; | ||
191 | |||
192 | /* Pen activity signal, trigger end of touch. */ | ||
193 | if (nd->mt_footer[2]) { | ||
194 | nd->confidence = 0; | ||
195 | break; | ||
196 | } | ||
197 | |||
198 | /* If the contact was invalid */ | ||
199 | if (!(nd->confidence && nd->mt_footer[0]) | ||
200 | || nd->w <= 250 | ||
201 | || nd->h <= 190) { | ||
202 | nd->confidence = 0; | ||
203 | break; | ||
204 | } | ||
205 | |||
200 | /* emit a normal (X, Y) for the first point only */ | 206 | /* emit a normal (X, Y) for the first point only */ |
201 | if (nd->id == 0) { | 207 | if (nd->id == 0) { |
208 | nd->first_contact_confidence = nd->confidence; | ||
202 | input_event(input, EV_ABS, ABS_X, nd->x); | 209 | input_event(input, EV_ABS, ABS_X, nd->x); |
203 | input_event(input, EV_ABS, ABS_Y, nd->y); | 210 | input_event(input, EV_ABS, ABS_Y, nd->y); |
204 | } | 211 | } |
@@ -220,8 +227,39 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
220 | ABS_MT_TOUCH_MINOR, nd->w); | 227 | ABS_MT_TOUCH_MINOR, nd->w); |
221 | } | 228 | } |
222 | input_mt_sync(field->hidinput->input); | 229 | input_mt_sync(field->hidinput->input); |
223 | nd->reading_a_point = 0; | 230 | break; |
224 | nd->found_contact_id = 0; | 231 | |
232 | case HID_DG_CONTACTCOUNT: /* End of a multitouch group */ | ||
233 | if (!nd->reading_mt) | ||
234 | break; | ||
235 | |||
236 | nd->reading_mt = 0; | ||
237 | |||
238 | if (nd->first_contact_confidence) { | ||
239 | switch (value) { | ||
240 | case 0: /* for single touch devices */ | ||
241 | case 1: | ||
242 | input_report_key(input, | ||
243 | BTN_TOOL_DOUBLETAP, 1); | ||
244 | break; | ||
245 | case 2: | ||
246 | input_report_key(input, | ||
247 | BTN_TOOL_TRIPLETAP, 1); | ||
248 | break; | ||
249 | case 3: | ||
250 | default: | ||
251 | input_report_key(input, | ||
252 | BTN_TOOL_QUADTAP, 1); | ||
253 | } | ||
254 | input_report_key(input, BTN_TOUCH, 1); | ||
255 | } else { | ||
256 | input_report_key(input, | ||
257 | BTN_TOOL_DOUBLETAP, 0); | ||
258 | input_report_key(input, | ||
259 | BTN_TOOL_TRIPLETAP, 0); | ||
260 | input_report_key(input, | ||
261 | BTN_TOOL_QUADTAP, 0); | ||
262 | } | ||
225 | break; | 263 | break; |
226 | 264 | ||
227 | default: | 265 | default: |
@@ -231,8 +269,8 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
231 | } | 269 | } |
232 | 270 | ||
233 | /* we have handled the hidinput part, now remains hiddev */ | 271 | /* we have handled the hidinput part, now remains hiddev */ |
234 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | 272 | if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_hid_event) |
235 | hid->hiddev_hid_event(hid, field, usage, value); | 273 | hid->hiddev_hid_event(hid, field, usage, value); |
236 | 274 | ||
237 | return 1; | 275 | return 1; |
238 | } | 276 | } |
@@ -241,23 +279,67 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
241 | { | 279 | { |
242 | int ret; | 280 | int ret; |
243 | struct ntrig_data *nd; | 281 | struct ntrig_data *nd; |
282 | struct hid_input *hidinput; | ||
283 | struct input_dev *input; | ||
284 | |||
285 | if (id->driver_data) | ||
286 | hdev->quirks |= HID_QUIRK_MULTI_INPUT; | ||
244 | 287 | ||
245 | nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); | 288 | nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); |
246 | if (!nd) { | 289 | if (!nd) { |
247 | dev_err(&hdev->dev, "cannot allocate N-Trig data\n"); | 290 | dev_err(&hdev->dev, "cannot allocate N-Trig data\n"); |
248 | return -ENOMEM; | 291 | return -ENOMEM; |
249 | } | 292 | } |
250 | nd->reading_a_point = 0; | 293 | |
251 | nd->found_contact_id = 0; | 294 | nd->reading_mt = 0; |
252 | hid_set_drvdata(hdev, nd); | 295 | hid_set_drvdata(hdev, nd); |
253 | 296 | ||
254 | ret = hid_parse(hdev); | 297 | ret = hid_parse(hdev); |
255 | if (!ret) | 298 | if (ret) { |
256 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | 299 | dev_err(&hdev->dev, "parse failed\n"); |
300 | goto err_free; | ||
301 | } | ||
302 | |||
303 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); | ||
304 | if (ret) { | ||
305 | dev_err(&hdev->dev, "hw start failed\n"); | ||
306 | goto err_free; | ||
307 | } | ||
257 | 308 | ||
258 | if (ret) | ||
259 | kfree (nd); | ||
260 | 309 | ||
310 | list_for_each_entry(hidinput, &hdev->inputs, list) { | ||
311 | input = hidinput->input; | ||
312 | switch (hidinput->report->field[0]->application) { | ||
313 | case HID_DG_PEN: | ||
314 | input->name = "N-Trig Pen"; | ||
315 | break; | ||
316 | case HID_DG_TOUCHSCREEN: | ||
317 | __clear_bit(BTN_TOOL_PEN, input->keybit); | ||
318 | /* | ||
319 | * A little something special to enable | ||
320 | * two and three finger taps. | ||
321 | */ | ||
322 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); | ||
323 | __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); | ||
324 | __set_bit(BTN_TOOL_QUADTAP, input->keybit); | ||
325 | /* | ||
326 | * The physical touchscreen (single touch) | ||
327 | * input has a value for physical, whereas | ||
328 | * the multitouch only has logical input | ||
329 | * fields. | ||
330 | */ | ||
331 | input->name = | ||
332 | (hidinput->report->field[0] | ||
333 | ->physical) ? | ||
334 | "N-Trig Touchscreen" : | ||
335 | "N-Trig MultiTouch"; | ||
336 | break; | ||
337 | } | ||
338 | } | ||
339 | |||
340 | return 0; | ||
341 | err_free: | ||
342 | kfree(nd); | ||
261 | return ret; | 343 | return ret; |
262 | } | 344 | } |
263 | 345 | ||
@@ -276,7 +358,7 @@ MODULE_DEVICE_TABLE(hid, ntrig_devices); | |||
276 | 358 | ||
277 | static const struct hid_usage_id ntrig_grabbed_usages[] = { | 359 | static const struct hid_usage_id ntrig_grabbed_usages[] = { |
278 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | 360 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, |
279 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} | 361 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 } |
280 | }; | 362 | }; |
281 | 363 | ||
282 | static struct hid_driver ntrig_driver = { | 364 | static struct hid_driver ntrig_driver = { |
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c new file mode 100644 index 000000000000..aa9a960f73a4 --- /dev/null +++ b/drivers/hid/hid-ortek.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). | ||
3 | * Fixes LogicalMaximum error in USB report description, see | ||
4 | * http://bugzilla.kernel.org/show_bug.cgi?id=14787 | ||
5 | * | ||
6 | * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/hid.h> | ||
18 | #include <linux/module.h> | ||
19 | |||
20 | #include "hid-ids.h" | ||
21 | |||
22 | static void ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, | ||
23 | unsigned int rsize) | ||
24 | { | ||
25 | if (rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { | ||
26 | dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 " | ||
27 | "report descriptor.\n"); | ||
28 | rdesc[55] = 0x92; | ||
29 | } | ||
30 | } | ||
31 | |||
32 | static const struct hid_device_id ortek_devices[] = { | ||
33 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | ||
34 | { } | ||
35 | }; | ||
36 | MODULE_DEVICE_TABLE(hid, ortek_devices); | ||
37 | |||
38 | static struct hid_driver ortek_driver = { | ||
39 | .name = "ortek", | ||
40 | .id_table = ortek_devices, | ||
41 | .report_fixup = ortek_report_fixup | ||
42 | }; | ||
43 | |||
44 | static int __init ortek_init(void) | ||
45 | { | ||
46 | return hid_register_driver(&ortek_driver); | ||
47 | } | ||
48 | |||
49 | static void __exit ortek_exit(void) | ||
50 | { | ||
51 | hid_unregister_driver(&ortek_driver); | ||
52 | } | ||
53 | |||
54 | module_init(ortek_init); | ||
55 | module_exit(ortek_exit); | ||
56 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c new file mode 100644 index 000000000000..01dd51c4986c --- /dev/null +++ b/drivers/hid/hid-quanta.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * HID driver for Quanta Optical Touch dual-touch panels | ||
3 | * | ||
4 | * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/hid.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
20 | MODULE_DESCRIPTION("Quanta dual-touch panel"); | ||
21 | MODULE_LICENSE("GPL"); | ||
22 | |||
23 | #include "hid-ids.h" | ||
24 | |||
25 | struct quanta_data { | ||
26 | __u16 x, y; | ||
27 | __u8 id; | ||
28 | bool valid; /* valid finger data, or just placeholder? */ | ||
29 | bool first; /* is this the first finger in this frame? */ | ||
30 | bool activity_now; /* at least one active finger in this frame? */ | ||
31 | bool activity; /* at least one active finger previously? */ | ||
32 | }; | ||
33 | |||
34 | static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
35 | struct hid_field *field, struct hid_usage *usage, | ||
36 | unsigned long **bit, int *max) | ||
37 | { | ||
38 | switch (usage->hid & HID_USAGE_PAGE) { | ||
39 | |||
40 | case HID_UP_GENDESK: | ||
41 | switch (usage->hid) { | ||
42 | case HID_GD_X: | ||
43 | hid_map_usage(hi, usage, bit, max, | ||
44 | EV_ABS, ABS_MT_POSITION_X); | ||
45 | /* touchscreen emulation */ | ||
46 | input_set_abs_params(hi->input, ABS_X, | ||
47 | field->logical_minimum, | ||
48 | field->logical_maximum, 0, 0); | ||
49 | return 1; | ||
50 | case HID_GD_Y: | ||
51 | hid_map_usage(hi, usage, bit, max, | ||
52 | EV_ABS, ABS_MT_POSITION_Y); | ||
53 | /* touchscreen emulation */ | ||
54 | input_set_abs_params(hi->input, ABS_Y, | ||
55 | field->logical_minimum, | ||
56 | field->logical_maximum, 0, 0); | ||
57 | return 1; | ||
58 | } | ||
59 | return 0; | ||
60 | |||
61 | case HID_UP_DIGITIZER: | ||
62 | switch (usage->hid) { | ||
63 | case HID_DG_CONFIDENCE: | ||
64 | case HID_DG_TIPSWITCH: | ||
65 | case HID_DG_INPUTMODE: | ||
66 | case HID_DG_DEVICEINDEX: | ||
67 | case HID_DG_CONTACTCOUNT: | ||
68 | case HID_DG_CONTACTMAX: | ||
69 | case HID_DG_TIPPRESSURE: | ||
70 | case HID_DG_WIDTH: | ||
71 | case HID_DG_HEIGHT: | ||
72 | return -1; | ||
73 | case HID_DG_INRANGE: | ||
74 | /* touchscreen emulation */ | ||
75 | hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); | ||
76 | return 1; | ||
77 | case HID_DG_CONTACTID: | ||
78 | hid_map_usage(hi, usage, bit, max, | ||
79 | EV_ABS, ABS_MT_TRACKING_ID); | ||
80 | return 1; | ||
81 | } | ||
82 | return 0; | ||
83 | |||
84 | case 0xff000000: | ||
85 | /* ignore vendor-specific features */ | ||
86 | return -1; | ||
87 | } | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
93 | struct hid_field *field, struct hid_usage *usage, | ||
94 | unsigned long **bit, int *max) | ||
95 | { | ||
96 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
97 | clear_bit(usage->code, *bit); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * this function is called when a whole finger has been parsed, | ||
104 | * so that it can decide what to send to the input layer. | ||
105 | */ | ||
106 | static void quanta_filter_event(struct quanta_data *td, struct input_dev *input) | ||
107 | { | ||
108 | |||
109 | td->first = !td->first; /* touchscreen emulation */ | ||
110 | |||
111 | if (!td->valid) { | ||
112 | /* | ||
113 | * touchscreen emulation: if no finger in this frame is valid | ||
114 | * and there previously was finger activity, this is a release | ||
115 | */ | ||
116 | if (!td->first && !td->activity_now && td->activity) { | ||
117 | input_event(input, EV_KEY, BTN_TOUCH, 0); | ||
118 | td->activity = false; | ||
119 | } | ||
120 | return; | ||
121 | } | ||
122 | |||
123 | input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id); | ||
124 | input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); | ||
125 | input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); | ||
126 | |||
127 | input_mt_sync(input); | ||
128 | td->valid = false; | ||
129 | |||
130 | /* touchscreen emulation: if first active finger in this frame... */ | ||
131 | if (!td->activity_now) { | ||
132 | /* if there was no previous activity, emit touch event */ | ||
133 | if (!td->activity) { | ||
134 | input_event(input, EV_KEY, BTN_TOUCH, 1); | ||
135 | td->activity = true; | ||
136 | } | ||
137 | td->activity_now = true; | ||
138 | /* and in any case this is our preferred finger */ | ||
139 | input_event(input, EV_ABS, ABS_X, td->x); | ||
140 | input_event(input, EV_ABS, ABS_Y, td->y); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | |||
145 | static int quanta_event(struct hid_device *hid, struct hid_field *field, | ||
146 | struct hid_usage *usage, __s32 value) | ||
147 | { | ||
148 | struct quanta_data *td = hid_get_drvdata(hid); | ||
149 | |||
150 | if (hid->claimed & HID_CLAIMED_INPUT) { | ||
151 | struct input_dev *input = field->hidinput->input; | ||
152 | |||
153 | switch (usage->hid) { | ||
154 | case HID_DG_INRANGE: | ||
155 | td->valid = !!value; | ||
156 | break; | ||
157 | case HID_GD_X: | ||
158 | td->x = value; | ||
159 | break; | ||
160 | case HID_GD_Y: | ||
161 | td->y = value; | ||
162 | quanta_filter_event(td, input); | ||
163 | break; | ||
164 | case HID_DG_CONTACTID: | ||
165 | td->id = value; | ||
166 | break; | ||
167 | case HID_DG_CONTACTCOUNT: | ||
168 | /* touch emulation: this is the last field in a frame */ | ||
169 | td->first = false; | ||
170 | td->activity_now = false; | ||
171 | break; | ||
172 | case HID_DG_CONFIDENCE: | ||
173 | case HID_DG_TIPSWITCH: | ||
174 | /* avoid interference from generic hidinput handling */ | ||
175 | break; | ||
176 | |||
177 | default: | ||
178 | /* fallback to the generic hidinput handling */ | ||
179 | return 0; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | /* we have handled the hidinput part, now remains hiddev */ | ||
184 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | ||
185 | hid->hiddev_hid_event(hid, field, usage, value); | ||
186 | |||
187 | return 1; | ||
188 | } | ||
189 | |||
190 | static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id) | ||
191 | { | ||
192 | int ret; | ||
193 | struct quanta_data *td; | ||
194 | |||
195 | td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL); | ||
196 | if (!td) { | ||
197 | dev_err(&hdev->dev, "cannot allocate Quanta Touch data\n"); | ||
198 | return -ENOMEM; | ||
199 | } | ||
200 | td->valid = false; | ||
201 | td->activity = false; | ||
202 | td->activity_now = false; | ||
203 | td->first = false; | ||
204 | hid_set_drvdata(hdev, td); | ||
205 | |||
206 | ret = hid_parse(hdev); | ||
207 | if (!ret) | ||
208 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
209 | |||
210 | if (ret) | ||
211 | kfree(td); | ||
212 | |||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | static void quanta_remove(struct hid_device *hdev) | ||
217 | { | ||
218 | hid_hw_stop(hdev); | ||
219 | kfree(hid_get_drvdata(hdev)); | ||
220 | hid_set_drvdata(hdev, NULL); | ||
221 | } | ||
222 | |||
223 | static const struct hid_device_id quanta_devices[] = { | ||
224 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, | ||
225 | USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, | ||
226 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, | ||
227 | USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, | ||
228 | { } | ||
229 | }; | ||
230 | MODULE_DEVICE_TABLE(hid, quanta_devices); | ||
231 | |||
232 | static const struct hid_usage_id quanta_grabbed_usages[] = { | ||
233 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | ||
234 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} | ||
235 | }; | ||
236 | |||
237 | static struct hid_driver quanta_driver = { | ||
238 | .name = "quanta-touch", | ||
239 | .id_table = quanta_devices, | ||
240 | .probe = quanta_probe, | ||
241 | .remove = quanta_remove, | ||
242 | .input_mapping = quanta_input_mapping, | ||
243 | .input_mapped = quanta_input_mapped, | ||
244 | .usage_table = quanta_grabbed_usages, | ||
245 | .event = quanta_event, | ||
246 | }; | ||
247 | |||
248 | static int __init quanta_init(void) | ||
249 | { | ||
250 | return hid_register_driver(&quanta_driver); | ||
251 | } | ||
252 | |||
253 | static void __exit quanta_exit(void) | ||
254 | { | ||
255 | hid_unregister_driver(&quanta_driver); | ||
256 | } | ||
257 | |||
258 | module_init(quanta_init); | ||
259 | module_exit(quanta_exit); | ||
260 | |||
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 4e8450228a24..9bf00d77d92b 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
@@ -48,7 +48,7 @@ static void sony_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
48 | * to "operational". Without this, the ps3 controller will not report any | 48 | * to "operational". Without this, the ps3 controller will not report any |
49 | * events. | 49 | * events. |
50 | */ | 50 | */ |
51 | static int sony_set_operational(struct hid_device *hdev) | 51 | static int sony_set_operational_usb(struct hid_device *hdev) |
52 | { | 52 | { |
53 | struct usb_interface *intf = to_usb_interface(hdev->dev.parent); | 53 | struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
54 | struct usb_device *dev = interface_to_usbdev(intf); | 54 | struct usb_device *dev = interface_to_usbdev(intf); |
@@ -73,6 +73,12 @@ static int sony_set_operational(struct hid_device *hdev) | |||
73 | return ret; | 73 | return ret; |
74 | } | 74 | } |
75 | 75 | ||
76 | static int sony_set_operational_bt(struct hid_device *hdev) | ||
77 | { | ||
78 | unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 }; | ||
79 | return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); | ||
80 | } | ||
81 | |||
76 | static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | 82 | static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) |
77 | { | 83 | { |
78 | int ret; | 84 | int ret; |
@@ -81,7 +87,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
81 | 87 | ||
82 | sc = kzalloc(sizeof(*sc), GFP_KERNEL); | 88 | sc = kzalloc(sizeof(*sc), GFP_KERNEL); |
83 | if (sc == NULL) { | 89 | if (sc == NULL) { |
84 | dev_err(&hdev->dev, "can't alloc apple descriptor\n"); | 90 | dev_err(&hdev->dev, "can't alloc sony descriptor\n"); |
85 | return -ENOMEM; | 91 | return -ENOMEM; |
86 | } | 92 | } |
87 | 93 | ||
@@ -101,7 +107,17 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
101 | goto err_free; | 107 | goto err_free; |
102 | } | 108 | } |
103 | 109 | ||
104 | ret = sony_set_operational(hdev); | 110 | switch (hdev->bus) { |
111 | case BUS_USB: | ||
112 | ret = sony_set_operational_usb(hdev); | ||
113 | break; | ||
114 | case BUS_BLUETOOTH: | ||
115 | ret = sony_set_operational_bt(hdev); | ||
116 | break; | ||
117 | default: | ||
118 | ret = 0; | ||
119 | } | ||
120 | |||
105 | if (ret < 0) | 121 | if (ret < 0) |
106 | goto err_stop; | 122 | goto err_stop; |
107 | 123 | ||
@@ -121,6 +137,7 @@ static void sony_remove(struct hid_device *hdev) | |||
121 | 137 | ||
122 | static const struct hid_device_id sony_devices[] = { | 138 | static const struct hid_device_id sony_devices[] = { |
123 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | 139 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, |
140 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | ||
124 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), | 141 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), |
125 | .driver_data = VAIO_RDESC_CONSTANT }, | 142 | .driver_data = VAIO_RDESC_CONSTANT }, |
126 | { } | 143 | { } |
diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c new file mode 100644 index 000000000000..2e592a06654e --- /dev/null +++ b/drivers/hid/hid-stantum.c | |||
@@ -0,0 +1,283 @@ | |||
1 | /* | ||
2 | * HID driver for Stantum multitouch panels | ||
3 | * | ||
4 | * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/hid.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
20 | MODULE_DESCRIPTION("Stantum HID multitouch panels"); | ||
21 | MODULE_LICENSE("GPL"); | ||
22 | |||
23 | #include "hid-ids.h" | ||
24 | |||
25 | struct stantum_data { | ||
26 | __s32 x, y, z, w, h; /* x, y, pressure, width, height */ | ||
27 | __u16 id; /* touch id */ | ||
28 | bool valid; /* valid finger data, or just placeholder? */ | ||
29 | bool first; /* first finger in the HID packet? */ | ||
30 | bool activity; /* at least one active finger so far? */ | ||
31 | }; | ||
32 | |||
33 | static int stantum_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
34 | struct hid_field *field, struct hid_usage *usage, | ||
35 | unsigned long **bit, int *max) | ||
36 | { | ||
37 | switch (usage->hid & HID_USAGE_PAGE) { | ||
38 | |||
39 | case HID_UP_GENDESK: | ||
40 | switch (usage->hid) { | ||
41 | case HID_GD_X: | ||
42 | hid_map_usage(hi, usage, bit, max, | ||
43 | EV_ABS, ABS_MT_POSITION_X); | ||
44 | /* touchscreen emulation */ | ||
45 | input_set_abs_params(hi->input, ABS_X, | ||
46 | field->logical_minimum, | ||
47 | field->logical_maximum, 0, 0); | ||
48 | return 1; | ||
49 | case HID_GD_Y: | ||
50 | hid_map_usage(hi, usage, bit, max, | ||
51 | EV_ABS, ABS_MT_POSITION_Y); | ||
52 | /* touchscreen emulation */ | ||
53 | input_set_abs_params(hi->input, ABS_Y, | ||
54 | field->logical_minimum, | ||
55 | field->logical_maximum, 0, 0); | ||
56 | return 1; | ||
57 | } | ||
58 | return 0; | ||
59 | |||
60 | case HID_UP_DIGITIZER: | ||
61 | switch (usage->hid) { | ||
62 | case HID_DG_INRANGE: | ||
63 | case HID_DG_CONFIDENCE: | ||
64 | case HID_DG_INPUTMODE: | ||
65 | case HID_DG_DEVICEINDEX: | ||
66 | case HID_DG_CONTACTCOUNT: | ||
67 | case HID_DG_CONTACTMAX: | ||
68 | return -1; | ||
69 | |||
70 | case HID_DG_TIPSWITCH: | ||
71 | /* touchscreen emulation */ | ||
72 | hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); | ||
73 | return 1; | ||
74 | |||
75 | case HID_DG_WIDTH: | ||
76 | hid_map_usage(hi, usage, bit, max, | ||
77 | EV_ABS, ABS_MT_TOUCH_MAJOR); | ||
78 | return 1; | ||
79 | case HID_DG_HEIGHT: | ||
80 | hid_map_usage(hi, usage, bit, max, | ||
81 | EV_ABS, ABS_MT_TOUCH_MINOR); | ||
82 | input_set_abs_params(hi->input, ABS_MT_ORIENTATION, | ||
83 | 1, 1, 0, 0); | ||
84 | return 1; | ||
85 | case HID_DG_TIPPRESSURE: | ||
86 | hid_map_usage(hi, usage, bit, max, | ||
87 | EV_ABS, ABS_MT_PRESSURE); | ||
88 | return 1; | ||
89 | |||
90 | case HID_DG_CONTACTID: | ||
91 | hid_map_usage(hi, usage, bit, max, | ||
92 | EV_ABS, ABS_MT_TRACKING_ID); | ||
93 | return 1; | ||
94 | |||
95 | } | ||
96 | return 0; | ||
97 | |||
98 | case 0xff000000: | ||
99 | /* no input-oriented meaning */ | ||
100 | return -1; | ||
101 | } | ||
102 | |||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static int stantum_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
107 | struct hid_field *field, struct hid_usage *usage, | ||
108 | unsigned long **bit, int *max) | ||
109 | { | ||
110 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
111 | clear_bit(usage->code, *bit); | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * this function is called when a whole finger has been parsed, | ||
118 | * so that it can decide what to send to the input layer. | ||
119 | */ | ||
120 | static void stantum_filter_event(struct stantum_data *sd, | ||
121 | struct input_dev *input) | ||
122 | { | ||
123 | bool wide; | ||
124 | |||
125 | if (!sd->valid) { | ||
126 | /* | ||
127 | * touchscreen emulation: if the first finger is not valid and | ||
128 | * there previously was finger activity, this is a release | ||
129 | */ | ||
130 | if (sd->first && sd->activity) { | ||
131 | input_event(input, EV_KEY, BTN_TOUCH, 0); | ||
132 | sd->activity = false; | ||
133 | } | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | input_event(input, EV_ABS, ABS_MT_TRACKING_ID, sd->id); | ||
138 | input_event(input, EV_ABS, ABS_MT_POSITION_X, sd->x); | ||
139 | input_event(input, EV_ABS, ABS_MT_POSITION_Y, sd->y); | ||
140 | |||
141 | wide = (sd->w > sd->h); | ||
142 | input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide); | ||
143 | input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, wide ? sd->w : sd->h); | ||
144 | input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, wide ? sd->h : sd->w); | ||
145 | |||
146 | input_event(input, EV_ABS, ABS_MT_PRESSURE, sd->z); | ||
147 | |||
148 | input_mt_sync(input); | ||
149 | sd->valid = false; | ||
150 | |||
151 | /* touchscreen emulation */ | ||
152 | if (sd->first) { | ||
153 | if (!sd->activity) { | ||
154 | input_event(input, EV_KEY, BTN_TOUCH, 1); | ||
155 | sd->activity = true; | ||
156 | } | ||
157 | input_event(input, EV_ABS, ABS_X, sd->x); | ||
158 | input_event(input, EV_ABS, ABS_Y, sd->y); | ||
159 | } | ||
160 | sd->first = false; | ||
161 | } | ||
162 | |||
163 | |||
164 | static int stantum_event(struct hid_device *hid, struct hid_field *field, | ||
165 | struct hid_usage *usage, __s32 value) | ||
166 | { | ||
167 | struct stantum_data *sd = hid_get_drvdata(hid); | ||
168 | |||
169 | if (hid->claimed & HID_CLAIMED_INPUT) { | ||
170 | struct input_dev *input = field->hidinput->input; | ||
171 | |||
172 | switch (usage->hid) { | ||
173 | case HID_DG_INRANGE: | ||
174 | /* this is the last field in a finger */ | ||
175 | stantum_filter_event(sd, input); | ||
176 | break; | ||
177 | case HID_DG_WIDTH: | ||
178 | sd->w = value; | ||
179 | break; | ||
180 | case HID_DG_HEIGHT: | ||
181 | sd->h = value; | ||
182 | break; | ||
183 | case HID_GD_X: | ||
184 | sd->x = value; | ||
185 | break; | ||
186 | case HID_GD_Y: | ||
187 | sd->y = value; | ||
188 | break; | ||
189 | case HID_DG_TIPPRESSURE: | ||
190 | sd->z = value; | ||
191 | break; | ||
192 | case HID_DG_CONTACTID: | ||
193 | sd->id = value; | ||
194 | break; | ||
195 | case HID_DG_CONFIDENCE: | ||
196 | sd->valid = !!value; | ||
197 | break; | ||
198 | case 0xff000002: | ||
199 | /* this comes only before the first finger */ | ||
200 | sd->first = true; | ||
201 | break; | ||
202 | |||
203 | default: | ||
204 | /* ignore the others */ | ||
205 | return 1; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | /* we have handled the hidinput part, now remains hiddev */ | ||
210 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | ||
211 | hid->hiddev_hid_event(hid, field, usage, value); | ||
212 | |||
213 | return 1; | ||
214 | } | ||
215 | |||
216 | static int stantum_probe(struct hid_device *hdev, | ||
217 | const struct hid_device_id *id) | ||
218 | { | ||
219 | int ret; | ||
220 | struct stantum_data *sd; | ||
221 | |||
222 | sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL); | ||
223 | if (!sd) { | ||
224 | dev_err(&hdev->dev, "cannot allocate Stantum data\n"); | ||
225 | return -ENOMEM; | ||
226 | } | ||
227 | sd->valid = false; | ||
228 | sd->first = false; | ||
229 | sd->activity = false; | ||
230 | hid_set_drvdata(hdev, sd); | ||
231 | |||
232 | ret = hid_parse(hdev); | ||
233 | if (!ret) | ||
234 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
235 | |||
236 | if (ret) | ||
237 | kfree(sd); | ||
238 | |||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | static void stantum_remove(struct hid_device *hdev) | ||
243 | { | ||
244 | hid_hw_stop(hdev); | ||
245 | kfree(hid_get_drvdata(hdev)); | ||
246 | hid_set_drvdata(hdev, NULL); | ||
247 | } | ||
248 | |||
249 | static const struct hid_device_id stantum_devices[] = { | ||
250 | { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, | ||
251 | { } | ||
252 | }; | ||
253 | MODULE_DEVICE_TABLE(hid, stantum_devices); | ||
254 | |||
255 | static const struct hid_usage_id stantum_grabbed_usages[] = { | ||
256 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | ||
257 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} | ||
258 | }; | ||
259 | |||
260 | static struct hid_driver stantum_driver = { | ||
261 | .name = "stantum", | ||
262 | .id_table = stantum_devices, | ||
263 | .probe = stantum_probe, | ||
264 | .remove = stantum_remove, | ||
265 | .input_mapping = stantum_input_mapping, | ||
266 | .input_mapped = stantum_input_mapped, | ||
267 | .usage_table = stantum_grabbed_usages, | ||
268 | .event = stantum_event, | ||
269 | }; | ||
270 | |||
271 | static int __init stantum_init(void) | ||
272 | { | ||
273 | return hid_register_driver(&stantum_driver); | ||
274 | } | ||
275 | |||
276 | static void __exit stantum_exit(void) | ||
277 | { | ||
278 | hid_unregister_driver(&stantum_driver); | ||
279 | } | ||
280 | |||
281 | module_init(stantum_init); | ||
282 | module_exit(stantum_exit); | ||
283 | |||
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index 12dcda529201..8d3b46f5d149 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c | |||
@@ -156,7 +156,9 @@ static int wacom_probe(struct hid_device *hdev, | |||
156 | struct hid_input *hidinput; | 156 | struct hid_input *hidinput; |
157 | struct input_dev *input; | 157 | struct input_dev *input; |
158 | struct wacom_data *wdata; | 158 | struct wacom_data *wdata; |
159 | char rep_data[2]; | ||
159 | int ret; | 160 | int ret; |
161 | int limit; | ||
160 | 162 | ||
161 | wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); | 163 | wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); |
162 | if (wdata == NULL) { | 164 | if (wdata == NULL) { |
@@ -166,6 +168,7 @@ static int wacom_probe(struct hid_device *hdev, | |||
166 | 168 | ||
167 | hid_set_drvdata(hdev, wdata); | 169 | hid_set_drvdata(hdev, wdata); |
168 | 170 | ||
171 | /* Parse the HID report now */ | ||
169 | ret = hid_parse(hdev); | 172 | ret = hid_parse(hdev); |
170 | if (ret) { | 173 | if (ret) { |
171 | dev_err(&hdev->dev, "parse failed\n"); | 174 | dev_err(&hdev->dev, "parse failed\n"); |
@@ -178,6 +181,31 @@ static int wacom_probe(struct hid_device *hdev, | |||
178 | goto err_free; | 181 | goto err_free; |
179 | } | 182 | } |
180 | 183 | ||
184 | /* | ||
185 | * Note that if the raw queries fail, it's not a hard failure and it | ||
186 | * is safe to continue | ||
187 | */ | ||
188 | |||
189 | /* Set Wacom mode2 */ | ||
190 | rep_data[0] = 0x03; rep_data[1] = 0x00; | ||
191 | limit = 3; | ||
192 | do { | ||
193 | ret = hdev->hid_output_raw_report(hdev, rep_data, 2, | ||
194 | HID_FEATURE_REPORT); | ||
195 | } while (ret < 0 && limit-- > 0); | ||
196 | if (ret < 0) | ||
197 | dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret); | ||
198 | |||
199 | /* 0x06 - high reporting speed, 0x05 - low speed */ | ||
200 | rep_data[0] = 0x06; rep_data[1] = 0x00; | ||
201 | limit = 3; | ||
202 | do { | ||
203 | ret = hdev->hid_output_raw_report(hdev, rep_data, 2, | ||
204 | HID_FEATURE_REPORT); | ||
205 | } while (ret < 0 && limit-- > 0); | ||
206 | if (ret < 0) | ||
207 | dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret); | ||
208 | |||
181 | hidinput = list_entry(hdev->inputs.next, struct hid_input, list); | 209 | hidinput = list_entry(hdev->inputs.next, struct hid_input, list); |
182 | input = hidinput->input; | 210 | input = hidinput->input; |
183 | 211 | ||
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index cdd136942bca..d04476700b7b 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -134,7 +134,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t | |||
134 | goto out; | 134 | goto out; |
135 | } | 135 | } |
136 | 136 | ||
137 | ret = dev->hid_output_raw_report(dev, buf, count); | 137 | ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT); |
138 | out: | 138 | out: |
139 | kfree(buf); | 139 | kfree(buf); |
140 | return ret; | 140 | return ret; |
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index e2997a8d5e1b..56d06cd8075b 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> | 5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> |
6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc | 6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc |
7 | * Copyright (c) 2007-2008 Oliver Neukum | 7 | * Copyright (c) 2007-2008 Oliver Neukum |
8 | * Copyright (c) 2006-2009 Jiri Kosina | 8 | * Copyright (c) 2006-2010 Jiri Kosina |
9 | */ | 9 | */ |
10 | 10 | ||
11 | /* | 11 | /* |
@@ -316,6 +316,7 @@ static int hid_submit_out(struct hid_device *hid) | |||
316 | err_hid("usb_submit_urb(out) failed"); | 316 | err_hid("usb_submit_urb(out) failed"); |
317 | return -1; | 317 | return -1; |
318 | } | 318 | } |
319 | usbhid->last_out = jiffies; | ||
319 | } else { | 320 | } else { |
320 | /* | 321 | /* |
321 | * queue work to wake up the device. | 322 | * queue work to wake up the device. |
@@ -377,6 +378,7 @@ static int hid_submit_ctrl(struct hid_device *hid) | |||
377 | err_hid("usb_submit_urb(ctrl) failed"); | 378 | err_hid("usb_submit_urb(ctrl) failed"); |
378 | return -1; | 379 | return -1; |
379 | } | 380 | } |
381 | usbhid->last_ctrl = jiffies; | ||
380 | } else { | 382 | } else { |
381 | /* | 383 | /* |
382 | * queue work to wake up the device. | 384 | * queue work to wake up the device. |
@@ -512,9 +514,20 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re | |||
512 | usbhid->out[usbhid->outhead].report = report; | 514 | usbhid->out[usbhid->outhead].report = report; |
513 | usbhid->outhead = head; | 515 | usbhid->outhead = head; |
514 | 516 | ||
515 | if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) | 517 | if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) { |
516 | if (hid_submit_out(hid)) | 518 | if (hid_submit_out(hid)) |
517 | clear_bit(HID_OUT_RUNNING, &usbhid->iofl); | 519 | clear_bit(HID_OUT_RUNNING, &usbhid->iofl); |
520 | } else { | ||
521 | /* | ||
522 | * the queue is known to run | ||
523 | * but an earlier request may be stuck | ||
524 | * we may need to time out | ||
525 | * no race because this is called under | ||
526 | * spinlock | ||
527 | */ | ||
528 | if (time_after(jiffies, usbhid->last_out + HZ * 5)) | ||
529 | usb_unlink_urb(usbhid->urbout); | ||
530 | } | ||
518 | return; | 531 | return; |
519 | } | 532 | } |
520 | 533 | ||
@@ -535,9 +548,20 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re | |||
535 | usbhid->ctrl[usbhid->ctrlhead].dir = dir; | 548 | usbhid->ctrl[usbhid->ctrlhead].dir = dir; |
536 | usbhid->ctrlhead = head; | 549 | usbhid->ctrlhead = head; |
537 | 550 | ||
538 | if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) | 551 | if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) { |
539 | if (hid_submit_ctrl(hid)) | 552 | if (hid_submit_ctrl(hid)) |
540 | clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); | 553 | clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); |
554 | } else { | ||
555 | /* | ||
556 | * the queue is known to run | ||
557 | * but an earlier request may be stuck | ||
558 | * we may need to time out | ||
559 | * no race because this is called under | ||
560 | * spinlock | ||
561 | */ | ||
562 | if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) | ||
563 | usb_unlink_urb(usbhid->urbctrl); | ||
564 | } | ||
541 | } | 565 | } |
542 | 566 | ||
543 | void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) | 567 | void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) |
@@ -774,7 +798,8 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid) | |||
774 | return 0; | 798 | return 0; |
775 | } | 799 | } |
776 | 800 | ||
777 | static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count) | 801 | static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, |
802 | unsigned char report_type) | ||
778 | { | 803 | { |
779 | struct usbhid_device *usbhid = hid->driver_data; | 804 | struct usbhid_device *usbhid = hid->driver_data; |
780 | struct usb_device *dev = hid_to_usb_dev(hid); | 805 | struct usb_device *dev = hid_to_usb_dev(hid); |
@@ -785,7 +810,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co | |||
785 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 810 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
786 | HID_REQ_SET_REPORT, | 811 | HID_REQ_SET_REPORT, |
787 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | 812 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, |
788 | ((HID_OUTPUT_REPORT + 1) << 8) | *buf, | 813 | ((report_type + 1) << 8) | *buf, |
789 | interface->desc.bInterfaceNumber, buf + 1, count - 1, | 814 | interface->desc.bInterfaceNumber, buf + 1, count - 1, |
790 | USB_CTRL_SET_TIMEOUT); | 815 | USB_CTRL_SET_TIMEOUT); |
791 | 816 | ||
@@ -981,9 +1006,6 @@ static int usbhid_start(struct hid_device *hid) | |||
981 | 1006 | ||
982 | spin_lock_init(&usbhid->lock); | 1007 | spin_lock_init(&usbhid->lock); |
983 | 1008 | ||
984 | usbhid->intf = intf; | ||
985 | usbhid->ifnum = interface->desc.bInterfaceNumber; | ||
986 | |||
987 | usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); | 1009 | usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); |
988 | if (!usbhid->urbctrl) { | 1010 | if (!usbhid->urbctrl) { |
989 | ret = -ENOMEM; | 1011 | ret = -ENOMEM; |
@@ -1154,6 +1176,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * | |||
1154 | 1176 | ||
1155 | hid->driver_data = usbhid; | 1177 | hid->driver_data = usbhid; |
1156 | usbhid->hid = hid; | 1178 | usbhid->hid = hid; |
1179 | usbhid->intf = intf; | ||
1180 | usbhid->ifnum = interface->desc.bInterfaceNumber; | ||
1157 | 1181 | ||
1158 | ret = hid_add_device(hid); | 1182 | ret = hid_add_device(hid); |
1159 | if (ret) { | 1183 | if (ret) { |
@@ -1342,7 +1366,7 @@ static int hid_reset_resume(struct usb_interface *intf) | |||
1342 | 1366 | ||
1343 | #endif /* CONFIG_PM */ | 1367 | #endif /* CONFIG_PM */ |
1344 | 1368 | ||
1345 | static struct usb_device_id hid_usb_ids [] = { | 1369 | static const struct usb_device_id hid_usb_ids[] = { |
1346 | { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, | 1370 | { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, |
1347 | .bInterfaceClass = USB_INTERFACE_CLASS_HID }, | 1371 | .bInterfaceClass = USB_INTERFACE_CLASS_HID }, |
1348 | { } /* Terminating entry */ | 1372 | { } /* Terminating entry */ |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 38773dc2821b..7844280897d1 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -43,8 +43,10 @@ static const struct hid_blacklist { | |||
43 | 43 | ||
44 | { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, | 44 | { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, |
45 | 45 | ||
46 | { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, | ||
46 | { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, | 47 | { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, |
47 | { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, | 48 | { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, |
49 | { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, | ||
48 | 50 | ||
49 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, | 51 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, |
50 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, | 52 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, |
@@ -57,6 +59,7 @@ static const struct hid_blacklist { | |||
57 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, | 59 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, |
58 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 60 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
59 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, | 61 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
62 | { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, | ||
60 | { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, | 63 | { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, |
61 | { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, | 64 | { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, |
62 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, | 65 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, |
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index 08f505ca2e3d..ec20400c7f29 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h | |||
@@ -80,12 +80,14 @@ struct usbhid_device { | |||
80 | unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */ | 80 | unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */ |
81 | char *ctrlbuf; /* Control buffer */ | 81 | char *ctrlbuf; /* Control buffer */ |
82 | dma_addr_t ctrlbuf_dma; /* Control buffer dma */ | 82 | dma_addr_t ctrlbuf_dma; /* Control buffer dma */ |
83 | unsigned long last_ctrl; /* record of last output for timeouts */ | ||
83 | 84 | ||
84 | struct urb *urbout; /* Output URB */ | 85 | struct urb *urbout; /* Output URB */ |
85 | struct hid_output_fifo out[HID_CONTROL_FIFO_SIZE]; /* Output pipe fifo */ | 86 | struct hid_output_fifo out[HID_CONTROL_FIFO_SIZE]; /* Output pipe fifo */ |
86 | unsigned char outhead, outtail; /* Output pipe fifo head & tail */ | 87 | unsigned char outhead, outtail; /* Output pipe fifo head & tail */ |
87 | char *outbuf; /* Output buffer */ | 88 | char *outbuf; /* Output buffer */ |
88 | dma_addr_t outbuf_dma; /* Output buffer dma */ | 89 | dma_addr_t outbuf_dma; /* Output buffer dma */ |
90 | unsigned long last_out; /* record of last output for timeouts */ | ||
89 | 91 | ||
90 | spinlock_t lock; /* fifo spinlock */ | 92 | spinlock_t lock; /* fifo spinlock */ |
91 | unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ | 93 | unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ |
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig index 3464ebc4cdbc..452fde9edf86 100644 --- a/drivers/isdn/hisax/Kconfig +++ b/drivers/isdn/hisax/Kconfig | |||
@@ -109,7 +109,7 @@ config HISAX_16_3 | |||
109 | 109 | ||
110 | config HISAX_TELESPCI | 110 | config HISAX_TELESPCI |
111 | bool "Teles PCI" | 111 | bool "Teles PCI" |
112 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 112 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
113 | help | 113 | help |
114 | This enables HiSax support for the Teles PCI. | 114 | This enables HiSax support for the Teles PCI. |
115 | See <file:Documentation/isdn/README.HiSax> on how to configure it. | 115 | See <file:Documentation/isdn/README.HiSax> on how to configure it. |
@@ -237,7 +237,7 @@ config HISAX_MIC | |||
237 | 237 | ||
238 | config HISAX_NETJET | 238 | config HISAX_NETJET |
239 | bool "NETjet card" | 239 | bool "NETjet card" |
240 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 240 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
241 | help | 241 | help |
242 | This enables HiSax support for the NetJet from Traverse | 242 | This enables HiSax support for the NetJet from Traverse |
243 | Technologies. | 243 | Technologies. |
@@ -248,7 +248,7 @@ config HISAX_NETJET | |||
248 | 248 | ||
249 | config HISAX_NETJET_U | 249 | config HISAX_NETJET_U |
250 | bool "NETspider U card" | 250 | bool "NETspider U card" |
251 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 251 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
252 | help | 252 | help |
253 | This enables HiSax support for the Netspider U interface ISDN card | 253 | This enables HiSax support for the Netspider U interface ISDN card |
254 | from Traverse Technologies. | 254 | from Traverse Technologies. |
@@ -287,7 +287,7 @@ config HISAX_HSTSAPHIR | |||
287 | 287 | ||
288 | config HISAX_BKM_A4T | 288 | config HISAX_BKM_A4T |
289 | bool "Telekom A4T card" | 289 | bool "Telekom A4T card" |
290 | depends on PCI && PCI_LEGACY | 290 | depends on PCI |
291 | help | 291 | help |
292 | This enables HiSax support for the Telekom A4T card. | 292 | This enables HiSax support for the Telekom A4T card. |
293 | 293 | ||
@@ -297,7 +297,7 @@ config HISAX_BKM_A4T | |||
297 | 297 | ||
298 | config HISAX_SCT_QUADRO | 298 | config HISAX_SCT_QUADRO |
299 | bool "Scitel Quadro card" | 299 | bool "Scitel Quadro card" |
300 | depends on PCI && PCI_LEGACY | 300 | depends on PCI |
301 | help | 301 | help |
302 | This enables HiSax support for the Scitel Quadro card. | 302 | This enables HiSax support for the Scitel Quadro card. |
303 | 303 | ||
@@ -316,7 +316,7 @@ config HISAX_GAZEL | |||
316 | 316 | ||
317 | config HISAX_HFC_PCI | 317 | config HISAX_HFC_PCI |
318 | bool "HFC PCI-Bus cards" | 318 | bool "HFC PCI-Bus cards" |
319 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 319 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
320 | help | 320 | help |
321 | This enables HiSax support for the HFC-S PCI 2BDS0 based cards. | 321 | This enables HiSax support for the HFC-S PCI 2BDS0 based cards. |
322 | 322 | ||
@@ -325,7 +325,7 @@ config HISAX_HFC_PCI | |||
325 | 325 | ||
326 | config HISAX_W6692 | 326 | config HISAX_W6692 |
327 | bool "Winbond W6692 based cards" | 327 | bool "Winbond W6692 based cards" |
328 | depends on PCI && PCI_LEGACY | 328 | depends on PCI |
329 | help | 329 | help |
330 | This enables HiSax support for Winbond W6692 based PCI ISDN cards. | 330 | This enables HiSax support for Winbond W6692 based PCI ISDN cards. |
331 | 331 | ||
@@ -341,7 +341,7 @@ config HISAX_HFC_SX | |||
341 | 341 | ||
342 | config HISAX_ENTERNOW_PCI | 342 | config HISAX_ENTERNOW_PCI |
343 | bool "Formula-n enter:now PCI card" | 343 | bool "Formula-n enter:now PCI card" |
344 | depends on HISAX_NETJET && PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 344 | depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
345 | help | 345 | help |
346 | This enables HiSax support for the Formula-n enter:now PCI | 346 | This enables HiSax support for the Formula-n enter:now PCI |
347 | ISDN card. | 347 | ISDN card. |
@@ -412,7 +412,7 @@ config HISAX_HFC4S8S | |||
412 | 412 | ||
413 | config HISAX_FRITZ_PCIPNP | 413 | config HISAX_FRITZ_PCIPNP |
414 | tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)" | 414 | tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)" |
415 | depends on PCI && PCI_LEGACY && EXPERIMENTAL | 415 | depends on PCI && EXPERIMENTAL |
416 | help | 416 | help |
417 | This enables the driver for the AVM Fritz!Card PCI, | 417 | This enables the driver for the AVM Fritz!Card PCI, |
418 | Fritz!Card PCI v2 and Fritz!Card PnP. | 418 | Fritz!Card PCI v2 and Fritz!Card PnP. |
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c index 7cabc5a19492..14295a155e71 100644 --- a/drivers/isdn/hisax/avm_pci.c +++ b/drivers/isdn/hisax/avm_pci.c | |||
@@ -822,7 +822,7 @@ static int __devinit avm_pnp_setup(struct IsdnCardState *cs) | |||
822 | 822 | ||
823 | #endif /* __ISAPNP__ */ | 823 | #endif /* __ISAPNP__ */ |
824 | 824 | ||
825 | #ifndef CONFIG_PCI_LEGACY | 825 | #ifndef CONFIG_PCI |
826 | 826 | ||
827 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) | 827 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) |
828 | { | 828 | { |
@@ -835,7 +835,7 @@ static struct pci_dev *dev_avm __devinitdata = NULL; | |||
835 | 835 | ||
836 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) | 836 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) |
837 | { | 837 | { |
838 | if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM, | 838 | if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM, |
839 | PCI_DEVICE_ID_AVM_A1, dev_avm))) { | 839 | PCI_DEVICE_ID_AVM_A1, dev_avm))) { |
840 | 840 | ||
841 | if (pci_enable_device(dev_avm)) | 841 | if (pci_enable_device(dev_avm)) |
@@ -864,7 +864,7 @@ static int __devinit avm_pci_setup(struct IsdnCardState *cs) | |||
864 | return (1); | 864 | return (1); |
865 | } | 865 | } |
866 | 866 | ||
867 | #endif /* CONFIG_PCI_LEGACY */ | 867 | #endif /* CONFIG_PCI */ |
868 | 868 | ||
869 | int __devinit | 869 | int __devinit |
870 | setup_avm_pcipnp(struct IsdnCard *card) | 870 | setup_avm_pcipnp(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c index 9ca2ee54cc94..9f2009c0b69c 100644 --- a/drivers/isdn/hisax/bkm_a4t.c +++ b/drivers/isdn/hisax/bkm_a4t.c | |||
@@ -340,7 +340,7 @@ setup_bkm_a4t(struct IsdnCard *card) | |||
340 | } else | 340 | } else |
341 | return (0); | 341 | return (0); |
342 | 342 | ||
343 | while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN, | 343 | while ((dev_a4t = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN, |
344 | PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) { | 344 | PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) { |
345 | ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr); | 345 | ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr); |
346 | if (!ret) | 346 | if (!ret) |
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c index e1ff4717a8a6..e775706c60e3 100644 --- a/drivers/isdn/hisax/bkm_a8.c +++ b/drivers/isdn/hisax/bkm_a8.c | |||
@@ -301,7 +301,7 @@ setup_sct_quadro(struct IsdnCard *card) | |||
301 | (sub_vendor_id != PCI_VENDOR_ID_BERKOM))) | 301 | (sub_vendor_id != PCI_VENDOR_ID_BERKOM))) |
302 | return (0); | 302 | return (0); |
303 | if (cs->subtyp == SCT_1) { | 303 | if (cs->subtyp == SCT_1) { |
304 | while ((dev_a8 = pci_find_device(PCI_VENDOR_ID_PLX, | 304 | while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX, |
305 | PCI_DEVICE_ID_PLX_9050, dev_a8))) { | 305 | PCI_DEVICE_ID_PLX_9050, dev_a8))) { |
306 | 306 | ||
307 | sub_vendor_id = dev_a8->subsystem_vendor; | 307 | sub_vendor_id = dev_a8->subsystem_vendor; |
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c index 0b0c2e5d806b..780da9bda915 100644 --- a/drivers/isdn/hisax/diva.c +++ b/drivers/isdn/hisax/diva.c | |||
@@ -1148,7 +1148,7 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card) | |||
1148 | 1148 | ||
1149 | #endif /* ISAPNP */ | 1149 | #endif /* ISAPNP */ |
1150 | 1150 | ||
1151 | #ifdef CONFIG_PCI_LEGACY | 1151 | #ifdef CONFIG_PCI |
1152 | static struct pci_dev *dev_diva __devinitdata = NULL; | 1152 | static struct pci_dev *dev_diva __devinitdata = NULL; |
1153 | static struct pci_dev *dev_diva_u __devinitdata = NULL; | 1153 | static struct pci_dev *dev_diva_u __devinitdata = NULL; |
1154 | static struct pci_dev *dev_diva201 __devinitdata = NULL; | 1154 | static struct pci_dev *dev_diva201 __devinitdata = NULL; |
@@ -1159,21 +1159,21 @@ static int __devinit setup_diva_pci(struct IsdnCard *card) | |||
1159 | struct IsdnCardState *cs = card->cs; | 1159 | struct IsdnCardState *cs = card->cs; |
1160 | 1160 | ||
1161 | cs->subtyp = 0; | 1161 | cs->subtyp = 0; |
1162 | if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON, | 1162 | if ((dev_diva = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1163 | PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) { | 1163 | PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) { |
1164 | if (pci_enable_device(dev_diva)) | 1164 | if (pci_enable_device(dev_diva)) |
1165 | return(0); | 1165 | return(0); |
1166 | cs->subtyp = DIVA_PCI; | 1166 | cs->subtyp = DIVA_PCI; |
1167 | cs->irq = dev_diva->irq; | 1167 | cs->irq = dev_diva->irq; |
1168 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2); | 1168 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2); |
1169 | } else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON, | 1169 | } else if ((dev_diva_u = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1170 | PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) { | 1170 | PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) { |
1171 | if (pci_enable_device(dev_diva_u)) | 1171 | if (pci_enable_device(dev_diva_u)) |
1172 | return(0); | 1172 | return(0); |
1173 | cs->subtyp = DIVA_PCI; | 1173 | cs->subtyp = DIVA_PCI; |
1174 | cs->irq = dev_diva_u->irq; | 1174 | cs->irq = dev_diva_u->irq; |
1175 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2); | 1175 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2); |
1176 | } else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON, | 1176 | } else if ((dev_diva201 = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1177 | PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) { | 1177 | PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) { |
1178 | if (pci_enable_device(dev_diva201)) | 1178 | if (pci_enable_device(dev_diva201)) |
1179 | return(0); | 1179 | return(0); |
@@ -1183,7 +1183,7 @@ static int __devinit setup_diva_pci(struct IsdnCard *card) | |||
1183 | (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096); | 1183 | (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096); |
1184 | cs->hw.diva.cfg_reg = | 1184 | cs->hw.diva.cfg_reg = |
1185 | (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096); | 1185 | (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096); |
1186 | } else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON, | 1186 | } else if ((dev_diva202 = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1187 | PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) { | 1187 | PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) { |
1188 | if (pci_enable_device(dev_diva202)) | 1188 | if (pci_enable_device(dev_diva202)) |
1189 | return(0); | 1189 | return(0); |
@@ -1229,14 +1229,14 @@ static int __devinit setup_diva_pci(struct IsdnCard *card) | |||
1229 | return (1); /* card found */ | 1229 | return (1); /* card found */ |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | #else /* if !CONFIG_PCI_LEGACY */ | 1232 | #else /* if !CONFIG_PCI */ |
1233 | 1233 | ||
1234 | static int __devinit setup_diva_pci(struct IsdnCard *card) | 1234 | static int __devinit setup_diva_pci(struct IsdnCard *card) |
1235 | { | 1235 | { |
1236 | return (-1); /* card not found; continue search */ | 1236 | return (-1); /* card not found; continue search */ |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | #endif /* CONFIG_PCI_LEGACY */ | 1239 | #endif /* CONFIG_PCI */ |
1240 | 1240 | ||
1241 | int __devinit | 1241 | int __devinit |
1242 | setup_diva(struct IsdnCard *card) | 1242 | setup_diva(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c index aa29d1cf16af..23c41fcd864e 100644 --- a/drivers/isdn/hisax/elsa.c +++ b/drivers/isdn/hisax/elsa.c | |||
@@ -1025,7 +1025,7 @@ setup_elsa_pcmcia(struct IsdnCard *card) | |||
1025 | cs->irq); | 1025 | cs->irq); |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | #ifdef CONFIG_PCI_LEGACY | 1028 | #ifdef CONFIG_PCI |
1029 | static struct pci_dev *dev_qs1000 __devinitdata = NULL; | 1029 | static struct pci_dev *dev_qs1000 __devinitdata = NULL; |
1030 | static struct pci_dev *dev_qs3000 __devinitdata = NULL; | 1030 | static struct pci_dev *dev_qs3000 __devinitdata = NULL; |
1031 | 1031 | ||
@@ -1035,7 +1035,7 @@ setup_elsa_pci(struct IsdnCard *card) | |||
1035 | struct IsdnCardState *cs = card->cs; | 1035 | struct IsdnCardState *cs = card->cs; |
1036 | 1036 | ||
1037 | cs->subtyp = 0; | 1037 | cs->subtyp = 0; |
1038 | if ((dev_qs1000 = pci_find_device(PCI_VENDOR_ID_ELSA, | 1038 | if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, |
1039 | PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) { | 1039 | PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) { |
1040 | if (pci_enable_device(dev_qs1000)) | 1040 | if (pci_enable_device(dev_qs1000)) |
1041 | return(0); | 1041 | return(0); |
@@ -1043,7 +1043,7 @@ setup_elsa_pci(struct IsdnCard *card) | |||
1043 | cs->irq = dev_qs1000->irq; | 1043 | cs->irq = dev_qs1000->irq; |
1044 | cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1); | 1044 | cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1); |
1045 | cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3); | 1045 | cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3); |
1046 | } else if ((dev_qs3000 = pci_find_device(PCI_VENDOR_ID_ELSA, | 1046 | } else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, |
1047 | PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) { | 1047 | PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) { |
1048 | if (pci_enable_device(dev_qs3000)) | 1048 | if (pci_enable_device(dev_qs3000)) |
1049 | return(0); | 1049 | return(0); |
@@ -1093,7 +1093,7 @@ setup_elsa_pci(struct IsdnCard *card) | |||
1093 | { | 1093 | { |
1094 | return (1); | 1094 | return (1); |
1095 | } | 1095 | } |
1096 | #endif /* CONFIG_PCI_LEGACY */ | 1096 | #endif /* CONFIG_PCI */ |
1097 | 1097 | ||
1098 | static int __devinit | 1098 | static int __devinit |
1099 | setup_elsa_common(struct IsdnCard *card) | 1099 | setup_elsa_common(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c index 39f421ed8de8..26264abf1f58 100644 --- a/drivers/isdn/hisax/enternow_pci.c +++ b/drivers/isdn/hisax/enternow_pci.c | |||
@@ -406,7 +406,7 @@ setup_enternow_pci(struct IsdnCard *card) | |||
406 | 406 | ||
407 | for ( ;; ) | 407 | for ( ;; ) |
408 | { | 408 | { |
409 | if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 409 | if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
410 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { | 410 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { |
411 | ret = en_pci_probe(dev_netjet, cs); | 411 | ret = en_pci_probe(dev_netjet, cs); |
412 | if (!ret) | 412 | if (!ret) |
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c index 0ea3b4607680..353982fc1436 100644 --- a/drivers/isdn/hisax/gazel.c +++ b/drivers/isdn/hisax/gazel.c | |||
@@ -531,7 +531,7 @@ setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs) | |||
531 | return (0); | 531 | return (0); |
532 | } | 532 | } |
533 | 533 | ||
534 | #ifdef CONFIG_PCI_LEGACY | 534 | #ifdef CONFIG_PCI |
535 | static struct pci_dev *dev_tel __devinitdata = NULL; | 535 | static struct pci_dev *dev_tel __devinitdata = NULL; |
536 | 536 | ||
537 | static int __devinit | 537 | static int __devinit |
@@ -546,7 +546,7 @@ setup_gazelpci(struct IsdnCardState *cs) | |||
546 | found = 0; | 546 | found = 0; |
547 | seekcard = PCI_DEVICE_ID_PLX_R685; | 547 | seekcard = PCI_DEVICE_ID_PLX_R685; |
548 | for (nbseek = 0; nbseek < 4; nbseek++) { | 548 | for (nbseek = 0; nbseek < 4; nbseek++) { |
549 | if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX, | 549 | if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_PLX, |
550 | seekcard, dev_tel))) { | 550 | seekcard, dev_tel))) { |
551 | if (pci_enable_device(dev_tel)) | 551 | if (pci_enable_device(dev_tel)) |
552 | return 1; | 552 | return 1; |
@@ -620,7 +620,7 @@ setup_gazelpci(struct IsdnCardState *cs) | |||
620 | 620 | ||
621 | return (0); | 621 | return (0); |
622 | } | 622 | } |
623 | #endif /* CONFIG_PCI_LEGACY */ | 623 | #endif /* CONFIG_PCI */ |
624 | 624 | ||
625 | int __devinit | 625 | int __devinit |
626 | setup_gazel(struct IsdnCard *card) | 626 | setup_gazel(struct IsdnCard *card) |
@@ -640,7 +640,7 @@ setup_gazel(struct IsdnCard *card) | |||
640 | return (0); | 640 | return (0); |
641 | } else { | 641 | } else { |
642 | 642 | ||
643 | #ifdef CONFIG_PCI_LEGACY | 643 | #ifdef CONFIG_PCI |
644 | if (setup_gazelpci(cs)) | 644 | if (setup_gazelpci(cs)) |
645 | return (0); | 645 | return (0); |
646 | #else | 646 | #else |
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 10914731b304..917cc84065bd 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c | |||
@@ -1658,7 +1658,7 @@ setup_hfcpci(struct IsdnCard *card) | |||
1658 | 1658 | ||
1659 | i = 0; | 1659 | i = 0; |
1660 | while (id_list[i].vendor_id) { | 1660 | while (id_list[i].vendor_id) { |
1661 | tmp_hfcpci = pci_find_device(id_list[i].vendor_id, | 1661 | tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id, |
1662 | id_list[i].device_id, | 1662 | id_list[i].device_id, |
1663 | dev_hfcpci); | 1663 | dev_hfcpci); |
1664 | i++; | 1664 | i++; |
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 0685c1946969..832a87855ffb 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h | |||
@@ -1323,3 +1323,26 @@ void release_tei(struct IsdnCardState *cs); | |||
1323 | char *HiSax_getrev(const char *revision); | 1323 | char *HiSax_getrev(const char *revision); |
1324 | int TeiNew(void); | 1324 | int TeiNew(void); |
1325 | void TeiFree(void); | 1325 | void TeiFree(void); |
1326 | |||
1327 | #ifdef CONFIG_PCI | ||
1328 | |||
1329 | #include <linux/pci.h> | ||
1330 | |||
1331 | /* adaptation wrapper for old usage | ||
1332 | * WARNING! This is unfit for use in a PCI hotplug environment, | ||
1333 | * as the returned PCI device can disappear at any moment in time. | ||
1334 | * Callers should be converted to use pci_get_device() instead. | ||
1335 | */ | ||
1336 | static inline struct pci_dev *hisax_find_pci_device(unsigned int vendor, | ||
1337 | unsigned int device, | ||
1338 | struct pci_dev *from) | ||
1339 | { | ||
1340 | struct pci_dev *pdev; | ||
1341 | |||
1342 | pci_dev_get(from); | ||
1343 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
1344 | pci_dev_put(pdev); | ||
1345 | return pdev; | ||
1346 | } | ||
1347 | |||
1348 | #endif | ||
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c index ef00633e1d2a..ccaa6e13310f 100644 --- a/drivers/isdn/hisax/niccy.c +++ b/drivers/isdn/hisax/niccy.c | |||
@@ -297,12 +297,12 @@ int __devinit setup_niccy(struct IsdnCard *card) | |||
297 | return 0; | 297 | return 0; |
298 | } | 298 | } |
299 | } else { | 299 | } else { |
300 | #ifdef CONFIG_PCI_LEGACY | 300 | #ifdef CONFIG_PCI |
301 | static struct pci_dev *niccy_dev __devinitdata; | 301 | static struct pci_dev *niccy_dev __devinitdata; |
302 | 302 | ||
303 | u_int pci_ioaddr; | 303 | u_int pci_ioaddr; |
304 | cs->subtyp = 0; | 304 | cs->subtyp = 0; |
305 | if ((niccy_dev = pci_find_device(PCI_VENDOR_ID_SATSAGEM, | 305 | if ((niccy_dev = hisax_find_pci_device(PCI_VENDOR_ID_SATSAGEM, |
306 | PCI_DEVICE_ID_SATSAGEM_NICCY, | 306 | PCI_DEVICE_ID_SATSAGEM_NICCY, |
307 | niccy_dev))) { | 307 | niccy_dev))) { |
308 | if (pci_enable_device(niccy_dev)) | 308 | if (pci_enable_device(niccy_dev)) |
@@ -354,7 +354,7 @@ int __devinit setup_niccy(struct IsdnCard *card) | |||
354 | printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n"); | 354 | printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n"); |
355 | printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n"); | 355 | printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n"); |
356 | return 0; | 356 | return 0; |
357 | #endif /* CONFIG_PCI_LEGACY */ | 357 | #endif /* CONFIG_PCI */ |
358 | } | 358 | } |
359 | printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n", | 359 | printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n", |
360 | (cs->subtyp == 1) ? "PnP" : "PCI", | 360 | (cs->subtyp == 1) ? "PnP" : "PCI", |
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c index 8d36ccc87d81..2344e7b33448 100644 --- a/drivers/isdn/hisax/nj_s.c +++ b/drivers/isdn/hisax/nj_s.c | |||
@@ -276,7 +276,7 @@ setup_netjet_s(struct IsdnCard *card) | |||
276 | 276 | ||
277 | for ( ;; ) | 277 | for ( ;; ) |
278 | { | 278 | { |
279 | if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 279 | if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
280 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { | 280 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { |
281 | ret = njs_pci_probe(dev_netjet, cs); | 281 | ret = njs_pci_probe(dev_netjet, cs); |
282 | if (!ret) | 282 | if (!ret) |
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c index d306c946ffba..095e974aed80 100644 --- a/drivers/isdn/hisax/nj_u.c +++ b/drivers/isdn/hisax/nj_u.c | |||
@@ -240,7 +240,7 @@ setup_netjet_u(struct IsdnCard *card) | |||
240 | 240 | ||
241 | for ( ;; ) | 241 | for ( ;; ) |
242 | { | 242 | { |
243 | if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 243 | if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
244 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { | 244 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { |
245 | ret = nju_pci_probe(dev_netjet, cs); | 245 | ret = nju_pci_probe(dev_netjet, cs); |
246 | if (!ret) | 246 | if (!ret) |
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c index 5569a522e2a1..69dfc8d29017 100644 --- a/drivers/isdn/hisax/sedlbauer.c +++ b/drivers/isdn/hisax/sedlbauer.c | |||
@@ -598,7 +598,7 @@ setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt) | |||
598 | } | 598 | } |
599 | #endif /* __ISAPNP__ */ | 599 | #endif /* __ISAPNP__ */ |
600 | 600 | ||
601 | #ifdef CONFIG_PCI_LEGACY | 601 | #ifdef CONFIG_PCI |
602 | static struct pci_dev *dev_sedl __devinitdata = NULL; | 602 | static struct pci_dev *dev_sedl __devinitdata = NULL; |
603 | 603 | ||
604 | static int __devinit | 604 | static int __devinit |
@@ -607,7 +607,7 @@ setup_sedlbauer_pci(struct IsdnCard *card) | |||
607 | struct IsdnCardState *cs = card->cs; | 607 | struct IsdnCardState *cs = card->cs; |
608 | u16 sub_vendor_id, sub_id; | 608 | u16 sub_vendor_id, sub_id; |
609 | 609 | ||
610 | if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 610 | if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
611 | PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) { | 611 | PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) { |
612 | if (pci_enable_device(dev_sedl)) | 612 | if (pci_enable_device(dev_sedl)) |
613 | return(0); | 613 | return(0); |
@@ -673,7 +673,7 @@ setup_sedlbauer_pci(struct IsdnCard *card) | |||
673 | return (1); | 673 | return (1); |
674 | } | 674 | } |
675 | 675 | ||
676 | #endif /* CONFIG_PCI_LEGACY */ | 676 | #endif /* CONFIG_PCI */ |
677 | 677 | ||
678 | int __devinit | 678 | int __devinit |
679 | setup_sedlbauer(struct IsdnCard *card) | 679 | setup_sedlbauer(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c index 28b08de4673d..b85ceb3746ce 100644 --- a/drivers/isdn/hisax/telespci.c +++ b/drivers/isdn/hisax/telespci.c | |||
@@ -300,7 +300,7 @@ setup_telespci(struct IsdnCard *card) | |||
300 | if (cs->typ != ISDN_CTYPE_TELESPCI) | 300 | if (cs->typ != ISDN_CTYPE_TELESPCI) |
301 | return (0); | 301 | return (0); |
302 | 302 | ||
303 | if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) { | 303 | if ((dev_tel = hisax_find_pci_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) { |
304 | if (pci_enable_device(dev_tel)) | 304 | if (pci_enable_device(dev_tel)) |
305 | return(0); | 305 | return(0); |
306 | cs->irq = dev_tel->irq; | 306 | cs->irq = dev_tel->irq; |
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c index c4d862c11a60..9d6e864023fe 100644 --- a/drivers/isdn/hisax/w6692.c +++ b/drivers/isdn/hisax/w6692.c | |||
@@ -1007,7 +1007,7 @@ setup_w6692(struct IsdnCard *card) | |||
1007 | return (0); | 1007 | return (0); |
1008 | 1008 | ||
1009 | while (id_list[id_idx].vendor_id) { | 1009 | while (id_list[id_idx].vendor_id) { |
1010 | dev_w6692 = pci_find_device(id_list[id_idx].vendor_id, | 1010 | dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id, |
1011 | id_list[id_idx].device_id, | 1011 | id_list[id_idx].device_id, |
1012 | dev_w6692); | 1012 | dev_w6692); |
1013 | if (dev_w6692) { | 1013 | if (dev_w6692) { |
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 23741cec45e3..d840a109f833 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c | |||
@@ -322,8 +322,8 @@ static int __init adb_init(void) | |||
322 | adb_controller = NULL; | 322 | adb_controller = NULL; |
323 | } else { | 323 | } else { |
324 | #ifdef CONFIG_PPC | 324 | #ifdef CONFIG_PPC |
325 | if (machine_is_compatible("AAPL,PowerBook1998") || | 325 | if (of_machine_is_compatible("AAPL,PowerBook1998") || |
326 | machine_is_compatible("PowerBook1,1")) | 326 | of_machine_is_compatible("PowerBook1,1")) |
327 | sleepy_trackpad = 1; | 327 | sleepy_trackpad = 1; |
328 | #endif /* CONFIG_PPC */ | 328 | #endif /* CONFIG_PPC */ |
329 | 329 | ||
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index 454bc501df3c..5738d8bf2d97 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c | |||
@@ -1899,7 +1899,7 @@ static int create_control_loops(void) | |||
1899 | */ | 1899 | */ |
1900 | if (rackmac) | 1900 | if (rackmac) |
1901 | cpu_pid_type = CPU_PID_TYPE_RACKMAC; | 1901 | cpu_pid_type = CPU_PID_TYPE_RACKMAC; |
1902 | else if (machine_is_compatible("PowerMac7,3") | 1902 | else if (of_machine_is_compatible("PowerMac7,3") |
1903 | && (cpu_count > 1) | 1903 | && (cpu_count > 1) |
1904 | && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID | 1904 | && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID |
1905 | && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) { | 1905 | && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) { |
@@ -2234,10 +2234,10 @@ static int __init therm_pm72_init(void) | |||
2234 | { | 2234 | { |
2235 | struct device_node *np; | 2235 | struct device_node *np; |
2236 | 2236 | ||
2237 | rackmac = machine_is_compatible("RackMac3,1"); | 2237 | rackmac = of_machine_is_compatible("RackMac3,1"); |
2238 | 2238 | ||
2239 | if (!machine_is_compatible("PowerMac7,2") && | 2239 | if (!of_machine_is_compatible("PowerMac7,2") && |
2240 | !machine_is_compatible("PowerMac7,3") && | 2240 | !of_machine_is_compatible("PowerMac7,3") && |
2241 | !rackmac) | 2241 | !rackmac) |
2242 | return -ENODEV; | 2242 | return -ENODEV; |
2243 | 2243 | ||
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index ba48fd76396e..7fb8b4da35a7 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c | |||
@@ -490,7 +490,7 @@ g4fan_init( void ) | |||
490 | info = of_get_property(np, "thermal-info", NULL); | 490 | info = of_get_property(np, "thermal-info", NULL); |
491 | of_node_put(np); | 491 | of_node_put(np); |
492 | 492 | ||
493 | if( !info || !machine_is_compatible("PowerMac3,6") ) | 493 | if( !info || !of_machine_is_compatible("PowerMac3,6") ) |
494 | return -ENODEV; | 494 | return -ENODEV; |
495 | 495 | ||
496 | if( info->id != 3 ) { | 496 | if( info->id != 3 ) { |
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index a348bb0791d3..4f3c4479c16a 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c | |||
@@ -150,13 +150,13 @@ void __init pmu_backlight_init() | |||
150 | 150 | ||
151 | /* Special case for the old PowerBook since I can't test on it */ | 151 | /* Special case for the old PowerBook since I can't test on it */ |
152 | autosave = | 152 | autosave = |
153 | machine_is_compatible("AAPL,3400/2400") || | 153 | of_machine_is_compatible("AAPL,3400/2400") || |
154 | machine_is_compatible("AAPL,3500"); | 154 | of_machine_is_compatible("AAPL,3500"); |
155 | 155 | ||
156 | if (!autosave && | 156 | if (!autosave && |
157 | !pmac_has_backlight_type("pmu") && | 157 | !pmac_has_backlight_type("pmu") && |
158 | !machine_is_compatible("AAPL,PowerBook1998") && | 158 | !of_machine_is_compatible("AAPL,PowerBook1998") && |
159 | !machine_is_compatible("PowerBook1,1")) | 159 | !of_machine_is_compatible("PowerBook1,1")) |
160 | return; | 160 | return; |
161 | 161 | ||
162 | snprintf(name, sizeof(name), "pmubl"); | 162 | snprintf(name, sizeof(name), "pmubl"); |
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index db379c381432..42764849eb78 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -463,8 +463,8 @@ static int __init via_pmu_dev_init(void) | |||
463 | #endif | 463 | #endif |
464 | 464 | ||
465 | #ifdef CONFIG_PPC32 | 465 | #ifdef CONFIG_PPC32 |
466 | if (machine_is_compatible("AAPL,3400/2400") || | 466 | if (of_machine_is_compatible("AAPL,3400/2400") || |
467 | machine_is_compatible("AAPL,3500")) { | 467 | of_machine_is_compatible("AAPL,3500")) { |
468 | int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, | 468 | int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, |
469 | NULL, PMAC_MB_INFO_MODEL, 0); | 469 | NULL, PMAC_MB_INFO_MODEL, 0); |
470 | pmu_battery_count = 1; | 470 | pmu_battery_count = 1; |
@@ -472,8 +472,8 @@ static int __init via_pmu_dev_init(void) | |||
472 | pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; | 472 | pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; |
473 | else | 473 | else |
474 | pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; | 474 | pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; |
475 | } else if (machine_is_compatible("AAPL,PowerBook1998") || | 475 | } else if (of_machine_is_compatible("AAPL,PowerBook1998") || |
476 | machine_is_compatible("PowerBook1,1")) { | 476 | of_machine_is_compatible("PowerBook1,1")) { |
477 | pmu_battery_count = 2; | 477 | pmu_battery_count = 2; |
478 | pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; | 478 | pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; |
479 | pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; | 479 | pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; |
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c index 075b4d99e354..437f55c5d18d 100644 --- a/drivers/macintosh/windfarm_core.c +++ b/drivers/macintosh/windfarm_core.c | |||
@@ -468,9 +468,9 @@ static int __init windfarm_core_init(void) | |||
468 | DBG("wf: core loaded\n"); | 468 | DBG("wf: core loaded\n"); |
469 | 469 | ||
470 | /* Don't register on old machines that use therm_pm72 for now */ | 470 | /* Don't register on old machines that use therm_pm72 for now */ |
471 | if (machine_is_compatible("PowerMac7,2") || | 471 | if (of_machine_is_compatible("PowerMac7,2") || |
472 | machine_is_compatible("PowerMac7,3") || | 472 | of_machine_is_compatible("PowerMac7,3") || |
473 | machine_is_compatible("RackMac3,1")) | 473 | of_machine_is_compatible("RackMac3,1")) |
474 | return -ENODEV; | 474 | return -ENODEV; |
475 | platform_device_register(&wf_platform_device); | 475 | platform_device_register(&wf_platform_device); |
476 | return 0; | 476 | return 0; |
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c index 900aade06198..1a77a7c97d0e 100644 --- a/drivers/macintosh/windfarm_cpufreq_clamp.c +++ b/drivers/macintosh/windfarm_cpufreq_clamp.c | |||
@@ -76,9 +76,9 @@ static int __init wf_cpufreq_clamp_init(void) | |||
76 | struct wf_control *clamp; | 76 | struct wf_control *clamp; |
77 | 77 | ||
78 | /* Don't register on old machines that use therm_pm72 for now */ | 78 | /* Don't register on old machines that use therm_pm72 for now */ |
79 | if (machine_is_compatible("PowerMac7,2") || | 79 | if (of_machine_is_compatible("PowerMac7,2") || |
80 | machine_is_compatible("PowerMac7,3") || | 80 | of_machine_is_compatible("PowerMac7,3") || |
81 | machine_is_compatible("RackMac3,1")) | 81 | of_machine_is_compatible("RackMac3,1")) |
82 | return -ENODEV; | 82 | return -ENODEV; |
83 | 83 | ||
84 | clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); | 84 | clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); |
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index ed6426a10773..d8257d35afde 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c | |||
@@ -239,9 +239,9 @@ static struct i2c_driver wf_lm75_driver = { | |||
239 | static int __init wf_lm75_sensor_init(void) | 239 | static int __init wf_lm75_sensor_init(void) |
240 | { | 240 | { |
241 | /* Don't register on old machines that use therm_pm72 for now */ | 241 | /* Don't register on old machines that use therm_pm72 for now */ |
242 | if (machine_is_compatible("PowerMac7,2") || | 242 | if (of_machine_is_compatible("PowerMac7,2") || |
243 | machine_is_compatible("PowerMac7,3") || | 243 | of_machine_is_compatible("PowerMac7,3") || |
244 | machine_is_compatible("RackMac3,1")) | 244 | of_machine_is_compatible("RackMac3,1")) |
245 | return -ENODEV; | 245 | return -ENODEV; |
246 | return i2c_add_driver(&wf_lm75_driver); | 246 | return i2c_add_driver(&wf_lm75_driver); |
247 | } | 247 | } |
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index a67b349319e9..b486eb929fde 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c | |||
@@ -188,9 +188,9 @@ static struct i2c_driver wf_max6690_driver = { | |||
188 | static int __init wf_max6690_sensor_init(void) | 188 | static int __init wf_max6690_sensor_init(void) |
189 | { | 189 | { |
190 | /* Don't register on old machines that use therm_pm72 for now */ | 190 | /* Don't register on old machines that use therm_pm72 for now */ |
191 | if (machine_is_compatible("PowerMac7,2") || | 191 | if (of_machine_is_compatible("PowerMac7,2") || |
192 | machine_is_compatible("PowerMac7,3") || | 192 | of_machine_is_compatible("PowerMac7,3") || |
193 | machine_is_compatible("RackMac3,1")) | 193 | of_machine_is_compatible("RackMac3,1")) |
194 | return -ENODEV; | 194 | return -ENODEV; |
195 | return i2c_add_driver(&wf_max6690_driver); | 195 | return i2c_add_driver(&wf_max6690_driver); |
196 | } | 196 | } |
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index 73d695dc9e50..e0ee80700cde 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c | |||
@@ -676,7 +676,7 @@ static int __init wf_pm112_init(void) | |||
676 | { | 676 | { |
677 | struct device_node *cpu; | 677 | struct device_node *cpu; |
678 | 678 | ||
679 | if (!machine_is_compatible("PowerMac11,2")) | 679 | if (!of_machine_is_compatible("PowerMac11,2")) |
680 | return -ENODEV; | 680 | return -ENODEV; |
681 | 681 | ||
682 | /* Count the number of CPU cores */ | 682 | /* Count the number of CPU cores */ |
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c index 66ec4fb115bb..947d4afa25ca 100644 --- a/drivers/macintosh/windfarm_pm121.c +++ b/drivers/macintosh/windfarm_pm121.c | |||
@@ -1008,7 +1008,7 @@ static int __init pm121_init(void) | |||
1008 | { | 1008 | { |
1009 | int rc = -ENODEV; | 1009 | int rc = -ENODEV; |
1010 | 1010 | ||
1011 | if (machine_is_compatible("PowerMac12,1")) | 1011 | if (of_machine_is_compatible("PowerMac12,1")) |
1012 | rc = pm121_init_pm(); | 1012 | rc = pm121_init_pm(); |
1013 | 1013 | ||
1014 | if (rc == 0) { | 1014 | if (rc == 0) { |
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c index abbe206474f5..565d5b2adc95 100644 --- a/drivers/macintosh/windfarm_pm81.c +++ b/drivers/macintosh/windfarm_pm81.c | |||
@@ -779,8 +779,8 @@ static int __init wf_smu_init(void) | |||
779 | { | 779 | { |
780 | int rc = -ENODEV; | 780 | int rc = -ENODEV; |
781 | 781 | ||
782 | if (machine_is_compatible("PowerMac8,1") || | 782 | if (of_machine_is_compatible("PowerMac8,1") || |
783 | machine_is_compatible("PowerMac8,2")) | 783 | of_machine_is_compatible("PowerMac8,2")) |
784 | rc = wf_init_pm(); | 784 | rc = wf_init_pm(); |
785 | 785 | ||
786 | if (rc == 0) { | 786 | if (rc == 0) { |
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c index 764c525b2117..bea99168ff35 100644 --- a/drivers/macintosh/windfarm_pm91.c +++ b/drivers/macintosh/windfarm_pm91.c | |||
@@ -711,7 +711,7 @@ static int __init wf_smu_init(void) | |||
711 | { | 711 | { |
712 | int rc = -ENODEV; | 712 | int rc = -ENODEV; |
713 | 713 | ||
714 | if (machine_is_compatible("PowerMac9,1")) | 714 | if (of_machine_is_compatible("PowerMac9,1")) |
715 | rc = wf_init_pm(); | 715 | rc = wf_init_pm(); |
716 | 716 | ||
717 | if (rc == 0) { | 717 | if (rc == 0) { |
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index 9c567b93f417..3c193504bb80 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ b/drivers/macintosh/windfarm_smu_sensors.c | |||
@@ -363,9 +363,9 @@ smu_cpu_power_create(struct wf_sensor *volts, struct wf_sensor *amps) | |||
363 | * I yet have to figure out what's up with 8,2 and will have to | 363 | * I yet have to figure out what's up with 8,2 and will have to |
364 | * adjust for later, unless we can 100% trust the SDB partition... | 364 | * adjust for later, unless we can 100% trust the SDB partition... |
365 | */ | 365 | */ |
366 | if ((machine_is_compatible("PowerMac8,1") || | 366 | if ((of_machine_is_compatible("PowerMac8,1") || |
367 | machine_is_compatible("PowerMac8,2") || | 367 | of_machine_is_compatible("PowerMac8,2") || |
368 | machine_is_compatible("PowerMac9,1")) && | 368 | of_machine_is_compatible("PowerMac9,1")) && |
369 | cpuvcp_version >= 2) { | 369 | cpuvcp_version >= 2) { |
370 | pow->quadratic = 1; | 370 | pow->quadratic = 1; |
371 | DBG("windfarm: CPU Power using quadratic transform\n"); | 371 | DBG("windfarm: CPU Power using quadratic transform\n"); |
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 8b8558fcb042..b11533f76195 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c | |||
@@ -504,6 +504,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) | |||
504 | "bytes left in TS. Resyncing.\n", ts_remain); | 504 | "bytes left in TS. Resyncing.\n", ts_remain); |
505 | priv->ule_sndu_len = 0; | 505 | priv->ule_sndu_len = 0; |
506 | priv->need_pusi = 1; | 506 | priv->need_pusi = 1; |
507 | ts += TS_SZ; | ||
507 | continue; | 508 | continue; |
508 | } | 509 | } |
509 | 510 | ||
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 44d2037e9e56..5382b5a44aff 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -126,8 +126,6 @@ static int mfcounter = 0; | |||
126 | * Public data... | 126 | * Public data... |
127 | */ | 127 | */ |
128 | 128 | ||
129 | static struct proc_dir_entry *mpt_proc_root_dir; | ||
130 | |||
131 | #define WHOINIT_UNKNOWN 0xAA | 129 | #define WHOINIT_UNKNOWN 0xAA |
132 | 130 | ||
133 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 131 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
@@ -146,6 +144,9 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS]; | |||
146 | static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; | 144 | static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; |
147 | static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; | 145 | static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; |
148 | 146 | ||
147 | #ifdef CONFIG_PROC_FS | ||
148 | static struct proc_dir_entry *mpt_proc_root_dir; | ||
149 | #endif | ||
149 | 150 | ||
150 | /* | 151 | /* |
151 | * Driver Callback Index's | 152 | * Driver Callback Index's |
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index b4948671eb92..9718c8f2e959 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h | |||
@@ -76,8 +76,8 @@ | |||
76 | #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR | 76 | #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | #define MPT_LINUX_VERSION_COMMON "3.04.13" | 79 | #define MPT_LINUX_VERSION_COMMON "3.04.14" |
80 | #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.13" | 80 | #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.14" |
81 | #define WHAT_MAGIC_STRING "@" "(" "#" ")" | 81 | #define WHAT_MAGIC_STRING "@" "(" "#" ")" |
82 | 82 | ||
83 | #define show_mptmod_ver(s,ver) \ | 83 | #define show_mptmod_ver(s,ver) \ |
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 352acd05c46b..caa8f568a41c 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c | |||
@@ -360,8 +360,8 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function) | |||
360 | u16 iocstatus; | 360 | u16 iocstatus; |
361 | 361 | ||
362 | /* bus reset is only good for SCSI IO, RAID PASSTHRU */ | 362 | /* bus reset is only good for SCSI IO, RAID PASSTHRU */ |
363 | if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) || | 363 | if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || |
364 | (function == MPI_FUNCTION_SCSI_IO_REQUEST)) { | 364 | function == MPI_FUNCTION_SCSI_IO_REQUEST)) { |
365 | dtmprintk(ioc, printk(MYIOC_s_WARN_FMT | 365 | dtmprintk(ioc, printk(MYIOC_s_WARN_FMT |
366 | "TaskMgmt, not SCSI_IO!!\n", ioc->name)); | 366 | "TaskMgmt, not SCSI_IO!!\n", ioc->name)); |
367 | return -EPERM; | 367 | return -EPERM; |
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index ebf6ae024da4..612ab3c51a6b 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c | |||
@@ -195,29 +195,34 @@ mptfc_block_error_handler(struct scsi_cmnd *SCpnt, | |||
195 | unsigned long flags; | 195 | unsigned long flags; |
196 | int ready; | 196 | int ready; |
197 | MPT_ADAPTER *ioc; | 197 | MPT_ADAPTER *ioc; |
198 | int loops = 40; /* seconds */ | ||
198 | 199 | ||
199 | hd = shost_priv(SCpnt->device->host); | 200 | hd = shost_priv(SCpnt->device->host); |
200 | ioc = hd->ioc; | 201 | ioc = hd->ioc; |
201 | spin_lock_irqsave(shost->host_lock, flags); | 202 | spin_lock_irqsave(shost->host_lock, flags); |
202 | while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY) { | 203 | while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY |
204 | || (loops > 0 && ioc->active == 0)) { | ||
203 | spin_unlock_irqrestore(shost->host_lock, flags); | 205 | spin_unlock_irqrestore(shost->host_lock, flags); |
204 | dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT | 206 | dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT |
205 | "mptfc_block_error_handler.%d: %d:%d, port status is " | 207 | "mptfc_block_error_handler.%d: %d:%d, port status is " |
206 | "DID_IMM_RETRY, deferring %s recovery.\n", | 208 | "%x, active flag %d, deferring %s recovery.\n", |
207 | ioc->name, ioc->sh->host_no, | 209 | ioc->name, ioc->sh->host_no, |
208 | SCpnt->device->id, SCpnt->device->lun, caller)); | 210 | SCpnt->device->id, SCpnt->device->lun, |
211 | ready, ioc->active, caller)); | ||
209 | msleep(1000); | 212 | msleep(1000); |
210 | spin_lock_irqsave(shost->host_lock, flags); | 213 | spin_lock_irqsave(shost->host_lock, flags); |
214 | loops --; | ||
211 | } | 215 | } |
212 | spin_unlock_irqrestore(shost->host_lock, flags); | 216 | spin_unlock_irqrestore(shost->host_lock, flags); |
213 | 217 | ||
214 | if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata) { | 218 | if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata |
219 | || ioc->active == 0) { | ||
215 | dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT | 220 | dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT |
216 | "%s.%d: %d:%d, failing recovery, " | 221 | "%s.%d: %d:%d, failing recovery, " |
217 | "port state %d, vdevice %p.\n", caller, | 222 | "port state %x, active %d, vdevice %p.\n", caller, |
218 | ioc->name, ioc->sh->host_no, | 223 | ioc->name, ioc->sh->host_no, |
219 | SCpnt->device->id, SCpnt->device->lun, ready, | 224 | SCpnt->device->id, SCpnt->device->lun, ready, |
220 | SCpnt->device->hostdata)); | 225 | ioc->active, SCpnt->device->hostdata)); |
221 | return FAILED; | 226 | return FAILED; |
222 | } | 227 | } |
223 | dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT | 228 | dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 83873e3d0ce7..c20bbe45da82 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -1075,6 +1075,19 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id) | |||
1075 | return 0; | 1075 | return 0; |
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | static void | ||
1079 | mptsas_block_io_sdev(struct scsi_device *sdev, void *data) | ||
1080 | { | ||
1081 | scsi_device_set_state(sdev, SDEV_BLOCK); | ||
1082 | } | ||
1083 | |||
1084 | static void | ||
1085 | mptsas_block_io_starget(struct scsi_target *starget) | ||
1086 | { | ||
1087 | if (starget) | ||
1088 | starget_for_each_device(starget, NULL, mptsas_block_io_sdev); | ||
1089 | } | ||
1090 | |||
1078 | /** | 1091 | /** |
1079 | * mptsas_target_reset_queue | 1092 | * mptsas_target_reset_queue |
1080 | * | 1093 | * |
@@ -1098,10 +1111,11 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, | |||
1098 | id = sas_event_data->TargetID; | 1111 | id = sas_event_data->TargetID; |
1099 | channel = sas_event_data->Bus; | 1112 | channel = sas_event_data->Bus; |
1100 | 1113 | ||
1101 | if (!(vtarget = mptsas_find_vtarget(ioc, channel, id))) | 1114 | vtarget = mptsas_find_vtarget(ioc, channel, id); |
1102 | return; | 1115 | if (vtarget) { |
1103 | 1116 | mptsas_block_io_starget(vtarget->starget); | |
1104 | vtarget->deleted = 1; /* block IO */ | 1117 | vtarget->deleted = 1; /* block IO */ |
1118 | } | ||
1105 | 1119 | ||
1106 | target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event), | 1120 | target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event), |
1107 | GFP_ATOMIC); | 1121 | GFP_ATOMIC); |
@@ -1868,7 +1882,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
1868 | if (ioc->sas_discovery_quiesce_io) | 1882 | if (ioc->sas_discovery_quiesce_io) |
1869 | return SCSI_MLQUEUE_HOST_BUSY; | 1883 | return SCSI_MLQUEUE_HOST_BUSY; |
1870 | 1884 | ||
1871 | // scsi_print_command(SCpnt); | 1885 | if (ioc->debug_level & MPT_DEBUG_SCSI) |
1886 | scsi_print_command(SCpnt); | ||
1872 | 1887 | ||
1873 | return mptscsih_qcmd(SCpnt,done); | 1888 | return mptscsih_qcmd(SCpnt,done); |
1874 | } | 1889 | } |
@@ -2686,6 +2701,187 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, | |||
2686 | return error; | 2701 | return error; |
2687 | } | 2702 | } |
2688 | 2703 | ||
2704 | struct rep_manu_request{ | ||
2705 | u8 smp_frame_type; | ||
2706 | u8 function; | ||
2707 | u8 reserved; | ||
2708 | u8 request_length; | ||
2709 | }; | ||
2710 | |||
2711 | struct rep_manu_reply{ | ||
2712 | u8 smp_frame_type; /* 0x41 */ | ||
2713 | u8 function; /* 0x01 */ | ||
2714 | u8 function_result; | ||
2715 | u8 response_length; | ||
2716 | u16 expander_change_count; | ||
2717 | u8 reserved0[2]; | ||
2718 | u8 sas_format:1; | ||
2719 | u8 reserved1:7; | ||
2720 | u8 reserved2[3]; | ||
2721 | u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; | ||
2722 | u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; | ||
2723 | u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; | ||
2724 | u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; | ||
2725 | u16 component_id; | ||
2726 | u8 component_revision_id; | ||
2727 | u8 reserved3; | ||
2728 | u8 vendor_specific[8]; | ||
2729 | }; | ||
2730 | |||
2731 | /** | ||
2732 | * mptsas_exp_repmanufacture_info - | ||
2733 | * @ioc: per adapter object | ||
2734 | * @sas_address: expander sas address | ||
2735 | * @edev: the sas_expander_device object | ||
2736 | * | ||
2737 | * Fills in the sas_expander_device object when SMP port is created. | ||
2738 | * | ||
2739 | * Returns 0 for success, non-zero for failure. | ||
2740 | */ | ||
2741 | static int | ||
2742 | mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc, | ||
2743 | u64 sas_address, struct sas_expander_device *edev) | ||
2744 | { | ||
2745 | MPT_FRAME_HDR *mf; | ||
2746 | SmpPassthroughRequest_t *smpreq; | ||
2747 | SmpPassthroughReply_t *smprep; | ||
2748 | struct rep_manu_reply *manufacture_reply; | ||
2749 | struct rep_manu_request *manufacture_request; | ||
2750 | int ret; | ||
2751 | int flagsLength; | ||
2752 | unsigned long timeleft; | ||
2753 | char *psge; | ||
2754 | unsigned long flags; | ||
2755 | void *data_out = NULL; | ||
2756 | dma_addr_t data_out_dma = 0; | ||
2757 | u32 sz; | ||
2758 | |||
2759 | spin_lock_irqsave(&ioc->taskmgmt_lock, flags); | ||
2760 | if (ioc->ioc_reset_in_progress) { | ||
2761 | spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); | ||
2762 | printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n", | ||
2763 | __func__, ioc->name); | ||
2764 | return -EFAULT; | ||
2765 | } | ||
2766 | spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); | ||
2767 | |||
2768 | ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex); | ||
2769 | if (ret) | ||
2770 | goto out; | ||
2771 | |||
2772 | mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc); | ||
2773 | if (!mf) { | ||
2774 | ret = -ENOMEM; | ||
2775 | goto out_unlock; | ||
2776 | } | ||
2777 | |||
2778 | smpreq = (SmpPassthroughRequest_t *)mf; | ||
2779 | memset(smpreq, 0, sizeof(*smpreq)); | ||
2780 | |||
2781 | sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply); | ||
2782 | |||
2783 | data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma); | ||
2784 | if (!data_out) { | ||
2785 | printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n", | ||
2786 | __FILE__, __LINE__, __func__); | ||
2787 | ret = -ENOMEM; | ||
2788 | goto put_mf; | ||
2789 | } | ||
2790 | |||
2791 | manufacture_request = data_out; | ||
2792 | manufacture_request->smp_frame_type = 0x40; | ||
2793 | manufacture_request->function = 1; | ||
2794 | manufacture_request->reserved = 0; | ||
2795 | manufacture_request->request_length = 0; | ||
2796 | |||
2797 | smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; | ||
2798 | smpreq->PhysicalPort = 0xFF; | ||
2799 | *((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address); | ||
2800 | smpreq->RequestDataLength = sizeof(struct rep_manu_request); | ||
2801 | |||
2802 | psge = (char *) | ||
2803 | (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4)); | ||
2804 | |||
2805 | flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | | ||
2806 | MPI_SGE_FLAGS_SYSTEM_ADDRESS | | ||
2807 | MPI_SGE_FLAGS_HOST_TO_IOC | | ||
2808 | MPI_SGE_FLAGS_END_OF_BUFFER; | ||
2809 | flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; | ||
2810 | flagsLength |= sizeof(struct rep_manu_request); | ||
2811 | |||
2812 | ioc->add_sge(psge, flagsLength, data_out_dma); | ||
2813 | psge += ioc->SGE_size; | ||
2814 | |||
2815 | flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | | ||
2816 | MPI_SGE_FLAGS_SYSTEM_ADDRESS | | ||
2817 | MPI_SGE_FLAGS_IOC_TO_HOST | | ||
2818 | MPI_SGE_FLAGS_END_OF_BUFFER; | ||
2819 | flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; | ||
2820 | flagsLength |= sizeof(struct rep_manu_reply); | ||
2821 | ioc->add_sge(psge, flagsLength, data_out_dma + | ||
2822 | sizeof(struct rep_manu_request)); | ||
2823 | |||
2824 | INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) | ||
2825 | mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); | ||
2826 | |||
2827 | timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); | ||
2828 | if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { | ||
2829 | ret = -ETIME; | ||
2830 | mpt_free_msg_frame(ioc, mf); | ||
2831 | mf = NULL; | ||
2832 | if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET) | ||
2833 | goto out_free; | ||
2834 | if (!timeleft) | ||
2835 | mpt_HardResetHandler(ioc, CAN_SLEEP); | ||
2836 | goto out_free; | ||
2837 | } | ||
2838 | |||
2839 | mf = NULL; | ||
2840 | |||
2841 | if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) { | ||
2842 | u8 *tmp; | ||
2843 | |||
2844 | smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; | ||
2845 | if (le16_to_cpu(smprep->ResponseDataLength) != | ||
2846 | sizeof(struct rep_manu_reply)) | ||
2847 | goto out_free; | ||
2848 | |||
2849 | manufacture_reply = data_out + sizeof(struct rep_manu_request); | ||
2850 | strncpy(edev->vendor_id, manufacture_reply->vendor_id, | ||
2851 | SAS_EXPANDER_VENDOR_ID_LEN); | ||
2852 | strncpy(edev->product_id, manufacture_reply->product_id, | ||
2853 | SAS_EXPANDER_PRODUCT_ID_LEN); | ||
2854 | strncpy(edev->product_rev, manufacture_reply->product_rev, | ||
2855 | SAS_EXPANDER_PRODUCT_REV_LEN); | ||
2856 | edev->level = manufacture_reply->sas_format; | ||
2857 | if (manufacture_reply->sas_format) { | ||
2858 | strncpy(edev->component_vendor_id, | ||
2859 | manufacture_reply->component_vendor_id, | ||
2860 | SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); | ||
2861 | tmp = (u8 *)&manufacture_reply->component_id; | ||
2862 | edev->component_id = tmp[0] << 8 | tmp[1]; | ||
2863 | edev->component_revision_id = | ||
2864 | manufacture_reply->component_revision_id; | ||
2865 | } | ||
2866 | } else { | ||
2867 | printk(MYIOC_s_ERR_FMT | ||
2868 | "%s: smp passthru reply failed to be returned\n", | ||
2869 | ioc->name, __func__); | ||
2870 | ret = -ENXIO; | ||
2871 | } | ||
2872 | out_free: | ||
2873 | if (data_out_dma) | ||
2874 | pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma); | ||
2875 | put_mf: | ||
2876 | if (mf) | ||
2877 | mpt_free_msg_frame(ioc, mf); | ||
2878 | out_unlock: | ||
2879 | CLEAR_MGMT_STATUS(ioc->sas_mgmt.status) | ||
2880 | mutex_unlock(&ioc->sas_mgmt.mutex); | ||
2881 | out: | ||
2882 | return ret; | ||
2883 | } | ||
2884 | |||
2689 | static void | 2885 | static void |
2690 | mptsas_parse_device_info(struct sas_identify *identify, | 2886 | mptsas_parse_device_info(struct sas_identify *identify, |
2691 | struct mptsas_devinfo *device_info) | 2887 | struct mptsas_devinfo *device_info) |
@@ -2967,6 +3163,11 @@ static int mptsas_probe_one_phy(struct device *dev, | |||
2967 | goto out; | 3163 | goto out; |
2968 | } | 3164 | } |
2969 | mptsas_set_rphy(ioc, phy_info, rphy); | 3165 | mptsas_set_rphy(ioc, phy_info, rphy); |
3166 | if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE || | ||
3167 | identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) | ||
3168 | mptsas_exp_repmanufacture_info(ioc, | ||
3169 | identify.sas_address, | ||
3170 | rphy_to_expander_device(rphy)); | ||
2970 | } | 3171 | } |
2971 | 3172 | ||
2972 | out: | 3173 | out: |
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 81279b3d694c..4a7d1afcb666 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1438,9 +1438,14 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
1438 | && (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) | 1438 | && (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) |
1439 | && (SCpnt->device->tagged_supported)) { | 1439 | && (SCpnt->device->tagged_supported)) { |
1440 | scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ; | 1440 | scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ; |
1441 | } else { | 1441 | if (SCpnt->request && SCpnt->request->ioprio) { |
1442 | if (((SCpnt->request->ioprio & 0x7) == 1) || | ||
1443 | !(SCpnt->request->ioprio & 0x7)) | ||
1444 | scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ; | ||
1445 | } | ||
1446 | } else | ||
1442 | scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED; | 1447 | scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED; |
1443 | } | 1448 | |
1444 | 1449 | ||
1445 | /* Use the above information to set up the message frame | 1450 | /* Use the above information to set up the message frame |
1446 | */ | 1451 | */ |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 677cd53f18c3..bb6465604235 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -457,10 +457,10 @@ config MTD_NAND_NOMADIK | |||
457 | 457 | ||
458 | config MTD_NAND_SH_FLCTL | 458 | config MTD_NAND_SH_FLCTL |
459 | tristate "Support for NAND on Renesas SuperH FLCTL" | 459 | tristate "Support for NAND on Renesas SuperH FLCTL" |
460 | depends on MTD_NAND && SUPERH && CPU_SUBTYPE_SH7723 | 460 | depends on MTD_NAND && SUPERH |
461 | help | 461 | help |
462 | Several Renesas SuperH CPU has FLCTL. This option enables support | 462 | Several Renesas SuperH CPU has FLCTL. This option enables support |
463 | for NAND Flash using FLCTL. This driver support SH7723. | 463 | for NAND Flash using FLCTL. |
464 | 464 | ||
465 | config MTD_NAND_DAVINCI | 465 | config MTD_NAND_DAVINCI |
466 | tristate "Support NAND on DaVinci SoC" | 466 | tristate "Support NAND on DaVinci SoC" |
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 02bef21f2e4b..1842df8bdd93 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
@@ -1,10 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * SuperH FLCTL nand controller | 2 | * SuperH FLCTL nand controller |
3 | * | 3 | * |
4 | * Copyright © 2008 Renesas Solutions Corp. | 4 | * Copyright (c) 2008 Renesas Solutions Corp. |
5 | * Copyright © 2008 Atom Create Engineering Co., Ltd. | 5 | * Copyright (c) 2008 Atom Create Engineering Co., Ltd. |
6 | * | 6 | * |
7 | * Based on fsl_elbc_nand.c, Copyright © 2006-2007 Freescale Semiconductor | 7 | * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
@@ -75,6 +75,11 @@ static void start_translation(struct sh_flctl *flctl) | |||
75 | writeb(TRSTRT, FLTRCR(flctl)); | 75 | writeb(TRSTRT, FLTRCR(flctl)); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void timeout_error(struct sh_flctl *flctl, const char *str) | ||
79 | { | ||
80 | dev_err(&flctl->pdev->dev, "Timeout occured in %s\n", str); | ||
81 | } | ||
82 | |||
78 | static void wait_completion(struct sh_flctl *flctl) | 83 | static void wait_completion(struct sh_flctl *flctl) |
79 | { | 84 | { |
80 | uint32_t timeout = LOOP_TIMEOUT_MAX; | 85 | uint32_t timeout = LOOP_TIMEOUT_MAX; |
@@ -87,7 +92,7 @@ static void wait_completion(struct sh_flctl *flctl) | |||
87 | udelay(1); | 92 | udelay(1); |
88 | } | 93 | } |
89 | 94 | ||
90 | printk(KERN_ERR "wait_completion(): Timeout occured \n"); | 95 | timeout_error(flctl, __func__); |
91 | writeb(0x0, FLTRCR(flctl)); | 96 | writeb(0x0, FLTRCR(flctl)); |
92 | } | 97 | } |
93 | 98 | ||
@@ -100,6 +105,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr) | |||
100 | addr = page_addr; /* ERASE1 */ | 105 | addr = page_addr; /* ERASE1 */ |
101 | } else if (page_addr != -1) { | 106 | } else if (page_addr != -1) { |
102 | /* SEQIN, READ0, etc.. */ | 107 | /* SEQIN, READ0, etc.. */ |
108 | if (flctl->chip.options & NAND_BUSWIDTH_16) | ||
109 | column >>= 1; | ||
103 | if (flctl->page_size) { | 110 | if (flctl->page_size) { |
104 | addr = column & 0x0FFF; | 111 | addr = column & 0x0FFF; |
105 | addr |= (page_addr & 0xff) << 16; | 112 | addr |= (page_addr & 0xff) << 16; |
@@ -132,7 +139,7 @@ static void wait_rfifo_ready(struct sh_flctl *flctl) | |||
132 | return; | 139 | return; |
133 | udelay(1); | 140 | udelay(1); |
134 | } | 141 | } |
135 | printk(KERN_ERR "wait_rfifo_ready(): Timeout occured \n"); | 142 | timeout_error(flctl, __func__); |
136 | } | 143 | } |
137 | 144 | ||
138 | static void wait_wfifo_ready(struct sh_flctl *flctl) | 145 | static void wait_wfifo_ready(struct sh_flctl *flctl) |
@@ -146,7 +153,7 @@ static void wait_wfifo_ready(struct sh_flctl *flctl) | |||
146 | return; | 153 | return; |
147 | udelay(1); | 154 | udelay(1); |
148 | } | 155 | } |
149 | printk(KERN_ERR "wait_wfifo_ready(): Timeout occured \n"); | 156 | timeout_error(flctl, __func__); |
150 | } | 157 | } |
151 | 158 | ||
152 | static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number) | 159 | static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number) |
@@ -198,7 +205,7 @@ static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number) | |||
198 | writel(0, FL4ECCCR(flctl)); | 205 | writel(0, FL4ECCCR(flctl)); |
199 | } | 206 | } |
200 | 207 | ||
201 | printk(KERN_ERR "wait_recfifo_ready(): Timeout occured \n"); | 208 | timeout_error(flctl, __func__); |
202 | return 1; /* timeout */ | 209 | return 1; /* timeout */ |
203 | } | 210 | } |
204 | 211 | ||
@@ -214,7 +221,7 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl) | |||
214 | return; | 221 | return; |
215 | udelay(1); | 222 | udelay(1); |
216 | } | 223 | } |
217 | printk(KERN_ERR "wait_wecfifo_ready(): Timeout occured \n"); | 224 | timeout_error(flctl, __func__); |
218 | } | 225 | } |
219 | 226 | ||
220 | static void read_datareg(struct sh_flctl *flctl, int offset) | 227 | static void read_datareg(struct sh_flctl *flctl, int offset) |
@@ -275,7 +282,7 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) | |||
275 | static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) | 282 | static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) |
276 | { | 283 | { |
277 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 284 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
278 | uint32_t flcmncr_val = readl(FLCMNCR(flctl)); | 285 | uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT; |
279 | uint32_t flcmdcr_val, addr_len_bytes = 0; | 286 | uint32_t flcmdcr_val, addr_len_bytes = 0; |
280 | 287 | ||
281 | /* Set SNAND bit if page size is 2048byte */ | 288 | /* Set SNAND bit if page size is 2048byte */ |
@@ -297,6 +304,8 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va | |||
297 | case NAND_CMD_READOOB: | 304 | case NAND_CMD_READOOB: |
298 | addr_len_bytes = flctl->rw_ADRCNT; | 305 | addr_len_bytes = flctl->rw_ADRCNT; |
299 | flcmdcr_val |= CDSRC_E; | 306 | flcmdcr_val |= CDSRC_E; |
307 | if (flctl->chip.options & NAND_BUSWIDTH_16) | ||
308 | flcmncr_val |= SEL_16BIT; | ||
300 | break; | 309 | break; |
301 | case NAND_CMD_SEQIN: | 310 | case NAND_CMD_SEQIN: |
302 | /* This case is that cmd is READ0 or READ1 or READ00 */ | 311 | /* This case is that cmd is READ0 or READ1 or READ00 */ |
@@ -305,6 +314,8 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va | |||
305 | case NAND_CMD_PAGEPROG: | 314 | case NAND_CMD_PAGEPROG: |
306 | addr_len_bytes = flctl->rw_ADRCNT; | 315 | addr_len_bytes = flctl->rw_ADRCNT; |
307 | flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW; | 316 | flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW; |
317 | if (flctl->chip.options & NAND_BUSWIDTH_16) | ||
318 | flcmncr_val |= SEL_16BIT; | ||
308 | break; | 319 | break; |
309 | case NAND_CMD_READID: | 320 | case NAND_CMD_READID: |
310 | flcmncr_val &= ~SNAND_E; | 321 | flcmncr_val &= ~SNAND_E; |
@@ -523,6 +534,8 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
523 | set_addr(mtd, 0, page_addr); | 534 | set_addr(mtd, 0, page_addr); |
524 | 535 | ||
525 | flctl->read_bytes = mtd->writesize + mtd->oobsize; | 536 | flctl->read_bytes = mtd->writesize + mtd->oobsize; |
537 | if (flctl->chip.options & NAND_BUSWIDTH_16) | ||
538 | column >>= 1; | ||
526 | flctl->index += column; | 539 | flctl->index += column; |
527 | goto read_normal_exit; | 540 | goto read_normal_exit; |
528 | 541 | ||
@@ -686,6 +699,18 @@ static uint8_t flctl_read_byte(struct mtd_info *mtd) | |||
686 | return data; | 699 | return data; |
687 | } | 700 | } |
688 | 701 | ||
702 | static uint16_t flctl_read_word(struct mtd_info *mtd) | ||
703 | { | ||
704 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | ||
705 | int index = flctl->index; | ||
706 | uint16_t data; | ||
707 | uint16_t *buf = (uint16_t *)&flctl->done_buff[index]; | ||
708 | |||
709 | data = *buf; | ||
710 | flctl->index += 2; | ||
711 | return data; | ||
712 | } | ||
713 | |||
689 | static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | 714 | static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
690 | { | 715 | { |
691 | int i; | 716 | int i; |
@@ -769,38 +794,36 @@ static int flctl_chip_init_tail(struct mtd_info *mtd) | |||
769 | return 0; | 794 | return 0; |
770 | } | 795 | } |
771 | 796 | ||
772 | static int __init flctl_probe(struct platform_device *pdev) | 797 | static int __devinit flctl_probe(struct platform_device *pdev) |
773 | { | 798 | { |
774 | struct resource *res; | 799 | struct resource *res; |
775 | struct sh_flctl *flctl; | 800 | struct sh_flctl *flctl; |
776 | struct mtd_info *flctl_mtd; | 801 | struct mtd_info *flctl_mtd; |
777 | struct nand_chip *nand; | 802 | struct nand_chip *nand; |
778 | struct sh_flctl_platform_data *pdata; | 803 | struct sh_flctl_platform_data *pdata; |
779 | int ret; | 804 | int ret = -ENXIO; |
780 | 805 | ||
781 | pdata = pdev->dev.platform_data; | 806 | pdata = pdev->dev.platform_data; |
782 | if (pdata == NULL) { | 807 | if (pdata == NULL) { |
783 | printk(KERN_ERR "sh_flctl platform_data not found.\n"); | 808 | dev_err(&pdev->dev, "no platform data defined\n"); |
784 | return -ENODEV; | 809 | return -EINVAL; |
785 | } | 810 | } |
786 | 811 | ||
787 | flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL); | 812 | flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL); |
788 | if (!flctl) { | 813 | if (!flctl) { |
789 | printk(KERN_ERR "Unable to allocate NAND MTD dev structure.\n"); | 814 | dev_err(&pdev->dev, "failed to allocate driver data\n"); |
790 | return -ENOMEM; | 815 | return -ENOMEM; |
791 | } | 816 | } |
792 | 817 | ||
793 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 818 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
794 | if (!res) { | 819 | if (!res) { |
795 | printk(KERN_ERR "%s: resource not found.\n", __func__); | 820 | dev_err(&pdev->dev, "failed to get I/O memory\n"); |
796 | ret = -ENODEV; | ||
797 | goto err; | 821 | goto err; |
798 | } | 822 | } |
799 | 823 | ||
800 | flctl->reg = ioremap(res->start, res->end - res->start + 1); | 824 | flctl->reg = ioremap(res->start, resource_size(res)); |
801 | if (flctl->reg == NULL) { | 825 | if (flctl->reg == NULL) { |
802 | printk(KERN_ERR "%s: ioremap error.\n", __func__); | 826 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); |
803 | ret = -ENOMEM; | ||
804 | goto err; | 827 | goto err; |
805 | } | 828 | } |
806 | 829 | ||
@@ -808,6 +831,7 @@ static int __init flctl_probe(struct platform_device *pdev) | |||
808 | flctl_mtd = &flctl->mtd; | 831 | flctl_mtd = &flctl->mtd; |
809 | nand = &flctl->chip; | 832 | nand = &flctl->chip; |
810 | flctl_mtd->priv = nand; | 833 | flctl_mtd->priv = nand; |
834 | flctl->pdev = pdev; | ||
811 | flctl->hwecc = pdata->has_hwecc; | 835 | flctl->hwecc = pdata->has_hwecc; |
812 | 836 | ||
813 | flctl_register_init(flctl, pdata->flcmncr_val); | 837 | flctl_register_init(flctl, pdata->flcmncr_val); |
@@ -825,6 +849,11 @@ static int __init flctl_probe(struct platform_device *pdev) | |||
825 | nand->select_chip = flctl_select_chip; | 849 | nand->select_chip = flctl_select_chip; |
826 | nand->cmdfunc = flctl_cmdfunc; | 850 | nand->cmdfunc = flctl_cmdfunc; |
827 | 851 | ||
852 | if (pdata->flcmncr_val & SEL_16BIT) { | ||
853 | nand->options |= NAND_BUSWIDTH_16; | ||
854 | nand->read_word = flctl_read_word; | ||
855 | } | ||
856 | |||
828 | ret = nand_scan_ident(flctl_mtd, 1); | 857 | ret = nand_scan_ident(flctl_mtd, 1); |
829 | if (ret) | 858 | if (ret) |
830 | goto err; | 859 | goto err; |
@@ -846,7 +875,7 @@ err: | |||
846 | return ret; | 875 | return ret; |
847 | } | 876 | } |
848 | 877 | ||
849 | static int __exit flctl_remove(struct platform_device *pdev) | 878 | static int __devexit flctl_remove(struct platform_device *pdev) |
850 | { | 879 | { |
851 | struct sh_flctl *flctl = platform_get_drvdata(pdev); | 880 | struct sh_flctl *flctl = platform_get_drvdata(pdev); |
852 | 881 | ||
diff --git a/drivers/net/mace.c b/drivers/net/mace.c index d9fbad386389..43aea91e3369 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c | |||
@@ -206,7 +206,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i | |||
206 | mp->port_aaui = port_aaui; | 206 | mp->port_aaui = port_aaui; |
207 | else { | 207 | else { |
208 | /* Apple Network Server uses the AAUI port */ | 208 | /* Apple Network Server uses the AAUI port */ |
209 | if (machine_is_compatible("AAPL,ShinerESB")) | 209 | if (of_machine_is_compatible("AAPL,ShinerESB")) |
210 | mp->port_aaui = 1; | 210 | mp->port_aaui = 1; |
211 | else { | 211 | else { |
212 | #ifdef CONFIG_MACE_AAUI_PORT | 212 | #ifdef CONFIG_MACE_AAUI_PORT |
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index d2fa27c5c1b2..7cecc8fea9bd 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
@@ -1,3 +1,11 @@ | |||
1 | config OF_FLATTREE | ||
2 | bool | ||
3 | depends on OF | ||
4 | |||
5 | config OF_DYNAMIC | ||
6 | def_bool y | ||
7 | depends on OF && PPC_OF | ||
8 | |||
1 | config OF_DEVICE | 9 | config OF_DEVICE |
2 | def_bool y | 10 | def_bool y |
3 | depends on OF && (SPARC || PPC_OF || MICROBLAZE) | 11 | depends on OF && (SPARC || PPC_OF || MICROBLAZE) |
diff --git a/drivers/of/Makefile b/drivers/of/Makefile index bdfb5f5d4b06..f232cc98ce00 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | obj-y = base.o | 1 | obj-y = base.o |
2 | obj-$(CONFIG_OF_FLATTREE) += fdt.o | ||
2 | obj-$(CONFIG_OF_DEVICE) += device.o platform.o | 3 | obj-$(CONFIG_OF_DEVICE) += device.o platform.o |
3 | obj-$(CONFIG_OF_GPIO) += gpio.o | 4 | obj-$(CONFIG_OF_GPIO) += gpio.o |
4 | obj-$(CONFIG_OF_I2C) += of_i2c.o | 5 | obj-$(CONFIG_OF_I2C) += of_i2c.o |
diff --git a/drivers/of/base.c b/drivers/of/base.c index e6627b2320f1..cb96888d1427 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -20,8 +20,10 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/of.h> | 21 | #include <linux/of.h> |
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/proc_fs.h> | ||
23 | 24 | ||
24 | struct device_node *allnodes; | 25 | struct device_node *allnodes; |
26 | struct device_node *of_chosen; | ||
25 | 27 | ||
26 | /* use when traversing tree through the allnext, child, sibling, | 28 | /* use when traversing tree through the allnext, child, sibling, |
27 | * or parent members of struct device_node. | 29 | * or parent members of struct device_node. |
@@ -37,7 +39,7 @@ int of_n_addr_cells(struct device_node *np) | |||
37 | np = np->parent; | 39 | np = np->parent; |
38 | ip = of_get_property(np, "#address-cells", NULL); | 40 | ip = of_get_property(np, "#address-cells", NULL); |
39 | if (ip) | 41 | if (ip) |
40 | return *ip; | 42 | return be32_to_cpup(ip); |
41 | } while (np->parent); | 43 | } while (np->parent); |
42 | /* No #address-cells property for the root node */ | 44 | /* No #address-cells property for the root node */ |
43 | return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; | 45 | return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; |
@@ -53,13 +55,88 @@ int of_n_size_cells(struct device_node *np) | |||
53 | np = np->parent; | 55 | np = np->parent; |
54 | ip = of_get_property(np, "#size-cells", NULL); | 56 | ip = of_get_property(np, "#size-cells", NULL); |
55 | if (ip) | 57 | if (ip) |
56 | return *ip; | 58 | return be32_to_cpup(ip); |
57 | } while (np->parent); | 59 | } while (np->parent); |
58 | /* No #size-cells property for the root node */ | 60 | /* No #size-cells property for the root node */ |
59 | return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; | 61 | return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; |
60 | } | 62 | } |
61 | EXPORT_SYMBOL(of_n_size_cells); | 63 | EXPORT_SYMBOL(of_n_size_cells); |
62 | 64 | ||
65 | #if !defined(CONFIG_SPARC) /* SPARC doesn't do ref counting (yet) */ | ||
66 | /** | ||
67 | * of_node_get - Increment refcount of a node | ||
68 | * @node: Node to inc refcount, NULL is supported to | ||
69 | * simplify writing of callers | ||
70 | * | ||
71 | * Returns node. | ||
72 | */ | ||
73 | struct device_node *of_node_get(struct device_node *node) | ||
74 | { | ||
75 | if (node) | ||
76 | kref_get(&node->kref); | ||
77 | return node; | ||
78 | } | ||
79 | EXPORT_SYMBOL(of_node_get); | ||
80 | |||
81 | static inline struct device_node *kref_to_device_node(struct kref *kref) | ||
82 | { | ||
83 | return container_of(kref, struct device_node, kref); | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * of_node_release - release a dynamically allocated node | ||
88 | * @kref: kref element of the node to be released | ||
89 | * | ||
90 | * In of_node_put() this function is passed to kref_put() | ||
91 | * as the destructor. | ||
92 | */ | ||
93 | static void of_node_release(struct kref *kref) | ||
94 | { | ||
95 | struct device_node *node = kref_to_device_node(kref); | ||
96 | struct property *prop = node->properties; | ||
97 | |||
98 | /* We should never be releasing nodes that haven't been detached. */ | ||
99 | if (!of_node_check_flag(node, OF_DETACHED)) { | ||
100 | pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); | ||
101 | dump_stack(); | ||
102 | kref_init(&node->kref); | ||
103 | return; | ||
104 | } | ||
105 | |||
106 | if (!of_node_check_flag(node, OF_DYNAMIC)) | ||
107 | return; | ||
108 | |||
109 | while (prop) { | ||
110 | struct property *next = prop->next; | ||
111 | kfree(prop->name); | ||
112 | kfree(prop->value); | ||
113 | kfree(prop); | ||
114 | prop = next; | ||
115 | |||
116 | if (!prop) { | ||
117 | prop = node->deadprops; | ||
118 | node->deadprops = NULL; | ||
119 | } | ||
120 | } | ||
121 | kfree(node->full_name); | ||
122 | kfree(node->data); | ||
123 | kfree(node); | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * of_node_put - Decrement refcount of a node | ||
128 | * @node: Node to dec refcount, NULL is supported to | ||
129 | * simplify writing of callers | ||
130 | * | ||
131 | */ | ||
132 | void of_node_put(struct device_node *node) | ||
133 | { | ||
134 | if (node) | ||
135 | kref_put(&node->kref, of_node_release); | ||
136 | } | ||
137 | EXPORT_SYMBOL(of_node_put); | ||
138 | #endif /* !CONFIG_SPARC */ | ||
139 | |||
63 | struct property *of_find_property(const struct device_node *np, | 140 | struct property *of_find_property(const struct device_node *np, |
64 | const char *name, | 141 | const char *name, |
65 | int *lenp) | 142 | int *lenp) |
@@ -144,6 +221,27 @@ int of_device_is_compatible(const struct device_node *device, | |||
144 | EXPORT_SYMBOL(of_device_is_compatible); | 221 | EXPORT_SYMBOL(of_device_is_compatible); |
145 | 222 | ||
146 | /** | 223 | /** |
224 | * of_machine_is_compatible - Test root of device tree for a given compatible value | ||
225 | * @compat: compatible string to look for in root node's compatible property. | ||
226 | * | ||
227 | * Returns true if the root node has the given value in its | ||
228 | * compatible property. | ||
229 | */ | ||
230 | int of_machine_is_compatible(const char *compat) | ||
231 | { | ||
232 | struct device_node *root; | ||
233 | int rc = 0; | ||
234 | |||
235 | root = of_find_node_by_path("/"); | ||
236 | if (root) { | ||
237 | rc = of_device_is_compatible(root, compat); | ||
238 | of_node_put(root); | ||
239 | } | ||
240 | return rc; | ||
241 | } | ||
242 | EXPORT_SYMBOL(of_machine_is_compatible); | ||
243 | |||
244 | /** | ||
147 | * of_device_is_available - check if a device is available for use | 245 | * of_device_is_available - check if a device is available for use |
148 | * | 246 | * |
149 | * @device: Node to check for availability | 247 | * @device: Node to check for availability |
@@ -519,6 +617,27 @@ int of_modalias_node(struct device_node *node, char *modalias, int len) | |||
519 | EXPORT_SYMBOL_GPL(of_modalias_node); | 617 | EXPORT_SYMBOL_GPL(of_modalias_node); |
520 | 618 | ||
521 | /** | 619 | /** |
620 | * of_find_node_by_phandle - Find a node given a phandle | ||
621 | * @handle: phandle of the node to find | ||
622 | * | ||
623 | * Returns a node pointer with refcount incremented, use | ||
624 | * of_node_put() on it when done. | ||
625 | */ | ||
626 | struct device_node *of_find_node_by_phandle(phandle handle) | ||
627 | { | ||
628 | struct device_node *np; | ||
629 | |||
630 | read_lock(&devtree_lock); | ||
631 | for (np = allnodes; np; np = np->allnext) | ||
632 | if (np->phandle == handle) | ||
633 | break; | ||
634 | of_node_get(np); | ||
635 | read_unlock(&devtree_lock); | ||
636 | return np; | ||
637 | } | ||
638 | EXPORT_SYMBOL(of_find_node_by_phandle); | ||
639 | |||
640 | /** | ||
522 | * of_parse_phandle - Resolve a phandle property to a device_node pointer | 641 | * of_parse_phandle - Resolve a phandle property to a device_node pointer |
523 | * @np: Pointer to device node holding phandle property | 642 | * @np: Pointer to device node holding phandle property |
524 | * @phandle_name: Name of property holding a phandle value | 643 | * @phandle_name: Name of property holding a phandle value |
@@ -578,8 +697,8 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, | |||
578 | const void **out_args) | 697 | const void **out_args) |
579 | { | 698 | { |
580 | int ret = -EINVAL; | 699 | int ret = -EINVAL; |
581 | const u32 *list; | 700 | const __be32 *list; |
582 | const u32 *list_end; | 701 | const __be32 *list_end; |
583 | int size; | 702 | int size; |
584 | int cur_index = 0; | 703 | int cur_index = 0; |
585 | struct device_node *node = NULL; | 704 | struct device_node *node = NULL; |
@@ -593,7 +712,7 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, | |||
593 | list_end = list + size / sizeof(*list); | 712 | list_end = list + size / sizeof(*list); |
594 | 713 | ||
595 | while (list < list_end) { | 714 | while (list < list_end) { |
596 | const u32 *cells; | 715 | const __be32 *cells; |
597 | const phandle *phandle; | 716 | const phandle *phandle; |
598 | 717 | ||
599 | phandle = list++; | 718 | phandle = list++; |
@@ -617,7 +736,7 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, | |||
617 | goto err1; | 736 | goto err1; |
618 | } | 737 | } |
619 | 738 | ||
620 | list += *cells; | 739 | list += be32_to_cpup(cells); |
621 | if (list > list_end) { | 740 | if (list > list_end) { |
622 | pr_debug("%s: insufficient arguments length\n", | 741 | pr_debug("%s: insufficient arguments length\n", |
623 | np->full_name); | 742 | np->full_name); |
@@ -658,3 +777,190 @@ err0: | |||
658 | return ret; | 777 | return ret; |
659 | } | 778 | } |
660 | EXPORT_SYMBOL(of_parse_phandles_with_args); | 779 | EXPORT_SYMBOL(of_parse_phandles_with_args); |
780 | |||
781 | /** | ||
782 | * prom_add_property - Add a property to a node | ||
783 | */ | ||
784 | int prom_add_property(struct device_node *np, struct property *prop) | ||
785 | { | ||
786 | struct property **next; | ||
787 | unsigned long flags; | ||
788 | |||
789 | prop->next = NULL; | ||
790 | write_lock_irqsave(&devtree_lock, flags); | ||
791 | next = &np->properties; | ||
792 | while (*next) { | ||
793 | if (strcmp(prop->name, (*next)->name) == 0) { | ||
794 | /* duplicate ! don't insert it */ | ||
795 | write_unlock_irqrestore(&devtree_lock, flags); | ||
796 | return -1; | ||
797 | } | ||
798 | next = &(*next)->next; | ||
799 | } | ||
800 | *next = prop; | ||
801 | write_unlock_irqrestore(&devtree_lock, flags); | ||
802 | |||
803 | #ifdef CONFIG_PROC_DEVICETREE | ||
804 | /* try to add to proc as well if it was initialized */ | ||
805 | if (np->pde) | ||
806 | proc_device_tree_add_prop(np->pde, prop); | ||
807 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
808 | |||
809 | return 0; | ||
810 | } | ||
811 | |||
812 | /** | ||
813 | * prom_remove_property - Remove a property from a node. | ||
814 | * | ||
815 | * Note that we don't actually remove it, since we have given out | ||
816 | * who-knows-how-many pointers to the data using get-property. | ||
817 | * Instead we just move the property to the "dead properties" | ||
818 | * list, so it won't be found any more. | ||
819 | */ | ||
820 | int prom_remove_property(struct device_node *np, struct property *prop) | ||
821 | { | ||
822 | struct property **next; | ||
823 | unsigned long flags; | ||
824 | int found = 0; | ||
825 | |||
826 | write_lock_irqsave(&devtree_lock, flags); | ||
827 | next = &np->properties; | ||
828 | while (*next) { | ||
829 | if (*next == prop) { | ||
830 | /* found the node */ | ||
831 | *next = prop->next; | ||
832 | prop->next = np->deadprops; | ||
833 | np->deadprops = prop; | ||
834 | found = 1; | ||
835 | break; | ||
836 | } | ||
837 | next = &(*next)->next; | ||
838 | } | ||
839 | write_unlock_irqrestore(&devtree_lock, flags); | ||
840 | |||
841 | if (!found) | ||
842 | return -ENODEV; | ||
843 | |||
844 | #ifdef CONFIG_PROC_DEVICETREE | ||
845 | /* try to remove the proc node as well */ | ||
846 | if (np->pde) | ||
847 | proc_device_tree_remove_prop(np->pde, prop); | ||
848 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
849 | |||
850 | return 0; | ||
851 | } | ||
852 | |||
853 | /* | ||
854 | * prom_update_property - Update a property in a node. | ||
855 | * | ||
856 | * Note that we don't actually remove it, since we have given out | ||
857 | * who-knows-how-many pointers to the data using get-property. | ||
858 | * Instead we just move the property to the "dead properties" list, | ||
859 | * and add the new property to the property list | ||
860 | */ | ||
861 | int prom_update_property(struct device_node *np, | ||
862 | struct property *newprop, | ||
863 | struct property *oldprop) | ||
864 | { | ||
865 | struct property **next; | ||
866 | unsigned long flags; | ||
867 | int found = 0; | ||
868 | |||
869 | write_lock_irqsave(&devtree_lock, flags); | ||
870 | next = &np->properties; | ||
871 | while (*next) { | ||
872 | if (*next == oldprop) { | ||
873 | /* found the node */ | ||
874 | newprop->next = oldprop->next; | ||
875 | *next = newprop; | ||
876 | oldprop->next = np->deadprops; | ||
877 | np->deadprops = oldprop; | ||
878 | found = 1; | ||
879 | break; | ||
880 | } | ||
881 | next = &(*next)->next; | ||
882 | } | ||
883 | write_unlock_irqrestore(&devtree_lock, flags); | ||
884 | |||
885 | if (!found) | ||
886 | return -ENODEV; | ||
887 | |||
888 | #ifdef CONFIG_PROC_DEVICETREE | ||
889 | /* try to add to proc as well if it was initialized */ | ||
890 | if (np->pde) | ||
891 | proc_device_tree_update_prop(np->pde, newprop, oldprop); | ||
892 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
893 | |||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | #if defined(CONFIG_OF_DYNAMIC) | ||
898 | /* | ||
899 | * Support for dynamic device trees. | ||
900 | * | ||
901 | * On some platforms, the device tree can be manipulated at runtime. | ||
902 | * The routines in this section support adding, removing and changing | ||
903 | * device tree nodes. | ||
904 | */ | ||
905 | |||
906 | /** | ||
907 | * of_attach_node - Plug a device node into the tree and global list. | ||
908 | */ | ||
909 | void of_attach_node(struct device_node *np) | ||
910 | { | ||
911 | unsigned long flags; | ||
912 | |||
913 | write_lock_irqsave(&devtree_lock, flags); | ||
914 | np->sibling = np->parent->child; | ||
915 | np->allnext = allnodes; | ||
916 | np->parent->child = np; | ||
917 | allnodes = np; | ||
918 | write_unlock_irqrestore(&devtree_lock, flags); | ||
919 | } | ||
920 | |||
921 | /** | ||
922 | * of_detach_node - "Unplug" a node from the device tree. | ||
923 | * | ||
924 | * The caller must hold a reference to the node. The memory associated with | ||
925 | * the node is not freed until its refcount goes to zero. | ||
926 | */ | ||
927 | void of_detach_node(struct device_node *np) | ||
928 | { | ||
929 | struct device_node *parent; | ||
930 | unsigned long flags; | ||
931 | |||
932 | write_lock_irqsave(&devtree_lock, flags); | ||
933 | |||
934 | parent = np->parent; | ||
935 | if (!parent) | ||
936 | goto out_unlock; | ||
937 | |||
938 | if (allnodes == np) | ||
939 | allnodes = np->allnext; | ||
940 | else { | ||
941 | struct device_node *prev; | ||
942 | for (prev = allnodes; | ||
943 | prev->allnext != np; | ||
944 | prev = prev->allnext) | ||
945 | ; | ||
946 | prev->allnext = np->allnext; | ||
947 | } | ||
948 | |||
949 | if (parent->child == np) | ||
950 | parent->child = np->sibling; | ||
951 | else { | ||
952 | struct device_node *prevsib; | ||
953 | for (prevsib = np->parent->child; | ||
954 | prevsib->sibling != np; | ||
955 | prevsib = prevsib->sibling) | ||
956 | ; | ||
957 | prevsib->sibling = np->sibling; | ||
958 | } | ||
959 | |||
960 | of_node_set_flag(np, OF_DETACHED); | ||
961 | |||
962 | out_unlock: | ||
963 | write_unlock_irqrestore(&devtree_lock, flags); | ||
964 | } | ||
965 | #endif /* defined(CONFIG_OF_DYNAMIC) */ | ||
966 | |||
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c new file mode 100644 index 000000000000..406757a9d7ea --- /dev/null +++ b/drivers/of/fdt.c | |||
@@ -0,0 +1,590 @@ | |||
1 | /* | ||
2 | * Functions for working with the Flattened Device Tree data format | ||
3 | * | ||
4 | * Copyright 2009 Benjamin Herrenschmidt, IBM Corp | ||
5 | * benh@kernel.crashing.org | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/initrd.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_fdt.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/errno.h> | ||
18 | |||
19 | #ifdef CONFIG_PPC | ||
20 | #include <asm/machdep.h> | ||
21 | #endif /* CONFIG_PPC */ | ||
22 | |||
23 | #include <asm/page.h> | ||
24 | |||
25 | int __initdata dt_root_addr_cells; | ||
26 | int __initdata dt_root_size_cells; | ||
27 | |||
28 | struct boot_param_header *initial_boot_params; | ||
29 | |||
30 | char *find_flat_dt_string(u32 offset) | ||
31 | { | ||
32 | return ((char *)initial_boot_params) + | ||
33 | be32_to_cpu(initial_boot_params->off_dt_strings) + offset; | ||
34 | } | ||
35 | |||
36 | /** | ||
37 | * of_scan_flat_dt - scan flattened tree blob and call callback on each. | ||
38 | * @it: callback function | ||
39 | * @data: context data pointer | ||
40 | * | ||
41 | * This function is used to scan the flattened device-tree, it is | ||
42 | * used to extract the memory information at boot before we can | ||
43 | * unflatten the tree | ||
44 | */ | ||
45 | int __init of_scan_flat_dt(int (*it)(unsigned long node, | ||
46 | const char *uname, int depth, | ||
47 | void *data), | ||
48 | void *data) | ||
49 | { | ||
50 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
51 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
52 | int rc = 0; | ||
53 | int depth = -1; | ||
54 | |||
55 | do { | ||
56 | u32 tag = be32_to_cpup((__be32 *)p); | ||
57 | char *pathp; | ||
58 | |||
59 | p += 4; | ||
60 | if (tag == OF_DT_END_NODE) { | ||
61 | depth--; | ||
62 | continue; | ||
63 | } | ||
64 | if (tag == OF_DT_NOP) | ||
65 | continue; | ||
66 | if (tag == OF_DT_END) | ||
67 | break; | ||
68 | if (tag == OF_DT_PROP) { | ||
69 | u32 sz = be32_to_cpup((__be32 *)p); | ||
70 | p += 8; | ||
71 | if (be32_to_cpu(initial_boot_params->version) < 0x10) | ||
72 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
73 | p += sz; | ||
74 | p = _ALIGN(p, 4); | ||
75 | continue; | ||
76 | } | ||
77 | if (tag != OF_DT_BEGIN_NODE) { | ||
78 | pr_err("Invalid tag %x in flat device tree!\n", tag); | ||
79 | return -EINVAL; | ||
80 | } | ||
81 | depth++; | ||
82 | pathp = (char *)p; | ||
83 | p = _ALIGN(p + strlen(pathp) + 1, 4); | ||
84 | if ((*pathp) == '/') { | ||
85 | char *lp, *np; | ||
86 | for (lp = NULL, np = pathp; *np; np++) | ||
87 | if ((*np) == '/') | ||
88 | lp = np+1; | ||
89 | if (lp != NULL) | ||
90 | pathp = lp; | ||
91 | } | ||
92 | rc = it(p, pathp, depth, data); | ||
93 | if (rc != 0) | ||
94 | break; | ||
95 | } while (1); | ||
96 | |||
97 | return rc; | ||
98 | } | ||
99 | |||
100 | /** | ||
101 | * of_get_flat_dt_root - find the root node in the flat blob | ||
102 | */ | ||
103 | unsigned long __init of_get_flat_dt_root(void) | ||
104 | { | ||
105 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
106 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
107 | |||
108 | while (be32_to_cpup((__be32 *)p) == OF_DT_NOP) | ||
109 | p += 4; | ||
110 | BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE); | ||
111 | p += 4; | ||
112 | return _ALIGN(p + strlen((char *)p) + 1, 4); | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr | ||
117 | * | ||
118 | * This function can be used within scan_flattened_dt callback to get | ||
119 | * access to properties | ||
120 | */ | ||
121 | void *__init of_get_flat_dt_prop(unsigned long node, const char *name, | ||
122 | unsigned long *size) | ||
123 | { | ||
124 | unsigned long p = node; | ||
125 | |||
126 | do { | ||
127 | u32 tag = be32_to_cpup((__be32 *)p); | ||
128 | u32 sz, noff; | ||
129 | const char *nstr; | ||
130 | |||
131 | p += 4; | ||
132 | if (tag == OF_DT_NOP) | ||
133 | continue; | ||
134 | if (tag != OF_DT_PROP) | ||
135 | return NULL; | ||
136 | |||
137 | sz = be32_to_cpup((__be32 *)p); | ||
138 | noff = be32_to_cpup((__be32 *)(p + 4)); | ||
139 | p += 8; | ||
140 | if (be32_to_cpu(initial_boot_params->version) < 0x10) | ||
141 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
142 | |||
143 | nstr = find_flat_dt_string(noff); | ||
144 | if (nstr == NULL) { | ||
145 | pr_warning("Can't find property index name !\n"); | ||
146 | return NULL; | ||
147 | } | ||
148 | if (strcmp(name, nstr) == 0) { | ||
149 | if (size) | ||
150 | *size = sz; | ||
151 | return (void *)p; | ||
152 | } | ||
153 | p += sz; | ||
154 | p = _ALIGN(p, 4); | ||
155 | } while (1); | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * of_flat_dt_is_compatible - Return true if given node has compat in compatible list | ||
160 | * @node: node to test | ||
161 | * @compat: compatible string to compare with compatible list. | ||
162 | */ | ||
163 | int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) | ||
164 | { | ||
165 | const char *cp; | ||
166 | unsigned long cplen, l; | ||
167 | |||
168 | cp = of_get_flat_dt_prop(node, "compatible", &cplen); | ||
169 | if (cp == NULL) | ||
170 | return 0; | ||
171 | while (cplen > 0) { | ||
172 | if (strncasecmp(cp, compat, strlen(compat)) == 0) | ||
173 | return 1; | ||
174 | l = strlen(cp) + 1; | ||
175 | cp += l; | ||
176 | cplen -= l; | ||
177 | } | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, | ||
183 | unsigned long align) | ||
184 | { | ||
185 | void *res; | ||
186 | |||
187 | *mem = _ALIGN(*mem, align); | ||
188 | res = (void *)*mem; | ||
189 | *mem += size; | ||
190 | |||
191 | return res; | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * unflatten_dt_node - Alloc and populate a device_node from the flat tree | ||
196 | * @p: pointer to node in flat tree | ||
197 | * @dad: Parent struct device_node | ||
198 | * @allnextpp: pointer to ->allnext from last allocated device_node | ||
199 | * @fpsize: Size of the node path up at the current depth. | ||
200 | */ | ||
201 | unsigned long __init unflatten_dt_node(unsigned long mem, | ||
202 | unsigned long *p, | ||
203 | struct device_node *dad, | ||
204 | struct device_node ***allnextpp, | ||
205 | unsigned long fpsize) | ||
206 | { | ||
207 | struct device_node *np; | ||
208 | struct property *pp, **prev_pp = NULL; | ||
209 | char *pathp; | ||
210 | u32 tag; | ||
211 | unsigned int l, allocl; | ||
212 | int has_name = 0; | ||
213 | int new_format = 0; | ||
214 | |||
215 | tag = be32_to_cpup((__be32 *)(*p)); | ||
216 | if (tag != OF_DT_BEGIN_NODE) { | ||
217 | pr_err("Weird tag at start of node: %x\n", tag); | ||
218 | return mem; | ||
219 | } | ||
220 | *p += 4; | ||
221 | pathp = (char *)*p; | ||
222 | l = allocl = strlen(pathp) + 1; | ||
223 | *p = _ALIGN(*p + l, 4); | ||
224 | |||
225 | /* version 0x10 has a more compact unit name here instead of the full | ||
226 | * path. we accumulate the full path size using "fpsize", we'll rebuild | ||
227 | * it later. We detect this because the first character of the name is | ||
228 | * not '/'. | ||
229 | */ | ||
230 | if ((*pathp) != '/') { | ||
231 | new_format = 1; | ||
232 | if (fpsize == 0) { | ||
233 | /* root node: special case. fpsize accounts for path | ||
234 | * plus terminating zero. root node only has '/', so | ||
235 | * fpsize should be 2, but we want to avoid the first | ||
236 | * level nodes to have two '/' so we use fpsize 1 here | ||
237 | */ | ||
238 | fpsize = 1; | ||
239 | allocl = 2; | ||
240 | } else { | ||
241 | /* account for '/' and path size minus terminal 0 | ||
242 | * already in 'l' | ||
243 | */ | ||
244 | fpsize += l; | ||
245 | allocl = fpsize; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, | ||
250 | __alignof__(struct device_node)); | ||
251 | if (allnextpp) { | ||
252 | memset(np, 0, sizeof(*np)); | ||
253 | np->full_name = ((char *)np) + sizeof(struct device_node); | ||
254 | if (new_format) { | ||
255 | char *fn = np->full_name; | ||
256 | /* rebuild full path for new format */ | ||
257 | if (dad && dad->parent) { | ||
258 | strcpy(fn, dad->full_name); | ||
259 | #ifdef DEBUG | ||
260 | if ((strlen(fn) + l + 1) != allocl) { | ||
261 | pr_debug("%s: p: %d, l: %d, a: %d\n", | ||
262 | pathp, (int)strlen(fn), | ||
263 | l, allocl); | ||
264 | } | ||
265 | #endif | ||
266 | fn += strlen(fn); | ||
267 | } | ||
268 | *(fn++) = '/'; | ||
269 | memcpy(fn, pathp, l); | ||
270 | } else | ||
271 | memcpy(np->full_name, pathp, l); | ||
272 | prev_pp = &np->properties; | ||
273 | **allnextpp = np; | ||
274 | *allnextpp = &np->allnext; | ||
275 | if (dad != NULL) { | ||
276 | np->parent = dad; | ||
277 | /* we temporarily use the next field as `last_child'*/ | ||
278 | if (dad->next == NULL) | ||
279 | dad->child = np; | ||
280 | else | ||
281 | dad->next->sibling = np; | ||
282 | dad->next = np; | ||
283 | } | ||
284 | kref_init(&np->kref); | ||
285 | } | ||
286 | while (1) { | ||
287 | u32 sz, noff; | ||
288 | char *pname; | ||
289 | |||
290 | tag = be32_to_cpup((__be32 *)(*p)); | ||
291 | if (tag == OF_DT_NOP) { | ||
292 | *p += 4; | ||
293 | continue; | ||
294 | } | ||
295 | if (tag != OF_DT_PROP) | ||
296 | break; | ||
297 | *p += 4; | ||
298 | sz = be32_to_cpup((__be32 *)(*p)); | ||
299 | noff = be32_to_cpup((__be32 *)((*p) + 4)); | ||
300 | *p += 8; | ||
301 | if (be32_to_cpu(initial_boot_params->version) < 0x10) | ||
302 | *p = _ALIGN(*p, sz >= 8 ? 8 : 4); | ||
303 | |||
304 | pname = find_flat_dt_string(noff); | ||
305 | if (pname == NULL) { | ||
306 | pr_info("Can't find property name in list !\n"); | ||
307 | break; | ||
308 | } | ||
309 | if (strcmp(pname, "name") == 0) | ||
310 | has_name = 1; | ||
311 | l = strlen(pname) + 1; | ||
312 | pp = unflatten_dt_alloc(&mem, sizeof(struct property), | ||
313 | __alignof__(struct property)); | ||
314 | if (allnextpp) { | ||
315 | /* We accept flattened tree phandles either in | ||
316 | * ePAPR-style "phandle" properties, or the | ||
317 | * legacy "linux,phandle" properties. If both | ||
318 | * appear and have different values, things | ||
319 | * will get weird. Don't do that. */ | ||
320 | if ((strcmp(pname, "phandle") == 0) || | ||
321 | (strcmp(pname, "linux,phandle") == 0)) { | ||
322 | if (np->phandle == 0) | ||
323 | np->phandle = *((u32 *)*p); | ||
324 | } | ||
325 | /* And we process the "ibm,phandle" property | ||
326 | * used in pSeries dynamic device tree | ||
327 | * stuff */ | ||
328 | if (strcmp(pname, "ibm,phandle") == 0) | ||
329 | np->phandle = *((u32 *)*p); | ||
330 | pp->name = pname; | ||
331 | pp->length = sz; | ||
332 | pp->value = (void *)*p; | ||
333 | *prev_pp = pp; | ||
334 | prev_pp = &pp->next; | ||
335 | } | ||
336 | *p = _ALIGN((*p) + sz, 4); | ||
337 | } | ||
338 | /* with version 0x10 we may not have the name property, recreate | ||
339 | * it here from the unit name if absent | ||
340 | */ | ||
341 | if (!has_name) { | ||
342 | char *p1 = pathp, *ps = pathp, *pa = NULL; | ||
343 | int sz; | ||
344 | |||
345 | while (*p1) { | ||
346 | if ((*p1) == '@') | ||
347 | pa = p1; | ||
348 | if ((*p1) == '/') | ||
349 | ps = p1 + 1; | ||
350 | p1++; | ||
351 | } | ||
352 | if (pa < ps) | ||
353 | pa = p1; | ||
354 | sz = (pa - ps) + 1; | ||
355 | pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, | ||
356 | __alignof__(struct property)); | ||
357 | if (allnextpp) { | ||
358 | pp->name = "name"; | ||
359 | pp->length = sz; | ||
360 | pp->value = pp + 1; | ||
361 | *prev_pp = pp; | ||
362 | prev_pp = &pp->next; | ||
363 | memcpy(pp->value, ps, sz - 1); | ||
364 | ((char *)pp->value)[sz - 1] = 0; | ||
365 | pr_debug("fixed up name for %s -> %s\n", pathp, | ||
366 | (char *)pp->value); | ||
367 | } | ||
368 | } | ||
369 | if (allnextpp) { | ||
370 | *prev_pp = NULL; | ||
371 | np->name = of_get_property(np, "name", NULL); | ||
372 | np->type = of_get_property(np, "device_type", NULL); | ||
373 | |||
374 | if (!np->name) | ||
375 | np->name = "<NULL>"; | ||
376 | if (!np->type) | ||
377 | np->type = "<NULL>"; | ||
378 | } | ||
379 | while (tag == OF_DT_BEGIN_NODE) { | ||
380 | mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); | ||
381 | tag = be32_to_cpup((__be32 *)(*p)); | ||
382 | } | ||
383 | if (tag != OF_DT_END_NODE) { | ||
384 | pr_err("Weird tag at end of node: %x\n", tag); | ||
385 | return mem; | ||
386 | } | ||
387 | *p += 4; | ||
388 | return mem; | ||
389 | } | ||
390 | |||
#ifdef CONFIG_BLK_DEV_INITRD
/**
 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
 * @node: reference to node containing initrd location ('chosen')
 */
void __init early_init_dt_check_for_initrd(unsigned long node)
{
	__be32 *prop;
	unsigned long len, start, end;

	pr_debug("Looking for initrd properties... ");

	/* Both start and end properties must be present; bail silently
	 * if either one is missing. */
	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (prop == NULL)
		return;
	start = of_read_ulong(prop, len / 4);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (prop == NULL)
		return;
	end = of_read_ulong(prop, len / 4);

	early_init_dt_setup_initrd_arch(start, end);
	pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end);
}
#else
inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
421 | |||
422 | /** | ||
423 | * early_init_dt_scan_root - fetch the top level address and size cells | ||
424 | */ | ||
425 | int __init early_init_dt_scan_root(unsigned long node, const char *uname, | ||
426 | int depth, void *data) | ||
427 | { | ||
428 | __be32 *prop; | ||
429 | |||
430 | if (depth != 0) | ||
431 | return 0; | ||
432 | |||
433 | dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT; | ||
434 | dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT; | ||
435 | |||
436 | prop = of_get_flat_dt_prop(node, "#size-cells", NULL); | ||
437 | if (prop) | ||
438 | dt_root_size_cells = be32_to_cpup(prop); | ||
439 | pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); | ||
440 | |||
441 | prop = of_get_flat_dt_prop(node, "#address-cells", NULL); | ||
442 | if (prop) | ||
443 | dt_root_addr_cells = be32_to_cpup(prop); | ||
444 | pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); | ||
445 | |||
446 | /* break now */ | ||
447 | return 1; | ||
448 | } | ||
449 | |||
450 | u64 __init dt_mem_next_cell(int s, __be32 **cellp) | ||
451 | { | ||
452 | __be32 *p = *cellp; | ||
453 | |||
454 | *cellp = p + s; | ||
455 | return of_read_number(p, s); | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * early_init_dt_scan_memory - Look for an parse memory nodes | ||
460 | */ | ||
461 | int __init early_init_dt_scan_memory(unsigned long node, const char *uname, | ||
462 | int depth, void *data) | ||
463 | { | ||
464 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); | ||
465 | __be32 *reg, *endp; | ||
466 | unsigned long l; | ||
467 | |||
468 | /* We are scanning "memory" nodes only */ | ||
469 | if (type == NULL) { | ||
470 | /* | ||
471 | * The longtrail doesn't have a device_type on the | ||
472 | * /memory node, so look for the node called /memory@0. | ||
473 | */ | ||
474 | if (depth != 1 || strcmp(uname, "memory@0") != 0) | ||
475 | return 0; | ||
476 | } else if (strcmp(type, "memory") != 0) | ||
477 | return 0; | ||
478 | |||
479 | reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); | ||
480 | if (reg == NULL) | ||
481 | reg = of_get_flat_dt_prop(node, "reg", &l); | ||
482 | if (reg == NULL) | ||
483 | return 0; | ||
484 | |||
485 | endp = reg + (l / sizeof(__be32)); | ||
486 | |||
487 | pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", | ||
488 | uname, l, reg[0], reg[1], reg[2], reg[3]); | ||
489 | |||
490 | while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { | ||
491 | u64 base, size; | ||
492 | |||
493 | base = dt_mem_next_cell(dt_root_addr_cells, ®); | ||
494 | size = dt_mem_next_cell(dt_root_size_cells, ®); | ||
495 | |||
496 | if (size == 0) | ||
497 | continue; | ||
498 | pr_debug(" - %llx , %llx\n", (unsigned long long)base, | ||
499 | (unsigned long long)size); | ||
500 | |||
501 | early_init_dt_add_memory_arch(base, size); | ||
502 | } | ||
503 | |||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, | ||
508 | int depth, void *data) | ||
509 | { | ||
510 | unsigned long l; | ||
511 | char *p; | ||
512 | |||
513 | pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); | ||
514 | |||
515 | if (depth != 1 || | ||
516 | (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) | ||
517 | return 0; | ||
518 | |||
519 | early_init_dt_check_for_initrd(node); | ||
520 | |||
521 | /* Retreive command line */ | ||
522 | p = of_get_flat_dt_prop(node, "bootargs", &l); | ||
523 | if (p != NULL && l > 0) | ||
524 | strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); | ||
525 | |||
526 | #ifdef CONFIG_CMDLINE | ||
527 | #ifndef CONFIG_CMDLINE_FORCE | ||
528 | if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) | ||
529 | #endif | ||
530 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
531 | #endif /* CONFIG_CMDLINE */ | ||
532 | |||
533 | early_init_dt_scan_chosen_arch(node); | ||
534 | |||
535 | pr_debug("Command line is: %s\n", cmd_line); | ||
536 | |||
537 | /* break now */ | ||
538 | return 1; | ||
539 | } | ||
540 | |||
541 | /** | ||
542 | * unflatten_device_tree - create tree of device_nodes from flat blob | ||
543 | * | ||
544 | * unflattens the device-tree passed by the firmware, creating the | ||
545 | * tree of struct device_node. It also fills the "name" and "type" | ||
546 | * pointers of the nodes so the normal device-tree walking functions | ||
547 | * can be used. | ||
548 | */ | ||
549 | void __init unflatten_device_tree(void) | ||
550 | { | ||
551 | unsigned long start, mem, size; | ||
552 | struct device_node **allnextp = &allnodes; | ||
553 | |||
554 | pr_debug(" -> unflatten_device_tree()\n"); | ||
555 | |||
556 | /* First pass, scan for size */ | ||
557 | start = ((unsigned long)initial_boot_params) + | ||
558 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
559 | size = unflatten_dt_node(0, &start, NULL, NULL, 0); | ||
560 | size = (size | 3) + 1; | ||
561 | |||
562 | pr_debug(" size is %lx, allocating...\n", size); | ||
563 | |||
564 | /* Allocate memory for the expanded device tree */ | ||
565 | mem = early_init_dt_alloc_memory_arch(size + 4, | ||
566 | __alignof__(struct device_node)); | ||
567 | mem = (unsigned long) __va(mem); | ||
568 | |||
569 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); | ||
570 | |||
571 | pr_debug(" unflattening %lx...\n", mem); | ||
572 | |||
573 | /* Second pass, do actual unflattening */ | ||
574 | start = ((unsigned long)initial_boot_params) + | ||
575 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
576 | unflatten_dt_node(mem, &start, NULL, &allnextp, 0); | ||
577 | if (be32_to_cpup((__be32 *)start) != OF_DT_END) | ||
578 | pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start)); | ||
579 | if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef) | ||
580 | pr_warning("End of tree marker overwritten: %08x\n", | ||
581 | be32_to_cpu(((__be32 *)mem)[size / 4])); | ||
582 | *allnextp = NULL; | ||
583 | |||
584 | /* Get pointer to OF "/chosen" node for use everywhere */ | ||
585 | of_chosen = of_find_node_by_path("/chosen"); | ||
586 | if (of_chosen == NULL) | ||
587 | of_chosen = of_find_node_by_path("/chosen@0"); | ||
588 | |||
589 | pr_debug(" <- unflatten_device_tree()\n"); | ||
590 | } | ||
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c index 6eea601a9204..24c3606217f8 100644 --- a/drivers/of/gpio.c +++ b/drivers/of/gpio.c | |||
@@ -36,7 +36,7 @@ int of_get_gpio_flags(struct device_node *np, int index, | |||
36 | struct of_gpio_chip *of_gc = NULL; | 36 | struct of_gpio_chip *of_gc = NULL; |
37 | int size; | 37 | int size; |
38 | const void *gpio_spec; | 38 | const void *gpio_spec; |
39 | const u32 *gpio_cells; | 39 | const __be32 *gpio_cells; |
40 | 40 | ||
41 | ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, | 41 | ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, |
42 | &gc, &gpio_spec); | 42 | &gc, &gpio_spec); |
@@ -55,7 +55,7 @@ int of_get_gpio_flags(struct device_node *np, int index, | |||
55 | 55 | ||
56 | gpio_cells = of_get_property(gc, "#gpio-cells", &size); | 56 | gpio_cells = of_get_property(gc, "#gpio-cells", &size); |
57 | if (!gpio_cells || size != sizeof(*gpio_cells) || | 57 | if (!gpio_cells || size != sizeof(*gpio_cells) || |
58 | *gpio_cells != of_gc->gpio_cells) { | 58 | be32_to_cpup(gpio_cells) != of_gc->gpio_cells) { |
59 | pr_debug("%s: wrong #gpio-cells for %s\n", | 59 | pr_debug("%s: wrong #gpio-cells for %s\n", |
60 | np->full_name, gc->full_name); | 60 | np->full_name, gc->full_name); |
61 | ret = -EINVAL; | 61 | ret = -EINVAL; |
@@ -127,7 +127,8 @@ EXPORT_SYMBOL(of_gpio_count); | |||
127 | int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, | 127 | int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, |
128 | const void *gpio_spec, enum of_gpio_flags *flags) | 128 | const void *gpio_spec, enum of_gpio_flags *flags) |
129 | { | 129 | { |
130 | const u32 *gpio = gpio_spec; | 130 | const __be32 *gpio = gpio_spec; |
131 | const u32 n = be32_to_cpup(gpio); | ||
131 | 132 | ||
132 | /* | 133 | /* |
133 | * We're discouraging gpio_cells < 2, since that way you'll have to | 134 | * We're discouraging gpio_cells < 2, since that way you'll have to |
@@ -140,13 +141,13 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, | |||
140 | return -EINVAL; | 141 | return -EINVAL; |
141 | } | 142 | } |
142 | 143 | ||
143 | if (*gpio > of_gc->gc.ngpio) | 144 | if (n > of_gc->gc.ngpio) |
144 | return -EINVAL; | 145 | return -EINVAL; |
145 | 146 | ||
146 | if (flags) | 147 | if (flags) |
147 | *flags = gpio[1]; | 148 | *flags = be32_to_cpu(gpio[1]); |
148 | 149 | ||
149 | return *gpio; | 150 | return n; |
150 | } | 151 | } |
151 | EXPORT_SYMBOL(of_gpio_simple_xlate); | 152 | EXPORT_SYMBOL(of_gpio_simple_xlate); |
152 | 153 | ||
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c index fa65a2b2ae2e..a3a708e590d0 100644 --- a/drivers/of/of_i2c.c +++ b/drivers/of/of_i2c.c | |||
@@ -25,7 +25,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap, | |||
25 | for_each_child_of_node(adap_node, node) { | 25 | for_each_child_of_node(adap_node, node) { |
26 | struct i2c_board_info info = {}; | 26 | struct i2c_board_info info = {}; |
27 | struct dev_archdata dev_ad = {}; | 27 | struct dev_archdata dev_ad = {}; |
28 | const u32 *addr; | 28 | const __be32 *addr; |
29 | int len; | 29 | int len; |
30 | 30 | ||
31 | if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) | 31 | if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) |
@@ -40,7 +40,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap, | |||
40 | 40 | ||
41 | info.irq = irq_of_parse_and_map(node, 0); | 41 | info.irq = irq_of_parse_and_map(node, 0); |
42 | 42 | ||
43 | info.addr = *addr; | 43 | info.addr = be32_to_cpup(addr); |
44 | 44 | ||
45 | dev_archdata_set_node(&dev_ad, node); | 45 | dev_archdata_set_node(&dev_ad, node); |
46 | info.archdata = &dev_ad; | 46 | info.archdata = &dev_ad; |
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 4b22ba568b19..18ecae4a4375 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
@@ -51,7 +51,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
51 | 51 | ||
52 | /* Loop over the child nodes and register a phy_device for each one */ | 52 | /* Loop over the child nodes and register a phy_device for each one */ |
53 | for_each_child_of_node(np, child) { | 53 | for_each_child_of_node(np, child) { |
54 | const u32 *addr; | 54 | const __be32 *addr; |
55 | int len; | 55 | int len; |
56 | 56 | ||
57 | /* A PHY must have a reg property in the range [0-31] */ | 57 | /* A PHY must have a reg property in the range [0-31] */ |
@@ -68,7 +68,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
68 | mdio->irq[*addr] = PHY_POLL; | 68 | mdio->irq[*addr] = PHY_POLL; |
69 | } | 69 | } |
70 | 70 | ||
71 | phy = get_phy_device(mdio, *addr); | 71 | phy = get_phy_device(mdio, be32_to_cpup(addr)); |
72 | if (!phy) { | 72 | if (!phy) { |
73 | dev_err(&mdio->dev, "error probing PHY at address %i\n", | 73 | dev_err(&mdio->dev, "error probing PHY at address %i\n", |
74 | *addr); | 74 | *addr); |
@@ -160,7 +160,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | |||
160 | struct device_node *net_np; | 160 | struct device_node *net_np; |
161 | char bus_id[MII_BUS_ID_SIZE + 3]; | 161 | char bus_id[MII_BUS_ID_SIZE + 3]; |
162 | struct phy_device *phy; | 162 | struct phy_device *phy; |
163 | const u32 *phy_id; | 163 | const __be32 *phy_id; |
164 | int sz; | 164 | int sz; |
165 | 165 | ||
166 | if (!dev->dev.parent) | 166 | if (!dev->dev.parent) |
@@ -174,7 +174,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | |||
174 | if (!phy_id || sz < sizeof(*phy_id)) | 174 | if (!phy_id || sz < sizeof(*phy_id)) |
175 | return NULL; | 175 | return NULL; |
176 | 176 | ||
177 | sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]); | 177 | sprintf(bus_id, PHY_ID_FMT, "0", be32_to_cpu(phy_id[0])); |
178 | 178 | ||
179 | phy = phy_connect(dev, bus_id, hndlr, 0, iface); | 179 | phy = phy_connect(dev, bus_id, hndlr, 0, iface); |
180 | return IS_ERR(phy) ? NULL : phy; | 180 | return IS_ERR(phy) ? NULL : phy; |
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c index bed0ed6dcdc1..f65f48b98448 100644 --- a/drivers/of/of_spi.c +++ b/drivers/of/of_spi.c | |||
@@ -23,7 +23,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) | |||
23 | { | 23 | { |
24 | struct spi_device *spi; | 24 | struct spi_device *spi; |
25 | struct device_node *nc; | 25 | struct device_node *nc; |
26 | const u32 *prop; | 26 | const __be32 *prop; |
27 | int rc; | 27 | int rc; |
28 | int len; | 28 | int len; |
29 | 29 | ||
@@ -54,7 +54,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) | |||
54 | spi_dev_put(spi); | 54 | spi_dev_put(spi); |
55 | continue; | 55 | continue; |
56 | } | 56 | } |
57 | spi->chip_select = *prop; | 57 | spi->chip_select = be32_to_cpup(prop); |
58 | 58 | ||
59 | /* Mode (clock phase/polarity/etc.) */ | 59 | /* Mode (clock phase/polarity/etc.) */ |
60 | if (of_find_property(nc, "spi-cpha", NULL)) | 60 | if (of_find_property(nc, "spi-cpha", NULL)) |
@@ -72,7 +72,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) | |||
72 | spi_dev_put(spi); | 72 | spi_dev_put(spi); |
73 | continue; | 73 | continue; |
74 | } | 74 | } |
75 | spi->max_speed_hz = *prop; | 75 | spi->max_speed_hz = be32_to_cpup(prop); |
76 | 76 | ||
77 | /* IRQ */ | 77 | /* IRQ */ |
78 | spi->irq = irq_of_parse_and_map(nc, 0); | 78 | spi->irq = irq_of_parse_and_map(nc, 0); |
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b1ecefa2a23d..7858a117e80b 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -21,17 +21,6 @@ config PCI_MSI | |||
21 | 21 | ||
22 | If you don't know what to do here, say N. | 22 | If you don't know what to do here, say N. |
23 | 23 | ||
24 | config PCI_LEGACY | ||
25 | bool "Enable deprecated pci_find_* API" | ||
26 | depends on PCI | ||
27 | default y | ||
28 | help | ||
29 | Say Y here if you want to include support for the deprecated | ||
30 | pci_find_device() API. Most drivers have been converted over | ||
31 | to using the proper hotplug APIs, so this option serves to | ||
32 | include/exclude only a few drivers that are still using this | ||
33 | API. | ||
34 | |||
35 | config PCI_DEBUG | 24 | config PCI_DEBUG |
36 | bool "PCI Debugging" | 25 | bool "PCI Debugging" |
37 | depends on PCI && DEBUG_KERNEL | 26 | depends on PCI && DEBUG_KERNEL |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 4df48d58eaa6..8674c1ebe979 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -2,14 +2,13 @@ | |||
2 | # Makefile for the PCI bus specific drivers. | 2 | # Makefile for the PCI bus specific drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ | 5 | obj-y += access.o bus.o probe.o remove.o pci.o \ |
6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ | 6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ |
7 | irq.o | 7 | irq.o |
8 | obj-$(CONFIG_PROC_FS) += proc.o | 8 | obj-$(CONFIG_PROC_FS) += proc.o |
9 | obj-$(CONFIG_SYSFS) += slot.o | 9 | obj-$(CONFIG_SYSFS) += slot.o |
10 | 10 | ||
11 | obj-$(CONFIG_PCI_LEGACY) += legacy.o | 11 | obj-$(CONFIG_PCI_QUIRKS) += quirks.o |
12 | CFLAGS_legacy.o += -Wno-deprecated-declarations | ||
13 | 12 | ||
14 | # Build PCI Express stuff if needed | 13 | # Build PCI Express stuff if needed |
15 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 14 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index cef28a79103f..712250f5874a 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -17,6 +17,52 @@ | |||
17 | 17 | ||
18 | #include "pci.h" | 18 | #include "pci.h" |
19 | 19 | ||
20 | void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, | ||
21 | unsigned int flags) | ||
22 | { | ||
23 | struct pci_bus_resource *bus_res; | ||
24 | |||
25 | bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL); | ||
26 | if (!bus_res) { | ||
27 | dev_err(&bus->dev, "can't add %pR resource\n", res); | ||
28 | return; | ||
29 | } | ||
30 | |||
31 | bus_res->res = res; | ||
32 | bus_res->flags = flags; | ||
33 | list_add_tail(&bus_res->list, &bus->resources); | ||
34 | } | ||
35 | |||
36 | struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) | ||
37 | { | ||
38 | struct pci_bus_resource *bus_res; | ||
39 | |||
40 | if (n < PCI_BRIDGE_RESOURCE_NUM) | ||
41 | return bus->resource[n]; | ||
42 | |||
43 | n -= PCI_BRIDGE_RESOURCE_NUM; | ||
44 | list_for_each_entry(bus_res, &bus->resources, list) { | ||
45 | if (n-- == 0) | ||
46 | return bus_res->res; | ||
47 | } | ||
48 | return NULL; | ||
49 | } | ||
50 | EXPORT_SYMBOL_GPL(pci_bus_resource_n); | ||
51 | |||
52 | void pci_bus_remove_resources(struct pci_bus *bus) | ||
53 | { | ||
54 | struct pci_bus_resource *bus_res, *tmp; | ||
55 | int i; | ||
56 | |||
57 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) | ||
58 | bus->resource[i] = 0; | ||
59 | |||
60 | list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { | ||
61 | list_del(&bus_res->list); | ||
62 | kfree(bus_res); | ||
63 | } | ||
64 | } | ||
65 | |||
20 | /** | 66 | /** |
21 | * pci_bus_alloc_resource - allocate a resource from a parent bus | 67 | * pci_bus_alloc_resource - allocate a resource from a parent bus |
22 | * @bus: PCI bus | 68 | * @bus: PCI bus |
@@ -36,11 +82,14 @@ int | |||
36 | pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | 82 | pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, |
37 | resource_size_t size, resource_size_t align, | 83 | resource_size_t size, resource_size_t align, |
38 | resource_size_t min, unsigned int type_mask, | 84 | resource_size_t min, unsigned int type_mask, |
39 | void (*alignf)(void *, struct resource *, resource_size_t, | 85 | resource_size_t (*alignf)(void *, |
40 | resource_size_t), | 86 | const struct resource *, |
87 | resource_size_t, | ||
88 | resource_size_t), | ||
41 | void *alignf_data) | 89 | void *alignf_data) |
42 | { | 90 | { |
43 | int i, ret = -ENOMEM; | 91 | int i, ret = -ENOMEM; |
92 | struct resource *r; | ||
44 | resource_size_t max = -1; | 93 | resource_size_t max = -1; |
45 | 94 | ||
46 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; | 95 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; |
@@ -49,8 +98,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
49 | if (!(res->flags & IORESOURCE_MEM_64)) | 98 | if (!(res->flags & IORESOURCE_MEM_64)) |
50 | max = PCIBIOS_MAX_MEM_32; | 99 | max = PCIBIOS_MAX_MEM_32; |
51 | 100 | ||
52 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 101 | pci_bus_for_each_resource(bus, r, i) { |
53 | struct resource *r = bus->resource[i]; | ||
54 | if (!r) | 102 | if (!r) |
55 | continue; | 103 | continue; |
56 | 104 | ||
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 4dd7114964ac..efa9f2de51c1 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c | |||
@@ -332,8 +332,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) | |||
332 | slot->hotplug_slot->info->attention_status = 0; | 332 | slot->hotplug_slot->info->attention_status = 0; |
333 | slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); | 333 | slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); |
334 | slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); | 334 | slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); |
335 | slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; | ||
336 | slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; | ||
337 | 335 | ||
338 | acpiphp_slot->slot = slot; | 336 | acpiphp_slot->slot = slot; |
339 | snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); | 337 | snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); |
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c index 148fb463b81c..fb3f84661bdc 100644 --- a/drivers/pci/hotplug/cpcihp_generic.c +++ b/drivers/pci/hotplug/cpcihp_generic.c | |||
@@ -162,6 +162,7 @@ static int __init cpcihp_generic_init(void) | |||
162 | dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0)); | 162 | dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0)); |
163 | if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { | 163 | if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { |
164 | err("Invalid bridge device %s", bridge); | 164 | err("Invalid bridge device %s", bridge); |
165 | pci_dev_put(dev); | ||
165 | return -EINVAL; | 166 | return -EINVAL; |
166 | } | 167 | } |
167 | bus = dev->subordinate; | 168 | bus = dev->subordinate; |
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index 9c6a9fd26812..d8ffc7366801 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h | |||
@@ -310,8 +310,6 @@ struct controller { | |||
310 | u8 first_slot; | 310 | u8 first_slot; |
311 | u8 add_support; | 311 | u8 add_support; |
312 | u8 push_flag; | 312 | u8 push_flag; |
313 | enum pci_bus_speed speed; | ||
314 | enum pci_bus_speed speed_capability; | ||
315 | u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */ | 313 | u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */ |
316 | u8 slot_switch_type; /* 0 = no switch, 1 = switch present */ | 314 | u8 slot_switch_type; /* 0 = no switch, 1 = switch present */ |
317 | u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */ | 315 | u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */ |
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 075b4f4b6e0d..f184d1d2ecbe 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c | |||
@@ -583,30 +583,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
583 | return 0; | 583 | return 0; |
584 | } | 584 | } |
585 | 585 | ||
586 | static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
587 | { | ||
588 | struct slot *slot = hotplug_slot->private; | ||
589 | struct controller *ctrl = slot->ctrl; | ||
590 | |||
591 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | ||
592 | |||
593 | *value = ctrl->speed_capability; | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
599 | { | ||
600 | struct slot *slot = hotplug_slot->private; | ||
601 | struct controller *ctrl = slot->ctrl; | ||
602 | |||
603 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | ||
604 | |||
605 | *value = ctrl->speed; | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { | 586 | static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { |
611 | .set_attention_status = set_attention_status, | 587 | .set_attention_status = set_attention_status, |
612 | .enable_slot = process_SI, | 588 | .enable_slot = process_SI, |
@@ -616,8 +592,6 @@ static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { | |||
616 | .get_attention_status = get_attention_status, | 592 | .get_attention_status = get_attention_status, |
617 | .get_latch_status = get_latch_status, | 593 | .get_latch_status = get_latch_status, |
618 | .get_adapter_status = get_adapter_status, | 594 | .get_adapter_status = get_adapter_status, |
619 | .get_max_bus_speed = get_max_bus_speed, | ||
620 | .get_cur_bus_speed = get_cur_bus_speed, | ||
621 | }; | 595 | }; |
622 | 596 | ||
623 | #define SLOT_NAME_SIZE 10 | 597 | #define SLOT_NAME_SIZE 10 |
@@ -629,6 +603,7 @@ static int ctrl_slot_setup(struct controller *ctrl, | |||
629 | struct slot *slot; | 603 | struct slot *slot; |
630 | struct hotplug_slot *hotplug_slot; | 604 | struct hotplug_slot *hotplug_slot; |
631 | struct hotplug_slot_info *hotplug_slot_info; | 605 | struct hotplug_slot_info *hotplug_slot_info; |
606 | struct pci_bus *bus = ctrl->pci_bus; | ||
632 | u8 number_of_slots; | 607 | u8 number_of_slots; |
633 | u8 slot_device; | 608 | u8 slot_device; |
634 | u8 slot_number; | 609 | u8 slot_number; |
@@ -694,7 +669,7 @@ static int ctrl_slot_setup(struct controller *ctrl, | |||
694 | slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; | 669 | slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; |
695 | if (is_slot66mhz(slot)) | 670 | if (is_slot66mhz(slot)) |
696 | slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; | 671 | slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; |
697 | if (ctrl->speed == PCI_SPEED_66MHz) | 672 | if (bus->cur_bus_speed == PCI_SPEED_66MHz) |
698 | slot->capabilities |= PCISLOT_66_MHZ_OPERATION; | 673 | slot->capabilities |= PCISLOT_66_MHZ_OPERATION; |
699 | 674 | ||
700 | ctrl_slot = | 675 | ctrl_slot = |
@@ -844,6 +819,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
844 | u32 rc; | 819 | u32 rc; |
845 | struct controller *ctrl; | 820 | struct controller *ctrl; |
846 | struct pci_func *func; | 821 | struct pci_func *func; |
822 | struct pci_bus *bus; | ||
847 | int err; | 823 | int err; |
848 | 824 | ||
849 | err = pci_enable_device(pdev); | 825 | err = pci_enable_device(pdev); |
@@ -852,6 +828,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
852 | pci_name(pdev), err); | 828 | pci_name(pdev), err); |
853 | return err; | 829 | return err; |
854 | } | 830 | } |
831 | bus = pdev->subordinate; | ||
855 | 832 | ||
856 | /* Need to read VID early b/c it's used to differentiate CPQ and INTC | 833 | /* Need to read VID early b/c it's used to differentiate CPQ and INTC |
857 | * discovery | 834 | * discovery |
@@ -929,22 +906,22 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
929 | pci_read_config_byte(pdev, 0x41, &bus_cap); | 906 | pci_read_config_byte(pdev, 0x41, &bus_cap); |
930 | if (bus_cap & 0x80) { | 907 | if (bus_cap & 0x80) { |
931 | dbg("bus max supports 133MHz PCI-X\n"); | 908 | dbg("bus max supports 133MHz PCI-X\n"); |
932 | ctrl->speed_capability = PCI_SPEED_133MHz_PCIX; | 909 | bus->max_bus_speed = PCI_SPEED_133MHz_PCIX; |
933 | break; | 910 | break; |
934 | } | 911 | } |
935 | if (bus_cap & 0x40) { | 912 | if (bus_cap & 0x40) { |
936 | dbg("bus max supports 100MHz PCI-X\n"); | 913 | dbg("bus max supports 100MHz PCI-X\n"); |
937 | ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; | 914 | bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; |
938 | break; | 915 | break; |
939 | } | 916 | } |
940 | if (bus_cap & 20) { | 917 | if (bus_cap & 20) { |
941 | dbg("bus max supports 66MHz PCI-X\n"); | 918 | dbg("bus max supports 66MHz PCI-X\n"); |
942 | ctrl->speed_capability = PCI_SPEED_66MHz_PCIX; | 919 | bus->max_bus_speed = PCI_SPEED_66MHz_PCIX; |
943 | break; | 920 | break; |
944 | } | 921 | } |
945 | if (bus_cap & 10) { | 922 | if (bus_cap & 10) { |
946 | dbg("bus max supports 66MHz PCI\n"); | 923 | dbg("bus max supports 66MHz PCI\n"); |
947 | ctrl->speed_capability = PCI_SPEED_66MHz; | 924 | bus->max_bus_speed = PCI_SPEED_66MHz; |
948 | break; | 925 | break; |
949 | } | 926 | } |
950 | 927 | ||
@@ -955,7 +932,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
955 | case PCI_SUB_HPC_ID: | 932 | case PCI_SUB_HPC_ID: |
956 | /* Original 6500/7000 implementation */ | 933 | /* Original 6500/7000 implementation */ |
957 | ctrl->slot_switch_type = 1; | 934 | ctrl->slot_switch_type = 1; |
958 | ctrl->speed_capability = PCI_SPEED_33MHz; | 935 | bus->max_bus_speed = PCI_SPEED_33MHz; |
959 | ctrl->push_button = 0; | 936 | ctrl->push_button = 0; |
960 | ctrl->pci_config_space = 1; | 937 | ctrl->pci_config_space = 1; |
961 | ctrl->defeature_PHP = 1; | 938 | ctrl->defeature_PHP = 1; |
@@ -966,7 +943,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
966 | /* First Pushbutton implementation */ | 943 | /* First Pushbutton implementation */ |
967 | ctrl->push_flag = 1; | 944 | ctrl->push_flag = 1; |
968 | ctrl->slot_switch_type = 1; | 945 | ctrl->slot_switch_type = 1; |
969 | ctrl->speed_capability = PCI_SPEED_33MHz; | 946 | bus->max_bus_speed = PCI_SPEED_33MHz; |
970 | ctrl->push_button = 1; | 947 | ctrl->push_button = 1; |
971 | ctrl->pci_config_space = 1; | 948 | ctrl->pci_config_space = 1; |
972 | ctrl->defeature_PHP = 1; | 949 | ctrl->defeature_PHP = 1; |
@@ -976,7 +953,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
976 | case PCI_SUB_HPC_ID_INTC: | 953 | case PCI_SUB_HPC_ID_INTC: |
977 | /* Third party (6500/7000) */ | 954 | /* Third party (6500/7000) */ |
978 | ctrl->slot_switch_type = 1; | 955 | ctrl->slot_switch_type = 1; |
979 | ctrl->speed_capability = PCI_SPEED_33MHz; | 956 | bus->max_bus_speed = PCI_SPEED_33MHz; |
980 | ctrl->push_button = 0; | 957 | ctrl->push_button = 0; |
981 | ctrl->pci_config_space = 1; | 958 | ctrl->pci_config_space = 1; |
982 | ctrl->defeature_PHP = 1; | 959 | ctrl->defeature_PHP = 1; |
@@ -987,7 +964,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
987 | /* First 66 Mhz implementation */ | 964 | /* First 66 Mhz implementation */ |
988 | ctrl->push_flag = 1; | 965 | ctrl->push_flag = 1; |
989 | ctrl->slot_switch_type = 1; | 966 | ctrl->slot_switch_type = 1; |
990 | ctrl->speed_capability = PCI_SPEED_66MHz; | 967 | bus->max_bus_speed = PCI_SPEED_66MHz; |
991 | ctrl->push_button = 1; | 968 | ctrl->push_button = 1; |
992 | ctrl->pci_config_space = 1; | 969 | ctrl->pci_config_space = 1; |
993 | ctrl->defeature_PHP = 1; | 970 | ctrl->defeature_PHP = 1; |
@@ -998,7 +975,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
998 | /* First PCI-X implementation, 100MHz */ | 975 | /* First PCI-X implementation, 100MHz */ |
999 | ctrl->push_flag = 1; | 976 | ctrl->push_flag = 1; |
1000 | ctrl->slot_switch_type = 1; | 977 | ctrl->slot_switch_type = 1; |
1001 | ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; | 978 | bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; |
1002 | ctrl->push_button = 1; | 979 | ctrl->push_button = 1; |
1003 | ctrl->pci_config_space = 1; | 980 | ctrl->pci_config_space = 1; |
1004 | ctrl->defeature_PHP = 1; | 981 | ctrl->defeature_PHP = 1; |
@@ -1015,9 +992,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1015 | case PCI_VENDOR_ID_INTEL: | 992 | case PCI_VENDOR_ID_INTEL: |
1016 | /* Check for speed capability (0=33, 1=66) */ | 993 | /* Check for speed capability (0=33, 1=66) */ |
1017 | if (subsystem_deviceid & 0x0001) | 994 | if (subsystem_deviceid & 0x0001) |
1018 | ctrl->speed_capability = PCI_SPEED_66MHz; | 995 | bus->max_bus_speed = PCI_SPEED_66MHz; |
1019 | else | 996 | else |
1020 | ctrl->speed_capability = PCI_SPEED_33MHz; | 997 | bus->max_bus_speed = PCI_SPEED_33MHz; |
1021 | 998 | ||
1022 | /* Check for push button */ | 999 | /* Check for push button */ |
1023 | if (subsystem_deviceid & 0x0002) | 1000 | if (subsystem_deviceid & 0x0002) |
@@ -1079,7 +1056,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1079 | pdev->bus->number); | 1056 | pdev->bus->number); |
1080 | 1057 | ||
1081 | dbg("Hotplug controller capabilities:\n"); | 1058 | dbg("Hotplug controller capabilities:\n"); |
1082 | dbg(" speed_capability %d\n", ctrl->speed_capability); | 1059 | dbg(" speed_capability %d\n", bus->max_bus_speed); |
1083 | dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? | 1060 | dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? |
1084 | "switch present" : "no switch"); | 1061 | "switch present" : "no switch"); |
1085 | dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? | 1062 | dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? |
@@ -1142,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1142 | } | 1119 | } |
1143 | 1120 | ||
1144 | /* Check for 66Mhz operation */ | 1121 | /* Check for 66Mhz operation */ |
1145 | ctrl->speed = get_controller_speed(ctrl); | 1122 | bus->cur_bus_speed = get_controller_speed(ctrl); |
1146 | 1123 | ||
1147 | 1124 | ||
1148 | /******************************************************** | 1125 | /******************************************************** |
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index 0ff689afa757..e43908d9b5df 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c | |||
@@ -1130,12 +1130,13 @@ static int is_bridge(struct pci_func * func) | |||
1130 | static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) | 1130 | static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) |
1131 | { | 1131 | { |
1132 | struct slot *slot; | 1132 | struct slot *slot; |
1133 | struct pci_bus *bus = ctrl->pci_bus; | ||
1133 | u8 reg; | 1134 | u8 reg; |
1134 | u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); | 1135 | u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); |
1135 | u16 reg16; | 1136 | u16 reg16; |
1136 | u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); | 1137 | u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); |
1137 | 1138 | ||
1138 | if (ctrl->speed == adapter_speed) | 1139 | if (bus->cur_bus_speed == adapter_speed) |
1139 | return 0; | 1140 | return 0; |
1140 | 1141 | ||
1141 | /* We don't allow freq/mode changes if we find another adapter running | 1142 | /* We don't allow freq/mode changes if we find another adapter running |
@@ -1152,7 +1153,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1152 | * lower speed/mode, we allow the new adapter to function at | 1153 | * lower speed/mode, we allow the new adapter to function at |
1153 | * this rate if supported | 1154 | * this rate if supported |
1154 | */ | 1155 | */ |
1155 | if (ctrl->speed < adapter_speed) | 1156 | if (bus->cur_bus_speed < adapter_speed) |
1156 | return 0; | 1157 | return 0; |
1157 | 1158 | ||
1158 | return 1; | 1159 | return 1; |
@@ -1161,20 +1162,20 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1161 | /* If the controller doesn't support freq/mode changes and the | 1162 | /* If the controller doesn't support freq/mode changes and the |
1162 | * controller is running at a higher mode, we bail | 1163 | * controller is running at a higher mode, we bail |
1163 | */ | 1164 | */ |
1164 | if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability)) | 1165 | if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) |
1165 | return 1; | 1166 | return 1; |
1166 | 1167 | ||
1167 | /* But we allow the adapter to run at a lower rate if possible */ | 1168 | /* But we allow the adapter to run at a lower rate if possible */ |
1168 | if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability)) | 1169 | if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) |
1169 | return 0; | 1170 | return 0; |
1170 | 1171 | ||
1171 | /* We try to set the max speed supported by both the adapter and | 1172 | /* We try to set the max speed supported by both the adapter and |
1172 | * controller | 1173 | * controller |
1173 | */ | 1174 | */ |
1174 | if (ctrl->speed_capability < adapter_speed) { | 1175 | if (bus->max_bus_speed < adapter_speed) { |
1175 | if (ctrl->speed == ctrl->speed_capability) | 1176 | if (bus->cur_bus_speed == bus->max_bus_speed) |
1176 | return 0; | 1177 | return 0; |
1177 | adapter_speed = ctrl->speed_capability; | 1178 | adapter_speed = bus->max_bus_speed; |
1178 | } | 1179 | } |
1179 | 1180 | ||
1180 | writel(0x0L, ctrl->hpc_reg + LED_CONTROL); | 1181 | writel(0x0L, ctrl->hpc_reg + LED_CONTROL); |
@@ -1229,8 +1230,8 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1229 | pci_write_config_byte(ctrl->pci_dev, 0x43, reg); | 1230 | pci_write_config_byte(ctrl->pci_dev, 0x43, reg); |
1230 | 1231 | ||
1231 | /* Only if mode change...*/ | 1232 | /* Only if mode change...*/ |
1232 | if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || | 1233 | if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || |
1233 | ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) | 1234 | ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) |
1234 | set_SOGO(ctrl); | 1235 | set_SOGO(ctrl); |
1235 | 1236 | ||
1236 | wait_for_ctrl_irq(ctrl); | 1237 | wait_for_ctrl_irq(ctrl); |
@@ -1243,7 +1244,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1243 | set_SOGO(ctrl); | 1244 | set_SOGO(ctrl); |
1244 | wait_for_ctrl_irq(ctrl); | 1245 | wait_for_ctrl_irq(ctrl); |
1245 | 1246 | ||
1246 | ctrl->speed = adapter_speed; | 1247 | bus->cur_bus_speed = adapter_speed; |
1247 | slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); | 1248 | slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); |
1248 | 1249 | ||
1249 | info("Successfully changed frequency/mode for adapter in slot %d\n", | 1250 | info("Successfully changed frequency/mode for adapter in slot %d\n", |
@@ -1269,6 +1270,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1269 | */ | 1270 | */ |
1270 | static u32 board_replaced(struct pci_func *func, struct controller *ctrl) | 1271 | static u32 board_replaced(struct pci_func *func, struct controller *ctrl) |
1271 | { | 1272 | { |
1273 | struct pci_bus *bus = ctrl->pci_bus; | ||
1272 | u8 hp_slot; | 1274 | u8 hp_slot; |
1273 | u8 temp_byte; | 1275 | u8 temp_byte; |
1274 | u8 adapter_speed; | 1276 | u8 adapter_speed; |
@@ -1309,7 +1311,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) | |||
1309 | wait_for_ctrl_irq (ctrl); | 1311 | wait_for_ctrl_irq (ctrl); |
1310 | 1312 | ||
1311 | adapter_speed = get_adapter_speed(ctrl, hp_slot); | 1313 | adapter_speed = get_adapter_speed(ctrl, hp_slot); |
1312 | if (ctrl->speed != adapter_speed) | 1314 | if (bus->cur_bus_speed != adapter_speed) |
1313 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) | 1315 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) |
1314 | rc = WRONG_BUS_FREQUENCY; | 1316 | rc = WRONG_BUS_FREQUENCY; |
1315 | 1317 | ||
@@ -1426,6 +1428,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) | |||
1426 | u32 temp_register = 0xFFFFFFFF; | 1428 | u32 temp_register = 0xFFFFFFFF; |
1427 | u32 rc = 0; | 1429 | u32 rc = 0; |
1428 | struct pci_func *new_slot = NULL; | 1430 | struct pci_func *new_slot = NULL; |
1431 | struct pci_bus *bus = ctrl->pci_bus; | ||
1429 | struct slot *p_slot; | 1432 | struct slot *p_slot; |
1430 | struct resource_lists res_lists; | 1433 | struct resource_lists res_lists; |
1431 | 1434 | ||
@@ -1456,7 +1459,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) | |||
1456 | wait_for_ctrl_irq (ctrl); | 1459 | wait_for_ctrl_irq (ctrl); |
1457 | 1460 | ||
1458 | adapter_speed = get_adapter_speed(ctrl, hp_slot); | 1461 | adapter_speed = get_adapter_speed(ctrl, hp_slot); |
1459 | if (ctrl->speed != adapter_speed) | 1462 | if (bus->cur_bus_speed != adapter_speed) |
1460 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) | 1463 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) |
1461 | rc = WRONG_BUS_FREQUENCY; | 1464 | rc = WRONG_BUS_FREQUENCY; |
1462 | 1465 | ||
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 7485ffda950c..d934dd4fa873 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
@@ -395,89 +395,40 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value) | |||
395 | return rc; | 395 | return rc; |
396 | } | 396 | } |
397 | 397 | ||
398 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 398 | static int get_max_bus_speed(struct slot *slot) |
399 | { | 399 | { |
400 | int rc = -ENODEV; | 400 | int rc; |
401 | struct slot *pslot; | ||
402 | u8 mode = 0; | 401 | u8 mode = 0; |
402 | enum pci_bus_speed speed; | ||
403 | struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus; | ||
403 | 404 | ||
404 | debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, | 405 | debug("%s - Entry slot[%p]\n", __func__, slot); |
405 | hotplug_slot, value); | ||
406 | 406 | ||
407 | ibmphp_lock_operations(); | 407 | ibmphp_lock_operations(); |
408 | 408 | mode = slot->supported_bus_mode; | |
409 | if (hotplug_slot) { | 409 | speed = slot->supported_speed; |
410 | pslot = hotplug_slot->private; | ||
411 | if (pslot) { | ||
412 | rc = 0; | ||
413 | mode = pslot->supported_bus_mode; | ||
414 | *value = pslot->supported_speed; | ||
415 | switch (*value) { | ||
416 | case BUS_SPEED_33: | ||
417 | break; | ||
418 | case BUS_SPEED_66: | ||
419 | if (mode == BUS_MODE_PCIX) | ||
420 | *value += 0x01; | ||
421 | break; | ||
422 | case BUS_SPEED_100: | ||
423 | case BUS_SPEED_133: | ||
424 | *value = pslot->supported_speed + 0x01; | ||
425 | break; | ||
426 | default: | ||
427 | /* Note (will need to change): there would be soon 256, 512 also */ | ||
428 | rc = -ENODEV; | ||
429 | } | ||
430 | } | ||
431 | } | ||
432 | |||
433 | ibmphp_unlock_operations(); | 410 | ibmphp_unlock_operations(); |
434 | debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); | ||
435 | return rc; | ||
436 | } | ||
437 | 411 | ||
438 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 412 | switch (speed) { |
439 | { | 413 | case BUS_SPEED_33: |
440 | int rc = -ENODEV; | 414 | break; |
441 | struct slot *pslot; | 415 | case BUS_SPEED_66: |
442 | u8 mode = 0; | 416 | if (mode == BUS_MODE_PCIX) |
443 | 417 | speed += 0x01; | |
444 | debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, | 418 | break; |
445 | hotplug_slot, value); | 419 | case BUS_SPEED_100: |
446 | 420 | case BUS_SPEED_133: | |
447 | ibmphp_lock_operations(); | 421 | speed += 0x01; |
448 | 422 | break; | |
449 | if (hotplug_slot) { | 423 | default: |
450 | pslot = hotplug_slot->private; | 424 | /* Note (will need to change): there would be soon 256, 512 also */ |
451 | if (pslot) { | 425 | rc = -ENODEV; |
452 | rc = get_cur_bus_info(&pslot); | ||
453 | if (!rc) { | ||
454 | mode = pslot->bus_on->current_bus_mode; | ||
455 | *value = pslot->bus_on->current_speed; | ||
456 | switch (*value) { | ||
457 | case BUS_SPEED_33: | ||
458 | break; | ||
459 | case BUS_SPEED_66: | ||
460 | if (mode == BUS_MODE_PCIX) | ||
461 | *value += 0x01; | ||
462 | else if (mode == BUS_MODE_PCI) | ||
463 | ; | ||
464 | else | ||
465 | *value = PCI_SPEED_UNKNOWN; | ||
466 | break; | ||
467 | case BUS_SPEED_100: | ||
468 | case BUS_SPEED_133: | ||
469 | *value += 0x01; | ||
470 | break; | ||
471 | default: | ||
472 | /* Note of change: there would also be 256, 512 soon */ | ||
473 | rc = -ENODEV; | ||
474 | } | ||
475 | } | ||
476 | } | ||
477 | } | 426 | } |
478 | 427 | ||
479 | ibmphp_unlock_operations(); | 428 | if (!rc) |
480 | debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); | 429 | bus->max_bus_speed = speed; |
430 | |||
431 | debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed); | ||
481 | return rc; | 432 | return rc; |
482 | } | 433 | } |
483 | 434 | ||
@@ -572,6 +523,7 @@ static int __init init_ops(void) | |||
572 | if (slot_cur->bus_on->current_speed == 0xFF) | 523 | if (slot_cur->bus_on->current_speed == 0xFF) |
573 | if (get_cur_bus_info(&slot_cur)) | 524 | if (get_cur_bus_info(&slot_cur)) |
574 | return -1; | 525 | return -1; |
526 | get_max_bus_speed(slot_cur); | ||
575 | 527 | ||
576 | if (slot_cur->ctrl->options == 0xFF) | 528 | if (slot_cur->ctrl->options == 0xFF) |
577 | if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) | 529 | if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) |
@@ -655,6 +607,7 @@ static int validate(struct slot *slot_cur, int opn) | |||
655 | int ibmphp_update_slot_info(struct slot *slot_cur) | 607 | int ibmphp_update_slot_info(struct slot *slot_cur) |
656 | { | 608 | { |
657 | struct hotplug_slot_info *info; | 609 | struct hotplug_slot_info *info; |
610 | struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus; | ||
658 | int rc; | 611 | int rc; |
659 | u8 bus_speed; | 612 | u8 bus_speed; |
660 | u8 mode; | 613 | u8 mode; |
@@ -700,8 +653,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur) | |||
700 | bus_speed = PCI_SPEED_UNKNOWN; | 653 | bus_speed = PCI_SPEED_UNKNOWN; |
701 | } | 654 | } |
702 | 655 | ||
703 | info->cur_bus_speed = bus_speed; | 656 | bus->cur_bus_speed = bus_speed; |
704 | info->max_bus_speed = slot_cur->hotplug_slot->info->max_bus_speed; | ||
705 | // To do: bus_names | 657 | // To do: bus_names |
706 | 658 | ||
707 | rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); | 659 | rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); |
@@ -1326,8 +1278,6 @@ struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { | |||
1326 | .get_attention_status = get_attention_status, | 1278 | .get_attention_status = get_attention_status, |
1327 | .get_latch_status = get_latch_status, | 1279 | .get_latch_status = get_latch_status, |
1328 | .get_adapter_status = get_adapter_present, | 1280 | .get_adapter_status = get_adapter_present, |
1329 | .get_max_bus_speed = get_max_bus_speed, | ||
1330 | .get_cur_bus_speed = get_cur_bus_speed, | ||
1331 | /* .get_max_adapter_speed = get_max_adapter_speed, | 1281 | /* .get_max_adapter_speed = get_max_adapter_speed, |
1332 | .get_bus_name_status = get_bus_name, | 1282 | .get_bus_name_status = get_bus_name, |
1333 | */ | 1283 | */ |
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index c1abac8ab5c3..5becbdee4027 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
@@ -245,7 +245,7 @@ static void __init print_ebda_hpc (void) | |||
245 | 245 | ||
246 | int __init ibmphp_access_ebda (void) | 246 | int __init ibmphp_access_ebda (void) |
247 | { | 247 | { |
248 | u8 format, num_ctlrs, rio_complete, hs_complete; | 248 | u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz; |
249 | u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; | 249 | u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; |
250 | int rc = 0; | 250 | int rc = 0; |
251 | 251 | ||
@@ -260,7 +260,16 @@ int __init ibmphp_access_ebda (void) | |||
260 | iounmap (io_mem); | 260 | iounmap (io_mem); |
261 | debug ("returned ebda segment: %x\n", ebda_seg); | 261 | debug ("returned ebda segment: %x\n", ebda_seg); |
262 | 262 | ||
263 | io_mem = ioremap(ebda_seg<<4, 1024); | 263 | io_mem = ioremap(ebda_seg<<4, 1); |
264 | if (!io_mem) | ||
265 | return -ENOMEM; | ||
266 | ebda_sz = readb(io_mem); | ||
267 | iounmap(io_mem); | ||
268 | debug("ebda size: %d(KiB)\n", ebda_sz); | ||
269 | if (ebda_sz == 0) | ||
270 | return -ENOMEM; | ||
271 | |||
272 | io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024)); | ||
264 | if (!io_mem ) | 273 | if (!io_mem ) |
265 | return -ENOMEM; | 274 | return -ENOMEM; |
266 | next_offset = 0x180; | 275 | next_offset = 0x180; |
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index c7084f0eca5a..1aaf3f32d3cd 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/semaphore.h> | ||
38 | #include <linux/kthread.h> | 39 | #include <linux/kthread.h> |
39 | #include "ibmphp.h" | 40 | #include "ibmphp.h" |
40 | 41 | ||
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 38183a534b65..728b119f71ad 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c | |||
@@ -64,32 +64,6 @@ static int debug; | |||
64 | static LIST_HEAD(pci_hotplug_slot_list); | 64 | static LIST_HEAD(pci_hotplug_slot_list); |
65 | static DEFINE_MUTEX(pci_hp_mutex); | 65 | static DEFINE_MUTEX(pci_hp_mutex); |
66 | 66 | ||
67 | /* these strings match up with the values in pci_bus_speed */ | ||
68 | static char *pci_bus_speed_strings[] = { | ||
69 | "33 MHz PCI", /* 0x00 */ | ||
70 | "66 MHz PCI", /* 0x01 */ | ||
71 | "66 MHz PCI-X", /* 0x02 */ | ||
72 | "100 MHz PCI-X", /* 0x03 */ | ||
73 | "133 MHz PCI-X", /* 0x04 */ | ||
74 | NULL, /* 0x05 */ | ||
75 | NULL, /* 0x06 */ | ||
76 | NULL, /* 0x07 */ | ||
77 | NULL, /* 0x08 */ | ||
78 | "66 MHz PCI-X 266", /* 0x09 */ | ||
79 | "100 MHz PCI-X 266", /* 0x0a */ | ||
80 | "133 MHz PCI-X 266", /* 0x0b */ | ||
81 | NULL, /* 0x0c */ | ||
82 | NULL, /* 0x0d */ | ||
83 | NULL, /* 0x0e */ | ||
84 | NULL, /* 0x0f */ | ||
85 | NULL, /* 0x10 */ | ||
86 | "66 MHz PCI-X 533", /* 0x11 */ | ||
87 | "100 MHz PCI-X 533", /* 0x12 */ | ||
88 | "133 MHz PCI-X 533", /* 0x13 */ | ||
89 | "2.5 GT/s PCIe", /* 0x14 */ | ||
90 | "5.0 GT/s PCIe", /* 0x15 */ | ||
91 | }; | ||
92 | |||
93 | #ifdef CONFIG_HOTPLUG_PCI_CPCI | 67 | #ifdef CONFIG_HOTPLUG_PCI_CPCI |
94 | extern int cpci_hotplug_init(int debug); | 68 | extern int cpci_hotplug_init(int debug); |
95 | extern void cpci_hotplug_exit(void); | 69 | extern void cpci_hotplug_exit(void); |
@@ -118,8 +92,6 @@ GET_STATUS(power_status, u8) | |||
118 | GET_STATUS(attention_status, u8) | 92 | GET_STATUS(attention_status, u8) |
119 | GET_STATUS(latch_status, u8) | 93 | GET_STATUS(latch_status, u8) |
120 | GET_STATUS(adapter_status, u8) | 94 | GET_STATUS(adapter_status, u8) |
121 | GET_STATUS(max_bus_speed, enum pci_bus_speed) | ||
122 | GET_STATUS(cur_bus_speed, enum pci_bus_speed) | ||
123 | 95 | ||
124 | static ssize_t power_read_file(struct pci_slot *slot, char *buf) | 96 | static ssize_t power_read_file(struct pci_slot *slot, char *buf) |
125 | { | 97 | { |
@@ -263,60 +235,6 @@ static struct pci_slot_attribute hotplug_slot_attr_presence = { | |||
263 | .show = presence_read_file, | 235 | .show = presence_read_file, |
264 | }; | 236 | }; |
265 | 237 | ||
266 | static char *unknown_speed = "Unknown bus speed"; | ||
267 | |||
268 | static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf) | ||
269 | { | ||
270 | char *speed_string; | ||
271 | int retval; | ||
272 | enum pci_bus_speed value; | ||
273 | |||
274 | retval = get_max_bus_speed(slot->hotplug, &value); | ||
275 | if (retval) | ||
276 | goto exit; | ||
277 | |||
278 | if (value == PCI_SPEED_UNKNOWN) | ||
279 | speed_string = unknown_speed; | ||
280 | else | ||
281 | speed_string = pci_bus_speed_strings[value]; | ||
282 | |||
283 | retval = sprintf (buf, "%s\n", speed_string); | ||
284 | |||
285 | exit: | ||
286 | return retval; | ||
287 | } | ||
288 | |||
289 | static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = { | ||
290 | .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, | ||
291 | .show = max_bus_speed_read_file, | ||
292 | }; | ||
293 | |||
294 | static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf) | ||
295 | { | ||
296 | char *speed_string; | ||
297 | int retval; | ||
298 | enum pci_bus_speed value; | ||
299 | |||
300 | retval = get_cur_bus_speed(slot->hotplug, &value); | ||
301 | if (retval) | ||
302 | goto exit; | ||
303 | |||
304 | if (value == PCI_SPEED_UNKNOWN) | ||
305 | speed_string = unknown_speed; | ||
306 | else | ||
307 | speed_string = pci_bus_speed_strings[value]; | ||
308 | |||
309 | retval = sprintf (buf, "%s\n", speed_string); | ||
310 | |||
311 | exit: | ||
312 | return retval; | ||
313 | } | ||
314 | |||
315 | static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = { | ||
316 | .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, | ||
317 | .show = cur_bus_speed_read_file, | ||
318 | }; | ||
319 | |||
320 | static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, | 238 | static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, |
321 | size_t count) | 239 | size_t count) |
322 | { | 240 | { |
@@ -391,26 +309,6 @@ static bool has_adapter_file(struct pci_slot *pci_slot) | |||
391 | return false; | 309 | return false; |
392 | } | 310 | } |
393 | 311 | ||
394 | static bool has_max_bus_speed_file(struct pci_slot *pci_slot) | ||
395 | { | ||
396 | struct hotplug_slot *slot = pci_slot->hotplug; | ||
397 | if ((!slot) || (!slot->ops)) | ||
398 | return false; | ||
399 | if (slot->ops->get_max_bus_speed) | ||
400 | return true; | ||
401 | return false; | ||
402 | } | ||
403 | |||
404 | static bool has_cur_bus_speed_file(struct pci_slot *pci_slot) | ||
405 | { | ||
406 | struct hotplug_slot *slot = pci_slot->hotplug; | ||
407 | if ((!slot) || (!slot->ops)) | ||
408 | return false; | ||
409 | if (slot->ops->get_cur_bus_speed) | ||
410 | return true; | ||
411 | return false; | ||
412 | } | ||
413 | |||
414 | static bool has_test_file(struct pci_slot *pci_slot) | 312 | static bool has_test_file(struct pci_slot *pci_slot) |
415 | { | 313 | { |
416 | struct hotplug_slot *slot = pci_slot->hotplug; | 314 | struct hotplug_slot *slot = pci_slot->hotplug; |
@@ -456,20 +354,6 @@ static int fs_add_slot(struct pci_slot *slot) | |||
456 | goto exit_adapter; | 354 | goto exit_adapter; |
457 | } | 355 | } |
458 | 356 | ||
459 | if (has_max_bus_speed_file(slot)) { | ||
460 | retval = sysfs_create_file(&slot->kobj, | ||
461 | &hotplug_slot_attr_max_bus_speed.attr); | ||
462 | if (retval) | ||
463 | goto exit_max_speed; | ||
464 | } | ||
465 | |||
466 | if (has_cur_bus_speed_file(slot)) { | ||
467 | retval = sysfs_create_file(&slot->kobj, | ||
468 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
469 | if (retval) | ||
470 | goto exit_cur_speed; | ||
471 | } | ||
472 | |||
473 | if (has_test_file(slot)) { | 357 | if (has_test_file(slot)) { |
474 | retval = sysfs_create_file(&slot->kobj, | 358 | retval = sysfs_create_file(&slot->kobj, |
475 | &hotplug_slot_attr_test.attr); | 359 | &hotplug_slot_attr_test.attr); |
@@ -480,14 +364,6 @@ static int fs_add_slot(struct pci_slot *slot) | |||
480 | goto exit; | 364 | goto exit; |
481 | 365 | ||
482 | exit_test: | 366 | exit_test: |
483 | if (has_cur_bus_speed_file(slot)) | ||
484 | sysfs_remove_file(&slot->kobj, | ||
485 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
486 | exit_cur_speed: | ||
487 | if (has_max_bus_speed_file(slot)) | ||
488 | sysfs_remove_file(&slot->kobj, | ||
489 | &hotplug_slot_attr_max_bus_speed.attr); | ||
490 | exit_max_speed: | ||
491 | if (has_adapter_file(slot)) | 367 | if (has_adapter_file(slot)) |
492 | sysfs_remove_file(&slot->kobj, | 368 | sysfs_remove_file(&slot->kobj, |
493 | &hotplug_slot_attr_presence.attr); | 369 | &hotplug_slot_attr_presence.attr); |
@@ -523,14 +399,6 @@ static void fs_remove_slot(struct pci_slot *slot) | |||
523 | sysfs_remove_file(&slot->kobj, | 399 | sysfs_remove_file(&slot->kobj, |
524 | &hotplug_slot_attr_presence.attr); | 400 | &hotplug_slot_attr_presence.attr); |
525 | 401 | ||
526 | if (has_max_bus_speed_file(slot)) | ||
527 | sysfs_remove_file(&slot->kobj, | ||
528 | &hotplug_slot_attr_max_bus_speed.attr); | ||
529 | |||
530 | if (has_cur_bus_speed_file(slot)) | ||
531 | sysfs_remove_file(&slot->kobj, | ||
532 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
533 | |||
534 | if (has_test_file(slot)) | 402 | if (has_test_file(slot)) |
535 | sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); | 403 | sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); |
536 | 404 | ||
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 5674b2075bdc..920f820edf87 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -69,8 +69,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); | |||
69 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); | 69 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); |
70 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); | 70 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); |
71 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); | 71 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); |
72 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
73 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
74 | 72 | ||
75 | /** | 73 | /** |
76 | * release_slot - free up the memory used by a slot | 74 | * release_slot - free up the memory used by a slot |
@@ -113,8 +111,6 @@ static int init_slot(struct controller *ctrl) | |||
113 | ops->disable_slot = disable_slot; | 111 | ops->disable_slot = disable_slot; |
114 | ops->get_power_status = get_power_status; | 112 | ops->get_power_status = get_power_status; |
115 | ops->get_adapter_status = get_adapter_status; | 113 | ops->get_adapter_status = get_adapter_status; |
116 | ops->get_max_bus_speed = get_max_bus_speed; | ||
117 | ops->get_cur_bus_speed = get_cur_bus_speed; | ||
118 | if (MRL_SENS(ctrl)) | 114 | if (MRL_SENS(ctrl)) |
119 | ops->get_latch_status = get_latch_status; | 115 | ops->get_latch_status = get_latch_status; |
120 | if (ATTN_LED(ctrl)) { | 116 | if (ATTN_LED(ctrl)) { |
@@ -227,27 +223,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
227 | return pciehp_get_adapter_status(slot, value); | 223 | return pciehp_get_adapter_status(slot, value); |
228 | } | 224 | } |
229 | 225 | ||
230 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | ||
231 | enum pci_bus_speed *value) | ||
232 | { | ||
233 | struct slot *slot = hotplug_slot->private; | ||
234 | |||
235 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
236 | __func__, slot_name(slot)); | ||
237 | |||
238 | return pciehp_get_max_link_speed(slot, value); | ||
239 | } | ||
240 | |||
241 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
242 | { | ||
243 | struct slot *slot = hotplug_slot->private; | ||
244 | |||
245 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
246 | __func__, slot_name(slot)); | ||
247 | |||
248 | return pciehp_get_cur_link_speed(slot, value); | ||
249 | } | ||
250 | |||
251 | static int pciehp_probe(struct pcie_device *dev) | 226 | static int pciehp_probe(struct pcie_device *dev) |
252 | { | 227 | { |
253 | int rc; | 228 | int rc; |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index d6ac1b261dd9..9a7f247e8ac1 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -341,6 +341,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) | |||
341 | p_slot->state = POWERON_STATE; | 341 | p_slot->state = POWERON_STATE; |
342 | break; | 342 | break; |
343 | default: | 343 | default: |
344 | kfree(info); | ||
344 | goto out; | 345 | goto out; |
345 | } | 346 | } |
346 | queue_work(pciehp_wq, &info->work); | 347 | queue_work(pciehp_wq, &info->work); |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 10040d58c8ef..40b48f569b1e 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -492,6 +492,7 @@ int pciehp_power_on_slot(struct slot * slot) | |||
492 | u16 slot_cmd; | 492 | u16 slot_cmd; |
493 | u16 cmd_mask; | 493 | u16 cmd_mask; |
494 | u16 slot_status; | 494 | u16 slot_status; |
495 | u16 lnk_status; | ||
495 | int retval = 0; | 496 | int retval = 0; |
496 | 497 | ||
497 | /* Clear sticky power-fault bit from previous power failures */ | 498 | /* Clear sticky power-fault bit from previous power failures */ |
@@ -523,6 +524,14 @@ int pciehp_power_on_slot(struct slot * slot) | |||
523 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 524 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
524 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 525 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
525 | 526 | ||
527 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | ||
528 | if (retval) { | ||
529 | ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n", | ||
530 | __func__); | ||
531 | return retval; | ||
532 | } | ||
533 | pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); | ||
534 | |||
526 | return retval; | 535 | return retval; |
527 | } | 536 | } |
528 | 537 | ||
@@ -610,37 +619,6 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
610 | return IRQ_HANDLED; | 619 | return IRQ_HANDLED; |
611 | } | 620 | } |
612 | 621 | ||
613 | int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value) | ||
614 | { | ||
615 | struct controller *ctrl = slot->ctrl; | ||
616 | enum pcie_link_speed lnk_speed; | ||
617 | u32 lnk_cap; | ||
618 | int retval = 0; | ||
619 | |||
620 | retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); | ||
621 | if (retval) { | ||
622 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); | ||
623 | return retval; | ||
624 | } | ||
625 | |||
626 | switch (lnk_cap & 0x000F) { | ||
627 | case 1: | ||
628 | lnk_speed = PCIE_2_5GB; | ||
629 | break; | ||
630 | case 2: | ||
631 | lnk_speed = PCIE_5_0GB; | ||
632 | break; | ||
633 | default: | ||
634 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | *value = lnk_speed; | ||
639 | ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed); | ||
640 | |||
641 | return retval; | ||
642 | } | ||
643 | |||
644 | int pciehp_get_max_lnk_width(struct slot *slot, | 622 | int pciehp_get_max_lnk_width(struct slot *slot, |
645 | enum pcie_link_width *value) | 623 | enum pcie_link_width *value) |
646 | { | 624 | { |
@@ -691,38 +669,6 @@ int pciehp_get_max_lnk_width(struct slot *slot, | |||
691 | return retval; | 669 | return retval; |
692 | } | 670 | } |
693 | 671 | ||
694 | int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value) | ||
695 | { | ||
696 | struct controller *ctrl = slot->ctrl; | ||
697 | enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; | ||
698 | int retval = 0; | ||
699 | u16 lnk_status; | ||
700 | |||
701 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | ||
702 | if (retval) { | ||
703 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", | ||
704 | __func__); | ||
705 | return retval; | ||
706 | } | ||
707 | |||
708 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { | ||
709 | case 1: | ||
710 | lnk_speed = PCIE_2_5GB; | ||
711 | break; | ||
712 | case 2: | ||
713 | lnk_speed = PCIE_5_0GB; | ||
714 | break; | ||
715 | default: | ||
716 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | ||
717 | break; | ||
718 | } | ||
719 | |||
720 | *value = lnk_speed; | ||
721 | ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed); | ||
722 | |||
723 | return retval; | ||
724 | } | ||
725 | |||
726 | int pciehp_get_cur_lnk_width(struct slot *slot, | 672 | int pciehp_get_cur_lnk_width(struct slot *slot, |
727 | enum pcie_link_width *value) | 673 | enum pcie_link_width *value) |
728 | { | 674 | { |
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 21733108adde..0a16444c14c9 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -53,17 +53,15 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev) | |||
53 | busnr = pci_scan_bridge(parent, dev, busnr, pass); | 53 | busnr = pci_scan_bridge(parent, dev, busnr, pass); |
54 | if (!dev->subordinate) | 54 | if (!dev->subordinate) |
55 | return -1; | 55 | return -1; |
56 | pci_bus_size_bridges(dev->subordinate); | 56 | |
57 | pci_bus_assign_resources(parent); | ||
58 | pci_enable_bridges(parent); | ||
59 | pci_bus_add_devices(parent); | ||
60 | return 0; | 57 | return 0; |
61 | } | 58 | } |
62 | 59 | ||
63 | int pciehp_configure_device(struct slot *p_slot) | 60 | int pciehp_configure_device(struct slot *p_slot) |
64 | { | 61 | { |
65 | struct pci_dev *dev; | 62 | struct pci_dev *dev; |
66 | struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; | 63 | struct pci_dev *bridge = p_slot->ctrl->pcie->port; |
64 | struct pci_bus *parent = bridge->subordinate; | ||
67 | int num, fn; | 65 | int num, fn; |
68 | struct controller *ctrl = p_slot->ctrl; | 66 | struct controller *ctrl = p_slot->ctrl; |
69 | 67 | ||
@@ -96,12 +94,25 @@ int pciehp_configure_device(struct slot *p_slot) | |||
96 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { | 94 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { |
97 | pciehp_add_bridge(dev); | 95 | pciehp_add_bridge(dev); |
98 | } | 96 | } |
97 | pci_dev_put(dev); | ||
98 | } | ||
99 | |||
100 | pci_assign_unassigned_bridge_resources(bridge); | ||
101 | |||
102 | for (fn = 0; fn < 8; fn++) { | ||
103 | dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); | ||
104 | if (!dev) | ||
105 | continue; | ||
106 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | ||
107 | pci_dev_put(dev); | ||
108 | continue; | ||
109 | } | ||
99 | pci_configure_slot(dev); | 110 | pci_configure_slot(dev); |
100 | pci_dev_put(dev); | 111 | pci_dev_put(dev); |
101 | } | 112 | } |
102 | 113 | ||
103 | pci_bus_assign_resources(parent); | ||
104 | pci_bus_add_devices(parent); | 114 | pci_bus_add_devices(parent); |
115 | |||
105 | return 0; | 116 | return 0; |
106 | } | 117 | } |
107 | 118 | ||
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index c159223389ec..dcaae725fd79 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c | |||
@@ -130,10 +130,9 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value) | |||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 133 | static enum pci_bus_speed get_max_bus_speed(struct slot *slot) |
134 | { | 134 | { |
135 | struct slot *slot = (struct slot *)hotplug_slot->private; | 135 | enum pci_bus_speed speed; |
136 | |||
137 | switch (slot->type) { | 136 | switch (slot->type) { |
138 | case 1: | 137 | case 1: |
139 | case 2: | 138 | case 2: |
@@ -141,30 +140,30 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe | |||
141 | case 4: | 140 | case 4: |
142 | case 5: | 141 | case 5: |
143 | case 6: | 142 | case 6: |
144 | *value = PCI_SPEED_33MHz; /* speed for case 1-6 */ | 143 | speed = PCI_SPEED_33MHz; /* speed for case 1-6 */ |
145 | break; | 144 | break; |
146 | case 7: | 145 | case 7: |
147 | case 8: | 146 | case 8: |
148 | *value = PCI_SPEED_66MHz; | 147 | speed = PCI_SPEED_66MHz; |
149 | break; | 148 | break; |
150 | case 11: | 149 | case 11: |
151 | case 14: | 150 | case 14: |
152 | *value = PCI_SPEED_66MHz_PCIX; | 151 | speed = PCI_SPEED_66MHz_PCIX; |
153 | break; | 152 | break; |
154 | case 12: | 153 | case 12: |
155 | case 15: | 154 | case 15: |
156 | *value = PCI_SPEED_100MHz_PCIX; | 155 | speed = PCI_SPEED_100MHz_PCIX; |
157 | break; | 156 | break; |
158 | case 13: | 157 | case 13: |
159 | case 16: | 158 | case 16: |
160 | *value = PCI_SPEED_133MHz_PCIX; | 159 | speed = PCI_SPEED_133MHz_PCIX; |
161 | break; | 160 | break; |
162 | default: | 161 | default: |
163 | *value = PCI_SPEED_UNKNOWN; | 162 | speed = PCI_SPEED_UNKNOWN; |
164 | break; | 163 | break; |
165 | |||
166 | } | 164 | } |
167 | return 0; | 165 | |
166 | return speed; | ||
168 | } | 167 | } |
169 | 168 | ||
170 | static int get_children_props(struct device_node *dn, const int **drc_indexes, | 169 | static int get_children_props(struct device_node *dn, const int **drc_indexes, |
@@ -408,6 +407,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) | |||
408 | slot->state = NOT_VALID; | 407 | slot->state = NOT_VALID; |
409 | return -EINVAL; | 408 | return -EINVAL; |
410 | } | 409 | } |
410 | |||
411 | slot->bus->max_bus_speed = get_max_bus_speed(slot); | ||
411 | return 0; | 412 | return 0; |
412 | } | 413 | } |
413 | 414 | ||
@@ -429,7 +430,6 @@ struct hotplug_slot_ops rpaphp_hotplug_slot_ops = { | |||
429 | .get_power_status = get_power_status, | 430 | .get_power_status = get_power_status, |
430 | .get_attention_status = get_attention_status, | 431 | .get_attention_status = get_attention_status, |
431 | .get_adapter_status = get_adapter_status, | 432 | .get_adapter_status = get_adapter_status, |
432 | .get_max_bus_speed = get_max_bus_speed, | ||
433 | }; | 433 | }; |
434 | 434 | ||
435 | module_init(rpaphp_init); | 435 | module_init(rpaphp_init); |
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 8e210cd76e55..d2627e1c3ac1 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -333,8 +333,6 @@ struct hpc_ops { | |||
333 | int (*set_attention_status)(struct slot *slot, u8 status); | 333 | int (*set_attention_status)(struct slot *slot, u8 status); |
334 | int (*get_latch_status)(struct slot *slot, u8 *status); | 334 | int (*get_latch_status)(struct slot *slot, u8 *status); |
335 | int (*get_adapter_status)(struct slot *slot, u8 *status); | 335 | int (*get_adapter_status)(struct slot *slot, u8 *status); |
336 | int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
337 | int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
338 | int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); | 336 | int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); |
339 | int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); | 337 | int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); |
340 | int (*get_prog_int)(struct slot *slot, u8 *prog_int); | 338 | int (*get_prog_int)(struct slot *slot, u8 *prog_int); |
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 8a520a3d0f59..a5062297f488 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -65,8 +65,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); | |||
65 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); | 65 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); |
66 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); | 66 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); |
67 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); | 67 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); |
68 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
69 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
70 | 68 | ||
71 | static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { | 69 | static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { |
72 | .set_attention_status = set_attention_status, | 70 | .set_attention_status = set_attention_status, |
@@ -76,8 +74,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { | |||
76 | .get_attention_status = get_attention_status, | 74 | .get_attention_status = get_attention_status, |
77 | .get_latch_status = get_latch_status, | 75 | .get_latch_status = get_latch_status, |
78 | .get_adapter_status = get_adapter_status, | 76 | .get_adapter_status = get_adapter_status, |
79 | .get_max_bus_speed = get_max_bus_speed, | ||
80 | .get_cur_bus_speed = get_cur_bus_speed, | ||
81 | }; | 77 | }; |
82 | 78 | ||
83 | /** | 79 | /** |
@@ -279,37 +275,6 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) | |||
279 | return 0; | 275 | return 0; |
280 | } | 276 | } |
281 | 277 | ||
282 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | ||
283 | enum pci_bus_speed *value) | ||
284 | { | ||
285 | struct slot *slot = get_slot(hotplug_slot); | ||
286 | int retval; | ||
287 | |||
288 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
289 | __func__, slot_name(slot)); | ||
290 | |||
291 | retval = slot->hpc_ops->get_max_bus_speed(slot, value); | ||
292 | if (retval < 0) | ||
293 | *value = PCI_SPEED_UNKNOWN; | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
299 | { | ||
300 | struct slot *slot = get_slot(hotplug_slot); | ||
301 | int retval; | ||
302 | |||
303 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
304 | __func__, slot_name(slot)); | ||
305 | |||
306 | retval = slot->hpc_ops->get_cur_bus_speed(slot, value); | ||
307 | if (retval < 0) | ||
308 | *value = PCI_SPEED_UNKNOWN; | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static int is_shpc_capable(struct pci_dev *dev) | 278 | static int is_shpc_capable(struct pci_dev *dev) |
314 | { | 279 | { |
315 | if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == | 280 | if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == |
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index b8ab2796e66a..3bba0c0888ff 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
@@ -285,17 +285,8 @@ static int board_added(struct slot *p_slot) | |||
285 | return WRONG_BUS_FREQUENCY; | 285 | return WRONG_BUS_FREQUENCY; |
286 | } | 286 | } |
287 | 287 | ||
288 | rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); | 288 | bsp = ctrl->pci_dev->bus->cur_bus_speed; |
289 | if (rc) { | 289 | msp = ctrl->pci_dev->bus->max_bus_speed; |
290 | ctrl_err(ctrl, "Can't get bus operation speed\n"); | ||
291 | return WRONG_BUS_FREQUENCY; | ||
292 | } | ||
293 | |||
294 | rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); | ||
295 | if (rc) { | ||
296 | ctrl_err(ctrl, "Can't get max bus operation speed\n"); | ||
297 | msp = bsp; | ||
298 | } | ||
299 | 290 | ||
300 | /* Check if there are other slots or devices on the same bus */ | 291 | /* Check if there are other slots or devices on the same bus */ |
301 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) | 292 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) |
@@ -462,6 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) | |||
462 | p_slot->state = POWERON_STATE; | 453 | p_slot->state = POWERON_STATE; |
463 | break; | 454 | break; |
464 | default: | 455 | default: |
456 | kfree(info); | ||
465 | goto out; | 457 | goto out; |
466 | } | 458 | } |
467 | queue_work(shpchp_wq, &info->work); | 459 | queue_work(shpchp_wq, &info->work); |
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 86dc39847769..5f5e8d2e3552 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c | |||
@@ -660,6 +660,75 @@ static int hpc_slot_disable(struct slot * slot) | |||
660 | return retval; | 660 | return retval; |
661 | } | 661 | } |
662 | 662 | ||
663 | static int shpc_get_cur_bus_speed(struct controller *ctrl) | ||
664 | { | ||
665 | int retval = 0; | ||
666 | struct pci_bus *bus = ctrl->pci_dev->subordinate; | ||
667 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | ||
668 | u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); | ||
669 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | ||
670 | u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); | ||
671 | |||
672 | if ((pi == 1) && (speed_mode > 4)) { | ||
673 | retval = -ENODEV; | ||
674 | goto out; | ||
675 | } | ||
676 | |||
677 | switch (speed_mode) { | ||
678 | case 0x0: | ||
679 | bus_speed = PCI_SPEED_33MHz; | ||
680 | break; | ||
681 | case 0x1: | ||
682 | bus_speed = PCI_SPEED_66MHz; | ||
683 | break; | ||
684 | case 0x2: | ||
685 | bus_speed = PCI_SPEED_66MHz_PCIX; | ||
686 | break; | ||
687 | case 0x3: | ||
688 | bus_speed = PCI_SPEED_100MHz_PCIX; | ||
689 | break; | ||
690 | case 0x4: | ||
691 | bus_speed = PCI_SPEED_133MHz_PCIX; | ||
692 | break; | ||
693 | case 0x5: | ||
694 | bus_speed = PCI_SPEED_66MHz_PCIX_ECC; | ||
695 | break; | ||
696 | case 0x6: | ||
697 | bus_speed = PCI_SPEED_100MHz_PCIX_ECC; | ||
698 | break; | ||
699 | case 0x7: | ||
700 | bus_speed = PCI_SPEED_133MHz_PCIX_ECC; | ||
701 | break; | ||
702 | case 0x8: | ||
703 | bus_speed = PCI_SPEED_66MHz_PCIX_266; | ||
704 | break; | ||
705 | case 0x9: | ||
706 | bus_speed = PCI_SPEED_100MHz_PCIX_266; | ||
707 | break; | ||
708 | case 0xa: | ||
709 | bus_speed = PCI_SPEED_133MHz_PCIX_266; | ||
710 | break; | ||
711 | case 0xb: | ||
712 | bus_speed = PCI_SPEED_66MHz_PCIX_533; | ||
713 | break; | ||
714 | case 0xc: | ||
715 | bus_speed = PCI_SPEED_100MHz_PCIX_533; | ||
716 | break; | ||
717 | case 0xd: | ||
718 | bus_speed = PCI_SPEED_133MHz_PCIX_533; | ||
719 | break; | ||
720 | default: | ||
721 | retval = -ENODEV; | ||
722 | break; | ||
723 | } | ||
724 | |||
725 | out: | ||
726 | bus->cur_bus_speed = bus_speed; | ||
727 | dbg("Current bus speed = %d\n", bus_speed); | ||
728 | return retval; | ||
729 | } | ||
730 | |||
731 | |||
663 | static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) | 732 | static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) |
664 | { | 733 | { |
665 | int retval; | 734 | int retval; |
@@ -720,6 +789,8 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) | |||
720 | retval = shpc_write_cmd(slot, 0, cmd); | 789 | retval = shpc_write_cmd(slot, 0, cmd); |
721 | if (retval) | 790 | if (retval) |
722 | ctrl_err(ctrl, "%s: Write command failed!\n", __func__); | 791 | ctrl_err(ctrl, "%s: Write command failed!\n", __func__); |
792 | else | ||
793 | shpc_get_cur_bus_speed(ctrl); | ||
723 | 794 | ||
724 | return retval; | 795 | return retval; |
725 | } | 796 | } |
@@ -803,10 +874,10 @@ static irqreturn_t shpc_isr(int irq, void *dev_id) | |||
803 | return IRQ_HANDLED; | 874 | return IRQ_HANDLED; |
804 | } | 875 | } |
805 | 876 | ||
806 | static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) | 877 | static int shpc_get_max_bus_speed(struct controller *ctrl) |
807 | { | 878 | { |
808 | int retval = 0; | 879 | int retval = 0; |
809 | struct controller *ctrl = slot->ctrl; | 880 | struct pci_bus *bus = ctrl->pci_dev->subordinate; |
810 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | 881 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; |
811 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | 882 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); |
812 | u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); | 883 | u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); |
@@ -842,79 +913,12 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) | |||
842 | retval = -ENODEV; | 913 | retval = -ENODEV; |
843 | } | 914 | } |
844 | 915 | ||
845 | *value = bus_speed; | 916 | bus->max_bus_speed = bus_speed; |
846 | ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); | 917 | ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); |
847 | 918 | ||
848 | return retval; | 919 | return retval; |
849 | } | 920 | } |
850 | 921 | ||
851 | static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value) | ||
852 | { | ||
853 | int retval = 0; | ||
854 | struct controller *ctrl = slot->ctrl; | ||
855 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | ||
856 | u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); | ||
857 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | ||
858 | u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); | ||
859 | |||
860 | if ((pi == 1) && (speed_mode > 4)) { | ||
861 | *value = PCI_SPEED_UNKNOWN; | ||
862 | return -ENODEV; | ||
863 | } | ||
864 | |||
865 | switch (speed_mode) { | ||
866 | case 0x0: | ||
867 | *value = PCI_SPEED_33MHz; | ||
868 | break; | ||
869 | case 0x1: | ||
870 | *value = PCI_SPEED_66MHz; | ||
871 | break; | ||
872 | case 0x2: | ||
873 | *value = PCI_SPEED_66MHz_PCIX; | ||
874 | break; | ||
875 | case 0x3: | ||
876 | *value = PCI_SPEED_100MHz_PCIX; | ||
877 | break; | ||
878 | case 0x4: | ||
879 | *value = PCI_SPEED_133MHz_PCIX; | ||
880 | break; | ||
881 | case 0x5: | ||
882 | *value = PCI_SPEED_66MHz_PCIX_ECC; | ||
883 | break; | ||
884 | case 0x6: | ||
885 | *value = PCI_SPEED_100MHz_PCIX_ECC; | ||
886 | break; | ||
887 | case 0x7: | ||
888 | *value = PCI_SPEED_133MHz_PCIX_ECC; | ||
889 | break; | ||
890 | case 0x8: | ||
891 | *value = PCI_SPEED_66MHz_PCIX_266; | ||
892 | break; | ||
893 | case 0x9: | ||
894 | *value = PCI_SPEED_100MHz_PCIX_266; | ||
895 | break; | ||
896 | case 0xa: | ||
897 | *value = PCI_SPEED_133MHz_PCIX_266; | ||
898 | break; | ||
899 | case 0xb: | ||
900 | *value = PCI_SPEED_66MHz_PCIX_533; | ||
901 | break; | ||
902 | case 0xc: | ||
903 | *value = PCI_SPEED_100MHz_PCIX_533; | ||
904 | break; | ||
905 | case 0xd: | ||
906 | *value = PCI_SPEED_133MHz_PCIX_533; | ||
907 | break; | ||
908 | default: | ||
909 | *value = PCI_SPEED_UNKNOWN; | ||
910 | retval = -ENODEV; | ||
911 | break; | ||
912 | } | ||
913 | |||
914 | ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed); | ||
915 | return retval; | ||
916 | } | ||
917 | |||
918 | static struct hpc_ops shpchp_hpc_ops = { | 922 | static struct hpc_ops shpchp_hpc_ops = { |
919 | .power_on_slot = hpc_power_on_slot, | 923 | .power_on_slot = hpc_power_on_slot, |
920 | .slot_enable = hpc_slot_enable, | 924 | .slot_enable = hpc_slot_enable, |
@@ -926,8 +930,6 @@ static struct hpc_ops shpchp_hpc_ops = { | |||
926 | .get_latch_status = hpc_get_latch_status, | 930 | .get_latch_status = hpc_get_latch_status, |
927 | .get_adapter_status = hpc_get_adapter_status, | 931 | .get_adapter_status = hpc_get_adapter_status, |
928 | 932 | ||
929 | .get_max_bus_speed = hpc_get_max_bus_speed, | ||
930 | .get_cur_bus_speed = hpc_get_cur_bus_speed, | ||
931 | .get_adapter_speed = hpc_get_adapter_speed, | 933 | .get_adapter_speed = hpc_get_adapter_speed, |
932 | .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, | 934 | .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, |
933 | .get_prog_int = hpc_get_prog_int, | 935 | .get_prog_int = hpc_get_prog_int, |
@@ -1086,6 +1088,9 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) | |||
1086 | } | 1088 | } |
1087 | ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); | 1089 | ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); |
1088 | 1090 | ||
1091 | shpc_get_max_bus_speed(ctrl); | ||
1092 | shpc_get_cur_bus_speed(ctrl); | ||
1093 | |||
1089 | /* | 1094 | /* |
1090 | * If this is the first controller to be initialized, | 1095 | * If this is the first controller to be initialized, |
1091 | * initialize the shpchpd work queue | 1096 | * initialize the shpchpd work queue |
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c index 29fa9d26adae..071b7dc0094b 100644 --- a/drivers/pci/hotplug/shpchp_sysfs.c +++ b/drivers/pci/hotplug/shpchp_sysfs.c | |||
@@ -47,8 +47,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
47 | bus = pdev->subordinate; | 47 | bus = pdev->subordinate; |
48 | 48 | ||
49 | out += sprintf(buf, "Free resources: memory\n"); | 49 | out += sprintf(buf, "Free resources: memory\n"); |
50 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 50 | pci_bus_for_each_resource(bus, res, index) { |
51 | res = bus->resource[index]; | ||
52 | if (res && (res->flags & IORESOURCE_MEM) && | 51 | if (res && (res->flags & IORESOURCE_MEM) && |
53 | !(res->flags & IORESOURCE_PREFETCH)) { | 52 | !(res->flags & IORESOURCE_PREFETCH)) { |
54 | out += sprintf(out, "start = %8.8llx, " | 53 | out += sprintf(out, "start = %8.8llx, " |
@@ -58,8 +57,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
58 | } | 57 | } |
59 | } | 58 | } |
60 | out += sprintf(out, "Free resources: prefetchable memory\n"); | 59 | out += sprintf(out, "Free resources: prefetchable memory\n"); |
61 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 60 | pci_bus_for_each_resource(bus, res, index) { |
62 | res = bus->resource[index]; | ||
63 | if (res && (res->flags & IORESOURCE_MEM) && | 61 | if (res && (res->flags & IORESOURCE_MEM) && |
64 | (res->flags & IORESOURCE_PREFETCH)) { | 62 | (res->flags & IORESOURCE_PREFETCH)) { |
65 | out += sprintf(out, "start = %8.8llx, " | 63 | out += sprintf(out, "start = %8.8llx, " |
@@ -69,8 +67,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
69 | } | 67 | } |
70 | } | 68 | } |
71 | out += sprintf(out, "Free resources: IO\n"); | 69 | out += sprintf(out, "Free resources: IO\n"); |
72 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 70 | pci_bus_for_each_resource(bus, res, index) { |
73 | res = bus->resource[index]; | ||
74 | if (res && (res->flags & IORESOURCE_IO)) { | 71 | if (res && (res->flags & IORESOURCE_IO)) { |
75 | out += sprintf(out, "start = %8.8llx, " | 72 | out += sprintf(out, "start = %8.8llx, " |
76 | "length = %8.8llx\n", | 73 | "length = %8.8llx\n", |
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c deleted file mode 100644 index 871f65c15936..000000000000 --- a/drivers/pci/legacy.c +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/pci.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/interrupt.h> | ||
5 | #include "pci.h" | ||
6 | |||
7 | /** | ||
8 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | ||
9 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | ||
10 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | ||
11 | * @from: Previous PCI device found in search, or %NULL for new search. | ||
12 | * | ||
13 | * Iterates through the list of known PCI devices. If a PCI device is found | ||
14 | * with a matching @vendor and @device, a pointer to its device structure is | ||
15 | * returned. Otherwise, %NULL is returned. | ||
16 | * A new search is initiated by passing %NULL as the @from argument. | ||
17 | * Otherwise if @from is not %NULL, searches continue from next device | ||
18 | * on the global list. | ||
19 | * | ||
20 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | ||
21 | * the PCI device returned by this function can disappear at any moment in | ||
22 | * time. | ||
23 | */ | ||
24 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | ||
25 | struct pci_dev *from) | ||
26 | { | ||
27 | struct pci_dev *pdev; | ||
28 | |||
29 | pci_dev_get(from); | ||
30 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
31 | pci_dev_put(pdev); | ||
32 | return pdev; | ||
33 | } | ||
34 | EXPORT_SYMBOL(pci_find_device); | ||
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 7e2829538a4c..c0c73913833d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -16,8 +16,144 @@ | |||
16 | #include <acpi/acpi_bus.h> | 16 | #include <acpi/acpi_bus.h> |
17 | 17 | ||
18 | #include <linux/pci-acpi.h> | 18 | #include <linux/pci-acpi.h> |
19 | #include <linux/pm_runtime.h> | ||
19 | #include "pci.h" | 20 | #include "pci.h" |
20 | 21 | ||
22 | static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); | ||
23 | |||
24 | /** | ||
25 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. | ||
26 | * @handle: ACPI handle of a device the notification is for. | ||
27 | * @event: Type of the signaled event. | ||
28 | * @context: PCI root bus to wake up devices on. | ||
29 | */ | ||
30 | static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) | ||
31 | { | ||
32 | struct pci_bus *pci_bus = context; | ||
33 | |||
34 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) | ||
35 | pci_pme_wakeup_bus(pci_bus); | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. | ||
40 | * @handle: ACPI handle of a device the notification is for. | ||
41 | * @event: Type of the signaled event. | ||
42 | * @context: PCI device object to wake up. | ||
43 | */ | ||
44 | static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | ||
45 | { | ||
46 | struct pci_dev *pci_dev = context; | ||
47 | |||
48 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { | ||
49 | pci_check_pme_status(pci_dev); | ||
50 | pm_runtime_resume(&pci_dev->dev); | ||
51 | if (pci_dev->subordinate) | ||
52 | pci_pme_wakeup_bus(pci_dev->subordinate); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * add_pm_notifier - Register PM notifier for given ACPI device. | ||
58 | * @dev: ACPI device to add the notifier for. | ||
59 | * @context: PCI device or bus to check for PME status if an event is signaled. | ||
60 | * | ||
61 | * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of | ||
62 | * PM wake-up events. For example, wake-up events may be generated for bridges | ||
63 | * if one of the devices below the bridge is signaling PME, even if the bridge | ||
64 | * itself doesn't have a wake-up GPE associated with it. | ||
65 | */ | ||
66 | static acpi_status add_pm_notifier(struct acpi_device *dev, | ||
67 | acpi_notify_handler handler, | ||
68 | void *context) | ||
69 | { | ||
70 | acpi_status status = AE_ALREADY_EXISTS; | ||
71 | |||
72 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
73 | |||
74 | if (dev->wakeup.flags.notifier_present) | ||
75 | goto out; | ||
76 | |||
77 | status = acpi_install_notify_handler(dev->handle, | ||
78 | ACPI_SYSTEM_NOTIFY, | ||
79 | handler, context); | ||
80 | if (ACPI_FAILURE(status)) | ||
81 | goto out; | ||
82 | |||
83 | dev->wakeup.flags.notifier_present = true; | ||
84 | |||
85 | out: | ||
86 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
87 | return status; | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * remove_pm_notifier - Unregister PM notifier from given ACPI device. | ||
92 | * @dev: ACPI device to remove the notifier from. | ||
93 | */ | ||
94 | static acpi_status remove_pm_notifier(struct acpi_device *dev, | ||
95 | acpi_notify_handler handler) | ||
96 | { | ||
97 | acpi_status status = AE_BAD_PARAMETER; | ||
98 | |||
99 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
100 | |||
101 | if (!dev->wakeup.flags.notifier_present) | ||
102 | goto out; | ||
103 | |||
104 | status = acpi_remove_notify_handler(dev->handle, | ||
105 | ACPI_SYSTEM_NOTIFY, | ||
106 | handler); | ||
107 | if (ACPI_FAILURE(status)) | ||
108 | goto out; | ||
109 | |||
110 | dev->wakeup.flags.notifier_present = false; | ||
111 | |||
112 | out: | ||
113 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
114 | return status; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. | ||
119 | * @dev: ACPI device to add the notifier for. | ||
120 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. | ||
121 | */ | ||
122 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | ||
123 | struct pci_bus *pci_bus) | ||
124 | { | ||
125 | return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. | ||
130 | * @dev: ACPI device to remove the notifier from. | ||
131 | */ | ||
132 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | ||
133 | { | ||
134 | return remove_pm_notifier(dev, pci_acpi_wake_bus); | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. | ||
139 | * @dev: ACPI device to add the notifier for. | ||
140 | * @pci_dev: PCI device to check for the PME status if an event is signaled. | ||
141 | */ | ||
142 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | ||
143 | struct pci_dev *pci_dev) | ||
144 | { | ||
145 | return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. | ||
150 | * @dev: ACPI device to remove the notifier from. | ||
151 | */ | ||
152 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) | ||
153 | { | ||
154 | return remove_pm_notifier(dev, pci_acpi_wake_dev); | ||
155 | } | ||
156 | |||
21 | /* | 157 | /* |
22 | * _SxD returns the D-state with the highest power | 158 | * _SxD returns the D-state with the highest power |
23 | * (lowest D-state number) supported in the S-state "x". | 159 | * (lowest D-state number) supported in the S-state "x". |
@@ -131,12 +267,87 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
131 | return 0; | 267 | return 0; |
132 | } | 268 | } |
133 | 269 | ||
270 | /** | ||
271 | * acpi_dev_run_wake - Enable/disable wake-up for given device. | ||
272 | * @phys_dev: Device to enable/disable the platform to wake-up the system for. | ||
273 | * @enable: Whether enable or disable the wake-up functionality. | ||
274 | * | ||
275 | * Find the ACPI device object corresponding to @pci_dev and try to | ||
276 | * enable/disable the GPE associated with it. | ||
277 | */ | ||
278 | static int acpi_dev_run_wake(struct device *phys_dev, bool enable) | ||
279 | { | ||
280 | struct acpi_device *dev; | ||
281 | acpi_handle handle; | ||
282 | int error = -ENODEV; | ||
283 | |||
284 | if (!device_run_wake(phys_dev)) | ||
285 | return -EINVAL; | ||
286 | |||
287 | handle = DEVICE_ACPI_HANDLE(phys_dev); | ||
288 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { | ||
289 | dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", | ||
290 | __func__); | ||
291 | return -ENODEV; | ||
292 | } | ||
293 | |||
294 | if (enable) { | ||
295 | if (!dev->wakeup.run_wake_count++) { | ||
296 | acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); | ||
297 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
298 | dev->wakeup.gpe_number, | ||
299 | ACPI_GPE_TYPE_RUNTIME); | ||
300 | } | ||
301 | } else if (dev->wakeup.run_wake_count > 0) { | ||
302 | if (!--dev->wakeup.run_wake_count) { | ||
303 | acpi_disable_gpe(dev->wakeup.gpe_device, | ||
304 | dev->wakeup.gpe_number, | ||
305 | ACPI_GPE_TYPE_RUNTIME); | ||
306 | acpi_disable_wakeup_device_power(dev); | ||
307 | } | ||
308 | } else { | ||
309 | error = -EALREADY; | ||
310 | } | ||
311 | |||
312 | return error; | ||
313 | } | ||
314 | |||
315 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) | ||
316 | { | ||
317 | while (bus->parent) { | ||
318 | struct pci_dev *bridge = bus->self; | ||
319 | |||
320 | if (bridge->pme_interrupt) | ||
321 | return; | ||
322 | if (!acpi_dev_run_wake(&bridge->dev, enable)) | ||
323 | return; | ||
324 | bus = bus->parent; | ||
325 | } | ||
326 | |||
327 | /* We have reached the root bus. */ | ||
328 | if (bus->bridge) | ||
329 | acpi_dev_run_wake(bus->bridge, enable); | ||
330 | } | ||
331 | |||
332 | static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) | ||
333 | { | ||
334 | if (dev->pme_interrupt) | ||
335 | return 0; | ||
336 | |||
337 | if (!acpi_dev_run_wake(&dev->dev, enable)) | ||
338 | return 0; | ||
339 | |||
340 | acpi_pci_propagate_run_wake(dev->bus, enable); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
134 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { | 344 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { |
135 | .is_manageable = acpi_pci_power_manageable, | 345 | .is_manageable = acpi_pci_power_manageable, |
136 | .set_state = acpi_pci_set_power_state, | 346 | .set_state = acpi_pci_set_power_state, |
137 | .choose_state = acpi_pci_choose_state, | 347 | .choose_state = acpi_pci_choose_state, |
138 | .can_wakeup = acpi_pci_can_wakeup, | 348 | .can_wakeup = acpi_pci_can_wakeup, |
139 | .sleep_wake = acpi_pci_sleep_wake, | 349 | .sleep_wake = acpi_pci_sleep_wake, |
350 | .run_wake = acpi_pci_run_wake, | ||
140 | }; | 351 | }; |
141 | 352 | ||
142 | /* ACPI bus type */ | 353 | /* ACPI bus type */ |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index e5d47be3c6d7..f9a0aec3abcf 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/pm_runtime.h> | ||
20 | #include "pci.h" | 21 | #include "pci.h" |
21 | 22 | ||
22 | struct pci_dynid { | 23 | struct pci_dynid { |
@@ -404,6 +405,35 @@ static void pci_device_shutdown(struct device *dev) | |||
404 | pci_msix_shutdown(pci_dev); | 405 | pci_msix_shutdown(pci_dev); |
405 | } | 406 | } |
406 | 407 | ||
408 | #ifdef CONFIG_PM_OPS | ||
409 | |||
410 | /* Auxiliary functions used for system resume and run-time resume. */ | ||
411 | |||
412 | /** | ||
413 | * pci_restore_standard_config - restore standard config registers of PCI device | ||
414 | * @pci_dev: PCI device to handle | ||
415 | */ | ||
416 | static int pci_restore_standard_config(struct pci_dev *pci_dev) | ||
417 | { | ||
418 | pci_update_current_state(pci_dev, PCI_UNKNOWN); | ||
419 | |||
420 | if (pci_dev->current_state != PCI_D0) { | ||
421 | int error = pci_set_power_state(pci_dev, PCI_D0); | ||
422 | if (error) | ||
423 | return error; | ||
424 | } | ||
425 | |||
426 | return pci_restore_state(pci_dev); | ||
427 | } | ||
428 | |||
429 | static void pci_pm_default_resume_early(struct pci_dev *pci_dev) | ||
430 | { | ||
431 | pci_restore_standard_config(pci_dev); | ||
432 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
433 | } | ||
434 | |||
435 | #endif | ||
436 | |||
407 | #ifdef CONFIG_PM_SLEEP | 437 | #ifdef CONFIG_PM_SLEEP |
408 | 438 | ||
409 | /* | 439 | /* |
@@ -520,29 +550,6 @@ static int pci_legacy_resume(struct device *dev) | |||
520 | 550 | ||
521 | /* Auxiliary functions used by the new power management framework */ | 551 | /* Auxiliary functions used by the new power management framework */ |
522 | 552 | ||
523 | /** | ||
524 | * pci_restore_standard_config - restore standard config registers of PCI device | ||
525 | * @pci_dev: PCI device to handle | ||
526 | */ | ||
527 | static int pci_restore_standard_config(struct pci_dev *pci_dev) | ||
528 | { | ||
529 | pci_update_current_state(pci_dev, PCI_UNKNOWN); | ||
530 | |||
531 | if (pci_dev->current_state != PCI_D0) { | ||
532 | int error = pci_set_power_state(pci_dev, PCI_D0); | ||
533 | if (error) | ||
534 | return error; | ||
535 | } | ||
536 | |||
537 | return pci_restore_state(pci_dev); | ||
538 | } | ||
539 | |||
540 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) | ||
541 | { | ||
542 | pci_restore_standard_config(pci_dev); | ||
543 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
544 | } | ||
545 | |||
546 | static void pci_pm_default_resume(struct pci_dev *pci_dev) | 553 | static void pci_pm_default_resume(struct pci_dev *pci_dev) |
547 | { | 554 | { |
548 | pci_fixup_device(pci_fixup_resume, pci_dev); | 555 | pci_fixup_device(pci_fixup_resume, pci_dev); |
@@ -581,6 +588,17 @@ static int pci_pm_prepare(struct device *dev) | |||
581 | struct device_driver *drv = dev->driver; | 588 | struct device_driver *drv = dev->driver; |
582 | int error = 0; | 589 | int error = 0; |
583 | 590 | ||
591 | /* | ||
592 | * PCI devices suspended at run time need to be resumed at this | ||
593 | * point, because in general it is necessary to reconfigure them for | ||
594 | * system suspend. Namely, if the device is supposed to wake up the | ||
595 | * system from the sleep state, we may need to reconfigure it for this | ||
596 | * purpose. In turn, if the device is not supposed to wake up the | ||
597 | * system from the sleep state, we'll have to prevent it from signaling | ||
598 | * wake-up. | ||
599 | */ | ||
600 | pm_runtime_resume(dev); | ||
601 | |||
584 | if (drv && drv->pm && drv->pm->prepare) | 602 | if (drv && drv->pm && drv->pm->prepare) |
585 | error = drv->pm->prepare(dev); | 603 | error = drv->pm->prepare(dev); |
586 | 604 | ||
@@ -595,6 +613,13 @@ static void pci_pm_complete(struct device *dev) | |||
595 | drv->pm->complete(dev); | 613 | drv->pm->complete(dev); |
596 | } | 614 | } |
597 | 615 | ||
616 | #else /* !CONFIG_PM_SLEEP */ | ||
617 | |||
618 | #define pci_pm_prepare NULL | ||
619 | #define pci_pm_complete NULL | ||
620 | |||
621 | #endif /* !CONFIG_PM_SLEEP */ | ||
622 | |||
598 | #ifdef CONFIG_SUSPEND | 623 | #ifdef CONFIG_SUSPEND |
599 | 624 | ||
600 | static int pci_pm_suspend(struct device *dev) | 625 | static int pci_pm_suspend(struct device *dev) |
@@ -681,7 +706,7 @@ static int pci_pm_resume_noirq(struct device *dev) | |||
681 | struct device_driver *drv = dev->driver; | 706 | struct device_driver *drv = dev->driver; |
682 | int error = 0; | 707 | int error = 0; |
683 | 708 | ||
684 | pci_pm_default_resume_noirq(pci_dev); | 709 | pci_pm_default_resume_early(pci_dev); |
685 | 710 | ||
686 | if (pci_has_legacy_pm_support(pci_dev)) | 711 | if (pci_has_legacy_pm_support(pci_dev)) |
687 | return pci_legacy_resume_early(dev); | 712 | return pci_legacy_resume_early(dev); |
@@ -879,7 +904,7 @@ static int pci_pm_restore_noirq(struct device *dev) | |||
879 | struct device_driver *drv = dev->driver; | 904 | struct device_driver *drv = dev->driver; |
880 | int error = 0; | 905 | int error = 0; |
881 | 906 | ||
882 | pci_pm_default_resume_noirq(pci_dev); | 907 | pci_pm_default_resume_early(pci_dev); |
883 | 908 | ||
884 | if (pci_has_legacy_pm_support(pci_dev)) | 909 | if (pci_has_legacy_pm_support(pci_dev)) |
885 | return pci_legacy_resume_early(dev); | 910 | return pci_legacy_resume_early(dev); |
@@ -931,6 +956,84 @@ static int pci_pm_restore(struct device *dev) | |||
931 | 956 | ||
932 | #endif /* !CONFIG_HIBERNATION */ | 957 | #endif /* !CONFIG_HIBERNATION */ |
933 | 958 | ||
959 | #ifdef CONFIG_PM_RUNTIME | ||
960 | |||
961 | static int pci_pm_runtime_suspend(struct device *dev) | ||
962 | { | ||
963 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
964 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
965 | pci_power_t prev = pci_dev->current_state; | ||
966 | int error; | ||
967 | |||
968 | if (!pm || !pm->runtime_suspend) | ||
969 | return -ENOSYS; | ||
970 | |||
971 | error = pm->runtime_suspend(dev); | ||
972 | suspend_report_result(pm->runtime_suspend, error); | ||
973 | if (error) | ||
974 | return error; | ||
975 | |||
976 | pci_fixup_device(pci_fixup_suspend, pci_dev); | ||
977 | |||
978 | if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 | ||
979 | && pci_dev->current_state != PCI_UNKNOWN) { | ||
980 | WARN_ONCE(pci_dev->current_state != prev, | ||
981 | "PCI PM: State of device not saved by %pF\n", | ||
982 | pm->runtime_suspend); | ||
983 | return 0; | ||
984 | } | ||
985 | |||
986 | if (!pci_dev->state_saved) | ||
987 | pci_save_state(pci_dev); | ||
988 | |||
989 | pci_finish_runtime_suspend(pci_dev); | ||
990 | |||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int pci_pm_runtime_resume(struct device *dev) | ||
995 | { | ||
996 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
997 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
998 | |||
999 | if (!pm || !pm->runtime_resume) | ||
1000 | return -ENOSYS; | ||
1001 | |||
1002 | pci_pm_default_resume_early(pci_dev); | ||
1003 | __pci_enable_wake(pci_dev, PCI_D0, true, false); | ||
1004 | pci_fixup_device(pci_fixup_resume, pci_dev); | ||
1005 | |||
1006 | return pm->runtime_resume(dev); | ||
1007 | } | ||
1008 | |||
1009 | static int pci_pm_runtime_idle(struct device *dev) | ||
1010 | { | ||
1011 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
1012 | |||
1013 | if (!pm) | ||
1014 | return -ENOSYS; | ||
1015 | |||
1016 | if (pm->runtime_idle) { | ||
1017 | int ret = pm->runtime_idle(dev); | ||
1018 | if (ret) | ||
1019 | return ret; | ||
1020 | } | ||
1021 | |||
1022 | pm_runtime_suspend(dev); | ||
1023 | |||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | #else /* !CONFIG_PM_RUNTIME */ | ||
1028 | |||
1029 | #define pci_pm_runtime_suspend NULL | ||
1030 | #define pci_pm_runtime_resume NULL | ||
1031 | #define pci_pm_runtime_idle NULL | ||
1032 | |||
1033 | #endif /* !CONFIG_PM_RUNTIME */ | ||
1034 | |||
1035 | #ifdef CONFIG_PM_OPS | ||
1036 | |||
934 | const struct dev_pm_ops pci_dev_pm_ops = { | 1037 | const struct dev_pm_ops pci_dev_pm_ops = { |
935 | .prepare = pci_pm_prepare, | 1038 | .prepare = pci_pm_prepare, |
936 | .complete = pci_pm_complete, | 1039 | .complete = pci_pm_complete, |
@@ -946,15 +1049,18 @@ const struct dev_pm_ops pci_dev_pm_ops = { | |||
946 | .thaw_noirq = pci_pm_thaw_noirq, | 1049 | .thaw_noirq = pci_pm_thaw_noirq, |
947 | .poweroff_noirq = pci_pm_poweroff_noirq, | 1050 | .poweroff_noirq = pci_pm_poweroff_noirq, |
948 | .restore_noirq = pci_pm_restore_noirq, | 1051 | .restore_noirq = pci_pm_restore_noirq, |
1052 | .runtime_suspend = pci_pm_runtime_suspend, | ||
1053 | .runtime_resume = pci_pm_runtime_resume, | ||
1054 | .runtime_idle = pci_pm_runtime_idle, | ||
949 | }; | 1055 | }; |
950 | 1056 | ||
951 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) | 1057 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) |
952 | 1058 | ||
953 | #else /* !CONFIG_PM_SLEEP */ | 1059 | #else /* !COMFIG_PM_OPS */ |
954 | 1060 | ||
955 | #define PCI_PM_OPS_PTR NULL | 1061 | #define PCI_PM_OPS_PTR NULL |
956 | 1062 | ||
957 | #endif /* !CONFIG_PM_SLEEP */ | 1063 | #endif /* !COMFIG_PM_OPS */ |
958 | 1064 | ||
959 | /** | 1065 | /** |
960 | * __pci_register_driver - register a new pci driver | 1066 | * __pci_register_driver - register a new pci driver |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 315fea47e784..f4a2738bf0bf 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -19,8 +19,8 @@ | |||
19 | #include <linux/pci-aspm.h> | 19 | #include <linux/pci-aspm.h> |
20 | #include <linux/pm_wakeup.h> | 20 | #include <linux/pm_wakeup.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <asm/dma.h> /* isa_dma_bridge_buggy */ | ||
23 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/pm_runtime.h> | ||
24 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
25 | #include "pci.h" | 25 | #include "pci.h" |
26 | 26 | ||
@@ -29,6 +29,12 @@ const char *pci_power_names[] = { | |||
29 | }; | 29 | }; |
30 | EXPORT_SYMBOL_GPL(pci_power_names); | 30 | EXPORT_SYMBOL_GPL(pci_power_names); |
31 | 31 | ||
32 | int isa_dma_bridge_buggy; | ||
33 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
34 | |||
35 | int pci_pci_problems; | ||
36 | EXPORT_SYMBOL(pci_pci_problems); | ||
37 | |||
32 | unsigned int pci_pm_d3_delay; | 38 | unsigned int pci_pm_d3_delay; |
33 | 39 | ||
34 | static void pci_dev_d3_sleep(struct pci_dev *dev) | 40 | static void pci_dev_d3_sleep(struct pci_dev *dev) |
@@ -380,10 +386,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) | |||
380 | { | 386 | { |
381 | const struct pci_bus *bus = dev->bus; | 387 | const struct pci_bus *bus = dev->bus; |
382 | int i; | 388 | int i; |
383 | struct resource *best = NULL; | 389 | struct resource *best = NULL, *r; |
384 | 390 | ||
385 | for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 391 | pci_bus_for_each_resource(bus, r, i) { |
386 | struct resource *r = bus->resource[i]; | ||
387 | if (!r) | 392 | if (!r) |
388 | continue; | 393 | continue; |
389 | if (res->start && !(res->start >= r->start && res->end <= r->end)) | 394 | if (res->start && !(res->start >= r->start && res->end <= r->end)) |
@@ -457,6 +462,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
457 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; | 462 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; |
458 | } | 463 | } |
459 | 464 | ||
465 | static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) | ||
466 | { | ||
467 | return pci_platform_pm ? | ||
468 | pci_platform_pm->run_wake(dev, enable) : -ENODEV; | ||
469 | } | ||
470 | |||
460 | /** | 471 | /** |
461 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of | 472 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of |
462 | * given PCI device | 473 | * given PCI device |
@@ -1190,6 +1201,66 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) | |||
1190 | } | 1201 | } |
1191 | 1202 | ||
1192 | /** | 1203 | /** |
1204 | * pci_check_pme_status - Check if given device has generated PME. | ||
1205 | * @dev: Device to check. | ||
1206 | * | ||
1207 | * Check the PME status of the device and if set, clear it and clear PME enable | ||
1208 | * (if set). Return 'true' if PME status and PME enable were both set or | ||
1209 | * 'false' otherwise. | ||
1210 | */ | ||
1211 | bool pci_check_pme_status(struct pci_dev *dev) | ||
1212 | { | ||
1213 | int pmcsr_pos; | ||
1214 | u16 pmcsr; | ||
1215 | bool ret = false; | ||
1216 | |||
1217 | if (!dev->pm_cap) | ||
1218 | return false; | ||
1219 | |||
1220 | pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; | ||
1221 | pci_read_config_word(dev, pmcsr_pos, &pmcsr); | ||
1222 | if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) | ||
1223 | return false; | ||
1224 | |||
1225 | /* Clear PME status. */ | ||
1226 | pmcsr |= PCI_PM_CTRL_PME_STATUS; | ||
1227 | if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { | ||
1228 | /* Disable PME to avoid interrupt flood. */ | ||
1229 | pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; | ||
1230 | ret = true; | ||
1231 | } | ||
1232 | |||
1233 | pci_write_config_word(dev, pmcsr_pos, pmcsr); | ||
1234 | |||
1235 | return ret; | ||
1236 | } | ||
1237 | |||
1238 | /** | ||
1239 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. | ||
1240 | * @dev: Device to handle. | ||
1241 | * @ign: Ignored. | ||
1242 | * | ||
1243 | * Check if @dev has generated PME and queue a resume request for it in that | ||
1244 | * case. | ||
1245 | */ | ||
1246 | static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | ||
1247 | { | ||
1248 | if (pci_check_pme_status(dev)) | ||
1249 | pm_request_resume(&dev->dev); | ||
1250 | return 0; | ||
1251 | } | ||
1252 | |||
1253 | /** | ||
1254 | * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. | ||
1255 | * @bus: Top bus of the subtree to walk. | ||
1256 | */ | ||
1257 | void pci_pme_wakeup_bus(struct pci_bus *bus) | ||
1258 | { | ||
1259 | if (bus) | ||
1260 | pci_walk_bus(bus, pci_pme_wakeup, NULL); | ||
1261 | } | ||
1262 | |||
1263 | /** | ||
1193 | * pci_pme_capable - check the capability of PCI device to generate PME# | 1264 | * pci_pme_capable - check the capability of PCI device to generate PME# |
1194 | * @dev: PCI device to handle. | 1265 | * @dev: PCI device to handle. |
1195 | * @state: PCI state from which device will issue PME#. | 1266 | * @state: PCI state from which device will issue PME#. |
@@ -1230,9 +1301,10 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1230 | } | 1301 | } |
1231 | 1302 | ||
1232 | /** | 1303 | /** |
1233 | * pci_enable_wake - enable PCI device as wakeup event source | 1304 | * __pci_enable_wake - enable PCI device as wakeup event source |
1234 | * @dev: PCI device affected | 1305 | * @dev: PCI device affected |
1235 | * @state: PCI state from which device will issue wakeup events | 1306 | * @state: PCI state from which device will issue wakeup events |
1307 | * @runtime: True if the events are to be generated at run time | ||
1236 | * @enable: True to enable event generation; false to disable | 1308 | * @enable: True to enable event generation; false to disable |
1237 | * | 1309 | * |
1238 | * This enables the device as a wakeup event source, or disables it. | 1310 | * This enables the device as a wakeup event source, or disables it. |
@@ -1248,11 +1320,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1248 | * Error code depending on the platform is returned if both the platform and | 1320 | * Error code depending on the platform is returned if both the platform and |
1249 | * the native mechanism fail to enable the generation of wake-up events | 1321 | * the native mechanism fail to enable the generation of wake-up events |
1250 | */ | 1322 | */ |
1251 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | 1323 | int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, |
1324 | bool runtime, bool enable) | ||
1252 | { | 1325 | { |
1253 | int ret = 0; | 1326 | int ret = 0; |
1254 | 1327 | ||
1255 | if (enable && !device_may_wakeup(&dev->dev)) | 1328 | if (enable && !runtime && !device_may_wakeup(&dev->dev)) |
1256 | return -EINVAL; | 1329 | return -EINVAL; |
1257 | 1330 | ||
1258 | /* Don't do the same thing twice in a row for one device. */ | 1331 | /* Don't do the same thing twice in a row for one device. */ |
@@ -1272,19 +1345,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | |||
1272 | pci_pme_active(dev, true); | 1345 | pci_pme_active(dev, true); |
1273 | else | 1346 | else |
1274 | ret = 1; | 1347 | ret = 1; |
1275 | error = platform_pci_sleep_wake(dev, true); | 1348 | error = runtime ? platform_pci_run_wake(dev, true) : |
1349 | platform_pci_sleep_wake(dev, true); | ||
1276 | if (ret) | 1350 | if (ret) |
1277 | ret = error; | 1351 | ret = error; |
1278 | if (!ret) | 1352 | if (!ret) |
1279 | dev->wakeup_prepared = true; | 1353 | dev->wakeup_prepared = true; |
1280 | } else { | 1354 | } else { |
1281 | platform_pci_sleep_wake(dev, false); | 1355 | if (runtime) |
1356 | platform_pci_run_wake(dev, false); | ||
1357 | else | ||
1358 | platform_pci_sleep_wake(dev, false); | ||
1282 | pci_pme_active(dev, false); | 1359 | pci_pme_active(dev, false); |
1283 | dev->wakeup_prepared = false; | 1360 | dev->wakeup_prepared = false; |
1284 | } | 1361 | } |
1285 | 1362 | ||
1286 | return ret; | 1363 | return ret; |
1287 | } | 1364 | } |
1365 | EXPORT_SYMBOL(__pci_enable_wake); | ||
1288 | 1366 | ||
1289 | /** | 1367 | /** |
1290 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold | 1368 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold |
@@ -1394,6 +1472,66 @@ int pci_back_from_sleep(struct pci_dev *dev) | |||
1394 | } | 1472 | } |
1395 | 1473 | ||
1396 | /** | 1474 | /** |
1475 | * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. | ||
1476 | * @dev: PCI device being suspended. | ||
1477 | * | ||
1478 | * Prepare @dev to generate wake-up events at run time and put it into a low | ||
1479 | * power state. | ||
1480 | */ | ||
1481 | int pci_finish_runtime_suspend(struct pci_dev *dev) | ||
1482 | { | ||
1483 | pci_power_t target_state = pci_target_state(dev); | ||
1484 | int error; | ||
1485 | |||
1486 | if (target_state == PCI_POWER_ERROR) | ||
1487 | return -EIO; | ||
1488 | |||
1489 | __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev)); | ||
1490 | |||
1491 | error = pci_set_power_state(dev, target_state); | ||
1492 | |||
1493 | if (error) | ||
1494 | __pci_enable_wake(dev, target_state, true, false); | ||
1495 | |||
1496 | return error; | ||
1497 | } | ||
1498 | |||
1499 | /** | ||
1500 | * pci_dev_run_wake - Check if device can generate run-time wake-up events. | ||
1501 | * @dev: Device to check. | ||
1502 | * | ||
1503 | * Return true if the device itself is cabable of generating wake-up events | ||
1504 | * (through the platform or using the native PCIe PME) or if the device supports | ||
1505 | * PME and one of its upstream bridges can generate wake-up events. | ||
1506 | */ | ||
1507 | bool pci_dev_run_wake(struct pci_dev *dev) | ||
1508 | { | ||
1509 | struct pci_bus *bus = dev->bus; | ||
1510 | |||
1511 | if (device_run_wake(&dev->dev)) | ||
1512 | return true; | ||
1513 | |||
1514 | if (!dev->pme_support) | ||
1515 | return false; | ||
1516 | |||
1517 | while (bus->parent) { | ||
1518 | struct pci_dev *bridge = bus->self; | ||
1519 | |||
1520 | if (device_run_wake(&bridge->dev)) | ||
1521 | return true; | ||
1522 | |||
1523 | bus = bus->parent; | ||
1524 | } | ||
1525 | |||
1526 | /* We have reached the root bus. */ | ||
1527 | if (bus->bridge) | ||
1528 | return device_run_wake(bus->bridge); | ||
1529 | |||
1530 | return false; | ||
1531 | } | ||
1532 | EXPORT_SYMBOL_GPL(pci_dev_run_wake); | ||
1533 | |||
1534 | /** | ||
1397 | * pci_pm_init - Initialize PM functions of given PCI device | 1535 | * pci_pm_init - Initialize PM functions of given PCI device |
1398 | * @dev: PCI device to handle. | 1536 | * @dev: PCI device to handle. |
1399 | */ | 1537 | */ |
@@ -2871,7 +3009,6 @@ EXPORT_SYMBOL(pci_save_state); | |||
2871 | EXPORT_SYMBOL(pci_restore_state); | 3009 | EXPORT_SYMBOL(pci_restore_state); |
2872 | EXPORT_SYMBOL(pci_pme_capable); | 3010 | EXPORT_SYMBOL(pci_pme_capable); |
2873 | EXPORT_SYMBOL(pci_pme_active); | 3011 | EXPORT_SYMBOL(pci_pme_active); |
2874 | EXPORT_SYMBOL(pci_enable_wake); | ||
2875 | EXPORT_SYMBOL(pci_wake_from_d3); | 3012 | EXPORT_SYMBOL(pci_wake_from_d3); |
2876 | EXPORT_SYMBOL(pci_target_state); | 3013 | EXPORT_SYMBOL(pci_target_state); |
2877 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 3014 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fbd0e3adbca3..4eb10f48d270 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -35,6 +35,10 @@ int pci_probe_reset_function(struct pci_dev *dev); | |||
35 | * | 35 | * |
36 | * @sleep_wake: enables/disables the system wake up capability of given device | 36 | * @sleep_wake: enables/disables the system wake up capability of given device |
37 | * | 37 | * |
38 | * @run_wake: enables/disables the platform to generate run-time wake-up events | ||
39 | * for given device (the device's wake-up capability has to be | ||
40 | * enabled by @sleep_wake for this feature to work) | ||
41 | * | ||
38 | * If given platform is generally capable of power managing PCI devices, all of | 42 | * If given platform is generally capable of power managing PCI devices, all of |
39 | * these callbacks are mandatory. | 43 | * these callbacks are mandatory. |
40 | */ | 44 | */ |
@@ -44,11 +48,16 @@ struct pci_platform_pm_ops { | |||
44 | pci_power_t (*choose_state)(struct pci_dev *dev); | 48 | pci_power_t (*choose_state)(struct pci_dev *dev); |
45 | bool (*can_wakeup)(struct pci_dev *dev); | 49 | bool (*can_wakeup)(struct pci_dev *dev); |
46 | int (*sleep_wake)(struct pci_dev *dev, bool enable); | 50 | int (*sleep_wake)(struct pci_dev *dev, bool enable); |
51 | int (*run_wake)(struct pci_dev *dev, bool enable); | ||
47 | }; | 52 | }; |
48 | 53 | ||
49 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); | 54 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); |
50 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); | 55 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); |
51 | extern void pci_disable_enabled_device(struct pci_dev *dev); | 56 | extern void pci_disable_enabled_device(struct pci_dev *dev); |
57 | extern bool pci_check_pme_status(struct pci_dev *dev); | ||
58 | extern int pci_finish_runtime_suspend(struct pci_dev *dev); | ||
59 | extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); | ||
60 | extern void pci_pme_wakeup_bus(struct pci_bus *bus); | ||
52 | extern void pci_pm_init(struct pci_dev *dev); | 61 | extern void pci_pm_init(struct pci_dev *dev); |
53 | extern void platform_pci_wakeup_init(struct pci_dev *dev); | 62 | extern void platform_pci_wakeup_init(struct pci_dev *dev); |
54 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); | 63 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); |
@@ -319,6 +328,13 @@ struct pci_dev_reset_methods { | |||
319 | int (*reset)(struct pci_dev *dev, int probe); | 328 | int (*reset)(struct pci_dev *dev, int probe); |
320 | }; | 329 | }; |
321 | 330 | ||
331 | #ifdef CONFIG_PCI_QUIRKS | ||
322 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); | 332 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); |
333 | #else | ||
334 | static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) | ||
335 | { | ||
336 | return -ENOTTY; | ||
337 | } | ||
338 | #endif | ||
323 | 339 | ||
324 | #endif /* DRIVERS_PCI_H */ | 340 | #endif /* DRIVERS_PCI_H */ |
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 5a0c6ad53f8e..b8b494b3e0d0 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig | |||
@@ -46,3 +46,7 @@ config PCIEASPM_DEBUG | |||
46 | help | 46 | help |
47 | This enables PCI Express ASPM debug support. It will add per-device | 47 | This enables PCI Express ASPM debug support. It will add per-device |
48 | interface to control ASPM. | 48 | interface to control ASPM. |
49 | |||
50 | config PCIE_PME | ||
51 | def_bool y | ||
52 | depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI | ||
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 11f6bb1eae24..ea654545e7c4 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile | |||
@@ -11,3 +11,5 @@ obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o | |||
11 | 11 | ||
12 | # Build PCI Express AER if needed | 12 | # Build PCI Express AER if needed |
13 | obj-$(CONFIG_PCIEAER) += aer/ | 13 | obj-$(CONFIG_PCIEAER) += aer/ |
14 | |||
15 | obj-$(CONFIG_PCIE_PME) += pme/ | ||
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile new file mode 100644 index 000000000000..8b9238053080 --- /dev/null +++ b/drivers/pci/pcie/pme/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for PCI-Express Root Port PME signaling driver | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_PCIE_PME) += pmedriver.o | ||
6 | |||
7 | pmedriver-objs := pcie_pme.o | ||
8 | pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme/pcie_pme.c new file mode 100644 index 000000000000..7b3cbff547ee --- /dev/null +++ b/drivers/pci/pcie/pme/pcie_pme.c | |||
@@ -0,0 +1,505 @@ | |||
1 | /* | ||
2 | * PCIe Native PME support | ||
3 | * | ||
4 | * Copyright (C) 2007 - 2009 Intel Corp | ||
5 | * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com> | ||
6 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License V2. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <linux/pcieport_if.h> | ||
21 | #include <linux/acpi.h> | ||
22 | #include <linux/pci-acpi.h> | ||
23 | #include <linux/pm_runtime.h> | ||
24 | |||
25 | #include "../../pci.h" | ||
26 | #include "pcie_pme.h" | ||
27 | |||
28 | #define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ | ||
29 | #define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ | ||
30 | |||
31 | /* | ||
32 | * If set, this switch will prevent the PCIe root port PME service driver from | ||
33 | * being registered. Consequently, the interrupt-based PCIe PME signaling will | ||
34 | * not be used by any PCIe root ports in that case. | ||
35 | */ | ||
36 | static bool pcie_pme_disabled; | ||
37 | |||
38 | /* | ||
39 | * The PCI Express Base Specification 2.0, Section 6.1.8, states the following: | ||
40 | * "In order to maintain compatibility with non-PCI Express-aware system | ||
41 | * software, system power management logic must be configured by firmware to use | ||
42 | * the legacy mechanism of signaling PME by default. PCI Express-aware system | ||
43 | * software must notify the firmware prior to enabling native, interrupt-based | ||
44 | * PME signaling." However, if the platform doesn't provide us with a suitable | ||
45 | * notification mechanism or the notification fails, it is not clear whether or | ||
46 | * not we are supposed to use the interrupt-based PCIe PME signaling. The | ||
47 | * switch below can be used to indicate the desired behaviour. When set, it | ||
48 | * will make the kernel use the interrupt-based PCIe PME signaling regardless of | ||
49 | * the platform notification status, although the kernel will attempt to notify | ||
50 | * the platform anyway. When unset, it will prevent the kernel from using the | ||
51 | * the interrupt-based PCIe PME signaling if the platform notification fails, | ||
52 | * which is the default. | ||
53 | */ | ||
54 | static bool pcie_pme_force_enable; | ||
55 | |||
56 | /* | ||
57 | * If this switch is set, MSI will not be used for PCIe PME signaling. This | ||
58 | * causes the PCIe port driver to use INTx interrupts only, but it turns out | ||
59 | * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based | ||
60 | * wake-up from system sleep states. | ||
61 | */ | ||
62 | bool pcie_pme_msi_disabled; | ||
63 | |||
64 | static int __init pcie_pme_setup(char *str) | ||
65 | { | ||
66 | if (!strcmp(str, "off")) | ||
67 | pcie_pme_disabled = true; | ||
68 | else if (!strcmp(str, "force")) | ||
69 | pcie_pme_force_enable = true; | ||
70 | else if (!strcmp(str, "nomsi")) | ||
71 | pcie_pme_msi_disabled = true; | ||
72 | return 1; | ||
73 | } | ||
74 | __setup("pcie_pme=", pcie_pme_setup); | ||
75 | |||
76 | /** | ||
77 | * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME. | ||
78 | * @srv: PCIe PME root port service to use for carrying out the check. | ||
79 | * | ||
80 | * Notify the platform that the native PCIe PME is going to be used and return | ||
81 | * 'true' if the control of the PCIe PME registers has been acquired from the | ||
82 | * platform. | ||
83 | */ | ||
84 | static bool pcie_pme_platform_setup(struct pcie_device *srv) | ||
85 | { | ||
86 | if (!pcie_pme_platform_notify(srv)) | ||
87 | return true; | ||
88 | return pcie_pme_force_enable; | ||
89 | } | ||
90 | |||
91 | struct pcie_pme_service_data { | ||
92 | spinlock_t lock; | ||
93 | struct pcie_device *srv; | ||
94 | struct work_struct work; | ||
95 | bool noirq; /* Don't enable the PME interrupt used by this service. */ | ||
96 | }; | ||
97 | |||
98 | /** | ||
99 | * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation. | ||
100 | * @dev: PCIe root port or event collector. | ||
101 | * @enable: Enable or disable the interrupt. | ||
102 | */ | ||
103 | static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) | ||
104 | { | ||
105 | int rtctl_pos; | ||
106 | u16 rtctl; | ||
107 | |||
108 | rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL; | ||
109 | |||
110 | pci_read_config_word(dev, rtctl_pos, &rtctl); | ||
111 | if (enable) | ||
112 | rtctl |= PCI_EXP_RTCTL_PMEIE; | ||
113 | else | ||
114 | rtctl &= ~PCI_EXP_RTCTL_PMEIE; | ||
115 | pci_write_config_word(dev, rtctl_pos, rtctl); | ||
116 | } | ||
117 | |||
118 | /** | ||
119 | * pcie_pme_clear_status - Clear root port PME interrupt status. | ||
120 | * @dev: PCIe root port or event collector. | ||
121 | */ | ||
122 | static void pcie_pme_clear_status(struct pci_dev *dev) | ||
123 | { | ||
124 | int rtsta_pos; | ||
125 | u32 rtsta; | ||
126 | |||
127 | rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; | ||
128 | |||
129 | pci_read_config_dword(dev, rtsta_pos, &rtsta); | ||
130 | rtsta |= PCI_EXP_RTSTA_PME; | ||
131 | pci_write_config_dword(dev, rtsta_pos, rtsta); | ||
132 | } | ||
133 | |||
134 | /** | ||
135 | * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#. | ||
136 | * @bus: PCI bus to scan. | ||
137 | * | ||
138 | * Scan given PCI bus and all buses under it for devices asserting PME#. | ||
139 | */ | ||
140 | static bool pcie_pme_walk_bus(struct pci_bus *bus) | ||
141 | { | ||
142 | struct pci_dev *dev; | ||
143 | bool ret = false; | ||
144 | |||
145 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
146 | /* Skip PCIe devices in case we started from a root port. */ | ||
147 | if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { | ||
148 | pm_request_resume(&dev->dev); | ||
149 | ret = true; | ||
150 | } | ||
151 | |||
152 | if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate)) | ||
153 | ret = true; | ||
154 | } | ||
155 | |||
156 | return ret; | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME. | ||
161 | * @bus: Secondary bus of the bridge. | ||
162 | * @devfn: Device/function number to check. | ||
163 | * | ||
164 | * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band | ||
165 | * PCIe PME message. In such that case the bridge should use the Requester ID | ||
166 | * of device/function number 0 on its secondary bus. | ||
167 | */ | ||
168 | static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn) | ||
169 | { | ||
170 | struct pci_dev *dev; | ||
171 | bool found = false; | ||
172 | |||
173 | if (devfn) | ||
174 | return false; | ||
175 | |||
176 | dev = pci_dev_get(bus->self); | ||
177 | if (!dev) | ||
178 | return false; | ||
179 | |||
180 | if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | ||
181 | down_read(&pci_bus_sem); | ||
182 | if (pcie_pme_walk_bus(bus)) | ||
183 | found = true; | ||
184 | up_read(&pci_bus_sem); | ||
185 | } | ||
186 | |||
187 | pci_dev_put(dev); | ||
188 | return found; | ||
189 | } | ||
190 | |||
191 | /** | ||
192 | * pcie_pme_handle_request - Find device that generated PME and handle it. | ||
193 | * @port: Root port or event collector that generated the PME interrupt. | ||
194 | * @req_id: PCIe Requester ID of the device that generated the PME. | ||
195 | */ | ||
196 | static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | ||
197 | { | ||
198 | u8 busnr = req_id >> 8, devfn = req_id & 0xff; | ||
199 | struct pci_bus *bus; | ||
200 | struct pci_dev *dev; | ||
201 | bool found = false; | ||
202 | |||
203 | /* First, check if the PME is from the root port itself. */ | ||
204 | if (port->devfn == devfn && port->bus->number == busnr) { | ||
205 | if (pci_check_pme_status(port)) { | ||
206 | pm_request_resume(&port->dev); | ||
207 | found = true; | ||
208 | } else { | ||
209 | /* | ||
210 | * Apparently, the root port generated the PME on behalf | ||
211 | * of a non-PCIe device downstream. If this is done by | ||
212 | * a root port, the Requester ID field in its status | ||
213 | * register may contain either the root port's, or the | ||
214 | * source device's information (PCI Express Base | ||
215 | * Specification, Rev. 2.0, Section 6.1.9). | ||
216 | */ | ||
217 | down_read(&pci_bus_sem); | ||
218 | found = pcie_pme_walk_bus(port->subordinate); | ||
219 | up_read(&pci_bus_sem); | ||
220 | } | ||
221 | goto out; | ||
222 | } | ||
223 | |||
224 | /* Second, find the bus the source device is on. */ | ||
225 | bus = pci_find_bus(pci_domain_nr(port->bus), busnr); | ||
226 | if (!bus) | ||
227 | goto out; | ||
228 | |||
229 | /* Next, check if the PME is from a PCIe-PCI bridge. */ | ||
230 | found = pcie_pme_from_pci_bridge(bus, devfn); | ||
231 | if (found) | ||
232 | goto out; | ||
233 | |||
234 | /* Finally, try to find the PME source on the bus. */ | ||
235 | down_read(&pci_bus_sem); | ||
236 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
237 | pci_dev_get(dev); | ||
238 | if (dev->devfn == devfn) { | ||
239 | found = true; | ||
240 | break; | ||
241 | } | ||
242 | pci_dev_put(dev); | ||
243 | } | ||
244 | up_read(&pci_bus_sem); | ||
245 | |||
246 | if (found) { | ||
247 | /* The device is there, but we have to check its PME status. */ | ||
248 | found = pci_check_pme_status(dev); | ||
249 | if (found) | ||
250 | pm_request_resume(&dev->dev); | ||
251 | pci_dev_put(dev); | ||
252 | } else if (devfn) { | ||
253 | /* | ||
254 | * The device is not there, but we can still try to recover by | ||
255 | * assuming that the PME was reported by a PCIe-PCI bridge that | ||
256 | * used devfn different from zero. | ||
257 | */ | ||
258 | dev_dbg(&port->dev, "PME interrupt generated for " | ||
259 | "non-existent device %02x:%02x.%d\n", | ||
260 | busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); | ||
261 | found = pcie_pme_from_pci_bridge(bus, 0); | ||
262 | } | ||
263 | |||
264 | out: | ||
265 | if (!found) | ||
266 | dev_dbg(&port->dev, "Spurious native PME interrupt!\n"); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * pcie_pme_work_fn - Work handler for PCIe PME interrupt. | ||
271 | * @work: Work structure giving access to service data. | ||
272 | */ | ||
273 | static void pcie_pme_work_fn(struct work_struct *work) | ||
274 | { | ||
275 | struct pcie_pme_service_data *data = | ||
276 | container_of(work, struct pcie_pme_service_data, work); | ||
277 | struct pci_dev *port = data->srv->port; | ||
278 | int rtsta_pos; | ||
279 | u32 rtsta; | ||
280 | |||
281 | rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; | ||
282 | |||
283 | spin_lock_irq(&data->lock); | ||
284 | |||
285 | for (;;) { | ||
286 | if (data->noirq) | ||
287 | break; | ||
288 | |||
289 | pci_read_config_dword(port, rtsta_pos, &rtsta); | ||
290 | if (rtsta & PCI_EXP_RTSTA_PME) { | ||
291 | /* | ||
292 | * Clear PME status of the port. If there are other | ||
293 | * pending PMEs, the status will be set again. | ||
294 | */ | ||
295 | pcie_pme_clear_status(port); | ||
296 | |||
297 | spin_unlock_irq(&data->lock); | ||
298 | pcie_pme_handle_request(port, rtsta & 0xffff); | ||
299 | spin_lock_irq(&data->lock); | ||
300 | |||
301 | continue; | ||
302 | } | ||
303 | |||
304 | /* No need to loop if there are no more PMEs pending. */ | ||
305 | if (!(rtsta & PCI_EXP_RTSTA_PENDING)) | ||
306 | break; | ||
307 | |||
308 | spin_unlock_irq(&data->lock); | ||
309 | cpu_relax(); | ||
310 | spin_lock_irq(&data->lock); | ||
311 | } | ||
312 | |||
313 | if (!data->noirq) | ||
314 | pcie_pme_interrupt_enable(port, true); | ||
315 | |||
316 | spin_unlock_irq(&data->lock); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt. | ||
321 | * @irq: Interrupt vector. | ||
322 | * @context: Interrupt context pointer. | ||
323 | */ | ||
324 | static irqreturn_t pcie_pme_irq(int irq, void *context) | ||
325 | { | ||
326 | struct pci_dev *port; | ||
327 | struct pcie_pme_service_data *data; | ||
328 | int rtsta_pos; | ||
329 | u32 rtsta; | ||
330 | unsigned long flags; | ||
331 | |||
332 | port = ((struct pcie_device *)context)->port; | ||
333 | data = get_service_data((struct pcie_device *)context); | ||
334 | |||
335 | rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; | ||
336 | |||
337 | spin_lock_irqsave(&data->lock, flags); | ||
338 | pci_read_config_dword(port, rtsta_pos, &rtsta); | ||
339 | |||
340 | if (!(rtsta & PCI_EXP_RTSTA_PME)) { | ||
341 | spin_unlock_irqrestore(&data->lock, flags); | ||
342 | return IRQ_NONE; | ||
343 | } | ||
344 | |||
345 | pcie_pme_interrupt_enable(port, false); | ||
346 | spin_unlock_irqrestore(&data->lock, flags); | ||
347 | |||
348 | /* We don't use pm_wq, because it's freezable. */ | ||
349 | schedule_work(&data->work); | ||
350 | |||
351 | return IRQ_HANDLED; | ||
352 | } | ||
353 | |||
354 | /** | ||
355 | * pcie_pme_set_native - Set the PME interrupt flag for given device. | ||
356 | * @dev: PCI device to handle. | ||
357 | * @ign: Ignored. | ||
358 | */ | ||
359 | static int pcie_pme_set_native(struct pci_dev *dev, void *ign) | ||
360 | { | ||
361 | dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n"); | ||
362 | |||
363 | device_set_run_wake(&dev->dev, true); | ||
364 | dev->pme_interrupt = true; | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port. | ||
370 | * @port: PCIe root port or event collector to handle. | ||
371 | * | ||
372 | * For each device below given root port, including the port itself (or for each | ||
373 | * root complex integrated endpoint if @port is a root complex event collector) | ||
374 | * set the flag indicating that it can signal run-time wake-up events via PCIe | ||
375 | * PME interrupts. | ||
376 | */ | ||
377 | static void pcie_pme_mark_devices(struct pci_dev *port) | ||
378 | { | ||
379 | pcie_pme_set_native(port, NULL); | ||
380 | if (port->subordinate) { | ||
381 | pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL); | ||
382 | } else { | ||
383 | struct pci_bus *bus = port->bus; | ||
384 | struct pci_dev *dev; | ||
385 | |||
386 | /* Check if this is a root port event collector. */ | ||
387 | if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) | ||
388 | return; | ||
389 | |||
390 | down_read(&pci_bus_sem); | ||
391 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
392 | if (pci_is_pcie(dev) | ||
393 | && dev->pcie_type == PCI_EXP_TYPE_RC_END) | ||
394 | pcie_pme_set_native(dev, NULL); | ||
395 | up_read(&pci_bus_sem); | ||
396 | } | ||
397 | } | ||
398 | |||
399 | /** | ||
400 | * pcie_pme_probe - Initialize PCIe PME service for given root port. | ||
401 | * @srv: PCIe service to initialize. | ||
402 | */ | ||
403 | static int pcie_pme_probe(struct pcie_device *srv) | ||
404 | { | ||
405 | struct pci_dev *port; | ||
406 | struct pcie_pme_service_data *data; | ||
407 | int ret; | ||
408 | |||
409 | if (!pcie_pme_platform_setup(srv)) | ||
410 | return -EACCES; | ||
411 | |||
412 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
413 | if (!data) | ||
414 | return -ENOMEM; | ||
415 | |||
416 | spin_lock_init(&data->lock); | ||
417 | INIT_WORK(&data->work, pcie_pme_work_fn); | ||
418 | data->srv = srv; | ||
419 | set_service_data(srv, data); | ||
420 | |||
421 | port = srv->port; | ||
422 | pcie_pme_interrupt_enable(port, false); | ||
423 | pcie_pme_clear_status(port); | ||
424 | |||
425 | ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); | ||
426 | if (ret) { | ||
427 | kfree(data); | ||
428 | } else { | ||
429 | pcie_pme_mark_devices(port); | ||
430 | pcie_pme_interrupt_enable(port, true); | ||
431 | } | ||
432 | |||
433 | return ret; | ||
434 | } | ||
435 | |||
436 | /** | ||
437 | * pcie_pme_suspend - Suspend PCIe PME service device. | ||
438 | * @srv: PCIe service device to suspend. | ||
439 | */ | ||
440 | static int pcie_pme_suspend(struct pcie_device *srv) | ||
441 | { | ||
442 | struct pcie_pme_service_data *data = get_service_data(srv); | ||
443 | struct pci_dev *port = srv->port; | ||
444 | |||
445 | spin_lock_irq(&data->lock); | ||
446 | pcie_pme_interrupt_enable(port, false); | ||
447 | pcie_pme_clear_status(port); | ||
448 | data->noirq = true; | ||
449 | spin_unlock_irq(&data->lock); | ||
450 | |||
451 | synchronize_irq(srv->irq); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | /** | ||
457 | * pcie_pme_resume - Resume PCIe PME service device. | ||
458 | * @srv - PCIe service device to resume. | ||
459 | */ | ||
460 | static int pcie_pme_resume(struct pcie_device *srv) | ||
461 | { | ||
462 | struct pcie_pme_service_data *data = get_service_data(srv); | ||
463 | struct pci_dev *port = srv->port; | ||
464 | |||
465 | spin_lock_irq(&data->lock); | ||
466 | data->noirq = false; | ||
467 | pcie_pme_clear_status(port); | ||
468 | pcie_pme_interrupt_enable(port, true); | ||
469 | spin_unlock_irq(&data->lock); | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | /** | ||
475 | * pcie_pme_remove - Prepare PCIe PME service device for removal. | ||
476 | * @srv - PCIe service device to resume. | ||
477 | */ | ||
478 | static void pcie_pme_remove(struct pcie_device *srv) | ||
479 | { | ||
480 | pcie_pme_suspend(srv); | ||
481 | free_irq(srv->irq, srv); | ||
482 | kfree(get_service_data(srv)); | ||
483 | } | ||
484 | |||
485 | static struct pcie_port_service_driver pcie_pme_driver = { | ||
486 | .name = "pcie_pme", | ||
487 | .port_type = PCI_EXP_TYPE_ROOT_PORT, | ||
488 | .service = PCIE_PORT_SERVICE_PME, | ||
489 | |||
490 | .probe = pcie_pme_probe, | ||
491 | .suspend = pcie_pme_suspend, | ||
492 | .resume = pcie_pme_resume, | ||
493 | .remove = pcie_pme_remove, | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * pcie_pme_service_init - Register the PCIe PME service driver. | ||
498 | */ | ||
499 | static int __init pcie_pme_service_init(void) | ||
500 | { | ||
501 | return pcie_pme_disabled ? | ||
502 | -ENODEV : pcie_port_service_register(&pcie_pme_driver); | ||
503 | } | ||
504 | |||
505 | module_init(pcie_pme_service_init); | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h new file mode 100644 index 000000000000..b30d2b7c9775 --- /dev/null +++ b/drivers/pci/pcie/pme/pcie_pme.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * drivers/pci/pcie/pme/pcie_pme.h | ||
3 | * | ||
4 | * PCI Express Root Port PME signaling support | ||
5 | * | ||
6 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
7 | */ | ||
8 | |||
9 | #ifndef _PCIE_PME_H_ | ||
10 | #define _PCIE_PME_H_ | ||
11 | |||
12 | struct pcie_device; | ||
13 | |||
14 | #ifdef CONFIG_ACPI | ||
15 | extern int pcie_pme_acpi_setup(struct pcie_device *srv); | ||
16 | |||
17 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
18 | { | ||
19 | return pcie_pme_acpi_setup(srv); | ||
20 | } | ||
21 | #else /* !CONFIG_ACPI */ | ||
22 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
23 | { | ||
24 | return 0; | ||
25 | } | ||
26 | #endif /* !CONFIG_ACPI */ | ||
27 | |||
28 | #endif | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c new file mode 100644 index 000000000000..83ab2287ae3f --- /dev/null +++ b/drivers/pci/pcie/pme/pcie_pme_acpi.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * PCIe Native PME support, ACPI-related part | ||
3 | * | ||
4 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License V2. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/pci-acpi.h> | ||
16 | #include <linux/pcieport_if.h> | ||
17 | |||
18 | /** | ||
19 | * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME. | ||
20 | * @srv - PCIe PME service for a root port or event collector. | ||
21 | * | ||
22 | * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid | ||
23 | * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME | ||
24 | * control to the kernel. | ||
25 | */ | ||
26 | int pcie_pme_acpi_setup(struct pcie_device *srv) | ||
27 | { | ||
28 | acpi_status status = AE_NOT_FOUND; | ||
29 | struct pci_dev *port = srv->port; | ||
30 | acpi_handle handle; | ||
31 | int error = 0; | ||
32 | |||
33 | if (acpi_pci_disabled) | ||
34 | return -ENOSYS; | ||
35 | |||
36 | dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n"); | ||
37 | |||
38 | handle = acpi_find_root_bridge_handle(port); | ||
39 | if (!handle) | ||
40 | return -EINVAL; | ||
41 | |||
42 | status = acpi_pci_osc_control_set(handle, | ||
43 | OSC_PCI_EXPRESS_PME_CONTROL | | ||
44 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
45 | if (ACPI_FAILURE(status)) { | ||
46 | dev_info(&port->dev, | ||
47 | "Failed to receive control of PCIe PME service: %s\n", | ||
48 | (status == AE_SUPPORT || status == AE_NOT_FOUND) ? | ||
49 | "no _OSC support" : "ACPI _OSC failed"); | ||
50 | error = -ENODEV; | ||
51 | } | ||
52 | |||
53 | return error; | ||
54 | } | ||
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index aaeb9d21cba5..813a5c3427b6 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
@@ -30,4 +30,21 @@ extern void pcie_port_device_remove(struct pci_dev *dev); | |||
30 | extern int __must_check pcie_port_bus_register(void); | 30 | extern int __must_check pcie_port_bus_register(void); |
31 | extern void pcie_port_bus_unregister(void); | 31 | extern void pcie_port_bus_unregister(void); |
32 | 32 | ||
33 | #ifdef CONFIG_PCIE_PME | ||
34 | extern bool pcie_pme_msi_disabled; | ||
35 | |||
36 | static inline void pcie_pme_disable_msi(void) | ||
37 | { | ||
38 | pcie_pme_msi_disabled = true; | ||
39 | } | ||
40 | |||
41 | static inline bool pcie_pme_no_msi(void) | ||
42 | { | ||
43 | return pcie_pme_msi_disabled; | ||
44 | } | ||
45 | #else /* !CONFIG_PCIE_PME */ | ||
46 | static inline void pcie_pme_disable_msi(void) {} | ||
47 | static inline bool pcie_pme_no_msi(void) { return false; } | ||
48 | #endif /* !CONFIG_PCIE_PME */ | ||
49 | |||
33 | #endif /* _PORTDRV_H_ */ | 50 | #endif /* _PORTDRV_H_ */ |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index b174188ac121..0d34ff415399 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -186,16 +186,24 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
186 | */ | 186 | */ |
187 | static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) | 187 | static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) |
188 | { | 188 | { |
189 | int i, irq; | 189 | int i, irq = -1; |
190 | |||
191 | /* We have to use INTx if MSI cannot be used for PCIe PME. */ | ||
192 | if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) { | ||
193 | if (dev->pin) | ||
194 | irq = dev->irq; | ||
195 | goto no_msi; | ||
196 | } | ||
190 | 197 | ||
191 | /* Try to use MSI-X if supported */ | 198 | /* Try to use MSI-X if supported */ |
192 | if (!pcie_port_enable_msix(dev, irqs, mask)) | 199 | if (!pcie_port_enable_msix(dev, irqs, mask)) |
193 | return 0; | 200 | return 0; |
201 | |||
194 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ | 202 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ |
195 | irq = -1; | ||
196 | if (!pci_enable_msi(dev) || dev->pin) | 203 | if (!pci_enable_msi(dev) || dev->pin) |
197 | irq = dev->irq; | 204 | irq = dev->irq; |
198 | 205 | ||
206 | no_msi: | ||
199 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) | 207 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) |
200 | irqs[i] = irq; | 208 | irqs[i] = irq; |
201 | irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; | 209 | irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 13c8972886e6..127e8f169d9c 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/pcieport_if.h> | 16 | #include <linux/pcieport_if.h> |
17 | #include <linux/aer.h> | 17 | #include <linux/aer.h> |
18 | #include <linux/dmi.h> | ||
18 | 19 | ||
19 | #include "portdrv.h" | 20 | #include "portdrv.h" |
20 | #include "aer/aerdrv.h" | 21 | #include "aer/aerdrv.h" |
@@ -273,10 +274,36 @@ static struct pci_driver pcie_portdriver = { | |||
273 | .driver.pm = PCIE_PORTDRV_PM_OPS, | 274 | .driver.pm = PCIE_PORTDRV_PM_OPS, |
274 | }; | 275 | }; |
275 | 276 | ||
277 | static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) | ||
278 | { | ||
279 | pr_notice("%s detected: will not use MSI for PCIe PME signaling\n", | ||
280 | d->ident); | ||
281 | pcie_pme_disable_msi(); | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { | ||
286 | /* | ||
287 | * Boxes that should not use MSI for PCIe PME signaling. | ||
288 | */ | ||
289 | { | ||
290 | .callback = dmi_pcie_pme_disable_msi, | ||
291 | .ident = "MSI Wind U-100", | ||
292 | .matches = { | ||
293 | DMI_MATCH(DMI_SYS_VENDOR, | ||
294 | "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
295 | DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), | ||
296 | }, | ||
297 | }, | ||
298 | {} | ||
299 | }; | ||
300 | |||
276 | static int __init pcie_portdrv_init(void) | 301 | static int __init pcie_portdrv_init(void) |
277 | { | 302 | { |
278 | int retval; | 303 | int retval; |
279 | 304 | ||
305 | dmi_check_system(pcie_portdrv_dmi_table); | ||
306 | |||
280 | retval = pcie_port_bus_register(); | 307 | retval = pcie_port_bus_register(); |
281 | if (retval) { | 308 | if (retval) { |
282 | printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval); | 309 | printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval); |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 446e4a94d7d3..270d069819f7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -89,6 +89,7 @@ static void release_pcibus_dev(struct device *dev) | |||
89 | 89 | ||
90 | if (pci_bus->bridge) | 90 | if (pci_bus->bridge) |
91 | put_device(pci_bus->bridge); | 91 | put_device(pci_bus->bridge); |
92 | pci_bus_remove_resources(pci_bus); | ||
92 | kfree(pci_bus); | 93 | kfree(pci_bus); |
93 | } | 94 | } |
94 | 95 | ||
@@ -281,26 +282,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
281 | } | 282 | } |
282 | } | 283 | } |
283 | 284 | ||
284 | void __devinit pci_read_bridge_bases(struct pci_bus *child) | 285 | static void __devinit pci_read_bridge_io(struct pci_bus *child) |
285 | { | 286 | { |
286 | struct pci_dev *dev = child->self; | 287 | struct pci_dev *dev = child->self; |
287 | u8 io_base_lo, io_limit_lo; | 288 | u8 io_base_lo, io_limit_lo; |
288 | u16 mem_base_lo, mem_limit_lo; | ||
289 | unsigned long base, limit; | 289 | unsigned long base, limit; |
290 | struct resource *res; | 290 | struct resource *res; |
291 | int i; | ||
292 | |||
293 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ | ||
294 | return; | ||
295 | |||
296 | dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", | ||
297 | child->secondary, child->subordinate, | ||
298 | dev->transparent ? " (subtractive decode)": ""); | ||
299 | |||
300 | if (dev->transparent) { | ||
301 | for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) | ||
302 | child->resource[i] = child->parent->resource[i - 3]; | ||
303 | } | ||
304 | 291 | ||
305 | res = child->resource[0]; | 292 | res = child->resource[0]; |
306 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); | 293 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); |
@@ -316,26 +303,50 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
316 | limit |= (io_limit_hi << 16); | 303 | limit |= (io_limit_hi << 16); |
317 | } | 304 | } |
318 | 305 | ||
319 | if (base <= limit) { | 306 | if (base && base <= limit) { |
320 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; | 307 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; |
321 | if (!res->start) | 308 | if (!res->start) |
322 | res->start = base; | 309 | res->start = base; |
323 | if (!res->end) | 310 | if (!res->end) |
324 | res->end = limit + 0xfff; | 311 | res->end = limit + 0xfff; |
325 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 312 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
313 | } else { | ||
314 | dev_printk(KERN_DEBUG, &dev->dev, | ||
315 | " bridge window [io %04lx - %04lx] reg reading\n", | ||
316 | base, limit); | ||
326 | } | 317 | } |
318 | } | ||
319 | |||
320 | static void __devinit pci_read_bridge_mmio(struct pci_bus *child) | ||
321 | { | ||
322 | struct pci_dev *dev = child->self; | ||
323 | u16 mem_base_lo, mem_limit_lo; | ||
324 | unsigned long base, limit; | ||
325 | struct resource *res; | ||
327 | 326 | ||
328 | res = child->resource[1]; | 327 | res = child->resource[1]; |
329 | pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); | 328 | pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); |
330 | pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); | 329 | pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); |
331 | base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; | 330 | base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; |
332 | limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; | 331 | limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; |
333 | if (base <= limit) { | 332 | if (base && base <= limit) { |
334 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; | 333 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; |
335 | res->start = base; | 334 | res->start = base; |
336 | res->end = limit + 0xfffff; | 335 | res->end = limit + 0xfffff; |
337 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 336 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
337 | } else { | ||
338 | dev_printk(KERN_DEBUG, &dev->dev, | ||
339 | " bridge window [mem 0x%08lx - 0x%08lx] reg reading\n", | ||
340 | base, limit + 0xfffff); | ||
338 | } | 341 | } |
342 | } | ||
343 | |||
344 | static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child) | ||
345 | { | ||
346 | struct pci_dev *dev = child->self; | ||
347 | u16 mem_base_lo, mem_limit_lo; | ||
348 | unsigned long base, limit; | ||
349 | struct resource *res; | ||
339 | 350 | ||
340 | res = child->resource[2]; | 351 | res = child->resource[2]; |
341 | pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); | 352 | pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); |
@@ -366,7 +377,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
366 | #endif | 377 | #endif |
367 | } | 378 | } |
368 | } | 379 | } |
369 | if (base <= limit) { | 380 | if (base && base <= limit) { |
370 | res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | | 381 | res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | |
371 | IORESOURCE_MEM | IORESOURCE_PREFETCH; | 382 | IORESOURCE_MEM | IORESOURCE_PREFETCH; |
372 | if (res->flags & PCI_PREF_RANGE_TYPE_64) | 383 | if (res->flags & PCI_PREF_RANGE_TYPE_64) |
@@ -374,6 +385,44 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
374 | res->start = base; | 385 | res->start = base; |
375 | res->end = limit + 0xfffff; | 386 | res->end = limit + 0xfffff; |
376 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 387 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
388 | } else { | ||
389 | dev_printk(KERN_DEBUG, &dev->dev, | ||
390 | " bridge window [mem 0x%08lx - %08lx pref] reg reading\n", | ||
391 | base, limit + 0xfffff); | ||
392 | } | ||
393 | } | ||
394 | |||
395 | void __devinit pci_read_bridge_bases(struct pci_bus *child) | ||
396 | { | ||
397 | struct pci_dev *dev = child->self; | ||
398 | struct resource *res; | ||
399 | int i; | ||
400 | |||
401 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ | ||
402 | return; | ||
403 | |||
404 | dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", | ||
405 | child->secondary, child->subordinate, | ||
406 | dev->transparent ? " (subtractive decode)" : ""); | ||
407 | |||
408 | pci_bus_remove_resources(child); | ||
409 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) | ||
410 | child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; | ||
411 | |||
412 | pci_read_bridge_io(child); | ||
413 | pci_read_bridge_mmio(child); | ||
414 | pci_read_bridge_mmio_pref(child); | ||
415 | |||
416 | if (dev->transparent) { | ||
417 | pci_bus_for_each_resource(child->parent, res, i) { | ||
418 | if (res) { | ||
419 | pci_bus_add_resource(child, res, | ||
420 | PCI_SUBTRACTIVE_DECODE); | ||
421 | dev_printk(KERN_DEBUG, &dev->dev, | ||
422 | " bridge window %pR (subtractive decode)\n", | ||
423 | res); | ||
424 | } | ||
425 | } | ||
377 | } | 426 | } |
378 | } | 427 | } |
379 | 428 | ||
@@ -387,10 +436,147 @@ static struct pci_bus * pci_alloc_bus(void) | |||
387 | INIT_LIST_HEAD(&b->children); | 436 | INIT_LIST_HEAD(&b->children); |
388 | INIT_LIST_HEAD(&b->devices); | 437 | INIT_LIST_HEAD(&b->devices); |
389 | INIT_LIST_HEAD(&b->slots); | 438 | INIT_LIST_HEAD(&b->slots); |
439 | INIT_LIST_HEAD(&b->resources); | ||
440 | b->max_bus_speed = PCI_SPEED_UNKNOWN; | ||
441 | b->cur_bus_speed = PCI_SPEED_UNKNOWN; | ||
390 | } | 442 | } |
391 | return b; | 443 | return b; |
392 | } | 444 | } |
393 | 445 | ||
446 | static unsigned char pcix_bus_speed[] = { | ||
447 | PCI_SPEED_UNKNOWN, /* 0 */ | ||
448 | PCI_SPEED_66MHz_PCIX, /* 1 */ | ||
449 | PCI_SPEED_100MHz_PCIX, /* 2 */ | ||
450 | PCI_SPEED_133MHz_PCIX, /* 3 */ | ||
451 | PCI_SPEED_UNKNOWN, /* 4 */ | ||
452 | PCI_SPEED_66MHz_PCIX_ECC, /* 5 */ | ||
453 | PCI_SPEED_100MHz_PCIX_ECC, /* 6 */ | ||
454 | PCI_SPEED_133MHz_PCIX_ECC, /* 7 */ | ||
455 | PCI_SPEED_UNKNOWN, /* 8 */ | ||
456 | PCI_SPEED_66MHz_PCIX_266, /* 9 */ | ||
457 | PCI_SPEED_100MHz_PCIX_266, /* A */ | ||
458 | PCI_SPEED_133MHz_PCIX_266, /* B */ | ||
459 | PCI_SPEED_UNKNOWN, /* C */ | ||
460 | PCI_SPEED_66MHz_PCIX_533, /* D */ | ||
461 | PCI_SPEED_100MHz_PCIX_533, /* E */ | ||
462 | PCI_SPEED_133MHz_PCIX_533 /* F */ | ||
463 | }; | ||
464 | |||
465 | static unsigned char pcie_link_speed[] = { | ||
466 | PCI_SPEED_UNKNOWN, /* 0 */ | ||
467 | PCIE_SPEED_2_5GT, /* 1 */ | ||
468 | PCIE_SPEED_5_0GT, /* 2 */ | ||
469 | PCIE_SPEED_8_0GT, /* 3 */ | ||
470 | PCI_SPEED_UNKNOWN, /* 4 */ | ||
471 | PCI_SPEED_UNKNOWN, /* 5 */ | ||
472 | PCI_SPEED_UNKNOWN, /* 6 */ | ||
473 | PCI_SPEED_UNKNOWN, /* 7 */ | ||
474 | PCI_SPEED_UNKNOWN, /* 8 */ | ||
475 | PCI_SPEED_UNKNOWN, /* 9 */ | ||
476 | PCI_SPEED_UNKNOWN, /* A */ | ||
477 | PCI_SPEED_UNKNOWN, /* B */ | ||
478 | PCI_SPEED_UNKNOWN, /* C */ | ||
479 | PCI_SPEED_UNKNOWN, /* D */ | ||
480 | PCI_SPEED_UNKNOWN, /* E */ | ||
481 | PCI_SPEED_UNKNOWN /* F */ | ||
482 | }; | ||
483 | |||
484 | void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) | ||
485 | { | ||
486 | bus->cur_bus_speed = pcie_link_speed[linksta & 0xf]; | ||
487 | } | ||
488 | EXPORT_SYMBOL_GPL(pcie_update_link_speed); | ||
489 | |||
490 | static unsigned char agp_speeds[] = { | ||
491 | AGP_UNKNOWN, | ||
492 | AGP_1X, | ||
493 | AGP_2X, | ||
494 | AGP_4X, | ||
495 | AGP_8X | ||
496 | }; | ||
497 | |||
498 | static enum pci_bus_speed agp_speed(int agp3, int agpstat) | ||
499 | { | ||
500 | int index = 0; | ||
501 | |||
502 | if (agpstat & 4) | ||
503 | index = 3; | ||
504 | else if (agpstat & 2) | ||
505 | index = 2; | ||
506 | else if (agpstat & 1) | ||
507 | index = 1; | ||
508 | else | ||
509 | goto out; | ||
510 | |||
511 | if (agp3) { | ||
512 | index += 2; | ||
513 | if (index == 5) | ||
514 | index = 0; | ||
515 | } | ||
516 | |||
517 | out: | ||
518 | return agp_speeds[index]; | ||
519 | } | ||
520 | |||
521 | |||
522 | static void pci_set_bus_speed(struct pci_bus *bus) | ||
523 | { | ||
524 | struct pci_dev *bridge = bus->self; | ||
525 | int pos; | ||
526 | |||
527 | pos = pci_find_capability(bridge, PCI_CAP_ID_AGP); | ||
528 | if (!pos) | ||
529 | pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3); | ||
530 | if (pos) { | ||
531 | u32 agpstat, agpcmd; | ||
532 | |||
533 | pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat); | ||
534 | bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7); | ||
535 | |||
536 | pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd); | ||
537 | bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7); | ||
538 | } | ||
539 | |||
540 | pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); | ||
541 | if (pos) { | ||
542 | u16 status; | ||
543 | enum pci_bus_speed max; | ||
544 | pci_read_config_word(bridge, pos + 2, &status); | ||
545 | |||
546 | if (status & 0x8000) { | ||
547 | max = PCI_SPEED_133MHz_PCIX_533; | ||
548 | } else if (status & 0x4000) { | ||
549 | max = PCI_SPEED_133MHz_PCIX_266; | ||
550 | } else if (status & 0x0002) { | ||
551 | if (((status >> 12) & 0x3) == 2) { | ||
552 | max = PCI_SPEED_133MHz_PCIX_ECC; | ||
553 | } else { | ||
554 | max = PCI_SPEED_133MHz_PCIX; | ||
555 | } | ||
556 | } else { | ||
557 | max = PCI_SPEED_66MHz_PCIX; | ||
558 | } | ||
559 | |||
560 | bus->max_bus_speed = max; | ||
561 | bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf]; | ||
562 | |||
563 | return; | ||
564 | } | ||
565 | |||
566 | pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); | ||
567 | if (pos) { | ||
568 | u32 linkcap; | ||
569 | u16 linksta; | ||
570 | |||
571 | pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); | ||
572 | bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; | ||
573 | |||
574 | pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); | ||
575 | pcie_update_link_speed(bus, linksta); | ||
576 | } | ||
577 | } | ||
578 | |||
579 | |||
394 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | 580 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, |
395 | struct pci_dev *bridge, int busnr) | 581 | struct pci_dev *bridge, int busnr) |
396 | { | 582 | { |
@@ -430,6 +616,8 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | |||
430 | child->self = bridge; | 616 | child->self = bridge; |
431 | child->bridge = get_device(&bridge->dev); | 617 | child->bridge = get_device(&bridge->dev); |
432 | 618 | ||
619 | pci_set_bus_speed(child); | ||
620 | |||
433 | /* Set up default resource pointers and names.. */ | 621 | /* Set up default resource pointers and names.. */ |
434 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { | 622 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { |
435 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; | 623 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; |
@@ -1081,6 +1269,45 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) | |||
1081 | } | 1269 | } |
1082 | EXPORT_SYMBOL(pci_scan_single_device); | 1270 | EXPORT_SYMBOL(pci_scan_single_device); |
1083 | 1271 | ||
1272 | static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn) | ||
1273 | { | ||
1274 | u16 cap; | ||
1275 | unsigned pos, next_fn; | ||
1276 | |||
1277 | if (!dev) | ||
1278 | return 0; | ||
1279 | |||
1280 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); | ||
1281 | if (!pos) | ||
1282 | return 0; | ||
1283 | pci_read_config_word(dev, pos + 4, &cap); | ||
1284 | next_fn = cap >> 8; | ||
1285 | if (next_fn <= fn) | ||
1286 | return 0; | ||
1287 | return next_fn; | ||
1288 | } | ||
1289 | |||
1290 | static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn) | ||
1291 | { | ||
1292 | return (fn + 1) % 8; | ||
1293 | } | ||
1294 | |||
1295 | static unsigned no_next_fn(struct pci_dev *dev, unsigned fn) | ||
1296 | { | ||
1297 | return 0; | ||
1298 | } | ||
1299 | |||
1300 | static int only_one_child(struct pci_bus *bus) | ||
1301 | { | ||
1302 | struct pci_dev *parent = bus->self; | ||
1303 | if (!parent || !pci_is_pcie(parent)) | ||
1304 | return 0; | ||
1305 | if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT || | ||
1306 | parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) | ||
1307 | return 1; | ||
1308 | return 0; | ||
1309 | } | ||
1310 | |||
1084 | /** | 1311 | /** |
1085 | * pci_scan_slot - scan a PCI slot on a bus for devices. | 1312 | * pci_scan_slot - scan a PCI slot on a bus for devices. |
1086 | * @bus: PCI bus to scan | 1313 | * @bus: PCI bus to scan |
@@ -1094,21 +1321,30 @@ EXPORT_SYMBOL(pci_scan_single_device); | |||
1094 | */ | 1321 | */ |
1095 | int pci_scan_slot(struct pci_bus *bus, int devfn) | 1322 | int pci_scan_slot(struct pci_bus *bus, int devfn) |
1096 | { | 1323 | { |
1097 | int fn, nr = 0; | 1324 | unsigned fn, nr = 0; |
1098 | struct pci_dev *dev; | 1325 | struct pci_dev *dev; |
1326 | unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn; | ||
1327 | |||
1328 | if (only_one_child(bus) && (devfn > 0)) | ||
1329 | return 0; /* Already scanned the entire slot */ | ||
1099 | 1330 | ||
1100 | dev = pci_scan_single_device(bus, devfn); | 1331 | dev = pci_scan_single_device(bus, devfn); |
1101 | if (dev && !dev->is_added) /* new device? */ | 1332 | if (!dev) |
1333 | return 0; | ||
1334 | if (!dev->is_added) | ||
1102 | nr++; | 1335 | nr++; |
1103 | 1336 | ||
1104 | if (dev && dev->multifunction) { | 1337 | if (pci_ari_enabled(bus)) |
1105 | for (fn = 1; fn < 8; fn++) { | 1338 | next_fn = next_ari_fn; |
1106 | dev = pci_scan_single_device(bus, devfn + fn); | 1339 | else if (dev->multifunction) |
1107 | if (dev) { | 1340 | next_fn = next_trad_fn; |
1108 | if (!dev->is_added) | 1341 | |
1109 | nr++; | 1342 | for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) { |
1110 | dev->multifunction = 1; | 1343 | dev = pci_scan_single_device(bus, devfn + fn); |
1111 | } | 1344 | if (dev) { |
1345 | if (!dev->is_added) | ||
1346 | nr++; | ||
1347 | dev->multifunction = 1; | ||
1112 | } | 1348 | } |
1113 | } | 1349 | } |
1114 | 1350 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index d58b94030ef3..790eb69a4aa9 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -25,14 +25,9 @@ | |||
25 | #include <linux/dmi.h> | 25 | #include <linux/dmi.h> |
26 | #include <linux/pci-aspm.h> | 26 | #include <linux/pci-aspm.h> |
27 | #include <linux/ioport.h> | 27 | #include <linux/ioport.h> |
28 | #include <asm/dma.h> /* isa_dma_bridge_buggy */ | ||
28 | #include "pci.h" | 29 | #include "pci.h" |
29 | 30 | ||
30 | int isa_dma_bridge_buggy; | ||
31 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
32 | int pci_pci_problems; | ||
33 | EXPORT_SYMBOL(pci_pci_problems); | ||
34 | |||
35 | #ifdef CONFIG_PCI_QUIRKS | ||
36 | /* | 31 | /* |
37 | * This quirk function disables memory decoding and releases memory resources | 32 | * This quirk function disables memory decoding and releases memory resources |
38 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. | 33 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. |
@@ -2612,6 +2607,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) | |||
2612 | } | 2607 | } |
2613 | pci_do_fixups(dev, start, end); | 2608 | pci_do_fixups(dev, start, end); |
2614 | } | 2609 | } |
2610 | EXPORT_SYMBOL(pci_fixup_device); | ||
2615 | 2611 | ||
2616 | static int __init pci_apply_final_quirks(void) | 2612 | static int __init pci_apply_final_quirks(void) |
2617 | { | 2613 | { |
@@ -2723,9 +2719,3 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) | |||
2723 | 2719 | ||
2724 | return -ENOTTY; | 2720 | return -ENOTTY; |
2725 | } | 2721 | } |
2726 | |||
2727 | #else | ||
2728 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} | ||
2729 | int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; } | ||
2730 | #endif | ||
2731 | EXPORT_SYMBOL(pci_fixup_device); | ||
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index c48cd377b3f5..bf32f07c4efb 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -27,37 +27,83 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include "pci.h" | 28 | #include "pci.h" |
29 | 29 | ||
30 | static void pbus_assign_resources_sorted(const struct pci_bus *bus) | 30 | struct resource_list_x { |
31 | { | 31 | struct resource_list_x *next; |
32 | struct pci_dev *dev; | ||
33 | struct resource *res; | 32 | struct resource *res; |
34 | struct resource_list head, *list, *tmp; | 33 | struct pci_dev *dev; |
35 | int idx; | 34 | resource_size_t start; |
35 | resource_size_t end; | ||
36 | unsigned long flags; | ||
37 | }; | ||
36 | 38 | ||
37 | head.next = NULL; | 39 | static void add_to_failed_list(struct resource_list_x *head, |
38 | list_for_each_entry(dev, &bus->devices, bus_list) { | 40 | struct pci_dev *dev, struct resource *res) |
39 | u16 class = dev->class >> 8; | 41 | { |
42 | struct resource_list_x *list = head; | ||
43 | struct resource_list_x *ln = list->next; | ||
44 | struct resource_list_x *tmp; | ||
40 | 45 | ||
41 | /* Don't touch classless devices or host bridges or ioapics. */ | 46 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); |
42 | if (class == PCI_CLASS_NOT_DEFINED || | 47 | if (!tmp) { |
43 | class == PCI_CLASS_BRIDGE_HOST) | 48 | pr_warning("add_to_failed_list: kmalloc() failed!\n"); |
44 | continue; | 49 | return; |
50 | } | ||
45 | 51 | ||
46 | /* Don't touch ioapic devices already enabled by firmware */ | 52 | tmp->next = ln; |
47 | if (class == PCI_CLASS_SYSTEM_PIC) { | 53 | tmp->res = res; |
48 | u16 command; | 54 | tmp->dev = dev; |
49 | pci_read_config_word(dev, PCI_COMMAND, &command); | 55 | tmp->start = res->start; |
50 | if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | 56 | tmp->end = res->end; |
51 | continue; | 57 | tmp->flags = res->flags; |
52 | } | 58 | list->next = tmp; |
59 | } | ||
60 | |||
61 | static void free_failed_list(struct resource_list_x *head) | ||
62 | { | ||
63 | struct resource_list_x *list, *tmp; | ||
53 | 64 | ||
54 | pdev_sort_resources(dev, &head); | 65 | for (list = head->next; list;) { |
66 | tmp = list; | ||
67 | list = list->next; | ||
68 | kfree(tmp); | ||
55 | } | 69 | } |
56 | 70 | ||
57 | for (list = head.next; list;) { | 71 | head->next = NULL; |
72 | } | ||
73 | |||
74 | static void __dev_sort_resources(struct pci_dev *dev, | ||
75 | struct resource_list *head) | ||
76 | { | ||
77 | u16 class = dev->class >> 8; | ||
78 | |||
79 | /* Don't touch classless devices or host bridges or ioapics. */ | ||
80 | if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) | ||
81 | return; | ||
82 | |||
83 | /* Don't touch ioapic devices already enabled by firmware */ | ||
84 | if (class == PCI_CLASS_SYSTEM_PIC) { | ||
85 | u16 command; | ||
86 | pci_read_config_word(dev, PCI_COMMAND, &command); | ||
87 | if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | ||
88 | return; | ||
89 | } | ||
90 | |||
91 | pdev_sort_resources(dev, head); | ||
92 | } | ||
93 | |||
94 | static void __assign_resources_sorted(struct resource_list *head, | ||
95 | struct resource_list_x *fail_head) | ||
96 | { | ||
97 | struct resource *res; | ||
98 | struct resource_list *list, *tmp; | ||
99 | int idx; | ||
100 | |||
101 | for (list = head->next; list;) { | ||
58 | res = list->res; | 102 | res = list->res; |
59 | idx = res - &list->dev->resource[0]; | 103 | idx = res - &list->dev->resource[0]; |
60 | if (pci_assign_resource(list->dev, idx)) { | 104 | if (pci_assign_resource(list->dev, idx)) { |
105 | if (fail_head && !pci_is_root_bus(list->dev->bus)) | ||
106 | add_to_failed_list(fail_head, list->dev, res); | ||
61 | res->start = 0; | 107 | res->start = 0; |
62 | res->end = 0; | 108 | res->end = 0; |
63 | res->flags = 0; | 109 | res->flags = 0; |
@@ -68,6 +114,30 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus) | |||
68 | } | 114 | } |
69 | } | 115 | } |
70 | 116 | ||
117 | static void pdev_assign_resources_sorted(struct pci_dev *dev, | ||
118 | struct resource_list_x *fail_head) | ||
119 | { | ||
120 | struct resource_list head; | ||
121 | |||
122 | head.next = NULL; | ||
123 | __dev_sort_resources(dev, &head); | ||
124 | __assign_resources_sorted(&head, fail_head); | ||
125 | |||
126 | } | ||
127 | |||
128 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, | ||
129 | struct resource_list_x *fail_head) | ||
130 | { | ||
131 | struct pci_dev *dev; | ||
132 | struct resource_list head; | ||
133 | |||
134 | head.next = NULL; | ||
135 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
136 | __dev_sort_resources(dev, &head); | ||
137 | |||
138 | __assign_resources_sorted(&head, fail_head); | ||
139 | } | ||
140 | |||
71 | void pci_setup_cardbus(struct pci_bus *bus) | 141 | void pci_setup_cardbus(struct pci_bus *bus) |
72 | { | 142 | { |
73 | struct pci_dev *bridge = bus->self; | 143 | struct pci_dev *bridge = bus->self; |
@@ -134,18 +204,12 @@ EXPORT_SYMBOL(pci_setup_cardbus); | |||
134 | config space writes, so it's quite possible that an I/O window of | 204 | config space writes, so it's quite possible that an I/O window of |
135 | the bridge will have some undesirable address (e.g. 0) after the | 205 | the bridge will have some undesirable address (e.g. 0) after the |
136 | first write. Ditto 64-bit prefetchable MMIO. */ | 206 | first write. Ditto 64-bit prefetchable MMIO. */ |
137 | static void pci_setup_bridge(struct pci_bus *bus) | 207 | static void pci_setup_bridge_io(struct pci_bus *bus) |
138 | { | 208 | { |
139 | struct pci_dev *bridge = bus->self; | 209 | struct pci_dev *bridge = bus->self; |
140 | struct resource *res; | 210 | struct resource *res; |
141 | struct pci_bus_region region; | 211 | struct pci_bus_region region; |
142 | u32 l, bu, lu, io_upper16; | 212 | u32 l, io_upper16; |
143 | |||
144 | if (pci_is_enabled(bridge)) | ||
145 | return; | ||
146 | |||
147 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", | ||
148 | bus->secondary, bus->subordinate); | ||
149 | 213 | ||
150 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ | 214 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ |
151 | res = bus->resource[0]; | 215 | res = bus->resource[0]; |
@@ -158,8 +222,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
158 | /* Set up upper 16 bits of I/O base/limit. */ | 222 | /* Set up upper 16 bits of I/O base/limit. */ |
159 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); | 223 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); |
160 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 224 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
161 | } | 225 | } else { |
162 | else { | ||
163 | /* Clear upper 16 bits of I/O base/limit. */ | 226 | /* Clear upper 16 bits of I/O base/limit. */ |
164 | io_upper16 = 0; | 227 | io_upper16 = 0; |
165 | l = 0x00f0; | 228 | l = 0x00f0; |
@@ -171,21 +234,35 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
171 | pci_write_config_dword(bridge, PCI_IO_BASE, l); | 234 | pci_write_config_dword(bridge, PCI_IO_BASE, l); |
172 | /* Update upper 16 bits of I/O base/limit. */ | 235 | /* Update upper 16 bits of I/O base/limit. */ |
173 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); | 236 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); |
237 | } | ||
174 | 238 | ||
175 | /* Set up the top and bottom of the PCI Memory segment | 239 | static void pci_setup_bridge_mmio(struct pci_bus *bus) |
176 | for this bus. */ | 240 | { |
241 | struct pci_dev *bridge = bus->self; | ||
242 | struct resource *res; | ||
243 | struct pci_bus_region region; | ||
244 | u32 l; | ||
245 | |||
246 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ | ||
177 | res = bus->resource[1]; | 247 | res = bus->resource[1]; |
178 | pcibios_resource_to_bus(bridge, ®ion, res); | 248 | pcibios_resource_to_bus(bridge, ®ion, res); |
179 | if (res->flags & IORESOURCE_MEM) { | 249 | if (res->flags & IORESOURCE_MEM) { |
180 | l = (region.start >> 16) & 0xfff0; | 250 | l = (region.start >> 16) & 0xfff0; |
181 | l |= region.end & 0xfff00000; | 251 | l |= region.end & 0xfff00000; |
182 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 252 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
183 | } | 253 | } else { |
184 | else { | ||
185 | l = 0x0000fff0; | 254 | l = 0x0000fff0; |
186 | dev_info(&bridge->dev, " bridge window [mem disabled]\n"); | 255 | dev_info(&bridge->dev, " bridge window [mem disabled]\n"); |
187 | } | 256 | } |
188 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); | 257 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); |
258 | } | ||
259 | |||
260 | static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) | ||
261 | { | ||
262 | struct pci_dev *bridge = bus->self; | ||
263 | struct resource *res; | ||
264 | struct pci_bus_region region; | ||
265 | u32 l, bu, lu; | ||
189 | 266 | ||
190 | /* Clear out the upper 32 bits of PREF limit. | 267 | /* Clear out the upper 32 bits of PREF limit. |
191 | If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily | 268 | If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily |
@@ -204,8 +281,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
204 | lu = upper_32_bits(region.end); | 281 | lu = upper_32_bits(region.end); |
205 | } | 282 | } |
206 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 283 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
207 | } | 284 | } else { |
208 | else { | ||
209 | l = 0x0000fff0; | 285 | l = 0x0000fff0; |
210 | dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); | 286 | dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); |
211 | } | 287 | } |
@@ -214,10 +290,35 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
214 | /* Set the upper 32 bits of PREF base & limit. */ | 290 | /* Set the upper 32 bits of PREF base & limit. */ |
215 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); | 291 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); |
216 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); | 292 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); |
293 | } | ||
294 | |||
295 | static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) | ||
296 | { | ||
297 | struct pci_dev *bridge = bus->self; | ||
298 | |||
299 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", | ||
300 | bus->secondary, bus->subordinate); | ||
301 | |||
302 | if (type & IORESOURCE_IO) | ||
303 | pci_setup_bridge_io(bus); | ||
304 | |||
305 | if (type & IORESOURCE_MEM) | ||
306 | pci_setup_bridge_mmio(bus); | ||
307 | |||
308 | if (type & IORESOURCE_PREFETCH) | ||
309 | pci_setup_bridge_mmio_pref(bus); | ||
217 | 310 | ||
218 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | 311 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); |
219 | } | 312 | } |
220 | 313 | ||
314 | static void pci_setup_bridge(struct pci_bus *bus) | ||
315 | { | ||
316 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | | ||
317 | IORESOURCE_PREFETCH; | ||
318 | |||
319 | __pci_setup_bridge(bus, type); | ||
320 | } | ||
321 | |||
221 | /* Check whether the bridge supports optional I/O and | 322 | /* Check whether the bridge supports optional I/O and |
222 | prefetchable memory ranges. If not, the respective | 323 | prefetchable memory ranges. If not, the respective |
223 | base/limit registers must be read-only and read as 0. */ | 324 | base/limit registers must be read-only and read as 0. */ |
@@ -253,8 +354,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) | |||
253 | } | 354 | } |
254 | if (pmem) { | 355 | if (pmem) { |
255 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; | 356 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; |
256 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) | 357 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == |
358 | PCI_PREF_RANGE_TYPE_64) { | ||
257 | b_res[2].flags |= IORESOURCE_MEM_64; | 359 | b_res[2].flags |= IORESOURCE_MEM_64; |
360 | b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; | ||
361 | } | ||
258 | } | 362 | } |
259 | 363 | ||
260 | /* double check if bridge does support 64 bit pref */ | 364 | /* double check if bridge does support 64 bit pref */ |
@@ -283,8 +387,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon | |||
283 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | 387 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | |
284 | IORESOURCE_PREFETCH; | 388 | IORESOURCE_PREFETCH; |
285 | 389 | ||
286 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 390 | pci_bus_for_each_resource(bus, r, i) { |
287 | r = bus->resource[i]; | ||
288 | if (r == &ioport_resource || r == &iomem_resource) | 391 | if (r == &ioport_resource || r == &iomem_resource) |
289 | continue; | 392 | continue; |
290 | if (r && (r->flags & type_mask) == type && !r->parent) | 393 | if (r && (r->flags & type_mask) == type && !r->parent) |
@@ -301,7 +404,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) | |||
301 | { | 404 | { |
302 | struct pci_dev *dev; | 405 | struct pci_dev *dev; |
303 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 406 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
304 | unsigned long size = 0, size1 = 0; | 407 | unsigned long size = 0, size1 = 0, old_size; |
305 | 408 | ||
306 | if (!b_res) | 409 | if (!b_res) |
307 | return; | 410 | return; |
@@ -326,12 +429,17 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) | |||
326 | } | 429 | } |
327 | if (size < min_size) | 430 | if (size < min_size) |
328 | size = min_size; | 431 | size = min_size; |
432 | old_size = resource_size(b_res); | ||
433 | if (old_size == 1) | ||
434 | old_size = 0; | ||
329 | /* To be fixed in 2.5: we should have sort of HAVE_ISA | 435 | /* To be fixed in 2.5: we should have sort of HAVE_ISA |
330 | flag in the struct pci_bus. */ | 436 | flag in the struct pci_bus. */ |
331 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | 437 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
332 | size = (size & 0xff) + ((size & ~0xffUL) << 2); | 438 | size = (size & 0xff) + ((size & ~0xffUL) << 2); |
333 | #endif | 439 | #endif |
334 | size = ALIGN(size + size1, 4096); | 440 | size = ALIGN(size + size1, 4096); |
441 | if (size < old_size) | ||
442 | size = old_size; | ||
335 | if (!size) { | 443 | if (!size) { |
336 | if (b_res->start || b_res->end) | 444 | if (b_res->start || b_res->end) |
337 | dev_info(&bus->self->dev, "disabling bridge window " | 445 | dev_info(&bus->self->dev, "disabling bridge window " |
@@ -352,7 +460,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
352 | unsigned long type, resource_size_t min_size) | 460 | unsigned long type, resource_size_t min_size) |
353 | { | 461 | { |
354 | struct pci_dev *dev; | 462 | struct pci_dev *dev; |
355 | resource_size_t min_align, align, size; | 463 | resource_size_t min_align, align, size, old_size; |
356 | resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ | 464 | resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ |
357 | int order, max_order; | 465 | int order, max_order; |
358 | struct resource *b_res = find_free_bus_resource(bus, type); | 466 | struct resource *b_res = find_free_bus_resource(bus, type); |
@@ -402,6 +510,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
402 | } | 510 | } |
403 | if (size < min_size) | 511 | if (size < min_size) |
404 | size = min_size; | 512 | size = min_size; |
513 | old_size = resource_size(b_res); | ||
514 | if (old_size == 1) | ||
515 | old_size = 0; | ||
516 | if (size < old_size) | ||
517 | size = old_size; | ||
405 | 518 | ||
406 | align = 0; | 519 | align = 0; |
407 | min_align = 0; | 520 | min_align = 0; |
@@ -538,23 +651,25 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
538 | } | 651 | } |
539 | EXPORT_SYMBOL(pci_bus_size_bridges); | 652 | EXPORT_SYMBOL(pci_bus_size_bridges); |
540 | 653 | ||
541 | void __ref pci_bus_assign_resources(const struct pci_bus *bus) | 654 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, |
655 | struct resource_list_x *fail_head) | ||
542 | { | 656 | { |
543 | struct pci_bus *b; | 657 | struct pci_bus *b; |
544 | struct pci_dev *dev; | 658 | struct pci_dev *dev; |
545 | 659 | ||
546 | pbus_assign_resources_sorted(bus); | 660 | pbus_assign_resources_sorted(bus, fail_head); |
547 | 661 | ||
548 | list_for_each_entry(dev, &bus->devices, bus_list) { | 662 | list_for_each_entry(dev, &bus->devices, bus_list) { |
549 | b = dev->subordinate; | 663 | b = dev->subordinate; |
550 | if (!b) | 664 | if (!b) |
551 | continue; | 665 | continue; |
552 | 666 | ||
553 | pci_bus_assign_resources(b); | 667 | __pci_bus_assign_resources(b, fail_head); |
554 | 668 | ||
555 | switch (dev->class >> 8) { | 669 | switch (dev->class >> 8) { |
556 | case PCI_CLASS_BRIDGE_PCI: | 670 | case PCI_CLASS_BRIDGE_PCI: |
557 | pci_setup_bridge(b); | 671 | if (!pci_is_enabled(dev)) |
672 | pci_setup_bridge(b); | ||
558 | break; | 673 | break; |
559 | 674 | ||
560 | case PCI_CLASS_BRIDGE_CARDBUS: | 675 | case PCI_CLASS_BRIDGE_CARDBUS: |
@@ -568,15 +683,130 @@ void __ref pci_bus_assign_resources(const struct pci_bus *bus) | |||
568 | } | 683 | } |
569 | } | 684 | } |
570 | } | 685 | } |
686 | |||
687 | void __ref pci_bus_assign_resources(const struct pci_bus *bus) | ||
688 | { | ||
689 | __pci_bus_assign_resources(bus, NULL); | ||
690 | } | ||
571 | EXPORT_SYMBOL(pci_bus_assign_resources); | 691 | EXPORT_SYMBOL(pci_bus_assign_resources); |
572 | 692 | ||
693 | static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge, | ||
694 | struct resource_list_x *fail_head) | ||
695 | { | ||
696 | struct pci_bus *b; | ||
697 | |||
698 | pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head); | ||
699 | |||
700 | b = bridge->subordinate; | ||
701 | if (!b) | ||
702 | return; | ||
703 | |||
704 | __pci_bus_assign_resources(b, fail_head); | ||
705 | |||
706 | switch (bridge->class >> 8) { | ||
707 | case PCI_CLASS_BRIDGE_PCI: | ||
708 | pci_setup_bridge(b); | ||
709 | break; | ||
710 | |||
711 | case PCI_CLASS_BRIDGE_CARDBUS: | ||
712 | pci_setup_cardbus(b); | ||
713 | break; | ||
714 | |||
715 | default: | ||
716 | dev_info(&bridge->dev, "not setting up bridge for bus " | ||
717 | "%04x:%02x\n", pci_domain_nr(b), b->number); | ||
718 | break; | ||
719 | } | ||
720 | } | ||
721 | static void pci_bridge_release_resources(struct pci_bus *bus, | ||
722 | unsigned long type) | ||
723 | { | ||
724 | int idx; | ||
725 | bool changed = false; | ||
726 | struct pci_dev *dev; | ||
727 | struct resource *r; | ||
728 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
729 | IORESOURCE_PREFETCH; | ||
730 | |||
731 | dev = bus->self; | ||
732 | for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END; | ||
733 | idx++) { | ||
734 | r = &dev->resource[idx]; | ||
735 | if ((r->flags & type_mask) != type) | ||
736 | continue; | ||
737 | if (!r->parent) | ||
738 | continue; | ||
739 | /* | ||
740 | * if there are children under that, we should release them | ||
741 | * all | ||
742 | */ | ||
743 | release_child_resources(r); | ||
744 | if (!release_resource(r)) { | ||
745 | dev_printk(KERN_DEBUG, &dev->dev, | ||
746 | "resource %d %pR released\n", idx, r); | ||
747 | /* keep the old size */ | ||
748 | r->end = resource_size(r) - 1; | ||
749 | r->start = 0; | ||
750 | r->flags = 0; | ||
751 | changed = true; | ||
752 | } | ||
753 | } | ||
754 | |||
755 | if (changed) { | ||
756 | /* avoiding touch the one without PREF */ | ||
757 | if (type & IORESOURCE_PREFETCH) | ||
758 | type = IORESOURCE_PREFETCH; | ||
759 | __pci_setup_bridge(bus, type); | ||
760 | } | ||
761 | } | ||
762 | |||
763 | enum release_type { | ||
764 | leaf_only, | ||
765 | whole_subtree, | ||
766 | }; | ||
767 | /* | ||
768 | * try to release pci bridge resources that is from leaf bridge, | ||
769 | * so we can allocate big new one later | ||
770 | */ | ||
771 | static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus, | ||
772 | unsigned long type, | ||
773 | enum release_type rel_type) | ||
774 | { | ||
775 | struct pci_dev *dev; | ||
776 | bool is_leaf_bridge = true; | ||
777 | |||
778 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
779 | struct pci_bus *b = dev->subordinate; | ||
780 | if (!b) | ||
781 | continue; | ||
782 | |||
783 | is_leaf_bridge = false; | ||
784 | |||
785 | if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||
786 | continue; | ||
787 | |||
788 | if (rel_type == whole_subtree) | ||
789 | pci_bus_release_bridge_resources(b, type, | ||
790 | whole_subtree); | ||
791 | } | ||
792 | |||
793 | if (pci_is_root_bus(bus)) | ||
794 | return; | ||
795 | |||
796 | if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||
797 | return; | ||
798 | |||
799 | if ((rel_type == whole_subtree) || is_leaf_bridge) | ||
800 | pci_bridge_release_resources(bus, type); | ||
801 | } | ||
802 | |||
573 | static void pci_bus_dump_res(struct pci_bus *bus) | 803 | static void pci_bus_dump_res(struct pci_bus *bus) |
574 | { | 804 | { |
575 | int i; | 805 | struct resource *res; |
806 | int i; | ||
576 | 807 | ||
577 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 808 | pci_bus_for_each_resource(bus, res, i) { |
578 | struct resource *res = bus->resource[i]; | 809 | if (!res || !res->end || !res->flags) |
579 | if (!res || !res->end) | ||
580 | continue; | 810 | continue; |
581 | 811 | ||
582 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); | 812 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); |
@@ -600,11 +830,65 @@ static void pci_bus_dump_resources(struct pci_bus *bus) | |||
600 | } | 830 | } |
601 | } | 831 | } |
602 | 832 | ||
833 | static int __init pci_bus_get_depth(struct pci_bus *bus) | ||
834 | { | ||
835 | int depth = 0; | ||
836 | struct pci_dev *dev; | ||
837 | |||
838 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
839 | int ret; | ||
840 | struct pci_bus *b = dev->subordinate; | ||
841 | if (!b) | ||
842 | continue; | ||
843 | |||
844 | ret = pci_bus_get_depth(b); | ||
845 | if (ret + 1 > depth) | ||
846 | depth = ret + 1; | ||
847 | } | ||
848 | |||
849 | return depth; | ||
850 | } | ||
851 | static int __init pci_get_max_depth(void) | ||
852 | { | ||
853 | int depth = 0; | ||
854 | struct pci_bus *bus; | ||
855 | |||
856 | list_for_each_entry(bus, &pci_root_buses, node) { | ||
857 | int ret; | ||
858 | |||
859 | ret = pci_bus_get_depth(bus); | ||
860 | if (ret > depth) | ||
861 | depth = ret; | ||
862 | } | ||
863 | |||
864 | return depth; | ||
865 | } | ||
866 | |||
867 | /* | ||
868 | * first try will not touch pci bridge res | ||
869 | * second and later try will clear small leaf bridge res | ||
870 | * will stop till to the max deepth if can not find good one | ||
871 | */ | ||
603 | void __init | 872 | void __init |
604 | pci_assign_unassigned_resources(void) | 873 | pci_assign_unassigned_resources(void) |
605 | { | 874 | { |
606 | struct pci_bus *bus; | 875 | struct pci_bus *bus; |
876 | int tried_times = 0; | ||
877 | enum release_type rel_type = leaf_only; | ||
878 | struct resource_list_x head, *list; | ||
879 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
880 | IORESOURCE_PREFETCH; | ||
881 | unsigned long failed_type; | ||
882 | int max_depth = pci_get_max_depth(); | ||
883 | int pci_try_num; | ||
607 | 884 | ||
885 | head.next = NULL; | ||
886 | |||
887 | pci_try_num = max_depth + 1; | ||
888 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", | ||
889 | max_depth, pci_try_num); | ||
890 | |||
891 | again: | ||
608 | /* Depth first, calculate sizes and alignments of all | 892 | /* Depth first, calculate sizes and alignments of all |
609 | subordinate buses. */ | 893 | subordinate buses. */ |
610 | list_for_each_entry(bus, &pci_root_buses, node) { | 894 | list_for_each_entry(bus, &pci_root_buses, node) { |
@@ -612,12 +896,130 @@ pci_assign_unassigned_resources(void) | |||
612 | } | 896 | } |
613 | /* Depth last, allocate resources and update the hardware. */ | 897 | /* Depth last, allocate resources and update the hardware. */ |
614 | list_for_each_entry(bus, &pci_root_buses, node) { | 898 | list_for_each_entry(bus, &pci_root_buses, node) { |
615 | pci_bus_assign_resources(bus); | 899 | __pci_bus_assign_resources(bus, &head); |
616 | pci_enable_bridges(bus); | ||
617 | } | 900 | } |
901 | tried_times++; | ||
902 | |||
903 | /* any device complain? */ | ||
904 | if (!head.next) | ||
905 | goto enable_and_dump; | ||
906 | failed_type = 0; | ||
907 | for (list = head.next; list;) { | ||
908 | failed_type |= list->flags; | ||
909 | list = list->next; | ||
910 | } | ||
911 | /* | ||
912 | * io port are tight, don't try extra | ||
913 | * or if reach the limit, don't want to try more | ||
914 | */ | ||
915 | failed_type &= type_mask; | ||
916 | if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) { | ||
917 | free_failed_list(&head); | ||
918 | goto enable_and_dump; | ||
919 | } | ||
920 | |||
921 | printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", | ||
922 | tried_times + 1); | ||
923 | |||
924 | /* third times and later will not check if it is leaf */ | ||
925 | if ((tried_times + 1) > 2) | ||
926 | rel_type = whole_subtree; | ||
927 | |||
928 | /* | ||
929 | * Try to release leaf bridge's resources that doesn't fit resource of | ||
930 | * child device under that bridge | ||
931 | */ | ||
932 | for (list = head.next; list;) { | ||
933 | bus = list->dev->bus; | ||
934 | pci_bus_release_bridge_resources(bus, list->flags & type_mask, | ||
935 | rel_type); | ||
936 | list = list->next; | ||
937 | } | ||
938 | /* restore size and flags */ | ||
939 | for (list = head.next; list;) { | ||
940 | struct resource *res = list->res; | ||
941 | |||
942 | res->start = list->start; | ||
943 | res->end = list->end; | ||
944 | res->flags = list->flags; | ||
945 | if (list->dev->subordinate) | ||
946 | res->flags = 0; | ||
947 | |||
948 | list = list->next; | ||
949 | } | ||
950 | free_failed_list(&head); | ||
951 | |||
952 | goto again; | ||
953 | |||
954 | enable_and_dump: | ||
955 | /* Depth last, update the hardware. */ | ||
956 | list_for_each_entry(bus, &pci_root_buses, node) | ||
957 | pci_enable_bridges(bus); | ||
618 | 958 | ||
619 | /* dump the resource on buses */ | 959 | /* dump the resource on buses */ |
620 | list_for_each_entry(bus, &pci_root_buses, node) { | 960 | list_for_each_entry(bus, &pci_root_buses, node) { |
621 | pci_bus_dump_resources(bus); | 961 | pci_bus_dump_resources(bus); |
622 | } | 962 | } |
623 | } | 963 | } |
964 | |||
965 | void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) | ||
966 | { | ||
967 | struct pci_bus *parent = bridge->subordinate; | ||
968 | int tried_times = 0; | ||
969 | struct resource_list_x head, *list; | ||
970 | int retval; | ||
971 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
972 | IORESOURCE_PREFETCH; | ||
973 | |||
974 | head.next = NULL; | ||
975 | |||
976 | again: | ||
977 | pci_bus_size_bridges(parent); | ||
978 | __pci_bridge_assign_resources(bridge, &head); | ||
979 | retval = pci_reenable_device(bridge); | ||
980 | pci_set_master(bridge); | ||
981 | pci_enable_bridges(parent); | ||
982 | |||
983 | tried_times++; | ||
984 | |||
985 | if (!head.next) | ||
986 | return; | ||
987 | |||
988 | if (tried_times >= 2) { | ||
989 | /* still fail, don't need to try more */ | ||
990 | free_failed_list(&head); | ||
991 | return; | ||
992 | } | ||
993 | |||
994 | printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", | ||
995 | tried_times + 1); | ||
996 | |||
997 | /* | ||
998 | * Try to release leaf bridge's resources that doesn't fit resource of | ||
999 | * child device under that bridge | ||
1000 | */ | ||
1001 | for (list = head.next; list;) { | ||
1002 | struct pci_bus *bus = list->dev->bus; | ||
1003 | unsigned long flags = list->flags; | ||
1004 | |||
1005 | pci_bus_release_bridge_resources(bus, flags & type_mask, | ||
1006 | whole_subtree); | ||
1007 | list = list->next; | ||
1008 | } | ||
1009 | /* restore size and flags */ | ||
1010 | for (list = head.next; list;) { | ||
1011 | struct resource *res = list->res; | ||
1012 | |||
1013 | res->start = list->start; | ||
1014 | res->end = list->end; | ||
1015 | res->flags = list->flags; | ||
1016 | if (list->dev->subordinate) | ||
1017 | res->flags = 0; | ||
1018 | |||
1019 | list = list->next; | ||
1020 | } | ||
1021 | free_failed_list(&head); | ||
1022 | |||
1023 | goto again; | ||
1024 | } | ||
1025 | EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources); | ||
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 8c02b6c53bdb..49c9e6c9779a 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
@@ -47,6 +47,55 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) | |||
47 | slot->number); | 47 | slot->number); |
48 | } | 48 | } |
49 | 49 | ||
50 | /* these strings match up with the values in pci_bus_speed */ | ||
51 | static char *pci_bus_speed_strings[] = { | ||
52 | "33 MHz PCI", /* 0x00 */ | ||
53 | "66 MHz PCI", /* 0x01 */ | ||
54 | "66 MHz PCI-X", /* 0x02 */ | ||
55 | "100 MHz PCI-X", /* 0x03 */ | ||
56 | "133 MHz PCI-X", /* 0x04 */ | ||
57 | NULL, /* 0x05 */ | ||
58 | NULL, /* 0x06 */ | ||
59 | NULL, /* 0x07 */ | ||
60 | NULL, /* 0x08 */ | ||
61 | "66 MHz PCI-X 266", /* 0x09 */ | ||
62 | "100 MHz PCI-X 266", /* 0x0a */ | ||
63 | "133 MHz PCI-X 266", /* 0x0b */ | ||
64 | "Unknown AGP", /* 0x0c */ | ||
65 | "1x AGP", /* 0x0d */ | ||
66 | "2x AGP", /* 0x0e */ | ||
67 | "4x AGP", /* 0x0f */ | ||
68 | "8x AGP", /* 0x10 */ | ||
69 | "66 MHz PCI-X 533", /* 0x11 */ | ||
70 | "100 MHz PCI-X 533", /* 0x12 */ | ||
71 | "133 MHz PCI-X 533", /* 0x13 */ | ||
72 | "2.5 GT/s PCIe", /* 0x14 */ | ||
73 | "5.0 GT/s PCIe", /* 0x15 */ | ||
74 | "8.0 GT/s PCIe", /* 0x16 */ | ||
75 | }; | ||
76 | |||
77 | static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) | ||
78 | { | ||
79 | const char *speed_string; | ||
80 | |||
81 | if (speed < ARRAY_SIZE(pci_bus_speed_strings)) | ||
82 | speed_string = pci_bus_speed_strings[speed]; | ||
83 | else | ||
84 | speed_string = "Unknown"; | ||
85 | |||
86 | return sprintf(buf, "%s\n", speed_string); | ||
87 | } | ||
88 | |||
89 | static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf) | ||
90 | { | ||
91 | return bus_speed_read(slot->bus->max_bus_speed, buf); | ||
92 | } | ||
93 | |||
94 | static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf) | ||
95 | { | ||
96 | return bus_speed_read(slot->bus->cur_bus_speed, buf); | ||
97 | } | ||
98 | |||
50 | static void pci_slot_release(struct kobject *kobj) | 99 | static void pci_slot_release(struct kobject *kobj) |
51 | { | 100 | { |
52 | struct pci_dev *dev; | 101 | struct pci_dev *dev; |
@@ -66,9 +115,15 @@ static void pci_slot_release(struct kobject *kobj) | |||
66 | 115 | ||
67 | static struct pci_slot_attribute pci_slot_attr_address = | 116 | static struct pci_slot_attribute pci_slot_attr_address = |
68 | __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); | 117 | __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); |
118 | static struct pci_slot_attribute pci_slot_attr_max_speed = | ||
119 | __ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL); | ||
120 | static struct pci_slot_attribute pci_slot_attr_cur_speed = | ||
121 | __ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL); | ||
69 | 122 | ||
70 | static struct attribute *pci_slot_default_attrs[] = { | 123 | static struct attribute *pci_slot_default_attrs[] = { |
71 | &pci_slot_attr_address.attr, | 124 | &pci_slot_attr_address.attr, |
125 | &pci_slot_attr_max_speed.attr, | ||
126 | &pci_slot_attr_cur_speed.attr, | ||
72 | NULL, | 127 | NULL, |
73 | }; | 128 | }; |
74 | 129 | ||
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c index 52db17263d8b..f8401a0ef89b 100644 --- a/drivers/pcmcia/rsrc_mgr.c +++ b/drivers/pcmcia/rsrc_mgr.c | |||
@@ -114,22 +114,21 @@ struct pcmcia_align_data { | |||
114 | unsigned long offset; | 114 | unsigned long offset; |
115 | }; | 115 | }; |
116 | 116 | ||
117 | static void pcmcia_align(void *align_data, struct resource *res, | 117 | static resource_size_t pcmcia_align(void *align_data, |
118 | unsigned long size, unsigned long align) | 118 | const struct resource *res, |
119 | resource_size_t size, resource_size_t align) | ||
119 | { | 120 | { |
120 | struct pcmcia_align_data *data = align_data; | 121 | struct pcmcia_align_data *data = align_data; |
121 | unsigned long start; | 122 | resource_size_t start; |
122 | 123 | ||
123 | start = (res->start & ~data->mask) + data->offset; | 124 | start = (res->start & ~data->mask) + data->offset; |
124 | if (start < res->start) | 125 | if (start < res->start) |
125 | start += data->mask + 1; | 126 | start += data->mask + 1; |
126 | res->start = start; | ||
127 | 127 | ||
128 | #ifdef CONFIG_X86 | 128 | #ifdef CONFIG_X86 |
129 | if (res->flags & IORESOURCE_IO) { | 129 | if (res->flags & IORESOURCE_IO) { |
130 | if (start & 0x300) { | 130 | if (start & 0x300) { |
131 | start = (start + 0x3ff) & ~0x3ff; | 131 | start = (start + 0x3ff) & ~0x3ff; |
132 | res->start = start; | ||
133 | } | 132 | } |
134 | } | 133 | } |
135 | #endif | 134 | #endif |
@@ -137,9 +136,11 @@ static void pcmcia_align(void *align_data, struct resource *res, | |||
137 | #ifdef CONFIG_M68K | 136 | #ifdef CONFIG_M68K |
138 | if (res->flags & IORESOURCE_IO) { | 137 | if (res->flags & IORESOURCE_IO) { |
139 | if ((res->start + size - 1) >= 1024) | 138 | if ((res->start + size - 1) >= 1024) |
140 | res->start = res->end; | 139 | start = res->end; |
141 | } | 140 | } |
142 | #endif | 141 | #endif |
142 | |||
143 | return start; | ||
143 | } | 144 | } |
144 | 145 | ||
145 | 146 | ||
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 9b0dc433a8c3..c67638fe6914 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c | |||
@@ -533,8 +533,8 @@ struct pcmcia_align_data { | |||
533 | struct resource_map *map; | 533 | struct resource_map *map; |
534 | }; | 534 | }; |
535 | 535 | ||
536 | static void | 536 | static resource_size_t |
537 | pcmcia_common_align(void *align_data, struct resource *res, | 537 | pcmcia_common_align(void *align_data, const struct resource *res, |
538 | resource_size_t size, resource_size_t align) | 538 | resource_size_t size, resource_size_t align) |
539 | { | 539 | { |
540 | struct pcmcia_align_data *data = align_data; | 540 | struct pcmcia_align_data *data = align_data; |
@@ -545,17 +545,18 @@ pcmcia_common_align(void *align_data, struct resource *res, | |||
545 | start = (res->start & ~data->mask) + data->offset; | 545 | start = (res->start & ~data->mask) + data->offset; |
546 | if (start < res->start) | 546 | if (start < res->start) |
547 | start += data->mask + 1; | 547 | start += data->mask + 1; |
548 | res->start = start; | 548 | return start; |
549 | } | 549 | } |
550 | 550 | ||
551 | static void | 551 | static resource_size_t |
552 | pcmcia_align(void *align_data, struct resource *res, resource_size_t size, | 552 | pcmcia_align(void *align_data, const struct resource *res, |
553 | resource_size_t align) | 553 | resource_size_t size, resource_size_t align) |
554 | { | 554 | { |
555 | struct pcmcia_align_data *data = align_data; | 555 | struct pcmcia_align_data *data = align_data; |
556 | struct resource_map *m; | 556 | struct resource_map *m; |
557 | resource_size_t start; | ||
557 | 558 | ||
558 | pcmcia_common_align(data, res, size, align); | 559 | start = pcmcia_common_align(data, res, size, align); |
559 | 560 | ||
560 | for (m = data->map->next; m != data->map; m = m->next) { | 561 | for (m = data->map->next; m != data->map; m = m->next) { |
561 | unsigned long start = m->base; | 562 | unsigned long start = m->base; |
@@ -567,8 +568,7 @@ pcmcia_align(void *align_data, struct resource *res, resource_size_t size, | |||
567 | * fit here. | 568 | * fit here. |
568 | */ | 569 | */ |
569 | if (res->start < start) { | 570 | if (res->start < start) { |
570 | res->start = start; | 571 | start = pcmcia_common_align(data, res, size, align); |
571 | pcmcia_common_align(data, res, size, align); | ||
572 | } | 572 | } |
573 | 573 | ||
574 | /* | 574 | /* |
@@ -586,7 +586,9 @@ pcmcia_align(void *align_data, struct resource *res, resource_size_t size, | |||
586 | * If we failed to find something suitable, ensure we fail. | 586 | * If we failed to find something suitable, ensure we fail. |
587 | */ | 587 | */ |
588 | if (m == data->map) | 588 | if (m == data->map) |
589 | res->start = res->end; | 589 | start = res->end; |
590 | |||
591 | return start; | ||
590 | } | 592 | } |
591 | 593 | ||
592 | /* | 594 | /* |
@@ -801,8 +803,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) | |||
801 | return -EINVAL; | 803 | return -EINVAL; |
802 | #endif | 804 | #endif |
803 | 805 | ||
804 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 806 | pci_bus_for_each_resource(s->cb_dev->bus, res, i) { |
805 | res = s->cb_dev->bus->resource[i]; | ||
806 | if (!res) | 807 | if (!res) |
807 | continue; | 808 | continue; |
808 | 809 | ||
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index e4d12acdd525..1f2039d5e966 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c | |||
@@ -649,9 +649,10 @@ static int yenta_search_one_res(struct resource *root, struct resource *res, | |||
649 | static int yenta_search_res(struct yenta_socket *socket, struct resource *res, | 649 | static int yenta_search_res(struct yenta_socket *socket, struct resource *res, |
650 | u32 min) | 650 | u32 min) |
651 | { | 651 | { |
652 | struct resource *root; | ||
652 | int i; | 653 | int i; |
653 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 654 | |
654 | struct resource *root = socket->dev->bus->resource[i]; | 655 | pci_bus_for_each_resource(socket->dev->bus, root, i) { |
655 | if (!root) | 656 | if (!root) |
656 | continue; | 657 | continue; |
657 | 658 | ||
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9d0c941b7d33..66d6c01fcf3e 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Module interface and handling of zfcp data structures. | 4 | * Module interface and handling of zfcp data structures. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2009 | 6 | * Copyright IBM Corporation 2002, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | 9 | /* |
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
33 | #include "zfcp_ext.h" | 33 | #include "zfcp_ext.h" |
34 | #include "zfcp_fc.h" | 34 | #include "zfcp_fc.h" |
35 | #include "zfcp_reqlist.h" | ||
35 | 36 | ||
36 | #define ZFCP_BUS_ID_SIZE 20 | 37 | #define ZFCP_BUS_ID_SIZE 20 |
37 | 38 | ||
@@ -49,36 +50,6 @@ static struct kmem_cache *zfcp_cache_hw_align(const char *name, | |||
49 | return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); | 50 | return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); |
50 | } | 51 | } |
51 | 52 | ||
52 | static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) | ||
53 | { | ||
54 | int idx; | ||
55 | |||
56 | adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head), | ||
57 | GFP_KERNEL); | ||
58 | if (!adapter->req_list) | ||
59 | return -ENOMEM; | ||
60 | |||
61 | for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) | ||
62 | INIT_LIST_HEAD(&adapter->req_list[idx]); | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * zfcp_reqlist_isempty - is the request list empty | ||
68 | * @adapter: pointer to struct zfcp_adapter | ||
69 | * | ||
70 | * Returns: true if list is empty, false otherwise | ||
71 | */ | ||
72 | int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) | ||
73 | { | ||
74 | unsigned int idx; | ||
75 | |||
76 | for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) | ||
77 | if (!list_empty(&adapter->req_list[idx])) | ||
78 | return 0; | ||
79 | return 1; | ||
80 | } | ||
81 | |||
82 | static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) | 53 | static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) |
83 | { | 54 | { |
84 | struct ccw_device *cdev; | 55 | struct ccw_device *cdev; |
@@ -110,7 +81,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) | |||
110 | flush_work(&unit->scsi_work); | 81 | flush_work(&unit->scsi_work); |
111 | 82 | ||
112 | out_unit: | 83 | out_unit: |
113 | put_device(&port->sysfs_device); | 84 | put_device(&port->dev); |
114 | out_port: | 85 | out_port: |
115 | zfcp_ccw_adapter_put(adapter); | 86 | zfcp_ccw_adapter_put(adapter); |
116 | out_ccw_device: | 87 | out_ccw_device: |
@@ -255,7 +226,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) | |||
255 | read_lock_irqsave(&port->unit_list_lock, flags); | 226 | read_lock_irqsave(&port->unit_list_lock, flags); |
256 | list_for_each_entry(unit, &port->unit_list, list) | 227 | list_for_each_entry(unit, &port->unit_list, list) |
257 | if (unit->fcp_lun == fcp_lun) { | 228 | if (unit->fcp_lun == fcp_lun) { |
258 | if (!get_device(&unit->sysfs_device)) | 229 | if (!get_device(&unit->dev)) |
259 | unit = NULL; | 230 | unit = NULL; |
260 | read_unlock_irqrestore(&port->unit_list_lock, flags); | 231 | read_unlock_irqrestore(&port->unit_list_lock, flags); |
261 | return unit; | 232 | return unit; |
@@ -280,7 +251,7 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, | |||
280 | read_lock_irqsave(&adapter->port_list_lock, flags); | 251 | read_lock_irqsave(&adapter->port_list_lock, flags); |
281 | list_for_each_entry(port, &adapter->port_list, list) | 252 | list_for_each_entry(port, &adapter->port_list, list) |
282 | if (port->wwpn == wwpn) { | 253 | if (port->wwpn == wwpn) { |
283 | if (!get_device(&port->sysfs_device)) | 254 | if (!get_device(&port->dev)) |
284 | port = NULL; | 255 | port = NULL; |
285 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 256 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
286 | return port; | 257 | return port; |
@@ -298,10 +269,9 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, | |||
298 | */ | 269 | */ |
299 | static void zfcp_unit_release(struct device *dev) | 270 | static void zfcp_unit_release(struct device *dev) |
300 | { | 271 | { |
301 | struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, | 272 | struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); |
302 | sysfs_device); | ||
303 | 273 | ||
304 | put_device(&unit->port->sysfs_device); | 274 | put_device(&unit->port->dev); |
305 | kfree(unit); | 275 | kfree(unit); |
306 | } | 276 | } |
307 | 277 | ||
@@ -318,11 +288,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
318 | struct zfcp_unit *unit; | 288 | struct zfcp_unit *unit; |
319 | int retval = -ENOMEM; | 289 | int retval = -ENOMEM; |
320 | 290 | ||
321 | get_device(&port->sysfs_device); | 291 | get_device(&port->dev); |
322 | 292 | ||
323 | unit = zfcp_get_unit_by_lun(port, fcp_lun); | 293 | unit = zfcp_get_unit_by_lun(port, fcp_lun); |
324 | if (unit) { | 294 | if (unit) { |
325 | put_device(&unit->sysfs_device); | 295 | put_device(&unit->dev); |
326 | retval = -EEXIST; | 296 | retval = -EEXIST; |
327 | goto err_out; | 297 | goto err_out; |
328 | } | 298 | } |
@@ -333,10 +303,10 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
333 | 303 | ||
334 | unit->port = port; | 304 | unit->port = port; |
335 | unit->fcp_lun = fcp_lun; | 305 | unit->fcp_lun = fcp_lun; |
336 | unit->sysfs_device.parent = &port->sysfs_device; | 306 | unit->dev.parent = &port->dev; |
337 | unit->sysfs_device.release = zfcp_unit_release; | 307 | unit->dev.release = zfcp_unit_release; |
338 | 308 | ||
339 | if (dev_set_name(&unit->sysfs_device, "0x%016llx", | 309 | if (dev_set_name(&unit->dev, "0x%016llx", |
340 | (unsigned long long) fcp_lun)) { | 310 | (unsigned long long) fcp_lun)) { |
341 | kfree(unit); | 311 | kfree(unit); |
342 | goto err_out; | 312 | goto err_out; |
@@ -353,13 +323,12 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
353 | unit->latencies.cmd.channel.min = 0xFFFFFFFF; | 323 | unit->latencies.cmd.channel.min = 0xFFFFFFFF; |
354 | unit->latencies.cmd.fabric.min = 0xFFFFFFFF; | 324 | unit->latencies.cmd.fabric.min = 0xFFFFFFFF; |
355 | 325 | ||
356 | if (device_register(&unit->sysfs_device)) { | 326 | if (device_register(&unit->dev)) { |
357 | put_device(&unit->sysfs_device); | 327 | put_device(&unit->dev); |
358 | goto err_out; | 328 | goto err_out; |
359 | } | 329 | } |
360 | 330 | ||
361 | if (sysfs_create_group(&unit->sysfs_device.kobj, | 331 | if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) |
362 | &zfcp_sysfs_unit_attrs)) | ||
363 | goto err_out_put; | 332 | goto err_out_put; |
364 | 333 | ||
365 | write_lock_irq(&port->unit_list_lock); | 334 | write_lock_irq(&port->unit_list_lock); |
@@ -371,9 +340,9 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
371 | return unit; | 340 | return unit; |
372 | 341 | ||
373 | err_out_put: | 342 | err_out_put: |
374 | device_unregister(&unit->sysfs_device); | 343 | device_unregister(&unit->dev); |
375 | err_out: | 344 | err_out: |
376 | put_device(&port->sysfs_device); | 345 | put_device(&port->dev); |
377 | return ERR_PTR(retval); | 346 | return ERR_PTR(retval); |
378 | } | 347 | } |
379 | 348 | ||
@@ -539,7 +508,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
539 | if (zfcp_allocate_low_mem_buffers(adapter)) | 508 | if (zfcp_allocate_low_mem_buffers(adapter)) |
540 | goto failed; | 509 | goto failed; |
541 | 510 | ||
542 | if (zfcp_reqlist_alloc(adapter)) | 511 | adapter->req_list = zfcp_reqlist_alloc(); |
512 | if (!adapter->req_list) | ||
543 | goto failed; | 513 | goto failed; |
544 | 514 | ||
545 | if (zfcp_dbf_adapter_register(adapter)) | 515 | if (zfcp_dbf_adapter_register(adapter)) |
@@ -560,8 +530,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
560 | INIT_LIST_HEAD(&adapter->erp_ready_head); | 530 | INIT_LIST_HEAD(&adapter->erp_ready_head); |
561 | INIT_LIST_HEAD(&adapter->erp_running_head); | 531 | INIT_LIST_HEAD(&adapter->erp_running_head); |
562 | 532 | ||
563 | spin_lock_init(&adapter->req_list_lock); | ||
564 | |||
565 | rwlock_init(&adapter->erp_lock); | 533 | rwlock_init(&adapter->erp_lock); |
566 | rwlock_init(&adapter->abort_lock); | 534 | rwlock_init(&adapter->abort_lock); |
567 | 535 | ||
@@ -640,8 +608,7 @@ void zfcp_device_unregister(struct device *dev, | |||
640 | 608 | ||
641 | static void zfcp_port_release(struct device *dev) | 609 | static void zfcp_port_release(struct device *dev) |
642 | { | 610 | { |
643 | struct zfcp_port *port = container_of(dev, struct zfcp_port, | 611 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); |
644 | sysfs_device); | ||
645 | 612 | ||
646 | zfcp_ccw_adapter_put(port->adapter); | 613 | zfcp_ccw_adapter_put(port->adapter); |
647 | kfree(port); | 614 | kfree(port); |
@@ -669,7 +636,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
669 | 636 | ||
670 | port = zfcp_get_port_by_wwpn(adapter, wwpn); | 637 | port = zfcp_get_port_by_wwpn(adapter, wwpn); |
671 | if (port) { | 638 | if (port) { |
672 | put_device(&port->sysfs_device); | 639 | put_device(&port->dev); |
673 | retval = -EEXIST; | 640 | retval = -EEXIST; |
674 | goto err_out; | 641 | goto err_out; |
675 | } | 642 | } |
@@ -689,22 +656,21 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
689 | port->d_id = d_id; | 656 | port->d_id = d_id; |
690 | port->wwpn = wwpn; | 657 | port->wwpn = wwpn; |
691 | port->rport_task = RPORT_NONE; | 658 | port->rport_task = RPORT_NONE; |
692 | port->sysfs_device.parent = &adapter->ccw_device->dev; | 659 | port->dev.parent = &adapter->ccw_device->dev; |
693 | port->sysfs_device.release = zfcp_port_release; | 660 | port->dev.release = zfcp_port_release; |
694 | 661 | ||
695 | if (dev_set_name(&port->sysfs_device, "0x%016llx", | 662 | if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { |
696 | (unsigned long long)wwpn)) { | ||
697 | kfree(port); | 663 | kfree(port); |
698 | goto err_out; | 664 | goto err_out; |
699 | } | 665 | } |
700 | retval = -EINVAL; | 666 | retval = -EINVAL; |
701 | 667 | ||
702 | if (device_register(&port->sysfs_device)) { | 668 | if (device_register(&port->dev)) { |
703 | put_device(&port->sysfs_device); | 669 | put_device(&port->dev); |
704 | goto err_out; | 670 | goto err_out; |
705 | } | 671 | } |
706 | 672 | ||
707 | if (sysfs_create_group(&port->sysfs_device.kobj, | 673 | if (sysfs_create_group(&port->dev.kobj, |
708 | &zfcp_sysfs_port_attrs)) | 674 | &zfcp_sysfs_port_attrs)) |
709 | goto err_out_put; | 675 | goto err_out_put; |
710 | 676 | ||
@@ -717,7 +683,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
717 | return port; | 683 | return port; |
718 | 684 | ||
719 | err_out_put: | 685 | err_out_put: |
720 | device_unregister(&port->sysfs_device); | 686 | device_unregister(&port->dev); |
721 | err_out: | 687 | err_out: |
722 | zfcp_ccw_adapter_put(adapter); | 688 | zfcp_ccw_adapter_put(adapter); |
723 | return ERR_PTR(retval); | 689 | return ERR_PTR(retval); |
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index c22cb72a5ae8..ce1cc7a11fb4 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -3,13 +3,14 @@ | |||
3 | * | 3 | * |
4 | * Registration and callback for the s390 common I/O layer. | 4 | * Registration and callback for the s390 common I/O layer. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2009 | 6 | * Copyright IBM Corporation 2002, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
11 | 11 | ||
12 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
13 | #include "zfcp_reqlist.h" | ||
13 | 14 | ||
14 | #define ZFCP_MODEL_PRIV 0x4 | 15 | #define ZFCP_MODEL_PRIV 0x4 |
15 | 16 | ||
@@ -122,12 +123,10 @@ static void zfcp_ccw_remove(struct ccw_device *cdev) | |||
122 | zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */ | 123 | zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */ |
123 | 124 | ||
124 | list_for_each_entry_safe(unit, u, &unit_remove_lh, list) | 125 | list_for_each_entry_safe(unit, u, &unit_remove_lh, list) |
125 | zfcp_device_unregister(&unit->sysfs_device, | 126 | zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); |
126 | &zfcp_sysfs_unit_attrs); | ||
127 | 127 | ||
128 | list_for_each_entry_safe(port, p, &port_remove_lh, list) | 128 | list_for_each_entry_safe(port, p, &port_remove_lh, list) |
129 | zfcp_device_unregister(&port->sysfs_device, | 129 | zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); |
130 | &zfcp_sysfs_port_attrs); | ||
131 | 130 | ||
132 | zfcp_adapter_unregister(adapter); | 131 | zfcp_adapter_unregister(adapter); |
133 | } | 132 | } |
@@ -162,7 +161,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev) | |||
162 | } | 161 | } |
163 | 162 | ||
164 | /* initialize request counter */ | 163 | /* initialize request counter */ |
165 | BUG_ON(!zfcp_reqlist_isempty(adapter)); | 164 | BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); |
166 | adapter->req_no = 0; | 165 | adapter->req_no = 0; |
167 | 166 | ||
168 | zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, | 167 | zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 7369c8911bcf..7a149fd85f6d 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -140,9 +140,9 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, | |||
140 | memcpy(response->fsf_status_qual, | 140 | memcpy(response->fsf_status_qual, |
141 | fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); | 141 | fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); |
142 | response->fsf_req_status = fsf_req->status; | 142 | response->fsf_req_status = fsf_req->status; |
143 | response->sbal_first = fsf_req->queue_req.sbal_first; | 143 | response->sbal_first = fsf_req->qdio_req.sbal_first; |
144 | response->sbal_last = fsf_req->queue_req.sbal_last; | 144 | response->sbal_last = fsf_req->qdio_req.sbal_last; |
145 | response->sbal_response = fsf_req->queue_req.sbal_response; | 145 | response->sbal_response = fsf_req->qdio_req.sbal_response; |
146 | response->pool = fsf_req->pool != NULL; | 146 | response->pool = fsf_req->pool != NULL; |
147 | response->erp_action = (unsigned long)fsf_req->erp_action; | 147 | response->erp_action = (unsigned long)fsf_req->erp_action; |
148 | 148 | ||
@@ -576,7 +576,8 @@ void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf) | |||
576 | struct zfcp_adapter *adapter = dbf->adapter; | 576 | struct zfcp_adapter *adapter = dbf->adapter; |
577 | 577 | ||
578 | zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, | 578 | zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, |
579 | &adapter->erp_counter, 0, 0, 0); | 579 | &adapter->erp_counter, 0, 0, |
580 | ZFCP_DBF_INVALID_LUN); | ||
580 | } | 581 | } |
581 | 582 | ||
582 | /** | 583 | /** |
@@ -590,8 +591,8 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) | |||
590 | struct zfcp_dbf *dbf = port->adapter->dbf; | 591 | struct zfcp_dbf *dbf = port->adapter->dbf; |
591 | 592 | ||
592 | zfcp_dbf_rec_target(id, ref, dbf, &port->status, | 593 | zfcp_dbf_rec_target(id, ref, dbf, &port->status, |
593 | &port->erp_counter, port->wwpn, port->d_id, | 594 | &port->erp_counter, port->wwpn, port->d_id, |
594 | 0); | 595 | ZFCP_DBF_INVALID_LUN); |
595 | } | 596 | } |
596 | 597 | ||
597 | /** | 598 | /** |
@@ -642,10 +643,9 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, | |||
642 | r->u.trigger.ps = atomic_read(&port->status); | 643 | r->u.trigger.ps = atomic_read(&port->status); |
643 | r->u.trigger.wwpn = port->wwpn; | 644 | r->u.trigger.wwpn = port->wwpn; |
644 | } | 645 | } |
645 | if (unit) { | 646 | if (unit) |
646 | r->u.trigger.us = atomic_read(&unit->status); | 647 | r->u.trigger.us = atomic_read(&unit->status); |
647 | r->u.trigger.fcp_lun = unit->fcp_lun; | 648 | r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN; |
648 | } | ||
649 | debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); | 649 | debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); |
650 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | 650 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
651 | } | 651 | } |
@@ -668,7 +668,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) | |||
668 | r->u.action.action = (unsigned long)erp_action; | 668 | r->u.action.action = (unsigned long)erp_action; |
669 | r->u.action.status = erp_action->status; | 669 | r->u.action.status = erp_action->status; |
670 | r->u.action.step = erp_action->step; | 670 | r->u.action.step = erp_action->step; |
671 | r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; | 671 | r->u.action.fsf_req = erp_action->fsf_req_id; |
672 | debug_event(dbf->rec, 5, r, sizeof(*r)); | 672 | debug_event(dbf->rec, 5, r, sizeof(*r)); |
673 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | 673 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
674 | } | 674 | } |
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 8b7fd9a1033e..457e046f2d28 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h | |||
@@ -30,6 +30,8 @@ | |||
30 | #define ZFCP_DBF_TAG_SIZE 4 | 30 | #define ZFCP_DBF_TAG_SIZE 4 |
31 | #define ZFCP_DBF_ID_SIZE 7 | 31 | #define ZFCP_DBF_ID_SIZE 7 |
32 | 32 | ||
33 | #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull | ||
34 | |||
33 | struct zfcp_dbf_dump { | 35 | struct zfcp_dbf_dump { |
34 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 36 | u8 tag[ZFCP_DBF_TAG_SIZE]; |
35 | u32 total_size; /* size of total dump data */ | 37 | u32 total_size; /* size of total dump data */ |
@@ -192,10 +194,10 @@ struct zfcp_dbf_san_record { | |||
192 | struct zfcp_dbf_san_record_ct_response ct_resp; | 194 | struct zfcp_dbf_san_record_ct_response ct_resp; |
193 | struct zfcp_dbf_san_record_els els; | 195 | struct zfcp_dbf_san_record_els els; |
194 | } u; | 196 | } u; |
195 | #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 | ||
196 | u8 payload[32]; | ||
197 | } __attribute__ ((packed)); | 197 | } __attribute__ ((packed)); |
198 | 198 | ||
199 | #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 | ||
200 | |||
199 | struct zfcp_dbf_scsi_record { | 201 | struct zfcp_dbf_scsi_record { |
200 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 202 | u8 tag[ZFCP_DBF_TAG_SIZE]; |
201 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | 203 | u8 tag2[ZFCP_DBF_TAG_SIZE]; |
@@ -301,17 +303,31 @@ void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, | |||
301 | 303 | ||
302 | /** | 304 | /** |
303 | * zfcp_dbf_scsi_result - trace event for SCSI command completion | 305 | * zfcp_dbf_scsi_result - trace event for SCSI command completion |
304 | * @tag: tag indicating success or failure of SCSI command | 306 | * @dbf: adapter dbf trace |
305 | * @level: trace level applicable for this event | 307 | * @scmd: SCSI command pointer |
306 | * @adapter: adapter that has been used to issue the SCSI command | 308 | * @req: FSF request used to issue SCSI command |
309 | */ | ||
310 | static inline | ||
311 | void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, | ||
312 | struct zfcp_fsf_req *req) | ||
313 | { | ||
314 | if (scmd->result != 0) | ||
315 | zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0); | ||
316 | else if (scmd->retries > 0) | ||
317 | zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0); | ||
318 | else | ||
319 | zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0); | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command | ||
324 | * @dbf: adapter dbf trace | ||
307 | * @scmd: SCSI command pointer | 325 | * @scmd: SCSI command pointer |
308 | * @fsf_req: request used to issue SCSI command (might be NULL) | ||
309 | */ | 326 | */ |
310 | static inline | 327 | static inline |
311 | void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf, | 328 | void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd) |
312 | struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req) | ||
313 | { | 329 | { |
314 | zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0); | 330 | zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0); |
315 | } | 331 | } |
316 | 332 | ||
317 | /** | 333 | /** |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index e1b5b88e2ddb..7131c7db1f04 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Global definitions for the zfcp device driver. | 4 | * Global definitions for the zfcp device driver. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2009 | 6 | * Copyright IBM Corporation 2002, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef ZFCP_DEF_H | 9 | #ifndef ZFCP_DEF_H |
@@ -33,15 +33,13 @@ | |||
33 | #include <scsi/scsi_transport_fc.h> | 33 | #include <scsi/scsi_transport_fc.h> |
34 | #include <scsi/scsi_bsg_fc.h> | 34 | #include <scsi/scsi_bsg_fc.h> |
35 | #include <asm/ccwdev.h> | 35 | #include <asm/ccwdev.h> |
36 | #include <asm/qdio.h> | ||
37 | #include <asm/debug.h> | 36 | #include <asm/debug.h> |
38 | #include <asm/ebcdic.h> | 37 | #include <asm/ebcdic.h> |
39 | #include <asm/sysinfo.h> | 38 | #include <asm/sysinfo.h> |
40 | #include "zfcp_fsf.h" | 39 | #include "zfcp_fsf.h" |
40 | #include "zfcp_qdio.h" | ||
41 | 41 | ||
42 | /********************* GENERAL DEFINES *********************************/ | 42 | struct zfcp_reqlist; |
43 | |||
44 | #define REQUEST_LIST_SIZE 128 | ||
45 | 43 | ||
46 | /********************* SCSI SPECIFIC DEFINES *********************************/ | 44 | /********************* SCSI SPECIFIC DEFINES *********************************/ |
47 | #define ZFCP_SCSI_ER_TIMEOUT (10*HZ) | 45 | #define ZFCP_SCSI_ER_TIMEOUT (10*HZ) |
@@ -129,12 +127,6 @@ struct zfcp_adapter_mempool { | |||
129 | mempool_t *qtcb_pool; | 127 | mempool_t *qtcb_pool; |
130 | }; | 128 | }; |
131 | 129 | ||
132 | struct zfcp_qdio_queue { | ||
133 | struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; | ||
134 | u8 first; /* index of next free bfr in queue */ | ||
135 | atomic_t count; /* number of free buffers in queue */ | ||
136 | }; | ||
137 | |||
138 | struct zfcp_erp_action { | 130 | struct zfcp_erp_action { |
139 | struct list_head list; | 131 | struct list_head list; |
140 | int action; /* requested action code */ | 132 | int action; /* requested action code */ |
@@ -143,8 +135,7 @@ struct zfcp_erp_action { | |||
143 | struct zfcp_unit *unit; | 135 | struct zfcp_unit *unit; |
144 | u32 status; /* recovery status */ | 136 | u32 status; /* recovery status */ |
145 | u32 step; /* active step of this erp action */ | 137 | u32 step; /* active step of this erp action */ |
146 | struct zfcp_fsf_req *fsf_req; /* fsf request currently pending | 138 | unsigned long fsf_req_id; |
147 | for this action */ | ||
148 | struct timer_list timer; | 139 | struct timer_list timer; |
149 | }; | 140 | }; |
150 | 141 | ||
@@ -167,29 +158,6 @@ struct zfcp_latencies { | |||
167 | spinlock_t lock; | 158 | spinlock_t lock; |
168 | }; | 159 | }; |
169 | 160 | ||
170 | /** struct zfcp_qdio - basic QDIO data structure | ||
171 | * @resp_q: response queue | ||
172 | * @req_q: request queue | ||
173 | * @stat_lock: lock to protect req_q_util and req_q_time | ||
174 | * @req_q_lock; lock to serialize access to request queue | ||
175 | * @req_q_time: time of last fill level change | ||
176 | * @req_q_util: used for accounting | ||
177 | * @req_q_full: queue full incidents | ||
178 | * @req_q_wq: used to wait for SBAL availability | ||
179 | * @adapter: adapter used in conjunction with this QDIO structure | ||
180 | */ | ||
181 | struct zfcp_qdio { | ||
182 | struct zfcp_qdio_queue resp_q; | ||
183 | struct zfcp_qdio_queue req_q; | ||
184 | spinlock_t stat_lock; | ||
185 | spinlock_t req_q_lock; | ||
186 | unsigned long long req_q_time; | ||
187 | u64 req_q_util; | ||
188 | atomic_t req_q_full; | ||
189 | wait_queue_head_t req_q_wq; | ||
190 | struct zfcp_adapter *adapter; | ||
191 | }; | ||
192 | |||
193 | struct zfcp_adapter { | 161 | struct zfcp_adapter { |
194 | struct kref ref; | 162 | struct kref ref; |
195 | u64 peer_wwnn; /* P2P peer WWNN */ | 163 | u64 peer_wwnn; /* P2P peer WWNN */ |
@@ -207,8 +175,7 @@ struct zfcp_adapter { | |||
207 | struct list_head port_list; /* remote port list */ | 175 | struct list_head port_list; /* remote port list */ |
208 | rwlock_t port_list_lock; /* port list lock */ | 176 | rwlock_t port_list_lock; /* port list lock */ |
209 | unsigned long req_no; /* unique FSF req number */ | 177 | unsigned long req_no; /* unique FSF req number */ |
210 | struct list_head *req_list; /* list of pending reqs */ | 178 | struct zfcp_reqlist *req_list; |
211 | spinlock_t req_list_lock; /* request list lock */ | ||
212 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ | 179 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ |
213 | rwlock_t abort_lock; /* Protects against SCSI | 180 | rwlock_t abort_lock; /* Protects against SCSI |
214 | stack abort/command | 181 | stack abort/command |
@@ -241,7 +208,7 @@ struct zfcp_adapter { | |||
241 | }; | 208 | }; |
242 | 209 | ||
243 | struct zfcp_port { | 210 | struct zfcp_port { |
244 | struct device sysfs_device; /* sysfs device */ | 211 | struct device dev; |
245 | struct fc_rport *rport; /* rport of fc transport class */ | 212 | struct fc_rport *rport; /* rport of fc transport class */ |
246 | struct list_head list; /* list of remote ports */ | 213 | struct list_head list; /* list of remote ports */ |
247 | struct zfcp_adapter *adapter; /* adapter used to access port */ | 214 | struct zfcp_adapter *adapter; /* adapter used to access port */ |
@@ -263,7 +230,7 @@ struct zfcp_port { | |||
263 | }; | 230 | }; |
264 | 231 | ||
265 | struct zfcp_unit { | 232 | struct zfcp_unit { |
266 | struct device sysfs_device; /* sysfs device */ | 233 | struct device dev; |
267 | struct list_head list; /* list of logical units */ | 234 | struct list_head list; /* list of logical units */ |
268 | struct zfcp_port *port; /* remote port of unit */ | 235 | struct zfcp_port *port; /* remote port of unit */ |
269 | atomic_t status; /* status of this logical unit */ | 236 | atomic_t status; /* status of this logical unit */ |
@@ -277,33 +244,11 @@ struct zfcp_unit { | |||
277 | }; | 244 | }; |
278 | 245 | ||
279 | /** | 246 | /** |
280 | * struct zfcp_queue_req - queue related values for a request | ||
281 | * @sbal_number: number of free SBALs | ||
282 | * @sbal_first: first SBAL for this request | ||
283 | * @sbal_last: last SBAL for this request | ||
284 | * @sbal_limit: last possible SBAL for this request | ||
285 | * @sbale_curr: current SBALE at creation of this request | ||
286 | * @sbal_response: SBAL used in interrupt | ||
287 | * @qdio_outb_usage: usage of outbound queue | ||
288 | * @qdio_inb_usage: usage of inbound queue | ||
289 | */ | ||
290 | struct zfcp_queue_req { | ||
291 | u8 sbal_number; | ||
292 | u8 sbal_first; | ||
293 | u8 sbal_last; | ||
294 | u8 sbal_limit; | ||
295 | u8 sbale_curr; | ||
296 | u8 sbal_response; | ||
297 | u16 qdio_outb_usage; | ||
298 | u16 qdio_inb_usage; | ||
299 | }; | ||
300 | |||
301 | /** | ||
302 | * struct zfcp_fsf_req - basic FSF request structure | 247 | * struct zfcp_fsf_req - basic FSF request structure |
303 | * @list: list of FSF requests | 248 | * @list: list of FSF requests |
304 | * @req_id: unique request ID | 249 | * @req_id: unique request ID |
305 | * @adapter: adapter this request belongs to | 250 | * @adapter: adapter this request belongs to |
306 | * @queue_req: queue related values | 251 | * @qdio_req: qdio queue related values |
307 | * @completion: used to signal the completion of the request | 252 | * @completion: used to signal the completion of the request |
308 | * @status: status of the request | 253 | * @status: status of the request |
309 | * @fsf_command: FSF command issued | 254 | * @fsf_command: FSF command issued |
@@ -321,7 +266,7 @@ struct zfcp_fsf_req { | |||
321 | struct list_head list; | 266 | struct list_head list; |
322 | unsigned long req_id; | 267 | unsigned long req_id; |
323 | struct zfcp_adapter *adapter; | 268 | struct zfcp_adapter *adapter; |
324 | struct zfcp_queue_req queue_req; | 269 | struct zfcp_qdio_req qdio_req; |
325 | struct completion completion; | 270 | struct completion completion; |
326 | u32 status; | 271 | u32 status; |
327 | u32 fsf_command; | 272 | u32 fsf_command; |
@@ -352,45 +297,4 @@ struct zfcp_data { | |||
352 | #define ZFCP_SET 0x00000100 | 297 | #define ZFCP_SET 0x00000100 |
353 | #define ZFCP_CLEAR 0x00000200 | 298 | #define ZFCP_CLEAR 0x00000200 |
354 | 299 | ||
355 | /* | ||
356 | * Helper functions for request ID management. | ||
357 | */ | ||
358 | static inline int zfcp_reqlist_hash(unsigned long req_id) | ||
359 | { | ||
360 | return req_id % REQUEST_LIST_SIZE; | ||
361 | } | ||
362 | |||
363 | static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter, | ||
364 | struct zfcp_fsf_req *fsf_req) | ||
365 | { | ||
366 | list_del(&fsf_req->list); | ||
367 | } | ||
368 | |||
369 | static inline struct zfcp_fsf_req * | ||
370 | zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id) | ||
371 | { | ||
372 | struct zfcp_fsf_req *request; | ||
373 | unsigned int idx; | ||
374 | |||
375 | idx = zfcp_reqlist_hash(req_id); | ||
376 | list_for_each_entry(request, &adapter->req_list[idx], list) | ||
377 | if (request->req_id == req_id) | ||
378 | return request; | ||
379 | return NULL; | ||
380 | } | ||
381 | |||
382 | static inline struct zfcp_fsf_req * | ||
383 | zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req) | ||
384 | { | ||
385 | struct zfcp_fsf_req *request; | ||
386 | unsigned int idx; | ||
387 | |||
388 | for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) { | ||
389 | list_for_each_entry(request, &adapter->req_list[idx], list) | ||
390 | if (request == req) | ||
391 | return request; | ||
392 | } | ||
393 | return NULL; | ||
394 | } | ||
395 | |||
396 | #endif /* ZFCP_DEF_H */ | 300 | #endif /* ZFCP_DEF_H */ |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index b51a11a82e63..0be5e7ea2828 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Error Recovery Procedures (ERP). | 4 | * Error Recovery Procedures (ERP). |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2009 | 6 | * Copyright IBM Corporation 2002, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/kthread.h> | 12 | #include <linux/kthread.h> |
13 | #include "zfcp_ext.h" | 13 | #include "zfcp_ext.h" |
14 | #include "zfcp_reqlist.h" | ||
14 | 15 | ||
15 | #define ZFCP_MAX_ERPS 3 | 16 | #define ZFCP_MAX_ERPS 3 |
16 | 17 | ||
@@ -174,7 +175,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, | |||
174 | 175 | ||
175 | switch (need) { | 176 | switch (need) { |
176 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 177 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
177 | if (!get_device(&unit->sysfs_device)) | 178 | if (!get_device(&unit->dev)) |
178 | return NULL; | 179 | return NULL; |
179 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); | 180 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); |
180 | erp_action = &unit->erp_action; | 181 | erp_action = &unit->erp_action; |
@@ -184,7 +185,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, | |||
184 | 185 | ||
185 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 186 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
186 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 187 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
187 | if (!get_device(&port->sysfs_device)) | 188 | if (!get_device(&port->dev)) |
188 | return NULL; | 189 | return NULL; |
189 | zfcp_erp_action_dismiss_port(port); | 190 | zfcp_erp_action_dismiss_port(port); |
190 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); | 191 | atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); |
@@ -478,26 +479,27 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) | |||
478 | static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) | 479 | static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) |
479 | { | 480 | { |
480 | struct zfcp_adapter *adapter = act->adapter; | 481 | struct zfcp_adapter *adapter = act->adapter; |
482 | struct zfcp_fsf_req *req; | ||
481 | 483 | ||
482 | if (!act->fsf_req) | 484 | if (!act->fsf_req_id) |
483 | return; | 485 | return; |
484 | 486 | ||
485 | spin_lock(&adapter->req_list_lock); | 487 | spin_lock(&adapter->req_list->lock); |
486 | if (zfcp_reqlist_find_safe(adapter, act->fsf_req) && | 488 | req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id); |
487 | act->fsf_req->erp_action == act) { | 489 | if (req && req->erp_action == act) { |
488 | if (act->status & (ZFCP_STATUS_ERP_DISMISSED | | 490 | if (act->status & (ZFCP_STATUS_ERP_DISMISSED | |
489 | ZFCP_STATUS_ERP_TIMEDOUT)) { | 491 | ZFCP_STATUS_ERP_TIMEDOUT)) { |
490 | act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | 492 | req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; |
491 | zfcp_dbf_rec_action("erscf_1", act); | 493 | zfcp_dbf_rec_action("erscf_1", act); |
492 | act->fsf_req->erp_action = NULL; | 494 | req->erp_action = NULL; |
493 | } | 495 | } |
494 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) | 496 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) |
495 | zfcp_dbf_rec_action("erscf_2", act); | 497 | zfcp_dbf_rec_action("erscf_2", act); |
496 | if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) | 498 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) |
497 | act->fsf_req = NULL; | 499 | act->fsf_req_id = 0; |
498 | } else | 500 | } else |
499 | act->fsf_req = NULL; | 501 | act->fsf_req_id = 0; |
500 | spin_unlock(&adapter->req_list_lock); | 502 | spin_unlock(&adapter->req_list->lock); |
501 | } | 503 | } |
502 | 504 | ||
503 | /** | 505 | /** |
@@ -1179,19 +1181,19 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) | |||
1179 | switch (act->action) { | 1181 | switch (act->action) { |
1180 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1182 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
1181 | if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { | 1183 | if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { |
1182 | get_device(&unit->sysfs_device); | 1184 | get_device(&unit->dev); |
1183 | if (scsi_queue_work(unit->port->adapter->scsi_host, | 1185 | if (scsi_queue_work(unit->port->adapter->scsi_host, |
1184 | &unit->scsi_work) <= 0) | 1186 | &unit->scsi_work) <= 0) |
1185 | put_device(&unit->sysfs_device); | 1187 | put_device(&unit->dev); |
1186 | } | 1188 | } |
1187 | put_device(&unit->sysfs_device); | 1189 | put_device(&unit->dev); |
1188 | break; | 1190 | break; |
1189 | 1191 | ||
1190 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 1192 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
1191 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 1193 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
1192 | if (result == ZFCP_ERP_SUCCEEDED) | 1194 | if (result == ZFCP_ERP_SUCCEEDED) |
1193 | zfcp_scsi_schedule_rport_register(port); | 1195 | zfcp_scsi_schedule_rport_register(port); |
1194 | put_device(&port->sysfs_device); | 1196 | put_device(&port->dev); |
1195 | break; | 1197 | break; |
1196 | 1198 | ||
1197 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 1199 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 66bdb34143cb..8786a79c7f8f 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -21,7 +21,6 @@ extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); | |||
21 | extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, | 21 | extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, |
22 | u32); | 22 | u32); |
23 | extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); | 23 | extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); |
24 | extern int zfcp_reqlist_isempty(struct zfcp_adapter *); | ||
25 | extern void zfcp_sg_free_table(struct scatterlist *, int); | 24 | extern void zfcp_sg_free_table(struct scatterlist *, int); |
26 | extern int zfcp_sg_setup_table(struct scatterlist *, int); | 25 | extern int zfcp_sg_setup_table(struct scatterlist *, int); |
27 | extern void zfcp_device_unregister(struct device *, | 26 | extern void zfcp_device_unregister(struct device *, |
@@ -144,13 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); | |||
144 | /* zfcp_qdio.c */ | 143 | /* zfcp_qdio.c */ |
145 | extern int zfcp_qdio_setup(struct zfcp_adapter *); | 144 | extern int zfcp_qdio_setup(struct zfcp_adapter *); |
146 | extern void zfcp_qdio_destroy(struct zfcp_qdio *); | 145 | extern void zfcp_qdio_destroy(struct zfcp_qdio *); |
147 | extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); | 146 | extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *); |
148 | extern struct qdio_buffer_element | ||
149 | *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *); | ||
150 | extern struct qdio_buffer_element | ||
151 | *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *); | ||
152 | extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, | 147 | extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, |
153 | struct zfcp_queue_req *, unsigned long, | 148 | struct zfcp_qdio_req *, unsigned long, |
154 | struct scatterlist *, int); | 149 | struct scatterlist *, int); |
155 | extern int zfcp_qdio_open(struct zfcp_qdio *); | 150 | extern int zfcp_qdio_open(struct zfcp_qdio *); |
156 | extern void zfcp_qdio_close(struct zfcp_qdio *); | 151 | extern void zfcp_qdio_close(struct zfcp_qdio *); |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 271399f62f1b..5219670f0c99 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Fibre Channel related functions for the zfcp device driver. | 4 | * Fibre Channel related functions for the zfcp device driver. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2008, 2009 | 6 | * Copyright IBM Corporation 2008, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -316,7 +316,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) | |||
316 | 316 | ||
317 | zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); | 317 | zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); |
318 | out: | 318 | out: |
319 | put_device(&port->sysfs_device); | 319 | put_device(&port->dev); |
320 | } | 320 | } |
321 | 321 | ||
322 | /** | 322 | /** |
@@ -325,9 +325,9 @@ out: | |||
325 | */ | 325 | */ |
326 | void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) | 326 | void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) |
327 | { | 327 | { |
328 | get_device(&port->sysfs_device); | 328 | get_device(&port->dev); |
329 | if (!queue_work(port->adapter->work_queue, &port->gid_pn_work)) | 329 | if (!queue_work(port->adapter->work_queue, &port->gid_pn_work)) |
330 | put_device(&port->sysfs_device); | 330 | put_device(&port->dev); |
331 | } | 331 | } |
332 | 332 | ||
333 | /** | 333 | /** |
@@ -389,7 +389,7 @@ static void zfcp_fc_adisc_handler(void *data) | |||
389 | zfcp_scsi_schedule_rport_register(port); | 389 | zfcp_scsi_schedule_rport_register(port); |
390 | out: | 390 | out: |
391 | atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); | 391 | atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); |
392 | put_device(&port->sysfs_device); | 392 | put_device(&port->dev); |
393 | kmem_cache_free(zfcp_data.adisc_cache, adisc); | 393 | kmem_cache_free(zfcp_data.adisc_cache, adisc); |
394 | } | 394 | } |
395 | 395 | ||
@@ -436,7 +436,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) | |||
436 | container_of(work, struct zfcp_port, test_link_work); | 436 | container_of(work, struct zfcp_port, test_link_work); |
437 | int retval; | 437 | int retval; |
438 | 438 | ||
439 | get_device(&port->sysfs_device); | 439 | get_device(&port->dev); |
440 | port->rport_task = RPORT_DEL; | 440 | port->rport_task = RPORT_DEL; |
441 | zfcp_scsi_rport_work(&port->rport_work); | 441 | zfcp_scsi_rport_work(&port->rport_work); |
442 | 442 | ||
@@ -455,7 +455,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) | |||
455 | zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); | 455 | zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); |
456 | 456 | ||
457 | out: | 457 | out: |
458 | put_device(&port->sysfs_device); | 458 | put_device(&port->dev); |
459 | } | 459 | } |
460 | 460 | ||
461 | /** | 461 | /** |
@@ -468,9 +468,9 @@ out: | |||
468 | */ | 468 | */ |
469 | void zfcp_fc_test_link(struct zfcp_port *port) | 469 | void zfcp_fc_test_link(struct zfcp_port *port) |
470 | { | 470 | { |
471 | get_device(&port->sysfs_device); | 471 | get_device(&port->dev); |
472 | if (!queue_work(port->adapter->work_queue, &port->test_link_work)) | 472 | if (!queue_work(port->adapter->work_queue, &port->test_link_work)) |
473 | put_device(&port->sysfs_device); | 473 | put_device(&port->dev); |
474 | } | 474 | } |
475 | 475 | ||
476 | static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num) | 476 | static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num) |
@@ -617,8 +617,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, | |||
617 | 617 | ||
618 | list_for_each_entry_safe(port, tmp, &remove_lh, list) { | 618 | list_for_each_entry_safe(port, tmp, &remove_lh, list) { |
619 | zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); | 619 | zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); |
620 | zfcp_device_unregister(&port->sysfs_device, | 620 | zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); |
621 | &zfcp_sysfs_port_attrs); | ||
622 | } | 621 | } |
623 | 622 | ||
624 | return ret; | 623 | return ret; |
@@ -731,7 +730,7 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, | |||
731 | return -EINVAL; | 730 | return -EINVAL; |
732 | 731 | ||
733 | d_id = port->d_id; | 732 | d_id = port->d_id; |
734 | put_device(&port->sysfs_device); | 733 | put_device(&port->dev); |
735 | } else | 734 | } else |
736 | d_id = ntoh24(job->request->rqst_data.h_els.port_id); | 735 | d_id = ntoh24(job->request->rqst_data.h_els.port_id); |
737 | 736 | ||
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index e8fb4d9baa8b..6538742b421a 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Implementation of FSF commands. | 4 | * Implementation of FSF commands. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2009 | 6 | * Copyright IBM Corporation 2002, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -14,6 +14,8 @@ | |||
14 | #include "zfcp_ext.h" | 14 | #include "zfcp_ext.h" |
15 | #include "zfcp_fc.h" | 15 | #include "zfcp_fc.h" |
16 | #include "zfcp_dbf.h" | 16 | #include "zfcp_dbf.h" |
17 | #include "zfcp_qdio.h" | ||
18 | #include "zfcp_reqlist.h" | ||
17 | 19 | ||
18 | static void zfcp_fsf_request_timeout_handler(unsigned long data) | 20 | static void zfcp_fsf_request_timeout_handler(unsigned long data) |
19 | { | 21 | { |
@@ -393,7 +395,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
393 | case FSF_PROT_LINK_DOWN: | 395 | case FSF_PROT_LINK_DOWN: |
394 | zfcp_fsf_link_down_info_eval(req, "fspse_5", | 396 | zfcp_fsf_link_down_info_eval(req, "fspse_5", |
395 | &psq->link_down_info); | 397 | &psq->link_down_info); |
396 | /* FIXME: reopening adapter now? better wait for link up */ | 398 | /* go through reopen to flush pending requests */ |
397 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); | 399 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); |
398 | break; | 400 | break; |
399 | case FSF_PROT_REEST_QUEUE: | 401 | case FSF_PROT_REEST_QUEUE: |
@@ -457,15 +459,10 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) | |||
457 | void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) | 459 | void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) |
458 | { | 460 | { |
459 | struct zfcp_fsf_req *req, *tmp; | 461 | struct zfcp_fsf_req *req, *tmp; |
460 | unsigned long flags; | ||
461 | LIST_HEAD(remove_queue); | 462 | LIST_HEAD(remove_queue); |
462 | unsigned int i; | ||
463 | 463 | ||
464 | BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); | 464 | BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); |
465 | spin_lock_irqsave(&adapter->req_list_lock, flags); | 465 | zfcp_reqlist_move(adapter->req_list, &remove_queue); |
466 | for (i = 0; i < REQUEST_LIST_SIZE; i++) | ||
467 | list_splice_init(&adapter->req_list[i], &remove_queue); | ||
468 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
469 | 466 | ||
470 | list_for_each_entry_safe(req, tmp, &remove_queue, list) { | 467 | list_for_each_entry_safe(req, tmp, &remove_queue, list) { |
471 | list_del(&req->list); | 468 | list_del(&req->list); |
@@ -495,8 +492,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) | |||
495 | fc_host_port_id(shost) = ntoh24(bottom->s_id); | 492 | fc_host_port_id(shost) = ntoh24(bottom->s_id); |
496 | fc_host_speed(shost) = bottom->fc_link_speed; | 493 | fc_host_speed(shost) = bottom->fc_link_speed; |
497 | fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; | 494 | fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; |
498 | fc_host_supported_fc4s(shost)[2] = 1; /* FCP */ | ||
499 | fc_host_active_fc4s(shost)[2] = 1; /* FCP */ | ||
500 | 495 | ||
501 | adapter->hydra_version = bottom->adapter_type; | 496 | adapter->hydra_version = bottom->adapter_type; |
502 | adapter->timer_ticks = bottom->timer_interval; | 497 | adapter->timer_ticks = bottom->timer_interval; |
@@ -619,6 +614,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) | |||
619 | fc_host_permanent_port_name(shost) = fc_host_port_name(shost); | 614 | fc_host_permanent_port_name(shost) = fc_host_port_name(shost); |
620 | fc_host_maxframe_size(shost) = bottom->maximum_frame_size; | 615 | fc_host_maxframe_size(shost) = bottom->maximum_frame_size; |
621 | fc_host_supported_speeds(shost) = bottom->supported_speed; | 616 | fc_host_supported_speeds(shost) = bottom->supported_speed; |
617 | memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types, | ||
618 | FC_FC4_LIST_SIZE); | ||
619 | memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types, | ||
620 | FC_FC4_LIST_SIZE); | ||
622 | } | 621 | } |
623 | 622 | ||
624 | static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) | 623 | static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) |
@@ -725,12 +724,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, | |||
725 | req->adapter = adapter; | 724 | req->adapter = adapter; |
726 | req->fsf_command = fsf_cmd; | 725 | req->fsf_command = fsf_cmd; |
727 | req->req_id = adapter->req_no; | 726 | req->req_id = adapter->req_no; |
728 | req->queue_req.sbal_number = 1; | 727 | req->qdio_req.sbal_number = 1; |
729 | req->queue_req.sbal_first = req_q->first; | 728 | req->qdio_req.sbal_first = req_q->first; |
730 | req->queue_req.sbal_last = req_q->first; | 729 | req->qdio_req.sbal_last = req_q->first; |
731 | req->queue_req.sbale_curr = 1; | 730 | req->qdio_req.sbale_curr = 1; |
732 | 731 | ||
733 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 732 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
734 | sbale[0].addr = (void *) req->req_id; | 733 | sbale[0].addr = (void *) req->req_id; |
735 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; | 734 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; |
736 | 735 | ||
@@ -745,6 +744,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, | |||
745 | return ERR_PTR(-ENOMEM); | 744 | return ERR_PTR(-ENOMEM); |
746 | } | 745 | } |
747 | 746 | ||
747 | req->seq_no = adapter->fsf_req_seq_no; | ||
748 | req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; | 748 | req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; |
749 | req->qtcb->prefix.req_id = req->req_id; | 749 | req->qtcb->prefix.req_id = req->req_id; |
750 | req->qtcb->prefix.ulp_info = 26; | 750 | req->qtcb->prefix.ulp_info = 26; |
@@ -752,8 +752,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, | |||
752 | req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; | 752 | req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; |
753 | req->qtcb->header.req_handle = req->req_id; | 753 | req->qtcb->header.req_handle = req->req_id; |
754 | req->qtcb->header.fsf_command = req->fsf_command; | 754 | req->qtcb->header.fsf_command = req->fsf_command; |
755 | req->seq_no = adapter->fsf_req_seq_no; | ||
756 | req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; | ||
757 | sbale[1].addr = (void *) req->qtcb; | 755 | sbale[1].addr = (void *) req->qtcb; |
758 | sbale[1].length = sizeof(struct fsf_qtcb); | 756 | sbale[1].length = sizeof(struct fsf_qtcb); |
759 | } | 757 | } |
@@ -770,25 +768,17 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) | |||
770 | { | 768 | { |
771 | struct zfcp_adapter *adapter = req->adapter; | 769 | struct zfcp_adapter *adapter = req->adapter; |
772 | struct zfcp_qdio *qdio = adapter->qdio; | 770 | struct zfcp_qdio *qdio = adapter->qdio; |
773 | unsigned long flags; | 771 | int with_qtcb = (req->qtcb != NULL); |
774 | int idx; | 772 | int req_id = req->req_id; |
775 | int with_qtcb = (req->qtcb != NULL); | ||
776 | 773 | ||
777 | /* put allocated FSF request into hash table */ | 774 | zfcp_reqlist_add(adapter->req_list, req); |
778 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
779 | idx = zfcp_reqlist_hash(req->req_id); | ||
780 | list_add_tail(&req->list, &adapter->req_list[idx]); | ||
781 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
782 | 775 | ||
783 | req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); | 776 | req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); |
784 | req->issued = get_clock(); | 777 | req->issued = get_clock(); |
785 | if (zfcp_qdio_send(qdio, &req->queue_req)) { | 778 | if (zfcp_qdio_send(qdio, &req->qdio_req)) { |
786 | del_timer(&req->timer); | 779 | del_timer(&req->timer); |
787 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
788 | /* lookup request again, list might have changed */ | 780 | /* lookup request again, list might have changed */ |
789 | if (zfcp_reqlist_find_safe(adapter, req)) | 781 | zfcp_reqlist_find_rm(adapter->req_list, req_id); |
790 | zfcp_reqlist_remove(adapter, req); | ||
791 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
792 | zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); | 782 | zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); |
793 | return -EIO; | 783 | return -EIO; |
794 | } | 784 | } |
@@ -826,9 +816,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) | |||
826 | goto out; | 816 | goto out; |
827 | } | 817 | } |
828 | 818 | ||
829 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 819 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
830 | sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; | 820 | sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; |
831 | req->queue_req.sbale_curr = 2; | 821 | req->qdio_req.sbale_curr = 2; |
832 | 822 | ||
833 | sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); | 823 | sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); |
834 | if (!sr_buf) { | 824 | if (!sr_buf) { |
@@ -837,7 +827,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) | |||
837 | } | 827 | } |
838 | memset(sr_buf, 0, sizeof(*sr_buf)); | 828 | memset(sr_buf, 0, sizeof(*sr_buf)); |
839 | req->data = sr_buf; | 829 | req->data = sr_buf; |
840 | sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req); | 830 | sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req); |
841 | sbale->addr = (void *) sr_buf; | 831 | sbale->addr = (void *) sr_buf; |
842 | sbale->length = sizeof(*sr_buf); | 832 | sbale->length = sizeof(*sr_buf); |
843 | 833 | ||
@@ -934,7 +924,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, | |||
934 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 924 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
935 | goto out_error_free; | 925 | goto out_error_free; |
936 | 926 | ||
937 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 927 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
938 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 928 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
939 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 929 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
940 | 930 | ||
@@ -1029,7 +1019,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1029 | { | 1019 | { |
1030 | struct zfcp_adapter *adapter = req->adapter; | 1020 | struct zfcp_adapter *adapter = req->adapter; |
1031 | struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, | 1021 | struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, |
1032 | &req->queue_req); | 1022 | &req->qdio_req); |
1033 | u32 feat = adapter->adapter_features; | 1023 | u32 feat = adapter->adapter_features; |
1034 | int bytes; | 1024 | int bytes; |
1035 | 1025 | ||
@@ -1047,15 +1037,15 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1047 | return 0; | 1037 | return 0; |
1048 | } | 1038 | } |
1049 | 1039 | ||
1050 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, | 1040 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, |
1051 | SBAL_FLAGS0_TYPE_WRITE_READ, | 1041 | SBAL_FLAGS0_TYPE_WRITE_READ, |
1052 | sg_req, max_sbals); | 1042 | sg_req, max_sbals); |
1053 | if (bytes <= 0) | 1043 | if (bytes <= 0) |
1054 | return -EIO; | 1044 | return -EIO; |
1055 | req->qtcb->bottom.support.req_buf_length = bytes; | 1045 | req->qtcb->bottom.support.req_buf_length = bytes; |
1056 | req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; | 1046 | req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; |
1057 | 1047 | ||
1058 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, | 1048 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, |
1059 | SBAL_FLAGS0_TYPE_WRITE_READ, | 1049 | SBAL_FLAGS0_TYPE_WRITE_READ, |
1060 | sg_resp, max_sbals); | 1050 | sg_resp, max_sbals); |
1061 | req->qtcb->bottom.support.resp_buf_length = bytes; | 1051 | req->qtcb->bottom.support.resp_buf_length = bytes; |
@@ -1251,7 +1241,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1251 | } | 1241 | } |
1252 | 1242 | ||
1253 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1243 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1254 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1244 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1255 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1245 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1256 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1246 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1257 | 1247 | ||
@@ -1262,13 +1252,13 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1262 | FSF_FEATURE_UPDATE_ALERT; | 1252 | FSF_FEATURE_UPDATE_ALERT; |
1263 | req->erp_action = erp_action; | 1253 | req->erp_action = erp_action; |
1264 | req->handler = zfcp_fsf_exchange_config_data_handler; | 1254 | req->handler = zfcp_fsf_exchange_config_data_handler; |
1265 | erp_action->fsf_req = req; | 1255 | erp_action->fsf_req_id = req->req_id; |
1266 | 1256 | ||
1267 | zfcp_fsf_start_erp_timer(req); | 1257 | zfcp_fsf_start_erp_timer(req); |
1268 | retval = zfcp_fsf_req_send(req); | 1258 | retval = zfcp_fsf_req_send(req); |
1269 | if (retval) { | 1259 | if (retval) { |
1270 | zfcp_fsf_req_free(req); | 1260 | zfcp_fsf_req_free(req); |
1271 | erp_action->fsf_req = NULL; | 1261 | erp_action->fsf_req_id = 0; |
1272 | } | 1262 | } |
1273 | out: | 1263 | out: |
1274 | spin_unlock_bh(&qdio->req_q_lock); | 1264 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -1293,7 +1283,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, | |||
1293 | goto out_unlock; | 1283 | goto out_unlock; |
1294 | } | 1284 | } |
1295 | 1285 | ||
1296 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1286 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1297 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1287 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1298 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1288 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1299 | req->handler = zfcp_fsf_exchange_config_data_handler; | 1289 | req->handler = zfcp_fsf_exchange_config_data_handler; |
@@ -1349,19 +1339,19 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | |||
1349 | } | 1339 | } |
1350 | 1340 | ||
1351 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1341 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1352 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1342 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1353 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1343 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1354 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1344 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1355 | 1345 | ||
1356 | req->handler = zfcp_fsf_exchange_port_data_handler; | 1346 | req->handler = zfcp_fsf_exchange_port_data_handler; |
1357 | req->erp_action = erp_action; | 1347 | req->erp_action = erp_action; |
1358 | erp_action->fsf_req = req; | 1348 | erp_action->fsf_req_id = req->req_id; |
1359 | 1349 | ||
1360 | zfcp_fsf_start_erp_timer(req); | 1350 | zfcp_fsf_start_erp_timer(req); |
1361 | retval = zfcp_fsf_req_send(req); | 1351 | retval = zfcp_fsf_req_send(req); |
1362 | if (retval) { | 1352 | if (retval) { |
1363 | zfcp_fsf_req_free(req); | 1353 | zfcp_fsf_req_free(req); |
1364 | erp_action->fsf_req = NULL; | 1354 | erp_action->fsf_req_id = 0; |
1365 | } | 1355 | } |
1366 | out: | 1356 | out: |
1367 | spin_unlock_bh(&qdio->req_q_lock); | 1357 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -1398,7 +1388,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, | |||
1398 | if (data) | 1388 | if (data) |
1399 | req->data = data; | 1389 | req->data = data; |
1400 | 1390 | ||
1401 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1391 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1402 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1392 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1403 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1393 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1404 | 1394 | ||
@@ -1484,7 +1474,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) | |||
1484 | } | 1474 | } |
1485 | 1475 | ||
1486 | out: | 1476 | out: |
1487 | put_device(&port->sysfs_device); | 1477 | put_device(&port->dev); |
1488 | } | 1478 | } |
1489 | 1479 | ||
1490 | /** | 1480 | /** |
@@ -1513,7 +1503,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | |||
1513 | } | 1503 | } |
1514 | 1504 | ||
1515 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1505 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1516 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1506 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1517 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1507 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1518 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1508 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1519 | 1509 | ||
@@ -1521,15 +1511,15 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | |||
1521 | hton24(req->qtcb->bottom.support.d_id, port->d_id); | 1511 | hton24(req->qtcb->bottom.support.d_id, port->d_id); |
1522 | req->data = port; | 1512 | req->data = port; |
1523 | req->erp_action = erp_action; | 1513 | req->erp_action = erp_action; |
1524 | erp_action->fsf_req = req; | 1514 | erp_action->fsf_req_id = req->req_id; |
1525 | get_device(&port->sysfs_device); | 1515 | get_device(&port->dev); |
1526 | 1516 | ||
1527 | zfcp_fsf_start_erp_timer(req); | 1517 | zfcp_fsf_start_erp_timer(req); |
1528 | retval = zfcp_fsf_req_send(req); | 1518 | retval = zfcp_fsf_req_send(req); |
1529 | if (retval) { | 1519 | if (retval) { |
1530 | zfcp_fsf_req_free(req); | 1520 | zfcp_fsf_req_free(req); |
1531 | erp_action->fsf_req = NULL; | 1521 | erp_action->fsf_req_id = 0; |
1532 | put_device(&port->sysfs_device); | 1522 | put_device(&port->dev); |
1533 | } | 1523 | } |
1534 | out: | 1524 | out: |
1535 | spin_unlock_bh(&qdio->req_q_lock); | 1525 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -1583,7 +1573,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | |||
1583 | } | 1573 | } |
1584 | 1574 | ||
1585 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1575 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1586 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1576 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1587 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1577 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1588 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1578 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1589 | 1579 | ||
@@ -1591,13 +1581,13 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | |||
1591 | req->data = erp_action->port; | 1581 | req->data = erp_action->port; |
1592 | req->erp_action = erp_action; | 1582 | req->erp_action = erp_action; |
1593 | req->qtcb->header.port_handle = erp_action->port->handle; | 1583 | req->qtcb->header.port_handle = erp_action->port->handle; |
1594 | erp_action->fsf_req = req; | 1584 | erp_action->fsf_req_id = req->req_id; |
1595 | 1585 | ||
1596 | zfcp_fsf_start_erp_timer(req); | 1586 | zfcp_fsf_start_erp_timer(req); |
1597 | retval = zfcp_fsf_req_send(req); | 1587 | retval = zfcp_fsf_req_send(req); |
1598 | if (retval) { | 1588 | if (retval) { |
1599 | zfcp_fsf_req_free(req); | 1589 | zfcp_fsf_req_free(req); |
1600 | erp_action->fsf_req = NULL; | 1590 | erp_action->fsf_req_id = 0; |
1601 | } | 1591 | } |
1602 | out: | 1592 | out: |
1603 | spin_unlock_bh(&qdio->req_q_lock); | 1593 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -1660,7 +1650,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1660 | } | 1650 | } |
1661 | 1651 | ||
1662 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1652 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1663 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1653 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1664 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1654 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1665 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1655 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1666 | 1656 | ||
@@ -1715,7 +1705,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1715 | } | 1705 | } |
1716 | 1706 | ||
1717 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1707 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1718 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1708 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1719 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1709 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1720 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1710 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1721 | 1711 | ||
@@ -1809,7 +1799,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | |||
1809 | } | 1799 | } |
1810 | 1800 | ||
1811 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1801 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1812 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1802 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1813 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1803 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1814 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1804 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1815 | 1805 | ||
@@ -1817,13 +1807,13 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | |||
1817 | req->qtcb->header.port_handle = erp_action->port->handle; | 1807 | req->qtcb->header.port_handle = erp_action->port->handle; |
1818 | req->erp_action = erp_action; | 1808 | req->erp_action = erp_action; |
1819 | req->handler = zfcp_fsf_close_physical_port_handler; | 1809 | req->handler = zfcp_fsf_close_physical_port_handler; |
1820 | erp_action->fsf_req = req; | 1810 | erp_action->fsf_req_id = req->req_id; |
1821 | 1811 | ||
1822 | zfcp_fsf_start_erp_timer(req); | 1812 | zfcp_fsf_start_erp_timer(req); |
1823 | retval = zfcp_fsf_req_send(req); | 1813 | retval = zfcp_fsf_req_send(req); |
1824 | if (retval) { | 1814 | if (retval) { |
1825 | zfcp_fsf_req_free(req); | 1815 | zfcp_fsf_req_free(req); |
1826 | erp_action->fsf_req = NULL; | 1816 | erp_action->fsf_req_id = 0; |
1827 | } | 1817 | } |
1828 | out: | 1818 | out: |
1829 | spin_unlock_bh(&qdio->req_q_lock); | 1819 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -1982,7 +1972,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
1982 | } | 1972 | } |
1983 | 1973 | ||
1984 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1974 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1985 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 1975 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
1986 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1976 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1987 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1977 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1988 | 1978 | ||
@@ -1991,7 +1981,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
1991 | req->handler = zfcp_fsf_open_unit_handler; | 1981 | req->handler = zfcp_fsf_open_unit_handler; |
1992 | req->data = erp_action->unit; | 1982 | req->data = erp_action->unit; |
1993 | req->erp_action = erp_action; | 1983 | req->erp_action = erp_action; |
1994 | erp_action->fsf_req = req; | 1984 | erp_action->fsf_req_id = req->req_id; |
1995 | 1985 | ||
1996 | if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) | 1986 | if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) |
1997 | req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; | 1987 | req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; |
@@ -2000,7 +1990,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
2000 | retval = zfcp_fsf_req_send(req); | 1990 | retval = zfcp_fsf_req_send(req); |
2001 | if (retval) { | 1991 | if (retval) { |
2002 | zfcp_fsf_req_free(req); | 1992 | zfcp_fsf_req_free(req); |
2003 | erp_action->fsf_req = NULL; | 1993 | erp_action->fsf_req_id = 0; |
2004 | } | 1994 | } |
2005 | out: | 1995 | out: |
2006 | spin_unlock_bh(&qdio->req_q_lock); | 1996 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -2068,7 +2058,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
2068 | } | 2058 | } |
2069 | 2059 | ||
2070 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 2060 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
2071 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 2061 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
2072 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2062 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2073 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2063 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2074 | 2064 | ||
@@ -2077,13 +2067,13 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
2077 | req->handler = zfcp_fsf_close_unit_handler; | 2067 | req->handler = zfcp_fsf_close_unit_handler; |
2078 | req->data = erp_action->unit; | 2068 | req->data = erp_action->unit; |
2079 | req->erp_action = erp_action; | 2069 | req->erp_action = erp_action; |
2080 | erp_action->fsf_req = req; | 2070 | erp_action->fsf_req_id = req->req_id; |
2081 | 2071 | ||
2082 | zfcp_fsf_start_erp_timer(req); | 2072 | zfcp_fsf_start_erp_timer(req); |
2083 | retval = zfcp_fsf_req_send(req); | 2073 | retval = zfcp_fsf_req_send(req); |
2084 | if (retval) { | 2074 | if (retval) { |
2085 | zfcp_fsf_req_free(req); | 2075 | zfcp_fsf_req_free(req); |
2086 | erp_action->fsf_req = NULL; | 2076 | erp_action->fsf_req_id = 0; |
2087 | } | 2077 | } |
2088 | out: | 2078 | out: |
2089 | spin_unlock_bh(&qdio->req_q_lock); | 2079 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -2111,8 +2101,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2111 | blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; | 2101 | blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; |
2112 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) | 2102 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) |
2113 | blktrc.flags |= ZFCP_BLK_REQ_ERROR; | 2103 | blktrc.flags |= ZFCP_BLK_REQ_ERROR; |
2114 | blktrc.inb_usage = req->queue_req.qdio_inb_usage; | 2104 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; |
2115 | blktrc.outb_usage = req->queue_req.qdio_outb_usage; | 2105 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; |
2116 | 2106 | ||
2117 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { | 2107 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { |
2118 | blktrc.flags |= ZFCP_BLK_LAT_VALID; | 2108 | blktrc.flags |= ZFCP_BLK_LAT_VALID; |
@@ -2169,12 +2159,7 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) | |||
2169 | zfcp_fsf_req_trace(req, scpnt); | 2159 | zfcp_fsf_req_trace(req, scpnt); |
2170 | 2160 | ||
2171 | skip_fsfstatus: | 2161 | skip_fsfstatus: |
2172 | if (scpnt->result != 0) | 2162 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); |
2173 | zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req); | ||
2174 | else if (scpnt->retries > 0) | ||
2175 | zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req); | ||
2176 | else | ||
2177 | zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req); | ||
2178 | 2163 | ||
2179 | scpnt->host_scribble = NULL; | 2164 | scpnt->host_scribble = NULL; |
2180 | (scpnt->scsi_done) (scpnt); | 2165 | (scpnt->scsi_done) (scpnt); |
@@ -2274,7 +2259,7 @@ skip_fsfstatus: | |||
2274 | else { | 2259 | else { |
2275 | zfcp_fsf_send_fcp_command_task_handler(req); | 2260 | zfcp_fsf_send_fcp_command_task_handler(req); |
2276 | req->unit = NULL; | 2261 | req->unit = NULL; |
2277 | put_device(&unit->sysfs_device); | 2262 | put_device(&unit->dev); |
2278 | } | 2263 | } |
2279 | } | 2264 | } |
2280 | 2265 | ||
@@ -2312,7 +2297,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2312 | } | 2297 | } |
2313 | 2298 | ||
2314 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 2299 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
2315 | get_device(&unit->sysfs_device); | 2300 | get_device(&unit->dev); |
2316 | req->unit = unit; | 2301 | req->unit = unit; |
2317 | req->data = scsi_cmnd; | 2302 | req->data = scsi_cmnd; |
2318 | req->handler = zfcp_fsf_send_fcp_command_handler; | 2303 | req->handler = zfcp_fsf_send_fcp_command_handler; |
@@ -2346,11 +2331,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2346 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; | 2331 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; |
2347 | zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); | 2332 | zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); |
2348 | 2333 | ||
2349 | real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, | 2334 | real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype, |
2350 | scsi_sglist(scsi_cmnd), | 2335 | scsi_sglist(scsi_cmnd), |
2351 | FSF_MAX_SBALS_PER_REQ); | 2336 | FSF_MAX_SBALS_PER_REQ); |
2352 | if (unlikely(real_bytes < 0)) { | 2337 | if (unlikely(real_bytes < 0)) { |
2353 | if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { | 2338 | if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { |
2354 | dev_err(&adapter->ccw_device->dev, | 2339 | dev_err(&adapter->ccw_device->dev, |
2355 | "Oversize data package, unit 0x%016Lx " | 2340 | "Oversize data package, unit 0x%016Lx " |
2356 | "on port 0x%016Lx closed\n", | 2341 | "on port 0x%016Lx closed\n", |
@@ -2369,7 +2354,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2369 | goto out; | 2354 | goto out; |
2370 | 2355 | ||
2371 | failed_scsi_cmnd: | 2356 | failed_scsi_cmnd: |
2372 | put_device(&unit->sysfs_device); | 2357 | put_device(&unit->dev); |
2373 | zfcp_fsf_req_free(req); | 2358 | zfcp_fsf_req_free(req); |
2374 | scsi_cmnd->host_scribble = NULL; | 2359 | scsi_cmnd->host_scribble = NULL; |
2375 | out: | 2360 | out: |
@@ -2415,7 +2400,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2415 | req->qtcb->bottom.io.service_class = FSF_CLASS_3; | 2400 | req->qtcb->bottom.io.service_class = FSF_CLASS_3; |
2416 | req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; | 2401 | req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; |
2417 | 2402 | ||
2418 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 2403 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
2419 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; | 2404 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; |
2420 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2405 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2421 | 2406 | ||
@@ -2478,14 +2463,14 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2478 | 2463 | ||
2479 | req->handler = zfcp_fsf_control_file_handler; | 2464 | req->handler = zfcp_fsf_control_file_handler; |
2480 | 2465 | ||
2481 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); | 2466 | sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); |
2482 | sbale[0].flags |= direction; | 2467 | sbale[0].flags |= direction; |
2483 | 2468 | ||
2484 | bottom = &req->qtcb->bottom.support; | 2469 | bottom = &req->qtcb->bottom.support; |
2485 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; | 2470 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; |
2486 | bottom->option = fsf_cfdc->option; | 2471 | bottom->option = fsf_cfdc->option; |
2487 | 2472 | ||
2488 | bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, | 2473 | bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, |
2489 | direction, fsf_cfdc->sg, | 2474 | direction, fsf_cfdc->sg, |
2490 | FSF_MAX_SBALS_PER_REQ); | 2475 | FSF_MAX_SBALS_PER_REQ); |
2491 | if (bytes != ZFCP_CFDC_MAX_SIZE) { | 2476 | if (bytes != ZFCP_CFDC_MAX_SIZE) { |
@@ -2516,15 +2501,14 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) | |||
2516 | struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; | 2501 | struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; |
2517 | struct qdio_buffer_element *sbale; | 2502 | struct qdio_buffer_element *sbale; |
2518 | struct zfcp_fsf_req *fsf_req; | 2503 | struct zfcp_fsf_req *fsf_req; |
2519 | unsigned long flags, req_id; | 2504 | unsigned long req_id; |
2520 | int idx; | 2505 | int idx; |
2521 | 2506 | ||
2522 | for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { | 2507 | for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { |
2523 | 2508 | ||
2524 | sbale = &sbal->element[idx]; | 2509 | sbale = &sbal->element[idx]; |
2525 | req_id = (unsigned long) sbale->addr; | 2510 | req_id = (unsigned long) sbale->addr; |
2526 | spin_lock_irqsave(&adapter->req_list_lock, flags); | 2511 | fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); |
2527 | fsf_req = zfcp_reqlist_find(adapter, req_id); | ||
2528 | 2512 | ||
2529 | if (!fsf_req) | 2513 | if (!fsf_req) |
2530 | /* | 2514 | /* |
@@ -2534,11 +2518,8 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) | |||
2534 | panic("error: unknown req_id (%lx) on adapter %s.\n", | 2518 | panic("error: unknown req_id (%lx) on adapter %s.\n", |
2535 | req_id, dev_name(&adapter->ccw_device->dev)); | 2519 | req_id, dev_name(&adapter->ccw_device->dev)); |
2536 | 2520 | ||
2537 | list_del(&fsf_req->list); | 2521 | fsf_req->qdio_req.sbal_response = sbal_idx; |
2538 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | 2522 | fsf_req->qdio_req.qdio_inb_usage = |
2539 | |||
2540 | fsf_req->queue_req.sbal_response = sbal_idx; | ||
2541 | fsf_req->queue_req.qdio_inb_usage = | ||
2542 | atomic_read(&qdio->resp_q.count); | 2523 | atomic_read(&qdio->resp_q.count); |
2543 | zfcp_fsf_req_complete(fsf_req); | 2524 | zfcp_fsf_req_complete(fsf_req); |
2544 | 2525 | ||
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 6c5228b627fc..71b97ff77cf0 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
11 | 11 | ||
12 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
13 | #include "zfcp_qdio.h" | ||
13 | 14 | ||
14 | #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) | 15 | #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) |
15 | 16 | ||
@@ -28,12 +29,6 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) | |||
28 | return 0; | 29 | return 0; |
29 | } | 30 | } |
30 | 31 | ||
31 | static struct qdio_buffer_element * | ||
32 | zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) | ||
33 | { | ||
34 | return &q->sbal[sbal_idx]->element[sbale_idx]; | ||
35 | } | ||
36 | |||
37 | static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) | 32 | static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) |
38 | { | 33 | { |
39 | struct zfcp_adapter *adapter = qdio->adapter; | 34 | struct zfcp_adapter *adapter = qdio->adapter; |
@@ -106,7 +101,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) | |||
106 | 101 | ||
107 | if (unlikely(retval)) { | 102 | if (unlikely(retval)) { |
108 | atomic_set(&queue->count, count); | 103 | atomic_set(&queue->count, count); |
109 | /* FIXME: Recover this with an adapter reopen? */ | 104 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL); |
110 | } else { | 105 | } else { |
111 | queue->first += count; | 106 | queue->first += count; |
112 | queue->first %= QDIO_MAX_BUFFERS_PER_Q; | 107 | queue->first %= QDIO_MAX_BUFFERS_PER_Q; |
@@ -145,32 +140,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
145 | zfcp_qdio_resp_put_back(qdio, count); | 140 | zfcp_qdio_resp_put_back(qdio, count); |
146 | } | 141 | } |
147 | 142 | ||
148 | /** | ||
149 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req | ||
150 | * @qdio: pointer to struct zfcp_qdio | ||
151 | * @q_rec: pointer to struct zfcp_queue_rec | ||
152 | * Returns: pointer to qdio_buffer_element (SBALE) structure | ||
153 | */ | ||
154 | struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, | ||
155 | struct zfcp_queue_req *q_req) | ||
156 | { | ||
157 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req | ||
162 | * @fsf_req: pointer to struct fsf_req | ||
163 | * Returns: pointer to qdio_buffer_element (SBALE) structure | ||
164 | */ | ||
165 | struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, | ||
166 | struct zfcp_queue_req *q_req) | ||
167 | { | ||
168 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, | ||
169 | q_req->sbale_curr); | ||
170 | } | ||
171 | |||
172 | static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, | 143 | static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, |
173 | struct zfcp_queue_req *q_req, int max_sbals) | 144 | struct zfcp_qdio_req *q_req, int max_sbals) |
174 | { | 145 | { |
175 | int count = atomic_read(&qdio->req_q.count); | 146 | int count = atomic_read(&qdio->req_q.count); |
176 | count = min(count, max_sbals); | 147 | count = min(count, max_sbals); |
@@ -179,7 +150,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, | |||
179 | } | 150 | } |
180 | 151 | ||
181 | static struct qdio_buffer_element * | 152 | static struct qdio_buffer_element * |
182 | zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, | 153 | zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, |
183 | unsigned long sbtype) | 154 | unsigned long sbtype) |
184 | { | 155 | { |
185 | struct qdio_buffer_element *sbale; | 156 | struct qdio_buffer_element *sbale; |
@@ -214,7 +185,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, | |||
214 | } | 185 | } |
215 | 186 | ||
216 | static struct qdio_buffer_element * | 187 | static struct qdio_buffer_element * |
217 | zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, | 188 | zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, |
218 | unsigned int sbtype) | 189 | unsigned int sbtype) |
219 | { | 190 | { |
220 | if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 191 | if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
@@ -224,7 +195,7 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, | |||
224 | } | 195 | } |
225 | 196 | ||
226 | static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, | 197 | static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, |
227 | struct zfcp_queue_req *q_req) | 198 | struct zfcp_qdio_req *q_req) |
228 | { | 199 | { |
229 | struct qdio_buffer **sbal = qdio->req_q.sbal; | 200 | struct qdio_buffer **sbal = qdio->req_q.sbal; |
230 | int first = q_req->sbal_first; | 201 | int first = q_req->sbal_first; |
@@ -235,7 +206,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, | |||
235 | } | 206 | } |
236 | 207 | ||
237 | static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, | 208 | static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, |
238 | struct zfcp_queue_req *q_req, | 209 | struct zfcp_qdio_req *q_req, |
239 | unsigned int sbtype, void *start_addr, | 210 | unsigned int sbtype, void *start_addr, |
240 | unsigned int total_length) | 211 | unsigned int total_length) |
241 | { | 212 | { |
@@ -271,8 +242,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, | |||
271 | * @max_sbals: upper bound for number of SBALs to be used | 242 | * @max_sbals: upper bound for number of SBALs to be used |
272 | * Returns: number of bytes, or error (negativ) | 243 | * Returns: number of bytes, or error (negativ) |
273 | */ | 244 | */ |
274 | int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, | 245 | int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, |
275 | struct zfcp_queue_req *q_req, | ||
276 | unsigned long sbtype, struct scatterlist *sg, | 246 | unsigned long sbtype, struct scatterlist *sg, |
277 | int max_sbals) | 247 | int max_sbals) |
278 | { | 248 | { |
@@ -304,10 +274,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, | |||
304 | /** | 274 | /** |
305 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO | 275 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO |
306 | * @qdio: pointer to struct zfcp_qdio | 276 | * @qdio: pointer to struct zfcp_qdio |
307 | * @q_req: pointer to struct zfcp_queue_req | 277 | * @q_req: pointer to struct zfcp_qdio_req |
308 | * Returns: 0 on success, error otherwise | 278 | * Returns: 0 on success, error otherwise |
309 | */ | 279 | */ |
310 | int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) | 280 | int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) |
311 | { | 281 | { |
312 | struct zfcp_qdio_queue *req_q = &qdio->req_q; | 282 | struct zfcp_qdio_queue *req_q = &qdio->req_q; |
313 | int first = q_req->sbal_first; | 283 | int first = q_req->sbal_first; |
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h new file mode 100644 index 000000000000..8cca54631e1e --- /dev/null +++ b/drivers/s390/scsi/zfcp_qdio.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * zfcp device driver | ||
3 | * | ||
4 | * Header file for zfcp qdio interface | ||
5 | * | ||
6 | * Copyright IBM Corporation 2010 | ||
7 | */ | ||
8 | |||
9 | #ifndef ZFCP_QDIO_H | ||
10 | #define ZFCP_QDIO_H | ||
11 | |||
12 | #include <asm/qdio.h> | ||
13 | |||
14 | /** | ||
15 | * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count | ||
16 | * @sbal: qdio buffers | ||
17 | * @first: index of next free buffer in queue | ||
18 | * @count: number of free buffers in queue | ||
19 | */ | ||
20 | struct zfcp_qdio_queue { | ||
21 | struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; | ||
22 | u8 first; | ||
23 | atomic_t count; | ||
24 | }; | ||
25 | |||
26 | /** | ||
27 | * struct zfcp_qdio - basic qdio data structure | ||
28 | * @resp_q: response queue | ||
29 | * @req_q: request queue | ||
30 | * @stat_lock: lock to protect req_q_util and req_q_time | ||
31 | * @req_q_lock: lock to serialize access to request queue | ||
32 | * @req_q_time: time of last fill level change | ||
33 | * @req_q_util: used for accounting | ||
34 | * @req_q_full: queue full incidents | ||
35 | * @req_q_wq: used to wait for SBAL availability | ||
36 | * @adapter: adapter used in conjunction with this qdio structure | ||
37 | */ | ||
38 | struct zfcp_qdio { | ||
39 | struct zfcp_qdio_queue resp_q; | ||
40 | struct zfcp_qdio_queue req_q; | ||
41 | spinlock_t stat_lock; | ||
42 | spinlock_t req_q_lock; | ||
43 | unsigned long long req_q_time; | ||
44 | u64 req_q_util; | ||
45 | atomic_t req_q_full; | ||
46 | wait_queue_head_t req_q_wq; | ||
47 | struct zfcp_adapter *adapter; | ||
48 | }; | ||
49 | |||
50 | /** | ||
51 | * struct zfcp_qdio_req - qdio queue related values for a request | ||
52 | * @sbal_number: number of free sbals | ||
53 | * @sbal_first: first sbal for this request | ||
54 | * @sbal_last: last sbal for this request | ||
55 | * @sbal_limit: last possible sbal for this request | ||
56 | * @sbale_curr: current sbale at creation of this request | ||
57 | * @sbal_response: sbal used in interrupt | ||
58 | * @qdio_outb_usage: usage of outbound queue | ||
59 | * @qdio_inb_usage: usage of inbound queue | ||
60 | */ | ||
61 | struct zfcp_qdio_req { | ||
62 | u8 sbal_number; | ||
63 | u8 sbal_first; | ||
64 | u8 sbal_last; | ||
65 | u8 sbal_limit; | ||
66 | u8 sbale_curr; | ||
67 | u8 sbal_response; | ||
68 | u16 qdio_outb_usage; | ||
69 | u16 qdio_inb_usage; | ||
70 | }; | ||
71 | |||
72 | /** | ||
73 | * zfcp_qdio_sbale - return pointer to sbale in qdio queue | ||
74 | * @q: queue where to find sbal | ||
75 | * @sbal_idx: sbal index in queue | ||
76 | * @sbale_idx: sbale index in sbal | ||
77 | */ | ||
78 | static inline struct qdio_buffer_element * | ||
79 | zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) | ||
80 | { | ||
81 | return &q->sbal[sbal_idx]->element[sbale_idx]; | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request | ||
86 | * @qdio: pointer to struct zfcp_qdio | ||
87 | * @q_rec: pointer to struct zfcp_qdio_req | ||
88 | * Returns: pointer to qdio_buffer_element (sbale) structure | ||
89 | */ | ||
90 | static inline struct qdio_buffer_element * | ||
91 | zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | ||
92 | { | ||
93 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * zfcp_qdio_sbale_curr - return current sbale on req_q for a request | ||
98 | * @qdio: pointer to struct zfcp_qdio | ||
99 | * @fsf_req: pointer to struct zfcp_fsf_req | ||
100 | * Returns: pointer to qdio_buffer_element (sbale) structure | ||
101 | */ | ||
102 | static inline struct qdio_buffer_element * | ||
103 | zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | ||
104 | { | ||
105 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, | ||
106 | q_req->sbale_curr); | ||
107 | } | ||
108 | |||
109 | #endif /* ZFCP_QDIO_H */ | ||
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h new file mode 100644 index 000000000000..a72d1b730aba --- /dev/null +++ b/drivers/s390/scsi/zfcp_reqlist.h | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * zfcp device driver | ||
3 | * | ||
4 | * Data structure and helper functions for tracking pending FSF | ||
5 | * requests. | ||
6 | * | ||
7 | * Copyright IBM Corporation 2009 | ||
8 | */ | ||
9 | |||
10 | #ifndef ZFCP_REQLIST_H | ||
11 | #define ZFCP_REQLIST_H | ||
12 | |||
13 | /* number of hash buckets */ | ||
14 | #define ZFCP_REQ_LIST_BUCKETS 128 | ||
15 | |||
16 | /** | ||
17 | * struct zfcp_reqlist - Container for request list (reqlist) | ||
18 | * @lock: Spinlock for protecting the hash list | ||
19 | * @list: Array of hashbuckets, each is a list of requests in this bucket | ||
20 | */ | ||
21 | struct zfcp_reqlist { | ||
22 | spinlock_t lock; | ||
23 | struct list_head buckets[ZFCP_REQ_LIST_BUCKETS]; | ||
24 | }; | ||
25 | |||
26 | static inline int zfcp_reqlist_hash(unsigned long req_id) | ||
27 | { | ||
28 | return req_id % ZFCP_REQ_LIST_BUCKETS; | ||
29 | } | ||
30 | |||
31 | /** | ||
32 | * zfcp_reqlist_alloc - Allocate and initialize reqlist | ||
33 | * | ||
34 | * Returns pointer to allocated reqlist on success, or NULL on | ||
35 | * allocation failure. | ||
36 | */ | ||
37 | static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void) | ||
38 | { | ||
39 | unsigned int i; | ||
40 | struct zfcp_reqlist *rl; | ||
41 | |||
42 | rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL); | ||
43 | if (!rl) | ||
44 | return NULL; | ||
45 | |||
46 | spin_lock_init(&rl->lock); | ||
47 | |||
48 | for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++) | ||
49 | INIT_LIST_HEAD(&rl->buckets[i]); | ||
50 | |||
51 | return rl; | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * zfcp_reqlist_isempty - Check whether the request list empty | ||
56 | * @rl: pointer to reqlist | ||
57 | * | ||
58 | * Returns: 1 if list is empty, 0 if not | ||
59 | */ | ||
60 | static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl) | ||
61 | { | ||
62 | unsigned int i; | ||
63 | |||
64 | for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++) | ||
65 | if (!list_empty(&rl->buckets[i])) | ||
66 | return 0; | ||
67 | return 1; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * zfcp_reqlist_free - Free allocated memory for reqlist | ||
72 | * @rl: The reqlist where to free memory | ||
73 | */ | ||
74 | static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl) | ||
75 | { | ||
76 | /* sanity check */ | ||
77 | BUG_ON(!zfcp_reqlist_isempty(rl)); | ||
78 | |||
79 | kfree(rl); | ||
80 | } | ||
81 | |||
82 | static inline struct zfcp_fsf_req * | ||
83 | _zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id) | ||
84 | { | ||
85 | struct zfcp_fsf_req *req; | ||
86 | unsigned int i; | ||
87 | |||
88 | i = zfcp_reqlist_hash(req_id); | ||
89 | list_for_each_entry(req, &rl->buckets[i], list) | ||
90 | if (req->req_id == req_id) | ||
91 | return req; | ||
92 | return NULL; | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * zfcp_reqlist_find - Lookup FSF request by its request id | ||
97 | * @rl: The reqlist where to lookup the FSF request | ||
98 | * @req_id: The request id to look for | ||
99 | * | ||
100 | * Returns a pointer to the FSF request with the specified request id | ||
101 | * or NULL if there is no known FSF request with this id. | ||
102 | */ | ||
103 | static inline struct zfcp_fsf_req * | ||
104 | zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id) | ||
105 | { | ||
106 | unsigned long flags; | ||
107 | struct zfcp_fsf_req *req; | ||
108 | |||
109 | spin_lock_irqsave(&rl->lock, flags); | ||
110 | req = _zfcp_reqlist_find(rl, req_id); | ||
111 | spin_unlock_irqrestore(&rl->lock, flags); | ||
112 | |||
113 | return req; | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist | ||
118 | * @rl: reqlist where to search and remove entry | ||
119 | * @req_id: The request id of the request to look for | ||
120 | * | ||
121 | * This functions tries to find the FSF request with the specified | ||
122 | * id and then removes it from the reqlist. The reqlist lock is held | ||
123 | * during both steps of the operation. | ||
124 | * | ||
125 | * Returns: Pointer to the FSF request if the request has been found, | ||
126 | * NULL if it has not been found. | ||
127 | */ | ||
128 | static inline struct zfcp_fsf_req * | ||
129 | zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | struct zfcp_fsf_req *req; | ||
133 | |||
134 | spin_lock_irqsave(&rl->lock, flags); | ||
135 | req = _zfcp_reqlist_find(rl, req_id); | ||
136 | if (req) | ||
137 | list_del(&req->list); | ||
138 | spin_unlock_irqrestore(&rl->lock, flags); | ||
139 | |||
140 | return req; | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * zfcp_reqlist_add - Add entry to reqlist | ||
145 | * @rl: reqlist where to add the entry | ||
146 | * @req: The entry to add | ||
147 | * | ||
148 | * The request id always increases. As an optimization new requests | ||
149 | * are added here with list_add_tail at the end of the bucket lists | ||
150 | * while old requests are looked up starting at the beginning of the | ||
151 | * lists. | ||
152 | */ | ||
153 | static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl, | ||
154 | struct zfcp_fsf_req *req) | ||
155 | { | ||
156 | unsigned int i; | ||
157 | unsigned long flags; | ||
158 | |||
159 | i = zfcp_reqlist_hash(req->req_id); | ||
160 | |||
161 | spin_lock_irqsave(&rl->lock, flags); | ||
162 | list_add_tail(&req->list, &rl->buckets[i]); | ||
163 | spin_unlock_irqrestore(&rl->lock, flags); | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * zfcp_reqlist_move - Move all entries from reqlist to simple list | ||
168 | * @rl: The zfcp_reqlist where to remove all entries | ||
169 | * @list: The list where to move all entries | ||
170 | */ | ||
171 | static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl, | ||
172 | struct list_head *list) | ||
173 | { | ||
174 | unsigned int i; | ||
175 | unsigned long flags; | ||
176 | |||
177 | spin_lock_irqsave(&rl->lock, flags); | ||
178 | for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++) | ||
179 | list_splice_init(&rl->buckets[i], list); | ||
180 | spin_unlock_irqrestore(&rl->lock, flags); | ||
181 | } | ||
182 | |||
183 | #endif /* ZFCP_REQLIST_H */ | ||
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 8e6fc68d6bd4..c3c4178888af 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Interface to Linux SCSI midlayer. | 4 | * Interface to Linux SCSI midlayer. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2009 | 6 | * Copyright IBM Corporation 2002, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -15,6 +15,7 @@ | |||
15 | #include "zfcp_ext.h" | 15 | #include "zfcp_ext.h" |
16 | #include "zfcp_dbf.h" | 16 | #include "zfcp_dbf.h" |
17 | #include "zfcp_fc.h" | 17 | #include "zfcp_fc.h" |
18 | #include "zfcp_reqlist.h" | ||
18 | 19 | ||
19 | static unsigned int default_depth = 32; | 20 | static unsigned int default_depth = 32; |
20 | module_param_named(queue_depth, default_depth, uint, 0600); | 21 | module_param_named(queue_depth, default_depth, uint, 0600); |
@@ -43,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | |||
43 | { | 44 | { |
44 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; | 45 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; |
45 | unit->device = NULL; | 46 | unit->device = NULL; |
46 | put_device(&unit->sysfs_device); | 47 | put_device(&unit->dev); |
47 | } | 48 | } |
48 | 49 | ||
49 | static int zfcp_scsi_slave_configure(struct scsi_device *sdp) | 50 | static int zfcp_scsi_slave_configure(struct scsi_device *sdp) |
@@ -59,10 +60,9 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) | |||
59 | { | 60 | { |
60 | struct zfcp_adapter *adapter = | 61 | struct zfcp_adapter *adapter = |
61 | (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; | 62 | (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; |
63 | |||
62 | set_host_byte(scpnt, result); | 64 | set_host_byte(scpnt, result); |
63 | if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) | 65 | zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); |
64 | zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); | ||
65 | /* return directly */ | ||
66 | scpnt->scsi_done(scpnt); | 66 | scpnt->scsi_done(scpnt); |
67 | } | 67 | } |
68 | 68 | ||
@@ -86,18 +86,10 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | |||
86 | adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; | 86 | adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; |
87 | unit = scpnt->device->hostdata; | 87 | unit = scpnt->device->hostdata; |
88 | 88 | ||
89 | BUG_ON(!adapter || (adapter != unit->port->adapter)); | ||
90 | BUG_ON(!scpnt->scsi_done); | ||
91 | |||
92 | if (unlikely(!unit)) { | ||
93 | zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | scsi_result = fc_remote_port_chkready(rport); | 89 | scsi_result = fc_remote_port_chkready(rport); |
98 | if (unlikely(scsi_result)) { | 90 | if (unlikely(scsi_result)) { |
99 | scpnt->result = scsi_result; | 91 | scpnt->result = scsi_result; |
100 | zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); | 92 | zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); |
101 | scpnt->scsi_done(scpnt); | 93 | scpnt->scsi_done(scpnt); |
102 | return 0; | 94 | return 0; |
103 | } | 95 | } |
@@ -189,9 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
189 | /* avoid race condition between late normal completion and abort */ | 181 | /* avoid race condition between late normal completion and abort */ |
190 | write_lock_irqsave(&adapter->abort_lock, flags); | 182 | write_lock_irqsave(&adapter->abort_lock, flags); |
191 | 183 | ||
192 | spin_lock(&adapter->req_list_lock); | 184 | old_req = zfcp_reqlist_find(adapter->req_list, old_reqid); |
193 | old_req = zfcp_reqlist_find(adapter, old_reqid); | ||
194 | spin_unlock(&adapter->req_list_lock); | ||
195 | if (!old_req) { | 185 | if (!old_req) { |
196 | write_unlock_irqrestore(&adapter->abort_lock, flags); | 186 | write_unlock_irqrestore(&adapter->abort_lock, flags); |
197 | zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, | 187 | zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, |
@@ -521,7 +511,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) | |||
521 | 511 | ||
522 | if (port) { | 512 | if (port) { |
523 | zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); | 513 | zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); |
524 | put_device(&port->sysfs_device); | 514 | put_device(&port->dev); |
525 | } | 515 | } |
526 | } | 516 | } |
527 | 517 | ||
@@ -563,23 +553,23 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) | |||
563 | 553 | ||
564 | void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) | 554 | void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) |
565 | { | 555 | { |
566 | get_device(&port->sysfs_device); | 556 | get_device(&port->dev); |
567 | port->rport_task = RPORT_ADD; | 557 | port->rport_task = RPORT_ADD; |
568 | 558 | ||
569 | if (!queue_work(port->adapter->work_queue, &port->rport_work)) | 559 | if (!queue_work(port->adapter->work_queue, &port->rport_work)) |
570 | put_device(&port->sysfs_device); | 560 | put_device(&port->dev); |
571 | } | 561 | } |
572 | 562 | ||
573 | void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) | 563 | void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) |
574 | { | 564 | { |
575 | get_device(&port->sysfs_device); | 565 | get_device(&port->dev); |
576 | port->rport_task = RPORT_DEL; | 566 | port->rport_task = RPORT_DEL; |
577 | 567 | ||
578 | if (port->rport && queue_work(port->adapter->work_queue, | 568 | if (port->rport && queue_work(port->adapter->work_queue, |
579 | &port->rport_work)) | 569 | &port->rport_work)) |
580 | return; | 570 | return; |
581 | 571 | ||
582 | put_device(&port->sysfs_device); | 572 | put_device(&port->dev); |
583 | } | 573 | } |
584 | 574 | ||
585 | void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) | 575 | void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) |
@@ -608,7 +598,7 @@ void zfcp_scsi_rport_work(struct work_struct *work) | |||
608 | } | 598 | } |
609 | } | 599 | } |
610 | 600 | ||
611 | put_device(&port->sysfs_device); | 601 | put_device(&port->dev); |
612 | } | 602 | } |
613 | 603 | ||
614 | 604 | ||
@@ -626,7 +616,7 @@ void zfcp_scsi_scan(struct work_struct *work) | |||
626 | scsilun_to_int((struct scsi_lun *) | 616 | scsilun_to_int((struct scsi_lun *) |
627 | &unit->fcp_lun), 0); | 617 | &unit->fcp_lun), 0); |
628 | 618 | ||
629 | put_device(&unit->sysfs_device); | 619 | put_device(&unit->dev); |
630 | } | 620 | } |
631 | 621 | ||
632 | struct fc_function_template zfcp_transport_functions = { | 622 | struct fc_function_template zfcp_transport_functions = { |
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index f539e006683c..a43035d4bd70 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * sysfs attributes. | 4 | * sysfs attributes. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2008, 2009 | 6 | * Copyright IBM Corporation 2008, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -19,8 +19,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ | |||
19 | struct device_attribute *at,\ | 19 | struct device_attribute *at,\ |
20 | char *buf) \ | 20 | char *buf) \ |
21 | { \ | 21 | { \ |
22 | struct _feat_def *_feat = container_of(dev, struct _feat_def, \ | 22 | struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ |
23 | sysfs_device); \ | ||
24 | \ | 23 | \ |
25 | return sprintf(buf, _format, _value); \ | 24 | return sprintf(buf, _format, _value); \ |
26 | } \ | 25 | } \ |
@@ -87,8 +86,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ | |||
87 | struct device_attribute *attr, \ | 86 | struct device_attribute *attr, \ |
88 | char *buf) \ | 87 | char *buf) \ |
89 | { \ | 88 | { \ |
90 | struct _feat_def *_feat = container_of(dev, struct _feat_def, \ | 89 | struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ |
91 | sysfs_device); \ | ||
92 | \ | 90 | \ |
93 | if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ | 91 | if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ |
94 | return sprintf(buf, "1\n"); \ | 92 | return sprintf(buf, "1\n"); \ |
@@ -99,12 +97,11 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ | |||
99 | struct device_attribute *attr,\ | 97 | struct device_attribute *attr,\ |
100 | const char *buf, size_t count)\ | 98 | const char *buf, size_t count)\ |
101 | { \ | 99 | { \ |
102 | struct _feat_def *_feat = container_of(dev, struct _feat_def, \ | 100 | struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ |
103 | sysfs_device); \ | ||
104 | unsigned long val; \ | 101 | unsigned long val; \ |
105 | int retval = 0; \ | 102 | int retval = 0; \ |
106 | \ | 103 | \ |
107 | if (!(_feat && get_device(&_feat->sysfs_device))) \ | 104 | if (!(_feat && get_device(&_feat->dev))) \ |
108 | return -EBUSY; \ | 105 | return -EBUSY; \ |
109 | \ | 106 | \ |
110 | if (strict_strtoul(buf, 0, &val) || val != 0) { \ | 107 | if (strict_strtoul(buf, 0, &val) || val != 0) { \ |
@@ -118,7 +115,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ | |||
118 | _reopen_id, NULL); \ | 115 | _reopen_id, NULL); \ |
119 | zfcp_erp_wait(_adapter); \ | 116 | zfcp_erp_wait(_adapter); \ |
120 | out: \ | 117 | out: \ |
121 | put_device(&_feat->sysfs_device); \ | 118 | put_device(&_feat->dev); \ |
122 | return retval ? retval : (ssize_t) count; \ | 119 | return retval ? retval : (ssize_t) count; \ |
123 | } \ | 120 | } \ |
124 | static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ | 121 | static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ |
@@ -224,10 +221,10 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, | |||
224 | list_del(&port->list); | 221 | list_del(&port->list); |
225 | write_unlock_irq(&adapter->port_list_lock); | 222 | write_unlock_irq(&adapter->port_list_lock); |
226 | 223 | ||
227 | put_device(&port->sysfs_device); | 224 | put_device(&port->dev); |
228 | 225 | ||
229 | zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); | 226 | zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); |
230 | zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs); | 227 | zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); |
231 | out: | 228 | out: |
232 | zfcp_ccw_adapter_put(adapter); | 229 | zfcp_ccw_adapter_put(adapter); |
233 | return retval ? retval : (ssize_t) count; | 230 | return retval ? retval : (ssize_t) count; |
@@ -258,13 +255,12 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, | |||
258 | struct device_attribute *attr, | 255 | struct device_attribute *attr, |
259 | const char *buf, size_t count) | 256 | const char *buf, size_t count) |
260 | { | 257 | { |
261 | struct zfcp_port *port = container_of(dev, struct zfcp_port, | 258 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); |
262 | sysfs_device); | ||
263 | struct zfcp_unit *unit; | 259 | struct zfcp_unit *unit; |
264 | u64 fcp_lun; | 260 | u64 fcp_lun; |
265 | int retval = -EINVAL; | 261 | int retval = -EINVAL; |
266 | 262 | ||
267 | if (!(port && get_device(&port->sysfs_device))) | 263 | if (!(port && get_device(&port->dev))) |
268 | return -EBUSY; | 264 | return -EBUSY; |
269 | 265 | ||
270 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) | 266 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) |
@@ -280,7 +276,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, | |||
280 | zfcp_erp_wait(unit->port->adapter); | 276 | zfcp_erp_wait(unit->port->adapter); |
281 | flush_work(&unit->scsi_work); | 277 | flush_work(&unit->scsi_work); |
282 | out: | 278 | out: |
283 | put_device(&port->sysfs_device); | 279 | put_device(&port->dev); |
284 | return retval ? retval : (ssize_t) count; | 280 | return retval ? retval : (ssize_t) count; |
285 | } | 281 | } |
286 | static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); | 282 | static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); |
@@ -289,13 +285,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, | |||
289 | struct device_attribute *attr, | 285 | struct device_attribute *attr, |
290 | const char *buf, size_t count) | 286 | const char *buf, size_t count) |
291 | { | 287 | { |
292 | struct zfcp_port *port = container_of(dev, struct zfcp_port, | 288 | struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); |
293 | sysfs_device); | ||
294 | struct zfcp_unit *unit; | 289 | struct zfcp_unit *unit; |
295 | u64 fcp_lun; | 290 | u64 fcp_lun; |
296 | int retval = -EINVAL; | 291 | int retval = -EINVAL; |
297 | 292 | ||
298 | if (!(port && get_device(&port->sysfs_device))) | 293 | if (!(port && get_device(&port->dev))) |
299 | return -EBUSY; | 294 | return -EBUSY; |
300 | 295 | ||
301 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) | 296 | if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) |
@@ -314,12 +309,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, | |||
314 | list_del(&unit->list); | 309 | list_del(&unit->list); |
315 | write_unlock_irq(&port->unit_list_lock); | 310 | write_unlock_irq(&port->unit_list_lock); |
316 | 311 | ||
317 | put_device(&unit->sysfs_device); | 312 | put_device(&unit->dev); |
318 | 313 | ||
319 | zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); | 314 | zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); |
320 | zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs); | 315 | zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); |
321 | out: | 316 | out: |
322 | put_device(&port->sysfs_device); | 317 | put_device(&port->dev); |
323 | return retval ? retval : (ssize_t) count; | 318 | return retval ? retval : (ssize_t) count; |
324 | } | 319 | } |
325 | static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); | 320 | static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); |
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 75ac19b1192f..fc2f676e984d 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c | |||
@@ -233,7 +233,7 @@ static int opromnext(void __user *argp, unsigned int cmd, struct device_node *dp | |||
233 | 233 | ||
234 | ph = 0; | 234 | ph = 0; |
235 | if (dp) | 235 | if (dp) |
236 | ph = dp->node; | 236 | ph = dp->phandle; |
237 | 237 | ||
238 | data->current_node = dp; | 238 | data->current_node = dp; |
239 | *((int *) op->oprom_array) = ph; | 239 | *((int *) op->oprom_array) = ph; |
@@ -256,7 +256,7 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp | |||
256 | 256 | ||
257 | dp = pci_device_to_OF_node(pdev); | 257 | dp = pci_device_to_OF_node(pdev); |
258 | data->current_node = dp; | 258 | data->current_node = dp; |
259 | *((int *)op->oprom_array) = dp->node; | 259 | *((int *)op->oprom_array) = dp->phandle; |
260 | op->oprom_size = sizeof(int); | 260 | op->oprom_size = sizeof(int); |
261 | err = copyout(argp, op, bufsize + sizeof(int)); | 261 | err = copyout(argp, op, bufsize + sizeof(int)); |
262 | 262 | ||
@@ -273,7 +273,7 @@ static int oprompath2node(void __user *argp, struct device_node *dp, struct open | |||
273 | 273 | ||
274 | dp = of_find_node_by_path(op->oprom_array); | 274 | dp = of_find_node_by_path(op->oprom_array); |
275 | if (dp) | 275 | if (dp) |
276 | ph = dp->node; | 276 | ph = dp->phandle; |
277 | data->current_node = dp; | 277 | data->current_node = dp; |
278 | *((int *)op->oprom_array) = ph; | 278 | *((int *)op->oprom_array) = ph; |
279 | op->oprom_size = sizeof(int); | 279 | op->oprom_size = sizeof(int); |
@@ -540,7 +540,7 @@ static int opiocgetnext(unsigned int cmd, void __user *argp) | |||
540 | } | 540 | } |
541 | } | 541 | } |
542 | if (dp) | 542 | if (dp) |
543 | nd = dp->node; | 543 | nd = dp->phandle; |
544 | if (copy_to_user(argp, &nd, sizeof(phandle))) | 544 | if (copy_to_user(argp, &nd, sizeof(phandle))) |
545 | return -EFAULT; | 545 | return -EFAULT; |
546 | 546 | ||
@@ -570,7 +570,7 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file, | |||
570 | case OPIOCGETOPTNODE: | 570 | case OPIOCGETOPTNODE: |
571 | BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); | 571 | BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); |
572 | 572 | ||
573 | if (copy_to_user(argp, &options_node->node, sizeof(phandle))) | 573 | if (copy_to_user(argp, &options_node->phandle, sizeof(phandle))) |
574 | return -EFAULT; | 574 | return -EFAULT; |
575 | 575 | ||
576 | return 0; | 576 | return 0; |
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c index b898d382b7b0..e40cdfb7541f 100644 --- a/drivers/scsi/FlashPoint.c +++ b/drivers/scsi/FlashPoint.c | |||
@@ -3924,7 +3924,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card) | |||
3924 | { | 3924 | { |
3925 | struct sccb_mgr_tar_info *currTar_Info; | 3925 | struct sccb_mgr_tar_info *currTar_Info; |
3926 | 3926 | ||
3927 | if ((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN)) { | 3927 | if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) { |
3928 | return; | 3928 | return; |
3929 | } | 3929 | } |
3930 | currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; | 3930 | currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID]; |
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index a93a5040f087..136b49cea791 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -24,6 +24,10 @@ | |||
24 | #define FW_VER_LEN 32 | 24 | #define FW_VER_LEN 32 |
25 | #define MCC_Q_LEN 128 | 25 | #define MCC_Q_LEN 128 |
26 | #define MCC_CQ_LEN 256 | 26 | #define MCC_CQ_LEN 256 |
27 | #define MAX_MCC_CMD 16 | ||
28 | /* BladeEngine Generation numbers */ | ||
29 | #define BE_GEN2 2 | ||
30 | #define BE_GEN3 3 | ||
27 | 31 | ||
28 | struct be_dma_mem { | 32 | struct be_dma_mem { |
29 | void *va; | 33 | void *va; |
@@ -57,6 +61,11 @@ static inline void *queue_head_node(struct be_queue_info *q) | |||
57 | return q->dma_mem.va + q->head * q->entry_size; | 61 | return q->dma_mem.va + q->head * q->entry_size; |
58 | } | 62 | } |
59 | 63 | ||
64 | static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num) | ||
65 | { | ||
66 | return q->dma_mem.va + wrb_num * q->entry_size; | ||
67 | } | ||
68 | |||
60 | static inline void *queue_tail_node(struct be_queue_info *q) | 69 | static inline void *queue_tail_node(struct be_queue_info *q) |
61 | { | 70 | { |
62 | return q->dma_mem.va + q->tail * q->entry_size; | 71 | return q->dma_mem.va + q->tail * q->entry_size; |
@@ -104,15 +113,19 @@ struct be_ctrl_info { | |||
104 | spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ | 113 | spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ |
105 | spinlock_t mcc_cq_lock; | 114 | spinlock_t mcc_cq_lock; |
106 | 115 | ||
107 | /* MCC Async callback */ | 116 | wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1]; |
108 | void (*async_cb) (void *adapter, bool link_up); | 117 | unsigned int mcc_tag[MAX_MCC_CMD]; |
109 | void *adapter_ctxt; | 118 | unsigned int mcc_numtag[MAX_MCC_CMD + 1]; |
119 | unsigned short mcc_alloc_index; | ||
120 | unsigned short mcc_free_index; | ||
121 | unsigned int mcc_tag_available; | ||
110 | }; | 122 | }; |
111 | 123 | ||
112 | #include "be_cmds.h" | 124 | #include "be_cmds.h" |
113 | 125 | ||
114 | #define PAGE_SHIFT_4K 12 | 126 | #define PAGE_SHIFT_4K 12 |
115 | #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) | 127 | #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) |
128 | #define mcc_timeout 120000 /* 5s timeout */ | ||
116 | 129 | ||
117 | /* Returns number of pages spanned by the data starting at the given addr */ | 130 | /* Returns number of pages spanned by the data starting at the given addr */ |
118 | #define PAGES_4K_SPANNED(_address, size) \ | 131 | #define PAGES_4K_SPANNED(_address, size) \ |
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index f008708f1b08..67098578fba4 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -19,7 +19,7 @@ | |||
19 | #include "be_mgmt.h" | 19 | #include "be_mgmt.h" |
20 | #include "be_main.h" | 20 | #include "be_main.h" |
21 | 21 | ||
22 | static void be_mcc_notify(struct beiscsi_hba *phba) | 22 | void be_mcc_notify(struct beiscsi_hba *phba) |
23 | { | 23 | { |
24 | struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; | 24 | struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; |
25 | u32 val = 0; | 25 | u32 val = 0; |
@@ -29,6 +29,52 @@ static void be_mcc_notify(struct beiscsi_hba *phba) | |||
29 | iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); | 29 | iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); |
30 | } | 30 | } |
31 | 31 | ||
32 | unsigned int alloc_mcc_tag(struct beiscsi_hba *phba) | ||
33 | { | ||
34 | unsigned int tag = 0; | ||
35 | unsigned int num = 0; | ||
36 | |||
37 | mcc_tag_rdy: | ||
38 | if (phba->ctrl.mcc_tag_available) { | ||
39 | tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; | ||
40 | phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; | ||
41 | phba->ctrl.mcc_numtag[tag] = 0; | ||
42 | } else { | ||
43 | udelay(100); | ||
44 | num++; | ||
45 | if (num < mcc_timeout) | ||
46 | goto mcc_tag_rdy; | ||
47 | } | ||
48 | if (tag) { | ||
49 | phba->ctrl.mcc_tag_available--; | ||
50 | if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1)) | ||
51 | phba->ctrl.mcc_alloc_index = 0; | ||
52 | else | ||
53 | phba->ctrl.mcc_alloc_index++; | ||
54 | } | ||
55 | return tag; | ||
56 | } | ||
57 | |||
58 | void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag) | ||
59 | { | ||
60 | spin_lock(&ctrl->mbox_lock); | ||
61 | tag = tag & 0x000000FF; | ||
62 | ctrl->mcc_tag[ctrl->mcc_free_index] = tag; | ||
63 | if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1)) | ||
64 | ctrl->mcc_free_index = 0; | ||
65 | else | ||
66 | ctrl->mcc_free_index++; | ||
67 | ctrl->mcc_tag_available++; | ||
68 | spin_unlock(&ctrl->mbox_lock); | ||
69 | } | ||
70 | |||
71 | bool is_link_state_evt(u32 trailer) | ||
72 | { | ||
73 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | ||
74 | ASYNC_TRAILER_EVENT_CODE_MASK) == | ||
75 | ASYNC_EVENT_CODE_LINK_STATE); | ||
76 | } | ||
77 | |||
32 | static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) | 78 | static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) |
33 | { | 79 | { |
34 | if (compl->flags != 0) { | 80 | if (compl->flags != 0) { |
@@ -64,12 +110,30 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, | |||
64 | return 0; | 110 | return 0; |
65 | } | 111 | } |
66 | 112 | ||
67 | 113 | int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl, | |
68 | static inline bool is_link_state_evt(u32 trailer) | 114 | struct be_mcc_compl *compl) |
69 | { | 115 | { |
70 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | 116 | u16 compl_status, extd_status; |
71 | ASYNC_TRAILER_EVENT_CODE_MASK) == | 117 | unsigned short tag; |
72 | ASYNC_EVENT_CODE_LINK_STATE); | 118 | |
119 | be_dws_le_to_cpu(compl, 4); | ||
120 | |||
121 | compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & | ||
122 | CQE_STATUS_COMPL_MASK; | ||
123 | /* The ctrl.mcc_numtag[tag] is filled with | ||
124 | * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status, | ||
125 | * [7:0] = compl_status | ||
126 | */ | ||
127 | tag = (compl->tag0 & 0x000000FF); | ||
128 | extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & | ||
129 | CQE_STATUS_EXTD_MASK; | ||
130 | |||
131 | ctrl->mcc_numtag[tag] = 0x80000000; | ||
132 | ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000); | ||
133 | ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8; | ||
134 | ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF); | ||
135 | wake_up_interruptible(&ctrl->mcc_wait[tag]); | ||
136 | return 0; | ||
73 | } | 137 | } |
74 | 138 | ||
75 | static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba) | 139 | static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba) |
@@ -89,7 +153,7 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) | |||
89 | iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); | 153 | iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); |
90 | } | 154 | } |
91 | 155 | ||
92 | static void beiscsi_async_link_state_process(struct beiscsi_hba *phba, | 156 | void beiscsi_async_link_state_process(struct beiscsi_hba *phba, |
93 | struct be_async_event_link_state *evt) | 157 | struct be_async_event_link_state *evt) |
94 | { | 158 | { |
95 | switch (evt->port_link_status) { | 159 | switch (evt->port_link_status) { |
@@ -97,13 +161,13 @@ static void beiscsi_async_link_state_process(struct beiscsi_hba *phba, | |||
97 | SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n", | 161 | SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n", |
98 | evt->physical_port); | 162 | evt->physical_port); |
99 | phba->state |= BE_ADAPTER_LINK_DOWN; | 163 | phba->state |= BE_ADAPTER_LINK_DOWN; |
164 | iscsi_host_for_each_session(phba->shost, | ||
165 | be2iscsi_fail_session); | ||
100 | break; | 166 | break; |
101 | case ASYNC_EVENT_LINK_UP: | 167 | case ASYNC_EVENT_LINK_UP: |
102 | phba->state = BE_ADAPTER_UP; | 168 | phba->state = BE_ADAPTER_UP; |
103 | SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n", | 169 | SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n", |
104 | evt->physical_port); | 170 | evt->physical_port); |
105 | iscsi_host_for_each_session(phba->shost, | ||
106 | be2iscsi_fail_session); | ||
107 | break; | 171 | break; |
108 | default: | 172 | default: |
109 | SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on" | 173 | SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on" |
@@ -162,7 +226,6 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba) | |||
162 | /* Wait till no more pending mcc requests are present */ | 226 | /* Wait till no more pending mcc requests are present */ |
163 | static int be_mcc_wait_compl(struct beiscsi_hba *phba) | 227 | static int be_mcc_wait_compl(struct beiscsi_hba *phba) |
164 | { | 228 | { |
165 | #define mcc_timeout 120000 /* 5s timeout */ | ||
166 | int i, status; | 229 | int i, status; |
167 | for (i = 0; i < mcc_timeout; i++) { | 230 | for (i = 0; i < mcc_timeout; i++) { |
168 | status = beiscsi_process_mcc(phba); | 231 | status = beiscsi_process_mcc(phba); |
@@ -372,9 +435,10 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba) | |||
372 | 435 | ||
373 | BUG_ON(atomic_read(&mccq->used) >= mccq->len); | 436 | BUG_ON(atomic_read(&mccq->used) >= mccq->len); |
374 | wrb = queue_head_node(mccq); | 437 | wrb = queue_head_node(mccq); |
438 | memset(wrb, 0, sizeof(*wrb)); | ||
439 | wrb->tag0 = (mccq->head & 0x000000FF) << 16; | ||
375 | queue_head_inc(mccq); | 440 | queue_head_inc(mccq); |
376 | atomic_inc(&mccq->used); | 441 | atomic_inc(&mccq->used); |
377 | memset(wrb, 0, sizeof(*wrb)); | ||
378 | return wrb; | 442 | return wrb; |
379 | } | 443 | } |
380 | 444 | ||
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 5de8acb924cb..49fcc787ee8b 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -425,14 +425,20 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, | |||
425 | int be_poll_mcc(struct be_ctrl_info *ctrl); | 425 | int be_poll_mcc(struct be_ctrl_info *ctrl); |
426 | unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, | 426 | unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, |
427 | struct beiscsi_hba *phba); | 427 | struct beiscsi_hba *phba); |
428 | int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr); | 428 | unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba); |
429 | 429 | void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); | |
430 | /*ISCSI Functuions */ | 430 | /*ISCSI Functuions */ |
431 | int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); | 431 | int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); |
432 | 432 | ||
433 | struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); | 433 | struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); |
434 | struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); | 434 | struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); |
435 | int be_mcc_notify_wait(struct beiscsi_hba *phba); | 435 | int be_mcc_notify_wait(struct beiscsi_hba *phba); |
436 | void be_mcc_notify(struct beiscsi_hba *phba); | ||
437 | unsigned int alloc_mcc_tag(struct beiscsi_hba *phba); | ||
438 | void beiscsi_async_link_state_process(struct beiscsi_hba *phba, | ||
439 | struct be_async_event_link_state *evt); | ||
440 | int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl, | ||
441 | struct be_mcc_compl *compl); | ||
436 | 442 | ||
437 | int be_mbox_notify(struct be_ctrl_info *ctrl); | 443 | int be_mbox_notify(struct be_ctrl_info *ctrl); |
438 | 444 | ||
@@ -448,6 +454,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, | |||
448 | int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, | 454 | int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, |
449 | struct be_queue_info *wrbq); | 455 | struct be_queue_info *wrbq); |
450 | 456 | ||
457 | bool is_link_state_evt(u32 trailer); | ||
458 | |||
451 | struct be_default_pdu_context { | 459 | struct be_default_pdu_context { |
452 | u32 dw[4]; | 460 | u32 dw[4]; |
453 | } __packed; | 461 | } __packed; |
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index d587b0362f18..29a3aaf35f9f 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -101,6 +101,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session) | |||
101 | struct iscsi_session *sess = cls_session->dd_data; | 101 | struct iscsi_session *sess = cls_session->dd_data; |
102 | struct beiscsi_session *beiscsi_sess = sess->dd_data; | 102 | struct beiscsi_session *beiscsi_sess = sess->dd_data; |
103 | 103 | ||
104 | SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n"); | ||
104 | pci_pool_destroy(beiscsi_sess->bhs_pool); | 105 | pci_pool_destroy(beiscsi_sess->bhs_pool); |
105 | iscsi_session_teardown(cls_session); | 106 | iscsi_session_teardown(cls_session); |
106 | } | 107 | } |
@@ -224,6 +225,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, | |||
224 | struct beiscsi_conn *beiscsi_conn = conn->dd_data; | 225 | struct beiscsi_conn *beiscsi_conn = conn->dd_data; |
225 | int len = 0; | 226 | int len = 0; |
226 | 227 | ||
228 | SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param); | ||
227 | beiscsi_ep = beiscsi_conn->ep; | 229 | beiscsi_ep = beiscsi_conn->ep; |
228 | if (!beiscsi_ep) { | 230 | if (!beiscsi_ep) { |
229 | SE_DEBUG(DBG_LVL_1, | 231 | SE_DEBUG(DBG_LVL_1, |
@@ -254,6 +256,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, | |||
254 | struct iscsi_session *session = conn->session; | 256 | struct iscsi_session *session = conn->session; |
255 | int ret; | 257 | int ret; |
256 | 258 | ||
259 | SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param); | ||
257 | ret = iscsi_set_param(cls_conn, param, buf, buflen); | 260 | ret = iscsi_set_param(cls_conn, param, buf, buflen); |
258 | if (ret) | 261 | if (ret) |
259 | return ret; | 262 | return ret; |
@@ -271,8 +274,8 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, | |||
271 | conn->max_recv_dlength = 65536; | 274 | conn->max_recv_dlength = 65536; |
272 | break; | 275 | break; |
273 | case ISCSI_PARAM_MAX_BURST: | 276 | case ISCSI_PARAM_MAX_BURST: |
274 | if (session->first_burst > 262144) | 277 | if (session->max_burst > 262144) |
275 | session->first_burst = 262144; | 278 | session->max_burst = 262144; |
276 | break; | 279 | break; |
277 | default: | 280 | default: |
278 | return 0; | 281 | return 0; |
@@ -293,12 +296,41 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, | |||
293 | enum iscsi_host_param param, char *buf) | 296 | enum iscsi_host_param param, char *buf) |
294 | { | 297 | { |
295 | struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); | 298 | struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); |
299 | struct be_cmd_resp_get_mac_addr *resp; | ||
300 | struct be_mcc_wrb *wrb; | ||
301 | unsigned int tag, wrb_num; | ||
296 | int len = 0; | 302 | int len = 0; |
303 | unsigned short status, extd_status; | ||
304 | struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; | ||
297 | 305 | ||
306 | SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); | ||
298 | switch (param) { | 307 | switch (param) { |
299 | case ISCSI_HOST_PARAM_HWADDRESS: | 308 | case ISCSI_HOST_PARAM_HWADDRESS: |
300 | be_cmd_get_mac_addr(phba, phba->mac_address); | 309 | tag = be_cmd_get_mac_addr(phba); |
301 | len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); | 310 | if (!tag) { |
311 | SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n"); | ||
312 | return -1; | ||
313 | } else | ||
314 | wait_event_interruptible(phba->ctrl.mcc_wait[tag], | ||
315 | phba->ctrl.mcc_numtag[tag]); | ||
316 | |||
317 | wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; | ||
318 | extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; | ||
319 | status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; | ||
320 | if (status || extd_status) { | ||
321 | SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed" | ||
322 | " status = %d extd_status = %d \n", | ||
323 | status, extd_status); | ||
324 | free_mcc_tag(&phba->ctrl, tag); | ||
325 | return -1; | ||
326 | } else { | ||
327 | wrb = queue_get_wrb(mccq, wrb_num); | ||
328 | free_mcc_tag(&phba->ctrl, tag); | ||
329 | resp = embedded_payload(wrb); | ||
330 | memcpy(phba->mac_address, resp->mac_address, ETH_ALEN); | ||
331 | len = sysfs_format_mac(buf, phba->mac_address, | ||
332 | ETH_ALEN); | ||
333 | } | ||
302 | break; | 334 | break; |
303 | default: | 335 | default: |
304 | return iscsi_host_get_param(shost, param, buf); | 336 | return iscsi_host_get_param(shost, param, buf); |
@@ -378,6 +410,7 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) | |||
378 | struct beiscsi_endpoint *beiscsi_ep; | 410 | struct beiscsi_endpoint *beiscsi_ep; |
379 | struct beiscsi_offload_params params; | 411 | struct beiscsi_offload_params params; |
380 | 412 | ||
413 | SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n"); | ||
381 | memset(¶ms, 0, sizeof(struct beiscsi_offload_params)); | 414 | memset(¶ms, 0, sizeof(struct beiscsi_offload_params)); |
382 | beiscsi_ep = beiscsi_conn->ep; | 415 | beiscsi_ep = beiscsi_conn->ep; |
383 | if (!beiscsi_ep) | 416 | if (!beiscsi_ep) |
@@ -422,8 +455,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, | |||
422 | { | 455 | { |
423 | struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; | 456 | struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; |
424 | struct beiscsi_hba *phba = beiscsi_ep->phba; | 457 | struct beiscsi_hba *phba = beiscsi_ep->phba; |
458 | struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; | ||
459 | struct be_mcc_wrb *wrb; | ||
460 | struct tcp_connect_and_offload_out *ptcpcnct_out; | ||
461 | unsigned short status, extd_status; | ||
462 | unsigned int tag, wrb_num; | ||
425 | int ret = -1; | 463 | int ret = -1; |
426 | 464 | ||
465 | SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n"); | ||
427 | beiscsi_ep->ep_cid = beiscsi_get_cid(phba); | 466 | beiscsi_ep->ep_cid = beiscsi_get_cid(phba); |
428 | if (beiscsi_ep->ep_cid == 0xFFFF) { | 467 | if (beiscsi_ep->ep_cid == 0xFFFF) { |
429 | SE_DEBUG(DBG_LVL_1, "No free cid available\n"); | 468 | SE_DEBUG(DBG_LVL_1, "No free cid available\n"); |
@@ -431,15 +470,44 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, | |||
431 | } | 470 | } |
432 | SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ", | 471 | SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ", |
433 | beiscsi_ep->ep_cid); | 472 | beiscsi_ep->ep_cid); |
434 | phba->ep_array[beiscsi_ep->ep_cid] = ep; | 473 | phba->ep_array[beiscsi_ep->ep_cid - |
435 | if (beiscsi_ep->ep_cid > | 474 | phba->fw_config.iscsi_cid_start] = ep; |
436 | (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) { | 475 | if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + |
476 | phba->params.cxns_per_ctrl * 2)) { | ||
437 | SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); | 477 | SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); |
438 | return ret; | 478 | return ret; |
439 | } | 479 | } |
440 | 480 | ||
441 | beiscsi_ep->cid_vld = 0; | 481 | beiscsi_ep->cid_vld = 0; |
442 | return mgmt_open_connection(phba, dst_addr, beiscsi_ep); | 482 | tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep); |
483 | if (!tag) { | ||
484 | SE_DEBUG(DBG_LVL_1, | ||
485 | "mgmt_invalidate_connection Failed for cid=%d \n", | ||
486 | beiscsi_ep->ep_cid); | ||
487 | } else { | ||
488 | wait_event_interruptible(phba->ctrl.mcc_wait[tag], | ||
489 | phba->ctrl.mcc_numtag[tag]); | ||
490 | } | ||
491 | wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; | ||
492 | extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; | ||
493 | status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; | ||
494 | if (status || extd_status) { | ||
495 | SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed" | ||
496 | " status = %d extd_status = %d \n", | ||
497 | status, extd_status); | ||
498 | free_mcc_tag(&phba->ctrl, tag); | ||
499 | return -1; | ||
500 | } else { | ||
501 | wrb = queue_get_wrb(mccq, wrb_num); | ||
502 | free_mcc_tag(&phba->ctrl, tag); | ||
503 | |||
504 | ptcpcnct_out = embedded_payload(wrb); | ||
505 | beiscsi_ep = ep->dd_data; | ||
506 | beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; | ||
507 | beiscsi_ep->cid_vld = 1; | ||
508 | SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); | ||
509 | } | ||
510 | return 0; | ||
443 | } | 511 | } |
444 | 512 | ||
445 | /** | 513 | /** |
@@ -459,14 +527,12 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) | |||
459 | * beiscsi_free_ep - free endpoint | 527 | * beiscsi_free_ep - free endpoint |
460 | * @ep: pointer to iscsi endpoint structure | 528 | * @ep: pointer to iscsi endpoint structure |
461 | */ | 529 | */ |
462 | static void beiscsi_free_ep(struct iscsi_endpoint *ep) | 530 | static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) |
463 | { | 531 | { |
464 | struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; | ||
465 | struct beiscsi_hba *phba = beiscsi_ep->phba; | 532 | struct beiscsi_hba *phba = beiscsi_ep->phba; |
466 | 533 | ||
467 | beiscsi_put_cid(phba, beiscsi_ep->ep_cid); | 534 | beiscsi_put_cid(phba, beiscsi_ep->ep_cid); |
468 | beiscsi_ep->phba = NULL; | 535 | beiscsi_ep->phba = NULL; |
469 | iscsi_destroy_endpoint(ep); | ||
470 | } | 536 | } |
471 | 537 | ||
472 | /** | 538 | /** |
@@ -495,9 +561,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
495 | return ERR_PTR(ret); | 561 | return ERR_PTR(ret); |
496 | } | 562 | } |
497 | 563 | ||
498 | if (phba->state) { | 564 | if (phba->state != BE_ADAPTER_UP) { |
499 | ret = -EBUSY; | 565 | ret = -EBUSY; |
500 | SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n"); | 566 | SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n"); |
501 | return ERR_PTR(ret); | 567 | return ERR_PTR(ret); |
502 | } | 568 | } |
503 | 569 | ||
@@ -509,9 +575,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
509 | 575 | ||
510 | beiscsi_ep = ep->dd_data; | 576 | beiscsi_ep = ep->dd_data; |
511 | beiscsi_ep->phba = phba; | 577 | beiscsi_ep->phba = phba; |
512 | 578 | beiscsi_ep->openiscsi_ep = ep; | |
513 | if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) { | 579 | if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) { |
514 | SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); | 580 | SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n"); |
515 | ret = -ENOMEM; | 581 | ret = -ENOMEM; |
516 | goto free_ep; | 582 | goto free_ep; |
517 | } | 583 | } |
@@ -519,7 +585,7 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
519 | return ep; | 585 | return ep; |
520 | 586 | ||
521 | free_ep: | 587 | free_ep: |
522 | beiscsi_free_ep(ep); | 588 | beiscsi_free_ep(beiscsi_ep); |
523 | return ERR_PTR(ret); | 589 | return ERR_PTR(ret); |
524 | } | 590 | } |
525 | 591 | ||
@@ -546,20 +612,22 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
546 | * @ep: The iscsi endpoint | 612 | * @ep: The iscsi endpoint |
547 | * @flag: The type of connection closure | 613 | * @flag: The type of connection closure |
548 | */ | 614 | */ |
549 | static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag) | 615 | static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) |
550 | { | 616 | { |
551 | int ret = 0; | 617 | int ret = 0; |
552 | struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; | 618 | unsigned int tag; |
553 | struct beiscsi_hba *phba = beiscsi_ep->phba; | 619 | struct beiscsi_hba *phba = beiscsi_ep->phba; |
554 | 620 | ||
555 | if (MGMT_STATUS_SUCCESS != | 621 | tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag); |
556 | mgmt_upload_connection(phba, beiscsi_ep->ep_cid, | 622 | if (!tag) { |
557 | CONNECTION_UPLOAD_GRACEFUL)) { | ||
558 | SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x", | 623 | SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x", |
559 | beiscsi_ep->ep_cid); | 624 | beiscsi_ep->ep_cid); |
560 | ret = -1; | 625 | ret = -1; |
626 | } else { | ||
627 | wait_event_interruptible(phba->ctrl.mcc_wait[tag], | ||
628 | phba->ctrl.mcc_numtag[tag]); | ||
629 | free_mcc_tag(&phba->ctrl, tag); | ||
561 | } | 630 | } |
562 | |||
563 | return ret; | 631 | return ret; |
564 | } | 632 | } |
565 | 633 | ||
@@ -574,19 +642,17 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) | |||
574 | struct beiscsi_conn *beiscsi_conn; | 642 | struct beiscsi_conn *beiscsi_conn; |
575 | struct beiscsi_endpoint *beiscsi_ep; | 643 | struct beiscsi_endpoint *beiscsi_ep; |
576 | struct beiscsi_hba *phba; | 644 | struct beiscsi_hba *phba; |
577 | int flag = 0; | ||
578 | 645 | ||
579 | beiscsi_ep = ep->dd_data; | 646 | beiscsi_ep = ep->dd_data; |
580 | phba = beiscsi_ep->phba; | 647 | phba = beiscsi_ep->phba; |
581 | SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n"); | 648 | SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n", |
649 | beiscsi_ep->ep_cid); | ||
582 | 650 | ||
583 | if (beiscsi_ep->conn) { | 651 | if (beiscsi_ep->conn) { |
584 | beiscsi_conn = beiscsi_ep->conn; | 652 | beiscsi_conn = beiscsi_ep->conn; |
585 | iscsi_suspend_queue(beiscsi_conn->conn); | 653 | iscsi_suspend_queue(beiscsi_conn->conn); |
586 | beiscsi_close_conn(ep, flag); | ||
587 | } | 654 | } |
588 | 655 | ||
589 | beiscsi_free_ep(ep); | ||
590 | } | 656 | } |
591 | 657 | ||
592 | /** | 658 | /** |
@@ -619,23 +685,31 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
619 | struct iscsi_session *session = conn->session; | 685 | struct iscsi_session *session = conn->session; |
620 | struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); | 686 | struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); |
621 | struct beiscsi_hba *phba = iscsi_host_priv(shost); | 687 | struct beiscsi_hba *phba = iscsi_host_priv(shost); |
622 | unsigned int status; | 688 | unsigned int tag; |
623 | unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; | 689 | unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; |
624 | 690 | ||
625 | SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n"); | ||
626 | beiscsi_ep = beiscsi_conn->ep; | 691 | beiscsi_ep = beiscsi_conn->ep; |
627 | if (!beiscsi_ep) { | 692 | if (!beiscsi_ep) { |
628 | SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n"); | 693 | SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n"); |
629 | return; | 694 | return; |
630 | } | 695 | } |
631 | status = mgmt_invalidate_connection(phba, beiscsi_ep, | 696 | SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop ep_cid = %d\n", |
697 | beiscsi_ep->ep_cid); | ||
698 | tag = mgmt_invalidate_connection(phba, beiscsi_ep, | ||
632 | beiscsi_ep->ep_cid, 1, | 699 | beiscsi_ep->ep_cid, 1, |
633 | savecfg_flag); | 700 | savecfg_flag); |
634 | if (status != MGMT_STATUS_SUCCESS) { | 701 | if (!tag) { |
635 | SE_DEBUG(DBG_LVL_1, | 702 | SE_DEBUG(DBG_LVL_1, |
636 | "mgmt_invalidate_connection Failed for cid=%d \n", | 703 | "mgmt_invalidate_connection Failed for cid=%d \n", |
637 | beiscsi_ep->ep_cid); | 704 | beiscsi_ep->ep_cid); |
705 | } else { | ||
706 | wait_event_interruptible(phba->ctrl.mcc_wait[tag], | ||
707 | phba->ctrl.mcc_numtag[tag]); | ||
708 | free_mcc_tag(&phba->ctrl, tag); | ||
638 | } | 709 | } |
710 | beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL); | ||
711 | beiscsi_free_ep(beiscsi_ep); | ||
712 | iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); | ||
639 | beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); | 713 | beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); |
640 | iscsi_conn_stop(cls_conn, flag); | 714 | iscsi_conn_stop(cls_conn, flag); |
641 | } | 715 | } |
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index f92ffc5349fb..1f512c28cbf9 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 1a557fa77888..7c22616ab141 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -40,7 +40,6 @@ | |||
40 | static unsigned int be_iopoll_budget = 10; | 40 | static unsigned int be_iopoll_budget = 10; |
41 | static unsigned int be_max_phys_size = 64; | 41 | static unsigned int be_max_phys_size = 64; |
42 | static unsigned int enable_msix = 1; | 42 | static unsigned int enable_msix = 1; |
43 | static unsigned int ring_mode; | ||
44 | 43 | ||
45 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); | 44 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); |
46 | MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); | 45 | MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); |
@@ -62,10 +61,10 @@ static int beiscsi_slave_configure(struct scsi_device *sdev) | |||
62 | /*------------------- PCI Driver operations and data ----------------- */ | 61 | /*------------------- PCI Driver operations and data ----------------- */ |
63 | static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { | 62 | static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { |
64 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, | 63 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, |
64 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, | ||
65 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, | 65 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, |
66 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, | 66 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, |
67 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, | 67 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, |
68 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) }, | ||
69 | { 0 } | 68 | { 0 } |
70 | }; | 69 | }; |
71 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); | 70 | MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); |
@@ -112,6 +111,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) | |||
112 | memset(phba, 0, sizeof(*phba)); | 111 | memset(phba, 0, sizeof(*phba)); |
113 | phba->shost = shost; | 112 | phba->shost = shost; |
114 | phba->pcidev = pci_dev_get(pcidev); | 113 | phba->pcidev = pci_dev_get(pcidev); |
114 | pci_set_drvdata(pcidev, phba); | ||
115 | 115 | ||
116 | if (iscsi_host_add(shost, &phba->pcidev->dev)) | 116 | if (iscsi_host_add(shost, &phba->pcidev->dev)) |
117 | goto free_devices; | 117 | goto free_devices; |
@@ -143,6 +143,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, | |||
143 | struct pci_dev *pcidev) | 143 | struct pci_dev *pcidev) |
144 | { | 144 | { |
145 | u8 __iomem *addr; | 145 | u8 __iomem *addr; |
146 | int pcicfg_reg; | ||
146 | 147 | ||
147 | addr = ioremap_nocache(pci_resource_start(pcidev, 2), | 148 | addr = ioremap_nocache(pci_resource_start(pcidev, 2), |
148 | pci_resource_len(pcidev, 2)); | 149 | pci_resource_len(pcidev, 2)); |
@@ -159,13 +160,19 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, | |||
159 | phba->db_va = addr; | 160 | phba->db_va = addr; |
160 | phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); | 161 | phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); |
161 | 162 | ||
162 | addr = ioremap_nocache(pci_resource_start(pcidev, 1), | 163 | if (phba->generation == BE_GEN2) |
163 | pci_resource_len(pcidev, 1)); | 164 | pcicfg_reg = 1; |
165 | else | ||
166 | pcicfg_reg = 0; | ||
167 | |||
168 | addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), | ||
169 | pci_resource_len(pcidev, pcicfg_reg)); | ||
170 | |||
164 | if (addr == NULL) | 171 | if (addr == NULL) |
165 | goto pci_map_err; | 172 | goto pci_map_err; |
166 | phba->ctrl.pcicfg = addr; | 173 | phba->ctrl.pcicfg = addr; |
167 | phba->pci_va = addr; | 174 | phba->pci_va = addr; |
168 | phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1); | 175 | phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg); |
169 | return 0; | 176 | return 0; |
170 | 177 | ||
171 | pci_map_err: | 178 | pci_map_err: |
@@ -230,29 +237,27 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) | |||
230 | 237 | ||
231 | static void beiscsi_get_params(struct beiscsi_hba *phba) | 238 | static void beiscsi_get_params(struct beiscsi_hba *phba) |
232 | { | 239 | { |
233 | phba->params.ios_per_ctrl = BE2_IO_DEPTH; | 240 | phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count |
234 | phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS; | 241 | - (phba->fw_config.iscsi_cid_count |
235 | phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS; | 242 | + BE2_TMFS |
236 | phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2; | 243 | + BE2_NOPOUT_REQ)); |
244 | phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; | ||
245 | phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;; | ||
246 | phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;; | ||
237 | phba->params.num_sge_per_io = BE2_SGE; | 247 | phba->params.num_sge_per_io = BE2_SGE; |
238 | phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; | 248 | phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; |
239 | phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; | 249 | phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; |
240 | phba->params.eq_timer = 64; | 250 | phba->params.eq_timer = 64; |
241 | phba->params.num_eq_entries = | 251 | phba->params.num_eq_entries = |
242 | (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) / | 252 | (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 |
243 | 512) + 1) * 512; | 253 | + BE2_TMFS) / 512) + 1) * 512; |
244 | phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) | 254 | phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) |
245 | ? 1024 : phba->params.num_eq_entries; | 255 | ? 1024 : phba->params.num_eq_entries; |
246 | SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n", | 256 | SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n", |
247 | phba->params.num_eq_entries); | 257 | phba->params.num_eq_entries); |
248 | phba->params.num_cq_entries = | 258 | phba->params.num_cq_entries = |
249 | (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) / | 259 | (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 |
250 | 512) + 1) * 512; | 260 | + BE2_TMFS) / 512) + 1) * 512; |
251 | SE_DEBUG(DBG_LVL_8, | ||
252 | "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d" | ||
253 | "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n", | ||
254 | phba->params.num_cq_entries, BE2_CMDS_PER_CXN, | ||
255 | BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS); | ||
256 | phba->params.wrbs_per_cxn = 256; | 261 | phba->params.wrbs_per_cxn = 256; |
257 | } | 262 | } |
258 | 263 | ||
@@ -443,7 +448,7 @@ static irqreturn_t be_isr(int irq, void *dev_id) | |||
443 | if (phba->todo_mcc_cq) | 448 | if (phba->todo_mcc_cq) |
444 | queue_work(phba->wq, &phba->work_cqs); | 449 | queue_work(phba->wq, &phba->work_cqs); |
445 | 450 | ||
446 | if ((num_mcceq_processed) && (!num_ioeq_processed)) | 451 | if ((num_mcceq_processed) && (!num_ioeq_processed)) |
447 | hwi_ring_eq_db(phba, eq->id, 0, | 452 | hwi_ring_eq_db(phba, eq->id, 0, |
448 | (num_ioeq_processed + | 453 | (num_ioeq_processed + |
449 | num_mcceq_processed) , 1, 1); | 454 | num_mcceq_processed) , 1, 1); |
@@ -561,6 +566,7 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, | |||
561 | SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); | 566 | SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); |
562 | break; | 567 | break; |
563 | case ISCSI_OP_LOGIN_RSP: | 568 | case ISCSI_OP_LOGIN_RSP: |
569 | case ISCSI_OP_TEXT_RSP: | ||
564 | task = conn->login_task; | 570 | task = conn->login_task; |
565 | io_task = task->dd_data; | 571 | io_task = task->dd_data; |
566 | login_hdr = (struct iscsi_hdr *)ppdu; | 572 | login_hdr = (struct iscsi_hdr *)ppdu; |
@@ -631,29 +637,29 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) | |||
631 | * alloc_wrb_handle - To allocate a wrb handle | 637 | * alloc_wrb_handle - To allocate a wrb handle |
632 | * @phba: The hba pointer | 638 | * @phba: The hba pointer |
633 | * @cid: The cid to use for allocation | 639 | * @cid: The cid to use for allocation |
634 | * @index: index allocation and wrb index | ||
635 | * | 640 | * |
636 | * This happens under session_lock until submission to chip | 641 | * This happens under session_lock until submission to chip |
637 | */ | 642 | */ |
638 | struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, | 643 | struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) |
639 | int index) | ||
640 | { | 644 | { |
641 | struct hwi_wrb_context *pwrb_context; | 645 | struct hwi_wrb_context *pwrb_context; |
642 | struct hwi_controller *phwi_ctrlr; | 646 | struct hwi_controller *phwi_ctrlr; |
643 | struct wrb_handle *pwrb_handle; | 647 | struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; |
644 | 648 | ||
645 | phwi_ctrlr = phba->phwi_ctrlr; | 649 | phwi_ctrlr = phba->phwi_ctrlr; |
646 | pwrb_context = &phwi_ctrlr->wrb_context[cid]; | 650 | pwrb_context = &phwi_ctrlr->wrb_context[cid]; |
647 | if (pwrb_context->wrb_handles_available) { | 651 | if (pwrb_context->wrb_handles_available >= 2) { |
648 | pwrb_handle = pwrb_context->pwrb_handle_base[ | 652 | pwrb_handle = pwrb_context->pwrb_handle_base[ |
649 | pwrb_context->alloc_index]; | 653 | pwrb_context->alloc_index]; |
650 | pwrb_context->wrb_handles_available--; | 654 | pwrb_context->wrb_handles_available--; |
651 | pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index; | ||
652 | if (pwrb_context->alloc_index == | 655 | if (pwrb_context->alloc_index == |
653 | (phba->params.wrbs_per_cxn - 1)) | 656 | (phba->params.wrbs_per_cxn - 1)) |
654 | pwrb_context->alloc_index = 0; | 657 | pwrb_context->alloc_index = 0; |
655 | else | 658 | else |
656 | pwrb_context->alloc_index++; | 659 | pwrb_context->alloc_index++; |
660 | pwrb_handle_tmp = pwrb_context->pwrb_handle_base[ | ||
661 | pwrb_context->alloc_index]; | ||
662 | pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index; | ||
657 | } else | 663 | } else |
658 | pwrb_handle = NULL; | 664 | pwrb_handle = NULL; |
659 | return pwrb_handle; | 665 | return pwrb_handle; |
@@ -671,9 +677,7 @@ static void | |||
671 | free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, | 677 | free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, |
672 | struct wrb_handle *pwrb_handle) | 678 | struct wrb_handle *pwrb_handle) |
673 | { | 679 | { |
674 | if (!ring_mode) | 680 | pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; |
675 | pwrb_context->pwrb_handle_base[pwrb_context->free_index] = | ||
676 | pwrb_handle; | ||
677 | pwrb_context->wrb_handles_available++; | 681 | pwrb_context->wrb_handles_available++; |
678 | if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) | 682 | if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) |
679 | pwrb_context->free_index = 0; | 683 | pwrb_context->free_index = 0; |
@@ -790,6 +794,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, | |||
790 | memcpy(task->sc->sense_buffer, sense, | 794 | memcpy(task->sc->sense_buffer, sense, |
791 | min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); | 795 | min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); |
792 | } | 796 | } |
797 | |||
793 | if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) { | 798 | if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) { |
794 | if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] | 799 | if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] |
795 | & SOL_RES_CNT_MASK) | 800 | & SOL_RES_CNT_MASK) |
@@ -811,6 +816,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, | |||
811 | struct iscsi_conn *conn = beiscsi_conn->conn; | 816 | struct iscsi_conn *conn = beiscsi_conn->conn; |
812 | 817 | ||
813 | hdr = (struct iscsi_logout_rsp *)task->hdr; | 818 | hdr = (struct iscsi_logout_rsp *)task->hdr; |
819 | hdr->opcode = ISCSI_OP_LOGOUT_RSP; | ||
814 | hdr->t2wait = 5; | 820 | hdr->t2wait = 5; |
815 | hdr->t2retain = 0; | 821 | hdr->t2retain = 0; |
816 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] | 822 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] |
@@ -825,6 +831,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, | |||
825 | & SOL_EXP_CMD_SN_MASK) + | 831 | & SOL_EXP_CMD_SN_MASK) + |
826 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) | 832 | ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) |
827 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); | 833 | / 32] & SOL_CMD_WND_MASK) >> 24) - 1); |
834 | hdr->dlength[0] = 0; | ||
835 | hdr->dlength[1] = 0; | ||
836 | hdr->dlength[2] = 0; | ||
828 | hdr->hlength = 0; | 837 | hdr->hlength = 0; |
829 | hdr->itt = io_task->libiscsi_itt; | 838 | hdr->itt = io_task->libiscsi_itt; |
830 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); | 839 | __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); |
@@ -839,6 +848,7 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, | |||
839 | struct beiscsi_io_task *io_task = task->dd_data; | 848 | struct beiscsi_io_task *io_task = task->dd_data; |
840 | 849 | ||
841 | hdr = (struct iscsi_tm_rsp *)task->hdr; | 850 | hdr = (struct iscsi_tm_rsp *)task->hdr; |
851 | hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; | ||
842 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] | 852 | hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] |
843 | & SOL_FLAGS_MASK) >> 24) | 0x80; | 853 | & SOL_FLAGS_MASK) >> 24) | 0x80; |
844 | hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / | 854 | hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / |
@@ -859,7 +869,6 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, | |||
859 | { | 869 | { |
860 | struct hwi_wrb_context *pwrb_context; | 870 | struct hwi_wrb_context *pwrb_context; |
861 | struct wrb_handle *pwrb_handle = NULL; | 871 | struct wrb_handle *pwrb_handle = NULL; |
862 | struct sgl_handle *psgl_handle = NULL; | ||
863 | struct hwi_controller *phwi_ctrlr; | 872 | struct hwi_controller *phwi_ctrlr; |
864 | struct iscsi_task *task; | 873 | struct iscsi_task *task; |
865 | struct beiscsi_io_task *io_task; | 874 | struct beiscsi_io_task *io_task; |
@@ -867,22 +876,14 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, | |||
867 | struct iscsi_session *session = conn->session; | 876 | struct iscsi_session *session = conn->session; |
868 | 877 | ||
869 | phwi_ctrlr = phba->phwi_ctrlr; | 878 | phwi_ctrlr = phba->phwi_ctrlr; |
870 | if (ring_mode) { | 879 | pwrb_context = &phwi_ctrlr->wrb_context[((psol-> |
871 | psgl_handle = phba->sgl_hndl_array[((psol-> | ||
872 | dw[offsetof(struct amap_sol_cqe_ring, icd_index) / | ||
873 | 32] & SOL_ICD_INDEX_MASK) >> 6)]; | ||
874 | pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid]; | ||
875 | task = psgl_handle->task; | ||
876 | pwrb_handle = NULL; | ||
877 | } else { | ||
878 | pwrb_context = &phwi_ctrlr->wrb_context[((psol-> | ||
879 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & | 880 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & |
880 | SOL_CID_MASK) >> 6)]; | 881 | SOL_CID_MASK) >> 6) - |
881 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> | 882 | phba->fw_config.iscsi_cid_start]; |
883 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> | ||
882 | dw[offsetof(struct amap_sol_cqe, wrb_index) / | 884 | dw[offsetof(struct amap_sol_cqe, wrb_index) / |
883 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; | 885 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; |
884 | task = pwrb_handle->pio_handle; | 886 | task = pwrb_handle->pio_handle; |
885 | } | ||
886 | 887 | ||
887 | io_task = task->dd_data; | 888 | io_task = task->dd_data; |
888 | spin_lock(&phba->mgmt_sgl_lock); | 889 | spin_lock(&phba->mgmt_sgl_lock); |
@@ -923,31 +924,23 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, | |||
923 | struct iscsi_wrb *pwrb = NULL; | 924 | struct iscsi_wrb *pwrb = NULL; |
924 | struct hwi_controller *phwi_ctrlr; | 925 | struct hwi_controller *phwi_ctrlr; |
925 | struct iscsi_task *task; | 926 | struct iscsi_task *task; |
926 | struct sgl_handle *psgl_handle = NULL; | ||
927 | unsigned int type; | 927 | unsigned int type; |
928 | struct iscsi_conn *conn = beiscsi_conn->conn; | 928 | struct iscsi_conn *conn = beiscsi_conn->conn; |
929 | struct iscsi_session *session = conn->session; | 929 | struct iscsi_session *session = conn->session; |
930 | 930 | ||
931 | phwi_ctrlr = phba->phwi_ctrlr; | 931 | phwi_ctrlr = phba->phwi_ctrlr; |
932 | if (ring_mode) { | 932 | pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof |
933 | psgl_handle = phba->sgl_hndl_array[((psol-> | ||
934 | dw[offsetof(struct amap_sol_cqe_ring, icd_index) / | ||
935 | 32] & SOL_ICD_INDEX_MASK) >> 6)]; | ||
936 | task = psgl_handle->task; | ||
937 | type = psgl_handle->type; | ||
938 | } else { | ||
939 | pwrb_context = &phwi_ctrlr-> | ||
940 | wrb_context[((psol->dw[offsetof | ||
941 | (struct amap_sol_cqe, cid) / 32] | 933 | (struct amap_sol_cqe, cid) / 32] |
942 | & SOL_CID_MASK) >> 6)]; | 934 | & SOL_CID_MASK) >> 6) - |
943 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> | 935 | phba->fw_config.iscsi_cid_start]; |
936 | pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> | ||
944 | dw[offsetof(struct amap_sol_cqe, wrb_index) / | 937 | dw[offsetof(struct amap_sol_cqe, wrb_index) / |
945 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; | 938 | 32] & SOL_WRB_INDEX_MASK) >> 16)]; |
946 | task = pwrb_handle->pio_handle; | 939 | task = pwrb_handle->pio_handle; |
947 | pwrb = pwrb_handle->pwrb; | 940 | pwrb = pwrb_handle->pwrb; |
948 | type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & | 941 | type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & |
949 | WRB_TYPE_MASK) >> 28; | 942 | WRB_TYPE_MASK) >> 28; |
950 | } | 943 | |
951 | spin_lock_bh(&session->lock); | 944 | spin_lock_bh(&session->lock); |
952 | switch (type) { | 945 | switch (type) { |
953 | case HWH_TYPE_IO: | 946 | case HWH_TYPE_IO: |
@@ -978,15 +971,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, | |||
978 | break; | 971 | break; |
979 | 972 | ||
980 | default: | 973 | default: |
981 | if (ring_mode) | 974 | shost_printk(KERN_WARNING, phba->shost, |
982 | shost_printk(KERN_WARNING, phba->shost, | ||
983 | "In hwi_complete_cmd, unknown type = %d" | ||
984 | "icd_index 0x%x CID 0x%x\n", type, | ||
985 | ((psol->dw[offsetof(struct amap_sol_cqe_ring, | ||
986 | icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6), | ||
987 | psgl_handle->cid); | ||
988 | else | ||
989 | shost_printk(KERN_WARNING, phba->shost, | ||
990 | "In hwi_complete_cmd, unknown type = %d" | 975 | "In hwi_complete_cmd, unknown type = %d" |
991 | "wrb_index 0x%x CID 0x%x\n", type, | 976 | "wrb_index 0x%x CID 0x%x\n", type, |
992 | ((psol->dw[offsetof(struct amap_iscsi_wrb, | 977 | ((psol->dw[offsetof(struct amap_iscsi_wrb, |
@@ -1077,7 +1062,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba, | |||
1077 | 1062 | ||
1078 | WARN_ON(!pasync_handle); | 1063 | WARN_ON(!pasync_handle); |
1079 | 1064 | ||
1080 | pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid; | 1065 | pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - |
1066 | phba->fw_config.iscsi_cid_start; | ||
1081 | pasync_handle->is_header = is_header; | 1067 | pasync_handle->is_header = is_header; |
1082 | pasync_handle->buffer_len = ((pdpdu_cqe-> | 1068 | pasync_handle->buffer_len = ((pdpdu_cqe-> |
1083 | dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] | 1069 | dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] |
@@ -1327,9 +1313,10 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, | |||
1327 | } | 1313 | } |
1328 | 1314 | ||
1329 | status = beiscsi_process_async_pdu(beiscsi_conn, phba, | 1315 | status = beiscsi_process_async_pdu(beiscsi_conn, phba, |
1330 | beiscsi_conn->beiscsi_conn_cid, | 1316 | (beiscsi_conn->beiscsi_conn_cid - |
1331 | phdr, hdr_len, pfirst_buffer, | 1317 | phba->fw_config.iscsi_cid_start), |
1332 | buf_len); | 1318 | phdr, hdr_len, pfirst_buffer, |
1319 | buf_len); | ||
1333 | 1320 | ||
1334 | if (status == 0) | 1321 | if (status == 0) |
1335 | hwi_free_async_msg(phba, cri); | 1322 | hwi_free_async_msg(phba, cri); |
@@ -1422,6 +1409,48 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, | |||
1422 | hwi_post_async_buffers(phba, pasync_handle->is_header); | 1409 | hwi_post_async_buffers(phba, pasync_handle->is_header); |
1423 | } | 1410 | } |
1424 | 1411 | ||
1412 | static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) | ||
1413 | { | ||
1414 | struct be_queue_info *mcc_cq; | ||
1415 | struct be_mcc_compl *mcc_compl; | ||
1416 | unsigned int num_processed = 0; | ||
1417 | |||
1418 | mcc_cq = &phba->ctrl.mcc_obj.cq; | ||
1419 | mcc_compl = queue_tail_node(mcc_cq); | ||
1420 | mcc_compl->flags = le32_to_cpu(mcc_compl->flags); | ||
1421 | while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { | ||
1422 | |||
1423 | if (num_processed >= 32) { | ||
1424 | hwi_ring_cq_db(phba, mcc_cq->id, | ||
1425 | num_processed, 0, 0); | ||
1426 | num_processed = 0; | ||
1427 | } | ||
1428 | if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { | ||
1429 | /* Interpret flags as an async trailer */ | ||
1430 | if (is_link_state_evt(mcc_compl->flags)) | ||
1431 | /* Interpret compl as a async link evt */ | ||
1432 | beiscsi_async_link_state_process(phba, | ||
1433 | (struct be_async_event_link_state *) mcc_compl); | ||
1434 | else | ||
1435 | SE_DEBUG(DBG_LVL_1, | ||
1436 | " Unsupported Async Event, flags" | ||
1437 | " = 0x%08x \n", mcc_compl->flags); | ||
1438 | } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { | ||
1439 | be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); | ||
1440 | atomic_dec(&phba->ctrl.mcc_obj.q.used); | ||
1441 | } | ||
1442 | |||
1443 | mcc_compl->flags = 0; | ||
1444 | queue_tail_inc(mcc_cq); | ||
1445 | mcc_compl = queue_tail_node(mcc_cq); | ||
1446 | mcc_compl->flags = le32_to_cpu(mcc_compl->flags); | ||
1447 | num_processed++; | ||
1448 | } | ||
1449 | |||
1450 | if (num_processed > 0) | ||
1451 | hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0); | ||
1452 | |||
1453 | } | ||
1425 | 1454 | ||
1426 | static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | 1455 | static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) |
1427 | { | 1456 | { |
@@ -1431,7 +1460,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | |||
1431 | unsigned int num_processed = 0; | 1460 | unsigned int num_processed = 0; |
1432 | unsigned int tot_nump = 0; | 1461 | unsigned int tot_nump = 0; |
1433 | struct beiscsi_conn *beiscsi_conn; | 1462 | struct beiscsi_conn *beiscsi_conn; |
1434 | struct sgl_handle *psgl_handle = NULL; | 1463 | struct beiscsi_endpoint *beiscsi_ep; |
1464 | struct iscsi_endpoint *ep; | ||
1435 | struct beiscsi_hba *phba; | 1465 | struct beiscsi_hba *phba; |
1436 | 1466 | ||
1437 | cq = pbe_eq->cq; | 1467 | cq = pbe_eq->cq; |
@@ -1442,32 +1472,13 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | |||
1442 | CQE_VALID_MASK) { | 1472 | CQE_VALID_MASK) { |
1443 | be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); | 1473 | be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); |
1444 | 1474 | ||
1445 | if (ring_mode) { | 1475 | ep = phba->ep_array[(u32) ((sol-> |
1446 | psgl_handle = phba->sgl_hndl_array[((sol-> | 1476 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & |
1447 | dw[offsetof(struct amap_sol_cqe_ring, | 1477 | SOL_CID_MASK) >> 6) - |
1448 | icd_index) / 32] & SOL_ICD_INDEX_MASK) | 1478 | phba->fw_config.iscsi_cid_start]; |
1449 | >> 6)]; | ||
1450 | beiscsi_conn = phba->conn_table[psgl_handle->cid]; | ||
1451 | if (!beiscsi_conn || !beiscsi_conn->ep) { | ||
1452 | shost_printk(KERN_WARNING, phba->shost, | ||
1453 | "Connection table empty for cid = %d\n", | ||
1454 | psgl_handle->cid); | ||
1455 | return 0; | ||
1456 | } | ||
1457 | 1479 | ||
1458 | } else { | 1480 | beiscsi_ep = ep->dd_data; |
1459 | beiscsi_conn = phba->conn_table[(u32) (sol-> | 1481 | beiscsi_conn = beiscsi_ep->conn; |
1460 | dw[offsetof(struct amap_sol_cqe, cid) / 32] & | ||
1461 | SOL_CID_MASK) >> 6]; | ||
1462 | |||
1463 | if (!beiscsi_conn || !beiscsi_conn->ep) { | ||
1464 | shost_printk(KERN_WARNING, phba->shost, | ||
1465 | "Connection table empty for cid = %d\n", | ||
1466 | (u32)(sol->dw[offsetof(struct amap_sol_cqe, | ||
1467 | cid) / 32] & SOL_CID_MASK) >> 6); | ||
1468 | return 0; | ||
1469 | } | ||
1470 | } | ||
1471 | 1482 | ||
1472 | if (num_processed >= 32) { | 1483 | if (num_processed >= 32) { |
1473 | hwi_ring_cq_db(phba, cq->id, | 1484 | hwi_ring_cq_db(phba, cq->id, |
@@ -1511,21 +1522,13 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | |||
1511 | case CMD_CXN_KILLED_ITT_INVALID: | 1522 | case CMD_CXN_KILLED_ITT_INVALID: |
1512 | case CMD_CXN_KILLED_SEQ_OUTOFORDER: | 1523 | case CMD_CXN_KILLED_SEQ_OUTOFORDER: |
1513 | case CMD_CXN_KILLED_INVALID_DATASN_RCVD: | 1524 | case CMD_CXN_KILLED_INVALID_DATASN_RCVD: |
1514 | if (ring_mode) { | 1525 | SE_DEBUG(DBG_LVL_1, |
1515 | SE_DEBUG(DBG_LVL_1, | ||
1516 | "CQ Error notification for cmd.. " | ||
1517 | "code %d cid 0x%x\n", | ||
1518 | sol->dw[offsetof(struct amap_sol_cqe, code) / | ||
1519 | 32] & CQE_CODE_MASK, psgl_handle->cid); | ||
1520 | } else { | ||
1521 | SE_DEBUG(DBG_LVL_1, | ||
1522 | "CQ Error notification for cmd.. " | 1526 | "CQ Error notification for cmd.. " |
1523 | "code %d cid 0x%x\n", | 1527 | "code %d cid 0x%x\n", |
1524 | sol->dw[offsetof(struct amap_sol_cqe, code) / | 1528 | sol->dw[offsetof(struct amap_sol_cqe, code) / |
1525 | 32] & CQE_CODE_MASK, | 1529 | 32] & CQE_CODE_MASK, |
1526 | (sol->dw[offsetof(struct amap_sol_cqe, cid) / | 1530 | (sol->dw[offsetof(struct amap_sol_cqe, cid) / |
1527 | 32] & SOL_CID_MASK)); | 1531 | 32] & SOL_CID_MASK)); |
1528 | } | ||
1529 | break; | 1532 | break; |
1530 | case UNSOL_DATA_DIGEST_ERROR_NOTIFY: | 1533 | case UNSOL_DATA_DIGEST_ERROR_NOTIFY: |
1531 | SE_DEBUG(DBG_LVL_1, | 1534 | SE_DEBUG(DBG_LVL_1, |
@@ -1547,37 +1550,23 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | |||
1547 | case CXN_KILLED_OVER_RUN_RESIDUAL: | 1550 | case CXN_KILLED_OVER_RUN_RESIDUAL: |
1548 | case CXN_KILLED_UNDER_RUN_RESIDUAL: | 1551 | case CXN_KILLED_UNDER_RUN_RESIDUAL: |
1549 | case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: | 1552 | case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: |
1550 | if (ring_mode) { | 1553 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " |
1551 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " | ||
1552 | "0x%x...\n", | ||
1553 | sol->dw[offsetof(struct amap_sol_cqe, code) / | ||
1554 | 32] & CQE_CODE_MASK, psgl_handle->cid); | ||
1555 | } else { | ||
1556 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " | ||
1557 | "0x%x...\n", | 1554 | "0x%x...\n", |
1558 | sol->dw[offsetof(struct amap_sol_cqe, code) / | 1555 | sol->dw[offsetof(struct amap_sol_cqe, code) / |
1559 | 32] & CQE_CODE_MASK, | 1556 | 32] & CQE_CODE_MASK, |
1560 | sol->dw[offsetof(struct amap_sol_cqe, cid) / | 1557 | (sol->dw[offsetof(struct amap_sol_cqe, cid) / |
1561 | 32] & CQE_CID_MASK); | 1558 | 32] & CQE_CID_MASK)); |
1562 | } | ||
1563 | iscsi_conn_failure(beiscsi_conn->conn, | 1559 | iscsi_conn_failure(beiscsi_conn->conn, |
1564 | ISCSI_ERR_CONN_FAILED); | 1560 | ISCSI_ERR_CONN_FAILED); |
1565 | break; | 1561 | break; |
1566 | case CXN_KILLED_RST_SENT: | 1562 | case CXN_KILLED_RST_SENT: |
1567 | case CXN_KILLED_RST_RCVD: | 1563 | case CXN_KILLED_RST_RCVD: |
1568 | if (ring_mode) { | 1564 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" |
1569 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" | ||
1570 | "received/sent on CID 0x%x...\n", | ||
1571 | sol->dw[offsetof(struct amap_sol_cqe, code) / | ||
1572 | 32] & CQE_CODE_MASK, psgl_handle->cid); | ||
1573 | } else { | ||
1574 | SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" | ||
1575 | "received/sent on CID 0x%x...\n", | 1565 | "received/sent on CID 0x%x...\n", |
1576 | sol->dw[offsetof(struct amap_sol_cqe, code) / | 1566 | sol->dw[offsetof(struct amap_sol_cqe, code) / |
1577 | 32] & CQE_CODE_MASK, | 1567 | 32] & CQE_CODE_MASK, |
1578 | sol->dw[offsetof(struct amap_sol_cqe, cid) / | 1568 | (sol->dw[offsetof(struct amap_sol_cqe, cid) / |
1579 | 32] & CQE_CID_MASK); | 1569 | 32] & CQE_CID_MASK)); |
1580 | } | ||
1581 | iscsi_conn_failure(beiscsi_conn->conn, | 1570 | iscsi_conn_failure(beiscsi_conn->conn, |
1582 | ISCSI_ERR_CONN_FAILED); | 1571 | ISCSI_ERR_CONN_FAILED); |
1583 | break; | 1572 | break; |
@@ -1586,8 +1575,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | |||
1586 | "received on CID 0x%x...\n", | 1575 | "received on CID 0x%x...\n", |
1587 | sol->dw[offsetof(struct amap_sol_cqe, code) / | 1576 | sol->dw[offsetof(struct amap_sol_cqe, code) / |
1588 | 32] & CQE_CODE_MASK, | 1577 | 32] & CQE_CODE_MASK, |
1589 | sol->dw[offsetof(struct amap_sol_cqe, cid) / | 1578 | (sol->dw[offsetof(struct amap_sol_cqe, cid) / |
1590 | 32] & CQE_CID_MASK); | 1579 | 32] & CQE_CID_MASK)); |
1591 | break; | 1580 | break; |
1592 | } | 1581 | } |
1593 | 1582 | ||
@@ -1604,7 +1593,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) | |||
1604 | return tot_nump; | 1593 | return tot_nump; |
1605 | } | 1594 | } |
1606 | 1595 | ||
1607 | static void beiscsi_process_all_cqs(struct work_struct *work) | 1596 | void beiscsi_process_all_cqs(struct work_struct *work) |
1608 | { | 1597 | { |
1609 | unsigned long flags; | 1598 | unsigned long flags; |
1610 | struct hwi_controller *phwi_ctrlr; | 1599 | struct hwi_controller *phwi_ctrlr; |
@@ -1624,6 +1613,7 @@ static void beiscsi_process_all_cqs(struct work_struct *work) | |||
1624 | spin_lock_irqsave(&phba->isr_lock, flags); | 1613 | spin_lock_irqsave(&phba->isr_lock, flags); |
1625 | phba->todo_mcc_cq = 0; | 1614 | phba->todo_mcc_cq = 0; |
1626 | spin_unlock_irqrestore(&phba->isr_lock, flags); | 1615 | spin_unlock_irqrestore(&phba->isr_lock, flags); |
1616 | beiscsi_process_mcc_isr(phba); | ||
1627 | } | 1617 | } |
1628 | 1618 | ||
1629 | if (phba->todo_cq) { | 1619 | if (phba->todo_cq) { |
@@ -1668,7 +1658,8 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, | |||
1668 | io_task->bhs_pa.u.a32.address_hi); | 1658 | io_task->bhs_pa.u.a32.address_hi); |
1669 | 1659 | ||
1670 | l_sg = sg; | 1660 | l_sg = sg; |
1671 | for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) { | 1661 | for (index = 0; (index < num_sg) && (index < 2); index++, |
1662 | sg = sg_next(sg)) { | ||
1672 | if (index == 0) { | 1663 | if (index == 0) { |
1673 | sg_len = sg_dma_len(sg); | 1664 | sg_len = sg_dma_len(sg); |
1674 | addr = (u64) sg_dma_address(sg); | 1665 | addr = (u64) sg_dma_address(sg); |
@@ -1679,11 +1670,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, | |||
1679 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, | 1670 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, |
1680 | sg_len); | 1671 | sg_len); |
1681 | sge_len = sg_len; | 1672 | sge_len = sg_len; |
1682 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, | ||
1683 | 1); | ||
1684 | } else { | 1673 | } else { |
1685 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, | ||
1686 | 0); | ||
1687 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, | 1674 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, |
1688 | pwrb, sge_len); | 1675 | pwrb, sge_len); |
1689 | sg_len = sg_dma_len(sg); | 1676 | sg_len = sg_dma_len(sg); |
@@ -1706,13 +1693,27 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, | |||
1706 | AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, | 1693 | AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, |
1707 | io_task->bhs_pa.u.a32.address_lo); | 1694 | io_task->bhs_pa.u.a32.address_lo); |
1708 | 1695 | ||
1709 | if (num_sg == 2) | 1696 | if (num_sg == 1) { |
1710 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1); | 1697 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, |
1698 | 1); | ||
1699 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, | ||
1700 | 0); | ||
1701 | } else if (num_sg == 2) { | ||
1702 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, | ||
1703 | 0); | ||
1704 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, | ||
1705 | 1); | ||
1706 | } else { | ||
1707 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, | ||
1708 | 0); | ||
1709 | AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, | ||
1710 | 0); | ||
1711 | } | ||
1711 | sg = l_sg; | 1712 | sg = l_sg; |
1712 | psgl++; | 1713 | psgl++; |
1713 | psgl++; | 1714 | psgl++; |
1714 | offset = 0; | 1715 | offset = 0; |
1715 | for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) { | 1716 | for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { |
1716 | sg_len = sg_dma_len(sg); | 1717 | sg_len = sg_dma_len(sg); |
1717 | addr = (u64) sg_dma_address(sg); | 1718 | addr = (u64) sg_dma_address(sg); |
1718 | AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, | 1719 | AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, |
@@ -2048,10 +2049,9 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) | |||
2048 | } | 2049 | } |
2049 | idx = 0; | 2050 | idx = 0; |
2050 | pwrb = mem_descr_wrb->mem_array[idx].virtual_address; | 2051 | pwrb = mem_descr_wrb->mem_array[idx].virtual_address; |
2051 | num_cxn_wrb = | 2052 | num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / |
2052 | ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) * | 2053 | ((sizeof(struct iscsi_wrb) * |
2053 | phba->params.wrbs_per_cxn); | 2054 | phba->params.wrbs_per_cxn)); |
2054 | |||
2055 | for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) { | 2055 | for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) { |
2056 | pwrb_context = &phwi_ctrlr->wrb_context[index]; | 2056 | pwrb_context = &phwi_ctrlr->wrb_context[index]; |
2057 | if (num_cxn_wrb) { | 2057 | if (num_cxn_wrb) { |
@@ -2064,9 +2064,9 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) | |||
2064 | } else { | 2064 | } else { |
2065 | idx++; | 2065 | idx++; |
2066 | pwrb = mem_descr_wrb->mem_array[idx].virtual_address; | 2066 | pwrb = mem_descr_wrb->mem_array[idx].virtual_address; |
2067 | num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) / | 2067 | num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / |
2068 | (sizeof(struct iscsi_wrb)) * | 2068 | ((sizeof(struct iscsi_wrb) * |
2069 | phba->params.wrbs_per_cxn); | 2069 | phba->params.wrbs_per_cxn)); |
2070 | for (j = 0; j < phba->params.wrbs_per_cxn; j++) { | 2070 | for (j = 0; j < phba->params.wrbs_per_cxn; j++) { |
2071 | pwrb_handle = pwrb_context->pwrb_handle_base[j]; | 2071 | pwrb_handle = pwrb_context->pwrb_handle_base[j]; |
2072 | pwrb_handle->pwrb = pwrb; | 2072 | pwrb_handle->pwrb = pwrb; |
@@ -2383,7 +2383,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba, | |||
2383 | &paddr); | 2383 | &paddr); |
2384 | if (!cq_vaddress) | 2384 | if (!cq_vaddress) |
2385 | goto create_cq_error; | 2385 | goto create_cq_error; |
2386 | ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2, | 2386 | ret = be_fill_queue(cq, phba->params.num_cq_entries, |
2387 | sizeof(struct sol_cqe), cq_vaddress); | 2387 | sizeof(struct sol_cqe), cq_vaddress); |
2388 | if (ret) { | 2388 | if (ret) { |
2389 | shost_printk(KERN_ERR, phba->shost, | 2389 | shost_printk(KERN_ERR, phba->shost, |
@@ -2634,7 +2634,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, | |||
2634 | "wrbq create failed."); | 2634 | "wrbq create failed."); |
2635 | return status; | 2635 | return status; |
2636 | } | 2636 | } |
2637 | phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id; | 2637 | phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. |
2638 | id; | ||
2638 | } | 2639 | } |
2639 | kfree(pwrb_arr); | 2640 | kfree(pwrb_arr); |
2640 | return 0; | 2641 | return 0; |
@@ -2803,17 +2804,6 @@ static int hwi_init_port(struct beiscsi_hba *phba) | |||
2803 | goto error; | 2804 | goto error; |
2804 | } | 2805 | } |
2805 | 2806 | ||
2806 | if (phba->fw_config.iscsi_features == 0x1) | ||
2807 | ring_mode = 1; | ||
2808 | else | ||
2809 | ring_mode = 0; | ||
2810 | status = mgmt_get_fw_config(ctrl, phba); | ||
2811 | if (status != 0) { | ||
2812 | shost_printk(KERN_ERR, phba->shost, | ||
2813 | "Error getting fw config\n"); | ||
2814 | goto error; | ||
2815 | } | ||
2816 | |||
2817 | status = beiscsi_create_cqs(phba, phwi_context); | 2807 | status = beiscsi_create_cqs(phba, phwi_context); |
2818 | if (status != 0) { | 2808 | if (status != 0) { |
2819 | shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); | 2809 | shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); |
@@ -2941,17 +2931,6 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) | |||
2941 | phba->io_sgl_hndl_avbl = 0; | 2931 | phba->io_sgl_hndl_avbl = 0; |
2942 | phba->eh_sgl_hndl_avbl = 0; | 2932 | phba->eh_sgl_hndl_avbl = 0; |
2943 | 2933 | ||
2944 | if (ring_mode) { | ||
2945 | phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) * | ||
2946 | phba->params.icds_per_ctrl, | ||
2947 | GFP_KERNEL); | ||
2948 | if (!phba->sgl_hndl_array) { | ||
2949 | shost_printk(KERN_ERR, phba->shost, | ||
2950 | "Mem Alloc Failed. Failing to load\n"); | ||
2951 | return -ENOMEM; | ||
2952 | } | ||
2953 | } | ||
2954 | |||
2955 | mem_descr_sglh = phba->init_mem; | 2934 | mem_descr_sglh = phba->init_mem; |
2956 | mem_descr_sglh += HWI_MEM_SGLH; | 2935 | mem_descr_sglh += HWI_MEM_SGLH; |
2957 | if (1 == mem_descr_sglh->num_elements) { | 2936 | if (1 == mem_descr_sglh->num_elements) { |
@@ -2959,8 +2938,6 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) | |||
2959 | phba->params.ios_per_ctrl, | 2938 | phba->params.ios_per_ctrl, |
2960 | GFP_KERNEL); | 2939 | GFP_KERNEL); |
2961 | if (!phba->io_sgl_hndl_base) { | 2940 | if (!phba->io_sgl_hndl_base) { |
2962 | if (ring_mode) | ||
2963 | kfree(phba->sgl_hndl_array); | ||
2964 | shost_printk(KERN_ERR, phba->shost, | 2941 | shost_printk(KERN_ERR, phba->shost, |
2965 | "Mem Alloc Failed. Failing to load\n"); | 2942 | "Mem Alloc Failed. Failing to load\n"); |
2966 | return -ENOMEM; | 2943 | return -ENOMEM; |
@@ -3032,7 +3009,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) | |||
3032 | AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); | 3009 | AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); |
3033 | pfrag += phba->params.num_sge_per_io; | 3010 | pfrag += phba->params.num_sge_per_io; |
3034 | psgl_handle->sgl_index = | 3011 | psgl_handle->sgl_index = |
3035 | phba->fw_config.iscsi_cid_start + arr_index++; | 3012 | phba->fw_config.iscsi_icd_start + arr_index++; |
3036 | } | 3013 | } |
3037 | idx++; | 3014 | idx++; |
3038 | } | 3015 | } |
@@ -3047,7 +3024,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) | |||
3047 | { | 3024 | { |
3048 | int i, new_cid; | 3025 | int i, new_cid; |
3049 | 3026 | ||
3050 | phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl, | 3027 | phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, |
3051 | GFP_KERNEL); | 3028 | GFP_KERNEL); |
3052 | if (!phba->cid_array) { | 3029 | if (!phba->cid_array) { |
3053 | shost_printk(KERN_ERR, phba->shost, | 3030 | shost_printk(KERN_ERR, phba->shost, |
@@ -3055,7 +3032,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) | |||
3055 | "hba_setup_cid_tbls\n"); | 3032 | "hba_setup_cid_tbls\n"); |
3056 | return -ENOMEM; | 3033 | return -ENOMEM; |
3057 | } | 3034 | } |
3058 | phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) * | 3035 | phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * |
3059 | phba->params.cxns_per_ctrl * 2, GFP_KERNEL); | 3036 | phba->params.cxns_per_ctrl * 2, GFP_KERNEL); |
3060 | if (!phba->ep_array) { | 3037 | if (!phba->ep_array) { |
3061 | shost_printk(KERN_ERR, phba->shost, | 3038 | shost_printk(KERN_ERR, phba->shost, |
@@ -3064,7 +3041,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) | |||
3064 | kfree(phba->cid_array); | 3041 | kfree(phba->cid_array); |
3065 | return -ENOMEM; | 3042 | return -ENOMEM; |
3066 | } | 3043 | } |
3067 | new_cid = phba->fw_config.iscsi_icd_start; | 3044 | new_cid = phba->fw_config.iscsi_cid_start; |
3068 | for (i = 0; i < phba->params.cxns_per_ctrl; i++) { | 3045 | for (i = 0; i < phba->params.cxns_per_ctrl; i++) { |
3069 | phba->cid_array[i] = new_cid; | 3046 | phba->cid_array[i] = new_cid; |
3070 | new_cid += 2; | 3047 | new_cid += 2; |
@@ -3145,8 +3122,6 @@ static int beiscsi_init_port(struct beiscsi_hba *phba) | |||
3145 | if (hba_setup_cid_tbls(phba)) { | 3122 | if (hba_setup_cid_tbls(phba)) { |
3146 | shost_printk(KERN_ERR, phba->shost, | 3123 | shost_printk(KERN_ERR, phba->shost, |
3147 | "Failed in hba_setup_cid_tbls\n"); | 3124 | "Failed in hba_setup_cid_tbls\n"); |
3148 | if (ring_mode) | ||
3149 | kfree(phba->sgl_hndl_array); | ||
3150 | kfree(phba->io_sgl_hndl_base); | 3125 | kfree(phba->io_sgl_hndl_base); |
3151 | kfree(phba->eh_sgl_hndl_base); | 3126 | kfree(phba->eh_sgl_hndl_base); |
3152 | goto do_cleanup_ctrlr; | 3127 | goto do_cleanup_ctrlr; |
@@ -3166,6 +3141,7 @@ static void hwi_purge_eq(struct beiscsi_hba *phba) | |||
3166 | struct be_queue_info *eq; | 3141 | struct be_queue_info *eq; |
3167 | struct be_eq_entry *eqe = NULL; | 3142 | struct be_eq_entry *eqe = NULL; |
3168 | int i, eq_msix; | 3143 | int i, eq_msix; |
3144 | unsigned int num_processed; | ||
3169 | 3145 | ||
3170 | phwi_ctrlr = phba->phwi_ctrlr; | 3146 | phwi_ctrlr = phba->phwi_ctrlr; |
3171 | phwi_context = phwi_ctrlr->phwi_ctxt; | 3147 | phwi_context = phwi_ctrlr->phwi_ctxt; |
@@ -3177,13 +3153,17 @@ static void hwi_purge_eq(struct beiscsi_hba *phba) | |||
3177 | for (i = 0; i < (phba->num_cpus + eq_msix); i++) { | 3153 | for (i = 0; i < (phba->num_cpus + eq_msix); i++) { |
3178 | eq = &phwi_context->be_eq[i].q; | 3154 | eq = &phwi_context->be_eq[i].q; |
3179 | eqe = queue_tail_node(eq); | 3155 | eqe = queue_tail_node(eq); |
3180 | 3156 | num_processed = 0; | |
3181 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] | 3157 | while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] |
3182 | & EQE_VALID_MASK) { | 3158 | & EQE_VALID_MASK) { |
3183 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); | 3159 | AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); |
3184 | queue_tail_inc(eq); | 3160 | queue_tail_inc(eq); |
3185 | eqe = queue_tail_node(eq); | 3161 | eqe = queue_tail_node(eq); |
3162 | num_processed++; | ||
3186 | } | 3163 | } |
3164 | |||
3165 | if (num_processed) | ||
3166 | hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); | ||
3187 | } | 3167 | } |
3188 | } | 3168 | } |
3189 | 3169 | ||
@@ -3195,10 +3175,9 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba) | |||
3195 | if (mgmt_status) | 3175 | if (mgmt_status) |
3196 | shost_printk(KERN_WARNING, phba->shost, | 3176 | shost_printk(KERN_WARNING, phba->shost, |
3197 | "mgmt_epfw_cleanup FAILED \n"); | 3177 | "mgmt_epfw_cleanup FAILED \n"); |
3198 | hwi_cleanup(phba); | 3178 | |
3199 | hwi_purge_eq(phba); | 3179 | hwi_purge_eq(phba); |
3200 | if (ring_mode) | 3180 | hwi_cleanup(phba); |
3201 | kfree(phba->sgl_hndl_array); | ||
3202 | kfree(phba->io_sgl_hndl_base); | 3181 | kfree(phba->io_sgl_hndl_base); |
3203 | kfree(phba->eh_sgl_hndl_base); | 3182 | kfree(phba->eh_sgl_hndl_base); |
3204 | kfree(phba->cid_array); | 3183 | kfree(phba->cid_array); |
@@ -3219,7 +3198,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, | |||
3219 | * We can always use 0 here because it is reserved by libiscsi for | 3198 | * We can always use 0 here because it is reserved by libiscsi for |
3220 | * login/startup related tasks. | 3199 | * login/startup related tasks. |
3221 | */ | 3200 | */ |
3222 | pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0); | 3201 | pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - |
3202 | phba->fw_config.iscsi_cid_start)); | ||
3223 | pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb; | 3203 | pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb; |
3224 | memset(pwrb, 0, sizeof(*pwrb)); | 3204 | memset(pwrb, 0, sizeof(*pwrb)); |
3225 | AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, | 3205 | AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, |
@@ -3283,8 +3263,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, | |||
3283 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); | 3263 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); |
3284 | 3264 | ||
3285 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; | 3265 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; |
3286 | if (!ring_mode) | 3266 | doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) |
3287 | doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) | ||
3288 | << DB_DEF_PDU_WRB_INDEX_SHIFT; | 3267 | << DB_DEF_PDU_WRB_INDEX_SHIFT; |
3289 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; | 3268 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; |
3290 | 3269 | ||
@@ -3328,8 +3307,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) | |||
3328 | io_task->bhs_pa.u.a64.address = paddr; | 3307 | io_task->bhs_pa.u.a64.address = paddr; |
3329 | io_task->libiscsi_itt = (itt_t)task->itt; | 3308 | io_task->libiscsi_itt = (itt_t)task->itt; |
3330 | io_task->pwrb_handle = alloc_wrb_handle(phba, | 3309 | io_task->pwrb_handle = alloc_wrb_handle(phba, |
3331 | beiscsi_conn->beiscsi_conn_cid, | 3310 | beiscsi_conn->beiscsi_conn_cid - |
3332 | task->itt); | 3311 | phba->fw_config.iscsi_cid_start |
3312 | ); | ||
3333 | io_task->conn = beiscsi_conn; | 3313 | io_task->conn = beiscsi_conn; |
3334 | 3314 | ||
3335 | task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; | 3315 | task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; |
@@ -3343,7 +3323,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) | |||
3343 | goto free_hndls; | 3323 | goto free_hndls; |
3344 | } else { | 3324 | } else { |
3345 | io_task->scsi_cmnd = NULL; | 3325 | io_task->scsi_cmnd = NULL; |
3346 | if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { | 3326 | if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { |
3347 | if (!beiscsi_conn->login_in_progress) { | 3327 | if (!beiscsi_conn->login_in_progress) { |
3348 | spin_lock(&phba->mgmt_sgl_lock); | 3328 | spin_lock(&phba->mgmt_sgl_lock); |
3349 | io_task->psgl_handle = (struct sgl_handle *) | 3329 | io_task->psgl_handle = (struct sgl_handle *) |
@@ -3370,21 +3350,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) | |||
3370 | itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> | 3350 | itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> |
3371 | wrb_index << 16) | (unsigned int) | 3351 | wrb_index << 16) | (unsigned int) |
3372 | (io_task->psgl_handle->sgl_index)); | 3352 | (io_task->psgl_handle->sgl_index)); |
3373 | if (ring_mode) { | 3353 | io_task->pwrb_handle->pio_handle = task; |
3374 | phba->sgl_hndl_array[io_task->psgl_handle->sgl_index - | ||
3375 | phba->fw_config.iscsi_cid_start] = | ||
3376 | io_task->psgl_handle; | ||
3377 | io_task->psgl_handle->task = task; | ||
3378 | io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid; | ||
3379 | } else | ||
3380 | io_task->pwrb_handle->pio_handle = task; | ||
3381 | 3354 | ||
3382 | io_task->cmd_bhs->iscsi_hdr.itt = itt; | 3355 | io_task->cmd_bhs->iscsi_hdr.itt = itt; |
3383 | return 0; | 3356 | return 0; |
3384 | 3357 | ||
3385 | free_hndls: | 3358 | free_hndls: |
3386 | phwi_ctrlr = phba->phwi_ctrlr; | 3359 | phwi_ctrlr = phba->phwi_ctrlr; |
3387 | pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid]; | 3360 | pwrb_context = &phwi_ctrlr->wrb_context[ |
3361 | beiscsi_conn->beiscsi_conn_cid - | ||
3362 | phba->fw_config.iscsi_cid_start]; | ||
3388 | free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); | 3363 | free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); |
3389 | io_task->pwrb_handle = NULL; | 3364 | io_task->pwrb_handle = NULL; |
3390 | pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, | 3365 | pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, |
@@ -3404,7 +3379,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) | |||
3404 | struct hwi_controller *phwi_ctrlr; | 3379 | struct hwi_controller *phwi_ctrlr; |
3405 | 3380 | ||
3406 | phwi_ctrlr = phba->phwi_ctrlr; | 3381 | phwi_ctrlr = phba->phwi_ctrlr; |
3407 | pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid]; | 3382 | pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid |
3383 | - phba->fw_config.iscsi_cid_start]; | ||
3408 | if (io_task->pwrb_handle) { | 3384 | if (io_task->pwrb_handle) { |
3409 | free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); | 3385 | free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); |
3410 | io_task->pwrb_handle = NULL; | 3386 | io_task->pwrb_handle = NULL; |
@@ -3460,18 +3436,12 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, | |||
3460 | ISCSI_OPCODE_SCSI_DATA_OUT); | 3436 | ISCSI_OPCODE_SCSI_DATA_OUT); |
3461 | AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, | 3437 | AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, |
3462 | &io_task->cmd_bhs->iscsi_data_pdu, 1); | 3438 | &io_task->cmd_bhs->iscsi_data_pdu, 1); |
3463 | if (ring_mode) | 3439 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, |
3464 | io_task->psgl_handle->type = INI_WR_CMD; | 3440 | INI_WR_CMD); |
3465 | else | ||
3466 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
3467 | INI_WR_CMD); | ||
3468 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); | 3441 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); |
3469 | } else { | 3442 | } else { |
3470 | if (ring_mode) | 3443 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, |
3471 | io_task->psgl_handle->type = INI_RD_CMD; | 3444 | INI_RD_CMD); |
3472 | else | ||
3473 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
3474 | INI_RD_CMD); | ||
3475 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); | 3445 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); |
3476 | } | 3446 | } |
3477 | memcpy(&io_task->cmd_bhs->iscsi_data_pdu. | 3447 | memcpy(&io_task->cmd_bhs->iscsi_data_pdu. |
@@ -3496,8 +3466,7 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, | |||
3496 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); | 3466 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); |
3497 | 3467 | ||
3498 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; | 3468 | doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; |
3499 | if (!ring_mode) | 3469 | doorbell |= (io_task->pwrb_handle->wrb_index & |
3500 | doorbell |= (io_task->pwrb_handle->wrb_index & | ||
3501 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; | 3470 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; |
3502 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; | 3471 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; |
3503 | 3472 | ||
@@ -3519,49 +3488,46 @@ static int beiscsi_mtask(struct iscsi_task *task) | |||
3519 | unsigned int doorbell = 0; | 3488 | unsigned int doorbell = 0; |
3520 | unsigned int i, cid; | 3489 | unsigned int i, cid; |
3521 | struct iscsi_task *aborted_task; | 3490 | struct iscsi_task *aborted_task; |
3491 | unsigned int tag; | ||
3522 | 3492 | ||
3523 | cid = beiscsi_conn->beiscsi_conn_cid; | 3493 | cid = beiscsi_conn->beiscsi_conn_cid; |
3524 | pwrb = io_task->pwrb_handle->pwrb; | 3494 | pwrb = io_task->pwrb_handle->pwrb; |
3495 | memset(pwrb, 0, sizeof(*pwrb)); | ||
3525 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, | 3496 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, |
3526 | be32_to_cpu(task->cmdsn)); | 3497 | be32_to_cpu(task->cmdsn)); |
3527 | AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, | 3498 | AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, |
3528 | io_task->pwrb_handle->wrb_index); | 3499 | io_task->pwrb_handle->wrb_index); |
3529 | AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, | 3500 | AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, |
3530 | io_task->psgl_handle->sgl_index); | 3501 | io_task->psgl_handle->sgl_index); |
3531 | |||
3532 | switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { | 3502 | switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { |
3533 | case ISCSI_OP_LOGIN: | 3503 | case ISCSI_OP_LOGIN: |
3534 | if (ring_mode) | 3504 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, |
3535 | io_task->psgl_handle->type = TGT_DM_CMD; | 3505 | TGT_DM_CMD); |
3536 | else | ||
3537 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
3538 | TGT_DM_CMD); | ||
3539 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); | 3506 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); |
3540 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); | 3507 | AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); |
3541 | hwi_write_buffer(pwrb, task); | 3508 | hwi_write_buffer(pwrb, task); |
3542 | break; | 3509 | break; |
3543 | case ISCSI_OP_NOOP_OUT: | 3510 | case ISCSI_OP_NOOP_OUT: |
3544 | if (ring_mode) | 3511 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, |
3545 | io_task->psgl_handle->type = INI_RD_CMD; | 3512 | INI_RD_CMD); |
3513 | if (task->hdr->ttt == ISCSI_RESERVED_TAG) | ||
3514 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); | ||
3546 | else | 3515 | else |
3547 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | 3516 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); |
3548 | INI_RD_CMD); | ||
3549 | hwi_write_buffer(pwrb, task); | 3517 | hwi_write_buffer(pwrb, task); |
3550 | break; | 3518 | break; |
3551 | case ISCSI_OP_TEXT: | 3519 | case ISCSI_OP_TEXT: |
3552 | if (ring_mode) | 3520 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, |
3553 | io_task->psgl_handle->type = INI_WR_CMD; | 3521 | TGT_DM_CMD); |
3554 | else | 3522 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); |
3555 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
3556 | INI_WR_CMD); | ||
3557 | AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); | ||
3558 | hwi_write_buffer(pwrb, task); | 3523 | hwi_write_buffer(pwrb, task); |
3559 | break; | 3524 | break; |
3560 | case ISCSI_OP_SCSI_TMFUNC: | 3525 | case ISCSI_OP_SCSI_TMFUNC: |
3561 | session = conn->session; | 3526 | session = conn->session; |
3562 | i = ((struct iscsi_tm *)task->hdr)->rtt; | 3527 | i = ((struct iscsi_tm *)task->hdr)->rtt; |
3563 | phwi_ctrlr = phba->phwi_ctrlr; | 3528 | phwi_ctrlr = phba->phwi_ctrlr; |
3564 | pwrb_context = &phwi_ctrlr->wrb_context[cid]; | 3529 | pwrb_context = &phwi_ctrlr->wrb_context[cid - |
3530 | phba->fw_config.iscsi_cid_start]; | ||
3565 | pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i) | 3531 | pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i) |
3566 | >> 16]; | 3532 | >> 16]; |
3567 | aborted_task = pwrb_handle->pio_handle; | 3533 | aborted_task = pwrb_handle->pio_handle; |
@@ -3572,22 +3538,25 @@ static int beiscsi_mtask(struct iscsi_task *task) | |||
3572 | if (!aborted_io_task->scsi_cmnd) | 3538 | if (!aborted_io_task->scsi_cmnd) |
3573 | return 0; | 3539 | return 0; |
3574 | 3540 | ||
3575 | mgmt_invalidate_icds(phba, | 3541 | tag = mgmt_invalidate_icds(phba, |
3576 | aborted_io_task->psgl_handle->sgl_index, | 3542 | aborted_io_task->psgl_handle->sgl_index, |
3577 | cid); | 3543 | cid); |
3578 | if (ring_mode) | 3544 | if (!tag) { |
3579 | io_task->psgl_handle->type = INI_TMF_CMD; | 3545 | shost_printk(KERN_WARNING, phba->shost, |
3580 | else | 3546 | "mgmt_invalidate_icds could not be" |
3581 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | 3547 | " submitted\n"); |
3582 | INI_TMF_CMD); | 3548 | } else { |
3549 | wait_event_interruptible(phba->ctrl.mcc_wait[tag], | ||
3550 | phba->ctrl.mcc_numtag[tag]); | ||
3551 | free_mcc_tag(&phba->ctrl, tag); | ||
3552 | } | ||
3553 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | ||
3554 | INI_TMF_CMD); | ||
3583 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); | 3555 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); |
3584 | hwi_write_buffer(pwrb, task); | 3556 | hwi_write_buffer(pwrb, task); |
3585 | break; | 3557 | break; |
3586 | case ISCSI_OP_LOGOUT: | 3558 | case ISCSI_OP_LOGOUT: |
3587 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); | 3559 | AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); |
3588 | if (ring_mode) | ||
3589 | io_task->psgl_handle->type = HWH_TYPE_LOGOUT; | ||
3590 | else | ||
3591 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, | 3560 | AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, |
3592 | HWH_TYPE_LOGOUT); | 3561 | HWH_TYPE_LOGOUT); |
3593 | hwi_write_buffer(pwrb, task); | 3562 | hwi_write_buffer(pwrb, task); |
@@ -3600,14 +3569,13 @@ static int beiscsi_mtask(struct iscsi_task *task) | |||
3600 | } | 3569 | } |
3601 | 3570 | ||
3602 | AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, | 3571 | AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, |
3603 | be32_to_cpu(task->data_count)); | 3572 | task->data_count); |
3604 | AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, | 3573 | AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, |
3605 | io_task->pwrb_handle->nxt_wrb_index); | 3574 | io_task->pwrb_handle->nxt_wrb_index); |
3606 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); | 3575 | be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); |
3607 | 3576 | ||
3608 | doorbell |= cid & DB_WRB_POST_CID_MASK; | 3577 | doorbell |= cid & DB_WRB_POST_CID_MASK; |
3609 | if (!ring_mode) | 3578 | doorbell |= (io_task->pwrb_handle->wrb_index & |
3610 | doorbell |= (io_task->pwrb_handle->wrb_index & | ||
3611 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; | 3579 | DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; |
3612 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; | 3580 | doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; |
3613 | iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); | 3581 | iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); |
@@ -3649,7 +3617,6 @@ static int beiscsi_task_xmit(struct iscsi_task *task) | |||
3649 | return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); | 3617 | return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); |
3650 | } | 3618 | } |
3651 | 3619 | ||
3652 | |||
3653 | static void beiscsi_remove(struct pci_dev *pcidev) | 3620 | static void beiscsi_remove(struct pci_dev *pcidev) |
3654 | { | 3621 | { |
3655 | struct beiscsi_hba *phba = NULL; | 3622 | struct beiscsi_hba *phba = NULL; |
@@ -3734,7 +3701,20 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
3734 | } | 3701 | } |
3735 | SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba); | 3702 | SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba); |
3736 | 3703 | ||
3737 | pci_set_drvdata(pcidev, phba); | 3704 | switch (pcidev->device) { |
3705 | case BE_DEVICE_ID1: | ||
3706 | case OC_DEVICE_ID1: | ||
3707 | case OC_DEVICE_ID2: | ||
3708 | phba->generation = BE_GEN2; | ||
3709 | break; | ||
3710 | case BE_DEVICE_ID2: | ||
3711 | case OC_DEVICE_ID3: | ||
3712 | phba->generation = BE_GEN3; | ||
3713 | break; | ||
3714 | default: | ||
3715 | phba->generation = 0; | ||
3716 | } | ||
3717 | |||
3738 | if (enable_msix) | 3718 | if (enable_msix) |
3739 | num_cpus = find_num_cpus(); | 3719 | num_cpus = find_num_cpus(); |
3740 | else | 3720 | else |
@@ -3754,7 +3734,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
3754 | spin_lock_init(&phba->io_sgl_lock); | 3734 | spin_lock_init(&phba->io_sgl_lock); |
3755 | spin_lock_init(&phba->mgmt_sgl_lock); | 3735 | spin_lock_init(&phba->mgmt_sgl_lock); |
3756 | spin_lock_init(&phba->isr_lock); | 3736 | spin_lock_init(&phba->isr_lock); |
3737 | ret = mgmt_get_fw_config(&phba->ctrl, phba); | ||
3738 | if (ret != 0) { | ||
3739 | shost_printk(KERN_ERR, phba->shost, | ||
3740 | "Error getting fw config\n"); | ||
3741 | goto free_port; | ||
3742 | } | ||
3743 | phba->shost->max_id = phba->fw_config.iscsi_cid_count; | ||
3757 | beiscsi_get_params(phba); | 3744 | beiscsi_get_params(phba); |
3745 | phba->shost->can_queue = phba->params.ios_per_ctrl; | ||
3758 | ret = beiscsi_init_port(phba); | 3746 | ret = beiscsi_init_port(phba); |
3759 | if (ret < 0) { | 3747 | if (ret < 0) { |
3760 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" | 3748 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" |
@@ -3762,6 +3750,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
3762 | goto free_port; | 3750 | goto free_port; |
3763 | } | 3751 | } |
3764 | 3752 | ||
3753 | for (i = 0; i < MAX_MCC_CMD ; i++) { | ||
3754 | init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); | ||
3755 | phba->ctrl.mcc_tag[i] = i + 1; | ||
3756 | phba->ctrl.mcc_numtag[i + 1] = 0; | ||
3757 | phba->ctrl.mcc_tag_available++; | ||
3758 | } | ||
3759 | |||
3760 | phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; | ||
3761 | |||
3765 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", | 3762 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", |
3766 | phba->shost->host_no); | 3763 | phba->shost->host_no); |
3767 | phba->wq = create_workqueue(phba->wq_name); | 3764 | phba->wq = create_workqueue(phba->wq_name); |
@@ -3836,7 +3833,7 @@ disable_pci: | |||
3836 | struct iscsi_transport beiscsi_iscsi_transport = { | 3833 | struct iscsi_transport beiscsi_iscsi_transport = { |
3837 | .owner = THIS_MODULE, | 3834 | .owner = THIS_MODULE, |
3838 | .name = DRV_NAME, | 3835 | .name = DRV_NAME, |
3839 | .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | | 3836 | .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | |
3840 | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, | 3837 | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, |
3841 | .param_mask = ISCSI_MAX_RECV_DLENGTH | | 3838 | .param_mask = ISCSI_MAX_RECV_DLENGTH | |
3842 | ISCSI_MAX_XMIT_DLENGTH | | 3839 | ISCSI_MAX_XMIT_DLENGTH | |
@@ -3859,7 +3856,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { | |||
3859 | ISCSI_USERNAME | ISCSI_PASSWORD | | 3856 | ISCSI_USERNAME | ISCSI_PASSWORD | |
3860 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | | 3857 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | |
3861 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | 3858 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | |
3862 | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | | 3859 | ISCSI_LU_RESET_TMO | |
3863 | ISCSI_PING_TMO | ISCSI_RECV_TMO | | 3860 | ISCSI_PING_TMO | ISCSI_RECV_TMO | |
3864 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, | 3861 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, |
3865 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | | 3862 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | |
@@ -3905,7 +3902,7 @@ static int __init beiscsi_module_init(void) | |||
3905 | SE_DEBUG(DBG_LVL_1, | 3902 | SE_DEBUG(DBG_LVL_1, |
3906 | "beiscsi_module_init - Unable to register beiscsi" | 3903 | "beiscsi_module_init - Unable to register beiscsi" |
3907 | "transport.\n"); | 3904 | "transport.\n"); |
3908 | ret = -ENOMEM; | 3905 | return -ENOMEM; |
3909 | } | 3906 | } |
3910 | SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n", | 3907 | SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n", |
3911 | &beiscsi_iscsi_transport); | 3908 | &beiscsi_iscsi_transport); |
@@ -3917,7 +3914,6 @@ static int __init beiscsi_module_init(void) | |||
3917 | "beiscsi pci driver.\n"); | 3914 | "beiscsi pci driver.\n"); |
3918 | goto unregister_iscsi_transport; | 3915 | goto unregister_iscsi_transport; |
3919 | } | 3916 | } |
3920 | ring_mode = 0; | ||
3921 | return 0; | 3917 | return 0; |
3922 | 3918 | ||
3923 | unregister_iscsi_transport: | 3919 | unregister_iscsi_transport: |
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 25e6b208b771..c53a80ab796c 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -40,31 +40,29 @@ | |||
40 | #define DRV_DESC BE_NAME " " "Driver" | 40 | #define DRV_DESC BE_NAME " " "Driver" |
41 | 41 | ||
42 | #define BE_VENDOR_ID 0x19A2 | 42 | #define BE_VENDOR_ID 0x19A2 |
43 | /* DEVICE ID's for BE2 */ | ||
43 | #define BE_DEVICE_ID1 0x212 | 44 | #define BE_DEVICE_ID1 0x212 |
44 | #define OC_DEVICE_ID1 0x702 | 45 | #define OC_DEVICE_ID1 0x702 |
45 | #define OC_DEVICE_ID2 0x703 | 46 | #define OC_DEVICE_ID2 0x703 |
47 | |||
48 | /* DEVICE ID's for BE3 */ | ||
49 | #define BE_DEVICE_ID2 0x222 | ||
46 | #define OC_DEVICE_ID3 0x712 | 50 | #define OC_DEVICE_ID3 0x712 |
47 | #define OC_DEVICE_ID4 0x222 | ||
48 | 51 | ||
49 | #define BE2_MAX_SESSIONS 64 | 52 | #define BE2_IO_DEPTH 1024 |
53 | #define BE2_MAX_SESSIONS 256 | ||
50 | #define BE2_CMDS_PER_CXN 128 | 54 | #define BE2_CMDS_PER_CXN 128 |
51 | #define BE2_LOGOUTS BE2_MAX_SESSIONS | ||
52 | #define BE2_TMFS 16 | 55 | #define BE2_TMFS 16 |
53 | #define BE2_NOPOUT_REQ 16 | 56 | #define BE2_NOPOUT_REQ 16 |
54 | #define BE2_ASYNCPDUS BE2_MAX_SESSIONS | ||
55 | #define BE2_MAX_ICDS 2048 | ||
56 | #define BE2_SGE 32 | 57 | #define BE2_SGE 32 |
57 | #define BE2_DEFPDU_HDR_SZ 64 | 58 | #define BE2_DEFPDU_HDR_SZ 64 |
58 | #define BE2_DEFPDU_DATA_SZ 8192 | 59 | #define BE2_DEFPDU_DATA_SZ 8192 |
59 | #define BE2_IO_DEPTH \ | ||
60 | (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ)) | ||
61 | 60 | ||
62 | #define MAX_CPUS 31 | 61 | #define MAX_CPUS 31 |
63 | #define BEISCSI_SGLIST_ELEMENTS BE2_SGE | 62 | #define BEISCSI_SGLIST_ELEMENTS 30 |
64 | 63 | ||
65 | #define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */ | ||
66 | #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ | 64 | #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ |
67 | #define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ | 65 | #define BEISCSI_MAX_SECTORS 256 /* scsi_host->max_sectors */ |
68 | 66 | ||
69 | #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ | 67 | #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ |
70 | #define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ | 68 | #define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ |
@@ -330,6 +328,7 @@ struct beiscsi_hba { | |||
330 | struct workqueue_struct *wq; /* The actuak work queue */ | 328 | struct workqueue_struct *wq; /* The actuak work queue */ |
331 | struct work_struct work_cqs; /* The work being queued */ | 329 | struct work_struct work_cqs; /* The work being queued */ |
332 | struct be_ctrl_info ctrl; | 330 | struct be_ctrl_info ctrl; |
331 | unsigned int generation; | ||
333 | }; | 332 | }; |
334 | 333 | ||
335 | struct beiscsi_session { | 334 | struct beiscsi_session { |
@@ -656,11 +655,12 @@ struct amap_iscsi_wrb { | |||
656 | 655 | ||
657 | } __packed; | 656 | } __packed; |
658 | 657 | ||
659 | struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, | 658 | struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid); |
660 | int index); | ||
661 | void | 659 | void |
662 | free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); | 660 | free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); |
663 | 661 | ||
662 | void beiscsi_process_all_cqs(struct work_struct *work); | ||
663 | |||
664 | struct pdu_nop_out { | 664 | struct pdu_nop_out { |
665 | u32 dw[12]; | 665 | u32 dw[12]; |
666 | }; | 666 | }; |
@@ -802,7 +802,6 @@ struct hwi_controller { | |||
802 | struct be_ring default_pdu_hdr; | 802 | struct be_ring default_pdu_hdr; |
803 | struct be_ring default_pdu_data; | 803 | struct be_ring default_pdu_data; |
804 | struct hwi_context_memory *phwi_ctxt; | 804 | struct hwi_context_memory *phwi_ctxt; |
805 | unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN]; | ||
806 | }; | 805 | }; |
807 | 806 | ||
808 | enum hwh_type_enum { | 807 | enum hwh_type_enum { |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 79c2bd525a84..317bcd042ced 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -48,6 +48,14 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl, | |||
48 | pfw_cfg->ulp[0].sq_base; | 48 | pfw_cfg->ulp[0].sq_base; |
49 | phba->fw_config.iscsi_cid_count = | 49 | phba->fw_config.iscsi_cid_count = |
50 | pfw_cfg->ulp[0].sq_count; | 50 | pfw_cfg->ulp[0].sq_count; |
51 | if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { | ||
52 | SE_DEBUG(DBG_LVL_8, | ||
53 | "FW reported MAX CXNS as %d \t" | ||
54 | "Max Supported = %d.\n", | ||
55 | phba->fw_config.iscsi_cid_count, | ||
56 | BE2_MAX_SESSIONS); | ||
57 | phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2; | ||
58 | } | ||
51 | } else { | 59 | } else { |
52 | shost_printk(KERN_WARNING, phba->shost, | 60 | shost_printk(KERN_WARNING, phba->shost, |
53 | "Failed in mgmt_get_fw_config \n"); | 61 | "Failed in mgmt_get_fw_config \n"); |
@@ -77,6 +85,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl, | |||
77 | } | 85 | } |
78 | nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); | 86 | nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); |
79 | req = nonemb_cmd.va; | 87 | req = nonemb_cmd.va; |
88 | memset(req, 0, sizeof(*req)); | ||
80 | spin_lock(&ctrl->mbox_lock); | 89 | spin_lock(&ctrl->mbox_lock); |
81 | memset(wrb, 0, sizeof(*wrb)); | 90 | memset(wrb, 0, sizeof(*wrb)); |
82 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); | 91 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); |
@@ -140,10 +149,17 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
140 | { | 149 | { |
141 | struct be_dma_mem nonemb_cmd; | 150 | struct be_dma_mem nonemb_cmd; |
142 | struct be_ctrl_info *ctrl = &phba->ctrl; | 151 | struct be_ctrl_info *ctrl = &phba->ctrl; |
143 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | 152 | struct be_mcc_wrb *wrb; |
144 | struct be_sge *sge = nonembedded_sgl(wrb); | 153 | struct be_sge *sge; |
145 | struct invalidate_commands_params_in *req; | 154 | struct invalidate_commands_params_in *req; |
146 | int status = 0; | 155 | unsigned int tag = 0; |
156 | |||
157 | spin_lock(&ctrl->mbox_lock); | ||
158 | tag = alloc_mcc_tag(phba); | ||
159 | if (!tag) { | ||
160 | spin_unlock(&ctrl->mbox_lock); | ||
161 | return tag; | ||
162 | } | ||
147 | 163 | ||
148 | nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev, | 164 | nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev, |
149 | sizeof(struct invalidate_commands_params_in), | 165 | sizeof(struct invalidate_commands_params_in), |
@@ -156,8 +172,10 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
156 | } | 172 | } |
157 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); | 173 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); |
158 | req = nonemb_cmd.va; | 174 | req = nonemb_cmd.va; |
159 | spin_lock(&ctrl->mbox_lock); | 175 | memset(req, 0, sizeof(*req)); |
160 | memset(wrb, 0, sizeof(*wrb)); | 176 | wrb = wrb_from_mccq(phba); |
177 | sge = nonembedded_sgl(wrb); | ||
178 | wrb->tag0 |= tag; | ||
161 | 179 | ||
162 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); | 180 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); |
163 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | 181 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, |
@@ -172,14 +190,12 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
172 | sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); | 190 | sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); |
173 | sge->len = cpu_to_le32(nonemb_cmd.size); | 191 | sge->len = cpu_to_le32(nonemb_cmd.size); |
174 | 192 | ||
175 | status = be_mcc_notify_wait(phba); | 193 | be_mcc_notify(phba); |
176 | if (status) | ||
177 | SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n"); | ||
178 | spin_unlock(&ctrl->mbox_lock); | 194 | spin_unlock(&ctrl->mbox_lock); |
179 | if (nonemb_cmd.va) | 195 | if (nonemb_cmd.va) |
180 | pci_free_consistent(ctrl->pdev, nonemb_cmd.size, | 196 | pci_free_consistent(ctrl->pdev, nonemb_cmd.size, |
181 | nonemb_cmd.va, nonemb_cmd.dma); | 197 | nonemb_cmd.va, nonemb_cmd.dma); |
182 | return status; | 198 | return tag; |
183 | } | 199 | } |
184 | 200 | ||
185 | unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, | 201 | unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, |
@@ -189,13 +205,19 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, | |||
189 | unsigned short savecfg_flag) | 205 | unsigned short savecfg_flag) |
190 | { | 206 | { |
191 | struct be_ctrl_info *ctrl = &phba->ctrl; | 207 | struct be_ctrl_info *ctrl = &phba->ctrl; |
192 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | 208 | struct be_mcc_wrb *wrb; |
193 | struct iscsi_invalidate_connection_params_in *req = | 209 | struct iscsi_invalidate_connection_params_in *req; |
194 | embedded_payload(wrb); | 210 | unsigned int tag = 0; |
195 | int status = 0; | ||
196 | 211 | ||
197 | spin_lock(&ctrl->mbox_lock); | 212 | spin_lock(&ctrl->mbox_lock); |
198 | memset(wrb, 0, sizeof(*wrb)); | 213 | tag = alloc_mcc_tag(phba); |
214 | if (!tag) { | ||
215 | spin_unlock(&ctrl->mbox_lock); | ||
216 | return tag; | ||
217 | } | ||
218 | wrb = wrb_from_mccq(phba); | ||
219 | wrb->tag0 |= tag; | ||
220 | req = embedded_payload(wrb); | ||
199 | 221 | ||
200 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 222 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
201 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, | 223 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, |
@@ -208,35 +230,37 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, | |||
208 | else | 230 | else |
209 | req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; | 231 | req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; |
210 | req->save_cfg = savecfg_flag; | 232 | req->save_cfg = savecfg_flag; |
211 | status = be_mcc_notify_wait(phba); | 233 | be_mcc_notify(phba); |
212 | if (status) | ||
213 | SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n"); | ||
214 | |||
215 | spin_unlock(&ctrl->mbox_lock); | 234 | spin_unlock(&ctrl->mbox_lock); |
216 | return status; | 235 | return tag; |
217 | } | 236 | } |
218 | 237 | ||
219 | unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, | 238 | unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, |
220 | unsigned short cid, unsigned int upload_flag) | 239 | unsigned short cid, unsigned int upload_flag) |
221 | { | 240 | { |
222 | struct be_ctrl_info *ctrl = &phba->ctrl; | 241 | struct be_ctrl_info *ctrl = &phba->ctrl; |
223 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | 242 | struct be_mcc_wrb *wrb; |
224 | struct tcp_upload_params_in *req = embedded_payload(wrb); | 243 | struct tcp_upload_params_in *req; |
225 | int status = 0; | 244 | unsigned int tag = 0; |
226 | 245 | ||
227 | spin_lock(&ctrl->mbox_lock); | 246 | spin_lock(&ctrl->mbox_lock); |
228 | memset(wrb, 0, sizeof(*wrb)); | 247 | tag = alloc_mcc_tag(phba); |
248 | if (!tag) { | ||
249 | spin_unlock(&ctrl->mbox_lock); | ||
250 | return tag; | ||
251 | } | ||
252 | wrb = wrb_from_mccq(phba); | ||
253 | req = embedded_payload(wrb); | ||
254 | wrb->tag0 |= tag; | ||
229 | 255 | ||
230 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 256 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
231 | be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, | 257 | be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, |
232 | OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); | 258 | OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); |
233 | req->id = (unsigned short)cid; | 259 | req->id = (unsigned short)cid; |
234 | req->upload_type = (unsigned char)upload_flag; | 260 | req->upload_type = (unsigned char)upload_flag; |
235 | status = be_mcc_notify_wait(phba); | 261 | be_mcc_notify(phba); |
236 | if (status) | ||
237 | SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n"); | ||
238 | spin_unlock(&ctrl->mbox_lock); | 262 | spin_unlock(&ctrl->mbox_lock); |
239 | return status; | 263 | return tag; |
240 | } | 264 | } |
241 | 265 | ||
242 | int mgmt_open_connection(struct beiscsi_hba *phba, | 266 | int mgmt_open_connection(struct beiscsi_hba *phba, |
@@ -248,13 +272,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
248 | struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; | 272 | struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; |
249 | struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; | 273 | struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; |
250 | struct be_ctrl_info *ctrl = &phba->ctrl; | 274 | struct be_ctrl_info *ctrl = &phba->ctrl; |
251 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | 275 | struct be_mcc_wrb *wrb; |
252 | struct tcp_connect_and_offload_in *req = embedded_payload(wrb); | 276 | struct tcp_connect_and_offload_in *req; |
253 | unsigned short def_hdr_id; | 277 | unsigned short def_hdr_id; |
254 | unsigned short def_data_id; | 278 | unsigned short def_data_id; |
255 | struct phys_addr template_address = { 0, 0 }; | 279 | struct phys_addr template_address = { 0, 0 }; |
256 | struct phys_addr *ptemplate_address; | 280 | struct phys_addr *ptemplate_address; |
257 | int status = 0; | 281 | unsigned int tag = 0; |
258 | unsigned int i; | 282 | unsigned int i; |
259 | unsigned short cid = beiscsi_ep->ep_cid; | 283 | unsigned short cid = beiscsi_ep->ep_cid; |
260 | 284 | ||
@@ -266,7 +290,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
266 | ptemplate_address = &template_address; | 290 | ptemplate_address = &template_address; |
267 | ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); | 291 | ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); |
268 | spin_lock(&ctrl->mbox_lock); | 292 | spin_lock(&ctrl->mbox_lock); |
269 | memset(wrb, 0, sizeof(*wrb)); | 293 | tag = alloc_mcc_tag(phba); |
294 | if (!tag) { | ||
295 | spin_unlock(&ctrl->mbox_lock); | ||
296 | return tag; | ||
297 | } | ||
298 | wrb = wrb_from_mccq(phba); | ||
299 | req = embedded_payload(wrb); | ||
300 | wrb->tag0 |= tag; | ||
270 | 301 | ||
271 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 302 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
272 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | 303 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, |
@@ -311,46 +342,36 @@ int mgmt_open_connection(struct beiscsi_hba *phba, | |||
311 | req->do_offload = 1; | 342 | req->do_offload = 1; |
312 | req->dataout_template_pa.lo = ptemplate_address->lo; | 343 | req->dataout_template_pa.lo = ptemplate_address->lo; |
313 | req->dataout_template_pa.hi = ptemplate_address->hi; | 344 | req->dataout_template_pa.hi = ptemplate_address->hi; |
314 | status = be_mcc_notify_wait(phba); | 345 | be_mcc_notify(phba); |
315 | if (!status) { | ||
316 | struct iscsi_endpoint *ep; | ||
317 | struct tcp_connect_and_offload_out *ptcpcnct_out = | ||
318 | embedded_payload(wrb); | ||
319 | |||
320 | ep = phba->ep_array[ptcpcnct_out->cid]; | ||
321 | beiscsi_ep = ep->dd_data; | ||
322 | beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; | ||
323 | beiscsi_ep->cid_vld = 1; | ||
324 | SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); | ||
325 | } else | ||
326 | SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n"); | ||
327 | spin_unlock(&ctrl->mbox_lock); | 346 | spin_unlock(&ctrl->mbox_lock); |
328 | return status; | 347 | return tag; |
329 | } | 348 | } |
330 | 349 | ||
331 | int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr) | 350 | unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba) |
332 | { | 351 | { |
333 | struct be_ctrl_info *ctrl = &phba->ctrl; | 352 | struct be_ctrl_info *ctrl = &phba->ctrl; |
334 | struct be_mcc_wrb *wrb = wrb_from_mccq(phba); | 353 | struct be_mcc_wrb *wrb; |
335 | struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb); | 354 | struct be_cmd_req_get_mac_addr *req; |
336 | int status; | 355 | unsigned int tag = 0; |
337 | 356 | ||
338 | SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n"); | 357 | SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n"); |
339 | spin_lock(&ctrl->mbox_lock); | 358 | spin_lock(&ctrl->mbox_lock); |
340 | memset(wrb, 0, sizeof(*wrb)); | 359 | tag = alloc_mcc_tag(phba); |
360 | if (!tag) { | ||
361 | spin_unlock(&ctrl->mbox_lock); | ||
362 | return tag; | ||
363 | } | ||
364 | |||
365 | wrb = wrb_from_mccq(phba); | ||
366 | req = embedded_payload(wrb); | ||
367 | wrb->tag0 |= tag; | ||
341 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | 368 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); |
342 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, | 369 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, |
343 | OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, | 370 | OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, |
344 | sizeof(*req)); | 371 | sizeof(*req)); |
345 | 372 | ||
346 | status = be_mcc_notify_wait(phba); | 373 | be_mcc_notify(phba); |
347 | if (!status) { | ||
348 | struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb); | ||
349 | |||
350 | memcpy(mac_addr, resp->mac_address, ETH_ALEN); | ||
351 | } | ||
352 | |||
353 | spin_unlock(&ctrl->mbox_lock); | 374 | spin_unlock(&ctrl->mbox_lock); |
354 | return status; | 375 | return tag; |
355 | } | 376 | } |
356 | 377 | ||
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 24eaff923f85..ecead6a5aa56 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * Copyright (C) 2005 - 2009 ServerEngines | 2 | * Copyright (C) 2005 - 2010 ServerEngines |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -231,6 +231,7 @@ struct beiscsi_endpoint { | |||
231 | struct beiscsi_hba *phba; | 231 | struct beiscsi_hba *phba; |
232 | struct beiscsi_sess *sess; | 232 | struct beiscsi_sess *sess; |
233 | struct beiscsi_conn *conn; | 233 | struct beiscsi_conn *conn; |
234 | struct iscsi_endpoint *openiscsi_ep; | ||
234 | unsigned short ip_type; | 235 | unsigned short ip_type; |
235 | char dst6_addr[ISCSI_ADDRESS_BUF_LEN]; | 236 | char dst6_addr[ISCSI_ADDRESS_BUF_LEN]; |
236 | unsigned long dst_addr; | 237 | unsigned long dst_addr; |
@@ -249,7 +250,4 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba, | |||
249 | unsigned short issue_reset, | 250 | unsigned short issue_reset, |
250 | unsigned short savecfg_flag); | 251 | unsigned short savecfg_flag); |
251 | 252 | ||
252 | unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl, | ||
253 | struct beiscsi_hba *phba, | ||
254 | char *buf, unsigned int len); | ||
255 | #endif | 253 | #endif |
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 33b2294625bb..1c4d1215769d 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
@@ -1426,8 +1426,8 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn, | |||
1426 | break; | 1426 | break; |
1427 | case ISCSI_PARAM_CONN_ADDRESS: | 1427 | case ISCSI_PARAM_CONN_ADDRESS: |
1428 | if (bnx2i_conn->ep) | 1428 | if (bnx2i_conn->ep) |
1429 | len = sprintf(buf, NIPQUAD_FMT "\n", | 1429 | len = sprintf(buf, "%pI4\n", |
1430 | NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip)); | 1430 | &bnx2i_conn->ep->cm_sk->dst_ip); |
1431 | break; | 1431 | break; |
1432 | default: | 1432 | default: |
1433 | return iscsi_conn_get_param(cls_conn, param, buf); | 1433 | return iscsi_conn_get_param(cls_conn, param, buf); |
@@ -1990,6 +1990,7 @@ static struct scsi_host_template bnx2i_host_template = { | |||
1990 | .eh_abort_handler = iscsi_eh_abort, | 1990 | .eh_abort_handler = iscsi_eh_abort, |
1991 | .eh_device_reset_handler = iscsi_eh_device_reset, | 1991 | .eh_device_reset_handler = iscsi_eh_device_reset, |
1992 | .eh_target_reset_handler = iscsi_eh_target_reset, | 1992 | .eh_target_reset_handler = iscsi_eh_target_reset, |
1993 | .change_queue_depth = iscsi_change_queue_depth, | ||
1993 | .can_queue = 1024, | 1994 | .can_queue = 1024, |
1994 | .max_sectors = 127, | 1995 | .max_sectors = 127, |
1995 | .cmd_per_lun = 32, | 1996 | .cmd_per_lun = 32, |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index 9129bcf117cf..cd05e049d5f6 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
@@ -219,18 +219,15 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len) | |||
219 | break; | 219 | break; |
220 | } | 220 | } |
221 | sa = (cdbp[8] << 8) + cdbp[9]; | 221 | sa = (cdbp[8] << 8) + cdbp[9]; |
222 | name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); | 222 | name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, sa); |
223 | if (name) { | 223 | if (name) |
224 | printk("%s", name); | 224 | printk("%s", name); |
225 | if ((cdb_len > 0) && (len != cdb_len)) | 225 | else |
226 | printk(", in_cdb_len=%d, ext_len=%d", | ||
227 | len, cdb_len); | ||
228 | } else { | ||
229 | printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); | 226 | printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); |
230 | if ((cdb_len > 0) && (len != cdb_len)) | 227 | |
231 | printk(", in_cdb_len=%d, ext_len=%d", | 228 | if ((cdb_len > 0) && (len != cdb_len)) |
232 | len, cdb_len); | 229 | printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len); |
233 | } | 230 | |
234 | break; | 231 | break; |
235 | case MAINTENANCE_IN: | 232 | case MAINTENANCE_IN: |
236 | sa = cdbp[1] & 0x1f; | 233 | sa = cdbp[1] & 0x1f; |
@@ -349,6 +346,9 @@ void scsi_print_command(struct scsi_cmnd *cmd) | |||
349 | { | 346 | { |
350 | int k; | 347 | int k; |
351 | 348 | ||
349 | if (cmd->cmnd == NULL) | ||
350 | return; | ||
351 | |||
352 | scmd_printk(KERN_INFO, cmd, "CDB: "); | 352 | scmd_printk(KERN_INFO, cmd, "CDB: "); |
353 | print_opcode_name(cmd->cmnd, cmd->cmd_len); | 353 | print_opcode_name(cmd->cmnd, cmd->cmd_len); |
354 | 354 | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 969c83162cc4..412853c65372 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c | |||
@@ -591,8 +591,7 @@ static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session, | |||
591 | cxgb3i_conn_max_recv_dlength(conn); | 591 | cxgb3i_conn_max_recv_dlength(conn); |
592 | 592 | ||
593 | spin_lock_bh(&conn->session->lock); | 593 | spin_lock_bh(&conn->session->lock); |
594 | sprintf(conn->portal_address, NIPQUAD_FMT, | 594 | sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr); |
595 | NIPQUAD(c3cn->daddr.sin_addr.s_addr)); | ||
596 | conn->portal_port = ntohs(c3cn->daddr.sin_port); | 595 | conn->portal_port = ntohs(c3cn->daddr.sin_port); |
597 | spin_unlock_bh(&conn->session->lock); | 596 | spin_unlock_bh(&conn->session->lock); |
598 | 597 | ||
@@ -709,6 +708,12 @@ static int cxgb3i_host_set_param(struct Scsi_Host *shost, | |||
709 | { | 708 | { |
710 | struct cxgb3i_hba *hba = iscsi_host_priv(shost); | 709 | struct cxgb3i_hba *hba = iscsi_host_priv(shost); |
711 | 710 | ||
711 | if (!hba->ndev) { | ||
712 | shost_printk(KERN_ERR, shost, "Could not set host param. " | ||
713 | "Netdev for host not set.\n"); | ||
714 | return -ENODEV; | ||
715 | } | ||
716 | |||
712 | cxgb3i_api_debug("param %d, buf %s.\n", param, buf); | 717 | cxgb3i_api_debug("param %d, buf %s.\n", param, buf); |
713 | 718 | ||
714 | switch (param) { | 719 | switch (param) { |
@@ -739,6 +744,12 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost, | |||
739 | struct cxgb3i_hba *hba = iscsi_host_priv(shost); | 744 | struct cxgb3i_hba *hba = iscsi_host_priv(shost); |
740 | int len = 0; | 745 | int len = 0; |
741 | 746 | ||
747 | if (!hba->ndev) { | ||
748 | shost_printk(KERN_ERR, shost, "Could not set host param. " | ||
749 | "Netdev for host not set.\n"); | ||
750 | return -ENODEV; | ||
751 | } | ||
752 | |||
742 | cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param); | 753 | cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param); |
743 | 754 | ||
744 | switch (param) { | 755 | switch (param) { |
@@ -753,7 +764,7 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost, | |||
753 | __be32 addr; | 764 | __be32 addr; |
754 | 765 | ||
755 | addr = cxgb3i_get_private_ipv4addr(hba->ndev); | 766 | addr = cxgb3i_get_private_ipv4addr(hba->ndev); |
756 | len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr)); | 767 | len = sprintf(buf, "%pI4", &addr); |
757 | break; | 768 | break; |
758 | } | 769 | } |
759 | default: | 770 | default: |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c index 15a00e8b7122..3e08c430ff29 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c | |||
@@ -1675,10 +1675,11 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, | |||
1675 | } else | 1675 | } else |
1676 | c3cn->saddr.sin_addr.s_addr = sipv4; | 1676 | c3cn->saddr.sin_addr.s_addr = sipv4; |
1677 | 1677 | ||
1678 | c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n", | 1678 | c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n", |
1679 | c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr), | 1679 | c3cn, |
1680 | &c3cn->saddr.sin_addr.s_addr, | ||
1680 | ntohs(c3cn->saddr.sin_port), | 1681 | ntohs(c3cn->saddr.sin_port), |
1681 | NIPQUAD(c3cn->daddr.sin_addr.s_addr), | 1682 | &c3cn->daddr.sin_addr.s_addr, |
1682 | ntohs(c3cn->daddr.sin_port)); | 1683 | ntohs(c3cn->daddr.sin_port)); |
1683 | 1684 | ||
1684 | c3cn_set_state(c3cn, C3CN_STATE_CONNECTING); | 1685 | c3cn_set_state(c3cn, C3CN_STATE_CONNECTING); |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c index 1fe3b0f1f3c9..9c38539557fc 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c +++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c | |||
@@ -461,10 +461,8 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn) | |||
461 | skb = skb_peek(&c3cn->receive_queue); | 461 | skb = skb_peek(&c3cn->receive_queue); |
462 | } | 462 | } |
463 | read_unlock(&c3cn->callback_lock); | 463 | read_unlock(&c3cn->callback_lock); |
464 | if (c3cn) { | 464 | c3cn->copied_seq += read; |
465 | c3cn->copied_seq += read; | 465 | cxgb3i_c3cn_rx_credits(c3cn, read); |
466 | cxgb3i_c3cn_rx_credits(c3cn, read); | ||
467 | } | ||
468 | conn->rxdata_octets += read; | 466 | conn->rxdata_octets += read; |
469 | 467 | ||
470 | if (err) { | 468 | if (err) { |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 4f0d0138f48b..bc9e94f5915e 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
@@ -717,6 +717,8 @@ static const struct scsi_dh_devlist alua_dev_list[] = { | |||
717 | {"IBM", "2145" }, | 717 | {"IBM", "2145" }, |
718 | {"Pillar", "Axiom" }, | 718 | {"Pillar", "Axiom" }, |
719 | {"Intel", "Multi-Flex"}, | 719 | {"Intel", "Multi-Flex"}, |
720 | {"NETAPP", "LUN"}, | ||
721 | {"AIX", "NVDISK"}, | ||
720 | {NULL, NULL} | 722 | {NULL, NULL} |
721 | }; | 723 | }; |
722 | 724 | ||
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index c7076ce25e21..3c5abf7cd762 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c | |||
@@ -1509,7 +1509,7 @@ static int option_setup(char *str) | |||
1509 | char *cur = str; | 1509 | char *cur = str; |
1510 | int i = 1; | 1510 | int i = 1; |
1511 | 1511 | ||
1512 | while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) { | 1512 | while (cur && isdigit(*cur) && i < MAX_INT_PARAM) { |
1513 | ints[i++] = simple_strtoul(cur, NULL, 0); | 1513 | ints[i++] = simple_strtoul(cur, NULL, 0); |
1514 | 1514 | ||
1515 | if ((cur = strchr(cur, ',')) != NULL) | 1515 | if ((cur = strchr(cur, ',')) != NULL) |
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index a680e18b5f3b..e2bc779f86c1 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c | |||
@@ -1449,9 +1449,6 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) | |||
1449 | if (offset > 15) | 1449 | if (offset > 15) |
1450 | goto do_reject; | 1450 | goto do_reject; |
1451 | 1451 | ||
1452 | if (esp->flags & ESP_FLAG_DISABLE_SYNC) | ||
1453 | offset = 0; | ||
1454 | |||
1455 | if (offset) { | 1452 | if (offset) { |
1456 | int one_clock; | 1453 | int one_clock; |
1457 | 1454 | ||
@@ -2405,12 +2402,6 @@ static int esp_slave_configure(struct scsi_device *dev) | |||
2405 | struct esp_target_data *tp = &esp->target[dev->id]; | 2402 | struct esp_target_data *tp = &esp->target[dev->id]; |
2406 | int goal_tags, queue_depth; | 2403 | int goal_tags, queue_depth; |
2407 | 2404 | ||
2408 | if (esp->flags & ESP_FLAG_DISABLE_SYNC) { | ||
2409 | /* Bypass async domain validation */ | ||
2410 | dev->ppr = 0; | ||
2411 | dev->sdtr = 0; | ||
2412 | } | ||
2413 | |||
2414 | goal_tags = 0; | 2405 | goal_tags = 0; |
2415 | 2406 | ||
2416 | if (dev->tagged_supported) { | 2407 | if (dev->tagged_supported) { |
@@ -2660,7 +2651,10 @@ static void esp_set_offset(struct scsi_target *target, int offset) | |||
2660 | struct esp *esp = shost_priv(host); | 2651 | struct esp *esp = shost_priv(host); |
2661 | struct esp_target_data *tp = &esp->target[target->id]; | 2652 | struct esp_target_data *tp = &esp->target[target->id]; |
2662 | 2653 | ||
2663 | tp->nego_goal_offset = offset; | 2654 | if (esp->flags & ESP_FLAG_DISABLE_SYNC) |
2655 | tp->nego_goal_offset = 0; | ||
2656 | else | ||
2657 | tp->nego_goal_offset = offset; | ||
2664 | tp->flags |= ESP_TGT_CHECK_NEGO; | 2658 | tp->flags |= ESP_TGT_CHECK_NEGO; |
2665 | } | 2659 | } |
2666 | 2660 | ||
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index bb208a6091e7..3966c71d0095 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | #define DRV_NAME "fnic" | 37 | #define DRV_NAME "fnic" |
38 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" | 38 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" |
39 | #define DRV_VERSION "1.0.0.1121" | 39 | #define DRV_VERSION "1.4.0.98" |
40 | #define PFX DRV_NAME ": " | 40 | #define PFX DRV_NAME ": " |
41 | #define DFX DRV_NAME "%d: " | 41 | #define DFX DRV_NAME "%d: " |
42 | 42 | ||
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index fe1b1031f7ab..507e26c1c29f 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
@@ -620,6 +620,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
620 | if (fnic->config.flags & VFCF_FIP_CAPABLE) { | 620 | if (fnic->config.flags & VFCF_FIP_CAPABLE) { |
621 | shost_printk(KERN_INFO, fnic->lport->host, | 621 | shost_printk(KERN_INFO, fnic->lport->host, |
622 | "firmware supports FIP\n"); | 622 | "firmware supports FIP\n"); |
623 | /* enable directed and multicast */ | ||
624 | vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); | ||
623 | vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); | 625 | vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); |
624 | vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); | 626 | vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); |
625 | } else { | 627 | } else { |
@@ -698,6 +700,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev, | |||
698 | goto err_out_remove_scsi_host; | 700 | goto err_out_remove_scsi_host; |
699 | } | 701 | } |
700 | 702 | ||
703 | fc_lport_init_stats(lp); | ||
704 | |||
701 | fc_lport_config(lp); | 705 | fc_lport_config(lp); |
702 | 706 | ||
703 | if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + | 707 | if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + |
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h index d62b9061bf12..7c9ccbd4134b 100644 --- a/drivers/scsi/fnic/vnic_devcmd.h +++ b/drivers/scsi/fnic/vnic_devcmd.h | |||
@@ -94,7 +94,7 @@ enum vnic_devcmd_cmd { | |||
94 | CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), | 94 | CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), |
95 | 95 | ||
96 | /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ | 96 | /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ |
97 | CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), | 97 | CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7), |
98 | 98 | ||
99 | /* hang detection notification */ | 99 | /* hang detection notification */ |
100 | CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), | 100 | CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 9e8fce0f0c1b..ba3c94c9c25f 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -140,40 +140,40 @@ | |||
140 | #include "gdth.h" | 140 | #include "gdth.h" |
141 | 141 | ||
142 | static void gdth_delay(int milliseconds); | 142 | static void gdth_delay(int milliseconds); |
143 | static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs); | 143 | static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs); |
144 | static irqreturn_t gdth_interrupt(int irq, void *dev_id); | 144 | static irqreturn_t gdth_interrupt(int irq, void *dev_id); |
145 | static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, | 145 | static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, |
146 | int gdth_from_wait, int* pIndex); | 146 | int gdth_from_wait, int* pIndex); |
147 | static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, | 147 | static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, |
148 | Scsi_Cmnd *scp); | 148 | Scsi_Cmnd *scp); |
149 | static int gdth_async_event(gdth_ha_str *ha); | 149 | static int gdth_async_event(gdth_ha_str *ha); |
150 | static void gdth_log_event(gdth_evt_data *dvr, char *buffer); | 150 | static void gdth_log_event(gdth_evt_data *dvr, char *buffer); |
151 | 151 | ||
152 | static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority); | 152 | static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority); |
153 | static void gdth_next(gdth_ha_str *ha); | 153 | static void gdth_next(gdth_ha_str *ha); |
154 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b); | 154 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b); |
155 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); | 155 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); |
156 | static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source, | 156 | static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, |
157 | ushort idx, gdth_evt_data *evt); | 157 | u16 idx, gdth_evt_data *evt); |
158 | static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr); | 158 | static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr); |
159 | static void gdth_readapp_event(gdth_ha_str *ha, unchar application, | 159 | static void gdth_readapp_event(gdth_ha_str *ha, u8 application, |
160 | gdth_evt_str *estr); | 160 | gdth_evt_str *estr); |
161 | static void gdth_clear_events(void); | 161 | static void gdth_clear_events(void); |
162 | 162 | ||
163 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 163 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
164 | char *buffer, ushort count); | 164 | char *buffer, u16 count); |
165 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); | 165 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); |
166 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); | 166 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive); |
167 | 167 | ||
168 | static void gdth_enable_int(gdth_ha_str *ha); | 168 | static void gdth_enable_int(gdth_ha_str *ha); |
169 | static int gdth_test_busy(gdth_ha_str *ha); | 169 | static int gdth_test_busy(gdth_ha_str *ha); |
170 | static int gdth_get_cmd_index(gdth_ha_str *ha); | 170 | static int gdth_get_cmd_index(gdth_ha_str *ha); |
171 | static void gdth_release_event(gdth_ha_str *ha); | 171 | static void gdth_release_event(gdth_ha_str *ha); |
172 | static int gdth_wait(gdth_ha_str *ha, int index,ulong32 time); | 172 | static int gdth_wait(gdth_ha_str *ha, int index,u32 time); |
173 | static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode, | 173 | static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode, |
174 | ulong32 p1, ulong64 p2,ulong64 p3); | 174 | u32 p1, u64 p2,u64 p3); |
175 | static int gdth_search_drives(gdth_ha_str *ha); | 175 | static int gdth_search_drives(gdth_ha_str *ha); |
176 | static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive); | 176 | static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive); |
177 | 177 | ||
178 | static const char *gdth_ctr_name(gdth_ha_str *ha); | 178 | static const char *gdth_ctr_name(gdth_ha_str *ha); |
179 | 179 | ||
@@ -189,7 +189,7 @@ static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | |||
189 | static void gdth_scsi_done(struct scsi_cmnd *scp); | 189 | static void gdth_scsi_done(struct scsi_cmnd *scp); |
190 | 190 | ||
191 | #ifdef DEBUG_GDTH | 191 | #ifdef DEBUG_GDTH |
192 | static unchar DebugState = DEBUG_GDTH; | 192 | static u8 DebugState = DEBUG_GDTH; |
193 | 193 | ||
194 | #ifdef __SERIAL__ | 194 | #ifdef __SERIAL__ |
195 | #define MAX_SERBUF 160 | 195 | #define MAX_SERBUF 160 |
@@ -270,30 +270,30 @@ static int ser_printk(const char *fmt, ...) | |||
270 | #endif | 270 | #endif |
271 | 271 | ||
272 | #ifdef GDTH_STATISTICS | 272 | #ifdef GDTH_STATISTICS |
273 | static ulong32 max_rq=0, max_index=0, max_sg=0; | 273 | static u32 max_rq=0, max_index=0, max_sg=0; |
274 | #ifdef INT_COAL | 274 | #ifdef INT_COAL |
275 | static ulong32 max_int_coal=0; | 275 | static u32 max_int_coal=0; |
276 | #endif | 276 | #endif |
277 | static ulong32 act_ints=0, act_ios=0, act_stats=0, act_rq=0; | 277 | static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0; |
278 | static struct timer_list gdth_timer; | 278 | static struct timer_list gdth_timer; |
279 | #endif | 279 | #endif |
280 | 280 | ||
281 | #define PTR2USHORT(a) (ushort)(ulong)(a) | 281 | #define PTR2USHORT(a) (u16)(unsigned long)(a) |
282 | #define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b) | 282 | #define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b) |
283 | #define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t)) | 283 | #define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t)) |
284 | 284 | ||
285 | #define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b)) | 285 | #define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b)) |
286 | 286 | ||
287 | #ifdef CONFIG_ISA | 287 | #ifdef CONFIG_ISA |
288 | static unchar gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */ | 288 | static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */ |
289 | #endif | 289 | #endif |
290 | #if defined(CONFIG_EISA) || defined(CONFIG_ISA) | 290 | #if defined(CONFIG_EISA) || defined(CONFIG_ISA) |
291 | static unchar gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */ | 291 | static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */ |
292 | #endif | 292 | #endif |
293 | static unchar gdth_polling; /* polling if TRUE */ | 293 | static u8 gdth_polling; /* polling if TRUE */ |
294 | static int gdth_ctr_count = 0; /* controller count */ | 294 | static int gdth_ctr_count = 0; /* controller count */ |
295 | static LIST_HEAD(gdth_instances); /* controller list */ | 295 | static LIST_HEAD(gdth_instances); /* controller list */ |
296 | static unchar gdth_write_through = FALSE; /* write through */ | 296 | static u8 gdth_write_through = FALSE; /* write through */ |
297 | static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */ | 297 | static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */ |
298 | static int elastidx; | 298 | static int elastidx; |
299 | static int eoldidx; | 299 | static int eoldidx; |
@@ -303,7 +303,7 @@ static int major; | |||
303 | #define DOU 2 /* OUT data direction */ | 303 | #define DOU 2 /* OUT data direction */ |
304 | #define DNO DIN /* no data transfer */ | 304 | #define DNO DIN /* no data transfer */ |
305 | #define DUN DIN /* unknown data direction */ | 305 | #define DUN DIN /* unknown data direction */ |
306 | static unchar gdth_direction_tab[0x100] = { | 306 | static u8 gdth_direction_tab[0x100] = { |
307 | DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN, | 307 | DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN, |
308 | DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN, | 308 | DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN, |
309 | DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU, | 309 | DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU, |
@@ -390,7 +390,7 @@ static gdth_ha_str *gdth_find_ha(int hanum) | |||
390 | static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha) | 390 | static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha) |
391 | { | 391 | { |
392 | struct gdth_cmndinfo *priv = NULL; | 392 | struct gdth_cmndinfo *priv = NULL; |
393 | ulong flags; | 393 | unsigned long flags; |
394 | int i; | 394 | int i; |
395 | 395 | ||
396 | spin_lock_irqsave(&ha->smp_lock, flags); | 396 | spin_lock_irqsave(&ha->smp_lock, flags); |
@@ -493,7 +493,7 @@ int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd, | |||
493 | return rval; | 493 | return rval; |
494 | } | 494 | } |
495 | 495 | ||
496 | static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs) | 496 | static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs) |
497 | { | 497 | { |
498 | *cyls = size /HEADS/SECS; | 498 | *cyls = size /HEADS/SECS; |
499 | if (*cyls <= MAXCYLS) { | 499 | if (*cyls <= MAXCYLS) { |
@@ -514,9 +514,9 @@ static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs | |||
514 | 514 | ||
515 | /* controller search and initialization functions */ | 515 | /* controller search and initialization functions */ |
516 | #ifdef CONFIG_EISA | 516 | #ifdef CONFIG_EISA |
517 | static int __init gdth_search_eisa(ushort eisa_adr) | 517 | static int __init gdth_search_eisa(u16 eisa_adr) |
518 | { | 518 | { |
519 | ulong32 id; | 519 | u32 id; |
520 | 520 | ||
521 | TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr)); | 521 | TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr)); |
522 | id = inl(eisa_adr+ID0REG); | 522 | id = inl(eisa_adr+ID0REG); |
@@ -533,13 +533,13 @@ static int __init gdth_search_eisa(ushort eisa_adr) | |||
533 | #endif /* CONFIG_EISA */ | 533 | #endif /* CONFIG_EISA */ |
534 | 534 | ||
535 | #ifdef CONFIG_ISA | 535 | #ifdef CONFIG_ISA |
536 | static int __init gdth_search_isa(ulong32 bios_adr) | 536 | static int __init gdth_search_isa(u32 bios_adr) |
537 | { | 537 | { |
538 | void __iomem *addr; | 538 | void __iomem *addr; |
539 | ulong32 id; | 539 | u32 id; |
540 | 540 | ||
541 | TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr)); | 541 | TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr)); |
542 | if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(ulong32))) != NULL) { | 542 | if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) { |
543 | id = readl(addr); | 543 | id = readl(addr); |
544 | iounmap(addr); | 544 | iounmap(addr); |
545 | if (id == GDT2_ID) /* GDT2000 */ | 545 | if (id == GDT2_ID) /* GDT2000 */ |
@@ -551,7 +551,7 @@ static int __init gdth_search_isa(ulong32 bios_adr) | |||
551 | 551 | ||
552 | #ifdef CONFIG_PCI | 552 | #ifdef CONFIG_PCI |
553 | 553 | ||
554 | static bool gdth_search_vortex(ushort device) | 554 | static bool gdth_search_vortex(u16 device) |
555 | { | 555 | { |
556 | if (device <= PCI_DEVICE_ID_VORTEX_GDT6555) | 556 | if (device <= PCI_DEVICE_ID_VORTEX_GDT6555) |
557 | return true; | 557 | return true; |
@@ -603,9 +603,9 @@ static void __devexit gdth_pci_remove_one(struct pci_dev *pdev) | |||
603 | static int __devinit gdth_pci_init_one(struct pci_dev *pdev, | 603 | static int __devinit gdth_pci_init_one(struct pci_dev *pdev, |
604 | const struct pci_device_id *ent) | 604 | const struct pci_device_id *ent) |
605 | { | 605 | { |
606 | ushort vendor = pdev->vendor; | 606 | u16 vendor = pdev->vendor; |
607 | ushort device = pdev->device; | 607 | u16 device = pdev->device; |
608 | ulong base0, base1, base2; | 608 | unsigned long base0, base1, base2; |
609 | int rc; | 609 | int rc; |
610 | gdth_pci_str gdth_pcistr; | 610 | gdth_pci_str gdth_pcistr; |
611 | gdth_ha_str *ha = NULL; | 611 | gdth_ha_str *ha = NULL; |
@@ -658,10 +658,10 @@ static int __devinit gdth_pci_init_one(struct pci_dev *pdev, | |||
658 | #endif /* CONFIG_PCI */ | 658 | #endif /* CONFIG_PCI */ |
659 | 659 | ||
660 | #ifdef CONFIG_EISA | 660 | #ifdef CONFIG_EISA |
661 | static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha) | 661 | static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha) |
662 | { | 662 | { |
663 | ulong32 retries,id; | 663 | u32 retries,id; |
664 | unchar prot_ver,eisacf,i,irq_found; | 664 | u8 prot_ver,eisacf,i,irq_found; |
665 | 665 | ||
666 | TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr)); | 666 | TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr)); |
667 | 667 | ||
@@ -688,7 +688,7 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha) | |||
688 | return 0; | 688 | return 0; |
689 | } | 689 | } |
690 | ha->bmic = eisa_adr; | 690 | ha->bmic = eisa_adr; |
691 | ha->brd_phys = (ulong32)eisa_adr >> 12; | 691 | ha->brd_phys = (u32)eisa_adr >> 12; |
692 | 692 | ||
693 | outl(0,eisa_adr+MAILBOXREG); | 693 | outl(0,eisa_adr+MAILBOXREG); |
694 | outl(0,eisa_adr+MAILBOXREG+4); | 694 | outl(0,eisa_adr+MAILBOXREG+4); |
@@ -752,12 +752,12 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha) | |||
752 | #endif /* CONFIG_EISA */ | 752 | #endif /* CONFIG_EISA */ |
753 | 753 | ||
754 | #ifdef CONFIG_ISA | 754 | #ifdef CONFIG_ISA |
755 | static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha) | 755 | static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha) |
756 | { | 756 | { |
757 | register gdt2_dpram_str __iomem *dp2_ptr; | 757 | register gdt2_dpram_str __iomem *dp2_ptr; |
758 | int i; | 758 | int i; |
759 | unchar irq_drq,prot_ver; | 759 | u8 irq_drq,prot_ver; |
760 | ulong32 retries; | 760 | u32 retries; |
761 | 761 | ||
762 | TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr)); | 762 | TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr)); |
763 | 763 | ||
@@ -812,7 +812,7 @@ static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha) | |||
812 | } | 812 | } |
813 | gdth_delay(1); | 813 | gdth_delay(1); |
814 | } | 814 | } |
815 | prot_ver = (unchar)readl(&dp2_ptr->u.ic.S_Info[0]); | 815 | prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]); |
816 | writeb(0, &dp2_ptr->u.ic.Status); | 816 | writeb(0, &dp2_ptr->u.ic.Status); |
817 | writeb(0xff, &dp2_ptr->io.irqdel); | 817 | writeb(0xff, &dp2_ptr->io.irqdel); |
818 | if (prot_ver != PROTOCOL_VERSION) { | 818 | if (prot_ver != PROTOCOL_VERSION) { |
@@ -859,9 +859,9 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
859 | register gdt6_dpram_str __iomem *dp6_ptr; | 859 | register gdt6_dpram_str __iomem *dp6_ptr; |
860 | register gdt6c_dpram_str __iomem *dp6c_ptr; | 860 | register gdt6c_dpram_str __iomem *dp6c_ptr; |
861 | register gdt6m_dpram_str __iomem *dp6m_ptr; | 861 | register gdt6m_dpram_str __iomem *dp6m_ptr; |
862 | ulong32 retries; | 862 | u32 retries; |
863 | unchar prot_ver; | 863 | u8 prot_ver; |
864 | ushort command; | 864 | u16 command; |
865 | int i, found = FALSE; | 865 | int i, found = FALSE; |
866 | 866 | ||
867 | TRACE(("gdth_init_pci()\n")); | 867 | TRACE(("gdth_init_pci()\n")); |
@@ -871,7 +871,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
871 | else | 871 | else |
872 | ha->oem_id = OEM_ID_ICP; | 872 | ha->oem_id = OEM_ID_ICP; |
873 | ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8); | 873 | ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8); |
874 | ha->stype = (ulong32)pdev->device; | 874 | ha->stype = (u32)pdev->device; |
875 | ha->irq = pdev->irq; | 875 | ha->irq = pdev->irq; |
876 | ha->pdev = pdev; | 876 | ha->pdev = pdev; |
877 | 877 | ||
@@ -891,7 +891,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
891 | found = FALSE; | 891 | found = FALSE; |
892 | for (i = 0xC8000; i < 0xE8000; i += 0x4000) { | 892 | for (i = 0xC8000; i < 0xE8000; i += 0x4000) { |
893 | iounmap(ha->brd); | 893 | iounmap(ha->brd); |
894 | ha->brd = ioremap(i, sizeof(ushort)); | 894 | ha->brd = ioremap(i, sizeof(u16)); |
895 | if (ha->brd == NULL) { | 895 | if (ha->brd == NULL) { |
896 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); | 896 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); |
897 | return 0; | 897 | return 0; |
@@ -947,7 +947,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
947 | } | 947 | } |
948 | gdth_delay(1); | 948 | gdth_delay(1); |
949 | } | 949 | } |
950 | prot_ver = (unchar)readl(&dp6_ptr->u.ic.S_Info[0]); | 950 | prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]); |
951 | writeb(0, &dp6_ptr->u.ic.S_Status); | 951 | writeb(0, &dp6_ptr->u.ic.S_Status); |
952 | writeb(0xff, &dp6_ptr->io.irqdel); | 952 | writeb(0xff, &dp6_ptr->io.irqdel); |
953 | if (prot_ver != PROTOCOL_VERSION) { | 953 | if (prot_ver != PROTOCOL_VERSION) { |
@@ -1000,7 +1000,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
1000 | found = FALSE; | 1000 | found = FALSE; |
1001 | for (i = 0xC8000; i < 0xE8000; i += 0x4000) { | 1001 | for (i = 0xC8000; i < 0xE8000; i += 0x4000) { |
1002 | iounmap(ha->brd); | 1002 | iounmap(ha->brd); |
1003 | ha->brd = ioremap(i, sizeof(ushort)); | 1003 | ha->brd = ioremap(i, sizeof(u16)); |
1004 | if (ha->brd == NULL) { | 1004 | if (ha->brd == NULL) { |
1005 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); | 1005 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); |
1006 | return 0; | 1006 | return 0; |
@@ -1059,7 +1059,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
1059 | } | 1059 | } |
1060 | gdth_delay(1); | 1060 | gdth_delay(1); |
1061 | } | 1061 | } |
1062 | prot_ver = (unchar)readl(&dp6c_ptr->u.ic.S_Info[0]); | 1062 | prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]); |
1063 | writeb(0, &dp6c_ptr->u.ic.Status); | 1063 | writeb(0, &dp6c_ptr->u.ic.Status); |
1064 | if (prot_ver != PROTOCOL_VERSION) { | 1064 | if (prot_ver != PROTOCOL_VERSION) { |
1065 | printk("GDT-PCI: Illegal protocol version\n"); | 1065 | printk("GDT-PCI: Illegal protocol version\n"); |
@@ -1128,7 +1128,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
1128 | found = FALSE; | 1128 | found = FALSE; |
1129 | for (i = 0xC8000; i < 0xE8000; i += 0x4000) { | 1129 | for (i = 0xC8000; i < 0xE8000; i += 0x4000) { |
1130 | iounmap(ha->brd); | 1130 | iounmap(ha->brd); |
1131 | ha->brd = ioremap(i, sizeof(ushort)); | 1131 | ha->brd = ioremap(i, sizeof(u16)); |
1132 | if (ha->brd == NULL) { | 1132 | if (ha->brd == NULL) { |
1133 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); | 1133 | printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); |
1134 | return 0; | 1134 | return 0; |
@@ -1180,7 +1180,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
1180 | } | 1180 | } |
1181 | gdth_delay(1); | 1181 | gdth_delay(1); |
1182 | } | 1182 | } |
1183 | prot_ver = (unchar)readl(&dp6m_ptr->u.ic.S_Info[0]); | 1183 | prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]); |
1184 | writeb(0, &dp6m_ptr->u.ic.S_Status); | 1184 | writeb(0, &dp6m_ptr->u.ic.S_Status); |
1185 | if (prot_ver != PROTOCOL_VERSION) { | 1185 | if (prot_ver != PROTOCOL_VERSION) { |
1186 | printk("GDT-PCI: Illegal protocol version\n"); | 1186 | printk("GDT-PCI: Illegal protocol version\n"); |
@@ -1223,7 +1223,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
1223 | } | 1223 | } |
1224 | gdth_delay(1); | 1224 | gdth_delay(1); |
1225 | } | 1225 | } |
1226 | prot_ver = (unchar)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16); | 1226 | prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16); |
1227 | writeb(0, &dp6m_ptr->u.ic.S_Status); | 1227 | writeb(0, &dp6m_ptr->u.ic.S_Status); |
1228 | if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */ | 1228 | if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */ |
1229 | ha->dma64_support = 0; | 1229 | ha->dma64_support = 0; |
@@ -1239,7 +1239,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, | |||
1239 | 1239 | ||
1240 | static void __devinit gdth_enable_int(gdth_ha_str *ha) | 1240 | static void __devinit gdth_enable_int(gdth_ha_str *ha) |
1241 | { | 1241 | { |
1242 | ulong flags; | 1242 | unsigned long flags; |
1243 | gdt2_dpram_str __iomem *dp2_ptr; | 1243 | gdt2_dpram_str __iomem *dp2_ptr; |
1244 | gdt6_dpram_str __iomem *dp6_ptr; | 1244 | gdt6_dpram_str __iomem *dp6_ptr; |
1245 | gdt6m_dpram_str __iomem *dp6m_ptr; | 1245 | gdt6m_dpram_str __iomem *dp6m_ptr; |
@@ -1274,14 +1274,14 @@ static void __devinit gdth_enable_int(gdth_ha_str *ha) | |||
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | /* return IStatus if interrupt was from this card else 0 */ | 1276 | /* return IStatus if interrupt was from this card else 0 */ |
1277 | static unchar gdth_get_status(gdth_ha_str *ha) | 1277 | static u8 gdth_get_status(gdth_ha_str *ha) |
1278 | { | 1278 | { |
1279 | unchar IStatus = 0; | 1279 | u8 IStatus = 0; |
1280 | 1280 | ||
1281 | TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count)); | 1281 | TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count)); |
1282 | 1282 | ||
1283 | if (ha->type == GDT_EISA) | 1283 | if (ha->type == GDT_EISA) |
1284 | IStatus = inb((ushort)ha->bmic + EDOORREG); | 1284 | IStatus = inb((u16)ha->bmic + EDOORREG); |
1285 | else if (ha->type == GDT_ISA) | 1285 | else if (ha->type == GDT_ISA) |
1286 | IStatus = | 1286 | IStatus = |
1287 | readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index); | 1287 | readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index); |
@@ -1329,7 +1329,7 @@ static int gdth_get_cmd_index(gdth_ha_str *ha) | |||
1329 | if (ha->cmd_tab[i].cmnd == UNUSED_CMND) { | 1329 | if (ha->cmd_tab[i].cmnd == UNUSED_CMND) { |
1330 | ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer; | 1330 | ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer; |
1331 | ha->cmd_tab[i].service = ha->pccb->Service; | 1331 | ha->cmd_tab[i].service = ha->pccb->Service; |
1332 | ha->pccb->CommandIndex = (ulong32)i+2; | 1332 | ha->pccb->CommandIndex = (u32)i+2; |
1333 | return (i+2); | 1333 | return (i+2); |
1334 | } | 1334 | } |
1335 | } | 1335 | } |
@@ -1362,7 +1362,7 @@ static void gdth_copy_command(gdth_ha_str *ha) | |||
1362 | register gdt6c_dpram_str __iomem *dp6c_ptr; | 1362 | register gdt6c_dpram_str __iomem *dp6c_ptr; |
1363 | gdt6_dpram_str __iomem *dp6_ptr; | 1363 | gdt6_dpram_str __iomem *dp6_ptr; |
1364 | gdt2_dpram_str __iomem *dp2_ptr; | 1364 | gdt2_dpram_str __iomem *dp2_ptr; |
1365 | ushort cp_count,dp_offset,cmd_no; | 1365 | u16 cp_count,dp_offset,cmd_no; |
1366 | 1366 | ||
1367 | TRACE(("gdth_copy_command() hanum %d\n", ha->hanum)); | 1367 | TRACE(("gdth_copy_command() hanum %d\n", ha->hanum)); |
1368 | 1368 | ||
@@ -1386,28 +1386,28 @@ static void gdth_copy_command(gdth_ha_str *ha) | |||
1386 | dp2_ptr = ha->brd; | 1386 | dp2_ptr = ha->brd; |
1387 | writew(dp_offset + DPMEM_COMMAND_OFFSET, | 1387 | writew(dp_offset + DPMEM_COMMAND_OFFSET, |
1388 | &dp2_ptr->u.ic.comm_queue[cmd_no].offset); | 1388 | &dp2_ptr->u.ic.comm_queue[cmd_no].offset); |
1389 | writew((ushort)cmd_ptr->Service, | 1389 | writew((u16)cmd_ptr->Service, |
1390 | &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id); | 1390 | &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id); |
1391 | memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); | 1391 | memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); |
1392 | } else if (ha->type == GDT_PCI) { | 1392 | } else if (ha->type == GDT_PCI) { |
1393 | dp6_ptr = ha->brd; | 1393 | dp6_ptr = ha->brd; |
1394 | writew(dp_offset + DPMEM_COMMAND_OFFSET, | 1394 | writew(dp_offset + DPMEM_COMMAND_OFFSET, |
1395 | &dp6_ptr->u.ic.comm_queue[cmd_no].offset); | 1395 | &dp6_ptr->u.ic.comm_queue[cmd_no].offset); |
1396 | writew((ushort)cmd_ptr->Service, | 1396 | writew((u16)cmd_ptr->Service, |
1397 | &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id); | 1397 | &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id); |
1398 | memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); | 1398 | memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); |
1399 | } else if (ha->type == GDT_PCINEW) { | 1399 | } else if (ha->type == GDT_PCINEW) { |
1400 | dp6c_ptr = ha->brd; | 1400 | dp6c_ptr = ha->brd; |
1401 | writew(dp_offset + DPMEM_COMMAND_OFFSET, | 1401 | writew(dp_offset + DPMEM_COMMAND_OFFSET, |
1402 | &dp6c_ptr->u.ic.comm_queue[cmd_no].offset); | 1402 | &dp6c_ptr->u.ic.comm_queue[cmd_no].offset); |
1403 | writew((ushort)cmd_ptr->Service, | 1403 | writew((u16)cmd_ptr->Service, |
1404 | &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id); | 1404 | &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id); |
1405 | memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); | 1405 | memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); |
1406 | } else if (ha->type == GDT_PCIMPR) { | 1406 | } else if (ha->type == GDT_PCIMPR) { |
1407 | dp6m_ptr = ha->brd; | 1407 | dp6m_ptr = ha->brd; |
1408 | writew(dp_offset + DPMEM_COMMAND_OFFSET, | 1408 | writew(dp_offset + DPMEM_COMMAND_OFFSET, |
1409 | &dp6m_ptr->u.ic.comm_queue[cmd_no].offset); | 1409 | &dp6m_ptr->u.ic.comm_queue[cmd_no].offset); |
1410 | writew((ushort)cmd_ptr->Service, | 1410 | writew((u16)cmd_ptr->Service, |
1411 | &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id); | 1411 | &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id); |
1412 | memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); | 1412 | memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); |
1413 | } | 1413 | } |
@@ -1420,14 +1420,14 @@ static void gdth_release_event(gdth_ha_str *ha) | |||
1420 | 1420 | ||
1421 | #ifdef GDTH_STATISTICS | 1421 | #ifdef GDTH_STATISTICS |
1422 | { | 1422 | { |
1423 | ulong32 i,j; | 1423 | u32 i,j; |
1424 | for (i=0,j=0; j<GDTH_MAXCMDS; ++j) { | 1424 | for (i=0,j=0; j<GDTH_MAXCMDS; ++j) { |
1425 | if (ha->cmd_tab[j].cmnd != UNUSED_CMND) | 1425 | if (ha->cmd_tab[j].cmnd != UNUSED_CMND) |
1426 | ++i; | 1426 | ++i; |
1427 | } | 1427 | } |
1428 | if (max_index < i) { | 1428 | if (max_index < i) { |
1429 | max_index = i; | 1429 | max_index = i; |
1430 | TRACE3(("GDT: max_index = %d\n",(ushort)i)); | 1430 | TRACE3(("GDT: max_index = %d\n",(u16)i)); |
1431 | } | 1431 | } |
1432 | } | 1432 | } |
1433 | #endif | 1433 | #endif |
@@ -1450,7 +1450,7 @@ static void gdth_release_event(gdth_ha_str *ha) | |||
1450 | } | 1450 | } |
1451 | } | 1451 | } |
1452 | 1452 | ||
1453 | static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time) | 1453 | static int gdth_wait(gdth_ha_str *ha, int index, u32 time) |
1454 | { | 1454 | { |
1455 | int answer_found = FALSE; | 1455 | int answer_found = FALSE; |
1456 | int wait_index = 0; | 1456 | int wait_index = 0; |
@@ -1476,8 +1476,8 @@ static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time) | |||
1476 | } | 1476 | } |
1477 | 1477 | ||
1478 | 1478 | ||
1479 | static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode, | 1479 | static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode, |
1480 | ulong32 p1, ulong64 p2, ulong64 p3) | 1480 | u32 p1, u64 p2, u64 p3) |
1481 | { | 1481 | { |
1482 | register gdth_cmd_str *cmd_ptr; | 1482 | register gdth_cmd_str *cmd_ptr; |
1483 | int retries,index; | 1483 | int retries,index; |
@@ -1501,35 +1501,35 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode, | |||
1501 | if (service == CACHESERVICE) { | 1501 | if (service == CACHESERVICE) { |
1502 | if (opcode == GDT_IOCTL) { | 1502 | if (opcode == GDT_IOCTL) { |
1503 | cmd_ptr->u.ioctl.subfunc = p1; | 1503 | cmd_ptr->u.ioctl.subfunc = p1; |
1504 | cmd_ptr->u.ioctl.channel = (ulong32)p2; | 1504 | cmd_ptr->u.ioctl.channel = (u32)p2; |
1505 | cmd_ptr->u.ioctl.param_size = (ushort)p3; | 1505 | cmd_ptr->u.ioctl.param_size = (u16)p3; |
1506 | cmd_ptr->u.ioctl.p_param = ha->scratch_phys; | 1506 | cmd_ptr->u.ioctl.p_param = ha->scratch_phys; |
1507 | } else { | 1507 | } else { |
1508 | if (ha->cache_feat & GDT_64BIT) { | 1508 | if (ha->cache_feat & GDT_64BIT) { |
1509 | cmd_ptr->u.cache64.DeviceNo = (ushort)p1; | 1509 | cmd_ptr->u.cache64.DeviceNo = (u16)p1; |
1510 | cmd_ptr->u.cache64.BlockNo = p2; | 1510 | cmd_ptr->u.cache64.BlockNo = p2; |
1511 | } else { | 1511 | } else { |
1512 | cmd_ptr->u.cache.DeviceNo = (ushort)p1; | 1512 | cmd_ptr->u.cache.DeviceNo = (u16)p1; |
1513 | cmd_ptr->u.cache.BlockNo = (ulong32)p2; | 1513 | cmd_ptr->u.cache.BlockNo = (u32)p2; |
1514 | } | 1514 | } |
1515 | } | 1515 | } |
1516 | } else if (service == SCSIRAWSERVICE) { | 1516 | } else if (service == SCSIRAWSERVICE) { |
1517 | if (ha->raw_feat & GDT_64BIT) { | 1517 | if (ha->raw_feat & GDT_64BIT) { |
1518 | cmd_ptr->u.raw64.direction = p1; | 1518 | cmd_ptr->u.raw64.direction = p1; |
1519 | cmd_ptr->u.raw64.bus = (unchar)p2; | 1519 | cmd_ptr->u.raw64.bus = (u8)p2; |
1520 | cmd_ptr->u.raw64.target = (unchar)p3; | 1520 | cmd_ptr->u.raw64.target = (u8)p3; |
1521 | cmd_ptr->u.raw64.lun = (unchar)(p3 >> 8); | 1521 | cmd_ptr->u.raw64.lun = (u8)(p3 >> 8); |
1522 | } else { | 1522 | } else { |
1523 | cmd_ptr->u.raw.direction = p1; | 1523 | cmd_ptr->u.raw.direction = p1; |
1524 | cmd_ptr->u.raw.bus = (unchar)p2; | 1524 | cmd_ptr->u.raw.bus = (u8)p2; |
1525 | cmd_ptr->u.raw.target = (unchar)p3; | 1525 | cmd_ptr->u.raw.target = (u8)p3; |
1526 | cmd_ptr->u.raw.lun = (unchar)(p3 >> 8); | 1526 | cmd_ptr->u.raw.lun = (u8)(p3 >> 8); |
1527 | } | 1527 | } |
1528 | } else if (service == SCREENSERVICE) { | 1528 | } else if (service == SCREENSERVICE) { |
1529 | if (opcode == GDT_REALTIME) { | 1529 | if (opcode == GDT_REALTIME) { |
1530 | *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1; | 1530 | *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1; |
1531 | *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2; | 1531 | *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2; |
1532 | *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3; | 1532 | *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3; |
1533 | } | 1533 | } |
1534 | } | 1534 | } |
1535 | ha->cmd_len = sizeof(gdth_cmd_str); | 1535 | ha->cmd_len = sizeof(gdth_cmd_str); |
@@ -1555,9 +1555,9 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode, | |||
1555 | 1555 | ||
1556 | static int __devinit gdth_search_drives(gdth_ha_str *ha) | 1556 | static int __devinit gdth_search_drives(gdth_ha_str *ha) |
1557 | { | 1557 | { |
1558 | ushort cdev_cnt, i; | 1558 | u16 cdev_cnt, i; |
1559 | int ok; | 1559 | int ok; |
1560 | ulong32 bus_no, drv_cnt, drv_no, j; | 1560 | u32 bus_no, drv_cnt, drv_no, j; |
1561 | gdth_getch_str *chn; | 1561 | gdth_getch_str *chn; |
1562 | gdth_drlist_str *drl; | 1562 | gdth_drlist_str *drl; |
1563 | gdth_iochan_str *ioc; | 1563 | gdth_iochan_str *ioc; |
@@ -1570,8 +1570,8 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1570 | #endif | 1570 | #endif |
1571 | 1571 | ||
1572 | #ifdef GDTH_RTC | 1572 | #ifdef GDTH_RTC |
1573 | unchar rtc[12]; | 1573 | u8 rtc[12]; |
1574 | ulong flags; | 1574 | unsigned long flags; |
1575 | #endif | 1575 | #endif |
1576 | 1576 | ||
1577 | TRACE(("gdth_search_drives() hanum %d\n", ha->hanum)); | 1577 | TRACE(("gdth_search_drives() hanum %d\n", ha->hanum)); |
@@ -1584,7 +1584,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1584 | if (ok) | 1584 | if (ok) |
1585 | ha->screen_feat = GDT_64BIT; | 1585 | ha->screen_feat = GDT_64BIT; |
1586 | } | 1586 | } |
1587 | if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) | 1587 | if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC)) |
1588 | ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0); | 1588 | ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0); |
1589 | if (!ok) { | 1589 | if (!ok) { |
1590 | printk("GDT-HA %d: Initialization error screen service (code %d)\n", | 1590 | printk("GDT-HA %d: Initialization error screen service (code %d)\n", |
@@ -1609,11 +1609,11 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1609 | rtc[j] = CMOS_READ(j); | 1609 | rtc[j] = CMOS_READ(j); |
1610 | } while (rtc[0] != CMOS_READ(0)); | 1610 | } while (rtc[0] != CMOS_READ(0)); |
1611 | spin_unlock_irqrestore(&rtc_lock, flags); | 1611 | spin_unlock_irqrestore(&rtc_lock, flags); |
1612 | TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0], | 1612 | TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0], |
1613 | *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8])); | 1613 | *(u32 *)&rtc[4], *(u32 *)&rtc[8])); |
1614 | /* 3. send to controller firmware */ | 1614 | /* 3. send to controller firmware */ |
1615 | gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(ulong32 *)&rtc[0], | 1615 | gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0], |
1616 | *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]); | 1616 | *(u32 *)&rtc[4], *(u32 *)&rtc[8]); |
1617 | #endif | 1617 | #endif |
1618 | 1618 | ||
1619 | /* unfreeze all IOs */ | 1619 | /* unfreeze all IOs */ |
@@ -1627,7 +1627,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1627 | if (ok) | 1627 | if (ok) |
1628 | ha->cache_feat = GDT_64BIT; | 1628 | ha->cache_feat = GDT_64BIT; |
1629 | } | 1629 | } |
1630 | if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) | 1630 | if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC)) |
1631 | ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0); | 1631 | ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0); |
1632 | if (!ok) { | 1632 | if (!ok) { |
1633 | printk("GDT-HA %d: Initialization error cache service (code %d)\n", | 1633 | printk("GDT-HA %d: Initialization error cache service (code %d)\n", |
@@ -1635,7 +1635,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1635 | return 0; | 1635 | return 0; |
1636 | } | 1636 | } |
1637 | TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n")); | 1637 | TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n")); |
1638 | cdev_cnt = (ushort)ha->info; | 1638 | cdev_cnt = (u16)ha->info; |
1639 | ha->fw_vers = ha->service; | 1639 | ha->fw_vers = ha->service; |
1640 | 1640 | ||
1641 | #ifdef INT_COAL | 1641 | #ifdef INT_COAL |
@@ -1644,7 +1644,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1644 | pmod = (gdth_perf_modes *)ha->pscratch; | 1644 | pmod = (gdth_perf_modes *)ha->pscratch; |
1645 | pmod->version = 1; | 1645 | pmod->version = 1; |
1646 | pmod->st_mode = 1; /* enable one status buffer */ | 1646 | pmod->st_mode = 1; /* enable one status buffer */ |
1647 | *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys; | 1647 | *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys; |
1648 | pmod->st_buff_indx1 = COALINDEX; | 1648 | pmod->st_buff_indx1 = COALINDEX; |
1649 | pmod->st_buff_addr2 = 0; | 1649 | pmod->st_buff_addr2 = 0; |
1650 | pmod->st_buff_u_addr2 = 0; | 1650 | pmod->st_buff_u_addr2 = 0; |
@@ -1705,7 +1705,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1705 | else | 1705 | else |
1706 | ha->bus_id[bus_no] = 0xff; | 1706 | ha->bus_id[bus_no] = 0xff; |
1707 | } | 1707 | } |
1708 | ha->bus_cnt = (unchar)bus_no; | 1708 | ha->bus_cnt = (u8)bus_no; |
1709 | } | 1709 | } |
1710 | TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt)); | 1710 | TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt)); |
1711 | 1711 | ||
@@ -1789,12 +1789,12 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1789 | 1789 | ||
1790 | /* logical drives */ | 1790 | /* logical drives */ |
1791 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT, | 1791 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT, |
1792 | INVALID_CHANNEL,sizeof(ulong32))) { | 1792 | INVALID_CHANNEL,sizeof(u32))) { |
1793 | drv_cnt = *(ulong32 *)ha->pscratch; | 1793 | drv_cnt = *(u32 *)ha->pscratch; |
1794 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST, | 1794 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST, |
1795 | INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) { | 1795 | INVALID_CHANNEL,drv_cnt * sizeof(u32))) { |
1796 | for (j = 0; j < drv_cnt; ++j) { | 1796 | for (j = 0; j < drv_cnt; ++j) { |
1797 | drv_no = ((ulong32 *)ha->pscratch)[j]; | 1797 | drv_no = ((u32 *)ha->pscratch)[j]; |
1798 | if (drv_no < MAX_LDRIVES) { | 1798 | if (drv_no < MAX_LDRIVES) { |
1799 | ha->hdr[drv_no].is_logdrv = TRUE; | 1799 | ha->hdr[drv_no].is_logdrv = TRUE; |
1800 | TRACE2(("Drive %d is log. drive\n",drv_no)); | 1800 | TRACE2(("Drive %d is log. drive\n",drv_no)); |
@@ -1838,7 +1838,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1838 | if (ok) | 1838 | if (ok) |
1839 | ha->raw_feat = GDT_64BIT; | 1839 | ha->raw_feat = GDT_64BIT; |
1840 | } | 1840 | } |
1841 | if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) | 1841 | if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC)) |
1842 | ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0); | 1842 | ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0); |
1843 | if (!ok) { | 1843 | if (!ok) { |
1844 | printk("GDT-HA %d: Initialization error raw service (code %d)\n", | 1844 | printk("GDT-HA %d: Initialization error raw service (code %d)\n", |
@@ -1854,7 +1854,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1854 | if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) { | 1854 | if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) { |
1855 | TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n", | 1855 | TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n", |
1856 | ha->info)); | 1856 | ha->info)); |
1857 | ha->raw_feat |= (ushort)ha->info; | 1857 | ha->raw_feat |= (u16)ha->info; |
1858 | } | 1858 | } |
1859 | } | 1859 | } |
1860 | 1860 | ||
@@ -1865,7 +1865,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1865 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) { | 1865 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) { |
1866 | TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n", | 1866 | TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n", |
1867 | ha->info)); | 1867 | ha->info)); |
1868 | ha->cache_feat |= (ushort)ha->info; | 1868 | ha->cache_feat |= (u16)ha->info; |
1869 | } | 1869 | } |
1870 | } | 1870 | } |
1871 | 1871 | ||
@@ -1923,9 +1923,9 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha) | |||
1923 | return 1; | 1923 | return 1; |
1924 | } | 1924 | } |
1925 | 1925 | ||
1926 | static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive) | 1926 | static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive) |
1927 | { | 1927 | { |
1928 | ulong32 drv_cyls; | 1928 | u32 drv_cyls; |
1929 | int drv_hds, drv_secs; | 1929 | int drv_hds, drv_secs; |
1930 | 1930 | ||
1931 | TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive)); | 1931 | TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive)); |
@@ -1944,17 +1944,17 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive) | |||
1944 | } else { | 1944 | } else { |
1945 | drv_hds = ha->info2 & 0xff; | 1945 | drv_hds = ha->info2 & 0xff; |
1946 | drv_secs = (ha->info2 >> 8) & 0xff; | 1946 | drv_secs = (ha->info2 >> 8) & 0xff; |
1947 | drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs; | 1947 | drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs; |
1948 | } | 1948 | } |
1949 | ha->hdr[hdrive].heads = (unchar)drv_hds; | 1949 | ha->hdr[hdrive].heads = (u8)drv_hds; |
1950 | ha->hdr[hdrive].secs = (unchar)drv_secs; | 1950 | ha->hdr[hdrive].secs = (u8)drv_secs; |
1951 | /* round size */ | 1951 | /* round size */ |
1952 | ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs; | 1952 | ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs; |
1953 | 1953 | ||
1954 | if (ha->cache_feat & GDT_64BIT) { | 1954 | if (ha->cache_feat & GDT_64BIT) { |
1955 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0) | 1955 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0) |
1956 | && ha->info2 != 0) { | 1956 | && ha->info2 != 0) { |
1957 | ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info; | 1957 | ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info; |
1958 | } | 1958 | } |
1959 | } | 1959 | } |
1960 | TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n", | 1960 | TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n", |
@@ -1964,7 +1964,7 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive) | |||
1964 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) { | 1964 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) { |
1965 | TRACE2(("gdth_search_dr() cache drive %d devtype %d\n", | 1965 | TRACE2(("gdth_search_dr() cache drive %d devtype %d\n", |
1966 | hdrive,ha->info)); | 1966 | hdrive,ha->info)); |
1967 | ha->hdr[hdrive].devtype = (ushort)ha->info; | 1967 | ha->hdr[hdrive].devtype = (u16)ha->info; |
1968 | } | 1968 | } |
1969 | 1969 | ||
1970 | /* cluster info */ | 1970 | /* cluster info */ |
@@ -1972,14 +1972,14 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive) | |||
1972 | TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n", | 1972 | TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n", |
1973 | hdrive,ha->info)); | 1973 | hdrive,ha->info)); |
1974 | if (!shared_access) | 1974 | if (!shared_access) |
1975 | ha->hdr[hdrive].cluster_type = (unchar)ha->info; | 1975 | ha->hdr[hdrive].cluster_type = (u8)ha->info; |
1976 | } | 1976 | } |
1977 | 1977 | ||
1978 | /* R/W attributes */ | 1978 | /* R/W attributes */ |
1979 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) { | 1979 | if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) { |
1980 | TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n", | 1980 | TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n", |
1981 | hdrive,ha->info)); | 1981 | hdrive,ha->info)); |
1982 | ha->hdr[hdrive].rw_attribs = (unchar)ha->info; | 1982 | ha->hdr[hdrive].rw_attribs = (u8)ha->info; |
1983 | } | 1983 | } |
1984 | 1984 | ||
1985 | return 1; | 1985 | return 1; |
@@ -1988,12 +1988,12 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive) | |||
1988 | 1988 | ||
1989 | /* command queueing/sending functions */ | 1989 | /* command queueing/sending functions */ |
1990 | 1990 | ||
1991 | static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority) | 1991 | static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority) |
1992 | { | 1992 | { |
1993 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | 1993 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); |
1994 | register Scsi_Cmnd *pscp; | 1994 | register Scsi_Cmnd *pscp; |
1995 | register Scsi_Cmnd *nscp; | 1995 | register Scsi_Cmnd *nscp; |
1996 | ulong flags; | 1996 | unsigned long flags; |
1997 | 1997 | ||
1998 | TRACE(("gdth_putq() priority %d\n",priority)); | 1998 | TRACE(("gdth_putq() priority %d\n",priority)); |
1999 | spin_lock_irqsave(&ha->smp_lock, flags); | 1999 | spin_lock_irqsave(&ha->smp_lock, flags); |
@@ -2023,7 +2023,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority) | |||
2023 | ++flags; | 2023 | ++flags; |
2024 | if (max_rq < flags) { | 2024 | if (max_rq < flags) { |
2025 | max_rq = flags; | 2025 | max_rq = flags; |
2026 | TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq)); | 2026 | TRACE3(("GDT: max_rq = %d\n",(u16)max_rq)); |
2027 | } | 2027 | } |
2028 | #endif | 2028 | #endif |
2029 | } | 2029 | } |
@@ -2032,9 +2032,9 @@ static void gdth_next(gdth_ha_str *ha) | |||
2032 | { | 2032 | { |
2033 | register Scsi_Cmnd *pscp; | 2033 | register Scsi_Cmnd *pscp; |
2034 | register Scsi_Cmnd *nscp; | 2034 | register Scsi_Cmnd *nscp; |
2035 | unchar b, t, l, firsttime; | 2035 | u8 b, t, l, firsttime; |
2036 | unchar this_cmd, next_cmd; | 2036 | u8 this_cmd, next_cmd; |
2037 | ulong flags = 0; | 2037 | unsigned long flags = 0; |
2038 | int cmd_index; | 2038 | int cmd_index; |
2039 | 2039 | ||
2040 | TRACE(("gdth_next() hanum %d\n", ha->hanum)); | 2040 | TRACE(("gdth_next() hanum %d\n", ha->hanum)); |
@@ -2282,20 +2282,20 @@ static void gdth_next(gdth_ha_str *ha) | |||
2282 | * buffers, kmap_atomic() as needed. | 2282 | * buffers, kmap_atomic() as needed. |
2283 | */ | 2283 | */ |
2284 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 2284 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
2285 | char *buffer, ushort count) | 2285 | char *buffer, u16 count) |
2286 | { | 2286 | { |
2287 | ushort cpcount,i, max_sg = scsi_sg_count(scp); | 2287 | u16 cpcount,i, max_sg = scsi_sg_count(scp); |
2288 | ushort cpsum,cpnow; | 2288 | u16 cpsum,cpnow; |
2289 | struct scatterlist *sl; | 2289 | struct scatterlist *sl; |
2290 | char *address; | 2290 | char *address; |
2291 | 2291 | ||
2292 | cpcount = min_t(ushort, count, scsi_bufflen(scp)); | 2292 | cpcount = min_t(u16, count, scsi_bufflen(scp)); |
2293 | 2293 | ||
2294 | if (cpcount) { | 2294 | if (cpcount) { |
2295 | cpsum=0; | 2295 | cpsum=0; |
2296 | scsi_for_each_sg(scp, sl, max_sg, i) { | 2296 | scsi_for_each_sg(scp, sl, max_sg, i) { |
2297 | unsigned long flags; | 2297 | unsigned long flags; |
2298 | cpnow = (ushort)sl->length; | 2298 | cpnow = (u16)sl->length; |
2299 | TRACE(("copy_internal() now %d sum %d count %d %d\n", | 2299 | TRACE(("copy_internal() now %d sum %d count %d %d\n", |
2300 | cpnow, cpsum, cpcount, scsi_bufflen(scp))); | 2300 | cpnow, cpsum, cpcount, scsi_bufflen(scp))); |
2301 | if (cpsum+cpnow > cpcount) | 2301 | if (cpsum+cpnow > cpcount) |
@@ -2325,7 +2325,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | |||
2325 | 2325 | ||
2326 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | 2326 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) |
2327 | { | 2327 | { |
2328 | unchar t; | 2328 | u8 t; |
2329 | gdth_inq_data inq; | 2329 | gdth_inq_data inq; |
2330 | gdth_rdcap_data rdc; | 2330 | gdth_rdcap_data rdc; |
2331 | gdth_sense_data sd; | 2331 | gdth_sense_data sd; |
@@ -2389,7 +2389,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2389 | 2389 | ||
2390 | case READ_CAPACITY: | 2390 | case READ_CAPACITY: |
2391 | TRACE2(("Read capacity hdrive %d\n",t)); | 2391 | TRACE2(("Read capacity hdrive %d\n",t)); |
2392 | if (ha->hdr[t].size > (ulong64)0xffffffff) | 2392 | if (ha->hdr[t].size > (u64)0xffffffff) |
2393 | rdc.last_block_no = 0xffffffff; | 2393 | rdc.last_block_no = 0xffffffff; |
2394 | else | 2394 | else |
2395 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); | 2395 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); |
@@ -2425,12 +2425,12 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2425 | return 0; | 2425 | return 0; |
2426 | } | 2426 | } |
2427 | 2427 | ||
2428 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | 2428 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive) |
2429 | { | 2429 | { |
2430 | register gdth_cmd_str *cmdp; | 2430 | register gdth_cmd_str *cmdp; |
2431 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | 2431 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); |
2432 | ulong32 cnt, blockcnt; | 2432 | u32 cnt, blockcnt; |
2433 | ulong64 no, blockno; | 2433 | u64 no, blockno; |
2434 | int i, cmd_index, read_write, sgcnt, mode64; | 2434 | int i, cmd_index, read_write, sgcnt, mode64; |
2435 | 2435 | ||
2436 | cmdp = ha->pccb; | 2436 | cmdp = ha->pccb; |
@@ -2498,17 +2498,17 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2498 | 2498 | ||
2499 | if (read_write) { | 2499 | if (read_write) { |
2500 | if (scp->cmd_len == 16) { | 2500 | if (scp->cmd_len == 16) { |
2501 | memcpy(&no, &scp->cmnd[2], sizeof(ulong64)); | 2501 | memcpy(&no, &scp->cmnd[2], sizeof(u64)); |
2502 | blockno = be64_to_cpu(no); | 2502 | blockno = be64_to_cpu(no); |
2503 | memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32)); | 2503 | memcpy(&cnt, &scp->cmnd[10], sizeof(u32)); |
2504 | blockcnt = be32_to_cpu(cnt); | 2504 | blockcnt = be32_to_cpu(cnt); |
2505 | } else if (scp->cmd_len == 10) { | 2505 | } else if (scp->cmd_len == 10) { |
2506 | memcpy(&no, &scp->cmnd[2], sizeof(ulong32)); | 2506 | memcpy(&no, &scp->cmnd[2], sizeof(u32)); |
2507 | blockno = be32_to_cpu(no); | 2507 | blockno = be32_to_cpu(no); |
2508 | memcpy(&cnt, &scp->cmnd[7], sizeof(ushort)); | 2508 | memcpy(&cnt, &scp->cmnd[7], sizeof(u16)); |
2509 | blockcnt = be16_to_cpu(cnt); | 2509 | blockcnt = be16_to_cpu(cnt); |
2510 | } else { | 2510 | } else { |
2511 | memcpy(&no, &scp->cmnd[0], sizeof(ulong32)); | 2511 | memcpy(&no, &scp->cmnd[0], sizeof(u32)); |
2512 | blockno = be32_to_cpu(no) & 0x001fffffUL; | 2512 | blockno = be32_to_cpu(no) & 0x001fffffUL; |
2513 | blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4]; | 2513 | blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4]; |
2514 | } | 2514 | } |
@@ -2516,7 +2516,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2516 | cmdp->u.cache64.BlockNo = blockno; | 2516 | cmdp->u.cache64.BlockNo = blockno; |
2517 | cmdp->u.cache64.BlockCnt = blockcnt; | 2517 | cmdp->u.cache64.BlockCnt = blockcnt; |
2518 | } else { | 2518 | } else { |
2519 | cmdp->u.cache.BlockNo = (ulong32)blockno; | 2519 | cmdp->u.cache.BlockNo = (u32)blockno; |
2520 | cmdp->u.cache.BlockCnt = blockcnt; | 2520 | cmdp->u.cache.BlockCnt = blockcnt; |
2521 | } | 2521 | } |
2522 | 2522 | ||
@@ -2528,12 +2528,12 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2528 | if (mode64) { | 2528 | if (mode64) { |
2529 | struct scatterlist *sl; | 2529 | struct scatterlist *sl; |
2530 | 2530 | ||
2531 | cmdp->u.cache64.DestAddr= (ulong64)-1; | 2531 | cmdp->u.cache64.DestAddr= (u64)-1; |
2532 | cmdp->u.cache64.sg_canz = sgcnt; | 2532 | cmdp->u.cache64.sg_canz = sgcnt; |
2533 | scsi_for_each_sg(scp, sl, sgcnt, i) { | 2533 | scsi_for_each_sg(scp, sl, sgcnt, i) { |
2534 | cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl); | 2534 | cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl); |
2535 | #ifdef GDTH_DMA_STATISTICS | 2535 | #ifdef GDTH_DMA_STATISTICS |
2536 | if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff) | 2536 | if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff) |
2537 | ha->dma64_cnt++; | 2537 | ha->dma64_cnt++; |
2538 | else | 2538 | else |
2539 | ha->dma32_cnt++; | 2539 | ha->dma32_cnt++; |
@@ -2555,8 +2555,8 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2555 | } | 2555 | } |
2556 | 2556 | ||
2557 | #ifdef GDTH_STATISTICS | 2557 | #ifdef GDTH_STATISTICS |
2558 | if (max_sg < (ulong32)sgcnt) { | 2558 | if (max_sg < (u32)sgcnt) { |
2559 | max_sg = (ulong32)sgcnt; | 2559 | max_sg = (u32)sgcnt; |
2560 | TRACE3(("GDT: max_sg = %d\n",max_sg)); | 2560 | TRACE3(("GDT: max_sg = %d\n",max_sg)); |
2561 | } | 2561 | } |
2562 | #endif | 2562 | #endif |
@@ -2572,7 +2572,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2572 | TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", | 2572 | TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", |
2573 | cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt)); | 2573 | cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt)); |
2574 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + | 2574 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + |
2575 | (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str); | 2575 | (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str); |
2576 | } else { | 2576 | } else { |
2577 | TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", | 2577 | TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", |
2578 | cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz, | 2578 | cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz, |
@@ -2581,7 +2581,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2581 | TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", | 2581 | TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", |
2582 | cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt)); | 2582 | cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt)); |
2583 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + | 2583 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + |
2584 | (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str); | 2584 | (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str); |
2585 | } | 2585 | } |
2586 | if (ha->cmd_len & 3) | 2586 | if (ha->cmd_len & 3) |
2587 | ha->cmd_len += (4 - (ha->cmd_len & 3)); | 2587 | ha->cmd_len += (4 - (ha->cmd_len & 3)); |
@@ -2600,15 +2600,15 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) | |||
2600 | return cmd_index; | 2600 | return cmd_index; |
2601 | } | 2601 | } |
2602 | 2602 | ||
2603 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | 2603 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b) |
2604 | { | 2604 | { |
2605 | register gdth_cmd_str *cmdp; | 2605 | register gdth_cmd_str *cmdp; |
2606 | ushort i; | 2606 | u16 i; |
2607 | dma_addr_t sense_paddr; | 2607 | dma_addr_t sense_paddr; |
2608 | int cmd_index, sgcnt, mode64; | 2608 | int cmd_index, sgcnt, mode64; |
2609 | unchar t,l; | 2609 | u8 t,l; |
2610 | struct page *page; | 2610 | struct page *page; |
2611 | ulong offset; | 2611 | unsigned long offset; |
2612 | struct gdth_cmndinfo *cmndinfo; | 2612 | struct gdth_cmndinfo *cmndinfo; |
2613 | 2613 | ||
2614 | t = scp->device->id; | 2614 | t = scp->device->id; |
@@ -2654,7 +2654,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2654 | 2654 | ||
2655 | } else { | 2655 | } else { |
2656 | page = virt_to_page(scp->sense_buffer); | 2656 | page = virt_to_page(scp->sense_buffer); |
2657 | offset = (ulong)scp->sense_buffer & ~PAGE_MASK; | 2657 | offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK; |
2658 | sense_paddr = pci_map_page(ha->pdev,page,offset, | 2658 | sense_paddr = pci_map_page(ha->pdev,page,offset, |
2659 | 16,PCI_DMA_FROMDEVICE); | 2659 | 16,PCI_DMA_FROMDEVICE); |
2660 | 2660 | ||
@@ -2703,12 +2703,12 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2703 | if (mode64) { | 2703 | if (mode64) { |
2704 | struct scatterlist *sl; | 2704 | struct scatterlist *sl; |
2705 | 2705 | ||
2706 | cmdp->u.raw64.sdata = (ulong64)-1; | 2706 | cmdp->u.raw64.sdata = (u64)-1; |
2707 | cmdp->u.raw64.sg_ranz = sgcnt; | 2707 | cmdp->u.raw64.sg_ranz = sgcnt; |
2708 | scsi_for_each_sg(scp, sl, sgcnt, i) { | 2708 | scsi_for_each_sg(scp, sl, sgcnt, i) { |
2709 | cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl); | 2709 | cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl); |
2710 | #ifdef GDTH_DMA_STATISTICS | 2710 | #ifdef GDTH_DMA_STATISTICS |
2711 | if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff) | 2711 | if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff) |
2712 | ha->dma64_cnt++; | 2712 | ha->dma64_cnt++; |
2713 | else | 2713 | else |
2714 | ha->dma32_cnt++; | 2714 | ha->dma32_cnt++; |
@@ -2744,7 +2744,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2744 | cmdp->u.raw64.sg_lst[0].sg_len)); | 2744 | cmdp->u.raw64.sg_lst[0].sg_len)); |
2745 | /* evaluate command size */ | 2745 | /* evaluate command size */ |
2746 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + | 2746 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + |
2747 | (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str); | 2747 | (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str); |
2748 | } else { | 2748 | } else { |
2749 | TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", | 2749 | TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", |
2750 | cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz, | 2750 | cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz, |
@@ -2752,7 +2752,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2752 | cmdp->u.raw.sg_lst[0].sg_len)); | 2752 | cmdp->u.raw.sg_lst[0].sg_len)); |
2753 | /* evaluate command size */ | 2753 | /* evaluate command size */ |
2754 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + | 2754 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + |
2755 | (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str); | 2755 | (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str); |
2756 | } | 2756 | } |
2757 | } | 2757 | } |
2758 | /* check space */ | 2758 | /* check space */ |
@@ -2802,7 +2802,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2802 | if (cmdp->OpCode == GDT_IOCTL) { | 2802 | if (cmdp->OpCode == GDT_IOCTL) { |
2803 | TRACE2(("IOCTL\n")); | 2803 | TRACE2(("IOCTL\n")); |
2804 | ha->cmd_len = | 2804 | ha->cmd_len = |
2805 | GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64); | 2805 | GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64); |
2806 | } else if (cmdp->Service == CACHESERVICE) { | 2806 | } else if (cmdp->Service == CACHESERVICE) { |
2807 | TRACE2(("cache command %d\n",cmdp->OpCode)); | 2807 | TRACE2(("cache command %d\n",cmdp->OpCode)); |
2808 | if (ha->cache_feat & GDT_64BIT) | 2808 | if (ha->cache_feat & GDT_64BIT) |
@@ -2840,8 +2840,8 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2840 | 2840 | ||
2841 | 2841 | ||
2842 | /* Controller event handling functions */ | 2842 | /* Controller event handling functions */ |
2843 | static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source, | 2843 | static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, |
2844 | ushort idx, gdth_evt_data *evt) | 2844 | u16 idx, gdth_evt_data *evt) |
2845 | { | 2845 | { |
2846 | gdth_evt_str *e; | 2846 | gdth_evt_str *e; |
2847 | struct timeval tv; | 2847 | struct timeval tv; |
@@ -2890,7 +2890,7 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr) | |||
2890 | { | 2890 | { |
2891 | gdth_evt_str *e; | 2891 | gdth_evt_str *e; |
2892 | int eindex; | 2892 | int eindex; |
2893 | ulong flags; | 2893 | unsigned long flags; |
2894 | 2894 | ||
2895 | TRACE2(("gdth_read_event() handle %d\n", handle)); | 2895 | TRACE2(("gdth_read_event() handle %d\n", handle)); |
2896 | spin_lock_irqsave(&ha->smp_lock, flags); | 2896 | spin_lock_irqsave(&ha->smp_lock, flags); |
@@ -2919,12 +2919,12 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr) | |||
2919 | } | 2919 | } |
2920 | 2920 | ||
2921 | static void gdth_readapp_event(gdth_ha_str *ha, | 2921 | static void gdth_readapp_event(gdth_ha_str *ha, |
2922 | unchar application, gdth_evt_str *estr) | 2922 | u8 application, gdth_evt_str *estr) |
2923 | { | 2923 | { |
2924 | gdth_evt_str *e; | 2924 | gdth_evt_str *e; |
2925 | int eindex; | 2925 | int eindex; |
2926 | ulong flags; | 2926 | unsigned long flags; |
2927 | unchar found = FALSE; | 2927 | u8 found = FALSE; |
2928 | 2928 | ||
2929 | TRACE2(("gdth_readapp_event() app. %d\n", application)); | 2929 | TRACE2(("gdth_readapp_event() app. %d\n", application)); |
2930 | spin_lock_irqsave(&ha->smp_lock, flags); | 2930 | spin_lock_irqsave(&ha->smp_lock, flags); |
@@ -2969,9 +2969,9 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, | |||
2969 | gdt2_dpram_str __iomem *dp2_ptr; | 2969 | gdt2_dpram_str __iomem *dp2_ptr; |
2970 | Scsi_Cmnd *scp; | 2970 | Scsi_Cmnd *scp; |
2971 | int rval, i; | 2971 | int rval, i; |
2972 | unchar IStatus; | 2972 | u8 IStatus; |
2973 | ushort Service; | 2973 | u16 Service; |
2974 | ulong flags = 0; | 2974 | unsigned long flags = 0; |
2975 | #ifdef INT_COAL | 2975 | #ifdef INT_COAL |
2976 | int coalesced = FALSE; | 2976 | int coalesced = FALSE; |
2977 | int next = FALSE; | 2977 | int next = FALSE; |
@@ -3018,7 +3018,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, | |||
3018 | if (coalesced) { | 3018 | if (coalesced) { |
3019 | /* For coalesced requests all status | 3019 | /* For coalesced requests all status |
3020 | information is found in the status buffer */ | 3020 | information is found in the status buffer */ |
3021 | IStatus = (unchar)(pcs->status & 0xff); | 3021 | IStatus = (u8)(pcs->status & 0xff); |
3022 | } | 3022 | } |
3023 | #endif | 3023 | #endif |
3024 | 3024 | ||
@@ -3197,7 +3197,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, | |||
3197 | ++act_int_coal; | 3197 | ++act_int_coal; |
3198 | if (act_int_coal > max_int_coal) { | 3198 | if (act_int_coal > max_int_coal) { |
3199 | max_int_coal = act_int_coal; | 3199 | max_int_coal = act_int_coal; |
3200 | printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal); | 3200 | printk("GDT: max_int_coal = %d\n",(u16)max_int_coal); |
3201 | } | 3201 | } |
3202 | #endif | 3202 | #endif |
3203 | /* see if there is another status */ | 3203 | /* see if there is another status */ |
@@ -3225,12 +3225,12 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id) | |||
3225 | return __gdth_interrupt(ha, false, NULL); | 3225 | return __gdth_interrupt(ha, false, NULL); |
3226 | } | 3226 | } |
3227 | 3227 | ||
3228 | static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, | 3228 | static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, |
3229 | Scsi_Cmnd *scp) | 3229 | Scsi_Cmnd *scp) |
3230 | { | 3230 | { |
3231 | gdth_msg_str *msg; | 3231 | gdth_msg_str *msg; |
3232 | gdth_cmd_str *cmdp; | 3232 | gdth_cmd_str *cmdp; |
3233 | unchar b, t; | 3233 | u8 b, t; |
3234 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | 3234 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); |
3235 | 3235 | ||
3236 | cmdp = ha->pccb; | 3236 | cmdp = ha->pccb; |
@@ -3263,7 +3263,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, | |||
3263 | cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; | 3263 | cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; |
3264 | ha->cmd_offs_dpmem = 0; | 3264 | ha->cmd_offs_dpmem = 0; |
3265 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) | 3265 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) |
3266 | + sizeof(ulong64); | 3266 | + sizeof(u64); |
3267 | ha->cmd_cnt = 0; | 3267 | ha->cmd_cnt = 0; |
3268 | gdth_copy_command(ha); | 3268 | gdth_copy_command(ha); |
3269 | gdth_release_event(ha); | 3269 | gdth_release_event(ha); |
@@ -3297,7 +3297,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, | |||
3297 | cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; | 3297 | cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; |
3298 | ha->cmd_offs_dpmem = 0; | 3298 | ha->cmd_offs_dpmem = 0; |
3299 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) | 3299 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) |
3300 | + sizeof(ulong64); | 3300 | + sizeof(u64); |
3301 | ha->cmd_cnt = 0; | 3301 | ha->cmd_cnt = 0; |
3302 | gdth_copy_command(ha); | 3302 | gdth_copy_command(ha); |
3303 | gdth_release_event(ha); | 3303 | gdth_release_event(ha); |
@@ -3335,7 +3335,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, | |||
3335 | cmndinfo->OpCode)); | 3335 | cmndinfo->OpCode)); |
3336 | /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */ | 3336 | /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */ |
3337 | if (cmndinfo->OpCode == GDT_CLUST_INFO) { | 3337 | if (cmndinfo->OpCode == GDT_CLUST_INFO) { |
3338 | ha->hdr[t].cluster_type = (unchar)ha->info; | 3338 | ha->hdr[t].cluster_type = (u8)ha->info; |
3339 | if (!(ha->hdr[t].cluster_type & | 3339 | if (!(ha->hdr[t].cluster_type & |
3340 | CLUSTER_MOUNTED)) { | 3340 | CLUSTER_MOUNTED)) { |
3341 | /* NOT MOUNTED -> MOUNT */ | 3341 | /* NOT MOUNTED -> MOUNT */ |
@@ -3397,7 +3397,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, | |||
3397 | ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED; | 3397 | ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED; |
3398 | } | 3398 | } |
3399 | memset((char*)scp->sense_buffer,0,16); | 3399 | memset((char*)scp->sense_buffer,0,16); |
3400 | if (ha->status == (ushort)S_CACHE_RESERV) { | 3400 | if (ha->status == (u16)S_CACHE_RESERV) { |
3401 | scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1); | 3401 | scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1); |
3402 | } else { | 3402 | } else { |
3403 | scp->sense_buffer[0] = 0x70; | 3403 | scp->sense_buffer[0] = 0x70; |
@@ -3614,16 +3614,16 @@ static int gdth_async_event(gdth_ha_str *ha) | |||
3614 | cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; | 3614 | cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; |
3615 | ha->cmd_offs_dpmem = 0; | 3615 | ha->cmd_offs_dpmem = 0; |
3616 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) | 3616 | ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) |
3617 | + sizeof(ulong64); | 3617 | + sizeof(u64); |
3618 | ha->cmd_cnt = 0; | 3618 | ha->cmd_cnt = 0; |
3619 | gdth_copy_command(ha); | 3619 | gdth_copy_command(ha); |
3620 | if (ha->type == GDT_EISA) | 3620 | if (ha->type == GDT_EISA) |
3621 | printk("[EISA slot %d] ",(ushort)ha->brd_phys); | 3621 | printk("[EISA slot %d] ",(u16)ha->brd_phys); |
3622 | else if (ha->type == GDT_ISA) | 3622 | else if (ha->type == GDT_ISA) |
3623 | printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys); | 3623 | printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys); |
3624 | else | 3624 | else |
3625 | printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8), | 3625 | printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8), |
3626 | (ushort)((ha->brd_phys>>3)&0x1f)); | 3626 | (u16)((ha->brd_phys>>3)&0x1f)); |
3627 | gdth_release_event(ha); | 3627 | gdth_release_event(ha); |
3628 | } | 3628 | } |
3629 | 3629 | ||
@@ -3640,7 +3640,7 @@ static int gdth_async_event(gdth_ha_str *ha) | |||
3640 | ha->dvr.eu.async.service = ha->service; | 3640 | ha->dvr.eu.async.service = ha->service; |
3641 | ha->dvr.eu.async.status = ha->status; | 3641 | ha->dvr.eu.async.status = ha->status; |
3642 | ha->dvr.eu.async.info = ha->info; | 3642 | ha->dvr.eu.async.info = ha->info; |
3643 | *(ulong32 *)ha->dvr.eu.async.scsi_coord = ha->info2; | 3643 | *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2; |
3644 | } | 3644 | } |
3645 | gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr ); | 3645 | gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr ); |
3646 | gdth_log_event( &ha->dvr, NULL ); | 3646 | gdth_log_event( &ha->dvr, NULL ); |
@@ -3648,8 +3648,8 @@ static int gdth_async_event(gdth_ha_str *ha) | |||
3648 | /* new host drive from expand? */ | 3648 | /* new host drive from expand? */ |
3649 | if (ha->service == CACHESERVICE && ha->status == 56) { | 3649 | if (ha->service == CACHESERVICE && ha->status == 56) { |
3650 | TRACE2(("gdth_async_event(): new host drive %d created\n", | 3650 | TRACE2(("gdth_async_event(): new host drive %d created\n", |
3651 | (ushort)ha->info)); | 3651 | (u16)ha->info)); |
3652 | /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */ | 3652 | /* gdth_analyse_hdrive(hanum, (u16)ha->info); */ |
3653 | } | 3653 | } |
3654 | } | 3654 | } |
3655 | return 1; | 3655 | return 1; |
@@ -3680,13 +3680,13 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer) | |||
3680 | for (j=0,i=1; i < f[0]; i+=2) { | 3680 | for (j=0,i=1; i < f[0]; i+=2) { |
3681 | switch (f[i+1]) { | 3681 | switch (f[i+1]) { |
3682 | case 4: | 3682 | case 4: |
3683 | stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]]; | 3683 | stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]]; |
3684 | break; | 3684 | break; |
3685 | case 2: | 3685 | case 2: |
3686 | stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]]; | 3686 | stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]]; |
3687 | break; | 3687 | break; |
3688 | case 1: | 3688 | case 1: |
3689 | stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]]; | 3689 | stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]]; |
3690 | break; | 3690 | break; |
3691 | default: | 3691 | default: |
3692 | break; | 3692 | break; |
@@ -3712,14 +3712,14 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer) | |||
3712 | } | 3712 | } |
3713 | 3713 | ||
3714 | #ifdef GDTH_STATISTICS | 3714 | #ifdef GDTH_STATISTICS |
3715 | static unchar gdth_timer_running; | 3715 | static u8 gdth_timer_running; |
3716 | 3716 | ||
3717 | static void gdth_timeout(ulong data) | 3717 | static void gdth_timeout(unsigned long data) |
3718 | { | 3718 | { |
3719 | ulong32 i; | 3719 | u32 i; |
3720 | Scsi_Cmnd *nscp; | 3720 | Scsi_Cmnd *nscp; |
3721 | gdth_ha_str *ha; | 3721 | gdth_ha_str *ha; |
3722 | ulong flags; | 3722 | unsigned long flags; |
3723 | 3723 | ||
3724 | if(unlikely(list_empty(&gdth_instances))) { | 3724 | if(unlikely(list_empty(&gdth_instances))) { |
3725 | gdth_timer_running = 0; | 3725 | gdth_timer_running = 0; |
@@ -3891,8 +3891,8 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp) | |||
3891 | { | 3891 | { |
3892 | gdth_ha_str *ha = shost_priv(scp->device->host); | 3892 | gdth_ha_str *ha = shost_priv(scp->device->host); |
3893 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | 3893 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); |
3894 | unchar b, t; | 3894 | u8 b, t; |
3895 | ulong flags; | 3895 | unsigned long flags; |
3896 | enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED; | 3896 | enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED; |
3897 | 3897 | ||
3898 | TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__)); | 3898 | TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__)); |
@@ -3924,9 +3924,9 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp) | |||
3924 | { | 3924 | { |
3925 | gdth_ha_str *ha = shost_priv(scp->device->host); | 3925 | gdth_ha_str *ha = shost_priv(scp->device->host); |
3926 | int i; | 3926 | int i; |
3927 | ulong flags; | 3927 | unsigned long flags; |
3928 | Scsi_Cmnd *cmnd; | 3928 | Scsi_Cmnd *cmnd; |
3929 | unchar b; | 3929 | u8 b; |
3930 | 3930 | ||
3931 | TRACE2(("gdth_eh_bus_reset()\n")); | 3931 | TRACE2(("gdth_eh_bus_reset()\n")); |
3932 | 3932 | ||
@@ -3974,7 +3974,7 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp) | |||
3974 | 3974 | ||
3975 | static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) | 3975 | static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) |
3976 | { | 3976 | { |
3977 | unchar b, t; | 3977 | u8 b, t; |
3978 | gdth_ha_str *ha = shost_priv(sdev->host); | 3978 | gdth_ha_str *ha = shost_priv(sdev->host); |
3979 | struct scsi_device *sd; | 3979 | struct scsi_device *sd; |
3980 | unsigned capacity; | 3980 | unsigned capacity; |
@@ -4062,7 +4062,7 @@ static int ioc_event(void __user *arg) | |||
4062 | { | 4062 | { |
4063 | gdth_ioctl_event evt; | 4063 | gdth_ioctl_event evt; |
4064 | gdth_ha_str *ha; | 4064 | gdth_ha_str *ha; |
4065 | ulong flags; | 4065 | unsigned long flags; |
4066 | 4066 | ||
4067 | if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event))) | 4067 | if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event))) |
4068 | return -EFAULT; | 4068 | return -EFAULT; |
@@ -4098,8 +4098,8 @@ static int ioc_event(void __user *arg) | |||
4098 | static int ioc_lockdrv(void __user *arg) | 4098 | static int ioc_lockdrv(void __user *arg) |
4099 | { | 4099 | { |
4100 | gdth_ioctl_lockdrv ldrv; | 4100 | gdth_ioctl_lockdrv ldrv; |
4101 | unchar i, j; | 4101 | u8 i, j; |
4102 | ulong flags; | 4102 | unsigned long flags; |
4103 | gdth_ha_str *ha; | 4103 | gdth_ha_str *ha; |
4104 | 4104 | ||
4105 | if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv))) | 4105 | if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv))) |
@@ -4165,7 +4165,7 @@ static int ioc_general(void __user *arg, char *cmnd) | |||
4165 | { | 4165 | { |
4166 | gdth_ioctl_general gen; | 4166 | gdth_ioctl_general gen; |
4167 | char *buf = NULL; | 4167 | char *buf = NULL; |
4168 | ulong64 paddr; | 4168 | u64 paddr; |
4169 | gdth_ha_str *ha; | 4169 | gdth_ha_str *ha; |
4170 | int rval; | 4170 | int rval; |
4171 | 4171 | ||
@@ -4194,7 +4194,7 @@ static int ioc_general(void __user *arg, char *cmnd) | |||
4194 | gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo; | 4194 | gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo; |
4195 | /* addresses */ | 4195 | /* addresses */ |
4196 | if (ha->cache_feat & SCATTER_GATHER) { | 4196 | if (ha->cache_feat & SCATTER_GATHER) { |
4197 | gen.command.u.cache64.DestAddr = (ulong64)-1; | 4197 | gen.command.u.cache64.DestAddr = (u64)-1; |
4198 | gen.command.u.cache64.sg_canz = 1; | 4198 | gen.command.u.cache64.sg_canz = 1; |
4199 | gen.command.u.cache64.sg_lst[0].sg_ptr = paddr; | 4199 | gen.command.u.cache64.sg_lst[0].sg_ptr = paddr; |
4200 | gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len; | 4200 | gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len; |
@@ -4207,7 +4207,7 @@ static int ioc_general(void __user *arg, char *cmnd) | |||
4207 | if (ha->cache_feat & SCATTER_GATHER) { | 4207 | if (ha->cache_feat & SCATTER_GATHER) { |
4208 | gen.command.u.cache.DestAddr = 0xffffffff; | 4208 | gen.command.u.cache.DestAddr = 0xffffffff; |
4209 | gen.command.u.cache.sg_canz = 1; | 4209 | gen.command.u.cache.sg_canz = 1; |
4210 | gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr; | 4210 | gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr; |
4211 | gen.command.u.cache.sg_lst[0].sg_len = gen.data_len; | 4211 | gen.command.u.cache.sg_lst[0].sg_len = gen.data_len; |
4212 | gen.command.u.cache.sg_lst[1].sg_len = 0; | 4212 | gen.command.u.cache.sg_lst[1].sg_len = 0; |
4213 | } else { | 4213 | } else { |
@@ -4230,7 +4230,7 @@ static int ioc_general(void __user *arg, char *cmnd) | |||
4230 | gen.command.u.raw64.direction = gen.command.u.raw.direction; | 4230 | gen.command.u.raw64.direction = gen.command.u.raw.direction; |
4231 | /* addresses */ | 4231 | /* addresses */ |
4232 | if (ha->raw_feat & SCATTER_GATHER) { | 4232 | if (ha->raw_feat & SCATTER_GATHER) { |
4233 | gen.command.u.raw64.sdata = (ulong64)-1; | 4233 | gen.command.u.raw64.sdata = (u64)-1; |
4234 | gen.command.u.raw64.sg_ranz = 1; | 4234 | gen.command.u.raw64.sg_ranz = 1; |
4235 | gen.command.u.raw64.sg_lst[0].sg_ptr = paddr; | 4235 | gen.command.u.raw64.sg_lst[0].sg_ptr = paddr; |
4236 | gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len; | 4236 | gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len; |
@@ -4244,14 +4244,14 @@ static int ioc_general(void __user *arg, char *cmnd) | |||
4244 | if (ha->raw_feat & SCATTER_GATHER) { | 4244 | if (ha->raw_feat & SCATTER_GATHER) { |
4245 | gen.command.u.raw.sdata = 0xffffffff; | 4245 | gen.command.u.raw.sdata = 0xffffffff; |
4246 | gen.command.u.raw.sg_ranz = 1; | 4246 | gen.command.u.raw.sg_ranz = 1; |
4247 | gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr; | 4247 | gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr; |
4248 | gen.command.u.raw.sg_lst[0].sg_len = gen.data_len; | 4248 | gen.command.u.raw.sg_lst[0].sg_len = gen.data_len; |
4249 | gen.command.u.raw.sg_lst[1].sg_len = 0; | 4249 | gen.command.u.raw.sg_lst[1].sg_len = 0; |
4250 | } else { | 4250 | } else { |
4251 | gen.command.u.raw.sdata = paddr; | 4251 | gen.command.u.raw.sdata = paddr; |
4252 | gen.command.u.raw.sg_ranz = 0; | 4252 | gen.command.u.raw.sg_ranz = 0; |
4253 | } | 4253 | } |
4254 | gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len; | 4254 | gen.command.u.raw.sense_data = (u32)paddr + gen.data_len; |
4255 | } | 4255 | } |
4256 | } else { | 4256 | } else { |
4257 | gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); | 4257 | gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); |
@@ -4283,7 +4283,7 @@ static int ioc_hdrlist(void __user *arg, char *cmnd) | |||
4283 | gdth_ioctl_rescan *rsc; | 4283 | gdth_ioctl_rescan *rsc; |
4284 | gdth_cmd_str *cmd; | 4284 | gdth_cmd_str *cmd; |
4285 | gdth_ha_str *ha; | 4285 | gdth_ha_str *ha; |
4286 | unchar i; | 4286 | u8 i; |
4287 | int rc = -ENOMEM; | 4287 | int rc = -ENOMEM; |
4288 | u32 cluster_type = 0; | 4288 | u32 cluster_type = 0; |
4289 | 4289 | ||
@@ -4335,11 +4335,11 @@ static int ioc_rescan(void __user *arg, char *cmnd) | |||
4335 | { | 4335 | { |
4336 | gdth_ioctl_rescan *rsc; | 4336 | gdth_ioctl_rescan *rsc; |
4337 | gdth_cmd_str *cmd; | 4337 | gdth_cmd_str *cmd; |
4338 | ushort i, status, hdr_cnt; | 4338 | u16 i, status, hdr_cnt; |
4339 | ulong32 info; | 4339 | u32 info; |
4340 | int cyls, hds, secs; | 4340 | int cyls, hds, secs; |
4341 | int rc = -ENOMEM; | 4341 | int rc = -ENOMEM; |
4342 | ulong flags; | 4342 | unsigned long flags; |
4343 | gdth_ha_str *ha; | 4343 | gdth_ha_str *ha; |
4344 | 4344 | ||
4345 | rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); | 4345 | rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); |
@@ -4367,7 +4367,7 @@ static int ioc_rescan(void __user *arg, char *cmnd) | |||
4367 | 4367 | ||
4368 | status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); | 4368 | status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); |
4369 | i = 0; | 4369 | i = 0; |
4370 | hdr_cnt = (status == S_OK ? (ushort)info : 0); | 4370 | hdr_cnt = (status == S_OK ? (u16)info : 0); |
4371 | } else { | 4371 | } else { |
4372 | i = rsc->hdr_no; | 4372 | i = rsc->hdr_no; |
4373 | hdr_cnt = i + 1; | 4373 | hdr_cnt = i + 1; |
@@ -4418,7 +4418,7 @@ static int ioc_rescan(void __user *arg, char *cmnd) | |||
4418 | status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); | 4418 | status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); |
4419 | 4419 | ||
4420 | spin_lock_irqsave(&ha->smp_lock, flags); | 4420 | spin_lock_irqsave(&ha->smp_lock, flags); |
4421 | ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0); | 4421 | ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0); |
4422 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 4422 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
4423 | 4423 | ||
4424 | cmd->Service = CACHESERVICE; | 4424 | cmd->Service = CACHESERVICE; |
@@ -4432,7 +4432,7 @@ static int ioc_rescan(void __user *arg, char *cmnd) | |||
4432 | 4432 | ||
4433 | spin_lock_irqsave(&ha->smp_lock, flags); | 4433 | spin_lock_irqsave(&ha->smp_lock, flags); |
4434 | ha->hdr[i].cluster_type = | 4434 | ha->hdr[i].cluster_type = |
4435 | ((status == S_OK && !shared_access) ? (ushort)info : 0); | 4435 | ((status == S_OK && !shared_access) ? (u16)info : 0); |
4436 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 4436 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
4437 | rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; | 4437 | rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; |
4438 | 4438 | ||
@@ -4446,7 +4446,7 @@ static int ioc_rescan(void __user *arg, char *cmnd) | |||
4446 | status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); | 4446 | status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); |
4447 | 4447 | ||
4448 | spin_lock_irqsave(&ha->smp_lock, flags); | 4448 | spin_lock_irqsave(&ha->smp_lock, flags); |
4449 | ha->hdr[i].rw_attribs = (status == S_OK ? (ushort)info : 0); | 4449 | ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0); |
4450 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 4450 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
4451 | } | 4451 | } |
4452 | 4452 | ||
@@ -4466,7 +4466,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
4466 | { | 4466 | { |
4467 | gdth_ha_str *ha; | 4467 | gdth_ha_str *ha; |
4468 | Scsi_Cmnd *scp; | 4468 | Scsi_Cmnd *scp; |
4469 | ulong flags; | 4469 | unsigned long flags; |
4470 | char cmnd[MAX_COMMAND_SIZE]; | 4470 | char cmnd[MAX_COMMAND_SIZE]; |
4471 | void __user *argp = (void __user *)arg; | 4471 | void __user *argp = (void __user *)arg; |
4472 | 4472 | ||
@@ -4495,9 +4495,9 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
4495 | { | 4495 | { |
4496 | gdth_ioctl_osvers osv; | 4496 | gdth_ioctl_osvers osv; |
4497 | 4497 | ||
4498 | osv.version = (unchar)(LINUX_VERSION_CODE >> 16); | 4498 | osv.version = (u8)(LINUX_VERSION_CODE >> 16); |
4499 | osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8); | 4499 | osv.subversion = (u8)(LINUX_VERSION_CODE >> 8); |
4500 | osv.revision = (ushort)(LINUX_VERSION_CODE & 0xff); | 4500 | osv.revision = (u16)(LINUX_VERSION_CODE & 0xff); |
4501 | if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers))) | 4501 | if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers))) |
4502 | return -EFAULT; | 4502 | return -EFAULT; |
4503 | break; | 4503 | break; |
@@ -4512,10 +4512,10 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
4512 | return -EFAULT; | 4512 | return -EFAULT; |
4513 | 4513 | ||
4514 | if (ha->type == GDT_ISA || ha->type == GDT_EISA) { | 4514 | if (ha->type == GDT_ISA || ha->type == GDT_EISA) { |
4515 | ctrt.type = (unchar)((ha->stype>>20) - 0x10); | 4515 | ctrt.type = (u8)((ha->stype>>20) - 0x10); |
4516 | } else { | 4516 | } else { |
4517 | if (ha->type != GDT_PCIMPR) { | 4517 | if (ha->type != GDT_PCIMPR) { |
4518 | ctrt.type = (unchar)((ha->stype<<4) + 6); | 4518 | ctrt.type = (u8)((ha->stype<<4) + 6); |
4519 | } else { | 4519 | } else { |
4520 | ctrt.type = | 4520 | ctrt.type = |
4521 | (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe); | 4521 | (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe); |
@@ -4546,7 +4546,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
4546 | case GDTIOCTL_LOCKCHN: | 4546 | case GDTIOCTL_LOCKCHN: |
4547 | { | 4547 | { |
4548 | gdth_ioctl_lockchn lchn; | 4548 | gdth_ioctl_lockchn lchn; |
4549 | unchar i, j; | 4549 | u8 i, j; |
4550 | 4550 | ||
4551 | if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) || | 4551 | if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) || |
4552 | (NULL == (ha = gdth_find_ha(lchn.ionode)))) | 4552 | (NULL == (ha = gdth_find_ha(lchn.ionode)))) |
@@ -4670,7 +4670,7 @@ static struct scsi_host_template gdth_template = { | |||
4670 | }; | 4670 | }; |
4671 | 4671 | ||
4672 | #ifdef CONFIG_ISA | 4672 | #ifdef CONFIG_ISA |
4673 | static int __init gdth_isa_probe_one(ulong32 isa_bios) | 4673 | static int __init gdth_isa_probe_one(u32 isa_bios) |
4674 | { | 4674 | { |
4675 | struct Scsi_Host *shp; | 4675 | struct Scsi_Host *shp; |
4676 | gdth_ha_str *ha; | 4676 | gdth_ha_str *ha; |
@@ -4802,7 +4802,7 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios) | |||
4802 | #endif /* CONFIG_ISA */ | 4802 | #endif /* CONFIG_ISA */ |
4803 | 4803 | ||
4804 | #ifdef CONFIG_EISA | 4804 | #ifdef CONFIG_EISA |
4805 | static int __init gdth_eisa_probe_one(ushort eisa_slot) | 4805 | static int __init gdth_eisa_probe_one(u16 eisa_slot) |
4806 | { | 4806 | { |
4807 | struct Scsi_Host *shp; | 4807 | struct Scsi_Host *shp; |
4808 | gdth_ha_str *ha; | 4808 | gdth_ha_str *ha; |
@@ -5120,7 +5120,7 @@ static void gdth_remove_one(gdth_ha_str *ha) | |||
5120 | scsi_host_put(shp); | 5120 | scsi_host_put(shp); |
5121 | } | 5121 | } |
5122 | 5122 | ||
5123 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) | 5123 | static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf) |
5124 | { | 5124 | { |
5125 | gdth_ha_str *ha; | 5125 | gdth_ha_str *ha; |
5126 | 5126 | ||
@@ -5158,14 +5158,14 @@ static int __init gdth_init(void) | |||
5158 | if (probe_eisa_isa) { | 5158 | if (probe_eisa_isa) { |
5159 | /* scanning for controllers, at first: ISA controller */ | 5159 | /* scanning for controllers, at first: ISA controller */ |
5160 | #ifdef CONFIG_ISA | 5160 | #ifdef CONFIG_ISA |
5161 | ulong32 isa_bios; | 5161 | u32 isa_bios; |
5162 | for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL; | 5162 | for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL; |
5163 | isa_bios += 0x8000UL) | 5163 | isa_bios += 0x8000UL) |
5164 | gdth_isa_probe_one(isa_bios); | 5164 | gdth_isa_probe_one(isa_bios); |
5165 | #endif | 5165 | #endif |
5166 | #ifdef CONFIG_EISA | 5166 | #ifdef CONFIG_EISA |
5167 | { | 5167 | { |
5168 | ushort eisa_slot; | 5168 | u16 eisa_slot; |
5169 | for (eisa_slot = 0x1000; eisa_slot <= 0x8000; | 5169 | for (eisa_slot = 0x1000; eisa_slot <= 0x8000; |
5170 | eisa_slot += 0x1000) | 5170 | eisa_slot += 0x1000) |
5171 | gdth_eisa_probe_one(eisa_slot); | 5171 | gdth_eisa_probe_one(eisa_slot); |
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 1646444e9bd5..120a0625a7b5 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h | |||
@@ -321,524 +321,524 @@ | |||
321 | 321 | ||
322 | /* screenservice message */ | 322 | /* screenservice message */ |
323 | typedef struct { | 323 | typedef struct { |
324 | ulong32 msg_handle; /* message handle */ | 324 | u32 msg_handle; /* message handle */ |
325 | ulong32 msg_len; /* size of message */ | 325 | u32 msg_len; /* size of message */ |
326 | ulong32 msg_alen; /* answer length */ | 326 | u32 msg_alen; /* answer length */ |
327 | unchar msg_answer; /* answer flag */ | 327 | u8 msg_answer; /* answer flag */ |
328 | unchar msg_ext; /* more messages */ | 328 | u8 msg_ext; /* more messages */ |
329 | unchar msg_reserved[2]; | 329 | u8 msg_reserved[2]; |
330 | char msg_text[MSGLEN+2]; /* the message text */ | 330 | char msg_text[MSGLEN+2]; /* the message text */ |
331 | } PACKED gdth_msg_str; | 331 | } __attribute__((packed)) gdth_msg_str; |
332 | 332 | ||
333 | 333 | ||
334 | /* IOCTL data structures */ | 334 | /* IOCTL data structures */ |
335 | 335 | ||
336 | /* Status coalescing buffer for returning multiple requests per interrupt */ | 336 | /* Status coalescing buffer for returning multiple requests per interrupt */ |
337 | typedef struct { | 337 | typedef struct { |
338 | ulong32 status; | 338 | u32 status; |
339 | ulong32 ext_status; | 339 | u32 ext_status; |
340 | ulong32 info0; | 340 | u32 info0; |
341 | ulong32 info1; | 341 | u32 info1; |
342 | } PACKED gdth_coal_status; | 342 | } __attribute__((packed)) gdth_coal_status; |
343 | 343 | ||
344 | /* performance mode data structure */ | 344 | /* performance mode data structure */ |
345 | typedef struct { | 345 | typedef struct { |
346 | ulong32 version; /* The version of this IOCTL structure. */ | 346 | u32 version; /* The version of this IOCTL structure. */ |
347 | ulong32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */ | 347 | u32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */ |
348 | ulong32 st_buff_addr1; /* physical address of status buffer 1 */ | 348 | u32 st_buff_addr1; /* physical address of status buffer 1 */ |
349 | ulong32 st_buff_u_addr1; /* reserved for 64 bit addressing */ | 349 | u32 st_buff_u_addr1; /* reserved for 64 bit addressing */ |
350 | ulong32 st_buff_indx1; /* reserved command idx. for this buffer */ | 350 | u32 st_buff_indx1; /* reserved command idx. for this buffer */ |
351 | ulong32 st_buff_addr2; /* physical address of status buffer 1 */ | 351 | u32 st_buff_addr2; /* physical address of status buffer 1 */ |
352 | ulong32 st_buff_u_addr2; /* reserved for 64 bit addressing */ | 352 | u32 st_buff_u_addr2; /* reserved for 64 bit addressing */ |
353 | ulong32 st_buff_indx2; /* reserved command idx. for this buffer */ | 353 | u32 st_buff_indx2; /* reserved command idx. for this buffer */ |
354 | ulong32 st_buff_size; /* size of each buffer in bytes */ | 354 | u32 st_buff_size; /* size of each buffer in bytes */ |
355 | ulong32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */ | 355 | u32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */ |
356 | ulong32 cmd_buff_addr1; /* physical address of cmd buffer 1 */ | 356 | u32 cmd_buff_addr1; /* physical address of cmd buffer 1 */ |
357 | ulong32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */ | 357 | u32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */ |
358 | ulong32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */ | 358 | u32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */ |
359 | ulong32 cmd_buff_addr2; /* physical address of cmd buffer 1 */ | 359 | u32 cmd_buff_addr2; /* physical address of cmd buffer 1 */ |
360 | ulong32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */ | 360 | u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */ |
361 | ulong32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */ | 361 | u32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */ |
362 | ulong32 cmd_buff_size; /* size of each cmd bufer in bytes */ | 362 | u32 cmd_buff_size; /* size of each cmd bufer in bytes */ |
363 | ulong32 reserved1; | 363 | u32 reserved1; |
364 | ulong32 reserved2; | 364 | u32 reserved2; |
365 | } PACKED gdth_perf_modes; | 365 | } __attribute__((packed)) gdth_perf_modes; |
366 | 366 | ||
367 | /* SCSI drive info */ | 367 | /* SCSI drive info */ |
368 | typedef struct { | 368 | typedef struct { |
369 | unchar vendor[8]; /* vendor string */ | 369 | u8 vendor[8]; /* vendor string */ |
370 | unchar product[16]; /* product string */ | 370 | u8 product[16]; /* product string */ |
371 | unchar revision[4]; /* revision */ | 371 | u8 revision[4]; /* revision */ |
372 | ulong32 sy_rate; /* current rate for sync. tr. */ | 372 | u32 sy_rate; /* current rate for sync. tr. */ |
373 | ulong32 sy_max_rate; /* max. rate for sync. tr. */ | 373 | u32 sy_max_rate; /* max. rate for sync. tr. */ |
374 | ulong32 no_ldrive; /* belongs to this log. drv.*/ | 374 | u32 no_ldrive; /* belongs to this log. drv.*/ |
375 | ulong32 blkcnt; /* number of blocks */ | 375 | u32 blkcnt; /* number of blocks */ |
376 | ushort blksize; /* size of block in bytes */ | 376 | u16 blksize; /* size of block in bytes */ |
377 | unchar available; /* flag: access is available */ | 377 | u8 available; /* flag: access is available */ |
378 | unchar init; /* medium is initialized */ | 378 | u8 init; /* medium is initialized */ |
379 | unchar devtype; /* SCSI devicetype */ | 379 | u8 devtype; /* SCSI devicetype */ |
380 | unchar rm_medium; /* medium is removable */ | 380 | u8 rm_medium; /* medium is removable */ |
381 | unchar wp_medium; /* medium is write protected */ | 381 | u8 wp_medium; /* medium is write protected */ |
382 | unchar ansi; /* SCSI I/II or III? */ | 382 | u8 ansi; /* SCSI I/II or III? */ |
383 | unchar protocol; /* same as ansi */ | 383 | u8 protocol; /* same as ansi */ |
384 | unchar sync; /* flag: sync. transfer enab. */ | 384 | u8 sync; /* flag: sync. transfer enab. */ |
385 | unchar disc; /* flag: disconnect enabled */ | 385 | u8 disc; /* flag: disconnect enabled */ |
386 | unchar queueing; /* flag: command queing enab. */ | 386 | u8 queueing; /* flag: command queing enab. */ |
387 | unchar cached; /* flag: caching enabled */ | 387 | u8 cached; /* flag: caching enabled */ |
388 | unchar target_id; /* target ID of device */ | 388 | u8 target_id; /* target ID of device */ |
389 | unchar lun; /* LUN id of device */ | 389 | u8 lun; /* LUN id of device */ |
390 | unchar orphan; /* flag: drive fragment */ | 390 | u8 orphan; /* flag: drive fragment */ |
391 | ulong32 last_error; /* sense key or drive state */ | 391 | u32 last_error; /* sense key or drive state */ |
392 | ulong32 last_result; /* result of last command */ | 392 | u32 last_result; /* result of last command */ |
393 | ulong32 check_errors; /* err. in last surface check */ | 393 | u32 check_errors; /* err. in last surface check */ |
394 | unchar percent; /* progress for surface check */ | 394 | u8 percent; /* progress for surface check */ |
395 | unchar last_check; /* IOCTRL operation */ | 395 | u8 last_check; /* IOCTRL operation */ |
396 | unchar res[2]; | 396 | u8 res[2]; |
397 | ulong32 flags; /* from 1.19/2.19: raw reserv.*/ | 397 | u32 flags; /* from 1.19/2.19: raw reserv.*/ |
398 | unchar multi_bus; /* multi bus dev? (fibre ch.) */ | 398 | u8 multi_bus; /* multi bus dev? (fibre ch.) */ |
399 | unchar mb_status; /* status: available? */ | 399 | u8 mb_status; /* status: available? */ |
400 | unchar res2[2]; | 400 | u8 res2[2]; |
401 | unchar mb_alt_status; /* status on second bus */ | 401 | u8 mb_alt_status; /* status on second bus */ |
402 | unchar mb_alt_bid; /* number of second bus */ | 402 | u8 mb_alt_bid; /* number of second bus */ |
403 | unchar mb_alt_tid; /* target id on second bus */ | 403 | u8 mb_alt_tid; /* target id on second bus */ |
404 | unchar res3; | 404 | u8 res3; |
405 | unchar fc_flag; /* from 1.22/2.22: info valid?*/ | 405 | u8 fc_flag; /* from 1.22/2.22: info valid?*/ |
406 | unchar res4; | 406 | u8 res4; |
407 | ushort fc_frame_size; /* frame size (bytes) */ | 407 | u16 fc_frame_size; /* frame size (bytes) */ |
408 | char wwn[8]; /* world wide name */ | 408 | char wwn[8]; /* world wide name */ |
409 | } PACKED gdth_diskinfo_str; | 409 | } __attribute__((packed)) gdth_diskinfo_str; |
410 | 410 | ||
411 | /* get SCSI channel count */ | 411 | /* get SCSI channel count */ |
412 | typedef struct { | 412 | typedef struct { |
413 | ulong32 channel_no; /* number of channel */ | 413 | u32 channel_no; /* number of channel */ |
414 | ulong32 drive_cnt; /* drive count */ | 414 | u32 drive_cnt; /* drive count */ |
415 | unchar siop_id; /* SCSI processor ID */ | 415 | u8 siop_id; /* SCSI processor ID */ |
416 | unchar siop_state; /* SCSI processor state */ | 416 | u8 siop_state; /* SCSI processor state */ |
417 | } PACKED gdth_getch_str; | 417 | } __attribute__((packed)) gdth_getch_str; |
418 | 418 | ||
419 | /* get SCSI drive numbers */ | 419 | /* get SCSI drive numbers */ |
420 | typedef struct { | 420 | typedef struct { |
421 | ulong32 sc_no; /* SCSI channel */ | 421 | u32 sc_no; /* SCSI channel */ |
422 | ulong32 sc_cnt; /* sc_list[] elements */ | 422 | u32 sc_cnt; /* sc_list[] elements */ |
423 | ulong32 sc_list[MAXID]; /* minor device numbers */ | 423 | u32 sc_list[MAXID]; /* minor device numbers */ |
424 | } PACKED gdth_drlist_str; | 424 | } __attribute__((packed)) gdth_drlist_str; |
425 | 425 | ||
426 | /* get grown/primary defect count */ | 426 | /* get grown/primary defect count */ |
427 | typedef struct { | 427 | typedef struct { |
428 | unchar sddc_type; /* 0x08: grown, 0x10: prim. */ | 428 | u8 sddc_type; /* 0x08: grown, 0x10: prim. */ |
429 | unchar sddc_format; /* list entry format */ | 429 | u8 sddc_format; /* list entry format */ |
430 | unchar sddc_len; /* list entry length */ | 430 | u8 sddc_len; /* list entry length */ |
431 | unchar sddc_res; | 431 | u8 sddc_res; |
432 | ulong32 sddc_cnt; /* entry count */ | 432 | u32 sddc_cnt; /* entry count */ |
433 | } PACKED gdth_defcnt_str; | 433 | } __attribute__((packed)) gdth_defcnt_str; |
434 | 434 | ||
435 | /* disk statistics */ | 435 | /* disk statistics */ |
436 | typedef struct { | 436 | typedef struct { |
437 | ulong32 bid; /* SCSI channel */ | 437 | u32 bid; /* SCSI channel */ |
438 | ulong32 first; /* first SCSI disk */ | 438 | u32 first; /* first SCSI disk */ |
439 | ulong32 entries; /* number of elements */ | 439 | u32 entries; /* number of elements */ |
440 | ulong32 count; /* (R) number of init. el. */ | 440 | u32 count; /* (R) number of init. el. */ |
441 | ulong32 mon_time; /* time stamp */ | 441 | u32 mon_time; /* time stamp */ |
442 | struct { | 442 | struct { |
443 | unchar tid; /* target ID */ | 443 | u8 tid; /* target ID */ |
444 | unchar lun; /* LUN */ | 444 | u8 lun; /* LUN */ |
445 | unchar res[2]; | 445 | u8 res[2]; |
446 | ulong32 blk_size; /* block size in bytes */ | 446 | u32 blk_size; /* block size in bytes */ |
447 | ulong32 rd_count; /* bytes read */ | 447 | u32 rd_count; /* bytes read */ |
448 | ulong32 wr_count; /* bytes written */ | 448 | u32 wr_count; /* bytes written */ |
449 | ulong32 rd_blk_count; /* blocks read */ | 449 | u32 rd_blk_count; /* blocks read */ |
450 | ulong32 wr_blk_count; /* blocks written */ | 450 | u32 wr_blk_count; /* blocks written */ |
451 | ulong32 retries; /* retries */ | 451 | u32 retries; /* retries */ |
452 | ulong32 reassigns; /* reassigns */ | 452 | u32 reassigns; /* reassigns */ |
453 | } PACKED list[1]; | 453 | } __attribute__((packed)) list[1]; |
454 | } PACKED gdth_dskstat_str; | 454 | } __attribute__((packed)) gdth_dskstat_str; |
455 | 455 | ||
456 | /* IO channel header */ | 456 | /* IO channel header */ |
457 | typedef struct { | 457 | typedef struct { |
458 | ulong32 version; /* version (-1UL: newest) */ | 458 | u32 version; /* version (-1UL: newest) */ |
459 | unchar list_entries; /* list entry count */ | 459 | u8 list_entries; /* list entry count */ |
460 | unchar first_chan; /* first channel number */ | 460 | u8 first_chan; /* first channel number */ |
461 | unchar last_chan; /* last channel number */ | 461 | u8 last_chan; /* last channel number */ |
462 | unchar chan_count; /* (R) channel count */ | 462 | u8 chan_count; /* (R) channel count */ |
463 | ulong32 list_offset; /* offset of list[0] */ | 463 | u32 list_offset; /* offset of list[0] */ |
464 | } PACKED gdth_iochan_header; | 464 | } __attribute__((packed)) gdth_iochan_header; |
465 | 465 | ||
466 | /* get IO channel description */ | 466 | /* get IO channel description */ |
467 | typedef struct { | 467 | typedef struct { |
468 | gdth_iochan_header hdr; | 468 | gdth_iochan_header hdr; |
469 | struct { | 469 | struct { |
470 | ulong32 address; /* channel address */ | 470 | u32 address; /* channel address */ |
471 | unchar type; /* type (SCSI, FCAL) */ | 471 | u8 type; /* type (SCSI, FCAL) */ |
472 | unchar local_no; /* local number */ | 472 | u8 local_no; /* local number */ |
473 | ushort features; /* channel features */ | 473 | u16 features; /* channel features */ |
474 | } PACKED list[MAXBUS]; | 474 | } __attribute__((packed)) list[MAXBUS]; |
475 | } PACKED gdth_iochan_str; | 475 | } __attribute__((packed)) gdth_iochan_str; |
476 | 476 | ||
477 | /* get raw IO channel description */ | 477 | /* get raw IO channel description */ |
478 | typedef struct { | 478 | typedef struct { |
479 | gdth_iochan_header hdr; | 479 | gdth_iochan_header hdr; |
480 | struct { | 480 | struct { |
481 | unchar proc_id; /* processor id */ | 481 | u8 proc_id; /* processor id */ |
482 | unchar proc_defect; /* defect ? */ | 482 | u8 proc_defect; /* defect ? */ |
483 | unchar reserved[2]; | 483 | u8 reserved[2]; |
484 | } PACKED list[MAXBUS]; | 484 | } __attribute__((packed)) list[MAXBUS]; |
485 | } PACKED gdth_raw_iochan_str; | 485 | } __attribute__((packed)) gdth_raw_iochan_str; |
486 | 486 | ||
487 | /* array drive component */ | 487 | /* array drive component */ |
488 | typedef struct { | 488 | typedef struct { |
489 | ulong32 al_controller; /* controller ID */ | 489 | u32 al_controller; /* controller ID */ |
490 | unchar al_cache_drive; /* cache drive number */ | 490 | u8 al_cache_drive; /* cache drive number */ |
491 | unchar al_status; /* cache drive state */ | 491 | u8 al_status; /* cache drive state */ |
492 | unchar al_res[2]; | 492 | u8 al_res[2]; |
493 | } PACKED gdth_arraycomp_str; | 493 | } __attribute__((packed)) gdth_arraycomp_str; |
494 | 494 | ||
495 | /* array drive information */ | 495 | /* array drive information */ |
496 | typedef struct { | 496 | typedef struct { |
497 | unchar ai_type; /* array type (RAID0,4,5) */ | 497 | u8 ai_type; /* array type (RAID0,4,5) */ |
498 | unchar ai_cache_drive_cnt; /* active cachedrives */ | 498 | u8 ai_cache_drive_cnt; /* active cachedrives */ |
499 | unchar ai_state; /* array drive state */ | 499 | u8 ai_state; /* array drive state */ |
500 | unchar ai_master_cd; /* master cachedrive */ | 500 | u8 ai_master_cd; /* master cachedrive */ |
501 | ulong32 ai_master_controller; /* ID of master controller */ | 501 | u32 ai_master_controller; /* ID of master controller */ |
502 | ulong32 ai_size; /* user capacity [sectors] */ | 502 | u32 ai_size; /* user capacity [sectors] */ |
503 | ulong32 ai_striping_size; /* striping size [sectors] */ | 503 | u32 ai_striping_size; /* striping size [sectors] */ |
504 | ulong32 ai_secsize; /* sector size [bytes] */ | 504 | u32 ai_secsize; /* sector size [bytes] */ |
505 | ulong32 ai_err_info; /* failed cache drive */ | 505 | u32 ai_err_info; /* failed cache drive */ |
506 | unchar ai_name[8]; /* name of the array drive */ | 506 | u8 ai_name[8]; /* name of the array drive */ |
507 | unchar ai_controller_cnt; /* number of controllers */ | 507 | u8 ai_controller_cnt; /* number of controllers */ |
508 | unchar ai_removable; /* flag: removable */ | 508 | u8 ai_removable; /* flag: removable */ |
509 | unchar ai_write_protected; /* flag: write protected */ | 509 | u8 ai_write_protected; /* flag: write protected */ |
510 | unchar ai_devtype; /* type: always direct access */ | 510 | u8 ai_devtype; /* type: always direct access */ |
511 | gdth_arraycomp_str ai_drives[35]; /* drive components: */ | 511 | gdth_arraycomp_str ai_drives[35]; /* drive components: */ |
512 | unchar ai_drive_entries; /* number of drive components */ | 512 | u8 ai_drive_entries; /* number of drive components */ |
513 | unchar ai_protected; /* protection flag */ | 513 | u8 ai_protected; /* protection flag */ |
514 | unchar ai_verify_state; /* state of a parity verify */ | 514 | u8 ai_verify_state; /* state of a parity verify */ |
515 | unchar ai_ext_state; /* extended array drive state */ | 515 | u8 ai_ext_state; /* extended array drive state */ |
516 | unchar ai_expand_state; /* array expand state (>=2.18)*/ | 516 | u8 ai_expand_state; /* array expand state (>=2.18)*/ |
517 | unchar ai_reserved[3]; | 517 | u8 ai_reserved[3]; |
518 | } PACKED gdth_arrayinf_str; | 518 | } __attribute__((packed)) gdth_arrayinf_str; |
519 | 519 | ||
520 | /* get array drive list */ | 520 | /* get array drive list */ |
521 | typedef struct { | 521 | typedef struct { |
522 | ulong32 controller_no; /* controller no. */ | 522 | u32 controller_no; /* controller no. */ |
523 | unchar cd_handle; /* master cachedrive */ | 523 | u8 cd_handle; /* master cachedrive */ |
524 | unchar is_arrayd; /* Flag: is array drive? */ | 524 | u8 is_arrayd; /* Flag: is array drive? */ |
525 | unchar is_master; /* Flag: is array master? */ | 525 | u8 is_master; /* Flag: is array master? */ |
526 | unchar is_parity; /* Flag: is parity drive? */ | 526 | u8 is_parity; /* Flag: is parity drive? */ |
527 | unchar is_hotfix; /* Flag: is hotfix drive? */ | 527 | u8 is_hotfix; /* Flag: is hotfix drive? */ |
528 | unchar res[3]; | 528 | u8 res[3]; |
529 | } PACKED gdth_alist_str; | 529 | } __attribute__((packed)) gdth_alist_str; |
530 | 530 | ||
531 | typedef struct { | 531 | typedef struct { |
532 | ulong32 entries_avail; /* allocated entries */ | 532 | u32 entries_avail; /* allocated entries */ |
533 | ulong32 entries_init; /* returned entries */ | 533 | u32 entries_init; /* returned entries */ |
534 | ulong32 first_entry; /* first entry number */ | 534 | u32 first_entry; /* first entry number */ |
535 | ulong32 list_offset; /* offset of following list */ | 535 | u32 list_offset; /* offset of following list */ |
536 | gdth_alist_str list[1]; /* list */ | 536 | gdth_alist_str list[1]; /* list */ |
537 | } PACKED gdth_arcdl_str; | 537 | } __attribute__((packed)) gdth_arcdl_str; |
538 | 538 | ||
539 | /* cache info/config IOCTL */ | 539 | /* cache info/config IOCTL */ |
540 | typedef struct { | 540 | typedef struct { |
541 | ulong32 version; /* firmware version */ | 541 | u32 version; /* firmware version */ |
542 | ushort state; /* cache state (on/off) */ | 542 | u16 state; /* cache state (on/off) */ |
543 | ushort strategy; /* cache strategy */ | 543 | u16 strategy; /* cache strategy */ |
544 | ushort write_back; /* write back state (on/off) */ | 544 | u16 write_back; /* write back state (on/off) */ |
545 | ushort block_size; /* cache block size */ | 545 | u16 block_size; /* cache block size */ |
546 | } PACKED gdth_cpar_str; | 546 | } __attribute__((packed)) gdth_cpar_str; |
547 | 547 | ||
548 | typedef struct { | 548 | typedef struct { |
549 | ulong32 csize; /* cache size */ | 549 | u32 csize; /* cache size */ |
550 | ulong32 read_cnt; /* read/write counter */ | 550 | u32 read_cnt; /* read/write counter */ |
551 | ulong32 write_cnt; | 551 | u32 write_cnt; |
552 | ulong32 tr_hits; /* hits */ | 552 | u32 tr_hits; /* hits */ |
553 | ulong32 sec_hits; | 553 | u32 sec_hits; |
554 | ulong32 sec_miss; /* misses */ | 554 | u32 sec_miss; /* misses */ |
555 | } PACKED gdth_cstat_str; | 555 | } __attribute__((packed)) gdth_cstat_str; |
556 | 556 | ||
557 | typedef struct { | 557 | typedef struct { |
558 | gdth_cpar_str cpar; | 558 | gdth_cpar_str cpar; |
559 | gdth_cstat_str cstat; | 559 | gdth_cstat_str cstat; |
560 | } PACKED gdth_cinfo_str; | 560 | } __attribute__((packed)) gdth_cinfo_str; |
561 | 561 | ||
562 | /* cache drive info */ | 562 | /* cache drive info */ |
563 | typedef struct { | 563 | typedef struct { |
564 | unchar cd_name[8]; /* cache drive name */ | 564 | u8 cd_name[8]; /* cache drive name */ |
565 | ulong32 cd_devtype; /* SCSI devicetype */ | 565 | u32 cd_devtype; /* SCSI devicetype */ |
566 | ulong32 cd_ldcnt; /* number of log. drives */ | 566 | u32 cd_ldcnt; /* number of log. drives */ |
567 | ulong32 cd_last_error; /* last error */ | 567 | u32 cd_last_error; /* last error */ |
568 | unchar cd_initialized; /* drive is initialized */ | 568 | u8 cd_initialized; /* drive is initialized */ |
569 | unchar cd_removable; /* media is removable */ | 569 | u8 cd_removable; /* media is removable */ |
570 | unchar cd_write_protected; /* write protected */ | 570 | u8 cd_write_protected; /* write protected */ |
571 | unchar cd_flags; /* Pool Hot Fix? */ | 571 | u8 cd_flags; /* Pool Hot Fix? */ |
572 | ulong32 ld_blkcnt; /* number of blocks */ | 572 | u32 ld_blkcnt; /* number of blocks */ |
573 | ulong32 ld_blksize; /* blocksize */ | 573 | u32 ld_blksize; /* blocksize */ |
574 | ulong32 ld_dcnt; /* number of disks */ | 574 | u32 ld_dcnt; /* number of disks */ |
575 | ulong32 ld_slave; /* log. drive index */ | 575 | u32 ld_slave; /* log. drive index */ |
576 | ulong32 ld_dtype; /* type of logical drive */ | 576 | u32 ld_dtype; /* type of logical drive */ |
577 | ulong32 ld_last_error; /* last error */ | 577 | u32 ld_last_error; /* last error */ |
578 | unchar ld_name[8]; /* log. drive name */ | 578 | u8 ld_name[8]; /* log. drive name */ |
579 | unchar ld_error; /* error */ | 579 | u8 ld_error; /* error */ |
580 | } PACKED gdth_cdrinfo_str; | 580 | } __attribute__((packed)) gdth_cdrinfo_str; |
581 | 581 | ||
582 | /* OEM string */ | 582 | /* OEM string */ |
583 | typedef struct { | 583 | typedef struct { |
584 | ulong32 ctl_version; | 584 | u32 ctl_version; |
585 | ulong32 file_major_version; | 585 | u32 file_major_version; |
586 | ulong32 file_minor_version; | 586 | u32 file_minor_version; |
587 | ulong32 buffer_size; | 587 | u32 buffer_size; |
588 | ulong32 cpy_count; | 588 | u32 cpy_count; |
589 | ulong32 ext_error; | 589 | u32 ext_error; |
590 | ulong32 oem_id; | 590 | u32 oem_id; |
591 | ulong32 board_id; | 591 | u32 board_id; |
592 | } PACKED gdth_oem_str_params; | 592 | } __attribute__((packed)) gdth_oem_str_params; |
593 | 593 | ||
594 | typedef struct { | 594 | typedef struct { |
595 | unchar product_0_1_name[16]; | 595 | u8 product_0_1_name[16]; |
596 | unchar product_4_5_name[16]; | 596 | u8 product_4_5_name[16]; |
597 | unchar product_cluster_name[16]; | 597 | u8 product_cluster_name[16]; |
598 | unchar product_reserved[16]; | 598 | u8 product_reserved[16]; |
599 | unchar scsi_cluster_target_vendor_id[16]; | 599 | u8 scsi_cluster_target_vendor_id[16]; |
600 | unchar cluster_raid_fw_name[16]; | 600 | u8 cluster_raid_fw_name[16]; |
601 | unchar oem_brand_name[16]; | 601 | u8 oem_brand_name[16]; |
602 | unchar oem_raid_type[16]; | 602 | u8 oem_raid_type[16]; |
603 | unchar bios_type[13]; | 603 | u8 bios_type[13]; |
604 | unchar bios_title[50]; | 604 | u8 bios_title[50]; |
605 | unchar oem_company_name[37]; | 605 | u8 oem_company_name[37]; |
606 | ulong32 pci_id_1; | 606 | u32 pci_id_1; |
607 | ulong32 pci_id_2; | 607 | u32 pci_id_2; |
608 | unchar validation_status[80]; | 608 | u8 validation_status[80]; |
609 | unchar reserved_1[4]; | 609 | u8 reserved_1[4]; |
610 | unchar scsi_host_drive_inquiry_vendor_id[16]; | 610 | u8 scsi_host_drive_inquiry_vendor_id[16]; |
611 | unchar library_file_template[16]; | 611 | u8 library_file_template[16]; |
612 | unchar reserved_2[16]; | 612 | u8 reserved_2[16]; |
613 | unchar tool_name_1[32]; | 613 | u8 tool_name_1[32]; |
614 | unchar tool_name_2[32]; | 614 | u8 tool_name_2[32]; |
615 | unchar tool_name_3[32]; | 615 | u8 tool_name_3[32]; |
616 | unchar oem_contact_1[84]; | 616 | u8 oem_contact_1[84]; |
617 | unchar oem_contact_2[84]; | 617 | u8 oem_contact_2[84]; |
618 | unchar oem_contact_3[84]; | 618 | u8 oem_contact_3[84]; |
619 | } PACKED gdth_oem_str; | 619 | } __attribute__((packed)) gdth_oem_str; |
620 | 620 | ||
621 | typedef struct { | 621 | typedef struct { |
622 | gdth_oem_str_params params; | 622 | gdth_oem_str_params params; |
623 | gdth_oem_str text; | 623 | gdth_oem_str text; |
624 | } PACKED gdth_oem_str_ioctl; | 624 | } __attribute__((packed)) gdth_oem_str_ioctl; |
625 | 625 | ||
626 | /* board features */ | 626 | /* board features */ |
627 | typedef struct { | 627 | typedef struct { |
628 | unchar chaining; /* Chaining supported */ | 628 | u8 chaining; /* Chaining supported */ |
629 | unchar striping; /* Striping (RAID-0) supp. */ | 629 | u8 striping; /* Striping (RAID-0) supp. */ |
630 | unchar mirroring; /* Mirroring (RAID-1) supp. */ | 630 | u8 mirroring; /* Mirroring (RAID-1) supp. */ |
631 | unchar raid; /* RAID-4/5/10 supported */ | 631 | u8 raid; /* RAID-4/5/10 supported */ |
632 | } PACKED gdth_bfeat_str; | 632 | } __attribute__((packed)) gdth_bfeat_str; |
633 | 633 | ||
634 | /* board info IOCTL */ | 634 | /* board info IOCTL */ |
635 | typedef struct { | 635 | typedef struct { |
636 | ulong32 ser_no; /* serial no. */ | 636 | u32 ser_no; /* serial no. */ |
637 | unchar oem_id[2]; /* OEM ID */ | 637 | u8 oem_id[2]; /* OEM ID */ |
638 | ushort ep_flags; /* eprom flags */ | 638 | u16 ep_flags; /* eprom flags */ |
639 | ulong32 proc_id; /* processor ID */ | 639 | u32 proc_id; /* processor ID */ |
640 | ulong32 memsize; /* memory size (bytes) */ | 640 | u32 memsize; /* memory size (bytes) */ |
641 | unchar mem_banks; /* memory banks */ | 641 | u8 mem_banks; /* memory banks */ |
642 | unchar chan_type; /* channel type */ | 642 | u8 chan_type; /* channel type */ |
643 | unchar chan_count; /* channel count */ | 643 | u8 chan_count; /* channel count */ |
644 | unchar rdongle_pres; /* dongle present? */ | 644 | u8 rdongle_pres; /* dongle present? */ |
645 | ulong32 epr_fw_ver; /* (eprom) firmware version */ | 645 | u32 epr_fw_ver; /* (eprom) firmware version */ |
646 | ulong32 upd_fw_ver; /* (update) firmware version */ | 646 | u32 upd_fw_ver; /* (update) firmware version */ |
647 | ulong32 upd_revision; /* update revision */ | 647 | u32 upd_revision; /* update revision */ |
648 | char type_string[16]; /* controller name */ | 648 | char type_string[16]; /* controller name */ |
649 | char raid_string[16]; /* RAID firmware name */ | 649 | char raid_string[16]; /* RAID firmware name */ |
650 | unchar update_pres; /* update present? */ | 650 | u8 update_pres; /* update present? */ |
651 | unchar xor_pres; /* XOR engine present? */ | 651 | u8 xor_pres; /* XOR engine present? */ |
652 | unchar prom_type; /* ROM type (eprom/flash) */ | 652 | u8 prom_type; /* ROM type (eprom/flash) */ |
653 | unchar prom_count; /* number of ROM devices */ | 653 | u8 prom_count; /* number of ROM devices */ |
654 | ulong32 dup_pres; /* duplexing module present? */ | 654 | u32 dup_pres; /* duplexing module present? */ |
655 | ulong32 chan_pres; /* number of expansion chn. */ | 655 | u32 chan_pres; /* number of expansion chn. */ |
656 | ulong32 mem_pres; /* memory expansion inst. ? */ | 656 | u32 mem_pres; /* memory expansion inst. ? */ |
657 | unchar ft_bus_system; /* fault bus supported? */ | 657 | u8 ft_bus_system; /* fault bus supported? */ |
658 | unchar subtype_valid; /* board_subtype valid? */ | 658 | u8 subtype_valid; /* board_subtype valid? */ |
659 | unchar board_subtype; /* subtype/hardware level */ | 659 | u8 board_subtype; /* subtype/hardware level */ |
660 | unchar ramparity_pres; /* RAM parity check hardware? */ | 660 | u8 ramparity_pres; /* RAM parity check hardware? */ |
661 | } PACKED gdth_binfo_str; | 661 | } __attribute__((packed)) gdth_binfo_str; |
662 | 662 | ||
663 | /* get host drive info */ | 663 | /* get host drive info */ |
664 | typedef struct { | 664 | typedef struct { |
665 | char name[8]; /* host drive name */ | 665 | char name[8]; /* host drive name */ |
666 | ulong32 size; /* size (sectors) */ | 666 | u32 size; /* size (sectors) */ |
667 | unchar host_drive; /* host drive number */ | 667 | u8 host_drive; /* host drive number */ |
668 | unchar log_drive; /* log. drive (master) */ | 668 | u8 log_drive; /* log. drive (master) */ |
669 | unchar reserved; | 669 | u8 reserved; |
670 | unchar rw_attribs; /* r/w attribs */ | 670 | u8 rw_attribs; /* r/w attribs */ |
671 | ulong32 start_sec; /* start sector */ | 671 | u32 start_sec; /* start sector */ |
672 | } PACKED gdth_hentry_str; | 672 | } __attribute__((packed)) gdth_hentry_str; |
673 | 673 | ||
674 | typedef struct { | 674 | typedef struct { |
675 | ulong32 entries; /* entry count */ | 675 | u32 entries; /* entry count */ |
676 | ulong32 offset; /* offset of entries */ | 676 | u32 offset; /* offset of entries */ |
677 | unchar secs_p_head; /* sectors/head */ | 677 | u8 secs_p_head; /* sectors/head */ |
678 | unchar heads_p_cyl; /* heads/cylinder */ | 678 | u8 heads_p_cyl; /* heads/cylinder */ |
679 | unchar reserved; | 679 | u8 reserved; |
680 | unchar clust_drvtype; /* cluster drive type */ | 680 | u8 clust_drvtype; /* cluster drive type */ |
681 | ulong32 location; /* controller number */ | 681 | u32 location; /* controller number */ |
682 | gdth_hentry_str entry[MAX_HDRIVES]; /* entries */ | 682 | gdth_hentry_str entry[MAX_HDRIVES]; /* entries */ |
683 | } PACKED gdth_hget_str; | 683 | } __attribute__((packed)) gdth_hget_str; |
684 | 684 | ||
685 | 685 | ||
686 | /* DPRAM structures */ | 686 | /* DPRAM structures */ |
687 | 687 | ||
688 | /* interface area ISA/PCI */ | 688 | /* interface area ISA/PCI */ |
689 | typedef struct { | 689 | typedef struct { |
690 | unchar S_Cmd_Indx; /* special command */ | 690 | u8 S_Cmd_Indx; /* special command */ |
691 | unchar volatile S_Status; /* status special command */ | 691 | u8 volatile S_Status; /* status special command */ |
692 | ushort reserved1; | 692 | u16 reserved1; |
693 | ulong32 S_Info[4]; /* add. info special command */ | 693 | u32 S_Info[4]; /* add. info special command */ |
694 | unchar volatile Sema0; /* command semaphore */ | 694 | u8 volatile Sema0; /* command semaphore */ |
695 | unchar reserved2[3]; | 695 | u8 reserved2[3]; |
696 | unchar Cmd_Index; /* command number */ | 696 | u8 Cmd_Index; /* command number */ |
697 | unchar reserved3[3]; | 697 | u8 reserved3[3]; |
698 | ushort volatile Status; /* command status */ | 698 | u16 volatile Status; /* command status */ |
699 | ushort Service; /* service(for async.events) */ | 699 | u16 Service; /* service(for async.events) */ |
700 | ulong32 Info[2]; /* additional info */ | 700 | u32 Info[2]; /* additional info */ |
701 | struct { | 701 | struct { |
702 | ushort offset; /* command offs. in the DPRAM*/ | 702 | u16 offset; /* command offs. in the DPRAM*/ |
703 | ushort serv_id; /* service */ | 703 | u16 serv_id; /* service */ |
704 | } PACKED comm_queue[MAXOFFSETS]; /* command queue */ | 704 | } __attribute__((packed)) comm_queue[MAXOFFSETS]; /* command queue */ |
705 | ulong32 bios_reserved[2]; | 705 | u32 bios_reserved[2]; |
706 | unchar gdt_dpr_cmd[1]; /* commands */ | 706 | u8 gdt_dpr_cmd[1]; /* commands */ |
707 | } PACKED gdt_dpr_if; | 707 | } __attribute__((packed)) gdt_dpr_if; |
708 | 708 | ||
709 | /* SRAM structure PCI controllers */ | 709 | /* SRAM structure PCI controllers */ |
710 | typedef struct { | 710 | typedef struct { |
711 | ulong32 magic; /* controller ID from BIOS */ | 711 | u32 magic; /* controller ID from BIOS */ |
712 | ushort need_deinit; /* switch betw. BIOS/driver */ | 712 | u16 need_deinit; /* switch betw. BIOS/driver */ |
713 | unchar switch_support; /* see need_deinit */ | 713 | u8 switch_support; /* see need_deinit */ |
714 | unchar padding[9]; | 714 | u8 padding[9]; |
715 | unchar os_used[16]; /* OS code per service */ | 715 | u8 os_used[16]; /* OS code per service */ |
716 | unchar unused[28]; | 716 | u8 unused[28]; |
717 | unchar fw_magic; /* contr. ID from firmware */ | 717 | u8 fw_magic; /* contr. ID from firmware */ |
718 | } PACKED gdt_pci_sram; | 718 | } __attribute__((packed)) gdt_pci_sram; |
719 | 719 | ||
720 | /* SRAM structure EISA controllers (but NOT GDT3000/3020) */ | 720 | /* SRAM structure EISA controllers (but NOT GDT3000/3020) */ |
721 | typedef struct { | 721 | typedef struct { |
722 | unchar os_used[16]; /* OS code per service */ | 722 | u8 os_used[16]; /* OS code per service */ |
723 | ushort need_deinit; /* switch betw. BIOS/driver */ | 723 | u16 need_deinit; /* switch betw. BIOS/driver */ |
724 | unchar switch_support; /* see need_deinit */ | 724 | u8 switch_support; /* see need_deinit */ |
725 | unchar padding; | 725 | u8 padding; |
726 | } PACKED gdt_eisa_sram; | 726 | } __attribute__((packed)) gdt_eisa_sram; |
727 | 727 | ||
728 | 728 | ||
729 | /* DPRAM ISA controllers */ | 729 | /* DPRAM ISA controllers */ |
730 | typedef struct { | 730 | typedef struct { |
731 | union { | 731 | union { |
732 | struct { | 732 | struct { |
733 | unchar bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */ | 733 | u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */ |
734 | ulong32 magic; /* controller (EISA) ID */ | 734 | u32 magic; /* controller (EISA) ID */ |
735 | ushort need_deinit; /* switch betw. BIOS/driver */ | 735 | u16 need_deinit; /* switch betw. BIOS/driver */ |
736 | unchar switch_support; /* see need_deinit */ | 736 | u8 switch_support; /* see need_deinit */ |
737 | unchar padding[9]; | 737 | u8 padding[9]; |
738 | unchar os_used[16]; /* OS code per service */ | 738 | u8 os_used[16]; /* OS code per service */ |
739 | } PACKED dp_sram; | 739 | } __attribute__((packed)) dp_sram; |
740 | unchar bios_area[0x4000]; /* 16KB reserved for BIOS */ | 740 | u8 bios_area[0x4000]; /* 16KB reserved for BIOS */ |
741 | } bu; | 741 | } bu; |
742 | union { | 742 | union { |
743 | gdt_dpr_if ic; /* interface area */ | 743 | gdt_dpr_if ic; /* interface area */ |
744 | unchar if_area[0x3000]; /* 12KB for interface */ | 744 | u8 if_area[0x3000]; /* 12KB for interface */ |
745 | } u; | 745 | } u; |
746 | struct { | 746 | struct { |
747 | unchar memlock; /* write protection DPRAM */ | 747 | u8 memlock; /* write protection DPRAM */ |
748 | unchar event; /* release event */ | 748 | u8 event; /* release event */ |
749 | unchar irqen; /* board interrupts enable */ | 749 | u8 irqen; /* board interrupts enable */ |
750 | unchar irqdel; /* acknowledge board int. */ | 750 | u8 irqdel; /* acknowledge board int. */ |
751 | unchar volatile Sema1; /* status semaphore */ | 751 | u8 volatile Sema1; /* status semaphore */ |
752 | unchar rq; /* IRQ/DRQ configuration */ | 752 | u8 rq; /* IRQ/DRQ configuration */ |
753 | } PACKED io; | 753 | } __attribute__((packed)) io; |
754 | } PACKED gdt2_dpram_str; | 754 | } __attribute__((packed)) gdt2_dpram_str; |
755 | 755 | ||
756 | /* DPRAM PCI controllers */ | 756 | /* DPRAM PCI controllers */ |
757 | typedef struct { | 757 | typedef struct { |
758 | union { | 758 | union { |
759 | gdt_dpr_if ic; /* interface area */ | 759 | gdt_dpr_if ic; /* interface area */ |
760 | unchar if_area[0xff0-sizeof(gdt_pci_sram)]; | 760 | u8 if_area[0xff0-sizeof(gdt_pci_sram)]; |
761 | } u; | 761 | } u; |
762 | gdt_pci_sram gdt6sr; /* SRAM structure */ | 762 | gdt_pci_sram gdt6sr; /* SRAM structure */ |
763 | struct { | 763 | struct { |
764 | unchar unused0[1]; | 764 | u8 unused0[1]; |
765 | unchar volatile Sema1; /* command semaphore */ | 765 | u8 volatile Sema1; /* command semaphore */ |
766 | unchar unused1[3]; | 766 | u8 unused1[3]; |
767 | unchar irqen; /* board interrupts enable */ | 767 | u8 irqen; /* board interrupts enable */ |
768 | unchar unused2[2]; | 768 | u8 unused2[2]; |
769 | unchar event; /* release event */ | 769 | u8 event; /* release event */ |
770 | unchar unused3[3]; | 770 | u8 unused3[3]; |
771 | unchar irqdel; /* acknowledge board int. */ | 771 | u8 irqdel; /* acknowledge board int. */ |
772 | unchar unused4[3]; | 772 | u8 unused4[3]; |
773 | } PACKED io; | 773 | } __attribute__((packed)) io; |
774 | } PACKED gdt6_dpram_str; | 774 | } __attribute__((packed)) gdt6_dpram_str; |
775 | 775 | ||
776 | /* PLX register structure (new PCI controllers) */ | 776 | /* PLX register structure (new PCI controllers) */ |
777 | typedef struct { | 777 | typedef struct { |
778 | unchar cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/ | 778 | u8 cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/ |
779 | unchar unused1[0x3f]; | 779 | u8 unused1[0x3f]; |
780 | unchar volatile sema0_reg; /* command semaphore */ | 780 | u8 volatile sema0_reg; /* command semaphore */ |
781 | unchar volatile sema1_reg; /* status semaphore */ | 781 | u8 volatile sema1_reg; /* status semaphore */ |
782 | unchar unused2[2]; | 782 | u8 unused2[2]; |
783 | ushort volatile status; /* command status */ | 783 | u16 volatile status; /* command status */ |
784 | ushort service; /* service */ | 784 | u16 service; /* service */ |
785 | ulong32 info[2]; /* additional info */ | 785 | u32 info[2]; /* additional info */ |
786 | unchar unused3[0x10]; | 786 | u8 unused3[0x10]; |
787 | unchar ldoor_reg; /* PCI to local doorbell */ | 787 | u8 ldoor_reg; /* PCI to local doorbell */ |
788 | unchar unused4[3]; | 788 | u8 unused4[3]; |
789 | unchar volatile edoor_reg; /* local to PCI doorbell */ | 789 | u8 volatile edoor_reg; /* local to PCI doorbell */ |
790 | unchar unused5[3]; | 790 | u8 unused5[3]; |
791 | unchar control0; /* control0 register(unused) */ | 791 | u8 control0; /* control0 register(unused) */ |
792 | unchar control1; /* board interrupts enable */ | 792 | u8 control1; /* board interrupts enable */ |
793 | unchar unused6[0x16]; | 793 | u8 unused6[0x16]; |
794 | } PACKED gdt6c_plx_regs; | 794 | } __attribute__((packed)) gdt6c_plx_regs; |
795 | 795 | ||
796 | /* DPRAM new PCI controllers */ | 796 | /* DPRAM new PCI controllers */ |
797 | typedef struct { | 797 | typedef struct { |
798 | union { | 798 | union { |
799 | gdt_dpr_if ic; /* interface area */ | 799 | gdt_dpr_if ic; /* interface area */ |
800 | unchar if_area[0x4000-sizeof(gdt_pci_sram)]; | 800 | u8 if_area[0x4000-sizeof(gdt_pci_sram)]; |
801 | } u; | 801 | } u; |
802 | gdt_pci_sram gdt6sr; /* SRAM structure */ | 802 | gdt_pci_sram gdt6sr; /* SRAM structure */ |
803 | } PACKED gdt6c_dpram_str; | 803 | } __attribute__((packed)) gdt6c_dpram_str; |
804 | 804 | ||
805 | /* i960 register structure (PCI MPR controllers) */ | 805 | /* i960 register structure (PCI MPR controllers) */ |
806 | typedef struct { | 806 | typedef struct { |
807 | unchar unused1[16]; | 807 | u8 unused1[16]; |
808 | unchar volatile sema0_reg; /* command semaphore */ | 808 | u8 volatile sema0_reg; /* command semaphore */ |
809 | unchar unused2; | 809 | u8 unused2; |
810 | unchar volatile sema1_reg; /* status semaphore */ | 810 | u8 volatile sema1_reg; /* status semaphore */ |
811 | unchar unused3; | 811 | u8 unused3; |
812 | ushort volatile status; /* command status */ | 812 | u16 volatile status; /* command status */ |
813 | ushort service; /* service */ | 813 | u16 service; /* service */ |
814 | ulong32 info[2]; /* additional info */ | 814 | u32 info[2]; /* additional info */ |
815 | unchar ldoor_reg; /* PCI to local doorbell */ | 815 | u8 ldoor_reg; /* PCI to local doorbell */ |
816 | unchar unused4[11]; | 816 | u8 unused4[11]; |
817 | unchar volatile edoor_reg; /* local to PCI doorbell */ | 817 | u8 volatile edoor_reg; /* local to PCI doorbell */ |
818 | unchar unused5[7]; | 818 | u8 unused5[7]; |
819 | unchar edoor_en_reg; /* board interrupts enable */ | 819 | u8 edoor_en_reg; /* board interrupts enable */ |
820 | unchar unused6[27]; | 820 | u8 unused6[27]; |
821 | ulong32 unused7[939]; | 821 | u32 unused7[939]; |
822 | ulong32 severity; | 822 | u32 severity; |
823 | char evt_str[256]; /* event string */ | 823 | char evt_str[256]; /* event string */ |
824 | } PACKED gdt6m_i960_regs; | 824 | } __attribute__((packed)) gdt6m_i960_regs; |
825 | 825 | ||
826 | /* DPRAM PCI MPR controllers */ | 826 | /* DPRAM PCI MPR controllers */ |
827 | typedef struct { | 827 | typedef struct { |
828 | gdt6m_i960_regs i960r; /* 4KB i960 registers */ | 828 | gdt6m_i960_regs i960r; /* 4KB i960 registers */ |
829 | union { | 829 | union { |
830 | gdt_dpr_if ic; /* interface area */ | 830 | gdt_dpr_if ic; /* interface area */ |
831 | unchar if_area[0x3000-sizeof(gdt_pci_sram)]; | 831 | u8 if_area[0x3000-sizeof(gdt_pci_sram)]; |
832 | } u; | 832 | } u; |
833 | gdt_pci_sram gdt6sr; /* SRAM structure */ | 833 | gdt_pci_sram gdt6sr; /* SRAM structure */ |
834 | } PACKED gdt6m_dpram_str; | 834 | } __attribute__((packed)) gdt6m_dpram_str; |
835 | 835 | ||
836 | 836 | ||
837 | /* PCI resources */ | 837 | /* PCI resources */ |
838 | typedef struct { | 838 | typedef struct { |
839 | struct pci_dev *pdev; | 839 | struct pci_dev *pdev; |
840 | ulong dpmem; /* DPRAM address */ | 840 | unsigned long dpmem; /* DPRAM address */ |
841 | ulong io; /* IO address */ | 841 | unsigned long io; /* IO address */ |
842 | } gdth_pci_str; | 842 | } gdth_pci_str; |
843 | 843 | ||
844 | 844 | ||
@@ -846,93 +846,93 @@ typedef struct { | |||
846 | typedef struct { | 846 | typedef struct { |
847 | struct Scsi_Host *shost; | 847 | struct Scsi_Host *shost; |
848 | struct list_head list; | 848 | struct list_head list; |
849 | ushort hanum; | 849 | u16 hanum; |
850 | ushort oem_id; /* OEM */ | 850 | u16 oem_id; /* OEM */ |
851 | ushort type; /* controller class */ | 851 | u16 type; /* controller class */ |
852 | ulong32 stype; /* subtype (PCI: device ID) */ | 852 | u32 stype; /* subtype (PCI: device ID) */ |
853 | ushort fw_vers; /* firmware version */ | 853 | u16 fw_vers; /* firmware version */ |
854 | ushort cache_feat; /* feat. cache serv. (s/g,..)*/ | 854 | u16 cache_feat; /* feat. cache serv. (s/g,..)*/ |
855 | ushort raw_feat; /* feat. raw service (s/g,..)*/ | 855 | u16 raw_feat; /* feat. raw service (s/g,..)*/ |
856 | ushort screen_feat; /* feat. raw service (s/g,..)*/ | 856 | u16 screen_feat; /* feat. raw service (s/g,..)*/ |
857 | ushort bmic; /* BMIC address (EISA) */ | 857 | u16 bmic; /* BMIC address (EISA) */ |
858 | void __iomem *brd; /* DPRAM address */ | 858 | void __iomem *brd; /* DPRAM address */ |
859 | ulong32 brd_phys; /* slot number/BIOS address */ | 859 | u32 brd_phys; /* slot number/BIOS address */ |
860 | gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */ | 860 | gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */ |
861 | gdth_cmd_str cmdext; | 861 | gdth_cmd_str cmdext; |
862 | gdth_cmd_str *pccb; /* address command structure */ | 862 | gdth_cmd_str *pccb; /* address command structure */ |
863 | ulong32 ccb_phys; /* phys. address */ | 863 | u32 ccb_phys; /* phys. address */ |
864 | #ifdef INT_COAL | 864 | #ifdef INT_COAL |
865 | gdth_coal_status *coal_stat; /* buffer for coalescing int.*/ | 865 | gdth_coal_status *coal_stat; /* buffer for coalescing int.*/ |
866 | ulong64 coal_stat_phys; /* phys. address */ | 866 | u64 coal_stat_phys; /* phys. address */ |
867 | #endif | 867 | #endif |
868 | char *pscratch; /* scratch (DMA) buffer */ | 868 | char *pscratch; /* scratch (DMA) buffer */ |
869 | ulong64 scratch_phys; /* phys. address */ | 869 | u64 scratch_phys; /* phys. address */ |
870 | unchar scratch_busy; /* in use? */ | 870 | u8 scratch_busy; /* in use? */ |
871 | unchar dma64_support; /* 64-bit DMA supported? */ | 871 | u8 dma64_support; /* 64-bit DMA supported? */ |
872 | gdth_msg_str *pmsg; /* message buffer */ | 872 | gdth_msg_str *pmsg; /* message buffer */ |
873 | ulong64 msg_phys; /* phys. address */ | 873 | u64 msg_phys; /* phys. address */ |
874 | unchar scan_mode; /* current scan mode */ | 874 | u8 scan_mode; /* current scan mode */ |
875 | unchar irq; /* IRQ */ | 875 | u8 irq; /* IRQ */ |
876 | unchar drq; /* DRQ (ISA controllers) */ | 876 | u8 drq; /* DRQ (ISA controllers) */ |
877 | ushort status; /* command status */ | 877 | u16 status; /* command status */ |
878 | ushort service; /* service/firmware ver./.. */ | 878 | u16 service; /* service/firmware ver./.. */ |
879 | ulong32 info; | 879 | u32 info; |
880 | ulong32 info2; /* additional info */ | 880 | u32 info2; /* additional info */ |
881 | Scsi_Cmnd *req_first; /* top of request queue */ | 881 | Scsi_Cmnd *req_first; /* top of request queue */ |
882 | struct { | 882 | struct { |
883 | unchar present; /* Flag: host drive present? */ | 883 | u8 present; /* Flag: host drive present? */ |
884 | unchar is_logdrv; /* Flag: log. drive (master)? */ | 884 | u8 is_logdrv; /* Flag: log. drive (master)? */ |
885 | unchar is_arraydrv; /* Flag: array drive? */ | 885 | u8 is_arraydrv; /* Flag: array drive? */ |
886 | unchar is_master; /* Flag: array drive master? */ | 886 | u8 is_master; /* Flag: array drive master? */ |
887 | unchar is_parity; /* Flag: parity drive? */ | 887 | u8 is_parity; /* Flag: parity drive? */ |
888 | unchar is_hotfix; /* Flag: hotfix drive? */ | 888 | u8 is_hotfix; /* Flag: hotfix drive? */ |
889 | unchar master_no; /* number of master drive */ | 889 | u8 master_no; /* number of master drive */ |
890 | unchar lock; /* drive locked? (hot plug) */ | 890 | u8 lock; /* drive locked? (hot plug) */ |
891 | unchar heads; /* mapping */ | 891 | u8 heads; /* mapping */ |
892 | unchar secs; | 892 | u8 secs; |
893 | ushort devtype; /* further information */ | 893 | u16 devtype; /* further information */ |
894 | ulong64 size; /* capacity */ | 894 | u64 size; /* capacity */ |
895 | unchar ldr_no; /* log. drive no. */ | 895 | u8 ldr_no; /* log. drive no. */ |
896 | unchar rw_attribs; /* r/w attributes */ | 896 | u8 rw_attribs; /* r/w attributes */ |
897 | unchar cluster_type; /* cluster properties */ | 897 | u8 cluster_type; /* cluster properties */ |
898 | unchar media_changed; /* Flag:MOUNT/UNMOUNT occured */ | 898 | u8 media_changed; /* Flag:MOUNT/UNMOUNT occured */ |
899 | ulong32 start_sec; /* start sector */ | 899 | u32 start_sec; /* start sector */ |
900 | } hdr[MAX_LDRIVES]; /* host drives */ | 900 | } hdr[MAX_LDRIVES]; /* host drives */ |
901 | struct { | 901 | struct { |
902 | unchar lock; /* channel locked? (hot plug) */ | 902 | u8 lock; /* channel locked? (hot plug) */ |
903 | unchar pdev_cnt; /* physical device count */ | 903 | u8 pdev_cnt; /* physical device count */ |
904 | unchar local_no; /* local channel number */ | 904 | u8 local_no; /* local channel number */ |
905 | unchar io_cnt[MAXID]; /* current IO count */ | 905 | u8 io_cnt[MAXID]; /* current IO count */ |
906 | ulong32 address; /* channel address */ | 906 | u32 address; /* channel address */ |
907 | ulong32 id_list[MAXID]; /* IDs of the phys. devices */ | 907 | u32 id_list[MAXID]; /* IDs of the phys. devices */ |
908 | } raw[MAXBUS]; /* SCSI channels */ | 908 | } raw[MAXBUS]; /* SCSI channels */ |
909 | struct { | 909 | struct { |
910 | Scsi_Cmnd *cmnd; /* pending request */ | 910 | Scsi_Cmnd *cmnd; /* pending request */ |
911 | ushort service; /* service */ | 911 | u16 service; /* service */ |
912 | } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */ | 912 | } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */ |
913 | struct gdth_cmndinfo { /* per-command private info */ | 913 | struct gdth_cmndinfo { /* per-command private info */ |
914 | int index; | 914 | int index; |
915 | int internal_command; /* don't call scsi_done */ | 915 | int internal_command; /* don't call scsi_done */ |
916 | gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/ | 916 | gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/ |
917 | dma_addr_t sense_paddr; /* sense dma-addr */ | 917 | dma_addr_t sense_paddr; /* sense dma-addr */ |
918 | unchar priority; | 918 | u8 priority; |
919 | int timeout_count; /* # of timeout calls */ | 919 | int timeout_count; /* # of timeout calls */ |
920 | volatile int wait_for_completion; | 920 | volatile int wait_for_completion; |
921 | ushort status; | 921 | u16 status; |
922 | ulong32 info; | 922 | u32 info; |
923 | enum dma_data_direction dma_dir; | 923 | enum dma_data_direction dma_dir; |
924 | int phase; /* ???? */ | 924 | int phase; /* ???? */ |
925 | int OpCode; | 925 | int OpCode; |
926 | } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */ | 926 | } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */ |
927 | unchar bus_cnt; /* SCSI bus count */ | 927 | u8 bus_cnt; /* SCSI bus count */ |
928 | unchar tid_cnt; /* Target ID count */ | 928 | u8 tid_cnt; /* Target ID count */ |
929 | unchar bus_id[MAXBUS]; /* IOP IDs */ | 929 | u8 bus_id[MAXBUS]; /* IOP IDs */ |
930 | unchar virt_bus; /* number of virtual bus */ | 930 | u8 virt_bus; /* number of virtual bus */ |
931 | unchar more_proc; /* more /proc info supported */ | 931 | u8 more_proc; /* more /proc info supported */ |
932 | ushort cmd_cnt; /* command count in DPRAM */ | 932 | u16 cmd_cnt; /* command count in DPRAM */ |
933 | ushort cmd_len; /* length of actual command */ | 933 | u16 cmd_len; /* length of actual command */ |
934 | ushort cmd_offs_dpmem; /* actual offset in DPRAM */ | 934 | u16 cmd_offs_dpmem; /* actual offset in DPRAM */ |
935 | ushort ic_all_size; /* sizeof DPRAM interf. area */ | 935 | u16 ic_all_size; /* sizeof DPRAM interf. area */ |
936 | gdth_cpar_str cpar; /* controller cache par. */ | 936 | gdth_cpar_str cpar; /* controller cache par. */ |
937 | gdth_bfeat_str bfeat; /* controller features */ | 937 | gdth_bfeat_str bfeat; /* controller features */ |
938 | gdth_binfo_str binfo; /* controller info */ | 938 | gdth_binfo_str binfo; /* controller info */ |
@@ -941,7 +941,7 @@ typedef struct { | |||
941 | struct pci_dev *pdev; | 941 | struct pci_dev *pdev; |
942 | char oem_name[8]; | 942 | char oem_name[8]; |
943 | #ifdef GDTH_DMA_STATISTICS | 943 | #ifdef GDTH_DMA_STATISTICS |
944 | ulong dma32_cnt, dma64_cnt; /* statistics: DMA buffer */ | 944 | unsigned long dma32_cnt, dma64_cnt; /* statistics: DMA buffer */ |
945 | #endif | 945 | #endif |
946 | struct scsi_device *sdev; | 946 | struct scsi_device *sdev; |
947 | } gdth_ha_str; | 947 | } gdth_ha_str; |
@@ -953,65 +953,65 @@ static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd* cmd) | |||
953 | 953 | ||
954 | /* INQUIRY data format */ | 954 | /* INQUIRY data format */ |
955 | typedef struct { | 955 | typedef struct { |
956 | unchar type_qual; | 956 | u8 type_qual; |
957 | unchar modif_rmb; | 957 | u8 modif_rmb; |
958 | unchar version; | 958 | u8 version; |
959 | unchar resp_aenc; | 959 | u8 resp_aenc; |
960 | unchar add_length; | 960 | u8 add_length; |
961 | unchar reserved1; | 961 | u8 reserved1; |
962 | unchar reserved2; | 962 | u8 reserved2; |
963 | unchar misc; | 963 | u8 misc; |
964 | unchar vendor[8]; | 964 | u8 vendor[8]; |
965 | unchar product[16]; | 965 | u8 product[16]; |
966 | unchar revision[4]; | 966 | u8 revision[4]; |
967 | } PACKED gdth_inq_data; | 967 | } __attribute__((packed)) gdth_inq_data; |
968 | 968 | ||
969 | /* READ_CAPACITY data format */ | 969 | /* READ_CAPACITY data format */ |
970 | typedef struct { | 970 | typedef struct { |
971 | ulong32 last_block_no; | 971 | u32 last_block_no; |
972 | ulong32 block_length; | 972 | u32 block_length; |
973 | } PACKED gdth_rdcap_data; | 973 | } __attribute__((packed)) gdth_rdcap_data; |
974 | 974 | ||
975 | /* READ_CAPACITY (16) data format */ | 975 | /* READ_CAPACITY (16) data format */ |
976 | typedef struct { | 976 | typedef struct { |
977 | ulong64 last_block_no; | 977 | u64 last_block_no; |
978 | ulong32 block_length; | 978 | u32 block_length; |
979 | } PACKED gdth_rdcap16_data; | 979 | } __attribute__((packed)) gdth_rdcap16_data; |
980 | 980 | ||
981 | /* REQUEST_SENSE data format */ | 981 | /* REQUEST_SENSE data format */ |
982 | typedef struct { | 982 | typedef struct { |
983 | unchar errorcode; | 983 | u8 errorcode; |
984 | unchar segno; | 984 | u8 segno; |
985 | unchar key; | 985 | u8 key; |
986 | ulong32 info; | 986 | u32 info; |
987 | unchar add_length; | 987 | u8 add_length; |
988 | ulong32 cmd_info; | 988 | u32 cmd_info; |
989 | unchar adsc; | 989 | u8 adsc; |
990 | unchar adsq; | 990 | u8 adsq; |
991 | unchar fruc; | 991 | u8 fruc; |
992 | unchar key_spec[3]; | 992 | u8 key_spec[3]; |
993 | } PACKED gdth_sense_data; | 993 | } __attribute__((packed)) gdth_sense_data; |
994 | 994 | ||
995 | /* MODE_SENSE data format */ | 995 | /* MODE_SENSE data format */ |
996 | typedef struct { | 996 | typedef struct { |
997 | struct { | 997 | struct { |
998 | unchar data_length; | 998 | u8 data_length; |
999 | unchar med_type; | 999 | u8 med_type; |
1000 | unchar dev_par; | 1000 | u8 dev_par; |
1001 | unchar bd_length; | 1001 | u8 bd_length; |
1002 | } PACKED hd; | 1002 | } __attribute__((packed)) hd; |
1003 | struct { | 1003 | struct { |
1004 | unchar dens_code; | 1004 | u8 dens_code; |
1005 | unchar block_count[3]; | 1005 | u8 block_count[3]; |
1006 | unchar reserved; | 1006 | u8 reserved; |
1007 | unchar block_length[3]; | 1007 | u8 block_length[3]; |
1008 | } PACKED bd; | 1008 | } __attribute__((packed)) bd; |
1009 | } PACKED gdth_modep_data; | 1009 | } __attribute__((packed)) gdth_modep_data; |
1010 | 1010 | ||
1011 | /* stack frame */ | 1011 | /* stack frame */ |
1012 | typedef struct { | 1012 | typedef struct { |
1013 | ulong b[10]; /* 32/64 bit compiler ! */ | 1013 | unsigned long b[10]; /* 32/64 bit compiler ! */ |
1014 | } PACKED gdth_stackframe; | 1014 | } __attribute__((packed)) gdth_stackframe; |
1015 | 1015 | ||
1016 | 1016 | ||
1017 | /* function prototyping */ | 1017 | /* function prototyping */ |
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h index 783fae737f17..b004c6165887 100644 --- a/drivers/scsi/gdth_ioctl.h +++ b/drivers/scsi/gdth_ioctl.h | |||
@@ -32,109 +32,101 @@ | |||
32 | #define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */ | 32 | #define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */ |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* typedefs */ | ||
36 | #ifdef __KERNEL__ | ||
37 | typedef u32 ulong32; | ||
38 | typedef u64 ulong64; | ||
39 | #endif | ||
40 | |||
41 | #define PACKED __attribute__((packed)) | ||
42 | |||
43 | /* scatter/gather element */ | 35 | /* scatter/gather element */ |
44 | typedef struct { | 36 | typedef struct { |
45 | ulong32 sg_ptr; /* address */ | 37 | u32 sg_ptr; /* address */ |
46 | ulong32 sg_len; /* length */ | 38 | u32 sg_len; /* length */ |
47 | } PACKED gdth_sg_str; | 39 | } __attribute__((packed)) gdth_sg_str; |
48 | 40 | ||
49 | /* scatter/gather element - 64bit addresses */ | 41 | /* scatter/gather element - 64bit addresses */ |
50 | typedef struct { | 42 | typedef struct { |
51 | ulong64 sg_ptr; /* address */ | 43 | u64 sg_ptr; /* address */ |
52 | ulong32 sg_len; /* length */ | 44 | u32 sg_len; /* length */ |
53 | } PACKED gdth_sg64_str; | 45 | } __attribute__((packed)) gdth_sg64_str; |
54 | 46 | ||
55 | /* command structure */ | 47 | /* command structure */ |
56 | typedef struct { | 48 | typedef struct { |
57 | ulong32 BoardNode; /* board node (always 0) */ | 49 | u32 BoardNode; /* board node (always 0) */ |
58 | ulong32 CommandIndex; /* command number */ | 50 | u32 CommandIndex; /* command number */ |
59 | ushort OpCode; /* the command (READ,..) */ | 51 | u16 OpCode; /* the command (READ,..) */ |
60 | union { | 52 | union { |
61 | struct { | 53 | struct { |
62 | ushort DeviceNo; /* number of cache drive */ | 54 | u16 DeviceNo; /* number of cache drive */ |
63 | ulong32 BlockNo; /* block number */ | 55 | u32 BlockNo; /* block number */ |
64 | ulong32 BlockCnt; /* block count */ | 56 | u32 BlockCnt; /* block count */ |
65 | ulong32 DestAddr; /* dest. addr. (if s/g: -1) */ | 57 | u32 DestAddr; /* dest. addr. (if s/g: -1) */ |
66 | ulong32 sg_canz; /* s/g element count */ | 58 | u32 sg_canz; /* s/g element count */ |
67 | gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ | 59 | gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ |
68 | } PACKED cache; /* cache service cmd. str. */ | 60 | } __attribute__((packed)) cache; /* cache service cmd. str. */ |
69 | struct { | 61 | struct { |
70 | ushort DeviceNo; /* number of cache drive */ | 62 | u16 DeviceNo; /* number of cache drive */ |
71 | ulong64 BlockNo; /* block number */ | 63 | u64 BlockNo; /* block number */ |
72 | ulong32 BlockCnt; /* block count */ | 64 | u32 BlockCnt; /* block count */ |
73 | ulong64 DestAddr; /* dest. addr. (if s/g: -1) */ | 65 | u64 DestAddr; /* dest. addr. (if s/g: -1) */ |
74 | ulong32 sg_canz; /* s/g element count */ | 66 | u32 sg_canz; /* s/g element count */ |
75 | gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ | 67 | gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ |
76 | } PACKED cache64; /* cache service cmd. str. */ | 68 | } __attribute__((packed)) cache64; /* cache service cmd. str. */ |
77 | struct { | 69 | struct { |
78 | ushort param_size; /* size of p_param buffer */ | 70 | u16 param_size; /* size of p_param buffer */ |
79 | ulong32 subfunc; /* IOCTL function */ | 71 | u32 subfunc; /* IOCTL function */ |
80 | ulong32 channel; /* device */ | 72 | u32 channel; /* device */ |
81 | ulong64 p_param; /* buffer */ | 73 | u64 p_param; /* buffer */ |
82 | } PACKED ioctl; /* IOCTL command structure */ | 74 | } __attribute__((packed)) ioctl; /* IOCTL command structure */ |
83 | struct { | 75 | struct { |
84 | ushort reserved; | 76 | u16 reserved; |
85 | union { | 77 | union { |
86 | struct { | 78 | struct { |
87 | ulong32 msg_handle; /* message handle */ | 79 | u32 msg_handle; /* message handle */ |
88 | ulong64 msg_addr; /* message buffer address */ | 80 | u64 msg_addr; /* message buffer address */ |
89 | } PACKED msg; | 81 | } __attribute__((packed)) msg; |
90 | unchar data[12]; /* buffer for rtc data, ... */ | 82 | u8 data[12]; /* buffer for rtc data, ... */ |
91 | } su; | 83 | } su; |
92 | } PACKED screen; /* screen service cmd. str. */ | 84 | } __attribute__((packed)) screen; /* screen service cmd. str. */ |
93 | struct { | 85 | struct { |
94 | ushort reserved; | 86 | u16 reserved; |
95 | ulong32 direction; /* data direction */ | 87 | u32 direction; /* data direction */ |
96 | ulong32 mdisc_time; /* disc. time (0: no timeout)*/ | 88 | u32 mdisc_time; /* disc. time (0: no timeout)*/ |
97 | ulong32 mcon_time; /* connect time(0: no to.) */ | 89 | u32 mcon_time; /* connect time(0: no to.) */ |
98 | ulong32 sdata; /* dest. addr. (if s/g: -1) */ | 90 | u32 sdata; /* dest. addr. (if s/g: -1) */ |
99 | ulong32 sdlen; /* data length (bytes) */ | 91 | u32 sdlen; /* data length (bytes) */ |
100 | ulong32 clen; /* SCSI cmd. length(6,10,12) */ | 92 | u32 clen; /* SCSI cmd. length(6,10,12) */ |
101 | unchar cmd[12]; /* SCSI command */ | 93 | u8 cmd[12]; /* SCSI command */ |
102 | unchar target; /* target ID */ | 94 | u8 target; /* target ID */ |
103 | unchar lun; /* LUN */ | 95 | u8 lun; /* LUN */ |
104 | unchar bus; /* SCSI bus number */ | 96 | u8 bus; /* SCSI bus number */ |
105 | unchar priority; /* only 0 used */ | 97 | u8 priority; /* only 0 used */ |
106 | ulong32 sense_len; /* sense data length */ | 98 | u32 sense_len; /* sense data length */ |
107 | ulong32 sense_data; /* sense data addr. */ | 99 | u32 sense_data; /* sense data addr. */ |
108 | ulong32 link_p; /* linked cmds (not supp.) */ | 100 | u32 link_p; /* linked cmds (not supp.) */ |
109 | ulong32 sg_ranz; /* s/g element count */ | 101 | u32 sg_ranz; /* s/g element count */ |
110 | gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ | 102 | gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */ |
111 | } PACKED raw; /* raw service cmd. struct. */ | 103 | } __attribute__((packed)) raw; /* raw service cmd. struct. */ |
112 | struct { | 104 | struct { |
113 | ushort reserved; | 105 | u16 reserved; |
114 | ulong32 direction; /* data direction */ | 106 | u32 direction; /* data direction */ |
115 | ulong32 mdisc_time; /* disc. time (0: no timeout)*/ | 107 | u32 mdisc_time; /* disc. time (0: no timeout)*/ |
116 | ulong32 mcon_time; /* connect time(0: no to.) */ | 108 | u32 mcon_time; /* connect time(0: no to.) */ |
117 | ulong64 sdata; /* dest. addr. (if s/g: -1) */ | 109 | u64 sdata; /* dest. addr. (if s/g: -1) */ |
118 | ulong32 sdlen; /* data length (bytes) */ | 110 | u32 sdlen; /* data length (bytes) */ |
119 | ulong32 clen; /* SCSI cmd. length(6,..,16) */ | 111 | u32 clen; /* SCSI cmd. length(6,..,16) */ |
120 | unchar cmd[16]; /* SCSI command */ | 112 | u8 cmd[16]; /* SCSI command */ |
121 | unchar target; /* target ID */ | 113 | u8 target; /* target ID */ |
122 | unchar lun; /* LUN */ | 114 | u8 lun; /* LUN */ |
123 | unchar bus; /* SCSI bus number */ | 115 | u8 bus; /* SCSI bus number */ |
124 | unchar priority; /* only 0 used */ | 116 | u8 priority; /* only 0 used */ |
125 | ulong32 sense_len; /* sense data length */ | 117 | u32 sense_len; /* sense data length */ |
126 | ulong64 sense_data; /* sense data addr. */ | 118 | u64 sense_data; /* sense data addr. */ |
127 | ulong32 sg_ranz; /* s/g element count */ | 119 | u32 sg_ranz; /* s/g element count */ |
128 | gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ | 120 | gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */ |
129 | } PACKED raw64; /* raw service cmd. struct. */ | 121 | } __attribute__((packed)) raw64; /* raw service cmd. struct. */ |
130 | } u; | 122 | } u; |
131 | /* additional variables */ | 123 | /* additional variables */ |
132 | unchar Service; /* controller service */ | 124 | u8 Service; /* controller service */ |
133 | unchar reserved; | 125 | u8 reserved; |
134 | ushort Status; /* command result */ | 126 | u16 Status; /* command result */ |
135 | ulong32 Info; /* additional information */ | 127 | u32 Info; /* additional information */ |
136 | void *RequestBuffer; /* request buffer */ | 128 | void *RequestBuffer; /* request buffer */ |
137 | } PACKED gdth_cmd_str; | 129 | } __attribute__((packed)) gdth_cmd_str; |
138 | 130 | ||
139 | /* controller event structure */ | 131 | /* controller event structure */ |
140 | #define ES_ASYNC 1 | 132 | #define ES_ASYNC 1 |
@@ -142,129 +134,129 @@ typedef struct { | |||
142 | #define ES_TEST 3 | 134 | #define ES_TEST 3 |
143 | #define ES_SYNC 4 | 135 | #define ES_SYNC 4 |
144 | typedef struct { | 136 | typedef struct { |
145 | ushort size; /* size of structure */ | 137 | u16 size; /* size of structure */ |
146 | union { | 138 | union { |
147 | char stream[16]; | 139 | char stream[16]; |
148 | struct { | 140 | struct { |
149 | ushort ionode; | 141 | u16 ionode; |
150 | ushort service; | 142 | u16 service; |
151 | ulong32 index; | 143 | u32 index; |
152 | } PACKED driver; | 144 | } __attribute__((packed)) driver; |
153 | struct { | 145 | struct { |
154 | ushort ionode; | 146 | u16 ionode; |
155 | ushort service; | 147 | u16 service; |
156 | ushort status; | 148 | u16 status; |
157 | ulong32 info; | 149 | u32 info; |
158 | unchar scsi_coord[3]; | 150 | u8 scsi_coord[3]; |
159 | } PACKED async; | 151 | } __attribute__((packed)) async; |
160 | struct { | 152 | struct { |
161 | ushort ionode; | 153 | u16 ionode; |
162 | ushort service; | 154 | u16 service; |
163 | ushort status; | 155 | u16 status; |
164 | ulong32 info; | 156 | u32 info; |
165 | ushort hostdrive; | 157 | u16 hostdrive; |
166 | unchar scsi_coord[3]; | 158 | u8 scsi_coord[3]; |
167 | unchar sense_key; | 159 | u8 sense_key; |
168 | } PACKED sync; | 160 | } __attribute__((packed)) sync; |
169 | struct { | 161 | struct { |
170 | ulong32 l1, l2, l3, l4; | 162 | u32 l1, l2, l3, l4; |
171 | } PACKED test; | 163 | } __attribute__((packed)) test; |
172 | } eu; | 164 | } eu; |
173 | ulong32 severity; | 165 | u32 severity; |
174 | unchar event_string[256]; | 166 | u8 event_string[256]; |
175 | } PACKED gdth_evt_data; | 167 | } __attribute__((packed)) gdth_evt_data; |
176 | 168 | ||
177 | typedef struct { | 169 | typedef struct { |
178 | ulong32 first_stamp; | 170 | u32 first_stamp; |
179 | ulong32 last_stamp; | 171 | u32 last_stamp; |
180 | ushort same_count; | 172 | u16 same_count; |
181 | ushort event_source; | 173 | u16 event_source; |
182 | ushort event_idx; | 174 | u16 event_idx; |
183 | unchar application; | 175 | u8 application; |
184 | unchar reserved; | 176 | u8 reserved; |
185 | gdth_evt_data event_data; | 177 | gdth_evt_data event_data; |
186 | } PACKED gdth_evt_str; | 178 | } __attribute__((packed)) gdth_evt_str; |
187 | 179 | ||
188 | 180 | ||
189 | #ifdef GDTH_IOCTL_PROC | 181 | #ifdef GDTH_IOCTL_PROC |
190 | /* IOCTL structure (write) */ | 182 | /* IOCTL structure (write) */ |
191 | typedef struct { | 183 | typedef struct { |
192 | ulong32 magic; /* IOCTL magic */ | 184 | u32 magic; /* IOCTL magic */ |
193 | ushort ioctl; /* IOCTL */ | 185 | u16 ioctl; /* IOCTL */ |
194 | ushort ionode; /* controller number */ | 186 | u16 ionode; /* controller number */ |
195 | ushort service; /* controller service */ | 187 | u16 service; /* controller service */ |
196 | ushort timeout; /* timeout */ | 188 | u16 timeout; /* timeout */ |
197 | union { | 189 | union { |
198 | struct { | 190 | struct { |
199 | unchar command[512]; /* controller command */ | 191 | u8 command[512]; /* controller command */ |
200 | unchar data[1]; /* add. data */ | 192 | u8 data[1]; /* add. data */ |
201 | } general; | 193 | } general; |
202 | struct { | 194 | struct { |
203 | unchar lock; /* lock/unlock */ | 195 | u8 lock; /* lock/unlock */ |
204 | unchar drive_cnt; /* drive count */ | 196 | u8 drive_cnt; /* drive count */ |
205 | ushort drives[MAX_HDRIVES];/* drives */ | 197 | u16 drives[MAX_HDRIVES];/* drives */ |
206 | } lockdrv; | 198 | } lockdrv; |
207 | struct { | 199 | struct { |
208 | unchar lock; /* lock/unlock */ | 200 | u8 lock; /* lock/unlock */ |
209 | unchar channel; /* channel */ | 201 | u8 channel; /* channel */ |
210 | } lockchn; | 202 | } lockchn; |
211 | struct { | 203 | struct { |
212 | int erase; /* erase event ? */ | 204 | int erase; /* erase event ? */ |
213 | int handle; | 205 | int handle; |
214 | unchar evt[EVENT_SIZE]; /* event structure */ | 206 | u8 evt[EVENT_SIZE]; /* event structure */ |
215 | } event; | 207 | } event; |
216 | struct { | 208 | struct { |
217 | unchar bus; /* SCSI bus */ | 209 | u8 bus; /* SCSI bus */ |
218 | unchar target; /* target ID */ | 210 | u8 target; /* target ID */ |
219 | unchar lun; /* LUN */ | 211 | u8 lun; /* LUN */ |
220 | unchar cmd_len; /* command length */ | 212 | u8 cmd_len; /* command length */ |
221 | unchar cmd[12]; /* SCSI command */ | 213 | u8 cmd[12]; /* SCSI command */ |
222 | } scsi; | 214 | } scsi; |
223 | struct { | 215 | struct { |
224 | ushort hdr_no; /* host drive number */ | 216 | u16 hdr_no; /* host drive number */ |
225 | unchar flag; /* old meth./add/remove */ | 217 | u8 flag; /* old meth./add/remove */ |
226 | } rescan; | 218 | } rescan; |
227 | } iu; | 219 | } iu; |
228 | } gdth_iowr_str; | 220 | } gdth_iowr_str; |
229 | 221 | ||
230 | /* IOCTL structure (read) */ | 222 | /* IOCTL structure (read) */ |
231 | typedef struct { | 223 | typedef struct { |
232 | ulong32 size; /* buffer size */ | 224 | u32 size; /* buffer size */ |
233 | ulong32 status; /* IOCTL error code */ | 225 | u32 status; /* IOCTL error code */ |
234 | union { | 226 | union { |
235 | struct { | 227 | struct { |
236 | unchar data[1]; /* data */ | 228 | u8 data[1]; /* data */ |
237 | } general; | 229 | } general; |
238 | struct { | 230 | struct { |
239 | ushort version; /* driver version */ | 231 | u16 version; /* driver version */ |
240 | } drvers; | 232 | } drvers; |
241 | struct { | 233 | struct { |
242 | unchar type; /* controller type */ | 234 | u8 type; /* controller type */ |
243 | ushort info; /* slot etc. */ | 235 | u16 info; /* slot etc. */ |
244 | ushort oem_id; /* OEM ID */ | 236 | u16 oem_id; /* OEM ID */ |
245 | ushort bios_ver; /* not used */ | 237 | u16 bios_ver; /* not used */ |
246 | ushort access; /* not used */ | 238 | u16 access; /* not used */ |
247 | ushort ext_type; /* extended type */ | 239 | u16 ext_type; /* extended type */ |
248 | ushort device_id; /* device ID */ | 240 | u16 device_id; /* device ID */ |
249 | ushort sub_device_id; /* sub device ID */ | 241 | u16 sub_device_id; /* sub device ID */ |
250 | } ctrtype; | 242 | } ctrtype; |
251 | struct { | 243 | struct { |
252 | unchar version; /* OS version */ | 244 | u8 version; /* OS version */ |
253 | unchar subversion; /* OS subversion */ | 245 | u8 subversion; /* OS subversion */ |
254 | ushort revision; /* revision */ | 246 | u16 revision; /* revision */ |
255 | } osvers; | 247 | } osvers; |
256 | struct { | 248 | struct { |
257 | ushort count; /* controller count */ | 249 | u16 count; /* controller count */ |
258 | } ctrcnt; | 250 | } ctrcnt; |
259 | struct { | 251 | struct { |
260 | int handle; | 252 | int handle; |
261 | unchar evt[EVENT_SIZE]; /* event structure */ | 253 | u8 evt[EVENT_SIZE]; /* event structure */ |
262 | } event; | 254 | } event; |
263 | struct { | 255 | struct { |
264 | unchar bus; /* SCSI bus, 0xff: invalid */ | 256 | u8 bus; /* SCSI bus, 0xff: invalid */ |
265 | unchar target; /* target ID */ | 257 | u8 target; /* target ID */ |
266 | unchar lun; /* LUN */ | 258 | u8 lun; /* LUN */ |
267 | unchar cluster_type; /* cluster properties */ | 259 | u8 cluster_type; /* cluster properties */ |
268 | } hdr_list[MAX_HDRIVES]; /* index is host drive number */ | 260 | } hdr_list[MAX_HDRIVES]; /* index is host drive number */ |
269 | } iu; | 261 | } iu; |
270 | } gdth_iord_str; | 262 | } gdth_iord_str; |
@@ -272,53 +264,53 @@ typedef struct { | |||
272 | 264 | ||
273 | /* GDTIOCTL_GENERAL */ | 265 | /* GDTIOCTL_GENERAL */ |
274 | typedef struct { | 266 | typedef struct { |
275 | ushort ionode; /* controller number */ | 267 | u16 ionode; /* controller number */ |
276 | ushort timeout; /* timeout */ | 268 | u16 timeout; /* timeout */ |
277 | ulong32 info; /* error info */ | 269 | u32 info; /* error info */ |
278 | ushort status; /* status */ | 270 | u16 status; /* status */ |
279 | ulong data_len; /* data buffer size */ | 271 | unsigned long data_len; /* data buffer size */ |
280 | ulong sense_len; /* sense buffer size */ | 272 | unsigned long sense_len; /* sense buffer size */ |
281 | gdth_cmd_str command; /* command */ | 273 | gdth_cmd_str command; /* command */ |
282 | } gdth_ioctl_general; | 274 | } gdth_ioctl_general; |
283 | 275 | ||
284 | /* GDTIOCTL_LOCKDRV */ | 276 | /* GDTIOCTL_LOCKDRV */ |
285 | typedef struct { | 277 | typedef struct { |
286 | ushort ionode; /* controller number */ | 278 | u16 ionode; /* controller number */ |
287 | unchar lock; /* lock/unlock */ | 279 | u8 lock; /* lock/unlock */ |
288 | unchar drive_cnt; /* drive count */ | 280 | u8 drive_cnt; /* drive count */ |
289 | ushort drives[MAX_HDRIVES]; /* drives */ | 281 | u16 drives[MAX_HDRIVES]; /* drives */ |
290 | } gdth_ioctl_lockdrv; | 282 | } gdth_ioctl_lockdrv; |
291 | 283 | ||
292 | /* GDTIOCTL_LOCKCHN */ | 284 | /* GDTIOCTL_LOCKCHN */ |
293 | typedef struct { | 285 | typedef struct { |
294 | ushort ionode; /* controller number */ | 286 | u16 ionode; /* controller number */ |
295 | unchar lock; /* lock/unlock */ | 287 | u8 lock; /* lock/unlock */ |
296 | unchar channel; /* channel */ | 288 | u8 channel; /* channel */ |
297 | } gdth_ioctl_lockchn; | 289 | } gdth_ioctl_lockchn; |
298 | 290 | ||
299 | /* GDTIOCTL_OSVERS */ | 291 | /* GDTIOCTL_OSVERS */ |
300 | typedef struct { | 292 | typedef struct { |
301 | unchar version; /* OS version */ | 293 | u8 version; /* OS version */ |
302 | unchar subversion; /* OS subversion */ | 294 | u8 subversion; /* OS subversion */ |
303 | ushort revision; /* revision */ | 295 | u16 revision; /* revision */ |
304 | } gdth_ioctl_osvers; | 296 | } gdth_ioctl_osvers; |
305 | 297 | ||
306 | /* GDTIOCTL_CTRTYPE */ | 298 | /* GDTIOCTL_CTRTYPE */ |
307 | typedef struct { | 299 | typedef struct { |
308 | ushort ionode; /* controller number */ | 300 | u16 ionode; /* controller number */ |
309 | unchar type; /* controller type */ | 301 | u8 type; /* controller type */ |
310 | ushort info; /* slot etc. */ | 302 | u16 info; /* slot etc. */ |
311 | ushort oem_id; /* OEM ID */ | 303 | u16 oem_id; /* OEM ID */ |
312 | ushort bios_ver; /* not used */ | 304 | u16 bios_ver; /* not used */ |
313 | ushort access; /* not used */ | 305 | u16 access; /* not used */ |
314 | ushort ext_type; /* extended type */ | 306 | u16 ext_type; /* extended type */ |
315 | ushort device_id; /* device ID */ | 307 | u16 device_id; /* device ID */ |
316 | ushort sub_device_id; /* sub device ID */ | 308 | u16 sub_device_id; /* sub device ID */ |
317 | } gdth_ioctl_ctrtype; | 309 | } gdth_ioctl_ctrtype; |
318 | 310 | ||
319 | /* GDTIOCTL_EVENT */ | 311 | /* GDTIOCTL_EVENT */ |
320 | typedef struct { | 312 | typedef struct { |
321 | ushort ionode; | 313 | u16 ionode; |
322 | int erase; /* erase event? */ | 314 | int erase; /* erase event? */ |
323 | int handle; /* event handle */ | 315 | int handle; /* event handle */ |
324 | gdth_evt_str event; | 316 | gdth_evt_str event; |
@@ -326,22 +318,22 @@ typedef struct { | |||
326 | 318 | ||
327 | /* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */ | 319 | /* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */ |
328 | typedef struct { | 320 | typedef struct { |
329 | ushort ionode; /* controller number */ | 321 | u16 ionode; /* controller number */ |
330 | unchar flag; /* add/remove */ | 322 | u8 flag; /* add/remove */ |
331 | ushort hdr_no; /* drive no. */ | 323 | u16 hdr_no; /* drive no. */ |
332 | struct { | 324 | struct { |
333 | unchar bus; /* SCSI bus */ | 325 | u8 bus; /* SCSI bus */ |
334 | unchar target; /* target ID */ | 326 | u8 target; /* target ID */ |
335 | unchar lun; /* LUN */ | 327 | u8 lun; /* LUN */ |
336 | unchar cluster_type; /* cluster properties */ | 328 | u8 cluster_type; /* cluster properties */ |
337 | } hdr_list[MAX_HDRIVES]; /* index is host drive number */ | 329 | } hdr_list[MAX_HDRIVES]; /* index is host drive number */ |
338 | } gdth_ioctl_rescan; | 330 | } gdth_ioctl_rescan; |
339 | 331 | ||
340 | /* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */ | 332 | /* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */ |
341 | typedef struct { | 333 | typedef struct { |
342 | ushort ionode; /* controller number */ | 334 | u16 ionode; /* controller number */ |
343 | ushort number; /* bus/host drive number */ | 335 | u16 number; /* bus/host drive number */ |
344 | ushort status; /* status */ | 336 | u16 status; /* status */ |
345 | } gdth_ioctl_reset; | 337 | } gdth_ioctl_reset; |
346 | 338 | ||
347 | #endif | 339 | #endif |
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 1258da34fbc2..ffb2b21992ba 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c | |||
@@ -43,7 +43,7 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, | |||
43 | int i, found; | 43 | int i, found; |
44 | gdth_cmd_str gdtcmd; | 44 | gdth_cmd_str gdtcmd; |
45 | gdth_cpar_str *pcpar; | 45 | gdth_cpar_str *pcpar; |
46 | ulong64 paddr; | 46 | u64 paddr; |
47 | 47 | ||
48 | char cmnd[MAX_COMMAND_SIZE]; | 48 | char cmnd[MAX_COMMAND_SIZE]; |
49 | memset(cmnd, 0xff, 12); | 49 | memset(cmnd, 0xff, 12); |
@@ -156,8 +156,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
156 | off_t begin = 0,pos = 0; | 156 | off_t begin = 0,pos = 0; |
157 | int id, i, j, k, sec, flag; | 157 | int id, i, j, k, sec, flag; |
158 | int no_mdrv = 0, drv_no, is_mirr; | 158 | int no_mdrv = 0, drv_no, is_mirr; |
159 | ulong32 cnt; | 159 | u32 cnt; |
160 | ulong64 paddr; | 160 | u64 paddr; |
161 | int rc = -ENOMEM; | 161 | int rc = -ENOMEM; |
162 | 162 | ||
163 | gdth_cmd_str *gdtcmd; | 163 | gdth_cmd_str *gdtcmd; |
@@ -220,14 +220,14 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
220 | 220 | ||
221 | if (ha->more_proc) | 221 | if (ha->more_proc) |
222 | sprintf(hrec, "%d.%02d.%02d-%c%03X", | 222 | sprintf(hrec, "%d.%02d.%02d-%c%03X", |
223 | (unchar)(ha->binfo.upd_fw_ver>>24), | 223 | (u8)(ha->binfo.upd_fw_ver>>24), |
224 | (unchar)(ha->binfo.upd_fw_ver>>16), | 224 | (u8)(ha->binfo.upd_fw_ver>>16), |
225 | (unchar)(ha->binfo.upd_fw_ver), | 225 | (u8)(ha->binfo.upd_fw_ver), |
226 | ha->bfeat.raid ? 'R':'N', | 226 | ha->bfeat.raid ? 'R':'N', |
227 | ha->binfo.upd_revision); | 227 | ha->binfo.upd_revision); |
228 | else | 228 | else |
229 | sprintf(hrec, "%d.%02d", (unchar)(ha->cpar.version>>8), | 229 | sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8), |
230 | (unchar)(ha->cpar.version)); | 230 | (u8)(ha->cpar.version)); |
231 | 231 | ||
232 | size = sprintf(buffer+len, | 232 | size = sprintf(buffer+len, |
233 | " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n", | 233 | " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n", |
@@ -281,7 +281,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
281 | pds->bid = ha->raw[i].local_no; | 281 | pds->bid = ha->raw[i].local_no; |
282 | pds->first = 0; | 282 | pds->first = 0; |
283 | pds->entries = ha->raw[i].pdev_cnt; | 283 | pds->entries = ha->raw[i].pdev_cnt; |
284 | cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(ulong32)) / | 284 | cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) / |
285 | sizeof(pds->list[0]); | 285 | sizeof(pds->list[0]); |
286 | if (pds->entries > cnt) | 286 | if (pds->entries > cnt) |
287 | pds->entries = cnt; | 287 | pds->entries = cnt; |
@@ -604,7 +604,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
604 | 604 | ||
605 | size = sprintf(buffer+len, | 605 | size = sprintf(buffer+len, |
606 | " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n", | 606 | " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n", |
607 | (ulong32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec); | 607 | (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec); |
608 | len += size; pos = begin + len; | 608 | len += size; pos = begin + len; |
609 | if (pos < offset) { | 609 | if (pos < offset) { |
610 | len = 0; | 610 | len = 0; |
@@ -664,9 +664,9 @@ free_fail: | |||
664 | } | 664 | } |
665 | 665 | ||
666 | static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, | 666 | static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, |
667 | ulong64 *paddr) | 667 | u64 *paddr) |
668 | { | 668 | { |
669 | ulong flags; | 669 | unsigned long flags; |
670 | char *ret_val; | 670 | char *ret_val; |
671 | 671 | ||
672 | if (size == 0) | 672 | if (size == 0) |
@@ -691,9 +691,9 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, | |||
691 | return ret_val; | 691 | return ret_val; |
692 | } | 692 | } |
693 | 693 | ||
694 | static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr) | 694 | static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr) |
695 | { | 695 | { |
696 | ulong flags; | 696 | unsigned long flags; |
697 | 697 | ||
698 | if (buf == ha->pscratch) { | 698 | if (buf == ha->pscratch) { |
699 | spin_lock_irqsave(&ha->smp_lock, flags); | 699 | spin_lock_irqsave(&ha->smp_lock, flags); |
@@ -705,16 +705,16 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr) | |||
705 | } | 705 | } |
706 | 706 | ||
707 | #ifdef GDTH_IOCTL_PROC | 707 | #ifdef GDTH_IOCTL_PROC |
708 | static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size) | 708 | static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size) |
709 | { | 709 | { |
710 | ulong flags; | 710 | unsigned long flags; |
711 | int ret_val; | 711 | int ret_val; |
712 | 712 | ||
713 | spin_lock_irqsave(&ha->smp_lock, flags); | 713 | spin_lock_irqsave(&ha->smp_lock, flags); |
714 | 714 | ||
715 | ret_val = FALSE; | 715 | ret_val = FALSE; |
716 | if (ha->scratch_busy) { | 716 | if (ha->scratch_busy) { |
717 | if (((gdth_iord_str *)ha->pscratch)->size == (ulong32)size) | 717 | if (((gdth_iord_str *)ha->pscratch)->size == (u32)size) |
718 | ret_val = TRUE; | 718 | ret_val = TRUE; |
719 | } | 719 | } |
720 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 720 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
@@ -724,11 +724,11 @@ static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size) | |||
724 | 724 | ||
725 | static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) | 725 | static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) |
726 | { | 726 | { |
727 | ulong flags; | 727 | unsigned long flags; |
728 | int i; | 728 | int i; |
729 | Scsi_Cmnd *scp; | 729 | Scsi_Cmnd *scp; |
730 | struct gdth_cmndinfo *cmndinfo; | 730 | struct gdth_cmndinfo *cmndinfo; |
731 | unchar b, t; | 731 | u8 b, t; |
732 | 732 | ||
733 | spin_lock_irqsave(&ha->smp_lock, flags); | 733 | spin_lock_irqsave(&ha->smp_lock, flags); |
734 | 734 | ||
@@ -738,8 +738,8 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) | |||
738 | 738 | ||
739 | b = scp->device->channel; | 739 | b = scp->device->channel; |
740 | t = scp->device->id; | 740 | t = scp->device->id; |
741 | if (!SPECIAL_SCP(scp) && t == (unchar)id && | 741 | if (!SPECIAL_SCP(scp) && t == (u8)id && |
742 | b == (unchar)busnum) { | 742 | b == (u8)busnum) { |
743 | cmndinfo->wait_for_completion = 0; | 743 | cmndinfo->wait_for_completion = 0; |
744 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 744 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
745 | while (!cmndinfo->wait_for_completion) | 745 | while (!cmndinfo->wait_for_completion) |
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h index 9b900cc9ebe8..dab15f59f2cc 100644 --- a/drivers/scsi/gdth_proc.h +++ b/drivers/scsi/gdth_proc.h | |||
@@ -17,8 +17,8 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, | |||
17 | int length, gdth_ha_str *ha); | 17 | int length, gdth_ha_str *ha); |
18 | 18 | ||
19 | static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, | 19 | static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, |
20 | ulong64 *paddr); | 20 | u64 *paddr); |
21 | static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); | 21 | static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr); |
22 | static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); | 22 | static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); |
23 | 23 | ||
24 | #endif | 24 | #endif |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index bb96fdd58e23..03697ba94251 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -52,7 +52,7 @@ | |||
52 | #include "hpsa.h" | 52 | #include "hpsa.h" |
53 | 53 | ||
54 | /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ | 54 | /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ |
55 | #define HPSA_DRIVER_VERSION "1.0.0" | 55 | #define HPSA_DRIVER_VERSION "2.0.1-3" |
56 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" | 56 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
57 | 57 | ||
58 | /* How long to wait (in milliseconds) for board to go into simple mode */ | 58 | /* How long to wait (in milliseconds) for board to go into simple mode */ |
@@ -77,9 +77,6 @@ MODULE_PARM_DESC(hpsa_allow_any, | |||
77 | 77 | ||
78 | /* define the PCI info for the cards we can control */ | 78 | /* define the PCI info for the cards we can control */ |
79 | static const struct pci_device_id hpsa_pci_device_id[] = { | 79 | static const struct pci_device_id hpsa_pci_device_id[] = { |
80 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, | ||
81 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, | ||
82 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, | ||
83 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, | 80 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, |
84 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, | 81 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, |
85 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, | 82 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, |
@@ -87,6 +84,9 @@ static const struct pci_device_id hpsa_pci_device_id[] = { | |||
87 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, | 84 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, |
88 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, | 85 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, |
89 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, | 86 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, |
87 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, | ||
88 | #define PCI_DEVICE_ID_HP_CISSF 0x333f | ||
89 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F}, | ||
90 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 90 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
91 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, | 91 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
92 | {0,} | 92 | {0,} |
@@ -99,9 +99,6 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); | |||
99 | * access = Address of the struct of function pointers | 99 | * access = Address of the struct of function pointers |
100 | */ | 100 | */ |
101 | static struct board_type products[] = { | 101 | static struct board_type products[] = { |
102 | {0x3223103C, "Smart Array P800", &SA5_access}, | ||
103 | {0x3234103C, "Smart Array P400", &SA5_access}, | ||
104 | {0x323d103c, "Smart Array P700M", &SA5_access}, | ||
105 | {0x3241103C, "Smart Array P212", &SA5_access}, | 102 | {0x3241103C, "Smart Array P212", &SA5_access}, |
106 | {0x3243103C, "Smart Array P410", &SA5_access}, | 103 | {0x3243103C, "Smart Array P410", &SA5_access}, |
107 | {0x3245103C, "Smart Array P410i", &SA5_access}, | 104 | {0x3245103C, "Smart Array P410i", &SA5_access}, |
@@ -109,6 +106,8 @@ static struct board_type products[] = { | |||
109 | {0x3249103C, "Smart Array P812", &SA5_access}, | 106 | {0x3249103C, "Smart Array P812", &SA5_access}, |
110 | {0x324a103C, "Smart Array P712m", &SA5_access}, | 107 | {0x324a103C, "Smart Array P712m", &SA5_access}, |
111 | {0x324b103C, "Smart Array P711m", &SA5_access}, | 108 | {0x324b103C, "Smart Array P711m", &SA5_access}, |
109 | {0x3233103C, "StorageWorks P1210m", &SA5_access}, | ||
110 | {0x333F103C, "StorageWorks P1210m", &SA5_access}, | ||
112 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, | 111 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, |
113 | }; | 112 | }; |
114 | 113 | ||
@@ -126,12 +125,15 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c); | |||
126 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); | 125 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); |
127 | static struct CommandList *cmd_alloc(struct ctlr_info *h); | 126 | static struct CommandList *cmd_alloc(struct ctlr_info *h); |
128 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); | 127 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); |
129 | static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h, | 128 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
130 | void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, | 129 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
131 | int cmd_type); | 130 | int cmd_type); |
132 | 131 | ||
133 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | 132 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, |
134 | void (*done)(struct scsi_cmnd *)); | 133 | void (*done)(struct scsi_cmnd *)); |
134 | static void hpsa_scan_start(struct Scsi_Host *); | ||
135 | static int hpsa_scan_finished(struct Scsi_Host *sh, | ||
136 | unsigned long elapsed_time); | ||
135 | 137 | ||
136 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); | 138 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
137 | static int hpsa_slave_alloc(struct scsi_device *sdev); | 139 | static int hpsa_slave_alloc(struct scsi_device *sdev); |
@@ -150,6 +152,11 @@ static int check_for_unit_attention(struct ctlr_info *h, | |||
150 | struct CommandList *c); | 152 | struct CommandList *c); |
151 | static void check_ioctl_unit_attention(struct ctlr_info *h, | 153 | static void check_ioctl_unit_attention(struct ctlr_info *h, |
152 | struct CommandList *c); | 154 | struct CommandList *c); |
155 | /* performant mode helper functions */ | ||
156 | static void calc_bucket_map(int *bucket, int num_buckets, | ||
157 | int nsgs, int *bucket_map); | ||
158 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); | ||
159 | static inline u32 next_command(struct ctlr_info *h); | ||
153 | 160 | ||
154 | static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); | 161 | static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); |
155 | static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); | 162 | static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); |
@@ -173,10 +180,10 @@ static struct scsi_host_template hpsa_driver_template = { | |||
173 | .name = "hpsa", | 180 | .name = "hpsa", |
174 | .proc_name = "hpsa", | 181 | .proc_name = "hpsa", |
175 | .queuecommand = hpsa_scsi_queue_command, | 182 | .queuecommand = hpsa_scsi_queue_command, |
176 | .can_queue = 512, | 183 | .scan_start = hpsa_scan_start, |
184 | .scan_finished = hpsa_scan_finished, | ||
177 | .this_id = -1, | 185 | .this_id = -1, |
178 | .sg_tablesize = MAXSGENTRIES, | 186 | .sg_tablesize = MAXSGENTRIES, |
179 | .cmd_per_lun = 512, | ||
180 | .use_clustering = ENABLE_CLUSTERING, | 187 | .use_clustering = ENABLE_CLUSTERING, |
181 | .eh_device_reset_handler = hpsa_eh_device_reset_handler, | 188 | .eh_device_reset_handler = hpsa_eh_device_reset_handler, |
182 | .ioctl = hpsa_ioctl, | 189 | .ioctl = hpsa_ioctl, |
@@ -195,6 +202,12 @@ static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) | |||
195 | return (struct ctlr_info *) *priv; | 202 | return (struct ctlr_info *) *priv; |
196 | } | 203 | } |
197 | 204 | ||
205 | static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) | ||
206 | { | ||
207 | unsigned long *priv = shost_priv(sh); | ||
208 | return (struct ctlr_info *) *priv; | ||
209 | } | ||
210 | |||
198 | static struct task_struct *hpsa_scan_thread; | 211 | static struct task_struct *hpsa_scan_thread; |
199 | static DEFINE_MUTEX(hpsa_scan_mutex); | 212 | static DEFINE_MUTEX(hpsa_scan_mutex); |
200 | static LIST_HEAD(hpsa_scan_q); | 213 | static LIST_HEAD(hpsa_scan_q); |
@@ -312,7 +325,7 @@ static int hpsa_scan_func(__attribute__((unused)) void *data) | |||
312 | h->busy_scanning = 1; | 325 | h->busy_scanning = 1; |
313 | mutex_unlock(&hpsa_scan_mutex); | 326 | mutex_unlock(&hpsa_scan_mutex); |
314 | host_no = h->scsi_host ? h->scsi_host->host_no : -1; | 327 | host_no = h->scsi_host ? h->scsi_host->host_no : -1; |
315 | hpsa_update_scsi_devices(h, host_no); | 328 | hpsa_scan_start(h->scsi_host); |
316 | complete_all(&h->scan_wait); | 329 | complete_all(&h->scan_wait); |
317 | mutex_lock(&hpsa_scan_mutex); | 330 | mutex_lock(&hpsa_scan_mutex); |
318 | h->busy_scanning = 0; | 331 | h->busy_scanning = 0; |
@@ -379,8 +392,7 @@ static ssize_t host_store_rescan(struct device *dev, | |||
379 | { | 392 | { |
380 | struct ctlr_info *h; | 393 | struct ctlr_info *h; |
381 | struct Scsi_Host *shost = class_to_shost(dev); | 394 | struct Scsi_Host *shost = class_to_shost(dev); |
382 | unsigned long *priv = shost_priv(shost); | 395 | h = shost_to_hba(shost); |
383 | h = (struct ctlr_info *) *priv; | ||
384 | if (add_to_scan_list(h)) { | 396 | if (add_to_scan_list(h)) { |
385 | wake_up_process(hpsa_scan_thread); | 397 | wake_up_process(hpsa_scan_thread); |
386 | wait_for_completion_interruptible(&h->scan_wait); | 398 | wait_for_completion_interruptible(&h->scan_wait); |
@@ -394,10 +406,44 @@ static inline void addQ(struct hlist_head *list, struct CommandList *c) | |||
394 | hlist_add_head(&c->list, list); | 406 | hlist_add_head(&c->list, list); |
395 | } | 407 | } |
396 | 408 | ||
409 | static inline u32 next_command(struct ctlr_info *h) | ||
410 | { | ||
411 | u32 a; | ||
412 | |||
413 | if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) | ||
414 | return h->access.command_completed(h); | ||
415 | |||
416 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { | ||
417 | a = *(h->reply_pool_head); /* Next cmd in ring buffer */ | ||
418 | (h->reply_pool_head)++; | ||
419 | h->commands_outstanding--; | ||
420 | } else { | ||
421 | a = FIFO_EMPTY; | ||
422 | } | ||
423 | /* Check for wraparound */ | ||
424 | if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { | ||
425 | h->reply_pool_head = h->reply_pool; | ||
426 | h->reply_pool_wraparound ^= 1; | ||
427 | } | ||
428 | return a; | ||
429 | } | ||
430 | |||
431 | /* set_performant_mode: Modify the tag for cciss performant | ||
432 | * set bit 0 for pull model, bits 3-1 for block fetch | ||
433 | * register number | ||
434 | */ | ||
435 | static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) | ||
436 | { | ||
437 | if (likely(h->transMethod == CFGTBL_Trans_Performant)) | ||
438 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); | ||
439 | } | ||
440 | |||
397 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, | 441 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, |
398 | struct CommandList *c) | 442 | struct CommandList *c) |
399 | { | 443 | { |
400 | unsigned long flags; | 444 | unsigned long flags; |
445 | |||
446 | set_performant_mode(h, c); | ||
401 | spin_lock_irqsave(&h->lock, flags); | 447 | spin_lock_irqsave(&h->lock, flags); |
402 | addQ(&h->reqQ, c); | 448 | addQ(&h->reqQ, c); |
403 | h->Qdepth++; | 449 | h->Qdepth++; |
@@ -422,6 +468,15 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) | |||
422 | return (scsi3addr[3] & 0xC0) == 0x40; | 468 | return (scsi3addr[3] & 0xC0) == 0x40; |
423 | } | 469 | } |
424 | 470 | ||
471 | static inline int is_scsi_rev_5(struct ctlr_info *h) | ||
472 | { | ||
473 | if (!h->hba_inquiry_data) | ||
474 | return 0; | ||
475 | if ((h->hba_inquiry_data[2] & 0x07) == 5) | ||
476 | return 1; | ||
477 | return 0; | ||
478 | } | ||
479 | |||
425 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 480 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
426 | "UNKNOWN" | 481 | "UNKNOWN" |
427 | }; | 482 | }; |
@@ -431,7 +486,7 @@ static ssize_t raid_level_show(struct device *dev, | |||
431 | struct device_attribute *attr, char *buf) | 486 | struct device_attribute *attr, char *buf) |
432 | { | 487 | { |
433 | ssize_t l = 0; | 488 | ssize_t l = 0; |
434 | int rlevel; | 489 | unsigned char rlevel; |
435 | struct ctlr_info *h; | 490 | struct ctlr_info *h; |
436 | struct scsi_device *sdev; | 491 | struct scsi_device *sdev; |
437 | struct hpsa_scsi_dev_t *hdev; | 492 | struct hpsa_scsi_dev_t *hdev; |
@@ -455,7 +510,7 @@ static ssize_t raid_level_show(struct device *dev, | |||
455 | 510 | ||
456 | rlevel = hdev->raid_level; | 511 | rlevel = hdev->raid_level; |
457 | spin_unlock_irqrestore(&h->lock, flags); | 512 | spin_unlock_irqrestore(&h->lock, flags); |
458 | if (rlevel < 0 || rlevel > RAID_UNKNOWN) | 513 | if (rlevel > RAID_UNKNOWN) |
459 | rlevel = RAID_UNKNOWN; | 514 | rlevel = RAID_UNKNOWN; |
460 | l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); | 515 | l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); |
461 | return l; | 516 | return l; |
@@ -620,6 +675,24 @@ lun_assigned: | |||
620 | return 0; | 675 | return 0; |
621 | } | 676 | } |
622 | 677 | ||
678 | /* Replace an entry from h->dev[] array. */ | ||
679 | static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, | ||
680 | int entry, struct hpsa_scsi_dev_t *new_entry, | ||
681 | struct hpsa_scsi_dev_t *added[], int *nadded, | ||
682 | struct hpsa_scsi_dev_t *removed[], int *nremoved) | ||
683 | { | ||
684 | /* assumes h->devlock is held */ | ||
685 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); | ||
686 | removed[*nremoved] = h->dev[entry]; | ||
687 | (*nremoved)++; | ||
688 | h->dev[entry] = new_entry; | ||
689 | added[*nadded] = new_entry; | ||
690 | (*nadded)++; | ||
691 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", | ||
692 | scsi_device_type(new_entry->devtype), hostno, new_entry->bus, | ||
693 | new_entry->target, new_entry->lun); | ||
694 | } | ||
695 | |||
623 | /* Remove an entry from h->dev[] array. */ | 696 | /* Remove an entry from h->dev[] array. */ |
624 | static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, | 697 | static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, |
625 | struct hpsa_scsi_dev_t *removed[], int *nremoved) | 698 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
@@ -628,8 +701,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, | |||
628 | int i; | 701 | int i; |
629 | struct hpsa_scsi_dev_t *sd; | 702 | struct hpsa_scsi_dev_t *sd; |
630 | 703 | ||
631 | if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA) | 704 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); |
632 | BUG(); | ||
633 | 705 | ||
634 | sd = h->dev[entry]; | 706 | sd = h->dev[entry]; |
635 | removed[*nremoved] = h->dev[entry]; | 707 | removed[*nremoved] = h->dev[entry]; |
@@ -722,6 +794,8 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, | |||
722 | #define DEVICE_CHANGED 1 | 794 | #define DEVICE_CHANGED 1 |
723 | #define DEVICE_SAME 2 | 795 | #define DEVICE_SAME 2 |
724 | for (i = 0; i < haystack_size; i++) { | 796 | for (i = 0; i < haystack_size; i++) { |
797 | if (haystack[i] == NULL) /* previously removed. */ | ||
798 | continue; | ||
725 | if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { | 799 | if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { |
726 | *index = i; | 800 | *index = i; |
727 | if (device_is_the_same(needle, haystack[i])) | 801 | if (device_is_the_same(needle, haystack[i])) |
@@ -734,7 +808,7 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, | |||
734 | return DEVICE_NOT_FOUND; | 808 | return DEVICE_NOT_FOUND; |
735 | } | 809 | } |
736 | 810 | ||
737 | static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | 811 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, |
738 | struct hpsa_scsi_dev_t *sd[], int nsds) | 812 | struct hpsa_scsi_dev_t *sd[], int nsds) |
739 | { | 813 | { |
740 | /* sd contains scsi3 addresses and devtypes, and inquiry | 814 | /* sd contains scsi3 addresses and devtypes, and inquiry |
@@ -779,12 +853,12 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | |||
779 | continue; /* remove ^^^, hence i not incremented */ | 853 | continue; /* remove ^^^, hence i not incremented */ |
780 | } else if (device_change == DEVICE_CHANGED) { | 854 | } else if (device_change == DEVICE_CHANGED) { |
781 | changes++; | 855 | changes++; |
782 | hpsa_scsi_remove_entry(h, hostno, i, | 856 | hpsa_scsi_replace_entry(h, hostno, i, sd[entry], |
783 | removed, &nremoved); | 857 | added, &nadded, removed, &nremoved); |
784 | (void) hpsa_scsi_add_entry(h, hostno, sd[entry], | 858 | /* Set it to NULL to prevent it from being freed |
785 | added, &nadded); | 859 | * at the bottom of hpsa_update_scsi_devices() |
786 | /* add can't fail, we just removed one. */ | 860 | */ |
787 | sd[entry] = NULL; /* prevent it from being freed */ | 861 | sd[entry] = NULL; |
788 | } | 862 | } |
789 | i++; | 863 | i++; |
790 | } | 864 | } |
@@ -860,7 +934,6 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | |||
860 | free_and_out: | 934 | free_and_out: |
861 | kfree(added); | 935 | kfree(added); |
862 | kfree(removed); | 936 | kfree(removed); |
863 | return 0; | ||
864 | } | 937 | } |
865 | 938 | ||
866 | /* | 939 | /* |
@@ -900,7 +973,7 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) | |||
900 | 973 | ||
901 | static void hpsa_slave_destroy(struct scsi_device *sdev) | 974 | static void hpsa_slave_destroy(struct scsi_device *sdev) |
902 | { | 975 | { |
903 | return; /* nothing to do. */ | 976 | /* nothing to do. */ |
904 | } | 977 | } |
905 | 978 | ||
906 | static void hpsa_scsi_setup(struct ctlr_info *h) | 979 | static void hpsa_scsi_setup(struct ctlr_info *h) |
@@ -908,11 +981,10 @@ static void hpsa_scsi_setup(struct ctlr_info *h) | |||
908 | h->ndevices = 0; | 981 | h->ndevices = 0; |
909 | h->scsi_host = NULL; | 982 | h->scsi_host = NULL; |
910 | spin_lock_init(&h->devlock); | 983 | spin_lock_init(&h->devlock); |
911 | return; | ||
912 | } | 984 | } |
913 | 985 | ||
914 | static void complete_scsi_command(struct CommandList *cp, | 986 | static void complete_scsi_command(struct CommandList *cp, |
915 | int timeout, __u32 tag) | 987 | int timeout, u32 tag) |
916 | { | 988 | { |
917 | struct scsi_cmnd *cmd; | 989 | struct scsi_cmnd *cmd; |
918 | struct ctlr_info *h; | 990 | struct ctlr_info *h; |
@@ -987,7 +1059,6 @@ static void complete_scsi_command(struct CommandList *cp, | |||
987 | * required | 1059 | * required |
988 | */ | 1060 | */ |
989 | if ((asc == 0x04) && (ascq == 0x03)) { | 1061 | if ((asc == 0x04) && (ascq == 0x03)) { |
990 | cmd->result = DID_NO_CONNECT << 16; | ||
991 | dev_warn(&h->pdev->dev, "cp %p " | 1062 | dev_warn(&h->pdev->dev, "cp %p " |
992 | "has check condition: unit " | 1063 | "has check condition: unit " |
993 | "not ready, manual " | 1064 | "not ready, manual " |
@@ -995,14 +1066,22 @@ static void complete_scsi_command(struct CommandList *cp, | |||
995 | break; | 1066 | break; |
996 | } | 1067 | } |
997 | } | 1068 | } |
998 | 1069 | if (sense_key == ABORTED_COMMAND) { | |
999 | 1070 | /* Aborted command is retryable */ | |
1071 | dev_warn(&h->pdev->dev, "cp %p " | ||
1072 | "has check condition: aborted command: " | ||
1073 | "ASC: 0x%x, ASCQ: 0x%x\n", | ||
1074 | cp, asc, ascq); | ||
1075 | cmd->result = DID_SOFT_ERROR << 16; | ||
1076 | break; | ||
1077 | } | ||
1000 | /* Must be some other type of check condition */ | 1078 | /* Must be some other type of check condition */ |
1001 | dev_warn(&h->pdev->dev, "cp %p has check condition: " | 1079 | dev_warn(&h->pdev->dev, "cp %p has check condition: " |
1002 | "unknown type: " | 1080 | "unknown type: " |
1003 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " | 1081 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " |
1004 | "Returning result: 0x%x, " | 1082 | "Returning result: 0x%x, " |
1005 | "cmd=[%02x %02x %02x %02x %02x " | 1083 | "cmd=[%02x %02x %02x %02x %02x " |
1084 | "%02x %02x %02x %02x %02x %02x " | ||
1006 | "%02x %02x %02x %02x %02x]\n", | 1085 | "%02x %02x %02x %02x %02x]\n", |
1007 | cp, sense_key, asc, ascq, | 1086 | cp, sense_key, asc, ascq, |
1008 | cmd->result, | 1087 | cmd->result, |
@@ -1010,7 +1089,10 @@ static void complete_scsi_command(struct CommandList *cp, | |||
1010 | cmd->cmnd[2], cmd->cmnd[3], | 1089 | cmd->cmnd[2], cmd->cmnd[3], |
1011 | cmd->cmnd[4], cmd->cmnd[5], | 1090 | cmd->cmnd[4], cmd->cmnd[5], |
1012 | cmd->cmnd[6], cmd->cmnd[7], | 1091 | cmd->cmnd[6], cmd->cmnd[7], |
1013 | cmd->cmnd[8], cmd->cmnd[9]); | 1092 | cmd->cmnd[8], cmd->cmnd[9], |
1093 | cmd->cmnd[10], cmd->cmnd[11], | ||
1094 | cmd->cmnd[12], cmd->cmnd[13], | ||
1095 | cmd->cmnd[14], cmd->cmnd[15]); | ||
1014 | break; | 1096 | break; |
1015 | } | 1097 | } |
1016 | 1098 | ||
@@ -1086,7 +1168,7 @@ static void complete_scsi_command(struct CommandList *cp, | |||
1086 | dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); | 1168 | dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); |
1087 | break; | 1169 | break; |
1088 | case CMD_UNSOLICITED_ABORT: | 1170 | case CMD_UNSOLICITED_ABORT: |
1089 | cmd->result = DID_ABORT << 16; | 1171 | cmd->result = DID_RESET << 16; |
1090 | dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " | 1172 | dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " |
1091 | "abort\n", cp); | 1173 | "abort\n", cp); |
1092 | break; | 1174 | break; |
@@ -1119,9 +1201,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h) | |||
1119 | sh->max_cmd_len = MAX_COMMAND_SIZE; | 1201 | sh->max_cmd_len = MAX_COMMAND_SIZE; |
1120 | sh->max_lun = HPSA_MAX_LUN; | 1202 | sh->max_lun = HPSA_MAX_LUN; |
1121 | sh->max_id = HPSA_MAX_LUN; | 1203 | sh->max_id = HPSA_MAX_LUN; |
1204 | sh->can_queue = h->nr_cmds; | ||
1205 | sh->cmd_per_lun = h->nr_cmds; | ||
1122 | h->scsi_host = sh; | 1206 | h->scsi_host = sh; |
1123 | sh->hostdata[0] = (unsigned long) h; | 1207 | sh->hostdata[0] = (unsigned long) h; |
1124 | sh->irq = h->intr[SIMPLE_MODE_INT]; | 1208 | sh->irq = h->intr[PERF_MODE_INT]; |
1125 | sh->unique_id = sh->irq; | 1209 | sh->unique_id = sh->irq; |
1126 | error = scsi_add_host(sh, &h->pdev->dev); | 1210 | error = scsi_add_host(sh, &h->pdev->dev); |
1127 | if (error) | 1211 | if (error) |
@@ -1133,11 +1217,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h) | |||
1133 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" | 1217 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" |
1134 | " failed for controller %d\n", h->ctlr); | 1218 | " failed for controller %d\n", h->ctlr); |
1135 | scsi_host_put(sh); | 1219 | scsi_host_put(sh); |
1136 | return -1; | 1220 | return error; |
1137 | fail: | 1221 | fail: |
1138 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" | 1222 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" |
1139 | " failed for controller %d\n", h->ctlr); | 1223 | " failed for controller %d\n", h->ctlr); |
1140 | return -1; | 1224 | return -ENOMEM; |
1141 | } | 1225 | } |
1142 | 1226 | ||
1143 | static void hpsa_pci_unmap(struct pci_dev *pdev, | 1227 | static void hpsa_pci_unmap(struct pci_dev *pdev, |
@@ -1160,7 +1244,7 @@ static void hpsa_map_one(struct pci_dev *pdev, | |||
1160 | size_t buflen, | 1244 | size_t buflen, |
1161 | int data_direction) | 1245 | int data_direction) |
1162 | { | 1246 | { |
1163 | __u64 addr64; | 1247 | u64 addr64; |
1164 | 1248 | ||
1165 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { | 1249 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { |
1166 | cp->Header.SGList = 0; | 1250 | cp->Header.SGList = 0; |
@@ -1168,14 +1252,14 @@ static void hpsa_map_one(struct pci_dev *pdev, | |||
1168 | return; | 1252 | return; |
1169 | } | 1253 | } |
1170 | 1254 | ||
1171 | addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); | 1255 | addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); |
1172 | cp->SG[0].Addr.lower = | 1256 | cp->SG[0].Addr.lower = |
1173 | (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); | 1257 | (u32) (addr64 & (u64) 0x00000000FFFFFFFF); |
1174 | cp->SG[0].Addr.upper = | 1258 | cp->SG[0].Addr.upper = |
1175 | (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); | 1259 | (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); |
1176 | cp->SG[0].Len = buflen; | 1260 | cp->SG[0].Len = buflen; |
1177 | cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ | 1261 | cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ |
1178 | cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ | 1262 | cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ |
1179 | } | 1263 | } |
1180 | 1264 | ||
1181 | static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, | 1265 | static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, |
@@ -1274,7 +1358,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
1274 | 1358 | ||
1275 | if (c == NULL) { /* trouble... */ | 1359 | if (c == NULL) { /* trouble... */ |
1276 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 1360 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
1277 | return -1; | 1361 | return -ENOMEM; |
1278 | } | 1362 | } |
1279 | 1363 | ||
1280 | fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); | 1364 | fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); |
@@ -1366,9 +1450,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
1366 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 1450 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
1367 | return -1; | 1451 | return -1; |
1368 | } | 1452 | } |
1369 | 1453 | /* address the controller */ | |
1370 | memset(&scsi3addr[0], 0, 8); /* address the controller */ | 1454 | memset(scsi3addr, 0, sizeof(scsi3addr)); |
1371 | |||
1372 | fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, | 1455 | fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, |
1373 | buf, bufsize, 0, scsi3addr, TYPE_CMD); | 1456 | buf, bufsize, 0, scsi3addr, TYPE_CMD); |
1374 | if (extended_response) | 1457 | if (extended_response) |
@@ -1409,13 +1492,12 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
1409 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) | 1492 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) |
1410 | { | 1493 | { |
1411 | #define OBDR_TAPE_INQ_SIZE 49 | 1494 | #define OBDR_TAPE_INQ_SIZE 49 |
1412 | unsigned char *inq_buff = NULL; | 1495 | unsigned char *inq_buff; |
1413 | 1496 | ||
1414 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); | 1497 | inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
1415 | if (!inq_buff) | 1498 | if (!inq_buff) |
1416 | goto bail_out; | 1499 | goto bail_out; |
1417 | 1500 | ||
1418 | memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); | ||
1419 | /* Do an inquiry to the device to see what it is. */ | 1501 | /* Do an inquiry to the device to see what it is. */ |
1420 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, | 1502 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, |
1421 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { | 1503 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { |
@@ -1485,32 +1567,51 @@ static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) | |||
1485 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) | 1567 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) |
1486 | */ | 1568 | */ |
1487 | static void figure_bus_target_lun(struct ctlr_info *h, | 1569 | static void figure_bus_target_lun(struct ctlr_info *h, |
1488 | __u8 *lunaddrbytes, int *bus, int *target, int *lun, | 1570 | u8 *lunaddrbytes, int *bus, int *target, int *lun, |
1489 | struct hpsa_scsi_dev_t *device) | 1571 | struct hpsa_scsi_dev_t *device) |
1490 | { | 1572 | { |
1491 | 1573 | u32 lunid; | |
1492 | __u32 lunid; | ||
1493 | 1574 | ||
1494 | if (is_logical_dev_addr_mode(lunaddrbytes)) { | 1575 | if (is_logical_dev_addr_mode(lunaddrbytes)) { |
1495 | /* logical device */ | 1576 | /* logical device */ |
1496 | memcpy(&lunid, lunaddrbytes, sizeof(lunid)); | 1577 | if (unlikely(is_scsi_rev_5(h))) { |
1497 | lunid = le32_to_cpu(lunid); | 1578 | /* p1210m, logical drives lun assignments |
1498 | 1579 | * match SCSI REPORT LUNS data. | |
1499 | if (is_msa2xxx(h, device)) { | 1580 | */ |
1500 | *bus = 1; | 1581 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); |
1501 | *target = (lunid >> 16) & 0x3fff; | ||
1502 | *lun = lunid & 0x00ff; | ||
1503 | } else { | ||
1504 | *bus = 0; | 1582 | *bus = 0; |
1505 | *lun = 0; | 1583 | *target = 0; |
1506 | *target = lunid & 0x3fff; | 1584 | *lun = (lunid & 0x3fff) + 1; |
1585 | } else { | ||
1586 | /* not p1210m... */ | ||
1587 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); | ||
1588 | if (is_msa2xxx(h, device)) { | ||
1589 | /* msa2xxx way, put logicals on bus 1 | ||
1590 | * and match target/lun numbers box | ||
1591 | * reports. | ||
1592 | */ | ||
1593 | *bus = 1; | ||
1594 | *target = (lunid >> 16) & 0x3fff; | ||
1595 | *lun = lunid & 0x00ff; | ||
1596 | } else { | ||
1597 | /* Traditional smart array way. */ | ||
1598 | *bus = 0; | ||
1599 | *lun = 0; | ||
1600 | *target = lunid & 0x3fff; | ||
1601 | } | ||
1507 | } | 1602 | } |
1508 | } else { | 1603 | } else { |
1509 | /* physical device */ | 1604 | /* physical device */ |
1510 | if (is_hba_lunid(lunaddrbytes)) | 1605 | if (is_hba_lunid(lunaddrbytes)) |
1511 | *bus = 3; | 1606 | if (unlikely(is_scsi_rev_5(h))) { |
1607 | *bus = 0; /* put p1210m ctlr at 0,0,0 */ | ||
1608 | *target = 0; | ||
1609 | *lun = 0; | ||
1610 | return; | ||
1611 | } else | ||
1612 | *bus = 3; /* traditional smartarray */ | ||
1512 | else | 1613 | else |
1513 | *bus = 2; | 1614 | *bus = 2; /* physical disk */ |
1514 | *target = -1; | 1615 | *target = -1; |
1515 | *lun = -1; /* we will fill these in later. */ | 1616 | *lun = -1; /* we will fill these in later. */ |
1516 | } | 1617 | } |
@@ -1529,7 +1630,7 @@ static void figure_bus_target_lun(struct ctlr_info *h, | |||
1529 | */ | 1630 | */ |
1530 | static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | 1631 | static int add_msa2xxx_enclosure_device(struct ctlr_info *h, |
1531 | struct hpsa_scsi_dev_t *tmpdevice, | 1632 | struct hpsa_scsi_dev_t *tmpdevice, |
1532 | struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes, | 1633 | struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, |
1533 | int bus, int target, int lun, unsigned long lunzerobits[], | 1634 | int bus, int target, int lun, unsigned long lunzerobits[], |
1534 | int *nmsa2xxx_enclosures) | 1635 | int *nmsa2xxx_enclosures) |
1535 | { | 1636 | { |
@@ -1550,6 +1651,9 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |||
1550 | if (is_hba_lunid(scsi3addr)) | 1651 | if (is_hba_lunid(scsi3addr)) |
1551 | return 0; /* Don't add the RAID controller here. */ | 1652 | return 0; /* Don't add the RAID controller here. */ |
1552 | 1653 | ||
1654 | if (is_scsi_rev_5(h)) | ||
1655 | return 0; /* p1210m doesn't need to do this. */ | ||
1656 | |||
1553 | #define MAX_MSA2XXX_ENCLOSURES 32 | 1657 | #define MAX_MSA2XXX_ENCLOSURES 32 |
1554 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { | 1658 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { |
1555 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " | 1659 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " |
@@ -1576,18 +1680,14 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |||
1576 | */ | 1680 | */ |
1577 | static int hpsa_gather_lun_info(struct ctlr_info *h, | 1681 | static int hpsa_gather_lun_info(struct ctlr_info *h, |
1578 | int reportlunsize, | 1682 | int reportlunsize, |
1579 | struct ReportLUNdata *physdev, __u32 *nphysicals, | 1683 | struct ReportLUNdata *physdev, u32 *nphysicals, |
1580 | struct ReportLUNdata *logdev, __u32 *nlogicals) | 1684 | struct ReportLUNdata *logdev, u32 *nlogicals) |
1581 | { | 1685 | { |
1582 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { | 1686 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { |
1583 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); | 1687 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); |
1584 | return -1; | 1688 | return -1; |
1585 | } | 1689 | } |
1586 | memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals)); | 1690 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; |
1587 | *nphysicals = be32_to_cpu(*nphysicals) / 8; | ||
1588 | #ifdef DEBUG | ||
1589 | dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals); | ||
1590 | #endif | ||
1591 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { | 1691 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
1592 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." | 1692 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." |
1593 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | 1693 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, |
@@ -1598,11 +1698,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, | |||
1598 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); | 1698 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); |
1599 | return -1; | 1699 | return -1; |
1600 | } | 1700 | } |
1601 | memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals)); | 1701 | *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; |
1602 | *nlogicals = be32_to_cpu(*nlogicals) / 8; | ||
1603 | #ifdef DEBUG | ||
1604 | dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals); | ||
1605 | #endif | ||
1606 | /* Reject Logicals in excess of our max capability. */ | 1702 | /* Reject Logicals in excess of our max capability. */ |
1607 | if (*nlogicals > HPSA_MAX_LUN) { | 1703 | if (*nlogicals > HPSA_MAX_LUN) { |
1608 | dev_warn(&h->pdev->dev, | 1704 | dev_warn(&h->pdev->dev, |
@@ -1621,6 +1717,31 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, | |||
1621 | return 0; | 1717 | return 0; |
1622 | } | 1718 | } |
1623 | 1719 | ||
1720 | u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, | ||
1721 | int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, | ||
1722 | struct ReportLUNdata *logdev_list) | ||
1723 | { | ||
1724 | /* Helper function, figure out where the LUN ID info is coming from | ||
1725 | * given index i, lists of physical and logical devices, where in | ||
1726 | * the list the raid controller is supposed to appear (first or last) | ||
1727 | */ | ||
1728 | |||
1729 | int logicals_start = nphysicals + (raid_ctlr_position == 0); | ||
1730 | int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); | ||
1731 | |||
1732 | if (i == raid_ctlr_position) | ||
1733 | return RAID_CTLR_LUNID; | ||
1734 | |||
1735 | if (i < logicals_start) | ||
1736 | return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; | ||
1737 | |||
1738 | if (i < last_device) | ||
1739 | return &logdev_list->LUN[i - nphysicals - | ||
1740 | (raid_ctlr_position == 0)][0]; | ||
1741 | BUG(); | ||
1742 | return NULL; | ||
1743 | } | ||
1744 | |||
1624 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | 1745 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
1625 | { | 1746 | { |
1626 | /* the idea here is we could get notified | 1747 | /* the idea here is we could get notified |
@@ -1636,14 +1757,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1636 | struct ReportLUNdata *physdev_list = NULL; | 1757 | struct ReportLUNdata *physdev_list = NULL; |
1637 | struct ReportLUNdata *logdev_list = NULL; | 1758 | struct ReportLUNdata *logdev_list = NULL; |
1638 | unsigned char *inq_buff = NULL; | 1759 | unsigned char *inq_buff = NULL; |
1639 | __u32 nphysicals = 0; | 1760 | u32 nphysicals = 0; |
1640 | __u32 nlogicals = 0; | 1761 | u32 nlogicals = 0; |
1641 | __u32 ndev_allocated = 0; | 1762 | u32 ndev_allocated = 0; |
1642 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; | 1763 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
1643 | int ncurrent = 0; | 1764 | int ncurrent = 0; |
1644 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; | 1765 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; |
1645 | int i, nmsa2xxx_enclosures, ndevs_to_allocate; | 1766 | int i, nmsa2xxx_enclosures, ndevs_to_allocate; |
1646 | int bus, target, lun; | 1767 | int bus, target, lun; |
1768 | int raid_ctlr_position; | ||
1647 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); | 1769 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); |
1648 | 1770 | ||
1649 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, | 1771 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, |
@@ -1681,23 +1803,22 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1681 | ndev_allocated++; | 1803 | ndev_allocated++; |
1682 | } | 1804 | } |
1683 | 1805 | ||
1806 | if (unlikely(is_scsi_rev_5(h))) | ||
1807 | raid_ctlr_position = 0; | ||
1808 | else | ||
1809 | raid_ctlr_position = nphysicals + nlogicals; | ||
1810 | |||
1684 | /* adjust our table of devices */ | 1811 | /* adjust our table of devices */ |
1685 | nmsa2xxx_enclosures = 0; | 1812 | nmsa2xxx_enclosures = 0; |
1686 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { | 1813 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { |
1687 | __u8 *lunaddrbytes; | 1814 | u8 *lunaddrbytes; |
1688 | 1815 | ||
1689 | /* Figure out where the LUN ID info is coming from */ | 1816 | /* Figure out where the LUN ID info is coming from */ |
1690 | if (i < nphysicals) | 1817 | lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, |
1691 | lunaddrbytes = &physdev_list->LUN[i][0]; | 1818 | i, nphysicals, nlogicals, physdev_list, logdev_list); |
1692 | else | ||
1693 | if (i < nphysicals + nlogicals) | ||
1694 | lunaddrbytes = | ||
1695 | &logdev_list->LUN[i-nphysicals][0]; | ||
1696 | else /* jam in the RAID controller at the end */ | ||
1697 | lunaddrbytes = RAID_CTLR_LUNID; | ||
1698 | |||
1699 | /* skip masked physical devices. */ | 1819 | /* skip masked physical devices. */ |
1700 | if (lunaddrbytes[3] & 0xC0 && i < nphysicals) | 1820 | if (lunaddrbytes[3] & 0xC0 && |
1821 | i < nphysicals + (raid_ctlr_position == 0)) | ||
1701 | continue; | 1822 | continue; |
1702 | 1823 | ||
1703 | /* Get device type, vendor, model, device id */ | 1824 | /* Get device type, vendor, model, device id */ |
@@ -1777,7 +1898,6 @@ out: | |||
1777 | kfree(inq_buff); | 1898 | kfree(inq_buff); |
1778 | kfree(physdev_list); | 1899 | kfree(physdev_list); |
1779 | kfree(logdev_list); | 1900 | kfree(logdev_list); |
1780 | return; | ||
1781 | } | 1901 | } |
1782 | 1902 | ||
1783 | /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | 1903 | /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci |
@@ -1790,7 +1910,7 @@ static int hpsa_scatter_gather(struct pci_dev *pdev, | |||
1790 | { | 1910 | { |
1791 | unsigned int len; | 1911 | unsigned int len; |
1792 | struct scatterlist *sg; | 1912 | struct scatterlist *sg; |
1793 | __u64 addr64; | 1913 | u64 addr64; |
1794 | int use_sg, i; | 1914 | int use_sg, i; |
1795 | 1915 | ||
1796 | BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); | 1916 | BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); |
@@ -1803,20 +1923,20 @@ static int hpsa_scatter_gather(struct pci_dev *pdev, | |||
1803 | goto sglist_finished; | 1923 | goto sglist_finished; |
1804 | 1924 | ||
1805 | scsi_for_each_sg(cmd, sg, use_sg, i) { | 1925 | scsi_for_each_sg(cmd, sg, use_sg, i) { |
1806 | addr64 = (__u64) sg_dma_address(sg); | 1926 | addr64 = (u64) sg_dma_address(sg); |
1807 | len = sg_dma_len(sg); | 1927 | len = sg_dma_len(sg); |
1808 | cp->SG[i].Addr.lower = | 1928 | cp->SG[i].Addr.lower = |
1809 | (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); | 1929 | (u32) (addr64 & (u64) 0x00000000FFFFFFFF); |
1810 | cp->SG[i].Addr.upper = | 1930 | cp->SG[i].Addr.upper = |
1811 | (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); | 1931 | (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); |
1812 | cp->SG[i].Len = len; | 1932 | cp->SG[i].Len = len; |
1813 | cp->SG[i].Ext = 0; /* we are not chaining */ | 1933 | cp->SG[i].Ext = 0; /* we are not chaining */ |
1814 | } | 1934 | } |
1815 | 1935 | ||
1816 | sglist_finished: | 1936 | sglist_finished: |
1817 | 1937 | ||
1818 | cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */ | 1938 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ |
1819 | cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */ | 1939 | cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ |
1820 | return 0; | 1940 | return 0; |
1821 | } | 1941 | } |
1822 | 1942 | ||
@@ -1860,7 +1980,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | |||
1860 | c->scsi_cmd = cmd; | 1980 | c->scsi_cmd = cmd; |
1861 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | 1981 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
1862 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | 1982 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); |
1863 | c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ | 1983 | c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); |
1984 | c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; | ||
1864 | 1985 | ||
1865 | /* Fill in the request block... */ | 1986 | /* Fill in the request block... */ |
1866 | 1987 | ||
@@ -1914,6 +2035,48 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | |||
1914 | return 0; | 2035 | return 0; |
1915 | } | 2036 | } |
1916 | 2037 | ||
2038 | static void hpsa_scan_start(struct Scsi_Host *sh) | ||
2039 | { | ||
2040 | struct ctlr_info *h = shost_to_hba(sh); | ||
2041 | unsigned long flags; | ||
2042 | |||
2043 | /* wait until any scan already in progress is finished. */ | ||
2044 | while (1) { | ||
2045 | spin_lock_irqsave(&h->scan_lock, flags); | ||
2046 | if (h->scan_finished) | ||
2047 | break; | ||
2048 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2049 | wait_event(h->scan_wait_queue, h->scan_finished); | ||
2050 | /* Note: We don't need to worry about a race between this | ||
2051 | * thread and driver unload because the midlayer will | ||
2052 | * have incremented the reference count, so unload won't | ||
2053 | * happen if we're in here. | ||
2054 | */ | ||
2055 | } | ||
2056 | h->scan_finished = 0; /* mark scan as in progress */ | ||
2057 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2058 | |||
2059 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); | ||
2060 | |||
2061 | spin_lock_irqsave(&h->scan_lock, flags); | ||
2062 | h->scan_finished = 1; /* mark scan as finished. */ | ||
2063 | wake_up_all(&h->scan_wait_queue); | ||
2064 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2065 | } | ||
2066 | |||
2067 | static int hpsa_scan_finished(struct Scsi_Host *sh, | ||
2068 | unsigned long elapsed_time) | ||
2069 | { | ||
2070 | struct ctlr_info *h = shost_to_hba(sh); | ||
2071 | unsigned long flags; | ||
2072 | int finished; | ||
2073 | |||
2074 | spin_lock_irqsave(&h->scan_lock, flags); | ||
2075 | finished = h->scan_finished; | ||
2076 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2077 | return finished; | ||
2078 | } | ||
2079 | |||
1917 | static void hpsa_unregister_scsi(struct ctlr_info *h) | 2080 | static void hpsa_unregister_scsi(struct ctlr_info *h) |
1918 | { | 2081 | { |
1919 | /* we are being forcibly unloaded, and may not refuse. */ | 2082 | /* we are being forcibly unloaded, and may not refuse. */ |
@@ -1926,7 +2089,6 @@ static int hpsa_register_scsi(struct ctlr_info *h) | |||
1926 | { | 2089 | { |
1927 | int rc; | 2090 | int rc; |
1928 | 2091 | ||
1929 | hpsa_update_scsi_devices(h, -1); | ||
1930 | rc = hpsa_scsi_detect(h); | 2092 | rc = hpsa_scsi_detect(h); |
1931 | if (rc != 0) | 2093 | if (rc != 0) |
1932 | dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" | 2094 | dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" |
@@ -2003,14 +2165,14 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
2003 | h = sdev_to_hba(scsicmd->device); | 2165 | h = sdev_to_hba(scsicmd->device); |
2004 | if (h == NULL) /* paranoia */ | 2166 | if (h == NULL) /* paranoia */ |
2005 | return FAILED; | 2167 | return FAILED; |
2006 | dev_warn(&h->pdev->dev, "resetting drive\n"); | ||
2007 | |||
2008 | dev = scsicmd->device->hostdata; | 2168 | dev = scsicmd->device->hostdata; |
2009 | if (!dev) { | 2169 | if (!dev) { |
2010 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " | 2170 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " |
2011 | "device lookup failed.\n"); | 2171 | "device lookup failed.\n"); |
2012 | return FAILED; | 2172 | return FAILED; |
2013 | } | 2173 | } |
2174 | dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", | ||
2175 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); | ||
2014 | /* send a reset to the SCSI LUN which the command was sent to */ | 2176 | /* send a reset to the SCSI LUN which the command was sent to */ |
2015 | rc = hpsa_send_reset(h, dev->scsi3addr); | 2177 | rc = hpsa_send_reset(h, dev->scsi3addr); |
2016 | if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) | 2178 | if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) |
@@ -2053,8 +2215,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) | |||
2053 | c->cmdindex = i; | 2215 | c->cmdindex = i; |
2054 | 2216 | ||
2055 | INIT_HLIST_NODE(&c->list); | 2217 | INIT_HLIST_NODE(&c->list); |
2056 | c->busaddr = (__u32) cmd_dma_handle; | 2218 | c->busaddr = (u32) cmd_dma_handle; |
2057 | temp64.val = (__u64) err_dma_handle; | 2219 | temp64.val = (u64) err_dma_handle; |
2058 | c->ErrDesc.Addr.lower = temp64.val32.lower; | 2220 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2059 | c->ErrDesc.Addr.upper = temp64.val32.upper; | 2221 | c->ErrDesc.Addr.upper = temp64.val32.upper; |
2060 | c->ErrDesc.Len = sizeof(*c->err_info); | 2222 | c->ErrDesc.Len = sizeof(*c->err_info); |
@@ -2091,8 +2253,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | |||
2091 | memset(c->err_info, 0, sizeof(*c->err_info)); | 2253 | memset(c->err_info, 0, sizeof(*c->err_info)); |
2092 | 2254 | ||
2093 | INIT_HLIST_NODE(&c->list); | 2255 | INIT_HLIST_NODE(&c->list); |
2094 | c->busaddr = (__u32) cmd_dma_handle; | 2256 | c->busaddr = (u32) cmd_dma_handle; |
2095 | temp64.val = (__u64) err_dma_handle; | 2257 | temp64.val = (u64) err_dma_handle; |
2096 | c->ErrDesc.Addr.lower = temp64.val32.lower; | 2258 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2097 | c->ErrDesc.Addr.upper = temp64.val32.upper; | 2259 | c->ErrDesc.Addr.upper = temp64.val32.upper; |
2098 | c->ErrDesc.Len = sizeof(*c->err_info); | 2260 | c->ErrDesc.Len = sizeof(*c->err_info); |
@@ -2125,50 +2287,6 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) | |||
2125 | 2287 | ||
2126 | #ifdef CONFIG_COMPAT | 2288 | #ifdef CONFIG_COMPAT |
2127 | 2289 | ||
2128 | static int do_ioctl(struct scsi_device *dev, int cmd, void *arg) | ||
2129 | { | ||
2130 | int ret; | ||
2131 | |||
2132 | lock_kernel(); | ||
2133 | ret = hpsa_ioctl(dev, cmd, arg); | ||
2134 | unlock_kernel(); | ||
2135 | return ret; | ||
2136 | } | ||
2137 | |||
2138 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg); | ||
2139 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | ||
2140 | int cmd, void *arg); | ||
2141 | |||
2142 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) | ||
2143 | { | ||
2144 | switch (cmd) { | ||
2145 | case CCISS_GETPCIINFO: | ||
2146 | case CCISS_GETINTINFO: | ||
2147 | case CCISS_SETINTINFO: | ||
2148 | case CCISS_GETNODENAME: | ||
2149 | case CCISS_SETNODENAME: | ||
2150 | case CCISS_GETHEARTBEAT: | ||
2151 | case CCISS_GETBUSTYPES: | ||
2152 | case CCISS_GETFIRMVER: | ||
2153 | case CCISS_GETDRIVVER: | ||
2154 | case CCISS_REVALIDVOLS: | ||
2155 | case CCISS_DEREGDISK: | ||
2156 | case CCISS_REGNEWDISK: | ||
2157 | case CCISS_REGNEWD: | ||
2158 | case CCISS_RESCANDISK: | ||
2159 | case CCISS_GETLUNINFO: | ||
2160 | return do_ioctl(dev, cmd, arg); | ||
2161 | |||
2162 | case CCISS_PASSTHRU32: | ||
2163 | return hpsa_ioctl32_passthru(dev, cmd, arg); | ||
2164 | case CCISS_BIG_PASSTHRU32: | ||
2165 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); | ||
2166 | |||
2167 | default: | ||
2168 | return -ENOIOCTLCMD; | ||
2169 | } | ||
2170 | } | ||
2171 | |||
2172 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) | 2290 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) |
2173 | { | 2291 | { |
2174 | IOCTL32_Command_struct __user *arg32 = | 2292 | IOCTL32_Command_struct __user *arg32 = |
@@ -2193,7 +2311,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) | |||
2193 | if (err) | 2311 | if (err) |
2194 | return -EFAULT; | 2312 | return -EFAULT; |
2195 | 2313 | ||
2196 | err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p); | 2314 | err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); |
2197 | if (err) | 2315 | if (err) |
2198 | return err; | 2316 | return err; |
2199 | err |= copy_in_user(&arg32->error_info, &p->error_info, | 2317 | err |= copy_in_user(&arg32->error_info, &p->error_info, |
@@ -2230,7 +2348,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | |||
2230 | if (err) | 2348 | if (err) |
2231 | return -EFAULT; | 2349 | return -EFAULT; |
2232 | 2350 | ||
2233 | err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); | 2351 | err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); |
2234 | if (err) | 2352 | if (err) |
2235 | return err; | 2353 | return err; |
2236 | err |= copy_in_user(&arg32->error_info, &p->error_info, | 2354 | err |= copy_in_user(&arg32->error_info, &p->error_info, |
@@ -2239,6 +2357,36 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | |||
2239 | return -EFAULT; | 2357 | return -EFAULT; |
2240 | return err; | 2358 | return err; |
2241 | } | 2359 | } |
2360 | |||
2361 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) | ||
2362 | { | ||
2363 | switch (cmd) { | ||
2364 | case CCISS_GETPCIINFO: | ||
2365 | case CCISS_GETINTINFO: | ||
2366 | case CCISS_SETINTINFO: | ||
2367 | case CCISS_GETNODENAME: | ||
2368 | case CCISS_SETNODENAME: | ||
2369 | case CCISS_GETHEARTBEAT: | ||
2370 | case CCISS_GETBUSTYPES: | ||
2371 | case CCISS_GETFIRMVER: | ||
2372 | case CCISS_GETDRIVVER: | ||
2373 | case CCISS_REVALIDVOLS: | ||
2374 | case CCISS_DEREGDISK: | ||
2375 | case CCISS_REGNEWDISK: | ||
2376 | case CCISS_REGNEWD: | ||
2377 | case CCISS_RESCANDISK: | ||
2378 | case CCISS_GETLUNINFO: | ||
2379 | return hpsa_ioctl(dev, cmd, arg); | ||
2380 | |||
2381 | case CCISS_PASSTHRU32: | ||
2382 | return hpsa_ioctl32_passthru(dev, cmd, arg); | ||
2383 | case CCISS_BIG_PASSTHRU32: | ||
2384 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); | ||
2385 | |||
2386 | default: | ||
2387 | return -ENOIOCTLCMD; | ||
2388 | } | ||
2389 | } | ||
2242 | #endif | 2390 | #endif |
2243 | 2391 | ||
2244 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) | 2392 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) |
@@ -2378,8 +2526,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2378 | BYTE sg_used = 0; | 2526 | BYTE sg_used = 0; |
2379 | int status = 0; | 2527 | int status = 0; |
2380 | int i; | 2528 | int i; |
2381 | __u32 left; | 2529 | u32 left; |
2382 | __u32 sz; | 2530 | u32 sz; |
2383 | BYTE __user *data_ptr; | 2531 | BYTE __user *data_ptr; |
2384 | 2532 | ||
2385 | if (!argp) | 2533 | if (!argp) |
@@ -2527,7 +2675,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) | |||
2527 | case CCISS_DEREGDISK: | 2675 | case CCISS_DEREGDISK: |
2528 | case CCISS_REGNEWDISK: | 2676 | case CCISS_REGNEWDISK: |
2529 | case CCISS_REGNEWD: | 2677 | case CCISS_REGNEWD: |
2530 | hpsa_update_scsi_devices(h, dev->host->host_no); | 2678 | hpsa_scan_start(h->scsi_host); |
2531 | return 0; | 2679 | return 0; |
2532 | case CCISS_GETPCIINFO: | 2680 | case CCISS_GETPCIINFO: |
2533 | return hpsa_getpciinfo_ioctl(h, argp); | 2681 | return hpsa_getpciinfo_ioctl(h, argp); |
@@ -2542,8 +2690,8 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) | |||
2542 | } | 2690 | } |
2543 | } | 2691 | } |
2544 | 2692 | ||
2545 | static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h, | 2693 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
2546 | void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, | 2694 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
2547 | int cmd_type) | 2695 | int cmd_type) |
2548 | { | 2696 | { |
2549 | int pci_dir = XFER_NONE; | 2697 | int pci_dir = XFER_NONE; |
@@ -2710,19 +2858,20 @@ static inline unsigned long get_next_completion(struct ctlr_info *h) | |||
2710 | return h->access.command_completed(h); | 2858 | return h->access.command_completed(h); |
2711 | } | 2859 | } |
2712 | 2860 | ||
2713 | static inline int interrupt_pending(struct ctlr_info *h) | 2861 | static inline bool interrupt_pending(struct ctlr_info *h) |
2714 | { | 2862 | { |
2715 | return h->access.intr_pending(h); | 2863 | return h->access.intr_pending(h); |
2716 | } | 2864 | } |
2717 | 2865 | ||
2718 | static inline long interrupt_not_for_us(struct ctlr_info *h) | 2866 | static inline long interrupt_not_for_us(struct ctlr_info *h) |
2719 | { | 2867 | { |
2720 | return ((h->access.intr_pending(h) == 0) || | 2868 | return !(h->msi_vector || h->msix_vector) && |
2721 | (h->interrupts_enabled == 0)); | 2869 | ((h->access.intr_pending(h) == 0) || |
2870 | (h->interrupts_enabled == 0)); | ||
2722 | } | 2871 | } |
2723 | 2872 | ||
2724 | static inline int bad_tag(struct ctlr_info *h, __u32 tag_index, | 2873 | static inline int bad_tag(struct ctlr_info *h, u32 tag_index, |
2725 | __u32 raw_tag) | 2874 | u32 raw_tag) |
2726 | { | 2875 | { |
2727 | if (unlikely(tag_index >= h->nr_cmds)) { | 2876 | if (unlikely(tag_index >= h->nr_cmds)) { |
2728 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); | 2877 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); |
@@ -2731,7 +2880,7 @@ static inline int bad_tag(struct ctlr_info *h, __u32 tag_index, | |||
2731 | return 0; | 2880 | return 0; |
2732 | } | 2881 | } |
2733 | 2882 | ||
2734 | static inline void finish_cmd(struct CommandList *c, __u32 raw_tag) | 2883 | static inline void finish_cmd(struct CommandList *c, u32 raw_tag) |
2735 | { | 2884 | { |
2736 | removeQ(c); | 2885 | removeQ(c); |
2737 | if (likely(c->cmd_type == CMD_SCSI)) | 2886 | if (likely(c->cmd_type == CMD_SCSI)) |
@@ -2740,42 +2889,79 @@ static inline void finish_cmd(struct CommandList *c, __u32 raw_tag) | |||
2740 | complete(c->waiting); | 2889 | complete(c->waiting); |
2741 | } | 2890 | } |
2742 | 2891 | ||
2892 | static inline u32 hpsa_tag_contains_index(u32 tag) | ||
2893 | { | ||
2894 | #define DIRECT_LOOKUP_BIT 0x10 | ||
2895 | return tag & DIRECT_LOOKUP_BIT; | ||
2896 | } | ||
2897 | |||
2898 | static inline u32 hpsa_tag_to_index(u32 tag) | ||
2899 | { | ||
2900 | #define DIRECT_LOOKUP_SHIFT 5 | ||
2901 | return tag >> DIRECT_LOOKUP_SHIFT; | ||
2902 | } | ||
2903 | |||
2904 | static inline u32 hpsa_tag_discard_error_bits(u32 tag) | ||
2905 | { | ||
2906 | #define HPSA_ERROR_BITS 0x03 | ||
2907 | return tag & ~HPSA_ERROR_BITS; | ||
2908 | } | ||
2909 | |||
2910 | /* process completion of an indexed ("direct lookup") command */ | ||
2911 | static inline u32 process_indexed_cmd(struct ctlr_info *h, | ||
2912 | u32 raw_tag) | ||
2913 | { | ||
2914 | u32 tag_index; | ||
2915 | struct CommandList *c; | ||
2916 | |||
2917 | tag_index = hpsa_tag_to_index(raw_tag); | ||
2918 | if (bad_tag(h, tag_index, raw_tag)) | ||
2919 | return next_command(h); | ||
2920 | c = h->cmd_pool + tag_index; | ||
2921 | finish_cmd(c, raw_tag); | ||
2922 | return next_command(h); | ||
2923 | } | ||
2924 | |||
2925 | /* process completion of a non-indexed command */ | ||
2926 | static inline u32 process_nonindexed_cmd(struct ctlr_info *h, | ||
2927 | u32 raw_tag) | ||
2928 | { | ||
2929 | u32 tag; | ||
2930 | struct CommandList *c = NULL; | ||
2931 | struct hlist_node *tmp; | ||
2932 | |||
2933 | tag = hpsa_tag_discard_error_bits(raw_tag); | ||
2934 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { | ||
2935 | if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { | ||
2936 | finish_cmd(c, raw_tag); | ||
2937 | return next_command(h); | ||
2938 | } | ||
2939 | } | ||
2940 | bad_tag(h, h->nr_cmds + 1, raw_tag); | ||
2941 | return next_command(h); | ||
2942 | } | ||
2943 | |||
2743 | static irqreturn_t do_hpsa_intr(int irq, void *dev_id) | 2944 | static irqreturn_t do_hpsa_intr(int irq, void *dev_id) |
2744 | { | 2945 | { |
2745 | struct ctlr_info *h = dev_id; | 2946 | struct ctlr_info *h = dev_id; |
2746 | struct CommandList *c; | ||
2747 | unsigned long flags; | 2947 | unsigned long flags; |
2748 | __u32 raw_tag, tag, tag_index; | 2948 | u32 raw_tag; |
2749 | struct hlist_node *tmp; | ||
2750 | 2949 | ||
2751 | if (interrupt_not_for_us(h)) | 2950 | if (interrupt_not_for_us(h)) |
2752 | return IRQ_NONE; | 2951 | return IRQ_NONE; |
2753 | spin_lock_irqsave(&h->lock, flags); | 2952 | spin_lock_irqsave(&h->lock, flags); |
2754 | while (interrupt_pending(h)) { | 2953 | raw_tag = get_next_completion(h); |
2755 | while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) { | 2954 | while (raw_tag != FIFO_EMPTY) { |
2756 | if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) { | 2955 | if (hpsa_tag_contains_index(raw_tag)) |
2757 | tag_index = HPSA_TAG_TO_INDEX(raw_tag); | 2956 | raw_tag = process_indexed_cmd(h, raw_tag); |
2758 | if (bad_tag(h, tag_index, raw_tag)) | 2957 | else |
2759 | return IRQ_HANDLED; | 2958 | raw_tag = process_nonindexed_cmd(h, raw_tag); |
2760 | c = h->cmd_pool + tag_index; | ||
2761 | finish_cmd(c, raw_tag); | ||
2762 | continue; | ||
2763 | } | ||
2764 | tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag); | ||
2765 | c = NULL; | ||
2766 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { | ||
2767 | if (c->busaddr == tag) { | ||
2768 | finish_cmd(c, raw_tag); | ||
2769 | break; | ||
2770 | } | ||
2771 | } | ||
2772 | } | ||
2773 | } | 2959 | } |
2774 | spin_unlock_irqrestore(&h->lock, flags); | 2960 | spin_unlock_irqrestore(&h->lock, flags); |
2775 | return IRQ_HANDLED; | 2961 | return IRQ_HANDLED; |
2776 | } | 2962 | } |
2777 | 2963 | ||
2778 | /* Send a message CDB to the firmware. */ | 2964 | /* Send a message CDB to the firmwart. */ |
2779 | static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | 2965 | static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, |
2780 | unsigned char type) | 2966 | unsigned char type) |
2781 | { | 2967 | { |
@@ -2841,7 +3027,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
2841 | 3027 | ||
2842 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { | 3028 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { |
2843 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); | 3029 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); |
2844 | if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32) | 3030 | if (hpsa_tag_discard_error_bits(tag) == paddr32) |
2845 | break; | 3031 | break; |
2846 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); | 3032 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); |
2847 | } | 3033 | } |
@@ -3063,7 +3249,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |||
3063 | */ | 3249 | */ |
3064 | 3250 | ||
3065 | static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, | 3251 | static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, |
3066 | struct pci_dev *pdev, __u32 board_id) | 3252 | struct pci_dev *pdev, u32 board_id) |
3067 | { | 3253 | { |
3068 | #ifdef CONFIG_PCI_MSI | 3254 | #ifdef CONFIG_PCI_MSI |
3069 | int err; | 3255 | int err; |
@@ -3107,22 +3293,22 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, | |||
3107 | default_int_mode: | 3293 | default_int_mode: |
3108 | #endif /* CONFIG_PCI_MSI */ | 3294 | #endif /* CONFIG_PCI_MSI */ |
3109 | /* if we get here we're going to use the default interrupt mode */ | 3295 | /* if we get here we're going to use the default interrupt mode */ |
3110 | h->intr[SIMPLE_MODE_INT] = pdev->irq; | 3296 | h->intr[PERF_MODE_INT] = pdev->irq; |
3111 | return; | ||
3112 | } | 3297 | } |
3113 | 3298 | ||
3114 | static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | 3299 | static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) |
3115 | { | 3300 | { |
3116 | ushort subsystem_vendor_id, subsystem_device_id, command; | 3301 | ushort subsystem_vendor_id, subsystem_device_id, command; |
3117 | __u32 board_id, scratchpad = 0; | 3302 | u32 board_id, scratchpad = 0; |
3118 | __u64 cfg_offset; | 3303 | u64 cfg_offset; |
3119 | __u32 cfg_base_addr; | 3304 | u32 cfg_base_addr; |
3120 | __u64 cfg_base_addr_index; | 3305 | u64 cfg_base_addr_index; |
3306 | u32 trans_offset; | ||
3121 | int i, prod_index, err; | 3307 | int i, prod_index, err; |
3122 | 3308 | ||
3123 | subsystem_vendor_id = pdev->subsystem_vendor; | 3309 | subsystem_vendor_id = pdev->subsystem_vendor; |
3124 | subsystem_device_id = pdev->subsystem_device; | 3310 | subsystem_device_id = pdev->subsystem_device; |
3125 | board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | | 3311 | board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) | |
3126 | subsystem_vendor_id); | 3312 | subsystem_vendor_id); |
3127 | 3313 | ||
3128 | for (i = 0; i < ARRAY_SIZE(products); i++) | 3314 | for (i = 0; i < ARRAY_SIZE(products); i++) |
@@ -3199,7 +3385,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3199 | 3385 | ||
3200 | /* get the address index number */ | 3386 | /* get the address index number */ |
3201 | cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET); | 3387 | cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET); |
3202 | cfg_base_addr &= (__u32) 0x0000ffff; | 3388 | cfg_base_addr &= (u32) 0x0000ffff; |
3203 | cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); | 3389 | cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); |
3204 | if (cfg_base_addr_index == -1) { | 3390 | if (cfg_base_addr_index == -1) { |
3205 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); | 3391 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); |
@@ -3211,11 +3397,14 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3211 | h->cfgtable = remap_pci_mem(pci_resource_start(pdev, | 3397 | h->cfgtable = remap_pci_mem(pci_resource_start(pdev, |
3212 | cfg_base_addr_index) + cfg_offset, | 3398 | cfg_base_addr_index) + cfg_offset, |
3213 | sizeof(h->cfgtable)); | 3399 | sizeof(h->cfgtable)); |
3214 | h->board_id = board_id; | 3400 | /* Find performant mode table. */ |
3215 | 3401 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); | |
3216 | /* Query controller for max supported commands: */ | 3402 | h->transtable = remap_pci_mem(pci_resource_start(pdev, |
3217 | h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); | 3403 | cfg_base_addr_index)+cfg_offset+trans_offset, |
3404 | sizeof(*h->transtable)); | ||
3218 | 3405 | ||
3406 | h->board_id = board_id; | ||
3407 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); | ||
3219 | h->product_name = products[prod_index].product_name; | 3408 | h->product_name = products[prod_index].product_name; |
3220 | h->access = *(products[prod_index].access); | 3409 | h->access = *(products[prod_index].access); |
3221 | /* Allow room for some ioctls */ | 3410 | /* Allow room for some ioctls */ |
@@ -3232,7 +3421,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3232 | #ifdef CONFIG_X86 | 3421 | #ifdef CONFIG_X86 |
3233 | { | 3422 | { |
3234 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ | 3423 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
3235 | __u32 prefetch; | 3424 | u32 prefetch; |
3236 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); | 3425 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); |
3237 | prefetch |= 0x100; | 3426 | prefetch |= 0x100; |
3238 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); | 3427 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); |
@@ -3244,7 +3433,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3244 | * physical memory. | 3433 | * physical memory. |
3245 | */ | 3434 | */ |
3246 | if (board_id == 0x3225103C) { | 3435 | if (board_id == 0x3225103C) { |
3247 | __u32 dma_prefetch; | 3436 | u32 dma_prefetch; |
3248 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); | 3437 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); |
3249 | dma_prefetch |= 0x8000; | 3438 | dma_prefetch |= 0x8000; |
3250 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); | 3439 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); |
@@ -3286,10 +3475,26 @@ err_out_free_res: | |||
3286 | return err; | 3475 | return err; |
3287 | } | 3476 | } |
3288 | 3477 | ||
3478 | static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) | ||
3479 | { | ||
3480 | int rc; | ||
3481 | |||
3482 | #define HBA_INQUIRY_BYTE_COUNT 64 | ||
3483 | h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); | ||
3484 | if (!h->hba_inquiry_data) | ||
3485 | return; | ||
3486 | rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, | ||
3487 | h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); | ||
3488 | if (rc != 0) { | ||
3489 | kfree(h->hba_inquiry_data); | ||
3490 | h->hba_inquiry_data = NULL; | ||
3491 | } | ||
3492 | } | ||
3493 | |||
3289 | static int __devinit hpsa_init_one(struct pci_dev *pdev, | 3494 | static int __devinit hpsa_init_one(struct pci_dev *pdev, |
3290 | const struct pci_device_id *ent) | 3495 | const struct pci_device_id *ent) |
3291 | { | 3496 | { |
3292 | int i; | 3497 | int i, rc; |
3293 | int dac; | 3498 | int dac; |
3294 | struct ctlr_info *h; | 3499 | struct ctlr_info *h; |
3295 | 3500 | ||
@@ -3314,17 +3519,23 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3314 | } | 3519 | } |
3315 | } | 3520 | } |
3316 | 3521 | ||
3317 | BUILD_BUG_ON(sizeof(struct CommandList) % 8); | 3522 | /* Command structures must be aligned on a 32-byte boundary because |
3523 | * the 5 lower bits of the address are used by the hardware. and by | ||
3524 | * the driver. See comments in hpsa.h for more info. | ||
3525 | */ | ||
3526 | #define COMMANDLIST_ALIGNMENT 32 | ||
3527 | BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); | ||
3318 | h = kzalloc(sizeof(*h), GFP_KERNEL); | 3528 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
3319 | if (!h) | 3529 | if (!h) |
3320 | return -1; | 3530 | return -ENOMEM; |
3321 | 3531 | ||
3322 | h->busy_initializing = 1; | 3532 | h->busy_initializing = 1; |
3323 | INIT_HLIST_HEAD(&h->cmpQ); | 3533 | INIT_HLIST_HEAD(&h->cmpQ); |
3324 | INIT_HLIST_HEAD(&h->reqQ); | 3534 | INIT_HLIST_HEAD(&h->reqQ); |
3325 | mutex_init(&h->busy_shutting_down); | 3535 | mutex_init(&h->busy_shutting_down); |
3326 | init_completion(&h->scan_wait); | 3536 | init_completion(&h->scan_wait); |
3327 | if (hpsa_pci_init(h, pdev) != 0) | 3537 | rc = hpsa_pci_init(h, pdev); |
3538 | if (rc != 0) | ||
3328 | goto clean1; | 3539 | goto clean1; |
3329 | 3540 | ||
3330 | sprintf(h->devname, "hpsa%d", number_of_controllers); | 3541 | sprintf(h->devname, "hpsa%d", number_of_controllers); |
@@ -3333,27 +3544,32 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3333 | h->pdev = pdev; | 3544 | h->pdev = pdev; |
3334 | 3545 | ||
3335 | /* configure PCI DMA stuff */ | 3546 | /* configure PCI DMA stuff */ |
3336 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) | 3547 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
3548 | if (rc == 0) { | ||
3337 | dac = 1; | 3549 | dac = 1; |
3338 | else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) | 3550 | } else { |
3339 | dac = 0; | 3551 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
3340 | else { | 3552 | if (rc == 0) { |
3341 | dev_err(&pdev->dev, "no suitable DMA available\n"); | 3553 | dac = 0; |
3342 | goto clean1; | 3554 | } else { |
3555 | dev_err(&pdev->dev, "no suitable DMA available\n"); | ||
3556 | goto clean1; | ||
3557 | } | ||
3343 | } | 3558 | } |
3344 | 3559 | ||
3345 | /* make sure the board interrupts are off */ | 3560 | /* make sure the board interrupts are off */ |
3346 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 3561 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
3347 | if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr, | 3562 | rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr, |
3348 | IRQF_DISABLED | IRQF_SHARED, h->devname, h)) { | 3563 | IRQF_DISABLED, h->devname, h); |
3564 | if (rc) { | ||
3349 | dev_err(&pdev->dev, "unable to get irq %d for %s\n", | 3565 | dev_err(&pdev->dev, "unable to get irq %d for %s\n", |
3350 | h->intr[SIMPLE_MODE_INT], h->devname); | 3566 | h->intr[PERF_MODE_INT], h->devname); |
3351 | goto clean2; | 3567 | goto clean2; |
3352 | } | 3568 | } |
3353 | 3569 | ||
3354 | dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", | 3570 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", |
3355 | h->devname, pdev->device, pci_name(pdev), | 3571 | h->devname, pdev->device, |
3356 | h->intr[SIMPLE_MODE_INT], dac ? "" : " not"); | 3572 | h->intr[PERF_MODE_INT], dac ? "" : " not"); |
3357 | 3573 | ||
3358 | h->cmd_pool_bits = | 3574 | h->cmd_pool_bits = |
3359 | kmalloc(((h->nr_cmds + BITS_PER_LONG - | 3575 | kmalloc(((h->nr_cmds + BITS_PER_LONG - |
@@ -3368,9 +3584,13 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3368 | || (h->cmd_pool == NULL) | 3584 | || (h->cmd_pool == NULL) |
3369 | || (h->errinfo_pool == NULL)) { | 3585 | || (h->errinfo_pool == NULL)) { |
3370 | dev_err(&pdev->dev, "out of memory"); | 3586 | dev_err(&pdev->dev, "out of memory"); |
3587 | rc = -ENOMEM; | ||
3371 | goto clean4; | 3588 | goto clean4; |
3372 | } | 3589 | } |
3373 | spin_lock_init(&h->lock); | 3590 | spin_lock_init(&h->lock); |
3591 | spin_lock_init(&h->scan_lock); | ||
3592 | init_waitqueue_head(&h->scan_wait_queue); | ||
3593 | h->scan_finished = 1; /* no scan currently in progress */ | ||
3374 | 3594 | ||
3375 | pci_set_drvdata(pdev, h); | 3595 | pci_set_drvdata(pdev, h); |
3376 | memset(h->cmd_pool_bits, 0, | 3596 | memset(h->cmd_pool_bits, 0, |
@@ -3382,6 +3602,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3382 | /* Turn the interrupts on so we can service requests */ | 3602 | /* Turn the interrupts on so we can service requests */ |
3383 | h->access.set_intr_mask(h, HPSA_INTR_ON); | 3603 | h->access.set_intr_mask(h, HPSA_INTR_ON); |
3384 | 3604 | ||
3605 | hpsa_put_ctlr_into_performant_mode(h); | ||
3606 | hpsa_hba_inquiry(h); | ||
3385 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ | 3607 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ |
3386 | h->busy_initializing = 0; | 3608 | h->busy_initializing = 0; |
3387 | return 1; | 3609 | return 1; |
@@ -3397,12 +3619,12 @@ clean4: | |||
3397 | h->nr_cmds * sizeof(struct ErrorInfo), | 3619 | h->nr_cmds * sizeof(struct ErrorInfo), |
3398 | h->errinfo_pool, | 3620 | h->errinfo_pool, |
3399 | h->errinfo_pool_dhandle); | 3621 | h->errinfo_pool_dhandle); |
3400 | free_irq(h->intr[SIMPLE_MODE_INT], h); | 3622 | free_irq(h->intr[PERF_MODE_INT], h); |
3401 | clean2: | 3623 | clean2: |
3402 | clean1: | 3624 | clean1: |
3403 | h->busy_initializing = 0; | 3625 | h->busy_initializing = 0; |
3404 | kfree(h); | 3626 | kfree(h); |
3405 | return -1; | 3627 | return rc; |
3406 | } | 3628 | } |
3407 | 3629 | ||
3408 | static void hpsa_flush_cache(struct ctlr_info *h) | 3630 | static void hpsa_flush_cache(struct ctlr_info *h) |
@@ -3441,7 +3663,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) | |||
3441 | */ | 3663 | */ |
3442 | hpsa_flush_cache(h); | 3664 | hpsa_flush_cache(h); |
3443 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 3665 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
3444 | free_irq(h->intr[2], h); | 3666 | free_irq(h->intr[PERF_MODE_INT], h); |
3445 | #ifdef CONFIG_PCI_MSI | 3667 | #ifdef CONFIG_PCI_MSI |
3446 | if (h->msix_vector) | 3668 | if (h->msix_vector) |
3447 | pci_disable_msix(h->pdev); | 3669 | pci_disable_msix(h->pdev); |
@@ -3470,7 +3692,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev) | |||
3470 | pci_free_consistent(h->pdev, | 3692 | pci_free_consistent(h->pdev, |
3471 | h->nr_cmds * sizeof(struct ErrorInfo), | 3693 | h->nr_cmds * sizeof(struct ErrorInfo), |
3472 | h->errinfo_pool, h->errinfo_pool_dhandle); | 3694 | h->errinfo_pool, h->errinfo_pool_dhandle); |
3695 | pci_free_consistent(h->pdev, h->reply_pool_size, | ||
3696 | h->reply_pool, h->reply_pool_dhandle); | ||
3473 | kfree(h->cmd_pool_bits); | 3697 | kfree(h->cmd_pool_bits); |
3698 | kfree(h->blockFetchTable); | ||
3699 | kfree(h->hba_inquiry_data); | ||
3474 | /* | 3700 | /* |
3475 | * Deliberately omit pci_disable_device(): it does something nasty to | 3701 | * Deliberately omit pci_disable_device(): it does something nasty to |
3476 | * Smart Array controllers that pci_enable_device does not undo | 3702 | * Smart Array controllers that pci_enable_device does not undo |
@@ -3502,6 +3728,129 @@ static struct pci_driver hpsa_pci_driver = { | |||
3502 | .resume = hpsa_resume, | 3728 | .resume = hpsa_resume, |
3503 | }; | 3729 | }; |
3504 | 3730 | ||
3731 | /* Fill in bucket_map[], given nsgs (the max number of | ||
3732 | * scatter gather elements supported) and bucket[], | ||
3733 | * which is an array of 8 integers. The bucket[] array | ||
3734 | * contains 8 different DMA transfer sizes (in 16 | ||
3735 | * byte increments) which the controller uses to fetch | ||
3736 | * commands. This function fills in bucket_map[], which | ||
3737 | * maps a given number of scatter gather elements to one of | ||
3738 | * the 8 DMA transfer sizes. The point of it is to allow the | ||
3739 | * controller to only do as much DMA as needed to fetch the | ||
3740 | * command, with the DMA transfer size encoded in the lower | ||
3741 | * bits of the command address. | ||
3742 | */ | ||
3743 | static void calc_bucket_map(int bucket[], int num_buckets, | ||
3744 | int nsgs, int *bucket_map) | ||
3745 | { | ||
3746 | int i, j, b, size; | ||
3747 | |||
3748 | /* even a command with 0 SGs requires 4 blocks */ | ||
3749 | #define MINIMUM_TRANSFER_BLOCKS 4 | ||
3750 | #define NUM_BUCKETS 8 | ||
3751 | /* Note, bucket_map must have nsgs+1 entries. */ | ||
3752 | for (i = 0; i <= nsgs; i++) { | ||
3753 | /* Compute size of a command with i SG entries */ | ||
3754 | size = i + MINIMUM_TRANSFER_BLOCKS; | ||
3755 | b = num_buckets; /* Assume the biggest bucket */ | ||
3756 | /* Find the bucket that is just big enough */ | ||
3757 | for (j = 0; j < 8; j++) { | ||
3758 | if (bucket[j] >= size) { | ||
3759 | b = j; | ||
3760 | break; | ||
3761 | } | ||
3762 | } | ||
3763 | /* for a command with i SG entries, use bucket b. */ | ||
3764 | bucket_map[i] = b; | ||
3765 | } | ||
3766 | } | ||
3767 | |||
3768 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) | ||
3769 | { | ||
3770 | u32 trans_support; | ||
3771 | u64 trans_offset; | ||
3772 | /* 5 = 1 s/g entry or 4k | ||
3773 | * 6 = 2 s/g entry or 8k | ||
3774 | * 8 = 4 s/g entry or 16k | ||
3775 | * 10 = 6 s/g entry or 24k | ||
3776 | */ | ||
3777 | int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */ | ||
3778 | int i = 0; | ||
3779 | int l = 0; | ||
3780 | unsigned long register_value; | ||
3781 | |||
3782 | trans_support = readl(&(h->cfgtable->TransportSupport)); | ||
3783 | if (!(trans_support & PERFORMANT_MODE)) | ||
3784 | return; | ||
3785 | |||
3786 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); | ||
3787 | h->max_sg_entries = 32; | ||
3788 | /* Performant mode ring buffer and supporting data structures */ | ||
3789 | h->reply_pool_size = h->max_commands * sizeof(u64); | ||
3790 | h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, | ||
3791 | &(h->reply_pool_dhandle)); | ||
3792 | |||
3793 | /* Need a block fetch table for performant mode */ | ||
3794 | h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * | ||
3795 | sizeof(u32)), GFP_KERNEL); | ||
3796 | |||
3797 | if ((h->reply_pool == NULL) | ||
3798 | || (h->blockFetchTable == NULL)) | ||
3799 | goto clean_up; | ||
3800 | |||
3801 | h->reply_pool_wraparound = 1; /* spec: init to 1 */ | ||
3802 | |||
3803 | /* Controller spec: zero out this buffer. */ | ||
3804 | memset(h->reply_pool, 0, h->reply_pool_size); | ||
3805 | h->reply_pool_head = h->reply_pool; | ||
3806 | |||
3807 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); | ||
3808 | bft[7] = h->max_sg_entries + 4; | ||
3809 | calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); | ||
3810 | for (i = 0; i < 8; i++) | ||
3811 | writel(bft[i], &h->transtable->BlockFetch[i]); | ||
3812 | |||
3813 | /* size of controller ring buffer */ | ||
3814 | writel(h->max_commands, &h->transtable->RepQSize); | ||
3815 | writel(1, &h->transtable->RepQCount); | ||
3816 | writel(0, &h->transtable->RepQCtrAddrLow32); | ||
3817 | writel(0, &h->transtable->RepQCtrAddrHigh32); | ||
3818 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); | ||
3819 | writel(0, &h->transtable->RepQAddr0High32); | ||
3820 | writel(CFGTBL_Trans_Performant, | ||
3821 | &(h->cfgtable->HostWrite.TransportRequest)); | ||
3822 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | ||
3823 | /* under certain very rare conditions, this can take awhile. | ||
3824 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | ||
3825 | * as we enter this code.) */ | ||
3826 | for (l = 0; l < MAX_CONFIG_WAIT; l++) { | ||
3827 | register_value = readl(h->vaddr + SA5_DOORBELL); | ||
3828 | if (!(register_value & CFGTBL_ChangeReq)) | ||
3829 | break; | ||
3830 | /* delay and try again */ | ||
3831 | set_current_state(TASK_INTERRUPTIBLE); | ||
3832 | schedule_timeout(10); | ||
3833 | } | ||
3834 | register_value = readl(&(h->cfgtable->TransportActive)); | ||
3835 | if (!(register_value & CFGTBL_Trans_Performant)) { | ||
3836 | dev_warn(&h->pdev->dev, "unable to get board into" | ||
3837 | " performant mode\n"); | ||
3838 | return; | ||
3839 | } | ||
3840 | |||
3841 | /* Change the access methods to the performant access methods */ | ||
3842 | h->access = SA5_performant_access; | ||
3843 | h->transMethod = CFGTBL_Trans_Performant; | ||
3844 | |||
3845 | return; | ||
3846 | |||
3847 | clean_up: | ||
3848 | if (h->reply_pool) | ||
3849 | pci_free_consistent(h->pdev, h->reply_pool_size, | ||
3850 | h->reply_pool, h->reply_pool_dhandle); | ||
3851 | kfree(h->blockFetchTable); | ||
3852 | } | ||
3853 | |||
3505 | /* | 3854 | /* |
3506 | * This is it. Register the PCI driver information for the cards we control | 3855 | * This is it. Register the PCI driver information for the cards we control |
3507 | * the OS will call our registered routines when it finds one of our cards. | 3856 | * the OS will call our registered routines when it finds one of our cards. |
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 6bd1949144b5..a0502b3ac17e 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
@@ -33,7 +33,7 @@ struct access_method { | |||
33 | struct CommandList *c); | 33 | struct CommandList *c); |
34 | void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); | 34 | void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); |
35 | unsigned long (*fifo_full)(struct ctlr_info *h); | 35 | unsigned long (*fifo_full)(struct ctlr_info *h); |
36 | unsigned long (*intr_pending)(struct ctlr_info *h); | 36 | bool (*intr_pending)(struct ctlr_info *h); |
37 | unsigned long (*command_completed)(struct ctlr_info *h); | 37 | unsigned long (*command_completed)(struct ctlr_info *h); |
38 | }; | 38 | }; |
39 | 39 | ||
@@ -55,19 +55,20 @@ struct ctlr_info { | |||
55 | char *product_name; | 55 | char *product_name; |
56 | char firm_ver[4]; /* Firmware version */ | 56 | char firm_ver[4]; /* Firmware version */ |
57 | struct pci_dev *pdev; | 57 | struct pci_dev *pdev; |
58 | __u32 board_id; | 58 | u32 board_id; |
59 | void __iomem *vaddr; | 59 | void __iomem *vaddr; |
60 | unsigned long paddr; | 60 | unsigned long paddr; |
61 | int nr_cmds; /* Number of commands allowed on this controller */ | 61 | int nr_cmds; /* Number of commands allowed on this controller */ |
62 | struct CfgTable __iomem *cfgtable; | 62 | struct CfgTable __iomem *cfgtable; |
63 | int max_sg_entries; | ||
63 | int interrupts_enabled; | 64 | int interrupts_enabled; |
64 | int major; | 65 | int major; |
65 | int max_commands; | 66 | int max_commands; |
66 | int commands_outstanding; | 67 | int commands_outstanding; |
67 | int max_outstanding; /* Debug */ | 68 | int max_outstanding; /* Debug */ |
68 | int usage_count; /* number of opens all all minor devices */ | 69 | int usage_count; /* number of opens all all minor devices */ |
69 | # define DOORBELL_INT 0 | 70 | # define PERF_MODE_INT 0 |
70 | # define PERF_MODE_INT 1 | 71 | # define DOORBELL_INT 1 |
71 | # define SIMPLE_MODE_INT 2 | 72 | # define SIMPLE_MODE_INT 2 |
72 | # define MEMQ_MODE_INT 3 | 73 | # define MEMQ_MODE_INT 3 |
73 | unsigned int intr[4]; | 74 | unsigned int intr[4]; |
@@ -93,6 +94,9 @@ struct ctlr_info { | |||
93 | int nr_frees; | 94 | int nr_frees; |
94 | int busy_initializing; | 95 | int busy_initializing; |
95 | int busy_scanning; | 96 | int busy_scanning; |
97 | int scan_finished; | ||
98 | spinlock_t scan_lock; | ||
99 | wait_queue_head_t scan_wait_queue; | ||
96 | struct mutex busy_shutting_down; | 100 | struct mutex busy_shutting_down; |
97 | struct list_head scan_list; | 101 | struct list_head scan_list; |
98 | struct completion scan_wait; | 102 | struct completion scan_wait; |
@@ -102,6 +106,24 @@ struct ctlr_info { | |||
102 | int ndevices; /* number of used elements in .dev[] array. */ | 106 | int ndevices; /* number of used elements in .dev[] array. */ |
103 | #define HPSA_MAX_SCSI_DEVS_PER_HBA 256 | 107 | #define HPSA_MAX_SCSI_DEVS_PER_HBA 256 |
104 | struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA]; | 108 | struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA]; |
109 | /* | ||
110 | * Performant mode tables. | ||
111 | */ | ||
112 | u32 trans_support; | ||
113 | u32 trans_offset; | ||
114 | struct TransTable_struct *transtable; | ||
115 | unsigned long transMethod; | ||
116 | |||
117 | /* | ||
118 | * Performant mode completion buffer | ||
119 | */ | ||
120 | u64 *reply_pool; | ||
121 | dma_addr_t reply_pool_dhandle; | ||
122 | u64 *reply_pool_head; | ||
123 | size_t reply_pool_size; | ||
124 | unsigned char reply_pool_wraparound; | ||
125 | u32 *blockFetchTable; | ||
126 | unsigned char *hba_inquiry_data; | ||
105 | }; | 127 | }; |
106 | #define HPSA_ABORT_MSG 0 | 128 | #define HPSA_ABORT_MSG 0 |
107 | #define HPSA_DEVICE_RESET_MSG 1 | 129 | #define HPSA_DEVICE_RESET_MSG 1 |
@@ -164,9 +186,16 @@ struct ctlr_info { | |||
164 | #define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ | 186 | #define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ |
165 | 187 | ||
166 | #define HPSA_ERROR_BIT 0x02 | 188 | #define HPSA_ERROR_BIT 0x02 |
167 | #define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04) | 189 | |
168 | #define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3) | 190 | /* Performant mode flags */ |
169 | #define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3) | 191 | #define SA5_PERF_INTR_PENDING 0x04 |
192 | #define SA5_PERF_INTR_OFF 0x05 | ||
193 | #define SA5_OUTDB_STATUS_PERF_BIT 0x01 | ||
194 | #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 | ||
195 | #define SA5_OUTDB_CLEAR 0xA0 | ||
196 | #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 | ||
197 | #define SA5_OUTDB_STATUS 0x9C | ||
198 | |||
170 | 199 | ||
171 | #define HPSA_INTR_ON 1 | 200 | #define HPSA_INTR_ON 1 |
172 | #define HPSA_INTR_OFF 0 | 201 | #define HPSA_INTR_OFF 0 |
@@ -176,10 +205,8 @@ struct ctlr_info { | |||
176 | static void SA5_submit_command(struct ctlr_info *h, | 205 | static void SA5_submit_command(struct ctlr_info *h, |
177 | struct CommandList *c) | 206 | struct CommandList *c) |
178 | { | 207 | { |
179 | #ifdef HPSA_DEBUG | 208 | dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, |
180 | printk(KERN_WARNING "hpsa: Sending %x - down to controller\n", | 209 | c->Header.Tag.lower); |
181 | c->busaddr); | ||
182 | #endif /* HPSA_DEBUG */ | ||
183 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); | 210 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); |
184 | h->commands_outstanding++; | 211 | h->commands_outstanding++; |
185 | if (h->commands_outstanding > h->max_outstanding) | 212 | if (h->commands_outstanding > h->max_outstanding) |
@@ -202,6 +229,52 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) | |||
202 | h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); | 229 | h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); |
203 | } | 230 | } |
204 | } | 231 | } |
232 | |||
233 | static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) | ||
234 | { | ||
235 | if (val) { /* turn on interrupts */ | ||
236 | h->interrupts_enabled = 1; | ||
237 | writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); | ||
238 | } else { | ||
239 | h->interrupts_enabled = 0; | ||
240 | writel(SA5_PERF_INTR_OFF, | ||
241 | h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | static unsigned long SA5_performant_completed(struct ctlr_info *h) | ||
246 | { | ||
247 | unsigned long register_value = FIFO_EMPTY; | ||
248 | |||
249 | /* flush the controller write of the reply queue by reading | ||
250 | * outbound doorbell status register. | ||
251 | */ | ||
252 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | ||
253 | /* msi auto clears the interrupt pending bit. */ | ||
254 | if (!(h->msi_vector || h->msix_vector)) { | ||
255 | writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); | ||
256 | /* Do a read in order to flush the write to the controller | ||
257 | * (as per spec.) | ||
258 | */ | ||
259 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | ||
260 | } | ||
261 | |||
262 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { | ||
263 | register_value = *(h->reply_pool_head); | ||
264 | (h->reply_pool_head)++; | ||
265 | h->commands_outstanding--; | ||
266 | } else { | ||
267 | register_value = FIFO_EMPTY; | ||
268 | } | ||
269 | /* Check for wraparound */ | ||
270 | if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { | ||
271 | h->reply_pool_head = h->reply_pool; | ||
272 | h->reply_pool_wraparound ^= 1; | ||
273 | } | ||
274 | |||
275 | return register_value; | ||
276 | } | ||
277 | |||
205 | /* | 278 | /* |
206 | * Returns true if fifo is full. | 279 | * Returns true if fifo is full. |
207 | * | 280 | * |
@@ -228,10 +301,10 @@ static unsigned long SA5_completed(struct ctlr_info *h) | |||
228 | 301 | ||
229 | #ifdef HPSA_DEBUG | 302 | #ifdef HPSA_DEBUG |
230 | if (register_value != FIFO_EMPTY) | 303 | if (register_value != FIFO_EMPTY) |
231 | printk(KERN_INFO "hpsa: Read %lx back from board\n", | 304 | dev_dbg(&h->pdev->dev, "Read %lx back from board\n", |
232 | register_value); | 305 | register_value); |
233 | else | 306 | else |
234 | printk(KERN_INFO "hpsa: FIFO Empty read\n"); | 307 | dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n"); |
235 | #endif | 308 | #endif |
236 | 309 | ||
237 | return register_value; | 310 | return register_value; |
@@ -239,18 +312,28 @@ static unsigned long SA5_completed(struct ctlr_info *h) | |||
239 | /* | 312 | /* |
240 | * Returns true if an interrupt is pending.. | 313 | * Returns true if an interrupt is pending.. |
241 | */ | 314 | */ |
242 | static unsigned long SA5_intr_pending(struct ctlr_info *h) | 315 | static bool SA5_intr_pending(struct ctlr_info *h) |
243 | { | 316 | { |
244 | unsigned long register_value = | 317 | unsigned long register_value = |
245 | readl(h->vaddr + SA5_INTR_STATUS); | 318 | readl(h->vaddr + SA5_INTR_STATUS); |
246 | #ifdef HPSA_DEBUG | 319 | dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value); |
247 | printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value); | 320 | return register_value & SA5_INTR_PENDING; |
248 | #endif /* HPSA_DEBUG */ | ||
249 | if (register_value & SA5_INTR_PENDING) | ||
250 | return 1; | ||
251 | return 0 ; | ||
252 | } | 321 | } |
253 | 322 | ||
323 | static bool SA5_performant_intr_pending(struct ctlr_info *h) | ||
324 | { | ||
325 | unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); | ||
326 | |||
327 | if (!register_value) | ||
328 | return false; | ||
329 | |||
330 | if (h->msi_vector || h->msix_vector) | ||
331 | return true; | ||
332 | |||
333 | /* Read outbound doorbell to flush */ | ||
334 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | ||
335 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; | ||
336 | } | ||
254 | 337 | ||
255 | static struct access_method SA5_access = { | 338 | static struct access_method SA5_access = { |
256 | SA5_submit_command, | 339 | SA5_submit_command, |
@@ -260,14 +343,19 @@ static struct access_method SA5_access = { | |||
260 | SA5_completed, | 343 | SA5_completed, |
261 | }; | 344 | }; |
262 | 345 | ||
346 | static struct access_method SA5_performant_access = { | ||
347 | SA5_submit_command, | ||
348 | SA5_performant_intr_mask, | ||
349 | SA5_fifo_full, | ||
350 | SA5_performant_intr_pending, | ||
351 | SA5_performant_completed, | ||
352 | }; | ||
353 | |||
263 | struct board_type { | 354 | struct board_type { |
264 | __u32 board_id; | 355 | u32 board_id; |
265 | char *product_name; | 356 | char *product_name; |
266 | struct access_method *access; | 357 | struct access_method *access; |
267 | }; | 358 | }; |
268 | 359 | ||
269 | |||
270 | /* end of old hpsa_scsi.h file */ | ||
271 | |||
272 | #endif /* HPSA_H */ | 360 | #endif /* HPSA_H */ |
273 | 361 | ||
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 12d71387ed9a..3e0abdf76689 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h | |||
@@ -101,19 +101,20 @@ | |||
101 | #define CFGTBL_AccCmds 0x00000001l | 101 | #define CFGTBL_AccCmds 0x00000001l |
102 | 102 | ||
103 | #define CFGTBL_Trans_Simple 0x00000002l | 103 | #define CFGTBL_Trans_Simple 0x00000002l |
104 | #define CFGTBL_Trans_Performant 0x00000004l | ||
104 | 105 | ||
105 | #define CFGTBL_BusType_Ultra2 0x00000001l | 106 | #define CFGTBL_BusType_Ultra2 0x00000001l |
106 | #define CFGTBL_BusType_Ultra3 0x00000002l | 107 | #define CFGTBL_BusType_Ultra3 0x00000002l |
107 | #define CFGTBL_BusType_Fibre1G 0x00000100l | 108 | #define CFGTBL_BusType_Fibre1G 0x00000100l |
108 | #define CFGTBL_BusType_Fibre2G 0x00000200l | 109 | #define CFGTBL_BusType_Fibre2G 0x00000200l |
109 | struct vals32 { | 110 | struct vals32 { |
110 | __u32 lower; | 111 | u32 lower; |
111 | __u32 upper; | 112 | u32 upper; |
112 | }; | 113 | }; |
113 | 114 | ||
114 | union u64bit { | 115 | union u64bit { |
115 | struct vals32 val32; | 116 | struct vals32 val32; |
116 | __u64 val; | 117 | u64 val; |
117 | }; | 118 | }; |
118 | 119 | ||
119 | /* FIXME this is a per controller value (barf!) */ | 120 | /* FIXME this is a per controller value (barf!) */ |
@@ -126,34 +127,34 @@ union u64bit { | |||
126 | 127 | ||
127 | #define HPSA_INQUIRY 0x12 | 128 | #define HPSA_INQUIRY 0x12 |
128 | struct InquiryData { | 129 | struct InquiryData { |
129 | __u8 data_byte[36]; | 130 | u8 data_byte[36]; |
130 | }; | 131 | }; |
131 | 132 | ||
132 | #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ | 133 | #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ |
133 | #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ | 134 | #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ |
134 | struct ReportLUNdata { | 135 | struct ReportLUNdata { |
135 | __u8 LUNListLength[4]; | 136 | u8 LUNListLength[4]; |
136 | __u32 reserved; | 137 | u32 reserved; |
137 | __u8 LUN[HPSA_MAX_LUN][8]; | 138 | u8 LUN[HPSA_MAX_LUN][8]; |
138 | }; | 139 | }; |
139 | 140 | ||
140 | struct ReportExtendedLUNdata { | 141 | struct ReportExtendedLUNdata { |
141 | __u8 LUNListLength[4]; | 142 | u8 LUNListLength[4]; |
142 | __u8 extended_response_flag; | 143 | u8 extended_response_flag; |
143 | __u8 reserved[3]; | 144 | u8 reserved[3]; |
144 | __u8 LUN[HPSA_MAX_LUN][24]; | 145 | u8 LUN[HPSA_MAX_LUN][24]; |
145 | }; | 146 | }; |
146 | 147 | ||
147 | struct SenseSubsystem_info { | 148 | struct SenseSubsystem_info { |
148 | __u8 reserved[36]; | 149 | u8 reserved[36]; |
149 | __u8 portname[8]; | 150 | u8 portname[8]; |
150 | __u8 reserved1[1108]; | 151 | u8 reserved1[1108]; |
151 | }; | 152 | }; |
152 | 153 | ||
153 | #define HPSA_READ_CAPACITY 0x25 /* Read Capacity */ | 154 | #define HPSA_READ_CAPACITY 0x25 /* Read Capacity */ |
154 | struct ReadCapdata { | 155 | struct ReadCapdata { |
155 | __u8 total_size[4]; /* Total size in blocks */ | 156 | u8 total_size[4]; /* Total size in blocks */ |
156 | __u8 block_size[4]; /* Size of blocks in bytes */ | 157 | u8 block_size[4]; /* Size of blocks in bytes */ |
157 | }; | 158 | }; |
158 | 159 | ||
159 | #if 0 | 160 | #if 0 |
@@ -174,112 +175,131 @@ struct ReadCapdata { | |||
174 | /* Command List Structure */ | 175 | /* Command List Structure */ |
175 | union SCSI3Addr { | 176 | union SCSI3Addr { |
176 | struct { | 177 | struct { |
177 | __u8 Dev; | 178 | u8 Dev; |
178 | __u8 Bus:6; | 179 | u8 Bus:6; |
179 | __u8 Mode:2; /* b00 */ | 180 | u8 Mode:2; /* b00 */ |
180 | } PeripDev; | 181 | } PeripDev; |
181 | struct { | 182 | struct { |
182 | __u8 DevLSB; | 183 | u8 DevLSB; |
183 | __u8 DevMSB:6; | 184 | u8 DevMSB:6; |
184 | __u8 Mode:2; /* b01 */ | 185 | u8 Mode:2; /* b01 */ |
185 | } LogDev; | 186 | } LogDev; |
186 | struct { | 187 | struct { |
187 | __u8 Dev:5; | 188 | u8 Dev:5; |
188 | __u8 Bus:3; | 189 | u8 Bus:3; |
189 | __u8 Targ:6; | 190 | u8 Targ:6; |
190 | __u8 Mode:2; /* b10 */ | 191 | u8 Mode:2; /* b10 */ |
191 | } LogUnit; | 192 | } LogUnit; |
192 | }; | 193 | }; |
193 | 194 | ||
194 | struct PhysDevAddr { | 195 | struct PhysDevAddr { |
195 | __u32 TargetId:24; | 196 | u32 TargetId:24; |
196 | __u32 Bus:6; | 197 | u32 Bus:6; |
197 | __u32 Mode:2; | 198 | u32 Mode:2; |
198 | /* 2 level target device addr */ | 199 | /* 2 level target device addr */ |
199 | union SCSI3Addr Target[2]; | 200 | union SCSI3Addr Target[2]; |
200 | }; | 201 | }; |
201 | 202 | ||
202 | struct LogDevAddr { | 203 | struct LogDevAddr { |
203 | __u32 VolId:30; | 204 | u32 VolId:30; |
204 | __u32 Mode:2; | 205 | u32 Mode:2; |
205 | __u8 reserved[4]; | 206 | u8 reserved[4]; |
206 | }; | 207 | }; |
207 | 208 | ||
208 | union LUNAddr { | 209 | union LUNAddr { |
209 | __u8 LunAddrBytes[8]; | 210 | u8 LunAddrBytes[8]; |
210 | union SCSI3Addr SCSI3Lun[4]; | 211 | union SCSI3Addr SCSI3Lun[4]; |
211 | struct PhysDevAddr PhysDev; | 212 | struct PhysDevAddr PhysDev; |
212 | struct LogDevAddr LogDev; | 213 | struct LogDevAddr LogDev; |
213 | }; | 214 | }; |
214 | 215 | ||
215 | struct CommandListHeader { | 216 | struct CommandListHeader { |
216 | __u8 ReplyQueue; | 217 | u8 ReplyQueue; |
217 | __u8 SGList; | 218 | u8 SGList; |
218 | __u16 SGTotal; | 219 | u16 SGTotal; |
219 | struct vals32 Tag; | 220 | struct vals32 Tag; |
220 | union LUNAddr LUN; | 221 | union LUNAddr LUN; |
221 | }; | 222 | }; |
222 | 223 | ||
223 | struct RequestBlock { | 224 | struct RequestBlock { |
224 | __u8 CDBLen; | 225 | u8 CDBLen; |
225 | struct { | 226 | struct { |
226 | __u8 Type:3; | 227 | u8 Type:3; |
227 | __u8 Attribute:3; | 228 | u8 Attribute:3; |
228 | __u8 Direction:2; | 229 | u8 Direction:2; |
229 | } Type; | 230 | } Type; |
230 | __u16 Timeout; | 231 | u16 Timeout; |
231 | __u8 CDB[16]; | 232 | u8 CDB[16]; |
232 | }; | 233 | }; |
233 | 234 | ||
234 | struct ErrDescriptor { | 235 | struct ErrDescriptor { |
235 | struct vals32 Addr; | 236 | struct vals32 Addr; |
236 | __u32 Len; | 237 | u32 Len; |
237 | }; | 238 | }; |
238 | 239 | ||
239 | struct SGDescriptor { | 240 | struct SGDescriptor { |
240 | struct vals32 Addr; | 241 | struct vals32 Addr; |
241 | __u32 Len; | 242 | u32 Len; |
242 | __u32 Ext; | 243 | u32 Ext; |
243 | }; | 244 | }; |
244 | 245 | ||
245 | union MoreErrInfo { | 246 | union MoreErrInfo { |
246 | struct { | 247 | struct { |
247 | __u8 Reserved[3]; | 248 | u8 Reserved[3]; |
248 | __u8 Type; | 249 | u8 Type; |
249 | __u32 ErrorInfo; | 250 | u32 ErrorInfo; |
250 | } Common_Info; | 251 | } Common_Info; |
251 | struct { | 252 | struct { |
252 | __u8 Reserved[2]; | 253 | u8 Reserved[2]; |
253 | __u8 offense_size; /* size of offending entry */ | 254 | u8 offense_size; /* size of offending entry */ |
254 | __u8 offense_num; /* byte # of offense 0-base */ | 255 | u8 offense_num; /* byte # of offense 0-base */ |
255 | __u32 offense_value; | 256 | u32 offense_value; |
256 | } Invalid_Cmd; | 257 | } Invalid_Cmd; |
257 | }; | 258 | }; |
258 | struct ErrorInfo { | 259 | struct ErrorInfo { |
259 | __u8 ScsiStatus; | 260 | u8 ScsiStatus; |
260 | __u8 SenseLen; | 261 | u8 SenseLen; |
261 | __u16 CommandStatus; | 262 | u16 CommandStatus; |
262 | __u32 ResidualCnt; | 263 | u32 ResidualCnt; |
263 | union MoreErrInfo MoreErrInfo; | 264 | union MoreErrInfo MoreErrInfo; |
264 | __u8 SenseInfo[SENSEINFOBYTES]; | 265 | u8 SenseInfo[SENSEINFOBYTES]; |
265 | }; | 266 | }; |
266 | /* Command types */ | 267 | /* Command types */ |
267 | #define CMD_IOCTL_PEND 0x01 | 268 | #define CMD_IOCTL_PEND 0x01 |
268 | #define CMD_SCSI 0x03 | 269 | #define CMD_SCSI 0x03 |
269 | 270 | ||
271 | /* This structure needs to be divisible by 32 for new | ||
272 | * indexing method and performant mode. | ||
273 | */ | ||
274 | #define PAD32 32 | ||
275 | #define PAD64DIFF 0 | ||
276 | #define USEEXTRA ((sizeof(void *) - 4)/4) | ||
277 | #define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA) | ||
278 | |||
279 | #define DIRECT_LOOKUP_SHIFT 5 | ||
280 | #define DIRECT_LOOKUP_BIT 0x10 | ||
281 | |||
282 | #define HPSA_ERROR_BIT 0x02 | ||
270 | struct ctlr_info; /* defined in hpsa.h */ | 283 | struct ctlr_info; /* defined in hpsa.h */ |
271 | /* The size of this structure needs to be divisible by 8 | 284 | /* The size of this structure needs to be divisible by 32 |
272 | * od on all architectures, because the controller uses 2 | 285 | * on all architectures because low 5 bits of the addresses |
273 | * lower bits of the address, and the driver uses 1 lower | 286 | * are used as follows: |
274 | * bit (3 bits total.) | 287 | * |
288 | * bit 0: to device, used to indicate "performant mode" command | ||
289 | * from device, indidcates error status. | ||
290 | * bit 1-3: to device, indicates block fetch table entry for | ||
291 | * reducing DMA in fetching commands from host memory. | ||
292 | * bit 4: used to indicate whether tag is "direct lookup" (index), | ||
293 | * or a bus address. | ||
275 | */ | 294 | */ |
295 | |||
276 | struct CommandList { | 296 | struct CommandList { |
277 | struct CommandListHeader Header; | 297 | struct CommandListHeader Header; |
278 | struct RequestBlock Request; | 298 | struct RequestBlock Request; |
279 | struct ErrDescriptor ErrDesc; | 299 | struct ErrDescriptor ErrDesc; |
280 | struct SGDescriptor SG[MAXSGENTRIES]; | 300 | struct SGDescriptor SG[MAXSGENTRIES]; |
281 | /* information associated with the command */ | 301 | /* information associated with the command */ |
282 | __u32 busaddr; /* physical addr of this record */ | 302 | u32 busaddr; /* physical addr of this record */ |
283 | struct ErrorInfo *err_info; /* pointer to the allocated mem */ | 303 | struct ErrorInfo *err_info; /* pointer to the allocated mem */ |
284 | struct ctlr_info *h; | 304 | struct ctlr_info *h; |
285 | int cmd_type; | 305 | int cmd_type; |
@@ -291,35 +311,63 @@ struct CommandList { | |||
291 | struct completion *waiting; | 311 | struct completion *waiting; |
292 | int retry_count; | 312 | int retry_count; |
293 | void *scsi_cmd; | 313 | void *scsi_cmd; |
314 | |||
315 | /* on 64 bit architectures, to get this to be 32-byte-aligned | ||
316 | * it so happens we need no padding, on 32 bit systems, | ||
317 | * we need 8 bytes of padding. This does that. | ||
318 | */ | ||
319 | #define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8) | ||
320 | u8 pad[COMMANDLIST_PAD]; | ||
321 | |||
294 | }; | 322 | }; |
295 | 323 | ||
296 | /* Configuration Table Structure */ | 324 | /* Configuration Table Structure */ |
297 | struct HostWrite { | 325 | struct HostWrite { |
298 | __u32 TransportRequest; | 326 | u32 TransportRequest; |
299 | __u32 Reserved; | 327 | u32 Reserved; |
300 | __u32 CoalIntDelay; | 328 | u32 CoalIntDelay; |
301 | __u32 CoalIntCount; | 329 | u32 CoalIntCount; |
302 | }; | 330 | }; |
303 | 331 | ||
332 | #define SIMPLE_MODE 0x02 | ||
333 | #define PERFORMANT_MODE 0x04 | ||
334 | #define MEMQ_MODE 0x08 | ||
335 | |||
304 | struct CfgTable { | 336 | struct CfgTable { |
305 | __u8 Signature[4]; | 337 | u8 Signature[4]; |
306 | __u32 SpecValence; | 338 | u32 SpecValence; |
307 | __u32 TransportSupport; | 339 | u32 TransportSupport; |
308 | __u32 TransportActive; | 340 | u32 TransportActive; |
309 | struct HostWrite HostWrite; | 341 | struct HostWrite HostWrite; |
310 | __u32 CmdsOutMax; | 342 | u32 CmdsOutMax; |
311 | __u32 BusTypes; | 343 | u32 BusTypes; |
312 | __u32 Reserved; | 344 | u32 TransMethodOffset; |
313 | __u8 ServerName[16]; | 345 | u8 ServerName[16]; |
314 | __u32 HeartBeat; | 346 | u32 HeartBeat; |
315 | __u32 SCSI_Prefetch; | 347 | u32 SCSI_Prefetch; |
348 | u32 MaxScatterGatherElements; | ||
349 | u32 MaxLogicalUnits; | ||
350 | u32 MaxPhysicalDevices; | ||
351 | u32 MaxPhysicalDrivesPerLogicalUnit; | ||
352 | u32 MaxPerformantModeCommands; | ||
353 | }; | ||
354 | |||
355 | #define NUM_BLOCKFETCH_ENTRIES 8 | ||
356 | struct TransTable_struct { | ||
357 | u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES]; | ||
358 | u32 RepQSize; | ||
359 | u32 RepQCount; | ||
360 | u32 RepQCtrAddrLow32; | ||
361 | u32 RepQCtrAddrHigh32; | ||
362 | u32 RepQAddr0Low32; | ||
363 | u32 RepQAddr0High32; | ||
316 | }; | 364 | }; |
317 | 365 | ||
318 | struct hpsa_pci_info { | 366 | struct hpsa_pci_info { |
319 | unsigned char bus; | 367 | unsigned char bus; |
320 | unsigned char dev_fn; | 368 | unsigned char dev_fn; |
321 | unsigned short domain; | 369 | unsigned short domain; |
322 | __u32 board_id; | 370 | u32 board_id; |
323 | }; | 371 | }; |
324 | 372 | ||
325 | #pragma pack() | 373 | #pragma pack() |
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c index 9c1e6a5b5af0..9a4b69d4f4eb 100644 --- a/drivers/scsi/ibmmca.c +++ b/drivers/scsi/ibmmca.c | |||
@@ -2336,7 +2336,7 @@ static int option_setup(char *str) | |||
2336 | char *cur = str; | 2336 | char *cur = str; |
2337 | int i = 1; | 2337 | int i = 1; |
2338 | 2338 | ||
2339 | while (cur && isdigit(*cur) && i <= IM_MAX_HOSTS) { | 2339 | while (cur && isdigit(*cur) && i < IM_MAX_HOSTS) { |
2340 | ints[i++] = simple_strtoul(cur, NULL, 0); | 2340 | ints[i++] = simple_strtoul(cur, NULL, 0); |
2341 | if ((cur = strchr(cur, ',')) != NULL) | 2341 | if ((cur = strchr(cur, ',')) != NULL) |
2342 | cur++; | 2342 | cur++; |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index e475b7957c2d..e3a18e0ef276 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -40,7 +40,7 @@ | |||
40 | * (CRQ), which is just a buffer of 16 byte entries in the receiver's | 40 | * (CRQ), which is just a buffer of 16 byte entries in the receiver's |
41 | * Senders cannot access the buffer directly, but send messages by | 41 | * Senders cannot access the buffer directly, but send messages by |
42 | * making a hypervisor call and passing in the 16 bytes. The hypervisor | 42 | * making a hypervisor call and passing in the 16 bytes. The hypervisor |
43 | * puts the message in the next 16 byte space in round-robbin fashion, | 43 | * puts the message in the next 16 byte space in round-robin fashion, |
44 | * turns on the high order bit of the message (the valid bit), and | 44 | * turns on the high order bit of the message (the valid bit), and |
45 | * generates an interrupt to the receiver (if interrupts are turned on.) | 45 | * generates an interrupt to the receiver (if interrupts are turned on.) |
46 | * The receiver just turns off the valid bit when they have copied out | 46 | * The receiver just turns off the valid bit when they have copied out |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 517da3fd89d3..8a89ba900588 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -584,9 +584,10 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
584 | struct iscsi_conn *conn = cls_conn->dd_data; | 584 | struct iscsi_conn *conn = cls_conn->dd_data; |
585 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 585 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
586 | struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; | 586 | struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; |
587 | struct socket *sock = tcp_sw_conn->sock; | ||
587 | 588 | ||
588 | /* userspace may have goofed up and not bound us */ | 589 | /* userspace may have goofed up and not bound us */ |
589 | if (!tcp_sw_conn->sock) | 590 | if (!sock) |
590 | return; | 591 | return; |
591 | /* | 592 | /* |
592 | * Make sure our recv side is stopped. | 593 | * Make sure our recv side is stopped. |
@@ -597,6 +598,11 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
597 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | 598 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
598 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); | 599 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); |
599 | 600 | ||
601 | if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) { | ||
602 | sock->sk->sk_err = EIO; | ||
603 | wake_up_interruptible(sock->sk->sk_sleep); | ||
604 | } | ||
605 | |||
600 | iscsi_conn_stop(cls_conn, flag); | 606 | iscsi_conn_stop(cls_conn, flag); |
601 | iscsi_sw_tcp_release_conn(conn); | 607 | iscsi_sw_tcp_release_conn(conn); |
602 | } | 608 | } |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c28a712fd4db..703eb6a88790 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1919,10 +1919,11 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) | |||
1919 | static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) | 1919 | static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) |
1920 | { | 1920 | { |
1921 | enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; | 1921 | enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; |
1922 | struct iscsi_task *task = NULL; | 1922 | struct iscsi_task *task = NULL, *running_task; |
1923 | struct iscsi_cls_session *cls_session; | 1923 | struct iscsi_cls_session *cls_session; |
1924 | struct iscsi_session *session; | 1924 | struct iscsi_session *session; |
1925 | struct iscsi_conn *conn; | 1925 | struct iscsi_conn *conn; |
1926 | int i; | ||
1926 | 1927 | ||
1927 | cls_session = starget_to_session(scsi_target(sc->device)); | 1928 | cls_session = starget_to_session(scsi_target(sc->device)); |
1928 | session = cls_session->dd_data; | 1929 | session = cls_session->dd_data; |
@@ -1947,8 +1948,15 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) | |||
1947 | } | 1948 | } |
1948 | 1949 | ||
1949 | task = (struct iscsi_task *)sc->SCp.ptr; | 1950 | task = (struct iscsi_task *)sc->SCp.ptr; |
1950 | if (!task) | 1951 | if (!task) { |
1952 | /* | ||
1953 | * Raced with completion. Just reset timer, and let it | ||
1954 | * complete normally | ||
1955 | */ | ||
1956 | rc = BLK_EH_RESET_TIMER; | ||
1951 | goto done; | 1957 | goto done; |
1958 | } | ||
1959 | |||
1952 | /* | 1960 | /* |
1953 | * If we have sent (at least queued to the network layer) a pdu or | 1961 | * If we have sent (at least queued to the network layer) a pdu or |
1954 | * recvd one for the task since the last timeout ask for | 1962 | * recvd one for the task since the last timeout ask for |
@@ -1956,10 +1964,10 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) | |||
1956 | * we can check if it is the task or connection when we send the | 1964 | * we can check if it is the task or connection when we send the |
1957 | * nop as a ping. | 1965 | * nop as a ping. |
1958 | */ | 1966 | */ |
1959 | if (time_after_eq(task->last_xfer, task->last_timeout)) { | 1967 | if (time_after(task->last_xfer, task->last_timeout)) { |
1960 | ISCSI_DBG_EH(session, "Command making progress. Asking " | 1968 | ISCSI_DBG_EH(session, "Command making progress. Asking " |
1961 | "scsi-ml for more time to complete. " | 1969 | "scsi-ml for more time to complete. " |
1962 | "Last data recv at %lu. Last timeout was at " | 1970 | "Last data xfer at %lu. Last timeout was at " |
1963 | "%lu\n.", task->last_xfer, task->last_timeout); | 1971 | "%lu\n.", task->last_xfer, task->last_timeout); |
1964 | task->have_checked_conn = false; | 1972 | task->have_checked_conn = false; |
1965 | rc = BLK_EH_RESET_TIMER; | 1973 | rc = BLK_EH_RESET_TIMER; |
@@ -1977,6 +1985,43 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) | |||
1977 | goto done; | 1985 | goto done; |
1978 | } | 1986 | } |
1979 | 1987 | ||
1988 | for (i = 0; i < conn->session->cmds_max; i++) { | ||
1989 | running_task = conn->session->cmds[i]; | ||
1990 | if (!running_task->sc || running_task == task || | ||
1991 | running_task->state != ISCSI_TASK_RUNNING) | ||
1992 | continue; | ||
1993 | |||
1994 | /* | ||
1995 | * Only check if cmds started before this one have made | ||
1996 | * progress, or this could never fail | ||
1997 | */ | ||
1998 | if (time_after(running_task->sc->jiffies_at_alloc, | ||
1999 | task->sc->jiffies_at_alloc)) | ||
2000 | continue; | ||
2001 | |||
2002 | if (time_after(running_task->last_xfer, task->last_timeout)) { | ||
2003 | /* | ||
2004 | * This task has not made progress, but a task | ||
2005 | * started before us has transferred data since | ||
2006 | * we started/last-checked. We could be queueing | ||
2007 | * too many tasks or the LU is bad. | ||
2008 | * | ||
2009 | * If the device is bad the cmds ahead of us on | ||
2010 | * other devs will complete, and this loop will | ||
2011 | * eventually fail starting the scsi eh. | ||
2012 | */ | ||
2013 | ISCSI_DBG_EH(session, "Command has not made progress " | ||
2014 | "but commands ahead of it have. " | ||
2015 | "Asking scsi-ml for more time to " | ||
2016 | "complete. Our last xfer vs running task " | ||
2017 | "last xfer %lu/%lu. Last check %lu.\n", | ||
2018 | task->last_xfer, running_task->last_xfer, | ||
2019 | task->last_timeout); | ||
2020 | rc = BLK_EH_RESET_TIMER; | ||
2021 | goto done; | ||
2022 | } | ||
2023 | } | ||
2024 | |||
1980 | /* Assumes nop timeout is shorter than scsi cmd timeout */ | 2025 | /* Assumes nop timeout is shorter than scsi cmd timeout */ |
1981 | if (task->have_checked_conn) | 2026 | if (task->have_checked_conn) |
1982 | goto done; | 2027 | goto done; |
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c index ab19b3b4be52..22775165bf6a 100644 --- a/drivers/scsi/libsrp.c +++ b/drivers/scsi/libsrp.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * SCSI RDAM Protocol lib functions | 2 | * SCSI RDMA Protocol lib functions |
3 | * | 3 | * |
4 | * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org> | 4 | * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org> |
5 | * | 5 | * |
@@ -328,7 +328,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd, | |||
328 | int offset, err = 0; | 328 | int offset, err = 0; |
329 | u8 format; | 329 | u8 format; |
330 | 330 | ||
331 | offset = cmd->add_cdb_len * 4; | 331 | offset = cmd->add_cdb_len & ~3; |
332 | 332 | ||
333 | dir = srp_cmd_direction(cmd); | 333 | dir = srp_cmd_direction(cmd); |
334 | if (dir == DMA_FROM_DEVICE) | 334 | if (dir == DMA_FROM_DEVICE) |
@@ -366,7 +366,7 @@ static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir) | |||
366 | { | 366 | { |
367 | struct srp_direct_buf *md; | 367 | struct srp_direct_buf *md; |
368 | struct srp_indirect_buf *id; | 368 | struct srp_indirect_buf *id; |
369 | int len = 0, offset = cmd->add_cdb_len * 4; | 369 | int len = 0, offset = cmd->add_cdb_len & ~3; |
370 | u8 fmt; | 370 | u8 fmt; |
371 | 371 | ||
372 | if (dir == DMA_TO_DEVICE) | 372 | if (dir == DMA_TO_DEVICE) |
@@ -440,6 +440,6 @@ int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info, | |||
440 | } | 440 | } |
441 | EXPORT_SYMBOL_GPL(srp_cmd_queue); | 441 | EXPORT_SYMBOL_GPL(srp_cmd_queue); |
442 | 442 | ||
443 | MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions"); | 443 | MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions"); |
444 | MODULE_AUTHOR("FUJITA Tomonori"); | 444 | MODULE_AUTHOR("FUJITA Tomonori"); |
445 | MODULE_LICENSE("GPL"); | 445 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 1cc23a69db5e..84b696463a58 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -315,6 +315,9 @@ struct lpfc_vport { | |||
315 | #define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ | 315 | #define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ |
316 | #define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ | 316 | #define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ |
317 | #define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */ | 317 | #define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */ |
318 | #define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */ | ||
319 | #define FC_VFI_REGISTERED 0x800000 /* VFI is registered */ | ||
320 | #define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */ | ||
318 | 321 | ||
319 | uint32_t ct_flags; | 322 | uint32_t ct_flags; |
320 | #define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ | 323 | #define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ |
@@ -448,6 +451,8 @@ struct unsol_rcv_ct_ctx { | |||
448 | uint32_t ctxt_id; | 451 | uint32_t ctxt_id; |
449 | uint32_t SID; | 452 | uint32_t SID; |
450 | uint32_t oxid; | 453 | uint32_t oxid; |
454 | uint32_t flags; | ||
455 | #define UNSOL_VALID 0x00000001 | ||
451 | }; | 456 | }; |
452 | 457 | ||
453 | struct lpfc_hba { | 458 | struct lpfc_hba { |
@@ -499,6 +504,10 @@ struct lpfc_hba { | |||
499 | (struct lpfc_hba *); | 504 | (struct lpfc_hba *); |
500 | void (*lpfc_stop_port) | 505 | void (*lpfc_stop_port) |
501 | (struct lpfc_hba *); | 506 | (struct lpfc_hba *); |
507 | int (*lpfc_hba_init_link) | ||
508 | (struct lpfc_hba *); | ||
509 | int (*lpfc_hba_down_link) | ||
510 | (struct lpfc_hba *); | ||
502 | 511 | ||
503 | 512 | ||
504 | /* SLI4 specific HBA data structure */ | 513 | /* SLI4 specific HBA data structure */ |
@@ -613,6 +622,7 @@ struct lpfc_hba { | |||
613 | uint32_t cfg_enable_bg; | 622 | uint32_t cfg_enable_bg; |
614 | uint32_t cfg_log_verbose; | 623 | uint32_t cfg_log_verbose; |
615 | uint32_t cfg_aer_support; | 624 | uint32_t cfg_aer_support; |
625 | uint32_t cfg_suppress_link_up; | ||
616 | 626 | ||
617 | lpfc_vpd_t vpd; /* vital product data */ | 627 | lpfc_vpd_t vpd; /* vital product data */ |
618 | 628 | ||
@@ -790,7 +800,7 @@ struct lpfc_hba { | |||
790 | uint16_t vlan_id; | 800 | uint16_t vlan_id; |
791 | struct list_head fcf_conn_rec_list; | 801 | struct list_head fcf_conn_rec_list; |
792 | 802 | ||
793 | struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */ | 803 | spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ |
794 | struct list_head ct_ev_waiters; | 804 | struct list_head ct_ev_waiters; |
795 | struct unsol_rcv_ct_ctx ct_ctx[64]; | 805 | struct unsol_rcv_ct_ctx ct_ctx[64]; |
796 | uint32_t ctx_idx; | 806 | uint32_t ctx_idx; |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 91542f786edf..c992e8328f9e 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -482,6 +482,41 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, | |||
482 | } | 482 | } |
483 | 483 | ||
484 | /** | 484 | /** |
485 | * lpfc_link_state_store - Transition the link_state on an HBA port | ||
486 | * @dev: class device that is converted into a Scsi_host. | ||
487 | * @attr: device attribute, not used. | ||
488 | * @buf: one or more lpfc_polling_flags values. | ||
489 | * @count: not used. | ||
490 | * | ||
491 | * Returns: | ||
492 | * -EINVAL if the buffer is not "up" or "down" | ||
493 | * return from link state change function if non-zero | ||
494 | * length of the buf on success | ||
495 | **/ | ||
496 | static ssize_t | ||
497 | lpfc_link_state_store(struct device *dev, struct device_attribute *attr, | ||
498 | const char *buf, size_t count) | ||
499 | { | ||
500 | struct Scsi_Host *shost = class_to_shost(dev); | ||
501 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
502 | struct lpfc_hba *phba = vport->phba; | ||
503 | |||
504 | int status = -EINVAL; | ||
505 | |||
506 | if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && | ||
507 | (phba->link_state == LPFC_LINK_DOWN)) | ||
508 | status = phba->lpfc_hba_init_link(phba); | ||
509 | else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && | ||
510 | (phba->link_state >= LPFC_LINK_UP)) | ||
511 | status = phba->lpfc_hba_down_link(phba); | ||
512 | |||
513 | if (status == 0) | ||
514 | return strlen(buf); | ||
515 | else | ||
516 | return status; | ||
517 | } | ||
518 | |||
519 | /** | ||
485 | * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports | 520 | * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports |
486 | * @dev: class device that is converted into a Scsi_host. | 521 | * @dev: class device that is converted into a Scsi_host. |
487 | * @attr: device attribute, not used. | 522 | * @attr: device attribute, not used. |
@@ -1219,7 +1254,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
1219 | struct Scsi_Host *shost = class_to_shost(dev);\ | 1254 | struct Scsi_Host *shost = class_to_shost(dev);\ |
1220 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ | 1255 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ |
1221 | struct lpfc_hba *phba = vport->phba;\ | 1256 | struct lpfc_hba *phba = vport->phba;\ |
1222 | int val = 0;\ | 1257 | uint val = 0;\ |
1223 | val = phba->cfg_##attr;\ | 1258 | val = phba->cfg_##attr;\ |
1224 | return snprintf(buf, PAGE_SIZE, "%d\n",\ | 1259 | return snprintf(buf, PAGE_SIZE, "%d\n",\ |
1225 | phba->cfg_##attr);\ | 1260 | phba->cfg_##attr);\ |
@@ -1247,7 +1282,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
1247 | struct Scsi_Host *shost = class_to_shost(dev);\ | 1282 | struct Scsi_Host *shost = class_to_shost(dev);\ |
1248 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ | 1283 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ |
1249 | struct lpfc_hba *phba = vport->phba;\ | 1284 | struct lpfc_hba *phba = vport->phba;\ |
1250 | int val = 0;\ | 1285 | uint val = 0;\ |
1251 | val = phba->cfg_##attr;\ | 1286 | val = phba->cfg_##attr;\ |
1252 | return snprintf(buf, PAGE_SIZE, "%#x\n",\ | 1287 | return snprintf(buf, PAGE_SIZE, "%#x\n",\ |
1253 | phba->cfg_##attr);\ | 1288 | phba->cfg_##attr);\ |
@@ -1274,7 +1309,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
1274 | **/ | 1309 | **/ |
1275 | #define lpfc_param_init(attr, default, minval, maxval) \ | 1310 | #define lpfc_param_init(attr, default, minval, maxval) \ |
1276 | static int \ | 1311 | static int \ |
1277 | lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ | 1312 | lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ |
1278 | { \ | 1313 | { \ |
1279 | if (val >= minval && val <= maxval) {\ | 1314 | if (val >= minval && val <= maxval) {\ |
1280 | phba->cfg_##attr = val;\ | 1315 | phba->cfg_##attr = val;\ |
@@ -1309,7 +1344,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ | |||
1309 | **/ | 1344 | **/ |
1310 | #define lpfc_param_set(attr, default, minval, maxval) \ | 1345 | #define lpfc_param_set(attr, default, minval, maxval) \ |
1311 | static int \ | 1346 | static int \ |
1312 | lpfc_##attr##_set(struct lpfc_hba *phba, int val) \ | 1347 | lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ |
1313 | { \ | 1348 | { \ |
1314 | if (val >= minval && val <= maxval) {\ | 1349 | if (val >= minval && val <= maxval) {\ |
1315 | phba->cfg_##attr = val;\ | 1350 | phba->cfg_##attr = val;\ |
@@ -1350,7 +1385,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ | |||
1350 | struct Scsi_Host *shost = class_to_shost(dev);\ | 1385 | struct Scsi_Host *shost = class_to_shost(dev);\ |
1351 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ | 1386 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ |
1352 | struct lpfc_hba *phba = vport->phba;\ | 1387 | struct lpfc_hba *phba = vport->phba;\ |
1353 | int val=0;\ | 1388 | uint val = 0;\ |
1354 | if (!isdigit(buf[0]))\ | 1389 | if (!isdigit(buf[0]))\ |
1355 | return -EINVAL;\ | 1390 | return -EINVAL;\ |
1356 | if (sscanf(buf, "%i", &val) != 1)\ | 1391 | if (sscanf(buf, "%i", &val) != 1)\ |
@@ -1382,7 +1417,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
1382 | { \ | 1417 | { \ |
1383 | struct Scsi_Host *shost = class_to_shost(dev);\ | 1418 | struct Scsi_Host *shost = class_to_shost(dev);\ |
1384 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ | 1419 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ |
1385 | int val = 0;\ | 1420 | uint val = 0;\ |
1386 | val = vport->cfg_##attr;\ | 1421 | val = vport->cfg_##attr;\ |
1387 | return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ | 1422 | return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ |
1388 | } | 1423 | } |
@@ -1409,7 +1444,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
1409 | { \ | 1444 | { \ |
1410 | struct Scsi_Host *shost = class_to_shost(dev);\ | 1445 | struct Scsi_Host *shost = class_to_shost(dev);\ |
1411 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ | 1446 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ |
1412 | int val = 0;\ | 1447 | uint val = 0;\ |
1413 | val = vport->cfg_##attr;\ | 1448 | val = vport->cfg_##attr;\ |
1414 | return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ | 1449 | return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ |
1415 | } | 1450 | } |
@@ -1434,7 +1469,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
1434 | **/ | 1469 | **/ |
1435 | #define lpfc_vport_param_init(attr, default, minval, maxval) \ | 1470 | #define lpfc_vport_param_init(attr, default, minval, maxval) \ |
1436 | static int \ | 1471 | static int \ |
1437 | lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ | 1472 | lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ |
1438 | { \ | 1473 | { \ |
1439 | if (val >= minval && val <= maxval) {\ | 1474 | if (val >= minval && val <= maxval) {\ |
1440 | vport->cfg_##attr = val;\ | 1475 | vport->cfg_##attr = val;\ |
@@ -1466,7 +1501,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ | |||
1466 | **/ | 1501 | **/ |
1467 | #define lpfc_vport_param_set(attr, default, minval, maxval) \ | 1502 | #define lpfc_vport_param_set(attr, default, minval, maxval) \ |
1468 | static int \ | 1503 | static int \ |
1469 | lpfc_##attr##_set(struct lpfc_vport *vport, int val) \ | 1504 | lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ |
1470 | { \ | 1505 | { \ |
1471 | if (val >= minval && val <= maxval) {\ | 1506 | if (val >= minval && val <= maxval) {\ |
1472 | vport->cfg_##attr = val;\ | 1507 | vport->cfg_##attr = val;\ |
@@ -1502,7 +1537,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ | |||
1502 | { \ | 1537 | { \ |
1503 | struct Scsi_Host *shost = class_to_shost(dev);\ | 1538 | struct Scsi_Host *shost = class_to_shost(dev);\ |
1504 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ | 1539 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ |
1505 | int val=0;\ | 1540 | uint val = 0;\ |
1506 | if (!isdigit(buf[0]))\ | 1541 | if (!isdigit(buf[0]))\ |
1507 | return -EINVAL;\ | 1542 | return -EINVAL;\ |
1508 | if (sscanf(buf, "%i", &val) != 1)\ | 1543 | if (sscanf(buf, "%i", &val) != 1)\ |
@@ -1515,22 +1550,22 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ | |||
1515 | 1550 | ||
1516 | 1551 | ||
1517 | #define LPFC_ATTR(name, defval, minval, maxval, desc) \ | 1552 | #define LPFC_ATTR(name, defval, minval, maxval, desc) \ |
1518 | static int lpfc_##name = defval;\ | 1553 | static uint lpfc_##name = defval;\ |
1519 | module_param(lpfc_##name, int, 0);\ | 1554 | module_param(lpfc_##name, uint, 0);\ |
1520 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1555 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1521 | lpfc_param_init(name, defval, minval, maxval) | 1556 | lpfc_param_init(name, defval, minval, maxval) |
1522 | 1557 | ||
1523 | #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ | 1558 | #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ |
1524 | static int lpfc_##name = defval;\ | 1559 | static uint lpfc_##name = defval;\ |
1525 | module_param(lpfc_##name, int, 0);\ | 1560 | module_param(lpfc_##name, uint, 0);\ |
1526 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1561 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1527 | lpfc_param_show(name)\ | 1562 | lpfc_param_show(name)\ |
1528 | lpfc_param_init(name, defval, minval, maxval)\ | 1563 | lpfc_param_init(name, defval, minval, maxval)\ |
1529 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) | 1564 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) |
1530 | 1565 | ||
1531 | #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ | 1566 | #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ |
1532 | static int lpfc_##name = defval;\ | 1567 | static uint lpfc_##name = defval;\ |
1533 | module_param(lpfc_##name, int, 0);\ | 1568 | module_param(lpfc_##name, uint, 0);\ |
1534 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1569 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1535 | lpfc_param_show(name)\ | 1570 | lpfc_param_show(name)\ |
1536 | lpfc_param_init(name, defval, minval, maxval)\ | 1571 | lpfc_param_init(name, defval, minval, maxval)\ |
@@ -1540,16 +1575,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ | |||
1540 | lpfc_##name##_show, lpfc_##name##_store) | 1575 | lpfc_##name##_show, lpfc_##name##_store) |
1541 | 1576 | ||
1542 | #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ | 1577 | #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ |
1543 | static int lpfc_##name = defval;\ | 1578 | static uint lpfc_##name = defval;\ |
1544 | module_param(lpfc_##name, int, 0);\ | 1579 | module_param(lpfc_##name, uint, 0);\ |
1545 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1580 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1546 | lpfc_param_hex_show(name)\ | 1581 | lpfc_param_hex_show(name)\ |
1547 | lpfc_param_init(name, defval, minval, maxval)\ | 1582 | lpfc_param_init(name, defval, minval, maxval)\ |
1548 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) | 1583 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) |
1549 | 1584 | ||
1550 | #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ | 1585 | #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ |
1551 | static int lpfc_##name = defval;\ | 1586 | static uint lpfc_##name = defval;\ |
1552 | module_param(lpfc_##name, int, 0);\ | 1587 | module_param(lpfc_##name, uint, 0);\ |
1553 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1588 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1554 | lpfc_param_hex_show(name)\ | 1589 | lpfc_param_hex_show(name)\ |
1555 | lpfc_param_init(name, defval, minval, maxval)\ | 1590 | lpfc_param_init(name, defval, minval, maxval)\ |
@@ -1559,22 +1594,22 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ | |||
1559 | lpfc_##name##_show, lpfc_##name##_store) | 1594 | lpfc_##name##_show, lpfc_##name##_store) |
1560 | 1595 | ||
1561 | #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ | 1596 | #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ |
1562 | static int lpfc_##name = defval;\ | 1597 | static uint lpfc_##name = defval;\ |
1563 | module_param(lpfc_##name, int, 0);\ | 1598 | module_param(lpfc_##name, uint, 0);\ |
1564 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1599 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1565 | lpfc_vport_param_init(name, defval, minval, maxval) | 1600 | lpfc_vport_param_init(name, defval, minval, maxval) |
1566 | 1601 | ||
1567 | #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ | 1602 | #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ |
1568 | static int lpfc_##name = defval;\ | 1603 | static uint lpfc_##name = defval;\ |
1569 | module_param(lpfc_##name, int, 0);\ | 1604 | module_param(lpfc_##name, uint, 0);\ |
1570 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1605 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1571 | lpfc_vport_param_show(name)\ | 1606 | lpfc_vport_param_show(name)\ |
1572 | lpfc_vport_param_init(name, defval, minval, maxval)\ | 1607 | lpfc_vport_param_init(name, defval, minval, maxval)\ |
1573 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) | 1608 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) |
1574 | 1609 | ||
1575 | #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ | 1610 | #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ |
1576 | static int lpfc_##name = defval;\ | 1611 | static uint lpfc_##name = defval;\ |
1577 | module_param(lpfc_##name, int, 0);\ | 1612 | module_param(lpfc_##name, uint, 0);\ |
1578 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1613 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1579 | lpfc_vport_param_show(name)\ | 1614 | lpfc_vport_param_show(name)\ |
1580 | lpfc_vport_param_init(name, defval, minval, maxval)\ | 1615 | lpfc_vport_param_init(name, defval, minval, maxval)\ |
@@ -1584,16 +1619,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ | |||
1584 | lpfc_##name##_show, lpfc_##name##_store) | 1619 | lpfc_##name##_show, lpfc_##name##_store) |
1585 | 1620 | ||
1586 | #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ | 1621 | #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ |
1587 | static int lpfc_##name = defval;\ | 1622 | static uint lpfc_##name = defval;\ |
1588 | module_param(lpfc_##name, int, 0);\ | 1623 | module_param(lpfc_##name, uint, 0);\ |
1589 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1624 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1590 | lpfc_vport_param_hex_show(name)\ | 1625 | lpfc_vport_param_hex_show(name)\ |
1591 | lpfc_vport_param_init(name, defval, minval, maxval)\ | 1626 | lpfc_vport_param_init(name, defval, minval, maxval)\ |
1592 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) | 1627 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) |
1593 | 1628 | ||
1594 | #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ | 1629 | #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ |
1595 | static int lpfc_##name = defval;\ | 1630 | static uint lpfc_##name = defval;\ |
1596 | module_param(lpfc_##name, int, 0);\ | 1631 | module_param(lpfc_##name, uint, 0);\ |
1597 | MODULE_PARM_DESC(lpfc_##name, desc);\ | 1632 | MODULE_PARM_DESC(lpfc_##name, desc);\ |
1598 | lpfc_vport_param_hex_show(name)\ | 1633 | lpfc_vport_param_hex_show(name)\ |
1599 | lpfc_vport_param_init(name, defval, minval, maxval)\ | 1634 | lpfc_vport_param_init(name, defval, minval, maxval)\ |
@@ -1614,7 +1649,8 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); | |||
1614 | static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); | 1649 | static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); |
1615 | static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); | 1650 | static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); |
1616 | static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); | 1651 | static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); |
1617 | static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL); | 1652 | static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show, |
1653 | lpfc_link_state_store); | ||
1618 | static DEVICE_ATTR(option_rom_version, S_IRUGO, | 1654 | static DEVICE_ATTR(option_rom_version, S_IRUGO, |
1619 | lpfc_option_rom_version_show, NULL); | 1655 | lpfc_option_rom_version_show, NULL); |
1620 | static DEVICE_ATTR(num_discovered_ports, S_IRUGO, | 1656 | static DEVICE_ATTR(num_discovered_ports, S_IRUGO, |
@@ -1897,6 +1933,15 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, | |||
1897 | lpfc_enable_npiv_show, NULL); | 1933 | lpfc_enable_npiv_show, NULL); |
1898 | 1934 | ||
1899 | /* | 1935 | /* |
1936 | # lpfc_suppress_link_up: Bring link up at initialization | ||
1937 | # 0x0 = bring link up (issue MBX_INIT_LINK) | ||
1938 | # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) | ||
1939 | # 0x2 = never bring up link | ||
1940 | # Default value is 0. | ||
1941 | */ | ||
1942 | LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization"); | ||
1943 | |||
1944 | /* | ||
1900 | # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear | 1945 | # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear |
1901 | # until the timer expires. Value range is [0,255]. Default value is 30. | 1946 | # until the timer expires. Value range is [0,255]. Default value is 30. |
1902 | */ | 1947 | */ |
@@ -3114,12 +3159,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, | |||
3114 | /* | 3159 | /* |
3115 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that | 3160 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that |
3116 | # support this feature | 3161 | # support this feature |
3117 | # 0 = MSI disabled (default) | 3162 | # 0 = MSI disabled |
3118 | # 1 = MSI enabled | 3163 | # 1 = MSI enabled |
3119 | # 2 = MSI-X enabled | 3164 | # 2 = MSI-X enabled (default) |
3120 | # Value range is [0,2]. Default value is 0. | 3165 | # Value range is [0,2]. Default value is 2. |
3121 | */ | 3166 | */ |
3122 | LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " | 3167 | LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " |
3123 | "MSI-X (2), if possible"); | 3168 | "MSI-X (2), if possible"); |
3124 | 3169 | ||
3125 | /* | 3170 | /* |
@@ -3278,6 +3323,7 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
3278 | &dev_attr_lpfc_prot_sg_seg_cnt, | 3323 | &dev_attr_lpfc_prot_sg_seg_cnt, |
3279 | &dev_attr_lpfc_aer_support, | 3324 | &dev_attr_lpfc_aer_support, |
3280 | &dev_attr_lpfc_aer_state_cleanup, | 3325 | &dev_attr_lpfc_aer_state_cleanup, |
3326 | &dev_attr_lpfc_suppress_link_up, | ||
3281 | NULL, | 3327 | NULL, |
3282 | }; | 3328 | }; |
3283 | 3329 | ||
@@ -4456,7 +4502,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
4456 | lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); | 4502 | lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); |
4457 | lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); | 4503 | lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); |
4458 | lpfc_aer_support_init(phba, lpfc_aer_support); | 4504 | lpfc_aer_support_init(phba, lpfc_aer_support); |
4459 | 4505 | lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); | |
4460 | return; | 4506 | return; |
4461 | } | 4507 | } |
4462 | 4508 | ||
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index a5d9048235d9..f3f1bf1a0a71 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/mempool.h> | 22 | #include <linux/mempool.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/delay.h> | ||
24 | 25 | ||
25 | #include <scsi/scsi.h> | 26 | #include <scsi/scsi.h> |
26 | #include <scsi/scsi_host.h> | 27 | #include <scsi/scsi_host.h> |
@@ -33,6 +34,7 @@ | |||
33 | #include "lpfc_sli.h" | 34 | #include "lpfc_sli.h" |
34 | #include "lpfc_sli4.h" | 35 | #include "lpfc_sli4.h" |
35 | #include "lpfc_nl.h" | 36 | #include "lpfc_nl.h" |
37 | #include "lpfc_bsg.h" | ||
36 | #include "lpfc_disc.h" | 38 | #include "lpfc_disc.h" |
37 | #include "lpfc_scsi.h" | 39 | #include "lpfc_scsi.h" |
38 | #include "lpfc.h" | 40 | #include "lpfc.h" |
@@ -41,14 +43,183 @@ | |||
41 | #include "lpfc_vport.h" | 43 | #include "lpfc_vport.h" |
42 | #include "lpfc_version.h" | 44 | #include "lpfc_version.h" |
43 | 45 | ||
46 | struct lpfc_bsg_event { | ||
47 | struct list_head node; | ||
48 | struct kref kref; | ||
49 | wait_queue_head_t wq; | ||
50 | |||
51 | /* Event type and waiter identifiers */ | ||
52 | uint32_t type_mask; | ||
53 | uint32_t req_id; | ||
54 | uint32_t reg_id; | ||
55 | |||
56 | /* next two flags are here for the auto-delete logic */ | ||
57 | unsigned long wait_time_stamp; | ||
58 | int waiting; | ||
59 | |||
60 | /* seen and not seen events */ | ||
61 | struct list_head events_to_get; | ||
62 | struct list_head events_to_see; | ||
63 | |||
64 | /* job waiting for this event to finish */ | ||
65 | struct fc_bsg_job *set_job; | ||
66 | }; | ||
67 | |||
68 | struct lpfc_bsg_iocb { | ||
69 | struct lpfc_iocbq *cmdiocbq; | ||
70 | struct lpfc_iocbq *rspiocbq; | ||
71 | struct lpfc_dmabuf *bmp; | ||
72 | struct lpfc_nodelist *ndlp; | ||
73 | |||
74 | /* job waiting for this iocb to finish */ | ||
75 | struct fc_bsg_job *set_job; | ||
76 | }; | ||
77 | |||
78 | struct lpfc_bsg_mbox { | ||
79 | LPFC_MBOXQ_t *pmboxq; | ||
80 | MAILBOX_t *mb; | ||
81 | |||
82 | /* job waiting for this mbox command to finish */ | ||
83 | struct fc_bsg_job *set_job; | ||
84 | }; | ||
85 | |||
86 | #define TYPE_EVT 1 | ||
87 | #define TYPE_IOCB 2 | ||
88 | #define TYPE_MBOX 3 | ||
89 | struct bsg_job_data { | ||
90 | uint32_t type; | ||
91 | union { | ||
92 | struct lpfc_bsg_event *evt; | ||
93 | struct lpfc_bsg_iocb iocb; | ||
94 | struct lpfc_bsg_mbox mbox; | ||
95 | } context_un; | ||
96 | }; | ||
97 | |||
98 | struct event_data { | ||
99 | struct list_head node; | ||
100 | uint32_t type; | ||
101 | uint32_t immed_dat; | ||
102 | void *data; | ||
103 | uint32_t len; | ||
104 | }; | ||
105 | |||
106 | #define BUF_SZ_4K 4096 | ||
107 | #define SLI_CT_ELX_LOOPBACK 0x10 | ||
108 | |||
109 | enum ELX_LOOPBACK_CMD { | ||
110 | ELX_LOOPBACK_XRI_SETUP, | ||
111 | ELX_LOOPBACK_DATA, | ||
112 | }; | ||
113 | |||
114 | #define ELX_LOOPBACK_HEADER_SZ \ | ||
115 | (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) | ||
116 | |||
117 | struct lpfc_dmabufext { | ||
118 | struct lpfc_dmabuf dma; | ||
119 | uint32_t size; | ||
120 | uint32_t flag; | ||
121 | }; | ||
122 | |||
123 | /** | ||
124 | * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler | ||
125 | * @phba: Pointer to HBA context object. | ||
126 | * @cmdiocbq: Pointer to command iocb. | ||
127 | * @rspiocbq: Pointer to response iocb. | ||
128 | * | ||
129 | * This function is the completion handler for iocbs issued using | ||
130 | * lpfc_bsg_send_mgmt_cmd function. This function is called by the | ||
131 | * ring event handler function without any lock held. This function | ||
132 | * can be called from both worker thread context and interrupt | ||
133 | * context. This function also can be called from another thread which | ||
134 | * cleans up the SLI layer objects. | ||
135 | * This function copies the contents of the response iocb to the | ||
136 | * response iocb memory object provided by the caller of | ||
137 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
138 | * sleeps for the iocb completion. | ||
139 | **/ | ||
140 | static void | ||
141 | lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, | ||
142 | struct lpfc_iocbq *cmdiocbq, | ||
143 | struct lpfc_iocbq *rspiocbq) | ||
144 | { | ||
145 | unsigned long iflags; | ||
146 | struct bsg_job_data *dd_data; | ||
147 | struct fc_bsg_job *job; | ||
148 | IOCB_t *rsp; | ||
149 | struct lpfc_dmabuf *bmp; | ||
150 | struct lpfc_nodelist *ndlp; | ||
151 | struct lpfc_bsg_iocb *iocb; | ||
152 | unsigned long flags; | ||
153 | int rc = 0; | ||
154 | |||
155 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
156 | dd_data = cmdiocbq->context1; | ||
157 | if (!dd_data) { | ||
158 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
159 | return; | ||
160 | } | ||
161 | |||
162 | iocb = &dd_data->context_un.iocb; | ||
163 | job = iocb->set_job; | ||
164 | job->dd_data = NULL; /* so timeout handler does not reply */ | ||
165 | |||
166 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
167 | cmdiocbq->iocb_flag |= LPFC_IO_WAKE; | ||
168 | if (cmdiocbq->context2 && rspiocbq) | ||
169 | memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, | ||
170 | &rspiocbq->iocb, sizeof(IOCB_t)); | ||
171 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
172 | |||
173 | bmp = iocb->bmp; | ||
174 | rspiocbq = iocb->rspiocbq; | ||
175 | rsp = &rspiocbq->iocb; | ||
176 | ndlp = iocb->ndlp; | ||
177 | |||
178 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
179 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
180 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | ||
181 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
182 | |||
183 | if (rsp->ulpStatus) { | ||
184 | if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | ||
185 | switch (rsp->un.ulpWord[4] & 0xff) { | ||
186 | case IOERR_SEQUENCE_TIMEOUT: | ||
187 | rc = -ETIMEDOUT; | ||
188 | break; | ||
189 | case IOERR_INVALID_RPI: | ||
190 | rc = -EFAULT; | ||
191 | break; | ||
192 | default: | ||
193 | rc = -EACCES; | ||
194 | break; | ||
195 | } | ||
196 | } else | ||
197 | rc = -EACCES; | ||
198 | } else | ||
199 | job->reply->reply_payload_rcv_len = | ||
200 | rsp->un.genreq64.bdl.bdeSize; | ||
201 | |||
202 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | ||
203 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
204 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
205 | lpfc_nlp_put(ndlp); | ||
206 | kfree(bmp); | ||
207 | kfree(dd_data); | ||
208 | /* make error code available to userspace */ | ||
209 | job->reply->result = rc; | ||
210 | /* complete the job back to userspace */ | ||
211 | job->job_done(job); | ||
212 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
213 | return; | ||
214 | } | ||
215 | |||
44 | /** | 216 | /** |
45 | * lpfc_bsg_rport_ct - send a CT command from a bsg request | 217 | * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request |
46 | * @job: fc_bsg_job to handle | 218 | * @job: fc_bsg_job to handle |
47 | */ | 219 | **/ |
48 | static int | 220 | static int |
49 | lpfc_bsg_rport_ct(struct fc_bsg_job *job) | 221 | lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) |
50 | { | 222 | { |
51 | struct Scsi_Host *shost = job->shost; | ||
52 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | 223 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; |
53 | struct lpfc_hba *phba = vport->phba; | 224 | struct lpfc_hba *phba = vport->phba; |
54 | struct lpfc_rport_data *rdata = job->rport->dd_data; | 225 | struct lpfc_rport_data *rdata = job->rport->dd_data; |
@@ -65,57 +236,60 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job) | |||
65 | struct scatterlist *sgel = NULL; | 236 | struct scatterlist *sgel = NULL; |
66 | int numbde; | 237 | int numbde; |
67 | dma_addr_t busaddr; | 238 | dma_addr_t busaddr; |
239 | struct bsg_job_data *dd_data; | ||
240 | uint32_t creg_val; | ||
68 | int rc = 0; | 241 | int rc = 0; |
69 | 242 | ||
70 | /* in case no data is transferred */ | 243 | /* in case no data is transferred */ |
71 | job->reply->reply_payload_rcv_len = 0; | 244 | job->reply->reply_payload_rcv_len = 0; |
72 | 245 | ||
246 | /* allocate our bsg tracking structure */ | ||
247 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
248 | if (!dd_data) { | ||
249 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
250 | "2733 Failed allocation of dd_data\n"); | ||
251 | rc = -ENOMEM; | ||
252 | goto no_dd_data; | ||
253 | } | ||
254 | |||
73 | if (!lpfc_nlp_get(ndlp)) { | 255 | if (!lpfc_nlp_get(ndlp)) { |
74 | job->reply->result = -ENODEV; | 256 | rc = -ENODEV; |
75 | return 0; | 257 | goto no_ndlp; |
258 | } | ||
259 | |||
260 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
261 | if (!bmp) { | ||
262 | rc = -ENOMEM; | ||
263 | goto free_ndlp; | ||
76 | } | 264 | } |
77 | 265 | ||
78 | if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { | 266 | if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { |
79 | rc = -ENODEV; | 267 | rc = -ENODEV; |
80 | goto free_ndlp_exit; | 268 | goto free_bmp; |
81 | } | 269 | } |
82 | 270 | ||
83 | spin_lock_irq(shost->host_lock); | ||
84 | cmdiocbq = lpfc_sli_get_iocbq(phba); | 271 | cmdiocbq = lpfc_sli_get_iocbq(phba); |
85 | if (!cmdiocbq) { | 272 | if (!cmdiocbq) { |
86 | rc = -ENOMEM; | 273 | rc = -ENOMEM; |
87 | spin_unlock_irq(shost->host_lock); | 274 | goto free_bmp; |
88 | goto free_ndlp_exit; | ||
89 | } | 275 | } |
90 | cmd = &cmdiocbq->iocb; | ||
91 | 276 | ||
277 | cmd = &cmdiocbq->iocb; | ||
92 | rspiocbq = lpfc_sli_get_iocbq(phba); | 278 | rspiocbq = lpfc_sli_get_iocbq(phba); |
93 | if (!rspiocbq) { | 279 | if (!rspiocbq) { |
94 | rc = -ENOMEM; | 280 | rc = -ENOMEM; |
95 | goto free_cmdiocbq; | 281 | goto free_cmdiocbq; |
96 | } | 282 | } |
97 | spin_unlock_irq(shost->host_lock); | ||
98 | 283 | ||
99 | rsp = &rspiocbq->iocb; | 284 | rsp = &rspiocbq->iocb; |
100 | |||
101 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
102 | if (!bmp) { | ||
103 | rc = -ENOMEM; | ||
104 | spin_lock_irq(shost->host_lock); | ||
105 | goto free_rspiocbq; | ||
106 | } | ||
107 | |||
108 | spin_lock_irq(shost->host_lock); | ||
109 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); | 285 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); |
110 | if (!bmp->virt) { | 286 | if (!bmp->virt) { |
111 | rc = -ENOMEM; | 287 | rc = -ENOMEM; |
112 | goto free_bmp; | 288 | goto free_rspiocbq; |
113 | } | 289 | } |
114 | spin_unlock_irq(shost->host_lock); | ||
115 | 290 | ||
116 | INIT_LIST_HEAD(&bmp->list); | 291 | INIT_LIST_HEAD(&bmp->list); |
117 | bpl = (struct ulp_bde64 *) bmp->virt; | 292 | bpl = (struct ulp_bde64 *) bmp->virt; |
118 | |||
119 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, | 293 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, |
120 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 294 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
121 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { | 295 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { |
@@ -157,78 +331,152 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job) | |||
157 | cmd->ulpContext = ndlp->nlp_rpi; | 331 | cmd->ulpContext = ndlp->nlp_rpi; |
158 | cmd->ulpOwner = OWN_CHIP; | 332 | cmd->ulpOwner = OWN_CHIP; |
159 | cmdiocbq->vport = phba->pport; | 333 | cmdiocbq->vport = phba->pport; |
160 | cmdiocbq->context1 = NULL; | 334 | cmdiocbq->context3 = bmp; |
161 | cmdiocbq->context2 = NULL; | ||
162 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; | 335 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; |
163 | |||
164 | timeout = phba->fc_ratov * 2; | 336 | timeout = phba->fc_ratov * 2; |
165 | job->dd_data = cmdiocbq; | 337 | cmd->ulpTimeout = timeout; |
166 | 338 | ||
167 | rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, | 339 | cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; |
168 | timeout + LPFC_DRVR_TIMEOUT); | 340 | cmdiocbq->context1 = dd_data; |
169 | 341 | cmdiocbq->context2 = rspiocbq; | |
170 | if (rc != IOCB_TIMEDOUT) { | 342 | dd_data->type = TYPE_IOCB; |
171 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 343 | dd_data->context_un.iocb.cmdiocbq = cmdiocbq; |
172 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 344 | dd_data->context_un.iocb.rspiocbq = rspiocbq; |
173 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | 345 | dd_data->context_un.iocb.set_job = job; |
174 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | 346 | dd_data->context_un.iocb.bmp = bmp; |
347 | dd_data->context_un.iocb.ndlp = ndlp; | ||
348 | |||
349 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | ||
350 | creg_val = readl(phba->HCregaddr); | ||
351 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | ||
352 | writel(creg_val, phba->HCregaddr); | ||
353 | readl(phba->HCregaddr); /* flush */ | ||
175 | } | 354 | } |
176 | 355 | ||
177 | if (rc == IOCB_TIMEDOUT) { | 356 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); |
178 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
179 | rc = -EACCES; | ||
180 | goto free_ndlp_exit; | ||
181 | } | ||
182 | 357 | ||
183 | if (rc != IOCB_SUCCESS) { | 358 | if (rc == IOCB_SUCCESS) |
184 | rc = -EACCES; | 359 | return 0; /* done for now */ |
185 | goto free_outdmp; | ||
186 | } | ||
187 | 360 | ||
188 | if (rsp->ulpStatus) { | 361 | /* iocb failed so cleanup */ |
189 | if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | 362 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
190 | switch (rsp->un.ulpWord[4] & 0xff) { | 363 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
191 | case IOERR_SEQUENCE_TIMEOUT: | 364 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, |
192 | rc = -ETIMEDOUT; | 365 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); |
193 | break; | ||
194 | case IOERR_INVALID_RPI: | ||
195 | rc = -EFAULT; | ||
196 | break; | ||
197 | default: | ||
198 | rc = -EACCES; | ||
199 | break; | ||
200 | } | ||
201 | goto free_outdmp; | ||
202 | } | ||
203 | } else | ||
204 | job->reply->reply_payload_rcv_len = | ||
205 | rsp->un.genreq64.bdl.bdeSize; | ||
206 | 366 | ||
207 | free_outdmp: | ||
208 | spin_lock_irq(shost->host_lock); | ||
209 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 367 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
210 | free_bmp: | 368 | |
211 | kfree(bmp); | ||
212 | free_rspiocbq: | 369 | free_rspiocbq: |
213 | lpfc_sli_release_iocbq(phba, rspiocbq); | 370 | lpfc_sli_release_iocbq(phba, rspiocbq); |
214 | free_cmdiocbq: | 371 | free_cmdiocbq: |
215 | lpfc_sli_release_iocbq(phba, cmdiocbq); | 372 | lpfc_sli_release_iocbq(phba, cmdiocbq); |
216 | spin_unlock_irq(shost->host_lock); | 373 | free_bmp: |
217 | free_ndlp_exit: | 374 | kfree(bmp); |
375 | free_ndlp: | ||
218 | lpfc_nlp_put(ndlp); | 376 | lpfc_nlp_put(ndlp); |
377 | no_ndlp: | ||
378 | kfree(dd_data); | ||
379 | no_dd_data: | ||
380 | /* make error code available to userspace */ | ||
381 | job->reply->result = rc; | ||
382 | job->dd_data = NULL; | ||
383 | return rc; | ||
384 | } | ||
385 | |||
386 | /** | ||
387 | * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler | ||
388 | * @phba: Pointer to HBA context object. | ||
389 | * @cmdiocbq: Pointer to command iocb. | ||
390 | * @rspiocbq: Pointer to response iocb. | ||
391 | * | ||
392 | * This function is the completion handler for iocbs issued using | ||
393 | * lpfc_bsg_rport_els_cmp function. This function is called by the | ||
394 | * ring event handler function without any lock held. This function | ||
395 | * can be called from both worker thread context and interrupt | ||
396 | * context. This function also can be called from other thread which | ||
397 | * cleans up the SLI layer objects. | ||
398 | * This function copies the contents of the response iocb to the | ||
399 | * response iocb memory object provided by the caller of | ||
400 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
401 | * sleeps for the iocb completion. | ||
402 | **/ | ||
403 | static void | ||
404 | lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, | ||
405 | struct lpfc_iocbq *cmdiocbq, | ||
406 | struct lpfc_iocbq *rspiocbq) | ||
407 | { | ||
408 | struct bsg_job_data *dd_data; | ||
409 | struct fc_bsg_job *job; | ||
410 | IOCB_t *rsp; | ||
411 | struct lpfc_nodelist *ndlp; | ||
412 | struct lpfc_dmabuf *pbuflist = NULL; | ||
413 | struct fc_bsg_ctels_reply *els_reply; | ||
414 | uint8_t *rjt_data; | ||
415 | unsigned long flags; | ||
416 | int rc = 0; | ||
417 | |||
418 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
419 | dd_data = cmdiocbq->context1; | ||
420 | /* normal completion and timeout crossed paths, already done */ | ||
421 | if (!dd_data) { | ||
422 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | cmdiocbq->iocb_flag |= LPFC_IO_WAKE; | ||
427 | if (cmdiocbq->context2 && rspiocbq) | ||
428 | memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, | ||
429 | &rspiocbq->iocb, sizeof(IOCB_t)); | ||
430 | |||
431 | job = dd_data->context_un.iocb.set_job; | ||
432 | cmdiocbq = dd_data->context_un.iocb.cmdiocbq; | ||
433 | rspiocbq = dd_data->context_un.iocb.rspiocbq; | ||
434 | rsp = &rspiocbq->iocb; | ||
435 | ndlp = dd_data->context_un.iocb.ndlp; | ||
436 | |||
437 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
438 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
439 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | ||
440 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
219 | 441 | ||
442 | if (job->reply->result == -EAGAIN) | ||
443 | rc = -EAGAIN; | ||
444 | else if (rsp->ulpStatus == IOSTAT_SUCCESS) | ||
445 | job->reply->reply_payload_rcv_len = | ||
446 | rsp->un.elsreq64.bdl.bdeSize; | ||
447 | else if (rsp->ulpStatus == IOSTAT_LS_RJT) { | ||
448 | job->reply->reply_payload_rcv_len = | ||
449 | sizeof(struct fc_bsg_ctels_reply); | ||
450 | /* LS_RJT data returned in word 4 */ | ||
451 | rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; | ||
452 | els_reply = &job->reply->reply_data.ctels_reply; | ||
453 | els_reply->status = FC_CTELS_STATUS_REJECT; | ||
454 | els_reply->rjt_data.action = rjt_data[3]; | ||
455 | els_reply->rjt_data.reason_code = rjt_data[2]; | ||
456 | els_reply->rjt_data.reason_explanation = rjt_data[1]; | ||
457 | els_reply->rjt_data.vendor_unique = rjt_data[0]; | ||
458 | } else | ||
459 | rc = -EIO; | ||
460 | |||
461 | pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; | ||
462 | lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); | ||
463 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
464 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
465 | lpfc_nlp_put(ndlp); | ||
466 | kfree(dd_data); | ||
220 | /* make error code available to userspace */ | 467 | /* make error code available to userspace */ |
221 | job->reply->result = rc; | 468 | job->reply->result = rc; |
469 | job->dd_data = NULL; | ||
222 | /* complete the job back to userspace */ | 470 | /* complete the job back to userspace */ |
223 | job->job_done(job); | 471 | job->job_done(job); |
224 | 472 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | |
225 | return 0; | 473 | return; |
226 | } | 474 | } |
227 | 475 | ||
228 | /** | 476 | /** |
229 | * lpfc_bsg_rport_els - send an ELS command from a bsg request | 477 | * lpfc_bsg_rport_els - send an ELS command from a bsg request |
230 | * @job: fc_bsg_job to handle | 478 | * @job: fc_bsg_job to handle |
231 | */ | 479 | **/ |
232 | static int | 480 | static int |
233 | lpfc_bsg_rport_els(struct fc_bsg_job *job) | 481 | lpfc_bsg_rport_els(struct fc_bsg_job *job) |
234 | { | 482 | { |
@@ -236,7 +484,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
236 | struct lpfc_hba *phba = vport->phba; | 484 | struct lpfc_hba *phba = vport->phba; |
237 | struct lpfc_rport_data *rdata = job->rport->dd_data; | 485 | struct lpfc_rport_data *rdata = job->rport->dd_data; |
238 | struct lpfc_nodelist *ndlp = rdata->pnode; | 486 | struct lpfc_nodelist *ndlp = rdata->pnode; |
239 | |||
240 | uint32_t elscmd; | 487 | uint32_t elscmd; |
241 | uint32_t cmdsize; | 488 | uint32_t cmdsize; |
242 | uint32_t rspsize; | 489 | uint32_t rspsize; |
@@ -248,20 +495,30 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
248 | struct lpfc_dmabuf *prsp; | 495 | struct lpfc_dmabuf *prsp; |
249 | struct lpfc_dmabuf *pbuflist = NULL; | 496 | struct lpfc_dmabuf *pbuflist = NULL; |
250 | struct ulp_bde64 *bpl; | 497 | struct ulp_bde64 *bpl; |
251 | int iocb_status; | ||
252 | int request_nseg; | 498 | int request_nseg; |
253 | int reply_nseg; | 499 | int reply_nseg; |
254 | struct scatterlist *sgel = NULL; | 500 | struct scatterlist *sgel = NULL; |
255 | int numbde; | 501 | int numbde; |
256 | dma_addr_t busaddr; | 502 | dma_addr_t busaddr; |
503 | struct bsg_job_data *dd_data; | ||
504 | uint32_t creg_val; | ||
257 | int rc = 0; | 505 | int rc = 0; |
258 | 506 | ||
259 | /* in case no data is transferred */ | 507 | /* in case no data is transferred */ |
260 | job->reply->reply_payload_rcv_len = 0; | 508 | job->reply->reply_payload_rcv_len = 0; |
261 | 509 | ||
510 | /* allocate our bsg tracking structure */ | ||
511 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
512 | if (!dd_data) { | ||
513 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
514 | "2735 Failed allocation of dd_data\n"); | ||
515 | rc = -ENOMEM; | ||
516 | goto no_dd_data; | ||
517 | } | ||
518 | |||
262 | if (!lpfc_nlp_get(ndlp)) { | 519 | if (!lpfc_nlp_get(ndlp)) { |
263 | rc = -ENODEV; | 520 | rc = -ENODEV; |
264 | goto out; | 521 | goto free_dd_data; |
265 | } | 522 | } |
266 | 523 | ||
267 | elscmd = job->request->rqst_data.r_els.els_code; | 524 | elscmd = job->request->rqst_data.r_els.els_code; |
@@ -271,24 +528,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
271 | if (!rspiocbq) { | 528 | if (!rspiocbq) { |
272 | lpfc_nlp_put(ndlp); | 529 | lpfc_nlp_put(ndlp); |
273 | rc = -ENOMEM; | 530 | rc = -ENOMEM; |
274 | goto out; | 531 | goto free_dd_data; |
275 | } | 532 | } |
276 | 533 | ||
277 | rsp = &rspiocbq->iocb; | 534 | rsp = &rspiocbq->iocb; |
278 | rpi = ndlp->nlp_rpi; | 535 | rpi = ndlp->nlp_rpi; |
279 | 536 | ||
280 | cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp, | 537 | cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, |
281 | ndlp->nlp_DID, elscmd); | 538 | ndlp->nlp_DID, elscmd); |
282 | |||
283 | if (!cmdiocbq) { | 539 | if (!cmdiocbq) { |
284 | lpfc_sli_release_iocbq(phba, rspiocbq); | 540 | rc = -EIO; |
285 | return -EIO; | 541 | goto free_rspiocbq; |
286 | } | 542 | } |
287 | 543 | ||
288 | job->dd_data = cmdiocbq; | 544 | /* prep els iocb set context1 to the ndlp, context2 to the command |
545 | * dmabuf, context3 holds the data dmabuf | ||
546 | */ | ||
289 | pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; | 547 | pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; |
290 | prsp = (struct lpfc_dmabuf *) pcmd->list.next; | 548 | prsp = (struct lpfc_dmabuf *) pcmd->list.next; |
291 | |||
292 | lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); | 549 | lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); |
293 | kfree(pcmd); | 550 | kfree(pcmd); |
294 | lpfc_mbuf_free(phba, prsp->virt, prsp->phys); | 551 | lpfc_mbuf_free(phba, prsp->virt, prsp->phys); |
@@ -300,7 +557,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
300 | 557 | ||
301 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, | 558 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, |
302 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 559 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
303 | |||
304 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { | 560 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { |
305 | busaddr = sg_dma_address(sgel); | 561 | busaddr = sg_dma_address(sgel); |
306 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 562 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
@@ -322,7 +578,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
322 | bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); | 578 | bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); |
323 | bpl++; | 579 | bpl++; |
324 | } | 580 | } |
325 | |||
326 | cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = | 581 | cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = |
327 | (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); | 582 | (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); |
328 | cmdiocbq->iocb.ulpContext = rpi; | 583 | cmdiocbq->iocb.ulpContext = rpi; |
@@ -330,102 +585,62 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
330 | cmdiocbq->context1 = NULL; | 585 | cmdiocbq->context1 = NULL; |
331 | cmdiocbq->context2 = NULL; | 586 | cmdiocbq->context2 = NULL; |
332 | 587 | ||
333 | iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, | 588 | cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; |
334 | rspiocbq, (phba->fc_ratov * 2) | 589 | cmdiocbq->context1 = dd_data; |
335 | + LPFC_DRVR_TIMEOUT); | 590 | cmdiocbq->context2 = rspiocbq; |
336 | 591 | dd_data->type = TYPE_IOCB; | |
337 | /* release the new ndlp once the iocb completes */ | 592 | dd_data->context_un.iocb.cmdiocbq = cmdiocbq; |
338 | lpfc_nlp_put(ndlp); | 593 | dd_data->context_un.iocb.rspiocbq = rspiocbq; |
339 | if (iocb_status != IOCB_TIMEDOUT) { | 594 | dd_data->context_un.iocb.set_job = job; |
340 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 595 | dd_data->context_un.iocb.bmp = NULL;; |
341 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 596 | dd_data->context_un.iocb.ndlp = ndlp; |
342 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | 597 | |
343 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | 598 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
599 | creg_val = readl(phba->HCregaddr); | ||
600 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | ||
601 | writel(creg_val, phba->HCregaddr); | ||
602 | readl(phba->HCregaddr); /* flush */ | ||
344 | } | 603 | } |
604 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); | ||
605 | lpfc_nlp_put(ndlp); | ||
606 | if (rc == IOCB_SUCCESS) | ||
607 | return 0; /* done for now */ | ||
345 | 608 | ||
346 | if (iocb_status == IOCB_SUCCESS) { | 609 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
347 | if (rsp->ulpStatus == IOSTAT_SUCCESS) { | 610 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
348 | job->reply->reply_payload_rcv_len = | 611 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, |
349 | rsp->un.elsreq64.bdl.bdeSize; | 612 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); |
350 | rc = 0; | 613 | |
351 | } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { | 614 | lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); |
352 | struct fc_bsg_ctels_reply *els_reply; | ||
353 | /* LS_RJT data returned in word 4 */ | ||
354 | uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; | ||
355 | |||
356 | els_reply = &job->reply->reply_data.ctels_reply; | ||
357 | job->reply->result = 0; | ||
358 | els_reply->status = FC_CTELS_STATUS_REJECT; | ||
359 | els_reply->rjt_data.action = rjt_data[0]; | ||
360 | els_reply->rjt_data.reason_code = rjt_data[1]; | ||
361 | els_reply->rjt_data.reason_explanation = rjt_data[2]; | ||
362 | els_reply->rjt_data.vendor_unique = rjt_data[3]; | ||
363 | } else | ||
364 | rc = -EIO; | ||
365 | } else | ||
366 | rc = -EIO; | ||
367 | 615 | ||
368 | if (iocb_status != IOCB_TIMEDOUT) | 616 | lpfc_sli_release_iocbq(phba, cmdiocbq); |
369 | lpfc_els_free_iocb(phba, cmdiocbq); | ||
370 | 617 | ||
618 | free_rspiocbq: | ||
371 | lpfc_sli_release_iocbq(phba, rspiocbq); | 619 | lpfc_sli_release_iocbq(phba, rspiocbq); |
372 | 620 | ||
373 | out: | 621 | free_dd_data: |
622 | kfree(dd_data); | ||
623 | |||
624 | no_dd_data: | ||
374 | /* make error code available to userspace */ | 625 | /* make error code available to userspace */ |
375 | job->reply->result = rc; | 626 | job->reply->result = rc; |
376 | /* complete the job back to userspace */ | 627 | job->dd_data = NULL; |
377 | job->job_done(job); | 628 | return rc; |
378 | |||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | struct lpfc_ct_event { | ||
383 | struct list_head node; | ||
384 | int ref; | ||
385 | wait_queue_head_t wq; | ||
386 | |||
387 | /* Event type and waiter identifiers */ | ||
388 | uint32_t type_mask; | ||
389 | uint32_t req_id; | ||
390 | uint32_t reg_id; | ||
391 | |||
392 | /* next two flags are here for the auto-delete logic */ | ||
393 | unsigned long wait_time_stamp; | ||
394 | int waiting; | ||
395 | |||
396 | /* seen and not seen events */ | ||
397 | struct list_head events_to_get; | ||
398 | struct list_head events_to_see; | ||
399 | }; | ||
400 | |||
401 | struct event_data { | ||
402 | struct list_head node; | ||
403 | uint32_t type; | ||
404 | uint32_t immed_dat; | ||
405 | void *data; | ||
406 | uint32_t len; | ||
407 | }; | ||
408 | |||
409 | static struct lpfc_ct_event * | ||
410 | lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id) | ||
411 | { | ||
412 | struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); | ||
413 | if (!evt) | ||
414 | return NULL; | ||
415 | |||
416 | INIT_LIST_HEAD(&evt->events_to_get); | ||
417 | INIT_LIST_HEAD(&evt->events_to_see); | ||
418 | evt->req_id = ev_req_id; | ||
419 | evt->reg_id = ev_reg_id; | ||
420 | evt->wait_time_stamp = jiffies; | ||
421 | init_waitqueue_head(&evt->wq); | ||
422 | |||
423 | return evt; | ||
424 | } | 629 | } |
425 | 630 | ||
631 | /** | ||
632 | * lpfc_bsg_event_free - frees an allocated event structure | ||
633 | * @kref: Pointer to a kref. | ||
634 | * | ||
635 | * Called from kref_put. Back cast the kref into an event structure address. | ||
636 | * Free any events to get, delete associated nodes, free any events to see, | ||
637 | * free any data then free the event itself. | ||
638 | **/ | ||
426 | static void | 639 | static void |
427 | lpfc_ct_event_free(struct lpfc_ct_event *evt) | 640 | lpfc_bsg_event_free(struct kref *kref) |
428 | { | 641 | { |
642 | struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event, | ||
643 | kref); | ||
429 | struct event_data *ed; | 644 | struct event_data *ed; |
430 | 645 | ||
431 | list_del(&evt->node); | 646 | list_del(&evt->node); |
@@ -447,25 +662,82 @@ lpfc_ct_event_free(struct lpfc_ct_event *evt) | |||
447 | kfree(evt); | 662 | kfree(evt); |
448 | } | 663 | } |
449 | 664 | ||
665 | /** | ||
666 | * lpfc_bsg_event_ref - increments the kref for an event | ||
667 | * @evt: Pointer to an event structure. | ||
668 | **/ | ||
450 | static inline void | 669 | static inline void |
451 | lpfc_ct_event_ref(struct lpfc_ct_event *evt) | 670 | lpfc_bsg_event_ref(struct lpfc_bsg_event *evt) |
452 | { | 671 | { |
453 | evt->ref++; | 672 | kref_get(&evt->kref); |
454 | } | 673 | } |
455 | 674 | ||
675 | /** | ||
676 | * lpfc_bsg_event_unref - Uses kref_put to free an event structure | ||
677 | * @evt: Pointer to an event structure. | ||
678 | **/ | ||
456 | static inline void | 679 | static inline void |
457 | lpfc_ct_event_unref(struct lpfc_ct_event *evt) | 680 | lpfc_bsg_event_unref(struct lpfc_bsg_event *evt) |
458 | { | 681 | { |
459 | if (--evt->ref < 0) | 682 | kref_put(&evt->kref, lpfc_bsg_event_free); |
460 | lpfc_ct_event_free(evt); | ||
461 | } | 683 | } |
462 | 684 | ||
463 | #define SLI_CT_ELX_LOOPBACK 0x10 | 685 | /** |
686 | * lpfc_bsg_event_new - allocate and initialize a event structure | ||
687 | * @ev_mask: Mask of events. | ||
688 | * @ev_reg_id: Event reg id. | ||
689 | * @ev_req_id: Event request id. | ||
690 | **/ | ||
691 | static struct lpfc_bsg_event * | ||
692 | lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id) | ||
693 | { | ||
694 | struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); | ||
464 | 695 | ||
465 | enum ELX_LOOPBACK_CMD { | 696 | if (!evt) |
466 | ELX_LOOPBACK_XRI_SETUP, | 697 | return NULL; |
467 | ELX_LOOPBACK_DATA, | 698 | |
468 | }; | 699 | INIT_LIST_HEAD(&evt->events_to_get); |
700 | INIT_LIST_HEAD(&evt->events_to_see); | ||
701 | evt->type_mask = ev_mask; | ||
702 | evt->req_id = ev_req_id; | ||
703 | evt->reg_id = ev_reg_id; | ||
704 | evt->wait_time_stamp = jiffies; | ||
705 | init_waitqueue_head(&evt->wq); | ||
706 | kref_init(&evt->kref); | ||
707 | return evt; | ||
708 | } | ||
709 | |||
710 | /** | ||
711 | * diag_cmd_data_free - Frees an lpfc dma buffer extension | ||
712 | * @phba: Pointer to HBA context object. | ||
713 | * @mlist: Pointer to an lpfc dma buffer extension. | ||
714 | **/ | ||
715 | static int | ||
716 | diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist) | ||
717 | { | ||
718 | struct lpfc_dmabufext *mlast; | ||
719 | struct pci_dev *pcidev; | ||
720 | struct list_head head, *curr, *next; | ||
721 | |||
722 | if ((!mlist) || (!lpfc_is_link_up(phba) && | ||
723 | (phba->link_flag & LS_LOOPBACK_MODE))) { | ||
724 | return 0; | ||
725 | } | ||
726 | |||
727 | pcidev = phba->pcidev; | ||
728 | list_add_tail(&head, &mlist->dma.list); | ||
729 | |||
730 | list_for_each_safe(curr, next, &head) { | ||
731 | mlast = list_entry(curr, struct lpfc_dmabufext , dma.list); | ||
732 | if (mlast->dma.virt) | ||
733 | dma_free_coherent(&pcidev->dev, | ||
734 | mlast->size, | ||
735 | mlast->dma.virt, | ||
736 | mlast->dma.phys); | ||
737 | kfree(mlast); | ||
738 | } | ||
739 | return 0; | ||
740 | } | ||
469 | 741 | ||
470 | /** | 742 | /** |
471 | * lpfc_bsg_ct_unsol_event - process an unsolicited CT command | 743 | * lpfc_bsg_ct_unsol_event - process an unsolicited CT command |
@@ -474,9 +746,9 @@ enum ELX_LOOPBACK_CMD { | |||
474 | * @piocbq: | 746 | * @piocbq: |
475 | * | 747 | * |
476 | * This function is called when an unsolicited CT command is received. It | 748 | * This function is called when an unsolicited CT command is received. It |
477 | * forwards the event to any processes registerd to receive CT events. | 749 | * forwards the event to any processes registered to receive CT events. |
478 | */ | 750 | **/ |
479 | void | 751 | int |
480 | lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 752 | lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
481 | struct lpfc_iocbq *piocbq) | 753 | struct lpfc_iocbq *piocbq) |
482 | { | 754 | { |
@@ -484,7 +756,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
484 | uint32_t cmd; | 756 | uint32_t cmd; |
485 | uint32_t len; | 757 | uint32_t len; |
486 | struct lpfc_dmabuf *dmabuf = NULL; | 758 | struct lpfc_dmabuf *dmabuf = NULL; |
487 | struct lpfc_ct_event *evt; | 759 | struct lpfc_bsg_event *evt; |
488 | struct event_data *evt_dat = NULL; | 760 | struct event_data *evt_dat = NULL; |
489 | struct lpfc_iocbq *iocbq; | 761 | struct lpfc_iocbq *iocbq; |
490 | size_t offset = 0; | 762 | size_t offset = 0; |
@@ -496,6 +768,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
496 | struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; | 768 | struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; |
497 | struct lpfc_hbq_entry *hbqe; | 769 | struct lpfc_hbq_entry *hbqe; |
498 | struct lpfc_sli_ct_request *ct_req; | 770 | struct lpfc_sli_ct_request *ct_req; |
771 | struct fc_bsg_job *job = NULL; | ||
772 | unsigned long flags; | ||
773 | int size = 0; | ||
499 | 774 | ||
500 | INIT_LIST_HEAD(&head); | 775 | INIT_LIST_HEAD(&head); |
501 | list_add_tail(&head, &piocbq->list); | 776 | list_add_tail(&head, &piocbq->list); |
@@ -504,6 +779,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
504 | piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) | 779 | piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) |
505 | goto error_ct_unsol_exit; | 780 | goto error_ct_unsol_exit; |
506 | 781 | ||
782 | if (phba->link_state == LPFC_HBA_ERROR || | ||
783 | (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) | ||
784 | goto error_ct_unsol_exit; | ||
785 | |||
507 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) | 786 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) |
508 | dmabuf = bdeBuf1; | 787 | dmabuf = bdeBuf1; |
509 | else { | 788 | else { |
@@ -511,7 +790,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
511 | piocbq->iocb.un.cont64[0].addrLow); | 790 | piocbq->iocb.un.cont64[0].addrLow); |
512 | dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); | 791 | dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); |
513 | } | 792 | } |
514 | 793 | if (dmabuf == NULL) | |
794 | goto error_ct_unsol_exit; | ||
515 | ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; | 795 | ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; |
516 | evt_req_id = ct_req->FsType; | 796 | evt_req_id = ct_req->FsType; |
517 | cmd = ct_req->CommandResponse.bits.CmdRsp; | 797 | cmd = ct_req->CommandResponse.bits.CmdRsp; |
@@ -519,24 +799,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
519 | if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) | 799 | if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) |
520 | lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); | 800 | lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); |
521 | 801 | ||
522 | mutex_lock(&phba->ct_event_mutex); | 802 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
523 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { | 803 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { |
524 | if (evt->req_id != evt_req_id) | 804 | if (!(evt->type_mask & FC_REG_CT_EVENT) || |
805 | evt->req_id != evt_req_id) | ||
525 | continue; | 806 | continue; |
526 | 807 | ||
527 | lpfc_ct_event_ref(evt); | 808 | lpfc_bsg_event_ref(evt); |
528 | 809 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | |
529 | evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); | 810 | evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); |
530 | if (!evt_dat) { | 811 | if (evt_dat == NULL) { |
531 | lpfc_ct_event_unref(evt); | 812 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
813 | lpfc_bsg_event_unref(evt); | ||
532 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 814 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
533 | "2614 Memory allocation failed for " | 815 | "2614 Memory allocation failed for " |
534 | "CT event\n"); | 816 | "CT event\n"); |
535 | break; | 817 | break; |
536 | } | 818 | } |
537 | 819 | ||
538 | mutex_unlock(&phba->ct_event_mutex); | ||
539 | |||
540 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 820 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
541 | /* take accumulated byte count from the last iocbq */ | 821 | /* take accumulated byte count from the last iocbq */ |
542 | iocbq = list_entry(head.prev, typeof(*iocbq), list); | 822 | iocbq = list_entry(head.prev, typeof(*iocbq), list); |
@@ -550,25 +830,25 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
550 | } | 830 | } |
551 | 831 | ||
552 | evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); | 832 | evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); |
553 | if (!evt_dat->data) { | 833 | if (evt_dat->data == NULL) { |
554 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 834 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
555 | "2615 Memory allocation failed for " | 835 | "2615 Memory allocation failed for " |
556 | "CT event data, size %d\n", | 836 | "CT event data, size %d\n", |
557 | evt_dat->len); | 837 | evt_dat->len); |
558 | kfree(evt_dat); | 838 | kfree(evt_dat); |
559 | mutex_lock(&phba->ct_event_mutex); | 839 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
560 | lpfc_ct_event_unref(evt); | 840 | lpfc_bsg_event_unref(evt); |
561 | mutex_unlock(&phba->ct_event_mutex); | 841 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
562 | goto error_ct_unsol_exit; | 842 | goto error_ct_unsol_exit; |
563 | } | 843 | } |
564 | 844 | ||
565 | list_for_each_entry(iocbq, &head, list) { | 845 | list_for_each_entry(iocbq, &head, list) { |
846 | size = 0; | ||
566 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 847 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
567 | bdeBuf1 = iocbq->context2; | 848 | bdeBuf1 = iocbq->context2; |
568 | bdeBuf2 = iocbq->context3; | 849 | bdeBuf2 = iocbq->context3; |
569 | } | 850 | } |
570 | for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { | 851 | for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { |
571 | int size = 0; | ||
572 | if (phba->sli3_options & | 852 | if (phba->sli3_options & |
573 | LPFC_SLI3_HBQ_ENABLED) { | 853 | LPFC_SLI3_HBQ_ENABLED) { |
574 | if (i == 0) { | 854 | if (i == 0) { |
@@ -601,9 +881,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
601 | iocbq); | 881 | iocbq); |
602 | kfree(evt_dat->data); | 882 | kfree(evt_dat->data); |
603 | kfree(evt_dat); | 883 | kfree(evt_dat); |
604 | mutex_lock(&phba->ct_event_mutex); | 884 | spin_lock_irqsave(&phba->ct_ev_lock, |
605 | lpfc_ct_event_unref(evt); | 885 | flags); |
606 | mutex_unlock(&phba->ct_event_mutex); | 886 | lpfc_bsg_event_unref(evt); |
887 | spin_unlock_irqrestore( | ||
888 | &phba->ct_ev_lock, flags); | ||
607 | goto error_ct_unsol_exit; | 889 | goto error_ct_unsol_exit; |
608 | } | 890 | } |
609 | memcpy((char *)(evt_dat->data) + offset, | 891 | memcpy((char *)(evt_dat->data) + offset, |
@@ -616,15 +898,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
616 | dmabuf); | 898 | dmabuf); |
617 | } else { | 899 | } else { |
618 | switch (cmd) { | 900 | switch (cmd) { |
901 | case ELX_LOOPBACK_DATA: | ||
902 | diag_cmd_data_free(phba, | ||
903 | (struct lpfc_dmabufext *) | ||
904 | dmabuf); | ||
905 | break; | ||
619 | case ELX_LOOPBACK_XRI_SETUP: | 906 | case ELX_LOOPBACK_XRI_SETUP: |
620 | if (!(phba->sli3_options & | 907 | if ((phba->sli_rev == |
621 | LPFC_SLI3_HBQ_ENABLED)) | 908 | LPFC_SLI_REV2) || |
909 | (phba->sli3_options & | ||
910 | LPFC_SLI3_HBQ_ENABLED | ||
911 | )) { | ||
912 | lpfc_in_buf_free(phba, | ||
913 | dmabuf); | ||
914 | } else { | ||
622 | lpfc_post_buffer(phba, | 915 | lpfc_post_buffer(phba, |
623 | pring, | 916 | pring, |
624 | 1); | 917 | 1); |
625 | else | 918 | } |
626 | lpfc_in_buf_free(phba, | ||
627 | dmabuf); | ||
628 | break; | 919 | break; |
629 | default: | 920 | default: |
630 | if (!(phba->sli3_options & | 921 | if (!(phba->sli3_options & |
@@ -638,7 +929,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
638 | } | 929 | } |
639 | } | 930 | } |
640 | 931 | ||
641 | mutex_lock(&phba->ct_event_mutex); | 932 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
642 | if (phba->sli_rev == LPFC_SLI_REV4) { | 933 | if (phba->sli_rev == LPFC_SLI_REV4) { |
643 | evt_dat->immed_dat = phba->ctx_idx; | 934 | evt_dat->immed_dat = phba->ctx_idx; |
644 | phba->ctx_idx = (phba->ctx_idx + 1) % 64; | 935 | phba->ctx_idx = (phba->ctx_idx + 1) % 64; |
@@ -651,122 +942,144 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
651 | 942 | ||
652 | evt_dat->type = FC_REG_CT_EVENT; | 943 | evt_dat->type = FC_REG_CT_EVENT; |
653 | list_add(&evt_dat->node, &evt->events_to_see); | 944 | list_add(&evt_dat->node, &evt->events_to_see); |
654 | wake_up_interruptible(&evt->wq); | 945 | if (evt_req_id == SLI_CT_ELX_LOOPBACK) { |
655 | lpfc_ct_event_unref(evt); | 946 | wake_up_interruptible(&evt->wq); |
656 | if (evt_req_id == SLI_CT_ELX_LOOPBACK) | 947 | lpfc_bsg_event_unref(evt); |
657 | break; | 948 | break; |
949 | } | ||
950 | |||
951 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
952 | lpfc_bsg_event_unref(evt); | ||
953 | |||
954 | job = evt->set_job; | ||
955 | evt->set_job = NULL; | ||
956 | if (job) { | ||
957 | job->reply->reply_payload_rcv_len = size; | ||
958 | /* make error code available to userspace */ | ||
959 | job->reply->result = 0; | ||
960 | job->dd_data = NULL; | ||
961 | /* complete the job back to userspace */ | ||
962 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
963 | job->job_done(job); | ||
964 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
965 | } | ||
658 | } | 966 | } |
659 | mutex_unlock(&phba->ct_event_mutex); | 967 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
660 | 968 | ||
661 | error_ct_unsol_exit: | 969 | error_ct_unsol_exit: |
662 | if (!list_empty(&head)) | 970 | if (!list_empty(&head)) |
663 | list_del(&head); | 971 | list_del(&head); |
664 | 972 | if (evt_req_id == SLI_CT_ELX_LOOPBACK) | |
665 | return; | 973 | return 0; |
974 | return 1; | ||
666 | } | 975 | } |
667 | 976 | ||
668 | /** | 977 | /** |
669 | * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command | 978 | * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command |
670 | * @job: SET_EVENT fc_bsg_job | 979 | * @job: SET_EVENT fc_bsg_job |
671 | */ | 980 | **/ |
672 | static int | 981 | static int |
673 | lpfc_bsg_set_event(struct fc_bsg_job *job) | 982 | lpfc_bsg_hba_set_event(struct fc_bsg_job *job) |
674 | { | 983 | { |
675 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | 984 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; |
676 | struct lpfc_hba *phba = vport->phba; | 985 | struct lpfc_hba *phba = vport->phba; |
677 | struct set_ct_event *event_req; | 986 | struct set_ct_event *event_req; |
678 | struct lpfc_ct_event *evt; | 987 | struct lpfc_bsg_event *evt; |
679 | int rc = 0; | 988 | int rc = 0; |
989 | struct bsg_job_data *dd_data = NULL; | ||
990 | uint32_t ev_mask; | ||
991 | unsigned long flags; | ||
680 | 992 | ||
681 | if (job->request_len < | 993 | if (job->request_len < |
682 | sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { | 994 | sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { |
683 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 995 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
684 | "2612 Received SET_CT_EVENT below minimum " | 996 | "2612 Received SET_CT_EVENT below minimum " |
685 | "size\n"); | 997 | "size\n"); |
686 | return -EINVAL; | 998 | rc = -EINVAL; |
999 | goto job_error; | ||
1000 | } | ||
1001 | |||
1002 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
1003 | if (dd_data == NULL) { | ||
1004 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
1005 | "2734 Failed allocation of dd_data\n"); | ||
1006 | rc = -ENOMEM; | ||
1007 | goto job_error; | ||
687 | } | 1008 | } |
688 | 1009 | ||
689 | event_req = (struct set_ct_event *) | 1010 | event_req = (struct set_ct_event *) |
690 | job->request->rqst_data.h_vendor.vendor_cmd; | 1011 | job->request->rqst_data.h_vendor.vendor_cmd; |
691 | 1012 | ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & | |
692 | mutex_lock(&phba->ct_event_mutex); | 1013 | FC_REG_EVENT_MASK); |
1014 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
693 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { | 1015 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { |
694 | if (evt->reg_id == event_req->ev_reg_id) { | 1016 | if (evt->reg_id == event_req->ev_reg_id) { |
695 | lpfc_ct_event_ref(evt); | 1017 | lpfc_bsg_event_ref(evt); |
696 | evt->wait_time_stamp = jiffies; | 1018 | evt->wait_time_stamp = jiffies; |
697 | break; | 1019 | break; |
698 | } | 1020 | } |
699 | } | 1021 | } |
700 | mutex_unlock(&phba->ct_event_mutex); | 1022 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
701 | 1023 | ||
702 | if (&evt->node == &phba->ct_ev_waiters) { | 1024 | if (&evt->node == &phba->ct_ev_waiters) { |
703 | /* no event waiting struct yet - first call */ | 1025 | /* no event waiting struct yet - first call */ |
704 | evt = lpfc_ct_event_new(event_req->ev_reg_id, | 1026 | evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, |
705 | event_req->ev_req_id); | 1027 | event_req->ev_req_id); |
706 | if (!evt) { | 1028 | if (!evt) { |
707 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 1029 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
708 | "2617 Failed allocation of event " | 1030 | "2617 Failed allocation of event " |
709 | "waiter\n"); | 1031 | "waiter\n"); |
710 | return -ENOMEM; | 1032 | rc = -ENOMEM; |
1033 | goto job_error; | ||
711 | } | 1034 | } |
712 | 1035 | ||
713 | mutex_lock(&phba->ct_event_mutex); | 1036 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
714 | list_add(&evt->node, &phba->ct_ev_waiters); | 1037 | list_add(&evt->node, &phba->ct_ev_waiters); |
715 | lpfc_ct_event_ref(evt); | 1038 | lpfc_bsg_event_ref(evt); |
716 | mutex_unlock(&phba->ct_event_mutex); | 1039 | evt->wait_time_stamp = jiffies; |
1040 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
717 | } | 1041 | } |
718 | 1042 | ||
1043 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
719 | evt->waiting = 1; | 1044 | evt->waiting = 1; |
720 | if (wait_event_interruptible(evt->wq, | 1045 | dd_data->type = TYPE_EVT; |
721 | !list_empty(&evt->events_to_see))) { | 1046 | dd_data->context_un.evt = evt; |
722 | mutex_lock(&phba->ct_event_mutex); | 1047 | evt->set_job = job; /* for unsolicited command */ |
723 | lpfc_ct_event_unref(evt); /* release ref */ | 1048 | job->dd_data = dd_data; /* for fc transport timeout callback*/ |
724 | lpfc_ct_event_unref(evt); /* delete */ | 1049 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
725 | mutex_unlock(&phba->ct_event_mutex); | 1050 | return 0; /* call job done later */ |
726 | rc = -EINTR; | 1051 | |
727 | goto set_event_out; | 1052 | job_error: |
728 | } | 1053 | if (dd_data != NULL) |
729 | 1054 | kfree(dd_data); | |
730 | evt->wait_time_stamp = jiffies; | 1055 | |
731 | evt->waiting = 0; | 1056 | job->dd_data = NULL; |
732 | 1057 | return rc; | |
733 | mutex_lock(&phba->ct_event_mutex); | ||
734 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
735 | lpfc_ct_event_unref(evt); /* release ref */ | ||
736 | mutex_unlock(&phba->ct_event_mutex); | ||
737 | |||
738 | set_event_out: | ||
739 | /* set_event carries no reply payload */ | ||
740 | job->reply->reply_payload_rcv_len = 0; | ||
741 | /* make error code available to userspace */ | ||
742 | job->reply->result = rc; | ||
743 | /* complete the job back to userspace */ | ||
744 | job->job_done(job); | ||
745 | |||
746 | return 0; | ||
747 | } | 1058 | } |
748 | 1059 | ||
749 | /** | 1060 | /** |
750 | * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command | 1061 | * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command |
751 | * @job: GET_EVENT fc_bsg_job | 1062 | * @job: GET_EVENT fc_bsg_job |
752 | */ | 1063 | **/ |
753 | static int | 1064 | static int |
754 | lpfc_bsg_get_event(struct fc_bsg_job *job) | 1065 | lpfc_bsg_hba_get_event(struct fc_bsg_job *job) |
755 | { | 1066 | { |
756 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | 1067 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; |
757 | struct lpfc_hba *phba = vport->phba; | 1068 | struct lpfc_hba *phba = vport->phba; |
758 | struct get_ct_event *event_req; | 1069 | struct get_ct_event *event_req; |
759 | struct get_ct_event_reply *event_reply; | 1070 | struct get_ct_event_reply *event_reply; |
760 | struct lpfc_ct_event *evt; | 1071 | struct lpfc_bsg_event *evt; |
761 | struct event_data *evt_dat = NULL; | 1072 | struct event_data *evt_dat = NULL; |
762 | int rc = 0; | 1073 | unsigned long flags; |
1074 | uint32_t rc = 0; | ||
763 | 1075 | ||
764 | if (job->request_len < | 1076 | if (job->request_len < |
765 | sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { | 1077 | sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { |
766 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 1078 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
767 | "2613 Received GET_CT_EVENT request below " | 1079 | "2613 Received GET_CT_EVENT request below " |
768 | "minimum size\n"); | 1080 | "minimum size\n"); |
769 | return -EINVAL; | 1081 | rc = -EINVAL; |
1082 | goto job_error; | ||
770 | } | 1083 | } |
771 | 1084 | ||
772 | event_req = (struct get_ct_event *) | 1085 | event_req = (struct get_ct_event *) |
@@ -774,13 +1087,12 @@ lpfc_bsg_get_event(struct fc_bsg_job *job) | |||
774 | 1087 | ||
775 | event_reply = (struct get_ct_event_reply *) | 1088 | event_reply = (struct get_ct_event_reply *) |
776 | job->reply->reply_data.vendor_reply.vendor_rsp; | 1089 | job->reply->reply_data.vendor_reply.vendor_rsp; |
777 | 1090 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | |
778 | mutex_lock(&phba->ct_event_mutex); | ||
779 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { | 1091 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { |
780 | if (evt->reg_id == event_req->ev_reg_id) { | 1092 | if (evt->reg_id == event_req->ev_reg_id) { |
781 | if (list_empty(&evt->events_to_get)) | 1093 | if (list_empty(&evt->events_to_get)) |
782 | break; | 1094 | break; |
783 | lpfc_ct_event_ref(evt); | 1095 | lpfc_bsg_event_ref(evt); |
784 | evt->wait_time_stamp = jiffies; | 1096 | evt->wait_time_stamp = jiffies; |
785 | evt_dat = list_entry(evt->events_to_get.prev, | 1097 | evt_dat = list_entry(evt->events_to_get.prev, |
786 | struct event_data, node); | 1098 | struct event_data, node); |
@@ -788,45 +1100,1539 @@ lpfc_bsg_get_event(struct fc_bsg_job *job) | |||
788 | break; | 1100 | break; |
789 | } | 1101 | } |
790 | } | 1102 | } |
791 | mutex_unlock(&phba->ct_event_mutex); | 1103 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
792 | 1104 | ||
793 | if (!evt_dat) { | 1105 | /* The app may continue to ask for event data until it gets |
1106 | * an error indicating that there isn't anymore | ||
1107 | */ | ||
1108 | if (evt_dat == NULL) { | ||
794 | job->reply->reply_payload_rcv_len = 0; | 1109 | job->reply->reply_payload_rcv_len = 0; |
795 | rc = -ENOENT; | 1110 | rc = -ENOENT; |
796 | goto error_get_event_exit; | 1111 | goto job_error; |
797 | } | 1112 | } |
798 | 1113 | ||
799 | if (evt_dat->len > job->reply_payload.payload_len) { | 1114 | if (evt_dat->len > job->request_payload.payload_len) { |
800 | evt_dat->len = job->reply_payload.payload_len; | 1115 | evt_dat->len = job->request_payload.payload_len; |
801 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 1116 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
802 | "2618 Truncated event data at %d " | 1117 | "2618 Truncated event data at %d " |
803 | "bytes\n", | 1118 | "bytes\n", |
804 | job->reply_payload.payload_len); | 1119 | job->request_payload.payload_len); |
805 | } | 1120 | } |
806 | 1121 | ||
1122 | event_reply->type = evt_dat->type; | ||
807 | event_reply->immed_data = evt_dat->immed_dat; | 1123 | event_reply->immed_data = evt_dat->immed_dat; |
808 | |||
809 | if (evt_dat->len > 0) | 1124 | if (evt_dat->len > 0) |
810 | job->reply->reply_payload_rcv_len = | 1125 | job->reply->reply_payload_rcv_len = |
811 | sg_copy_from_buffer(job->reply_payload.sg_list, | 1126 | sg_copy_from_buffer(job->request_payload.sg_list, |
812 | job->reply_payload.sg_cnt, | 1127 | job->request_payload.sg_cnt, |
813 | evt_dat->data, evt_dat->len); | 1128 | evt_dat->data, evt_dat->len); |
814 | else | 1129 | else |
815 | job->reply->reply_payload_rcv_len = 0; | 1130 | job->reply->reply_payload_rcv_len = 0; |
816 | rc = 0; | ||
817 | 1131 | ||
818 | if (evt_dat) | 1132 | if (evt_dat) { |
819 | kfree(evt_dat->data); | 1133 | kfree(evt_dat->data); |
820 | kfree(evt_dat); | 1134 | kfree(evt_dat); |
821 | mutex_lock(&phba->ct_event_mutex); | 1135 | } |
822 | lpfc_ct_event_unref(evt); | 1136 | |
823 | mutex_unlock(&phba->ct_event_mutex); | 1137 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
1138 | lpfc_bsg_event_unref(evt); | ||
1139 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1140 | job->dd_data = NULL; | ||
1141 | job->reply->result = 0; | ||
1142 | job->job_done(job); | ||
1143 | return 0; | ||
1144 | |||
1145 | job_error: | ||
1146 | job->dd_data = NULL; | ||
1147 | job->reply->result = rc; | ||
1148 | return rc; | ||
1149 | } | ||
1150 | |||
1151 | /** | ||
1152 | * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler | ||
1153 | * @phba: Pointer to HBA context object. | ||
1154 | * @cmdiocbq: Pointer to command iocb. | ||
1155 | * @rspiocbq: Pointer to response iocb. | ||
1156 | * | ||
1157 | * This function is the completion handler for iocbs issued using | ||
1158 | * lpfc_issue_ct_rsp_cmp function. This function is called by the | ||
1159 | * ring event handler function without any lock held. This function | ||
1160 | * can be called from both worker thread context and interrupt | ||
1161 | * context. This function also can be called from other thread which | ||
1162 | * cleans up the SLI layer objects. | ||
1163 | * This function copy the contents of the response iocb to the | ||
1164 | * response iocb memory object provided by the caller of | ||
1165 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
1166 | * sleeps for the iocb completion. | ||
1167 | **/ | ||
1168 | static void | ||
1169 | lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, | ||
1170 | struct lpfc_iocbq *cmdiocbq, | ||
1171 | struct lpfc_iocbq *rspiocbq) | ||
1172 | { | ||
1173 | struct bsg_job_data *dd_data; | ||
1174 | struct fc_bsg_job *job; | ||
1175 | IOCB_t *rsp; | ||
1176 | struct lpfc_dmabuf *bmp; | ||
1177 | struct lpfc_nodelist *ndlp; | ||
1178 | unsigned long flags; | ||
1179 | int rc = 0; | ||
1180 | |||
1181 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
1182 | dd_data = cmdiocbq->context1; | ||
1183 | /* normal completion and timeout crossed paths, already done */ | ||
1184 | if (!dd_data) { | ||
1185 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1186 | return; | ||
1187 | } | ||
824 | 1188 | ||
825 | error_get_event_exit: | 1189 | job = dd_data->context_un.iocb.set_job; |
1190 | bmp = dd_data->context_un.iocb.bmp; | ||
1191 | rsp = &rspiocbq->iocb; | ||
1192 | ndlp = dd_data->context_un.iocb.ndlp; | ||
1193 | |||
1194 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
1195 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1196 | |||
1197 | if (rsp->ulpStatus) { | ||
1198 | if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | ||
1199 | switch (rsp->un.ulpWord[4] & 0xff) { | ||
1200 | case IOERR_SEQUENCE_TIMEOUT: | ||
1201 | rc = -ETIMEDOUT; | ||
1202 | break; | ||
1203 | case IOERR_INVALID_RPI: | ||
1204 | rc = -EFAULT; | ||
1205 | break; | ||
1206 | default: | ||
1207 | rc = -EACCES; | ||
1208 | break; | ||
1209 | } | ||
1210 | } else | ||
1211 | rc = -EACCES; | ||
1212 | } else | ||
1213 | job->reply->reply_payload_rcv_len = | ||
1214 | rsp->un.genreq64.bdl.bdeSize; | ||
1215 | |||
1216 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | ||
1217 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
1218 | lpfc_nlp_put(ndlp); | ||
1219 | kfree(bmp); | ||
1220 | kfree(dd_data); | ||
826 | /* make error code available to userspace */ | 1221 | /* make error code available to userspace */ |
827 | job->reply->result = rc; | 1222 | job->reply->result = rc; |
1223 | job->dd_data = NULL; | ||
828 | /* complete the job back to userspace */ | 1224 | /* complete the job back to userspace */ |
829 | job->job_done(job); | 1225 | job->job_done(job); |
1226 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1227 | return; | ||
1228 | } | ||
1229 | |||
1230 | /** | ||
1231 | * lpfc_issue_ct_rsp - issue a ct response | ||
1232 | * @phba: Pointer to HBA context object. | ||
1233 | * @job: Pointer to the job object. | ||
1234 | * @tag: tag index value into the ports context exchange array. | ||
1235 | * @bmp: Pointer to a dma buffer descriptor. | ||
1236 | * @num_entry: Number of enties in the bde. | ||
1237 | **/ | ||
1238 | static int | ||
1239 | lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | ||
1240 | struct lpfc_dmabuf *bmp, int num_entry) | ||
1241 | { | ||
1242 | IOCB_t *icmd; | ||
1243 | struct lpfc_iocbq *ctiocb = NULL; | ||
1244 | int rc = 0; | ||
1245 | struct lpfc_nodelist *ndlp = NULL; | ||
1246 | struct bsg_job_data *dd_data; | ||
1247 | uint32_t creg_val; | ||
1248 | |||
1249 | /* allocate our bsg tracking structure */ | ||
1250 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
1251 | if (!dd_data) { | ||
1252 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
1253 | "2736 Failed allocation of dd_data\n"); | ||
1254 | rc = -ENOMEM; | ||
1255 | goto no_dd_data; | ||
1256 | } | ||
1257 | |||
1258 | /* Allocate buffer for command iocb */ | ||
1259 | ctiocb = lpfc_sli_get_iocbq(phba); | ||
1260 | if (!ctiocb) { | ||
1261 | rc = ENOMEM; | ||
1262 | goto no_ctiocb; | ||
1263 | } | ||
1264 | |||
1265 | icmd = &ctiocb->iocb; | ||
1266 | icmd->un.xseq64.bdl.ulpIoTag32 = 0; | ||
1267 | icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); | ||
1268 | icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); | ||
1269 | icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
1270 | icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); | ||
1271 | icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); | ||
1272 | icmd->un.xseq64.w5.hcsw.Dfctl = 0; | ||
1273 | icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL; | ||
1274 | icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; | ||
1275 | |||
1276 | /* Fill in rest of iocb */ | ||
1277 | icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; | ||
1278 | icmd->ulpBdeCount = 1; | ||
1279 | icmd->ulpLe = 1; | ||
1280 | icmd->ulpClass = CLASS3; | ||
1281 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
1282 | /* Do not issue unsol response if oxid not marked as valid */ | ||
1283 | if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) { | ||
1284 | rc = IOCB_ERROR; | ||
1285 | goto issue_ct_rsp_exit; | ||
1286 | } | ||
1287 | icmd->ulpContext = phba->ct_ctx[tag].oxid; | ||
1288 | ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); | ||
1289 | if (!ndlp) { | ||
1290 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | ||
1291 | "2721 ndlp null for oxid %x SID %x\n", | ||
1292 | icmd->ulpContext, | ||
1293 | phba->ct_ctx[tag].SID); | ||
1294 | rc = IOCB_ERROR; | ||
1295 | goto issue_ct_rsp_exit; | ||
1296 | } | ||
1297 | icmd->un.ulpWord[3] = ndlp->nlp_rpi; | ||
1298 | /* The exchange is done, mark the entry as invalid */ | ||
1299 | phba->ct_ctx[tag].flags &= ~UNSOL_VALID; | ||
1300 | } else | ||
1301 | icmd->ulpContext = (ushort) tag; | ||
1302 | |||
1303 | icmd->ulpTimeout = phba->fc_ratov * 2; | ||
1304 | |||
1305 | /* Xmit CT response on exchange <xid> */ | ||
1306 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | ||
1307 | "2722 Xmit CT response on exchange x%x Data: x%x x%x\n", | ||
1308 | icmd->ulpContext, icmd->ulpIoTag, phba->link_state); | ||
1309 | |||
1310 | ctiocb->iocb_cmpl = NULL; | ||
1311 | ctiocb->iocb_flag |= LPFC_IO_LIBDFC; | ||
1312 | ctiocb->vport = phba->pport; | ||
1313 | ctiocb->context3 = bmp; | ||
1314 | |||
1315 | ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; | ||
1316 | ctiocb->context1 = dd_data; | ||
1317 | ctiocb->context2 = NULL; | ||
1318 | dd_data->type = TYPE_IOCB; | ||
1319 | dd_data->context_un.iocb.cmdiocbq = ctiocb; | ||
1320 | dd_data->context_un.iocb.rspiocbq = NULL; | ||
1321 | dd_data->context_un.iocb.set_job = job; | ||
1322 | dd_data->context_un.iocb.bmp = bmp; | ||
1323 | dd_data->context_un.iocb.ndlp = ndlp; | ||
1324 | |||
1325 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | ||
1326 | creg_val = readl(phba->HCregaddr); | ||
1327 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | ||
1328 | writel(creg_val, phba->HCregaddr); | ||
1329 | readl(phba->HCregaddr); /* flush */ | ||
1330 | } | ||
1331 | |||
1332 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | ||
1333 | |||
1334 | if (rc == IOCB_SUCCESS) | ||
1335 | return 0; /* done for now */ | ||
1336 | |||
1337 | issue_ct_rsp_exit: | ||
1338 | lpfc_sli_release_iocbq(phba, ctiocb); | ||
1339 | no_ctiocb: | ||
1340 | kfree(dd_data); | ||
1341 | no_dd_data: | ||
1342 | return rc; | ||
1343 | } | ||
1344 | |||
1345 | /** | ||
1346 | * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command | ||
1347 | * @job: SEND_MGMT_RESP fc_bsg_job | ||
1348 | **/ | ||
1349 | static int | ||
1350 | lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) | ||
1351 | { | ||
1352 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
1353 | struct lpfc_hba *phba = vport->phba; | ||
1354 | struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) | ||
1355 | job->request->rqst_data.h_vendor.vendor_cmd; | ||
1356 | struct ulp_bde64 *bpl; | ||
1357 | struct lpfc_dmabuf *bmp = NULL; | ||
1358 | struct scatterlist *sgel = NULL; | ||
1359 | int request_nseg; | ||
1360 | int numbde; | ||
1361 | dma_addr_t busaddr; | ||
1362 | uint32_t tag = mgmt_resp->tag; | ||
1363 | unsigned long reqbfrcnt = | ||
1364 | (unsigned long)job->request_payload.payload_len; | ||
1365 | int rc = 0; | ||
1366 | |||
1367 | /* in case no data is transferred */ | ||
1368 | job->reply->reply_payload_rcv_len = 0; | ||
1369 | |||
1370 | if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) { | ||
1371 | rc = -ERANGE; | ||
1372 | goto send_mgmt_rsp_exit; | ||
1373 | } | ||
1374 | |||
1375 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
1376 | if (!bmp) { | ||
1377 | rc = -ENOMEM; | ||
1378 | goto send_mgmt_rsp_exit; | ||
1379 | } | ||
1380 | |||
1381 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); | ||
1382 | if (!bmp->virt) { | ||
1383 | rc = -ENOMEM; | ||
1384 | goto send_mgmt_rsp_free_bmp; | ||
1385 | } | ||
1386 | |||
1387 | INIT_LIST_HEAD(&bmp->list); | ||
1388 | bpl = (struct ulp_bde64 *) bmp->virt; | ||
1389 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, | ||
1390 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1391 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { | ||
1392 | busaddr = sg_dma_address(sgel); | ||
1393 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
1394 | bpl->tus.f.bdeSize = sg_dma_len(sgel); | ||
1395 | bpl->tus.w = cpu_to_le32(bpl->tus.w); | ||
1396 | bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); | ||
1397 | bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); | ||
1398 | bpl++; | ||
1399 | } | ||
1400 | |||
1401 | rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg); | ||
1402 | |||
1403 | if (rc == IOCB_SUCCESS) | ||
1404 | return 0; /* done for now */ | ||
1405 | |||
1406 | /* TBD need to handle a timeout */ | ||
1407 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
1408 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1409 | rc = -EACCES; | ||
1410 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | ||
1411 | |||
1412 | send_mgmt_rsp_free_bmp: | ||
1413 | kfree(bmp); | ||
1414 | send_mgmt_rsp_exit: | ||
1415 | /* make error code available to userspace */ | ||
1416 | job->reply->result = rc; | ||
1417 | job->dd_data = NULL; | ||
1418 | return rc; | ||
1419 | } | ||
1420 | |||
1421 | /** | ||
1422 | * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command | ||
1423 | * @job: LPFC_BSG_VENDOR_DIAG_MODE | ||
1424 | * | ||
1425 | * This function is responsible for placing a port into diagnostic loopback | ||
1426 | * mode in order to perform a diagnostic loopback test. | ||
1427 | * All new scsi requests are blocked, a small delay is used to allow the | ||
1428 | * scsi requests to complete then the link is brought down. If the link is | ||
1429 | * is placed in loopback mode then scsi requests are again allowed | ||
1430 | * so the scsi mid-layer doesn't give up on the port. | ||
1431 | * All of this is done in-line. | ||
1432 | */ | ||
1433 | static int | ||
1434 | lpfc_bsg_diag_mode(struct fc_bsg_job *job) | ||
1435 | { | ||
1436 | struct Scsi_Host *shost = job->shost; | ||
1437 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
1438 | struct lpfc_hba *phba = vport->phba; | ||
1439 | struct diag_mode_set *loopback_mode; | ||
1440 | struct lpfc_sli *psli = &phba->sli; | ||
1441 | struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; | ||
1442 | uint32_t link_flags; | ||
1443 | uint32_t timeout; | ||
1444 | struct lpfc_vport **vports; | ||
1445 | LPFC_MBOXQ_t *pmboxq; | ||
1446 | int mbxstatus; | ||
1447 | int i = 0; | ||
1448 | int rc = 0; | ||
1449 | |||
1450 | /* no data to return just the return code */ | ||
1451 | job->reply->reply_payload_rcv_len = 0; | ||
1452 | |||
1453 | if (job->request_len < | ||
1454 | sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { | ||
1455 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
1456 | "2738 Received DIAG MODE request below minimum " | ||
1457 | "size\n"); | ||
1458 | rc = -EINVAL; | ||
1459 | goto job_error; | ||
1460 | } | ||
1461 | |||
1462 | loopback_mode = (struct diag_mode_set *) | ||
1463 | job->request->rqst_data.h_vendor.vendor_cmd; | ||
1464 | link_flags = loopback_mode->type; | ||
1465 | timeout = loopback_mode->timeout; | ||
1466 | |||
1467 | if ((phba->link_state == LPFC_HBA_ERROR) || | ||
1468 | (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || | ||
1469 | (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { | ||
1470 | rc = -EACCES; | ||
1471 | goto job_error; | ||
1472 | } | ||
1473 | |||
1474 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1475 | if (!pmboxq) { | ||
1476 | rc = -ENOMEM; | ||
1477 | goto job_error; | ||
1478 | } | ||
1479 | |||
1480 | vports = lpfc_create_vport_work_array(phba); | ||
1481 | if (vports) { | ||
1482 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
1483 | shost = lpfc_shost_from_vport(vports[i]); | ||
1484 | scsi_block_requests(shost); | ||
1485 | } | ||
1486 | |||
1487 | lpfc_destroy_vport_work_array(phba, vports); | ||
1488 | } else { | ||
1489 | shost = lpfc_shost_from_vport(phba->pport); | ||
1490 | scsi_block_requests(shost); | ||
1491 | } | ||
1492 | |||
1493 | while (pring->txcmplq_cnt) { | ||
1494 | if (i++ > 500) /* wait up to 5 seconds */ | ||
1495 | break; | ||
1496 | |||
1497 | msleep(10); | ||
1498 | } | ||
1499 | |||
1500 | memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
1501 | pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; | ||
1502 | pmboxq->u.mb.mbxOwner = OWN_HOST; | ||
1503 | |||
1504 | mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); | ||
1505 | |||
1506 | if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) { | ||
1507 | /* wait for link down before proceeding */ | ||
1508 | i = 0; | ||
1509 | while (phba->link_state != LPFC_LINK_DOWN) { | ||
1510 | if (i++ > timeout) { | ||
1511 | rc = -ETIMEDOUT; | ||
1512 | goto loopback_mode_exit; | ||
1513 | } | ||
1514 | |||
1515 | msleep(10); | ||
1516 | } | ||
1517 | |||
1518 | memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
1519 | if (link_flags == INTERNAL_LOOP_BACK) | ||
1520 | pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB; | ||
1521 | else | ||
1522 | pmboxq->u.mb.un.varInitLnk.link_flags = | ||
1523 | FLAGS_TOPOLOGY_MODE_LOOP; | ||
1524 | |||
1525 | pmboxq->u.mb.mbxCommand = MBX_INIT_LINK; | ||
1526 | pmboxq->u.mb.mbxOwner = OWN_HOST; | ||
1527 | |||
1528 | mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, | ||
1529 | LPFC_MBOX_TMO); | ||
1530 | |||
1531 | if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) | ||
1532 | rc = -ENODEV; | ||
1533 | else { | ||
1534 | phba->link_flag |= LS_LOOPBACK_MODE; | ||
1535 | /* wait for the link attention interrupt */ | ||
1536 | msleep(100); | ||
1537 | |||
1538 | i = 0; | ||
1539 | while (phba->link_state != LPFC_HBA_READY) { | ||
1540 | if (i++ > timeout) { | ||
1541 | rc = -ETIMEDOUT; | ||
1542 | break; | ||
1543 | } | ||
1544 | |||
1545 | msleep(10); | ||
1546 | } | ||
1547 | } | ||
1548 | |||
1549 | } else | ||
1550 | rc = -ENODEV; | ||
1551 | |||
1552 | loopback_mode_exit: | ||
1553 | vports = lpfc_create_vport_work_array(phba); | ||
1554 | if (vports) { | ||
1555 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
1556 | shost = lpfc_shost_from_vport(vports[i]); | ||
1557 | scsi_unblock_requests(shost); | ||
1558 | } | ||
1559 | lpfc_destroy_vport_work_array(phba, vports); | ||
1560 | } else { | ||
1561 | shost = lpfc_shost_from_vport(phba->pport); | ||
1562 | scsi_unblock_requests(shost); | ||
1563 | } | ||
1564 | |||
1565 | /* | ||
1566 | * Let SLI layer release mboxq if mbox command completed after timeout. | ||
1567 | */ | ||
1568 | if (mbxstatus != MBX_TIMEOUT) | ||
1569 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
1570 | |||
1571 | job_error: | ||
1572 | /* make error code available to userspace */ | ||
1573 | job->reply->result = rc; | ||
1574 | /* complete the job back to userspace if no error */ | ||
1575 | if (rc == 0) | ||
1576 | job->job_done(job); | ||
1577 | return rc; | ||
1578 | } | ||
1579 | |||
1580 | /** | ||
1581 | * lpfcdiag_loop_self_reg - obtains a remote port login id | ||
1582 | * @phba: Pointer to HBA context object | ||
1583 | * @rpi: Pointer to a remote port login id | ||
1584 | * | ||
1585 | * This function obtains a remote port login id so the diag loopback test | ||
1586 | * can send and receive its own unsolicited CT command. | ||
1587 | **/ | ||
1588 | static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) | ||
1589 | { | ||
1590 | LPFC_MBOXQ_t *mbox; | ||
1591 | struct lpfc_dmabuf *dmabuff; | ||
1592 | int status; | ||
1593 | |||
1594 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1595 | if (!mbox) | ||
1596 | return ENOMEM; | ||
1597 | |||
1598 | status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, | ||
1599 | (uint8_t *)&phba->pport->fc_sparam, mbox, 0); | ||
1600 | if (status) { | ||
1601 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1602 | return ENOMEM; | ||
1603 | } | ||
1604 | |||
1605 | dmabuff = (struct lpfc_dmabuf *) mbox->context1; | ||
1606 | mbox->context1 = NULL; | ||
1607 | status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); | ||
1608 | |||
1609 | if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { | ||
1610 | lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); | ||
1611 | kfree(dmabuff); | ||
1612 | if (status != MBX_TIMEOUT) | ||
1613 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1614 | return ENODEV; | ||
1615 | } | ||
1616 | |||
1617 | *rpi = mbox->u.mb.un.varWords[0]; | ||
1618 | |||
1619 | lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); | ||
1620 | kfree(dmabuff); | ||
1621 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1622 | return 0; | ||
1623 | } | ||
1624 | |||
1625 | /** | ||
1626 | * lpfcdiag_loop_self_unreg - unregs from the rpi | ||
1627 | * @phba: Pointer to HBA context object | ||
1628 | * @rpi: Remote port login id | ||
1629 | * | ||
1630 | * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg | ||
1631 | **/ | ||
1632 | static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) | ||
1633 | { | ||
1634 | LPFC_MBOXQ_t *mbox; | ||
1635 | int status; | ||
1636 | |||
1637 | /* Allocate mboxq structure */ | ||
1638 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1639 | if (mbox == NULL) | ||
1640 | return ENOMEM; | ||
1641 | |||
1642 | lpfc_unreg_login(phba, 0, rpi, mbox); | ||
1643 | status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); | ||
1644 | |||
1645 | if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { | ||
1646 | if (status != MBX_TIMEOUT) | ||
1647 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1648 | return EIO; | ||
1649 | } | ||
1650 | |||
1651 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1652 | return 0; | ||
1653 | } | ||
1654 | |||
1655 | /** | ||
1656 | * lpfcdiag_loop_get_xri - obtains the transmit and receive ids | ||
1657 | * @phba: Pointer to HBA context object | ||
1658 | * @rpi: Remote port login id | ||
1659 | * @txxri: Pointer to transmit exchange id | ||
1660 | * @rxxri: Pointer to response exchabge id | ||
1661 | * | ||
1662 | * This function obtains the transmit and receive ids required to send | ||
1663 | * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp | ||
1664 | * flags are used to the unsolicted response handler is able to process | ||
1665 | * the ct command sent on the same port. | ||
1666 | **/ | ||
1667 | static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, | ||
1668 | uint16_t *txxri, uint16_t * rxxri) | ||
1669 | { | ||
1670 | struct lpfc_bsg_event *evt; | ||
1671 | struct lpfc_iocbq *cmdiocbq, *rspiocbq; | ||
1672 | IOCB_t *cmd, *rsp; | ||
1673 | struct lpfc_dmabuf *dmabuf; | ||
1674 | struct ulp_bde64 *bpl = NULL; | ||
1675 | struct lpfc_sli_ct_request *ctreq = NULL; | ||
1676 | int ret_val = 0; | ||
1677 | unsigned long flags; | ||
1678 | |||
1679 | *txxri = 0; | ||
1680 | *rxxri = 0; | ||
1681 | evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, | ||
1682 | SLI_CT_ELX_LOOPBACK); | ||
1683 | if (!evt) | ||
1684 | return ENOMEM; | ||
1685 | |||
1686 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
1687 | list_add(&evt->node, &phba->ct_ev_waiters); | ||
1688 | lpfc_bsg_event_ref(evt); | ||
1689 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1690 | |||
1691 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
1692 | rspiocbq = lpfc_sli_get_iocbq(phba); | ||
1693 | |||
1694 | dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
1695 | if (dmabuf) { | ||
1696 | dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); | ||
1697 | INIT_LIST_HEAD(&dmabuf->list); | ||
1698 | bpl = (struct ulp_bde64 *) dmabuf->virt; | ||
1699 | memset(bpl, 0, sizeof(*bpl)); | ||
1700 | ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); | ||
1701 | bpl->addrHigh = | ||
1702 | le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl))); | ||
1703 | bpl->addrLow = | ||
1704 | le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl))); | ||
1705 | bpl->tus.f.bdeFlags = 0; | ||
1706 | bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; | ||
1707 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
1708 | } | ||
1709 | |||
1710 | if (cmdiocbq == NULL || rspiocbq == NULL || | ||
1711 | dmabuf == NULL || bpl == NULL || ctreq == NULL) { | ||
1712 | ret_val = ENOMEM; | ||
1713 | goto err_get_xri_exit; | ||
1714 | } | ||
1715 | |||
1716 | cmd = &cmdiocbq->iocb; | ||
1717 | rsp = &rspiocbq->iocb; | ||
1718 | |||
1719 | memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); | ||
1720 | |||
1721 | ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; | ||
1722 | ctreq->RevisionId.bits.InId = 0; | ||
1723 | ctreq->FsType = SLI_CT_ELX_LOOPBACK; | ||
1724 | ctreq->FsSubType = 0; | ||
1725 | ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; | ||
1726 | ctreq->CommandResponse.bits.Size = 0; | ||
1727 | |||
1728 | |||
1729 | cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys); | ||
1730 | cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys); | ||
1731 | cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
1732 | cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl); | ||
1733 | |||
1734 | cmd->un.xseq64.w5.hcsw.Fctl = LA; | ||
1735 | cmd->un.xseq64.w5.hcsw.Dfctl = 0; | ||
1736 | cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; | ||
1737 | cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; | ||
1738 | |||
1739 | cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; | ||
1740 | cmd->ulpBdeCount = 1; | ||
1741 | cmd->ulpLe = 1; | ||
1742 | cmd->ulpClass = CLASS3; | ||
1743 | cmd->ulpContext = rpi; | ||
1744 | |||
1745 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; | ||
1746 | cmdiocbq->vport = phba->pport; | ||
1747 | |||
1748 | ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, | ||
1749 | rspiocbq, | ||
1750 | (phba->fc_ratov * 2) | ||
1751 | + LPFC_DRVR_TIMEOUT); | ||
1752 | if (ret_val) | ||
1753 | goto err_get_xri_exit; | ||
1754 | |||
1755 | *txxri = rsp->ulpContext; | ||
1756 | |||
1757 | evt->waiting = 1; | ||
1758 | evt->wait_time_stamp = jiffies; | ||
1759 | ret_val = wait_event_interruptible_timeout( | ||
1760 | evt->wq, !list_empty(&evt->events_to_see), | ||
1761 | ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); | ||
1762 | if (list_empty(&evt->events_to_see)) | ||
1763 | ret_val = (ret_val) ? EINTR : ETIMEDOUT; | ||
1764 | else { | ||
1765 | ret_val = IOCB_SUCCESS; | ||
1766 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
1767 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
1768 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1769 | *rxxri = (list_entry(evt->events_to_get.prev, | ||
1770 | typeof(struct event_data), | ||
1771 | node))->immed_dat; | ||
1772 | } | ||
1773 | evt->waiting = 0; | ||
1774 | |||
1775 | err_get_xri_exit: | ||
1776 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
1777 | lpfc_bsg_event_unref(evt); /* release ref */ | ||
1778 | lpfc_bsg_event_unref(evt); /* delete */ | ||
1779 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1780 | |||
1781 | if (dmabuf) { | ||
1782 | if (dmabuf->virt) | ||
1783 | lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); | ||
1784 | kfree(dmabuf); | ||
1785 | } | ||
1786 | |||
1787 | if (cmdiocbq && (ret_val != IOCB_TIMEDOUT)) | ||
1788 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
1789 | if (rspiocbq) | ||
1790 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
1791 | return ret_val; | ||
1792 | } | ||
1793 | |||
1794 | /** | ||
1795 | * diag_cmd_data_alloc - fills in a bde struct with dma buffers | ||
1796 | * @phba: Pointer to HBA context object | ||
1797 | * @bpl: Pointer to 64 bit bde structure | ||
1798 | * @size: Number of bytes to process | ||
1799 | * @nocopydata: Flag to copy user data into the allocated buffer | ||
1800 | * | ||
1801 | * This function allocates page size buffers and populates an lpfc_dmabufext. | ||
1802 | * If allowed the user data pointed to with indataptr is copied into the kernel | ||
1803 | * memory. The chained list of page size buffers is returned. | ||
1804 | **/ | ||
1805 | static struct lpfc_dmabufext * | ||
1806 | diag_cmd_data_alloc(struct lpfc_hba *phba, | ||
1807 | struct ulp_bde64 *bpl, uint32_t size, | ||
1808 | int nocopydata) | ||
1809 | { | ||
1810 | struct lpfc_dmabufext *mlist = NULL; | ||
1811 | struct lpfc_dmabufext *dmp; | ||
1812 | int cnt, offset = 0, i = 0; | ||
1813 | struct pci_dev *pcidev; | ||
1814 | |||
1815 | pcidev = phba->pcidev; | ||
1816 | |||
1817 | while (size) { | ||
1818 | /* We get chunks of 4K */ | ||
1819 | if (size > BUF_SZ_4K) | ||
1820 | cnt = BUF_SZ_4K; | ||
1821 | else | ||
1822 | cnt = size; | ||
1823 | |||
1824 | /* allocate struct lpfc_dmabufext buffer header */ | ||
1825 | dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); | ||
1826 | if (!dmp) | ||
1827 | goto out; | ||
1828 | |||
1829 | INIT_LIST_HEAD(&dmp->dma.list); | ||
1830 | |||
1831 | /* Queue it to a linked list */ | ||
1832 | if (mlist) | ||
1833 | list_add_tail(&dmp->dma.list, &mlist->dma.list); | ||
1834 | else | ||
1835 | mlist = dmp; | ||
1836 | |||
1837 | /* allocate buffer */ | ||
1838 | dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, | ||
1839 | cnt, | ||
1840 | &(dmp->dma.phys), | ||
1841 | GFP_KERNEL); | ||
1842 | |||
1843 | if (!dmp->dma.virt) | ||
1844 | goto out; | ||
1845 | |||
1846 | dmp->size = cnt; | ||
1847 | |||
1848 | if (nocopydata) { | ||
1849 | bpl->tus.f.bdeFlags = 0; | ||
1850 | pci_dma_sync_single_for_device(phba->pcidev, | ||
1851 | dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); | ||
1852 | |||
1853 | } else { | ||
1854 | memset((uint8_t *)dmp->dma.virt, 0, cnt); | ||
1855 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | ||
1856 | } | ||
1857 | |||
1858 | /* build buffer ptr list for IOCB */ | ||
1859 | bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); | ||
1860 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); | ||
1861 | bpl->tus.f.bdeSize = (ushort) cnt; | ||
1862 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
1863 | bpl++; | ||
1864 | |||
1865 | i++; | ||
1866 | offset += cnt; | ||
1867 | size -= cnt; | ||
1868 | } | ||
1869 | |||
1870 | mlist->flag = i; | ||
1871 | return mlist; | ||
1872 | out: | ||
1873 | diag_cmd_data_free(phba, mlist); | ||
1874 | return NULL; | ||
1875 | } | ||
1876 | |||
1877 | /** | ||
1878 | * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd | ||
1879 | * @phba: Pointer to HBA context object | ||
1880 | * @rxxri: Receive exchange id | ||
1881 | * @len: Number of data bytes | ||
1882 | * | ||
1883 | * This function allocates and posts a data buffer of sufficient size to recieve | ||
1884 | * an unsolicted CT command. | ||
1885 | **/ | ||
1886 | static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, | ||
1887 | size_t len) | ||
1888 | { | ||
1889 | struct lpfc_sli *psli = &phba->sli; | ||
1890 | struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; | ||
1891 | struct lpfc_iocbq *cmdiocbq; | ||
1892 | IOCB_t *cmd = NULL; | ||
1893 | struct list_head head, *curr, *next; | ||
1894 | struct lpfc_dmabuf *rxbmp; | ||
1895 | struct lpfc_dmabuf *dmp; | ||
1896 | struct lpfc_dmabuf *mp[2] = {NULL, NULL}; | ||
1897 | struct ulp_bde64 *rxbpl = NULL; | ||
1898 | uint32_t num_bde; | ||
1899 | struct lpfc_dmabufext *rxbuffer = NULL; | ||
1900 | int ret_val = 0; | ||
1901 | int i = 0; | ||
1902 | |||
1903 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
1904 | rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
1905 | if (rxbmp != NULL) { | ||
1906 | rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); | ||
1907 | INIT_LIST_HEAD(&rxbmp->list); | ||
1908 | rxbpl = (struct ulp_bde64 *) rxbmp->virt; | ||
1909 | rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); | ||
1910 | } | ||
1911 | |||
1912 | if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { | ||
1913 | ret_val = ENOMEM; | ||
1914 | goto err_post_rxbufs_exit; | ||
1915 | } | ||
1916 | |||
1917 | /* Queue buffers for the receive exchange */ | ||
1918 | num_bde = (uint32_t)rxbuffer->flag; | ||
1919 | dmp = &rxbuffer->dma; | ||
1920 | |||
1921 | cmd = &cmdiocbq->iocb; | ||
1922 | i = 0; | ||
1923 | |||
1924 | INIT_LIST_HEAD(&head); | ||
1925 | list_add_tail(&head, &dmp->list); | ||
1926 | list_for_each_safe(curr, next, &head) { | ||
1927 | mp[i] = list_entry(curr, struct lpfc_dmabuf, list); | ||
1928 | list_del(curr); | ||
1929 | |||
1930 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | ||
1931 | mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); | ||
1932 | cmd->un.quexri64cx.buff.bde.addrHigh = | ||
1933 | putPaddrHigh(mp[i]->phys); | ||
1934 | cmd->un.quexri64cx.buff.bde.addrLow = | ||
1935 | putPaddrLow(mp[i]->phys); | ||
1936 | cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = | ||
1937 | ((struct lpfc_dmabufext *)mp[i])->size; | ||
1938 | cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; | ||
1939 | cmd->ulpCommand = CMD_QUE_XRI64_CX; | ||
1940 | cmd->ulpPU = 0; | ||
1941 | cmd->ulpLe = 1; | ||
1942 | cmd->ulpBdeCount = 1; | ||
1943 | cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; | ||
1944 | |||
1945 | } else { | ||
1946 | cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); | ||
1947 | cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); | ||
1948 | cmd->un.cont64[i].tus.f.bdeSize = | ||
1949 | ((struct lpfc_dmabufext *)mp[i])->size; | ||
1950 | cmd->ulpBdeCount = ++i; | ||
1951 | |||
1952 | if ((--num_bde > 0) && (i < 2)) | ||
1953 | continue; | ||
1954 | |||
1955 | cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; | ||
1956 | cmd->ulpLe = 1; | ||
1957 | } | ||
1958 | |||
1959 | cmd->ulpClass = CLASS3; | ||
1960 | cmd->ulpContext = rxxri; | ||
1961 | |||
1962 | ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); | ||
1963 | |||
1964 | if (ret_val == IOCB_ERROR) { | ||
1965 | diag_cmd_data_free(phba, | ||
1966 | (struct lpfc_dmabufext *)mp[0]); | ||
1967 | if (mp[1]) | ||
1968 | diag_cmd_data_free(phba, | ||
1969 | (struct lpfc_dmabufext *)mp[1]); | ||
1970 | dmp = list_entry(next, struct lpfc_dmabuf, list); | ||
1971 | ret_val = EIO; | ||
1972 | goto err_post_rxbufs_exit; | ||
1973 | } | ||
1974 | |||
1975 | lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); | ||
1976 | if (mp[1]) { | ||
1977 | lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); | ||
1978 | mp[1] = NULL; | ||
1979 | } | ||
1980 | |||
1981 | /* The iocb was freed by lpfc_sli_issue_iocb */ | ||
1982 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
1983 | if (!cmdiocbq) { | ||
1984 | dmp = list_entry(next, struct lpfc_dmabuf, list); | ||
1985 | ret_val = EIO; | ||
1986 | goto err_post_rxbufs_exit; | ||
1987 | } | ||
1988 | |||
1989 | cmd = &cmdiocbq->iocb; | ||
1990 | i = 0; | ||
1991 | } | ||
1992 | list_del(&head); | ||
1993 | |||
1994 | err_post_rxbufs_exit: | ||
1995 | |||
1996 | if (rxbmp) { | ||
1997 | if (rxbmp->virt) | ||
1998 | lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); | ||
1999 | kfree(rxbmp); | ||
2000 | } | ||
2001 | |||
2002 | if (cmdiocbq) | ||
2003 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
2004 | return ret_val; | ||
2005 | } | ||
2006 | |||
2007 | /** | ||
2008 | * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself | ||
2009 | * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job | ||
2010 | * | ||
2011 | * This function receives a user data buffer to be transmitted and received on | ||
2012 | * the same port, the link must be up and in loopback mode prior | ||
2013 | * to being called. | ||
2014 | * 1. A kernel buffer is allocated to copy the user data into. | ||
2015 | * 2. The port registers with "itself". | ||
2016 | * 3. The transmit and receive exchange ids are obtained. | ||
2017 | * 4. The receive exchange id is posted. | ||
2018 | * 5. A new els loopback event is created. | ||
2019 | * 6. The command and response iocbs are allocated. | ||
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2021 | * | ||
2022 | * This function is meant to be called n times while the port is in loopback | ||
2023 | * so it is the apps responsibility to issue a reset to take the port out | ||
2024 | * of loopback mode. | ||
2025 | **/ | ||
2026 | static int | ||
2027 | lpfc_bsg_diag_test(struct fc_bsg_job *job) | ||
2028 | { | ||
2029 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
2030 | struct lpfc_hba *phba = vport->phba; | ||
2031 | struct diag_mode_test *diag_mode; | ||
2032 | struct lpfc_bsg_event *evt; | ||
2033 | struct event_data *evdat; | ||
2034 | struct lpfc_sli *psli = &phba->sli; | ||
2035 | uint32_t size; | ||
2036 | uint32_t full_size; | ||
2037 | size_t segment_len = 0, segment_offset = 0, current_offset = 0; | ||
2038 | uint16_t rpi; | ||
2039 | struct lpfc_iocbq *cmdiocbq, *rspiocbq; | ||
2040 | IOCB_t *cmd, *rsp; | ||
2041 | struct lpfc_sli_ct_request *ctreq; | ||
2042 | struct lpfc_dmabuf *txbmp; | ||
2043 | struct ulp_bde64 *txbpl = NULL; | ||
2044 | struct lpfc_dmabufext *txbuffer = NULL; | ||
2045 | struct list_head head; | ||
2046 | struct lpfc_dmabuf *curr; | ||
2047 | uint16_t txxri, rxxri; | ||
2048 | uint32_t num_bde; | ||
2049 | uint8_t *ptr = NULL, *rx_databuf = NULL; | ||
2050 | int rc = 0; | ||
2051 | unsigned long flags; | ||
2052 | void *dataout = NULL; | ||
2053 | uint32_t total_mem; | ||
2054 | |||
2055 | /* in case no data is returned return just the return code */ | ||
2056 | job->reply->reply_payload_rcv_len = 0; | ||
2057 | |||
2058 | if (job->request_len < | ||
2059 | sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { | ||
2060 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2061 | "2739 Received DIAG TEST request below minimum " | ||
2062 | "size\n"); | ||
2063 | rc = -EINVAL; | ||
2064 | goto loopback_test_exit; | ||
2065 | } | ||
2066 | |||
2067 | if (job->request_payload.payload_len != | ||
2068 | job->reply_payload.payload_len) { | ||
2069 | rc = -EINVAL; | ||
2070 | goto loopback_test_exit; | ||
2071 | } | ||
2072 | |||
2073 | diag_mode = (struct diag_mode_test *) | ||
2074 | job->request->rqst_data.h_vendor.vendor_cmd; | ||
2075 | |||
2076 | if ((phba->link_state == LPFC_HBA_ERROR) || | ||
2077 | (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || | ||
2078 | (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { | ||
2079 | rc = -EACCES; | ||
2080 | goto loopback_test_exit; | ||
2081 | } | ||
2082 | |||
2083 | if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { | ||
2084 | rc = -EACCES; | ||
2085 | goto loopback_test_exit; | ||
2086 | } | ||
2087 | |||
2088 | size = job->request_payload.payload_len; | ||
2089 | full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ | ||
2090 | |||
2091 | if ((size == 0) || (size > 80 * BUF_SZ_4K)) { | ||
2092 | rc = -ERANGE; | ||
2093 | goto loopback_test_exit; | ||
2094 | } | ||
2095 | |||
2096 | if (size >= BUF_SZ_4K) { | ||
2097 | /* | ||
2098 | * Allocate memory for ioctl data. If buffer is bigger than 64k, | ||
2099 | * then we allocate 64k and re-use that buffer over and over to | ||
2100 | * xfer the whole block. This is because Linux kernel has a | ||
2101 | * problem allocating more than 120k of kernel space memory. Saw | ||
2102 | * problem with GET_FCPTARGETMAPPING... | ||
2103 | */ | ||
2104 | if (size <= (64 * 1024)) | ||
2105 | total_mem = size; | ||
2106 | else | ||
2107 | total_mem = 64 * 1024; | ||
2108 | } else | ||
2109 | /* Allocate memory for ioctl data */ | ||
2110 | total_mem = BUF_SZ_4K; | ||
2111 | |||
2112 | dataout = kmalloc(total_mem, GFP_KERNEL); | ||
2113 | if (dataout == NULL) { | ||
2114 | rc = -ENOMEM; | ||
2115 | goto loopback_test_exit; | ||
2116 | } | ||
2117 | |||
2118 | ptr = dataout; | ||
2119 | ptr += ELX_LOOPBACK_HEADER_SZ; | ||
2120 | sg_copy_to_buffer(job->request_payload.sg_list, | ||
2121 | job->request_payload.sg_cnt, | ||
2122 | ptr, size); | ||
2123 | |||
2124 | rc = lpfcdiag_loop_self_reg(phba, &rpi); | ||
2125 | if (rc) { | ||
2126 | rc = -ENOMEM; | ||
2127 | goto loopback_test_exit; | ||
2128 | } | ||
2129 | |||
2130 | rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); | ||
2131 | if (rc) { | ||
2132 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2133 | rc = -ENOMEM; | ||
2134 | goto loopback_test_exit; | ||
2135 | } | ||
2136 | |||
2137 | rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); | ||
2138 | if (rc) { | ||
2139 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2140 | rc = -ENOMEM; | ||
2141 | goto loopback_test_exit; | ||
2142 | } | ||
2143 | |||
2144 | evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, | ||
2145 | SLI_CT_ELX_LOOPBACK); | ||
2146 | if (!evt) { | ||
2147 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2148 | rc = -ENOMEM; | ||
2149 | goto loopback_test_exit; | ||
2150 | } | ||
2151 | |||
2152 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2153 | list_add(&evt->node, &phba->ct_ev_waiters); | ||
2154 | lpfc_bsg_event_ref(evt); | ||
2155 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2156 | |||
2157 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
2158 | rspiocbq = lpfc_sli_get_iocbq(phba); | ||
2159 | txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
2160 | |||
2161 | if (txbmp) { | ||
2162 | txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); | ||
2163 | INIT_LIST_HEAD(&txbmp->list); | ||
2164 | txbpl = (struct ulp_bde64 *) txbmp->virt; | ||
2165 | if (txbpl) | ||
2166 | txbuffer = diag_cmd_data_alloc(phba, | ||
2167 | txbpl, full_size, 0); | ||
2168 | } | ||
2169 | |||
2170 | if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) { | ||
2171 | rc = -ENOMEM; | ||
2172 | goto err_loopback_test_exit; | ||
2173 | } | ||
2174 | |||
2175 | cmd = &cmdiocbq->iocb; | ||
2176 | rsp = &rspiocbq->iocb; | ||
2177 | |||
2178 | INIT_LIST_HEAD(&head); | ||
2179 | list_add_tail(&head, &txbuffer->dma.list); | ||
2180 | list_for_each_entry(curr, &head, list) { | ||
2181 | segment_len = ((struct lpfc_dmabufext *)curr)->size; | ||
2182 | if (current_offset == 0) { | ||
2183 | ctreq = curr->virt; | ||
2184 | memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); | ||
2185 | ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; | ||
2186 | ctreq->RevisionId.bits.InId = 0; | ||
2187 | ctreq->FsType = SLI_CT_ELX_LOOPBACK; | ||
2188 | ctreq->FsSubType = 0; | ||
2189 | ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; | ||
2190 | ctreq->CommandResponse.bits.Size = size; | ||
2191 | segment_offset = ELX_LOOPBACK_HEADER_SZ; | ||
2192 | } else | ||
2193 | segment_offset = 0; | ||
2194 | |||
2195 | BUG_ON(segment_offset >= segment_len); | ||
2196 | memcpy(curr->virt + segment_offset, | ||
2197 | ptr + current_offset, | ||
2198 | segment_len - segment_offset); | ||
2199 | |||
2200 | current_offset += segment_len - segment_offset; | ||
2201 | BUG_ON(current_offset > size); | ||
2202 | } | ||
2203 | list_del(&head); | ||
2204 | |||
2205 | /* Build the XMIT_SEQUENCE iocb */ | ||
2206 | |||
2207 | num_bde = (uint32_t)txbuffer->flag; | ||
2208 | |||
2209 | cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); | ||
2210 | cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); | ||
2211 | cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
2212 | cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64)); | ||
2213 | |||
2214 | cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); | ||
2215 | cmd->un.xseq64.w5.hcsw.Dfctl = 0; | ||
2216 | cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; | ||
2217 | cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; | ||
2218 | |||
2219 | cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; | ||
2220 | cmd->ulpBdeCount = 1; | ||
2221 | cmd->ulpLe = 1; | ||
2222 | cmd->ulpClass = CLASS3; | ||
2223 | cmd->ulpContext = txxri; | ||
2224 | |||
2225 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; | ||
2226 | cmdiocbq->vport = phba->pport; | ||
2227 | |||
2228 | rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, | ||
2229 | (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); | ||
2230 | |||
2231 | if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { | ||
2232 | rc = -EIO; | ||
2233 | goto err_loopback_test_exit; | ||
2234 | } | ||
2235 | |||
2236 | evt->waiting = 1; | ||
2237 | rc = wait_event_interruptible_timeout( | ||
2238 | evt->wq, !list_empty(&evt->events_to_see), | ||
2239 | ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); | ||
2240 | evt->waiting = 0; | ||
2241 | if (list_empty(&evt->events_to_see)) | ||
2242 | rc = (rc) ? -EINTR : -ETIMEDOUT; | ||
2243 | else { | ||
2244 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2245 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
2246 | evdat = list_entry(evt->events_to_get.prev, | ||
2247 | typeof(*evdat), node); | ||
2248 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2249 | rx_databuf = evdat->data; | ||
2250 | if (evdat->len != full_size) { | ||
2251 | lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, | ||
2252 | "1603 Loopback test did not receive expected " | ||
2253 | "data length. actual length 0x%x expected " | ||
2254 | "length 0x%x\n", | ||
2255 | evdat->len, full_size); | ||
2256 | rc = -EIO; | ||
2257 | } else if (rx_databuf == NULL) | ||
2258 | rc = -EIO; | ||
2259 | else { | ||
2260 | rc = IOCB_SUCCESS; | ||
2261 | /* skip over elx loopback header */ | ||
2262 | rx_databuf += ELX_LOOPBACK_HEADER_SZ; | ||
2263 | job->reply->reply_payload_rcv_len = | ||
2264 | sg_copy_from_buffer(job->reply_payload.sg_list, | ||
2265 | job->reply_payload.sg_cnt, | ||
2266 | rx_databuf, size); | ||
2267 | job->reply->reply_payload_rcv_len = size; | ||
2268 | } | ||
2269 | } | ||
2270 | |||
2271 | err_loopback_test_exit: | ||
2272 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2273 | |||
2274 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2275 | lpfc_bsg_event_unref(evt); /* release ref */ | ||
2276 | lpfc_bsg_event_unref(evt); /* delete */ | ||
2277 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2278 | |||
2279 | if (cmdiocbq != NULL) | ||
2280 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
2281 | |||
2282 | if (rspiocbq != NULL) | ||
2283 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
2284 | |||
2285 | if (txbmp != NULL) { | ||
2286 | if (txbpl != NULL) { | ||
2287 | if (txbuffer != NULL) | ||
2288 | diag_cmd_data_free(phba, txbuffer); | ||
2289 | lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); | ||
2290 | } | ||
2291 | kfree(txbmp); | ||
2292 | } | ||
2293 | |||
2294 | loopback_test_exit: | ||
2295 | kfree(dataout); | ||
2296 | /* make error code available to userspace */ | ||
2297 | job->reply->result = rc; | ||
2298 | job->dd_data = NULL; | ||
2299 | /* complete the job back to userspace if no error */ | ||
2300 | if (rc == 0) | ||
2301 | job->job_done(job); | ||
2302 | return rc; | ||
2303 | } | ||
2304 | |||
2305 | /** | ||
2306 | * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command | ||
2307 | * @job: GET_DFC_REV fc_bsg_job | ||
2308 | **/ | ||
2309 | static int | ||
2310 | lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) | ||
2311 | { | ||
2312 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
2313 | struct lpfc_hba *phba = vport->phba; | ||
2314 | struct get_mgmt_rev *event_req; | ||
2315 | struct get_mgmt_rev_reply *event_reply; | ||
2316 | int rc = 0; | ||
2317 | |||
2318 | if (job->request_len < | ||
2319 | sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { | ||
2320 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2321 | "2740 Received GET_DFC_REV request below " | ||
2322 | "minimum size\n"); | ||
2323 | rc = -EINVAL; | ||
2324 | goto job_error; | ||
2325 | } | ||
2326 | |||
2327 | event_req = (struct get_mgmt_rev *) | ||
2328 | job->request->rqst_data.h_vendor.vendor_cmd; | ||
2329 | |||
2330 | event_reply = (struct get_mgmt_rev_reply *) | ||
2331 | job->reply->reply_data.vendor_reply.vendor_rsp; | ||
2332 | |||
2333 | if (job->reply_len < | ||
2334 | sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { | ||
2335 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2336 | "2741 Received GET_DFC_REV reply below " | ||
2337 | "minimum size\n"); | ||
2338 | rc = -EINVAL; | ||
2339 | goto job_error; | ||
2340 | } | ||
2341 | |||
2342 | event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; | ||
2343 | event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; | ||
2344 | job_error: | ||
2345 | job->reply->result = rc; | ||
2346 | if (rc == 0) | ||
2347 | job->job_done(job); | ||
2348 | return rc; | ||
2349 | } | ||
2350 | |||
2351 | /** | ||
2352 | * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler | ||
2353 | * @phba: Pointer to HBA context object. | ||
2354 | * @pmboxq: Pointer to mailbox command. | ||
2355 | * | ||
2356 | * This is completion handler function for mailbox commands issued from | ||
2357 | * lpfc_bsg_issue_mbox function. This function is called by the | ||
2358 | * mailbox event handler function with no lock held. This function | ||
2359 | * will wake up thread waiting on the wait queue pointed by context1 | ||
2360 | * of the mailbox. | ||
2361 | **/ | ||
2362 | void | ||
2363 | lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | ||
2364 | { | ||
2365 | struct bsg_job_data *dd_data; | ||
2366 | MAILBOX_t *pmb; | ||
2367 | MAILBOX_t *mb; | ||
2368 | struct fc_bsg_job *job; | ||
2369 | uint32_t size; | ||
2370 | unsigned long flags; | ||
2371 | |||
2372 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2373 | dd_data = pmboxq->context1; | ||
2374 | if (!dd_data) { | ||
2375 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2376 | return; | ||
2377 | } | ||
2378 | |||
2379 | pmb = &dd_data->context_un.mbox.pmboxq->u.mb; | ||
2380 | mb = dd_data->context_un.mbox.mb; | ||
2381 | job = dd_data->context_un.mbox.set_job; | ||
2382 | memcpy(mb, pmb, sizeof(*pmb)); | ||
2383 | size = job->request_payload.payload_len; | ||
2384 | job->reply->reply_payload_rcv_len = | ||
2385 | sg_copy_from_buffer(job->reply_payload.sg_list, | ||
2386 | job->reply_payload.sg_cnt, | ||
2387 | mb, size); | ||
2388 | job->reply->result = 0; | ||
2389 | dd_data->context_un.mbox.set_job = NULL; | ||
2390 | job->dd_data = NULL; | ||
2391 | job->job_done(job); | ||
2392 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2393 | mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); | ||
2394 | kfree(mb); | ||
2395 | kfree(dd_data); | ||
2396 | return; | ||
2397 | } | ||
2398 | |||
2399 | /** | ||
2400 | * lpfc_bsg_check_cmd_access - test for a supported mailbox command | ||
2401 | * @phba: Pointer to HBA context object. | ||
2402 | * @mb: Pointer to a mailbox object. | ||
2403 | * @vport: Pointer to a vport object. | ||
2404 | * | ||
2405 | * Some commands require the port to be offline, some may not be called from | ||
2406 | * the application. | ||
2407 | **/ | ||
2408 | static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, | ||
2409 | MAILBOX_t *mb, struct lpfc_vport *vport) | ||
2410 | { | ||
2411 | /* return negative error values for bsg job */ | ||
2412 | switch (mb->mbxCommand) { | ||
2413 | /* Offline only */ | ||
2414 | case MBX_INIT_LINK: | ||
2415 | case MBX_DOWN_LINK: | ||
2416 | case MBX_CONFIG_LINK: | ||
2417 | case MBX_CONFIG_RING: | ||
2418 | case MBX_RESET_RING: | ||
2419 | case MBX_UNREG_LOGIN: | ||
2420 | case MBX_CLEAR_LA: | ||
2421 | case MBX_DUMP_CONTEXT: | ||
2422 | case MBX_RUN_DIAGS: | ||
2423 | case MBX_RESTART: | ||
2424 | case MBX_SET_MASK: | ||
2425 | if (!(vport->fc_flag & FC_OFFLINE_MODE)) { | ||
2426 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2427 | "2743 Command 0x%x is illegal in on-line " | ||
2428 | "state\n", | ||
2429 | mb->mbxCommand); | ||
2430 | return -EPERM; | ||
2431 | } | ||
2432 | case MBX_WRITE_NV: | ||
2433 | case MBX_WRITE_VPARMS: | ||
2434 | case MBX_LOAD_SM: | ||
2435 | case MBX_READ_NV: | ||
2436 | case MBX_READ_CONFIG: | ||
2437 | case MBX_READ_RCONFIG: | ||
2438 | case MBX_READ_STATUS: | ||
2439 | case MBX_READ_XRI: | ||
2440 | case MBX_READ_REV: | ||
2441 | case MBX_READ_LNK_STAT: | ||
2442 | case MBX_DUMP_MEMORY: | ||
2443 | case MBX_DOWN_LOAD: | ||
2444 | case MBX_UPDATE_CFG: | ||
2445 | case MBX_KILL_BOARD: | ||
2446 | case MBX_LOAD_AREA: | ||
2447 | case MBX_LOAD_EXP_ROM: | ||
2448 | case MBX_BEACON: | ||
2449 | case MBX_DEL_LD_ENTRY: | ||
2450 | case MBX_SET_DEBUG: | ||
2451 | case MBX_WRITE_WWN: | ||
2452 | case MBX_SLI4_CONFIG: | ||
2453 | case MBX_READ_EVENT_LOG_STATUS: | ||
2454 | case MBX_WRITE_EVENT_LOG: | ||
2455 | case MBX_PORT_CAPABILITIES: | ||
2456 | case MBX_PORT_IOV_CONTROL: | ||
2457 | break; | ||
2458 | case MBX_SET_VARIABLE: | ||
2459 | case MBX_RUN_BIU_DIAG64: | ||
2460 | case MBX_READ_EVENT_LOG: | ||
2461 | case MBX_READ_SPARM64: | ||
2462 | case MBX_READ_LA: | ||
2463 | case MBX_READ_LA64: | ||
2464 | case MBX_REG_LOGIN: | ||
2465 | case MBX_REG_LOGIN64: | ||
2466 | case MBX_CONFIG_PORT: | ||
2467 | case MBX_RUN_BIU_DIAG: | ||
2468 | default: | ||
2469 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2470 | "2742 Unknown Command 0x%x\n", | ||
2471 | mb->mbxCommand); | ||
2472 | return -EPERM; | ||
2473 | } | ||
2474 | |||
2475 | return 0; /* ok */ | ||
2476 | } | ||
2477 | |||
2478 | /** | ||
2479 | * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app | ||
2480 | * @phba: Pointer to HBA context object. | ||
2481 | * @mb: Pointer to a mailbox object. | ||
2482 | * @vport: Pointer to a vport object. | ||
2483 | * | ||
2484 | * Allocate a tracking object, mailbox command memory, get a mailbox | ||
2485 | * from the mailbox pool, copy the caller mailbox command. | ||
2486 | * | ||
2487 | * If offline and the sli is active we need to poll for the command (port is | ||
2488 | * being reset) and com-plete the job, otherwise issue the mailbox command and | ||
2489 | * let our completion handler finish the command. | ||
2490 | **/ | ||
2491 | static uint32_t | ||
2492 | lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, | ||
2493 | struct lpfc_vport *vport) | ||
2494 | { | ||
2495 | LPFC_MBOXQ_t *pmboxq; | ||
2496 | MAILBOX_t *pmb; | ||
2497 | MAILBOX_t *mb; | ||
2498 | struct bsg_job_data *dd_data; | ||
2499 | uint32_t size; | ||
2500 | int rc = 0; | ||
2501 | |||
2502 | /* allocate our bsg tracking structure */ | ||
2503 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
2504 | if (!dd_data) { | ||
2505 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2506 | "2727 Failed allocation of dd_data\n"); | ||
2507 | return -ENOMEM; | ||
2508 | } | ||
2509 | |||
2510 | mb = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
2511 | if (!mb) { | ||
2512 | kfree(dd_data); | ||
2513 | return -ENOMEM; | ||
2514 | } | ||
2515 | |||
2516 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2517 | if (!pmboxq) { | ||
2518 | kfree(dd_data); | ||
2519 | kfree(mb); | ||
2520 | return -ENOMEM; | ||
2521 | } | ||
2522 | |||
2523 | size = job->request_payload.payload_len; | ||
2524 | job->reply->reply_payload_rcv_len = | ||
2525 | sg_copy_to_buffer(job->request_payload.sg_list, | ||
2526 | job->request_payload.sg_cnt, | ||
2527 | mb, size); | ||
2528 | |||
2529 | rc = lpfc_bsg_check_cmd_access(phba, mb, vport); | ||
2530 | if (rc != 0) { | ||
2531 | kfree(dd_data); | ||
2532 | kfree(mb); | ||
2533 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2534 | return rc; /* must be negative */ | ||
2535 | } | ||
2536 | |||
2537 | memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
2538 | pmb = &pmboxq->u.mb; | ||
2539 | memcpy(pmb, mb, sizeof(*pmb)); | ||
2540 | pmb->mbxOwner = OWN_HOST; | ||
2541 | pmboxq->context1 = NULL; | ||
2542 | pmboxq->vport = vport; | ||
2543 | |||
2544 | if ((vport->fc_flag & FC_OFFLINE_MODE) || | ||
2545 | (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { | ||
2546 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); | ||
2547 | if (rc != MBX_SUCCESS) { | ||
2548 | if (rc != MBX_TIMEOUT) { | ||
2549 | kfree(dd_data); | ||
2550 | kfree(mb); | ||
2551 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2552 | } | ||
2553 | return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; | ||
2554 | } | ||
2555 | |||
2556 | memcpy(mb, pmb, sizeof(*pmb)); | ||
2557 | job->reply->reply_payload_rcv_len = | ||
2558 | sg_copy_from_buffer(job->reply_payload.sg_list, | ||
2559 | job->reply_payload.sg_cnt, | ||
2560 | mb, size); | ||
2561 | kfree(dd_data); | ||
2562 | kfree(mb); | ||
2563 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2564 | /* not waiting mbox already done */ | ||
2565 | return 0; | ||
2566 | } | ||
2567 | |||
2568 | /* setup wake call as IOCB callback */ | ||
2569 | pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; | ||
2570 | /* setup context field to pass wait_queue pointer to wake function */ | ||
2571 | pmboxq->context1 = dd_data; | ||
2572 | dd_data->type = TYPE_MBOX; | ||
2573 | dd_data->context_un.mbox.pmboxq = pmboxq; | ||
2574 | dd_data->context_un.mbox.mb = mb; | ||
2575 | dd_data->context_un.mbox.set_job = job; | ||
2576 | job->dd_data = dd_data; | ||
2577 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); | ||
2578 | if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { | ||
2579 | kfree(dd_data); | ||
2580 | kfree(mb); | ||
2581 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2582 | return -EIO; | ||
2583 | } | ||
2584 | |||
2585 | return 1; | ||
2586 | } | ||
2587 | |||
2588 | /** | ||
2589 | * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command | ||
2590 | * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. | ||
2591 | **/ | ||
2592 | static int | ||
2593 | lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) | ||
2594 | { | ||
2595 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
2596 | struct lpfc_hba *phba = vport->phba; | ||
2597 | int rc = 0; | ||
2598 | |||
2599 | /* in case no data is transferred */ | ||
2600 | job->reply->reply_payload_rcv_len = 0; | ||
2601 | if (job->request_len < | ||
2602 | sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { | ||
2603 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2604 | "2737 Received MBOX_REQ request below " | ||
2605 | "minimum size\n"); | ||
2606 | rc = -EINVAL; | ||
2607 | goto job_error; | ||
2608 | } | ||
2609 | |||
2610 | if (job->request_payload.payload_len != PAGE_SIZE) { | ||
2611 | rc = -EINVAL; | ||
2612 | goto job_error; | ||
2613 | } | ||
2614 | |||
2615 | if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { | ||
2616 | rc = -EAGAIN; | ||
2617 | goto job_error; | ||
2618 | } | ||
2619 | |||
2620 | rc = lpfc_bsg_issue_mbox(phba, job, vport); | ||
2621 | |||
2622 | job_error: | ||
2623 | if (rc == 0) { | ||
2624 | /* job done */ | ||
2625 | job->reply->result = 0; | ||
2626 | job->dd_data = NULL; | ||
2627 | job->job_done(job); | ||
2628 | } else if (rc == 1) | ||
2629 | /* job submitted, will complete later*/ | ||
2630 | rc = 0; /* return zero, no error */ | ||
2631 | else { | ||
2632 | /* some error occurred */ | ||
2633 | job->reply->result = rc; | ||
2634 | job->dd_data = NULL; | ||
2635 | } | ||
830 | 2636 | ||
831 | return rc; | 2637 | return rc; |
832 | } | 2638 | } |
@@ -834,38 +2640,57 @@ error_get_event_exit: | |||
834 | /** | 2640 | /** |
835 | * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job | 2641 | * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job |
836 | * @job: fc_bsg_job to handle | 2642 | * @job: fc_bsg_job to handle |
837 | */ | 2643 | **/ |
838 | static int | 2644 | static int |
839 | lpfc_bsg_hst_vendor(struct fc_bsg_job *job) | 2645 | lpfc_bsg_hst_vendor(struct fc_bsg_job *job) |
840 | { | 2646 | { |
841 | int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; | 2647 | int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; |
2648 | int rc; | ||
842 | 2649 | ||
843 | switch (command) { | 2650 | switch (command) { |
844 | case LPFC_BSG_VENDOR_SET_CT_EVENT: | 2651 | case LPFC_BSG_VENDOR_SET_CT_EVENT: |
845 | return lpfc_bsg_set_event(job); | 2652 | rc = lpfc_bsg_hba_set_event(job); |
846 | break; | 2653 | break; |
847 | |||
848 | case LPFC_BSG_VENDOR_GET_CT_EVENT: | 2654 | case LPFC_BSG_VENDOR_GET_CT_EVENT: |
849 | return lpfc_bsg_get_event(job); | 2655 | rc = lpfc_bsg_hba_get_event(job); |
2656 | break; | ||
2657 | case LPFC_BSG_VENDOR_SEND_MGMT_RESP: | ||
2658 | rc = lpfc_bsg_send_mgmt_rsp(job); | ||
2659 | break; | ||
2660 | case LPFC_BSG_VENDOR_DIAG_MODE: | ||
2661 | rc = lpfc_bsg_diag_mode(job); | ||
2662 | break; | ||
2663 | case LPFC_BSG_VENDOR_DIAG_TEST: | ||
2664 | rc = lpfc_bsg_diag_test(job); | ||
2665 | break; | ||
2666 | case LPFC_BSG_VENDOR_GET_MGMT_REV: | ||
2667 | rc = lpfc_bsg_get_dfc_rev(job); | ||
2668 | break; | ||
2669 | case LPFC_BSG_VENDOR_MBOX: | ||
2670 | rc = lpfc_bsg_mbox_cmd(job); | ||
850 | break; | 2671 | break; |
851 | |||
852 | default: | 2672 | default: |
853 | return -EINVAL; | 2673 | rc = -EINVAL; |
2674 | job->reply->reply_payload_rcv_len = 0; | ||
2675 | /* make error code available to userspace */ | ||
2676 | job->reply->result = rc; | ||
2677 | break; | ||
854 | } | 2678 | } |
2679 | |||
2680 | return rc; | ||
855 | } | 2681 | } |
856 | 2682 | ||
857 | /** | 2683 | /** |
858 | * lpfc_bsg_request - handle a bsg request from the FC transport | 2684 | * lpfc_bsg_request - handle a bsg request from the FC transport |
859 | * @job: fc_bsg_job to handle | 2685 | * @job: fc_bsg_job to handle |
860 | */ | 2686 | **/ |
861 | int | 2687 | int |
862 | lpfc_bsg_request(struct fc_bsg_job *job) | 2688 | lpfc_bsg_request(struct fc_bsg_job *job) |
863 | { | 2689 | { |
864 | uint32_t msgcode; | 2690 | uint32_t msgcode; |
865 | int rc = -EINVAL; | 2691 | int rc; |
866 | 2692 | ||
867 | msgcode = job->request->msgcode; | 2693 | msgcode = job->request->msgcode; |
868 | |||
869 | switch (msgcode) { | 2694 | switch (msgcode) { |
870 | case FC_BSG_HST_VENDOR: | 2695 | case FC_BSG_HST_VENDOR: |
871 | rc = lpfc_bsg_hst_vendor(job); | 2696 | rc = lpfc_bsg_hst_vendor(job); |
@@ -874,9 +2699,13 @@ lpfc_bsg_request(struct fc_bsg_job *job) | |||
874 | rc = lpfc_bsg_rport_els(job); | 2699 | rc = lpfc_bsg_rport_els(job); |
875 | break; | 2700 | break; |
876 | case FC_BSG_RPT_CT: | 2701 | case FC_BSG_RPT_CT: |
877 | rc = lpfc_bsg_rport_ct(job); | 2702 | rc = lpfc_bsg_send_mgmt_cmd(job); |
878 | break; | 2703 | break; |
879 | default: | 2704 | default: |
2705 | rc = -EINVAL; | ||
2706 | job->reply->reply_payload_rcv_len = 0; | ||
2707 | /* make error code available to userspace */ | ||
2708 | job->reply->result = rc; | ||
880 | break; | 2709 | break; |
881 | } | 2710 | } |
882 | 2711 | ||
@@ -889,17 +2718,71 @@ lpfc_bsg_request(struct fc_bsg_job *job) | |||
889 | * | 2718 | * |
890 | * This function just aborts the job's IOCB. The aborted IOCB will return to | 2719 | * This function just aborts the job's IOCB. The aborted IOCB will return to |
891 | * the waiting function which will handle passing the error back to userspace | 2720 | * the waiting function which will handle passing the error back to userspace |
892 | */ | 2721 | **/ |
893 | int | 2722 | int |
894 | lpfc_bsg_timeout(struct fc_bsg_job *job) | 2723 | lpfc_bsg_timeout(struct fc_bsg_job *job) |
895 | { | 2724 | { |
896 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | 2725 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; |
897 | struct lpfc_hba *phba = vport->phba; | 2726 | struct lpfc_hba *phba = vport->phba; |
898 | struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data; | 2727 | struct lpfc_iocbq *cmdiocb; |
2728 | struct lpfc_bsg_event *evt; | ||
2729 | struct lpfc_bsg_iocb *iocb; | ||
2730 | struct lpfc_bsg_mbox *mbox; | ||
899 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; | 2731 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; |
2732 | struct bsg_job_data *dd_data; | ||
2733 | unsigned long flags; | ||
2734 | |||
2735 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2736 | dd_data = (struct bsg_job_data *)job->dd_data; | ||
2737 | /* timeout and completion crossed paths if no dd_data */ | ||
2738 | if (!dd_data) { | ||
2739 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2740 | return 0; | ||
2741 | } | ||
900 | 2742 | ||
901 | if (cmdiocb) | 2743 | switch (dd_data->type) { |
2744 | case TYPE_IOCB: | ||
2745 | iocb = &dd_data->context_un.iocb; | ||
2746 | cmdiocb = iocb->cmdiocbq; | ||
2747 | /* hint to completion handler that the job timed out */ | ||
2748 | job->reply->result = -EAGAIN; | ||
2749 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2750 | /* this will call our completion handler */ | ||
2751 | spin_lock_irq(&phba->hbalock); | ||
902 | lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); | 2752 | lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); |
2753 | spin_unlock_irq(&phba->hbalock); | ||
2754 | break; | ||
2755 | case TYPE_EVT: | ||
2756 | evt = dd_data->context_un.evt; | ||
2757 | /* this event has no job anymore */ | ||
2758 | evt->set_job = NULL; | ||
2759 | job->dd_data = NULL; | ||
2760 | job->reply->reply_payload_rcv_len = 0; | ||
2761 | /* Return -EAGAIN which is our way of signallying the | ||
2762 | * app to retry. | ||
2763 | */ | ||
2764 | job->reply->result = -EAGAIN; | ||
2765 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2766 | job->job_done(job); | ||
2767 | break; | ||
2768 | case TYPE_MBOX: | ||
2769 | mbox = &dd_data->context_un.mbox; | ||
2770 | /* this mbox has no job anymore */ | ||
2771 | mbox->set_job = NULL; | ||
2772 | job->dd_data = NULL; | ||
2773 | job->reply->reply_payload_rcv_len = 0; | ||
2774 | job->reply->result = -EAGAIN; | ||
2775 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2776 | job->job_done(job); | ||
2777 | break; | ||
2778 | default: | ||
2779 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2780 | break; | ||
2781 | } | ||
903 | 2782 | ||
2783 | /* scsi transport fc fc_bsg_job_timeout expects a zero return code, | ||
2784 | * otherwise an error message will be displayed on the console | ||
2785 | * so always return success (zero) | ||
2786 | */ | ||
904 | return 0; | 2787 | return 0; |
905 | } | 2788 | } |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h new file mode 100644 index 000000000000..6c8f87e39b98 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_bsg.h | |||
@@ -0,0 +1,98 @@ | |||
1 | /******************************************************************* | ||
2 | * This file is part of the Emulex Linux Device Driver for * | ||
3 | * Fibre Channel Host Bus Adapters. * | ||
4 | * Copyright (C) 2010 Emulex. All rights reserved. * | ||
5 | * EMULEX and SLI are trademarks of Emulex. * | ||
6 | * www.emulex.com * | ||
7 | * * | ||
8 | * This program is free software; you can redistribute it and/or * | ||
9 | * modify it under the terms of version 2 of the GNU General * | ||
10 | * Public License as published by the Free Software Foundation. * | ||
11 | * This program is distributed in the hope that it will be useful. * | ||
12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
17 | * more details, a copy of which can be found in the file COPYING * | ||
18 | * included with this package. * | ||
19 | *******************************************************************/ | ||
20 | /* bsg definitions | ||
21 | * No pointers to user data are allowed, all application buffers and sizes will | ||
22 | * derived through the bsg interface. | ||
23 | * | ||
24 | * These are the vendor unique structures passed in using the bsg | ||
25 | * FC_BSG_HST_VENDOR message code type. | ||
26 | */ | ||
27 | #define LPFC_BSG_VENDOR_SET_CT_EVENT 1 | ||
28 | #define LPFC_BSG_VENDOR_GET_CT_EVENT 2 | ||
29 | #define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3 | ||
30 | #define LPFC_BSG_VENDOR_DIAG_MODE 4 | ||
31 | #define LPFC_BSG_VENDOR_DIAG_TEST 5 | ||
32 | #define LPFC_BSG_VENDOR_GET_MGMT_REV 6 | ||
33 | #define LPFC_BSG_VENDOR_MBOX 7 | ||
34 | |||
35 | struct set_ct_event { | ||
36 | uint32_t command; | ||
37 | uint32_t type_mask; | ||
38 | uint32_t ev_req_id; | ||
39 | uint32_t ev_reg_id; | ||
40 | }; | ||
41 | |||
42 | struct get_ct_event { | ||
43 | uint32_t command; | ||
44 | uint32_t ev_reg_id; | ||
45 | uint32_t ev_req_id; | ||
46 | }; | ||
47 | |||
48 | struct get_ct_event_reply { | ||
49 | uint32_t immed_data; | ||
50 | uint32_t type; | ||
51 | }; | ||
52 | |||
53 | struct send_mgmt_resp { | ||
54 | uint32_t command; | ||
55 | uint32_t tag; | ||
56 | }; | ||
57 | |||
58 | |||
59 | #define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */ | ||
60 | #define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */ | ||
61 | |||
62 | struct diag_mode_set { | ||
63 | uint32_t command; | ||
64 | uint32_t type; | ||
65 | uint32_t timeout; | ||
66 | }; | ||
67 | |||
68 | struct diag_mode_test { | ||
69 | uint32_t command; | ||
70 | }; | ||
71 | |||
72 | #define LPFC_WWNN_TYPE 0 | ||
73 | #define LPFC_WWPN_TYPE 1 | ||
74 | |||
75 | struct get_mgmt_rev { | ||
76 | uint32_t command; | ||
77 | }; | ||
78 | |||
79 | #define MANAGEMENT_MAJOR_REV 1 | ||
80 | #define MANAGEMENT_MINOR_REV 0 | ||
81 | |||
82 | /* the MgmtRevInfo structure */ | ||
83 | struct MgmtRevInfo { | ||
84 | uint32_t a_Major; | ||
85 | uint32_t a_Minor; | ||
86 | }; | ||
87 | |||
88 | struct get_mgmt_rev_reply { | ||
89 | struct MgmtRevInfo info; | ||
90 | }; | ||
91 | |||
92 | struct dfc_mbox_req { | ||
93 | uint32_t command; | ||
94 | uint32_t inExtWLen; | ||
95 | uint32_t outExtWLen; | ||
96 | uint8_t mbOffset; | ||
97 | }; | ||
98 | |||
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 650494d622c1..6f0fb51eb461 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -44,18 +44,26 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, | |||
44 | void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); | 44 | void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); |
45 | void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); | 45 | void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); |
46 | void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); | 46 | void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); |
47 | void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, | ||
48 | struct lpfc_nodelist *); | ||
47 | void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); | 49 | void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); |
48 | void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); | 50 | void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); |
49 | void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); | 51 | void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); |
52 | void lpfc_supported_pages(struct lpfcMboxq *); | ||
53 | void lpfc_sli4_params(struct lpfcMboxq *); | ||
54 | int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
50 | 55 | ||
51 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); | 56 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); |
52 | void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); | 57 | void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); |
53 | void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); | 58 | void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); |
54 | void lpfc_cleanup_rpis(struct lpfc_vport *, int); | 59 | void lpfc_cleanup_rpis(struct lpfc_vport *, int); |
60 | void lpfc_cleanup_pending_mbox(struct lpfc_vport *); | ||
55 | int lpfc_linkdown(struct lpfc_hba *); | 61 | int lpfc_linkdown(struct lpfc_hba *); |
56 | void lpfc_linkdown_port(struct lpfc_vport *); | 62 | void lpfc_linkdown_port(struct lpfc_vport *); |
57 | void lpfc_port_link_failure(struct lpfc_vport *); | 63 | void lpfc_port_link_failure(struct lpfc_vport *); |
58 | void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); | 64 | void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); |
65 | void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
66 | void lpfc_retry_pport_discovery(struct lpfc_hba *); | ||
59 | 67 | ||
60 | void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 68 | void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
61 | void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); | 69 | void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); |
@@ -73,6 +81,7 @@ void lpfc_set_disctmo(struct lpfc_vport *); | |||
73 | int lpfc_can_disctmo(struct lpfc_vport *); | 81 | int lpfc_can_disctmo(struct lpfc_vport *); |
74 | int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); | 82 | int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); |
75 | void lpfc_unreg_all_rpis(struct lpfc_vport *); | 83 | void lpfc_unreg_all_rpis(struct lpfc_vport *); |
84 | void lpfc_unreg_hba_rpis(struct lpfc_hba *); | ||
76 | void lpfc_unreg_default_rpis(struct lpfc_vport *); | 85 | void lpfc_unreg_default_rpis(struct lpfc_vport *); |
77 | void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); | 86 | void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); |
78 | 87 | ||
@@ -99,7 +108,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *, | |||
99 | 108 | ||
100 | void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); | 109 | void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); |
101 | int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, | 110 | int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, |
102 | struct serv_parm *, uint32_t); | 111 | struct serv_parm *, uint32_t, int); |
103 | int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); | 112 | int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); |
104 | void lpfc_more_plogi(struct lpfc_vport *); | 113 | void lpfc_more_plogi(struct lpfc_vport *); |
105 | void lpfc_more_adisc(struct lpfc_vport *); | 114 | void lpfc_more_adisc(struct lpfc_vport *); |
@@ -197,6 +206,7 @@ void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); | |||
197 | void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); | 206 | void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); |
198 | void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); | 207 | void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); |
199 | int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); | 208 | int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t); |
209 | void lpfc_issue_init_vpi(struct lpfc_vport *); | ||
200 | 210 | ||
201 | void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, | 211 | void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, |
202 | uint32_t , LPFC_MBOXQ_t *); | 212 | uint32_t , LPFC_MBOXQ_t *); |
@@ -206,7 +216,11 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); | |||
206 | void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); | 216 | void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); |
207 | void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, | 217 | void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, |
208 | uint16_t); | 218 | uint16_t); |
219 | void lpfc_unregister_fcf(struct lpfc_hba *); | ||
220 | void lpfc_unregister_fcf_rescan(struct lpfc_hba *); | ||
209 | void lpfc_unregister_unused_fcf(struct lpfc_hba *); | 221 | void lpfc_unregister_unused_fcf(struct lpfc_hba *); |
222 | int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); | ||
223 | void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); | ||
210 | 224 | ||
211 | int lpfc_mem_alloc(struct lpfc_hba *, int align); | 225 | int lpfc_mem_alloc(struct lpfc_hba *, int align); |
212 | void lpfc_mem_free(struct lpfc_hba *); | 226 | void lpfc_mem_free(struct lpfc_hba *); |
@@ -365,6 +379,8 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); | |||
365 | void lpfc_create_static_vport(struct lpfc_hba *); | 379 | void lpfc_create_static_vport(struct lpfc_hba *); |
366 | void lpfc_stop_hba_timers(struct lpfc_hba *); | 380 | void lpfc_stop_hba_timers(struct lpfc_hba *); |
367 | void lpfc_stop_port(struct lpfc_hba *); | 381 | void lpfc_stop_port(struct lpfc_hba *); |
382 | void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *); | ||
383 | void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *); | ||
368 | void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); | 384 | void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); |
369 | int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); | 385 | int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); |
370 | void lpfc_start_fdiscs(struct lpfc_hba *phba); | 386 | void lpfc_start_fdiscs(struct lpfc_hba *phba); |
@@ -378,5 +394,5 @@ struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); | |||
378 | /* functions to support SGIOv4/bsg interface */ | 394 | /* functions to support SGIOv4/bsg interface */ |
379 | int lpfc_bsg_request(struct fc_bsg_job *); | 395 | int lpfc_bsg_request(struct fc_bsg_job *); |
380 | int lpfc_bsg_timeout(struct fc_bsg_job *); | 396 | int lpfc_bsg_timeout(struct fc_bsg_job *); |
381 | void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, | 397 | int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, |
382 | struct lpfc_iocbq *); | 398 | struct lpfc_iocbq *); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 0ebcd9baca79..c7e921973f66 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -97,7 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
97 | struct list_head head; | 97 | struct list_head head; |
98 | struct lpfc_dmabuf *bdeBuf; | 98 | struct lpfc_dmabuf *bdeBuf; |
99 | 99 | ||
100 | lpfc_bsg_ct_unsol_event(phba, pring, piocbq); | 100 | if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0) |
101 | return; | ||
101 | 102 | ||
102 | if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { | 103 | if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { |
103 | lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); | 104 | lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); |
@@ -181,7 +182,8 @@ lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba, | |||
181 | uint32_t size; | 182 | uint32_t size; |
182 | 183 | ||
183 | /* Forward abort event to any process registered to receive ct event */ | 184 | /* Forward abort event to any process registered to receive ct event */ |
184 | lpfc_bsg_ct_unsol_event(phba, pring, piocbq); | 185 | if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0) |
186 | return; | ||
185 | 187 | ||
186 | /* If there is no BDE associated with IOCB, there is nothing to do */ | 188 | /* If there is no BDE associated with IOCB, there is nothing to do */ |
187 | if (icmd->ulpBdeCount == 0) | 189 | if (icmd->ulpBdeCount == 0) |
@@ -1843,12 +1845,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) | |||
1843 | c = (rev & 0x0000ff00) >> 8; | 1845 | c = (rev & 0x0000ff00) >> 8; |
1844 | b4 = (rev & 0x000000ff); | 1846 | b4 = (rev & 0x000000ff); |
1845 | 1847 | ||
1846 | if (flag) | 1848 | sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4); |
1847 | sprintf(fwrevision, "%d.%d%d%c%d ", b1, | ||
1848 | b2, b3, c, b4); | ||
1849 | else | ||
1850 | sprintf(fwrevision, "%d.%d%d%c%d ", b1, | ||
1851 | b2, b3, c, b4); | ||
1852 | } | 1849 | } |
1853 | return; | 1850 | return; |
1854 | } | 1851 | } |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 2cc39684ce97..08b6634cb994 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -50,9 +50,6 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, | |||
50 | struct lpfc_nodelist *ndlp, uint8_t retry); | 50 | struct lpfc_nodelist *ndlp, uint8_t retry); |
51 | static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, | 51 | static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, |
52 | struct lpfc_iocbq *iocb); | 52 | struct lpfc_iocbq *iocb); |
53 | static void lpfc_register_new_vport(struct lpfc_hba *phba, | ||
54 | struct lpfc_vport *vport, | ||
55 | struct lpfc_nodelist *ndlp); | ||
56 | 53 | ||
57 | static int lpfc_max_els_tries = 3; | 54 | static int lpfc_max_els_tries = 3; |
58 | 55 | ||
@@ -592,6 +589,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
592 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 589 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
593 | spin_unlock_irq(shost->host_lock); | 590 | spin_unlock_irq(shost->host_lock); |
594 | } | 591 | } |
592 | /* | ||
593 | * If VPI is unreged, driver need to do INIT_VPI | ||
594 | * before re-registering | ||
595 | */ | ||
596 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
597 | spin_lock_irq(shost->host_lock); | ||
598 | vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; | ||
599 | spin_unlock_irq(shost->host_lock); | ||
600 | } | ||
595 | } | 601 | } |
596 | 602 | ||
597 | if (phba->sli_rev < LPFC_SLI_REV4) { | 603 | if (phba->sli_rev < LPFC_SLI_REV4) { |
@@ -604,10 +610,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
604 | } else { | 610 | } else { |
605 | ndlp->nlp_type |= NLP_FABRIC; | 611 | ndlp->nlp_type |= NLP_FABRIC; |
606 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); | 612 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
607 | if (vport->vpi_state & LPFC_VPI_REGISTERED) { | 613 | if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && |
614 | (vport->vpi_state & LPFC_VPI_REGISTERED)) { | ||
608 | lpfc_start_fdiscs(phba); | 615 | lpfc_start_fdiscs(phba); |
609 | lpfc_do_scr_ns_plogi(phba, vport); | 616 | lpfc_do_scr_ns_plogi(phba, vport); |
610 | } else | 617 | } else if (vport->fc_flag & FC_VFI_REGISTERED) |
618 | lpfc_issue_init_vpi(vport); | ||
619 | else | ||
611 | lpfc_issue_reg_vfi(vport); | 620 | lpfc_issue_reg_vfi(vport); |
612 | } | 621 | } |
613 | return 0; | 622 | return 0; |
@@ -804,6 +813,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
804 | irsp->ulpTimeout); | 813 | irsp->ulpTimeout); |
805 | goto flogifail; | 814 | goto flogifail; |
806 | } | 815 | } |
816 | spin_lock_irq(shost->host_lock); | ||
817 | vport->fc_flag &= ~FC_VPORT_CVL_RCVD; | ||
818 | spin_unlock_irq(shost->host_lock); | ||
807 | 819 | ||
808 | /* | 820 | /* |
809 | * The FLogI succeeded. Sync the data for the CPU before | 821 | * The FLogI succeeded. Sync the data for the CPU before |
@@ -2720,7 +2732,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2720 | if (did == FDMI_DID) | 2732 | if (did == FDMI_DID) |
2721 | retry = 1; | 2733 | retry = 1; |
2722 | 2734 | ||
2723 | if ((cmd == ELS_CMD_FLOGI) && | 2735 | if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) && |
2724 | (phba->fc_topology != TOPOLOGY_LOOP) && | 2736 | (phba->fc_topology != TOPOLOGY_LOOP) && |
2725 | !lpfc_error_lost_link(irsp)) { | 2737 | !lpfc_error_lost_link(irsp)) { |
2726 | /* FLOGI retry policy */ | 2738 | /* FLOGI retry policy */ |
@@ -4385,7 +4397,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
4385 | 4397 | ||
4386 | did = Fabric_DID; | 4398 | did = Fabric_DID; |
4387 | 4399 | ||
4388 | if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) { | 4400 | if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { |
4389 | /* For a FLOGI we accept, then if our portname is greater | 4401 | /* For a FLOGI we accept, then if our portname is greater |
4390 | * then the remote portname we initiate Nport login. | 4402 | * then the remote portname we initiate Nport login. |
4391 | */ | 4403 | */ |
@@ -5915,6 +5927,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
5915 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 5927 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5916 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; | 5928 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; |
5917 | MAILBOX_t *mb = &pmb->u.mb; | 5929 | MAILBOX_t *mb = &pmb->u.mb; |
5930 | int rc; | ||
5918 | 5931 | ||
5919 | spin_lock_irq(shost->host_lock); | 5932 | spin_lock_irq(shost->host_lock); |
5920 | vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; | 5933 | vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; |
@@ -5936,6 +5949,26 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
5936 | spin_unlock_irq(shost->host_lock); | 5949 | spin_unlock_irq(shost->host_lock); |
5937 | lpfc_can_disctmo(vport); | 5950 | lpfc_can_disctmo(vport); |
5938 | break; | 5951 | break; |
5952 | /* If reg_vpi fail with invalid VPI status, re-init VPI */ | ||
5953 | case 0x20: | ||
5954 | spin_lock_irq(shost->host_lock); | ||
5955 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | ||
5956 | spin_unlock_irq(shost->host_lock); | ||
5957 | lpfc_init_vpi(phba, pmb, vport->vpi); | ||
5958 | pmb->vport = vport; | ||
5959 | pmb->mbox_cmpl = lpfc_init_vpi_cmpl; | ||
5960 | rc = lpfc_sli_issue_mbox(phba, pmb, | ||
5961 | MBX_NOWAIT); | ||
5962 | if (rc == MBX_NOT_FINISHED) { | ||
5963 | lpfc_printf_vlog(vport, | ||
5964 | KERN_ERR, LOG_MBOX, | ||
5965 | "2732 Failed to issue INIT_VPI" | ||
5966 | " mailbox command\n"); | ||
5967 | } else { | ||
5968 | lpfc_nlp_put(ndlp); | ||
5969 | return; | ||
5970 | } | ||
5971 | |||
5939 | default: | 5972 | default: |
5940 | /* Try to recover from this error */ | 5973 | /* Try to recover from this error */ |
5941 | lpfc_mbx_unreg_vpi(vport); | 5974 | lpfc_mbx_unreg_vpi(vport); |
@@ -5949,13 +5982,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
5949 | break; | 5982 | break; |
5950 | } | 5983 | } |
5951 | } else { | 5984 | } else { |
5985 | spin_lock_irq(shost->host_lock); | ||
5952 | vport->vpi_state |= LPFC_VPI_REGISTERED; | 5986 | vport->vpi_state |= LPFC_VPI_REGISTERED; |
5953 | if (vport == phba->pport) | 5987 | spin_unlock_irq(shost->host_lock); |
5988 | if (vport == phba->pport) { | ||
5954 | if (phba->sli_rev < LPFC_SLI_REV4) | 5989 | if (phba->sli_rev < LPFC_SLI_REV4) |
5955 | lpfc_issue_fabric_reglogin(vport); | 5990 | lpfc_issue_fabric_reglogin(vport); |
5956 | else | 5991 | else { |
5957 | lpfc_issue_reg_vfi(vport); | 5992 | lpfc_start_fdiscs(phba); |
5958 | else | 5993 | lpfc_do_scr_ns_plogi(phba, vport); |
5994 | } | ||
5995 | } else | ||
5959 | lpfc_do_scr_ns_plogi(phba, vport); | 5996 | lpfc_do_scr_ns_plogi(phba, vport); |
5960 | } | 5997 | } |
5961 | 5998 | ||
@@ -5977,7 +6014,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
5977 | * This routine registers the @vport as a new virtual port with a HBA. | 6014 | * This routine registers the @vport as a new virtual port with a HBA. |
5978 | * It is done through a registering vpi mailbox command. | 6015 | * It is done through a registering vpi mailbox command. |
5979 | **/ | 6016 | **/ |
5980 | static void | 6017 | void |
5981 | lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, | 6018 | lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, |
5982 | struct lpfc_nodelist *ndlp) | 6019 | struct lpfc_nodelist *ndlp) |
5983 | { | 6020 | { |
@@ -6018,6 +6055,78 @@ mbox_err_exit: | |||
6018 | } | 6055 | } |
6019 | 6056 | ||
6020 | /** | 6057 | /** |
6058 | * lpfc_retry_pport_discovery - Start timer to retry FLOGI. | ||
6059 | * @phba: pointer to lpfc hba data structure. | ||
6060 | * | ||
6061 | * This routine abort all pending discovery commands and | ||
6062 | * start a timer to retry FLOGI for the physical port | ||
6063 | * discovery. | ||
6064 | **/ | ||
6065 | void | ||
6066 | lpfc_retry_pport_discovery(struct lpfc_hba *phba) | ||
6067 | { | ||
6068 | struct lpfc_vport **vports; | ||
6069 | struct lpfc_nodelist *ndlp; | ||
6070 | struct Scsi_Host *shost; | ||
6071 | int i; | ||
6072 | uint32_t link_state; | ||
6073 | |||
6074 | /* Treat this failure as linkdown for all vports */ | ||
6075 | link_state = phba->link_state; | ||
6076 | lpfc_linkdown(phba); | ||
6077 | phba->link_state = link_state; | ||
6078 | |||
6079 | vports = lpfc_create_vport_work_array(phba); | ||
6080 | |||
6081 | if (vports) { | ||
6082 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | ||
6083 | ndlp = lpfc_findnode_did(vports[i], Fabric_DID); | ||
6084 | if (ndlp) | ||
6085 | lpfc_cancel_retry_delay_tmo(vports[i], ndlp); | ||
6086 | lpfc_els_flush_cmd(vports[i]); | ||
6087 | } | ||
6088 | lpfc_destroy_vport_work_array(phba, vports); | ||
6089 | } | ||
6090 | |||
6091 | /* If fabric require FLOGI, then re-instantiate physical login */ | ||
6092 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); | ||
6093 | if (!ndlp) | ||
6094 | return; | ||
6095 | |||
6096 | |||
6097 | shost = lpfc_shost_from_vport(phba->pport); | ||
6098 | mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); | ||
6099 | spin_lock_irq(shost->host_lock); | ||
6100 | ndlp->nlp_flag |= NLP_DELAY_TMO; | ||
6101 | spin_unlock_irq(shost->host_lock); | ||
6102 | ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; | ||
6103 | phba->pport->port_state = LPFC_FLOGI; | ||
6104 | return; | ||
6105 | } | ||
6106 | |||
6107 | /** | ||
6108 | * lpfc_fabric_login_reqd - Check if FLOGI required. | ||
6109 | * @phba: pointer to lpfc hba data structure. | ||
6110 | * @cmdiocb: pointer to FDISC command iocb. | ||
6111 | * @rspiocb: pointer to FDISC response iocb. | ||
6112 | * | ||
6113 | * This routine checks if a FLOGI is reguired for FDISC | ||
6114 | * to succeed. | ||
6115 | **/ | ||
6116 | static int | ||
6117 | lpfc_fabric_login_reqd(struct lpfc_hba *phba, | ||
6118 | struct lpfc_iocbq *cmdiocb, | ||
6119 | struct lpfc_iocbq *rspiocb) | ||
6120 | { | ||
6121 | |||
6122 | if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || | ||
6123 | (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) | ||
6124 | return 0; | ||
6125 | else | ||
6126 | return 1; | ||
6127 | } | ||
6128 | |||
6129 | /** | ||
6021 | * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command | 6130 | * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command |
6022 | * @phba: pointer to lpfc hba data structure. | 6131 | * @phba: pointer to lpfc hba data structure. |
6023 | * @cmdiocb: pointer to lpfc command iocb data structure. | 6132 | * @cmdiocb: pointer to lpfc command iocb data structure. |
@@ -6066,6 +6175,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
6066 | irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); | 6175 | irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); |
6067 | 6176 | ||
6068 | if (irsp->ulpStatus) { | 6177 | if (irsp->ulpStatus) { |
6178 | |||
6179 | if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { | ||
6180 | lpfc_retry_pport_discovery(phba); | ||
6181 | goto out; | ||
6182 | } | ||
6183 | |||
6069 | /* Check for retry */ | 6184 | /* Check for retry */ |
6070 | if (lpfc_els_retry(phba, cmdiocb, rspiocb)) | 6185 | if (lpfc_els_retry(phba, cmdiocb, rspiocb)) |
6071 | goto out; | 6186 | goto out; |
@@ -6076,6 +6191,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
6076 | goto fdisc_failed; | 6191 | goto fdisc_failed; |
6077 | } | 6192 | } |
6078 | spin_lock_irq(shost->host_lock); | 6193 | spin_lock_irq(shost->host_lock); |
6194 | vport->fc_flag &= ~FC_VPORT_CVL_RCVD; | ||
6079 | vport->fc_flag |= FC_FABRIC; | 6195 | vport->fc_flag |= FC_FABRIC; |
6080 | if (vport->phba->fc_topology == TOPOLOGY_LOOP) | 6196 | if (vport->phba->fc_topology == TOPOLOGY_LOOP) |
6081 | vport->fc_flag |= FC_PUBLIC_LOOP; | 6197 | vport->fc_flag |= FC_PUBLIC_LOOP; |
@@ -6103,10 +6219,13 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
6103 | lpfc_mbx_unreg_vpi(vport); | 6219 | lpfc_mbx_unreg_vpi(vport); |
6104 | spin_lock_irq(shost->host_lock); | 6220 | spin_lock_irq(shost->host_lock); |
6105 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 6221 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
6222 | vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; | ||
6106 | spin_unlock_irq(shost->host_lock); | 6223 | spin_unlock_irq(shost->host_lock); |
6107 | } | 6224 | } |
6108 | 6225 | ||
6109 | if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) | 6226 | if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) |
6227 | lpfc_issue_init_vpi(vport); | ||
6228 | else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) | ||
6110 | lpfc_register_new_vport(phba, vport, ndlp); | 6229 | lpfc_register_new_vport(phba, vport, ndlp); |
6111 | else | 6230 | else |
6112 | lpfc_do_scr_ns_plogi(phba, vport); | 6231 | lpfc_do_scr_ns_plogi(phba, vport); |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2445e399fd60..2359d0bfb734 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -525,6 +525,8 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
525 | spin_unlock_irq(&phba->hbalock); | 525 | spin_unlock_irq(&phba->hbalock); |
526 | lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); | 526 | lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); |
527 | } | 527 | } |
528 | if (phba->fcf.fcf_flag & FCF_REDISC_EVT) | ||
529 | lpfc_sli4_fcf_redisc_event_proc(phba); | ||
528 | } | 530 | } |
529 | 531 | ||
530 | vports = lpfc_create_vport_work_array(phba); | 532 | vports = lpfc_create_vport_work_array(phba); |
@@ -706,6 +708,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) | |||
706 | void | 708 | void |
707 | lpfc_port_link_failure(struct lpfc_vport *vport) | 709 | lpfc_port_link_failure(struct lpfc_vport *vport) |
708 | { | 710 | { |
711 | lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); | ||
712 | |||
709 | /* Cleanup any outstanding received buffers */ | 713 | /* Cleanup any outstanding received buffers */ |
710 | lpfc_cleanup_rcv_buffers(vport); | 714 | lpfc_cleanup_rcv_buffers(vport); |
711 | 715 | ||
@@ -752,12 +756,14 @@ lpfc_linkdown(struct lpfc_hba *phba) | |||
752 | lpfc_scsi_dev_block(phba); | 756 | lpfc_scsi_dev_block(phba); |
753 | 757 | ||
754 | spin_lock_irq(&phba->hbalock); | 758 | spin_lock_irq(&phba->hbalock); |
755 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); | 759 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); |
760 | spin_unlock_irq(&phba->hbalock); | ||
756 | if (phba->link_state > LPFC_LINK_DOWN) { | 761 | if (phba->link_state > LPFC_LINK_DOWN) { |
757 | phba->link_state = LPFC_LINK_DOWN; | 762 | phba->link_state = LPFC_LINK_DOWN; |
763 | spin_lock_irq(shost->host_lock); | ||
758 | phba->pport->fc_flag &= ~FC_LBIT; | 764 | phba->pport->fc_flag &= ~FC_LBIT; |
765 | spin_unlock_irq(shost->host_lock); | ||
759 | } | 766 | } |
760 | spin_unlock_irq(&phba->hbalock); | ||
761 | vports = lpfc_create_vport_work_array(phba); | 767 | vports = lpfc_create_vport_work_array(phba); |
762 | if (vports != NULL) | 768 | if (vports != NULL) |
763 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 769 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
@@ -1023,7 +1029,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1023 | return; | 1029 | return; |
1024 | } | 1030 | } |
1025 | spin_lock_irqsave(&phba->hbalock, flags); | 1031 | spin_lock_irqsave(&phba->hbalock, flags); |
1026 | phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); | 1032 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); |
1027 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1033 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; |
1028 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1034 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1029 | if (vport->port_state != LPFC_FLOGI) | 1035 | if (vport->port_state != LPFC_FLOGI) |
@@ -1045,25 +1051,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1045 | static uint32_t | 1051 | static uint32_t |
1046 | lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) | 1052 | lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) |
1047 | { | 1053 | { |
1048 | if ((fab_name[0] == | 1054 | if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) |
1049 | bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && | 1055 | return 0; |
1050 | (fab_name[1] == | 1056 | if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) |
1051 | bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && | 1057 | return 0; |
1052 | (fab_name[2] == | 1058 | if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) |
1053 | bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && | ||
1054 | (fab_name[3] == | ||
1055 | bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && | ||
1056 | (fab_name[4] == | ||
1057 | bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && | ||
1058 | (fab_name[5] == | ||
1059 | bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && | ||
1060 | (fab_name[6] == | ||
1061 | bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && | ||
1062 | (fab_name[7] == | ||
1063 | bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))) | ||
1064 | return 1; | ||
1065 | else | ||
1066 | return 0; | 1059 | return 0; |
1060 | if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) | ||
1061 | return 0; | ||
1062 | if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) | ||
1063 | return 0; | ||
1064 | if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) | ||
1065 | return 0; | ||
1066 | if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) | ||
1067 | return 0; | ||
1068 | if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)) | ||
1069 | return 0; | ||
1070 | return 1; | ||
1067 | } | 1071 | } |
1068 | 1072 | ||
1069 | /** | 1073 | /** |
@@ -1078,30 +1082,28 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) | |||
1078 | static uint32_t | 1082 | static uint32_t |
1079 | lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) | 1083 | lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) |
1080 | { | 1084 | { |
1081 | if ((sw_name[0] == | 1085 | if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) |
1082 | bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) && | ||
1083 | (sw_name[1] == | ||
1084 | bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) && | ||
1085 | (sw_name[2] == | ||
1086 | bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) && | ||
1087 | (sw_name[3] == | ||
1088 | bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) && | ||
1089 | (sw_name[4] == | ||
1090 | bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) && | ||
1091 | (sw_name[5] == | ||
1092 | bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) && | ||
1093 | (sw_name[6] == | ||
1094 | bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) && | ||
1095 | (sw_name[7] == | ||
1096 | bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))) | ||
1097 | return 1; | ||
1098 | else | ||
1099 | return 0; | 1086 | return 0; |
1087 | if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) | ||
1088 | return 0; | ||
1089 | if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) | ||
1090 | return 0; | ||
1091 | if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) | ||
1092 | return 0; | ||
1093 | if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) | ||
1094 | return 0; | ||
1095 | if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) | ||
1096 | return 0; | ||
1097 | if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) | ||
1098 | return 0; | ||
1099 | if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)) | ||
1100 | return 0; | ||
1101 | return 1; | ||
1100 | } | 1102 | } |
1101 | 1103 | ||
1102 | /** | 1104 | /** |
1103 | * lpfc_mac_addr_match - Check if the fcf mac address match. | 1105 | * lpfc_mac_addr_match - Check if the fcf mac address match. |
1104 | * @phba: pointer to lpfc hba data structure. | 1106 | * @mac_addr: pointer to mac address. |
1105 | * @new_fcf_record: pointer to fcf record. | 1107 | * @new_fcf_record: pointer to fcf record. |
1106 | * | 1108 | * |
1107 | * This routine compare the fcf record's mac address with HBA's | 1109 | * This routine compare the fcf record's mac address with HBA's |
@@ -1109,85 +1111,115 @@ lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) | |||
1109 | * returns 1 else return 0. | 1111 | * returns 1 else return 0. |
1110 | **/ | 1112 | **/ |
1111 | static uint32_t | 1113 | static uint32_t |
1112 | lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) | 1114 | lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record) |
1113 | { | 1115 | { |
1114 | if ((phba->fcf.mac_addr[0] == | 1116 | if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) |
1115 | bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && | 1117 | return 0; |
1116 | (phba->fcf.mac_addr[1] == | 1118 | if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) |
1117 | bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) && | 1119 | return 0; |
1118 | (phba->fcf.mac_addr[2] == | 1120 | if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) |
1119 | bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) && | ||
1120 | (phba->fcf.mac_addr[3] == | ||
1121 | bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) && | ||
1122 | (phba->fcf.mac_addr[4] == | ||
1123 | bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) && | ||
1124 | (phba->fcf.mac_addr[5] == | ||
1125 | bf_get(lpfc_fcf_record_mac_5, new_fcf_record))) | ||
1126 | return 1; | ||
1127 | else | ||
1128 | return 0; | 1121 | return 0; |
1122 | if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) | ||
1123 | return 0; | ||
1124 | if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) | ||
1125 | return 0; | ||
1126 | if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record)) | ||
1127 | return 0; | ||
1128 | return 1; | ||
1129 | } | ||
1130 | |||
1131 | static bool | ||
1132 | lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id) | ||
1133 | { | ||
1134 | return (curr_vlan_id == new_vlan_id); | ||
1129 | } | 1135 | } |
1130 | 1136 | ||
1131 | /** | 1137 | /** |
1132 | * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. | 1138 | * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. |
1133 | * @phba: pointer to lpfc hba data structure. | 1139 | * @fcf: pointer to driver fcf record. |
1134 | * @new_fcf_record: pointer to fcf record. | 1140 | * @new_fcf_record: pointer to fcf record. |
1135 | * | 1141 | * |
1136 | * This routine copies the FCF information from the FCF | 1142 | * This routine copies the FCF information from the FCF |
1137 | * record to lpfc_hba data structure. | 1143 | * record to lpfc_hba data structure. |
1138 | **/ | 1144 | **/ |
1139 | static void | 1145 | static void |
1140 | lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) | 1146 | lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec, |
1147 | struct fcf_record *new_fcf_record) | ||
1141 | { | 1148 | { |
1142 | phba->fcf.fabric_name[0] = | 1149 | /* Fabric name */ |
1150 | fcf_rec->fabric_name[0] = | ||
1143 | bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); | 1151 | bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); |
1144 | phba->fcf.fabric_name[1] = | 1152 | fcf_rec->fabric_name[1] = |
1145 | bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); | 1153 | bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); |
1146 | phba->fcf.fabric_name[2] = | 1154 | fcf_rec->fabric_name[2] = |
1147 | bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); | 1155 | bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); |
1148 | phba->fcf.fabric_name[3] = | 1156 | fcf_rec->fabric_name[3] = |
1149 | bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); | 1157 | bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); |
1150 | phba->fcf.fabric_name[4] = | 1158 | fcf_rec->fabric_name[4] = |
1151 | bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); | 1159 | bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); |
1152 | phba->fcf.fabric_name[5] = | 1160 | fcf_rec->fabric_name[5] = |
1153 | bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); | 1161 | bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); |
1154 | phba->fcf.fabric_name[6] = | 1162 | fcf_rec->fabric_name[6] = |
1155 | bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); | 1163 | bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); |
1156 | phba->fcf.fabric_name[7] = | 1164 | fcf_rec->fabric_name[7] = |
1157 | bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); | 1165 | bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); |
1158 | phba->fcf.mac_addr[0] = | 1166 | /* Mac address */ |
1159 | bf_get(lpfc_fcf_record_mac_0, new_fcf_record); | 1167 | fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record); |
1160 | phba->fcf.mac_addr[1] = | 1168 | fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record); |
1161 | bf_get(lpfc_fcf_record_mac_1, new_fcf_record); | 1169 | fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record); |
1162 | phba->fcf.mac_addr[2] = | 1170 | fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record); |
1163 | bf_get(lpfc_fcf_record_mac_2, new_fcf_record); | 1171 | fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record); |
1164 | phba->fcf.mac_addr[3] = | 1172 | fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record); |
1165 | bf_get(lpfc_fcf_record_mac_3, new_fcf_record); | 1173 | /* FCF record index */ |
1166 | phba->fcf.mac_addr[4] = | 1174 | fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); |
1167 | bf_get(lpfc_fcf_record_mac_4, new_fcf_record); | 1175 | /* FCF record priority */ |
1168 | phba->fcf.mac_addr[5] = | 1176 | fcf_rec->priority = new_fcf_record->fip_priority; |
1169 | bf_get(lpfc_fcf_record_mac_5, new_fcf_record); | 1177 | /* Switch name */ |
1170 | phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); | 1178 | fcf_rec->switch_name[0] = |
1171 | phba->fcf.priority = new_fcf_record->fip_priority; | ||
1172 | phba->fcf.switch_name[0] = | ||
1173 | bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); | 1179 | bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); |
1174 | phba->fcf.switch_name[1] = | 1180 | fcf_rec->switch_name[1] = |
1175 | bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); | 1181 | bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); |
1176 | phba->fcf.switch_name[2] = | 1182 | fcf_rec->switch_name[2] = |
1177 | bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); | 1183 | bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); |
1178 | phba->fcf.switch_name[3] = | 1184 | fcf_rec->switch_name[3] = |
1179 | bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); | 1185 | bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); |
1180 | phba->fcf.switch_name[4] = | 1186 | fcf_rec->switch_name[4] = |
1181 | bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); | 1187 | bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); |
1182 | phba->fcf.switch_name[5] = | 1188 | fcf_rec->switch_name[5] = |
1183 | bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); | 1189 | bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); |
1184 | phba->fcf.switch_name[6] = | 1190 | fcf_rec->switch_name[6] = |
1185 | bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); | 1191 | bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); |
1186 | phba->fcf.switch_name[7] = | 1192 | fcf_rec->switch_name[7] = |
1187 | bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); | 1193 | bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); |
1188 | } | 1194 | } |
1189 | 1195 | ||
1190 | /** | 1196 | /** |
1197 | * lpfc_update_fcf_record - Update driver fcf record | ||
1198 | * @phba: pointer to lpfc hba data structure. | ||
1199 | * @fcf_rec: pointer to driver fcf record. | ||
1200 | * @new_fcf_record: pointer to hba fcf record. | ||
1201 | * @addr_mode: address mode to be set to the driver fcf record. | ||
1202 | * @vlan_id: vlan tag to be set to the driver fcf record. | ||
1203 | * @flag: flag bits to be set to the driver fcf record. | ||
1204 | * | ||
1205 | * This routine updates the driver FCF record from the new HBA FCF record | ||
1206 | * together with the address mode, vlan_id, and other informations. This | ||
1207 | * routine is called with the host lock held. | ||
1208 | **/ | ||
1209 | static void | ||
1210 | __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, | ||
1211 | struct fcf_record *new_fcf_record, uint32_t addr_mode, | ||
1212 | uint16_t vlan_id, uint32_t flag) | ||
1213 | { | ||
1214 | /* Copy the fields from the HBA's FCF record */ | ||
1215 | lpfc_copy_fcf_record(fcf_rec, new_fcf_record); | ||
1216 | /* Update other fields of driver FCF record */ | ||
1217 | fcf_rec->addr_mode = addr_mode; | ||
1218 | fcf_rec->vlan_id = vlan_id; | ||
1219 | fcf_rec->flag |= (flag | RECORD_VALID); | ||
1220 | } | ||
1221 | |||
1222 | /** | ||
1191 | * lpfc_register_fcf - Register the FCF with hba. | 1223 | * lpfc_register_fcf - Register the FCF with hba. |
1192 | * @phba: pointer to lpfc hba data structure. | 1224 | * @phba: pointer to lpfc hba data structure. |
1193 | * | 1225 | * |
@@ -1212,7 +1244,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
1212 | 1244 | ||
1213 | /* The FCF is already registered, start discovery */ | 1245 | /* The FCF is already registered, start discovery */ |
1214 | if (phba->fcf.fcf_flag & FCF_REGISTERED) { | 1246 | if (phba->fcf.fcf_flag & FCF_REGISTERED) { |
1215 | phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); | 1247 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); |
1216 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1248 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; |
1217 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1249 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1218 | if (phba->pport->port_state != LPFC_FLOGI) | 1250 | if (phba->pport->port_state != LPFC_FLOGI) |
@@ -1250,6 +1282,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
1250 | * @new_fcf_record: pointer to fcf record. | 1282 | * @new_fcf_record: pointer to fcf record. |
1251 | * @boot_flag: Indicates if this record used by boot bios. | 1283 | * @boot_flag: Indicates if this record used by boot bios. |
1252 | * @addr_mode: The address mode to be used by this FCF | 1284 | * @addr_mode: The address mode to be used by this FCF |
1285 | * @vlan_id: The vlan id to be used as vlan tagging by this FCF. | ||
1253 | * | 1286 | * |
1254 | * This routine compare the fcf record with connect list obtained from the | 1287 | * This routine compare the fcf record with connect list obtained from the |
1255 | * config region to decide if this FCF can be used for SAN discovery. It returns | 1288 | * config region to decide if this FCF can be used for SAN discovery. It returns |
@@ -1323,7 +1356,8 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, | |||
1323 | return 1; | 1356 | return 1; |
1324 | } | 1357 | } |
1325 | 1358 | ||
1326 | list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { | 1359 | list_for_each_entry(conn_entry, |
1360 | &phba->fcf_conn_rec_list, list) { | ||
1327 | if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) | 1361 | if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) |
1328 | continue; | 1362 | continue; |
1329 | 1363 | ||
@@ -1470,6 +1504,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) | |||
1470 | */ | 1504 | */ |
1471 | spin_lock_irq(&phba->hbalock); | 1505 | spin_lock_irq(&phba->hbalock); |
1472 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1506 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; |
1507 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; | ||
1473 | spin_unlock_irq(&phba->hbalock); | 1508 | spin_unlock_irq(&phba->hbalock); |
1474 | } | 1509 | } |
1475 | 1510 | ||
@@ -1524,11 +1559,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1524 | uint32_t shdr_status, shdr_add_status; | 1559 | uint32_t shdr_status, shdr_add_status; |
1525 | union lpfc_sli4_cfg_shdr *shdr; | 1560 | union lpfc_sli4_cfg_shdr *shdr; |
1526 | struct fcf_record *new_fcf_record; | 1561 | struct fcf_record *new_fcf_record; |
1527 | int rc; | ||
1528 | uint32_t boot_flag, addr_mode; | 1562 | uint32_t boot_flag, addr_mode; |
1529 | uint32_t next_fcf_index; | 1563 | uint32_t next_fcf_index; |
1530 | unsigned long flags; | 1564 | struct lpfc_fcf_rec *fcf_rec = NULL; |
1565 | unsigned long iflags; | ||
1531 | uint16_t vlan_id; | 1566 | uint16_t vlan_id; |
1567 | int rc; | ||
1532 | 1568 | ||
1533 | /* If there is pending FCoE event restart FCF table scan */ | 1569 | /* If there is pending FCoE event restart FCF table scan */ |
1534 | if (lpfc_check_pending_fcoe_event(phba, 0)) { | 1570 | if (lpfc_check_pending_fcoe_event(phba, 0)) { |
@@ -1583,9 +1619,8 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1583 | sizeof(struct fcf_record)); | 1619 | sizeof(struct fcf_record)); |
1584 | bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); | 1620 | bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); |
1585 | 1621 | ||
1586 | rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, | 1622 | rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, |
1587 | &boot_flag, &addr_mode, | 1623 | &addr_mode, &vlan_id); |
1588 | &vlan_id); | ||
1589 | /* | 1624 | /* |
1590 | * If the fcf record does not match with connect list entries | 1625 | * If the fcf record does not match with connect list entries |
1591 | * read the next entry. | 1626 | * read the next entry. |
@@ -1594,90 +1629,159 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1594 | goto read_next_fcf; | 1629 | goto read_next_fcf; |
1595 | /* | 1630 | /* |
1596 | * If this is not the first FCF discovery of the HBA, use last | 1631 | * If this is not the first FCF discovery of the HBA, use last |
1597 | * FCF record for the discovery. | 1632 | * FCF record for the discovery. The condition that a rescan |
1633 | * matches the in-use FCF record: fabric name, switch name, mac | ||
1634 | * address, and vlan_id. | ||
1598 | */ | 1635 | */ |
1599 | spin_lock_irqsave(&phba->hbalock, flags); | 1636 | spin_lock_irqsave(&phba->hbalock, iflags); |
1600 | if (phba->fcf.fcf_flag & FCF_IN_USE) { | 1637 | if (phba->fcf.fcf_flag & FCF_IN_USE) { |
1601 | if (lpfc_fab_name_match(phba->fcf.fabric_name, | 1638 | if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, |
1602 | new_fcf_record) && | 1639 | new_fcf_record) && |
1603 | lpfc_sw_name_match(phba->fcf.switch_name, | 1640 | lpfc_sw_name_match(phba->fcf.current_rec.switch_name, |
1604 | new_fcf_record) && | 1641 | new_fcf_record) && |
1605 | lpfc_mac_addr_match(phba, new_fcf_record)) { | 1642 | lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr, |
1643 | new_fcf_record) && | ||
1644 | lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id, | ||
1645 | vlan_id)) { | ||
1606 | phba->fcf.fcf_flag |= FCF_AVAILABLE; | 1646 | phba->fcf.fcf_flag |= FCF_AVAILABLE; |
1607 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1647 | if (phba->fcf.fcf_flag & FCF_REDISC_PEND) |
1648 | /* Stop FCF redisc wait timer if pending */ | ||
1649 | __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); | ||
1650 | else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) | ||
1651 | /* If in fast failover, mark it's completed */ | ||
1652 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; | ||
1653 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
1608 | goto out; | 1654 | goto out; |
1609 | } | 1655 | } |
1610 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1656 | /* |
1611 | goto read_next_fcf; | 1657 | * Read next FCF record from HBA searching for the matching |
1658 | * with in-use record only if not during the fast failover | ||
1659 | * period. In case of fast failover period, it shall try to | ||
1660 | * determine whether the FCF record just read should be the | ||
1661 | * next candidate. | ||
1662 | */ | ||
1663 | if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { | ||
1664 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
1665 | goto read_next_fcf; | ||
1666 | } | ||
1612 | } | 1667 | } |
1668 | /* | ||
1669 | * Update on failover FCF record only if it's in FCF fast-failover | ||
1670 | * period; otherwise, update on current FCF record. | ||
1671 | */ | ||
1672 | if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { | ||
1673 | /* Fast FCF failover only to the same fabric name */ | ||
1674 | if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, | ||
1675 | new_fcf_record)) | ||
1676 | fcf_rec = &phba->fcf.failover_rec; | ||
1677 | else | ||
1678 | goto read_next_fcf; | ||
1679 | } else | ||
1680 | fcf_rec = &phba->fcf.current_rec; | ||
1681 | |||
1613 | if (phba->fcf.fcf_flag & FCF_AVAILABLE) { | 1682 | if (phba->fcf.fcf_flag & FCF_AVAILABLE) { |
1614 | /* | 1683 | /* |
1615 | * If the current FCF record does not have boot flag | 1684 | * If the driver FCF record does not have boot flag |
1616 | * set and new fcf record has boot flag set, use the | 1685 | * set and new hba fcf record has boot flag set, use |
1617 | * new fcf record. | 1686 | * the new hba fcf record. |
1618 | */ | 1687 | */ |
1619 | if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { | 1688 | if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { |
1620 | /* Use this FCF record */ | 1689 | /* Choose this FCF record */ |
1621 | lpfc_copy_fcf_record(phba, new_fcf_record); | 1690 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
1622 | phba->fcf.addr_mode = addr_mode; | 1691 | addr_mode, vlan_id, BOOT_ENABLE); |
1623 | phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; | 1692 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
1624 | if (vlan_id != 0xFFFF) { | ||
1625 | phba->fcf.fcf_flag |= FCF_VALID_VLAN; | ||
1626 | phba->fcf.vlan_id = vlan_id; | ||
1627 | } | ||
1628 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1629 | goto read_next_fcf; | 1693 | goto read_next_fcf; |
1630 | } | 1694 | } |
1631 | /* | 1695 | /* |
1632 | * If the current FCF record has boot flag set and the | 1696 | * If the driver FCF record has boot flag set and the |
1633 | * new FCF record does not have boot flag, read the next | 1697 | * new hba FCF record does not have boot flag, read |
1634 | * FCF record. | 1698 | * the next FCF record. |
1635 | */ | 1699 | */ |
1636 | if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { | 1700 | if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { |
1637 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1701 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
1638 | goto read_next_fcf; | 1702 | goto read_next_fcf; |
1639 | } | 1703 | } |
1640 | /* | 1704 | /* |
1641 | * If there is a record with lower priority value for | 1705 | * If the new hba FCF record has lower priority value |
1642 | * the current FCF, use that record. | 1706 | * than the driver FCF record, use the new record. |
1643 | */ | 1707 | */ |
1644 | if (lpfc_fab_name_match(phba->fcf.fabric_name, | 1708 | if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) && |
1645 | new_fcf_record) && | 1709 | (new_fcf_record->fip_priority < fcf_rec->priority)) { |
1646 | (new_fcf_record->fip_priority < phba->fcf.priority)) { | 1710 | /* Choose this FCF record */ |
1647 | /* Use this FCF record */ | 1711 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
1648 | lpfc_copy_fcf_record(phba, new_fcf_record); | 1712 | addr_mode, vlan_id, 0); |
1649 | phba->fcf.addr_mode = addr_mode; | ||
1650 | if (vlan_id != 0xFFFF) { | ||
1651 | phba->fcf.fcf_flag |= FCF_VALID_VLAN; | ||
1652 | phba->fcf.vlan_id = vlan_id; | ||
1653 | } | ||
1654 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1655 | goto read_next_fcf; | ||
1656 | } | 1713 | } |
1657 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1714 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
1658 | goto read_next_fcf; | 1715 | goto read_next_fcf; |
1659 | } | 1716 | } |
1660 | /* | 1717 | /* |
1661 | * This is the first available FCF record, use this | 1718 | * This is the first suitable FCF record, choose this record for |
1662 | * record. | 1719 | * initial best-fit FCF. |
1663 | */ | 1720 | */ |
1664 | lpfc_copy_fcf_record(phba, new_fcf_record); | 1721 | if (fcf_rec) { |
1665 | phba->fcf.addr_mode = addr_mode; | 1722 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
1666 | if (boot_flag) | 1723 | addr_mode, vlan_id, (boot_flag ? |
1667 | phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; | 1724 | BOOT_ENABLE : 0)); |
1668 | phba->fcf.fcf_flag |= FCF_AVAILABLE; | 1725 | phba->fcf.fcf_flag |= FCF_AVAILABLE; |
1669 | if (vlan_id != 0xFFFF) { | ||
1670 | phba->fcf.fcf_flag |= FCF_VALID_VLAN; | ||
1671 | phba->fcf.vlan_id = vlan_id; | ||
1672 | } | 1726 | } |
1673 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1727 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
1674 | goto read_next_fcf; | 1728 | goto read_next_fcf; |
1675 | 1729 | ||
1676 | read_next_fcf: | 1730 | read_next_fcf: |
1677 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 1731 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
1678 | if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) | 1732 | if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { |
1679 | lpfc_register_fcf(phba); | 1733 | if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { |
1680 | else | 1734 | /* |
1735 | * Case of FCF fast failover scan | ||
1736 | */ | ||
1737 | |||
1738 | /* | ||
1739 | * It has not found any suitable FCF record, cancel | ||
1740 | * FCF scan inprogress, and do nothing | ||
1741 | */ | ||
1742 | if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { | ||
1743 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
1744 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
1745 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
1746 | return; | ||
1747 | } | ||
1748 | /* | ||
1749 | * It has found a suitable FCF record that is not | ||
1750 | * the same as in-use FCF record, unregister the | ||
1751 | * in-use FCF record, replace the in-use FCF record | ||
1752 | * with the new FCF record, mark FCF fast failover | ||
1753 | * completed, and then start register the new FCF | ||
1754 | * record. | ||
1755 | */ | ||
1756 | |||
1757 | /* unregister the current in-use FCF record */ | ||
1758 | lpfc_unregister_fcf(phba); | ||
1759 | /* replace in-use record with the new record */ | ||
1760 | memcpy(&phba->fcf.current_rec, | ||
1761 | &phba->fcf.failover_rec, | ||
1762 | sizeof(struct lpfc_fcf_rec)); | ||
1763 | /* mark the FCF fast failover completed */ | ||
1764 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
1765 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; | ||
1766 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
1767 | /* Register to the new FCF record */ | ||
1768 | lpfc_register_fcf(phba); | ||
1769 | } else { | ||
1770 | /* | ||
1771 | * In case of transaction period to fast FCF failover, | ||
1772 | * do nothing when search to the end of the FCF table. | ||
1773 | */ | ||
1774 | if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || | ||
1775 | (phba->fcf.fcf_flag & FCF_REDISC_PEND)) | ||
1776 | return; | ||
1777 | /* | ||
1778 | * Otherwise, initial scan or post linkdown rescan, | ||
1779 | * register with the best fit FCF record found so | ||
1780 | * far through the scanning process. | ||
1781 | */ | ||
1782 | lpfc_register_fcf(phba); | ||
1783 | } | ||
1784 | } else | ||
1681 | lpfc_sli4_read_fcf_record(phba, next_fcf_index); | 1785 | lpfc_sli4_read_fcf_record(phba, next_fcf_index); |
1682 | return; | 1786 | return; |
1683 | 1787 | ||
@@ -1695,10 +1799,13 @@ out: | |||
1695 | * | 1799 | * |
1696 | * This function handles completion of init vpi mailbox command. | 1800 | * This function handles completion of init vpi mailbox command. |
1697 | */ | 1801 | */ |
1698 | static void | 1802 | void |
1699 | lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | 1803 | lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
1700 | { | 1804 | { |
1701 | struct lpfc_vport *vport = mboxq->vport; | 1805 | struct lpfc_vport *vport = mboxq->vport; |
1806 | struct lpfc_nodelist *ndlp; | ||
1807 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1808 | |||
1702 | if (mboxq->u.mb.mbxStatus) { | 1809 | if (mboxq->u.mb.mbxStatus) { |
1703 | lpfc_printf_vlog(vport, KERN_ERR, | 1810 | lpfc_printf_vlog(vport, KERN_ERR, |
1704 | LOG_MBOX, | 1811 | LOG_MBOX, |
@@ -1708,9 +1815,23 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1708 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 1815 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
1709 | return; | 1816 | return; |
1710 | } | 1817 | } |
1711 | spin_lock_irq(&phba->hbalock); | 1818 | spin_lock_irq(shost->host_lock); |
1712 | vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; | 1819 | vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; |
1713 | spin_unlock_irq(&phba->hbalock); | 1820 | spin_unlock_irq(shost->host_lock); |
1821 | |||
1822 | /* If this port is physical port or FDISC is done, do reg_vpi */ | ||
1823 | if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { | ||
1824 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | ||
1825 | if (!ndlp) | ||
1826 | lpfc_printf_vlog(vport, KERN_ERR, | ||
1827 | LOG_DISCOVERY, | ||
1828 | "2731 Cannot find fabric " | ||
1829 | "controller node\n"); | ||
1830 | else | ||
1831 | lpfc_register_new_vport(phba, vport, ndlp); | ||
1832 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
1833 | return; | ||
1834 | } | ||
1714 | 1835 | ||
1715 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) | 1836 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) |
1716 | lpfc_initial_fdisc(vport); | 1837 | lpfc_initial_fdisc(vport); |
@@ -1719,10 +1840,42 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1719 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 1840 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
1720 | "2606 No NPIV Fabric support\n"); | 1841 | "2606 No NPIV Fabric support\n"); |
1721 | } | 1842 | } |
1843 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
1722 | return; | 1844 | return; |
1723 | } | 1845 | } |
1724 | 1846 | ||
1725 | /** | 1847 | /** |
1848 | * lpfc_issue_init_vpi - Issue init_vpi mailbox command. | ||
1849 | * @vport: pointer to lpfc_vport data structure. | ||
1850 | * | ||
1851 | * This function issue a init_vpi mailbox command to initialize | ||
1852 | * VPI for the vport. | ||
1853 | */ | ||
1854 | void | ||
1855 | lpfc_issue_init_vpi(struct lpfc_vport *vport) | ||
1856 | { | ||
1857 | LPFC_MBOXQ_t *mboxq; | ||
1858 | int rc; | ||
1859 | |||
1860 | mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); | ||
1861 | if (!mboxq) { | ||
1862 | lpfc_printf_vlog(vport, KERN_ERR, | ||
1863 | LOG_MBOX, "2607 Failed to allocate " | ||
1864 | "init_vpi mailbox\n"); | ||
1865 | return; | ||
1866 | } | ||
1867 | lpfc_init_vpi(vport->phba, mboxq, vport->vpi); | ||
1868 | mboxq->vport = vport; | ||
1869 | mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; | ||
1870 | rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); | ||
1871 | if (rc == MBX_NOT_FINISHED) { | ||
1872 | lpfc_printf_vlog(vport, KERN_ERR, | ||
1873 | LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n"); | ||
1874 | mempool_free(mboxq, vport->phba->mbox_mem_pool); | ||
1875 | } | ||
1876 | } | ||
1877 | |||
1878 | /** | ||
1726 | * lpfc_start_fdiscs - send fdiscs for each vports on this port. | 1879 | * lpfc_start_fdiscs - send fdiscs for each vports on this port. |
1727 | * @phba: pointer to lpfc hba data structure. | 1880 | * @phba: pointer to lpfc hba data structure. |
1728 | * | 1881 | * |
@@ -1734,8 +1887,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba) | |||
1734 | { | 1887 | { |
1735 | struct lpfc_vport **vports; | 1888 | struct lpfc_vport **vports; |
1736 | int i; | 1889 | int i; |
1737 | LPFC_MBOXQ_t *mboxq; | ||
1738 | int rc; | ||
1739 | 1890 | ||
1740 | vports = lpfc_create_vport_work_array(phba); | 1891 | vports = lpfc_create_vport_work_array(phba); |
1741 | if (vports != NULL) { | 1892 | if (vports != NULL) { |
@@ -1754,26 +1905,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba) | |||
1754 | continue; | 1905 | continue; |
1755 | } | 1906 | } |
1756 | if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { | 1907 | if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { |
1757 | mboxq = mempool_alloc(phba->mbox_mem_pool, | 1908 | lpfc_issue_init_vpi(vports[i]); |
1758 | GFP_KERNEL); | ||
1759 | if (!mboxq) { | ||
1760 | lpfc_printf_vlog(vports[i], KERN_ERR, | ||
1761 | LOG_MBOX, "2607 Failed to allocate " | ||
1762 | "init_vpi mailbox\n"); | ||
1763 | continue; | ||
1764 | } | ||
1765 | lpfc_init_vpi(phba, mboxq, vports[i]->vpi); | ||
1766 | mboxq->vport = vports[i]; | ||
1767 | mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; | ||
1768 | rc = lpfc_sli_issue_mbox(phba, mboxq, | ||
1769 | MBX_NOWAIT); | ||
1770 | if (rc == MBX_NOT_FINISHED) { | ||
1771 | lpfc_printf_vlog(vports[i], KERN_ERR, | ||
1772 | LOG_MBOX, "2608 Failed to issue " | ||
1773 | "init_vpi mailbox\n"); | ||
1774 | mempool_free(mboxq, | ||
1775 | phba->mbox_mem_pool); | ||
1776 | } | ||
1777 | continue; | 1909 | continue; |
1778 | } | 1910 | } |
1779 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) | 1911 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) |
@@ -1796,6 +1928,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1796 | { | 1928 | { |
1797 | struct lpfc_dmabuf *dmabuf = mboxq->context1; | 1929 | struct lpfc_dmabuf *dmabuf = mboxq->context1; |
1798 | struct lpfc_vport *vport = mboxq->vport; | 1930 | struct lpfc_vport *vport = mboxq->vport; |
1931 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1799 | 1932 | ||
1800 | if (mboxq->u.mb.mbxStatus) { | 1933 | if (mboxq->u.mb.mbxStatus) { |
1801 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, | 1934 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, |
@@ -1813,7 +1946,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1813 | goto fail_free_mem; | 1946 | goto fail_free_mem; |
1814 | } | 1947 | } |
1815 | /* The VPI is implicitly registered when the VFI is registered */ | 1948 | /* The VPI is implicitly registered when the VFI is registered */ |
1949 | spin_lock_irq(shost->host_lock); | ||
1816 | vport->vpi_state |= LPFC_VPI_REGISTERED; | 1950 | vport->vpi_state |= LPFC_VPI_REGISTERED; |
1951 | vport->fc_flag |= FC_VFI_REGISTERED; | ||
1952 | vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; | ||
1953 | spin_unlock_irq(shost->host_lock); | ||
1817 | 1954 | ||
1818 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { | 1955 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { |
1819 | lpfc_start_fdiscs(phba); | 1956 | lpfc_start_fdiscs(phba); |
@@ -2050,8 +2187,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
2050 | return; | 2187 | return; |
2051 | } | 2188 | } |
2052 | spin_unlock_irq(&phba->hbalock); | 2189 | spin_unlock_irq(&phba->hbalock); |
2053 | rc = lpfc_sli4_read_fcf_record(phba, | 2190 | rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); |
2054 | LPFC_FCOE_FCF_GET_FIRST); | ||
2055 | if (rc) | 2191 | if (rc) |
2056 | goto out; | 2192 | goto out; |
2057 | } | 2193 | } |
@@ -2139,10 +2275,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2139 | } | 2275 | } |
2140 | 2276 | ||
2141 | phba->fc_eventTag = la->eventTag; | 2277 | phba->fc_eventTag = la->eventTag; |
2278 | spin_lock_irq(&phba->hbalock); | ||
2142 | if (la->mm) | 2279 | if (la->mm) |
2143 | phba->sli.sli_flag |= LPFC_MENLO_MAINT; | 2280 | phba->sli.sli_flag |= LPFC_MENLO_MAINT; |
2144 | else | 2281 | else |
2145 | phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; | 2282 | phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; |
2283 | spin_unlock_irq(&phba->hbalock); | ||
2146 | 2284 | ||
2147 | phba->link_events++; | 2285 | phba->link_events++; |
2148 | if (la->attType == AT_LINK_UP && (!la->mm)) { | 2286 | if (la->attType == AT_LINK_UP && (!la->mm)) { |
@@ -2271,10 +2409,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2271 | mb->mbxStatus); | 2409 | mb->mbxStatus); |
2272 | break; | 2410 | break; |
2273 | } | 2411 | } |
2274 | spin_lock_irq(&phba->hbalock); | 2412 | spin_lock_irq(shost->host_lock); |
2275 | vport->vpi_state &= ~LPFC_VPI_REGISTERED; | 2413 | vport->vpi_state &= ~LPFC_VPI_REGISTERED; |
2276 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 2414 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
2277 | spin_unlock_irq(&phba->hbalock); | 2415 | spin_unlock_irq(shost->host_lock); |
2278 | vport->unreg_vpi_cmpl = VPORT_OK; | 2416 | vport->unreg_vpi_cmpl = VPORT_OK; |
2279 | mempool_free(pmb, phba->mbox_mem_pool); | 2417 | mempool_free(pmb, phba->mbox_mem_pool); |
2280 | /* | 2418 | /* |
@@ -2332,7 +2470,10 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2332 | goto out; | 2470 | goto out; |
2333 | } | 2471 | } |
2334 | 2472 | ||
2473 | spin_lock_irq(shost->host_lock); | ||
2335 | vport->vpi_state |= LPFC_VPI_REGISTERED; | 2474 | vport->vpi_state |= LPFC_VPI_REGISTERED; |
2475 | vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; | ||
2476 | spin_unlock_irq(shost->host_lock); | ||
2336 | vport->num_disc_nodes = 0; | 2477 | vport->num_disc_nodes = 0; |
2337 | /* go thru NPR list and issue ELS PLOGIs */ | 2478 | /* go thru NPR list and issue ELS PLOGIs */ |
2338 | if (vport->fc_npr_cnt) | 2479 | if (vport->fc_npr_cnt) |
@@ -3218,6 +3359,34 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
3218 | return 0; | 3359 | return 0; |
3219 | } | 3360 | } |
3220 | 3361 | ||
3362 | /** | ||
3363 | * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. | ||
3364 | * @phba: pointer to lpfc hba data structure. | ||
3365 | * | ||
3366 | * This routine is invoked to unregister all the currently registered RPIs | ||
3367 | * to the HBA. | ||
3368 | **/ | ||
3369 | void | ||
3370 | lpfc_unreg_hba_rpis(struct lpfc_hba *phba) | ||
3371 | { | ||
3372 | struct lpfc_vport **vports; | ||
3373 | struct lpfc_nodelist *ndlp; | ||
3374 | struct Scsi_Host *shost; | ||
3375 | int i; | ||
3376 | |||
3377 | vports = lpfc_create_vport_work_array(phba); | ||
3378 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | ||
3379 | shost = lpfc_shost_from_vport(vports[i]); | ||
3380 | spin_lock_irq(shost->host_lock); | ||
3381 | list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { | ||
3382 | if (ndlp->nlp_flag & NLP_RPI_VALID) | ||
3383 | lpfc_unreg_rpi(vports[i], ndlp); | ||
3384 | } | ||
3385 | spin_unlock_irq(shost->host_lock); | ||
3386 | } | ||
3387 | lpfc_destroy_vport_work_array(phba, vports); | ||
3388 | } | ||
3389 | |||
3221 | void | 3390 | void |
3222 | lpfc_unreg_all_rpis(struct lpfc_vport *vport) | 3391 | lpfc_unreg_all_rpis(struct lpfc_vport *vport) |
3223 | { | 3392 | { |
@@ -4448,63 +4617,56 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
4448 | } | 4617 | } |
4449 | 4618 | ||
4450 | /** | 4619 | /** |
4451 | * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. | 4620 | * lpfc_unregister_fcf_prep - Unregister fcf record preparation |
4452 | * @phba: Pointer to hba context object. | 4621 | * @phba: Pointer to hba context object. |
4453 | * | 4622 | * |
4454 | * This function check if there are any connected remote port for the FCF and | 4623 | * This function prepare the HBA for unregistering the currently registered |
4455 | * if all the devices are disconnected, this function unregister FCFI. | 4624 | * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and |
4456 | * This function also tries to use another FCF for discovery. | 4625 | * VFIs. |
4457 | */ | 4626 | */ |
4458 | void | 4627 | int |
4459 | lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | 4628 | lpfc_unregister_fcf_prep(struct lpfc_hba *phba) |
4460 | { | 4629 | { |
4461 | LPFC_MBOXQ_t *mbox; | 4630 | LPFC_MBOXQ_t *mbox; |
4462 | int rc; | ||
4463 | struct lpfc_vport **vports; | 4631 | struct lpfc_vport **vports; |
4464 | int i; | 4632 | struct lpfc_nodelist *ndlp; |
4465 | 4633 | struct Scsi_Host *shost; | |
4466 | spin_lock_irq(&phba->hbalock); | 4634 | int i, rc; |
4467 | /* | ||
4468 | * If HBA is not running in FIP mode or | ||
4469 | * If HBA does not support FCoE or | ||
4470 | * If FCF is not registered. | ||
4471 | * do nothing. | ||
4472 | */ | ||
4473 | if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || | ||
4474 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || | ||
4475 | (!(phba->hba_flag & HBA_FIP_SUPPORT))) { | ||
4476 | spin_unlock_irq(&phba->hbalock); | ||
4477 | return; | ||
4478 | } | ||
4479 | spin_unlock_irq(&phba->hbalock); | ||
4480 | 4635 | ||
4636 | /* Unregister RPIs */ | ||
4481 | if (lpfc_fcf_inuse(phba)) | 4637 | if (lpfc_fcf_inuse(phba)) |
4482 | return; | 4638 | lpfc_unreg_hba_rpis(phba); |
4483 | 4639 | ||
4484 | /* At this point, all discovery is aborted */ | 4640 | /* At this point, all discovery is aborted */ |
4485 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; | 4641 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
4486 | 4642 | ||
4487 | /* Unregister VPIs */ | 4643 | /* Unregister VPIs */ |
4488 | vports = lpfc_create_vport_work_array(phba); | 4644 | vports = lpfc_create_vport_work_array(phba); |
4489 | if (vports && | 4645 | if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) |
4490 | (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) | ||
4491 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 4646 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
4647 | /* Stop FLOGI/FDISC retries */ | ||
4648 | ndlp = lpfc_findnode_did(vports[i], Fabric_DID); | ||
4649 | if (ndlp) | ||
4650 | lpfc_cancel_retry_delay_tmo(vports[i], ndlp); | ||
4492 | lpfc_mbx_unreg_vpi(vports[i]); | 4651 | lpfc_mbx_unreg_vpi(vports[i]); |
4493 | spin_lock_irq(&phba->hbalock); | 4652 | shost = lpfc_shost_from_vport(vports[i]); |
4653 | spin_lock_irq(shost->host_lock); | ||
4494 | vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; | 4654 | vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; |
4495 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; | 4655 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; |
4496 | spin_unlock_irq(&phba->hbalock); | 4656 | spin_unlock_irq(shost->host_lock); |
4497 | } | 4657 | } |
4498 | lpfc_destroy_vport_work_array(phba, vports); | 4658 | lpfc_destroy_vport_work_array(phba, vports); |
4499 | 4659 | ||
4660 | /* Cleanup any outstanding ELS commands */ | ||
4661 | lpfc_els_flush_all_cmd(phba); | ||
4662 | |||
4500 | /* Unregister VFI */ | 4663 | /* Unregister VFI */ |
4501 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 4664 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
4502 | if (!mbox) { | 4665 | if (!mbox) { |
4503 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, | 4666 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, |
4504 | "2556 UNREG_VFI mbox allocation failed" | 4667 | "2556 UNREG_VFI mbox allocation failed" |
4505 | "HBA state x%x\n", | 4668 | "HBA state x%x\n", phba->pport->port_state); |
4506 | phba->pport->port_state); | 4669 | return -ENOMEM; |
4507 | return; | ||
4508 | } | 4670 | } |
4509 | 4671 | ||
4510 | lpfc_unreg_vfi(mbox, phba->pport); | 4672 | lpfc_unreg_vfi(mbox, phba->pport); |
@@ -4514,58 +4676,163 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | |||
4514 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); | 4676 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
4515 | if (rc == MBX_NOT_FINISHED) { | 4677 | if (rc == MBX_NOT_FINISHED) { |
4516 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, | 4678 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, |
4517 | "2557 UNREG_VFI issue mbox failed rc x%x " | 4679 | "2557 UNREG_VFI issue mbox failed rc x%x " |
4518 | "HBA state x%x\n", | 4680 | "HBA state x%x\n", |
4519 | rc, phba->pport->port_state); | 4681 | rc, phba->pport->port_state); |
4520 | mempool_free(mbox, phba->mbox_mem_pool); | 4682 | mempool_free(mbox, phba->mbox_mem_pool); |
4521 | return; | 4683 | return -EIO; |
4522 | } | 4684 | } |
4523 | 4685 | ||
4524 | /* Unregister FCF */ | 4686 | shost = lpfc_shost_from_vport(phba->pport); |
4687 | spin_lock_irq(shost->host_lock); | ||
4688 | phba->pport->fc_flag &= ~FC_VFI_REGISTERED; | ||
4689 | spin_unlock_irq(shost->host_lock); | ||
4690 | |||
4691 | return 0; | ||
4692 | } | ||
4693 | |||
4694 | /** | ||
4695 | * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record | ||
4696 | * @phba: Pointer to hba context object. | ||
4697 | * | ||
4698 | * This function issues synchronous unregister FCF mailbox command to HBA to | ||
4699 | * unregister the currently registered FCF record. The driver does not reset | ||
4700 | * the driver FCF usage state flags. | ||
4701 | * | ||
4702 | * Return 0 if successfully issued, none-zero otherwise. | ||
4703 | */ | ||
4704 | int | ||
4705 | lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) | ||
4706 | { | ||
4707 | LPFC_MBOXQ_t *mbox; | ||
4708 | int rc; | ||
4709 | |||
4525 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 4710 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
4526 | if (!mbox) { | 4711 | if (!mbox) { |
4527 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, | 4712 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, |
4528 | "2551 UNREG_FCFI mbox allocation failed" | 4713 | "2551 UNREG_FCFI mbox allocation failed" |
4529 | "HBA state x%x\n", | 4714 | "HBA state x%x\n", phba->pport->port_state); |
4530 | phba->pport->port_state); | 4715 | return -ENOMEM; |
4531 | return; | ||
4532 | } | 4716 | } |
4533 | |||
4534 | lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); | 4717 | lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); |
4535 | mbox->vport = phba->pport; | 4718 | mbox->vport = phba->pport; |
4536 | mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; | 4719 | mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; |
4537 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); | 4720 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
4538 | 4721 | ||
4539 | if (rc == MBX_NOT_FINISHED) { | 4722 | if (rc == MBX_NOT_FINISHED) { |
4540 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, | 4723 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
4541 | "2552 UNREG_FCFI issue mbox failed rc x%x " | 4724 | "2552 Unregister FCFI command failed rc x%x " |
4542 | "HBA state x%x\n", | 4725 | "HBA state x%x\n", |
4543 | rc, phba->pport->port_state); | 4726 | rc, phba->pport->port_state); |
4544 | mempool_free(mbox, phba->mbox_mem_pool); | 4727 | return -EINVAL; |
4728 | } | ||
4729 | return 0; | ||
4730 | } | ||
4731 | |||
4732 | /** | ||
4733 | * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan | ||
4734 | * @phba: Pointer to hba context object. | ||
4735 | * | ||
4736 | * This function unregisters the currently reigstered FCF. This function | ||
4737 | * also tries to find another FCF for discovery by rescan the HBA FCF table. | ||
4738 | */ | ||
4739 | void | ||
4740 | lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) | ||
4741 | { | ||
4742 | int rc; | ||
4743 | |||
4744 | /* Preparation for unregistering fcf */ | ||
4745 | rc = lpfc_unregister_fcf_prep(phba); | ||
4746 | if (rc) { | ||
4747 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
4748 | "2748 Failed to prepare for unregistering " | ||
4749 | "HBA's FCF record: rc=%d\n", rc); | ||
4545 | return; | 4750 | return; |
4546 | } | 4751 | } |
4547 | 4752 | ||
4548 | spin_lock_irq(&phba->hbalock); | 4753 | /* Now, unregister FCF record and reset HBA FCF state */ |
4549 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | | 4754 | rc = lpfc_sli4_unregister_fcf(phba); |
4550 | FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | | 4755 | if (rc) |
4551 | FCF_VALID_VLAN); | 4756 | return; |
4552 | spin_unlock_irq(&phba->hbalock); | 4757 | /* Reset HBA FCF states after successful unregister FCF */ |
4758 | phba->fcf.fcf_flag = 0; | ||
4553 | 4759 | ||
4554 | /* | 4760 | /* |
4555 | * If driver is not unloading, check if there is any other | 4761 | * If driver is not unloading, check if there is any other |
4556 | * FCF record that can be used for discovery. | 4762 | * FCF record that can be used for discovery. |
4557 | */ | 4763 | */ |
4558 | if ((phba->pport->load_flag & FC_UNLOADING) || | 4764 | if ((phba->pport->load_flag & FC_UNLOADING) || |
4559 | (phba->link_state < LPFC_LINK_UP)) | 4765 | (phba->link_state < LPFC_LINK_UP)) |
4560 | return; | 4766 | return; |
4561 | 4767 | ||
4562 | rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); | 4768 | rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); |
4563 | 4769 | ||
4564 | if (rc) | 4770 | if (rc) |
4565 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, | 4771 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, |
4566 | "2553 lpfc_unregister_unused_fcf failed to read FCF" | 4772 | "2553 lpfc_unregister_unused_fcf failed " |
4567 | " record HBA state x%x\n", | 4773 | "to read FCF record HBA state x%x\n", |
4568 | phba->pport->port_state); | 4774 | phba->pport->port_state); |
4775 | } | ||
4776 | |||
4777 | /** | ||
4778 | * lpfc_unregister_fcf - Unregister the currently registered fcf record | ||
4779 | * @phba: Pointer to hba context object. | ||
4780 | * | ||
4781 | * This function just unregisters the currently reigstered FCF. It does not | ||
4782 | * try to find another FCF for discovery. | ||
4783 | */ | ||
4784 | void | ||
4785 | lpfc_unregister_fcf(struct lpfc_hba *phba) | ||
4786 | { | ||
4787 | int rc; | ||
4788 | |||
4789 | /* Preparation for unregistering fcf */ | ||
4790 | rc = lpfc_unregister_fcf_prep(phba); | ||
4791 | if (rc) { | ||
4792 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
4793 | "2749 Failed to prepare for unregistering " | ||
4794 | "HBA's FCF record: rc=%d\n", rc); | ||
4795 | return; | ||
4796 | } | ||
4797 | |||
4798 | /* Now, unregister FCF record and reset HBA FCF state */ | ||
4799 | rc = lpfc_sli4_unregister_fcf(phba); | ||
4800 | if (rc) | ||
4801 | return; | ||
4802 | /* Set proper HBA FCF states after successful unregister FCF */ | ||
4803 | spin_lock_irq(&phba->hbalock); | ||
4804 | phba->fcf.fcf_flag &= ~FCF_REGISTERED; | ||
4805 | spin_unlock_irq(&phba->hbalock); | ||
4806 | } | ||
4807 | |||
4808 | /** | ||
4809 | * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. | ||
4810 | * @phba: Pointer to hba context object. | ||
4811 | * | ||
4812 | * This function check if there are any connected remote port for the FCF and | ||
4813 | * if all the devices are disconnected, this function unregister FCFI. | ||
4814 | * This function also tries to use another FCF for discovery. | ||
4815 | */ | ||
4816 | void | ||
4817 | lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | ||
4818 | { | ||
4819 | /* | ||
4820 | * If HBA is not running in FIP mode or if HBA does not support | ||
4821 | * FCoE or if FCF is not registered, do nothing. | ||
4822 | */ | ||
4823 | spin_lock_irq(&phba->hbalock); | ||
4824 | if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || | ||
4825 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || | ||
4826 | !(phba->hba_flag & HBA_FIP_SUPPORT)) { | ||
4827 | spin_unlock_irq(&phba->hbalock); | ||
4828 | return; | ||
4829 | } | ||
4830 | spin_unlock_irq(&phba->hbalock); | ||
4831 | |||
4832 | if (lpfc_fcf_inuse(phba)) | ||
4833 | return; | ||
4834 | |||
4835 | lpfc_unregister_fcf_rescan(phba); | ||
4569 | } | 4836 | } |
4570 | 4837 | ||
4571 | /** | 4838 | /** |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index c9faa1d8c3c8..89ff7c09e298 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -1346,6 +1346,9 @@ typedef struct { /* FireFly BIU registers */ | |||
1346 | #define MBX_HEARTBEAT 0x31 | 1346 | #define MBX_HEARTBEAT 0x31 |
1347 | #define MBX_WRITE_VPARMS 0x32 | 1347 | #define MBX_WRITE_VPARMS 0x32 |
1348 | #define MBX_ASYNCEVT_ENABLE 0x33 | 1348 | #define MBX_ASYNCEVT_ENABLE 0x33 |
1349 | #define MBX_READ_EVENT_LOG_STATUS 0x37 | ||
1350 | #define MBX_READ_EVENT_LOG 0x38 | ||
1351 | #define MBX_WRITE_EVENT_LOG 0x39 | ||
1349 | 1352 | ||
1350 | #define MBX_PORT_CAPABILITIES 0x3B | 1353 | #define MBX_PORT_CAPABILITIES 0x3B |
1351 | #define MBX_PORT_IOV_CONTROL 0x3C | 1354 | #define MBX_PORT_IOV_CONTROL 0x3C |
@@ -1465,17 +1468,13 @@ typedef struct { /* FireFly BIU registers */ | |||
1465 | #define CMD_IOCB_LOGENTRY_CN 0x94 | 1468 | #define CMD_IOCB_LOGENTRY_CN 0x94 |
1466 | #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 | 1469 | #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 |
1467 | 1470 | ||
1468 | /* Unhandled Data Security SLI Commands */ | 1471 | /* Data Security SLI Commands */ |
1469 | #define DSSCMD_IWRITE64_CR 0xD8 | 1472 | #define DSSCMD_IWRITE64_CR 0xF8 |
1470 | #define DSSCMD_IWRITE64_CX 0xD9 | 1473 | #define DSSCMD_IWRITE64_CX 0xF9 |
1471 | #define DSSCMD_IREAD64_CR 0xDA | 1474 | #define DSSCMD_IREAD64_CR 0xFA |
1472 | #define DSSCMD_IREAD64_CX 0xDB | 1475 | #define DSSCMD_IREAD64_CX 0xFB |
1473 | #define DSSCMD_INVALIDATE_DEK 0xDC | 1476 | |
1474 | #define DSSCMD_SET_KEK 0xDD | 1477 | #define CMD_MAX_IOCB_CMD 0xFB |
1475 | #define DSSCMD_GET_KEK_ID 0xDE | ||
1476 | #define DSSCMD_GEN_XFER 0xDF | ||
1477 | |||
1478 | #define CMD_MAX_IOCB_CMD 0xE6 | ||
1479 | #define CMD_IOCB_MASK 0xff | 1478 | #define CMD_IOCB_MASK 0xff |
1480 | 1479 | ||
1481 | #define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG | 1480 | #define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 8a2a1c5935c6..820015fbc4d6 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -52,35 +52,37 @@ struct dma_address { | |||
52 | uint32_t addr_hi; | 52 | uint32_t addr_hi; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | #define LPFC_SLIREV_CONF_WORD 0x58 | ||
56 | struct lpfc_sli_intf { | 55 | struct lpfc_sli_intf { |
57 | uint32_t word0; | 56 | uint32_t word0; |
58 | #define lpfc_sli_intf_iftype_MASK 0x00000007 | 57 | #define lpfc_sli_intf_valid_SHIFT 29 |
59 | #define lpfc_sli_intf_iftype_SHIFT 0 | 58 | #define lpfc_sli_intf_valid_MASK 0x00000007 |
60 | #define lpfc_sli_intf_iftype_WORD word0 | 59 | #define lpfc_sli_intf_valid_WORD word0 |
61 | #define lpfc_sli_intf_rev_MASK 0x0000000f | ||
62 | #define lpfc_sli_intf_rev_SHIFT 4 | ||
63 | #define lpfc_sli_intf_rev_WORD word0 | ||
64 | #define LPFC_SLIREV_CONF_SLI4 4 | ||
65 | #define lpfc_sli_intf_family_MASK 0x000000ff | ||
66 | #define lpfc_sli_intf_family_SHIFT 8 | ||
67 | #define lpfc_sli_intf_family_WORD word0 | ||
68 | #define lpfc_sli_intf_feat1_MASK 0x000000ff | ||
69 | #define lpfc_sli_intf_feat1_SHIFT 16 | ||
70 | #define lpfc_sli_intf_feat1_WORD word0 | ||
71 | #define lpfc_sli_intf_feat2_MASK 0x0000001f | ||
72 | #define lpfc_sli_intf_feat2_SHIFT 24 | ||
73 | #define lpfc_sli_intf_feat2_WORD word0 | ||
74 | #define lpfc_sli_intf_valid_MASK 0x00000007 | ||
75 | #define lpfc_sli_intf_valid_SHIFT 29 | ||
76 | #define lpfc_sli_intf_valid_WORD word0 | ||
77 | #define LPFC_SLI_INTF_VALID 6 | 60 | #define LPFC_SLI_INTF_VALID 6 |
61 | #define lpfc_sli_intf_featurelevel2_SHIFT 24 | ||
62 | #define lpfc_sli_intf_featurelevel2_MASK 0x0000001F | ||
63 | #define lpfc_sli_intf_featurelevel2_WORD word0 | ||
64 | #define lpfc_sli_intf_featurelevel1_SHIFT 16 | ||
65 | #define lpfc_sli_intf_featurelevel1_MASK 0x000000FF | ||
66 | #define lpfc_sli_intf_featurelevel1_WORD word0 | ||
67 | #define LPFC_SLI_INTF_FEATURELEVEL1_1 1 | ||
68 | #define LPFC_SLI_INTF_FEATURELEVEL1_2 2 | ||
69 | #define lpfc_sli_intf_sli_family_SHIFT 8 | ||
70 | #define lpfc_sli_intf_sli_family_MASK 0x000000FF | ||
71 | #define lpfc_sli_intf_sli_family_WORD word0 | ||
72 | #define LPFC_SLI_INTF_FAMILY_BE2 0 | ||
73 | #define LPFC_SLI_INTF_FAMILY_BE3 1 | ||
74 | #define lpfc_sli_intf_slirev_SHIFT 4 | ||
75 | #define lpfc_sli_intf_slirev_MASK 0x0000000F | ||
76 | #define lpfc_sli_intf_slirev_WORD word0 | ||
77 | #define LPFC_SLI_INTF_REV_SLI3 3 | ||
78 | #define LPFC_SLI_INTF_REV_SLI4 4 | ||
79 | #define lpfc_sli_intf_if_type_SHIFT 0 | ||
80 | #define lpfc_sli_intf_if_type_MASK 0x00000007 | ||
81 | #define lpfc_sli_intf_if_type_WORD word0 | ||
82 | #define LPFC_SLI_INTF_IF_TYPE_0 0 | ||
83 | #define LPFC_SLI_INTF_IF_TYPE_1 1 | ||
78 | }; | 84 | }; |
79 | 85 | ||
80 | #define LPFC_SLI4_BAR0 1 | ||
81 | #define LPFC_SLI4_BAR1 2 | ||
82 | #define LPFC_SLI4_BAR2 4 | ||
83 | |||
84 | #define LPFC_SLI4_MBX_EMBED true | 86 | #define LPFC_SLI4_MBX_EMBED true |
85 | #define LPFC_SLI4_MBX_NEMBED false | 87 | #define LPFC_SLI4_MBX_NEMBED false |
86 | 88 | ||
@@ -161,6 +163,9 @@ struct lpfc_sli_intf { | |||
161 | #define LPFC_FP_DEF_IMAX 10000 | 163 | #define LPFC_FP_DEF_IMAX 10000 |
162 | #define LPFC_SP_DEF_IMAX 10000 | 164 | #define LPFC_SP_DEF_IMAX 10000 |
163 | 165 | ||
166 | /* PORT_CAPABILITIES constants. */ | ||
167 | #define LPFC_MAX_SUPPORTED_PAGES 8 | ||
168 | |||
164 | struct ulp_bde64 { | 169 | struct ulp_bde64 { |
165 | union ULP_BDE_TUS { | 170 | union ULP_BDE_TUS { |
166 | uint32_t w; | 171 | uint32_t w; |
@@ -516,7 +521,7 @@ struct lpfc_register { | |||
516 | #define LPFC_UERR_STATUS_LO 0x00A0 | 521 | #define LPFC_UERR_STATUS_LO 0x00A0 |
517 | #define LPFC_UE_MASK_HI 0x00AC | 522 | #define LPFC_UE_MASK_HI 0x00AC |
518 | #define LPFC_UE_MASK_LO 0x00A8 | 523 | #define LPFC_UE_MASK_LO 0x00A8 |
519 | #define LPFC_SCRATCHPAD 0x0058 | 524 | #define LPFC_SLI_INTF 0x0058 |
520 | 525 | ||
521 | /* BAR0 Registers */ | 526 | /* BAR0 Registers */ |
522 | #define LPFC_HST_STATE 0x00AC | 527 | #define LPFC_HST_STATE 0x00AC |
@@ -576,19 +581,6 @@ struct lpfc_register { | |||
576 | #define LPFC_POST_STAGE_ARMFW_READY 0xC000 | 581 | #define LPFC_POST_STAGE_ARMFW_READY 0xC000 |
577 | #define LPFC_POST_STAGE_ARMFW_UE 0xF000 | 582 | #define LPFC_POST_STAGE_ARMFW_UE 0xF000 |
578 | 583 | ||
579 | #define lpfc_scratchpad_slirev_SHIFT 4 | ||
580 | #define lpfc_scratchpad_slirev_MASK 0xF | ||
581 | #define lpfc_scratchpad_slirev_WORD word0 | ||
582 | #define lpfc_scratchpad_chiptype_SHIFT 8 | ||
583 | #define lpfc_scratchpad_chiptype_MASK 0xFF | ||
584 | #define lpfc_scratchpad_chiptype_WORD word0 | ||
585 | #define lpfc_scratchpad_featurelevel1_SHIFT 16 | ||
586 | #define lpfc_scratchpad_featurelevel1_MASK 0xFF | ||
587 | #define lpfc_scratchpad_featurelevel1_WORD word0 | ||
588 | #define lpfc_scratchpad_featurelevel2_SHIFT 24 | ||
589 | #define lpfc_scratchpad_featurelevel2_MASK 0xFF | ||
590 | #define lpfc_scratchpad_featurelevel2_WORD word0 | ||
591 | |||
592 | /* BAR1 Registers */ | 584 | /* BAR1 Registers */ |
593 | #define LPFC_IMR_MASK_ALL 0xFFFFFFFF | 585 | #define LPFC_IMR_MASK_ALL 0xFFFFFFFF |
594 | #define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF | 586 | #define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF |
@@ -801,6 +793,7 @@ struct mbox_header { | |||
801 | #define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 | 793 | #define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 |
802 | #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A | 794 | #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A |
803 | #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B | 795 | #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B |
796 | #define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 | ||
804 | 797 | ||
805 | /* Mailbox command structures */ | 798 | /* Mailbox command structures */ |
806 | struct eq_context { | 799 | struct eq_context { |
@@ -1149,10 +1142,7 @@ struct sli4_sge { /* SLI-4 */ | |||
1149 | this flag !! */ | 1142 | this flag !! */ |
1150 | #define lpfc_sli4_sge_last_MASK 0x00000001 | 1143 | #define lpfc_sli4_sge_last_MASK 0x00000001 |
1151 | #define lpfc_sli4_sge_last_WORD word2 | 1144 | #define lpfc_sli4_sge_last_WORD word2 |
1152 | uint32_t word3; | 1145 | uint32_t sge_len; |
1153 | #define lpfc_sli4_sge_len_SHIFT 0 | ||
1154 | #define lpfc_sli4_sge_len_MASK 0x0001FFFF | ||
1155 | #define lpfc_sli4_sge_len_WORD word3 | ||
1156 | }; | 1146 | }; |
1157 | 1147 | ||
1158 | struct fcf_record { | 1148 | struct fcf_record { |
@@ -1301,6 +1291,19 @@ struct lpfc_mbx_del_fcf_tbl_entry { | |||
1301 | #define lpfc_mbx_del_fcf_tbl_index_WORD word10 | 1291 | #define lpfc_mbx_del_fcf_tbl_index_WORD word10 |
1302 | }; | 1292 | }; |
1303 | 1293 | ||
1294 | struct lpfc_mbx_redisc_fcf_tbl { | ||
1295 | struct mbox_header header; | ||
1296 | uint32_t word10; | ||
1297 | #define lpfc_mbx_redisc_fcf_count_SHIFT 0 | ||
1298 | #define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF | ||
1299 | #define lpfc_mbx_redisc_fcf_count_WORD word10 | ||
1300 | uint32_t resvd; | ||
1301 | uint32_t word12; | ||
1302 | #define lpfc_mbx_redisc_fcf_index_SHIFT 0 | ||
1303 | #define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF | ||
1304 | #define lpfc_mbx_redisc_fcf_index_WORD word12 | ||
1305 | }; | ||
1306 | |||
1304 | struct lpfc_mbx_query_fw_cfg { | 1307 | struct lpfc_mbx_query_fw_cfg { |
1305 | struct mbox_header header; | 1308 | struct mbox_header header; |
1306 | uint32_t config_number; | 1309 | uint32_t config_number; |
@@ -1834,6 +1837,177 @@ struct lpfc_mbx_request_features { | |||
1834 | #define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 | 1837 | #define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 |
1835 | }; | 1838 | }; |
1836 | 1839 | ||
1840 | struct lpfc_mbx_supp_pages { | ||
1841 | uint32_t word1; | ||
1842 | #define qs_SHIFT 0 | ||
1843 | #define qs_MASK 0x00000001 | ||
1844 | #define qs_WORD word1 | ||
1845 | #define wr_SHIFT 1 | ||
1846 | #define wr_MASK 0x00000001 | ||
1847 | #define wr_WORD word1 | ||
1848 | #define pf_SHIFT 8 | ||
1849 | #define pf_MASK 0x000000ff | ||
1850 | #define pf_WORD word1 | ||
1851 | #define cpn_SHIFT 16 | ||
1852 | #define cpn_MASK 0x000000ff | ||
1853 | #define cpn_WORD word1 | ||
1854 | uint32_t word2; | ||
1855 | #define list_offset_SHIFT 0 | ||
1856 | #define list_offset_MASK 0x000000ff | ||
1857 | #define list_offset_WORD word2 | ||
1858 | #define next_offset_SHIFT 8 | ||
1859 | #define next_offset_MASK 0x000000ff | ||
1860 | #define next_offset_WORD word2 | ||
1861 | #define elem_cnt_SHIFT 16 | ||
1862 | #define elem_cnt_MASK 0x000000ff | ||
1863 | #define elem_cnt_WORD word2 | ||
1864 | uint32_t word3; | ||
1865 | #define pn_0_SHIFT 24 | ||
1866 | #define pn_0_MASK 0x000000ff | ||
1867 | #define pn_0_WORD word3 | ||
1868 | #define pn_1_SHIFT 16 | ||
1869 | #define pn_1_MASK 0x000000ff | ||
1870 | #define pn_1_WORD word3 | ||
1871 | #define pn_2_SHIFT 8 | ||
1872 | #define pn_2_MASK 0x000000ff | ||
1873 | #define pn_2_WORD word3 | ||
1874 | #define pn_3_SHIFT 0 | ||
1875 | #define pn_3_MASK 0x000000ff | ||
1876 | #define pn_3_WORD word3 | ||
1877 | uint32_t word4; | ||
1878 | #define pn_4_SHIFT 24 | ||
1879 | #define pn_4_MASK 0x000000ff | ||
1880 | #define pn_4_WORD word4 | ||
1881 | #define pn_5_SHIFT 16 | ||
1882 | #define pn_5_MASK 0x000000ff | ||
1883 | #define pn_5_WORD word4 | ||
1884 | #define pn_6_SHIFT 8 | ||
1885 | #define pn_6_MASK 0x000000ff | ||
1886 | #define pn_6_WORD word4 | ||
1887 | #define pn_7_SHIFT 0 | ||
1888 | #define pn_7_MASK 0x000000ff | ||
1889 | #define pn_7_WORD word4 | ||
1890 | uint32_t rsvd[27]; | ||
1891 | #define LPFC_SUPP_PAGES 0 | ||
1892 | #define LPFC_BLOCK_GUARD_PROFILES 1 | ||
1893 | #define LPFC_SLI4_PARAMETERS 2 | ||
1894 | }; | ||
1895 | |||
1896 | struct lpfc_mbx_sli4_params { | ||
1897 | uint32_t word1; | ||
1898 | #define qs_SHIFT 0 | ||
1899 | #define qs_MASK 0x00000001 | ||
1900 | #define qs_WORD word1 | ||
1901 | #define wr_SHIFT 1 | ||
1902 | #define wr_MASK 0x00000001 | ||
1903 | #define wr_WORD word1 | ||
1904 | #define pf_SHIFT 8 | ||
1905 | #define pf_MASK 0x000000ff | ||
1906 | #define pf_WORD word1 | ||
1907 | #define cpn_SHIFT 16 | ||
1908 | #define cpn_MASK 0x000000ff | ||
1909 | #define cpn_WORD word1 | ||
1910 | uint32_t word2; | ||
1911 | #define if_type_SHIFT 0 | ||
1912 | #define if_type_MASK 0x00000007 | ||
1913 | #define if_type_WORD word2 | ||
1914 | #define sli_rev_SHIFT 4 | ||
1915 | #define sli_rev_MASK 0x0000000f | ||
1916 | #define sli_rev_WORD word2 | ||
1917 | #define sli_family_SHIFT 8 | ||
1918 | #define sli_family_MASK 0x000000ff | ||
1919 | #define sli_family_WORD word2 | ||
1920 | #define featurelevel_1_SHIFT 16 | ||
1921 | #define featurelevel_1_MASK 0x000000ff | ||
1922 | #define featurelevel_1_WORD word2 | ||
1923 | #define featurelevel_2_SHIFT 24 | ||
1924 | #define featurelevel_2_MASK 0x0000001f | ||
1925 | #define featurelevel_2_WORD word2 | ||
1926 | uint32_t word3; | ||
1927 | #define fcoe_SHIFT 0 | ||
1928 | #define fcoe_MASK 0x00000001 | ||
1929 | #define fcoe_WORD word3 | ||
1930 | #define fc_SHIFT 1 | ||
1931 | #define fc_MASK 0x00000001 | ||
1932 | #define fc_WORD word3 | ||
1933 | #define nic_SHIFT 2 | ||
1934 | #define nic_MASK 0x00000001 | ||
1935 | #define nic_WORD word3 | ||
1936 | #define iscsi_SHIFT 3 | ||
1937 | #define iscsi_MASK 0x00000001 | ||
1938 | #define iscsi_WORD word3 | ||
1939 | #define rdma_SHIFT 4 | ||
1940 | #define rdma_MASK 0x00000001 | ||
1941 | #define rdma_WORD word3 | ||
1942 | uint32_t sge_supp_len; | ||
1943 | uint32_t word5; | ||
1944 | #define if_page_sz_SHIFT 0 | ||
1945 | #define if_page_sz_MASK 0x0000ffff | ||
1946 | #define if_page_sz_WORD word5 | ||
1947 | #define loopbk_scope_SHIFT 24 | ||
1948 | #define loopbk_scope_MASK 0x0000000f | ||
1949 | #define loopbk_scope_WORD word5 | ||
1950 | #define rq_db_window_SHIFT 28 | ||
1951 | #define rq_db_window_MASK 0x0000000f | ||
1952 | #define rq_db_window_WORD word5 | ||
1953 | uint32_t word6; | ||
1954 | #define eq_pages_SHIFT 0 | ||
1955 | #define eq_pages_MASK 0x0000000f | ||
1956 | #define eq_pages_WORD word6 | ||
1957 | #define eqe_size_SHIFT 8 | ||
1958 | #define eqe_size_MASK 0x000000ff | ||
1959 | #define eqe_size_WORD word6 | ||
1960 | uint32_t word7; | ||
1961 | #define cq_pages_SHIFT 0 | ||
1962 | #define cq_pages_MASK 0x0000000f | ||
1963 | #define cq_pages_WORD word7 | ||
1964 | #define cqe_size_SHIFT 8 | ||
1965 | #define cqe_size_MASK 0x000000ff | ||
1966 | #define cqe_size_WORD word7 | ||
1967 | uint32_t word8; | ||
1968 | #define mq_pages_SHIFT 0 | ||
1969 | #define mq_pages_MASK 0x0000000f | ||
1970 | #define mq_pages_WORD word8 | ||
1971 | #define mqe_size_SHIFT 8 | ||
1972 | #define mqe_size_MASK 0x000000ff | ||
1973 | #define mqe_size_WORD word8 | ||
1974 | #define mq_elem_cnt_SHIFT 16 | ||
1975 | #define mq_elem_cnt_MASK 0x000000ff | ||
1976 | #define mq_elem_cnt_WORD word8 | ||
1977 | uint32_t word9; | ||
1978 | #define wq_pages_SHIFT 0 | ||
1979 | #define wq_pages_MASK 0x0000ffff | ||
1980 | #define wq_pages_WORD word9 | ||
1981 | #define wqe_size_SHIFT 8 | ||
1982 | #define wqe_size_MASK 0x000000ff | ||
1983 | #define wqe_size_WORD word9 | ||
1984 | uint32_t word10; | ||
1985 | #define rq_pages_SHIFT 0 | ||
1986 | #define rq_pages_MASK 0x0000ffff | ||
1987 | #define rq_pages_WORD word10 | ||
1988 | #define rqe_size_SHIFT 8 | ||
1989 | #define rqe_size_MASK 0x000000ff | ||
1990 | #define rqe_size_WORD word10 | ||
1991 | uint32_t word11; | ||
1992 | #define hdr_pages_SHIFT 0 | ||
1993 | #define hdr_pages_MASK 0x0000000f | ||
1994 | #define hdr_pages_WORD word11 | ||
1995 | #define hdr_size_SHIFT 8 | ||
1996 | #define hdr_size_MASK 0x0000000f | ||
1997 | #define hdr_size_WORD word11 | ||
1998 | #define hdr_pp_align_SHIFT 16 | ||
1999 | #define hdr_pp_align_MASK 0x0000ffff | ||
2000 | #define hdr_pp_align_WORD word11 | ||
2001 | uint32_t word12; | ||
2002 | #define sgl_pages_SHIFT 0 | ||
2003 | #define sgl_pages_MASK 0x0000000f | ||
2004 | #define sgl_pages_WORD word12 | ||
2005 | #define sgl_pp_align_SHIFT 16 | ||
2006 | #define sgl_pp_align_MASK 0x0000ffff | ||
2007 | #define sgl_pp_align_WORD word12 | ||
2008 | uint32_t rsvd_13_63[51]; | ||
2009 | }; | ||
2010 | |||
1837 | /* Mailbox Completion Queue Error Messages */ | 2011 | /* Mailbox Completion Queue Error Messages */ |
1838 | #define MB_CQE_STATUS_SUCCESS 0x0 | 2012 | #define MB_CQE_STATUS_SUCCESS 0x0 |
1839 | #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 | 2013 | #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 |
@@ -1863,6 +2037,7 @@ struct lpfc_mqe { | |||
1863 | struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; | 2037 | struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; |
1864 | struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; | 2038 | struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; |
1865 | struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; | 2039 | struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; |
2040 | struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl; | ||
1866 | struct lpfc_mbx_reg_fcfi reg_fcfi; | 2041 | struct lpfc_mbx_reg_fcfi reg_fcfi; |
1867 | struct lpfc_mbx_unreg_fcfi unreg_fcfi; | 2042 | struct lpfc_mbx_unreg_fcfi unreg_fcfi; |
1868 | struct lpfc_mbx_mq_create mq_create; | 2043 | struct lpfc_mbx_mq_create mq_create; |
@@ -1883,6 +2058,8 @@ struct lpfc_mqe { | |||
1883 | struct lpfc_mbx_request_features req_ftrs; | 2058 | struct lpfc_mbx_request_features req_ftrs; |
1884 | struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; | 2059 | struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; |
1885 | struct lpfc_mbx_query_fw_cfg query_fw_cfg; | 2060 | struct lpfc_mbx_query_fw_cfg query_fw_cfg; |
2061 | struct lpfc_mbx_supp_pages supp_pages; | ||
2062 | struct lpfc_mbx_sli4_params sli4_params; | ||
1886 | struct lpfc_mbx_nop nop; | 2063 | struct lpfc_mbx_nop nop; |
1887 | } un; | 2064 | } un; |
1888 | }; | 2065 | }; |
@@ -1959,6 +2136,9 @@ struct lpfc_acqe_link { | |||
1959 | #define LPFC_ASYNC_LINK_FAULT_NONE 0x0 | 2136 | #define LPFC_ASYNC_LINK_FAULT_NONE 0x0 |
1960 | #define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 | 2137 | #define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 |
1961 | #define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 | 2138 | #define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 |
2139 | #define lpfc_acqe_qos_link_speed_SHIFT 16 | ||
2140 | #define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF | ||
2141 | #define lpfc_acqe_qos_link_speed_WORD word1 | ||
1962 | uint32_t event_tag; | 2142 | uint32_t event_tag; |
1963 | uint32_t trailer; | 2143 | uint32_t trailer; |
1964 | }; | 2144 | }; |
@@ -1976,6 +2156,7 @@ struct lpfc_acqe_fcoe { | |||
1976 | #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 | 2156 | #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 |
1977 | #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 | 2157 | #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 |
1978 | #define LPFC_FCOE_EVENT_TYPE_CVL 0x4 | 2158 | #define LPFC_FCOE_EVENT_TYPE_CVL 0x4 |
2159 | #define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5 | ||
1979 | uint32_t event_tag; | 2160 | uint32_t event_tag; |
1980 | uint32_t trailer; | 2161 | uint32_t trailer; |
1981 | }; | 2162 | }; |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b8eb1b6e5e77..d29ac7c317d9 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -544,7 +544,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
544 | mempool_free(pmb, phba->mbox_mem_pool); | 544 | mempool_free(pmb, phba->mbox_mem_pool); |
545 | return -EIO; | 545 | return -EIO; |
546 | } | 546 | } |
547 | } else { | 547 | } else if (phba->cfg_suppress_link_up == 0) { |
548 | lpfc_init_link(phba, pmb, phba->cfg_topology, | 548 | lpfc_init_link(phba, pmb, phba->cfg_topology, |
549 | phba->cfg_link_speed); | 549 | phba->cfg_link_speed); |
550 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 550 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
@@ -603,6 +603,102 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
603 | } | 603 | } |
604 | 604 | ||
605 | /** | 605 | /** |
606 | * lpfc_hba_init_link - Initialize the FC link | ||
607 | * @phba: pointer to lpfc hba data structure. | ||
608 | * | ||
609 | * This routine will issue the INIT_LINK mailbox command call. | ||
610 | * It is available to other drivers through the lpfc_hba data | ||
611 | * structure for use as a delayed link up mechanism with the | ||
612 | * module parameter lpfc_suppress_link_up. | ||
613 | * | ||
614 | * Return code | ||
615 | * 0 - success | ||
616 | * Any other value - error | ||
617 | **/ | ||
618 | int | ||
619 | lpfc_hba_init_link(struct lpfc_hba *phba) | ||
620 | { | ||
621 | struct lpfc_vport *vport = phba->pport; | ||
622 | LPFC_MBOXQ_t *pmb; | ||
623 | MAILBOX_t *mb; | ||
624 | int rc; | ||
625 | |||
626 | pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
627 | if (!pmb) { | ||
628 | phba->link_state = LPFC_HBA_ERROR; | ||
629 | return -ENOMEM; | ||
630 | } | ||
631 | mb = &pmb->u.mb; | ||
632 | pmb->vport = vport; | ||
633 | |||
634 | lpfc_init_link(phba, pmb, phba->cfg_topology, | ||
635 | phba->cfg_link_speed); | ||
636 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
637 | lpfc_set_loopback_flag(phba); | ||
638 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
639 | if (rc != MBX_SUCCESS) { | ||
640 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
641 | "0498 Adapter failed to init, mbxCmd x%x " | ||
642 | "INIT_LINK, mbxStatus x%x\n", | ||
643 | mb->mbxCommand, mb->mbxStatus); | ||
644 | /* Clear all interrupt enable conditions */ | ||
645 | writel(0, phba->HCregaddr); | ||
646 | readl(phba->HCregaddr); /* flush */ | ||
647 | /* Clear all pending interrupts */ | ||
648 | writel(0xffffffff, phba->HAregaddr); | ||
649 | readl(phba->HAregaddr); /* flush */ | ||
650 | phba->link_state = LPFC_HBA_ERROR; | ||
651 | if (rc != MBX_BUSY) | ||
652 | mempool_free(pmb, phba->mbox_mem_pool); | ||
653 | return -EIO; | ||
654 | } | ||
655 | phba->cfg_suppress_link_up = 0; | ||
656 | |||
657 | return 0; | ||
658 | } | ||
659 | |||
660 | /** | ||
661 | * lpfc_hba_down_link - this routine downs the FC link | ||
662 | * | ||
663 | * This routine will issue the DOWN_LINK mailbox command call. | ||
664 | * It is available to other drivers through the lpfc_hba data | ||
665 | * structure for use to stop the link. | ||
666 | * | ||
667 | * Return code | ||
668 | * 0 - success | ||
669 | * Any other value - error | ||
670 | **/ | ||
671 | int | ||
672 | lpfc_hba_down_link(struct lpfc_hba *phba) | ||
673 | { | ||
674 | LPFC_MBOXQ_t *pmb; | ||
675 | int rc; | ||
676 | |||
677 | pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
678 | if (!pmb) { | ||
679 | phba->link_state = LPFC_HBA_ERROR; | ||
680 | return -ENOMEM; | ||
681 | } | ||
682 | |||
683 | lpfc_printf_log(phba, | ||
684 | KERN_ERR, LOG_INIT, | ||
685 | "0491 Adapter Link is disabled.\n"); | ||
686 | lpfc_down_link(phba, pmb); | ||
687 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
688 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
689 | if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { | ||
690 | lpfc_printf_log(phba, | ||
691 | KERN_ERR, LOG_INIT, | ||
692 | "2522 Adapter failed to issue DOWN_LINK" | ||
693 | " mbox command rc 0x%x\n", rc); | ||
694 | |||
695 | mempool_free(pmb, phba->mbox_mem_pool); | ||
696 | return -EIO; | ||
697 | } | ||
698 | return 0; | ||
699 | } | ||
700 | |||
701 | /** | ||
606 | * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset | 702 | * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset |
607 | * @phba: pointer to lpfc HBA data structure. | 703 | * @phba: pointer to lpfc HBA data structure. |
608 | * | 704 | * |
@@ -2073,6 +2169,44 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) | |||
2073 | } | 2169 | } |
2074 | 2170 | ||
2075 | /** | 2171 | /** |
2172 | * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer | ||
2173 | * @phba: pointer to lpfc hba data structure. | ||
2174 | * | ||
2175 | * This routine stops the SLI4 FCF rediscover wait timer if it's on. The | ||
2176 | * caller of this routine should already hold the host lock. | ||
2177 | **/ | ||
2178 | void | ||
2179 | __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) | ||
2180 | { | ||
2181 | /* Clear pending FCF rediscovery wait timer */ | ||
2182 | phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; | ||
2183 | /* Now, try to stop the timer */ | ||
2184 | del_timer(&phba->fcf.redisc_wait); | ||
2185 | } | ||
2186 | |||
2187 | /** | ||
2188 | * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer | ||
2189 | * @phba: pointer to lpfc hba data structure. | ||
2190 | * | ||
2191 | * This routine stops the SLI4 FCF rediscover wait timer if it's on. It | ||
2192 | * checks whether the FCF rediscovery wait timer is pending with the host | ||
2193 | * lock held before proceeding with disabling the timer and clearing the | ||
2194 | * wait timer pendig flag. | ||
2195 | **/ | ||
2196 | void | ||
2197 | lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) | ||
2198 | { | ||
2199 | spin_lock_irq(&phba->hbalock); | ||
2200 | if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { | ||
2201 | /* FCF rediscovery timer already fired or stopped */ | ||
2202 | spin_unlock_irq(&phba->hbalock); | ||
2203 | return; | ||
2204 | } | ||
2205 | __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); | ||
2206 | spin_unlock_irq(&phba->hbalock); | ||
2207 | } | ||
2208 | |||
2209 | /** | ||
2076 | * lpfc_stop_hba_timers - Stop all the timers associated with an HBA | 2210 | * lpfc_stop_hba_timers - Stop all the timers associated with an HBA |
2077 | * @phba: pointer to lpfc hba data structure. | 2211 | * @phba: pointer to lpfc hba data structure. |
2078 | * | 2212 | * |
@@ -2096,6 +2230,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) | |||
2096 | break; | 2230 | break; |
2097 | case LPFC_PCI_DEV_OC: | 2231 | case LPFC_PCI_DEV_OC: |
2098 | /* Stop any OneConnect device sepcific driver timers */ | 2232 | /* Stop any OneConnect device sepcific driver timers */ |
2233 | lpfc_sli4_stop_fcf_redisc_wait_timer(phba); | ||
2099 | break; | 2234 | break; |
2100 | default: | 2235 | default: |
2101 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2236 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
@@ -2228,6 +2363,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
2228 | struct lpfc_vport *vport = phba->pport; | 2363 | struct lpfc_vport *vport = phba->pport; |
2229 | struct lpfc_nodelist *ndlp, *next_ndlp; | 2364 | struct lpfc_nodelist *ndlp, *next_ndlp; |
2230 | struct lpfc_vport **vports; | 2365 | struct lpfc_vport **vports; |
2366 | struct Scsi_Host *shost; | ||
2231 | int i; | 2367 | int i; |
2232 | 2368 | ||
2233 | if (vport->fc_flag & FC_OFFLINE_MODE) | 2369 | if (vport->fc_flag & FC_OFFLINE_MODE) |
@@ -2241,11 +2377,15 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
2241 | vports = lpfc_create_vport_work_array(phba); | 2377 | vports = lpfc_create_vport_work_array(phba); |
2242 | if (vports != NULL) { | 2378 | if (vports != NULL) { |
2243 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 2379 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
2244 | struct Scsi_Host *shost; | ||
2245 | |||
2246 | if (vports[i]->load_flag & FC_UNLOADING) | 2380 | if (vports[i]->load_flag & FC_UNLOADING) |
2247 | continue; | 2381 | continue; |
2382 | shost = lpfc_shost_from_vport(vports[i]); | ||
2383 | spin_lock_irq(shost->host_lock); | ||
2248 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; | 2384 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; |
2385 | vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | ||
2386 | vports[i]->fc_flag &= ~FC_VFI_REGISTERED; | ||
2387 | spin_unlock_irq(shost->host_lock); | ||
2388 | |||
2249 | shost = lpfc_shost_from_vport(vports[i]); | 2389 | shost = lpfc_shost_from_vport(vports[i]); |
2250 | list_for_each_entry_safe(ndlp, next_ndlp, | 2390 | list_for_each_entry_safe(ndlp, next_ndlp, |
2251 | &vports[i]->fc_nodes, | 2391 | &vports[i]->fc_nodes, |
@@ -2401,7 +2541,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | |||
2401 | shost->this_id = -1; | 2541 | shost->this_id = -1; |
2402 | shost->max_cmd_len = 16; | 2542 | shost->max_cmd_len = 16; |
2403 | if (phba->sli_rev == LPFC_SLI_REV4) { | 2543 | if (phba->sli_rev == LPFC_SLI_REV4) { |
2404 | shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; | 2544 | shost->dma_boundary = |
2545 | phba->sli4_hba.pc_sli4_params.sge_supp_len; | ||
2405 | shost->sg_tablesize = phba->cfg_sg_seg_cnt; | 2546 | shost->sg_tablesize = phba->cfg_sg_seg_cnt; |
2406 | } | 2547 | } |
2407 | 2548 | ||
@@ -2650,8 +2791,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba) | |||
2650 | lpfc_stop_hba_timers(phba); | 2791 | lpfc_stop_hba_timers(phba); |
2651 | phba->pport->work_port_events = 0; | 2792 | phba->pport->work_port_events = 0; |
2652 | phba->sli4_hba.intr_enable = 0; | 2793 | phba->sli4_hba.intr_enable = 0; |
2653 | /* Hard clear it for now, shall have more graceful way to wait later */ | ||
2654 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
2655 | } | 2794 | } |
2656 | 2795 | ||
2657 | /** | 2796 | /** |
@@ -2703,7 +2842,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) | |||
2703 | del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; | 2842 | del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; |
2704 | bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); | 2843 | bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); |
2705 | bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, | 2844 | bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, |
2706 | phba->fcf.fcf_indx); | 2845 | phba->fcf.current_rec.fcf_indx); |
2707 | 2846 | ||
2708 | if (!phba->sli4_hba.intr_enable) | 2847 | if (!phba->sli4_hba.intr_enable) |
2709 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | 2848 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
@@ -2727,6 +2866,57 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) | |||
2727 | } | 2866 | } |
2728 | 2867 | ||
2729 | /** | 2868 | /** |
2869 | * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer | ||
2870 | * @phba: Pointer to hba for which this call is being executed. | ||
2871 | * | ||
2872 | * This routine starts the timer waiting for the FCF rediscovery to complete. | ||
2873 | **/ | ||
2874 | void | ||
2875 | lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) | ||
2876 | { | ||
2877 | unsigned long fcf_redisc_wait_tmo = | ||
2878 | (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); | ||
2879 | /* Start fcf rediscovery wait period timer */ | ||
2880 | mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); | ||
2881 | spin_lock_irq(&phba->hbalock); | ||
2882 | /* Allow action to new fcf asynchronous event */ | ||
2883 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); | ||
2884 | /* Mark the FCF rediscovery pending state */ | ||
2885 | phba->fcf.fcf_flag |= FCF_REDISC_PEND; | ||
2886 | spin_unlock_irq(&phba->hbalock); | ||
2887 | } | ||
2888 | |||
2889 | /** | ||
2890 | * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout | ||
2891 | * @ptr: Map to lpfc_hba data structure pointer. | ||
2892 | * | ||
2893 | * This routine is invoked when waiting for FCF table rediscover has been | ||
2894 | * timed out. If new FCF record(s) has (have) been discovered during the | ||
2895 | * wait period, a new FCF event shall be added to the FCOE async event | ||
2896 | * list, and then worker thread shall be waked up for processing from the | ||
2897 | * worker thread context. | ||
2898 | **/ | ||
2899 | void | ||
2900 | lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) | ||
2901 | { | ||
2902 | struct lpfc_hba *phba = (struct lpfc_hba *)ptr; | ||
2903 | |||
2904 | /* Don't send FCF rediscovery event if timer cancelled */ | ||
2905 | spin_lock_irq(&phba->hbalock); | ||
2906 | if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { | ||
2907 | spin_unlock_irq(&phba->hbalock); | ||
2908 | return; | ||
2909 | } | ||
2910 | /* Clear FCF rediscovery timer pending flag */ | ||
2911 | phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; | ||
2912 | /* FCF rediscovery event to worker thread */ | ||
2913 | phba->fcf.fcf_flag |= FCF_REDISC_EVT; | ||
2914 | spin_unlock_irq(&phba->hbalock); | ||
2915 | /* wake up worker thread */ | ||
2916 | lpfc_worker_wake_up(phba); | ||
2917 | } | ||
2918 | |||
2919 | /** | ||
2730 | * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support | 2920 | * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support |
2731 | * @phba: pointer to lpfc hba data structure. | 2921 | * @phba: pointer to lpfc hba data structure. |
2732 | * | 2922 | * |
@@ -2978,6 +3168,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | |||
2978 | bf_get(lpfc_acqe_link_physical, acqe_link); | 3168 | bf_get(lpfc_acqe_link_physical, acqe_link); |
2979 | phba->sli4_hba.link_state.fault = | 3169 | phba->sli4_hba.link_state.fault = |
2980 | bf_get(lpfc_acqe_link_fault, acqe_link); | 3170 | bf_get(lpfc_acqe_link_fault, acqe_link); |
3171 | phba->sli4_hba.link_state.logical_speed = | ||
3172 | bf_get(lpfc_acqe_qos_link_speed, acqe_link); | ||
2981 | 3173 | ||
2982 | /* Invoke the lpfc_handle_latt mailbox command callback function */ | 3174 | /* Invoke the lpfc_handle_latt mailbox command callback function */ |
2983 | lpfc_mbx_cmpl_read_la(phba, pmb); | 3175 | lpfc_mbx_cmpl_read_la(phba, pmb); |
@@ -3007,22 +3199,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3007 | struct lpfc_nodelist *ndlp; | 3199 | struct lpfc_nodelist *ndlp; |
3008 | struct Scsi_Host *shost; | 3200 | struct Scsi_Host *shost; |
3009 | uint32_t link_state; | 3201 | uint32_t link_state; |
3202 | int active_vlink_present; | ||
3203 | struct lpfc_vport **vports; | ||
3204 | int i; | ||
3010 | 3205 | ||
3011 | phba->fc_eventTag = acqe_fcoe->event_tag; | 3206 | phba->fc_eventTag = acqe_fcoe->event_tag; |
3012 | phba->fcoe_eventtag = acqe_fcoe->event_tag; | 3207 | phba->fcoe_eventtag = acqe_fcoe->event_tag; |
3013 | switch (event_type) { | 3208 | switch (event_type) { |
3014 | case LPFC_FCOE_EVENT_TYPE_NEW_FCF: | 3209 | case LPFC_FCOE_EVENT_TYPE_NEW_FCF: |
3210 | case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: | ||
3015 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 3211 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
3016 | "2546 New FCF found index 0x%x tag 0x%x\n", | 3212 | "2546 New FCF found index 0x%x tag 0x%x\n", |
3017 | acqe_fcoe->index, | 3213 | acqe_fcoe->index, |
3018 | acqe_fcoe->event_tag); | 3214 | acqe_fcoe->event_tag); |
3019 | /* | ||
3020 | * If the current FCF is in discovered state, or | ||
3021 | * FCF discovery is in progress do nothing. | ||
3022 | */ | ||
3023 | spin_lock_irq(&phba->hbalock); | 3215 | spin_lock_irq(&phba->hbalock); |
3024 | if ((phba->fcf.fcf_flag & FCF_DISCOVERED) || | 3216 | if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || |
3025 | (phba->hba_flag & FCF_DISC_INPROGRESS)) { | 3217 | (phba->hba_flag & FCF_DISC_INPROGRESS)) { |
3218 | /* | ||
3219 | * If the current FCF is in discovered state or | ||
3220 | * FCF discovery is in progress, do nothing. | ||
3221 | */ | ||
3222 | spin_unlock_irq(&phba->hbalock); | ||
3223 | break; | ||
3224 | } | ||
3225 | if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { | ||
3226 | /* | ||
3227 | * If fast FCF failover rescan event is pending, | ||
3228 | * do nothing. | ||
3229 | */ | ||
3026 | spin_unlock_irq(&phba->hbalock); | 3230 | spin_unlock_irq(&phba->hbalock); |
3027 | break; | 3231 | break; |
3028 | } | 3232 | } |
@@ -3049,7 +3253,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3049 | " tag 0x%x\n", acqe_fcoe->index, | 3253 | " tag 0x%x\n", acqe_fcoe->index, |
3050 | acqe_fcoe->event_tag); | 3254 | acqe_fcoe->event_tag); |
3051 | /* If the event is not for currently used fcf do nothing */ | 3255 | /* If the event is not for currently used fcf do nothing */ |
3052 | if (phba->fcf.fcf_indx != acqe_fcoe->index) | 3256 | if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) |
3053 | break; | 3257 | break; |
3054 | /* | 3258 | /* |
3055 | * Currently, driver support only one FCF - so treat this as | 3259 | * Currently, driver support only one FCF - so treat this as |
@@ -3074,14 +3278,58 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3074 | if (!ndlp) | 3278 | if (!ndlp) |
3075 | break; | 3279 | break; |
3076 | shost = lpfc_shost_from_vport(vport); | 3280 | shost = lpfc_shost_from_vport(vport); |
3281 | if (phba->pport->port_state <= LPFC_FLOGI) | ||
3282 | break; | ||
3283 | /* If virtual link is not yet instantiated ignore CVL */ | ||
3284 | if (vport->port_state <= LPFC_FDISC) | ||
3285 | break; | ||
3286 | |||
3077 | lpfc_linkdown_port(vport); | 3287 | lpfc_linkdown_port(vport); |
3078 | if (vport->port_type != LPFC_NPIV_PORT) { | 3288 | lpfc_cleanup_pending_mbox(vport); |
3289 | spin_lock_irq(shost->host_lock); | ||
3290 | vport->fc_flag |= FC_VPORT_CVL_RCVD; | ||
3291 | spin_unlock_irq(shost->host_lock); | ||
3292 | active_vlink_present = 0; | ||
3293 | |||
3294 | vports = lpfc_create_vport_work_array(phba); | ||
3295 | if (vports) { | ||
3296 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; | ||
3297 | i++) { | ||
3298 | if ((!(vports[i]->fc_flag & | ||
3299 | FC_VPORT_CVL_RCVD)) && | ||
3300 | (vports[i]->port_state > LPFC_FDISC)) { | ||
3301 | active_vlink_present = 1; | ||
3302 | break; | ||
3303 | } | ||
3304 | } | ||
3305 | lpfc_destroy_vport_work_array(phba, vports); | ||
3306 | } | ||
3307 | |||
3308 | if (active_vlink_present) { | ||
3309 | /* | ||
3310 | * If there are other active VLinks present, | ||
3311 | * re-instantiate the Vlink using FDISC. | ||
3312 | */ | ||
3079 | mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); | 3313 | mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); |
3080 | spin_lock_irq(shost->host_lock); | 3314 | spin_lock_irq(shost->host_lock); |
3081 | ndlp->nlp_flag |= NLP_DELAY_TMO; | 3315 | ndlp->nlp_flag |= NLP_DELAY_TMO; |
3082 | spin_unlock_irq(shost->host_lock); | 3316 | spin_unlock_irq(shost->host_lock); |
3083 | ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; | 3317 | ndlp->nlp_last_elscmd = ELS_CMD_FDISC; |
3084 | vport->port_state = LPFC_FLOGI; | 3318 | vport->port_state = LPFC_FDISC; |
3319 | } else { | ||
3320 | /* | ||
3321 | * Otherwise, we request port to rediscover | ||
3322 | * the entire FCF table for a fast recovery | ||
3323 | * from possible case that the current FCF | ||
3324 | * is no longer valid. | ||
3325 | */ | ||
3326 | rc = lpfc_sli4_redisc_fcf_table(phba); | ||
3327 | if (rc) | ||
3328 | /* | ||
3329 | * Last resort will be re-try on the | ||
3330 | * the current registered FCF entry. | ||
3331 | */ | ||
3332 | lpfc_retry_pport_discovery(phba); | ||
3085 | } | 3333 | } |
3086 | break; | 3334 | break; |
3087 | default: | 3335 | default: |
@@ -3158,6 +3406,34 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) | |||
3158 | } | 3406 | } |
3159 | 3407 | ||
3160 | /** | 3408 | /** |
3409 | * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event | ||
3410 | * @phba: pointer to lpfc hba data structure. | ||
3411 | * | ||
3412 | * This routine is invoked by the worker thread to process FCF table | ||
3413 | * rediscovery pending completion event. | ||
3414 | **/ | ||
3415 | void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) | ||
3416 | { | ||
3417 | int rc; | ||
3418 | |||
3419 | spin_lock_irq(&phba->hbalock); | ||
3420 | /* Clear FCF rediscovery timeout event */ | ||
3421 | phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; | ||
3422 | /* Clear driver fast failover FCF record flag */ | ||
3423 | phba->fcf.failover_rec.flag = 0; | ||
3424 | /* Set state for FCF fast failover */ | ||
3425 | phba->fcf.fcf_flag |= FCF_REDISC_FOV; | ||
3426 | spin_unlock_irq(&phba->hbalock); | ||
3427 | |||
3428 | /* Scan FCF table from the first entry to re-discover SAN */ | ||
3429 | rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); | ||
3430 | if (rc) | ||
3431 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
3432 | "2747 Post FCF rediscovery read FCF record " | ||
3433 | "failed 0x%x\n", rc); | ||
3434 | } | ||
3435 | |||
3436 | /** | ||
3161 | * lpfc_api_table_setup - Set up per hba pci-device group func api jump table | 3437 | * lpfc_api_table_setup - Set up per hba pci-device group func api jump table |
3162 | * @phba: pointer to lpfc hba data structure. | 3438 | * @phba: pointer to lpfc hba data structure. |
3163 | * @dev_grp: The HBA PCI-Device group number. | 3439 | * @dev_grp: The HBA PCI-Device group number. |
@@ -3442,8 +3718,10 @@ static int | |||
3442 | lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | 3718 | lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) |
3443 | { | 3719 | { |
3444 | struct lpfc_sli *psli; | 3720 | struct lpfc_sli *psli; |
3445 | int rc; | 3721 | LPFC_MBOXQ_t *mboxq; |
3446 | int i, hbq_count; | 3722 | int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; |
3723 | uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; | ||
3724 | struct lpfc_mqe *mqe; | ||
3447 | 3725 | ||
3448 | /* Before proceed, wait for POST done and device ready */ | 3726 | /* Before proceed, wait for POST done and device ready */ |
3449 | rc = lpfc_sli4_post_status_check(phba); | 3727 | rc = lpfc_sli4_post_status_check(phba); |
@@ -3472,6 +3750,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
3472 | init_timer(&phba->eratt_poll); | 3750 | init_timer(&phba->eratt_poll); |
3473 | phba->eratt_poll.function = lpfc_poll_eratt; | 3751 | phba->eratt_poll.function = lpfc_poll_eratt; |
3474 | phba->eratt_poll.data = (unsigned long) phba; | 3752 | phba->eratt_poll.data = (unsigned long) phba; |
3753 | /* FCF rediscover timer */ | ||
3754 | init_timer(&phba->fcf.redisc_wait); | ||
3755 | phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; | ||
3756 | phba->fcf.redisc_wait.data = (unsigned long)phba; | ||
3757 | |||
3475 | /* | 3758 | /* |
3476 | * We need to do a READ_CONFIG mailbox command here before | 3759 | * We need to do a READ_CONFIG mailbox command here before |
3477 | * calling lpfc_get_cfgparam. For VFs this will report the | 3760 | * calling lpfc_get_cfgparam. For VFs this will report the |
@@ -3496,31 +3779,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
3496 | * used to create the sg_dma_buf_pool must be dynamically calculated. | 3779 | * used to create the sg_dma_buf_pool must be dynamically calculated. |
3497 | * 2 segments are added since the IOCB needs a command and response bde. | 3780 | * 2 segments are added since the IOCB needs a command and response bde. |
3498 | * To insure that the scsi sgl does not cross a 4k page boundary only | 3781 | * To insure that the scsi sgl does not cross a 4k page boundary only |
3499 | * sgl sizes of 1k, 2k, 4k, and 8k are supported. | 3782 | * sgl sizes of must be a power of 2. |
3500 | * Table of sgl sizes and seg_cnt: | ||
3501 | * sgl size, sg_seg_cnt total seg | ||
3502 | * 1k 50 52 | ||
3503 | * 2k 114 116 | ||
3504 | * 4k 242 244 | ||
3505 | * 8k 498 500 | ||
3506 | * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024 | ||
3507 | * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048 | ||
3508 | * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096 | ||
3509 | * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192 | ||
3510 | */ | 3783 | */ |
3511 | if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) | 3784 | buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + |
3512 | phba->cfg_sg_seg_cnt = 50; | 3785 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); |
3513 | else if (phba->cfg_sg_seg_cnt <= 114) | 3786 | /* Feature Level 1 hardware is limited to 2 pages */ |
3514 | phba->cfg_sg_seg_cnt = 114; | 3787 | if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) == |
3515 | else if (phba->cfg_sg_seg_cnt <= 242) | 3788 | LPFC_SLI_INTF_FEATURELEVEL1_1)) |
3516 | phba->cfg_sg_seg_cnt = 242; | 3789 | max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; |
3517 | else | 3790 | else |
3518 | phba->cfg_sg_seg_cnt = 498; | 3791 | max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; |
3519 | 3792 | for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; | |
3520 | phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) | 3793 | dma_buf_size < max_buf_size && buf_size > dma_buf_size; |
3521 | + sizeof(struct fcp_rsp); | 3794 | dma_buf_size = dma_buf_size << 1) |
3522 | phba->cfg_sg_dma_buf_size += | 3795 | ; |
3523 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); | 3796 | if (dma_buf_size == max_buf_size) |
3797 | phba->cfg_sg_seg_cnt = (dma_buf_size - | ||
3798 | sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - | ||
3799 | (2 * sizeof(struct sli4_sge))) / | ||
3800 | sizeof(struct sli4_sge); | ||
3801 | phba->cfg_sg_dma_buf_size = dma_buf_size; | ||
3524 | 3802 | ||
3525 | /* Initialize buffer queue management fields */ | 3803 | /* Initialize buffer queue management fields */ |
3526 | hbq_count = lpfc_sli_hbq_count(); | 3804 | hbq_count = lpfc_sli_hbq_count(); |
@@ -3638,6 +3916,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
3638 | goto out_free_fcp_eq_hdl; | 3916 | goto out_free_fcp_eq_hdl; |
3639 | } | 3917 | } |
3640 | 3918 | ||
3919 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, | ||
3920 | GFP_KERNEL); | ||
3921 | if (!mboxq) { | ||
3922 | rc = -ENOMEM; | ||
3923 | goto out_free_fcp_eq_hdl; | ||
3924 | } | ||
3925 | |||
3926 | /* Get the Supported Pages. It is always available. */ | ||
3927 | lpfc_supported_pages(mboxq); | ||
3928 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
3929 | if (unlikely(rc)) { | ||
3930 | rc = -EIO; | ||
3931 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
3932 | goto out_free_fcp_eq_hdl; | ||
3933 | } | ||
3934 | |||
3935 | mqe = &mboxq->u.mqe; | ||
3936 | memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), | ||
3937 | LPFC_MAX_SUPPORTED_PAGES); | ||
3938 | for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { | ||
3939 | switch (pn_page[i]) { | ||
3940 | case LPFC_SLI4_PARAMETERS: | ||
3941 | phba->sli4_hba.pc_sli4_params.supported = 1; | ||
3942 | break; | ||
3943 | default: | ||
3944 | break; | ||
3945 | } | ||
3946 | } | ||
3947 | |||
3948 | /* Read the port's SLI4 Parameters capabilities if supported. */ | ||
3949 | if (phba->sli4_hba.pc_sli4_params.supported) | ||
3950 | rc = lpfc_pc_sli4_params_get(phba, mboxq); | ||
3951 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
3952 | if (rc) { | ||
3953 | rc = -EIO; | ||
3954 | goto out_free_fcp_eq_hdl; | ||
3955 | } | ||
3641 | return rc; | 3956 | return rc; |
3642 | 3957 | ||
3643 | out_free_fcp_eq_hdl: | 3958 | out_free_fcp_eq_hdl: |
@@ -3733,6 +4048,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) | |||
3733 | int | 4048 | int |
3734 | lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | 4049 | lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) |
3735 | { | 4050 | { |
4051 | phba->lpfc_hba_init_link = lpfc_hba_init_link; | ||
4052 | phba->lpfc_hba_down_link = lpfc_hba_down_link; | ||
3736 | switch (dev_grp) { | 4053 | switch (dev_grp) { |
3737 | case LPFC_PCI_DEV_LP: | 4054 | case LPFC_PCI_DEV_LP: |
3738 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; | 4055 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; |
@@ -4291,7 +4608,7 @@ lpfc_hba_alloc(struct pci_dev *pdev) | |||
4291 | return NULL; | 4608 | return NULL; |
4292 | } | 4609 | } |
4293 | 4610 | ||
4294 | mutex_init(&phba->ct_event_mutex); | 4611 | spin_lock_init(&phba->ct_ev_lock); |
4295 | INIT_LIST_HEAD(&phba->ct_ev_waiters); | 4612 | INIT_LIST_HEAD(&phba->ct_ev_waiters); |
4296 | 4613 | ||
4297 | return phba; | 4614 | return phba; |
@@ -4641,7 +4958,7 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) | |||
4641 | int | 4958 | int |
4642 | lpfc_sli4_post_status_check(struct lpfc_hba *phba) | 4959 | lpfc_sli4_post_status_check(struct lpfc_hba *phba) |
4643 | { | 4960 | { |
4644 | struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; | 4961 | struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg; |
4645 | int i, port_error = -ENODEV; | 4962 | int i, port_error = -ENODEV; |
4646 | 4963 | ||
4647 | if (!phba->sli4_hba.STAregaddr) | 4964 | if (!phba->sli4_hba.STAregaddr) |
@@ -4677,14 +4994,21 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
4677 | bf_get(lpfc_hst_state_port_status, &sta_reg)); | 4994 | bf_get(lpfc_hst_state_port_status, &sta_reg)); |
4678 | 4995 | ||
4679 | /* Log device information */ | 4996 | /* Log device information */ |
4680 | scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); | 4997 | phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr); |
4681 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 4998 | if (bf_get(lpfc_sli_intf_valid, |
4682 | "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " | 4999 | &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) { |
4683 | "FeatureL1=0x%x, FeatureL2=0x%x\n", | 5000 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
4684 | bf_get(lpfc_scratchpad_chiptype, &scratchpad), | 5001 | "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " |
4685 | bf_get(lpfc_scratchpad_slirev, &scratchpad), | 5002 | "FeatureL1=0x%x, FeatureL2=0x%x\n", |
4686 | bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), | 5003 | bf_get(lpfc_sli_intf_sli_family, |
4687 | bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); | 5004 | &phba->sli4_hba.sli_intf), |
5005 | bf_get(lpfc_sli_intf_slirev, | ||
5006 | &phba->sli4_hba.sli_intf), | ||
5007 | bf_get(lpfc_sli_intf_featurelevel1, | ||
5008 | &phba->sli4_hba.sli_intf), | ||
5009 | bf_get(lpfc_sli_intf_featurelevel2, | ||
5010 | &phba->sli4_hba.sli_intf)); | ||
5011 | } | ||
4688 | phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); | 5012 | phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); |
4689 | phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); | 5013 | phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); |
4690 | /* With uncoverable error, log the error message and return error */ | 5014 | /* With uncoverable error, log the error message and return error */ |
@@ -4723,8 +5047,8 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) | |||
4723 | LPFC_UE_MASK_LO; | 5047 | LPFC_UE_MASK_LO; |
4724 | phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + | 5048 | phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + |
4725 | LPFC_UE_MASK_HI; | 5049 | LPFC_UE_MASK_HI; |
4726 | phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + | 5050 | phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + |
4727 | LPFC_SCRATCHPAD; | 5051 | LPFC_SLI_INTF; |
4728 | } | 5052 | } |
4729 | 5053 | ||
4730 | /** | 5054 | /** |
@@ -5999,7 +6323,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) | |||
5999 | spin_lock_irqsave(&phba->hbalock, flags); | 6323 | spin_lock_irqsave(&phba->hbalock, flags); |
6000 | /* Mark the FCFI is no longer registered */ | 6324 | /* Mark the FCFI is no longer registered */ |
6001 | phba->fcf.fcf_flag &= | 6325 | phba->fcf.fcf_flag &= |
6002 | ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); | 6326 | ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE); |
6003 | spin_unlock_irqrestore(&phba->hbalock, flags); | 6327 | spin_unlock_irqrestore(&phba->hbalock, flags); |
6004 | } | 6328 | } |
6005 | } | 6329 | } |
@@ -6039,16 +6363,20 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
6039 | 6363 | ||
6040 | /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the | 6364 | /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the |
6041 | * number of bytes required by each mapping. They are actually | 6365 | * number of bytes required by each mapping. They are actually |
6042 | * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. | 6366 | * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device. |
6043 | */ | 6367 | */ |
6044 | phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); | 6368 | if (pci_resource_start(pdev, 0)) { |
6045 | bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); | 6369 | phba->pci_bar0_map = pci_resource_start(pdev, 0); |
6046 | 6370 | bar0map_len = pci_resource_len(pdev, 0); | |
6047 | phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); | 6371 | } else { |
6048 | bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); | 6372 | phba->pci_bar0_map = pci_resource_start(pdev, 1); |
6373 | bar0map_len = pci_resource_len(pdev, 1); | ||
6374 | } | ||
6375 | phba->pci_bar1_map = pci_resource_start(pdev, 2); | ||
6376 | bar1map_len = pci_resource_len(pdev, 2); | ||
6049 | 6377 | ||
6050 | phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); | 6378 | phba->pci_bar2_map = pci_resource_start(pdev, 4); |
6051 | bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); | 6379 | bar2map_len = pci_resource_len(pdev, 4); |
6052 | 6380 | ||
6053 | /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ | 6381 | /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ |
6054 | phba->sli4_hba.conf_regs_memmap_p = | 6382 | phba->sli4_hba.conf_regs_memmap_p = |
@@ -6793,6 +7121,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) | |||
6793 | phba->pport->work_port_events = 0; | 7121 | phba->pport->work_port_events = 0; |
6794 | } | 7122 | } |
6795 | 7123 | ||
7124 | /** | ||
7125 | * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. | ||
7126 | * @phba: Pointer to HBA context object. | ||
7127 | * @mboxq: Pointer to the mailboxq memory for the mailbox command response. | ||
7128 | * | ||
7129 | * This function is called in the SLI4 code path to read the port's | ||
7130 | * sli4 capabilities. | ||
7131 | * | ||
7132 | * This function may be be called from any context that can block-wait | ||
7133 | * for the completion. The expectation is that this routine is called | ||
7134 | * typically from probe_one or from the online routine. | ||
7135 | **/ | ||
7136 | int | ||
7137 | lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | ||
7138 | { | ||
7139 | int rc; | ||
7140 | struct lpfc_mqe *mqe; | ||
7141 | struct lpfc_pc_sli4_params *sli4_params; | ||
7142 | uint32_t mbox_tmo; | ||
7143 | |||
7144 | rc = 0; | ||
7145 | mqe = &mboxq->u.mqe; | ||
7146 | |||
7147 | /* Read the port's SLI4 Parameters port capabilities */ | ||
7148 | lpfc_sli4_params(mboxq); | ||
7149 | if (!phba->sli4_hba.intr_enable) | ||
7150 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
7151 | else { | ||
7152 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); | ||
7153 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); | ||
7154 | } | ||
7155 | |||
7156 | if (unlikely(rc)) | ||
7157 | return 1; | ||
7158 | |||
7159 | sli4_params = &phba->sli4_hba.pc_sli4_params; | ||
7160 | sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); | ||
7161 | sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); | ||
7162 | sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); | ||
7163 | sli4_params->featurelevel_1 = bf_get(featurelevel_1, | ||
7164 | &mqe->un.sli4_params); | ||
7165 | sli4_params->featurelevel_2 = bf_get(featurelevel_2, | ||
7166 | &mqe->un.sli4_params); | ||
7167 | sli4_params->proto_types = mqe->un.sli4_params.word3; | ||
7168 | sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; | ||
7169 | sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); | ||
7170 | sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); | ||
7171 | sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); | ||
7172 | sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); | ||
7173 | sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); | ||
7174 | sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); | ||
7175 | sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); | ||
7176 | sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); | ||
7177 | sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); | ||
7178 | sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); | ||
7179 | sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); | ||
7180 | sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); | ||
7181 | sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); | ||
7182 | sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); | ||
7183 | sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); | ||
7184 | sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); | ||
7185 | sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); | ||
7186 | sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); | ||
7187 | sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); | ||
7188 | return rc; | ||
7189 | } | ||
7190 | |||
6796 | /** | 7191 | /** |
6797 | * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. | 7192 | * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. |
6798 | * @pdev: pointer to PCI device | 7193 | * @pdev: pointer to PCI device |
@@ -7134,6 +7529,12 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) | |||
7134 | pci_set_power_state(pdev, PCI_D0); | 7529 | pci_set_power_state(pdev, PCI_D0); |
7135 | pci_restore_state(pdev); | 7530 | pci_restore_state(pdev); |
7136 | 7531 | ||
7532 | /* | ||
7533 | * As the new kernel behavior of pci_restore_state() API call clears | ||
7534 | * device saved_state flag, need to save the restored state again. | ||
7535 | */ | ||
7536 | pci_save_state(pdev); | ||
7537 | |||
7137 | if (pdev->is_busmaster) | 7538 | if (pdev->is_busmaster) |
7138 | pci_set_master(pdev); | 7539 | pci_set_master(pdev); |
7139 | 7540 | ||
@@ -7317,6 +7718,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev) | |||
7317 | } | 7718 | } |
7318 | 7719 | ||
7319 | pci_restore_state(pdev); | 7720 | pci_restore_state(pdev); |
7721 | |||
7722 | /* | ||
7723 | * As the new kernel behavior of pci_restore_state() API call clears | ||
7724 | * device saved_state flag, need to save the restored state again. | ||
7725 | */ | ||
7726 | pci_save_state(pdev); | ||
7727 | |||
7320 | if (pdev->is_busmaster) | 7728 | if (pdev->is_busmaster) |
7321 | pci_set_master(pdev); | 7729 | pci_set_master(pdev); |
7322 | 7730 | ||
@@ -7726,6 +8134,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) | |||
7726 | /* Restore device state from PCI config space */ | 8134 | /* Restore device state from PCI config space */ |
7727 | pci_set_power_state(pdev, PCI_D0); | 8135 | pci_set_power_state(pdev, PCI_D0); |
7728 | pci_restore_state(pdev); | 8136 | pci_restore_state(pdev); |
8137 | |||
8138 | /* | ||
8139 | * As the new kernel behavior of pci_restore_state() API call clears | ||
8140 | * device saved_state flag, need to save the restored state again. | ||
8141 | */ | ||
8142 | pci_save_state(pdev); | ||
8143 | |||
7729 | if (pdev->is_busmaster) | 8144 | if (pdev->is_busmaster) |
7730 | pci_set_master(pdev); | 8145 | pci_set_master(pdev); |
7731 | 8146 | ||
@@ -7845,11 +8260,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
7845 | int rc; | 8260 | int rc; |
7846 | struct lpfc_sli_intf intf; | 8261 | struct lpfc_sli_intf intf; |
7847 | 8262 | ||
7848 | if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0)) | 8263 | if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) |
7849 | return -ENODEV; | 8264 | return -ENODEV; |
7850 | 8265 | ||
7851 | if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && | 8266 | if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && |
7852 | (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4)) | 8267 | (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) |
7853 | rc = lpfc_pci_probe_one_s4(pdev, pid); | 8268 | rc = lpfc_pci_probe_one_s4(pdev, pid); |
7854 | else | 8269 | else |
7855 | rc = lpfc_pci_probe_one_s3(pdev, pid); | 8270 | rc = lpfc_pci_probe_one_s3(pdev, pid); |
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index a9afd8b94b6a..6c4dce1a30ca 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -1707,7 +1707,8 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, | |||
1707 | alloc_len - sizeof(union lpfc_sli4_cfg_shdr); | 1707 | alloc_len - sizeof(union lpfc_sli4_cfg_shdr); |
1708 | } | 1708 | } |
1709 | /* The sub-header is in DMA memory, which needs endian converstion */ | 1709 | /* The sub-header is in DMA memory, which needs endian converstion */ |
1710 | lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, | 1710 | if (cfg_shdr) |
1711 | lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, | ||
1711 | sizeof(union lpfc_sli4_cfg_shdr)); | 1712 | sizeof(union lpfc_sli4_cfg_shdr)); |
1712 | 1713 | ||
1713 | return alloc_len; | 1714 | return alloc_len; |
@@ -1747,6 +1748,65 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) | |||
1747 | } | 1748 | } |
1748 | 1749 | ||
1749 | /** | 1750 | /** |
1751 | * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd | ||
1752 | * @phba: pointer to lpfc hba data structure. | ||
1753 | * @fcf_index: index to fcf table. | ||
1754 | * | ||
1755 | * This routine routine allocates and constructs non-embedded mailbox command | ||
1756 | * for reading a FCF table entry refered by @fcf_index. | ||
1757 | * | ||
1758 | * Return: pointer to the mailbox command constructed if successful, otherwise | ||
1759 | * NULL. | ||
1760 | **/ | ||
1761 | int | ||
1762 | lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, | ||
1763 | struct lpfcMboxq *mboxq, | ||
1764 | uint16_t fcf_index) | ||
1765 | { | ||
1766 | void *virt_addr; | ||
1767 | dma_addr_t phys_addr; | ||
1768 | uint8_t *bytep; | ||
1769 | struct lpfc_mbx_sge sge; | ||
1770 | uint32_t alloc_len, req_len; | ||
1771 | struct lpfc_mbx_read_fcf_tbl *read_fcf; | ||
1772 | |||
1773 | if (!mboxq) | ||
1774 | return -ENOMEM; | ||
1775 | |||
1776 | req_len = sizeof(struct fcf_record) + | ||
1777 | sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); | ||
1778 | |||
1779 | /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ | ||
1780 | alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
1781 | LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, | ||
1782 | LPFC_SLI4_MBX_NEMBED); | ||
1783 | |||
1784 | if (alloc_len < req_len) { | ||
1785 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
1786 | "0291 Allocated DMA memory size (x%x) is " | ||
1787 | "less than the requested DMA memory " | ||
1788 | "size (x%x)\n", alloc_len, req_len); | ||
1789 | return -ENOMEM; | ||
1790 | } | ||
1791 | |||
1792 | /* Get the first SGE entry from the non-embedded DMA memory. This | ||
1793 | * routine only uses a single SGE. | ||
1794 | */ | ||
1795 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); | ||
1796 | phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); | ||
1797 | virt_addr = mboxq->sge_array->addr[0]; | ||
1798 | read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; | ||
1799 | |||
1800 | /* Set up command fields */ | ||
1801 | bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); | ||
1802 | /* Perform necessary endian conversion */ | ||
1803 | bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); | ||
1804 | lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); | ||
1805 | |||
1806 | return 0; | ||
1807 | } | ||
1808 | |||
1809 | /** | ||
1750 | * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox | 1810 | * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox |
1751 | * @mboxq: pointer to lpfc mbox command. | 1811 | * @mboxq: pointer to lpfc mbox command. |
1752 | * | 1812 | * |
@@ -1946,13 +2006,14 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) | |||
1946 | bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); | 2006 | bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); |
1947 | bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); | 2007 | bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); |
1948 | bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); | 2008 | bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); |
1949 | bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx); | 2009 | bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, |
2010 | phba->fcf.current_rec.fcf_indx); | ||
1950 | /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ | 2011 | /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ |
1951 | bf_set(lpfc_reg_fcfi_mam, reg_fcfi, | 2012 | bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); |
1952 | (~phba->fcf.addr_mode) & 0x3); | 2013 | if (phba->fcf.current_rec.vlan_id != 0xFFFF) { |
1953 | if (phba->fcf.fcf_flag & FCF_VALID_VLAN) { | ||
1954 | bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); | 2014 | bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); |
1955 | bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id); | 2015 | bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, |
2016 | phba->fcf.current_rec.vlan_id); | ||
1956 | } | 2017 | } |
1957 | } | 2018 | } |
1958 | 2019 | ||
@@ -1992,3 +2053,41 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) | |||
1992 | bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); | 2053 | bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); |
1993 | resume_rpi->event_tag = ndlp->phba->fc_eventTag; | 2054 | resume_rpi->event_tag = ndlp->phba->fc_eventTag; |
1994 | } | 2055 | } |
2056 | |||
2057 | /** | ||
2058 | * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages | ||
2059 | * mailbox command. | ||
2060 | * @mbox: pointer to lpfc mbox command to initialize. | ||
2061 | * | ||
2062 | * The PORT_CAPABILITIES supported pages mailbox command is issued to | ||
2063 | * retrieve the particular feature pages supported by the port. | ||
2064 | **/ | ||
2065 | void | ||
2066 | lpfc_supported_pages(struct lpfcMboxq *mbox) | ||
2067 | { | ||
2068 | struct lpfc_mbx_supp_pages *supp_pages; | ||
2069 | |||
2070 | memset(mbox, 0, sizeof(*mbox)); | ||
2071 | supp_pages = &mbox->u.mqe.un.supp_pages; | ||
2072 | bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); | ||
2073 | bf_set(cpn, supp_pages, LPFC_SUPP_PAGES); | ||
2074 | } | ||
2075 | |||
2076 | /** | ||
2077 | * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params | ||
2078 | * mailbox command. | ||
2079 | * @mbox: pointer to lpfc mbox command to initialize. | ||
2080 | * | ||
2081 | * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to | ||
2082 | * retrieve the particular SLI4 features supported by the port. | ||
2083 | **/ | ||
2084 | void | ||
2085 | lpfc_sli4_params(struct lpfcMboxq *mbox) | ||
2086 | { | ||
2087 | struct lpfc_mbx_sli4_params *sli4_params; | ||
2088 | |||
2089 | memset(mbox, 0, sizeof(*mbox)); | ||
2090 | sli4_params = &mbox->u.mqe.un.sli4_params; | ||
2091 | bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); | ||
2092 | bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS); | ||
2093 | } | ||
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index d655ed3eebef..f3cfbe2ce986 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2008 Emulex. All rights reserved. * | 4 | * Copyright (C) 2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -177,23 +177,3 @@ struct temp_event { | |||
177 | uint32_t data; | 177 | uint32_t data; |
178 | }; | 178 | }; |
179 | 179 | ||
180 | /* bsg definitions */ | ||
181 | #define LPFC_BSG_VENDOR_SET_CT_EVENT 1 | ||
182 | #define LPFC_BSG_VENDOR_GET_CT_EVENT 2 | ||
183 | |||
184 | struct set_ct_event { | ||
185 | uint32_t command; | ||
186 | uint32_t ev_req_id; | ||
187 | uint32_t ev_reg_id; | ||
188 | }; | ||
189 | |||
190 | struct get_ct_event { | ||
191 | uint32_t command; | ||
192 | uint32_t ev_reg_id; | ||
193 | uint32_t ev_req_id; | ||
194 | }; | ||
195 | |||
196 | struct get_ct_event_reply { | ||
197 | uint32_t immed_data; | ||
198 | uint32_t type; | ||
199 | }; | ||
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 2ed6af194932..d20ae6b3b3cf 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -62,7 +62,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
62 | 62 | ||
63 | int | 63 | int |
64 | lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 64 | lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
65 | struct serv_parm * sp, uint32_t class) | 65 | struct serv_parm *sp, uint32_t class, int flogi) |
66 | { | 66 | { |
67 | volatile struct serv_parm *hsp = &vport->fc_sparam; | 67 | volatile struct serv_parm *hsp = &vport->fc_sparam; |
68 | uint16_t hsp_value, ssp_value = 0; | 68 | uint16_t hsp_value, ssp_value = 0; |
@@ -75,49 +75,56 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
75 | * correcting the byte values. | 75 | * correcting the byte values. |
76 | */ | 76 | */ |
77 | if (sp->cls1.classValid) { | 77 | if (sp->cls1.classValid) { |
78 | hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) | | 78 | if (!flogi) { |
79 | hsp->cls1.rcvDataSizeLsb; | 79 | hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) | |
80 | ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | | 80 | hsp->cls1.rcvDataSizeLsb); |
81 | sp->cls1.rcvDataSizeLsb; | 81 | ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) | |
82 | if (!ssp_value) | 82 | sp->cls1.rcvDataSizeLsb); |
83 | goto bad_service_param; | 83 | if (!ssp_value) |
84 | if (ssp_value > hsp_value) { | 84 | goto bad_service_param; |
85 | sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; | 85 | if (ssp_value > hsp_value) { |
86 | sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; | 86 | sp->cls1.rcvDataSizeLsb = |
87 | hsp->cls1.rcvDataSizeLsb; | ||
88 | sp->cls1.rcvDataSizeMsb = | ||
89 | hsp->cls1.rcvDataSizeMsb; | ||
90 | } | ||
87 | } | 91 | } |
88 | } else if (class == CLASS1) { | 92 | } else if (class == CLASS1) |
89 | goto bad_service_param; | 93 | goto bad_service_param; |
90 | } | ||
91 | |||
92 | if (sp->cls2.classValid) { | 94 | if (sp->cls2.classValid) { |
93 | hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) | | 95 | if (!flogi) { |
94 | hsp->cls2.rcvDataSizeLsb; | 96 | hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) | |
95 | ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | | 97 | hsp->cls2.rcvDataSizeLsb); |
96 | sp->cls2.rcvDataSizeLsb; | 98 | ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) | |
97 | if (!ssp_value) | 99 | sp->cls2.rcvDataSizeLsb); |
98 | goto bad_service_param; | 100 | if (!ssp_value) |
99 | if (ssp_value > hsp_value) { | 101 | goto bad_service_param; |
100 | sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; | 102 | if (ssp_value > hsp_value) { |
101 | sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; | 103 | sp->cls2.rcvDataSizeLsb = |
104 | hsp->cls2.rcvDataSizeLsb; | ||
105 | sp->cls2.rcvDataSizeMsb = | ||
106 | hsp->cls2.rcvDataSizeMsb; | ||
107 | } | ||
102 | } | 108 | } |
103 | } else if (class == CLASS2) { | 109 | } else if (class == CLASS2) |
104 | goto bad_service_param; | 110 | goto bad_service_param; |
105 | } | ||
106 | |||
107 | if (sp->cls3.classValid) { | 111 | if (sp->cls3.classValid) { |
108 | hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) | | 112 | if (!flogi) { |
109 | hsp->cls3.rcvDataSizeLsb; | 113 | hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) | |
110 | ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | | 114 | hsp->cls3.rcvDataSizeLsb); |
111 | sp->cls3.rcvDataSizeLsb; | 115 | ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) | |
112 | if (!ssp_value) | 116 | sp->cls3.rcvDataSizeLsb); |
113 | goto bad_service_param; | 117 | if (!ssp_value) |
114 | if (ssp_value > hsp_value) { | 118 | goto bad_service_param; |
115 | sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; | 119 | if (ssp_value > hsp_value) { |
116 | sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; | 120 | sp->cls3.rcvDataSizeLsb = |
121 | hsp->cls3.rcvDataSizeLsb; | ||
122 | sp->cls3.rcvDataSizeMsb = | ||
123 | hsp->cls3.rcvDataSizeMsb; | ||
124 | } | ||
117 | } | 125 | } |
118 | } else if (class == CLASS3) { | 126 | } else if (class == CLASS3) |
119 | goto bad_service_param; | 127 | goto bad_service_param; |
120 | } | ||
121 | 128 | ||
122 | /* | 129 | /* |
123 | * Preserve the upper four bits of the MSB from the PLOGI response. | 130 | * Preserve the upper four bits of the MSB from the PLOGI response. |
@@ -247,7 +254,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
247 | int rc; | 254 | int rc; |
248 | 255 | ||
249 | memset(&stat, 0, sizeof (struct ls_rjt)); | 256 | memset(&stat, 0, sizeof (struct ls_rjt)); |
250 | if (vport->port_state <= LPFC_FLOGI) { | 257 | if (vport->port_state <= LPFC_FDISC) { |
251 | /* Before responding to PLOGI, check for pt2pt mode. | 258 | /* Before responding to PLOGI, check for pt2pt mode. |
252 | * If we are pt2pt, with an outstanding FLOGI, abort | 259 | * If we are pt2pt, with an outstanding FLOGI, abort |
253 | * the FLOGI and resend it first. | 260 | * the FLOGI and resend it first. |
@@ -295,7 +302,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
295 | NULL); | 302 | NULL); |
296 | return 0; | 303 | return 0; |
297 | } | 304 | } |
298 | if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { | 305 | if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) { |
299 | /* Reject this request because invalid parameters */ | 306 | /* Reject this request because invalid parameters */ |
300 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; | 307 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; |
301 | stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; | 308 | stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; |
@@ -831,7 +838,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, | |||
831 | "0142 PLOGI RSP: Invalid WWN.\n"); | 838 | "0142 PLOGI RSP: Invalid WWN.\n"); |
832 | goto out; | 839 | goto out; |
833 | } | 840 | } |
834 | if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) | 841 | if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0)) |
835 | goto out; | 842 | goto out; |
836 | /* PLOGI chkparm OK */ | 843 | /* PLOGI chkparm OK */ |
837 | lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, | 844 | lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a246410ce9df..7f21b47db791 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -626,6 +626,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, | |||
626 | &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { | 626 | &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { |
627 | if (psb->cur_iocbq.sli4_xritag == xri) { | 627 | if (psb->cur_iocbq.sli4_xritag == xri) { |
628 | list_del(&psb->list); | 628 | list_del(&psb->list); |
629 | psb->exch_busy = 0; | ||
629 | psb->status = IOSTAT_SUCCESS; | 630 | psb->status = IOSTAT_SUCCESS; |
630 | spin_unlock_irqrestore( | 631 | spin_unlock_irqrestore( |
631 | &phba->sli4_hba.abts_scsi_buf_list_lock, | 632 | &phba->sli4_hba.abts_scsi_buf_list_lock, |
@@ -688,11 +689,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) | |||
688 | list); | 689 | list); |
689 | if (status) { | 690 | if (status) { |
690 | /* Put this back on the abort scsi list */ | 691 | /* Put this back on the abort scsi list */ |
691 | psb->status = IOSTAT_LOCAL_REJECT; | 692 | psb->exch_busy = 1; |
692 | psb->result = IOERR_ABORT_REQUESTED; | ||
693 | rc++; | 693 | rc++; |
694 | } else | 694 | } else { |
695 | psb->exch_busy = 0; | ||
695 | psb->status = IOSTAT_SUCCESS; | 696 | psb->status = IOSTAT_SUCCESS; |
697 | } | ||
696 | /* Put it back into the SCSI buffer list */ | 698 | /* Put it back into the SCSI buffer list */ |
697 | lpfc_release_scsi_buf_s4(phba, psb); | 699 | lpfc_release_scsi_buf_s4(phba, psb); |
698 | } | 700 | } |
@@ -796,19 +798,17 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) | |||
796 | */ | 798 | */ |
797 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); | 799 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); |
798 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); | 800 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); |
799 | bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd)); | ||
800 | bf_set(lpfc_sli4_sge_last, sgl, 0); | 801 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
801 | sgl->word2 = cpu_to_le32(sgl->word2); | 802 | sgl->word2 = cpu_to_le32(sgl->word2); |
802 | sgl->word3 = cpu_to_le32(sgl->word3); | 803 | sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); |
803 | sgl++; | 804 | sgl++; |
804 | 805 | ||
805 | /* Setup the physical region for the FCP RSP */ | 806 | /* Setup the physical region for the FCP RSP */ |
806 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); | 807 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); |
807 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); | 808 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); |
808 | bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp)); | ||
809 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 809 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
810 | sgl->word2 = cpu_to_le32(sgl->word2); | 810 | sgl->word2 = cpu_to_le32(sgl->word2); |
811 | sgl->word3 = cpu_to_le32(sgl->word3); | 811 | sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); |
812 | 812 | ||
813 | /* | 813 | /* |
814 | * Since the IOCB for the FCP I/O is built into this | 814 | * Since the IOCB for the FCP I/O is built into this |
@@ -839,11 +839,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) | |||
839 | psb->cur_iocbq.sli4_xritag); | 839 | psb->cur_iocbq.sli4_xritag); |
840 | if (status) { | 840 | if (status) { |
841 | /* Put this back on the abort scsi list */ | 841 | /* Put this back on the abort scsi list */ |
842 | psb->status = IOSTAT_LOCAL_REJECT; | 842 | psb->exch_busy = 1; |
843 | psb->result = IOERR_ABORT_REQUESTED; | ||
844 | rc++; | 843 | rc++; |
845 | } else | 844 | } else { |
845 | psb->exch_busy = 0; | ||
846 | psb->status = IOSTAT_SUCCESS; | 846 | psb->status = IOSTAT_SUCCESS; |
847 | } | ||
847 | /* Put it back into the SCSI buffer list */ | 848 | /* Put it back into the SCSI buffer list */ |
848 | lpfc_release_scsi_buf_s4(phba, psb); | 849 | lpfc_release_scsi_buf_s4(phba, psb); |
849 | break; | 850 | break; |
@@ -857,11 +858,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) | |||
857 | list); | 858 | list); |
858 | if (status) { | 859 | if (status) { |
859 | /* Put this back on the abort scsi list */ | 860 | /* Put this back on the abort scsi list */ |
860 | psb->status = IOSTAT_LOCAL_REJECT; | 861 | psb->exch_busy = 1; |
861 | psb->result = IOERR_ABORT_REQUESTED; | ||
862 | rc++; | 862 | rc++; |
863 | } else | 863 | } else { |
864 | psb->exch_busy = 0; | ||
864 | psb->status = IOSTAT_SUCCESS; | 865 | psb->status = IOSTAT_SUCCESS; |
866 | } | ||
865 | /* Put it back into the SCSI buffer list */ | 867 | /* Put it back into the SCSI buffer list */ |
866 | lpfc_release_scsi_buf_s4(phba, psb); | 868 | lpfc_release_scsi_buf_s4(phba, psb); |
867 | } | 869 | } |
@@ -951,8 +953,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | |||
951 | { | 953 | { |
952 | unsigned long iflag = 0; | 954 | unsigned long iflag = 0; |
953 | 955 | ||
954 | if (psb->status == IOSTAT_LOCAL_REJECT | 956 | if (psb->exch_busy) { |
955 | && psb->result == IOERR_ABORT_REQUESTED) { | ||
956 | spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, | 957 | spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, |
957 | iflag); | 958 | iflag); |
958 | psb->pCmd = NULL; | 959 | psb->pCmd = NULL; |
@@ -1869,7 +1870,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
1869 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { | 1870 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { |
1870 | physaddr = sg_dma_address(sgel); | 1871 | physaddr = sg_dma_address(sgel); |
1871 | dma_len = sg_dma_len(sgel); | 1872 | dma_len = sg_dma_len(sgel); |
1872 | bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel)); | ||
1873 | sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); | 1873 | sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); |
1874 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); | 1874 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); |
1875 | if ((num_bde + 1) == nseg) | 1875 | if ((num_bde + 1) == nseg) |
@@ -1878,7 +1878,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
1878 | bf_set(lpfc_sli4_sge_last, sgl, 0); | 1878 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
1879 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); | 1879 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); |
1880 | sgl->word2 = cpu_to_le32(sgl->word2); | 1880 | sgl->word2 = cpu_to_le32(sgl->word2); |
1881 | sgl->word3 = cpu_to_le32(sgl->word3); | 1881 | sgl->sge_len = cpu_to_le32(dma_len); |
1882 | dma_offset += dma_len; | 1882 | dma_offset += dma_len; |
1883 | sgl++; | 1883 | sgl++; |
1884 | } | 1884 | } |
@@ -2221,6 +2221,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
2221 | 2221 | ||
2222 | lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; | 2222 | lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; |
2223 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; | 2223 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; |
2224 | /* pick up SLI4 exhange busy status from HBA */ | ||
2225 | lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; | ||
2226 | |||
2224 | if (pnode && NLP_CHK_NODE_ACT(pnode)) | 2227 | if (pnode && NLP_CHK_NODE_ACT(pnode)) |
2225 | atomic_dec(&pnode->cmd_pending); | 2228 | atomic_dec(&pnode->cmd_pending); |
2226 | 2229 | ||
@@ -2637,6 +2640,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
2637 | } | 2640 | } |
2638 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; | 2641 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; |
2639 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; | 2642 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; |
2643 | phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; | ||
2640 | return 0; | 2644 | return 0; |
2641 | } | 2645 | } |
2642 | 2646 | ||
@@ -2695,6 +2699,13 @@ lpfc_info(struct Scsi_Host *host) | |||
2695 | " port %s", | 2699 | " port %s", |
2696 | phba->Port); | 2700 | phba->Port); |
2697 | } | 2701 | } |
2702 | len = strlen(lpfcinfobuf); | ||
2703 | if (phba->sli4_hba.link_state.logical_speed) { | ||
2704 | snprintf(lpfcinfobuf + len, | ||
2705 | 384-len, | ||
2706 | " Logical Link Speed: %d Mbps", | ||
2707 | phba->sli4_hba.link_state.logical_speed * 10); | ||
2708 | } | ||
2698 | } | 2709 | } |
2699 | return lpfcinfobuf; | 2710 | return lpfcinfobuf; |
2700 | } | 2711 | } |
@@ -2990,6 +3001,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
2990 | 3001 | ||
2991 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | 3002 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
2992 | abtsiocb->fcp_wqidx = iocb->fcp_wqidx; | 3003 | abtsiocb->fcp_wqidx = iocb->fcp_wqidx; |
3004 | abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; | ||
2993 | 3005 | ||
2994 | if (lpfc_is_link_up(phba)) | 3006 | if (lpfc_is_link_up(phba)) |
2995 | icmd->ulpCommand = CMD_ABORT_XRI_CN; | 3007 | icmd->ulpCommand = CMD_ABORT_XRI_CN; |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 65dfc8bd5b49..5932273870a5 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h | |||
@@ -118,6 +118,7 @@ struct lpfc_scsi_buf { | |||
118 | 118 | ||
119 | uint32_t timeout; | 119 | uint32_t timeout; |
120 | 120 | ||
121 | uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ | ||
121 | uint16_t status; /* From IOCB Word 7- ulpStatus */ | 122 | uint16_t status; /* From IOCB Word 7- ulpStatus */ |
122 | uint32_t result; /* From IOCB Word 4. */ | 123 | uint32_t result; /* From IOCB Word 4. */ |
123 | 124 | ||
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 589549b2bf0e..35e3b96d4e07 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -580,10 +580,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | |||
580 | else | 580 | else |
581 | sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); | 581 | sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); |
582 | if (sglq) { | 582 | if (sglq) { |
583 | if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED | 583 | if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { |
584 | && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) | ||
585 | && (iocbq->iocb.un.ulpWord[4] | ||
586 | == IOERR_ABORT_REQUESTED))) { | ||
587 | spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, | 584 | spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, |
588 | iflag); | 585 | iflag); |
589 | list_add(&sglq->list, | 586 | list_add(&sglq->list, |
@@ -764,10 +761,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) | |||
764 | case DSSCMD_IWRITE64_CX: | 761 | case DSSCMD_IWRITE64_CX: |
765 | case DSSCMD_IREAD64_CR: | 762 | case DSSCMD_IREAD64_CR: |
766 | case DSSCMD_IREAD64_CX: | 763 | case DSSCMD_IREAD64_CX: |
767 | case DSSCMD_INVALIDATE_DEK: | ||
768 | case DSSCMD_SET_KEK: | ||
769 | case DSSCMD_GET_KEK_ID: | ||
770 | case DSSCMD_GEN_XFER: | ||
771 | type = LPFC_SOL_IOCB; | 764 | type = LPFC_SOL_IOCB; |
772 | break; | 765 | break; |
773 | case CMD_ABORT_XRI_CN: | 766 | case CMD_ABORT_XRI_CN: |
@@ -1717,6 +1710,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1717 | struct lpfc_dmabuf *mp; | 1710 | struct lpfc_dmabuf *mp; |
1718 | uint16_t rpi, vpi; | 1711 | uint16_t rpi, vpi; |
1719 | int rc; | 1712 | int rc; |
1713 | struct lpfc_vport *vport = pmb->vport; | ||
1720 | 1714 | ||
1721 | mp = (struct lpfc_dmabuf *) (pmb->context1); | 1715 | mp = (struct lpfc_dmabuf *) (pmb->context1); |
1722 | 1716 | ||
@@ -1745,6 +1739,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1745 | return; | 1739 | return; |
1746 | } | 1740 | } |
1747 | 1741 | ||
1742 | /* Unreg VPI, if the REG_VPI succeed after VLink failure */ | ||
1743 | if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && | ||
1744 | !(phba->pport->load_flag & FC_UNLOADING) && | ||
1745 | !pmb->u.mb.mbxStatus) { | ||
1746 | lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb); | ||
1747 | pmb->vport = vport; | ||
1748 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
1749 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
1750 | if (rc != MBX_NOT_FINISHED) | ||
1751 | return; | ||
1752 | } | ||
1753 | |||
1748 | if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) | 1754 | if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) |
1749 | lpfc_sli4_mbox_cmd_free(phba, pmb); | 1755 | lpfc_sli4_mbox_cmd_free(phba, pmb); |
1750 | else | 1756 | else |
@@ -2228,9 +2234,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
2228 | * All other are passed to the completion callback. | 2234 | * All other are passed to the completion callback. |
2229 | */ | 2235 | */ |
2230 | if (pring->ringno == LPFC_ELS_RING) { | 2236 | if (pring->ringno == LPFC_ELS_RING) { |
2231 | if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) { | 2237 | if ((phba->sli_rev < LPFC_SLI_REV4) && |
2238 | (cmdiocbp->iocb_flag & | ||
2239 | LPFC_DRIVER_ABORTED)) { | ||
2240 | spin_lock_irqsave(&phba->hbalock, | ||
2241 | iflag); | ||
2232 | cmdiocbp->iocb_flag &= | 2242 | cmdiocbp->iocb_flag &= |
2233 | ~LPFC_DRIVER_ABORTED; | 2243 | ~LPFC_DRIVER_ABORTED; |
2244 | spin_unlock_irqrestore(&phba->hbalock, | ||
2245 | iflag); | ||
2234 | saveq->iocb.ulpStatus = | 2246 | saveq->iocb.ulpStatus = |
2235 | IOSTAT_LOCAL_REJECT; | 2247 | IOSTAT_LOCAL_REJECT; |
2236 | saveq->iocb.un.ulpWord[4] = | 2248 | saveq->iocb.un.ulpWord[4] = |
@@ -2240,7 +2252,47 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
2240 | * of DMAing payload, so don't free data | 2252 | * of DMAing payload, so don't free data |
2241 | * buffer till after a hbeat. | 2253 | * buffer till after a hbeat. |
2242 | */ | 2254 | */ |
2255 | spin_lock_irqsave(&phba->hbalock, | ||
2256 | iflag); | ||
2243 | saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; | 2257 | saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; |
2258 | spin_unlock_irqrestore(&phba->hbalock, | ||
2259 | iflag); | ||
2260 | } | ||
2261 | if ((phba->sli_rev == LPFC_SLI_REV4) && | ||
2262 | (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { | ||
2263 | /* Set cmdiocb flag for the exchange | ||
2264 | * busy so sgl (xri) will not be | ||
2265 | * released until the abort xri is | ||
2266 | * received from hba, clear the | ||
2267 | * LPFC_DRIVER_ABORTED bit in case | ||
2268 | * it was driver initiated abort. | ||
2269 | */ | ||
2270 | spin_lock_irqsave(&phba->hbalock, | ||
2271 | iflag); | ||
2272 | cmdiocbp->iocb_flag &= | ||
2273 | ~LPFC_DRIVER_ABORTED; | ||
2274 | cmdiocbp->iocb_flag |= | ||
2275 | LPFC_EXCHANGE_BUSY; | ||
2276 | spin_unlock_irqrestore(&phba->hbalock, | ||
2277 | iflag); | ||
2278 | cmdiocbp->iocb.ulpStatus = | ||
2279 | IOSTAT_LOCAL_REJECT; | ||
2280 | cmdiocbp->iocb.un.ulpWord[4] = | ||
2281 | IOERR_ABORT_REQUESTED; | ||
2282 | /* | ||
2283 | * For SLI4, irsiocb contains NO_XRI | ||
2284 | * in sli_xritag, it shall not affect | ||
2285 | * releasing sgl (xri) process. | ||
2286 | */ | ||
2287 | saveq->iocb.ulpStatus = | ||
2288 | IOSTAT_LOCAL_REJECT; | ||
2289 | saveq->iocb.un.ulpWord[4] = | ||
2290 | IOERR_SLI_ABORTED; | ||
2291 | spin_lock_irqsave(&phba->hbalock, | ||
2292 | iflag); | ||
2293 | saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; | ||
2294 | spin_unlock_irqrestore(&phba->hbalock, | ||
2295 | iflag); | ||
2244 | } | 2296 | } |
2245 | } | 2297 | } |
2246 | (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); | 2298 | (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); |
@@ -5687,19 +5739,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
5687 | 5739 | ||
5688 | for (i = 0; i < numBdes; i++) { | 5740 | for (i = 0; i < numBdes; i++) { |
5689 | /* Should already be byte swapped. */ | 5741 | /* Should already be byte swapped. */ |
5690 | sgl->addr_hi = bpl->addrHigh; | 5742 | sgl->addr_hi = bpl->addrHigh; |
5691 | sgl->addr_lo = bpl->addrLow; | 5743 | sgl->addr_lo = bpl->addrLow; |
5692 | /* swap the size field back to the cpu so we | 5744 | |
5693 | * can assign it to the sgl. | ||
5694 | */ | ||
5695 | bde.tus.w = le32_to_cpu(bpl->tus.w); | ||
5696 | bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize); | ||
5697 | if ((i+1) == numBdes) | 5745 | if ((i+1) == numBdes) |
5698 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 5746 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
5699 | else | 5747 | else |
5700 | bf_set(lpfc_sli4_sge_last, sgl, 0); | 5748 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
5701 | sgl->word2 = cpu_to_le32(sgl->word2); | 5749 | sgl->word2 = cpu_to_le32(sgl->word2); |
5702 | sgl->word3 = cpu_to_le32(sgl->word3); | 5750 | /* swap the size field back to the cpu so we |
5751 | * can assign it to the sgl. | ||
5752 | */ | ||
5753 | bde.tus.w = le32_to_cpu(bpl->tus.w); | ||
5754 | sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); | ||
5703 | bpl++; | 5755 | bpl++; |
5704 | sgl++; | 5756 | sgl++; |
5705 | } | 5757 | } |
@@ -5712,11 +5764,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
5712 | cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); | 5764 | cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); |
5713 | sgl->addr_lo = | 5765 | sgl->addr_lo = |
5714 | cpu_to_le32(icmd->un.genreq64.bdl.addrLow); | 5766 | cpu_to_le32(icmd->un.genreq64.bdl.addrLow); |
5715 | bf_set(lpfc_sli4_sge_len, sgl, | ||
5716 | icmd->un.genreq64.bdl.bdeSize); | ||
5717 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 5767 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
5718 | sgl->word2 = cpu_to_le32(sgl->word2); | 5768 | sgl->word2 = cpu_to_le32(sgl->word2); |
5719 | sgl->word3 = cpu_to_le32(sgl->word3); | 5769 | sgl->sge_len = |
5770 | cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); | ||
5720 | } | 5771 | } |
5721 | return sglq->sli4_xritag; | 5772 | return sglq->sli4_xritag; |
5722 | } | 5773 | } |
@@ -5987,12 +6038,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5987 | else | 6038 | else |
5988 | bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); | 6039 | bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); |
5989 | bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); | 6040 | bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); |
5990 | abort_tag = iocbq->iocb.un.acxri.abortIoTag; | ||
5991 | wqe->words[5] = 0; | 6041 | wqe->words[5] = 0; |
5992 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, | 6042 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, |
5993 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); | 6043 | ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); |
5994 | abort_tag = iocbq->iocb.un.acxri.abortIoTag; | 6044 | abort_tag = iocbq->iocb.un.acxri.abortIoTag; |
5995 | wqe->generic.abort_tag = abort_tag; | ||
5996 | /* | 6045 | /* |
5997 | * The abort handler will send us CMD_ABORT_XRI_CN or | 6046 | * The abort handler will send us CMD_ABORT_XRI_CN or |
5998 | * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX | 6047 | * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX |
@@ -6121,15 +6170,15 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
6121 | if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) | 6170 | if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) |
6122 | return IOCB_ERROR; | 6171 | return IOCB_ERROR; |
6123 | 6172 | ||
6124 | if (piocb->iocb_flag & LPFC_IO_FCP) { | 6173 | if ((piocb->iocb_flag & LPFC_IO_FCP) || |
6174 | (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { | ||
6125 | /* | 6175 | /* |
6126 | * For FCP command IOCB, get a new WQ index to distribute | 6176 | * For FCP command IOCB, get a new WQ index to distribute |
6127 | * WQE across the WQsr. On the other hand, for abort IOCB, | 6177 | * WQE across the WQsr. On the other hand, for abort IOCB, |
6128 | * it carries the same WQ index to the original command | 6178 | * it carries the same WQ index to the original command |
6129 | * IOCB. | 6179 | * IOCB. |
6130 | */ | 6180 | */ |
6131 | if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && | 6181 | if (piocb->iocb_flag & LPFC_IO_FCP) |
6132 | (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) | ||
6133 | piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); | 6182 | piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); |
6134 | if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], | 6183 | if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], |
6135 | &wqe)) | 6184 | &wqe)) |
@@ -7004,7 +7053,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
7004 | abort_iocb->iocb.ulpContext != abort_context || | 7053 | abort_iocb->iocb.ulpContext != abort_context || |
7005 | (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) | 7054 | (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) |
7006 | spin_unlock_irq(&phba->hbalock); | 7055 | spin_unlock_irq(&phba->hbalock); |
7007 | else { | 7056 | else if (phba->sli_rev < LPFC_SLI_REV4) { |
7057 | /* | ||
7058 | * leave the SLI4 aborted command on the txcmplq | ||
7059 | * list and the command complete WCQE's XB bit | ||
7060 | * will tell whether the SGL (XRI) can be released | ||
7061 | * immediately or to the aborted SGL list for the | ||
7062 | * following abort XRI from the HBA. | ||
7063 | */ | ||
7008 | list_del_init(&abort_iocb->list); | 7064 | list_del_init(&abort_iocb->list); |
7009 | pring->txcmplq_cnt--; | 7065 | pring->txcmplq_cnt--; |
7010 | spin_unlock_irq(&phba->hbalock); | 7066 | spin_unlock_irq(&phba->hbalock); |
@@ -7013,11 +7069,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
7013 | * payload, so don't free data buffer till after | 7069 | * payload, so don't free data buffer till after |
7014 | * a hbeat. | 7070 | * a hbeat. |
7015 | */ | 7071 | */ |
7072 | spin_lock_irq(&phba->hbalock); | ||
7016 | abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; | 7073 | abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; |
7017 | |||
7018 | abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; | 7074 | abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; |
7075 | spin_unlock_irq(&phba->hbalock); | ||
7076 | |||
7019 | abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; | 7077 | abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; |
7020 | abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; | 7078 | abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED; |
7021 | (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); | 7079 | (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); |
7022 | } | 7080 | } |
7023 | } | 7081 | } |
@@ -7106,7 +7164,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
7106 | return 0; | 7164 | return 0; |
7107 | 7165 | ||
7108 | /* This signals the response to set the correct status | 7166 | /* This signals the response to set the correct status |
7109 | * before calling the completion handler. | 7167 | * before calling the completion handler |
7110 | */ | 7168 | */ |
7111 | cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; | 7169 | cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; |
7112 | 7170 | ||
@@ -7124,6 +7182,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
7124 | 7182 | ||
7125 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | 7183 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
7126 | abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; | 7184 | abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; |
7185 | if (cmdiocb->iocb_flag & LPFC_IO_FCP) | ||
7186 | abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; | ||
7127 | 7187 | ||
7128 | if (phba->link_state >= LPFC_LINK_UP) | 7188 | if (phba->link_state >= LPFC_LINK_UP) |
7129 | iabt->ulpCommand = CMD_ABORT_XRI_CN; | 7189 | iabt->ulpCommand = CMD_ABORT_XRI_CN; |
@@ -7330,6 +7390,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
7330 | 7390 | ||
7331 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | 7391 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
7332 | abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; | 7392 | abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; |
7393 | if (iocbq->iocb_flag & LPFC_IO_FCP) | ||
7394 | abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; | ||
7333 | 7395 | ||
7334 | if (lpfc_is_link_up(phba)) | 7396 | if (lpfc_is_link_up(phba)) |
7335 | abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; | 7397 | abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; |
@@ -8359,11 +8421,24 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) | |||
8359 | } | 8421 | } |
8360 | } | 8422 | } |
8361 | 8423 | ||
8424 | /** | ||
8425 | * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn | ||
8426 | * @phba: pointer to lpfc hba data structure | ||
8427 | * @pIocbIn: pointer to the rspiocbq | ||
8428 | * @pIocbOut: pointer to the cmdiocbq | ||
8429 | * @wcqe: pointer to the complete wcqe | ||
8430 | * | ||
8431 | * This routine transfers the fields of a command iocbq to a response iocbq | ||
8432 | * by copying all the IOCB fields from command iocbq and transferring the | ||
8433 | * completion status information from the complete wcqe. | ||
8434 | **/ | ||
8362 | static void | 8435 | static void |
8363 | lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, | 8436 | lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, |
8437 | struct lpfc_iocbq *pIocbIn, | ||
8364 | struct lpfc_iocbq *pIocbOut, | 8438 | struct lpfc_iocbq *pIocbOut, |
8365 | struct lpfc_wcqe_complete *wcqe) | 8439 | struct lpfc_wcqe_complete *wcqe) |
8366 | { | 8440 | { |
8441 | unsigned long iflags; | ||
8367 | size_t offset = offsetof(struct lpfc_iocbq, iocb); | 8442 | size_t offset = offsetof(struct lpfc_iocbq, iocb); |
8368 | 8443 | ||
8369 | memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, | 8444 | memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, |
@@ -8377,8 +8452,17 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, | |||
8377 | wcqe->total_data_placed; | 8452 | wcqe->total_data_placed; |
8378 | else | 8453 | else |
8379 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; | 8454 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; |
8380 | else | 8455 | else { |
8381 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; | 8456 | pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; |
8457 | pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; | ||
8458 | } | ||
8459 | |||
8460 | /* Pick up HBA exchange busy condition */ | ||
8461 | if (bf_get(lpfc_wcqe_c_xb, wcqe)) { | ||
8462 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
8463 | pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; | ||
8464 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8465 | } | ||
8382 | } | 8466 | } |
8383 | 8467 | ||
8384 | /** | 8468 | /** |
@@ -8419,7 +8503,7 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, | |||
8419 | } | 8503 | } |
8420 | 8504 | ||
8421 | /* Fake the irspiocbq and copy necessary response information */ | 8505 | /* Fake the irspiocbq and copy necessary response information */ |
8422 | lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); | 8506 | lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); |
8423 | 8507 | ||
8424 | return irspiocbq; | 8508 | return irspiocbq; |
8425 | } | 8509 | } |
@@ -8849,8 +8933,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) | |||
8849 | int ecount = 0; | 8933 | int ecount = 0; |
8850 | uint16_t cqid; | 8934 | uint16_t cqid; |
8851 | 8935 | ||
8852 | if (bf_get(lpfc_eqe_major_code, eqe) != 0 || | 8936 | if (bf_get(lpfc_eqe_major_code, eqe) != 0) { |
8853 | bf_get(lpfc_eqe_minor_code, eqe) != 0) { | ||
8854 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 8937 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
8855 | "0359 Not a valid slow-path completion " | 8938 | "0359 Not a valid slow-path completion " |
8856 | "event: majorcode=x%x, minorcode=x%x\n", | 8939 | "event: majorcode=x%x, minorcode=x%x\n", |
@@ -8976,7 +9059,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, | |||
8976 | } | 9059 | } |
8977 | 9060 | ||
8978 | /* Fake the irspiocb and copy necessary response information */ | 9061 | /* Fake the irspiocb and copy necessary response information */ |
8979 | lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); | 9062 | lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); |
8980 | 9063 | ||
8981 | /* Pass the cmd_iocb and the rsp state to the upper layer */ | 9064 | /* Pass the cmd_iocb and the rsp state to the upper layer */ |
8982 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); | 9065 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); |
@@ -9082,8 +9165,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, | |||
9082 | uint16_t cqid; | 9165 | uint16_t cqid; |
9083 | int ecount = 0; | 9166 | int ecount = 0; |
9084 | 9167 | ||
9085 | if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || | 9168 | if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) { |
9086 | unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) { | ||
9087 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 9169 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
9088 | "0366 Not a valid fast-path completion " | 9170 | "0366 Not a valid fast-path completion " |
9089 | "event: majorcode=x%x, minorcode=x%x\n", | 9171 | "event: majorcode=x%x, minorcode=x%x\n", |
@@ -11871,12 +11953,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) | |||
11871 | { | 11953 | { |
11872 | int rc = 0, error; | 11954 | int rc = 0, error; |
11873 | LPFC_MBOXQ_t *mboxq; | 11955 | LPFC_MBOXQ_t *mboxq; |
11874 | void *virt_addr; | ||
11875 | dma_addr_t phys_addr; | ||
11876 | uint8_t *bytep; | ||
11877 | struct lpfc_mbx_sge sge; | ||
11878 | uint32_t alloc_len, req_len; | ||
11879 | struct lpfc_mbx_read_fcf_tbl *read_fcf; | ||
11880 | 11956 | ||
11881 | phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; | 11957 | phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; |
11882 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 11958 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
@@ -11887,43 +11963,19 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) | |||
11887 | error = -ENOMEM; | 11963 | error = -ENOMEM; |
11888 | goto fail_fcfscan; | 11964 | goto fail_fcfscan; |
11889 | } | 11965 | } |
11890 | 11966 | /* Construct the read FCF record mailbox command */ | |
11891 | req_len = sizeof(struct fcf_record) + | 11967 | rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); |
11892 | sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); | 11968 | if (rc) { |
11893 | 11969 | error = -EINVAL; | |
11894 | /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ | ||
11895 | alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
11896 | LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, | ||
11897 | LPFC_SLI4_MBX_NEMBED); | ||
11898 | |||
11899 | if (alloc_len < req_len) { | ||
11900 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
11901 | "0291 Allocated DMA memory size (x%x) is " | ||
11902 | "less than the requested DMA memory " | ||
11903 | "size (x%x)\n", alloc_len, req_len); | ||
11904 | error = -ENOMEM; | ||
11905 | goto fail_fcfscan; | 11970 | goto fail_fcfscan; |
11906 | } | 11971 | } |
11907 | 11972 | /* Issue the mailbox command asynchronously */ | |
11908 | /* Get the first SGE entry from the non-embedded DMA memory. This | ||
11909 | * routine only uses a single SGE. | ||
11910 | */ | ||
11911 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); | ||
11912 | phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); | ||
11913 | virt_addr = mboxq->sge_array->addr[0]; | ||
11914 | read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; | ||
11915 | |||
11916 | /* Set up command fields */ | ||
11917 | bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); | ||
11918 | /* Perform necessary endian conversion */ | ||
11919 | bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); | ||
11920 | lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); | ||
11921 | mboxq->vport = phba->pport; | 11973 | mboxq->vport = phba->pport; |
11922 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; | 11974 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; |
11923 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); | 11975 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
11924 | if (rc == MBX_NOT_FINISHED) { | 11976 | if (rc == MBX_NOT_FINISHED) |
11925 | error = -EIO; | 11977 | error = -EIO; |
11926 | } else { | 11978 | else { |
11927 | spin_lock_irq(&phba->hbalock); | 11979 | spin_lock_irq(&phba->hbalock); |
11928 | phba->hba_flag |= FCF_DISC_INPROGRESS; | 11980 | phba->hba_flag |= FCF_DISC_INPROGRESS; |
11929 | spin_unlock_irq(&phba->hbalock); | 11981 | spin_unlock_irq(&phba->hbalock); |
@@ -11942,6 +11994,90 @@ fail_fcfscan: | |||
11942 | } | 11994 | } |
11943 | 11995 | ||
11944 | /** | 11996 | /** |
11997 | * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table | ||
11998 | * @phba: pointer to lpfc hba data structure. | ||
11999 | * | ||
12000 | * This routine is the completion routine for the rediscover FCF table mailbox | ||
12001 | * command. If the mailbox command returned failure, it will try to stop the | ||
12002 | * FCF rediscover wait timer. | ||
12003 | **/ | ||
12004 | void | ||
12005 | lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) | ||
12006 | { | ||
12007 | struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; | ||
12008 | uint32_t shdr_status, shdr_add_status; | ||
12009 | |||
12010 | redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; | ||
12011 | |||
12012 | shdr_status = bf_get(lpfc_mbox_hdr_status, | ||
12013 | &redisc_fcf->header.cfg_shdr.response); | ||
12014 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, | ||
12015 | &redisc_fcf->header.cfg_shdr.response); | ||
12016 | if (shdr_status || shdr_add_status) { | ||
12017 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
12018 | "2746 Requesting for FCF rediscovery failed " | ||
12019 | "status x%x add_status x%x\n", | ||
12020 | shdr_status, shdr_add_status); | ||
12021 | /* | ||
12022 | * Request failed, last resort to re-try current | ||
12023 | * registered FCF entry | ||
12024 | */ | ||
12025 | lpfc_retry_pport_discovery(phba); | ||
12026 | } else | ||
12027 | /* | ||
12028 | * Start FCF rediscovery wait timer for pending FCF | ||
12029 | * before rescan FCF record table. | ||
12030 | */ | ||
12031 | lpfc_fcf_redisc_wait_start_timer(phba); | ||
12032 | |||
12033 | mempool_free(mbox, phba->mbox_mem_pool); | ||
12034 | } | ||
12035 | |||
12036 | /** | ||
12037 | * lpfc_sli4_redisc_all_fcf - Request to rediscover entire FCF table by port. | ||
12038 | * @phba: pointer to lpfc hba data structure. | ||
12039 | * | ||
12040 | * This routine is invoked to request for rediscovery of the entire FCF table | ||
12041 | * by the port. | ||
12042 | **/ | ||
12043 | int | ||
12044 | lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) | ||
12045 | { | ||
12046 | LPFC_MBOXQ_t *mbox; | ||
12047 | struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; | ||
12048 | int rc, length; | ||
12049 | |||
12050 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
12051 | if (!mbox) { | ||
12052 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
12053 | "2745 Failed to allocate mbox for " | ||
12054 | "requesting FCF rediscover.\n"); | ||
12055 | return -ENOMEM; | ||
12056 | } | ||
12057 | |||
12058 | length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - | ||
12059 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
12060 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
12061 | LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, | ||
12062 | length, LPFC_SLI4_MBX_EMBED); | ||
12063 | |||
12064 | redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; | ||
12065 | /* Set count to 0 for invalidating the entire FCF database */ | ||
12066 | bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); | ||
12067 | |||
12068 | /* Issue the mailbox command asynchronously */ | ||
12069 | mbox->vport = phba->pport; | ||
12070 | mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; | ||
12071 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); | ||
12072 | |||
12073 | if (rc == MBX_NOT_FINISHED) { | ||
12074 | mempool_free(mbox, phba->mbox_mem_pool); | ||
12075 | return -EIO; | ||
12076 | } | ||
12077 | return 0; | ||
12078 | } | ||
12079 | |||
12080 | /** | ||
11945 | * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. | 12081 | * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. |
11946 | * @phba: pointer to lpfc hba data structure. | 12082 | * @phba: pointer to lpfc hba data structure. |
11947 | * | 12083 | * |
@@ -12069,3 +12205,48 @@ out: | |||
12069 | kfree(rgn23_data); | 12205 | kfree(rgn23_data); |
12070 | return; | 12206 | return; |
12071 | } | 12207 | } |
12208 | |||
12209 | /** | ||
12210 | * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. | ||
12211 | * @vport: pointer to vport data structure. | ||
12212 | * | ||
12213 | * This function iterate through the mailboxq and clean up all REG_LOGIN | ||
12214 | * and REG_VPI mailbox commands associated with the vport. This function | ||
12215 | * is called when driver want to restart discovery of the vport due to | ||
12216 | * a Clear Virtual Link event. | ||
12217 | **/ | ||
12218 | void | ||
12219 | lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) | ||
12220 | { | ||
12221 | struct lpfc_hba *phba = vport->phba; | ||
12222 | LPFC_MBOXQ_t *mb, *nextmb; | ||
12223 | struct lpfc_dmabuf *mp; | ||
12224 | |||
12225 | spin_lock_irq(&phba->hbalock); | ||
12226 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { | ||
12227 | if (mb->vport != vport) | ||
12228 | continue; | ||
12229 | |||
12230 | if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && | ||
12231 | (mb->u.mb.mbxCommand != MBX_REG_VPI)) | ||
12232 | continue; | ||
12233 | |||
12234 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { | ||
12235 | mp = (struct lpfc_dmabuf *) (mb->context1); | ||
12236 | if (mp) { | ||
12237 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); | ||
12238 | kfree(mp); | ||
12239 | } | ||
12240 | } | ||
12241 | list_del(&mb->list); | ||
12242 | mempool_free(mb, phba->mbox_mem_pool); | ||
12243 | } | ||
12244 | mb = phba->sli.mbox_active; | ||
12245 | if (mb && (mb->vport == vport)) { | ||
12246 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || | ||
12247 | (mb->u.mb.mbxCommand == MBX_REG_VPI)) | ||
12248 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
12249 | } | ||
12250 | spin_unlock_irq(&phba->hbalock); | ||
12251 | } | ||
12252 | |||
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index ba38de3c28f1..dfcf5437d1f5 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -53,17 +53,19 @@ struct lpfc_iocbq { | |||
53 | 53 | ||
54 | IOCB_t iocb; /* IOCB cmd */ | 54 | IOCB_t iocb; /* IOCB cmd */ |
55 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ | 55 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ |
56 | uint8_t iocb_flag; | 56 | uint16_t iocb_flag; |
57 | #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ | 57 | #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ |
58 | #define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ | 58 | #define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ |
59 | #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ | 59 | #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ |
60 | #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ | 60 | #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ |
61 | #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ | 61 | #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ |
62 | #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ | 62 | #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ |
63 | #define LPFC_FIP_ELS_ID_MASK 0xc0 /* ELS_ID range 0-3 */ | 63 | #define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ |
64 | #define LPFC_FIP_ELS_ID_SHIFT 6 | 64 | #define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ |
65 | |||
66 | #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ | ||
67 | #define LPFC_FIP_ELS_ID_SHIFT 14 | ||
65 | 68 | ||
66 | uint8_t abort_count; | ||
67 | uint8_t rsvd2; | 69 | uint8_t rsvd2; |
68 | uint32_t drvrTimeout; /* driver timeout in seconds */ | 70 | uint32_t drvrTimeout; /* driver timeout in seconds */ |
69 | uint32_t fcp_wqidx; /* index to FCP work queue */ | 71 | uint32_t fcp_wqidx; /* index to FCP work queue */ |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 44e5f574236b..86308836600f 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -22,6 +22,10 @@ | |||
22 | #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 | 22 | #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 |
23 | #define LPFC_GET_QE_REL_INT 32 | 23 | #define LPFC_GET_QE_REL_INT 32 |
24 | #define LPFC_RPI_LOW_WATER_MARK 10 | 24 | #define LPFC_RPI_LOW_WATER_MARK 10 |
25 | |||
26 | /* Amount of time in seconds for waiting FCF rediscovery to complete */ | ||
27 | #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ | ||
28 | |||
25 | /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ | 29 | /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ |
26 | #define LPFC_NEMBED_MBOX_SGL_CNT 254 | 30 | #define LPFC_NEMBED_MBOX_SGL_CNT 254 |
27 | 31 | ||
@@ -126,24 +130,36 @@ struct lpfc_sli4_link { | |||
126 | uint8_t status; | 130 | uint8_t status; |
127 | uint8_t physical; | 131 | uint8_t physical; |
128 | uint8_t fault; | 132 | uint8_t fault; |
133 | uint16_t logical_speed; | ||
129 | }; | 134 | }; |
130 | 135 | ||
131 | struct lpfc_fcf { | 136 | struct lpfc_fcf_rec { |
132 | uint8_t fabric_name[8]; | 137 | uint8_t fabric_name[8]; |
133 | uint8_t switch_name[8]; | 138 | uint8_t switch_name[8]; |
134 | uint8_t mac_addr[6]; | 139 | uint8_t mac_addr[6]; |
135 | uint16_t fcf_indx; | 140 | uint16_t fcf_indx; |
141 | uint32_t priority; | ||
142 | uint16_t vlan_id; | ||
143 | uint32_t addr_mode; | ||
144 | uint32_t flag; | ||
145 | #define BOOT_ENABLE 0x01 | ||
146 | #define RECORD_VALID 0x02 | ||
147 | }; | ||
148 | |||
149 | struct lpfc_fcf { | ||
136 | uint16_t fcfi; | 150 | uint16_t fcfi; |
137 | uint32_t fcf_flag; | 151 | uint32_t fcf_flag; |
138 | #define FCF_AVAILABLE 0x01 /* FCF available for discovery */ | 152 | #define FCF_AVAILABLE 0x01 /* FCF available for discovery */ |
139 | #define FCF_REGISTERED 0x02 /* FCF registered with FW */ | 153 | #define FCF_REGISTERED 0x02 /* FCF registered with FW */ |
140 | #define FCF_DISCOVERED 0x04 /* FCF discovery started */ | 154 | #define FCF_SCAN_DONE 0x04 /* FCF table scan done */ |
141 | #define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */ | 155 | #define FCF_IN_USE 0x08 /* Atleast one discovery completed */ |
142 | #define FCF_IN_USE 0x10 /* Atleast one discovery completed */ | 156 | #define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */ |
143 | #define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ | 157 | #define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */ |
144 | uint32_t priority; | 158 | #define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */ |
145 | uint32_t addr_mode; | 159 | uint32_t addr_mode; |
146 | uint16_t vlan_id; | 160 | struct lpfc_fcf_rec current_rec; |
161 | struct lpfc_fcf_rec failover_rec; | ||
162 | struct timer_list redisc_wait; | ||
147 | }; | 163 | }; |
148 | 164 | ||
149 | #define LPFC_REGION23_SIGNATURE "RG23" | 165 | #define LPFC_REGION23_SIGNATURE "RG23" |
@@ -248,7 +264,10 @@ struct lpfc_bmbx { | |||
248 | #define SLI4_CT_VFI 2 | 264 | #define SLI4_CT_VFI 2 |
249 | #define SLI4_CT_FCFI 3 | 265 | #define SLI4_CT_FCFI 3 |
250 | 266 | ||
251 | #define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 | 267 | #define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000 |
268 | #define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000 | ||
269 | #define LPFC_SLI4_MIN_BUF_SIZE 0x400 | ||
270 | #define LPFC_SLI4_MAX_BUF_SIZE 0x20000 | ||
252 | 271 | ||
253 | /* | 272 | /* |
254 | * SLI4 specific data structures | 273 | * SLI4 specific data structures |
@@ -282,6 +301,42 @@ struct lpfc_fcp_eq_hdl { | |||
282 | struct lpfc_hba *phba; | 301 | struct lpfc_hba *phba; |
283 | }; | 302 | }; |
284 | 303 | ||
304 | /* Port Capabilities for SLI4 Parameters */ | ||
305 | struct lpfc_pc_sli4_params { | ||
306 | uint32_t supported; | ||
307 | uint32_t if_type; | ||
308 | uint32_t sli_rev; | ||
309 | uint32_t sli_family; | ||
310 | uint32_t featurelevel_1; | ||
311 | uint32_t featurelevel_2; | ||
312 | uint32_t proto_types; | ||
313 | #define LPFC_SLI4_PROTO_FCOE 0x0000001 | ||
314 | #define LPFC_SLI4_PROTO_FC 0x0000002 | ||
315 | #define LPFC_SLI4_PROTO_NIC 0x0000004 | ||
316 | #define LPFC_SLI4_PROTO_ISCSI 0x0000008 | ||
317 | #define LPFC_SLI4_PROTO_RDMA 0x0000010 | ||
318 | uint32_t sge_supp_len; | ||
319 | uint32_t if_page_sz; | ||
320 | uint32_t rq_db_window; | ||
321 | uint32_t loopbk_scope; | ||
322 | uint32_t eq_pages_max; | ||
323 | uint32_t eqe_size; | ||
324 | uint32_t cq_pages_max; | ||
325 | uint32_t cqe_size; | ||
326 | uint32_t mq_pages_max; | ||
327 | uint32_t mqe_size; | ||
328 | uint32_t mq_elem_cnt; | ||
329 | uint32_t wq_pages_max; | ||
330 | uint32_t wqe_size; | ||
331 | uint32_t rq_pages_max; | ||
332 | uint32_t rqe_size; | ||
333 | uint32_t hdr_pages_max; | ||
334 | uint32_t hdr_size; | ||
335 | uint32_t hdr_pp_align; | ||
336 | uint32_t sgl_pages_max; | ||
337 | uint32_t sgl_pp_align; | ||
338 | }; | ||
339 | |||
285 | /* SLI4 HBA data structure entries */ | 340 | /* SLI4 HBA data structure entries */ |
286 | struct lpfc_sli4_hba { | 341 | struct lpfc_sli4_hba { |
287 | void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for | 342 | void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for |
@@ -295,7 +350,7 @@ struct lpfc_sli4_hba { | |||
295 | void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ | 350 | void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ |
296 | void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */ | 351 | void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */ |
297 | void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */ | 352 | void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */ |
298 | void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ | 353 | void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */ |
299 | /* BAR1 FCoE function CSR register memory map */ | 354 | /* BAR1 FCoE function CSR register memory map */ |
300 | void __iomem *STAregaddr; /* Address to HST_STATE register */ | 355 | void __iomem *STAregaddr; /* Address to HST_STATE register */ |
301 | void __iomem *ISRregaddr; /* Address to HST_ISR register */ | 356 | void __iomem *ISRregaddr; /* Address to HST_ISR register */ |
@@ -310,6 +365,8 @@ struct lpfc_sli4_hba { | |||
310 | 365 | ||
311 | uint32_t ue_mask_lo; | 366 | uint32_t ue_mask_lo; |
312 | uint32_t ue_mask_hi; | 367 | uint32_t ue_mask_hi; |
368 | struct lpfc_register sli_intf; | ||
369 | struct lpfc_pc_sli4_params pc_sli4_params; | ||
313 | struct msix_entry *msix_entries; | 370 | struct msix_entry *msix_entries; |
314 | uint32_t cfg_eqn; | 371 | uint32_t cfg_eqn; |
315 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ | 372 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ |
@@ -406,6 +463,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); | |||
406 | void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); | 463 | void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); |
407 | void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, | 464 | void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, |
408 | struct lpfc_mbx_sge *); | 465 | struct lpfc_mbx_sge *); |
466 | int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *, | ||
467 | uint16_t); | ||
409 | 468 | ||
410 | void lpfc_sli4_hba_reset(struct lpfc_hba *); | 469 | void lpfc_sli4_hba_reset(struct lpfc_hba *); |
411 | struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, | 470 | struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, |
@@ -448,6 +507,7 @@ int lpfc_sli4_alloc_rpi(struct lpfc_hba *); | |||
448 | void lpfc_sli4_free_rpi(struct lpfc_hba *, int); | 507 | void lpfc_sli4_free_rpi(struct lpfc_hba *, int); |
449 | void lpfc_sli4_remove_rpis(struct lpfc_hba *); | 508 | void lpfc_sli4_remove_rpis(struct lpfc_hba *); |
450 | void lpfc_sli4_async_event_proc(struct lpfc_hba *); | 509 | void lpfc_sli4_async_event_proc(struct lpfc_hba *); |
510 | void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *); | ||
451 | int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); | 511 | int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); |
452 | void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); | 512 | void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); |
453 | void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); | 513 | void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 792f72263f1a..ac276aa46fba 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.7" | 21 | #define LPFC_DRIVER_VERSION "8.3.9" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index e3c7fa642306..dc86e873102a 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
389 | * by the port. | 389 | * by the port. |
390 | */ | 390 | */ |
391 | if ((phba->sli_rev == LPFC_SLI_REV4) && | 391 | if ((phba->sli_rev == LPFC_SLI_REV4) && |
392 | (pport->vpi_state & LPFC_VPI_REGISTERED)) { | 392 | (pport->fc_flag & FC_VFI_REGISTERED)) { |
393 | rc = lpfc_sli4_init_vpi(phba, vpi); | 393 | rc = lpfc_sli4_init_vpi(phba, vpi); |
394 | if (rc) { | 394 | if (rc) { |
395 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, | 395 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, |
@@ -505,6 +505,7 @@ enable_vport(struct fc_vport *fc_vport) | |||
505 | struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; | 505 | struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; |
506 | struct lpfc_hba *phba = vport->phba; | 506 | struct lpfc_hba *phba = vport->phba; |
507 | struct lpfc_nodelist *ndlp = NULL; | 507 | struct lpfc_nodelist *ndlp = NULL; |
508 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
508 | 509 | ||
509 | if ((phba->link_state < LPFC_LINK_UP) || | 510 | if ((phba->link_state < LPFC_LINK_UP) || |
510 | (phba->fc_topology == TOPOLOGY_LOOP)) { | 511 | (phba->fc_topology == TOPOLOGY_LOOP)) { |
@@ -512,10 +513,10 @@ enable_vport(struct fc_vport *fc_vport) | |||
512 | return VPORT_OK; | 513 | return VPORT_OK; |
513 | } | 514 | } |
514 | 515 | ||
515 | spin_lock_irq(&phba->hbalock); | 516 | spin_lock_irq(shost->host_lock); |
516 | vport->load_flag |= FC_LOADING; | 517 | vport->load_flag |= FC_LOADING; |
517 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 518 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
518 | spin_unlock_irq(&phba->hbalock); | 519 | spin_unlock_irq(shost->host_lock); |
519 | 520 | ||
520 | /* Use the Physical nodes Fabric NDLP to determine if the link is | 521 | /* Use the Physical nodes Fabric NDLP to determine if the link is |
521 | * up and ready to FDISC. | 522 | * up and ready to FDISC. |
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index c24e86f07804..dd808ae942a1 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c | |||
@@ -22,7 +22,6 @@ | |||
22 | 22 | ||
23 | #include <asm/irq.h> | 23 | #include <asm/irq.h> |
24 | #include <asm/dma.h> | 24 | #include <asm/dma.h> |
25 | |||
26 | #include <asm/macints.h> | 25 | #include <asm/macints.h> |
27 | #include <asm/macintosh.h> | 26 | #include <asm/macintosh.h> |
28 | 27 | ||
@@ -279,24 +278,27 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, | |||
279 | * Programmed IO routines follow. | 278 | * Programmed IO routines follow. |
280 | */ | 279 | */ |
281 | 280 | ||
282 | static inline int mac_esp_wait_for_fifo(struct esp *esp) | 281 | static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp) |
283 | { | 282 | { |
284 | int i = 500000; | 283 | int i = 500000; |
285 | 284 | ||
286 | do { | 285 | do { |
287 | if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) | 286 | unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; |
288 | return 0; | 287 | |
288 | if (fbytes) | ||
289 | return fbytes; | ||
289 | 290 | ||
290 | udelay(2); | 291 | udelay(2); |
291 | } while (--i); | 292 | } while (--i); |
292 | 293 | ||
293 | printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n", | 294 | printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n", |
294 | esp_read8(ESP_STATUS)); | 295 | esp_read8(ESP_STATUS)); |
295 | return 1; | 296 | return 0; |
296 | } | 297 | } |
297 | 298 | ||
298 | static inline int mac_esp_wait_for_intr(struct esp *esp) | 299 | static inline int mac_esp_wait_for_intr(struct esp *esp) |
299 | { | 300 | { |
301 | struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); | ||
300 | int i = 500000; | 302 | int i = 500000; |
301 | 303 | ||
302 | do { | 304 | do { |
@@ -308,6 +310,7 @@ static inline int mac_esp_wait_for_intr(struct esp *esp) | |||
308 | } while (--i); | 310 | } while (--i); |
309 | 311 | ||
310 | printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg); | 312 | printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg); |
313 | mep->error = 1; | ||
311 | return 1; | 314 | return 1; |
312 | } | 315 | } |
313 | 316 | ||
@@ -347,11 +350,10 @@ static inline int mac_esp_wait_for_intr(struct esp *esp) | |||
347 | static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, | 350 | static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, |
348 | u32 dma_count, int write, u8 cmd) | 351 | u32 dma_count, int write, u8 cmd) |
349 | { | 352 | { |
350 | unsigned long flags; | ||
351 | struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); | 353 | struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); |
352 | u8 *fifo = esp->regs + ESP_FDATA * 16; | 354 | u8 *fifo = esp->regs + ESP_FDATA * 16; |
353 | 355 | ||
354 | local_irq_save(flags); | 356 | disable_irq(esp->host->irq); |
355 | 357 | ||
356 | cmd &= ~ESP_CMD_DMA; | 358 | cmd &= ~ESP_CMD_DMA; |
357 | mep->error = 0; | 359 | mep->error = 0; |
@@ -359,11 +361,35 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, | |||
359 | if (write) { | 361 | if (write) { |
360 | scsi_esp_cmd(esp, cmd); | 362 | scsi_esp_cmd(esp, cmd); |
361 | 363 | ||
362 | if (!mac_esp_wait_for_intr(esp)) { | 364 | while (1) { |
363 | if (mac_esp_wait_for_fifo(esp)) | 365 | unsigned int n; |
364 | esp_count = 0; | 366 | |
365 | } else { | 367 | n = mac_esp_wait_for_fifo(esp); |
366 | esp_count = 0; | 368 | if (!n) |
369 | break; | ||
370 | |||
371 | if (n > esp_count) | ||
372 | n = esp_count; | ||
373 | esp_count -= n; | ||
374 | |||
375 | MAC_ESP_PIO_LOOP("%2@,%0@+", n); | ||
376 | |||
377 | if (!esp_count) | ||
378 | break; | ||
379 | |||
380 | if (mac_esp_wait_for_intr(esp)) | ||
381 | break; | ||
382 | |||
383 | if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) && | ||
384 | ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP)) | ||
385 | break; | ||
386 | |||
387 | esp->ireg = esp_read8(ESP_INTRPT); | ||
388 | if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) != | ||
389 | ESP_INTR_BSERV) | ||
390 | break; | ||
391 | |||
392 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
367 | } | 393 | } |
368 | } else { | 394 | } else { |
369 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | 395 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); |
@@ -374,47 +400,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, | |||
374 | MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count); | 400 | MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count); |
375 | 401 | ||
376 | scsi_esp_cmd(esp, cmd); | 402 | scsi_esp_cmd(esp, cmd); |
377 | } | ||
378 | |||
379 | while (esp_count) { | ||
380 | unsigned int n; | ||
381 | |||
382 | if (mac_esp_wait_for_intr(esp)) { | ||
383 | mep->error = 1; | ||
384 | break; | ||
385 | } | ||
386 | |||
387 | if (esp->sreg & ESP_STAT_SPAM) { | ||
388 | printk(KERN_ERR PFX "gross error\n"); | ||
389 | mep->error = 1; | ||
390 | break; | ||
391 | } | ||
392 | 403 | ||
393 | n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | 404 | while (esp_count) { |
394 | 405 | unsigned int n; | |
395 | if (write) { | ||
396 | if (n > esp_count) | ||
397 | n = esp_count; | ||
398 | esp_count -= n; | ||
399 | |||
400 | MAC_ESP_PIO_LOOP("%2@,%0@+", n); | ||
401 | 406 | ||
402 | if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP) | 407 | if (mac_esp_wait_for_intr(esp)) |
403 | break; | 408 | break; |
404 | 409 | ||
405 | if (esp_count) { | 410 | if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) && |
406 | esp->ireg = esp_read8(ESP_INTRPT); | 411 | ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP)) |
407 | if (esp->ireg & ESP_INTR_DC) | 412 | break; |
408 | break; | ||
409 | 413 | ||
410 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
411 | } | ||
412 | } else { | ||
413 | esp->ireg = esp_read8(ESP_INTRPT); | 414 | esp->ireg = esp_read8(ESP_INTRPT); |
414 | if (esp->ireg & ESP_INTR_DC) | 415 | if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) != |
416 | ESP_INTR_BSERV) | ||
415 | break; | 417 | break; |
416 | 418 | ||
417 | n = MAC_ESP_FIFO_SIZE - n; | 419 | n = MAC_ESP_FIFO_SIZE - |
420 | (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES); | ||
418 | if (n > esp_count) | 421 | if (n > esp_count) |
419 | n = esp_count; | 422 | n = esp_count; |
420 | 423 | ||
@@ -429,7 +432,7 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, | |||
429 | } | 432 | } |
430 | } | 433 | } |
431 | 434 | ||
432 | local_irq_restore(flags); | 435 | enable_irq(esp->host->irq); |
433 | } | 436 | } |
434 | 437 | ||
435 | static int mac_esp_irq_pending(struct esp *esp) | 438 | static int mac_esp_irq_pending(struct esp *esp) |
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index d9b8ca5116bc..409648f5845f 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * FILE : megaraid_sas.c | 12 | * FILE : megaraid_sas.c |
13 | * Version : v00.00.04.12-rc1 | 13 | * Version : v00.00.04.17.1-rc1 |
14 | * | 14 | * |
15 | * Authors: | 15 | * Authors: |
16 | * (email-id : megaraidlinux@lsi.com) | 16 | * (email-id : megaraidlinux@lsi.com) |
@@ -843,6 +843,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
843 | pthru->lun = scp->device->lun; | 843 | pthru->lun = scp->device->lun; |
844 | pthru->cdb_len = scp->cmd_len; | 844 | pthru->cdb_len = scp->cmd_len; |
845 | pthru->timeout = 0; | 845 | pthru->timeout = 0; |
846 | pthru->pad_0 = 0; | ||
846 | pthru->flags = flags; | 847 | pthru->flags = flags; |
847 | pthru->data_xfer_len = scsi_bufflen(scp); | 848 | pthru->data_xfer_len = scsi_bufflen(scp); |
848 | 849 | ||
@@ -874,6 +875,12 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
874 | pthru->sge_count = megasas_make_sgl32(instance, scp, | 875 | pthru->sge_count = megasas_make_sgl32(instance, scp, |
875 | &pthru->sgl); | 876 | &pthru->sgl); |
876 | 877 | ||
878 | if (pthru->sge_count > instance->max_num_sge) { | ||
879 | printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n", | ||
880 | pthru->sge_count); | ||
881 | return 0; | ||
882 | } | ||
883 | |||
877 | /* | 884 | /* |
878 | * Sense info specific | 885 | * Sense info specific |
879 | */ | 886 | */ |
@@ -1000,6 +1007,12 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1000 | } else | 1007 | } else |
1001 | ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); | 1008 | ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); |
1002 | 1009 | ||
1010 | if (ldio->sge_count > instance->max_num_sge) { | ||
1011 | printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n", | ||
1012 | ldio->sge_count); | ||
1013 | return 0; | ||
1014 | } | ||
1015 | |||
1003 | /* | 1016 | /* |
1004 | * Sense info specific | 1017 | * Sense info specific |
1005 | */ | 1018 | */ |
@@ -2250,6 +2263,7 @@ megasas_get_pd_list(struct megasas_instance *instance) | |||
2250 | dcmd->sge_count = 1; | 2263 | dcmd->sge_count = 1; |
2251 | dcmd->flags = MFI_FRAME_DIR_READ; | 2264 | dcmd->flags = MFI_FRAME_DIR_READ; |
2252 | dcmd->timeout = 0; | 2265 | dcmd->timeout = 0; |
2266 | dcmd->pad_0 = 0; | ||
2253 | dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); | 2267 | dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); |
2254 | dcmd->opcode = MR_DCMD_PD_LIST_QUERY; | 2268 | dcmd->opcode = MR_DCMD_PD_LIST_QUERY; |
2255 | dcmd->sgl.sge32[0].phys_addr = ci_h; | 2269 | dcmd->sgl.sge32[0].phys_addr = ci_h; |
@@ -2294,6 +2308,86 @@ megasas_get_pd_list(struct megasas_instance *instance) | |||
2294 | return ret; | 2308 | return ret; |
2295 | } | 2309 | } |
2296 | 2310 | ||
2311 | /* | ||
2312 | * megasas_get_ld_list_info - Returns FW's ld_list structure | ||
2313 | * @instance: Adapter soft state | ||
2314 | * @ld_list: ld_list structure | ||
2315 | * | ||
2316 | * Issues an internal command (DCMD) to get the FW's controller PD | ||
2317 | * list structure. This information is mainly used to find out SYSTEM | ||
2318 | * supported by the FW. | ||
2319 | */ | ||
2320 | static int | ||
2321 | megasas_get_ld_list(struct megasas_instance *instance) | ||
2322 | { | ||
2323 | int ret = 0, ld_index = 0, ids = 0; | ||
2324 | struct megasas_cmd *cmd; | ||
2325 | struct megasas_dcmd_frame *dcmd; | ||
2326 | struct MR_LD_LIST *ci; | ||
2327 | dma_addr_t ci_h = 0; | ||
2328 | |||
2329 | cmd = megasas_get_cmd(instance); | ||
2330 | |||
2331 | if (!cmd) { | ||
2332 | printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n"); | ||
2333 | return -ENOMEM; | ||
2334 | } | ||
2335 | |||
2336 | dcmd = &cmd->frame->dcmd; | ||
2337 | |||
2338 | ci = pci_alloc_consistent(instance->pdev, | ||
2339 | sizeof(struct MR_LD_LIST), | ||
2340 | &ci_h); | ||
2341 | |||
2342 | if (!ci) { | ||
2343 | printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n"); | ||
2344 | megasas_return_cmd(instance, cmd); | ||
2345 | return -ENOMEM; | ||
2346 | } | ||
2347 | |||
2348 | memset(ci, 0, sizeof(*ci)); | ||
2349 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); | ||
2350 | |||
2351 | dcmd->cmd = MFI_CMD_DCMD; | ||
2352 | dcmd->cmd_status = 0xFF; | ||
2353 | dcmd->sge_count = 1; | ||
2354 | dcmd->flags = MFI_FRAME_DIR_READ; | ||
2355 | dcmd->timeout = 0; | ||
2356 | dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); | ||
2357 | dcmd->opcode = MR_DCMD_LD_GET_LIST; | ||
2358 | dcmd->sgl.sge32[0].phys_addr = ci_h; | ||
2359 | dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); | ||
2360 | dcmd->pad_0 = 0; | ||
2361 | |||
2362 | if (!megasas_issue_polled(instance, cmd)) { | ||
2363 | ret = 0; | ||
2364 | } else { | ||
2365 | ret = -1; | ||
2366 | } | ||
2367 | |||
2368 | /* the following function will get the instance PD LIST */ | ||
2369 | |||
2370 | if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) { | ||
2371 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); | ||
2372 | |||
2373 | for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { | ||
2374 | if (ci->ldList[ld_index].state != 0) { | ||
2375 | ids = ci->ldList[ld_index].ref.targetId; | ||
2376 | instance->ld_ids[ids] = | ||
2377 | ci->ldList[ld_index].ref.targetId; | ||
2378 | } | ||
2379 | } | ||
2380 | } | ||
2381 | |||
2382 | pci_free_consistent(instance->pdev, | ||
2383 | sizeof(struct MR_LD_LIST), | ||
2384 | ci, | ||
2385 | ci_h); | ||
2386 | |||
2387 | megasas_return_cmd(instance, cmd); | ||
2388 | return ret; | ||
2389 | } | ||
2390 | |||
2297 | /** | 2391 | /** |
2298 | * megasas_get_controller_info - Returns FW's controller structure | 2392 | * megasas_get_controller_info - Returns FW's controller structure |
2299 | * @instance: Adapter soft state | 2393 | * @instance: Adapter soft state |
@@ -2339,6 +2433,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance, | |||
2339 | dcmd->sge_count = 1; | 2433 | dcmd->sge_count = 1; |
2340 | dcmd->flags = MFI_FRAME_DIR_READ; | 2434 | dcmd->flags = MFI_FRAME_DIR_READ; |
2341 | dcmd->timeout = 0; | 2435 | dcmd->timeout = 0; |
2436 | dcmd->pad_0 = 0; | ||
2342 | dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); | 2437 | dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); |
2343 | dcmd->opcode = MR_DCMD_CTRL_GET_INFO; | 2438 | dcmd->opcode = MR_DCMD_CTRL_GET_INFO; |
2344 | dcmd->sgl.sge32[0].phys_addr = ci_h; | 2439 | dcmd->sgl.sge32[0].phys_addr = ci_h; |
@@ -2590,6 +2685,9 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
2590 | (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); | 2685 | (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); |
2591 | megasas_get_pd_list(instance); | 2686 | megasas_get_pd_list(instance); |
2592 | 2687 | ||
2688 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); | ||
2689 | megasas_get_ld_list(instance); | ||
2690 | |||
2593 | ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); | 2691 | ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); |
2594 | 2692 | ||
2595 | /* | 2693 | /* |
@@ -2714,6 +2812,7 @@ megasas_get_seq_num(struct megasas_instance *instance, | |||
2714 | dcmd->sge_count = 1; | 2812 | dcmd->sge_count = 1; |
2715 | dcmd->flags = MFI_FRAME_DIR_READ; | 2813 | dcmd->flags = MFI_FRAME_DIR_READ; |
2716 | dcmd->timeout = 0; | 2814 | dcmd->timeout = 0; |
2815 | dcmd->pad_0 = 0; | ||
2717 | dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); | 2816 | dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); |
2718 | dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; | 2817 | dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; |
2719 | dcmd->sgl.sge32[0].phys_addr = el_info_h; | 2818 | dcmd->sgl.sge32[0].phys_addr = el_info_h; |
@@ -2828,6 +2927,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
2828 | dcmd->sge_count = 1; | 2927 | dcmd->sge_count = 1; |
2829 | dcmd->flags = MFI_FRAME_DIR_READ; | 2928 | dcmd->flags = MFI_FRAME_DIR_READ; |
2830 | dcmd->timeout = 0; | 2929 | dcmd->timeout = 0; |
2930 | dcmd->pad_0 = 0; | ||
2831 | dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); | 2931 | dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); |
2832 | dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; | 2932 | dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; |
2833 | dcmd->mbox.w[0] = seq_num; | 2933 | dcmd->mbox.w[0] = seq_num; |
@@ -3166,6 +3266,7 @@ static void megasas_flush_cache(struct megasas_instance *instance) | |||
3166 | dcmd->sge_count = 0; | 3266 | dcmd->sge_count = 0; |
3167 | dcmd->flags = MFI_FRAME_DIR_NONE; | 3267 | dcmd->flags = MFI_FRAME_DIR_NONE; |
3168 | dcmd->timeout = 0; | 3268 | dcmd->timeout = 0; |
3269 | dcmd->pad_0 = 0; | ||
3169 | dcmd->data_xfer_len = 0; | 3270 | dcmd->data_xfer_len = 0; |
3170 | dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; | 3271 | dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; |
3171 | dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; | 3272 | dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; |
@@ -3205,6 +3306,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, | |||
3205 | dcmd->sge_count = 0; | 3306 | dcmd->sge_count = 0; |
3206 | dcmd->flags = MFI_FRAME_DIR_NONE; | 3307 | dcmd->flags = MFI_FRAME_DIR_NONE; |
3207 | dcmd->timeout = 0; | 3308 | dcmd->timeout = 0; |
3309 | dcmd->pad_0 = 0; | ||
3208 | dcmd->data_xfer_len = 0; | 3310 | dcmd->data_xfer_len = 0; |
3209 | dcmd->opcode = opcode; | 3311 | dcmd->opcode = opcode; |
3210 | 3312 | ||
@@ -3984,6 +4086,7 @@ megasas_aen_polling(struct work_struct *work) | |||
3984 | struct Scsi_Host *host; | 4086 | struct Scsi_Host *host; |
3985 | struct scsi_device *sdev1; | 4087 | struct scsi_device *sdev1; |
3986 | u16 pd_index = 0; | 4088 | u16 pd_index = 0; |
4089 | u16 ld_index = 0; | ||
3987 | int i, j, doscan = 0; | 4090 | int i, j, doscan = 0; |
3988 | u32 seq_num; | 4091 | u32 seq_num; |
3989 | int error; | 4092 | int error; |
@@ -3999,8 +4102,124 @@ megasas_aen_polling(struct work_struct *work) | |||
3999 | 4102 | ||
4000 | switch (instance->evt_detail->code) { | 4103 | switch (instance->evt_detail->code) { |
4001 | case MR_EVT_PD_INSERTED: | 4104 | case MR_EVT_PD_INSERTED: |
4105 | if (megasas_get_pd_list(instance) == 0) { | ||
4106 | for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { | ||
4107 | for (j = 0; | ||
4108 | j < MEGASAS_MAX_DEV_PER_CHANNEL; | ||
4109 | j++) { | ||
4110 | |||
4111 | pd_index = | ||
4112 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | ||
4113 | |||
4114 | sdev1 = | ||
4115 | scsi_device_lookup(host, i, j, 0); | ||
4116 | |||
4117 | if (instance->pd_list[pd_index].driveState | ||
4118 | == MR_PD_STATE_SYSTEM) { | ||
4119 | if (!sdev1) { | ||
4120 | scsi_add_device(host, i, j, 0); | ||
4121 | } | ||
4122 | |||
4123 | if (sdev1) | ||
4124 | scsi_device_put(sdev1); | ||
4125 | } | ||
4126 | } | ||
4127 | } | ||
4128 | } | ||
4129 | doscan = 0; | ||
4130 | break; | ||
4131 | |||
4002 | case MR_EVT_PD_REMOVED: | 4132 | case MR_EVT_PD_REMOVED: |
4133 | if (megasas_get_pd_list(instance) == 0) { | ||
4134 | megasas_get_pd_list(instance); | ||
4135 | for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { | ||
4136 | for (j = 0; | ||
4137 | j < MEGASAS_MAX_DEV_PER_CHANNEL; | ||
4138 | j++) { | ||
4139 | |||
4140 | pd_index = | ||
4141 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | ||
4142 | |||
4143 | sdev1 = | ||
4144 | scsi_device_lookup(host, i, j, 0); | ||
4145 | |||
4146 | if (instance->pd_list[pd_index].driveState | ||
4147 | == MR_PD_STATE_SYSTEM) { | ||
4148 | if (sdev1) { | ||
4149 | scsi_device_put(sdev1); | ||
4150 | } | ||
4151 | } else { | ||
4152 | if (sdev1) { | ||
4153 | scsi_remove_device(sdev1); | ||
4154 | scsi_device_put(sdev1); | ||
4155 | } | ||
4156 | } | ||
4157 | } | ||
4158 | } | ||
4159 | } | ||
4160 | doscan = 0; | ||
4161 | break; | ||
4162 | |||
4163 | case MR_EVT_LD_OFFLINE: | ||
4164 | case MR_EVT_LD_DELETED: | ||
4165 | megasas_get_ld_list(instance); | ||
4166 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { | ||
4167 | for (j = 0; | ||
4168 | j < MEGASAS_MAX_DEV_PER_CHANNEL; | ||
4169 | j++) { | ||
4170 | |||
4171 | ld_index = | ||
4172 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | ||
4173 | |||
4174 | sdev1 = scsi_device_lookup(host, | ||
4175 | i + MEGASAS_MAX_LD_CHANNELS, | ||
4176 | j, | ||
4177 | 0); | ||
4178 | |||
4179 | if (instance->ld_ids[ld_index] != 0xff) { | ||
4180 | if (sdev1) { | ||
4181 | scsi_device_put(sdev1); | ||
4182 | } | ||
4183 | } else { | ||
4184 | if (sdev1) { | ||
4185 | scsi_remove_device(sdev1); | ||
4186 | scsi_device_put(sdev1); | ||
4187 | } | ||
4188 | } | ||
4189 | } | ||
4190 | } | ||
4191 | doscan = 0; | ||
4192 | break; | ||
4193 | case MR_EVT_LD_CREATED: | ||
4194 | megasas_get_ld_list(instance); | ||
4195 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { | ||
4196 | for (j = 0; | ||
4197 | j < MEGASAS_MAX_DEV_PER_CHANNEL; | ||
4198 | j++) { | ||
4199 | ld_index = | ||
4200 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | ||
4201 | |||
4202 | sdev1 = scsi_device_lookup(host, | ||
4203 | i+MEGASAS_MAX_LD_CHANNELS, | ||
4204 | j, 0); | ||
4205 | |||
4206 | if (instance->ld_ids[ld_index] != | ||
4207 | 0xff) { | ||
4208 | if (!sdev1) { | ||
4209 | scsi_add_device(host, | ||
4210 | i + 2, | ||
4211 | j, 0); | ||
4212 | } | ||
4213 | } | ||
4214 | if (sdev1) { | ||
4215 | scsi_device_put(sdev1); | ||
4216 | } | ||
4217 | } | ||
4218 | } | ||
4219 | doscan = 0; | ||
4220 | break; | ||
4003 | case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: | 4221 | case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: |
4222 | case MR_EVT_FOREIGN_CFG_IMPORTED: | ||
4004 | doscan = 1; | 4223 | doscan = 1; |
4005 | break; | 4224 | break; |
4006 | default: | 4225 | default: |
@@ -4035,6 +4254,31 @@ megasas_aen_polling(struct work_struct *work) | |||
4035 | } | 4254 | } |
4036 | } | 4255 | } |
4037 | } | 4256 | } |
4257 | |||
4258 | megasas_get_ld_list(instance); | ||
4259 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { | ||
4260 | for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { | ||
4261 | ld_index = | ||
4262 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | ||
4263 | |||
4264 | sdev1 = scsi_device_lookup(host, | ||
4265 | i+MEGASAS_MAX_LD_CHANNELS, j, 0); | ||
4266 | if (instance->ld_ids[ld_index] != 0xff) { | ||
4267 | if (!sdev1) { | ||
4268 | scsi_add_device(host, | ||
4269 | i+2, | ||
4270 | j, 0); | ||
4271 | } else { | ||
4272 | scsi_device_put(sdev1); | ||
4273 | } | ||
4274 | } else { | ||
4275 | if (sdev1) { | ||
4276 | scsi_remove_device(sdev1); | ||
4277 | scsi_device_put(sdev1); | ||
4278 | } | ||
4279 | } | ||
4280 | } | ||
4281 | } | ||
4038 | } | 4282 | } |
4039 | 4283 | ||
4040 | if ( instance->aen_cmd != NULL ) { | 4284 | if ( instance->aen_cmd != NULL ) { |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 72b28e436e32..9d8b6bf605aa 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -18,9 +18,9 @@ | |||
18 | /* | 18 | /* |
19 | * MegaRAID SAS Driver meta data | 19 | * MegaRAID SAS Driver meta data |
20 | */ | 20 | */ |
21 | #define MEGASAS_VERSION "00.00.04.12-rc1" | 21 | #define MEGASAS_VERSION "00.00.04.17.1-rc1" |
22 | #define MEGASAS_RELDATE "Sep. 17, 2009" | 22 | #define MEGASAS_RELDATE "Oct. 29, 2009" |
23 | #define MEGASAS_EXT_VERSION "Thu Sep. 17 11:41:51 PST 2009" | 23 | #define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009" |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * Device IDs | 26 | * Device IDs |
@@ -117,6 +117,7 @@ | |||
117 | #define MFI_CMD_STP 0x08 | 117 | #define MFI_CMD_STP 0x08 |
118 | 118 | ||
119 | #define MR_DCMD_CTRL_GET_INFO 0x01010000 | 119 | #define MR_DCMD_CTRL_GET_INFO 0x01010000 |
120 | #define MR_DCMD_LD_GET_LIST 0x03010000 | ||
120 | 121 | ||
121 | #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 | 122 | #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 |
122 | #define MR_FLUSH_CTRL_CACHE 0x01 | 123 | #define MR_FLUSH_CTRL_CACHE 0x01 |
@@ -349,6 +350,32 @@ struct megasas_pd_list { | |||
349 | u8 driveState; | 350 | u8 driveState; |
350 | } __packed; | 351 | } __packed; |
351 | 352 | ||
353 | /* | ||
354 | * defines the logical drive reference structure | ||
355 | */ | ||
356 | union MR_LD_REF { | ||
357 | struct { | ||
358 | u8 targetId; | ||
359 | u8 reserved; | ||
360 | u16 seqNum; | ||
361 | }; | ||
362 | u32 ref; | ||
363 | } __packed; | ||
364 | |||
365 | /* | ||
366 | * defines the logical drive list structure | ||
367 | */ | ||
368 | struct MR_LD_LIST { | ||
369 | u32 ldCount; | ||
370 | u32 reserved; | ||
371 | struct { | ||
372 | union MR_LD_REF ref; | ||
373 | u8 state; | ||
374 | u8 reserved[3]; | ||
375 | u64 size; | ||
376 | } ldList[MAX_LOGICAL_DRIVES]; | ||
377 | } __packed; | ||
378 | |||
352 | /* | 379 | /* |
353 | * SAS controller properties | 380 | * SAS controller properties |
354 | */ | 381 | */ |
@@ -637,6 +664,8 @@ struct megasas_ctrl_info { | |||
637 | #define MEGASAS_MAX_LD 64 | 664 | #define MEGASAS_MAX_LD 64 |
638 | #define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ | 665 | #define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ |
639 | MEGASAS_MAX_DEV_PER_CHANNEL) | 666 | MEGASAS_MAX_DEV_PER_CHANNEL) |
667 | #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ | ||
668 | MEGASAS_MAX_DEV_PER_CHANNEL) | ||
640 | 669 | ||
641 | #define MEGASAS_DBG_LVL 1 | 670 | #define MEGASAS_DBG_LVL 1 |
642 | 671 | ||
@@ -1187,6 +1216,7 @@ struct megasas_instance { | |||
1187 | struct megasas_register_set __iomem *reg_set; | 1216 | struct megasas_register_set __iomem *reg_set; |
1188 | 1217 | ||
1189 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; | 1218 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; |
1219 | u8 ld_ids[MEGASAS_MAX_LD_IDS]; | ||
1190 | s8 init_id; | 1220 | s8 init_id; |
1191 | 1221 | ||
1192 | u16 max_num_sge; | 1222 | u16 max_num_sge; |
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig index 70c4c2467dd8..ba8e128de238 100644 --- a/drivers/scsi/mpt2sas/Kconfig +++ b/drivers/scsi/mpt2sas/Kconfig | |||
@@ -44,6 +44,7 @@ config SCSI_MPT2SAS | |||
44 | tristate "LSI MPT Fusion SAS 2.0 Device Driver" | 44 | tristate "LSI MPT Fusion SAS 2.0 Device Driver" |
45 | depends on PCI && SCSI | 45 | depends on PCI && SCSI |
46 | select SCSI_SAS_ATTRS | 46 | select SCSI_SAS_ATTRS |
47 | select RAID_ATTRS | ||
47 | ---help--- | 48 | ---help--- |
48 | This driver supports PCI-Express SAS 6Gb/s Host Adapters. | 49 | This driver supports PCI-Express SAS 6Gb/s Host Adapters. |
49 | 50 | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 914168105297..9958d847a88d 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * scatter/gather formats. | 8 | * scatter/gather formats. |
9 | * Creation Date: June 21, 2006 | 9 | * Creation Date: June 21, 2006 |
10 | * | 10 | * |
11 | * mpi2.h Version: 02.00.13 | 11 | * mpi2.h Version: 02.00.14 |
12 | * | 12 | * |
13 | * Version History | 13 | * Version History |
14 | * --------------- | 14 | * --------------- |
@@ -53,6 +53,10 @@ | |||
53 | * bytes reserved. | 53 | * bytes reserved. |
54 | * Added RAID Accelerator functionality. | 54 | * Added RAID Accelerator functionality. |
55 | * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. | 55 | * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. |
56 | * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. | ||
57 | * Added MSI-x index mask and shift for Reply Post Host | ||
58 | * Index register. | ||
59 | * Added function code for Host Based Discovery Action. | ||
56 | * -------------------------------------------------------------------------- | 60 | * -------------------------------------------------------------------------- |
57 | */ | 61 | */ |
58 | 62 | ||
@@ -78,7 +82,7 @@ | |||
78 | #define MPI2_VERSION_02_00 (0x0200) | 82 | #define MPI2_VERSION_02_00 (0x0200) |
79 | 83 | ||
80 | /* versioning for this MPI header set */ | 84 | /* versioning for this MPI header set */ |
81 | #define MPI2_HEADER_VERSION_UNIT (0x0D) | 85 | #define MPI2_HEADER_VERSION_UNIT (0x0E) |
82 | #define MPI2_HEADER_VERSION_DEV (0x00) | 86 | #define MPI2_HEADER_VERSION_DEV (0x00) |
83 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | 87 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) |
84 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | 88 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) |
@@ -232,9 +236,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS | |||
232 | #define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) | 236 | #define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) |
233 | 237 | ||
234 | /* | 238 | /* |
235 | * Offset for the Reply Descriptor Post Queue | 239 | * Defines for the Reply Descriptor Post Queue |
236 | */ | 240 | */ |
237 | #define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) | 241 | #define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) |
242 | #define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) | ||
243 | #define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) | ||
244 | #define MPI2_RPHI_MSIX_INDEX_SHIFT (24) | ||
238 | 245 | ||
239 | /* | 246 | /* |
240 | * Defines for the HCBSize and address | 247 | * Defines for the HCBSize and address |
@@ -497,12 +504,13 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION | |||
497 | #define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */ | 504 | #define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */ |
498 | #define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */ | 505 | #define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */ |
499 | #define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/ | 506 | #define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/ |
507 | /* Host Based Discovery Action */ | ||
508 | #define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) | ||
500 | 509 | ||
501 | 510 | ||
502 | 511 | ||
503 | /* Doorbell functions */ | 512 | /* Doorbell functions */ |
504 | #define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) | 513 | #define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) |
505 | /* #define MPI2_FUNCTION_IO_UNIT_RESET (0x41) */ | ||
506 | #define MPI2_FUNCTION_HANDSHAKE (0x42) | 514 | #define MPI2_FUNCTION_HANDSHAKE (0x42) |
507 | 515 | ||
508 | 516 | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 1611c57a6fdf..cf0ac9f40c97 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI Configuration messages and pages | 6 | * Title: MPI Configuration messages and pages |
7 | * Creation Date: November 10, 2006 | 7 | * Creation Date: November 10, 2006 |
8 | * | 8 | * |
9 | * mpi2_cnfg.h Version: 02.00.12 | 9 | * mpi2_cnfg.h Version: 02.00.13 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -107,6 +107,8 @@ | |||
107 | * to SAS Device Page 0 Flags field. | 107 | * to SAS Device Page 0 Flags field. |
108 | * Added PhyInfo defines for power condition. | 108 | * Added PhyInfo defines for power condition. |
109 | * Added Ethernet configuration pages. | 109 | * Added Ethernet configuration pages. |
110 | * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. | ||
111 | * Added SAS PHY Page 4 structure and defines. | ||
110 | * -------------------------------------------------------------------------- | 112 | * -------------------------------------------------------------------------- |
111 | */ | 113 | */ |
112 | 114 | ||
@@ -712,6 +714,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 | |||
712 | #define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) | 714 | #define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) |
713 | 715 | ||
714 | /* IO Unit Page 1 Flags defines */ | 716 | /* IO Unit Page 1 Flags defines */ |
717 | #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) | ||
715 | #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) | 718 | #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) |
716 | #define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) | 719 | #define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) |
717 | #define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) | 720 | #define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) |
@@ -2291,6 +2294,26 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 { | |||
2291 | #define MPI2_SASPHY3_PAGEVERSION (0x00) | 2294 | #define MPI2_SASPHY3_PAGEVERSION (0x00) |
2292 | 2295 | ||
2293 | 2296 | ||
2297 | /* SAS PHY Page 4 */ | ||
2298 | |||
2299 | typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 { | ||
2300 | MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ | ||
2301 | U16 Reserved1; /* 0x08 */ | ||
2302 | U8 Reserved2; /* 0x0A */ | ||
2303 | U8 Flags; /* 0x0B */ | ||
2304 | U8 InitialFrame[28]; /* 0x0C */ | ||
2305 | } MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, | ||
2306 | Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t; | ||
2307 | |||
2308 | #define MPI2_SASPHY4_PAGEVERSION (0x00) | ||
2309 | |||
2310 | /* values for the Flags field */ | ||
2311 | #define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) | ||
2312 | #define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) | ||
2313 | |||
2314 | |||
2315 | |||
2316 | |||
2294 | /**************************************************************************** | 2317 | /**************************************************************************** |
2295 | * SAS Port Config Pages | 2318 | * SAS Port Config Pages |
2296 | ****************************************************************************/ | 2319 | ****************************************************************************/ |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt index 65fcaa31cb30..c4adf76b49d9 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt +++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt | |||
@@ -5,23 +5,24 @@ | |||
5 | Copyright (c) 2000-2009 LSI Corporation. | 5 | Copyright (c) 2000-2009 LSI Corporation. |
6 | 6 | ||
7 | --------------------------------------- | 7 | --------------------------------------- |
8 | Header Set Release Version: 02.00.12 | 8 | Header Set Release Version: 02.00.14 |
9 | Header Set Release Date: 05-06-09 | 9 | Header Set Release Date: 10-28-09 |
10 | --------------------------------------- | 10 | --------------------------------------- |
11 | 11 | ||
12 | Filename Current version Prior version | 12 | Filename Current version Prior version |
13 | ---------- --------------- ------------- | 13 | ---------- --------------- ------------- |
14 | mpi2.h 02.00.12 02.00.11 | 14 | mpi2.h 02.00.14 02.00.13 |
15 | mpi2_cnfg.h 02.00.11 02.00.10 | 15 | mpi2_cnfg.h 02.00.13 02.00.12 |
16 | mpi2_init.h 02.00.07 02.00.06 | 16 | mpi2_init.h 02.00.08 02.00.07 |
17 | mpi2_ioc.h 02.00.11 02.00.10 | 17 | mpi2_ioc.h 02.00.13 02.00.12 |
18 | mpi2_raid.h 02.00.03 02.00.03 | 18 | mpi2_raid.h 02.00.04 02.00.04 |
19 | mpi2_sas.h 02.00.02 02.00.02 | 19 | mpi2_sas.h 02.00.03 02.00.02 |
20 | mpi2_targ.h 02.00.03 02.00.03 | 20 | mpi2_targ.h 02.00.03 02.00.03 |
21 | mpi2_tool.h 02.00.03 02.00.02 | 21 | mpi2_tool.h 02.00.04 02.00.04 |
22 | mpi2_type.h 02.00.00 02.00.00 | 22 | mpi2_type.h 02.00.00 02.00.00 |
23 | mpi2_ra.h 02.00.00 | 23 | mpi2_ra.h 02.00.00 02.00.00 |
24 | mpi2_history.txt 02.00.11 02.00.12 | 24 | mpi2_hbd.h 02.00.00 |
25 | mpi2_history.txt 02.00.14 02.00.13 | ||
25 | 26 | ||
26 | 27 | ||
27 | * Date Version Description | 28 | * Date Version Description |
@@ -65,6 +66,11 @@ mpi2.h | |||
65 | * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those | 66 | * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those |
66 | * bytes reserved. | 67 | * bytes reserved. |
67 | * Added RAID Accelerator functionality. | 68 | * Added RAID Accelerator functionality. |
69 | * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. | ||
70 | * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. | ||
71 | * Added MSI-x index mask and shift for Reply Post Host | ||
72 | * Index register. | ||
73 | * Added function code for Host Based Discovery Action. | ||
68 | * -------------------------------------------------------------------------- | 74 | * -------------------------------------------------------------------------- |
69 | 75 | ||
70 | mpi2_cnfg.h | 76 | mpi2_cnfg.h |
@@ -155,6 +161,15 @@ mpi2_cnfg.h | |||
155 | * Added expander reduced functionality data to SAS | 161 | * Added expander reduced functionality data to SAS |
156 | * Expander Page 0. | 162 | * Expander Page 0. |
157 | * Added SAS PHY Page 2 and SAS PHY Page 3. | 163 | * Added SAS PHY Page 2 and SAS PHY Page 3. |
164 | * 07-30-09 02.00.12 Added IO Unit Page 7. | ||
165 | * Added new device ids. | ||
166 | * Added SAS IO Unit Page 5. | ||
167 | * Added partial and slumber power management capable flags | ||
168 | * to SAS Device Page 0 Flags field. | ||
169 | * Added PhyInfo defines for power condition. | ||
170 | * Added Ethernet configuration pages. | ||
171 | * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. | ||
172 | * Added SAS PHY Page 4 structure and defines. | ||
158 | * -------------------------------------------------------------------------- | 173 | * -------------------------------------------------------------------------- |
159 | 174 | ||
160 | mpi2_init.h | 175 | mpi2_init.h |
@@ -172,6 +187,10 @@ mpi2_init.h | |||
172 | * Query Asynchronous Event. | 187 | * Query Asynchronous Event. |
173 | * Defined two new bits in the SlotStatus field of the SCSI | 188 | * Defined two new bits in the SlotStatus field of the SCSI |
174 | * Enclosure Processor Request and Reply. | 189 | * Enclosure Processor Request and Reply. |
190 | * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for | ||
191 | * both SCSI IO Error Reply and SCSI Task Management Reply. | ||
192 | * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. | ||
193 | * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. | ||
175 | * -------------------------------------------------------------------------- | 194 | * -------------------------------------------------------------------------- |
176 | 195 | ||
177 | mpi2_ioc.h | 196 | mpi2_ioc.h |
@@ -246,6 +265,20 @@ mpi2_ioc.h | |||
246 | * Added two new reason codes for SAS Device Status Change | 265 | * Added two new reason codes for SAS Device Status Change |
247 | * Event. | 266 | * Event. |
248 | * Added new event: SAS PHY Counter. | 267 | * Added new event: SAS PHY Counter. |
268 | * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. | ||
269 | * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. | ||
270 | * Added new product id family for 2208. | ||
271 | * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. | ||
272 | * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. | ||
273 | * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. | ||
274 | * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. | ||
275 | * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. | ||
276 | * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. | ||
277 | * Added Host Based Discovery Phy Event data. | ||
278 | * Added defines for ProductID Product field | ||
279 | * (MPI2_FW_HEADER_PID_). | ||
280 | * Modified values for SAS ProductID Family | ||
281 | * (MPI2_FW_HEADER_PID_FAMILY_). | ||
249 | * -------------------------------------------------------------------------- | 282 | * -------------------------------------------------------------------------- |
250 | 283 | ||
251 | mpi2_raid.h | 284 | mpi2_raid.h |
@@ -256,6 +289,8 @@ mpi2_raid.h | |||
256 | * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that | 289 | * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that |
257 | * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT | 290 | * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT |
258 | * can be sized by the build environment. | 291 | * can be sized by the build environment. |
292 | * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of | ||
293 | * VolumeCreationFlags and marked the old one as obsolete. | ||
259 | * -------------------------------------------------------------------------- | 294 | * -------------------------------------------------------------------------- |
260 | 295 | ||
261 | mpi2_sas.h | 296 | mpi2_sas.h |
@@ -264,6 +299,8 @@ mpi2_sas.h | |||
264 | * Control Request. | 299 | * Control Request. |
265 | * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control | 300 | * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control |
266 | * Request. | 301 | * Request. |
302 | * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST | ||
303 | * to MPI2_SGE_IO_UNION since it supports chained SGLs. | ||
267 | * -------------------------------------------------------------------------- | 304 | * -------------------------------------------------------------------------- |
268 | 305 | ||
269 | mpi2_targ.h | 306 | mpi2_targ.h |
@@ -283,6 +320,10 @@ mpi2_tool.h | |||
283 | * structures and defines. | 320 | * structures and defines. |
284 | * 02-29-08 02.00.02 Modified various names to make them 32-character unique. | 321 | * 02-29-08 02.00.02 Modified various names to make them 32-character unique. |
285 | * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. | 322 | * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. |
323 | * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request | ||
324 | * and reply messages. | ||
325 | * Added MPI2_DIAG_BUF_TYPE_EXTENDED. | ||
326 | * Incremented MPI2_DIAG_BUF_TYPE_COUNT. | ||
286 | * -------------------------------------------------------------------------- | 327 | * -------------------------------------------------------------------------- |
287 | 328 | ||
288 | mpi2_type.h | 329 | mpi2_type.h |
@@ -293,20 +334,26 @@ mpi2_ra.h | |||
293 | * 05-06-09 02.00.00 Initial version. | 334 | * 05-06-09 02.00.00 Initial version. |
294 | * -------------------------------------------------------------------------- | 335 | * -------------------------------------------------------------------------- |
295 | 336 | ||
337 | mpi2_hbd.h | ||
338 | * 10-28-09 02.00.00 Initial version. | ||
339 | * -------------------------------------------------------------------------- | ||
340 | |||
341 | |||
296 | mpi2_history.txt Parts list history | 342 | mpi2_history.txt Parts list history |
297 | 343 | ||
298 | Filename 02.00.12 | 344 | Filename 02.00.14 02.00.13 02.00.12 |
299 | ---------- -------- | 345 | ---------- -------- -------- -------- |
300 | mpi2.h 02.00.12 | 346 | mpi2.h 02.00.14 02.00.13 02.00.12 |
301 | mpi2_cnfg.h 02.00.11 | 347 | mpi2_cnfg.h 02.00.13 02.00.12 02.00.11 |
302 | mpi2_init.h 02.00.07 | 348 | mpi2_init.h 02.00.08 02.00.07 02.00.07 |
303 | mpi2_ioc.h 02.00.11 | 349 | mpi2_ioc.h 02.00.13 02.00.12 02.00.11 |
304 | mpi2_raid.h 02.00.03 | 350 | mpi2_raid.h 02.00.04 02.00.04 02.00.03 |
305 | mpi2_sas.h 02.00.02 | 351 | mpi2_sas.h 02.00.03 02.00.02 02.00.02 |
306 | mpi2_targ.h 02.00.03 | 352 | mpi2_targ.h 02.00.03 02.00.03 02.00.03 |
307 | mpi2_tool.h 02.00.03 | 353 | mpi2_tool.h 02.00.04 02.00.04 02.00.03 |
308 | mpi2_type.h 02.00.00 | 354 | mpi2_type.h 02.00.00 02.00.00 02.00.00 |
309 | mpi2_ra.h 02.00.00 | 355 | mpi2_ra.h 02.00.00 02.00.00 02.00.00 |
356 | mpi2_hbd.h 02.00.00 | ||
310 | 357 | ||
311 | Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 | 358 | Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 |
312 | ---------- -------- -------- -------- -------- -------- -------- | 359 | ---------- -------- -------- -------- -------- -------- -------- |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h index 563e56d2e945..6541945e97c3 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI SCSI initiator mode messages and structures | 6 | * Title: MPI SCSI initiator mode messages and structures |
7 | * Creation Date: June 23, 2006 | 7 | * Creation Date: June 23, 2006 |
8 | * | 8 | * |
9 | * mpi2_init.h Version: 02.00.07 | 9 | * mpi2_init.h Version: 02.00.08 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -27,6 +27,10 @@ | |||
27 | * Query Asynchronous Event. | 27 | * Query Asynchronous Event. |
28 | * Defined two new bits in the SlotStatus field of the SCSI | 28 | * Defined two new bits in the SlotStatus field of the SCSI |
29 | * Enclosure Processor Request and Reply. | 29 | * Enclosure Processor Request and Reply. |
30 | * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for | ||
31 | * both SCSI IO Error Reply and SCSI Task Management Reply. | ||
32 | * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. | ||
33 | * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. | ||
30 | * -------------------------------------------------------------------------- | 34 | * -------------------------------------------------------------------------- |
31 | */ | 35 | */ |
32 | 36 | ||
@@ -254,6 +258,11 @@ typedef struct _MPI2_SCSI_IO_REPLY | |||
254 | #define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) | 258 | #define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) |
255 | #define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) | 259 | #define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) |
256 | 260 | ||
261 | /* masks and shifts for the ResponseInfo field */ | ||
262 | |||
263 | #define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF) | ||
264 | #define MPI2_SCSI_RI_SHIFT_REASONCODE (0) | ||
265 | |||
257 | #define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) | 266 | #define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) |
258 | 267 | ||
259 | 268 | ||
@@ -327,6 +336,7 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY | |||
327 | U16 IOCStatus; /* 0x0E */ | 336 | U16 IOCStatus; /* 0x0E */ |
328 | U32 IOCLogInfo; /* 0x10 */ | 337 | U32 IOCLogInfo; /* 0x10 */ |
329 | U32 TerminationCount; /* 0x14 */ | 338 | U32 TerminationCount; /* 0x14 */ |
339 | U32 ResponseInfo; /* 0x18 */ | ||
330 | } MPI2_SCSI_TASK_MANAGE_REPLY, | 340 | } MPI2_SCSI_TASK_MANAGE_REPLY, |
331 | MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY, | 341 | MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY, |
332 | Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t; | 342 | Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t; |
@@ -339,8 +349,20 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY | |||
339 | #define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) | 349 | #define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) |
340 | #define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) | 350 | #define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) |
341 | #define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) | 351 | #define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) |
352 | #define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) | ||
342 | #define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) | 353 | #define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) |
343 | 354 | ||
355 | /* masks and shifts for the ResponseInfo field */ | ||
356 | |||
357 | #define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF) | ||
358 | #define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0) | ||
359 | #define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00) | ||
360 | #define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8) | ||
361 | #define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000) | ||
362 | #define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16) | ||
363 | #define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000) | ||
364 | #define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24) | ||
365 | |||
344 | 366 | ||
345 | /**************************************************************************** | 367 | /**************************************************************************** |
346 | * SCSI Enclosure Processor messages | 368 | * SCSI Enclosure Processor messages |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index ea51ce868690..754938422f6a 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages | 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages |
7 | * Creation Date: October 11, 2006 | 7 | * Creation Date: October 11, 2006 |
8 | * | 8 | * |
9 | * mpi2_ioc.h Version: 02.00.12 | 9 | * mpi2_ioc.h Version: 02.00.13 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -87,6 +87,17 @@ | |||
87 | * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. | 87 | * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. |
88 | * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. | 88 | * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. |
89 | * Added new product id family for 2208. | 89 | * Added new product id family for 2208. |
90 | * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. | ||
91 | * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. | ||
92 | * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. | ||
93 | * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. | ||
94 | * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. | ||
95 | * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. | ||
96 | * Added Host Based Discovery Phy Event data. | ||
97 | * Added defines for ProductID Product field | ||
98 | * (MPI2_FW_HEADER_PID_). | ||
99 | * Modified values for SAS ProductID Family | ||
100 | * (MPI2_FW_HEADER_PID_FAMILY_). | ||
90 | * -------------------------------------------------------------------------- | 101 | * -------------------------------------------------------------------------- |
91 | */ | 102 | */ |
92 | 103 | ||
@@ -119,8 +130,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST | |||
119 | U16 MsgVersion; /* 0x0C */ | 130 | U16 MsgVersion; /* 0x0C */ |
120 | U16 HeaderVersion; /* 0x0E */ | 131 | U16 HeaderVersion; /* 0x0E */ |
121 | U32 Reserved5; /* 0x10 */ | 132 | U32 Reserved5; /* 0x10 */ |
122 | U32 Reserved6; /* 0x14 */ | 133 | U16 Reserved6; /* 0x14 */ |
123 | U16 Reserved7; /* 0x18 */ | 134 | U8 Reserved7; /* 0x16 */ |
135 | U8 HostMSIxVectors; /* 0x17 */ | ||
136 | U16 Reserved8; /* 0x18 */ | ||
124 | U16 SystemRequestFrameSize; /* 0x1A */ | 137 | U16 SystemRequestFrameSize; /* 0x1A */ |
125 | U16 ReplyDescriptorPostQueueDepth; /* 0x1C */ | 138 | U16 ReplyDescriptorPostQueueDepth; /* 0x1C */ |
126 | U16 ReplyFreeQueueDepth; /* 0x1E */ | 139 | U16 ReplyFreeQueueDepth; /* 0x1E */ |
@@ -215,7 +228,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY | |||
215 | U8 MaxChainDepth; /* 0x14 */ | 228 | U8 MaxChainDepth; /* 0x14 */ |
216 | U8 WhoInit; /* 0x15 */ | 229 | U8 WhoInit; /* 0x15 */ |
217 | U8 NumberOfPorts; /* 0x16 */ | 230 | U8 NumberOfPorts; /* 0x16 */ |
218 | U8 Reserved2; /* 0x17 */ | 231 | U8 MaxMSIxVectors; /* 0x17 */ |
219 | U16 RequestCredit; /* 0x18 */ | 232 | U16 RequestCredit; /* 0x18 */ |
220 | U16 ProductID; /* 0x1A */ | 233 | U16 ProductID; /* 0x1A */ |
221 | U32 IOCCapabilities; /* 0x1C */ | 234 | U32 IOCCapabilities; /* 0x1C */ |
@@ -233,7 +246,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY | |||
233 | U8 MaxVolumes; /* 0x37 */ | 246 | U8 MaxVolumes; /* 0x37 */ |
234 | U16 MaxDevHandle; /* 0x38 */ | 247 | U16 MaxDevHandle; /* 0x38 */ |
235 | U16 MaxPersistentEntries; /* 0x3A */ | 248 | U16 MaxPersistentEntries; /* 0x3A */ |
236 | U32 Reserved4; /* 0x3C */ | 249 | U16 MinDevHandle; /* 0x3C */ |
250 | U16 Reserved4; /* 0x3E */ | ||
237 | } MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY, | 251 | } MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY, |
238 | Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t; | 252 | Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t; |
239 | 253 | ||
@@ -269,6 +283,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY | |||
269 | /* ProductID field uses MPI2_FW_HEADER_PID_ */ | 283 | /* ProductID field uses MPI2_FW_HEADER_PID_ */ |
270 | 284 | ||
271 | /* IOCCapabilities */ | 285 | /* IOCCapabilities */ |
286 | #define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) | ||
272 | #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) | 287 | #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) |
273 | #define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) | 288 | #define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) |
274 | #define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) | 289 | #define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) |
@@ -453,6 +468,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY | |||
453 | #define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) | 468 | #define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) |
454 | #define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) | 469 | #define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) |
455 | #define MPI2_EVENT_GPIO_INTERRUPT (0x0023) | 470 | #define MPI2_EVENT_GPIO_INTERRUPT (0x0023) |
471 | #define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) | ||
456 | 472 | ||
457 | 473 | ||
458 | /* Log Entry Added Event data */ | 474 | /* Log Entry Added Event data */ |
@@ -793,6 +809,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST | |||
793 | MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t; | 809 | MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t; |
794 | 810 | ||
795 | /* values for the ExpStatus field */ | 811 | /* values for the ExpStatus field */ |
812 | #define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) | ||
796 | #define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) | 813 | #define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) |
797 | #define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) | 814 | #define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) |
798 | #define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) | 815 | #define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) |
@@ -878,6 +895,44 @@ typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER { | |||
878 | * */ | 895 | * */ |
879 | 896 | ||
880 | 897 | ||
898 | /* Host Based Discovery Phy Event data */ | ||
899 | |||
900 | typedef struct _MPI2_EVENT_HBD_PHY_SAS { | ||
901 | U8 Flags; /* 0x00 */ | ||
902 | U8 NegotiatedLinkRate; /* 0x01 */ | ||
903 | U8 PhyNum; /* 0x02 */ | ||
904 | U8 PhysicalPort; /* 0x03 */ | ||
905 | U32 Reserved1; /* 0x04 */ | ||
906 | U8 InitialFrame[28]; /* 0x08 */ | ||
907 | } MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS, | ||
908 | Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t; | ||
909 | |||
910 | /* values for the Flags field */ | ||
911 | #define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) | ||
912 | #define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) | ||
913 | |||
914 | /* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for | ||
915 | * the NegotiatedLinkRate field */ | ||
916 | |||
917 | typedef union _MPI2_EVENT_HBD_DESCRIPTOR { | ||
918 | MPI2_EVENT_HBD_PHY_SAS Sas; | ||
919 | } MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR, | ||
920 | Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t; | ||
921 | |||
922 | typedef struct _MPI2_EVENT_DATA_HBD_PHY { | ||
923 | U8 DescriptorType; /* 0x00 */ | ||
924 | U8 Reserved1; /* 0x01 */ | ||
925 | U16 Reserved2; /* 0x02 */ | ||
926 | U32 Reserved3; /* 0x04 */ | ||
927 | MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */ | ||
928 | } MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY, | ||
929 | Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t; | ||
930 | |||
931 | /* values for the DescriptorType field */ | ||
932 | #define MPI2_EVENT_HBD_DT_SAS (0x01) | ||
933 | |||
934 | |||
935 | |||
881 | /**************************************************************************** | 936 | /**************************************************************************** |
882 | * EventAck message | 937 | * EventAck message |
883 | ****************************************************************************/ | 938 | ****************************************************************************/ |
@@ -1126,13 +1181,17 @@ typedef struct _MPI2_FW_IMAGE_HEADER | |||
1126 | #define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) | 1181 | #define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) |
1127 | #define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) | 1182 | #define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) |
1128 | 1183 | ||
1129 | #define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) | 1184 | #define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) |
1130 | #define MPI2_FW_HEADER_PID_PROD_A (0x0000) | 1185 | #define MPI2_FW_HEADER_PID_PROD_A (0x0000) |
1186 | #define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) | ||
1187 | #define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) | ||
1188 | #define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700) | ||
1189 | |||
1131 | 1190 | ||
1132 | #define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) | 1191 | #define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) |
1133 | /* SAS */ | 1192 | /* SAS */ |
1134 | #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0010) | 1193 | #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013) |
1135 | #define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0011) | 1194 | #define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014) |
1136 | 1195 | ||
1137 | /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ | 1196 | /* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ |
1138 | 1197 | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h index 8a42b136cf53..2d8aeed51392 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI Serial Attached SCSI structures and definitions | 6 | * Title: MPI Serial Attached SCSI structures and definitions |
7 | * Creation Date: February 9, 2007 | 7 | * Creation Date: February 9, 2007 |
8 | * | 8 | * |
9 | * mpi2.h Version: 02.00.02 | 9 | * mpi2.h Version: 02.00.03 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -18,6 +18,8 @@ | |||
18 | * Control Request. | 18 | * Control Request. |
19 | * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control | 19 | * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control |
20 | * Request. | 20 | * Request. |
21 | * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST | ||
22 | * to MPI2_SGE_IO_UNION since it supports chained SGLs. | ||
21 | * -------------------------------------------------------------------------- | 23 | * -------------------------------------------------------------------------- |
22 | */ | 24 | */ |
23 | 25 | ||
@@ -160,7 +162,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST | |||
160 | U32 Reserved4; /* 0x14 */ | 162 | U32 Reserved4; /* 0x14 */ |
161 | U32 DataLength; /* 0x18 */ | 163 | U32 DataLength; /* 0x18 */ |
162 | U8 CommandFIS[20]; /* 0x1C */ | 164 | U8 CommandFIS[20]; /* 0x1C */ |
163 | MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */ | 165 | MPI2_SGE_IO_UNION SGL; /* 0x20 */ |
164 | } MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, | 166 | } MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, |
165 | Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; | 167 | Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; |
166 | 168 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 89d02401b9ec..88e6eebc3159 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -107,8 +107,7 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) | |||
107 | if (ret) | 107 | if (ret) |
108 | return ret; | 108 | return ret; |
109 | 109 | ||
110 | printk(KERN_INFO "setting logging_level(0x%08x)\n", | 110 | printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug); |
111 | mpt2sas_fwfault_debug); | ||
112 | list_for_each_entry(ioc, &mpt2sas_ioc_list, list) | 111 | list_for_each_entry(ioc, &mpt2sas_ioc_list, list) |
113 | ioc->fwfault_debug = mpt2sas_fwfault_debug; | 112 | ioc->fwfault_debug = mpt2sas_fwfault_debug; |
114 | return 0; | 113 | return 0; |
@@ -1222,6 +1221,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) | |||
1222 | u32 memap_sz; | 1221 | u32 memap_sz; |
1223 | u32 pio_sz; | 1222 | u32 pio_sz; |
1224 | int i, r = 0; | 1223 | int i, r = 0; |
1224 | u64 pio_chip = 0; | ||
1225 | u64 chip_phys = 0; | ||
1225 | 1226 | ||
1226 | dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", | 1227 | dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", |
1227 | ioc->name, __func__)); | 1228 | ioc->name, __func__)); |
@@ -1255,12 +1256,13 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) | |||
1255 | if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) { | 1256 | if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) { |
1256 | if (pio_sz) | 1257 | if (pio_sz) |
1257 | continue; | 1258 | continue; |
1258 | ioc->pio_chip = pci_resource_start(pdev, i); | 1259 | pio_chip = (u64)pci_resource_start(pdev, i); |
1259 | pio_sz = pci_resource_len(pdev, i); | 1260 | pio_sz = pci_resource_len(pdev, i); |
1260 | } else { | 1261 | } else { |
1261 | if (memap_sz) | 1262 | if (memap_sz) |
1262 | continue; | 1263 | continue; |
1263 | ioc->chip_phys = pci_resource_start(pdev, i); | 1264 | ioc->chip_phys = pci_resource_start(pdev, i); |
1265 | chip_phys = (u64)ioc->chip_phys; | ||
1264 | memap_sz = pci_resource_len(pdev, i); | 1266 | memap_sz = pci_resource_len(pdev, i); |
1265 | ioc->chip = ioremap(ioc->chip_phys, memap_sz); | 1267 | ioc->chip = ioremap(ioc->chip_phys, memap_sz); |
1266 | if (ioc->chip == NULL) { | 1268 | if (ioc->chip == NULL) { |
@@ -1280,10 +1282,10 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) | |||
1280 | printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", | 1282 | printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", |
1281 | ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : | 1283 | ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : |
1282 | "IO-APIC enabled"), ioc->pci_irq); | 1284 | "IO-APIC enabled"), ioc->pci_irq); |
1283 | printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n", | 1285 | printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", |
1284 | ioc->name, ioc->chip_phys, ioc->chip, memap_sz); | 1286 | ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); |
1285 | printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n", | 1287 | printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", |
1286 | ioc->name, ioc->pio_chip, pio_sz); | 1288 | ioc->name, (unsigned long long)pio_chip, pio_sz); |
1287 | 1289 | ||
1288 | return 0; | 1290 | return 0; |
1289 | 1291 | ||
@@ -3573,6 +3575,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
3573 | 3575 | ||
3574 | init_waitqueue_head(&ioc->reset_wq); | 3576 | init_waitqueue_head(&ioc->reset_wq); |
3575 | 3577 | ||
3578 | ioc->fwfault_debug = mpt2sas_fwfault_debug; | ||
3579 | |||
3576 | /* base internal command bits */ | 3580 | /* base internal command bits */ |
3577 | mutex_init(&ioc->base_cmds.mutex); | 3581 | mutex_init(&ioc->base_cmds.mutex); |
3578 | ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); | 3582 | ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index bb4f14656afa..e18b0544c38f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -69,10 +69,10 @@ | |||
69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" | 69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" |
70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" |
71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
72 | #define MPT2SAS_DRIVER_VERSION "03.100.03.00" | 72 | #define MPT2SAS_DRIVER_VERSION "04.100.01.00" |
73 | #define MPT2SAS_MAJOR_VERSION 03 | 73 | #define MPT2SAS_MAJOR_VERSION 04 |
74 | #define MPT2SAS_MINOR_VERSION 100 | 74 | #define MPT2SAS_MINOR_VERSION 100 |
75 | #define MPT2SAS_BUILD_VERSION 03 | 75 | #define MPT2SAS_BUILD_VERSION 01 |
76 | #define MPT2SAS_RELEASE_VERSION 00 | 76 | #define MPT2SAS_RELEASE_VERSION 00 |
77 | 77 | ||
78 | /* | 78 | /* |
@@ -323,6 +323,7 @@ struct _sas_device { | |||
323 | * @device_info: bitfield provides detailed info about the hidden components | 323 | * @device_info: bitfield provides detailed info about the hidden components |
324 | * @num_pds: number of hidden raid components | 324 | * @num_pds: number of hidden raid components |
325 | * @responding: used in _scsih_raid_device_mark_responding | 325 | * @responding: used in _scsih_raid_device_mark_responding |
326 | * @percent_complete: resync percent complete | ||
326 | */ | 327 | */ |
327 | struct _raid_device { | 328 | struct _raid_device { |
328 | struct list_head list; | 329 | struct list_head list; |
@@ -336,6 +337,7 @@ struct _raid_device { | |||
336 | u32 device_info; | 337 | u32 device_info; |
337 | u8 num_pds; | 338 | u8 num_pds; |
338 | u8 responding; | 339 | u8 responding; |
340 | u8 percent_complete; | ||
339 | }; | 341 | }; |
340 | 342 | ||
341 | /** | 343 | /** |
@@ -464,7 +466,6 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); | |||
464 | * @pdev: pci pdev object | 466 | * @pdev: pci pdev object |
465 | * @chip: memory mapped register space | 467 | * @chip: memory mapped register space |
466 | * @chip_phys: physical addrss prior to mapping | 468 | * @chip_phys: physical addrss prior to mapping |
467 | * @pio_chip: I/O mapped register space | ||
468 | * @logging_level: see mpt2sas_debug.h | 469 | * @logging_level: see mpt2sas_debug.h |
469 | * @fwfault_debug: debuging FW timeouts | 470 | * @fwfault_debug: debuging FW timeouts |
470 | * @ir_firmware: IR firmware present | 471 | * @ir_firmware: IR firmware present |
@@ -587,8 +588,7 @@ struct MPT2SAS_ADAPTER { | |||
587 | char tmp_string[MPT_STRING_LENGTH]; | 588 | char tmp_string[MPT_STRING_LENGTH]; |
588 | struct pci_dev *pdev; | 589 | struct pci_dev *pdev; |
589 | Mpi2SystemInterfaceRegs_t __iomem *chip; | 590 | Mpi2SystemInterfaceRegs_t __iomem *chip; |
590 | unsigned long chip_phys; | 591 | resource_size_t chip_phys; |
591 | unsigned long pio_chip; | ||
592 | int logging_level; | 592 | int logging_level; |
593 | int fwfault_debug; | 593 | int fwfault_debug; |
594 | u8 ir_firmware; | 594 | u8 ir_firmware; |
@@ -853,6 +853,8 @@ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
853 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 853 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
854 | int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 854 | int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
855 | *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz); | 855 | *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz); |
856 | int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, | ||
857 | Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz); | ||
856 | int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 858 | int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
857 | *mpi_reply, Mpi2IOCPage8_t *config_page); | 859 | *mpi_reply, Mpi2IOCPage8_t *config_page); |
858 | int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 860 | int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 594a389c6526..411c27d7f787 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c | |||
@@ -324,7 +324,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
324 | if (r != 0) | 324 | if (r != 0) |
325 | goto out; | 325 | goto out; |
326 | if (mpi_request->Action == | 326 | if (mpi_request->Action == |
327 | MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) { | 327 | MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT || |
328 | mpi_request->Action == | ||
329 | MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) { | ||
328 | ioc->base_add_sg_single(&mpi_request->PageBufferSGE, | 330 | ioc->base_add_sg_single(&mpi_request->PageBufferSGE, |
329 | MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz, | 331 | MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz, |
330 | mem.page_dma); | 332 | mem.page_dma); |
@@ -882,7 +884,7 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
882 | } | 884 | } |
883 | 885 | ||
884 | /** | 886 | /** |
885 | * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 0 | 887 | * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1 |
886 | * @ioc: per adapter object | 888 | * @ioc: per adapter object |
887 | * @mpi_reply: reply mf payload returned from firmware | 889 | * @mpi_reply: reply mf payload returned from firmware |
888 | * @config_page: contents of the config page | 890 | * @config_page: contents of the config page |
@@ -907,7 +909,7 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
907 | mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; | 909 | mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; |
908 | mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; | 910 | mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; |
909 | mpi_request.Header.PageNumber = 1; | 911 | mpi_request.Header.PageNumber = 1; |
910 | mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; | 912 | mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; |
911 | mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); | 913 | mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); |
912 | r = _config_request(ioc, &mpi_request, mpi_reply, | 914 | r = _config_request(ioc, &mpi_request, mpi_reply, |
913 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); | 915 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); |
@@ -922,6 +924,49 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
922 | } | 924 | } |
923 | 925 | ||
924 | /** | 926 | /** |
927 | * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1 | ||
928 | * @ioc: per adapter object | ||
929 | * @mpi_reply: reply mf payload returned from firmware | ||
930 | * @config_page: contents of the config page | ||
931 | * @sz: size of buffer passed in config_page | ||
932 | * Context: sleep. | ||
933 | * | ||
934 | * Calling function should call config_get_number_hba_phys prior to | ||
935 | * this function, so enough memory is allocated for config_page. | ||
936 | * | ||
937 | * Returns 0 for success, non-zero for failure. | ||
938 | */ | ||
939 | int | ||
940 | mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | ||
941 | *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz) | ||
942 | { | ||
943 | Mpi2ConfigRequest_t mpi_request; | ||
944 | int r; | ||
945 | |||
946 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | ||
947 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | ||
948 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; | ||
949 | mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; | ||
950 | mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; | ||
951 | mpi_request.Header.PageNumber = 1; | ||
952 | mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; | ||
953 | mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); | ||
954 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
955 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); | ||
956 | if (r) | ||
957 | goto out; | ||
958 | |||
959 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; | ||
960 | _config_request(ioc, &mpi_request, mpi_reply, | ||
961 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); | ||
962 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; | ||
963 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
964 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); | ||
965 | out: | ||
966 | return r; | ||
967 | } | ||
968 | |||
969 | /** | ||
925 | * mpt2sas_config_get_expander_pg0 - obtain expander page 0 | 970 | * mpt2sas_config_get_expander_pg0 - obtain expander page 0 |
926 | * @ioc: per adapter object | 971 | * @ioc: per adapter object |
927 | * @mpi_reply: reply mf payload returned from firmware | 972 | * @mpi_reply: reply mf payload returned from firmware |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 84a124f8e21f..fa9bf83819d5 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
@@ -891,6 +891,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, | |||
891 | 891 | ||
892 | issue_host_reset: | 892 | issue_host_reset: |
893 | if (issue_reset) { | 893 | if (issue_reset) { |
894 | ret = -ENODATA; | ||
894 | if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || | 895 | if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || |
895 | mpi_request->Function == | 896 | mpi_request->Function == |
896 | MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { | 897 | MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { |
@@ -2202,14 +2203,10 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg) | |||
2202 | karg.data_out_size = karg32.data_out_size; | 2203 | karg.data_out_size = karg32.data_out_size; |
2203 | karg.max_sense_bytes = karg32.max_sense_bytes; | 2204 | karg.max_sense_bytes = karg32.max_sense_bytes; |
2204 | karg.data_sge_offset = karg32.data_sge_offset; | 2205 | karg.data_sge_offset = karg32.data_sge_offset; |
2205 | memcpy(&karg.reply_frame_buf_ptr, &karg32.reply_frame_buf_ptr, | 2206 | karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); |
2206 | sizeof(uint32_t)); | 2207 | karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); |
2207 | memcpy(&karg.data_in_buf_ptr, &karg32.data_in_buf_ptr, | 2208 | karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); |
2208 | sizeof(uint32_t)); | 2209 | karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); |
2209 | memcpy(&karg.data_out_buf_ptr, &karg32.data_out_buf_ptr, | ||
2210 | sizeof(uint32_t)); | ||
2211 | memcpy(&karg.sense_data_ptr, &karg32.sense_data_ptr, | ||
2212 | sizeof(uint32_t)); | ||
2213 | state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; | 2210 | state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; |
2214 | return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state); | 2211 | return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state); |
2215 | } | 2212 | } |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index efabea1a3ce4..c7ec3f174782 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/delay.h> | 52 | #include <linux/delay.h> |
53 | #include <linux/pci.h> | 53 | #include <linux/pci.h> |
54 | #include <linux/interrupt.h> | 54 | #include <linux/interrupt.h> |
55 | #include <linux/raid_class.h> | ||
55 | 56 | ||
56 | #include "mpt2sas_base.h" | 57 | #include "mpt2sas_base.h" |
57 | 58 | ||
@@ -133,6 +134,9 @@ struct fw_event_work { | |||
133 | void *event_data; | 134 | void *event_data; |
134 | }; | 135 | }; |
135 | 136 | ||
137 | /* raid transport support */ | ||
138 | static struct raid_template *mpt2sas_raid_template; | ||
139 | |||
136 | /** | 140 | /** |
137 | * struct _scsi_io_transfer - scsi io transfer | 141 | * struct _scsi_io_transfer - scsi io transfer |
138 | * @handle: sas device handle (assigned by firmware) | 142 | * @handle: sas device handle (assigned by firmware) |
@@ -1305,7 +1309,6 @@ _scsih_slave_alloc(struct scsi_device *sdev) | |||
1305 | struct MPT2SAS_DEVICE *sas_device_priv_data; | 1309 | struct MPT2SAS_DEVICE *sas_device_priv_data; |
1306 | struct scsi_target *starget; | 1310 | struct scsi_target *starget; |
1307 | struct _raid_device *raid_device; | 1311 | struct _raid_device *raid_device; |
1308 | struct _sas_device *sas_device; | ||
1309 | unsigned long flags; | 1312 | unsigned long flags; |
1310 | 1313 | ||
1311 | sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); | 1314 | sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); |
@@ -1332,21 +1335,8 @@ _scsih_slave_alloc(struct scsi_device *sdev) | |||
1332 | if (raid_device) | 1335 | if (raid_device) |
1333 | raid_device->sdev = sdev; /* raid is single lun */ | 1336 | raid_device->sdev = sdev; /* raid is single lun */ |
1334 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | 1337 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); |
1335 | } else { | ||
1336 | /* set TLR bit for SSP devices */ | ||
1337 | if (!(ioc->facts.IOCCapabilities & | ||
1338 | MPI2_IOCFACTS_CAPABILITY_TLR)) | ||
1339 | goto out; | ||
1340 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
1341 | sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | ||
1342 | sas_device_priv_data->sas_target->sas_address); | ||
1343 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
1344 | if (sas_device && sas_device->device_info & | ||
1345 | MPI2_SAS_DEVICE_INFO_SSP_TARGET) | ||
1346 | sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON; | ||
1347 | } | 1338 | } |
1348 | 1339 | ||
1349 | out: | ||
1350 | return 0; | 1340 | return 0; |
1351 | } | 1341 | } |
1352 | 1342 | ||
@@ -1419,6 +1409,140 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, | |||
1419 | } | 1409 | } |
1420 | 1410 | ||
1421 | /** | 1411 | /** |
1412 | * _scsih_is_raid - return boolean indicating device is raid volume | ||
1413 | * @dev the device struct object | ||
1414 | */ | ||
1415 | static int | ||
1416 | _scsih_is_raid(struct device *dev) | ||
1417 | { | ||
1418 | struct scsi_device *sdev = to_scsi_device(dev); | ||
1419 | |||
1420 | return (sdev->channel == RAID_CHANNEL) ? 1 : 0; | ||
1421 | } | ||
1422 | |||
1423 | /** | ||
1424 | * _scsih_get_resync - get raid volume resync percent complete | ||
1425 | * @dev the device struct object | ||
1426 | */ | ||
1427 | static void | ||
1428 | _scsih_get_resync(struct device *dev) | ||
1429 | { | ||
1430 | struct scsi_device *sdev = to_scsi_device(dev); | ||
1431 | struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host); | ||
1432 | static struct _raid_device *raid_device; | ||
1433 | unsigned long flags; | ||
1434 | Mpi2RaidVolPage0_t vol_pg0; | ||
1435 | Mpi2ConfigReply_t mpi_reply; | ||
1436 | u32 volume_status_flags; | ||
1437 | u8 percent_complete = 0; | ||
1438 | |||
1439 | spin_lock_irqsave(&ioc->raid_device_lock, flags); | ||
1440 | raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, | ||
1441 | sdev->channel); | ||
1442 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | ||
1443 | |||
1444 | if (!raid_device) | ||
1445 | goto out; | ||
1446 | |||
1447 | if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, | ||
1448 | MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, | ||
1449 | sizeof(Mpi2RaidVolPage0_t))) { | ||
1450 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1451 | ioc->name, __FILE__, __LINE__, __func__); | ||
1452 | goto out; | ||
1453 | } | ||
1454 | |||
1455 | volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); | ||
1456 | if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) | ||
1457 | percent_complete = raid_device->percent_complete; | ||
1458 | out: | ||
1459 | raid_set_resync(mpt2sas_raid_template, dev, percent_complete); | ||
1460 | } | ||
1461 | |||
1462 | /** | ||
1463 | * _scsih_get_state - get raid volume level | ||
1464 | * @dev the device struct object | ||
1465 | */ | ||
1466 | static void | ||
1467 | _scsih_get_state(struct device *dev) | ||
1468 | { | ||
1469 | struct scsi_device *sdev = to_scsi_device(dev); | ||
1470 | struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host); | ||
1471 | static struct _raid_device *raid_device; | ||
1472 | unsigned long flags; | ||
1473 | Mpi2RaidVolPage0_t vol_pg0; | ||
1474 | Mpi2ConfigReply_t mpi_reply; | ||
1475 | u32 volstate; | ||
1476 | enum raid_state state = RAID_STATE_UNKNOWN; | ||
1477 | |||
1478 | spin_lock_irqsave(&ioc->raid_device_lock, flags); | ||
1479 | raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, | ||
1480 | sdev->channel); | ||
1481 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | ||
1482 | |||
1483 | if (!raid_device) | ||
1484 | goto out; | ||
1485 | |||
1486 | if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, | ||
1487 | MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, | ||
1488 | sizeof(Mpi2RaidVolPage0_t))) { | ||
1489 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1490 | ioc->name, __FILE__, __LINE__, __func__); | ||
1491 | goto out; | ||
1492 | } | ||
1493 | |||
1494 | volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); | ||
1495 | if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { | ||
1496 | state = RAID_STATE_RESYNCING; | ||
1497 | goto out; | ||
1498 | } | ||
1499 | |||
1500 | switch (vol_pg0.VolumeState) { | ||
1501 | case MPI2_RAID_VOL_STATE_OPTIMAL: | ||
1502 | case MPI2_RAID_VOL_STATE_ONLINE: | ||
1503 | state = RAID_STATE_ACTIVE; | ||
1504 | break; | ||
1505 | case MPI2_RAID_VOL_STATE_DEGRADED: | ||
1506 | state = RAID_STATE_DEGRADED; | ||
1507 | break; | ||
1508 | case MPI2_RAID_VOL_STATE_FAILED: | ||
1509 | case MPI2_RAID_VOL_STATE_MISSING: | ||
1510 | state = RAID_STATE_OFFLINE; | ||
1511 | break; | ||
1512 | } | ||
1513 | out: | ||
1514 | raid_set_state(mpt2sas_raid_template, dev, state); | ||
1515 | } | ||
1516 | |||
1517 | /** | ||
1518 | * _scsih_set_level - set raid level | ||
1519 | * @sdev: scsi device struct | ||
1520 | * @raid_device: raid_device object | ||
1521 | */ | ||
1522 | static void | ||
1523 | _scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device) | ||
1524 | { | ||
1525 | enum raid_level level = RAID_LEVEL_UNKNOWN; | ||
1526 | |||
1527 | switch (raid_device->volume_type) { | ||
1528 | case MPI2_RAID_VOL_TYPE_RAID0: | ||
1529 | level = RAID_LEVEL_0; | ||
1530 | break; | ||
1531 | case MPI2_RAID_VOL_TYPE_RAID10: | ||
1532 | level = RAID_LEVEL_10; | ||
1533 | break; | ||
1534 | case MPI2_RAID_VOL_TYPE_RAID1E: | ||
1535 | level = RAID_LEVEL_1E; | ||
1536 | break; | ||
1537 | case MPI2_RAID_VOL_TYPE_RAID1: | ||
1538 | level = RAID_LEVEL_1; | ||
1539 | break; | ||
1540 | } | ||
1541 | |||
1542 | raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level); | ||
1543 | } | ||
1544 | |||
1545 | /** | ||
1422 | * _scsih_get_volume_capabilities - volume capabilities | 1546 | * _scsih_get_volume_capabilities - volume capabilities |
1423 | * @ioc: per adapter object | 1547 | * @ioc: per adapter object |
1424 | * @sas_device: the raid_device object | 1548 | * @sas_device: the raid_device object |
@@ -1479,6 +1603,32 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, | |||
1479 | } | 1603 | } |
1480 | 1604 | ||
1481 | /** | 1605 | /** |
1606 | * _scsih_enable_tlr - setting TLR flags | ||
1607 | * @ioc: per adapter object | ||
1608 | * @sdev: scsi device struct | ||
1609 | * | ||
1610 | * Enabling Transaction Layer Retries for tape devices when | ||
1611 | * vpd page 0x90 is present | ||
1612 | * | ||
1613 | */ | ||
1614 | static void | ||
1615 | _scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev) | ||
1616 | { | ||
1617 | /* only for TAPE */ | ||
1618 | if (sdev->type != TYPE_TAPE) | ||
1619 | return; | ||
1620 | |||
1621 | if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) | ||
1622 | return; | ||
1623 | |||
1624 | sas_enable_tlr(sdev); | ||
1625 | sdev_printk(KERN_INFO, sdev, "TLR %s\n", | ||
1626 | sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); | ||
1627 | return; | ||
1628 | |||
1629 | } | ||
1630 | |||
1631 | /** | ||
1482 | * _scsih_slave_configure - device configure routine. | 1632 | * _scsih_slave_configure - device configure routine. |
1483 | * @sdev: scsi device struct | 1633 | * @sdev: scsi device struct |
1484 | * | 1634 | * |
@@ -1574,6 +1724,8 @@ _scsih_slave_configure(struct scsi_device *sdev) | |||
1574 | (unsigned long long)raid_device->wwid, | 1724 | (unsigned long long)raid_device->wwid, |
1575 | raid_device->num_pds, ds); | 1725 | raid_device->num_pds, ds); |
1576 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); | 1726 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); |
1727 | /* raid transport support */ | ||
1728 | _scsih_set_level(sdev, raid_device); | ||
1577 | return 0; | 1729 | return 0; |
1578 | } | 1730 | } |
1579 | 1731 | ||
@@ -1621,8 +1773,10 @@ _scsih_slave_configure(struct scsi_device *sdev) | |||
1621 | 1773 | ||
1622 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); | 1774 | _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); |
1623 | 1775 | ||
1624 | if (ssp_target) | 1776 | if (ssp_target) { |
1625 | sas_read_port_mode_page(sdev); | 1777 | sas_read_port_mode_page(sdev); |
1778 | _scsih_enable_tlr(ioc, sdev); | ||
1779 | } | ||
1626 | return 0; | 1780 | return 0; |
1627 | } | 1781 | } |
1628 | 1782 | ||
@@ -2908,8 +3062,9 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) | |||
2908 | 3062 | ||
2909 | } else | 3063 | } else |
2910 | mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; | 3064 | mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; |
2911 | 3065 | /* Make sure Device is not raid volume */ | |
2912 | if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON)) | 3066 | if (!_scsih_is_raid(&scmd->device->sdev_gendev) && |
3067 | sas_is_tlr_enabled(scmd->device)) | ||
2913 | mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; | 3068 | mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; |
2914 | 3069 | ||
2915 | smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); | 3070 | smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); |
@@ -3298,10 +3453,12 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
3298 | le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; | 3453 | le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; |
3299 | if (!sas_device_priv_data->tlr_snoop_check) { | 3454 | if (!sas_device_priv_data->tlr_snoop_check) { |
3300 | sas_device_priv_data->tlr_snoop_check++; | 3455 | sas_device_priv_data->tlr_snoop_check++; |
3301 | if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && | 3456 | if (!_scsih_is_raid(&scmd->device->sdev_gendev) && |
3302 | response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) | 3457 | sas_is_tlr_enabled(scmd->device) && |
3303 | sas_device_priv_data->flags &= | 3458 | response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { |
3304 | ~MPT_DEVICE_TLR_ON; | 3459 | sas_disable_tlr(scmd->device); |
3460 | sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); | ||
3461 | } | ||
3305 | } | 3462 | } |
3306 | 3463 | ||
3307 | xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); | 3464 | xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); |
@@ -5170,11 +5327,33 @@ static void | |||
5170 | _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, | 5327 | _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, |
5171 | struct fw_event_work *fw_event) | 5328 | struct fw_event_work *fw_event) |
5172 | { | 5329 | { |
5330 | Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data; | ||
5331 | static struct _raid_device *raid_device; | ||
5332 | unsigned long flags; | ||
5333 | u16 handle; | ||
5334 | |||
5173 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 5335 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
5174 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 5336 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
5175 | _scsih_sas_ir_operation_status_event_debug(ioc, | 5337 | _scsih_sas_ir_operation_status_event_debug(ioc, |
5176 | fw_event->event_data); | 5338 | event_data); |
5177 | #endif | 5339 | #endif |
5340 | |||
5341 | /* code added for raid transport support */ | ||
5342 | if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { | ||
5343 | |||
5344 | handle = le16_to_cpu(event_data->VolDevHandle); | ||
5345 | |||
5346 | spin_lock_irqsave(&ioc->raid_device_lock, flags); | ||
5347 | raid_device = _scsih_raid_device_find_by_handle(ioc, handle); | ||
5348 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | ||
5349 | |||
5350 | if (!raid_device) | ||
5351 | return; | ||
5352 | |||
5353 | if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) | ||
5354 | raid_device->percent_complete = | ||
5355 | event_data->PercentComplete; | ||
5356 | } | ||
5178 | } | 5357 | } |
5179 | 5358 | ||
5180 | /** | 5359 | /** |
@@ -5998,6 +6177,8 @@ _scsih_remove(struct pci_dev *pdev) | |||
5998 | struct _sas_port *mpt2sas_port; | 6177 | struct _sas_port *mpt2sas_port; |
5999 | struct _sas_device *sas_device; | 6178 | struct _sas_device *sas_device; |
6000 | struct _sas_node *expander_sibling; | 6179 | struct _sas_node *expander_sibling; |
6180 | struct _raid_device *raid_device, *next; | ||
6181 | struct MPT2SAS_TARGET *sas_target_priv_data; | ||
6001 | struct workqueue_struct *wq; | 6182 | struct workqueue_struct *wq; |
6002 | unsigned long flags; | 6183 | unsigned long flags; |
6003 | 6184 | ||
@@ -6011,6 +6192,21 @@ _scsih_remove(struct pci_dev *pdev) | |||
6011 | if (wq) | 6192 | if (wq) |
6012 | destroy_workqueue(wq); | 6193 | destroy_workqueue(wq); |
6013 | 6194 | ||
6195 | /* release all the volumes */ | ||
6196 | list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, | ||
6197 | list) { | ||
6198 | if (raid_device->starget) { | ||
6199 | sas_target_priv_data = | ||
6200 | raid_device->starget->hostdata; | ||
6201 | sas_target_priv_data->deleted = 1; | ||
6202 | scsi_remove_target(&raid_device->starget->dev); | ||
6203 | } | ||
6204 | printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid" | ||
6205 | "(0x%016llx)\n", ioc->name, raid_device->handle, | ||
6206 | (unsigned long long) raid_device->wwid); | ||
6207 | _scsih_raid_device_remove(ioc, raid_device); | ||
6208 | } | ||
6209 | |||
6014 | /* free ports attached to the sas_host */ | 6210 | /* free ports attached to the sas_host */ |
6015 | retry_again: | 6211 | retry_again: |
6016 | list_for_each_entry(mpt2sas_port, | 6212 | list_for_each_entry(mpt2sas_port, |
@@ -6373,6 +6569,13 @@ static struct pci_driver scsih_driver = { | |||
6373 | #endif | 6569 | #endif |
6374 | }; | 6570 | }; |
6375 | 6571 | ||
6572 | /* raid transport support */ | ||
6573 | static struct raid_function_template mpt2sas_raid_functions = { | ||
6574 | .cookie = &scsih_driver_template, | ||
6575 | .is_raid = _scsih_is_raid, | ||
6576 | .get_resync = _scsih_get_resync, | ||
6577 | .get_state = _scsih_get_state, | ||
6578 | }; | ||
6376 | 6579 | ||
6377 | /** | 6580 | /** |
6378 | * _scsih_init - main entry point for this driver. | 6581 | * _scsih_init - main entry point for this driver. |
@@ -6392,6 +6595,12 @@ _scsih_init(void) | |||
6392 | sas_attach_transport(&mpt2sas_transport_functions); | 6595 | sas_attach_transport(&mpt2sas_transport_functions); |
6393 | if (!mpt2sas_transport_template) | 6596 | if (!mpt2sas_transport_template) |
6394 | return -ENODEV; | 6597 | return -ENODEV; |
6598 | /* raid transport support */ | ||
6599 | mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions); | ||
6600 | if (!mpt2sas_raid_template) { | ||
6601 | sas_release_transport(mpt2sas_transport_template); | ||
6602 | return -ENODEV; | ||
6603 | } | ||
6395 | 6604 | ||
6396 | mpt2sas_base_initialize_callback_handler(); | 6605 | mpt2sas_base_initialize_callback_handler(); |
6397 | 6606 | ||
@@ -6426,8 +6635,11 @@ _scsih_init(void) | |||
6426 | mpt2sas_ctl_init(); | 6635 | mpt2sas_ctl_init(); |
6427 | 6636 | ||
6428 | error = pci_register_driver(&scsih_driver); | 6637 | error = pci_register_driver(&scsih_driver); |
6429 | if (error) | 6638 | if (error) { |
6639 | /* raid transport support */ | ||
6640 | raid_class_release(mpt2sas_raid_template); | ||
6430 | sas_release_transport(mpt2sas_transport_template); | 6641 | sas_release_transport(mpt2sas_transport_template); |
6642 | } | ||
6431 | 6643 | ||
6432 | return error; | 6644 | return error; |
6433 | } | 6645 | } |
@@ -6445,7 +6657,8 @@ _scsih_exit(void) | |||
6445 | 6657 | ||
6446 | pci_unregister_driver(&scsih_driver); | 6658 | pci_unregister_driver(&scsih_driver); |
6447 | 6659 | ||
6448 | sas_release_transport(mpt2sas_transport_template); | 6660 | mpt2sas_ctl_exit(); |
6661 | |||
6449 | mpt2sas_base_release_callback_handler(scsi_io_cb_idx); | 6662 | mpt2sas_base_release_callback_handler(scsi_io_cb_idx); |
6450 | mpt2sas_base_release_callback_handler(tm_cb_idx); | 6663 | mpt2sas_base_release_callback_handler(tm_cb_idx); |
6451 | mpt2sas_base_release_callback_handler(base_cb_idx); | 6664 | mpt2sas_base_release_callback_handler(base_cb_idx); |
@@ -6457,7 +6670,10 @@ _scsih_exit(void) | |||
6457 | mpt2sas_base_release_callback_handler(tm_tr_cb_idx); | 6670 | mpt2sas_base_release_callback_handler(tm_tr_cb_idx); |
6458 | mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx); | 6671 | mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx); |
6459 | 6672 | ||
6460 | mpt2sas_ctl_exit(); | 6673 | /* raid transport support */ |
6674 | raid_class_release(mpt2sas_raid_template); | ||
6675 | sas_release_transport(mpt2sas_transport_template); | ||
6676 | |||
6461 | } | 6677 | } |
6462 | 6678 | ||
6463 | module_init(_scsih_init); | 6679 | module_init(_scsih_init); |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 3a82872bad44..789f9ee7f001 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c | |||
@@ -855,6 +855,17 @@ rphy_to_ioc(struct sas_rphy *rphy) | |||
855 | return shost_priv(shost); | 855 | return shost_priv(shost); |
856 | } | 856 | } |
857 | 857 | ||
858 | static struct _sas_phy * | ||
859 | _transport_find_local_phy(struct MPT2SAS_ADAPTER *ioc, struct sas_phy *phy) | ||
860 | { | ||
861 | int i; | ||
862 | |||
863 | for (i = 0; i < ioc->sas_hba.num_phys; i++) | ||
864 | if (ioc->sas_hba.phy[i].phy == phy) | ||
865 | return(&ioc->sas_hba.phy[i]); | ||
866 | return NULL; | ||
867 | } | ||
868 | |||
858 | /** | 869 | /** |
859 | * _transport_get_linkerrors - | 870 | * _transport_get_linkerrors - |
860 | * @phy: The sas phy object | 871 | * @phy: The sas phy object |
@@ -870,14 +881,8 @@ _transport_get_linkerrors(struct sas_phy *phy) | |||
870 | struct _sas_phy *mpt2sas_phy; | 881 | struct _sas_phy *mpt2sas_phy; |
871 | Mpi2ConfigReply_t mpi_reply; | 882 | Mpi2ConfigReply_t mpi_reply; |
872 | Mpi2SasPhyPage1_t phy_pg1; | 883 | Mpi2SasPhyPage1_t phy_pg1; |
873 | int i; | ||
874 | 884 | ||
875 | for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys && | 885 | mpt2sas_phy = _transport_find_local_phy(ioc, phy); |
876 | !mpt2sas_phy; i++) { | ||
877 | if (ioc->sas_hba.phy[i].phy != phy) | ||
878 | continue; | ||
879 | mpt2sas_phy = &ioc->sas_hba.phy[i]; | ||
880 | } | ||
881 | 886 | ||
882 | if (!mpt2sas_phy) /* this phy not on sas_host */ | 887 | if (!mpt2sas_phy) /* this phy not on sas_host */ |
883 | return -EINVAL; | 888 | return -EINVAL; |
@@ -971,14 +976,8 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) | |||
971 | struct _sas_phy *mpt2sas_phy; | 976 | struct _sas_phy *mpt2sas_phy; |
972 | Mpi2SasIoUnitControlReply_t mpi_reply; | 977 | Mpi2SasIoUnitControlReply_t mpi_reply; |
973 | Mpi2SasIoUnitControlRequest_t mpi_request; | 978 | Mpi2SasIoUnitControlRequest_t mpi_request; |
974 | int i; | ||
975 | 979 | ||
976 | for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys && | 980 | mpt2sas_phy = _transport_find_local_phy(ioc, phy); |
977 | !mpt2sas_phy; i++) { | ||
978 | if (ioc->sas_hba.phy[i].phy != phy) | ||
979 | continue; | ||
980 | mpt2sas_phy = &ioc->sas_hba.phy[i]; | ||
981 | } | ||
982 | 981 | ||
983 | if (!mpt2sas_phy) /* this phy not on sas_host */ | 982 | if (!mpt2sas_phy) /* this phy not on sas_host */ |
984 | return -EINVAL; | 983 | return -EINVAL; |
@@ -1006,6 +1005,173 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) | |||
1006 | } | 1005 | } |
1007 | 1006 | ||
1008 | /** | 1007 | /** |
1008 | * _transport_phy_enable - enable/disable phys | ||
1009 | * @phy: The sas phy object | ||
1010 | * @enable: enable phy when true | ||
1011 | * | ||
1012 | * Only support sas_host direct attached phys. | ||
1013 | * Returns 0 for success, non-zero for failure. | ||
1014 | */ | ||
1015 | static int | ||
1016 | _transport_phy_enable(struct sas_phy *phy, int enable) | ||
1017 | { | ||
1018 | struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); | ||
1019 | struct _sas_phy *mpt2sas_phy; | ||
1020 | Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; | ||
1021 | Mpi2ConfigReply_t mpi_reply; | ||
1022 | u16 ioc_status; | ||
1023 | u16 sz; | ||
1024 | int rc = 0; | ||
1025 | |||
1026 | mpt2sas_phy = _transport_find_local_phy(ioc, phy); | ||
1027 | |||
1028 | if (!mpt2sas_phy) /* this phy not on sas_host */ | ||
1029 | return -EINVAL; | ||
1030 | |||
1031 | /* sas_iounit page 1 */ | ||
1032 | sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * | ||
1033 | sizeof(Mpi2SasIOUnit1PhyData_t)); | ||
1034 | sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); | ||
1035 | if (!sas_iounit_pg1) { | ||
1036 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1037 | ioc->name, __FILE__, __LINE__, __func__); | ||
1038 | rc = -ENOMEM; | ||
1039 | goto out; | ||
1040 | } | ||
1041 | if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, | ||
1042 | sas_iounit_pg1, sz))) { | ||
1043 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1044 | ioc->name, __FILE__, __LINE__, __func__); | ||
1045 | rc = -ENXIO; | ||
1046 | goto out; | ||
1047 | } | ||
1048 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
1049 | MPI2_IOCSTATUS_MASK; | ||
1050 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | ||
1051 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1052 | ioc->name, __FILE__, __LINE__, __func__); | ||
1053 | rc = -EIO; | ||
1054 | goto out; | ||
1055 | } | ||
1056 | |||
1057 | if (enable) | ||
1058 | sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags | ||
1059 | &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; | ||
1060 | else | ||
1061 | sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags | ||
1062 | |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; | ||
1063 | |||
1064 | mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz); | ||
1065 | |||
1066 | out: | ||
1067 | kfree(sas_iounit_pg1); | ||
1068 | return rc; | ||
1069 | } | ||
1070 | |||
1071 | /** | ||
1072 | * _transport_phy_speed - set phy min/max link rates | ||
1073 | * @phy: The sas phy object | ||
1074 | * @rates: rates defined in sas_phy_linkrates | ||
1075 | * | ||
1076 | * Only support sas_host direct attached phys. | ||
1077 | * Returns 0 for success, non-zero for failure. | ||
1078 | */ | ||
1079 | static int | ||
1080 | _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) | ||
1081 | { | ||
1082 | struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); | ||
1083 | struct _sas_phy *mpt2sas_phy; | ||
1084 | Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; | ||
1085 | Mpi2SasPhyPage0_t phy_pg0; | ||
1086 | Mpi2ConfigReply_t mpi_reply; | ||
1087 | u16 ioc_status; | ||
1088 | u16 sz; | ||
1089 | int i; | ||
1090 | int rc = 0; | ||
1091 | |||
1092 | mpt2sas_phy = _transport_find_local_phy(ioc, phy); | ||
1093 | |||
1094 | if (!mpt2sas_phy) /* this phy not on sas_host */ | ||
1095 | return -EINVAL; | ||
1096 | |||
1097 | if (!rates->minimum_linkrate) | ||
1098 | rates->minimum_linkrate = phy->minimum_linkrate; | ||
1099 | else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) | ||
1100 | rates->minimum_linkrate = phy->minimum_linkrate_hw; | ||
1101 | |||
1102 | if (!rates->maximum_linkrate) | ||
1103 | rates->maximum_linkrate = phy->maximum_linkrate; | ||
1104 | else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) | ||
1105 | rates->maximum_linkrate = phy->maximum_linkrate_hw; | ||
1106 | |||
1107 | /* sas_iounit page 1 */ | ||
1108 | sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * | ||
1109 | sizeof(Mpi2SasIOUnit1PhyData_t)); | ||
1110 | sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); | ||
1111 | if (!sas_iounit_pg1) { | ||
1112 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1113 | ioc->name, __FILE__, __LINE__, __func__); | ||
1114 | rc = -ENOMEM; | ||
1115 | goto out; | ||
1116 | } | ||
1117 | if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, | ||
1118 | sas_iounit_pg1, sz))) { | ||
1119 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1120 | ioc->name, __FILE__, __LINE__, __func__); | ||
1121 | rc = -ENXIO; | ||
1122 | goto out; | ||
1123 | } | ||
1124 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
1125 | MPI2_IOCSTATUS_MASK; | ||
1126 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | ||
1127 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1128 | ioc->name, __FILE__, __LINE__, __func__); | ||
1129 | rc = -EIO; | ||
1130 | goto out; | ||
1131 | } | ||
1132 | |||
1133 | for (i = 0; i < ioc->sas_hba.num_phys; i++) { | ||
1134 | if (mpt2sas_phy->phy_id != i) { | ||
1135 | sas_iounit_pg1->PhyData[i].MaxMinLinkRate = | ||
1136 | (ioc->sas_hba.phy[i].phy->minimum_linkrate + | ||
1137 | (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); | ||
1138 | } else { | ||
1139 | sas_iounit_pg1->PhyData[i].MaxMinLinkRate = | ||
1140 | (rates->minimum_linkrate + | ||
1141 | (rates->maximum_linkrate << 4)); | ||
1142 | } | ||
1143 | } | ||
1144 | |||
1145 | if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, | ||
1146 | sz)) { | ||
1147 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1148 | ioc->name, __FILE__, __LINE__, __func__); | ||
1149 | rc = -ENXIO; | ||
1150 | goto out; | ||
1151 | } | ||
1152 | |||
1153 | /* link reset */ | ||
1154 | _transport_phy_reset(phy, 0); | ||
1155 | |||
1156 | /* read phy page 0, then update the rates in the sas transport phy */ | ||
1157 | if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, | ||
1158 | mpt2sas_phy->phy_id)) { | ||
1159 | phy->minimum_linkrate = _transport_convert_phy_link_rate( | ||
1160 | phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); | ||
1161 | phy->maximum_linkrate = _transport_convert_phy_link_rate( | ||
1162 | phy_pg0.ProgrammedLinkRate >> 4); | ||
1163 | phy->negotiated_linkrate = _transport_convert_phy_link_rate( | ||
1164 | phy_pg0.NegotiatedLinkRate & | ||
1165 | MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); | ||
1166 | } | ||
1167 | |||
1168 | out: | ||
1169 | kfree(sas_iounit_pg1); | ||
1170 | return rc; | ||
1171 | } | ||
1172 | |||
1173 | |||
1174 | /** | ||
1009 | * _transport_smp_handler - transport portal for smp passthru | 1175 | * _transport_smp_handler - transport portal for smp passthru |
1010 | * @shost: shost object | 1176 | * @shost: shost object |
1011 | * @rphy: sas transport rphy object | 1177 | * @rphy: sas transport rphy object |
@@ -1207,6 +1373,8 @@ struct sas_function_template mpt2sas_transport_functions = { | |||
1207 | .get_enclosure_identifier = _transport_get_enclosure_identifier, | 1373 | .get_enclosure_identifier = _transport_get_enclosure_identifier, |
1208 | .get_bay_identifier = _transport_get_bay_identifier, | 1374 | .get_bay_identifier = _transport_get_bay_identifier, |
1209 | .phy_reset = _transport_phy_reset, | 1375 | .phy_reset = _transport_phy_reset, |
1376 | .phy_enable = _transport_phy_enable, | ||
1377 | .set_phy_speed = _transport_phy_speed, | ||
1210 | .smp_handler = _transport_smp_handler, | 1378 | .smp_handler = _transport_smp_handler, |
1211 | }; | 1379 | }; |
1212 | 1380 | ||
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index c2f1032496cb..f80c1da8f6ca 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c | |||
@@ -654,7 +654,7 @@ static int __devinit pm8001_pci_probe(struct pci_dev *pdev, | |||
654 | } | 654 | } |
655 | chip = &pm8001_chips[ent->driver_data]; | 655 | chip = &pm8001_chips[ent->driver_data]; |
656 | SHOST_TO_SAS_HA(shost) = | 656 | SHOST_TO_SAS_HA(shost) = |
657 | kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); | 657 | kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL); |
658 | if (!SHOST_TO_SAS_HA(shost)) { | 658 | if (!SHOST_TO_SAS_HA(shost)) { |
659 | rc = -ENOMEM; | 659 | rc = -ENOMEM; |
660 | goto err_out_free_host; | 660 | goto err_out_free_host; |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 8371d917a9a2..49ac4148493b 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -1640,8 +1640,10 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha) | |||
1640 | uint16_t mb[MAILBOX_REGISTER_COUNT], i; | 1640 | uint16_t mb[MAILBOX_REGISTER_COUNT], i; |
1641 | int err; | 1641 | int err; |
1642 | 1642 | ||
1643 | spin_unlock_irq(ha->host->host_lock); | ||
1643 | err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname, | 1644 | err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname, |
1644 | &ha->pdev->dev); | 1645 | &ha->pdev->dev); |
1646 | spin_lock_irq(ha->host->host_lock); | ||
1645 | if (err) { | 1647 | if (err) { |
1646 | printk(KERN_ERR "Failed to load image \"%s\" err %d\n", | 1648 | printk(KERN_ERR "Failed to load image \"%s\" err %d\n", |
1647 | ql1280_board_tbl[ha->devnum].fwname, err); | 1649 | ql1280_board_tbl[ha->devnum].fwname, err); |
@@ -1699,8 +1701,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) | |||
1699 | return -ENOMEM; | 1701 | return -ENOMEM; |
1700 | #endif | 1702 | #endif |
1701 | 1703 | ||
1704 | spin_unlock_irq(ha->host->host_lock); | ||
1702 | err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname, | 1705 | err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname, |
1703 | &ha->pdev->dev); | 1706 | &ha->pdev->dev); |
1707 | spin_lock_irq(ha->host->host_lock); | ||
1704 | if (err) { | 1708 | if (err) { |
1705 | printk(KERN_ERR "Failed to load image \"%s\" err %d\n", | 1709 | printk(KERN_ERR "Failed to load image \"%s\" err %d\n", |
1706 | ql1280_board_tbl[ha->devnum].fwname, err); | 1710 | ql1280_board_tbl[ha->devnum].fwname, err); |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 3a9f5b288aee..90d1e062ec4f 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -11,7 +11,9 @@ | |||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | 12 | ||
13 | static int qla24xx_vport_disable(struct fc_vport *, bool); | 13 | static int qla24xx_vport_disable(struct fc_vport *, bool); |
14 | 14 | static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *); | |
15 | int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *); | ||
16 | static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *); | ||
15 | /* SYSFS attributes --------------------------------------------------------- */ | 17 | /* SYSFS attributes --------------------------------------------------------- */ |
16 | 18 | ||
17 | static ssize_t | 19 | static ssize_t |
@@ -1168,6 +1170,28 @@ qla2x00_total_isp_aborts_show(struct device *dev, | |||
1168 | } | 1170 | } |
1169 | 1171 | ||
1170 | static ssize_t | 1172 | static ssize_t |
1173 | qla24xx_84xx_fw_version_show(struct device *dev, | ||
1174 | struct device_attribute *attr, char *buf) | ||
1175 | { | ||
1176 | int rval = QLA_SUCCESS; | ||
1177 | uint16_t status[2] = {0, 0}; | ||
1178 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | ||
1179 | struct qla_hw_data *ha = vha->hw; | ||
1180 | |||
1181 | if (IS_QLA84XX(ha) && ha->cs84xx) { | ||
1182 | if (ha->cs84xx->op_fw_version == 0) { | ||
1183 | rval = qla84xx_verify_chip(vha, status); | ||
1184 | } | ||
1185 | |||
1186 | if ((rval == QLA_SUCCESS) && (status[0] == 0)) | ||
1187 | return snprintf(buf, PAGE_SIZE, "%u\n", | ||
1188 | (uint32_t)ha->cs84xx->op_fw_version); | ||
1189 | } | ||
1190 | |||
1191 | return snprintf(buf, PAGE_SIZE, "\n"); | ||
1192 | } | ||
1193 | |||
1194 | static ssize_t | ||
1171 | qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, | 1195 | qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, |
1172 | char *buf) | 1196 | char *buf) |
1173 | { | 1197 | { |
@@ -1281,6 +1305,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO, | |||
1281 | qla2x00_optrom_fcode_version_show, NULL); | 1305 | qla2x00_optrom_fcode_version_show, NULL); |
1282 | static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, | 1306 | static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, |
1283 | NULL); | 1307 | NULL); |
1308 | static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show, | ||
1309 | NULL); | ||
1284 | static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, | 1310 | static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, |
1285 | NULL); | 1311 | NULL); |
1286 | static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); | 1312 | static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); |
@@ -1310,6 +1336,7 @@ struct device_attribute *qla2x00_host_attrs[] = { | |||
1310 | &dev_attr_optrom_efi_version, | 1336 | &dev_attr_optrom_efi_version, |
1311 | &dev_attr_optrom_fcode_version, | 1337 | &dev_attr_optrom_fcode_version, |
1312 | &dev_attr_optrom_fw_version, | 1338 | &dev_attr_optrom_fw_version, |
1339 | &dev_attr_84xx_fw_version, | ||
1313 | &dev_attr_total_isp_aborts, | 1340 | &dev_attr_total_isp_aborts, |
1314 | &dev_attr_mpi_version, | 1341 | &dev_attr_mpi_version, |
1315 | &dev_attr_phy_version, | 1342 | &dev_attr_phy_version, |
@@ -1504,8 +1531,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) | |||
1504 | fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, | 1531 | fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, |
1505 | fcport->loop_id, fcport->d_id.b.domain, | 1532 | fcport->loop_id, fcport->d_id.b.domain, |
1506 | fcport->d_id.b.area, fcport->d_id.b.al_pa); | 1533 | fcport->d_id.b.area, fcport->d_id.b.al_pa); |
1507 | |||
1508 | qla2x00_abort_fcport_cmds(fcport); | ||
1509 | } | 1534 | } |
1510 | 1535 | ||
1511 | static int | 1536 | static int |
@@ -1795,6 +1820,581 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable) | |||
1795 | return 0; | 1820 | return 0; |
1796 | } | 1821 | } |
1797 | 1822 | ||
1823 | /* BSG support for ELS/CT pass through */ | ||
1824 | inline srb_t * | ||
1825 | qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size) | ||
1826 | { | ||
1827 | srb_t *sp; | ||
1828 | struct qla_hw_data *ha = vha->hw; | ||
1829 | struct srb_bsg_ctx *ctx; | ||
1830 | |||
1831 | sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); | ||
1832 | if (!sp) | ||
1833 | goto done; | ||
1834 | ctx = kzalloc(size, GFP_KERNEL); | ||
1835 | if (!ctx) { | ||
1836 | mempool_free(sp, ha->srb_mempool); | ||
1837 | goto done; | ||
1838 | } | ||
1839 | |||
1840 | memset(sp, 0, sizeof(*sp)); | ||
1841 | sp->fcport = fcport; | ||
1842 | sp->ctx = ctx; | ||
1843 | done: | ||
1844 | return sp; | ||
1845 | } | ||
1846 | |||
1847 | static int | ||
1848 | qla2x00_process_els(struct fc_bsg_job *bsg_job) | ||
1849 | { | ||
1850 | struct fc_rport *rport; | ||
1851 | fc_port_t *fcport; | ||
1852 | struct Scsi_Host *host; | ||
1853 | scsi_qla_host_t *vha; | ||
1854 | struct qla_hw_data *ha; | ||
1855 | srb_t *sp; | ||
1856 | const char *type; | ||
1857 | int req_sg_cnt, rsp_sg_cnt; | ||
1858 | int rval = (DRIVER_ERROR << 16); | ||
1859 | uint16_t nextlid = 0; | ||
1860 | struct srb_bsg *els; | ||
1861 | |||
1862 | /* Multiple SG's are not supported for ELS requests */ | ||
1863 | if (bsg_job->request_payload.sg_cnt > 1 || | ||
1864 | bsg_job->reply_payload.sg_cnt > 1) { | ||
1865 | DEBUG2(printk(KERN_INFO | ||
1866 | "multiple SG's are not supported for ELS requests" | ||
1867 | " [request_sg_cnt: %x reply_sg_cnt: %x]\n", | ||
1868 | bsg_job->request_payload.sg_cnt, | ||
1869 | bsg_job->reply_payload.sg_cnt)); | ||
1870 | rval = -EPERM; | ||
1871 | goto done; | ||
1872 | } | ||
1873 | |||
1874 | /* ELS request for rport */ | ||
1875 | if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { | ||
1876 | rport = bsg_job->rport; | ||
1877 | fcport = *(fc_port_t **) rport->dd_data; | ||
1878 | host = rport_to_shost(rport); | ||
1879 | vha = shost_priv(host); | ||
1880 | ha = vha->hw; | ||
1881 | type = "FC_BSG_RPT_ELS"; | ||
1882 | |||
1883 | /* make sure the rport is logged in, | ||
1884 | * if not perform fabric login | ||
1885 | */ | ||
1886 | if (qla2x00_fabric_login(vha, fcport, &nextlid)) { | ||
1887 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
1888 | "failed to login port %06X for ELS passthru\n", | ||
1889 | fcport->d_id.b24)); | ||
1890 | rval = -EIO; | ||
1891 | goto done; | ||
1892 | } | ||
1893 | } else { | ||
1894 | host = bsg_job->shost; | ||
1895 | vha = shost_priv(host); | ||
1896 | ha = vha->hw; | ||
1897 | type = "FC_BSG_HST_ELS_NOLOGIN"; | ||
1898 | |||
1899 | /* Allocate a dummy fcport structure, since functions | ||
1900 | * preparing the IOCB and mailbox command retrieves port | ||
1901 | * specific information from fcport structure. For Host based | ||
1902 | * ELS commands there will be no fcport structure allocated | ||
1903 | */ | ||
1904 | fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); | ||
1905 | if (!fcport) { | ||
1906 | rval = -ENOMEM; | ||
1907 | goto done; | ||
1908 | } | ||
1909 | |||
1910 | /* Initialize all required fields of fcport */ | ||
1911 | fcport->vha = vha; | ||
1912 | fcport->vp_idx = vha->vp_idx; | ||
1913 | fcport->d_id.b.al_pa = | ||
1914 | bsg_job->request->rqst_data.h_els.port_id[0]; | ||
1915 | fcport->d_id.b.area = | ||
1916 | bsg_job->request->rqst_data.h_els.port_id[1]; | ||
1917 | fcport->d_id.b.domain = | ||
1918 | bsg_job->request->rqst_data.h_els.port_id[2]; | ||
1919 | fcport->loop_id = | ||
1920 | (fcport->d_id.b.al_pa == 0xFD) ? | ||
1921 | NPH_FABRIC_CONTROLLER : NPH_F_PORT; | ||
1922 | } | ||
1923 | |||
1924 | if (!vha->flags.online) { | ||
1925 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
1926 | "host not online\n")); | ||
1927 | rval = -EIO; | ||
1928 | goto done; | ||
1929 | } | ||
1930 | |||
1931 | req_sg_cnt = | ||
1932 | dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | ||
1933 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1934 | if (!req_sg_cnt) { | ||
1935 | rval = -ENOMEM; | ||
1936 | goto done_free_fcport; | ||
1937 | } | ||
1938 | rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, | ||
1939 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
1940 | if (!rsp_sg_cnt) { | ||
1941 | rval = -ENOMEM; | ||
1942 | goto done_free_fcport; | ||
1943 | } | ||
1944 | |||
1945 | if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || | ||
1946 | (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) | ||
1947 | { | ||
1948 | DEBUG2(printk(KERN_INFO | ||
1949 | "dma mapping resulted in different sg counts \ | ||
1950 | [request_sg_cnt: %x dma_request_sg_cnt: %x\ | ||
1951 | reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", | ||
1952 | bsg_job->request_payload.sg_cnt, req_sg_cnt, | ||
1953 | bsg_job->reply_payload.sg_cnt, rsp_sg_cnt)); | ||
1954 | rval = -EAGAIN; | ||
1955 | goto done_unmap_sg; | ||
1956 | } | ||
1957 | |||
1958 | /* Alloc SRB structure */ | ||
1959 | sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg)); | ||
1960 | if (!sp) { | ||
1961 | rval = -ENOMEM; | ||
1962 | goto done_unmap_sg; | ||
1963 | } | ||
1964 | |||
1965 | els = sp->ctx; | ||
1966 | els->ctx.type = | ||
1967 | (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? | ||
1968 | SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); | ||
1969 | els->bsg_job = bsg_job; | ||
1970 | |||
1971 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
1972 | "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " | ||
1973 | "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, | ||
1974 | bsg_job->request->rqst_data.h_els.command_code, | ||
1975 | fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, | ||
1976 | fcport->d_id.b.al_pa)); | ||
1977 | |||
1978 | rval = qla2x00_start_sp(sp); | ||
1979 | if (rval != QLA_SUCCESS) { | ||
1980 | kfree(sp->ctx); | ||
1981 | mempool_free(sp, ha->srb_mempool); | ||
1982 | rval = -EIO; | ||
1983 | goto done_unmap_sg; | ||
1984 | } | ||
1985 | return rval; | ||
1986 | |||
1987 | done_unmap_sg: | ||
1988 | dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | ||
1989 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1990 | dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, | ||
1991 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
1992 | goto done_free_fcport; | ||
1993 | |||
1994 | done_free_fcport: | ||
1995 | if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN) | ||
1996 | kfree(fcport); | ||
1997 | done: | ||
1998 | return rval; | ||
1999 | } | ||
2000 | |||
2001 | static int | ||
2002 | qla2x00_process_ct(struct fc_bsg_job *bsg_job) | ||
2003 | { | ||
2004 | srb_t *sp; | ||
2005 | struct Scsi_Host *host = bsg_job->shost; | ||
2006 | scsi_qla_host_t *vha = shost_priv(host); | ||
2007 | struct qla_hw_data *ha = vha->hw; | ||
2008 | int rval = (DRIVER_ERROR << 16); | ||
2009 | int req_sg_cnt, rsp_sg_cnt; | ||
2010 | uint16_t loop_id; | ||
2011 | struct fc_port *fcport; | ||
2012 | char *type = "FC_BSG_HST_CT"; | ||
2013 | struct srb_bsg *ct; | ||
2014 | |||
2015 | /* pass through is supported only for ISP 4Gb or higher */ | ||
2016 | if (!IS_FWI2_CAPABLE(ha)) { | ||
2017 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2018 | "scsi(%ld):Firmware is not capable to support FC " | ||
2019 | "CT pass thru\n", vha->host_no)); | ||
2020 | rval = -EPERM; | ||
2021 | goto done; | ||
2022 | } | ||
2023 | |||
2024 | req_sg_cnt = | ||
2025 | dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | ||
2026 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
2027 | if (!req_sg_cnt) { | ||
2028 | rval = -ENOMEM; | ||
2029 | goto done; | ||
2030 | } | ||
2031 | |||
2032 | rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, | ||
2033 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
2034 | if (!rsp_sg_cnt) { | ||
2035 | rval = -ENOMEM; | ||
2036 | goto done; | ||
2037 | } | ||
2038 | |||
2039 | if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || | ||
2040 | (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) | ||
2041 | { | ||
2042 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2043 | "dma mapping resulted in different sg counts \ | ||
2044 | [request_sg_cnt: %x dma_request_sg_cnt: %x\ | ||
2045 | reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", | ||
2046 | bsg_job->request_payload.sg_cnt, req_sg_cnt, | ||
2047 | bsg_job->reply_payload.sg_cnt, rsp_sg_cnt)); | ||
2048 | rval = -EAGAIN; | ||
2049 | goto done_unmap_sg; | ||
2050 | } | ||
2051 | |||
2052 | if (!vha->flags.online) { | ||
2053 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2054 | "host not online\n")); | ||
2055 | rval = -EIO; | ||
2056 | goto done_unmap_sg; | ||
2057 | } | ||
2058 | |||
2059 | loop_id = | ||
2060 | (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000) | ||
2061 | >> 24; | ||
2062 | switch (loop_id) { | ||
2063 | case 0xFC: | ||
2064 | loop_id = cpu_to_le16(NPH_SNS); | ||
2065 | break; | ||
2066 | case 0xFA: | ||
2067 | loop_id = vha->mgmt_svr_loop_id; | ||
2068 | break; | ||
2069 | default: | ||
2070 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2071 | "Unknown loop id: %x\n", loop_id)); | ||
2072 | rval = -EINVAL; | ||
2073 | goto done_unmap_sg; | ||
2074 | } | ||
2075 | |||
2076 | /* Allocate a dummy fcport structure, since functions preparing the | ||
2077 | * IOCB and mailbox command retrieves port specific information | ||
2078 | * from fcport structure. For Host based ELS commands there will be | ||
2079 | * no fcport structure allocated | ||
2080 | */ | ||
2081 | fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); | ||
2082 | if (!fcport) | ||
2083 | { | ||
2084 | rval = -ENOMEM; | ||
2085 | goto done_unmap_sg; | ||
2086 | } | ||
2087 | |||
2088 | /* Initialize all required fields of fcport */ | ||
2089 | fcport->vha = vha; | ||
2090 | fcport->vp_idx = vha->vp_idx; | ||
2091 | fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; | ||
2092 | fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; | ||
2093 | fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; | ||
2094 | fcport->loop_id = loop_id; | ||
2095 | |||
2096 | /* Alloc SRB structure */ | ||
2097 | sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg)); | ||
2098 | if (!sp) { | ||
2099 | rval = -ENOMEM; | ||
2100 | goto done_free_fcport; | ||
2101 | } | ||
2102 | |||
2103 | ct = sp->ctx; | ||
2104 | ct->ctx.type = SRB_CT_CMD; | ||
2105 | ct->bsg_job = bsg_job; | ||
2106 | |||
2107 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2108 | "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " | ||
2109 | "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, | ||
2110 | (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), | ||
2111 | fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, | ||
2112 | fcport->d_id.b.al_pa)); | ||
2113 | |||
2114 | rval = qla2x00_start_sp(sp); | ||
2115 | if (rval != QLA_SUCCESS) { | ||
2116 | kfree(sp->ctx); | ||
2117 | mempool_free(sp, ha->srb_mempool); | ||
2118 | rval = -EIO; | ||
2119 | goto done_free_fcport; | ||
2120 | } | ||
2121 | return rval; | ||
2122 | |||
2123 | done_free_fcport: | ||
2124 | kfree(fcport); | ||
2125 | done_unmap_sg: | ||
2126 | dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | ||
2127 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
2128 | dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, | ||
2129 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
2130 | done: | ||
2131 | return rval; | ||
2132 | } | ||
2133 | |||
2134 | static int | ||
2135 | qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) | ||
2136 | { | ||
2137 | struct Scsi_Host *host = bsg_job->shost; | ||
2138 | scsi_qla_host_t *vha = shost_priv(host); | ||
2139 | struct qla_hw_data *ha = vha->hw; | ||
2140 | int rval; | ||
2141 | uint8_t command_sent; | ||
2142 | uint32_t vendor_cmd; | ||
2143 | char *type; | ||
2144 | struct msg_echo_lb elreq; | ||
2145 | uint16_t response[MAILBOX_REGISTER_COUNT]; | ||
2146 | uint8_t* fw_sts_ptr; | ||
2147 | uint8_t *req_data; | ||
2148 | dma_addr_t req_data_dma; | ||
2149 | uint32_t req_data_len; | ||
2150 | uint8_t *rsp_data; | ||
2151 | dma_addr_t rsp_data_dma; | ||
2152 | uint32_t rsp_data_len; | ||
2153 | |||
2154 | if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || | ||
2155 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
2156 | test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { | ||
2157 | rval = -EBUSY; | ||
2158 | goto done; | ||
2159 | } | ||
2160 | |||
2161 | if (!vha->flags.online) { | ||
2162 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2163 | "host not online\n")); | ||
2164 | rval = -EIO; | ||
2165 | goto done; | ||
2166 | } | ||
2167 | |||
2168 | elreq.req_sg_cnt = | ||
2169 | dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, | ||
2170 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
2171 | if (!elreq.req_sg_cnt) { | ||
2172 | rval = -ENOMEM; | ||
2173 | goto done; | ||
2174 | } | ||
2175 | elreq.rsp_sg_cnt = | ||
2176 | dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, | ||
2177 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
2178 | if (!elreq.rsp_sg_cnt) { | ||
2179 | rval = -ENOMEM; | ||
2180 | goto done; | ||
2181 | } | ||
2182 | |||
2183 | if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || | ||
2184 | (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) | ||
2185 | { | ||
2186 | DEBUG2(printk(KERN_INFO | ||
2187 | "dma mapping resulted in different sg counts \ | ||
2188 | [request_sg_cnt: %x dma_request_sg_cnt: %x\ | ||
2189 | reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", | ||
2190 | bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, | ||
2191 | bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt)); | ||
2192 | rval = -EAGAIN; | ||
2193 | goto done_unmap_sg; | ||
2194 | } | ||
2195 | req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; | ||
2196 | req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, | ||
2197 | &req_data_dma, GFP_KERNEL); | ||
2198 | |||
2199 | rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, | ||
2200 | &rsp_data_dma, GFP_KERNEL); | ||
2201 | |||
2202 | /* Copy the request buffer in req_data now */ | ||
2203 | sg_copy_to_buffer(bsg_job->request_payload.sg_list, | ||
2204 | bsg_job->request_payload.sg_cnt, req_data, | ||
2205 | req_data_len); | ||
2206 | |||
2207 | elreq.send_dma = req_data_dma; | ||
2208 | elreq.rcv_dma = rsp_data_dma; | ||
2209 | elreq.transfer_size = req_data_len; | ||
2210 | |||
2211 | /* Vendor cmd : loopback or ECHO diagnostic | ||
2212 | * Options: | ||
2213 | * Loopback : Either internal or external loopback | ||
2214 | * ECHO: ECHO ELS or Vendor specific FC4 link data | ||
2215 | */ | ||
2216 | vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]; | ||
2217 | elreq.options = | ||
2218 | *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd) | ||
2219 | + 1); | ||
2220 | |||
2221 | switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { | ||
2222 | case QL_VND_LOOPBACK: | ||
2223 | if (ha->current_topology != ISP_CFG_F) { | ||
2224 | type = "FC_BSG_HST_VENDOR_LOOPBACK"; | ||
2225 | |||
2226 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2227 | "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n", | ||
2228 | vha->host_no, type, vendor_cmd, elreq.options)); | ||
2229 | |||
2230 | command_sent = INT_DEF_LB_LOOPBACK_CMD; | ||
2231 | rval = qla2x00_loopback_test(vha, &elreq, response); | ||
2232 | if (IS_QLA81XX(ha)) { | ||
2233 | if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) { | ||
2234 | DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing " | ||
2235 | "ISP\n", __func__, vha->host_no)); | ||
2236 | set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); | ||
2237 | qla2xxx_wake_dpc(vha); | ||
2238 | } | ||
2239 | } | ||
2240 | } else { | ||
2241 | type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; | ||
2242 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2243 | "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n", | ||
2244 | vha->host_no, type, vendor_cmd, elreq.options)); | ||
2245 | |||
2246 | command_sent = INT_DEF_LB_ECHO_CMD; | ||
2247 | rval = qla2x00_echo_test(vha, &elreq, response); | ||
2248 | } | ||
2249 | break; | ||
2250 | case QLA84_RESET: | ||
2251 | if (!IS_QLA84XX(vha->hw)) { | ||
2252 | rval = -EINVAL; | ||
2253 | DEBUG16(printk( | ||
2254 | "%s(%ld): 8xxx exiting.\n", | ||
2255 | __func__, vha->host_no)); | ||
2256 | return rval; | ||
2257 | } | ||
2258 | rval = qla84xx_reset(vha, &elreq, bsg_job); | ||
2259 | break; | ||
2260 | case QLA84_MGMT_CMD: | ||
2261 | if (!IS_QLA84XX(vha->hw)) { | ||
2262 | rval = -EINVAL; | ||
2263 | DEBUG16(printk( | ||
2264 | "%s(%ld): 8xxx exiting.\n", | ||
2265 | __func__, vha->host_no)); | ||
2266 | return rval; | ||
2267 | } | ||
2268 | rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job); | ||
2269 | break; | ||
2270 | default: | ||
2271 | rval = -ENOSYS; | ||
2272 | } | ||
2273 | |||
2274 | if (rval != QLA_SUCCESS) { | ||
2275 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2276 | "scsi(%ld) Vendor request %s failed\n", vha->host_no, type)); | ||
2277 | rval = 0; | ||
2278 | bsg_job->reply->result = (DID_ERROR << 16); | ||
2279 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
2280 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); | ||
2281 | memcpy( fw_sts_ptr, response, sizeof(response)); | ||
2282 | fw_sts_ptr += sizeof(response); | ||
2283 | *fw_sts_ptr = command_sent; | ||
2284 | } else { | ||
2285 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
2286 | "scsi(%ld) Vendor request %s completed\n", vha->host_no, type)); | ||
2287 | rval = bsg_job->reply->result = 0; | ||
2288 | bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t); | ||
2289 | bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; | ||
2290 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); | ||
2291 | memcpy(fw_sts_ptr, response, sizeof(response)); | ||
2292 | fw_sts_ptr += sizeof(response); | ||
2293 | *fw_sts_ptr = command_sent; | ||
2294 | sg_copy_from_buffer(bsg_job->reply_payload.sg_list, | ||
2295 | bsg_job->reply_payload.sg_cnt, rsp_data, | ||
2296 | rsp_data_len); | ||
2297 | } | ||
2298 | bsg_job->job_done(bsg_job); | ||
2299 | |||
2300 | done_unmap_sg: | ||
2301 | |||
2302 | if(req_data) | ||
2303 | dma_free_coherent(&ha->pdev->dev, req_data_len, | ||
2304 | req_data, req_data_dma); | ||
2305 | dma_unmap_sg(&ha->pdev->dev, | ||
2306 | bsg_job->request_payload.sg_list, | ||
2307 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
2308 | dma_unmap_sg(&ha->pdev->dev, | ||
2309 | bsg_job->reply_payload.sg_list, | ||
2310 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
2311 | |||
2312 | done: | ||
2313 | return rval; | ||
2314 | } | ||
2315 | |||
2316 | static int | ||
2317 | qla24xx_bsg_request(struct fc_bsg_job *bsg_job) | ||
2318 | { | ||
2319 | int ret = -EINVAL; | ||
2320 | |||
2321 | switch (bsg_job->request->msgcode) { | ||
2322 | case FC_BSG_RPT_ELS: | ||
2323 | case FC_BSG_HST_ELS_NOLOGIN: | ||
2324 | ret = qla2x00_process_els(bsg_job); | ||
2325 | break; | ||
2326 | case FC_BSG_HST_CT: | ||
2327 | ret = qla2x00_process_ct(bsg_job); | ||
2328 | break; | ||
2329 | case FC_BSG_HST_VENDOR: | ||
2330 | ret = qla2x00_process_vendor_specific(bsg_job); | ||
2331 | break; | ||
2332 | case FC_BSG_HST_ADD_RPORT: | ||
2333 | case FC_BSG_HST_DEL_RPORT: | ||
2334 | case FC_BSG_RPT_CT: | ||
2335 | default: | ||
2336 | DEBUG2(printk("qla2xxx: unsupported BSG request\n")); | ||
2337 | break; | ||
2338 | } | ||
2339 | return ret; | ||
2340 | } | ||
2341 | |||
2342 | static int | ||
2343 | qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | ||
2344 | { | ||
2345 | scsi_qla_host_t *vha = shost_priv(bsg_job->shost); | ||
2346 | struct qla_hw_data *ha = vha->hw; | ||
2347 | srb_t *sp; | ||
2348 | int cnt, que; | ||
2349 | unsigned long flags; | ||
2350 | struct req_que *req; | ||
2351 | struct srb_bsg *sp_bsg; | ||
2352 | |||
2353 | /* find the bsg job from the active list of commands */ | ||
2354 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
2355 | for (que = 0; que < ha->max_req_queues; que++) { | ||
2356 | req = ha->req_q_map[que]; | ||
2357 | if (!req) | ||
2358 | continue; | ||
2359 | |||
2360 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) { | ||
2361 | sp = req->outstanding_cmds[cnt]; | ||
2362 | |||
2363 | if (sp) { | ||
2364 | sp_bsg = (struct srb_bsg*)sp->ctx; | ||
2365 | |||
2366 | if (((sp_bsg->ctx.type == SRB_CT_CMD) || | ||
2367 | (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) | ||
2368 | || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) && | ||
2369 | (sp_bsg->bsg_job == bsg_job)) { | ||
2370 | if (ha->isp_ops->abort_command(sp)) { | ||
2371 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2372 | "scsi(%ld): mbx abort_command failed\n", vha->host_no)); | ||
2373 | bsg_job->req->errors = bsg_job->reply->result = -EIO; | ||
2374 | } else { | ||
2375 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2376 | "scsi(%ld): mbx abort_command success\n", vha->host_no)); | ||
2377 | bsg_job->req->errors = bsg_job->reply->result = 0; | ||
2378 | } | ||
2379 | goto done; | ||
2380 | } | ||
2381 | } | ||
2382 | } | ||
2383 | } | ||
2384 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2385 | DEBUG2(qla_printk(KERN_INFO, ha, | ||
2386 | "scsi(%ld) SRB not found to abort\n", vha->host_no)); | ||
2387 | bsg_job->req->errors = bsg_job->reply->result = -ENXIO; | ||
2388 | return 0; | ||
2389 | |||
2390 | done: | ||
2391 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) | ||
2392 | kfree(sp->fcport); | ||
2393 | kfree(sp->ctx); | ||
2394 | mempool_free(sp, ha->srb_mempool); | ||
2395 | return 0; | ||
2396 | } | ||
2397 | |||
1798 | struct fc_function_template qla2xxx_transport_functions = { | 2398 | struct fc_function_template qla2xxx_transport_functions = { |
1799 | 2399 | ||
1800 | .show_host_node_name = 1, | 2400 | .show_host_node_name = 1, |
@@ -1838,6 +2438,8 @@ struct fc_function_template qla2xxx_transport_functions = { | |||
1838 | .vport_create = qla24xx_vport_create, | 2438 | .vport_create = qla24xx_vport_create, |
1839 | .vport_disable = qla24xx_vport_disable, | 2439 | .vport_disable = qla24xx_vport_disable, |
1840 | .vport_delete = qla24xx_vport_delete, | 2440 | .vport_delete = qla24xx_vport_delete, |
2441 | .bsg_request = qla24xx_bsg_request, | ||
2442 | .bsg_timeout = qla24xx_bsg_timeout, | ||
1841 | }; | 2443 | }; |
1842 | 2444 | ||
1843 | struct fc_function_template qla2xxx_transport_vport_functions = { | 2445 | struct fc_function_template qla2xxx_transport_vport_functions = { |
@@ -1878,6 +2480,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = { | |||
1878 | .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, | 2480 | .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, |
1879 | .terminate_rport_io = qla2x00_terminate_rport_io, | 2481 | .terminate_rport_io = qla2x00_terminate_rport_io, |
1880 | .get_fc_host_stats = qla2x00_get_fc_host_stats, | 2482 | .get_fc_host_stats = qla2x00_get_fc_host_stats, |
2483 | .bsg_request = qla24xx_bsg_request, | ||
2484 | .bsg_timeout = qla24xx_bsg_timeout, | ||
1881 | }; | 2485 | }; |
1882 | 2486 | ||
1883 | void | 2487 | void |
@@ -1906,3 +2510,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) | |||
1906 | speed = FC_PORTSPEED_1GBIT; | 2510 | speed = FC_PORTSPEED_1GBIT; |
1907 | fc_host_supported_speeds(vha->host) = speed; | 2511 | fc_host_supported_speeds(vha->host) = speed; |
1908 | } | 2512 | } |
2513 | static int | ||
2514 | qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job) | ||
2515 | { | ||
2516 | int ret = 0; | ||
2517 | int cmd; | ||
2518 | uint16_t cmd_status; | ||
2519 | |||
2520 | DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no)); | ||
2521 | |||
2522 | cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2)) | ||
2523 | == A84_RESET_FLAG_ENABLE_DIAG_FW ? | ||
2524 | A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW; | ||
2525 | ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW, | ||
2526 | &cmd_status); | ||
2527 | return ret; | ||
2528 | } | ||
2529 | |||
2530 | static int | ||
2531 | qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job) | ||
2532 | { | ||
2533 | struct access_chip_84xx *mn; | ||
2534 | dma_addr_t mn_dma, mgmt_dma; | ||
2535 | void *mgmt_b = NULL; | ||
2536 | int ret = 0; | ||
2537 | int rsp_hdr_len, len = 0; | ||
2538 | struct qla84_msg_mgmt *ql84_mgmt; | ||
2539 | |||
2540 | ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt)); | ||
2541 | ql84_mgmt->cmd = | ||
2542 | *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2)); | ||
2543 | ql84_mgmt->mgmtp.u.mem.start_addr = | ||
2544 | *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3)); | ||
2545 | ql84_mgmt->len = | ||
2546 | *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4)); | ||
2547 | ql84_mgmt->mgmtp.u.config.id = | ||
2548 | *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5)); | ||
2549 | ql84_mgmt->mgmtp.u.config.param0 = | ||
2550 | *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6)); | ||
2551 | ql84_mgmt->mgmtp.u.config.param1 = | ||
2552 | *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7)); | ||
2553 | ql84_mgmt->mgmtp.u.info.type = | ||
2554 | *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8)); | ||
2555 | ql84_mgmt->mgmtp.u.info.context = | ||
2556 | *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9)); | ||
2557 | |||
2558 | rsp_hdr_len = bsg_job->request_payload.payload_len; | ||
2559 | |||
2560 | mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma); | ||
2561 | if (mn == NULL) { | ||
2562 | DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " | ||
2563 | "failed%lu\n", __func__, ha->host_no)); | ||
2564 | return -ENOMEM; | ||
2565 | } | ||
2566 | |||
2567 | memset(mn, 0, sizeof (struct access_chip_84xx)); | ||
2568 | |||
2569 | mn->entry_type = ACCESS_CHIP_IOCB_TYPE; | ||
2570 | mn->entry_count = 1; | ||
2571 | |||
2572 | switch (ql84_mgmt->cmd) { | ||
2573 | case QLA84_MGMT_READ_MEM: | ||
2574 | mn->options = cpu_to_le16(ACO_DUMP_MEMORY); | ||
2575 | mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr); | ||
2576 | break; | ||
2577 | case QLA84_MGMT_WRITE_MEM: | ||
2578 | mn->options = cpu_to_le16(ACO_LOAD_MEMORY); | ||
2579 | mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr); | ||
2580 | break; | ||
2581 | case QLA84_MGMT_CHNG_CONFIG: | ||
2582 | mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM); | ||
2583 | mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id); | ||
2584 | mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0); | ||
2585 | mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1); | ||
2586 | break; | ||
2587 | case QLA84_MGMT_GET_INFO: | ||
2588 | mn->options = cpu_to_le16(ACO_REQUEST_INFO); | ||
2589 | mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type); | ||
2590 | mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context); | ||
2591 | break; | ||
2592 | default: | ||
2593 | ret = -EIO; | ||
2594 | goto exit_mgmt0; | ||
2595 | } | ||
2596 | |||
2597 | if ((len == ql84_mgmt->len) && | ||
2598 | ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) { | ||
2599 | mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len, | ||
2600 | &mgmt_dma, GFP_KERNEL); | ||
2601 | if (mgmt_b == NULL) { | ||
2602 | DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b " | ||
2603 | "failed%lu\n", __func__, ha->host_no)); | ||
2604 | ret = -ENOMEM; | ||
2605 | goto exit_mgmt0; | ||
2606 | } | ||
2607 | mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len); | ||
2608 | mn->dseg_count = cpu_to_le16(1); | ||
2609 | mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma)); | ||
2610 | mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma)); | ||
2611 | mn->dseg_length = cpu_to_le32(len); | ||
2612 | |||
2613 | if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) { | ||
2614 | memcpy(mgmt_b, ql84_mgmt->payload, len); | ||
2615 | } | ||
2616 | } | ||
2617 | |||
2618 | ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0); | ||
2619 | if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) | ||
2620 | || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) { | ||
2621 | if (ret != QLA_SUCCESS) | ||
2622 | DEBUG2(printk(KERN_ERR "%s(%lu): failed\n", | ||
2623 | __func__, ha->host_no)); | ||
2624 | } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) || | ||
2625 | (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) { | ||
2626 | } | ||
2627 | |||
2628 | if (mgmt_b) | ||
2629 | dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma); | ||
2630 | |||
2631 | exit_mgmt0: | ||
2632 | dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma); | ||
2633 | return ret; | ||
2634 | } | ||
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 1263d9796e89..afa95614aaf8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <scsi/scsi_device.h> | 31 | #include <scsi/scsi_device.h> |
32 | #include <scsi/scsi_cmnd.h> | 32 | #include <scsi/scsi_cmnd.h> |
33 | #include <scsi/scsi_transport_fc.h> | 33 | #include <scsi/scsi_transport_fc.h> |
34 | #include <scsi/scsi_bsg_fc.h> | ||
34 | 35 | ||
35 | #define QLA2XXX_DRIVER_NAME "qla2xxx" | 36 | #define QLA2XXX_DRIVER_NAME "qla2xxx" |
36 | 37 | ||
@@ -228,6 +229,27 @@ struct srb_logio { | |||
228 | uint16_t flags; | 229 | uint16_t flags; |
229 | }; | 230 | }; |
230 | 231 | ||
232 | struct srb_bsg_ctx { | ||
233 | #define SRB_ELS_CMD_RPT 3 | ||
234 | #define SRB_ELS_CMD_HST 4 | ||
235 | #define SRB_CT_CMD 5 | ||
236 | uint16_t type; | ||
237 | }; | ||
238 | |||
239 | struct srb_bsg { | ||
240 | struct srb_bsg_ctx ctx; | ||
241 | struct fc_bsg_job *bsg_job; | ||
242 | }; | ||
243 | |||
244 | struct msg_echo_lb { | ||
245 | dma_addr_t send_dma; | ||
246 | dma_addr_t rcv_dma; | ||
247 | uint16_t req_sg_cnt; | ||
248 | uint16_t rsp_sg_cnt; | ||
249 | uint16_t options; | ||
250 | uint32_t transfer_size; | ||
251 | }; | ||
252 | |||
231 | /* | 253 | /* |
232 | * ISP I/O Register Set structure definitions. | 254 | * ISP I/O Register Set structure definitions. |
233 | */ | 255 | */ |
@@ -522,6 +544,8 @@ typedef struct { | |||
522 | #define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ | 544 | #define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ |
523 | #define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ | 545 | #define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ |
524 | 546 | ||
547 | /* ISP mailbox loopback echo diagnostic error code */ | ||
548 | #define MBS_LB_RESET 0x17 | ||
525 | /* | 549 | /* |
526 | * Firmware options 1, 2, 3. | 550 | * Firmware options 1, 2, 3. |
527 | */ | 551 | */ |
@@ -2230,6 +2254,13 @@ struct req_que { | |||
2230 | int max_q_depth; | 2254 | int max_q_depth; |
2231 | }; | 2255 | }; |
2232 | 2256 | ||
2257 | /* Place holder for FW buffer parameters */ | ||
2258 | struct qlfc_fw { | ||
2259 | void *fw_buf; | ||
2260 | dma_addr_t fw_dma; | ||
2261 | uint32_t len; | ||
2262 | }; | ||
2263 | |||
2233 | /* | 2264 | /* |
2234 | * Qlogic host adapter specific data structure. | 2265 | * Qlogic host adapter specific data structure. |
2235 | */ | 2266 | */ |
@@ -2594,6 +2625,7 @@ struct qla_hw_data { | |||
2594 | struct qla_statistics qla_stats; | 2625 | struct qla_statistics qla_stats; |
2595 | struct isp_operations *isp_ops; | 2626 | struct isp_operations *isp_ops; |
2596 | struct workqueue_struct *wq; | 2627 | struct workqueue_struct *wq; |
2628 | struct qlfc_fw fw_buf; | ||
2597 | }; | 2629 | }; |
2598 | 2630 | ||
2599 | /* | 2631 | /* |
@@ -2766,4 +2798,127 @@ typedef struct scsi_qla_host { | |||
2766 | 2798 | ||
2767 | #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) | 2799 | #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) |
2768 | 2800 | ||
2801 | /* | ||
2802 | * BSG Vendor specific commands | ||
2803 | */ | ||
2804 | |||
2805 | #define QL_VND_LOOPBACK 0x01 | ||
2806 | #define QLA84_RESET 0x02 | ||
2807 | #define QLA84_UPDATE_FW 0x03 | ||
2808 | #define QLA84_MGMT_CMD 0x04 | ||
2809 | |||
2810 | /* BSG definations for interpreting CommandSent field */ | ||
2811 | #define INT_DEF_LB_LOOPBACK_CMD 0 | ||
2812 | #define INT_DEF_LB_ECHO_CMD 1 | ||
2813 | |||
2814 | /* BSG Vendor specific definations */ | ||
2815 | typedef struct _A84_RESET { | ||
2816 | uint16_t Flags; | ||
2817 | uint16_t Reserved; | ||
2818 | #define A84_RESET_FLAG_ENABLE_DIAG_FW 1 | ||
2819 | } __attribute__((packed)) A84_RESET, *PA84_RESET; | ||
2820 | |||
2821 | #define A84_ISSUE_WRITE_TYPE_CMD 0 | ||
2822 | #define A84_ISSUE_READ_TYPE_CMD 1 | ||
2823 | #define A84_CLEANUP_CMD 2 | ||
2824 | #define A84_ISSUE_RESET_OP_FW 3 | ||
2825 | #define A84_ISSUE_RESET_DIAG_FW 4 | ||
2826 | #define A84_ISSUE_UPDATE_OPFW_CMD 5 | ||
2827 | #define A84_ISSUE_UPDATE_DIAGFW_CMD 6 | ||
2828 | |||
2829 | struct qla84_mgmt_param { | ||
2830 | union { | ||
2831 | struct { | ||
2832 | uint32_t start_addr; | ||
2833 | } mem; /* for QLA84_MGMT_READ/WRITE_MEM */ | ||
2834 | struct { | ||
2835 | uint32_t id; | ||
2836 | #define QLA84_MGMT_CONFIG_ID_UIF 1 | ||
2837 | #define QLA84_MGMT_CONFIG_ID_FCOE_COS 2 | ||
2838 | #define QLA84_MGMT_CONFIG_ID_PAUSE 3 | ||
2839 | #define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4 | ||
2840 | |||
2841 | uint32_t param0; | ||
2842 | uint32_t param1; | ||
2843 | } config; /* for QLA84_MGMT_CHNG_CONFIG */ | ||
2844 | |||
2845 | struct { | ||
2846 | uint32_t type; | ||
2847 | #define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */ | ||
2848 | #define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */ | ||
2849 | #define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */ | ||
2850 | #define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */ | ||
2851 | #define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */ | ||
2852 | #define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */ | ||
2853 | #define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */ | ||
2854 | |||
2855 | uint32_t context; | ||
2856 | /* | ||
2857 | * context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA | ||
2858 | */ | ||
2859 | #define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0 | ||
2860 | #define IC_LOG_DATA_LOG_ID_LEARN_LOG 1 | ||
2861 | #define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2 | ||
2862 | #define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3 | ||
2863 | #define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4 | ||
2864 | #define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5 | ||
2865 | #define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6 | ||
2866 | #define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7 | ||
2867 | #define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8 | ||
2868 | #define IC_LOG_DATA_LOG_ID_DCX_LOG 9 | ||
2869 | |||
2870 | /* | ||
2871 | * context definitions for QLA84_MGMT_INFO_PORT_STAT | ||
2872 | */ | ||
2873 | #define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0 | ||
2874 | #define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1 | ||
2875 | #define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2 | ||
2876 | #define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3 | ||
2877 | #define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4 | ||
2878 | #define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5 | ||
2879 | |||
2880 | |||
2881 | /* | ||
2882 | * context definitions for QLA84_MGMT_INFO_LIF_STAT | ||
2883 | */ | ||
2884 | #define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0 | ||
2885 | #define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1 | ||
2886 | #define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2 | ||
2887 | #define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3 | ||
2888 | #define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6 | ||
2889 | |||
2890 | } info; /* for QLA84_MGMT_GET_INFO */ | ||
2891 | } u; | ||
2892 | }; | ||
2893 | |||
2894 | struct qla84_msg_mgmt { | ||
2895 | uint16_t cmd; | ||
2896 | #define QLA84_MGMT_READ_MEM 0x00 | ||
2897 | #define QLA84_MGMT_WRITE_MEM 0x01 | ||
2898 | #define QLA84_MGMT_CHNG_CONFIG 0x02 | ||
2899 | #define QLA84_MGMT_GET_INFO 0x03 | ||
2900 | uint16_t rsrvd; | ||
2901 | struct qla84_mgmt_param mgmtp;/* parameters for cmd */ | ||
2902 | uint32_t len; /* bytes in payload following this struct */ | ||
2903 | uint8_t payload[0]; /* payload for cmd */ | ||
2904 | }; | ||
2905 | |||
2906 | struct msg_update_fw { | ||
2907 | /* | ||
2908 | * diag_fw = 0 operational fw | ||
2909 | * otherwise diagnostic fw | ||
2910 | * offset, len, fw_len are present to overcome the current limitation | ||
2911 | * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk | ||
2912 | * specifies the byte "offset" where it fits in the fw buffer. The | ||
2913 | * number of bytes in each chunk is specified in "len". "fw_len" | ||
2914 | * is the total size of fw. The first chunk should start at offset = 0. | ||
2915 | * When offset+len == fw_len, the fw is written to the HBA. | ||
2916 | */ | ||
2917 | uint32_t diag_fw; | ||
2918 | uint32_t offset;/* start offset */ | ||
2919 | uint32_t len; /* num bytes in cur xfer */ | ||
2920 | uint32_t fw_len; /* size of fw in bytes */ | ||
2921 | uint8_t fw_bytes[0]; | ||
2922 | }; | ||
2923 | |||
2769 | #endif | 2924 | #endif |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 66a8da5d7d08..cebf4f1bb7d9 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -627,6 +627,39 @@ struct els_entry_24xx { | |||
627 | uint32_t rx_len; /* Data segment 1 length. */ | 627 | uint32_t rx_len; /* Data segment 1 length. */ |
628 | }; | 628 | }; |
629 | 629 | ||
630 | struct els_sts_entry_24xx { | ||
631 | uint8_t entry_type; /* Entry type. */ | ||
632 | uint8_t entry_count; /* Entry count. */ | ||
633 | uint8_t sys_define; /* System Defined. */ | ||
634 | uint8_t entry_status; /* Entry Status. */ | ||
635 | |||
636 | uint32_t handle; /* System handle. */ | ||
637 | |||
638 | uint16_t comp_status; | ||
639 | |||
640 | uint16_t nport_handle; /* N_PORT handle. */ | ||
641 | |||
642 | uint16_t reserved_1; | ||
643 | |||
644 | uint8_t vp_index; | ||
645 | uint8_t sof_type; | ||
646 | |||
647 | uint32_t rx_xchg_address; /* Receive exchange address. */ | ||
648 | uint16_t reserved_2; | ||
649 | |||
650 | uint8_t opcode; | ||
651 | uint8_t reserved_3; | ||
652 | |||
653 | uint8_t port_id[3]; | ||
654 | uint8_t reserved_4; | ||
655 | |||
656 | uint16_t reserved_5; | ||
657 | |||
658 | uint16_t control_flags; /* Control flags. */ | ||
659 | uint32_t total_byte_count; | ||
660 | uint32_t error_subcode_1; | ||
661 | uint32_t error_subcode_2; | ||
662 | }; | ||
630 | /* | 663 | /* |
631 | * ISP queue - Mailbox Command entry structure definition. | 664 | * ISP queue - Mailbox Command entry structure definition. |
632 | */ | 665 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 8bc6f53691e9..3a89bc514e2b 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -60,6 +60,8 @@ extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *, | |||
60 | extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, | 60 | extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, |
61 | uint16_t *); | 61 | uint16_t *); |
62 | 62 | ||
63 | extern fc_port_t * | ||
64 | qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); | ||
63 | /* | 65 | /* |
64 | * Global Data in qla_os.c source file. | 66 | * Global Data in qla_os.c source file. |
65 | */ | 67 | */ |
@@ -76,6 +78,7 @@ extern int ql2xiidmaenable; | |||
76 | extern int ql2xmaxqueues; | 78 | extern int ql2xmaxqueues; |
77 | extern int ql2xmultique_tag; | 79 | extern int ql2xmultique_tag; |
78 | extern int ql2xfwloadbin; | 80 | extern int ql2xfwloadbin; |
81 | extern int ql2xetsenable; | ||
79 | 82 | ||
80 | extern int qla2x00_loop_reset(scsi_qla_host_t *); | 83 | extern int qla2x00_loop_reset(scsi_qla_host_t *); |
81 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 84 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
@@ -94,7 +97,6 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); | |||
94 | 97 | ||
95 | extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); | 98 | extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); |
96 | 99 | ||
97 | extern void qla2x00_abort_fcport_cmds(fc_port_t *); | ||
98 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, | 100 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, |
99 | struct qla_hw_data *); | 101 | struct qla_hw_data *); |
100 | extern void qla2x00_free_host(struct scsi_qla_host *); | 102 | extern void qla2x00_free_host(struct scsi_qla_host *); |
@@ -154,6 +156,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, | |||
154 | int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, | 156 | int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, |
155 | uint16_t, uint16_t, uint8_t); | 157 | uint16_t, uint16_t, uint8_t); |
156 | extern int qla2x00_start_sp(srb_t *); | 158 | extern int qla2x00_start_sp(srb_t *); |
159 | extern void qla2x00_ctx_sp_free(srb_t *); | ||
157 | 160 | ||
158 | /* | 161 | /* |
159 | * Global Function Prototypes in qla_mbx.c source file. | 162 | * Global Function Prototypes in qla_mbx.c source file. |
@@ -426,6 +429,8 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); | |||
426 | extern void qla2x00_init_host_attr(scsi_qla_host_t *); | 429 | extern void qla2x00_init_host_attr(scsi_qla_host_t *); |
427 | extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); | 430 | extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); |
428 | extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); | 431 | extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); |
432 | extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *); | ||
433 | extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *); | ||
429 | 434 | ||
430 | /* | 435 | /* |
431 | * Global Function Prototypes in qla_dfs.c source file. | 436 | * Global Function Prototypes in qla_dfs.c source file. |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 3f8e8495b743..a67b2bafb882 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -62,7 +62,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data) | |||
62 | ctx->free(sp); | 62 | ctx->free(sp); |
63 | } | 63 | } |
64 | 64 | ||
65 | static void | 65 | void |
66 | qla2x00_ctx_sp_free(srb_t *sp) | 66 | qla2x00_ctx_sp_free(srb_t *sp) |
67 | { | 67 | { |
68 | struct srb_ctx *ctx = sp->ctx; | 68 | struct srb_ctx *ctx = sp->ctx; |
@@ -338,6 +338,16 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
338 | rval = qla2x00_init_rings(vha); | 338 | rval = qla2x00_init_rings(vha); |
339 | ha->flags.chip_reset_done = 1; | 339 | ha->flags.chip_reset_done = 1; |
340 | 340 | ||
341 | if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { | ||
342 | /* Issue verify 84xx FW IOCB to complete 84xx initialization */ | ||
343 | rval = qla84xx_init_chip(vha); | ||
344 | if (rval != QLA_SUCCESS) { | ||
345 | qla_printk(KERN_ERR, ha, | ||
346 | "Unable to initialize ISP84XX.\n"); | ||
347 | qla84xx_put_chip(vha); | ||
348 | } | ||
349 | } | ||
350 | |||
341 | return (rval); | 351 | return (rval); |
342 | } | 352 | } |
343 | 353 | ||
@@ -2216,7 +2226,7 @@ qla2x00_rport_del(void *data) | |||
2216 | * | 2226 | * |
2217 | * Returns a pointer to the allocated fcport, or NULL, if none available. | 2227 | * Returns a pointer to the allocated fcport, or NULL, if none available. |
2218 | */ | 2228 | */ |
2219 | static fc_port_t * | 2229 | fc_port_t * |
2220 | qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) | 2230 | qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) |
2221 | { | 2231 | { |
2222 | fc_port_t *fcport; | 2232 | fc_port_t *fcport; |
@@ -2900,8 +2910,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
2900 | if (qla2x00_is_reserved_id(vha, loop_id)) | 2910 | if (qla2x00_is_reserved_id(vha, loop_id)) |
2901 | continue; | 2911 | continue; |
2902 | 2912 | ||
2903 | if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha)) | 2913 | if (atomic_read(&vha->loop_down_timer) || |
2914 | LOOP_TRANSITION(vha)) { | ||
2915 | atomic_set(&vha->loop_down_timer, 0); | ||
2916 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | ||
2917 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); | ||
2904 | break; | 2918 | break; |
2919 | } | ||
2905 | 2920 | ||
2906 | if (swl != NULL) { | 2921 | if (swl != NULL) { |
2907 | if (last_dev) { | 2922 | if (last_dev) { |
@@ -4877,6 +4892,15 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
4877 | } | 4892 | } |
4878 | 4893 | ||
4879 | void | 4894 | void |
4880 | qla81xx_update_fw_options(scsi_qla_host_t *ha) | 4895 | qla81xx_update_fw_options(scsi_qla_host_t *vha) |
4881 | { | 4896 | { |
4897 | struct qla_hw_data *ha = vha->hw; | ||
4898 | |||
4899 | if (!ql2xetsenable) | ||
4900 | return; | ||
4901 | |||
4902 | /* Enable ETS Burst. */ | ||
4903 | memset(ha->fw_options, 0, sizeof(ha->fw_options)); | ||
4904 | ha->fw_options[2] |= BIT_9; | ||
4905 | qla2x00_set_fw_options(vha, ha->fw_options); | ||
4882 | } | 4906 | } |
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index c5ccac0bef76..8299a9891bfe 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -1025,6 +1025,119 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) | |||
1025 | /* Implicit: mbx->mbx10 = 0. */ | 1025 | /* Implicit: mbx->mbx10 = 0. */ |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | static void | ||
1029 | qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) | ||
1030 | { | ||
1031 | struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job; | ||
1032 | |||
1033 | els_iocb->entry_type = ELS_IOCB_TYPE; | ||
1034 | els_iocb->entry_count = 1; | ||
1035 | els_iocb->sys_define = 0; | ||
1036 | els_iocb->entry_status = 0; | ||
1037 | els_iocb->handle = sp->handle; | ||
1038 | els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | ||
1039 | els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); | ||
1040 | els_iocb->vp_index = sp->fcport->vp_idx; | ||
1041 | els_iocb->sof_type = EST_SOFI3; | ||
1042 | els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); | ||
1043 | |||
1044 | els_iocb->opcode =(((struct srb_bsg*)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ? | ||
1045 | bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code; | ||
1046 | els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; | ||
1047 | els_iocb->port_id[1] = sp->fcport->d_id.b.area; | ||
1048 | els_iocb->port_id[2] = sp->fcport->d_id.b.domain; | ||
1049 | els_iocb->control_flags = 0; | ||
1050 | els_iocb->rx_byte_count = | ||
1051 | cpu_to_le32(bsg_job->reply_payload.payload_len); | ||
1052 | els_iocb->tx_byte_count = | ||
1053 | cpu_to_le32(bsg_job->request_payload.payload_len); | ||
1054 | |||
1055 | els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address | ||
1056 | (bsg_job->request_payload.sg_list))); | ||
1057 | els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address | ||
1058 | (bsg_job->request_payload.sg_list))); | ||
1059 | els_iocb->tx_len = cpu_to_le32(sg_dma_len | ||
1060 | (bsg_job->request_payload.sg_list)); | ||
1061 | |||
1062 | els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address | ||
1063 | (bsg_job->reply_payload.sg_list))); | ||
1064 | els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address | ||
1065 | (bsg_job->reply_payload.sg_list))); | ||
1066 | els_iocb->rx_len = cpu_to_le32(sg_dma_len | ||
1067 | (bsg_job->reply_payload.sg_list)); | ||
1068 | } | ||
1069 | |||
1070 | static void | ||
1071 | qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) | ||
1072 | { | ||
1073 | uint16_t avail_dsds; | ||
1074 | uint32_t *cur_dsd; | ||
1075 | struct scatterlist *sg; | ||
1076 | int index; | ||
1077 | uint16_t tot_dsds; | ||
1078 | scsi_qla_host_t *vha = sp->fcport->vha; | ||
1079 | struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job; | ||
1080 | int loop_iterartion = 0; | ||
1081 | int cont_iocb_prsnt = 0; | ||
1082 | int entry_count = 1; | ||
1083 | |||
1084 | ct_iocb->entry_type = CT_IOCB_TYPE; | ||
1085 | ct_iocb->entry_status = 0; | ||
1086 | ct_iocb->sys_define = 0; | ||
1087 | ct_iocb->handle = sp->handle; | ||
1088 | |||
1089 | ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | ||
1090 | ct_iocb->vp_index = sp->fcport->vp_idx; | ||
1091 | ct_iocb->comp_status = __constant_cpu_to_le16(0); | ||
1092 | |||
1093 | ct_iocb->cmd_dsd_count = | ||
1094 | __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); | ||
1095 | ct_iocb->timeout = 0; | ||
1096 | ct_iocb->rsp_dsd_count = | ||
1097 | __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); | ||
1098 | ct_iocb->rsp_byte_count = | ||
1099 | cpu_to_le32(bsg_job->reply_payload.payload_len); | ||
1100 | ct_iocb->cmd_byte_count = | ||
1101 | cpu_to_le32(bsg_job->request_payload.payload_len); | ||
1102 | ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address | ||
1103 | (bsg_job->request_payload.sg_list))); | ||
1104 | ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address | ||
1105 | (bsg_job->request_payload.sg_list))); | ||
1106 | ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len | ||
1107 | (bsg_job->request_payload.sg_list)); | ||
1108 | |||
1109 | avail_dsds = 1; | ||
1110 | cur_dsd = (uint32_t *)ct_iocb->dseg_1_address; | ||
1111 | index = 0; | ||
1112 | tot_dsds = bsg_job->reply_payload.sg_cnt; | ||
1113 | |||
1114 | for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { | ||
1115 | dma_addr_t sle_dma; | ||
1116 | cont_a64_entry_t *cont_pkt; | ||
1117 | |||
1118 | /* Allocate additional continuation packets? */ | ||
1119 | if (avail_dsds == 0) { | ||
1120 | /* | ||
1121 | * Five DSDs are available in the Cont. | ||
1122 | * Type 1 IOCB. | ||
1123 | */ | ||
1124 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha); | ||
1125 | cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; | ||
1126 | avail_dsds = 5; | ||
1127 | cont_iocb_prsnt = 1; | ||
1128 | entry_count++; | ||
1129 | } | ||
1130 | |||
1131 | sle_dma = sg_dma_address(sg); | ||
1132 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
1133 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
1134 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); | ||
1135 | loop_iterartion++; | ||
1136 | avail_dsds--; | ||
1137 | } | ||
1138 | ct_iocb->entry_count = entry_count; | ||
1139 | } | ||
1140 | |||
1028 | int | 1141 | int |
1029 | qla2x00_start_sp(srb_t *sp) | 1142 | qla2x00_start_sp(srb_t *sp) |
1030 | { | 1143 | { |
@@ -1052,6 +1165,13 @@ qla2x00_start_sp(srb_t *sp) | |||
1052 | qla24xx_logout_iocb(sp, pkt): | 1165 | qla24xx_logout_iocb(sp, pkt): |
1053 | qla2x00_logout_iocb(sp, pkt); | 1166 | qla2x00_logout_iocb(sp, pkt); |
1054 | break; | 1167 | break; |
1168 | case SRB_ELS_CMD_RPT: | ||
1169 | case SRB_ELS_CMD_HST: | ||
1170 | qla24xx_els_iocb(sp, pkt); | ||
1171 | break; | ||
1172 | case SRB_CT_CMD: | ||
1173 | qla24xx_ct_iocb(sp, pkt); | ||
1174 | break; | ||
1055 | default: | 1175 | default: |
1056 | break; | 1176 | break; |
1057 | } | 1177 | } |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6fc63b98818c..ab90329ff2e4 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
10 | #include <scsi/scsi_tcq.h> | 10 | #include <scsi/scsi_tcq.h> |
11 | #include <scsi/scsi_bsg_fc.h> | ||
11 | 12 | ||
12 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); | 13 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); |
13 | static void qla2x00_process_completed_request(struct scsi_qla_host *, | 14 | static void qla2x00_process_completed_request(struct scsi_qla_host *, |
@@ -881,7 +882,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, | |||
881 | index); | 882 | index); |
882 | return NULL; | 883 | return NULL; |
883 | } | 884 | } |
885 | |||
884 | req->outstanding_cmds[index] = NULL; | 886 | req->outstanding_cmds[index] = NULL; |
887 | |||
885 | done: | 888 | done: |
886 | return sp; | 889 | return sp; |
887 | } | 890 | } |
@@ -982,6 +985,100 @@ done_post_logio_done_work: | |||
982 | } | 985 | } |
983 | 986 | ||
984 | static void | 987 | static void |
988 | qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, | ||
989 | struct sts_entry_24xx *pkt, int iocb_type) | ||
990 | { | ||
991 | const char func[] = "ELS_CT_IOCB"; | ||
992 | const char *type; | ||
993 | struct qla_hw_data *ha = vha->hw; | ||
994 | srb_t *sp; | ||
995 | struct srb_bsg *sp_bsg; | ||
996 | struct fc_bsg_job *bsg_job; | ||
997 | uint16_t comp_status; | ||
998 | uint32_t fw_status[3]; | ||
999 | uint8_t* fw_sts_ptr; | ||
1000 | |||
1001 | sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); | ||
1002 | if (!sp) | ||
1003 | return; | ||
1004 | sp_bsg = (struct srb_bsg*)sp->ctx; | ||
1005 | bsg_job = sp_bsg->bsg_job; | ||
1006 | |||
1007 | type = NULL; | ||
1008 | switch (sp_bsg->ctx.type) { | ||
1009 | case SRB_ELS_CMD_RPT: | ||
1010 | case SRB_ELS_CMD_HST: | ||
1011 | type = "els"; | ||
1012 | break; | ||
1013 | case SRB_CT_CMD: | ||
1014 | type = "ct pass-through"; | ||
1015 | break; | ||
1016 | default: | ||
1017 | qla_printk(KERN_WARNING, ha, | ||
1018 | "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, | ||
1019 | sp_bsg->ctx.type); | ||
1020 | return; | ||
1021 | } | ||
1022 | |||
1023 | comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); | ||
1024 | fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); | ||
1025 | fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); | ||
1026 | |||
1027 | /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT | ||
1028 | * fc payload to the caller | ||
1029 | */ | ||
1030 | bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; | ||
1031 | bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); | ||
1032 | |||
1033 | if (comp_status != CS_COMPLETE) { | ||
1034 | if (comp_status == CS_DATA_UNDERRUN) { | ||
1035 | bsg_job->reply->result = DID_OK << 16; | ||
1036 | bsg_job->reply->reply_payload_rcv_len = | ||
1037 | le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); | ||
1038 | |||
1039 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
1040 | "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " | ||
1041 | "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", | ||
1042 | vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2], | ||
1043 | le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count))); | ||
1044 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); | ||
1045 | memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); | ||
1046 | } | ||
1047 | else { | ||
1048 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
1049 | "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " | ||
1050 | "error subcode 1=0x%x error subcode 2=0x%x.\n", | ||
1051 | vha->host_no, sp->handle, type, comp_status, | ||
1052 | le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1), | ||
1053 | le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2))); | ||
1054 | bsg_job->reply->result = DID_ERROR << 16; | ||
1055 | bsg_job->reply->reply_payload_rcv_len = 0; | ||
1056 | fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); | ||
1057 | memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); | ||
1058 | } | ||
1059 | DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); | ||
1060 | } | ||
1061 | else { | ||
1062 | bsg_job->reply->result = DID_OK << 16;; | ||
1063 | bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; | ||
1064 | bsg_job->reply_len = 0; | ||
1065 | } | ||
1066 | |||
1067 | dma_unmap_sg(&ha->pdev->dev, | ||
1068 | bsg_job->request_payload.sg_list, | ||
1069 | bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1070 | dma_unmap_sg(&ha->pdev->dev, | ||
1071 | bsg_job->reply_payload.sg_list, | ||
1072 | bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
1073 | if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) || | ||
1074 | (sp_bsg->ctx.type == SRB_CT_CMD)) | ||
1075 | kfree(sp->fcport); | ||
1076 | kfree(sp->ctx); | ||
1077 | mempool_free(sp, ha->srb_mempool); | ||
1078 | bsg_job->job_done(bsg_job); | ||
1079 | } | ||
1080 | |||
1081 | static void | ||
985 | qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | 1082 | qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, |
986 | struct logio_entry_24xx *logio) | 1083 | struct logio_entry_24xx *logio) |
987 | { | 1084 | { |
@@ -1749,6 +1846,13 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
1749 | qla24xx_logio_entry(vha, rsp->req, | 1846 | qla24xx_logio_entry(vha, rsp->req, |
1750 | (struct logio_entry_24xx *)pkt); | 1847 | (struct logio_entry_24xx *)pkt); |
1751 | break; | 1848 | break; |
1849 | case CT_IOCB_TYPE: | ||
1850 | qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); | ||
1851 | clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); | ||
1852 | break; | ||
1853 | case ELS_IOCB_TYPE: | ||
1854 | qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); | ||
1855 | break; | ||
1752 | default: | 1856 | default: |
1753 | /* Type Not Supported. */ | 1857 | /* Type Not Supported. */ |
1754 | DEBUG4(printk(KERN_WARNING | 1858 | DEBUG4(printk(KERN_WARNING |
@@ -2049,7 +2153,6 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
2049 | set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); | 2153 | set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); |
2050 | complete(&ha->mbx_intr_comp); | 2154 | complete(&ha->mbx_intr_comp); |
2051 | } | 2155 | } |
2052 | |||
2053 | return IRQ_HANDLED; | 2156 | return IRQ_HANDLED; |
2054 | } | 2157 | } |
2055 | 2158 | ||
@@ -2255,10 +2358,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) | |||
2255 | 2358 | ||
2256 | if (ha->flags.msix_enabled) | 2359 | if (ha->flags.msix_enabled) |
2257 | qla24xx_disable_msix(ha); | 2360 | qla24xx_disable_msix(ha); |
2258 | else if (ha->flags.inta_enabled) { | 2361 | else if (ha->flags.msi_enabled) { |
2259 | free_irq(ha->pdev->irq, rsp); | 2362 | free_irq(ha->pdev->irq, rsp); |
2260 | pci_disable_msi(ha->pdev); | 2363 | pci_disable_msi(ha->pdev); |
2261 | } | 2364 | } else |
2365 | free_irq(ha->pdev->irq, rsp); | ||
2262 | } | 2366 | } |
2263 | 2367 | ||
2264 | 2368 | ||
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 056e4d4505f3..6e53bdbb1da8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -3636,6 +3636,157 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) | |||
3636 | } | 3636 | } |
3637 | 3637 | ||
3638 | int | 3638 | int |
3639 | qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) | ||
3640 | { | ||
3641 | int rval; | ||
3642 | mbx_cmd_t mc; | ||
3643 | mbx_cmd_t *mcp = &mc; | ||
3644 | uint32_t iter_cnt = 0x1; | ||
3645 | |||
3646 | DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); | ||
3647 | |||
3648 | memset(mcp->mb, 0 , sizeof(mcp->mb)); | ||
3649 | mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; | ||
3650 | mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing | ||
3651 | |||
3652 | /* transfer count */ | ||
3653 | mcp->mb[10] = LSW(mreq->transfer_size); | ||
3654 | mcp->mb[11] = MSW(mreq->transfer_size); | ||
3655 | |||
3656 | /* send data address */ | ||
3657 | mcp->mb[14] = LSW(mreq->send_dma); | ||
3658 | mcp->mb[15] = MSW(mreq->send_dma); | ||
3659 | mcp->mb[20] = LSW(MSD(mreq->send_dma)); | ||
3660 | mcp->mb[21] = MSW(MSD(mreq->send_dma)); | ||
3661 | |||
3662 | /* recieve data address */ | ||
3663 | mcp->mb[16] = LSW(mreq->rcv_dma); | ||
3664 | mcp->mb[17] = MSW(mreq->rcv_dma); | ||
3665 | mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); | ||
3666 | mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); | ||
3667 | |||
3668 | /* Iteration count */ | ||
3669 | mcp->mb[18] = LSW(iter_cnt); | ||
3670 | mcp->mb[19] = MSW(iter_cnt); | ||
3671 | |||
3672 | mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| | ||
3673 | MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; | ||
3674 | if (IS_QLA81XX(vha->hw)) | ||
3675 | mcp->out_mb |= MBX_2; | ||
3676 | mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3677 | |||
3678 | mcp->buf_size = mreq->transfer_size; | ||
3679 | mcp->tov = MBX_TOV_SECONDS; | ||
3680 | mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; | ||
3681 | |||
3682 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3683 | |||
3684 | if (rval != QLA_SUCCESS) { | ||
3685 | DEBUG2(printk(KERN_WARNING | ||
3686 | "(%ld): failed=%x mb[0]=0x%x " | ||
3687 | "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval, | ||
3688 | mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19])); | ||
3689 | } else { | ||
3690 | DEBUG2(printk(KERN_WARNING | ||
3691 | "scsi(%ld): done.\n", vha->host_no)); | ||
3692 | } | ||
3693 | |||
3694 | /* Copy mailbox information */ | ||
3695 | memcpy( mresp, mcp->mb, 64); | ||
3696 | mresp[3] = mcp->mb[18]; | ||
3697 | mresp[4] = mcp->mb[19]; | ||
3698 | return rval; | ||
3699 | } | ||
3700 | |||
3701 | int | ||
3702 | qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) | ||
3703 | { | ||
3704 | int rval; | ||
3705 | mbx_cmd_t mc; | ||
3706 | mbx_cmd_t *mcp = &mc; | ||
3707 | struct qla_hw_data *ha = vha->hw; | ||
3708 | |||
3709 | DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); | ||
3710 | |||
3711 | memset(mcp->mb, 0 , sizeof(mcp->mb)); | ||
3712 | mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; | ||
3713 | mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ | ||
3714 | if (IS_QLA81XX(ha)) | ||
3715 | mcp->mb[1] |= BIT_15; | ||
3716 | mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0; | ||
3717 | mcp->mb[16] = LSW(mreq->rcv_dma); | ||
3718 | mcp->mb[17] = MSW(mreq->rcv_dma); | ||
3719 | mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); | ||
3720 | mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); | ||
3721 | |||
3722 | mcp->mb[10] = LSW(mreq->transfer_size); | ||
3723 | |||
3724 | mcp->mb[14] = LSW(mreq->send_dma); | ||
3725 | mcp->mb[15] = MSW(mreq->send_dma); | ||
3726 | mcp->mb[20] = LSW(MSD(mreq->send_dma)); | ||
3727 | mcp->mb[21] = MSW(MSD(mreq->send_dma)); | ||
3728 | |||
3729 | mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| | ||
3730 | MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; | ||
3731 | if (IS_QLA81XX(ha)) | ||
3732 | mcp->out_mb |= MBX_2; | ||
3733 | |||
3734 | mcp->in_mb = MBX_0; | ||
3735 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) | ||
3736 | mcp->in_mb |= MBX_1; | ||
3737 | if (IS_QLA81XX(ha)) | ||
3738 | mcp->in_mb |= MBX_3; | ||
3739 | |||
3740 | mcp->tov = MBX_TOV_SECONDS; | ||
3741 | mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; | ||
3742 | mcp->buf_size = mreq->transfer_size; | ||
3743 | |||
3744 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3745 | |||
3746 | if (rval != QLA_SUCCESS) { | ||
3747 | DEBUG2(printk(KERN_WARNING | ||
3748 | "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", | ||
3749 | vha->host_no, rval, mcp->mb[0], mcp->mb[1])); | ||
3750 | } else { | ||
3751 | DEBUG2(printk(KERN_WARNING | ||
3752 | "scsi(%ld): done.\n", vha->host_no)); | ||
3753 | } | ||
3754 | |||
3755 | /* Copy mailbox information */ | ||
3756 | memcpy( mresp, mcp->mb, 32); | ||
3757 | return rval; | ||
3758 | } | ||
3759 | int | ||
3760 | qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic, | ||
3761 | uint16_t *cmd_status) | ||
3762 | { | ||
3763 | int rval; | ||
3764 | mbx_cmd_t mc; | ||
3765 | mbx_cmd_t *mcp = &mc; | ||
3766 | |||
3767 | DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__, | ||
3768 | ha->host_no, enable_diagnostic)); | ||
3769 | |||
3770 | mcp->mb[0] = MBC_ISP84XX_RESET; | ||
3771 | mcp->mb[1] = enable_diagnostic; | ||
3772 | mcp->out_mb = MBX_1|MBX_0; | ||
3773 | mcp->in_mb = MBX_1|MBX_0; | ||
3774 | mcp->tov = MBX_TOV_SECONDS; | ||
3775 | mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; | ||
3776 | rval = qla2x00_mailbox_command(ha, mcp); | ||
3777 | |||
3778 | /* Return mailbox statuses. */ | ||
3779 | *cmd_status = mcp->mb[0]; | ||
3780 | if (rval != QLA_SUCCESS) | ||
3781 | DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, | ||
3782 | rval)); | ||
3783 | else | ||
3784 | DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); | ||
3785 | |||
3786 | return rval; | ||
3787 | } | ||
3788 | |||
3789 | int | ||
3639 | qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) | 3790 | qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) |
3640 | { | 3791 | { |
3641 | int rval; | 3792 | int rval; |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8529eb1f3cd4..46720b23028f 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -107,6 +107,12 @@ MODULE_PARM_DESC(ql2xfwloadbin, | |||
107 | " 1 -- load firmware from flash.\n" | 107 | " 1 -- load firmware from flash.\n" |
108 | " 0 -- use default semantics.\n"); | 108 | " 0 -- use default semantics.\n"); |
109 | 109 | ||
110 | int ql2xetsenable; | ||
111 | module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR); | ||
112 | MODULE_PARM_DESC(ql2xetsenable, | ||
113 | "Enables firmware ETS burst." | ||
114 | "Default is 0 - skip ETS enablement."); | ||
115 | |||
110 | /* | 116 | /* |
111 | * SCSI host template entry points | 117 | * SCSI host template entry points |
112 | */ | 118 | */ |
@@ -682,44 +688,6 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha) | |||
682 | return (return_status); | 688 | return (return_status); |
683 | } | 689 | } |
684 | 690 | ||
685 | void | ||
686 | qla2x00_abort_fcport_cmds(fc_port_t *fcport) | ||
687 | { | ||
688 | int cnt; | ||
689 | unsigned long flags; | ||
690 | srb_t *sp; | ||
691 | scsi_qla_host_t *vha = fcport->vha; | ||
692 | struct qla_hw_data *ha = vha->hw; | ||
693 | struct req_que *req; | ||
694 | |||
695 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
696 | req = vha->req; | ||
697 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | ||
698 | sp = req->outstanding_cmds[cnt]; | ||
699 | if (!sp) | ||
700 | continue; | ||
701 | if (sp->fcport != fcport) | ||
702 | continue; | ||
703 | if (sp->ctx) | ||
704 | continue; | ||
705 | |||
706 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
707 | if (ha->isp_ops->abort_command(sp)) { | ||
708 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
709 | "Abort failed -- %lx\n", | ||
710 | sp->cmd->serial_number)); | ||
711 | } else { | ||
712 | if (qla2x00_eh_wait_on_command(sp->cmd) != | ||
713 | QLA_SUCCESS) | ||
714 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
715 | "Abort failed while waiting -- %lx\n", | ||
716 | sp->cmd->serial_number)); | ||
717 | } | ||
718 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
719 | } | ||
720 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
721 | } | ||
722 | |||
723 | /************************************************************************** | 691 | /************************************************************************** |
724 | * qla2xxx_eh_abort | 692 | * qla2xxx_eh_abort |
725 | * | 693 | * |
@@ -1095,6 +1063,20 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
1095 | struct fc_port *fcport; | 1063 | struct fc_port *fcport; |
1096 | struct qla_hw_data *ha = vha->hw; | 1064 | struct qla_hw_data *ha = vha->hw; |
1097 | 1065 | ||
1066 | if (ha->flags.enable_target_reset) { | ||
1067 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | ||
1068 | if (fcport->port_type != FCT_TARGET) | ||
1069 | continue; | ||
1070 | |||
1071 | ret = ha->isp_ops->target_reset(fcport, 0, 0); | ||
1072 | if (ret != QLA_SUCCESS) { | ||
1073 | DEBUG2_3(printk("%s(%ld): bus_reset failed: " | ||
1074 | "target_reset=%d d_id=%x.\n", __func__, | ||
1075 | vha->host_no, ret, fcport->d_id.b24)); | ||
1076 | } | ||
1077 | } | ||
1078 | } | ||
1079 | |||
1098 | if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) { | 1080 | if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) { |
1099 | ret = qla2x00_full_login_lip(vha); | 1081 | ret = qla2x00_full_login_lip(vha); |
1100 | if (ret != QLA_SUCCESS) { | 1082 | if (ret != QLA_SUCCESS) { |
@@ -1117,19 +1099,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
1117 | qla2x00_wait_for_loop_ready(vha); | 1099 | qla2x00_wait_for_loop_ready(vha); |
1118 | } | 1100 | } |
1119 | 1101 | ||
1120 | if (ha->flags.enable_target_reset) { | ||
1121 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | ||
1122 | if (fcport->port_type != FCT_TARGET) | ||
1123 | continue; | ||
1124 | |||
1125 | ret = ha->isp_ops->target_reset(fcport, 0, 0); | ||
1126 | if (ret != QLA_SUCCESS) { | ||
1127 | DEBUG2_3(printk("%s(%ld): bus_reset failed: " | ||
1128 | "target_reset=%d d_id=%x.\n", __func__, | ||
1129 | vha->host_no, ret, fcport->d_id.b24)); | ||
1130 | } | ||
1131 | } | ||
1132 | } | ||
1133 | /* Issue marker command only when we are going to start the I/O */ | 1102 | /* Issue marker command only when we are going to start the I/O */ |
1134 | vha->marker_needed = 1; | 1103 | vha->marker_needed = 1; |
1135 | 1104 | ||
@@ -1160,8 +1129,19 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1160 | qla2x00_sp_compl(ha, sp); | 1129 | qla2x00_sp_compl(ha, sp); |
1161 | } else { | 1130 | } else { |
1162 | ctx = sp->ctx; | 1131 | ctx = sp->ctx; |
1163 | del_timer_sync(&ctx->timer); | 1132 | if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) { |
1164 | ctx->free(sp); | 1133 | del_timer_sync(&ctx->timer); |
1134 | ctx->free(sp); | ||
1135 | } else { | ||
1136 | struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx; | ||
1137 | if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT) | ||
1138 | kfree(sp->fcport); | ||
1139 | sp_bsg->bsg_job->req->errors = 0; | ||
1140 | sp_bsg->bsg_job->reply->result = res; | ||
1141 | sp_bsg->bsg_job->job_done(sp_bsg->bsg_job); | ||
1142 | kfree(sp->ctx); | ||
1143 | mempool_free(sp, ha->srb_mempool); | ||
1144 | } | ||
1165 | } | 1145 | } |
1166 | } | 1146 | } |
1167 | } | 1147 | } |
@@ -1258,7 +1238,7 @@ qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | |||
1258 | qla2x00_adjust_sdev_qdepth_up(sdev, qdepth); | 1238 | qla2x00_adjust_sdev_qdepth_up(sdev, qdepth); |
1259 | break; | 1239 | break; |
1260 | default: | 1240 | default: |
1261 | return EOPNOTSUPP; | 1241 | return -EOPNOTSUPP; |
1262 | } | 1242 | } |
1263 | 1243 | ||
1264 | return sdev->queue_depth; | 1244 | return sdev->queue_depth; |
@@ -1818,7 +1798,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1818 | /* Set EEH reset type to fundamental if required by hba */ | 1798 | /* Set EEH reset type to fundamental if required by hba */ |
1819 | if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { | 1799 | if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { |
1820 | pdev->needs_freset = 1; | 1800 | pdev->needs_freset = 1; |
1821 | pci_save_state(pdev); | ||
1822 | } | 1801 | } |
1823 | 1802 | ||
1824 | /* Configure PCI I/O space */ | 1803 | /* Configure PCI I/O space */ |
@@ -1970,11 +1949,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1970 | host->max_channel = MAX_BUSES - 1; | 1949 | host->max_channel = MAX_BUSES - 1; |
1971 | host->max_lun = MAX_LUNS; | 1950 | host->max_lun = MAX_LUNS; |
1972 | host->transportt = qla2xxx_transport_template; | 1951 | host->transportt = qla2xxx_transport_template; |
1952 | sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); | ||
1973 | 1953 | ||
1974 | /* Set up the irqs */ | 1954 | /* Set up the irqs */ |
1975 | ret = qla2x00_request_irqs(ha, rsp); | 1955 | ret = qla2x00_request_irqs(ha, rsp); |
1976 | if (ret) | 1956 | if (ret) |
1977 | goto probe_init_failed; | 1957 | goto probe_init_failed; |
1958 | |||
1959 | pci_save_state(pdev); | ||
1960 | |||
1978 | /* Alloc arrays of request and response ring ptrs */ | 1961 | /* Alloc arrays of request and response ring ptrs */ |
1979 | que_init: | 1962 | que_init: |
1980 | if (!qla2x00_alloc_queues(ha)) { | 1963 | if (!qla2x00_alloc_queues(ha)) { |
@@ -2176,6 +2159,8 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
2176 | kfree(ha); | 2159 | kfree(ha); |
2177 | ha = NULL; | 2160 | ha = NULL; |
2178 | 2161 | ||
2162 | pci_disable_pcie_error_reporting(pdev); | ||
2163 | |||
2179 | pci_disable_device(pdev); | 2164 | pci_disable_device(pdev); |
2180 | pci_set_drvdata(pdev, NULL); | 2165 | pci_set_drvdata(pdev, NULL); |
2181 | } | 2166 | } |
@@ -3310,6 +3295,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | |||
3310 | return PCI_ERS_RESULT_CAN_RECOVER; | 3295 | return PCI_ERS_RESULT_CAN_RECOVER; |
3311 | case pci_channel_io_frozen: | 3296 | case pci_channel_io_frozen: |
3312 | ha->flags.eeh_busy = 1; | 3297 | ha->flags.eeh_busy = 1; |
3298 | qla2x00_free_irqs(vha); | ||
3313 | pci_disable_device(pdev); | 3299 | pci_disable_device(pdev); |
3314 | return PCI_ERS_RESULT_NEED_RESET; | 3300 | return PCI_ERS_RESULT_NEED_RESET; |
3315 | case pci_channel_io_perm_failure: | 3301 | case pci_channel_io_perm_failure: |
@@ -3363,10 +3349,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) | |||
3363 | pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; | 3349 | pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; |
3364 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); | 3350 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); |
3365 | struct qla_hw_data *ha = base_vha->hw; | 3351 | struct qla_hw_data *ha = base_vha->hw; |
3366 | int rc; | 3352 | struct rsp_que *rsp; |
3353 | int rc, retries = 10; | ||
3367 | 3354 | ||
3368 | DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); | 3355 | DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); |
3369 | 3356 | ||
3357 | /* Workaround: qla2xxx driver which access hardware earlier | ||
3358 | * needs error state to be pci_channel_io_online. | ||
3359 | * Otherwise mailbox command timesout. | ||
3360 | */ | ||
3361 | pdev->error_state = pci_channel_io_normal; | ||
3362 | |||
3363 | pci_restore_state(pdev); | ||
3364 | |||
3365 | /* pci_restore_state() clears the saved_state flag of the device | ||
3366 | * save restored state which resets saved_state flag | ||
3367 | */ | ||
3368 | pci_save_state(pdev); | ||
3369 | |||
3370 | if (ha->mem_only) | 3370 | if (ha->mem_only) |
3371 | rc = pci_enable_device_mem(pdev); | 3371 | rc = pci_enable_device_mem(pdev); |
3372 | else | 3372 | else |
@@ -3378,27 +3378,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) | |||
3378 | return ret; | 3378 | return ret; |
3379 | } | 3379 | } |
3380 | 3380 | ||
3381 | rsp = ha->rsp_q_map[0]; | ||
3382 | if (qla2x00_request_irqs(ha, rsp)) | ||
3383 | return ret; | ||
3384 | |||
3381 | if (ha->isp_ops->pci_config(base_vha)) | 3385 | if (ha->isp_ops->pci_config(base_vha)) |
3382 | return ret; | 3386 | return ret; |
3383 | 3387 | ||
3384 | #ifdef QL_DEBUG_LEVEL_17 | 3388 | while (ha->flags.mbox_busy && retries--) |
3385 | { | 3389 | msleep(1000); |
3386 | uint8_t b; | ||
3387 | uint32_t i; | ||
3388 | 3390 | ||
3389 | printk("slot_reset_1: "); | ||
3390 | for (i = 0; i < 256; i++) { | ||
3391 | pci_read_config_byte(ha->pdev, i, &b); | ||
3392 | printk("%s%02x", (i%16) ? " " : "\n", b); | ||
3393 | } | ||
3394 | printk("\n"); | ||
3395 | } | ||
3396 | #endif | ||
3397 | set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); | 3391 | set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); |
3398 | if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) | 3392 | if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) |
3399 | ret = PCI_ERS_RESULT_RECOVERED; | 3393 | ret = PCI_ERS_RESULT_RECOVERED; |
3400 | clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); | 3394 | clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); |
3401 | 3395 | ||
3396 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
3397 | |||
3402 | DEBUG17(qla_printk(KERN_WARNING, ha, | 3398 | DEBUG17(qla_printk(KERN_WARNING, ha, |
3403 | "slot_reset-return:ret=%x\n", ret)); | 3399 | "slot_reset-return:ret=%x\n", ret)); |
3404 | 3400 | ||
@@ -3422,8 +3418,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev) | |||
3422 | } | 3418 | } |
3423 | 3419 | ||
3424 | ha->flags.eeh_busy = 0; | 3420 | ha->flags.eeh_busy = 0; |
3425 | |||
3426 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
3427 | } | 3421 | } |
3428 | 3422 | ||
3429 | static struct pci_error_handlers qla2xxx_err_handler = { | 3423 | static struct pci_error_handlers qla2xxx_err_handler = { |
@@ -3536,4 +3530,3 @@ MODULE_FIRMWARE(FW_FILE_ISP2300); | |||
3536 | MODULE_FIRMWARE(FW_FILE_ISP2322); | 3530 | MODULE_FIRMWARE(FW_FILE_ISP2322); |
3537 | MODULE_FIRMWARE(FW_FILE_ISP24XX); | 3531 | MODULE_FIRMWARE(FW_FILE_ISP24XX); |
3538 | MODULE_FIRMWARE(FW_FILE_ISP25XX); | 3532 | MODULE_FIRMWARE(FW_FILE_ISP25XX); |
3539 | MODULE_FIRMWARE(FW_FILE_ISP81XX); | ||
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index ed36279a33c1..8d2fc2fa7a6b 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,9 +7,9 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.01-k10" | 10 | #define QLA2XXX_VERSION "8.03.02-k1" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
14 | #define QLA_DRIVER_PATCH_VER 1 | 14 | #define QLA_DRIVER_PATCH_VER 2 |
15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 1 |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index af8c3233e8ae..92329a461c68 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -844,10 +844,10 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha) | |||
844 | DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, | 844 | DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, |
845 | __func__)); | 845 | __func__)); |
846 | if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) | 846 | if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) |
847 | return (QLA_ERROR); | 847 | return QLA_ERROR; |
848 | if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { | 848 | if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { |
849 | ql4xxx_unlock_flash(ha); | 849 | ql4xxx_unlock_flash(ha); |
850 | return (QLA_ERROR); | 850 | return QLA_ERROR; |
851 | } | 851 | } |
852 | 852 | ||
853 | /* Get EEPRom Parameters from NVRAM and validate */ | 853 | /* Get EEPRom Parameters from NVRAM and validate */ |
@@ -858,20 +858,18 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha) | |||
858 | rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); | 858 | rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); |
859 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 859 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
860 | } else { | 860 | } else { |
861 | /* | ||
862 | * QLogic adapters should always have a valid NVRAM. | ||
863 | * If not valid, do not load. | ||
864 | */ | ||
865 | dev_warn(&ha->pdev->dev, | 861 | dev_warn(&ha->pdev->dev, |
866 | "scsi%ld: %s: EEProm checksum invalid. " | 862 | "scsi%ld: %s: EEProm checksum invalid. " |
867 | "Please update your EEPROM\n", ha->host_no, | 863 | "Please update your EEPROM\n", ha->host_no, |
868 | __func__); | 864 | __func__); |
869 | 865 | ||
870 | /* set defaults */ | 866 | /* Attempt to set defaults */ |
871 | if (is_qla4010(ha)) | 867 | if (is_qla4010(ha)) |
872 | extHwConfig.Asuint32_t = 0x1912; | 868 | extHwConfig.Asuint32_t = 0x1912; |
873 | else if (is_qla4022(ha) | is_qla4032(ha)) | 869 | else if (is_qla4022(ha) | is_qla4032(ha)) |
874 | extHwConfig.Asuint32_t = 0x0023; | 870 | extHwConfig.Asuint32_t = 0x0023; |
871 | else | ||
872 | return QLA_ERROR; | ||
875 | } | 873 | } |
876 | DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", | 874 | DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", |
877 | ha->host_no, __func__, extHwConfig.Asuint32_t)); | 875 | ha->host_no, __func__, extHwConfig.Asuint32_t)); |
@@ -884,7 +882,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha) | |||
884 | ql4xxx_unlock_nvram(ha); | 882 | ql4xxx_unlock_nvram(ha); |
885 | ql4xxx_unlock_flash(ha); | 883 | ql4xxx_unlock_flash(ha); |
886 | 884 | ||
887 | return (QLA_SUCCESS); | 885 | return QLA_SUCCESS; |
888 | } | 886 | } |
889 | 887 | ||
890 | static void qla4x00_pci_config(struct scsi_qla_host *ha) | 888 | static void qla4x00_pci_config(struct scsi_qla_host *ha) |
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 8e5c169b03fb..bd88349b8526 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c | |||
@@ -149,6 +149,7 @@ static struct { | |||
149 | { RAID_LEVEL_0, "raid0" }, | 149 | { RAID_LEVEL_0, "raid0" }, |
150 | { RAID_LEVEL_1, "raid1" }, | 150 | { RAID_LEVEL_1, "raid1" }, |
151 | { RAID_LEVEL_10, "raid10" }, | 151 | { RAID_LEVEL_10, "raid10" }, |
152 | { RAID_LEVEL_1E, "raid1e" }, | ||
152 | { RAID_LEVEL_3, "raid3" }, | 153 | { RAID_LEVEL_3, "raid3" }, |
153 | { RAID_LEVEL_4, "raid4" }, | 154 | { RAID_LEVEL_4, "raid4" }, |
154 | { RAID_LEVEL_5, "raid5" }, | 155 | { RAID_LEVEL_5, "raid5" }, |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index a60da5555577..513661f45e5f 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -1026,55 +1026,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, | |||
1026 | * responsible for calling kfree() on this pointer when it is no longer | 1026 | * responsible for calling kfree() on this pointer when it is no longer |
1027 | * needed. If we cannot retrieve the VPD page this routine returns %NULL. | 1027 | * needed. If we cannot retrieve the VPD page this routine returns %NULL. |
1028 | */ | 1028 | */ |
1029 | unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page) | 1029 | int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, |
1030 | int buf_len) | ||
1030 | { | 1031 | { |
1031 | int i, result; | 1032 | int i, result; |
1032 | unsigned int len; | ||
1033 | const unsigned int init_vpd_len = 255; | ||
1034 | unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL); | ||
1035 | |||
1036 | if (!buf) | ||
1037 | return NULL; | ||
1038 | 1033 | ||
1039 | /* Ask for all the pages supported by this device */ | 1034 | /* Ask for all the pages supported by this device */ |
1040 | result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len); | 1035 | result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); |
1041 | if (result) | 1036 | if (result) |
1042 | goto fail; | 1037 | goto fail; |
1043 | 1038 | ||
1044 | /* If the user actually wanted this page, we can skip the rest */ | 1039 | /* If the user actually wanted this page, we can skip the rest */ |
1045 | if (page == 0) | 1040 | if (page == 0) |
1046 | return buf; | 1041 | return -EINVAL; |
1047 | 1042 | ||
1048 | for (i = 0; i < buf[3]; i++) | 1043 | for (i = 0; i < min((int)buf[3], buf_len - 4); i++) |
1049 | if (buf[i + 4] == page) | 1044 | if (buf[i + 4] == page) |
1050 | goto found; | 1045 | goto found; |
1046 | |||
1047 | if (i < buf[3] && i > buf_len) | ||
1048 | /* ran off the end of the buffer, give us benefit of doubt */ | ||
1049 | goto found; | ||
1051 | /* The device claims it doesn't support the requested page */ | 1050 | /* The device claims it doesn't support the requested page */ |
1052 | goto fail; | 1051 | goto fail; |
1053 | 1052 | ||
1054 | found: | 1053 | found: |
1055 | result = scsi_vpd_inquiry(sdev, buf, page, 255); | 1054 | result = scsi_vpd_inquiry(sdev, buf, page, buf_len); |
1056 | if (result) | 1055 | if (result) |
1057 | goto fail; | 1056 | goto fail; |
1058 | 1057 | ||
1059 | /* | 1058 | return 0; |
1060 | * Some pages are longer than 255 bytes. The actual length of | ||
1061 | * the page is returned in the header. | ||
1062 | */ | ||
1063 | len = ((buf[2] << 8) | buf[3]) + 4; | ||
1064 | if (len <= init_vpd_len) | ||
1065 | return buf; | ||
1066 | |||
1067 | kfree(buf); | ||
1068 | buf = kmalloc(len, GFP_KERNEL); | ||
1069 | result = scsi_vpd_inquiry(sdev, buf, page, len); | ||
1070 | if (result) | ||
1071 | goto fail; | ||
1072 | |||
1073 | return buf; | ||
1074 | 1059 | ||
1075 | fail: | 1060 | fail: |
1076 | kfree(buf); | 1061 | return -EINVAL; |
1077 | return NULL; | ||
1078 | } | 1062 | } |
1079 | EXPORT_SYMBOL_GPL(scsi_get_vpd_page); | 1063 | EXPORT_SYMBOL_GPL(scsi_get_vpd_page); |
1080 | 1064 | ||
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c6642423cc67..56977097de9f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
773 | * we already took a copy of the original into rq->errors which | 773 | * we already took a copy of the original into rq->errors which |
774 | * is what gets returned to the user | 774 | * is what gets returned to the user |
775 | */ | 775 | */ |
776 | if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) { | 776 | if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { |
777 | if (!(req->cmd_flags & REQ_QUIET)) | 777 | /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip |
778 | * print since caller wants ATA registers. Only occurs on | ||
779 | * SCSI ATA PASS_THROUGH commands when CK_COND=1 | ||
780 | */ | ||
781 | if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) | ||
782 | ; | ||
783 | else if (!(req->cmd_flags & REQ_QUIET)) | ||
778 | scsi_print_sense("", cmd); | 784 | scsi_print_sense("", cmd); |
779 | result = 0; | 785 | result = 0; |
780 | /* BLOCK_PC may have set error */ | 786 | /* BLOCK_PC may have set error */ |
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h index 998cb5be6833..6266a5d73d0f 100644 --- a/drivers/scsi/scsi_sas_internal.h +++ b/drivers/scsi/scsi_sas_internal.h | |||
@@ -5,7 +5,7 @@ | |||
5 | #define SAS_PHY_ATTRS 17 | 5 | #define SAS_PHY_ATTRS 17 |
6 | #define SAS_PORT_ATTRS 1 | 6 | #define SAS_PORT_ATTRS 1 |
7 | #define SAS_RPORT_ATTRS 7 | 7 | #define SAS_RPORT_ATTRS 7 |
8 | #define SAS_END_DEV_ATTRS 3 | 8 | #define SAS_END_DEV_ATTRS 5 |
9 | #define SAS_EXPANDER_ATTRS 7 | 9 | #define SAS_EXPANDER_ATTRS 7 |
10 | 10 | ||
11 | struct sas_internal { | 11 | struct sas_internal { |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 012f73a96880..f697229ae5a9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -1339,8 +1339,10 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1339 | sdev = scsi_alloc_sdev(starget, 0, NULL); | 1339 | sdev = scsi_alloc_sdev(starget, 0, NULL); |
1340 | if (!sdev) | 1340 | if (!sdev) |
1341 | return 0; | 1341 | return 0; |
1342 | if (scsi_device_get(sdev)) | 1342 | if (scsi_device_get(sdev)) { |
1343 | __scsi_remove_device(sdev); | ||
1343 | return 0; | 1344 | return 0; |
1345 | } | ||
1344 | } | 1346 | } |
1345 | 1347 | ||
1346 | sprintf(devname, "host %d channel %d id %d", | 1348 | sprintf(devname, "host %d channel %d id %d", |
@@ -1907,10 +1909,9 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) | |||
1907 | goto out; | 1909 | goto out; |
1908 | 1910 | ||
1909 | sdev = scsi_alloc_sdev(starget, 0, NULL); | 1911 | sdev = scsi_alloc_sdev(starget, 0, NULL); |
1910 | if (sdev) { | 1912 | if (sdev) |
1911 | sdev->sdev_gendev.parent = get_device(&starget->dev); | ||
1912 | sdev->borken = 0; | 1913 | sdev->borken = 0; |
1913 | } else | 1914 | else |
1914 | scsi_target_reap(starget); | 1915 | scsi_target_reap(starget); |
1915 | put_device(&starget->dev); | 1916 | put_device(&starget->dev); |
1916 | out: | 1917 | out: |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 5a065055e68a..a4936c4e2f46 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -878,7 +878,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) | |||
878 | struct request_queue *rq = sdev->request_queue; | 878 | struct request_queue *rq = sdev->request_queue; |
879 | struct scsi_target *starget = sdev->sdev_target; | 879 | struct scsi_target *starget = sdev->sdev_target; |
880 | 880 | ||
881 | if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0) | 881 | error = scsi_device_set_state(sdev, SDEV_RUNNING); |
882 | if (error) | ||
882 | return error; | 883 | return error; |
883 | 884 | ||
884 | error = scsi_target_add(starget); | 885 | error = scsi_target_add(starget); |
@@ -889,13 +890,13 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) | |||
889 | error = device_add(&sdev->sdev_gendev); | 890 | error = device_add(&sdev->sdev_gendev); |
890 | if (error) { | 891 | if (error) { |
891 | printk(KERN_INFO "error 1\n"); | 892 | printk(KERN_INFO "error 1\n"); |
892 | goto out_remove; | 893 | return error; |
893 | } | 894 | } |
894 | error = device_add(&sdev->sdev_dev); | 895 | error = device_add(&sdev->sdev_dev); |
895 | if (error) { | 896 | if (error) { |
896 | printk(KERN_INFO "error 2\n"); | 897 | printk(KERN_INFO "error 2\n"); |
897 | device_del(&sdev->sdev_gendev); | 898 | device_del(&sdev->sdev_gendev); |
898 | goto out_remove; | 899 | return error; |
899 | } | 900 | } |
900 | transport_add_device(&sdev->sdev_gendev); | 901 | transport_add_device(&sdev->sdev_gendev); |
901 | sdev->is_visible = 1; | 902 | sdev->is_visible = 1; |
@@ -910,14 +911,14 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) | |||
910 | else | 911 | else |
911 | error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); | 912 | error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); |
912 | if (error) | 913 | if (error) |
913 | goto out_remove; | 914 | return error; |
914 | 915 | ||
915 | if (sdev->host->hostt->change_queue_type) | 916 | if (sdev->host->hostt->change_queue_type) |
916 | error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); | 917 | error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); |
917 | else | 918 | else |
918 | error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); | 919 | error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); |
919 | if (error) | 920 | if (error) |
920 | goto out_remove; | 921 | return error; |
921 | 922 | ||
922 | error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); | 923 | error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); |
923 | 924 | ||
@@ -933,16 +934,11 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) | |||
933 | error = device_create_file(&sdev->sdev_gendev, | 934 | error = device_create_file(&sdev->sdev_gendev, |
934 | sdev->host->hostt->sdev_attrs[i]); | 935 | sdev->host->hostt->sdev_attrs[i]); |
935 | if (error) | 936 | if (error) |
936 | goto out_remove; | 937 | return error; |
937 | } | 938 | } |
938 | } | 939 | } |
939 | 940 | ||
940 | return 0; | ||
941 | |||
942 | out_remove: | ||
943 | __scsi_remove_device(sdev); | ||
944 | return error; | 941 | return error; |
945 | |||
946 | } | 942 | } |
947 | 943 | ||
948 | void __scsi_remove_device(struct scsi_device *sdev) | 944 | void __scsi_remove_device(struct scsi_device *sdev) |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 653f22a8deb9..79660ee3e211 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -475,7 +475,8 @@ MODULE_PARM_DESC(dev_loss_tmo, | |||
475 | "Maximum number of seconds that the FC transport should" | 475 | "Maximum number of seconds that the FC transport should" |
476 | " insulate the loss of a remote port. Once this value is" | 476 | " insulate the loss of a remote port. Once this value is" |
477 | " exceeded, the scsi target is removed. Value should be" | 477 | " exceeded, the scsi target is removed. Value should be" |
478 | " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); | 478 | " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if" |
479 | " fast_io_fail_tmo is not set."); | ||
479 | 480 | ||
480 | /* | 481 | /* |
481 | * Netlink Infrastructure | 482 | * Netlink Infrastructure |
@@ -842,9 +843,17 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, | |||
842 | (rport->port_state == FC_PORTSTATE_NOTPRESENT)) | 843 | (rport->port_state == FC_PORTSTATE_NOTPRESENT)) |
843 | return -EBUSY; | 844 | return -EBUSY; |
844 | val = simple_strtoul(buf, &cp, 0); | 845 | val = simple_strtoul(buf, &cp, 0); |
845 | if ((*cp && (*cp != '\n')) || | 846 | if ((*cp && (*cp != '\n')) || (val < 0)) |
846 | (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) | ||
847 | return -EINVAL; | 847 | return -EINVAL; |
848 | |||
849 | /* | ||
850 | * If fast_io_fail is off we have to cap | ||
851 | * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT | ||
852 | */ | ||
853 | if (rport->fast_io_fail_tmo == -1 && | ||
854 | val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) | ||
855 | return -EINVAL; | ||
856 | |||
848 | i->f->set_rport_dev_loss_tmo(rport, val); | 857 | i->f->set_rport_dev_loss_tmo(rport, val); |
849 | return count; | 858 | return count; |
850 | } | 859 | } |
@@ -925,9 +934,16 @@ store_fc_rport_fast_io_fail_tmo(struct device *dev, | |||
925 | rport->fast_io_fail_tmo = -1; | 934 | rport->fast_io_fail_tmo = -1; |
926 | else { | 935 | else { |
927 | val = simple_strtoul(buf, &cp, 0); | 936 | val = simple_strtoul(buf, &cp, 0); |
928 | if ((*cp && (*cp != '\n')) || | 937 | if ((*cp && (*cp != '\n')) || (val < 0)) |
929 | (val < 0) || (val >= rport->dev_loss_tmo)) | ||
930 | return -EINVAL; | 938 | return -EINVAL; |
939 | /* | ||
940 | * Cap fast_io_fail by dev_loss_tmo or | ||
941 | * SCSI_DEVICE_BLOCK_MAX_TIMEOUT. | ||
942 | */ | ||
943 | if ((val >= rport->dev_loss_tmo) || | ||
944 | (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) | ||
945 | return -EINVAL; | ||
946 | |||
931 | rport->fast_io_fail_tmo = val; | 947 | rport->fast_io_fail_tmo = val; |
932 | } | 948 | } |
933 | return count; | 949 | return count; |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index f27e52d963d3..927e99cb7225 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -155,6 +155,17 @@ static struct { | |||
155 | sas_bitfield_name_search(linkspeed, sas_linkspeed_names) | 155 | sas_bitfield_name_search(linkspeed, sas_linkspeed_names) |
156 | sas_bitfield_name_set(linkspeed, sas_linkspeed_names) | 156 | sas_bitfield_name_set(linkspeed, sas_linkspeed_names) |
157 | 157 | ||
158 | static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev) | ||
159 | { | ||
160 | struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target); | ||
161 | struct sas_end_device *rdev; | ||
162 | |||
163 | BUG_ON(rphy->identify.device_type != SAS_END_DEVICE); | ||
164 | |||
165 | rdev = rphy_to_end_device(rphy); | ||
166 | return rdev; | ||
167 | } | ||
168 | |||
158 | static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, | 169 | static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, |
159 | struct sas_rphy *rphy) | 170 | struct sas_rphy *rphy) |
160 | { | 171 | { |
@@ -358,6 +369,85 @@ void sas_remove_host(struct Scsi_Host *shost) | |||
358 | } | 369 | } |
359 | EXPORT_SYMBOL(sas_remove_host); | 370 | EXPORT_SYMBOL(sas_remove_host); |
360 | 371 | ||
372 | /** | ||
373 | * sas_tlr_supported - checking TLR bit in vpd 0x90 | ||
374 | * @sdev: scsi device struct | ||
375 | * | ||
376 | * Check Transport Layer Retries are supported or not. | ||
377 | * If vpd page 0x90 is present, TRL is supported. | ||
378 | * | ||
379 | */ | ||
380 | unsigned int | ||
381 | sas_tlr_supported(struct scsi_device *sdev) | ||
382 | { | ||
383 | const int vpd_len = 32; | ||
384 | struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); | ||
385 | char *buffer = kzalloc(vpd_len, GFP_KERNEL); | ||
386 | int ret = 0; | ||
387 | |||
388 | if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len)) | ||
389 | goto out; | ||
390 | |||
391 | /* | ||
392 | * Magic numbers: the VPD Protocol page (0x90) | ||
393 | * has a 4 byte header and then one entry per device port | ||
394 | * the TLR bit is at offset 8 on each port entry | ||
395 | * if we take the first port, that's at total offset 12 | ||
396 | */ | ||
397 | ret = buffer[12] & 0x01; | ||
398 | |||
399 | out: | ||
400 | kfree(buffer); | ||
401 | rdev->tlr_supported = ret; | ||
402 | return ret; | ||
403 | |||
404 | } | ||
405 | EXPORT_SYMBOL_GPL(sas_tlr_supported); | ||
406 | |||
407 | /** | ||
408 | * sas_disable_tlr - setting TLR flags | ||
409 | * @sdev: scsi device struct | ||
410 | * | ||
411 | * Seting tlr_enabled flag to 0. | ||
412 | * | ||
413 | */ | ||
414 | void | ||
415 | sas_disable_tlr(struct scsi_device *sdev) | ||
416 | { | ||
417 | struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); | ||
418 | |||
419 | rdev->tlr_enabled = 0; | ||
420 | } | ||
421 | EXPORT_SYMBOL_GPL(sas_disable_tlr); | ||
422 | |||
423 | /** | ||
424 | * sas_enable_tlr - setting TLR flags | ||
425 | * @sdev: scsi device struct | ||
426 | * | ||
427 | * Seting tlr_enabled flag 1. | ||
428 | * | ||
429 | */ | ||
430 | void sas_enable_tlr(struct scsi_device *sdev) | ||
431 | { | ||
432 | unsigned int tlr_supported = 0; | ||
433 | tlr_supported = sas_tlr_supported(sdev); | ||
434 | |||
435 | if (tlr_supported) { | ||
436 | struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); | ||
437 | |||
438 | rdev->tlr_enabled = 1; | ||
439 | } | ||
440 | |||
441 | return; | ||
442 | } | ||
443 | EXPORT_SYMBOL_GPL(sas_enable_tlr); | ||
444 | |||
445 | unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) | ||
446 | { | ||
447 | struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); | ||
448 | return rdev->tlr_enabled; | ||
449 | } | ||
450 | EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); | ||
361 | 451 | ||
362 | /* | 452 | /* |
363 | * SAS Phy attributes | 453 | * SAS Phy attributes |
@@ -1146,15 +1236,10 @@ sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); | |||
1146 | int sas_read_port_mode_page(struct scsi_device *sdev) | 1236 | int sas_read_port_mode_page(struct scsi_device *sdev) |
1147 | { | 1237 | { |
1148 | char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; | 1238 | char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; |
1149 | struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target); | 1239 | struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); |
1150 | struct sas_end_device *rdev; | ||
1151 | struct scsi_mode_data mode_data; | 1240 | struct scsi_mode_data mode_data; |
1152 | int res, error; | 1241 | int res, error; |
1153 | 1242 | ||
1154 | BUG_ON(rphy->identify.device_type != SAS_END_DEVICE); | ||
1155 | |||
1156 | rdev = rphy_to_end_device(rphy); | ||
1157 | |||
1158 | if (!buffer) | 1243 | if (!buffer) |
1159 | return -ENOMEM; | 1244 | return -ENOMEM; |
1160 | 1245 | ||
@@ -1207,6 +1292,10 @@ sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout, | |||
1207 | "%d\n", int); | 1292 | "%d\n", int); |
1208 | sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout, | 1293 | sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout, |
1209 | "%d\n", int); | 1294 | "%d\n", int); |
1295 | sas_end_dev_simple_attr(tlr_supported, tlr_supported, | ||
1296 | "%d\n", int); | ||
1297 | sas_end_dev_simple_attr(tlr_enabled, tlr_enabled, | ||
1298 | "%d\n", int); | ||
1210 | 1299 | ||
1211 | static DECLARE_TRANSPORT_CLASS(sas_expander_class, | 1300 | static DECLARE_TRANSPORT_CLASS(sas_expander_class, |
1212 | "sas_expander", NULL, NULL, NULL); | 1301 | "sas_expander", NULL, NULL, NULL); |
@@ -1733,6 +1822,8 @@ sas_attach_transport(struct sas_function_template *ft) | |||
1733 | SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning); | 1822 | SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning); |
1734 | SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout); | 1823 | SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout); |
1735 | SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout); | 1824 | SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout); |
1825 | SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported); | ||
1826 | SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled); | ||
1736 | i->end_dev_attrs[count] = NULL; | 1827 | i->end_dev_attrs[count] = NULL; |
1737 | 1828 | ||
1738 | count = 0; | 1829 | count = 0; |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 255da53e5a01..1dd4d8407694 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1196,19 +1196,10 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
1196 | SCpnt->result = 0; | 1196 | SCpnt->result = 0; |
1197 | memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | 1197 | memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); |
1198 | break; | 1198 | break; |
1199 | case ABORTED_COMMAND: | 1199 | case ABORTED_COMMAND: /* DIF: Target detected corruption */ |
1200 | if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */ | 1200 | case ILLEGAL_REQUEST: /* DIX: Host detected corruption */ |
1201 | scsi_print_result(SCpnt); | 1201 | if (sshdr.asc == 0x10) |
1202 | scsi_print_sense("sd", SCpnt); | ||
1203 | good_bytes = sd_completed_bytes(SCpnt); | 1202 | good_bytes = sd_completed_bytes(SCpnt); |
1204 | } | ||
1205 | break; | ||
1206 | case ILLEGAL_REQUEST: | ||
1207 | if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */ | ||
1208 | scsi_print_result(SCpnt); | ||
1209 | scsi_print_sense("sd", SCpnt); | ||
1210 | good_bytes = sd_completed_bytes(SCpnt); | ||
1211 | } | ||
1212 | break; | 1203 | break; |
1213 | default: | 1204 | default: |
1214 | break; | 1205 | break; |
@@ -1218,8 +1209,19 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
1218 | sd_dif_complete(SCpnt, good_bytes); | 1209 | sd_dif_complete(SCpnt, good_bytes); |
1219 | 1210 | ||
1220 | if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type) | 1211 | if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type) |
1221 | == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) | 1212 | == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) { |
1213 | |||
1214 | /* We have to print a failed command here as the | ||
1215 | * extended CDB gets freed before scsi_io_completion() | ||
1216 | * is called. | ||
1217 | */ | ||
1218 | if (result) | ||
1219 | scsi_print_command(SCpnt); | ||
1220 | |||
1222 | mempool_free(SCpnt->cmnd, sd_cdb_pool); | 1221 | mempool_free(SCpnt->cmnd, sd_cdb_pool); |
1222 | SCpnt->cmnd = NULL; | ||
1223 | SCpnt->cmd_len = 0; | ||
1224 | } | ||
1223 | 1225 | ||
1224 | return good_bytes; | 1226 | return good_bytes; |
1225 | } | 1227 | } |
@@ -1946,13 +1948,13 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
1946 | { | 1948 | { |
1947 | struct request_queue *q = sdkp->disk->queue; | 1949 | struct request_queue *q = sdkp->disk->queue; |
1948 | unsigned int sector_sz = sdkp->device->sector_size; | 1950 | unsigned int sector_sz = sdkp->device->sector_size; |
1949 | char *buffer; | 1951 | const int vpd_len = 32; |
1952 | unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); | ||
1950 | 1953 | ||
1951 | /* Block Limits VPD */ | 1954 | if (!buffer || |
1952 | buffer = scsi_get_vpd_page(sdkp->device, 0xb0); | 1955 | /* Block Limits VPD */ |
1953 | 1956 | scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) | |
1954 | if (buffer == NULL) | 1957 | goto out; |
1955 | return; | ||
1956 | 1958 | ||
1957 | blk_queue_io_min(sdkp->disk->queue, | 1959 | blk_queue_io_min(sdkp->disk->queue, |
1958 | get_unaligned_be16(&buffer[6]) * sector_sz); | 1960 | get_unaligned_be16(&buffer[6]) * sector_sz); |
@@ -1984,6 +1986,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
1984 | get_unaligned_be32(&buffer[32]) & ~(1 << 31); | 1986 | get_unaligned_be32(&buffer[32]) & ~(1 << 31); |
1985 | } | 1987 | } |
1986 | 1988 | ||
1989 | out: | ||
1987 | kfree(buffer); | 1990 | kfree(buffer); |
1988 | } | 1991 | } |
1989 | 1992 | ||
@@ -1993,20 +1996,23 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
1993 | */ | 1996 | */ |
1994 | static void sd_read_block_characteristics(struct scsi_disk *sdkp) | 1997 | static void sd_read_block_characteristics(struct scsi_disk *sdkp) |
1995 | { | 1998 | { |
1996 | char *buffer; | 1999 | unsigned char *buffer; |
1997 | u16 rot; | 2000 | u16 rot; |
2001 | const int vpd_len = 32; | ||
1998 | 2002 | ||
1999 | /* Block Device Characteristics VPD */ | 2003 | buffer = kmalloc(vpd_len, GFP_KERNEL); |
2000 | buffer = scsi_get_vpd_page(sdkp->device, 0xb1); | ||
2001 | 2004 | ||
2002 | if (buffer == NULL) | 2005 | if (!buffer || |
2003 | return; | 2006 | /* Block Device Characteristics VPD */ |
2007 | scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len)) | ||
2008 | goto out; | ||
2004 | 2009 | ||
2005 | rot = get_unaligned_be16(&buffer[4]); | 2010 | rot = get_unaligned_be16(&buffer[4]); |
2006 | 2011 | ||
2007 | if (rot == 1) | 2012 | if (rot == 1) |
2008 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); | 2013 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); |
2009 | 2014 | ||
2015 | out: | ||
2010 | kfree(buffer); | 2016 | kfree(buffer); |
2011 | } | 2017 | } |
2012 | 2018 | ||
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 55b034b72708..1d7a8780e00c 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -448,13 +448,17 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, | |||
448 | .addr = 0, | 448 | .addr = 0, |
449 | }; | 449 | }; |
450 | 450 | ||
451 | buf = scsi_get_vpd_page(sdev, 0x83); | 451 | buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL); |
452 | if (!buf) | 452 | if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE)) |
453 | return; | 453 | goto free; |
454 | 454 | ||
455 | ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); | 455 | ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); |
456 | 456 | ||
457 | vpd_len = ((buf[2] << 8) | buf[3]) + 4; | 457 | vpd_len = ((buf[2] << 8) | buf[3]) + 4; |
458 | kfree(buf); | ||
459 | buf = kmalloc(vpd_len, GFP_KERNEL); | ||
460 | if (!buf ||scsi_get_vpd_page(sdev, 0x83, buf, vpd_len)) | ||
461 | goto free; | ||
458 | 462 | ||
459 | desc = buf + 4; | 463 | desc = buf + 4; |
460 | while (desc < buf + vpd_len) { | 464 | while (desc < buf + vpd_len) { |
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 54023d41fd15..26e8e0e6b8dd 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c | |||
@@ -1070,7 +1070,7 @@ static int option_setup(char *str) { | |||
1070 | char *cur = str; | 1070 | char *cur = str; |
1071 | int i = 1; | 1071 | int i = 1; |
1072 | 1072 | ||
1073 | while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) { | 1073 | while (cur && isdigit(*cur) && i < MAX_INT_PARAM) { |
1074 | ints[i++] = simple_strtoul(cur, NULL, 0); | 1074 | ints[i++] = simple_strtoul(cur, NULL, 0); |
1075 | 1075 | ||
1076 | if ((cur = strchr(cur, ',')) != NULL) cur++; | 1076 | if ((cur = strchr(cur, ',')) != NULL) cur++; |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index d2604c813a20..e4ac5829b637 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
@@ -1069,7 +1069,8 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) | |||
1069 | free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); | 1069 | free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq) | 1072 | static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, |
1073 | unsigned int *irq) | ||
1073 | { | 1074 | { |
1074 | struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; | 1075 | struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; |
1075 | int ret; | 1076 | int ret; |
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index 683e66f18e8c..3e2ae4807ae2 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c | |||
@@ -2031,9 +2031,9 @@ static int __init pmz_console_setup(struct console *co, char *options) | |||
2031 | /* | 2031 | /* |
2032 | * XServe's default to 57600 bps | 2032 | * XServe's default to 57600 bps |
2033 | */ | 2033 | */ |
2034 | if (machine_is_compatible("RackMac1,1") | 2034 | if (of_machine_is_compatible("RackMac1,1") |
2035 | || machine_is_compatible("RackMac1,2") | 2035 | || of_machine_is_compatible("RackMac1,2") |
2036 | || machine_is_compatible("MacRISC4")) | 2036 | || of_machine_is_compatible("MacRISC4")) |
2037 | baud = 57600; | 2037 | baud = 57600; |
2038 | 2038 | ||
2039 | /* | 2039 | /* |
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 0efcded59ae6..f7d2589926d2 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h | |||
@@ -518,34 +518,6 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
518 | { | 518 | { |
519 | if (port->mapbase == 0xfffffe80) | 519 | if (port->mapbase == 0xfffffe80) |
520 | return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */ | 520 | return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */ |
521 | if (port->mapbase == 0xa4000150) | ||
522 | return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */ | ||
523 | if (port->mapbase == 0xa4000140) | ||
524 | return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */ | ||
525 | return 1; | ||
526 | } | ||
527 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) | ||
528 | static inline int sci_rxd_in(struct uart_port *port) | ||
529 | { | ||
530 | if (port->mapbase == SCIF0) | ||
531 | return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */ | ||
532 | if (port->mapbase == SCIF2) | ||
533 | return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */ | ||
534 | return 1; | ||
535 | } | ||
536 | #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) | ||
537 | static inline int sci_rxd_in(struct uart_port *port) | ||
538 | { | ||
539 | return sci_in(port,SCxSR)&0x0010 ? 1 : 0; | ||
540 | } | ||
541 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | ||
542 | defined(CONFIG_CPU_SUBTYPE_SH7721) | ||
543 | static inline int sci_rxd_in(struct uart_port *port) | ||
544 | { | ||
545 | if (port->mapbase == 0xa4430000) | ||
546 | return sci_in(port, SCxSR) & 0x0003 ? 1 : 0; | ||
547 | else if (port->mapbase == 0xa4438000) | ||
548 | return sci_in(port, SCxSR) & 0x0003 ? 1 : 0; | ||
549 | return 1; | 521 | return 1; |
550 | } | 522 | } |
551 | #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ | 523 | #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ |
@@ -558,207 +530,17 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
558 | { | 530 | { |
559 | if (port->mapbase == 0xffe00000) | 531 | if (port->mapbase == 0xffe00000) |
560 | return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */ | 532 | return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */ |
561 | if (port->mapbase == 0xffe80000) | ||
562 | return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ | ||
563 | return 1; | ||
564 | } | ||
565 | #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) | ||
566 | static inline int sci_rxd_in(struct uart_port *port) | ||
567 | { | ||
568 | if (port->mapbase == 0xffe80000) | ||
569 | return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ | ||
570 | return 1; | 533 | return 1; |
571 | } | 534 | } |
572 | #elif defined(CONFIG_CPU_SUBTYPE_SH7757) | ||
573 | static inline int sci_rxd_in(struct uart_port *port) | ||
574 | { | ||
575 | if (port->mapbase == 0xfe4b0000) | ||
576 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; | ||
577 | if (port->mapbase == 0xfe4c0000) | ||
578 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; | ||
579 | if (port->mapbase == 0xfe4d0000) | ||
580 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; | ||
581 | } | ||
582 | #elif defined(CONFIG_CPU_SUBTYPE_SH7760) | ||
583 | static inline int sci_rxd_in(struct uart_port *port) | ||
584 | { | ||
585 | if (port->mapbase == 0xfe600000) | ||
586 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
587 | if (port->mapbase == 0xfe610000) | ||
588 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
589 | if (port->mapbase == 0xfe620000) | ||
590 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
591 | return 1; | ||
592 | } | ||
593 | #elif defined(CONFIG_CPU_SUBTYPE_SH7343) | ||
594 | static inline int sci_rxd_in(struct uart_port *port) | ||
595 | { | ||
596 | if (port->mapbase == 0xffe00000) | ||
597 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
598 | if (port->mapbase == 0xffe10000) | ||
599 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
600 | if (port->mapbase == 0xffe20000) | ||
601 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
602 | if (port->mapbase == 0xffe30000) | ||
603 | return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | ||
604 | return 1; | ||
605 | } | ||
606 | #elif defined(CONFIG_CPU_SUBTYPE_SH7366) | ||
607 | static inline int sci_rxd_in(struct uart_port *port) | ||
608 | { | ||
609 | if (port->mapbase == 0xffe00000) | ||
610 | return __raw_readb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */ | ||
611 | return 1; | ||
612 | } | ||
613 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) | ||
614 | static inline int sci_rxd_in(struct uart_port *port) | ||
615 | { | ||
616 | if (port->mapbase == 0xffe00000) | ||
617 | return __raw_readb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */ | ||
618 | if (port->mapbase == 0xffe10000) | ||
619 | return __raw_readb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */ | ||
620 | if (port->mapbase == 0xffe20000) | ||
621 | return __raw_readb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */ | ||
622 | |||
623 | return 1; | ||
624 | } | ||
625 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) | ||
626 | static inline int sci_rxd_in(struct uart_port *port) | ||
627 | { | ||
628 | if (port->mapbase == 0xffe00000) | ||
629 | return __raw_readb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */ | ||
630 | if (port->mapbase == 0xffe10000) | ||
631 | return __raw_readb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */ | ||
632 | if (port->mapbase == 0xffe20000) | ||
633 | return __raw_readb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */ | ||
634 | if (port->mapbase == 0xa4e30000) | ||
635 | return __raw_readb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */ | ||
636 | if (port->mapbase == 0xa4e40000) | ||
637 | return __raw_readb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */ | ||
638 | if (port->mapbase == 0xa4e50000) | ||
639 | return __raw_readb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */ | ||
640 | return 1; | ||
641 | } | ||
642 | #elif defined(CONFIG_CPU_SUBTYPE_SH7724) | ||
643 | # define SCFSR 0x0010 | ||
644 | # define SCASSR 0x0014 | ||
645 | static inline int sci_rxd_in(struct uart_port *port) | ||
646 | { | ||
647 | if (port->type == PORT_SCIF) | ||
648 | return __raw_readw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0; | ||
649 | if (port->type == PORT_SCIFA) | ||
650 | return __raw_readw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0; | ||
651 | return 1; | ||
652 | } | ||
653 | #elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) | ||
654 | static inline int sci_rxd_in(struct uart_port *port) | ||
655 | { | ||
656 | return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */ | ||
657 | } | ||
658 | #elif defined(__H8300H__) || defined(__H8300S__) | 535 | #elif defined(__H8300H__) || defined(__H8300S__) |
659 | static inline int sci_rxd_in(struct uart_port *port) | 536 | static inline int sci_rxd_in(struct uart_port *port) |
660 | { | 537 | { |
661 | int ch = (port->mapbase - SMR0) >> 3; | 538 | int ch = (port->mapbase - SMR0) >> 3; |
662 | return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0; | 539 | return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0; |
663 | } | 540 | } |
664 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | 541 | #else /* default case for non-SCI processors */ |
665 | static inline int sci_rxd_in(struct uart_port *port) | ||
666 | { | ||
667 | if (port->mapbase == 0xffe00000) | ||
668 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
669 | if (port->mapbase == 0xffe08000) | ||
670 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
671 | if (port->mapbase == 0xffe10000) | ||
672 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */ | ||
673 | |||
674 | return 1; | ||
675 | } | ||
676 | #elif defined(CONFIG_CPU_SUBTYPE_SH7770) | ||
677 | static inline int sci_rxd_in(struct uart_port *port) | ||
678 | { | ||
679 | if (port->mapbase == 0xff923000) | ||
680 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
681 | if (port->mapbase == 0xff924000) | ||
682 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
683 | if (port->mapbase == 0xff925000) | ||
684 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
685 | return 1; | ||
686 | } | ||
687 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) | ||
688 | static inline int sci_rxd_in(struct uart_port *port) | ||
689 | { | ||
690 | if (port->mapbase == 0xffe00000) | ||
691 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
692 | if (port->mapbase == 0xffe10000) | ||
693 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
694 | return 1; | ||
695 | } | ||
696 | #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | ||
697 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
698 | static inline int sci_rxd_in(struct uart_port *port) | ||
699 | { | ||
700 | if (port->mapbase == 0xffea0000) | ||
701 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
702 | if (port->mapbase == 0xffeb0000) | ||
703 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
704 | if (port->mapbase == 0xffec0000) | ||
705 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
706 | if (port->mapbase == 0xffed0000) | ||
707 | return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | ||
708 | if (port->mapbase == 0xffee0000) | ||
709 | return __raw_readw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */ | ||
710 | if (port->mapbase == 0xffef0000) | ||
711 | return __raw_readw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */ | ||
712 | return 1; | ||
713 | } | ||
714 | #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ | ||
715 | defined(CONFIG_CPU_SUBTYPE_SH7203) || \ | ||
716 | defined(CONFIG_CPU_SUBTYPE_SH7206) || \ | ||
717 | defined(CONFIG_CPU_SUBTYPE_SH7263) | ||
718 | static inline int sci_rxd_in(struct uart_port *port) | ||
719 | { | ||
720 | if (port->mapbase == 0xfffe8000) | ||
721 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
722 | if (port->mapbase == 0xfffe8800) | ||
723 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
724 | if (port->mapbase == 0xfffe9000) | ||
725 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
726 | if (port->mapbase == 0xfffe9800) | ||
727 | return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | ||
728 | #if defined(CONFIG_CPU_SUBTYPE_SH7201) | ||
729 | if (port->mapbase == 0xfffeA000) | ||
730 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
731 | if (port->mapbase == 0xfffeA800) | ||
732 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
733 | if (port->mapbase == 0xfffeB000) | ||
734 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
735 | if (port->mapbase == 0xfffeB800) | ||
736 | return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | ||
737 | #endif | ||
738 | return 1; | ||
739 | } | ||
740 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
741 | static inline int sci_rxd_in(struct uart_port *port) | ||
742 | { | ||
743 | if (port->mapbase == 0xf8400000) | ||
744 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
745 | if (port->mapbase == 0xf8410000) | ||
746 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
747 | if (port->mapbase == 0xf8420000) | ||
748 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
749 | return 1; | ||
750 | } | ||
751 | #elif defined(CONFIG_CPU_SUBTYPE_SHX3) | ||
752 | static inline int sci_rxd_in(struct uart_port *port) | 542 | static inline int sci_rxd_in(struct uart_port *port) |
753 | { | 543 | { |
754 | if (port->mapbase == 0xffc30000) | ||
755 | return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
756 | if (port->mapbase == 0xffc40000) | ||
757 | return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
758 | if (port->mapbase == 0xffc50000) | ||
759 | return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
760 | if (port->mapbase == 0xffc60000) | ||
761 | return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | ||
762 | return 1; | 544 | return 1; |
763 | } | 545 | } |
764 | #endif | 546 | #endif |
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index d5d7f23c19a5..3a5a17db9474 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c | |||
@@ -259,6 +259,43 @@ static void intc_disable(unsigned int irq) | |||
259 | } | 259 | } |
260 | } | 260 | } |
261 | 261 | ||
262 | static void (*intc_enable_noprio_fns[])(unsigned long addr, | ||
263 | unsigned long handle, | ||
264 | void (*fn)(unsigned long, | ||
265 | unsigned long, | ||
266 | unsigned long), | ||
267 | unsigned int irq) = { | ||
268 | [MODE_ENABLE_REG] = intc_mode_field, | ||
269 | [MODE_MASK_REG] = intc_mode_zero, | ||
270 | [MODE_DUAL_REG] = intc_mode_field, | ||
271 | [MODE_PRIO_REG] = intc_mode_field, | ||
272 | [MODE_PCLR_REG] = intc_mode_field, | ||
273 | }; | ||
274 | |||
275 | static void intc_enable_disable(struct intc_desc_int *d, | ||
276 | unsigned long handle, int do_enable) | ||
277 | { | ||
278 | unsigned long addr; | ||
279 | unsigned int cpu; | ||
280 | void (*fn)(unsigned long, unsigned long, | ||
281 | void (*)(unsigned long, unsigned long, unsigned long), | ||
282 | unsigned int); | ||
283 | |||
284 | if (do_enable) { | ||
285 | for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { | ||
286 | addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); | ||
287 | fn = intc_enable_noprio_fns[_INTC_MODE(handle)]; | ||
288 | fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); | ||
289 | } | ||
290 | } else { | ||
291 | for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { | ||
292 | addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); | ||
293 | fn = intc_disable_fns[_INTC_MODE(handle)]; | ||
294 | fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); | ||
295 | } | ||
296 | } | ||
297 | } | ||
298 | |||
262 | static int intc_set_wake(unsigned int irq, unsigned int on) | 299 | static int intc_set_wake(unsigned int irq, unsigned int on) |
263 | { | 300 | { |
264 | return 0; /* allow wakeup, but setup hardware in intc_suspend() */ | 301 | return 0; /* allow wakeup, but setup hardware in intc_suspend() */ |
@@ -400,11 +437,11 @@ static unsigned int __init intc_get_reg(struct intc_desc_int *d, | |||
400 | static intc_enum __init intc_grp_id(struct intc_desc *desc, | 437 | static intc_enum __init intc_grp_id(struct intc_desc *desc, |
401 | intc_enum enum_id) | 438 | intc_enum enum_id) |
402 | { | 439 | { |
403 | struct intc_group *g = desc->groups; | 440 | struct intc_group *g = desc->hw.groups; |
404 | unsigned int i, j; | 441 | unsigned int i, j; |
405 | 442 | ||
406 | for (i = 0; g && enum_id && i < desc->nr_groups; i++) { | 443 | for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) { |
407 | g = desc->groups + i; | 444 | g = desc->hw.groups + i; |
408 | 445 | ||
409 | for (j = 0; g->enum_ids[j]; j++) { | 446 | for (j = 0; g->enum_ids[j]; j++) { |
410 | if (g->enum_ids[j] != enum_id) | 447 | if (g->enum_ids[j] != enum_id) |
@@ -417,19 +454,21 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc, | |||
417 | return 0; | 454 | return 0; |
418 | } | 455 | } |
419 | 456 | ||
420 | static unsigned int __init intc_mask_data(struct intc_desc *desc, | 457 | static unsigned int __init _intc_mask_data(struct intc_desc *desc, |
421 | struct intc_desc_int *d, | 458 | struct intc_desc_int *d, |
422 | intc_enum enum_id, int do_grps) | 459 | intc_enum enum_id, |
460 | unsigned int *reg_idx, | ||
461 | unsigned int *fld_idx) | ||
423 | { | 462 | { |
424 | struct intc_mask_reg *mr = desc->mask_regs; | 463 | struct intc_mask_reg *mr = desc->hw.mask_regs; |
425 | unsigned int i, j, fn, mode; | 464 | unsigned int fn, mode; |
426 | unsigned long reg_e, reg_d; | 465 | unsigned long reg_e, reg_d; |
427 | 466 | ||
428 | for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) { | 467 | while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { |
429 | mr = desc->mask_regs + i; | 468 | mr = desc->hw.mask_regs + *reg_idx; |
430 | 469 | ||
431 | for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { | 470 | for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) { |
432 | if (mr->enum_ids[j] != enum_id) | 471 | if (mr->enum_ids[*fld_idx] != enum_id) |
433 | continue; | 472 | continue; |
434 | 473 | ||
435 | if (mr->set_reg && mr->clr_reg) { | 474 | if (mr->set_reg && mr->clr_reg) { |
@@ -455,29 +494,49 @@ static unsigned int __init intc_mask_data(struct intc_desc *desc, | |||
455 | intc_get_reg(d, reg_e), | 494 | intc_get_reg(d, reg_e), |
456 | intc_get_reg(d, reg_d), | 495 | intc_get_reg(d, reg_d), |
457 | 1, | 496 | 1, |
458 | (mr->reg_width - 1) - j); | 497 | (mr->reg_width - 1) - *fld_idx); |
459 | } | 498 | } |
499 | |||
500 | *fld_idx = 0; | ||
501 | (*reg_idx)++; | ||
460 | } | 502 | } |
461 | 503 | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | static unsigned int __init intc_mask_data(struct intc_desc *desc, | ||
508 | struct intc_desc_int *d, | ||
509 | intc_enum enum_id, int do_grps) | ||
510 | { | ||
511 | unsigned int i = 0; | ||
512 | unsigned int j = 0; | ||
513 | unsigned int ret; | ||
514 | |||
515 | ret = _intc_mask_data(desc, d, enum_id, &i, &j); | ||
516 | if (ret) | ||
517 | return ret; | ||
518 | |||
462 | if (do_grps) | 519 | if (do_grps) |
463 | return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0); | 520 | return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0); |
464 | 521 | ||
465 | return 0; | 522 | return 0; |
466 | } | 523 | } |
467 | 524 | ||
468 | static unsigned int __init intc_prio_data(struct intc_desc *desc, | 525 | static unsigned int __init _intc_prio_data(struct intc_desc *desc, |
469 | struct intc_desc_int *d, | 526 | struct intc_desc_int *d, |
470 | intc_enum enum_id, int do_grps) | 527 | intc_enum enum_id, |
528 | unsigned int *reg_idx, | ||
529 | unsigned int *fld_idx) | ||
471 | { | 530 | { |
472 | struct intc_prio_reg *pr = desc->prio_regs; | 531 | struct intc_prio_reg *pr = desc->hw.prio_regs; |
473 | unsigned int i, j, fn, mode, bit; | 532 | unsigned int fn, n, mode, bit; |
474 | unsigned long reg_e, reg_d; | 533 | unsigned long reg_e, reg_d; |
475 | 534 | ||
476 | for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) { | 535 | while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) { |
477 | pr = desc->prio_regs + i; | 536 | pr = desc->hw.prio_regs + *reg_idx; |
478 | 537 | ||
479 | for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) { | 538 | for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) { |
480 | if (pr->enum_ids[j] != enum_id) | 539 | if (pr->enum_ids[*fld_idx] != enum_id) |
481 | continue; | 540 | continue; |
482 | 541 | ||
483 | if (pr->set_reg && pr->clr_reg) { | 542 | if (pr->set_reg && pr->clr_reg) { |
@@ -495,34 +554,79 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc, | |||
495 | } | 554 | } |
496 | 555 | ||
497 | fn += (pr->reg_width >> 3) - 1; | 556 | fn += (pr->reg_width >> 3) - 1; |
557 | n = *fld_idx + 1; | ||
498 | 558 | ||
499 | BUG_ON((j + 1) * pr->field_width > pr->reg_width); | 559 | BUG_ON(n * pr->field_width > pr->reg_width); |
500 | 560 | ||
501 | bit = pr->reg_width - ((j + 1) * pr->field_width); | 561 | bit = pr->reg_width - (n * pr->field_width); |
502 | 562 | ||
503 | return _INTC_MK(fn, mode, | 563 | return _INTC_MK(fn, mode, |
504 | intc_get_reg(d, reg_e), | 564 | intc_get_reg(d, reg_e), |
505 | intc_get_reg(d, reg_d), | 565 | intc_get_reg(d, reg_d), |
506 | pr->field_width, bit); | 566 | pr->field_width, bit); |
507 | } | 567 | } |
568 | |||
569 | *fld_idx = 0; | ||
570 | (*reg_idx)++; | ||
508 | } | 571 | } |
509 | 572 | ||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | static unsigned int __init intc_prio_data(struct intc_desc *desc, | ||
577 | struct intc_desc_int *d, | ||
578 | intc_enum enum_id, int do_grps) | ||
579 | { | ||
580 | unsigned int i = 0; | ||
581 | unsigned int j = 0; | ||
582 | unsigned int ret; | ||
583 | |||
584 | ret = _intc_prio_data(desc, d, enum_id, &i, &j); | ||
585 | if (ret) | ||
586 | return ret; | ||
587 | |||
510 | if (do_grps) | 588 | if (do_grps) |
511 | return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0); | 589 | return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0); |
512 | 590 | ||
513 | return 0; | 591 | return 0; |
514 | } | 592 | } |
515 | 593 | ||
594 | static void __init intc_enable_disable_enum(struct intc_desc *desc, | ||
595 | struct intc_desc_int *d, | ||
596 | intc_enum enum_id, int enable) | ||
597 | { | ||
598 | unsigned int i, j, data; | ||
599 | |||
600 | /* go through and enable/disable all mask bits */ | ||
601 | i = j = 0; | ||
602 | do { | ||
603 | data = _intc_mask_data(desc, d, enum_id, &i, &j); | ||
604 | if (data) | ||
605 | intc_enable_disable(d, data, enable); | ||
606 | j++; | ||
607 | } while (data); | ||
608 | |||
609 | /* go through and enable/disable all priority fields */ | ||
610 | i = j = 0; | ||
611 | do { | ||
612 | data = _intc_prio_data(desc, d, enum_id, &i, &j); | ||
613 | if (data) | ||
614 | intc_enable_disable(d, data, enable); | ||
615 | |||
616 | j++; | ||
617 | } while (data); | ||
618 | } | ||
619 | |||
516 | static unsigned int __init intc_ack_data(struct intc_desc *desc, | 620 | static unsigned int __init intc_ack_data(struct intc_desc *desc, |
517 | struct intc_desc_int *d, | 621 | struct intc_desc_int *d, |
518 | intc_enum enum_id) | 622 | intc_enum enum_id) |
519 | { | 623 | { |
520 | struct intc_mask_reg *mr = desc->ack_regs; | 624 | struct intc_mask_reg *mr = desc->hw.ack_regs; |
521 | unsigned int i, j, fn, mode; | 625 | unsigned int i, j, fn, mode; |
522 | unsigned long reg_e, reg_d; | 626 | unsigned long reg_e, reg_d; |
523 | 627 | ||
524 | for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) { | 628 | for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) { |
525 | mr = desc->ack_regs + i; | 629 | mr = desc->hw.ack_regs + i; |
526 | 630 | ||
527 | for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { | 631 | for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { |
528 | if (mr->enum_ids[j] != enum_id) | 632 | if (mr->enum_ids[j] != enum_id) |
@@ -549,11 +653,11 @@ static unsigned int __init intc_sense_data(struct intc_desc *desc, | |||
549 | struct intc_desc_int *d, | 653 | struct intc_desc_int *d, |
550 | intc_enum enum_id) | 654 | intc_enum enum_id) |
551 | { | 655 | { |
552 | struct intc_sense_reg *sr = desc->sense_regs; | 656 | struct intc_sense_reg *sr = desc->hw.sense_regs; |
553 | unsigned int i, j, fn, bit; | 657 | unsigned int i, j, fn, bit; |
554 | 658 | ||
555 | for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) { | 659 | for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) { |
556 | sr = desc->sense_regs + i; | 660 | sr = desc->hw.sense_regs + i; |
557 | 661 | ||
558 | for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { | 662 | for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { |
559 | if (sr->enum_ids[j] != enum_id) | 663 | if (sr->enum_ids[j] != enum_id) |
@@ -656,7 +760,7 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
656 | /* irq should be disabled by default */ | 760 | /* irq should be disabled by default */ |
657 | d->chip.mask(irq); | 761 | d->chip.mask(irq); |
658 | 762 | ||
659 | if (desc->ack_regs) | 763 | if (desc->hw.ack_regs) |
660 | ack_handle[irq] = intc_ack_data(desc, d, enum_id); | 764 | ack_handle[irq] = intc_ack_data(desc, d, enum_id); |
661 | } | 765 | } |
662 | 766 | ||
@@ -684,6 +788,7 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) | |||
684 | void __init register_intc_controller(struct intc_desc *desc) | 788 | void __init register_intc_controller(struct intc_desc *desc) |
685 | { | 789 | { |
686 | unsigned int i, k, smp; | 790 | unsigned int i, k, smp; |
791 | struct intc_hw_desc *hw = &desc->hw; | ||
687 | struct intc_desc_int *d; | 792 | struct intc_desc_int *d; |
688 | 793 | ||
689 | d = kzalloc(sizeof(*d), GFP_NOWAIT); | 794 | d = kzalloc(sizeof(*d), GFP_NOWAIT); |
@@ -691,10 +796,10 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
691 | INIT_LIST_HEAD(&d->list); | 796 | INIT_LIST_HEAD(&d->list); |
692 | list_add(&d->list, &intc_list); | 797 | list_add(&d->list, &intc_list); |
693 | 798 | ||
694 | d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0; | 799 | d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0; |
695 | d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0; | 800 | d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; |
696 | d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0; | 801 | d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; |
697 | d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0; | 802 | d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; |
698 | 803 | ||
699 | d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); | 804 | d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); |
700 | #ifdef CONFIG_SMP | 805 | #ifdef CONFIG_SMP |
@@ -702,30 +807,31 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
702 | #endif | 807 | #endif |
703 | k = 0; | 808 | k = 0; |
704 | 809 | ||
705 | if (desc->mask_regs) { | 810 | if (hw->mask_regs) { |
706 | for (i = 0; i < desc->nr_mask_regs; i++) { | 811 | for (i = 0; i < hw->nr_mask_regs; i++) { |
707 | smp = IS_SMP(desc->mask_regs[i]); | 812 | smp = IS_SMP(hw->mask_regs[i]); |
708 | k += save_reg(d, k, desc->mask_regs[i].set_reg, smp); | 813 | k += save_reg(d, k, hw->mask_regs[i].set_reg, smp); |
709 | k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp); | 814 | k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp); |
710 | } | 815 | } |
711 | } | 816 | } |
712 | 817 | ||
713 | if (desc->prio_regs) { | 818 | if (hw->prio_regs) { |
714 | d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT); | 819 | d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio), |
820 | GFP_NOWAIT); | ||
715 | 821 | ||
716 | for (i = 0; i < desc->nr_prio_regs; i++) { | 822 | for (i = 0; i < hw->nr_prio_regs; i++) { |
717 | smp = IS_SMP(desc->prio_regs[i]); | 823 | smp = IS_SMP(hw->prio_regs[i]); |
718 | k += save_reg(d, k, desc->prio_regs[i].set_reg, smp); | 824 | k += save_reg(d, k, hw->prio_regs[i].set_reg, smp); |
719 | k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp); | 825 | k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp); |
720 | } | 826 | } |
721 | } | 827 | } |
722 | 828 | ||
723 | if (desc->sense_regs) { | 829 | if (hw->sense_regs) { |
724 | d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT); | 830 | d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense), |
831 | GFP_NOWAIT); | ||
725 | 832 | ||
726 | for (i = 0; i < desc->nr_sense_regs; i++) { | 833 | for (i = 0; i < hw->nr_sense_regs; i++) |
727 | k += save_reg(d, k, desc->sense_regs[i].reg, 0); | 834 | k += save_reg(d, k, hw->sense_regs[i].reg, 0); |
728 | } | ||
729 | } | 835 | } |
730 | 836 | ||
731 | d->chip.name = desc->name; | 837 | d->chip.name = desc->name; |
@@ -738,18 +844,26 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
738 | d->chip.set_type = intc_set_sense; | 844 | d->chip.set_type = intc_set_sense; |
739 | d->chip.set_wake = intc_set_wake; | 845 | d->chip.set_wake = intc_set_wake; |
740 | 846 | ||
741 | if (desc->ack_regs) { | 847 | if (hw->ack_regs) { |
742 | for (i = 0; i < desc->nr_ack_regs; i++) | 848 | for (i = 0; i < hw->nr_ack_regs; i++) |
743 | k += save_reg(d, k, desc->ack_regs[i].set_reg, 0); | 849 | k += save_reg(d, k, hw->ack_regs[i].set_reg, 0); |
744 | 850 | ||
745 | d->chip.mask_ack = intc_mask_ack; | 851 | d->chip.mask_ack = intc_mask_ack; |
746 | } | 852 | } |
747 | 853 | ||
854 | /* disable bits matching force_disable before registering irqs */ | ||
855 | if (desc->force_disable) | ||
856 | intc_enable_disable_enum(desc, d, desc->force_disable, 0); | ||
857 | |||
858 | /* disable bits matching force_enable before registering irqs */ | ||
859 | if (desc->force_enable) | ||
860 | intc_enable_disable_enum(desc, d, desc->force_enable, 0); | ||
861 | |||
748 | BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ | 862 | BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ |
749 | 863 | ||
750 | /* register the vectors one by one */ | 864 | /* register the vectors one by one */ |
751 | for (i = 0; i < desc->nr_vectors; i++) { | 865 | for (i = 0; i < hw->nr_vectors; i++) { |
752 | struct intc_vect *vect = desc->vectors + i; | 866 | struct intc_vect *vect = hw->vectors + i; |
753 | unsigned int irq = evt2irq(vect->vect); | 867 | unsigned int irq = evt2irq(vect->vect); |
754 | struct irq_desc *irq_desc; | 868 | struct irq_desc *irq_desc; |
755 | 869 | ||
@@ -764,8 +878,8 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
764 | 878 | ||
765 | intc_register_irq(desc, d, vect->enum_id, irq); | 879 | intc_register_irq(desc, d, vect->enum_id, irq); |
766 | 880 | ||
767 | for (k = i + 1; k < desc->nr_vectors; k++) { | 881 | for (k = i + 1; k < hw->nr_vectors; k++) { |
768 | struct intc_vect *vect2 = desc->vectors + k; | 882 | struct intc_vect *vect2 = hw->vectors + k; |
769 | unsigned int irq2 = evt2irq(vect2->vect); | 883 | unsigned int irq2 = evt2irq(vect2->vect); |
770 | 884 | ||
771 | if (vect->enum_id != vect2->enum_id) | 885 | if (vect->enum_id != vect2->enum_id) |
@@ -785,11 +899,15 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
785 | vect2->enum_id = 0; | 899 | vect2->enum_id = 0; |
786 | 900 | ||
787 | /* redirect this interrupts to the first one */ | 901 | /* redirect this interrupts to the first one */ |
788 | set_irq_chip_and_handler_name(irq2, &d->chip, | 902 | set_irq_chip(irq2, &dummy_irq_chip); |
789 | intc_redirect_irq, "redirect"); | 903 | set_irq_chained_handler(irq2, intc_redirect_irq); |
790 | set_irq_data(irq2, (void *)irq); | 904 | set_irq_data(irq2, (void *)irq); |
791 | } | 905 | } |
792 | } | 906 | } |
907 | |||
908 | /* enable bits matching force_enable after registering irqs */ | ||
909 | if (desc->force_enable) | ||
910 | intc_enable_disable_enum(desc, d, desc->force_enable, 1); | ||
793 | } | 911 | } |
794 | 912 | ||
795 | static int intc_suspend(struct sys_device *dev, pm_message_t state) | 913 | static int intc_suspend(struct sys_device *dev, pm_message_t state) |
@@ -872,7 +990,7 @@ device_initcall(register_intc_sysdevs); | |||
872 | /* | 990 | /* |
873 | * Dynamic IRQ allocation and deallocation | 991 | * Dynamic IRQ allocation and deallocation |
874 | */ | 992 | */ |
875 | static unsigned int create_irq_on_node(unsigned int irq_want, int node) | 993 | unsigned int create_irq_nr(unsigned int irq_want, int node) |
876 | { | 994 | { |
877 | unsigned int irq = 0, new; | 995 | unsigned int irq = 0, new; |
878 | unsigned long flags; | 996 | unsigned long flags; |
@@ -881,24 +999,28 @@ static unsigned int create_irq_on_node(unsigned int irq_want, int node) | |||
881 | spin_lock_irqsave(&vector_lock, flags); | 999 | spin_lock_irqsave(&vector_lock, flags); |
882 | 1000 | ||
883 | /* | 1001 | /* |
884 | * First try the wanted IRQ, then scan. | 1002 | * First try the wanted IRQ |
885 | */ | 1003 | */ |
886 | if (test_and_set_bit(irq_want, intc_irq_map)) { | 1004 | if (test_and_set_bit(irq_want, intc_irq_map) == 0) { |
1005 | new = irq_want; | ||
1006 | } else { | ||
1007 | /* .. then fall back to scanning. */ | ||
887 | new = find_first_zero_bit(intc_irq_map, nr_irqs); | 1008 | new = find_first_zero_bit(intc_irq_map, nr_irqs); |
888 | if (unlikely(new == nr_irqs)) | 1009 | if (unlikely(new == nr_irqs)) |
889 | goto out_unlock; | 1010 | goto out_unlock; |
890 | 1011 | ||
891 | desc = irq_to_desc_alloc_node(new, node); | ||
892 | if (unlikely(!desc)) { | ||
893 | pr_info("can't get irq_desc for %d\n", new); | ||
894 | goto out_unlock; | ||
895 | } | ||
896 | |||
897 | desc = move_irq_desc(desc, node); | ||
898 | __set_bit(new, intc_irq_map); | 1012 | __set_bit(new, intc_irq_map); |
899 | irq = new; | ||
900 | } | 1013 | } |
901 | 1014 | ||
1015 | desc = irq_to_desc_alloc_node(new, node); | ||
1016 | if (unlikely(!desc)) { | ||
1017 | pr_info("can't get irq_desc for %d\n", new); | ||
1018 | goto out_unlock; | ||
1019 | } | ||
1020 | |||
1021 | desc = move_irq_desc(desc, node); | ||
1022 | irq = new; | ||
1023 | |||
902 | out_unlock: | 1024 | out_unlock: |
903 | spin_unlock_irqrestore(&vector_lock, flags); | 1025 | spin_unlock_irqrestore(&vector_lock, flags); |
904 | 1026 | ||
@@ -913,7 +1035,7 @@ int create_irq(void) | |||
913 | int nid = cpu_to_node(smp_processor_id()); | 1035 | int nid = cpu_to_node(smp_processor_id()); |
914 | int irq; | 1036 | int irq; |
915 | 1037 | ||
916 | irq = create_irq_on_node(NR_IRQS_LEGACY, nid); | 1038 | irq = create_irq_nr(NR_IRQS_LEGACY, nid); |
917 | if (irq == 0) | 1039 | if (irq == 0) |
918 | irq = -1; | 1040 | irq = -1; |
919 | 1041 | ||
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c index 082604edc4c2..cf0303acab8e 100644 --- a/drivers/sh/pfc.c +++ b/drivers/sh/pfc.c | |||
@@ -337,12 +337,39 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio, | |||
337 | if (!enum_id) | 337 | if (!enum_id) |
338 | break; | 338 | break; |
339 | 339 | ||
340 | /* first check if this is a function enum */ | ||
340 | in_range = enum_in_range(enum_id, &gpioc->function); | 341 | in_range = enum_in_range(enum_id, &gpioc->function); |
341 | if (!in_range && range) { | 342 | if (!in_range) { |
342 | in_range = enum_in_range(enum_id, range); | 343 | /* not a function enum */ |
343 | 344 | if (range) { | |
344 | if (in_range && enum_id == range->force) | 345 | /* |
345 | continue; | 346 | * other range exists, so this pin is |
347 | * a regular GPIO pin that now is being | ||
348 | * bound to a specific direction. | ||
349 | * | ||
350 | * for this case we only allow function enums | ||
351 | * and the enums that match the other range. | ||
352 | */ | ||
353 | in_range = enum_in_range(enum_id, range); | ||
354 | |||
355 | /* | ||
356 | * special case pass through for fixed | ||
357 | * input-only or output-only pins without | ||
358 | * function enum register association. | ||
359 | */ | ||
360 | if (in_range && enum_id == range->force) | ||
361 | continue; | ||
362 | } else { | ||
363 | /* | ||
364 | * no other range exists, so this pin | ||
365 | * must then be of the function type. | ||
366 | * | ||
367 | * allow function type pins to select | ||
368 | * any combination of function/in/out | ||
369 | * in their MARK lists. | ||
370 | */ | ||
371 | in_range = 1; | ||
372 | } | ||
346 | } | 373 | } |
347 | 374 | ||
348 | if (!in_range) | 375 | if (!in_range) |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f55eb0107336..0fee95cd9a49 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -100,6 +100,23 @@ config SPI_BUTTERFLY | |||
100 | inexpensive battery powered microcontroller evaluation board. | 100 | inexpensive battery powered microcontroller evaluation board. |
101 | This same cable can be used to flash new firmware. | 101 | This same cable can be used to flash new firmware. |
102 | 102 | ||
103 | config SPI_COLDFIRE_QSPI | ||
104 | tristate "Freescale Coldfire QSPI controller" | ||
105 | depends on (M520x || M523x || M5249 || M527x || M528x || M532x) | ||
106 | help | ||
107 | This enables support for the Coldfire QSPI controller in master | ||
108 | mode. | ||
109 | |||
110 | This driver can also be built as a module. If so, the module | ||
111 | will be called coldfire_qspi. | ||
112 | |||
113 | config SPI_DAVINCI | ||
114 | tristate "SPI controller driver for DaVinci/DA8xx SoC's" | ||
115 | depends on SPI_MASTER && ARCH_DAVINCI | ||
116 | select SPI_BITBANG | ||
117 | help | ||
118 | SPI master controller for DaVinci and DA8xx SPI modules. | ||
119 | |||
103 | config SPI_GPIO | 120 | config SPI_GPIO |
104 | tristate "GPIO-based bitbanging SPI Master" | 121 | tristate "GPIO-based bitbanging SPI Master" |
105 | depends on GENERIC_GPIO | 122 | depends on GENERIC_GPIO |
@@ -308,7 +325,7 @@ config SPI_NUC900 | |||
308 | # | 325 | # |
309 | 326 | ||
310 | config SPI_DESIGNWARE | 327 | config SPI_DESIGNWARE |
311 | bool "DesignWare SPI controller core support" | 328 | tristate "DesignWare SPI controller core support" |
312 | depends on SPI_MASTER | 329 | depends on SPI_MASTER |
313 | help | 330 | help |
314 | general driver for SPI controller core from DesignWare | 331 | general driver for SPI controller core from DesignWare |
@@ -317,6 +334,10 @@ config SPI_DW_PCI | |||
317 | tristate "PCI interface driver for DW SPI core" | 334 | tristate "PCI interface driver for DW SPI core" |
318 | depends on SPI_DESIGNWARE && PCI | 335 | depends on SPI_DESIGNWARE && PCI |
319 | 336 | ||
337 | config SPI_DW_MMIO | ||
338 | tristate "Memory-mapped io interface driver for DW SPI core" | ||
339 | depends on SPI_DESIGNWARE && HAVE_CLK | ||
340 | |||
320 | # | 341 | # |
321 | # There are lots of SPI device types, with sensors and memory | 342 | # There are lots of SPI device types, with sensors and memory |
322 | # being probably the most widely used ones. | 343 | # being probably the most widely used ones. |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index f3d2810ba11c..d7d0f89b797b 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -16,8 +16,11 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o | |||
16 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o | 16 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o |
17 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o | 17 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o |
18 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o | 18 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o |
19 | obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o | ||
20 | obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o | ||
19 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o | 21 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o |
20 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o | 22 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o |
23 | obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o | ||
21 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o | 24 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o |
22 | obj-$(CONFIG_SPI_IMX) += spi_imx.o | 25 | obj-$(CONFIG_SPI_IMX) += spi_imx.o |
23 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o | 26 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o |
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c new file mode 100644 index 000000000000..59be3efe0636 --- /dev/null +++ b/drivers/spi/coldfire_qspi.c | |||
@@ -0,0 +1,640 @@ | |||
1 | /* | ||
2 | * Freescale/Motorola Coldfire Queued SPI driver | ||
3 | * | ||
4 | * Copyright 2010 Steven King <sfking@fdwdc.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/clk.h> | ||
31 | #include <linux/err.h> | ||
32 | #include <linux/spi/spi.h> | ||
33 | |||
34 | #include <asm/coldfire.h> | ||
35 | #include <asm/mcfqspi.h> | ||
36 | |||
37 | #define DRIVER_NAME "mcfqspi" | ||
38 | |||
39 | #define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) | ||
40 | |||
41 | #define MCFQSPI_QMR 0x00 | ||
42 | #define MCFQSPI_QMR_MSTR 0x8000 | ||
43 | #define MCFQSPI_QMR_CPOL 0x0200 | ||
44 | #define MCFQSPI_QMR_CPHA 0x0100 | ||
45 | #define MCFQSPI_QDLYR 0x04 | ||
46 | #define MCFQSPI_QDLYR_SPE 0x8000 | ||
47 | #define MCFQSPI_QWR 0x08 | ||
48 | #define MCFQSPI_QWR_HALT 0x8000 | ||
49 | #define MCFQSPI_QWR_WREN 0x4000 | ||
50 | #define MCFQSPI_QWR_CSIV 0x1000 | ||
51 | #define MCFQSPI_QIR 0x0C | ||
52 | #define MCFQSPI_QIR_WCEFB 0x8000 | ||
53 | #define MCFQSPI_QIR_ABRTB 0x4000 | ||
54 | #define MCFQSPI_QIR_ABRTL 0x1000 | ||
55 | #define MCFQSPI_QIR_WCEFE 0x0800 | ||
56 | #define MCFQSPI_QIR_ABRTE 0x0400 | ||
57 | #define MCFQSPI_QIR_SPIFE 0x0100 | ||
58 | #define MCFQSPI_QIR_WCEF 0x0008 | ||
59 | #define MCFQSPI_QIR_ABRT 0x0004 | ||
60 | #define MCFQSPI_QIR_SPIF 0x0001 | ||
61 | #define MCFQSPI_QAR 0x010 | ||
62 | #define MCFQSPI_QAR_TXBUF 0x00 | ||
63 | #define MCFQSPI_QAR_RXBUF 0x10 | ||
64 | #define MCFQSPI_QAR_CMDBUF 0x20 | ||
65 | #define MCFQSPI_QDR 0x014 | ||
66 | #define MCFQSPI_QCR 0x014 | ||
67 | #define MCFQSPI_QCR_CONT 0x8000 | ||
68 | #define MCFQSPI_QCR_BITSE 0x4000 | ||
69 | #define MCFQSPI_QCR_DT 0x2000 | ||
70 | |||
71 | struct mcfqspi { | ||
72 | void __iomem *iobase; | ||
73 | int irq; | ||
74 | struct clk *clk; | ||
75 | struct mcfqspi_cs_control *cs_control; | ||
76 | |||
77 | wait_queue_head_t waitq; | ||
78 | |||
79 | struct work_struct work; | ||
80 | struct workqueue_struct *workq; | ||
81 | spinlock_t lock; | ||
82 | struct list_head msgq; | ||
83 | }; | ||
84 | |||
85 | static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) | ||
86 | { | ||
87 | writew(val, mcfqspi->iobase + MCFQSPI_QMR); | ||
88 | } | ||
89 | |||
90 | static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) | ||
91 | { | ||
92 | writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); | ||
93 | } | ||
94 | |||
95 | static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) | ||
96 | { | ||
97 | return readw(mcfqspi->iobase + MCFQSPI_QDLYR); | ||
98 | } | ||
99 | |||
100 | static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) | ||
101 | { | ||
102 | writew(val, mcfqspi->iobase + MCFQSPI_QWR); | ||
103 | } | ||
104 | |||
105 | static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) | ||
106 | { | ||
107 | writew(val, mcfqspi->iobase + MCFQSPI_QIR); | ||
108 | } | ||
109 | |||
110 | static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) | ||
111 | { | ||
112 | writew(val, mcfqspi->iobase + MCFQSPI_QAR); | ||
113 | } | ||
114 | |||
115 | static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) | ||
116 | { | ||
117 | writew(val, mcfqspi->iobase + MCFQSPI_QDR); | ||
118 | } | ||
119 | |||
120 | static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) | ||
121 | { | ||
122 | return readw(mcfqspi->iobase + MCFQSPI_QDR); | ||
123 | } | ||
124 | |||
125 | static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, | ||
126 | bool cs_high) | ||
127 | { | ||
128 | mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high); | ||
129 | } | ||
130 | |||
131 | static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, | ||
132 | bool cs_high) | ||
133 | { | ||
134 | mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high); | ||
135 | } | ||
136 | |||
137 | static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) | ||
138 | { | ||
139 | return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? | ||
140 | mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; | ||
141 | } | ||
142 | |||
143 | static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) | ||
144 | { | ||
145 | if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) | ||
146 | mcfqspi->cs_control->teardown(mcfqspi->cs_control); | ||
147 | } | ||
148 | |||
149 | static u8 mcfqspi_qmr_baud(u32 speed_hz) | ||
150 | { | ||
151 | return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); | ||
152 | } | ||
153 | |||
154 | static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) | ||
155 | { | ||
156 | return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; | ||
157 | } | ||
158 | |||
159 | static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) | ||
160 | { | ||
161 | struct mcfqspi *mcfqspi = dev_id; | ||
162 | |||
163 | /* clear interrupt */ | ||
164 | mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); | ||
165 | wake_up(&mcfqspi->waitq); | ||
166 | |||
167 | return IRQ_HANDLED; | ||
168 | } | ||
169 | |||
170 | static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, | ||
171 | const u8 *txbuf, u8 *rxbuf) | ||
172 | { | ||
173 | unsigned i, n, offset = 0; | ||
174 | |||
175 | n = min(count, 16u); | ||
176 | |||
177 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); | ||
178 | for (i = 0; i < n; ++i) | ||
179 | mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); | ||
180 | |||
181 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); | ||
182 | if (txbuf) | ||
183 | for (i = 0; i < n; ++i) | ||
184 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
185 | else | ||
186 | for (i = 0; i < count; ++i) | ||
187 | mcfqspi_wr_qdr(mcfqspi, 0); | ||
188 | |||
189 | count -= n; | ||
190 | if (count) { | ||
191 | u16 qwr = 0xf08; | ||
192 | mcfqspi_wr_qwr(mcfqspi, 0x700); | ||
193 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
194 | |||
195 | do { | ||
196 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
197 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
198 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
199 | if (rxbuf) { | ||
200 | mcfqspi_wr_qar(mcfqspi, | ||
201 | MCFQSPI_QAR_RXBUF + offset); | ||
202 | for (i = 0; i < 8; ++i) | ||
203 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
204 | } | ||
205 | n = min(count, 8u); | ||
206 | if (txbuf) { | ||
207 | mcfqspi_wr_qar(mcfqspi, | ||
208 | MCFQSPI_QAR_TXBUF + offset); | ||
209 | for (i = 0; i < n; ++i) | ||
210 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
211 | } | ||
212 | qwr = (offset ? 0x808 : 0) + ((n - 1) << 8); | ||
213 | offset ^= 8; | ||
214 | count -= n; | ||
215 | } while (count); | ||
216 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
217 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
218 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
219 | if (rxbuf) { | ||
220 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
221 | for (i = 0; i < 8; ++i) | ||
222 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
223 | offset ^= 8; | ||
224 | } | ||
225 | } else { | ||
226 | mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); | ||
227 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
228 | } | ||
229 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
230 | if (rxbuf) { | ||
231 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
232 | for (i = 0; i < n; ++i) | ||
233 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, | ||
238 | const u16 *txbuf, u16 *rxbuf) | ||
239 | { | ||
240 | unsigned i, n, offset = 0; | ||
241 | |||
242 | n = min(count, 16u); | ||
243 | |||
244 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); | ||
245 | for (i = 0; i < n; ++i) | ||
246 | mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); | ||
247 | |||
248 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); | ||
249 | if (txbuf) | ||
250 | for (i = 0; i < n; ++i) | ||
251 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
252 | else | ||
253 | for (i = 0; i < count; ++i) | ||
254 | mcfqspi_wr_qdr(mcfqspi, 0); | ||
255 | |||
256 | count -= n; | ||
257 | if (count) { | ||
258 | u16 qwr = 0xf08; | ||
259 | mcfqspi_wr_qwr(mcfqspi, 0x700); | ||
260 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
261 | |||
262 | do { | ||
263 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
264 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
265 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
266 | if (rxbuf) { | ||
267 | mcfqspi_wr_qar(mcfqspi, | ||
268 | MCFQSPI_QAR_RXBUF + offset); | ||
269 | for (i = 0; i < 8; ++i) | ||
270 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
271 | } | ||
272 | n = min(count, 8u); | ||
273 | if (txbuf) { | ||
274 | mcfqspi_wr_qar(mcfqspi, | ||
275 | MCFQSPI_QAR_TXBUF + offset); | ||
276 | for (i = 0; i < n; ++i) | ||
277 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
278 | } | ||
279 | qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); | ||
280 | offset ^= 8; | ||
281 | count -= n; | ||
282 | } while (count); | ||
283 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
284 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
285 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
286 | if (rxbuf) { | ||
287 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
288 | for (i = 0; i < 8; ++i) | ||
289 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
290 | offset ^= 8; | ||
291 | } | ||
292 | } else { | ||
293 | mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); | ||
294 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
295 | } | ||
296 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
297 | if (rxbuf) { | ||
298 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
299 | for (i = 0; i < n; ++i) | ||
300 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | static void mcfqspi_work(struct work_struct *work) | ||
305 | { | ||
306 | struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); | ||
307 | unsigned long flags; | ||
308 | |||
309 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
310 | while (!list_empty(&mcfqspi->msgq)) { | ||
311 | struct spi_message *msg; | ||
312 | struct spi_device *spi; | ||
313 | struct spi_transfer *xfer; | ||
314 | int status = 0; | ||
315 | |||
316 | msg = container_of(mcfqspi->msgq.next, struct spi_message, | ||
317 | queue); | ||
318 | |||
319 | list_del_init(&mcfqspi->msgq); | ||
320 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
321 | |||
322 | spi = msg->spi; | ||
323 | |||
324 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
325 | bool cs_high = spi->mode & SPI_CS_HIGH; | ||
326 | u16 qmr = MCFQSPI_QMR_MSTR; | ||
327 | |||
328 | if (xfer->bits_per_word) | ||
329 | qmr |= xfer->bits_per_word << 10; | ||
330 | else | ||
331 | qmr |= spi->bits_per_word << 10; | ||
332 | if (spi->mode & SPI_CPHA) | ||
333 | qmr |= MCFQSPI_QMR_CPHA; | ||
334 | if (spi->mode & SPI_CPOL) | ||
335 | qmr |= MCFQSPI_QMR_CPOL; | ||
336 | if (xfer->speed_hz) | ||
337 | qmr |= mcfqspi_qmr_baud(xfer->speed_hz); | ||
338 | else | ||
339 | qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); | ||
340 | mcfqspi_wr_qmr(mcfqspi, qmr); | ||
341 | |||
342 | mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); | ||
343 | |||
344 | mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); | ||
345 | if ((xfer->bits_per_word ? xfer->bits_per_word : | ||
346 | spi->bits_per_word) == 8) | ||
347 | mcfqspi_transfer_msg8(mcfqspi, xfer->len, | ||
348 | xfer->tx_buf, | ||
349 | xfer->rx_buf); | ||
350 | else | ||
351 | mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, | ||
352 | xfer->tx_buf, | ||
353 | xfer->rx_buf); | ||
354 | mcfqspi_wr_qir(mcfqspi, 0); | ||
355 | |||
356 | if (xfer->delay_usecs) | ||
357 | udelay(xfer->delay_usecs); | ||
358 | if (xfer->cs_change) { | ||
359 | if (!list_is_last(&xfer->transfer_list, | ||
360 | &msg->transfers)) | ||
361 | mcfqspi_cs_deselect(mcfqspi, | ||
362 | spi->chip_select, | ||
363 | cs_high); | ||
364 | } else { | ||
365 | if (list_is_last(&xfer->transfer_list, | ||
366 | &msg->transfers)) | ||
367 | mcfqspi_cs_deselect(mcfqspi, | ||
368 | spi->chip_select, | ||
369 | cs_high); | ||
370 | } | ||
371 | msg->actual_length += xfer->len; | ||
372 | } | ||
373 | msg->status = status; | ||
374 | msg->complete(msg->context); | ||
375 | |||
376 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
377 | } | ||
378 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
379 | } | ||
380 | |||
381 | static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) | ||
382 | { | ||
383 | struct mcfqspi *mcfqspi; | ||
384 | struct spi_transfer *xfer; | ||
385 | unsigned long flags; | ||
386 | |||
387 | mcfqspi = spi_master_get_devdata(spi->master); | ||
388 | |||
389 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
390 | if (xfer->bits_per_word && ((xfer->bits_per_word < 8) | ||
391 | || (xfer->bits_per_word > 16))) { | ||
392 | dev_dbg(&spi->dev, | ||
393 | "%d bits per word is not supported\n", | ||
394 | xfer->bits_per_word); | ||
395 | goto fail; | ||
396 | } | ||
397 | if (xfer->speed_hz) { | ||
398 | u32 real_speed = MCFQSPI_BUSCLK / | ||
399 | mcfqspi_qmr_baud(xfer->speed_hz); | ||
400 | if (real_speed != xfer->speed_hz) | ||
401 | dev_dbg(&spi->dev, | ||
402 | "using speed %d instead of %d\n", | ||
403 | real_speed, xfer->speed_hz); | ||
404 | } | ||
405 | } | ||
406 | msg->status = -EINPROGRESS; | ||
407 | msg->actual_length = 0; | ||
408 | |||
409 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
410 | list_add_tail(&msg->queue, &mcfqspi->msgq); | ||
411 | queue_work(mcfqspi->workq, &mcfqspi->work); | ||
412 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
413 | |||
414 | return 0; | ||
415 | fail: | ||
416 | msg->status = -EINVAL; | ||
417 | return -EINVAL; | ||
418 | } | ||
419 | |||
420 | static int mcfqspi_setup(struct spi_device *spi) | ||
421 | { | ||
422 | if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { | ||
423 | dev_dbg(&spi->dev, "%d bits per word is not supported\n", | ||
424 | spi->bits_per_word); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | if (spi->chip_select >= spi->master->num_chipselect) { | ||
428 | dev_dbg(&spi->dev, "%d chip select is out of range\n", | ||
429 | spi->chip_select); | ||
430 | return -EINVAL; | ||
431 | } | ||
432 | |||
433 | mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), | ||
434 | spi->chip_select, spi->mode & SPI_CS_HIGH); | ||
435 | |||
436 | dev_dbg(&spi->dev, | ||
437 | "bits per word %d, chip select %d, speed %d KHz\n", | ||
438 | spi->bits_per_word, spi->chip_select, | ||
439 | (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) | ||
440 | / 1000); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | static int __devinit mcfqspi_probe(struct platform_device *pdev) | ||
446 | { | ||
447 | struct spi_master *master; | ||
448 | struct mcfqspi *mcfqspi; | ||
449 | struct resource *res; | ||
450 | struct mcfqspi_platform_data *pdata; | ||
451 | int status; | ||
452 | |||
453 | master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); | ||
454 | if (master == NULL) { | ||
455 | dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); | ||
456 | return -ENOMEM; | ||
457 | } | ||
458 | |||
459 | mcfqspi = spi_master_get_devdata(master); | ||
460 | |||
461 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
462 | if (!res) { | ||
463 | dev_dbg(&pdev->dev, "platform_get_resource failed\n"); | ||
464 | status = -ENXIO; | ||
465 | goto fail0; | ||
466 | } | ||
467 | |||
468 | if (!request_mem_region(res->start, resource_size(res), pdev->name)) { | ||
469 | dev_dbg(&pdev->dev, "request_mem_region failed\n"); | ||
470 | status = -EBUSY; | ||
471 | goto fail0; | ||
472 | } | ||
473 | |||
474 | mcfqspi->iobase = ioremap(res->start, resource_size(res)); | ||
475 | if (!mcfqspi->iobase) { | ||
476 | dev_dbg(&pdev->dev, "ioremap failed\n"); | ||
477 | status = -ENOMEM; | ||
478 | goto fail1; | ||
479 | } | ||
480 | |||
481 | mcfqspi->irq = platform_get_irq(pdev, 0); | ||
482 | if (mcfqspi->irq < 0) { | ||
483 | dev_dbg(&pdev->dev, "platform_get_irq failed\n"); | ||
484 | status = -ENXIO; | ||
485 | goto fail2; | ||
486 | } | ||
487 | |||
488 | status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, | ||
489 | pdev->name, mcfqspi); | ||
490 | if (status) { | ||
491 | dev_dbg(&pdev->dev, "request_irq failed\n"); | ||
492 | goto fail2; | ||
493 | } | ||
494 | |||
495 | mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); | ||
496 | if (IS_ERR(mcfqspi->clk)) { | ||
497 | dev_dbg(&pdev->dev, "clk_get failed\n"); | ||
498 | status = PTR_ERR(mcfqspi->clk); | ||
499 | goto fail3; | ||
500 | } | ||
501 | clk_enable(mcfqspi->clk); | ||
502 | |||
503 | mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); | ||
504 | if (!mcfqspi->workq) { | ||
505 | dev_dbg(&pdev->dev, "create_workqueue failed\n"); | ||
506 | status = -ENOMEM; | ||
507 | goto fail4; | ||
508 | } | ||
509 | INIT_WORK(&mcfqspi->work, mcfqspi_work); | ||
510 | spin_lock_init(&mcfqspi->lock); | ||
511 | INIT_LIST_HEAD(&mcfqspi->msgq); | ||
512 | init_waitqueue_head(&mcfqspi->waitq); | ||
513 | |||
514 | pdata = pdev->dev.platform_data; | ||
515 | if (!pdata) { | ||
516 | dev_dbg(&pdev->dev, "platform data is missing\n"); | ||
517 | goto fail5; | ||
518 | } | ||
519 | master->bus_num = pdata->bus_num; | ||
520 | master->num_chipselect = pdata->num_chipselect; | ||
521 | |||
522 | mcfqspi->cs_control = pdata->cs_control; | ||
523 | status = mcfqspi_cs_setup(mcfqspi); | ||
524 | if (status) { | ||
525 | dev_dbg(&pdev->dev, "error initializing cs_control\n"); | ||
526 | goto fail5; | ||
527 | } | ||
528 | |||
529 | master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; | ||
530 | master->setup = mcfqspi_setup; | ||
531 | master->transfer = mcfqspi_transfer; | ||
532 | |||
533 | platform_set_drvdata(pdev, master); | ||
534 | |||
535 | status = spi_register_master(master); | ||
536 | if (status) { | ||
537 | dev_dbg(&pdev->dev, "spi_register_master failed\n"); | ||
538 | goto fail6; | ||
539 | } | ||
540 | dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); | ||
541 | |||
542 | return 0; | ||
543 | |||
544 | fail6: | ||
545 | mcfqspi_cs_teardown(mcfqspi); | ||
546 | fail5: | ||
547 | destroy_workqueue(mcfqspi->workq); | ||
548 | fail4: | ||
549 | clk_disable(mcfqspi->clk); | ||
550 | clk_put(mcfqspi->clk); | ||
551 | fail3: | ||
552 | free_irq(mcfqspi->irq, mcfqspi); | ||
553 | fail2: | ||
554 | iounmap(mcfqspi->iobase); | ||
555 | fail1: | ||
556 | release_mem_region(res->start, resource_size(res)); | ||
557 | fail0: | ||
558 | spi_master_put(master); | ||
559 | |||
560 | dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); | ||
561 | |||
562 | return status; | ||
563 | } | ||
564 | |||
565 | static int __devexit mcfqspi_remove(struct platform_device *pdev) | ||
566 | { | ||
567 | struct spi_master *master = platform_get_drvdata(pdev); | ||
568 | struct mcfqspi *mcfqspi = spi_master_get_devdata(master); | ||
569 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
570 | |||
571 | /* disable the hardware (set the baud rate to 0) */ | ||
572 | mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); | ||
573 | |||
574 | platform_set_drvdata(pdev, NULL); | ||
575 | mcfqspi_cs_teardown(mcfqspi); | ||
576 | destroy_workqueue(mcfqspi->workq); | ||
577 | clk_disable(mcfqspi->clk); | ||
578 | clk_put(mcfqspi->clk); | ||
579 | free_irq(mcfqspi->irq, mcfqspi); | ||
580 | iounmap(mcfqspi->iobase); | ||
581 | release_mem_region(res->start, resource_size(res)); | ||
582 | spi_unregister_master(master); | ||
583 | spi_master_put(master); | ||
584 | |||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | #ifdef CONFIG_PM | ||
589 | |||
590 | static int mcfqspi_suspend(struct device *dev) | ||
591 | { | ||
592 | struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); | ||
593 | |||
594 | clk_disable(mcfqspi->clk); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static int mcfqspi_resume(struct device *dev) | ||
600 | { | ||
601 | struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); | ||
602 | |||
603 | clk_enable(mcfqspi->clk); | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | static struct dev_pm_ops mcfqspi_dev_pm_ops = { | ||
609 | .suspend = mcfqspi_suspend, | ||
610 | .resume = mcfqspi_resume, | ||
611 | }; | ||
612 | |||
613 | #define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) | ||
614 | #else | ||
615 | #define MCFQSPI_DEV_PM_OPS NULL | ||
616 | #endif | ||
617 | |||
618 | static struct platform_driver mcfqspi_driver = { | ||
619 | .driver.name = DRIVER_NAME, | ||
620 | .driver.owner = THIS_MODULE, | ||
621 | .driver.pm = MCFQSPI_DEV_PM_OPS, | ||
622 | .remove = __devexit_p(mcfqspi_remove), | ||
623 | }; | ||
624 | |||
625 | static int __init mcfqspi_init(void) | ||
626 | { | ||
627 | return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); | ||
628 | } | ||
629 | module_init(mcfqspi_init); | ||
630 | |||
631 | static void __exit mcfqspi_exit(void) | ||
632 | { | ||
633 | platform_driver_unregister(&mcfqspi_driver); | ||
634 | } | ||
635 | module_exit(mcfqspi_exit); | ||
636 | |||
637 | MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); | ||
638 | MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); | ||
639 | MODULE_LICENSE("GPL"); | ||
640 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c new file mode 100644 index 000000000000..225ab60b02c4 --- /dev/null +++ b/drivers/spi/davinci_spi.c | |||
@@ -0,0 +1,1255 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Texas Instruments. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/clk.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/spi/spi.h> | ||
29 | #include <linux/spi/spi_bitbang.h> | ||
30 | |||
31 | #include <mach/spi.h> | ||
32 | #include <mach/edma.h> | ||
33 | |||
/* Sentinel for a missing/unassigned platform resource. */
#define SPI_NO_RESOURCE		((resource_size_t)-1)

/* This driver handles at most two chip selects (see struct davinci_spi). */
#define SPI_MAX_CHIPSELECT	2

/* Value written to SPIDEF/SPIDAT1 CSNR field: all chip selects inactive. */
#define CS_DEFAULT	0xFF

#define SPI_BUFSIZ	(SMP_CACHE_BYTES + 1)

/* EDMA element sizes in bytes for 8/16/32-bit words. */
#define DAVINCI_DMA_DATA_TYPE_S8	0x01
#define DAVINCI_DMA_DATA_TYPE_S16	0x02
#define DAVINCI_DMA_DATA_TYPE_S32	0x04

/* SPIFMTn: per-chip-select format register bits. */
#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_CHARLEN_MASK	0x0000001Fu

/* SPIGCR1 */
#define SPIGCR1_SPIENA_MASK	0x01000000u

/* SPIPC0: pin function control. */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */
#define SPIPC0_EN1FUN_MASK	BIT(1)
#define SPIPC0_EN0FUN_MASK	BIT(0)

/* SPIINT: aggregate mask and interrupt priority levels. */
#define SPIINT_MASKALL		0x0101035F
#define SPI_INTLVL_1		0x000001FFu
#define SPI_INTLVL_0		0x00000000u

/* SPIDAT1 */
#define SPIDAT1_CSHOLD_SHIFT	28
#define SPIDAT1_CSNR_SHIFT	16
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* Error Masks (SPIFLG register) */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_RX_INTR_MASK		BIT(8)
#define SPIFLG_TX_INTR_MASK		BIT(9)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_MASK			(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \
				| SPIFLG_TX_INTR_MASK \
				| SPIFLG_BUF_INIT_ACTIVE_MASK)

/* SPIINT: individual interrupt enable bits. */
#define SPIINT_DLEN_ERR_INTR	BIT(0)
#define SPIINT_TIMEOUT_INTR	BIT(1)
#define SPIINT_PARERR_INTR	BIT(2)
#define SPIINT_DESYNC_INTR	BIT(3)
#define SPIINT_BITERR_INTR	BIT(4)
#define SPIINT_OVRRUN_INTR	BIT(6)
#define SPIINT_RX_INTR		BIT(8)
#define SPIINT_TX_INTR		BIT(9)
#define SPIINT_DMA_REQ_EN	BIT(16)
#define SPIINT_ENABLE_HIGHZ	BIT(24)

/* SPIDELAY field positions. */
#define SPI_T2CDELAY_SHIFT	16
#define SPI_C2TDELAY_SHIFT	24

/* SPI Controller registers (byte offsets from the ioremapped base). */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIPC1		0x18
#define SPIPC2		0x1c
#define SPIPC3		0x20
#define SPIPC4		0x24
#define SPIPC5		0x28
#define SPIPC6		0x2c
#define SPIPC7		0x30
#define SPIPC8		0x34
#define SPIDAT0		0x38
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIEMU		0x44
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50
#define SPIFMT1		0x54
#define SPIFMT2		0x58
#define SPIFMT3		0x5c
#define TGINTVEC0	0x60
#define TGINTVEC1	0x64
140 | |||
/* Per-chip-select state, indexed by spi->chip_select. */
struct davinci_spi_slave {
	u32	cmd_to_write;		/* reset to 0 in davinci_spi_setup() */
	u32	clk_ctrl_to_write;	/* NOTE(review): not used in visible code */
	u32	bytes_per_word;		/* 1 or 2, derived from bits_per_word */
	u8	active_cs;		/* NOTE(review): not used in visible code */
};
147 | |||
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int			dma_tx_channel;	/* -1 while unallocated */
	int			dma_rx_channel;	/* -1 while unallocated */
	int			dma_tx_sync_dev;	/* EDMA sync event for TX */
	int			dma_rx_sync_dev;	/* EDMA sync event for RX */
	enum dma_event_q	eventq;		/* EDMA event queue to use */

	struct completion	dma_tx_completion;
	struct completion	dma_rx_completion;
};
159 | |||
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;	/* bitbang framework state */
	struct clk		*clk;		/* controller functional clock */

	u8			version;	/* compared against SPI_VERSION_2 */
	resource_size_t		pbase;		/* physical base; used for DMA FIFO addresses */
	void __iomem		*base;		/* ioremapped register base */
	size_t			region_size;
	u32			irq;
	struct completion	done;

	const void		*tx;		/* cursor into current TX buffer */
	void			*rx;		/* cursor into current RX buffer */
	u8			*tmp_buf;	/* dummy TX source for RX-only DMA */
	int			count;		/* word count of current transfer */
	struct davinci_spi_dma	*dma_channels;	/* per-CS DMA state; may be NULL */
	struct davinci_spi_platform_data *pdata;

	/* transfer-width-specific accessors, chosen in setup_transfer() */
	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};
184 | |||
185 | static unsigned use_dma; | ||
186 | |||
187 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) | ||
188 | { | ||
189 | u8 *rx = davinci_spi->rx; | ||
190 | |||
191 | *rx++ = (u8)data; | ||
192 | davinci_spi->rx = rx; | ||
193 | } | ||
194 | |||
195 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) | ||
196 | { | ||
197 | u16 *rx = davinci_spi->rx; | ||
198 | |||
199 | *rx++ = (u16)data; | ||
200 | davinci_spi->rx = rx; | ||
201 | } | ||
202 | |||
203 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) | ||
204 | { | ||
205 | u32 data; | ||
206 | const u8 *tx = davinci_spi->tx; | ||
207 | |||
208 | data = *tx++; | ||
209 | davinci_spi->tx = tx; | ||
210 | return data; | ||
211 | } | ||
212 | |||
213 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) | ||
214 | { | ||
215 | u32 data; | ||
216 | const u16 *tx = davinci_spi->tx; | ||
217 | |||
218 | data = *tx++; | ||
219 | davinci_spi->tx = tx; | ||
220 | return data; | ||
221 | } | ||
222 | |||
223 | static inline void set_io_bits(void __iomem *addr, u32 bits) | ||
224 | { | ||
225 | u32 v = ioread32(addr); | ||
226 | |||
227 | v |= bits; | ||
228 | iowrite32(v, addr); | ||
229 | } | ||
230 | |||
231 | static inline void clear_io_bits(void __iomem *addr, u32 bits) | ||
232 | { | ||
233 | u32 v = ioread32(addr); | ||
234 | |||
235 | v &= ~bits; | ||
236 | iowrite32(v, addr); | ||
237 | } | ||
238 | |||
239 | static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
240 | { | ||
241 | set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
242 | } | ||
243 | |||
244 | static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
245 | { | ||
246 | clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
247 | } | ||
248 | |||
249 | static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) | ||
250 | { | ||
251 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
252 | |||
253 | if (enable) | ||
254 | set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
255 | else | ||
256 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Interface to control the chip select signal | ||
261 | */ | ||
262 | static void davinci_spi_chipselect(struct spi_device *spi, int value) | ||
263 | { | ||
264 | struct davinci_spi *davinci_spi; | ||
265 | struct davinci_spi_platform_data *pdata; | ||
266 | u32 data1_reg_val = 0; | ||
267 | |||
268 | davinci_spi = spi_master_get_devdata(spi->master); | ||
269 | pdata = davinci_spi->pdata; | ||
270 | |||
271 | /* | ||
272 | * Board specific chip select logic decides the polarity and cs | ||
273 | * line for the controller | ||
274 | */ | ||
275 | if (value == BITBANG_CS_INACTIVE) { | ||
276 | set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); | ||
277 | |||
278 | data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; | ||
279 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
280 | |||
281 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
282 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
283 | cpu_relax(); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * davinci_spi_setup_transfer - This functions will determine transfer method | ||
289 | * @spi: spi device on which data transfer to be done | ||
290 | * @t: spi transfer in which transfer info is filled | ||
291 | * | ||
292 | * This function determines data transfer method (8/16/32 bit transfer). | ||
293 | * It will also set the SPI Clock Control register according to | ||
294 | * SPI slave device freq. | ||
295 | */ | ||
296 | static int davinci_spi_setup_transfer(struct spi_device *spi, | ||
297 | struct spi_transfer *t) | ||
298 | { | ||
299 | |||
300 | struct davinci_spi *davinci_spi; | ||
301 | struct davinci_spi_platform_data *pdata; | ||
302 | u8 bits_per_word = 0; | ||
303 | u32 hz = 0, prescale; | ||
304 | |||
305 | davinci_spi = spi_master_get_devdata(spi->master); | ||
306 | pdata = davinci_spi->pdata; | ||
307 | |||
308 | if (t) { | ||
309 | bits_per_word = t->bits_per_word; | ||
310 | hz = t->speed_hz; | ||
311 | } | ||
312 | |||
313 | /* if bits_per_word is not set then set it default */ | ||
314 | if (!bits_per_word) | ||
315 | bits_per_word = spi->bits_per_word; | ||
316 | |||
317 | /* | ||
318 | * Assign function pointer to appropriate transfer method | ||
319 | * 8bit, 16bit or 32bit transfer | ||
320 | */ | ||
321 | if (bits_per_word <= 8 && bits_per_word >= 2) { | ||
322 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | ||
323 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | ||
324 | davinci_spi->slave[spi->chip_select].bytes_per_word = 1; | ||
325 | } else if (bits_per_word <= 16 && bits_per_word >= 2) { | ||
326 | davinci_spi->get_rx = davinci_spi_rx_buf_u16; | ||
327 | davinci_spi->get_tx = davinci_spi_tx_buf_u16; | ||
328 | davinci_spi->slave[spi->chip_select].bytes_per_word = 2; | ||
329 | } else | ||
330 | return -EINVAL; | ||
331 | |||
332 | if (!hz) | ||
333 | hz = spi->max_speed_hz; | ||
334 | |||
335 | clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, | ||
336 | spi->chip_select); | ||
337 | set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, | ||
338 | spi->chip_select); | ||
339 | |||
340 | prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff; | ||
341 | |||
342 | clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); | ||
343 | set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select); | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) | ||
349 | { | ||
350 | struct spi_device *spi = (struct spi_device *)data; | ||
351 | struct davinci_spi *davinci_spi; | ||
352 | struct davinci_spi_dma *davinci_spi_dma; | ||
353 | struct davinci_spi_platform_data *pdata; | ||
354 | |||
355 | davinci_spi = spi_master_get_devdata(spi->master); | ||
356 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | ||
357 | pdata = davinci_spi->pdata; | ||
358 | |||
359 | if (ch_status == DMA_COMPLETE) | ||
360 | edma_stop(davinci_spi_dma->dma_rx_channel); | ||
361 | else | ||
362 | edma_clean_channel(davinci_spi_dma->dma_rx_channel); | ||
363 | |||
364 | complete(&davinci_spi_dma->dma_rx_completion); | ||
365 | /* We must disable the DMA RX request */ | ||
366 | davinci_spi_set_dma_req(spi, 0); | ||
367 | } | ||
368 | |||
369 | static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) | ||
370 | { | ||
371 | struct spi_device *spi = (struct spi_device *)data; | ||
372 | struct davinci_spi *davinci_spi; | ||
373 | struct davinci_spi_dma *davinci_spi_dma; | ||
374 | struct davinci_spi_platform_data *pdata; | ||
375 | |||
376 | davinci_spi = spi_master_get_devdata(spi->master); | ||
377 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | ||
378 | pdata = davinci_spi->pdata; | ||
379 | |||
380 | if (ch_status == DMA_COMPLETE) | ||
381 | edma_stop(davinci_spi_dma->dma_tx_channel); | ||
382 | else | ||
383 | edma_clean_channel(davinci_spi_dma->dma_tx_channel); | ||
384 | |||
385 | complete(&davinci_spi_dma->dma_tx_completion); | ||
386 | /* We must disable the DMA TX request */ | ||
387 | davinci_spi_set_dma_req(spi, 0); | ||
388 | } | ||
389 | |||
390 | static int davinci_spi_request_dma(struct spi_device *spi) | ||
391 | { | ||
392 | struct davinci_spi *davinci_spi; | ||
393 | struct davinci_spi_dma *davinci_spi_dma; | ||
394 | struct davinci_spi_platform_data *pdata; | ||
395 | struct device *sdev; | ||
396 | int r; | ||
397 | |||
398 | davinci_spi = spi_master_get_devdata(spi->master); | ||
399 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
400 | pdata = davinci_spi->pdata; | ||
401 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
402 | |||
403 | r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, | ||
404 | davinci_spi_dma_rx_callback, spi, | ||
405 | davinci_spi_dma->eventq); | ||
406 | if (r < 0) { | ||
407 | dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); | ||
408 | return -EAGAIN; | ||
409 | } | ||
410 | davinci_spi_dma->dma_rx_channel = r; | ||
411 | r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, | ||
412 | davinci_spi_dma_tx_callback, spi, | ||
413 | davinci_spi_dma->eventq); | ||
414 | if (r < 0) { | ||
415 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
416 | davinci_spi_dma->dma_rx_channel = -1; | ||
417 | dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); | ||
418 | return -EAGAIN; | ||
419 | } | ||
420 | davinci_spi_dma->dma_tx_channel = r; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | /** | ||
426 | * davinci_spi_setup - This functions will set default transfer method | ||
427 | * @spi: spi device on which data transfer to be done | ||
428 | * | ||
429 | * This functions sets the default transfer method. | ||
430 | */ | ||
431 | |||
432 | static int davinci_spi_setup(struct spi_device *spi) | ||
433 | { | ||
434 | int retval; | ||
435 | struct davinci_spi *davinci_spi; | ||
436 | struct davinci_spi_dma *davinci_spi_dma; | ||
437 | struct device *sdev; | ||
438 | |||
439 | davinci_spi = spi_master_get_devdata(spi->master); | ||
440 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
441 | |||
442 | /* if bits per word length is zero then set it default 8 */ | ||
443 | if (!spi->bits_per_word) | ||
444 | spi->bits_per_word = 8; | ||
445 | |||
446 | davinci_spi->slave[spi->chip_select].cmd_to_write = 0; | ||
447 | |||
448 | if (use_dma && davinci_spi->dma_channels) { | ||
449 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
450 | |||
451 | if ((davinci_spi_dma->dma_rx_channel == -1) | ||
452 | || (davinci_spi_dma->dma_tx_channel == -1)) { | ||
453 | retval = davinci_spi_request_dma(spi); | ||
454 | if (retval < 0) | ||
455 | return retval; | ||
456 | } | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * SPI in DaVinci and DA8xx operate between | ||
461 | * 600 KHz and 50 MHz | ||
462 | */ | ||
463 | if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { | ||
464 | dev_dbg(sdev, "Operating frequency is not in acceptable " | ||
465 | "range\n"); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * Set up SPIFMTn register, unique to this chipselect. | ||
471 | * | ||
472 | * NOTE: we could do all of these with one write. Also, some | ||
473 | * of the "version 2" features are found in chips that don't | ||
474 | * support all of them... | ||
475 | */ | ||
476 | if (spi->mode & SPI_LSB_FIRST) | ||
477 | set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
478 | spi->chip_select); | ||
479 | else | ||
480 | clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
481 | spi->chip_select); | ||
482 | |||
483 | if (spi->mode & SPI_CPOL) | ||
484 | set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
485 | spi->chip_select); | ||
486 | else | ||
487 | clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
488 | spi->chip_select); | ||
489 | |||
490 | if (!(spi->mode & SPI_CPHA)) | ||
491 | set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
492 | spi->chip_select); | ||
493 | else | ||
494 | clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
495 | spi->chip_select); | ||
496 | |||
497 | /* | ||
498 | * Version 1 hardware supports two basic SPI modes: | ||
499 | * - Standard SPI mode uses 4 pins, with chipselect | ||
500 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) | ||
501 | * (distinct from SPI_3WIRE, with just one data wire; | ||
502 | * or similar variants without MOSI or without MISO) | ||
503 | * | ||
504 | * Version 2 hardware supports an optional handshaking signal, | ||
505 | * so it can support two more modes: | ||
506 | * - 5 pin SPI variant is standard SPI plus SPI_READY | ||
507 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) | ||
508 | */ | ||
509 | |||
510 | if (davinci_spi->version == SPI_VERSION_2) { | ||
511 | clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, | ||
512 | spi->chip_select); | ||
513 | set_fmt_bits(davinci_spi->base, | ||
514 | (davinci_spi->pdata->wdelay | ||
515 | << SPIFMT_WDELAY_SHIFT) | ||
516 | & SPIFMT_WDELAY_MASK, | ||
517 | spi->chip_select); | ||
518 | |||
519 | if (davinci_spi->pdata->odd_parity) | ||
520 | set_fmt_bits(davinci_spi->base, | ||
521 | SPIFMT_ODD_PARITY_MASK, | ||
522 | spi->chip_select); | ||
523 | else | ||
524 | clear_fmt_bits(davinci_spi->base, | ||
525 | SPIFMT_ODD_PARITY_MASK, | ||
526 | spi->chip_select); | ||
527 | |||
528 | if (davinci_spi->pdata->parity_enable) | ||
529 | set_fmt_bits(davinci_spi->base, | ||
530 | SPIFMT_PARITYENA_MASK, | ||
531 | spi->chip_select); | ||
532 | else | ||
533 | clear_fmt_bits(davinci_spi->base, | ||
534 | SPIFMT_PARITYENA_MASK, | ||
535 | spi->chip_select); | ||
536 | |||
537 | if (davinci_spi->pdata->wait_enable) | ||
538 | set_fmt_bits(davinci_spi->base, | ||
539 | SPIFMT_WAITENA_MASK, | ||
540 | spi->chip_select); | ||
541 | else | ||
542 | clear_fmt_bits(davinci_spi->base, | ||
543 | SPIFMT_WAITENA_MASK, | ||
544 | spi->chip_select); | ||
545 | |||
546 | if (davinci_spi->pdata->timer_disable) | ||
547 | set_fmt_bits(davinci_spi->base, | ||
548 | SPIFMT_DISTIMER_MASK, | ||
549 | spi->chip_select); | ||
550 | else | ||
551 | clear_fmt_bits(davinci_spi->base, | ||
552 | SPIFMT_DISTIMER_MASK, | ||
553 | spi->chip_select); | ||
554 | } | ||
555 | |||
556 | retval = davinci_spi_setup_transfer(spi, NULL); | ||
557 | |||
558 | return retval; | ||
559 | } | ||
560 | |||
561 | static void davinci_spi_cleanup(struct spi_device *spi) | ||
562 | { | ||
563 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
564 | struct davinci_spi_dma *davinci_spi_dma; | ||
565 | |||
566 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
567 | |||
568 | if (use_dma && davinci_spi->dma_channels) { | ||
569 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
570 | |||
571 | if ((davinci_spi_dma->dma_rx_channel != -1) | ||
572 | && (davinci_spi_dma->dma_tx_channel != -1)) { | ||
573 | edma_free_channel(davinci_spi_dma->dma_tx_channel); | ||
574 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
575 | } | ||
576 | } | ||
577 | } | ||
578 | |||
579 | static int davinci_spi_bufs_prep(struct spi_device *spi, | ||
580 | struct davinci_spi *davinci_spi) | ||
581 | { | ||
582 | int op_mode = 0; | ||
583 | |||
584 | /* | ||
585 | * REVISIT unless devices disagree about SPI_LOOP or | ||
586 | * SPI_READY (SPI_NO_CS only allows one device!), this | ||
587 | * should not need to be done before each message... | ||
588 | * optimize for both flags staying cleared. | ||
589 | */ | ||
590 | |||
591 | op_mode = SPIPC0_DIFUN_MASK | ||
592 | | SPIPC0_DOFUN_MASK | ||
593 | | SPIPC0_CLKFUN_MASK; | ||
594 | if (!(spi->mode & SPI_NO_CS)) | ||
595 | op_mode |= 1 << spi->chip_select; | ||
596 | if (spi->mode & SPI_READY) | ||
597 | op_mode |= SPIPC0_SPIENA_MASK; | ||
598 | |||
599 | iowrite32(op_mode, davinci_spi->base + SPIPC0); | ||
600 | |||
601 | if (spi->mode & SPI_LOOP) | ||
602 | set_io_bits(davinci_spi->base + SPIGCR1, | ||
603 | SPIGCR1_LOOPBACK_MASK); | ||
604 | else | ||
605 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
606 | SPIGCR1_LOOPBACK_MASK); | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | ||
612 | int int_status) | ||
613 | { | ||
614 | struct device *sdev = davinci_spi->bitbang.master->dev.parent; | ||
615 | |||
616 | if (int_status & SPIFLG_TIMEOUT_MASK) { | ||
617 | dev_dbg(sdev, "SPI Time-out Error\n"); | ||
618 | return -ETIMEDOUT; | ||
619 | } | ||
620 | if (int_status & SPIFLG_DESYNC_MASK) { | ||
621 | dev_dbg(sdev, "SPI Desynchronization Error\n"); | ||
622 | return -EIO; | ||
623 | } | ||
624 | if (int_status & SPIFLG_BITERR_MASK) { | ||
625 | dev_dbg(sdev, "SPI Bit error\n"); | ||
626 | return -EIO; | ||
627 | } | ||
628 | |||
629 | if (davinci_spi->version == SPI_VERSION_2) { | ||
630 | if (int_status & SPIFLG_DLEN_ERR_MASK) { | ||
631 | dev_dbg(sdev, "SPI Data Length Error\n"); | ||
632 | return -EIO; | ||
633 | } | ||
634 | if (int_status & SPIFLG_PARERR_MASK) { | ||
635 | dev_dbg(sdev, "SPI Parity Error\n"); | ||
636 | return -EIO; | ||
637 | } | ||
638 | if (int_status & SPIFLG_OVRRUN_MASK) { | ||
639 | dev_dbg(sdev, "SPI Data Overrun error\n"); | ||
640 | return -EIO; | ||
641 | } | ||
642 | if (int_status & SPIFLG_TX_INTR_MASK) { | ||
643 | dev_dbg(sdev, "SPI TX intr bit set\n"); | ||
644 | return -EIO; | ||
645 | } | ||
646 | if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { | ||
647 | dev_dbg(sdev, "SPI Buffer Init Active\n"); | ||
648 | return -EBUSY; | ||
649 | } | ||
650 | } | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
/**
 * davinci_spi_bufs - functions which will handle transfer data
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function will put data to be transferred into data register
 * of SPI controller and then wait until the completion will be marked
 * by the IRQ Handler.
 *
 * NOTE(review): t->len is divided by the word size below, so a length
 * that is not a multiple of bytes_per_word is silently truncated —
 * confirm callers guarantee alignment.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv, tmp;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	/* Program pin functions and loopback for this device's mode. */
	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/* Apply the board-specified CS-to-transmit/transmit-to-CS delays. */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
	/* Active-low mask: all bits set except this transfer's chip select. */
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	/* Drain any stale data so the first read below is this transfer's. */
	while ((ioread32(davinci_spi->base + SPIBUF)
			& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* Low 16 bits of SPIDAT1 carry the outgoing word. */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			/* Busy-wait for the matching received word. */
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				/*
				 * NOTE(review): flg_val is never consumed;
				 * presumably SPIFLG is read here for a side
				 * effect — confirm against the TRM.
				 */
				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else {	/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				/* Wait for the RX interrupt bit to clear. */
				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
795 | |||
/*
 * The DAVINCI_DMA_DATA_TYPE_S{8,16,32} constants are defined once near
 * the top of this file; the identical duplicate definitions that used
 * to sit here were redundant and have been removed.
 */
799 | |||
800 | static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) | ||
801 | { | ||
802 | struct davinci_spi *davinci_spi; | ||
803 | int int_status = 0; | ||
804 | int count, temp_count; | ||
805 | u8 conv = 1; | ||
806 | u8 tmp; | ||
807 | u32 data1_reg_val; | ||
808 | struct davinci_spi_dma *davinci_spi_dma; | ||
809 | int word_len, data_type, ret; | ||
810 | unsigned long tx_reg, rx_reg; | ||
811 | struct davinci_spi_platform_data *pdata; | ||
812 | struct device *sdev; | ||
813 | |||
814 | davinci_spi = spi_master_get_devdata(spi->master); | ||
815 | pdata = davinci_spi->pdata; | ||
816 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
817 | |||
818 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
819 | |||
820 | tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; | ||
821 | rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; | ||
822 | |||
823 | davinci_spi->tx = t->tx_buf; | ||
824 | davinci_spi->rx = t->rx_buf; | ||
825 | |||
826 | /* convert len to words based on bits_per_word */ | ||
827 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | ||
828 | davinci_spi->count = t->len / conv; | ||
829 | |||
830 | INIT_COMPLETION(davinci_spi->done); | ||
831 | |||
832 | init_completion(&davinci_spi_dma->dma_rx_completion); | ||
833 | init_completion(&davinci_spi_dma->dma_tx_completion); | ||
834 | |||
835 | word_len = conv * 8; | ||
836 | |||
837 | if (word_len <= 8) | ||
838 | data_type = DAVINCI_DMA_DATA_TYPE_S8; | ||
839 | else if (word_len <= 16) | ||
840 | data_type = DAVINCI_DMA_DATA_TYPE_S16; | ||
841 | else if (word_len <= 32) | ||
842 | data_type = DAVINCI_DMA_DATA_TYPE_S32; | ||
843 | else | ||
844 | return -EINVAL; | ||
845 | |||
846 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
847 | if (ret) | ||
848 | return ret; | ||
849 | |||
850 | /* Put delay val if required */ | ||
851 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
852 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
853 | davinci_spi->base + SPIDELAY); | ||
854 | |||
855 | count = davinci_spi->count; /* the number of elements */ | ||
856 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
857 | |||
858 | /* CS default = 0xFF */ | ||
859 | tmp = ~(0x1 << spi->chip_select); | ||
860 | |||
861 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
862 | |||
863 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
864 | |||
865 | /* disable all interrupts for dma transfers */ | ||
866 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | ||
867 | /* Disable SPI to write configuration bits in SPIDAT */ | ||
868 | clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
869 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
870 | /* Enable SPI */ | ||
871 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
872 | |||
873 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
874 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
875 | cpu_relax(); | ||
876 | |||
877 | |||
878 | if (t->tx_buf) { | ||
879 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, | ||
880 | DMA_TO_DEVICE); | ||
881 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
882 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | ||
883 | " TX buffer\n", count); | ||
884 | return -ENOMEM; | ||
885 | } | ||
886 | temp_count = count; | ||
887 | } else { | ||
888 | /* We need TX clocking for RX transaction */ | ||
889 | t->tx_dma = dma_map_single(&spi->dev, | ||
890 | (void *)davinci_spi->tmp_buf, count + 1, | ||
891 | DMA_TO_DEVICE); | ||
892 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
893 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | ||
894 | " TX tmp buffer\n", count); | ||
895 | return -ENOMEM; | ||
896 | } | ||
897 | temp_count = count + 1; | ||
898 | } | ||
899 | |||
900 | edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, | ||
901 | data_type, temp_count, 1, 0, ASYNC); | ||
902 | edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); | ||
903 | edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); | ||
904 | edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); | ||
905 | edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); | ||
906 | |||
907 | if (t->rx_buf) { | ||
908 | /* initiate transaction */ | ||
909 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
910 | |||
911 | t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, | ||
912 | DMA_FROM_DEVICE); | ||
913 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | ||
914 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | ||
915 | count); | ||
916 | if (t->tx_buf != NULL) | ||
917 | dma_unmap_single(NULL, t->tx_dma, | ||
918 | count, DMA_TO_DEVICE); | ||
919 | return -ENOMEM; | ||
920 | } | ||
921 | edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, | ||
922 | data_type, count, 1, 0, ASYNC); | ||
923 | edma_set_src(davinci_spi_dma->dma_rx_channel, | ||
924 | rx_reg, INCR, W8BIT); | ||
925 | edma_set_dest(davinci_spi_dma->dma_rx_channel, | ||
926 | t->rx_dma, INCR, W8BIT); | ||
927 | edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); | ||
928 | edma_set_dest_index(davinci_spi_dma->dma_rx_channel, | ||
929 | data_type, 0); | ||
930 | } | ||
931 | |||
932 | if ((t->tx_buf) || (t->rx_buf)) | ||
933 | edma_start(davinci_spi_dma->dma_tx_channel); | ||
934 | |||
935 | if (t->rx_buf) | ||
936 | edma_start(davinci_spi_dma->dma_rx_channel); | ||
937 | |||
938 | if ((t->rx_buf) || (t->tx_buf)) | ||
939 | davinci_spi_set_dma_req(spi, 1); | ||
940 | |||
941 | if (t->tx_buf) | ||
942 | wait_for_completion_interruptible( | ||
943 | &davinci_spi_dma->dma_tx_completion); | ||
944 | |||
945 | if (t->rx_buf) | ||
946 | wait_for_completion_interruptible( | ||
947 | &davinci_spi_dma->dma_rx_completion); | ||
948 | |||
949 | dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); | ||
950 | |||
951 | if (t->rx_buf) | ||
952 | dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); | ||
953 | |||
954 | /* | ||
955 | * Check for bit error, desync error,parity error,timeout error and | ||
956 | * receive overflow errors | ||
957 | */ | ||
958 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
959 | |||
960 | ret = davinci_spi_check_error(davinci_spi, int_status); | ||
961 | if (ret != 0) | ||
962 | return ret; | ||
963 | |||
964 | /* SPI Framework maintains the count only in bytes so convert back */ | ||
965 | davinci_spi->count *= conv; | ||
966 | |||
967 | return t->len; | ||
968 | } | ||
969 | |||
970 | /** | ||
971 | * davinci_spi_irq - IRQ handler for DaVinci SPI | ||
972 | * @irq: IRQ number for this SPI Master | ||
973 | * @context_data: structure for SPI Master controller davinci_spi | ||
974 | */ | ||
975 | static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) | ||
976 | { | ||
977 | struct davinci_spi *davinci_spi = context_data; | ||
978 | u32 int_status, rx_data = 0; | ||
979 | irqreturn_t ret = IRQ_NONE; | ||
980 | |||
981 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
982 | |||
983 | while ((int_status & SPIFLG_RX_INTR_MASK)) { | ||
984 | if (likely(int_status & SPIFLG_RX_INTR_MASK)) { | ||
985 | ret = IRQ_HANDLED; | ||
986 | |||
987 | rx_data = ioread32(davinci_spi->base + SPIBUF); | ||
988 | davinci_spi->get_rx(rx_data, davinci_spi); | ||
989 | |||
990 | /* Disable Receive Interrupt */ | ||
991 | iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), | ||
992 | davinci_spi->base + SPIINT); | ||
993 | } else | ||
994 | (void)davinci_spi_check_error(davinci_spi, int_status); | ||
995 | |||
996 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
997 | } | ||
998 | |||
999 | return ret; | ||
1000 | } | ||
1001 | |||
1002 | /** | ||
1003 | * davinci_spi_probe - probe function for SPI Master Controller | ||
1004 | * @pdev: platform_device structure which contains plateform specific data | ||
1005 | */ | ||
1006 | static int davinci_spi_probe(struct platform_device *pdev) | ||
1007 | { | ||
1008 | struct spi_master *master; | ||
1009 | struct davinci_spi *davinci_spi; | ||
1010 | struct davinci_spi_platform_data *pdata; | ||
1011 | struct resource *r, *mem; | ||
1012 | resource_size_t dma_rx_chan = SPI_NO_RESOURCE; | ||
1013 | resource_size_t dma_tx_chan = SPI_NO_RESOURCE; | ||
1014 | resource_size_t dma_eventq = SPI_NO_RESOURCE; | ||
1015 | int i = 0, ret = 0; | ||
1016 | |||
1017 | pdata = pdev->dev.platform_data; | ||
1018 | if (pdata == NULL) { | ||
1019 | ret = -ENODEV; | ||
1020 | goto err; | ||
1021 | } | ||
1022 | |||
1023 | master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); | ||
1024 | if (master == NULL) { | ||
1025 | ret = -ENOMEM; | ||
1026 | goto err; | ||
1027 | } | ||
1028 | |||
1029 | dev_set_drvdata(&pdev->dev, master); | ||
1030 | |||
1031 | davinci_spi = spi_master_get_devdata(master); | ||
1032 | if (davinci_spi == NULL) { | ||
1033 | ret = -ENOENT; | ||
1034 | goto free_master; | ||
1035 | } | ||
1036 | |||
1037 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1038 | if (r == NULL) { | ||
1039 | ret = -ENOENT; | ||
1040 | goto free_master; | ||
1041 | } | ||
1042 | |||
1043 | davinci_spi->pbase = r->start; | ||
1044 | davinci_spi->region_size = resource_size(r); | ||
1045 | davinci_spi->pdata = pdata; | ||
1046 | |||
1047 | mem = request_mem_region(r->start, davinci_spi->region_size, | ||
1048 | pdev->name); | ||
1049 | if (mem == NULL) { | ||
1050 | ret = -EBUSY; | ||
1051 | goto free_master; | ||
1052 | } | ||
1053 | |||
1054 | davinci_spi->base = (struct davinci_spi_reg __iomem *) | ||
1055 | ioremap(r->start, davinci_spi->region_size); | ||
1056 | if (davinci_spi->base == NULL) { | ||
1057 | ret = -ENOMEM; | ||
1058 | goto release_region; | ||
1059 | } | ||
1060 | |||
1061 | davinci_spi->irq = platform_get_irq(pdev, 0); | ||
1062 | if (davinci_spi->irq <= 0) { | ||
1063 | ret = -EINVAL; | ||
1064 | goto unmap_io; | ||
1065 | } | ||
1066 | |||
1067 | ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, | ||
1068 | dev_name(&pdev->dev), davinci_spi); | ||
1069 | if (ret) | ||
1070 | goto unmap_io; | ||
1071 | |||
1072 | /* Allocate tmp_buf for tx_buf */ | ||
1073 | davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); | ||
1074 | if (davinci_spi->tmp_buf == NULL) { | ||
1075 | ret = -ENOMEM; | ||
1076 | goto irq_free; | ||
1077 | } | ||
1078 | |||
1079 | davinci_spi->bitbang.master = spi_master_get(master); | ||
1080 | if (davinci_spi->bitbang.master == NULL) { | ||
1081 | ret = -ENODEV; | ||
1082 | goto free_tmp_buf; | ||
1083 | } | ||
1084 | |||
1085 | davinci_spi->clk = clk_get(&pdev->dev, NULL); | ||
1086 | if (IS_ERR(davinci_spi->clk)) { | ||
1087 | ret = -ENODEV; | ||
1088 | goto put_master; | ||
1089 | } | ||
1090 | clk_enable(davinci_spi->clk); | ||
1091 | |||
1092 | |||
1093 | master->bus_num = pdev->id; | ||
1094 | master->num_chipselect = pdata->num_chipselect; | ||
1095 | master->setup = davinci_spi_setup; | ||
1096 | master->cleanup = davinci_spi_cleanup; | ||
1097 | |||
1098 | davinci_spi->bitbang.chipselect = davinci_spi_chipselect; | ||
1099 | davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; | ||
1100 | |||
1101 | davinci_spi->version = pdata->version; | ||
1102 | use_dma = pdata->use_dma; | ||
1103 | |||
1104 | davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; | ||
1105 | if (davinci_spi->version == SPI_VERSION_2) | ||
1106 | davinci_spi->bitbang.flags |= SPI_READY; | ||
1107 | |||
1108 | if (use_dma) { | ||
1109 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1110 | if (r) | ||
1111 | dma_rx_chan = r->start; | ||
1112 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
1113 | if (r) | ||
1114 | dma_tx_chan = r->start; | ||
1115 | r = platform_get_resource(pdev, IORESOURCE_DMA, 2); | ||
1116 | if (r) | ||
1117 | dma_eventq = r->start; | ||
1118 | } | ||
1119 | |||
1120 | if (!use_dma || | ||
1121 | dma_rx_chan == SPI_NO_RESOURCE || | ||
1122 | dma_tx_chan == SPI_NO_RESOURCE || | ||
1123 | dma_eventq == SPI_NO_RESOURCE) { | ||
1124 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; | ||
1125 | use_dma = 0; | ||
1126 | } else { | ||
1127 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; | ||
1128 | davinci_spi->dma_channels = kzalloc(master->num_chipselect | ||
1129 | * sizeof(struct davinci_spi_dma), GFP_KERNEL); | ||
1130 | if (davinci_spi->dma_channels == NULL) { | ||
1131 | ret = -ENOMEM; | ||
1132 | goto free_clk; | ||
1133 | } | ||
1134 | |||
1135 | for (i = 0; i < master->num_chipselect; i++) { | ||
1136 | davinci_spi->dma_channels[i].dma_rx_channel = -1; | ||
1137 | davinci_spi->dma_channels[i].dma_rx_sync_dev = | ||
1138 | dma_rx_chan; | ||
1139 | davinci_spi->dma_channels[i].dma_tx_channel = -1; | ||
1140 | davinci_spi->dma_channels[i].dma_tx_sync_dev = | ||
1141 | dma_tx_chan; | ||
1142 | davinci_spi->dma_channels[i].eventq = dma_eventq; | ||
1143 | } | ||
1144 | dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" | ||
1145 | "Using RX channel = %d , TX channel = %d and " | ||
1146 | "event queue = %d", dma_rx_chan, dma_tx_chan, | ||
1147 | dma_eventq); | ||
1148 | } | ||
1149 | |||
1150 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | ||
1151 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | ||
1152 | |||
1153 | init_completion(&davinci_spi->done); | ||
1154 | |||
1155 | /* Reset In/OUT SPI module */ | ||
1156 | iowrite32(0, davinci_spi->base + SPIGCR0); | ||
1157 | udelay(100); | ||
1158 | iowrite32(1, davinci_spi->base + SPIGCR0); | ||
1159 | |||
1160 | /* Clock internal */ | ||
1161 | if (davinci_spi->pdata->clk_internal) | ||
1162 | set_io_bits(davinci_spi->base + SPIGCR1, | ||
1163 | SPIGCR1_CLKMOD_MASK); | ||
1164 | else | ||
1165 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
1166 | SPIGCR1_CLKMOD_MASK); | ||
1167 | |||
1168 | /* master mode default */ | ||
1169 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); | ||
1170 | |||
1171 | if (davinci_spi->pdata->intr_level) | ||
1172 | iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); | ||
1173 | else | ||
1174 | iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); | ||
1175 | |||
1176 | ret = spi_bitbang_start(&davinci_spi->bitbang); | ||
1177 | if (ret) | ||
1178 | goto free_clk; | ||
1179 | |||
1180 | dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base); | ||
1181 | |||
1182 | if (!pdata->poll_mode) | ||
1183 | dev_info(&pdev->dev, "Operating in interrupt mode" | ||
1184 | " using IRQ %d\n", davinci_spi->irq); | ||
1185 | |||
1186 | return ret; | ||
1187 | |||
1188 | free_clk: | ||
1189 | clk_disable(davinci_spi->clk); | ||
1190 | clk_put(davinci_spi->clk); | ||
1191 | put_master: | ||
1192 | spi_master_put(master); | ||
1193 | free_tmp_buf: | ||
1194 | kfree(davinci_spi->tmp_buf); | ||
1195 | irq_free: | ||
1196 | free_irq(davinci_spi->irq, davinci_spi); | ||
1197 | unmap_io: | ||
1198 | iounmap(davinci_spi->base); | ||
1199 | release_region: | ||
1200 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | ||
1201 | free_master: | ||
1202 | kfree(master); | ||
1203 | err: | ||
1204 | return ret; | ||
1205 | } | ||
1206 | |||
1207 | /** | ||
1208 | * davinci_spi_remove - remove function for SPI Master Controller | ||
1209 | * @pdev: platform_device structure which contains plateform specific data | ||
1210 | * | ||
1211 | * This function will do the reverse action of davinci_spi_probe function | ||
1212 | * It will free the IRQ and SPI controller's memory region. | ||
1213 | * It will also call spi_bitbang_stop to destroy the work queue which was | ||
1214 | * created by spi_bitbang_start. | ||
1215 | */ | ||
1216 | static int __exit davinci_spi_remove(struct platform_device *pdev) | ||
1217 | { | ||
1218 | struct davinci_spi *davinci_spi; | ||
1219 | struct spi_master *master; | ||
1220 | |||
1221 | master = dev_get_drvdata(&pdev->dev); | ||
1222 | davinci_spi = spi_master_get_devdata(master); | ||
1223 | |||
1224 | spi_bitbang_stop(&davinci_spi->bitbang); | ||
1225 | |||
1226 | clk_disable(davinci_spi->clk); | ||
1227 | clk_put(davinci_spi->clk); | ||
1228 | spi_master_put(master); | ||
1229 | kfree(davinci_spi->tmp_buf); | ||
1230 | free_irq(davinci_spi->irq, davinci_spi); | ||
1231 | iounmap(davinci_spi->base); | ||
1232 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | static struct platform_driver davinci_spi_driver = { | ||
1238 | .driver.name = "spi_davinci", | ||
1239 | .remove = __exit_p(davinci_spi_remove), | ||
1240 | }; | ||
1241 | |||
1242 | static int __init davinci_spi_init(void) | ||
1243 | { | ||
1244 | return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); | ||
1245 | } | ||
1246 | module_init(davinci_spi_init); | ||
1247 | |||
1248 | static void __exit davinci_spi_exit(void) | ||
1249 | { | ||
1250 | platform_driver_unregister(&davinci_spi_driver); | ||
1251 | } | ||
1252 | module_exit(davinci_spi_exit); | ||
1253 | |||
1254 | MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); | ||
1255 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 31620fae77be..8ed38f1d6c18 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c | |||
@@ -152,6 +152,7 @@ static void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
152 | #else | 152 | #else |
153 | static inline int mrst_spi_debugfs_init(struct dw_spi *dws) | 153 | static inline int mrst_spi_debugfs_init(struct dw_spi *dws) |
154 | { | 154 | { |
155 | return 0; | ||
155 | } | 156 | } |
156 | 157 | ||
157 | static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | 158 | static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) |
@@ -161,14 +162,14 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
161 | 162 | ||
162 | static void wait_till_not_busy(struct dw_spi *dws) | 163 | static void wait_till_not_busy(struct dw_spi *dws) |
163 | { | 164 | { |
164 | unsigned long end = jiffies + usecs_to_jiffies(1000); | 165 | unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); |
165 | 166 | ||
166 | while (time_before(jiffies, end)) { | 167 | while (time_before(jiffies, end)) { |
167 | if (!(dw_readw(dws, sr) & SR_BUSY)) | 168 | if (!(dw_readw(dws, sr) & SR_BUSY)) |
168 | return; | 169 | return; |
169 | } | 170 | } |
170 | dev_err(&dws->master->dev, | 171 | dev_err(&dws->master->dev, |
171 | "DW SPI: Stutus keeps busy for 1000us after a read/write!\n"); | 172 | "DW SPI: Status keeps busy for 1000us after a read/write!\n"); |
172 | } | 173 | } |
173 | 174 | ||
174 | static void flush(struct dw_spi *dws) | 175 | static void flush(struct dw_spi *dws) |
@@ -358,6 +359,8 @@ static void transfer_complete(struct dw_spi *dws) | |||
358 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | 359 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) |
359 | { | 360 | { |
360 | u16 irq_status, irq_mask = 0x3f; | 361 | u16 irq_status, irq_mask = 0x3f; |
362 | u32 int_level = dws->fifo_len / 2; | ||
363 | u32 left; | ||
361 | 364 | ||
362 | irq_status = dw_readw(dws, isr) & irq_mask; | 365 | irq_status = dw_readw(dws, isr) & irq_mask; |
363 | /* Error handling */ | 366 | /* Error handling */ |
@@ -369,22 +372,23 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
369 | return IRQ_HANDLED; | 372 | return IRQ_HANDLED; |
370 | } | 373 | } |
371 | 374 | ||
372 | /* INT comes from tx */ | 375 | if (irq_status & SPI_INT_TXEI) { |
373 | if (dws->tx && (irq_status & SPI_INT_TXEI)) { | 376 | spi_mask_intr(dws, SPI_INT_TXEI); |
374 | while (dws->tx < dws->tx_end) | 377 | |
378 | left = (dws->tx_end - dws->tx) / dws->n_bytes; | ||
379 | left = (left > int_level) ? int_level : left; | ||
380 | |||
381 | while (left--) | ||
375 | dws->write(dws); | 382 | dws->write(dws); |
383 | dws->read(dws); | ||
376 | 384 | ||
377 | if (dws->tx == dws->tx_end) { | 385 | /* Re-enable the IRQ if there is still data left to tx */ |
378 | spi_mask_intr(dws, SPI_INT_TXEI); | 386 | if (dws->tx_end > dws->tx) |
387 | spi_umask_intr(dws, SPI_INT_TXEI); | ||
388 | else | ||
379 | transfer_complete(dws); | 389 | transfer_complete(dws); |
380 | } | ||
381 | } | 390 | } |
382 | 391 | ||
383 | /* INT comes from rx */ | ||
384 | if (dws->rx && (irq_status & SPI_INT_RXFI)) { | ||
385 | if (dws->read(dws)) | ||
386 | transfer_complete(dws); | ||
387 | } | ||
388 | return IRQ_HANDLED; | 392 | return IRQ_HANDLED; |
389 | } | 393 | } |
390 | 394 | ||
@@ -404,12 +408,9 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) | |||
404 | /* Must be called inside pump_transfers() */ | 408 | /* Must be called inside pump_transfers() */ |
405 | static void poll_transfer(struct dw_spi *dws) | 409 | static void poll_transfer(struct dw_spi *dws) |
406 | { | 410 | { |
407 | if (dws->tx) { | 411 | while (dws->write(dws)) |
408 | while (dws->write(dws)) | 412 | dws->read(dws); |
409 | dws->read(dws); | ||
410 | } | ||
411 | 413 | ||
412 | dws->read(dws); | ||
413 | transfer_complete(dws); | 414 | transfer_complete(dws); |
414 | } | 415 | } |
415 | 416 | ||
@@ -428,6 +429,7 @@ static void pump_transfers(unsigned long data) | |||
428 | u8 bits = 0; | 429 | u8 bits = 0; |
429 | u8 imask = 0; | 430 | u8 imask = 0; |
430 | u8 cs_change = 0; | 431 | u8 cs_change = 0; |
432 | u16 txint_level = 0; | ||
431 | u16 clk_div = 0; | 433 | u16 clk_div = 0; |
432 | u32 speed = 0; | 434 | u32 speed = 0; |
433 | u32 cr0 = 0; | 435 | u32 cr0 = 0; |
@@ -438,6 +440,9 @@ static void pump_transfers(unsigned long data) | |||
438 | chip = dws->cur_chip; | 440 | chip = dws->cur_chip; |
439 | spi = message->spi; | 441 | spi = message->spi; |
440 | 442 | ||
443 | if (unlikely(!chip->clk_div)) | ||
444 | chip->clk_div = dws->max_freq / chip->speed_hz; | ||
445 | |||
441 | if (message->state == ERROR_STATE) { | 446 | if (message->state == ERROR_STATE) { |
442 | message->status = -EIO; | 447 | message->status = -EIO; |
443 | goto early_exit; | 448 | goto early_exit; |
@@ -492,7 +497,7 @@ static void pump_transfers(unsigned long data) | |||
492 | 497 | ||
493 | /* clk_div doesn't support odd number */ | 498 | /* clk_div doesn't support odd number */ |
494 | clk_div = dws->max_freq / speed; | 499 | clk_div = dws->max_freq / speed; |
495 | clk_div = (clk_div >> 1) << 1; | 500 | clk_div = (clk_div + 1) & 0xfffe; |
496 | 501 | ||
497 | chip->speed_hz = speed; | 502 | chip->speed_hz = speed; |
498 | chip->clk_div = clk_div; | 503 | chip->clk_div = clk_div; |
@@ -532,14 +537,35 @@ static void pump_transfers(unsigned long data) | |||
532 | } | 537 | } |
533 | message->state = RUNNING_STATE; | 538 | message->state = RUNNING_STATE; |
534 | 539 | ||
540 | /* | ||
541 | * Adjust transfer mode if necessary. Requires platform dependent | ||
542 | * chipselect mechanism. | ||
543 | */ | ||
544 | if (dws->cs_control) { | ||
545 | if (dws->rx && dws->tx) | ||
546 | chip->tmode = 0x00; | ||
547 | else if (dws->rx) | ||
548 | chip->tmode = 0x02; | ||
549 | else | ||
550 | chip->tmode = 0x01; | ||
551 | |||
552 | cr0 &= ~(0x3 << SPI_MODE_OFFSET); | ||
553 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); | ||
554 | } | ||
555 | |||
535 | /* Check if current transfer is a DMA transaction */ | 556 | /* Check if current transfer is a DMA transaction */ |
536 | dws->dma_mapped = map_dma_buffers(dws); | 557 | dws->dma_mapped = map_dma_buffers(dws); |
537 | 558 | ||
559 | /* | ||
560 | * Interrupt mode | ||
561 | * we only need set the TXEI IRQ, as TX/RX always happen syncronizely | ||
562 | */ | ||
538 | if (!dws->dma_mapped && !chip->poll_mode) { | 563 | if (!dws->dma_mapped && !chip->poll_mode) { |
539 | if (dws->rx) | 564 | int templen = dws->len / dws->n_bytes; |
540 | imask |= SPI_INT_RXFI; | 565 | txint_level = dws->fifo_len / 2; |
541 | if (dws->tx) | 566 | txint_level = (templen > txint_level) ? txint_level : templen; |
542 | imask |= SPI_INT_TXEI; | 567 | |
568 | imask |= SPI_INT_TXEI; | ||
543 | dws->transfer_handler = interrupt_transfer; | 569 | dws->transfer_handler = interrupt_transfer; |
544 | } | 570 | } |
545 | 571 | ||
@@ -549,21 +575,23 @@ static void pump_transfers(unsigned long data) | |||
549 | * 2. clk_div is changed | 575 | * 2. clk_div is changed |
550 | * 3. control value changes | 576 | * 3. control value changes |
551 | */ | 577 | */ |
552 | if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) { | 578 | if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { |
553 | spi_enable_chip(dws, 0); | 579 | spi_enable_chip(dws, 0); |
554 | 580 | ||
555 | if (dw_readw(dws, ctrl0) != cr0) | 581 | if (dw_readw(dws, ctrl0) != cr0) |
556 | dw_writew(dws, ctrl0, cr0); | 582 | dw_writew(dws, ctrl0, cr0); |
557 | 583 | ||
584 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
585 | spi_chip_sel(dws, spi->chip_select); | ||
586 | |||
558 | /* Set the interrupt mask, for poll mode just diable all int */ | 587 | /* Set the interrupt mask, for poll mode just diable all int */ |
559 | spi_mask_intr(dws, 0xff); | 588 | spi_mask_intr(dws, 0xff); |
560 | if (!chip->poll_mode) | 589 | if (imask) |
561 | spi_umask_intr(dws, imask); | 590 | spi_umask_intr(dws, imask); |
591 | if (txint_level) | ||
592 | dw_writew(dws, txfltr, txint_level); | ||
562 | 593 | ||
563 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
564 | spi_chip_sel(dws, spi->chip_select); | ||
565 | spi_enable_chip(dws, 1); | 594 | spi_enable_chip(dws, 1); |
566 | |||
567 | if (cs_change) | 595 | if (cs_change) |
568 | dws->prev_chip = chip; | 596 | dws->prev_chip = chip; |
569 | } | 597 | } |
@@ -712,11 +740,11 @@ static int dw_spi_setup(struct spi_device *spi) | |||
712 | } | 740 | } |
713 | chip->bits_per_word = spi->bits_per_word; | 741 | chip->bits_per_word = spi->bits_per_word; |
714 | 742 | ||
743 | if (!spi->max_speed_hz) { | ||
744 | dev_err(&spi->dev, "No max speed HZ parameter\n"); | ||
745 | return -EINVAL; | ||
746 | } | ||
715 | chip->speed_hz = spi->max_speed_hz; | 747 | chip->speed_hz = spi->max_speed_hz; |
716 | if (chip->speed_hz) | ||
717 | chip->clk_div = 25000000 / chip->speed_hz; | ||
718 | else | ||
719 | chip->clk_div = 8; /* default value */ | ||
720 | 748 | ||
721 | chip->tmode = 0; /* Tx & Rx */ | 749 | chip->tmode = 0; /* Tx & Rx */ |
722 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ | 750 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ |
@@ -735,7 +763,7 @@ static void dw_spi_cleanup(struct spi_device *spi) | |||
735 | kfree(chip); | 763 | kfree(chip); |
736 | } | 764 | } |
737 | 765 | ||
738 | static int __init init_queue(struct dw_spi *dws) | 766 | static int __devinit init_queue(struct dw_spi *dws) |
739 | { | 767 | { |
740 | INIT_LIST_HEAD(&dws->queue); | 768 | INIT_LIST_HEAD(&dws->queue); |
741 | spin_lock_init(&dws->lock); | 769 | spin_lock_init(&dws->lock); |
@@ -817,6 +845,22 @@ static void spi_hw_init(struct dw_spi *dws) | |||
817 | spi_mask_intr(dws, 0xff); | 845 | spi_mask_intr(dws, 0xff); |
818 | spi_enable_chip(dws, 1); | 846 | spi_enable_chip(dws, 1); |
819 | flush(dws); | 847 | flush(dws); |
848 | |||
849 | /* | ||
850 | * Try to detect the FIFO depth if not set by interface driver, | ||
851 | * the depth could be from 2 to 256 from HW spec | ||
852 | */ | ||
853 | if (!dws->fifo_len) { | ||
854 | u32 fifo; | ||
855 | for (fifo = 2; fifo <= 257; fifo++) { | ||
856 | dw_writew(dws, txfltr, fifo); | ||
857 | if (fifo != dw_readw(dws, txfltr)) | ||
858 | break; | ||
859 | } | ||
860 | |||
861 | dws->fifo_len = (fifo == 257) ? 0 : fifo; | ||
862 | dw_writew(dws, txfltr, 0); | ||
863 | } | ||
820 | } | 864 | } |
821 | 865 | ||
822 | int __devinit dw_spi_add_host(struct dw_spi *dws) | 866 | int __devinit dw_spi_add_host(struct dw_spi *dws) |
@@ -913,6 +957,7 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws) | |||
913 | /* Disconnect from the SPI framework */ | 957 | /* Disconnect from the SPI framework */ |
914 | spi_unregister_master(dws->master); | 958 | spi_unregister_master(dws->master); |
915 | } | 959 | } |
960 | EXPORT_SYMBOL(dw_spi_remove_host); | ||
916 | 961 | ||
917 | int dw_spi_suspend_host(struct dw_spi *dws) | 962 | int dw_spi_suspend_host(struct dw_spi *dws) |
918 | { | 963 | { |
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c new file mode 100644 index 000000000000..e35b45ac5174 --- /dev/null +++ b/drivers/spi/dw_spi_mmio.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core | ||
3 | * | ||
4 | * Copyright (c) 2010, Octasic semiconductor. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/spi/dw_spi.h> | ||
15 | #include <linux/spi/spi.h> | ||
16 | |||
17 | #define DRIVER_NAME "dw_spi_mmio" | ||
18 | |||
19 | struct dw_spi_mmio { | ||
20 | struct dw_spi dws; | ||
21 | struct clk *clk; | ||
22 | }; | ||
23 | |||
24 | static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) | ||
25 | { | ||
26 | struct dw_spi_mmio *dwsmmio; | ||
27 | struct dw_spi *dws; | ||
28 | struct resource *mem, *ioarea; | ||
29 | int ret; | ||
30 | |||
31 | dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); | ||
32 | if (!dwsmmio) { | ||
33 | ret = -ENOMEM; | ||
34 | goto err_end; | ||
35 | } | ||
36 | |||
37 | dws = &dwsmmio->dws; | ||
38 | |||
39 | /* Get basic io resource and map it */ | ||
40 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
41 | if (!mem) { | ||
42 | dev_err(&pdev->dev, "no mem resource?\n"); | ||
43 | ret = -EINVAL; | ||
44 | goto err_kfree; | ||
45 | } | ||
46 | |||
47 | ioarea = request_mem_region(mem->start, resource_size(mem), | ||
48 | pdev->name); | ||
49 | if (!ioarea) { | ||
50 | dev_err(&pdev->dev, "SPI region already claimed\n"); | ||
51 | ret = -EBUSY; | ||
52 | goto err_kfree; | ||
53 | } | ||
54 | |||
55 | dws->regs = ioremap_nocache(mem->start, resource_size(mem)); | ||
56 | if (!dws->regs) { | ||
57 | dev_err(&pdev->dev, "SPI region already mapped\n"); | ||
58 | ret = -ENOMEM; | ||
59 | goto err_release_reg; | ||
60 | } | ||
61 | |||
62 | dws->irq = platform_get_irq(pdev, 0); | ||
63 | if (dws->irq < 0) { | ||
64 | dev_err(&pdev->dev, "no irq resource?\n"); | ||
65 | ret = dws->irq; /* -ENXIO */ | ||
66 | goto err_unmap; | ||
67 | } | ||
68 | |||
69 | dwsmmio->clk = clk_get(&pdev->dev, NULL); | ||
70 | if (!dwsmmio->clk) { | ||
71 | ret = -ENODEV; | ||
72 | goto err_irq; | ||
73 | } | ||
74 | clk_enable(dwsmmio->clk); | ||
75 | |||
76 | dws->parent_dev = &pdev->dev; | ||
77 | dws->bus_num = 0; | ||
78 | dws->num_cs = 4; | ||
79 | dws->max_freq = clk_get_rate(dwsmmio->clk); | ||
80 | |||
81 | ret = dw_spi_add_host(dws); | ||
82 | if (ret) | ||
83 | goto err_clk; | ||
84 | |||
85 | platform_set_drvdata(pdev, dwsmmio); | ||
86 | return 0; | ||
87 | |||
88 | err_clk: | ||
89 | clk_disable(dwsmmio->clk); | ||
90 | clk_put(dwsmmio->clk); | ||
91 | dwsmmio->clk = NULL; | ||
92 | err_irq: | ||
93 | free_irq(dws->irq, dws); | ||
94 | err_unmap: | ||
95 | iounmap(dws->regs); | ||
96 | err_release_reg: | ||
97 | release_mem_region(mem->start, resource_size(mem)); | ||
98 | err_kfree: | ||
99 | kfree(dwsmmio); | ||
100 | err_end: | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) | ||
105 | { | ||
106 | struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); | ||
107 | struct resource *mem; | ||
108 | |||
109 | platform_set_drvdata(pdev, NULL); | ||
110 | |||
111 | clk_disable(dwsmmio->clk); | ||
112 | clk_put(dwsmmio->clk); | ||
113 | dwsmmio->clk = NULL; | ||
114 | |||
115 | free_irq(dwsmmio->dws.irq, &dwsmmio->dws); | ||
116 | dw_spi_remove_host(&dwsmmio->dws); | ||
117 | iounmap(dwsmmio->dws.regs); | ||
118 | kfree(dwsmmio); | ||
119 | |||
120 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
121 | release_mem_region(mem->start, resource_size(mem)); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static struct platform_driver dw_spi_mmio_driver = { | ||
126 | .remove = __devexit_p(dw_spi_mmio_remove), | ||
127 | .driver = { | ||
128 | .name = DRIVER_NAME, | ||
129 | .owner = THIS_MODULE, | ||
130 | }, | ||
131 | }; | ||
132 | |||
133 | static int __init dw_spi_mmio_init(void) | ||
134 | { | ||
135 | return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); | ||
136 | } | ||
137 | module_init(dw_spi_mmio_init); | ||
138 | |||
139 | static void __exit dw_spi_mmio_exit(void) | ||
140 | { | ||
141 | platform_driver_unregister(&dw_spi_mmio_driver); | ||
142 | } | ||
143 | module_exit(dw_spi_mmio_exit); | ||
144 | |||
145 | MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>"); | ||
146 | MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); | ||
147 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c index 34ba69161734..1f0735f9cc76 100644 --- a/drivers/spi/dw_spi_pci.c +++ b/drivers/spi/dw_spi_pci.c | |||
@@ -73,6 +73,7 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev, | |||
73 | dws->num_cs = 4; | 73 | dws->num_cs = 4; |
74 | dws->max_freq = 25000000; /* for Moorestwon */ | 74 | dws->max_freq = 25000000; /* for Moorestwon */ |
75 | dws->irq = pdev->irq; | 75 | dws->irq = pdev->irq; |
76 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | ||
76 | 77 | ||
77 | ret = dw_spi_add_host(dws); | 78 | ret = dw_spi_add_host(dws); |
78 | if (ret) | 79 | if (ret) |
@@ -98,6 +99,7 @@ static void __devexit spi_pci_remove(struct pci_dev *pdev) | |||
98 | struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); | 99 | struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); |
99 | 100 | ||
100 | pci_set_drvdata(pdev, NULL); | 101 | pci_set_drvdata(pdev, NULL); |
102 | dw_spi_remove_host(&dwpci->dws); | ||
101 | iounmap(dwpci->dws.regs); | 103 | iounmap(dwpci->dws.regs); |
102 | pci_release_region(pdev, 0); | 104 | pci_release_region(pdev, 0); |
103 | kfree(dwpci); | 105 | kfree(dwpci); |
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index f50c81df336a..04747868d6c4 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -503,7 +503,7 @@ static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) | |||
503 | return mpc52xx_psc_spi_do_remove(&op->dev); | 503 | return mpc52xx_psc_spi_do_remove(&op->dev); |
504 | } | 504 | } |
505 | 505 | ||
506 | static struct of_device_id mpc52xx_psc_spi_of_match[] = { | 506 | static const struct of_device_id mpc52xx_psc_spi_of_match[] = { |
507 | { .compatible = "fsl,mpc5200-psc-spi", }, | 507 | { .compatible = "fsl,mpc5200-psc-spi", }, |
508 | { .compatible = "mpc5200-psc-spi", }, /* old */ | 508 | { .compatible = "mpc5200-psc-spi", }, /* old */ |
509 | {} | 509 | {} |
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index 45bfe6458173..6eab46537a0a 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c | |||
@@ -550,7 +550,7 @@ static int __devexit mpc52xx_spi_remove(struct of_device *op) | |||
550 | return 0; | 550 | return 0; |
551 | } | 551 | } |
552 | 552 | ||
553 | static struct of_device_id mpc52xx_spi_match[] __devinitdata = { | 553 | static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { |
554 | { .compatible = "fsl,mpc5200-spi", }, | 554 | { .compatible = "fsl,mpc5200-spi", }, |
555 | {} | 555 | {} |
556 | }; | 556 | }; |
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 1893f1e96dc4..0ddbbe45e834 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c | |||
@@ -469,7 +469,7 @@ static int spi_imx_setup(struct spi_device *spi) | |||
469 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); | 469 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); |
470 | int gpio = spi_imx->chipselect[spi->chip_select]; | 470 | int gpio = spi_imx->chipselect[spi->chip_select]; |
471 | 471 | ||
472 | pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, | 472 | dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, |
473 | spi->mode, spi->bits_per_word, spi->max_speed_hz); | 473 | spi->mode, spi->bits_per_word, spi->max_speed_hz); |
474 | 474 | ||
475 | if (gpio >= 0) | 475 | if (gpio >= 0) |
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index 1fb2a6ea328c..4f0cc9d457e0 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c | |||
@@ -365,7 +365,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
365 | 365 | ||
366 | if ((mpc8xxx_spi->spibrg / hz) > 64) { | 366 | if ((mpc8xxx_spi->spibrg / hz) > 64) { |
367 | cs->hw_mode |= SPMODE_DIV16; | 367 | cs->hw_mode |= SPMODE_DIV16; |
368 | pm = mpc8xxx_spi->spibrg / (hz * 64); | 368 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; |
369 | 369 | ||
370 | WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " | 370 | WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " |
371 | "Will use %d Hz instead.\n", dev_name(&spi->dev), | 371 | "Will use %d Hz instead.\n", dev_name(&spi->dev), |
@@ -373,7 +373,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
373 | if (pm > 16) | 373 | if (pm > 16) |
374 | pm = 16; | 374 | pm = 16; |
375 | } else | 375 | } else |
376 | pm = mpc8xxx_spi->spibrg / (hz * 4); | 376 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; |
377 | if (pm) | 377 | if (pm) |
378 | pm--; | 378 | pm--; |
379 | 379 | ||
@@ -1328,7 +1328,7 @@ static struct of_platform_driver of_mpc8xxx_spi_driver = { | |||
1328 | static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) | 1328 | static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) |
1329 | { | 1329 | { |
1330 | struct resource *mem; | 1330 | struct resource *mem; |
1331 | unsigned int irq; | 1331 | int irq; |
1332 | struct spi_master *master; | 1332 | struct spi_master *master; |
1333 | 1333 | ||
1334 | if (!pdev->dev.platform_data) | 1334 | if (!pdev->dev.platform_data) |
@@ -1339,7 +1339,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) | |||
1339 | return -EINVAL; | 1339 | return -EINVAL; |
1340 | 1340 | ||
1341 | irq = platform_get_irq(pdev, 0); | 1341 | irq = platform_get_irq(pdev, 0); |
1342 | if (!irq) | 1342 | if (irq <= 0) |
1343 | return -EINVAL; | 1343 | return -EINVAL; |
1344 | 1344 | ||
1345 | master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); | 1345 | master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); |
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index 140a18d6cf3e..6d8d4026a07a 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c | |||
@@ -578,7 +578,7 @@ static int __exit spi_ppc4xx_of_remove(struct of_device *op) | |||
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static struct of_device_id spi_ppc4xx_of_match[] = { | 581 | static const struct of_device_id spi_ppc4xx_of_match[] = { |
582 | { .compatible = "ibm,ppc4xx-spi", }, | 582 | { .compatible = "ibm,ppc4xx-spi", }, |
583 | {}, | 583 | {}, |
584 | }; | 584 | }; |
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index 88a456dba967..97365815a729 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
29 | 29 | ||
30 | #include <mach/dma.h> | 30 | #include <mach/dma.h> |
31 | #include <plat/spi.h> | 31 | #include <plat/s3c64xx-spi.h> |
32 | 32 | ||
33 | /* Registers and bit-fields */ | 33 | /* Registers and bit-fields */ |
34 | 34 | ||
@@ -137,6 +137,7 @@ | |||
137 | /** | 137 | /** |
138 | * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. | 138 | * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. |
139 | * @clk: Pointer to the spi clock. | 139 | * @clk: Pointer to the spi clock. |
140 | * @src_clk: Pointer to the clock used to generate SPI signals. | ||
140 | * @master: Pointer to the SPI Protocol master. | 141 | * @master: Pointer to the SPI Protocol master. |
141 | * @workqueue: Work queue for the SPI xfer requests. | 142 | * @workqueue: Work queue for the SPI xfer requests. |
142 | * @cntrlr_info: Platform specific data for the controller this driver manages. | 143 | * @cntrlr_info: Platform specific data for the controller this driver manages. |
@@ -157,10 +158,11 @@ | |||
157 | struct s3c64xx_spi_driver_data { | 158 | struct s3c64xx_spi_driver_data { |
158 | void __iomem *regs; | 159 | void __iomem *regs; |
159 | struct clk *clk; | 160 | struct clk *clk; |
161 | struct clk *src_clk; | ||
160 | struct platform_device *pdev; | 162 | struct platform_device *pdev; |
161 | struct spi_master *master; | 163 | struct spi_master *master; |
162 | struct workqueue_struct *workqueue; | 164 | struct workqueue_struct *workqueue; |
163 | struct s3c64xx_spi_cntrlr_info *cntrlr_info; | 165 | struct s3c64xx_spi_info *cntrlr_info; |
164 | struct spi_device *tgl_spi; | 166 | struct spi_device *tgl_spi; |
165 | struct work_struct work; | 167 | struct work_struct work; |
166 | struct list_head queue; | 168 | struct list_head queue; |
@@ -180,7 +182,7 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = { | |||
180 | 182 | ||
181 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) | 183 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) |
182 | { | 184 | { |
183 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 185 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
184 | void __iomem *regs = sdd->regs; | 186 | void __iomem *regs = sdd->regs; |
185 | unsigned long loops; | 187 | unsigned long loops; |
186 | u32 val; | 188 | u32 val; |
@@ -225,7 +227,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, | |||
225 | struct spi_device *spi, | 227 | struct spi_device *spi, |
226 | struct spi_transfer *xfer, int dma_mode) | 228 | struct spi_transfer *xfer, int dma_mode) |
227 | { | 229 | { |
228 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 230 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
229 | void __iomem *regs = sdd->regs; | 231 | void __iomem *regs = sdd->regs; |
230 | u32 modecfg, chcfg; | 232 | u32 modecfg, chcfg; |
231 | 233 | ||
@@ -298,19 +300,20 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, | |||
298 | if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ | 300 | if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ |
299 | /* Deselect the last toggled device */ | 301 | /* Deselect the last toggled device */ |
300 | cs = sdd->tgl_spi->controller_data; | 302 | cs = sdd->tgl_spi->controller_data; |
301 | cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); | 303 | cs->set_level(cs->line, |
304 | spi->mode & SPI_CS_HIGH ? 0 : 1); | ||
302 | } | 305 | } |
303 | sdd->tgl_spi = NULL; | 306 | sdd->tgl_spi = NULL; |
304 | } | 307 | } |
305 | 308 | ||
306 | cs = spi->controller_data; | 309 | cs = spi->controller_data; |
307 | cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0); | 310 | cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); |
308 | } | 311 | } |
309 | 312 | ||
310 | static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, | 313 | static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, |
311 | struct spi_transfer *xfer, int dma_mode) | 314 | struct spi_transfer *xfer, int dma_mode) |
312 | { | 315 | { |
313 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 316 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
314 | void __iomem *regs = sdd->regs; | 317 | void __iomem *regs = sdd->regs; |
315 | unsigned long val; | 318 | unsigned long val; |
316 | int ms; | 319 | int ms; |
@@ -384,12 +387,11 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, | |||
384 | if (sdd->tgl_spi == spi) | 387 | if (sdd->tgl_spi == spi) |
385 | sdd->tgl_spi = NULL; | 388 | sdd->tgl_spi = NULL; |
386 | 389 | ||
387 | cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); | 390 | cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); |
388 | } | 391 | } |
389 | 392 | ||
390 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | 393 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) |
391 | { | 394 | { |
392 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
393 | void __iomem *regs = sdd->regs; | 395 | void __iomem *regs = sdd->regs; |
394 | u32 val; | 396 | u32 val; |
395 | 397 | ||
@@ -435,7 +437,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | |||
435 | /* Configure Clock */ | 437 | /* Configure Clock */ |
436 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | 438 | val = readl(regs + S3C64XX_SPI_CLK_CFG); |
437 | val &= ~S3C64XX_SPI_PSR_MASK; | 439 | val &= ~S3C64XX_SPI_PSR_MASK; |
438 | val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1) | 440 | val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) |
439 | & S3C64XX_SPI_PSR_MASK); | 441 | & S3C64XX_SPI_PSR_MASK); |
440 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | 442 | writel(val, regs + S3C64XX_SPI_CLK_CFG); |
441 | 443 | ||
@@ -558,7 +560,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, | |||
558 | static void handle_msg(struct s3c64xx_spi_driver_data *sdd, | 560 | static void handle_msg(struct s3c64xx_spi_driver_data *sdd, |
559 | struct spi_message *msg) | 561 | struct spi_message *msg) |
560 | { | 562 | { |
561 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 563 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
562 | struct spi_device *spi = msg->spi; | 564 | struct spi_device *spi = msg->spi; |
563 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; | 565 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; |
564 | struct spi_transfer *xfer; | 566 | struct spi_transfer *xfer; |
@@ -632,8 +634,8 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, | |||
632 | S3C64XX_SPI_DEACT(sdd); | 634 | S3C64XX_SPI_DEACT(sdd); |
633 | 635 | ||
634 | if (status) { | 636 | if (status) { |
635 | dev_err(&spi->dev, "I/O Error: \ | 637 | dev_err(&spi->dev, "I/O Error: " |
636 | rx-%d tx-%d res:rx-%c tx-%c len-%d\n", | 638 | "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", |
637 | xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, | 639 | xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, |
638 | (sdd->state & RXBUSY) ? 'f' : 'p', | 640 | (sdd->state & RXBUSY) ? 'f' : 'p', |
639 | (sdd->state & TXBUSY) ? 'f' : 'p', | 641 | (sdd->state & TXBUSY) ? 'f' : 'p', |
@@ -786,7 +788,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
786 | { | 788 | { |
787 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; | 789 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; |
788 | struct s3c64xx_spi_driver_data *sdd; | 790 | struct s3c64xx_spi_driver_data *sdd; |
789 | struct s3c64xx_spi_cntrlr_info *sci; | 791 | struct s3c64xx_spi_info *sci; |
790 | struct spi_message *msg; | 792 | struct spi_message *msg; |
791 | u32 psr, speed; | 793 | u32 psr, speed; |
792 | unsigned long flags; | 794 | unsigned long flags; |
@@ -831,17 +833,17 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
831 | } | 833 | } |
832 | 834 | ||
833 | /* Check if we can provide the requested rate */ | 835 | /* Check if we can provide the requested rate */ |
834 | speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */ | 836 | speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ |
835 | 837 | ||
836 | if (spi->max_speed_hz > speed) | 838 | if (spi->max_speed_hz > speed) |
837 | spi->max_speed_hz = speed; | 839 | spi->max_speed_hz = speed; |
838 | 840 | ||
839 | psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1; | 841 | psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; |
840 | psr &= S3C64XX_SPI_PSR_MASK; | 842 | psr &= S3C64XX_SPI_PSR_MASK; |
841 | if (psr == S3C64XX_SPI_PSR_MASK) | 843 | if (psr == S3C64XX_SPI_PSR_MASK) |
842 | psr--; | 844 | psr--; |
843 | 845 | ||
844 | speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); | 846 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); |
845 | if (spi->max_speed_hz < speed) { | 847 | if (spi->max_speed_hz < speed) { |
846 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { | 848 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { |
847 | psr++; | 849 | psr++; |
@@ -851,7 +853,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
851 | } | 853 | } |
852 | } | 854 | } |
853 | 855 | ||
854 | speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); | 856 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); |
855 | if (spi->max_speed_hz >= speed) | 857 | if (spi->max_speed_hz >= speed) |
856 | spi->max_speed_hz = speed; | 858 | spi->max_speed_hz = speed; |
857 | else | 859 | else |
@@ -867,7 +869,7 @@ setup_exit: | |||
867 | 869 | ||
868 | static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | 870 | static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) |
869 | { | 871 | { |
870 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 872 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
871 | void __iomem *regs = sdd->regs; | 873 | void __iomem *regs = sdd->regs; |
872 | unsigned int val; | 874 | unsigned int val; |
873 | 875 | ||
@@ -902,7 +904,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
902 | { | 904 | { |
903 | struct resource *mem_res, *dmatx_res, *dmarx_res; | 905 | struct resource *mem_res, *dmatx_res, *dmarx_res; |
904 | struct s3c64xx_spi_driver_data *sdd; | 906 | struct s3c64xx_spi_driver_data *sdd; |
905 | struct s3c64xx_spi_cntrlr_info *sci; | 907 | struct s3c64xx_spi_info *sci; |
906 | struct spi_master *master; | 908 | struct spi_master *master; |
907 | int ret; | 909 | int ret; |
908 | 910 | ||
@@ -1000,18 +1002,15 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1000 | goto err4; | 1002 | goto err4; |
1001 | } | 1003 | } |
1002 | 1004 | ||
1003 | if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK) | 1005 | sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); |
1004 | sci->src_clk = sdd->clk; | 1006 | if (IS_ERR(sdd->src_clk)) { |
1005 | else | ||
1006 | sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name); | ||
1007 | if (IS_ERR(sci->src_clk)) { | ||
1008 | dev_err(&pdev->dev, | 1007 | dev_err(&pdev->dev, |
1009 | "Unable to acquire clock '%s'\n", sci->src_clk_name); | 1008 | "Unable to acquire clock '%s'\n", sci->src_clk_name); |
1010 | ret = PTR_ERR(sci->src_clk); | 1009 | ret = PTR_ERR(sdd->src_clk); |
1011 | goto err5; | 1010 | goto err5; |
1012 | } | 1011 | } |
1013 | 1012 | ||
1014 | if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) { | 1013 | if (clk_enable(sdd->src_clk)) { |
1015 | dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", | 1014 | dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", |
1016 | sci->src_clk_name); | 1015 | sci->src_clk_name); |
1017 | ret = -EBUSY; | 1016 | ret = -EBUSY; |
@@ -1040,11 +1039,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1040 | goto err8; | 1039 | goto err8; |
1041 | } | 1040 | } |
1042 | 1041 | ||
1043 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \ | 1042 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " |
1044 | with %d Slaves attached\n", | 1043 | "with %d Slaves attached\n", |
1045 | pdev->id, master->num_chipselect); | 1044 | pdev->id, master->num_chipselect); |
1046 | dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\ | 1045 | dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", |
1047 | \tDMA=[Rx-%d, Tx-%d]\n", | ||
1048 | mem_res->end, mem_res->start, | 1046 | mem_res->end, mem_res->start, |
1049 | sdd->rx_dmach, sdd->tx_dmach); | 1047 | sdd->rx_dmach, sdd->tx_dmach); |
1050 | 1048 | ||
@@ -1053,11 +1051,9 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1053 | err8: | 1051 | err8: |
1054 | destroy_workqueue(sdd->workqueue); | 1052 | destroy_workqueue(sdd->workqueue); |
1055 | err7: | 1053 | err7: |
1056 | if (sci->src_clk != sdd->clk) | 1054 | clk_disable(sdd->src_clk); |
1057 | clk_disable(sci->src_clk); | ||
1058 | err6: | 1055 | err6: |
1059 | if (sci->src_clk != sdd->clk) | 1056 | clk_put(sdd->src_clk); |
1060 | clk_put(sci->src_clk); | ||
1061 | err5: | 1057 | err5: |
1062 | clk_disable(sdd->clk); | 1058 | clk_disable(sdd->clk); |
1063 | err4: | 1059 | err4: |
@@ -1078,7 +1074,6 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1078 | { | 1074 | { |
1079 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1075 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1080 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1076 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1081 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
1082 | struct resource *mem_res; | 1077 | struct resource *mem_res; |
1083 | unsigned long flags; | 1078 | unsigned long flags; |
1084 | 1079 | ||
@@ -1093,11 +1088,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1093 | 1088 | ||
1094 | destroy_workqueue(sdd->workqueue); | 1089 | destroy_workqueue(sdd->workqueue); |
1095 | 1090 | ||
1096 | if (sci->src_clk != sdd->clk) | 1091 | clk_disable(sdd->src_clk); |
1097 | clk_disable(sci->src_clk); | 1092 | clk_put(sdd->src_clk); |
1098 | |||
1099 | if (sci->src_clk != sdd->clk) | ||
1100 | clk_put(sci->src_clk); | ||
1101 | 1093 | ||
1102 | clk_disable(sdd->clk); | 1094 | clk_disable(sdd->clk); |
1103 | clk_put(sdd->clk); | 1095 | clk_put(sdd->clk); |
@@ -1105,7 +1097,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1105 | iounmap((void *) sdd->regs); | 1097 | iounmap((void *) sdd->regs); |
1106 | 1098 | ||
1107 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1099 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1108 | release_mem_region(mem_res->start, resource_size(mem_res)); | 1100 | if (mem_res != NULL) |
1101 | release_mem_region(mem_res->start, resource_size(mem_res)); | ||
1109 | 1102 | ||
1110 | platform_set_drvdata(pdev, NULL); | 1103 | platform_set_drvdata(pdev, NULL); |
1111 | spi_master_put(master); | 1104 | spi_master_put(master); |
@@ -1118,8 +1111,6 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | |||
1118 | { | 1111 | { |
1119 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1112 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1120 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1113 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1121 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
1122 | struct s3c64xx_spi_csinfo *cs; | ||
1123 | unsigned long flags; | 1114 | unsigned long flags; |
1124 | 1115 | ||
1125 | spin_lock_irqsave(&sdd->lock, flags); | 1116 | spin_lock_irqsave(&sdd->lock, flags); |
@@ -1130,9 +1121,7 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | |||
1130 | msleep(10); | 1121 | msleep(10); |
1131 | 1122 | ||
1132 | /* Disable the clock */ | 1123 | /* Disable the clock */ |
1133 | if (sci->src_clk != sdd->clk) | 1124 | clk_disable(sdd->src_clk); |
1134 | clk_disable(sci->src_clk); | ||
1135 | |||
1136 | clk_disable(sdd->clk); | 1125 | clk_disable(sdd->clk); |
1137 | 1126 | ||
1138 | sdd->cur_speed = 0; /* Output Clock is stopped */ | 1127 | sdd->cur_speed = 0; /* Output Clock is stopped */ |
@@ -1144,15 +1133,13 @@ static int s3c64xx_spi_resume(struct platform_device *pdev) | |||
1144 | { | 1133 | { |
1145 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1134 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1146 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1135 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1147 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 1136 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
1148 | unsigned long flags; | 1137 | unsigned long flags; |
1149 | 1138 | ||
1150 | sci->cfg_gpio(pdev); | 1139 | sci->cfg_gpio(pdev); |
1151 | 1140 | ||
1152 | /* Enable the clock */ | 1141 | /* Enable the clock */ |
1153 | if (sci->src_clk != sdd->clk) | 1142 | clk_enable(sdd->src_clk); |
1154 | clk_enable(sci->src_clk); | ||
1155 | |||
1156 | clk_enable(sdd->clk); | 1143 | clk_enable(sdd->clk); |
1157 | 1144 | ||
1158 | s3c64xx_spi_hwinit(sdd, pdev->id); | 1145 | s3c64xx_spi_hwinit(sdd, pdev->id); |
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index 30973ec16a93..d93b66743ba7 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c | |||
@@ -20,12 +20,12 @@ | |||
20 | #include <linux/bitmap.h> | 20 | #include <linux/bitmap.h> |
21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/err.h> | ||
23 | 24 | ||
24 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
25 | #include <linux/spi/spi_bitbang.h> | 26 | #include <linux/spi/spi_bitbang.h> |
26 | #include <linux/spi/sh_msiof.h> | 27 | #include <linux/spi/sh_msiof.h> |
27 | 28 | ||
28 | #include <asm/spi.h> | ||
29 | #include <asm/unaligned.h> | 29 | #include <asm/unaligned.h> |
30 | 30 | ||
31 | struct sh_msiof_spi_priv { | 31 | struct sh_msiof_spi_priv { |
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c index 2552bb364005..fadff76eb7e0 100644 --- a/drivers/spi/spi_stmp.c +++ b/drivers/spi/spi_stmp.c | |||
@@ -76,7 +76,7 @@ struct stmp_spi { | |||
76 | break; \ | 76 | break; \ |
77 | } \ | 77 | } \ |
78 | cpu_relax(); \ | 78 | cpu_relax(); \ |
79 | } while (time_before(end_jiffies, jiffies)); \ | 79 | } while (time_before(jiffies, end_jiffies)); \ |
80 | succeeded; \ | 80 | succeeded; \ |
81 | }) | 81 | }) |
82 | 82 | ||
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 9f386379c169..1b47363cb73f 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c | |||
@@ -93,6 +93,26 @@ struct xilinx_spi { | |||
93 | void (*rx_fn) (struct xilinx_spi *); | 93 | void (*rx_fn) (struct xilinx_spi *); |
94 | }; | 94 | }; |
95 | 95 | ||
96 | static void xspi_write32(u32 val, void __iomem *addr) | ||
97 | { | ||
98 | iowrite32(val, addr); | ||
99 | } | ||
100 | |||
101 | static unsigned int xspi_read32(void __iomem *addr) | ||
102 | { | ||
103 | return ioread32(addr); | ||
104 | } | ||
105 | |||
106 | static void xspi_write32_be(u32 val, void __iomem *addr) | ||
107 | { | ||
108 | iowrite32be(val, addr); | ||
109 | } | ||
110 | |||
111 | static unsigned int xspi_read32_be(void __iomem *addr) | ||
112 | { | ||
113 | return ioread32be(addr); | ||
114 | } | ||
115 | |||
96 | static void xspi_tx8(struct xilinx_spi *xspi) | 116 | static void xspi_tx8(struct xilinx_spi *xspi) |
97 | { | 117 | { |
98 | xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); | 118 | xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); |
@@ -374,11 +394,11 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | |||
374 | xspi->mem = *mem; | 394 | xspi->mem = *mem; |
375 | xspi->irq = irq; | 395 | xspi->irq = irq; |
376 | if (pdata->little_endian) { | 396 | if (pdata->little_endian) { |
377 | xspi->read_fn = ioread32; | 397 | xspi->read_fn = xspi_read32; |
378 | xspi->write_fn = iowrite32; | 398 | xspi->write_fn = xspi_write32; |
379 | } else { | 399 | } else { |
380 | xspi->read_fn = ioread32be; | 400 | xspi->read_fn = xspi_read32_be; |
381 | xspi->write_fn = iowrite32be; | 401 | xspi->write_fn = xspi_write32_be; |
382 | } | 402 | } |
383 | xspi->bits_per_word = pdata->bits_per_word; | 403 | xspi->bits_per_word = pdata->bits_per_word; |
384 | if (xspi->bits_per_word == 8) { | 404 | if (xspi->bits_per_word == 8) { |
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c index 71dc3adc0495..ed34a8d419c7 100644 --- a/drivers/spi/xilinx_spi_of.c +++ b/drivers/spi/xilinx_spi_of.c | |||
@@ -99,7 +99,7 @@ static int __exit xilinx_spi_of_remove(struct of_device *op) | |||
99 | return xilinx_spi_remove(op); | 99 | return xilinx_spi_remove(op); |
100 | } | 100 | } |
101 | 101 | ||
102 | static struct of_device_id xilinx_spi_of_match[] = { | 102 | static const struct of_device_id xilinx_spi_of_match[] = { |
103 | { .compatible = "xlnx,xps-spi-2.00.a", }, | 103 | { .compatible = "xlnx,xps-spi-2.00.a", }, |
104 | { .compatible = "xlnx,xps-spi-2.00.b", }, | 104 | { .compatible = "xlnx,xps-spi-2.00.b", }, |
105 | {} | 105 | {} |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index e9f995486ec1..bbeeb92a2131 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -78,7 +78,7 @@ MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); | |||
78 | MODULE_DESCRIPTION("USB Mass Storage driver for Linux"); | 78 | MODULE_DESCRIPTION("USB Mass Storage driver for Linux"); |
79 | MODULE_LICENSE("GPL"); | 79 | MODULE_LICENSE("GPL"); |
80 | 80 | ||
81 | static unsigned int delay_use = 5; | 81 | static unsigned int delay_use = 1; |
82 | module_param(delay_use, uint, S_IRUGO | S_IWUSR); | 82 | module_param(delay_use, uint, S_IRUGO | S_IWUSR); |
83 | MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device"); | 83 | MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device"); |
84 | 84 | ||
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index e4e4d433b007..9ee67d6da710 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c | |||
@@ -1931,22 +1931,22 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i | |||
1931 | * PowerMac2,2 summer 2000 iMacs | 1931 | * PowerMac2,2 summer 2000 iMacs |
1932 | * PowerMac4,1 january 2001 iMacs "flower power" | 1932 | * PowerMac4,1 january 2001 iMacs "flower power" |
1933 | */ | 1933 | */ |
1934 | if (machine_is_compatible("PowerMac2,1") || | 1934 | if (of_machine_is_compatible("PowerMac2,1") || |
1935 | machine_is_compatible("PowerMac2,2") || | 1935 | of_machine_is_compatible("PowerMac2,2") || |
1936 | machine_is_compatible("PowerMac4,1")) | 1936 | of_machine_is_compatible("PowerMac4,1")) |
1937 | default_vmode = VMODE_1024_768_75; | 1937 | default_vmode = VMODE_1024_768_75; |
1938 | 1938 | ||
1939 | /* iBook SE */ | 1939 | /* iBook SE */ |
1940 | if (machine_is_compatible("PowerBook2,2")) | 1940 | if (of_machine_is_compatible("PowerBook2,2")) |
1941 | default_vmode = VMODE_800_600_60; | 1941 | default_vmode = VMODE_800_600_60; |
1942 | 1942 | ||
1943 | /* PowerBook Firewire (Pismo), iBook Dual USB */ | 1943 | /* PowerBook Firewire (Pismo), iBook Dual USB */ |
1944 | if (machine_is_compatible("PowerBook3,1") || | 1944 | if (of_machine_is_compatible("PowerBook3,1") || |
1945 | machine_is_compatible("PowerBook4,1")) | 1945 | of_machine_is_compatible("PowerBook4,1")) |
1946 | default_vmode = VMODE_1024_768_60; | 1946 | default_vmode = VMODE_1024_768_60; |
1947 | 1947 | ||
1948 | /* PowerBook Titanium */ | 1948 | /* PowerBook Titanium */ |
1949 | if (machine_is_compatible("PowerBook3,2")) | 1949 | if (of_machine_is_compatible("PowerBook3,2")) |
1950 | default_vmode = VMODE_1152_768_60; | 1950 | default_vmode = VMODE_1152_768_60; |
1951 | 1951 | ||
1952 | if (default_cmode > 16) | 1952 | if (default_cmode > 16) |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 1ddeb4c34763..e45ab8db2ddc 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -2439,7 +2439,7 @@ static int __devinit aty_init(struct fb_info *info) | |||
2439 | * The Apple iBook1 uses non-standard memory frequencies. | 2439 | * The Apple iBook1 uses non-standard memory frequencies. |
2440 | * We detect it and set the frequency manually. | 2440 | * We detect it and set the frequency manually. |
2441 | */ | 2441 | */ |
2442 | if (machine_is_compatible("PowerBook2,1")) { | 2442 | if (of_machine_is_compatible("PowerBook2,1")) { |
2443 | par->pll_limits.mclk = 70; | 2443 | par->pll_limits.mclk = 70; |
2444 | par->pll_limits.xclk = 53; | 2444 | par->pll_limits.xclk = 53; |
2445 | } | 2445 | } |
@@ -2659,7 +2659,7 @@ static int __devinit aty_init(struct fb_info *info) | |||
2659 | FBINFO_HWACCEL_YPAN; | 2659 | FBINFO_HWACCEL_YPAN; |
2660 | 2660 | ||
2661 | #ifdef CONFIG_PMAC_BACKLIGHT | 2661 | #ifdef CONFIG_PMAC_BACKLIGHT |
2662 | if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) { | 2662 | if (M64_HAS(G3_PB_1_1) && of_machine_is_compatible("PowerBook1,1")) { |
2663 | /* | 2663 | /* |
2664 | * these bits let the 101 powerbook | 2664 | * these bits let the 101 powerbook |
2665 | * wake up from sleep -- paulus | 2665 | * wake up from sleep -- paulus |
@@ -2690,9 +2690,9 @@ static int __devinit aty_init(struct fb_info *info) | |||
2690 | if (M64_HAS(G3_PB_1024x768)) | 2690 | if (M64_HAS(G3_PB_1024x768)) |
2691 | /* G3 PowerBook with 1024x768 LCD */ | 2691 | /* G3 PowerBook with 1024x768 LCD */ |
2692 | default_vmode = VMODE_1024_768_60; | 2692 | default_vmode = VMODE_1024_768_60; |
2693 | else if (machine_is_compatible("iMac")) | 2693 | else if (of_machine_is_compatible("iMac")) |
2694 | default_vmode = VMODE_1024_768_75; | 2694 | default_vmode = VMODE_1024_768_75; |
2695 | else if (machine_is_compatible("PowerBook2,1")) | 2695 | else if (of_machine_is_compatible("PowerBook2,1")) |
2696 | /* iBook with 800x600 LCD */ | 2696 | /* iBook with 800x600 LCD */ |
2697 | default_vmode = VMODE_800_600_60; | 2697 | default_vmode = VMODE_800_600_60; |
2698 | else | 2698 | else |
@@ -3104,7 +3104,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
3104 | } | 3104 | } |
3105 | 3105 | ||
3106 | dp = pci_device_to_OF_node(pdev); | 3106 | dp = pci_device_to_OF_node(pdev); |
3107 | if (node == dp->node) { | 3107 | if (node == dp->phandle) { |
3108 | struct fb_var_screeninfo *var = &default_var; | 3108 | struct fb_var_screeninfo *var = &default_var; |
3109 | unsigned int N, P, Q, M, T, R; | 3109 | unsigned int N, P, Q, M, T, R; |
3110 | u32 v_total, h_total; | 3110 | u32 v_total, h_total; |
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c index 1a056adb61c8..fa1198c4ccc5 100644 --- a/drivers/video/aty/radeon_backlight.c +++ b/drivers/video/aty/radeon_backlight.c | |||
@@ -175,9 +175,9 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo) | |||
175 | 175 | ||
176 | #ifdef CONFIG_PMAC_BACKLIGHT | 176 | #ifdef CONFIG_PMAC_BACKLIGHT |
177 | pdata->negative = pdata->negative || | 177 | pdata->negative = pdata->negative || |
178 | machine_is_compatible("PowerBook4,3") || | 178 | of_machine_is_compatible("PowerBook4,3") || |
179 | machine_is_compatible("PowerBook6,3") || | 179 | of_machine_is_compatible("PowerBook6,3") || |
180 | machine_is_compatible("PowerBook6,5"); | 180 | of_machine_is_compatible("PowerBook6,5"); |
181 | #endif | 181 | #endif |
182 | 182 | ||
183 | rinfo->info->bl_dev = bd; | 183 | rinfo->info->bl_dev = bd; |
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c index 53f8f1100e81..f9975100d56d 100644 --- a/drivers/video/pvr2fb.c +++ b/drivers/video/pvr2fb.c | |||
@@ -831,7 +831,7 @@ static int __devinit pvr2fb_common_init(void) | |||
831 | printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node); | 831 | printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node); |
832 | 832 | ||
833 | pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len, | 833 | pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len, |
834 | fb_info->fix.id, pgprot_val(PAGE_SHARED)); | 834 | fb_info->fix.id, PAGE_SHARED); |
835 | 835 | ||
836 | printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n", | 836 | printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n", |
837 | fb_info->node, pvr2fb_map); | 837 | fb_info->node, pvr2fb_map); |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index a69830d26f7f..8d7653e56df5 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/vmalloc.h> | 21 | #include <linux/vmalloc.h> |
22 | #include <linux/ioctl.h> | ||
22 | #include <video/sh_mobile_lcdc.h> | 23 | #include <video/sh_mobile_lcdc.h> |
23 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
24 | 25 | ||
@@ -106,6 +107,7 @@ static unsigned long lcdc_offs_sublcd[NR_CH_REGS] = { | |||
106 | #define LDRCNTR_SRC 0x00010000 | 107 | #define LDRCNTR_SRC 0x00010000 |
107 | #define LDRCNTR_MRS 0x00000002 | 108 | #define LDRCNTR_MRS 0x00000002 |
108 | #define LDRCNTR_MRC 0x00000001 | 109 | #define LDRCNTR_MRC 0x00000001 |
110 | #define LDSR_MRS 0x00000100 | ||
109 | 111 | ||
110 | struct sh_mobile_lcdc_priv; | 112 | struct sh_mobile_lcdc_priv; |
111 | struct sh_mobile_lcdc_chan { | 113 | struct sh_mobile_lcdc_chan { |
@@ -122,8 +124,8 @@ struct sh_mobile_lcdc_chan { | |||
122 | struct scatterlist *sglist; | 124 | struct scatterlist *sglist; |
123 | unsigned long frame_end; | 125 | unsigned long frame_end; |
124 | unsigned long pan_offset; | 126 | unsigned long pan_offset; |
125 | unsigned long new_pan_offset; | ||
126 | wait_queue_head_t frame_end_wait; | 127 | wait_queue_head_t frame_end_wait; |
128 | struct completion vsync_completion; | ||
127 | }; | 129 | }; |
128 | 130 | ||
129 | struct sh_mobile_lcdc_priv { | 131 | struct sh_mobile_lcdc_priv { |
@@ -366,19 +368,8 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) | |||
366 | } | 368 | } |
367 | 369 | ||
368 | /* VSYNC End */ | 370 | /* VSYNC End */ |
369 | if (ldintr & LDINTR_VES) { | 371 | if (ldintr & LDINTR_VES) |
370 | unsigned long ldrcntr = lcdc_read(priv, _LDRCNTR); | 372 | complete(&ch->vsync_completion); |
371 | /* Set the source address for the next refresh */ | ||
372 | lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle + | ||
373 | ch->new_pan_offset); | ||
374 | if (lcdc_chan_is_sublcd(ch)) | ||
375 | lcdc_write(ch->lcdc, _LDRCNTR, | ||
376 | ldrcntr ^ LDRCNTR_SRS); | ||
377 | else | ||
378 | lcdc_write(ch->lcdc, _LDRCNTR, | ||
379 | ldrcntr ^ LDRCNTR_MRS); | ||
380 | ch->pan_offset = ch->new_pan_offset; | ||
381 | } | ||
382 | } | 373 | } |
383 | 374 | ||
384 | return IRQ_HANDLED; | 375 | return IRQ_HANDLED; |
@@ -767,25 +758,69 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var, | |||
767 | struct fb_info *info) | 758 | struct fb_info *info) |
768 | { | 759 | { |
769 | struct sh_mobile_lcdc_chan *ch = info->par; | 760 | struct sh_mobile_lcdc_chan *ch = info->par; |
761 | struct sh_mobile_lcdc_priv *priv = ch->lcdc; | ||
762 | unsigned long ldrcntr; | ||
763 | unsigned long new_pan_offset; | ||
764 | |||
765 | new_pan_offset = (var->yoffset * info->fix.line_length) + | ||
766 | (var->xoffset * (info->var.bits_per_pixel / 8)); | ||
770 | 767 | ||
771 | if (info->var.xoffset == var->xoffset && | 768 | if (new_pan_offset == ch->pan_offset) |
772 | info->var.yoffset == var->yoffset) | ||
773 | return 0; /* No change, do nothing */ | 769 | return 0; /* No change, do nothing */ |
774 | 770 | ||
775 | ch->new_pan_offset = (var->yoffset * info->fix.line_length) + | 771 | ldrcntr = lcdc_read(priv, _LDRCNTR); |
776 | (var->xoffset * (info->var.bits_per_pixel / 8)); | ||
777 | 772 | ||
778 | if (ch->new_pan_offset != ch->pan_offset) { | 773 | /* Set the source address for the next refresh */ |
779 | unsigned long ldintr; | 774 | lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle + new_pan_offset); |
780 | ldintr = lcdc_read(ch->lcdc, _LDINTR); | 775 | if (lcdc_chan_is_sublcd(ch)) |
781 | ldintr |= LDINTR_VEE; | 776 | lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); |
782 | lcdc_write(ch->lcdc, _LDINTR, ldintr); | 777 | else |
783 | sh_mobile_lcdc_deferred_io_touch(info); | 778 | lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS); |
784 | } | 779 | |
780 | ch->pan_offset = new_pan_offset; | ||
781 | |||
782 | sh_mobile_lcdc_deferred_io_touch(info); | ||
783 | |||
784 | return 0; | ||
785 | } | ||
786 | |||
787 | static int sh_mobile_wait_for_vsync(struct fb_info *info) | ||
788 | { | ||
789 | struct sh_mobile_lcdc_chan *ch = info->par; | ||
790 | unsigned long ldintr; | ||
791 | int ret; | ||
792 | |||
793 | /* Enable VSync End interrupt */ | ||
794 | ldintr = lcdc_read(ch->lcdc, _LDINTR); | ||
795 | ldintr |= LDINTR_VEE; | ||
796 | lcdc_write(ch->lcdc, _LDINTR, ldintr); | ||
797 | |||
798 | ret = wait_for_completion_interruptible_timeout(&ch->vsync_completion, | ||
799 | msecs_to_jiffies(100)); | ||
800 | if (!ret) | ||
801 | return -ETIMEDOUT; | ||
785 | 802 | ||
786 | return 0; | 803 | return 0; |
787 | } | 804 | } |
788 | 805 | ||
806 | static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd, | ||
807 | unsigned long arg) | ||
808 | { | ||
809 | int retval; | ||
810 | |||
811 | switch (cmd) { | ||
812 | case FBIO_WAITFORVSYNC: | ||
813 | retval = sh_mobile_wait_for_vsync(info); | ||
814 | break; | ||
815 | |||
816 | default: | ||
817 | retval = -ENOIOCTLCMD; | ||
818 | break; | ||
819 | } | ||
820 | return retval; | ||
821 | } | ||
822 | |||
823 | |||
789 | static struct fb_ops sh_mobile_lcdc_ops = { | 824 | static struct fb_ops sh_mobile_lcdc_ops = { |
790 | .owner = THIS_MODULE, | 825 | .owner = THIS_MODULE, |
791 | .fb_setcolreg = sh_mobile_lcdc_setcolreg, | 826 | .fb_setcolreg = sh_mobile_lcdc_setcolreg, |
@@ -795,6 +830,7 @@ static struct fb_ops sh_mobile_lcdc_ops = { | |||
795 | .fb_copyarea = sh_mobile_lcdc_copyarea, | 830 | .fb_copyarea = sh_mobile_lcdc_copyarea, |
796 | .fb_imageblit = sh_mobile_lcdc_imageblit, | 831 | .fb_imageblit = sh_mobile_lcdc_imageblit, |
797 | .fb_pan_display = sh_mobile_fb_pan_display, | 832 | .fb_pan_display = sh_mobile_fb_pan_display, |
833 | .fb_ioctl = sh_mobile_ioctl, | ||
798 | }; | 834 | }; |
799 | 835 | ||
800 | static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp) | 836 | static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp) |
@@ -962,8 +998,8 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
962 | goto err1; | 998 | goto err1; |
963 | } | 999 | } |
964 | init_waitqueue_head(&priv->ch[i].frame_end_wait); | 1000 | init_waitqueue_head(&priv->ch[i].frame_end_wait); |
1001 | init_completion(&priv->ch[i].vsync_completion); | ||
965 | priv->ch[j].pan_offset = 0; | 1002 | priv->ch[j].pan_offset = 0; |
966 | priv->ch[j].new_pan_offset = 0; | ||
967 | 1003 | ||
968 | switch (pdata->ch[i].chan) { | 1004 | switch (pdata->ch[i].chan) { |
969 | case LCDC_CHAN_MAINLCD: | 1005 | case LCDC_CHAN_MAINLCD: |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 505be88c82ae..369f2eebbad1 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -28,7 +28,7 @@ | |||
28 | struct virtio_balloon | 28 | struct virtio_balloon |
29 | { | 29 | { |
30 | struct virtio_device *vdev; | 30 | struct virtio_device *vdev; |
31 | struct virtqueue *inflate_vq, *deflate_vq; | 31 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; |
32 | 32 | ||
33 | /* Where the ballooning thread waits for config to change. */ | 33 | /* Where the ballooning thread waits for config to change. */ |
34 | wait_queue_head_t config_change; | 34 | wait_queue_head_t config_change; |
@@ -49,6 +49,10 @@ struct virtio_balloon | |||
49 | /* The array of pfns we tell the Host about. */ | 49 | /* The array of pfns we tell the Host about. */ |
50 | unsigned int num_pfns; | 50 | unsigned int num_pfns; |
51 | u32 pfns[256]; | 51 | u32 pfns[256]; |
52 | |||
53 | /* Memory statistics */ | ||
54 | int need_stats_update; | ||
55 | struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; | ||
52 | }; | 56 | }; |
53 | 57 | ||
54 | static struct virtio_device_id id_table[] = { | 58 | static struct virtio_device_id id_table[] = { |
@@ -154,6 +158,72 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) | |||
154 | } | 158 | } |
155 | } | 159 | } |
156 | 160 | ||
161 | static inline void update_stat(struct virtio_balloon *vb, int idx, | ||
162 | u16 tag, u64 val) | ||
163 | { | ||
164 | BUG_ON(idx >= VIRTIO_BALLOON_S_NR); | ||
165 | vb->stats[idx].tag = tag; | ||
166 | vb->stats[idx].val = val; | ||
167 | } | ||
168 | |||
169 | #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) | ||
170 | |||
171 | static void update_balloon_stats(struct virtio_balloon *vb) | ||
172 | { | ||
173 | unsigned long events[NR_VM_EVENT_ITEMS]; | ||
174 | struct sysinfo i; | ||
175 | int idx = 0; | ||
176 | |||
177 | all_vm_events(events); | ||
178 | si_meminfo(&i); | ||
179 | |||
180 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, | ||
181 | pages_to_bytes(events[PSWPIN])); | ||
182 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, | ||
183 | pages_to_bytes(events[PSWPOUT])); | ||
184 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); | ||
185 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); | ||
186 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, | ||
187 | pages_to_bytes(i.freeram)); | ||
188 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, | ||
189 | pages_to_bytes(i.totalram)); | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * While most virtqueues communicate guest-initiated requests to the hypervisor, | ||
194 | * the stats queue operates in reverse. The driver initializes the virtqueue | ||
195 | * with a single buffer. From that point forward, all conversations consist of | ||
196 | * a hypervisor request (a call to this function) which directs us to refill | ||
197 | * the virtqueue with a fresh stats buffer. Since stats collection can sleep, | ||
198 | * we notify our kthread which does the actual work via stats_handle_request(). | ||
199 | */ | ||
200 | static void stats_request(struct virtqueue *vq) | ||
201 | { | ||
202 | struct virtio_balloon *vb; | ||
203 | unsigned int len; | ||
204 | |||
205 | vb = vq->vq_ops->get_buf(vq, &len); | ||
206 | if (!vb) | ||
207 | return; | ||
208 | vb->need_stats_update = 1; | ||
209 | wake_up(&vb->config_change); | ||
210 | } | ||
211 | |||
212 | static void stats_handle_request(struct virtio_balloon *vb) | ||
213 | { | ||
214 | struct virtqueue *vq; | ||
215 | struct scatterlist sg; | ||
216 | |||
217 | vb->need_stats_update = 0; | ||
218 | update_balloon_stats(vb); | ||
219 | |||
220 | vq = vb->stats_vq; | ||
221 | sg_init_one(&sg, vb->stats, sizeof(vb->stats)); | ||
222 | if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) | ||
223 | BUG(); | ||
224 | vq->vq_ops->kick(vq); | ||
225 | } | ||
226 | |||
157 | static void virtballoon_changed(struct virtio_device *vdev) | 227 | static void virtballoon_changed(struct virtio_device *vdev) |
158 | { | 228 | { |
159 | struct virtio_balloon *vb = vdev->priv; | 229 | struct virtio_balloon *vb = vdev->priv; |
@@ -190,8 +260,11 @@ static int balloon(void *_vballoon) | |||
190 | try_to_freeze(); | 260 | try_to_freeze(); |
191 | wait_event_interruptible(vb->config_change, | 261 | wait_event_interruptible(vb->config_change, |
192 | (diff = towards_target(vb)) != 0 | 262 | (diff = towards_target(vb)) != 0 |
263 | || vb->need_stats_update | ||
193 | || kthread_should_stop() | 264 | || kthread_should_stop() |
194 | || freezing(current)); | 265 | || freezing(current)); |
266 | if (vb->need_stats_update) | ||
267 | stats_handle_request(vb); | ||
195 | if (diff > 0) | 268 | if (diff > 0) |
196 | fill_balloon(vb, diff); | 269 | fill_balloon(vb, diff); |
197 | else if (diff < 0) | 270 | else if (diff < 0) |
@@ -204,10 +277,10 @@ static int balloon(void *_vballoon) | |||
204 | static int virtballoon_probe(struct virtio_device *vdev) | 277 | static int virtballoon_probe(struct virtio_device *vdev) |
205 | { | 278 | { |
206 | struct virtio_balloon *vb; | 279 | struct virtio_balloon *vb; |
207 | struct virtqueue *vqs[2]; | 280 | struct virtqueue *vqs[3]; |
208 | vq_callback_t *callbacks[] = { balloon_ack, balloon_ack }; | 281 | vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; |
209 | const char *names[] = { "inflate", "deflate" }; | 282 | const char *names[] = { "inflate", "deflate", "stats" }; |
210 | int err; | 283 | int err, nvqs; |
211 | 284 | ||
212 | vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); | 285 | vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); |
213 | if (!vb) { | 286 | if (!vb) { |
@@ -219,14 +292,31 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
219 | vb->num_pages = 0; | 292 | vb->num_pages = 0; |
220 | init_waitqueue_head(&vb->config_change); | 293 | init_waitqueue_head(&vb->config_change); |
221 | vb->vdev = vdev; | 294 | vb->vdev = vdev; |
295 | vb->need_stats_update = 0; | ||
222 | 296 | ||
223 | /* We expect two virtqueues. */ | 297 | /* We expect two virtqueues: inflate and deflate, |
224 | err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); | 298 | * and optionally stat. */ |
299 | nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; | ||
300 | err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); | ||
225 | if (err) | 301 | if (err) |
226 | goto out_free_vb; | 302 | goto out_free_vb; |
227 | 303 | ||
228 | vb->inflate_vq = vqs[0]; | 304 | vb->inflate_vq = vqs[0]; |
229 | vb->deflate_vq = vqs[1]; | 305 | vb->deflate_vq = vqs[1]; |
306 | if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { | ||
307 | struct scatterlist sg; | ||
308 | vb->stats_vq = vqs[2]; | ||
309 | |||
310 | /* | ||
311 | * Prime this virtqueue with one buffer so the hypervisor can | ||
312 | * use it to signal us later. | ||
313 | */ | ||
314 | sg_init_one(&sg, vb->stats, sizeof vb->stats); | ||
315 | if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq, | ||
316 | &sg, 1, 0, vb) < 0) | ||
317 | BUG(); | ||
318 | vb->stats_vq->vq_ops->kick(vb->stats_vq); | ||
319 | } | ||
230 | 320 | ||
231 | vb->thread = kthread_run(balloon, vb, "vballoon"); | 321 | vb->thread = kthread_run(balloon, vb, "vballoon"); |
232 | if (IS_ERR(vb->thread)) { | 322 | if (IS_ERR(vb->thread)) { |
@@ -264,7 +354,10 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev) | |||
264 | kfree(vb); | 354 | kfree(vb); |
265 | } | 355 | } |
266 | 356 | ||
267 | static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST }; | 357 | static unsigned int features[] = { |
358 | VIRTIO_BALLOON_F_MUST_TELL_HOST, | ||
359 | VIRTIO_BALLOON_F_STATS_VQ, | ||
360 | }; | ||
268 | 361 | ||
269 | static struct virtio_driver virtio_balloon_driver = { | 362 | static struct virtio_driver virtio_balloon_driver = { |
270 | .feature_table = features, | 363 | .feature_table = features, |
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 28d9cf7cf72f..1d5191fab62e 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -702,7 +702,7 @@ static struct pci_driver virtio_pci_driver = { | |||
702 | .name = "virtio-pci", | 702 | .name = "virtio-pci", |
703 | .id_table = virtio_pci_id_table, | 703 | .id_table = virtio_pci_id_table, |
704 | .probe = virtio_pci_probe, | 704 | .probe = virtio_pci_probe, |
705 | .remove = virtio_pci_remove, | 705 | .remove = __devexit_p(virtio_pci_remove), |
706 | #ifdef CONFIG_PM | 706 | #ifdef CONFIG_PM |
707 | .suspend = virtio_pci_suspend, | 707 | .suspend = virtio_pci_suspend, |
708 | .resume = virtio_pci_resume, | 708 | .resume = virtio_pci_resume, |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index fbd2ecde93e4..0db906b3c95d 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -21,6 +21,24 @@ | |||
21 | #include <linux/virtio_config.h> | 21 | #include <linux/virtio_config.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | 23 | ||
24 | /* virtio guest is communicating with a virtual "device" that actually runs on | ||
25 | * a host processor. Memory barriers are used to control SMP effects. */ | ||
26 | #ifdef CONFIG_SMP | ||
27 | /* Where possible, use SMP barriers which are more lightweight than mandatory | ||
28 | * barriers, because mandatory barriers control MMIO effects on accesses | ||
29 | * through relaxed memory I/O windows (which virtio does not use). */ | ||
30 | #define virtio_mb() smp_mb() | ||
31 | #define virtio_rmb() smp_rmb() | ||
32 | #define virtio_wmb() smp_wmb() | ||
33 | #else | ||
34 | /* We must force memory ordering even if guest is UP since host could be | ||
35 | * running on another CPU, but SMP barriers are defined to barrier() in that | ||
36 | * configuration. So fall back to mandatory barriers instead. */ | ||
37 | #define virtio_mb() mb() | ||
38 | #define virtio_rmb() rmb() | ||
39 | #define virtio_wmb() wmb() | ||
40 | #endif | ||
41 | |||
24 | #ifdef DEBUG | 42 | #ifdef DEBUG |
25 | /* For development, we want to crash whenever the ring is screwed. */ | 43 | /* For development, we want to crash whenever the ring is screwed. */ |
26 | #define BAD_RING(_vq, fmt, args...) \ | 44 | #define BAD_RING(_vq, fmt, args...) \ |
@@ -36,10 +54,9 @@ | |||
36 | panic("%s:in_use = %i\n", \ | 54 | panic("%s:in_use = %i\n", \ |
37 | (_vq)->vq.name, (_vq)->in_use); \ | 55 | (_vq)->vq.name, (_vq)->in_use); \ |
38 | (_vq)->in_use = __LINE__; \ | 56 | (_vq)->in_use = __LINE__; \ |
39 | mb(); \ | ||
40 | } while (0) | 57 | } while (0) |
41 | #define END_USE(_vq) \ | 58 | #define END_USE(_vq) \ |
42 | do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) | 59 | do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) |
43 | #else | 60 | #else |
44 | #define BAD_RING(_vq, fmt, args...) \ | 61 | #define BAD_RING(_vq, fmt, args...) \ |
45 | do { \ | 62 | do { \ |
@@ -221,13 +238,13 @@ static void vring_kick(struct virtqueue *_vq) | |||
221 | START_USE(vq); | 238 | START_USE(vq); |
222 | /* Descriptors and available array need to be set before we expose the | 239 | /* Descriptors and available array need to be set before we expose the |
223 | * new available array entries. */ | 240 | * new available array entries. */ |
224 | wmb(); | 241 | virtio_wmb(); |
225 | 242 | ||
226 | vq->vring.avail->idx += vq->num_added; | 243 | vq->vring.avail->idx += vq->num_added; |
227 | vq->num_added = 0; | 244 | vq->num_added = 0; |
228 | 245 | ||
229 | /* Need to update avail index before checking if we should notify */ | 246 | /* Need to update avail index before checking if we should notify */ |
230 | mb(); | 247 | virtio_mb(); |
231 | 248 | ||
232 | if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) | 249 | if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) |
233 | /* Prod other side to tell it about changes. */ | 250 | /* Prod other side to tell it about changes. */ |
@@ -286,7 +303,7 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) | |||
286 | } | 303 | } |
287 | 304 | ||
288 | /* Only get used array entries after they have been exposed by host. */ | 305 | /* Only get used array entries after they have been exposed by host. */ |
289 | rmb(); | 306 | virtio_rmb(); |
290 | 307 | ||
291 | i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; | 308 | i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; |
292 | *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; | 309 | *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; |
@@ -324,7 +341,7 @@ static bool vring_enable_cb(struct virtqueue *_vq) | |||
324 | /* We optimistically turn back on interrupts, then check if there was | 341 | /* We optimistically turn back on interrupts, then check if there was |
325 | * more to do. */ | 342 | * more to do. */ |
326 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; | 343 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; |
327 | mb(); | 344 | virtio_mb(); |
328 | if (unlikely(more_used(vq))) { | 345 | if (unlikely(more_used(vq))) { |
329 | END_USE(vq); | 346 | END_USE(vq); |
330 | return false; | 347 | return false; |
@@ -334,6 +351,30 @@ static bool vring_enable_cb(struct virtqueue *_vq) | |||
334 | return true; | 351 | return true; |
335 | } | 352 | } |
336 | 353 | ||
354 | static void *vring_detach_unused_buf(struct virtqueue *_vq) | ||
355 | { | ||
356 | struct vring_virtqueue *vq = to_vvq(_vq); | ||
357 | unsigned int i; | ||
358 | void *buf; | ||
359 | |||
360 | START_USE(vq); | ||
361 | |||
362 | for (i = 0; i < vq->vring.num; i++) { | ||
363 | if (!vq->data[i]) | ||
364 | continue; | ||
365 | /* detach_buf clears data, so grab it now. */ | ||
366 | buf = vq->data[i]; | ||
367 | detach_buf(vq, i); | ||
368 | END_USE(vq); | ||
369 | return buf; | ||
370 | } | ||
371 | /* That should have freed everything. */ | ||
372 | BUG_ON(vq->num_free != vq->vring.num); | ||
373 | |||
374 | END_USE(vq); | ||
375 | return NULL; | ||
376 | } | ||
377 | |||
337 | irqreturn_t vring_interrupt(int irq, void *_vq) | 378 | irqreturn_t vring_interrupt(int irq, void *_vq) |
338 | { | 379 | { |
339 | struct vring_virtqueue *vq = to_vvq(_vq); | 380 | struct vring_virtqueue *vq = to_vvq(_vq); |
@@ -360,6 +401,7 @@ static struct virtqueue_ops vring_vq_ops = { | |||
360 | .kick = vring_kick, | 401 | .kick = vring_kick, |
361 | .disable_cb = vring_disable_cb, | 402 | .disable_cb = vring_disable_cb, |
362 | .enable_cb = vring_enable_cb, | 403 | .enable_cb = vring_enable_cb, |
404 | .detach_unused_buf = vring_detach_unused_buf, | ||
363 | }; | 405 | }; |
364 | 406 | ||
365 | struct virtqueue *vring_new_virtqueue(unsigned int num, | 407 | struct virtqueue *vring_new_virtqueue(unsigned int num, |
@@ -406,8 +448,11 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, | |||
406 | /* Put everything in free lists. */ | 448 | /* Put everything in free lists. */ |
407 | vq->num_free = num; | 449 | vq->num_free = num; |
408 | vq->free_head = 0; | 450 | vq->free_head = 0; |
409 | for (i = 0; i < num-1; i++) | 451 | for (i = 0; i < num-1; i++) { |
410 | vq->vring.desc[i].next = i+1; | 452 | vq->vring.desc[i].next = i+1; |
453 | vq->data[i] = NULL; | ||
454 | } | ||
455 | vq->data[i] = NULL; | ||
411 | 456 | ||
412 | return &vq->vq; | 457 | return &vq->vq; |
413 | } | 458 | } |