Diffstat (limited to 'drivers')
276 files changed, 10747 insertions, 2083 deletions
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 0bba148a2c61..4ced54f7a5d9 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -76,12 +76,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node, | |||
76 | * evgpe - GPE handling and dispatch | 76 | * evgpe - GPE handling and dispatch |
77 | */ | 77 | */ |
78 | acpi_status | 78 | acpi_status |
79 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | 79 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info); |
80 | u8 type); | ||
81 | 80 | ||
82 | acpi_status | 81 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
83 | acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, | ||
84 | u8 write_to_hardware); | ||
85 | 82 | ||
86 | acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); | 83 | acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); |
87 | 84 | ||
@@ -122,9 +119,6 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, | |||
122 | u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); | 119 | u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); |
123 | 120 | ||
124 | acpi_status | 121 | acpi_status |
125 | acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type); | ||
126 | |||
127 | acpi_status | ||
128 | acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info); | 122 | acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info); |
129 | 123 | ||
130 | acpi_status acpi_ev_gpe_initialize(void); | 124 | acpi_status acpi_ev_gpe_initialize(void); |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 81e64f478679..13cb80caacde 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -426,6 +426,8 @@ struct acpi_gpe_event_info { | |||
426 | struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ | 426 | struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ |
427 | u8 flags; /* Misc info about this GPE */ | 427 | u8 flags; /* Misc info about this GPE */ |
428 | u8 gpe_number; /* This GPE */ | 428 | u8 gpe_number; /* This GPE */ |
429 | u8 runtime_count; | ||
430 | u8 wakeup_count; | ||
429 | }; | 431 | }; |
430 | 432 | ||
431 | /* Information about a GPE register pair, one per each status/enable pair in an array */ | 433 | /* Information about a GPE register pair, one per each status/enable pair in an array */ |
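The two counters added here carry the new reference-counting model: acpi_enable_gpe() increments them, acpi_disable_gpe() decrements them, and the hardware enable masks simply follow whether each count is nonzero. A minimal conceptual sketch of that invariant (standalone C with hypothetical names, not literal ACPICA code):

/* Hypothetical stand-in for the new fields of struct acpi_gpe_event_info. */
struct gpe_refcounts {
	unsigned char runtime_count;	/* references taken for runtime use */
	unsigned char wakeup_count;	/* references taken for wakeup use */
};

/* The runtime enable bit should be set while at least one runtime reference is held. */
static int gpe_run_bit_should_be_set(const struct gpe_refcounts *rc)
{
	return rc->runtime_count != 0;
}

/* The wake enable bit follows the wakeup reference count in the same way. */
static int gpe_wake_bit_should_be_set(const struct gpe_refcounts *rc)
{
	return rc->wakeup_count != 0;
}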
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 64062b1be3ee..07f6e2ea2ee5 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -287,8 +287,10 @@ struct acpi_object_buffer_field { | |||
287 | 287 | ||
288 | struct acpi_object_notify_handler { | 288 | struct acpi_object_notify_handler { |
289 | ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */ | 289 | ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */ |
290 | u32 handler_type; | ||
290 | acpi_notify_handler handler; | 291 | acpi_notify_handler handler; |
291 | void *context; | 292 | void *context; |
293 | struct acpi_object_notify_handler *next; | ||
292 | }; | 294 | }; |
293 | 295 | ||
294 | struct acpi_object_addr_handler { | 296 | struct acpi_object_addr_handler { |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index afacf4416c73..0b453467a5a0 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -54,54 +54,9 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); | |||
54 | 54 | ||
55 | /******************************************************************************* | 55 | /******************************************************************************* |
56 | * | 56 | * |
57 | * FUNCTION: acpi_ev_set_gpe_type | ||
58 | * | ||
59 | * PARAMETERS: gpe_event_info - GPE to set | ||
60 | * Type - New type | ||
61 | * | ||
62 | * RETURN: Status | ||
63 | * | ||
64 | * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run) | ||
65 | * | ||
66 | ******************************************************************************/ | ||
67 | |||
68 | acpi_status | ||
69 | acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type) | ||
70 | { | ||
71 | acpi_status status; | ||
72 | |||
73 | ACPI_FUNCTION_TRACE(ev_set_gpe_type); | ||
74 | |||
75 | /* Validate type and update register enable masks */ | ||
76 | |||
77 | switch (type) { | ||
78 | case ACPI_GPE_TYPE_WAKE: | ||
79 | case ACPI_GPE_TYPE_RUNTIME: | ||
80 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
81 | break; | ||
82 | |||
83 | default: | ||
84 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
85 | } | ||
86 | |||
87 | /* Disable the GPE if currently enabled */ | ||
88 | |||
89 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
90 | |||
91 | /* Clear the type bits and insert the new Type */ | ||
92 | |||
93 | gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; | ||
94 | gpe_event_info->flags |= type; | ||
95 | return_ACPI_STATUS(status); | ||
96 | } | ||
97 | |||
98 | /******************************************************************************* | ||
99 | * | ||
100 | * FUNCTION: acpi_ev_update_gpe_enable_masks | 57 | * FUNCTION: acpi_ev_update_gpe_enable_masks |
101 | * | 58 | * |
102 | * PARAMETERS: gpe_event_info - GPE to update | 59 | * PARAMETERS: gpe_event_info - GPE to update |
103 | * Type - What to do: ACPI_GPE_DISABLE or | ||
104 | * ACPI_GPE_ENABLE | ||
105 | * | 60 | * |
106 | * RETURN: Status | 61 | * RETURN: Status |
107 | * | 62 | * |
@@ -110,8 +65,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type) | |||
110 | ******************************************************************************/ | 65 | ******************************************************************************/ |
111 | 66 | ||
112 | acpi_status | 67 | acpi_status |
113 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | 68 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) |
114 | u8 type) | ||
115 | { | 69 | { |
116 | struct acpi_gpe_register_info *gpe_register_info; | 70 | struct acpi_gpe_register_info *gpe_register_info; |
117 | u8 register_bit; | 71 | u8 register_bit; |
@@ -127,37 +81,14 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
127 | (1 << | 81 | (1 << |
128 | (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); | 82 | (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); |
129 | 83 | ||
130 | /* 1) Disable case. Simply clear all enable bits */ | 84 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit); |
131 | 85 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | |
132 | if (type == ACPI_GPE_DISABLE) { | ||
133 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
134 | register_bit); | ||
135 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | ||
136 | return_ACPI_STATUS(AE_OK); | ||
137 | } | ||
138 | |||
139 | /* 2) Enable case. Set/Clear the appropriate enable bits */ | ||
140 | 86 | ||
141 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | 87 | if (gpe_event_info->runtime_count) |
142 | case ACPI_GPE_TYPE_WAKE: | ||
143 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); | ||
144 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | ||
145 | break; | ||
146 | |||
147 | case ACPI_GPE_TYPE_RUNTIME: | ||
148 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
149 | register_bit); | ||
150 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); | 88 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); |
151 | break; | ||
152 | 89 | ||
153 | case ACPI_GPE_TYPE_WAKE_RUN: | 90 | if (gpe_event_info->wakeup_count) |
154 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); | 91 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); |
155 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); | ||
156 | break; | ||
157 | |||
158 | default: | ||
159 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
160 | } | ||
161 | 92 | ||
162 | return_ACPI_STATUS(AE_OK); | 93 | return_ACPI_STATUS(AE_OK); |
163 | } | 94 | } |
@@ -167,8 +98,6 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
167 | * FUNCTION: acpi_ev_enable_gpe | 98 | * FUNCTION: acpi_ev_enable_gpe |
168 | * | 99 | * |
169 | * PARAMETERS: gpe_event_info - GPE to enable | 100 | * PARAMETERS: gpe_event_info - GPE to enable |
170 | * write_to_hardware - Enable now, or just mark data structs | ||
171 | * (WAKE GPEs should be deferred) | ||
172 | * | 101 | * |
173 | * RETURN: Status | 102 | * RETURN: Status |
174 | * | 103 | * |
@@ -176,9 +105,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | |||
176 | * | 105 | * |
177 | ******************************************************************************/ | 106 | ******************************************************************************/ |
178 | 107 | ||
179 | acpi_status | 108 | acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) |
180 | acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, | ||
181 | u8 write_to_hardware) | ||
182 | { | 109 | { |
183 | acpi_status status; | 110 | acpi_status status; |
184 | 111 | ||
@@ -186,47 +113,20 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, | |||
186 | 113 | ||
187 | /* Make sure HW enable masks are updated */ | 114 | /* Make sure HW enable masks are updated */ |
188 | 115 | ||
189 | status = | 116 | status = acpi_ev_update_gpe_enable_masks(gpe_event_info); |
190 | acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE); | 117 | if (ACPI_FAILURE(status)) |
191 | if (ACPI_FAILURE(status)) { | ||
192 | return_ACPI_STATUS(status); | 118 | return_ACPI_STATUS(status); |
193 | } | ||
194 | 119 | ||
195 | /* Mark wake-enabled or HW enable, or both */ | 120 | /* Mark wake-enabled or HW enable, or both */ |
196 | 121 | ||
197 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | 122 | if (gpe_event_info->runtime_count) { |
198 | case ACPI_GPE_TYPE_WAKE: | 123 | /* Clear the GPE (of stale events), then enable it */ |
199 | 124 | status = acpi_hw_clear_gpe(gpe_event_info); | |
200 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | 125 | if (ACPI_FAILURE(status)) |
201 | break; | 126 | return_ACPI_STATUS(status); |
202 | |||
203 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
204 | |||
205 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
206 | |||
207 | /*lint -fallthrough */ | ||
208 | |||
209 | case ACPI_GPE_TYPE_RUNTIME: | ||
210 | |||
211 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); | ||
212 | |||
213 | if (write_to_hardware) { | ||
214 | |||
215 | /* Clear the GPE (of stale events), then enable it */ | ||
216 | |||
217 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
218 | if (ACPI_FAILURE(status)) { | ||
219 | return_ACPI_STATUS(status); | ||
220 | } | ||
221 | |||
222 | /* Enable the requested runtime GPE */ | ||
223 | |||
224 | status = acpi_hw_write_gpe_enable_reg(gpe_event_info); | ||
225 | } | ||
226 | break; | ||
227 | 127 | ||
228 | default: | 128 | /* Enable the requested runtime GPE */ |
229 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 129 | status = acpi_hw_write_gpe_enable_reg(gpe_event_info); |
230 | } | 130 | } |
231 | 131 | ||
232 | return_ACPI_STATUS(AE_OK); | 132 | return_ACPI_STATUS(AE_OK); |
@@ -252,34 +152,9 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
252 | 152 | ||
253 | /* Make sure HW enable masks are updated */ | 153 | /* Make sure HW enable masks are updated */ |
254 | 154 | ||
255 | status = | 155 | status = acpi_ev_update_gpe_enable_masks(gpe_event_info); |
256 | acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE); | 156 | if (ACPI_FAILURE(status)) |
257 | if (ACPI_FAILURE(status)) { | ||
258 | return_ACPI_STATUS(status); | 157 | return_ACPI_STATUS(status); |
259 | } | ||
260 | |||
261 | /* Clear the appropriate enabled flags for this GPE */ | ||
262 | |||
263 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | ||
264 | case ACPI_GPE_TYPE_WAKE: | ||
265 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
266 | break; | ||
267 | |||
268 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
269 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
270 | |||
271 | /* fallthrough */ | ||
272 | |||
273 | case ACPI_GPE_TYPE_RUNTIME: | ||
274 | |||
275 | /* Disable the requested runtime GPE */ | ||
276 | |||
277 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); | ||
278 | break; | ||
279 | |||
280 | default: | ||
281 | break; | ||
282 | } | ||
283 | 158 | ||
284 | /* | 159 | /* |
285 | * Even if we don't know the GPE type, make sure that we always | 160 | * Even if we don't know the GPE type, make sure that we always |
@@ -521,7 +396,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
521 | 396 | ||
522 | /* Set the GPE flags for return to enabled state */ | 397 | /* Set the GPE flags for return to enabled state */ |
523 | 398 | ||
524 | (void)acpi_ev_enable_gpe(gpe_event_info, FALSE); | 399 | (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); |
525 | 400 | ||
526 | /* | 401 | /* |
527 | * Take a snapshot of the GPE info for this level - we copy the info to | 402 | * Take a snapshot of the GPE info for this level - we copy the info to |
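For reference, the new acpi_ev_update_gpe_enable_masks() reduces to the following; this is a condensed, hedged sketch of the code added above (the gpe_register_info validation is elided):

acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u8 register_bit;

	gpe_register_info = gpe_event_info->register_info;
	/* ... NULL check of gpe_register_info elided ... */

	register_bit = (u8)
	    (1 << (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));

	/* Clear both bits, then set each one iff its reference count is nonzero */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	if (gpe_event_info->runtime_count)
		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);

	if (gpe_event_info->wakeup_count)
		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);

	return_ACPI_STATUS(AE_OK);
}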
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 247920900187..3d4c4aca11cd 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -258,7 +258,6 @@ acpi_ev_save_method_info(acpi_handle obj_handle, | |||
258 | u32 gpe_number; | 258 | u32 gpe_number; |
259 | char name[ACPI_NAME_SIZE + 1]; | 259 | char name[ACPI_NAME_SIZE + 1]; |
260 | u8 type; | 260 | u8 type; |
261 | acpi_status status; | ||
262 | 261 | ||
263 | ACPI_FUNCTION_TRACE(ev_save_method_info); | 262 | ACPI_FUNCTION_TRACE(ev_save_method_info); |
264 | 263 | ||
@@ -325,26 +324,20 @@ acpi_ev_save_method_info(acpi_handle obj_handle, | |||
325 | 324 | ||
326 | /* | 325 | /* |
327 | * Now we can add this information to the gpe_event_info block for use | 326 | * Now we can add this information to the gpe_event_info block for use |
328 | * during dispatch of this GPE. Default type is RUNTIME, although this may | 327 | * during dispatch of this GPE. |
329 | * change when the _PRW methods are executed later. | ||
330 | */ | 328 | */ |
331 | gpe_event_info = | 329 | gpe_event_info = |
332 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; | 330 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; |
333 | 331 | ||
334 | gpe_event_info->flags = (u8) | 332 | gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD); |
335 | (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME); | ||
336 | 333 | ||
337 | gpe_event_info->dispatch.method_node = | 334 | gpe_event_info->dispatch.method_node = |
338 | (struct acpi_namespace_node *)obj_handle; | 335 | (struct acpi_namespace_node *)obj_handle; |
339 | 336 | ||
340 | /* Update enable mask, but don't enable the HW GPE as of yet */ | ||
341 | |||
342 | status = acpi_ev_enable_gpe(gpe_event_info, FALSE); | ||
343 | |||
344 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | 337 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, |
345 | "Registered GPE method %s as GPE number 0x%.2X\n", | 338 | "Registered GPE method %s as GPE number 0x%.2X\n", |
346 | name, gpe_number)); | 339 | name, gpe_number)); |
347 | return_ACPI_STATUS(status); | 340 | return_ACPI_STATUS(AE_OK); |
348 | } | 341 | } |
349 | 342 | ||
350 | /******************************************************************************* | 343 | /******************************************************************************* |
@@ -454,20 +447,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | |||
454 | gpe_block-> | 447 | gpe_block-> |
455 | block_base_number]; | 448 | block_base_number]; |
456 | 449 | ||
457 | /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */ | 450 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; |
458 | |||
459 | gpe_event_info->flags &= | ||
460 | ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED); | ||
461 | |||
462 | status = | ||
463 | acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE); | ||
464 | if (ACPI_FAILURE(status)) { | ||
465 | goto cleanup; | ||
466 | } | ||
467 | |||
468 | status = | ||
469 | acpi_ev_update_gpe_enable_masks(gpe_event_info, | ||
470 | ACPI_GPE_DISABLE); | ||
471 | } | 451 | } |
472 | 452 | ||
473 | cleanup: | 453 | cleanup: |
@@ -989,7 +969,6 @@ acpi_status | |||
989 | acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | 969 | acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, |
990 | struct acpi_gpe_block_info *gpe_block) | 970 | struct acpi_gpe_block_info *gpe_block) |
991 | { | 971 | { |
992 | acpi_status status; | ||
993 | struct acpi_gpe_event_info *gpe_event_info; | 972 | struct acpi_gpe_event_info *gpe_event_info; |
994 | struct acpi_gpe_walk_info gpe_info; | 973 | struct acpi_gpe_walk_info gpe_info; |
995 | u32 wake_gpe_count; | 974 | u32 wake_gpe_count; |
@@ -1019,42 +998,50 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | |||
1019 | gpe_info.gpe_block = gpe_block; | 998 | gpe_info.gpe_block = gpe_block; |
1020 | gpe_info.gpe_device = gpe_device; | 999 | gpe_info.gpe_device = gpe_device; |
1021 | 1000 | ||
1022 | status = | 1001 | acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
1023 | acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
1024 | ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, | 1002 | ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, |
1025 | acpi_ev_match_prw_and_gpe, NULL, | 1003 | acpi_ev_match_prw_and_gpe, NULL, |
1026 | &gpe_info, NULL); | 1004 | &gpe_info, NULL); |
1027 | } | 1005 | } |
1028 | 1006 | ||
1029 | /* | 1007 | /* |
1030 | * Enable all GPEs in this block that have these attributes: | 1008 | * Enable all GPEs that have a corresponding method and aren't |
1031 | * 1) are "runtime" or "run/wake" GPEs, and | 1009 | * capable of generating wakeups. Any other GPEs within this block |
1032 | * 2) have a corresponding _Lxx or _Exx method | 1010 | * must be enabled via the acpi_enable_gpe() interface. |
1033 | * | ||
1034 | * Any other GPEs within this block must be enabled via the | ||
1035 | * acpi_enable_gpe() external interface. | ||
1036 | */ | 1011 | */ |
1037 | wake_gpe_count = 0; | 1012 | wake_gpe_count = 0; |
1038 | gpe_enabled_count = 0; | 1013 | gpe_enabled_count = 0; |
1014 | if (gpe_device == acpi_gbl_fadt_gpe_device) | ||
1015 | gpe_device = NULL; | ||
1039 | 1016 | ||
1040 | for (i = 0; i < gpe_block->register_count; i++) { | 1017 | for (i = 0; i < gpe_block->register_count; i++) { |
1041 | for (j = 0; j < 8; j++) { | 1018 | for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { |
1019 | acpi_status status; | ||
1020 | acpi_size gpe_index; | ||
1021 | int gpe_number; | ||
1042 | 1022 | ||
1043 | /* Get the info block for this particular GPE */ | 1023 | /* Get the info block for this particular GPE */ |
1024 | gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j; | ||
1025 | gpe_event_info = &gpe_block->event_info[gpe_index]; | ||
1044 | 1026 | ||
1045 | gpe_event_info = &gpe_block->event_info[((acpi_size) i * | 1027 | if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) { |
1046 | ACPI_GPE_REGISTER_WIDTH) | ||
1047 | + j]; | ||
1048 | |||
1049 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
1050 | ACPI_GPE_DISPATCH_METHOD) && | ||
1051 | (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) { | ||
1052 | gpe_enabled_count++; | ||
1053 | } | ||
1054 | |||
1055 | if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) { | ||
1056 | wake_gpe_count++; | 1028 | wake_gpe_count++; |
1029 | if (acpi_gbl_leave_wake_gpes_disabled) | ||
1030 | continue; | ||
1057 | } | 1031 | } |
1032 | |||
1033 | if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) | ||
1034 | continue; | ||
1035 | |||
1036 | gpe_number = gpe_index + gpe_block->block_base_number; | ||
1037 | status = acpi_enable_gpe(gpe_device, gpe_number, | ||
1038 | ACPI_GPE_TYPE_RUNTIME); | ||
1039 | if (ACPI_FAILURE(status)) | ||
1040 | ACPI_ERROR((AE_INFO, | ||
1041 | "Failed to enable GPE %02X\n", | ||
1042 | gpe_number)); | ||
1043 | else | ||
1044 | gpe_enabled_count++; | ||
1058 | } | 1045 | } |
1059 | } | 1046 | } |
1060 | 1047 | ||
@@ -1062,15 +1049,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | |||
1062 | "Found %u Wake, Enabled %u Runtime GPEs in this block\n", | 1049 | "Found %u Wake, Enabled %u Runtime GPEs in this block\n", |
1063 | wake_gpe_count, gpe_enabled_count)); | 1050 | wake_gpe_count, gpe_enabled_count)); |
1064 | 1051 | ||
1065 | /* Enable all valid runtime GPEs found above */ | 1052 | return_ACPI_STATUS(AE_OK); |
1066 | |||
1067 | status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL); | ||
1068 | if (ACPI_FAILURE(status)) { | ||
1069 | ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p", | ||
1070 | gpe_block)); | ||
1071 | } | ||
1072 | |||
1073 | return_ACPI_STATUS(status); | ||
1074 | } | 1053 | } |
1075 | 1054 | ||
1076 | /******************************************************************************* | 1055 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index ce224e1eaa89..8f0fac6c4366 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -259,9 +259,15 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) | |||
259 | 259 | ||
260 | handler_obj = notify_info->notify.handler_obj; | 260 | handler_obj = notify_info->notify.handler_obj; |
261 | if (handler_obj) { | 261 | if (handler_obj) { |
262 | handler_obj->notify.handler(notify_info->notify.node, | 262 | struct acpi_object_notify_handler *notifier; |
263 | notify_info->notify.value, | 263 | |
264 | handler_obj->notify.context); | 264 | notifier = &handler_obj->notify; |
265 | while (notifier) { | ||
266 | notifier->handler(notify_info->notify.node, | ||
267 | notify_info->notify.value, | ||
268 | notifier->context); | ||
269 | notifier = notifier->next; | ||
270 | } | ||
265 | } | 271 | } |
266 | 272 | ||
267 | /* All done with the info object */ | 273 | /* All done with the info object */ |
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 2fe0809d4eb2..474e2cab603d 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -218,6 +218,72 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler) | |||
218 | 218 | ||
219 | /******************************************************************************* | 219 | /******************************************************************************* |
220 | * | 220 | * |
221 | * FUNCTION: acpi_populate_handler_object | ||
222 | * | ||
223 | * PARAMETERS: handler_obj - Handler object to populate | ||
224 | * handler_type - The type of handler: | ||
225 | * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) | ||
226 | * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) | ||
227 | * ACPI_ALL_NOTIFY: both system and device | ||
228 | * handler - Address of the handler | ||
229 | * context - Value passed to the handler on each GPE | ||
230 | * next - Address of a handler object to link to | ||
231 | * | ||
232 | * RETURN: None | ||
233 | * | ||
234 | * DESCRIPTION: Populate a handler object. | ||
235 | * | ||
236 | ******************************************************************************/ | ||
237 | static void | ||
238 | acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj, | ||
239 | u32 handler_type, | ||
240 | acpi_notify_handler handler, void *context, | ||
241 | struct acpi_object_notify_handler *next) | ||
242 | { | ||
243 | handler_obj->handler_type = handler_type; | ||
244 | handler_obj->handler = handler; | ||
245 | handler_obj->context = context; | ||
246 | handler_obj->next = next; | ||
247 | } | ||
248 | |||
249 | /******************************************************************************* | ||
250 | * | ||
251 | * FUNCTION: acpi_add_handler_object | ||
252 | * | ||
253 | * PARAMETERS: parent_obj - Parent of the new object | ||
254 | * handler - Address of the handler | ||
255 | * context - Value passed to the handler on each GPE | ||
256 | * | ||
257 | * RETURN: Status | ||
258 | * | ||
259 | * DESCRIPTION: Create a new handler object and populate it. | ||
260 | * | ||
261 | ******************************************************************************/ | ||
262 | static acpi_status | ||
263 | acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj, | ||
264 | acpi_notify_handler handler, void *context) | ||
265 | { | ||
266 | struct acpi_object_notify_handler *handler_obj; | ||
267 | |||
268 | /* The parent must not be a device notify handler object. */ |||
269 | if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY) | ||
270 | return AE_BAD_PARAMETER; | ||
271 | |||
272 | handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj)); | ||
273 | if (!handler_obj) | ||
274 | return AE_NO_MEMORY; | ||
275 | |||
276 | acpi_populate_handler_object(handler_obj, | ||
277 | ACPI_SYSTEM_NOTIFY, | ||
278 | handler, context, | ||
279 | parent_obj->next); | ||
280 | parent_obj->next = handler_obj; | ||
281 | |||
282 | return AE_OK; | ||
283 | } | ||
284 | |||
285 | /******************************************************************************* | ||
286 | * | ||
221 | * FUNCTION: acpi_install_notify_handler | 287 | * FUNCTION: acpi_install_notify_handler |
222 | * | 288 | * |
223 | * PARAMETERS: Device - The device for which notifies will be handled | 289 | * PARAMETERS: Device - The device for which notifies will be handled |
@@ -316,15 +382,32 @@ acpi_install_notify_handler(acpi_handle device, | |||
316 | obj_desc = acpi_ns_get_attached_object(node); | 382 | obj_desc = acpi_ns_get_attached_object(node); |
317 | if (obj_desc) { | 383 | if (obj_desc) { |
318 | 384 | ||
319 | /* Object exists - make sure there's no handler */ | 385 | /* Object exists. */ |
320 | 386 | ||
321 | if (((handler_type & ACPI_SYSTEM_NOTIFY) && | 387 | /* For a device notify, make sure there's no handler. */ |
322 | obj_desc->common_notify.system_notify) || | 388 | if ((handler_type & ACPI_DEVICE_NOTIFY) && |
323 | ((handler_type & ACPI_DEVICE_NOTIFY) && | 389 | obj_desc->common_notify.device_notify) { |
324 | obj_desc->common_notify.device_notify)) { | ||
325 | status = AE_ALREADY_EXISTS; | 390 | status = AE_ALREADY_EXISTS; |
326 | goto unlock_and_exit; | 391 | goto unlock_and_exit; |
327 | } | 392 | } |
393 | |||
394 | /* System notifies may have more handlers installed. */ | ||
395 | notify_obj = obj_desc->common_notify.system_notify; | ||
396 | |||
397 | if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) { | ||
398 | struct acpi_object_notify_handler *parent_obj; | ||
399 | |||
400 | if (handler_type & ACPI_DEVICE_NOTIFY) { | ||
401 | status = AE_ALREADY_EXISTS; | ||
402 | goto unlock_and_exit; | ||
403 | } | ||
404 | |||
405 | parent_obj = ¬ify_obj->notify; | ||
406 | status = acpi_add_handler_object(parent_obj, | ||
407 | handler, | ||
408 | context); | ||
409 | goto unlock_and_exit; | ||
410 | } | ||
328 | } else { | 411 | } else { |
329 | /* Create a new object */ | 412 | /* Create a new object */ |
330 | 413 | ||
@@ -356,9 +439,10 @@ acpi_install_notify_handler(acpi_handle device, | |||
356 | goto unlock_and_exit; | 439 | goto unlock_and_exit; |
357 | } | 440 | } |
358 | 441 | ||
359 | notify_obj->notify.node = node; | 442 | acpi_populate_handler_object(¬ify_obj->notify, |
360 | notify_obj->notify.handler = handler; | 443 | handler_type, |
361 | notify_obj->notify.context = context; | 444 | handler, context, |
445 | NULL); | ||
362 | 446 | ||
363 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | 447 | if (handler_type & ACPI_SYSTEM_NOTIFY) { |
364 | obj_desc->common_notify.system_notify = notify_obj; | 448 | obj_desc->common_notify.system_notify = notify_obj; |
@@ -418,6 +502,10 @@ acpi_remove_notify_handler(acpi_handle device, | |||
418 | goto exit; | 502 | goto exit; |
419 | } | 503 | } |
420 | 504 | ||
505 | |||
506 | /* Make sure all deferred tasks are completed */ | ||
507 | acpi_os_wait_events_complete(NULL); | ||
508 | |||
421 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 509 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
422 | if (ACPI_FAILURE(status)) { | 510 | if (ACPI_FAILURE(status)) { |
423 | goto exit; | 511 | goto exit; |
@@ -445,15 +533,6 @@ acpi_remove_notify_handler(acpi_handle device, | |||
445 | goto unlock_and_exit; | 533 | goto unlock_and_exit; |
446 | } | 534 | } |
447 | 535 | ||
448 | /* Make sure all deferred tasks are completed */ | ||
449 | |||
450 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
451 | acpi_os_wait_events_complete(NULL); | ||
452 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
453 | if (ACPI_FAILURE(status)) { | ||
454 | goto exit; | ||
455 | } | ||
456 | |||
457 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | 536 | if (handler_type & ACPI_SYSTEM_NOTIFY) { |
458 | acpi_gbl_system_notify.node = NULL; | 537 | acpi_gbl_system_notify.node = NULL; |
459 | acpi_gbl_system_notify.handler = NULL; | 538 | acpi_gbl_system_notify.handler = NULL; |
@@ -488,28 +567,60 @@ acpi_remove_notify_handler(acpi_handle device, | |||
488 | /* Object exists - make sure there's an existing handler */ | 567 | /* Object exists - make sure there's an existing handler */ |
489 | 568 | ||
490 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | 569 | if (handler_type & ACPI_SYSTEM_NOTIFY) { |
570 | struct acpi_object_notify_handler *handler_obj; | ||
571 | struct acpi_object_notify_handler *parent_obj; | ||
572 | |||
491 | notify_obj = obj_desc->common_notify.system_notify; | 573 | notify_obj = obj_desc->common_notify.system_notify; |
492 | if (!notify_obj) { | 574 | if (!notify_obj) { |
493 | status = AE_NOT_EXIST; | 575 | status = AE_NOT_EXIST; |
494 | goto unlock_and_exit; | 576 | goto unlock_and_exit; |
495 | } | 577 | } |
496 | 578 | ||
497 | if (notify_obj->notify.handler != handler) { | 579 | handler_obj = ¬ify_obj->notify; |
580 | parent_obj = NULL; | ||
581 | while (handler_obj->handler != handler) { | ||
582 | if (handler_obj->next) { | ||
583 | parent_obj = handler_obj; | ||
584 | handler_obj = handler_obj->next; | ||
585 | } else { | ||
586 | break; | ||
587 | } | ||
588 | } | ||
589 | |||
590 | if (handler_obj->handler != handler) { | ||
498 | status = AE_BAD_PARAMETER; | 591 | status = AE_BAD_PARAMETER; |
499 | goto unlock_and_exit; | 592 | goto unlock_and_exit; |
500 | } | 593 | } |
501 | /* Make sure all deferred tasks are completed */ | ||
502 | 594 | ||
503 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 595 | /* |
504 | acpi_os_wait_events_complete(NULL); | 596 | * Remove the handler. There are three possible cases. |
505 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 597 | * First, we may need to remove a non-embedded object. |
506 | if (ACPI_FAILURE(status)) { | 598 | * Second, we may need to remove the embedded object's |
507 | goto exit; | 599 | * handler data, while non-embedded objects exist. |
600 | * Finally, we may need to remove the embedded object | ||
601 | * entirely along with its container. | ||
602 | */ | ||
603 | if (parent_obj) { | ||
604 | /* Non-embedded object is being removed. */ | ||
605 | parent_obj->next = handler_obj->next; | ||
606 | ACPI_FREE(handler_obj); | ||
607 | } else if (notify_obj->notify.next) { | ||
608 | /* | ||
609 | * The handler matches the embedded object, but | ||
610 | * there are more handler objects in the list. | ||
611 | * Replace the embedded object's data with the | ||
612 | * first next object's data and remove that | ||
613 | * object. | ||
614 | */ | ||
615 | parent_obj = ¬ify_obj->notify; | ||
616 | handler_obj = notify_obj->notify.next; | ||
617 | *parent_obj = *handler_obj; | ||
618 | ACPI_FREE(handler_obj); | ||
619 | } else { | ||
620 | /* No more handler objects in the list. */ | ||
621 | obj_desc->common_notify.system_notify = NULL; | ||
622 | acpi_ut_remove_reference(notify_obj); | ||
508 | } | 623 | } |
509 | |||
510 | /* Remove the handler */ | ||
511 | obj_desc->common_notify.system_notify = NULL; | ||
512 | acpi_ut_remove_reference(notify_obj); | ||
513 | } | 624 | } |
514 | 625 | ||
515 | if (handler_type & ACPI_DEVICE_NOTIFY) { | 626 | if (handler_type & ACPI_DEVICE_NOTIFY) { |
@@ -523,14 +634,6 @@ acpi_remove_notify_handler(acpi_handle device, | |||
523 | status = AE_BAD_PARAMETER; | 634 | status = AE_BAD_PARAMETER; |
524 | goto unlock_and_exit; | 635 | goto unlock_and_exit; |
525 | } | 636 | } |
526 | /* Make sure all deferred tasks are completed */ | ||
527 | |||
528 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
529 | acpi_os_wait_events_complete(NULL); | ||
530 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
531 | if (ACPI_FAILURE(status)) { | ||
532 | goto exit; | ||
533 | } | ||
534 | 637 | ||
535 | /* Remove the handler */ | 638 | /* Remove the handler */ |
536 | obj_desc->common_notify.device_notify = NULL; | 639 | obj_desc->common_notify.device_notify = NULL; |
@@ -617,13 +720,6 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
617 | handler->context = context; | 720 | handler->context = context; |
618 | handler->method_node = gpe_event_info->dispatch.method_node; | 721 | handler->method_node = gpe_event_info->dispatch.method_node; |
619 | 722 | ||
620 | /* Disable the GPE before installing the handler */ | ||
621 | |||
622 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
623 | if (ACPI_FAILURE(status)) { | ||
624 | goto unlock_and_exit; | ||
625 | } | ||
626 | |||
627 | /* Install the handler */ | 723 | /* Install the handler */ |
628 | 724 | ||
629 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 725 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
@@ -707,13 +803,6 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
707 | goto unlock_and_exit; | 803 | goto unlock_and_exit; |
708 | } | 804 | } |
709 | 805 | ||
710 | /* Disable the GPE before removing the handler */ | ||
711 | |||
712 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
713 | if (ACPI_FAILURE(status)) { | ||
714 | goto unlock_and_exit; | ||
715 | } | ||
716 | |||
717 | /* Make sure all deferred tasks are completed */ | 806 | /* Make sure all deferred tasks are completed */ |
718 | 807 | ||
719 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | 808 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
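With the handler chaining introduced above, more than one ACPI_SYSTEM_NOTIFY handler can be registered on the same device, and acpi_ev_notify_dispatch() (see the evmisc.c hunk earlier) walks the list and calls each one. A hedged usage sketch with placeholder handler names:

static void thermal_notify(acpi_handle handle, u32 event, void *context)
{
	/* placeholder: react to the notify value in "event" */
}

static void trace_notify(acpi_handle handle, u32 event, void *context)
{
	/* placeholder: e.g. log the event */
}

static void example_chain_handlers(acpi_handle device)
{
	/* The first registration creates the embedded notify handler object. */
	acpi_install_notify_handler(device, ACPI_SYSTEM_NOTIFY, thermal_notify, NULL);

	/*
	 * Before this patch the second call returned AE_ALREADY_EXISTS; now the
	 * handler is linked behind the first one and both are invoked on dispatch.
	 */
	acpi_install_notify_handler(device, ACPI_SYSTEM_NOTIFY, trace_notify, NULL);

	/* Removal unlinks only the matching entry, as implemented above. */
	acpi_remove_notify_handler(device, ACPI_SYSTEM_NOTIFY, trace_notify);
	acpi_remove_notify_handler(device, ACPI_SYSTEM_NOTIFY, thermal_notify);
}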
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index eed7a38d25f2..124c157215bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -201,23 +201,27 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event) | |||
201 | 201 | ||
202 | /******************************************************************************* | 202 | /******************************************************************************* |
203 | * | 203 | * |
204 | * FUNCTION: acpi_set_gpe_type | 204 | * FUNCTION: acpi_set_gpe |
205 | * | 205 | * |
206 | * PARAMETERS: gpe_device - Parent GPE Device | 206 | * PARAMETERS: gpe_device - Parent GPE Device |
207 | * gpe_number - GPE level within the GPE block | 207 | * gpe_number - GPE level within the GPE block |
208 | * Type - New GPE type | 208 | * action - Enable or disable |
209 | * Called from ISR or not | ||
209 | * | 210 | * |
210 | * RETURN: Status | 211 | * RETURN: Status |
211 | * | 212 | * |
212 | * DESCRIPTION: Set the type of an individual GPE | 213 | * DESCRIPTION: Enable or disable an ACPI event (general purpose) |
213 | * | 214 | * |
214 | ******************************************************************************/ | 215 | ******************************************************************************/ |
215 | acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type) | 216 | acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) |
216 | { | 217 | { |
217 | acpi_status status = AE_OK; | 218 | acpi_status status = AE_OK; |
219 | acpi_cpu_flags flags; | ||
218 | struct acpi_gpe_event_info *gpe_event_info; | 220 | struct acpi_gpe_event_info *gpe_event_info; |
219 | 221 | ||
220 | ACPI_FUNCTION_TRACE(acpi_set_gpe_type); | 222 | ACPI_FUNCTION_TRACE(acpi_set_gpe); |
223 | |||
224 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
221 | 225 | ||
222 | /* Ensure that we have a valid GPE number */ | 226 | /* Ensure that we have a valid GPE number */ |
223 | 227 | ||
@@ -227,19 +231,29 @@ acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type) | |||
227 | goto unlock_and_exit; | 231 | goto unlock_and_exit; |
228 | } | 232 | } |
229 | 233 | ||
230 | if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) { | 234 | /* Perform the action */ |
231 | return_ACPI_STATUS(AE_OK); | 235 | |
232 | } | 236 | switch (action) { |
237 | case ACPI_GPE_ENABLE: | ||
238 | status = acpi_ev_enable_gpe(gpe_event_info); | ||
239 | break; | ||
233 | 240 | ||
234 | /* Set the new type (will disable GPE if currently enabled) */ | 241 | case ACPI_GPE_DISABLE: |
242 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
243 | break; | ||
235 | 244 | ||
236 | status = acpi_ev_set_gpe_type(gpe_event_info, type); | 245 | default: |
246 | ACPI_ERROR((AE_INFO, "Invalid action\n")); | ||
247 | status = AE_BAD_PARAMETER; | ||
248 | break; | ||
249 | } | ||
237 | 250 | ||
238 | unlock_and_exit: | 251 | unlock_and_exit: |
252 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
239 | return_ACPI_STATUS(status); | 253 | return_ACPI_STATUS(status); |
240 | } | 254 | } |
241 | 255 | ||
242 | ACPI_EXPORT_SYMBOL(acpi_set_gpe_type) | 256 | ACPI_EXPORT_SYMBOL(acpi_set_gpe) |
243 | 257 | ||
244 | /******************************************************************************* | 258 | /******************************************************************************* |
245 | * | 259 | * |
@@ -247,15 +261,14 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe_type) | |||
247 | * | 261 | * |
248 | * PARAMETERS: gpe_device - Parent GPE Device | 262 | * PARAMETERS: gpe_device - Parent GPE Device |
249 | * gpe_number - GPE level within the GPE block | 263 | * gpe_number - GPE level within the GPE block |
250 | * Flags - Just enable, or also wake enable? | 264 | * type - Purpose the GPE will be used for |
251 | * Called from ISR or not | ||
252 | * | 265 | * |
253 | * RETURN: Status | 266 | * RETURN: Status |
254 | * | 267 | * |
255 | * DESCRIPTION: Enable an ACPI event (general purpose) | 268 | * DESCRIPTION: Take a reference to a GPE and enable it if necessary |
256 | * | 269 | * |
257 | ******************************************************************************/ | 270 | ******************************************************************************/ |
258 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | 271 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) |
259 | { | 272 | { |
260 | acpi_status status = AE_OK; | 273 | acpi_status status = AE_OK; |
261 | acpi_cpu_flags flags; | 274 | acpi_cpu_flags flags; |
@@ -263,6 +276,9 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
263 | 276 | ||
264 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); | 277 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); |
265 | 278 | ||
279 | if (type & ~ACPI_GPE_TYPE_WAKE_RUN) | ||
280 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
281 | |||
266 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 282 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
267 | 283 | ||
268 | /* Ensure that we have a valid GPE number */ | 284 | /* Ensure that we have a valid GPE number */ |
@@ -273,15 +289,32 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
273 | goto unlock_and_exit; | 289 | goto unlock_and_exit; |
274 | } | 290 | } |
275 | 291 | ||
276 | /* Perform the enable */ | 292 | if (type & ACPI_GPE_TYPE_RUNTIME) { |
293 | if (++gpe_event_info->runtime_count == 1) { | ||
294 | status = acpi_ev_enable_gpe(gpe_event_info); | ||
295 | if (ACPI_FAILURE(status)) | ||
296 | gpe_event_info->runtime_count--; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | if (type & ACPI_GPE_TYPE_WAKE) { | ||
301 | if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | ||
302 | status = AE_BAD_PARAMETER; | ||
303 | goto unlock_and_exit; | ||
304 | } | ||
277 | 305 | ||
278 | status = acpi_ev_enable_gpe(gpe_event_info, TRUE); | 306 | /* |
307 | * Wake-up GPEs are only enabled right prior to putting the | ||
308 | * system into a sleep state. | ||
309 | */ | ||
310 | if (++gpe_event_info->wakeup_count == 1) | ||
311 | acpi_ev_update_gpe_enable_masks(gpe_event_info); | ||
312 | } | ||
279 | 313 | ||
280 | unlock_and_exit: | 314 | unlock_and_exit: |
281 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 315 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
282 | return_ACPI_STATUS(status); | 316 | return_ACPI_STATUS(status); |
283 | } | 317 | } |
284 | |||
285 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | 318 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) |
286 | 319 | ||
287 | /******************************************************************************* | 320 | /******************************************************************************* |
@@ -290,15 +323,14 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | |||
290 | * | 323 | * |
291 | * PARAMETERS: gpe_device - Parent GPE Device | 324 | * PARAMETERS: gpe_device - Parent GPE Device |
292 | * gpe_number - GPE level within the GPE block | 325 | * gpe_number - GPE level within the GPE block |
293 | * Flags - Just disable, or also wake disable? | 326 | * type - Purpose the GPE won't be used for any more |
294 | * Called from ISR or not | ||
295 | * | 327 | * |
296 | * RETURN: Status | 328 | * RETURN: Status |
297 | * | 329 | * |
298 | * DESCRIPTION: Disable an ACPI event (general purpose) | 330 | * DESCRIPTION: Release a reference to a GPE and disable it if necessary |
299 | * | 331 | * |
300 | ******************************************************************************/ | 332 | ******************************************************************************/ |
301 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | 333 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) |
302 | { | 334 | { |
303 | acpi_status status = AE_OK; | 335 | acpi_status status = AE_OK; |
304 | acpi_cpu_flags flags; | 336 | acpi_cpu_flags flags; |
@@ -306,6 +338,9 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
306 | 338 | ||
307 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); | 339 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); |
308 | 340 | ||
341 | if (type & ~ACPI_GPE_TYPE_WAKE_RUN) | ||
342 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
343 | |||
309 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 344 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
310 | /* Ensure that we have a valid GPE number */ | 345 | /* Ensure that we have a valid GPE number */ |
311 | 346 | ||
@@ -315,13 +350,24 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
315 | goto unlock_and_exit; | 350 | goto unlock_and_exit; |
316 | } | 351 | } |
317 | 352 | ||
318 | status = acpi_ev_disable_gpe(gpe_event_info); | 353 | if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) { |
354 | if (--gpe_event_info->runtime_count == 0) | ||
355 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
356 | } | ||
357 | |||
358 | if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) { | ||
359 | /* | ||
360 | * Wake-up GPEs are not enabled after leaving system sleep | ||
361 | * states, so we don't need to disable them here. | ||
362 | */ | ||
363 | if (--gpe_event_info->wakeup_count == 0) | ||
364 | acpi_ev_update_gpe_enable_masks(gpe_event_info); | ||
365 | } | ||
319 | 366 | ||
320 | unlock_and_exit: | 367 | unlock_and_exit: |
321 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 368 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
322 | return_ACPI_STATUS(status); | 369 | return_ACPI_STATUS(status); |
323 | } | 370 | } |
324 | |||
325 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | 371 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) |
326 | 372 | ||
327 | /******************************************************************************* | 373 | /******************************************************************************* |
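A hedged sketch of how drivers use the reworked external interfaces (it mirrors the button.c and ec.c hunks below; gpe_device and gpe_number are placeholders):

static acpi_status example_gpe_usage(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	/* Take a runtime reference; the first caller actually enables the GPE. */
	status = acpi_enable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * Mask and unmask the GPE at the hardware level without touching the
	 * reference count, e.g. while handling an interrupt storm.
	 */
	acpi_set_gpe(gpe_device, gpe_number, ACPI_GPE_DISABLE);
	acpi_set_gpe(gpe_device, gpe_number, ACPI_GPE_ENABLE);

	/* Drop the reference; the GPE is disabled when the count reaches zero. */
	return acpi_disable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
}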
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 8a95e8329df7..f53fbe307c9d 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -422,11 +422,10 @@ static int acpi_button_add(struct acpi_device *device) | |||
422 | 422 | ||
423 | if (device->wakeup.flags.valid) { | 423 | if (device->wakeup.flags.valid) { |
424 | /* Button's GPE is run-wake GPE */ | 424 | /* Button's GPE is run-wake GPE */ |
425 | acpi_set_gpe_type(device->wakeup.gpe_device, | ||
426 | device->wakeup.gpe_number, | ||
427 | ACPI_GPE_TYPE_WAKE_RUN); | ||
428 | acpi_enable_gpe(device->wakeup.gpe_device, | 425 | acpi_enable_gpe(device->wakeup.gpe_device, |
429 | device->wakeup.gpe_number); | 426 | device->wakeup.gpe_number, |
427 | ACPI_GPE_TYPE_WAKE_RUN); | ||
428 | device->wakeup.run_wake_count++; | ||
430 | device->wakeup.state.enabled = 1; | 429 | device->wakeup.state.enabled = 1; |
431 | } | 430 | } |
432 | 431 | ||
@@ -446,6 +445,14 @@ static int acpi_button_remove(struct acpi_device *device, int type) | |||
446 | { | 445 | { |
447 | struct acpi_button *button = acpi_driver_data(device); | 446 | struct acpi_button *button = acpi_driver_data(device); |
448 | 447 | ||
448 | if (device->wakeup.flags.valid) { | ||
449 | acpi_disable_gpe(device->wakeup.gpe_device, | ||
450 | device->wakeup.gpe_number, | ||
451 | ACPI_GPE_TYPE_WAKE_RUN); | ||
452 | device->wakeup.run_wake_count--; | ||
453 | device->wakeup.state.enabled = 0; | ||
454 | } | ||
455 | |||
449 | acpi_button_remove_fs(device); | 456 | acpi_button_remove_fs(device); |
450 | input_unregister_device(button->input); | 457 | input_unregister_device(button->input); |
451 | kfree(button); | 458 | kfree(button); |
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c1315c47..b2586f57e1f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle) | |||
935 | struct platform_device *dd; | 935 | struct platform_device *dd; |
936 | 936 | ||
937 | id = dock_station_count; | 937 | id = dock_station_count; |
938 | memset(&ds, 0, sizeof(ds)); | ||
938 | dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); | 939 | dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); |
939 | if (IS_ERR(dd)) | 940 | if (IS_ERR(dd)) |
940 | return PTR_ERR(dd); | 941 | return PTR_ERR(dd); |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d6471bb6852f..27e0b92b2e39 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -307,7 +307,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
307 | pr_debug(PREFIX "transaction start\n"); | 307 | pr_debug(PREFIX "transaction start\n"); |
308 | /* disable GPE during transaction if storm is detected */ | 308 | /* disable GPE during transaction if storm is detected */ |
309 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | 309 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { |
310 | acpi_disable_gpe(NULL, ec->gpe); | 310 | /* |
311 | * It has to be disabled at the hardware level regardless of the | ||
312 | * GPE reference counting, so that it doesn't trigger. | ||
313 | */ | ||
314 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); | ||
311 | } | 315 | } |
312 | 316 | ||
313 | status = acpi_ec_transaction_unlocked(ec, t); | 317 | status = acpi_ec_transaction_unlocked(ec, t); |
@@ -316,8 +320,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
316 | ec_check_sci_sync(ec, acpi_ec_read_status(ec)); | 320 | ec_check_sci_sync(ec, acpi_ec_read_status(ec)); |
317 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | 321 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { |
318 | msleep(1); | 322 | msleep(1); |
319 | /* it is safe to enable GPE outside of transaction */ | 323 | /* |
320 | acpi_enable_gpe(NULL, ec->gpe); | 324 | * It is safe to enable the GPE outside of the transaction. Use |
325 | * acpi_set_gpe() for that, since we used it to disable the GPE | ||
326 | * above. | ||
327 | */ | ||
328 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); | ||
321 | } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { | 329 | } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { |
322 | pr_info(PREFIX "GPE storm detected, " | 330 | pr_info(PREFIX "GPE storm detected, " |
323 | "transactions will use polling mode\n"); | 331 | "transactions will use polling mode\n"); |
@@ -788,8 +796,8 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
788 | &acpi_ec_gpe_handler, ec); | 796 | &acpi_ec_gpe_handler, ec); |
789 | if (ACPI_FAILURE(status)) | 797 | if (ACPI_FAILURE(status)) |
790 | return -ENODEV; | 798 | return -ENODEV; |
791 | acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); | 799 | |
792 | acpi_enable_gpe(NULL, ec->gpe); | 800 | acpi_enable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); |
793 | status = acpi_install_address_space_handler(ec->handle, | 801 | status = acpi_install_address_space_handler(ec->handle, |
794 | ACPI_ADR_SPACE_EC, | 802 | ACPI_ADR_SPACE_EC, |
795 | &acpi_ec_space_handler, | 803 | &acpi_ec_space_handler, |
@@ -806,6 +814,7 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
806 | } else { | 814 | } else { |
807 | acpi_remove_gpe_handler(NULL, ec->gpe, | 815 | acpi_remove_gpe_handler(NULL, ec->gpe, |
808 | &acpi_ec_gpe_handler); | 816 | &acpi_ec_gpe_handler); |
817 | acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); | ||
809 | return -ENODEV; | 818 | return -ENODEV; |
810 | } | 819 | } |
811 | } | 820 | } |
@@ -816,6 +825,7 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
816 | 825 | ||
817 | static void ec_remove_handlers(struct acpi_ec *ec) | 826 | static void ec_remove_handlers(struct acpi_ec *ec) |
818 | { | 827 | { |
828 | acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); | ||
819 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, | 829 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, |
820 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) | 830 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) |
821 | pr_err(PREFIX "failed to remove space handler\n"); | 831 | pr_err(PREFIX "failed to remove space handler\n"); |
@@ -1057,16 +1067,16 @@ error: | |||
1057 | static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state) | 1067 | static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state) |
1058 | { | 1068 | { |
1059 | struct acpi_ec *ec = acpi_driver_data(device); | 1069 | struct acpi_ec *ec = acpi_driver_data(device); |
1060 | /* Stop using GPE */ | 1070 | /* Stop using the GPE, but keep it reference counted. */ |
1061 | acpi_disable_gpe(NULL, ec->gpe); | 1071 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); |
1062 | return 0; | 1072 | return 0; |
1063 | } | 1073 | } |
1064 | 1074 | ||
1065 | static int acpi_ec_resume(struct acpi_device *device) | 1075 | static int acpi_ec_resume(struct acpi_device *device) |
1066 | { | 1076 | { |
1067 | struct acpi_ec *ec = acpi_driver_data(device); | 1077 | struct acpi_ec *ec = acpi_driver_data(device); |
1068 | /* Enable use of GPE back */ | 1078 | /* Enable the GPE again, but don't reference count it once more. */ |
1069 | acpi_enable_gpe(NULL, ec->gpe); | 1079 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); |
1070 | return 0; | 1080 | return 0; |
1071 | } | 1081 | } |
1072 | 1082 | ||
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index cb28e0502acc..9c4c962e46e3 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -36,8 +36,6 @@ static inline int acpi_debug_init(void) { return 0; } | |||
36 | int acpi_power_init(void); | 36 | int acpi_power_init(void); |
37 | int acpi_device_sleep_wake(struct acpi_device *dev, | 37 | int acpi_device_sleep_wake(struct acpi_device *dev, |
38 | int enable, int sleep_state, int dev_state); | 38 | int enable, int sleep_state, int dev_state); |
39 | int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state); | ||
40 | int acpi_disable_wakeup_device_power(struct acpi_device *dev); | ||
41 | int acpi_power_get_inferred_state(struct acpi_device *device); | 39 | int acpi_power_get_inferred_state(struct acpi_device *device); |
42 | int acpi_power_transition(struct acpi_device *device, int state); | 40 | int acpi_power_transition(struct acpi_device *device, int state); |
43 | extern int acpi_power_nocheck; | 41 | extern int acpi_power_nocheck; |
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index a5a77b78a723..2ef04098cc1d 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -26,7 +26,9 @@ | |||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/pci-acpi.h> | ||
29 | #include <linux/acpi.h> | 30 | #include <linux/acpi.h> |
31 | #include <linux/pm_runtime.h> | ||
30 | #include <acpi/acpi_bus.h> | 32 | #include <acpi/acpi_bus.h> |
31 | #include <acpi/acpi_drivers.h> | 33 | #include <acpi/acpi_drivers.h> |
32 | 34 | ||
@@ -38,7 +40,13 @@ static int acpi_pci_unbind(struct acpi_device *device) | |||
38 | struct pci_dev *dev; | 40 | struct pci_dev *dev; |
39 | 41 | ||
40 | dev = acpi_get_pci_dev(device->handle); | 42 | dev = acpi_get_pci_dev(device->handle); |
41 | if (!dev || !dev->subordinate) | 43 | if (!dev) |
44 | goto out; | ||
45 | |||
46 | device_set_run_wake(&dev->dev, false); | ||
47 | pci_acpi_remove_pm_notifier(device); | ||
48 | |||
49 | if (!dev->subordinate) | ||
42 | goto out; | 50 | goto out; |
43 | 51 | ||
44 | acpi_pci_irq_del_prt(dev->subordinate); | 52 | acpi_pci_irq_del_prt(dev->subordinate); |
@@ -62,6 +70,10 @@ static int acpi_pci_bind(struct acpi_device *device) | |||
62 | if (!dev) | 70 | if (!dev) |
63 | return 0; | 71 | return 0; |
64 | 72 | ||
73 | pci_acpi_add_pm_notifier(device, dev); | ||
74 | if (device->wakeup.flags.run_wake) | ||
75 | device_set_run_wake(&dev->dev, true); | ||
76 | |||
65 | /* | 77 | /* |
66 | * Install the 'bind' function to facilitate callbacks for | 78 | * Install the 'bind' function to facilitate callbacks for |
67 | * children of the P2P bridge. | 79 | * children of the P2P bridge. |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 64f55b6db73c..d724736d56c8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/proc_fs.h> | 30 | #include <linux/proc_fs.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/pm.h> | 32 | #include <linux/pm.h> |
33 | #include <linux/pm_runtime.h> | ||
33 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
34 | #include <linux/pci-acpi.h> | 35 | #include <linux/pci-acpi.h> |
35 | #include <linux/acpi.h> | 36 | #include <linux/acpi.h> |
@@ -528,6 +529,10 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
528 | if (flags != base_flags) | 529 | if (flags != base_flags) |
529 | acpi_pci_osc_support(root, flags); | 530 | acpi_pci_osc_support(root, flags); |
530 | 531 | ||
532 | pci_acpi_add_bus_pm_notifier(device, root->bus); | ||
533 | if (device->wakeup.flags.run_wake) | ||
534 | device_set_run_wake(root->bus->bridge, true); | ||
535 | |||
531 | return 0; | 536 | return 0; |
532 | 537 | ||
533 | end: | 538 | end: |
@@ -549,6 +554,9 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type) | |||
549 | { | 554 | { |
550 | struct acpi_pci_root *root = acpi_driver_data(device); | 555 | struct acpi_pci_root *root = acpi_driver_data(device); |
551 | 556 | ||
557 | device_set_run_wake(root->bus->bridge, false); | ||
558 | pci_acpi_remove_bus_pm_notifier(device); | ||
559 | |||
552 | kfree(root); | 560 | kfree(root); |
553 | return 0; | 561 | return 0; |
554 | } | 562 | } |
@@ -558,6 +566,7 @@ static int __init acpi_pci_root_init(void) | |||
558 | if (acpi_pci_disabled) | 566 | if (acpi_pci_disabled) |
559 | return 0; | 567 | return 0; |
560 | 568 | ||
569 | pci_acpi_crs_quirks(); | ||
561 | if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) | 570 | if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) |
562 | return -ENODEV; | 571 | return -ENODEV; |
563 | 572 | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 7c0441f63b39..cc978a8c00b7 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { | |||
110 | DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), | 110 | DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), |
111 | DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, | 111 | DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, |
112 | (void *)2}, | 112 | (void *)2}, |
113 | { set_max_cstate, "Pavilion zv5000", { | ||
114 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
115 | DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, | ||
116 | (void *)1}, | ||
117 | { set_max_cstate, "Asus L8400B", { | ||
118 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
119 | DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, | ||
120 | (void *)1}, | ||
113 | {}, | 121 | {}, |
114 | }; | 122 | }; |
115 | 123 | ||
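The two new quirk entries above feed the existing set_max_cstate() callback; the (void *)1 payload is the C-state cap applied to the matched machine (C1 here, C2 for the earlier Phoenix entry). Only as a hedged sketch, since the callback itself is outside this hunk, a dmi_system_id callback typically consumes that payload roughly like this:

    #include <linux/dmi.h>
    #include <linux/kernel.h>

    static unsigned int max_cstate = 8;     /* stand-in for the driver's real limit */

    /* Sketch only: dmi_check_system() walks the table and calls this on a match. */
    static int set_max_cstate(const struct dmi_system_id *id)
    {
            long limit = (long)id->driver_data;     /* 1 or 2 in the entries above */

            pr_notice("%s detected - limiting to C%ld max_cstate\n",
                      id->ident, limit);
            max_cstate = limit;
            return 0;
    }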
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
872 | return(acpi_idle_enter_c1(dev, state)); | 880 | return(acpi_idle_enter_c1(dev, state)); |
873 | 881 | ||
874 | local_irq_disable(); | 882 | local_irq_disable(); |
875 | current_thread_info()->status &= ~TS_POLLING; | 883 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
876 | /* | 884 | current_thread_info()->status &= ~TS_POLLING; |
877 | * TS_POLLING-cleared state must be visible before we test | 885 | /* |
878 | * NEED_RESCHED: | 886 | * TS_POLLING-cleared state must be visible before we test |
879 | */ | 887 | * NEED_RESCHED: |
880 | smp_mb(); | 888 | */ |
889 | smp_mb(); | ||
890 | } | ||
881 | 891 | ||
882 | if (unlikely(need_resched())) { | 892 | if (unlikely(need_resched())) { |
883 | current_thread_info()->status |= TS_POLLING; | 893 | current_thread_info()->status |= TS_POLLING; |
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
957 | } | 967 | } |
958 | 968 | ||
959 | local_irq_disable(); | 969 | local_irq_disable(); |
960 | current_thread_info()->status &= ~TS_POLLING; | 970 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
961 | /* | 971 | current_thread_info()->status &= ~TS_POLLING; |
962 | * TS_POLLING-cleared state must be visible before we test | 972 | /* |
963 | * NEED_RESCHED: | 973 | * TS_POLLING-cleared state must be visible before we test |
964 | */ | 974 | * NEED_RESCHED: |
965 | smp_mb(); | 975 | */ |
976 | smp_mb(); | ||
977 | } | ||
966 | 978 | ||
967 | if (unlikely(need_resched())) { | 979 | if (unlikely(need_resched())) { |
968 | current_thread_info()->status |= TS_POLLING; | 980 | current_thread_info()->status |= TS_POLLING; |
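Both hunks above skip the TS_POLLING clear and the smp_mb() when the C-state is entered through FFH, presumably because an MWAIT-based idle path already monitors the thread-flags word and wakes on the very write that sets the reschedule flag, so the scheduler never has to send a wake-up IPI. A rough userspace analogy of that distinction (conceptual only, not kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int need_resched_flag;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

    /* Like MWAIT/FFH entry: the waiter notices the flag write by itself. */
    static void *polling_idle(void *arg)
    {
            (void)arg;
            while (!atomic_load(&need_resched_flag))
                    ;       /* a real CPU would sleep in monitor/mwait here */
            puts("polling idler saw the flag, no kick needed");
            return NULL;
    }

    /* Like HLT entry: the waiter must be kicked explicitly (the "IPI"). */
    static void *halted_idle(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            while (!atomic_load(&need_resched_flag))
                    pthread_cond_wait(&kick, &lock);
            pthread_mutex_unlock(&lock);
            puts("halted idler needed the explicit signal");
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, polling_idle, NULL);
            pthread_create(&b, NULL, halted_idle, NULL);
            sleep(1);

            atomic_store(&need_resched_flag, 1);    /* like setting the resched flag */
            pthread_mutex_lock(&lock);
            pthread_cond_broadcast(&kick);          /* only the halted waiter needs this */
            pthread_mutex_unlock(&lock);

            pthread_join(a, NULL);
            pthread_join(b, NULL);
            return 0;
    }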
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index 7247819dbd80..e306ba9aa34e 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c | |||
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) | |||
125 | return status; | 125 | return status; |
126 | } | 126 | } |
127 | 127 | ||
128 | static int early_pdc_done; | ||
129 | |||
128 | void acpi_processor_set_pdc(acpi_handle handle) | 130 | void acpi_processor_set_pdc(acpi_handle handle) |
129 | { | 131 | { |
130 | struct acpi_object_list *obj_list; | 132 | struct acpi_object_list *obj_list; |
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle) | |||
132 | if (arch_has_acpi_pdc() == false) | 134 | if (arch_has_acpi_pdc() == false) |
133 | return; | 135 | return; |
134 | 136 | ||
137 | if (early_pdc_done) | ||
138 | return; | ||
139 | |||
135 | obj_list = acpi_processor_alloc_pdc(); | 140 | obj_list = acpi_processor_alloc_pdc(); |
136 | if (!obj_list) | 141 | if (!obj_list) |
137 | return; | 142 | return; |
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id) | |||
151 | return 0; | 156 | return 0; |
152 | } | 157 | } |
153 | 158 | ||
159 | static int param_early_pdc_optin(char *s) | ||
160 | { | ||
161 | early_pdc_optin = 1; | ||
162 | return 1; | ||
163 | } | ||
164 | __setup("acpi_early_pdc_eval", param_early_pdc_optin); | ||
165 | |||
154 | static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { | 166 | static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { |
155 | { | 167 | { |
156 | set_early_pdc_optin, "HP Envy", { | 168 | set_early_pdc_optin, "HP Envy", { |
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void) | |||
192 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | 204 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, |
193 | ACPI_UINT32_MAX, | 205 | ACPI_UINT32_MAX, |
194 | early_init_pdc, NULL, NULL, NULL); | 206 | early_init_pdc, NULL, NULL, NULL); |
207 | |||
208 | early_pdc_done = 1; | ||
195 | } | 209 | } |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 2cabadcc4d8c..a959f6a07508 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr) | |||
413 | if (result) | 413 | if (result) |
414 | goto update_bios; | 414 | goto update_bios; |
415 | 415 | ||
416 | return 0; | 416 | /* We need to call _PPC once when cpufreq starts */ |
417 | if (ignore_ppc != 1) | ||
418 | result = acpi_processor_get_platform_limit(pr); | ||
419 | |||
420 | return result; | ||
417 | 421 | ||
418 | /* | 422 | /* |
419 | * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that | 423 | * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index ff9f6226085d..fb7fc24fe727 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -741,19 +741,40 @@ acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device, | |||
741 | return AE_OK; | 741 | return AE_OK; |
742 | } | 742 | } |
743 | 743 | ||
744 | static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | 744 | static void acpi_bus_set_run_wake_flags(struct acpi_device *device) |
745 | { | 745 | { |
746 | acpi_status status = 0; | ||
747 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
748 | union acpi_object *package = NULL; | ||
749 | int psw_error; | ||
750 | |||
751 | struct acpi_device_id button_device_ids[] = { | 746 | struct acpi_device_id button_device_ids[] = { |
752 | {"PNP0C0D", 0}, | 747 | {"PNP0C0D", 0}, |
753 | {"PNP0C0C", 0}, | 748 | {"PNP0C0C", 0}, |
754 | {"PNP0C0E", 0}, | 749 | {"PNP0C0E", 0}, |
755 | {"", 0}, | 750 | {"", 0}, |
756 | }; | 751 | }; |
752 | acpi_status status; | ||
753 | acpi_event_status event_status; | ||
754 | |||
755 | device->wakeup.run_wake_count = 0; | ||
756 | device->wakeup.flags.notifier_present = 0; | ||
757 | |||
758 | /* Power button, Lid switch always enable wakeup */ | ||
759 | if (!acpi_match_device_ids(device, button_device_ids)) { | ||
760 | device->wakeup.flags.run_wake = 1; | ||
761 | device->wakeup.flags.always_enabled = 1; | ||
762 | return; | ||
763 | } | ||
764 | |||
765 | status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number, | ||
766 | ACPI_NOT_ISR, &event_status); | ||
767 | if (status == AE_OK) | ||
768 | device->wakeup.flags.run_wake = | ||
769 | !!(event_status & ACPI_EVENT_FLAG_HANDLE); | ||
770 | } | ||
771 | |||
772 | static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | ||
773 | { | ||
774 | acpi_status status = 0; | ||
775 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
776 | union acpi_object *package = NULL; | ||
777 | int psw_error; | ||
757 | 778 | ||
758 | /* _PRW */ | 779 | /* _PRW */ |
759 | status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); | 780 | status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); |
@@ -773,6 +794,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
773 | 794 | ||
774 | device->wakeup.flags.valid = 1; | 795 | device->wakeup.flags.valid = 1; |
775 | device->wakeup.prepare_count = 0; | 796 | device->wakeup.prepare_count = 0; |
797 | acpi_bus_set_run_wake_flags(device); | ||
776 | /* Call _PSW/_DSW object to disable its ability to wake the sleeping | 798 | /* Call _PSW/_DSW object to disable its ability to wake the sleeping |
777 | * system for the ACPI device with the _PRW object. | 799 | * system for the ACPI device with the _PRW object. |
778 | * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW. | 800 |
@@ -784,10 +806,6 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
784 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 806 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
785 | "error in _DSW or _PSW evaluation\n")); | 807 | "error in _DSW or _PSW evaluation\n")); |
786 | 808 | ||
787 | /* Power button, Lid switch always enable wakeup */ | ||
788 | if (!acpi_match_device_ids(device, button_device_ids)) | ||
789 | device->wakeup.flags.run_wake = 1; | ||
790 | |||
791 | end: | 809 | end: |
792 | if (ACPI_FAILURE(status)) | 810 | if (ACPI_FAILURE(status)) |
793 | device->flags.wake_capable = 0; | 811 | device->flags.wake_capable = 0; |
@@ -1336,9 +1354,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops, | |||
1336 | 1354 | ||
1337 | if (child) | 1355 | if (child) |
1338 | *child = device; | 1356 | *child = device; |
1339 | return 0; | 1357 | |
1358 | if (device) | ||
1359 | return 0; | ||
1360 | else | ||
1361 | return -ENODEV; | ||
1340 | } | 1362 | } |
1341 | 1363 | ||
1364 | /* | ||
1365 | * acpi_bus_add and acpi_bus_start | ||
1366 | * | ||
1367 | * Scan a given ACPI tree (which has probably just been hot-plugged) | ||
1368 | * and create and add, or start, the devices found in it. | ||
1369 | * | ||
1370 | * If no devices were found, -ENODEV is returned.  This is not | ||
1371 | * necessarily a real error: it just means that no suitable ACPI | ||
1372 | * objects were present in the scanned subtree from which the kernel | ||
1373 | * could create a device and add/start an appropriate driver. | ||
1374 | */ | ||
1375 | |||
1342 | int | 1376 | int |
1343 | acpi_bus_add(struct acpi_device **child, | 1377 | acpi_bus_add(struct acpi_device **child, |
1344 | struct acpi_device *parent, acpi_handle handle, int type) | 1378 | struct acpi_device *parent, acpi_handle handle, int type) |
@@ -1348,8 +1382,7 @@ acpi_bus_add(struct acpi_device **child, | |||
1348 | memset(&ops, 0, sizeof(ops)); | 1382 | memset(&ops, 0, sizeof(ops)); |
1349 | ops.acpi_op_add = 1; | 1383 | ops.acpi_op_add = 1; |
1350 | 1384 | ||
1351 | acpi_bus_scan(handle, &ops, child); | 1385 | return acpi_bus_scan(handle, &ops, child); |
1352 | return 0; | ||
1353 | } | 1386 | } |
1354 | EXPORT_SYMBOL(acpi_bus_add); | 1387 | EXPORT_SYMBOL(acpi_bus_add); |
1355 | 1388 | ||
@@ -1357,11 +1390,13 @@ int acpi_bus_start(struct acpi_device *device) | |||
1357 | { | 1390 | { |
1358 | struct acpi_bus_ops ops; | 1391 | struct acpi_bus_ops ops; |
1359 | 1392 | ||
1393 | if (!device) | ||
1394 | return -EINVAL; | ||
1395 | |||
1360 | memset(&ops, 0, sizeof(ops)); | 1396 | memset(&ops, 0, sizeof(ops)); |
1361 | ops.acpi_op_start = 1; | 1397 | ops.acpi_op_start = 1; |
1362 | 1398 | ||
1363 | acpi_bus_scan(device->handle, &ops, NULL); | 1399 | return acpi_bus_scan(device->handle, &ops, NULL); |
1364 | return 0; | ||
1365 | } | 1400 | } |
1366 | EXPORT_SYMBOL(acpi_bus_start); | 1401 | EXPORT_SYMBOL(acpi_bus_start); |
1367 | 1402 | ||
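The comment block and return-value changes above make acpi_bus_add()/acpi_bus_start() propagate acpi_bus_scan()'s result, with -ENODEV meaning "nothing suitable found" rather than a hard failure. A hypothetical caller (every name apart from the two exported functions is invented for illustration) would treat it like this:

    /* Hypothetical hotplug-path caller; only the return-value handling
     * reflects this patch, everything else is invented for the example. */
    static int example_hotplug_scan(struct acpi_device *parent, acpi_handle handle)
    {
            struct acpi_device *device = NULL;
            int ret;

            ret = acpi_bus_add(&device, parent, handle, ACPI_BUS_TYPE_DEVICE);
            if (ret == -ENODEV)
                    return 0;       /* nothing suitable below this handle - not an error */
            if (ret)
                    return ret;     /* a real failure */

            /* device is non-NULL here, so acpi_bus_start() will not return -EINVAL */
            return acpi_bus_start(device);
    }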
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 79d33d908b5a..3bde594a9979 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -745,9 +745,18 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | |||
745 | return -ENODEV; | 745 | return -ENODEV; |
746 | } | 746 | } |
747 | 747 | ||
748 | error = enable ? | 748 | if (enable) { |
749 | acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : | 749 | error = acpi_enable_wakeup_device_power(adev, |
750 | acpi_disable_wakeup_device_power(adev); | 750 | acpi_target_sleep_state); |
751 | if (!error) | ||
752 | acpi_enable_gpe(adev->wakeup.gpe_device, | ||
753 | adev->wakeup.gpe_number, | ||
754 | ACPI_GPE_TYPE_WAKE); | ||
755 | } else { | ||
756 | acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number, | ||
757 | ACPI_GPE_TYPE_WAKE); | ||
758 | error = acpi_disable_wakeup_device_power(adev); | ||
759 | } | ||
751 | if (!error) | 760 | if (!error) |
752 | dev_info(dev, "wake-up capability %s by ACPI\n", | 761 | dev_info(dev, "wake-up capability %s by ACPI\n", |
753 | enable ? "enabled" : "disabled"); | 762 | enable ? "enabled" : "disabled"); |
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index d11282975f35..a206a12da78a 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c | |||
@@ -387,10 +387,10 @@ static ssize_t counter_set(struct kobject *kobj, | |||
387 | if (index < num_gpes) { | 387 | if (index < num_gpes) { |
388 | if (!strcmp(buf, "disable\n") && | 388 | if (!strcmp(buf, "disable\n") && |
389 | (status & ACPI_EVENT_FLAG_ENABLED)) | 389 | (status & ACPI_EVENT_FLAG_ENABLED)) |
390 | result = acpi_disable_gpe(handle, index); | 390 | result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE); |
391 | else if (!strcmp(buf, "enable\n") && | 391 | else if (!strcmp(buf, "enable\n") && |
392 | !(status & ACPI_EVENT_FLAG_ENABLED)) | 392 | !(status & ACPI_EVENT_FLAG_ENABLED)) |
393 | result = acpi_enable_gpe(handle, index); | 393 | result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE); |
394 | else if (!strcmp(buf, "clear\n") && | 394 | else if (!strcmp(buf, "clear\n") && |
395 | (status & ACPI_EVENT_FLAG_SET)) | 395 | (status & ACPI_EVENT_FLAG_SET)) |
396 | result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); | 396 | result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); |
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index f336bca7c450..8a0ed2800e63 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c | |||
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id, | |||
213 | unsigned long table_end; | 213 | unsigned long table_end; |
214 | acpi_size tbl_size; | 214 | acpi_size tbl_size; |
215 | 215 | ||
216 | if (acpi_disabled) | 216 | if (acpi_disabled && !acpi_ht) |
217 | return -ENODEV; | 217 | return -ENODEV; |
218 | 218 | ||
219 | if (!handler) | 219 | if (!handler) |
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler) | |||
280 | struct acpi_table_header *table = NULL; | 280 | struct acpi_table_header *table = NULL; |
281 | acpi_size tbl_size; | 281 | acpi_size tbl_size; |
282 | 282 | ||
283 | if (acpi_disabled) | 283 | if (acpi_disabled && !acpi_ht) |
284 | return -ENODEV; | 284 | return -ENODEV; |
285 | 285 | ||
286 | if (!handler) | 286 | if (!handler) |
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index e0ee0c036f5a..4b9d339a6e28 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
@@ -21,12 +21,12 @@ | |||
21 | ACPI_MODULE_NAME("wakeup_devices") | 21 | ACPI_MODULE_NAME("wakeup_devices") |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * acpi_enable_wakeup_device_prep - prepare wakeup devices | 24 | * acpi_enable_wakeup_device_prep - Prepare wake-up devices. |
25 | * @sleep_state: ACPI state | 25 | * @sleep_state: ACPI system sleep state. |
26 | * Enable all wakup devices power if the devices' wakeup level | 26 | * |
27 | * is higher than requested sleep level | 27 | * Enable all wake-up devices' power, unless the requested system sleep state is |
28 | * too deep. | ||
28 | */ | 29 | */ |
29 | |||
30 | void acpi_enable_wakeup_device_prep(u8 sleep_state) | 30 | void acpi_enable_wakeup_device_prep(u8 sleep_state) |
31 | { | 31 | { |
32 | struct list_head *node, *next; | 32 | struct list_head *node, *next; |
@@ -36,9 +36,8 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) | |||
36 | struct acpi_device, | 36 | struct acpi_device, |
37 | wakeup_list); | 37 | wakeup_list); |
38 | 38 | ||
39 | if (!dev->wakeup.flags.valid || | 39 | if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled |
40 | !dev->wakeup.state.enabled || | 40 | || (sleep_state > (u32) dev->wakeup.sleep_state)) |
41 | (sleep_state > (u32) dev->wakeup.sleep_state)) | ||
42 | continue; | 41 | continue; |
43 | 42 | ||
44 | acpi_enable_wakeup_device_power(dev, sleep_state); | 43 | acpi_enable_wakeup_device_power(dev, sleep_state); |
@@ -46,9 +45,12 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) | |||
46 | } | 45 | } |
47 | 46 | ||
48 | /** | 47 | /** |
49 | * acpi_enable_wakeup_device - enable wakeup devices | 48 | * acpi_enable_wakeup_device - Enable wake-up device GPEs. |
50 | * @sleep_state: ACPI state | 49 | * @sleep_state: ACPI system sleep state. |
51 | * Enable all wakup devices's GPE | 50 | * |
51 | * Enable all wake-up devices' GPEs, with the assumption that | ||
52 | * acpi_disable_all_gpes() was executed before, so we don't need to disable any | ||
53 | * GPEs here. | ||
52 | */ | 54 | */ |
53 | void acpi_enable_wakeup_device(u8 sleep_state) | 55 | void acpi_enable_wakeup_device(u8 sleep_state) |
54 | { | 56 | { |
@@ -65,29 +67,22 @@ void acpi_enable_wakeup_device(u8 sleep_state) | |||
65 | if (!dev->wakeup.flags.valid) | 67 | if (!dev->wakeup.flags.valid) |
66 | continue; | 68 | continue; |
67 | 69 | ||
68 | /* If users want to disable run-wake GPE, | ||
69 | * we only disable it for wake and leave it for runtime | ||
70 | */ | ||
71 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) | 70 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) |
72 | || sleep_state > (u32) dev->wakeup.sleep_state) { | 71 | || sleep_state > (u32) dev->wakeup.sleep_state) |
73 | if (dev->wakeup.flags.run_wake) { | ||
74 | /* set_gpe_type will disable GPE, leave it like that */ | ||
75 | acpi_set_gpe_type(dev->wakeup.gpe_device, | ||
76 | dev->wakeup.gpe_number, | ||
77 | ACPI_GPE_TYPE_RUNTIME); | ||
78 | } | ||
79 | continue; | 72 | continue; |
80 | } | 73 | |
81 | if (!dev->wakeup.flags.run_wake) | 74 | /* The wake-up power should have been enabled already. */ |
82 | acpi_enable_gpe(dev->wakeup.gpe_device, | 75 | acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
83 | dev->wakeup.gpe_number); | 76 | ACPI_GPE_ENABLE); |
84 | } | 77 | } |
85 | } | 78 | } |
86 | 79 | ||
87 | /** | 80 | /** |
88 | * acpi_disable_wakeup_device - disable devices' wakeup capability | 81 | * acpi_disable_wakeup_device - Disable devices' wakeup capability. |
89 | * @sleep_state: ACPI state | 82 | * @sleep_state: ACPI system sleep state. |
90 | * Disable all wakup devices's GPE and wakeup capability | 83 | * |
84 | * This function only affects devices with wakeup.state.enabled set, which means | ||
85 | * that it reverses the changes made by acpi_enable_wakeup_device_prep(). | ||
91 | */ | 86 | */ |
92 | void acpi_disable_wakeup_device(u8 sleep_state) | 87 | void acpi_disable_wakeup_device(u8 sleep_state) |
93 | { | 88 | { |
@@ -97,30 +92,11 @@ void acpi_disable_wakeup_device(u8 sleep_state) | |||
97 | struct acpi_device *dev = | 92 | struct acpi_device *dev = |
98 | container_of(node, struct acpi_device, wakeup_list); | 93 | container_of(node, struct acpi_device, wakeup_list); |
99 | 94 | ||
100 | if (!dev->wakeup.flags.valid) | 95 | if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled |
101 | continue; | 96 | || (sleep_state > (u32) dev->wakeup.sleep_state)) |
102 | |||
103 | if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) | ||
104 | || sleep_state > (u32) dev->wakeup.sleep_state) { | ||
105 | if (dev->wakeup.flags.run_wake) { | ||
106 | acpi_set_gpe_type(dev->wakeup.gpe_device, | ||
107 | dev->wakeup.gpe_number, | ||
108 | ACPI_GPE_TYPE_WAKE_RUN); | ||
109 | /* Re-enable it, since set_gpe_type will disable it */ | ||
110 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
111 | dev->wakeup.gpe_number); | ||
112 | } | ||
113 | continue; | 97 | continue; |
114 | } | ||
115 | 98 | ||
116 | acpi_disable_wakeup_device_power(dev); | 99 | acpi_disable_wakeup_device_power(dev); |
117 | /* Never disable run-wake GPE */ | ||
118 | if (!dev->wakeup.flags.run_wake) { | ||
119 | acpi_disable_gpe(dev->wakeup.gpe_device, | ||
120 | dev->wakeup.gpe_number); | ||
121 | acpi_clear_gpe(dev->wakeup.gpe_device, | ||
122 | dev->wakeup.gpe_number, ACPI_NOT_ISR); | ||
123 | } | ||
124 | } | 100 | } |
125 | } | 101 | } |
126 | 102 | ||
@@ -134,13 +110,11 @@ int __init acpi_wakeup_device_init(void) | |||
134 | struct acpi_device, | 110 | struct acpi_device, |
135 | wakeup_list); | 111 | wakeup_list); |
136 | /* In case user doesn't load button driver */ | 112 | /* In case user doesn't load button driver */ |
137 | if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled) | 113 | if (!dev->wakeup.flags.always_enabled || |
114 | dev->wakeup.state.enabled) | ||
138 | continue; | 115 | continue; |
139 | acpi_set_gpe_type(dev->wakeup.gpe_device, | 116 | acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, |
140 | dev->wakeup.gpe_number, | 117 | ACPI_GPE_TYPE_WAKE); |
141 | ACPI_GPE_TYPE_WAKE_RUN); | ||
142 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
143 | dev->wakeup.gpe_number); | ||
144 | dev->wakeup.state.enabled = 1; | 118 | dev->wakeup.state.enabled = 1; |
145 | } | 119 | } |
146 | mutex_unlock(&acpi_device_lock); | 120 | mutex_unlock(&acpi_device_lock); |
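The rewrite above drops the set_gpe_type()/enable/disable juggling in favour of two calls: acpi_set_gpe(), which appears to act directly on the hardware enable bit, and a typed acpi_enable_gpe()/acpi_disable_gpe(), which the surrounding changes suggest track how many users need a GPE. Treating that as an assumption rather than a statement about ACPICA internals, the standalone sketch below models why a reference-counted enable behaves differently from a direct set:

    #include <stdio.h>

    /* Conceptual model only - not ACPICA code. */
    struct gpe {
            unsigned int runtime_count;
            int hw_enabled;
    };

    static void gpe_hw_write(struct gpe *g, int enable)     /* acpi_set_gpe()-like */
    {
            g->hw_enabled = enable;
    }

    static void gpe_ref_enable(struct gpe *g)               /* acpi_enable_gpe()-like */
    {
            if (g->runtime_count++ == 0)
                    gpe_hw_write(g, 1);     /* first user turns the hardware bit on */
    }

    static void gpe_ref_disable(struct gpe *g)              /* acpi_disable_gpe()-like */
    {
            if (g->runtime_count && --g->runtime_count == 0)
                    gpe_hw_write(g, 0);     /* last user turns it off again */
    }

    int main(void)
    {
            struct gpe g = { 0, 0 };

            gpe_ref_enable(&g);     /* driver A needs the GPE */
            gpe_ref_enable(&g);     /* driver B needs it too: no hardware change */
            gpe_ref_disable(&g);    /* A is done, B still needs it: stays enabled */
            printf("count=%u enabled=%d\n", g.runtime_count, g.hw_enabled);
            gpe_ref_disable(&g);    /* B is done: hardware bit finally goes off */
            printf("count=%u enabled=%d\n", g.runtime_count, g.hw_enabled);
            return 0;
    }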
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index b34390347c16..a6a736a7dbf2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -3082,8 +3082,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3082 | ahci_save_initial_config(pdev, hpriv); | 3082 | ahci_save_initial_config(pdev, hpriv); |
3083 | 3083 | ||
3084 | /* prepare host */ | 3084 | /* prepare host */ |
3085 | if (hpriv->cap & HOST_CAP_NCQ) | 3085 | if (hpriv->cap & HOST_CAP_NCQ) { |
3086 | pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA; | 3086 | pi.flags |= ATA_FLAG_NCQ; |
3087 | /* Auto-activate optimization is supposed to be supported on | ||
3088 | all AHCI controllers indicating NCQ support, but it seems | ||
3089 | to be broken at least on some NVIDIA MCP79 chipsets. | ||
3090 | Until we get info on which NVIDIA chipsets don't have this | ||
3091 | issue, if any, disable AA on all NVIDIA AHCIs. */ | ||
3092 | if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) | ||
3093 | pi.flags |= ATA_FLAG_FPDMA_AA; | ||
3094 | } | ||
3087 | 3095 | ||
3088 | if (hpriv->cap & HOST_CAP_PMP) | 3096 | if (hpriv->cap & HOST_CAP_PMP) |
3089 | pi.flags |= ATA_FLAG_PMP; | 3097 | pi.flags |= ATA_FLAG_PMP; |
diff --git a/drivers/base/class.c b/drivers/base/class.c index 161746deab4b..6e2c3b064f53 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c | |||
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj) | |||
59 | else | 59 | else |
60 | pr_debug("class '%s' does not have a release() function, " | 60 | pr_debug("class '%s' does not have a release() function, " |
61 | "be careful\n", class->name); | 61 | "be careful\n", class->name); |
62 | |||
63 | kfree(cp); | ||
62 | } | 64 | } |
63 | 65 | ||
64 | static struct sysfs_ops class_sysfs_ops = { | 66 | static struct sysfs_ops class_sysfs_ops = { |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 51042f0ba7e1..7eff828b2117 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -243,10 +243,12 @@ static int index_to_minor(int index) | |||
243 | static int __devinit virtblk_probe(struct virtio_device *vdev) | 243 | static int __devinit virtblk_probe(struct virtio_device *vdev) |
244 | { | 244 | { |
245 | struct virtio_blk *vblk; | 245 | struct virtio_blk *vblk; |
246 | struct request_queue *q; | ||
246 | int err; | 247 | int err; |
247 | u64 cap; | 248 | u64 cap; |
248 | u32 v; | 249 | u32 v, blk_size, sg_elems, opt_io_size; |
249 | u32 blk_size, sg_elems; | 250 | u16 min_io_size; |
251 | u8 physical_block_exp, alignment_offset; | ||
250 | 252 | ||
251 | if (index_to_minor(index) >= 1 << MINORBITS) | 253 | if (index_to_minor(index) >= 1 << MINORBITS) |
252 | return -ENOSPC; | 254 | return -ENOSPC; |
@@ -293,13 +295,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
293 | goto out_mempool; | 295 | goto out_mempool; |
294 | } | 296 | } |
295 | 297 | ||
296 | vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); | 298 | q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); |
297 | if (!vblk->disk->queue) { | 299 | if (!q) { |
298 | err = -ENOMEM; | 300 | err = -ENOMEM; |
299 | goto out_put_disk; | 301 | goto out_put_disk; |
300 | } | 302 | } |
301 | 303 | ||
302 | vblk->disk->queue->queuedata = vblk; | 304 | q->queuedata = vblk; |
303 | 305 | ||
304 | if (index < 26) { | 306 | if (index < 26) { |
305 | sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); | 307 | sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); |
@@ -323,10 +325,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
323 | 325 | ||
324 | /* If barriers are supported, tell block layer that queue is ordered */ | 326 | /* If barriers are supported, tell block layer that queue is ordered */ |
325 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) | 327 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) |
326 | blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH, | 328 | blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, |
327 | virtblk_prepare_flush); | 329 | virtblk_prepare_flush); |
328 | else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) | 330 | else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) |
329 | blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL); | 331 | blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL); |
330 | 332 | ||
331 | /* If disk is read-only in the host, the guest should obey */ | 333 | /* If disk is read-only in the host, the guest should obey */ |
332 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) | 334 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) |
@@ -345,14 +347,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
345 | set_capacity(vblk->disk, cap); | 347 | set_capacity(vblk->disk, cap); |
346 | 348 | ||
347 | /* We can handle whatever the host told us to handle. */ | 349 | /* We can handle whatever the host told us to handle. */ |
348 | blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2); | 350 | blk_queue_max_phys_segments(q, vblk->sg_elems-2); |
349 | blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2); | 351 | blk_queue_max_hw_segments(q, vblk->sg_elems-2); |
350 | 352 | ||
351 | /* No need to bounce any requests */ | 353 | /* No need to bounce any requests */ |
352 | blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY); | 354 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
353 | 355 | ||
354 | /* No real sector limit. */ | 356 | /* No real sector limit. */ |
355 | blk_queue_max_sectors(vblk->disk->queue, -1U); | 357 | blk_queue_max_sectors(q, -1U); |
356 | 358 | ||
357 | /* Host can optionally specify maximum segment size and number of | 359 | /* Host can optionally specify maximum segment size and number of |
358 | * segments. */ | 360 | * segments. */ |
@@ -360,16 +362,45 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
360 | offsetof(struct virtio_blk_config, size_max), | 362 | offsetof(struct virtio_blk_config, size_max), |
361 | &v); | 363 | &v); |
362 | if (!err) | 364 | if (!err) |
363 | blk_queue_max_segment_size(vblk->disk->queue, v); | 365 | blk_queue_max_segment_size(q, v); |
364 | else | 366 | else |
365 | blk_queue_max_segment_size(vblk->disk->queue, -1U); | 367 | blk_queue_max_segment_size(q, -1U); |
366 | 368 | ||
367 | /* Host can optionally specify the block size of the device */ | 369 | /* Host can optionally specify the block size of the device */ |
368 | err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, | 370 | err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, |
369 | offsetof(struct virtio_blk_config, blk_size), | 371 | offsetof(struct virtio_blk_config, blk_size), |
370 | &blk_size); | 372 | &blk_size); |
371 | if (!err) | 373 | if (!err) |
372 | blk_queue_logical_block_size(vblk->disk->queue, blk_size); | 374 | blk_queue_logical_block_size(q, blk_size); |
375 | else | ||
376 | blk_size = queue_logical_block_size(q); | ||
377 | |||
378 | /* Use topology information if available */ | ||
379 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
380 | offsetof(struct virtio_blk_config, physical_block_exp), | ||
381 | &physical_block_exp); | ||
382 | if (!err && physical_block_exp) | ||
383 | blk_queue_physical_block_size(q, | ||
384 | blk_size * (1 << physical_block_exp)); | ||
385 | |||
386 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
387 | offsetof(struct virtio_blk_config, alignment_offset), | ||
388 | &alignment_offset); | ||
389 | if (!err && alignment_offset) | ||
390 | blk_queue_alignment_offset(q, blk_size * alignment_offset); | ||
391 | |||
392 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
393 | offsetof(struct virtio_blk_config, min_io_size), | ||
394 | &min_io_size); | ||
395 | if (!err && min_io_size) | ||
396 | blk_queue_io_min(q, blk_size * min_io_size); | ||
397 | |||
398 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | ||
399 | offsetof(struct virtio_blk_config, opt_io_size), | ||
400 | &opt_io_size); | ||
401 | if (!err && opt_io_size) | ||
402 | blk_queue_io_opt(q, blk_size * opt_io_size); | ||
403 | |||
373 | 404 | ||
374 | add_disk(vblk->disk); | 405 | add_disk(vblk->disk); |
375 | return 0; | 406 | return 0; |
@@ -412,7 +443,7 @@ static struct virtio_device_id id_table[] = { | |||
412 | static unsigned int features[] = { | 443 | static unsigned int features[] = { |
413 | VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, | 444 | VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, |
414 | VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, | 445 | VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, |
415 | VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH | 446 | VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY |
416 | }; | 447 | }; |
417 | 448 | ||
418 | /* | 449 | /* |
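The new VIRTIO_BLK_F_TOPOLOGY block above derives every topology limit from the logical block size: physical_block_exp is a power-of-two exponent, while alignment_offset, min_io_size and opt_io_size are multiples of blk_size. A standalone example of the arithmetic (the config values are invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned int blk_size = 512;            /* logical block size */
            unsigned char physical_block_exp = 3;   /* example config values */
            unsigned char alignment_offset = 1;
            unsigned short min_io_size = 8;
            unsigned int opt_io_size = 256;

            printf("physical block size: %u\n", blk_size * (1u << physical_block_exp));
            printf("alignment offset:    %u\n", blk_size * alignment_offset);
            printf("minimum I/O size:    %u\n", blk_size * min_io_size);
            printf("optimal I/O size:    %u\n", blk_size * opt_io_size);
            return 0;
    }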
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index e023682be2c4..3141dd3b6e53 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -666,6 +666,14 @@ config VIRTIO_CONSOLE | |||
666 | help | 666 | help |
667 | Virtio console for use with lguest and other hypervisors. | 667 | Virtio console for use with lguest and other hypervisors. |
668 | 668 | ||
669 | Also serves as a general-purpose serial device for data | ||
670 | transfer between the guest and host. Character devices at | ||
671 | /dev/vportNpn will be created when corresponding ports are | ||
672 | found, where N is the device number and n is the port number | ||
673 | within that device. If specified by the host, a sysfs | ||
674 | attribute called 'name' will be populated with a name for | ||
675 | the port which can be used by udev scripts to create a | ||
676 | symlink to the device. | ||
669 | 677 | ||
670 | config HVCS | 678 | config HVCS |
671 | tristate "IBM Hypervisor Virtual Console Server support" | 679 | tristate "IBM Hypervisor Virtual Console Server support" |
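The added help text describes the /dev/vportNpn nodes created by the updated virtio console driver. As a hedged illustration only (the node name, and whether the host has opened its side, depend entirely on the host configuration), a guest program could talk to the host through such a port like this:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *node = "/dev/vport0p1";     /* example: device 0, port 1 */
            char buf[256];
            ssize_t n;
            int fd = open(node, O_RDWR);

            if (fd < 0) {
                    perror(node);
                    return 1;
            }
            if (write(fd, "hello host\n", 11) < 0)
                    perror("write");
            n = read(fd, buf, sizeof(buf) - 1);     /* blocks until the host sends data */
            if (n > 0) {
                    buf[n] = '\0';
                    printf("host said: %s", buf);
            }
            close(fd);
            return 0;
    }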
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c index 0afc8b82212e..5fe4631e2a61 100644 --- a/drivers/char/hvc_beat.c +++ b/drivers/char/hvc_beat.c | |||
@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt) | |||
84 | return cnt; | 84 | return cnt; |
85 | } | 85 | } |
86 | 86 | ||
87 | static struct hv_ops hvc_beat_get_put_ops = { | 87 | static const struct hv_ops hvc_beat_get_put_ops = { |
88 | .get_chars = hvc_beat_get_chars, | 88 | .get_chars = hvc_beat_get_chars, |
89 | .put_chars = hvc_beat_put_chars, | 89 | .put_chars = hvc_beat_put_chars, |
90 | }; | 90 | }; |
@@ -99,7 +99,7 @@ static int hvc_beat_config(char *p) | |||
99 | 99 | ||
100 | static int __init hvc_beat_console_init(void) | 100 | static int __init hvc_beat_console_init(void) |
101 | { | 101 | { |
102 | if (hvc_beat_useit && machine_is_compatible("Beat")) { | 102 | if (hvc_beat_useit && of_machine_is_compatible("Beat")) { |
103 | hvc_instantiate(0, 0, &hvc_beat_get_put_ops); | 103 | hvc_instantiate(0, 0, &hvc_beat_get_put_ops); |
104 | } | 104 | } |
105 | return 0; | 105 | return 0; |
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 416d3423150d..d8dac5820f0e 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index) | |||
125 | * console interfaces but can still be used as a tty device. This has to be | 125 | * console interfaces but can still be used as a tty device. This has to be |
126 | * static because kmalloc will not work during early console init. | 126 | * static because kmalloc will not work during early console init. |
127 | */ | 127 | */ |
128 | static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; | 128 | static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; |
129 | static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = | 129 | static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = |
130 | {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; | 130 | {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; |
131 | 131 | ||
@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kref *kref) | |||
247 | * vty adapters do NOT get an hvc_instantiate() callback since they | 247 | * vty adapters do NOT get an hvc_instantiate() callback since they |
248 | * appear after early console init. | 248 | * appear after early console init. |
249 | */ | 249 | */ |
250 | int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops) | 250 | int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) |
251 | { | 251 | { |
252 | struct hvc_struct *hp; | 252 | struct hvc_struct *hp; |
253 | 253 | ||
@@ -749,7 +749,8 @@ static const struct tty_operations hvc_ops = { | |||
749 | }; | 749 | }; |
750 | 750 | ||
751 | struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, | 751 | struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, |
752 | struct hv_ops *ops, int outbuf_size) | 752 | const struct hv_ops *ops, |
753 | int outbuf_size) | ||
753 | { | 754 | { |
754 | struct hvc_struct *hp; | 755 | struct hvc_struct *hp; |
755 | int i; | 756 | int i; |
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h index 10950ca706d8..52ddf4d3716c 100644 --- a/drivers/char/hvc_console.h +++ b/drivers/char/hvc_console.h | |||
@@ -55,7 +55,7 @@ struct hvc_struct { | |||
55 | int outbuf_size; | 55 | int outbuf_size; |
56 | int n_outbuf; | 56 | int n_outbuf; |
57 | uint32_t vtermno; | 57 | uint32_t vtermno; |
58 | struct hv_ops *ops; | 58 | const struct hv_ops *ops; |
59 | int irq_requested; | 59 | int irq_requested; |
60 | int data; | 60 | int data; |
61 | struct winsize ws; | 61 | struct winsize ws; |
@@ -76,11 +76,12 @@ struct hv_ops { | |||
76 | }; | 76 | }; |
77 | 77 | ||
78 | /* Register a vterm and a slot index for use as a console (console_init) */ | 78 | /* Register a vterm and a slot index for use as a console (console_init) */ |
79 | extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); | 79 | extern int hvc_instantiate(uint32_t vtermno, int index, |
80 | const struct hv_ops *ops); | ||
80 | 81 | ||
81 | /* register a vterm for hvc tty operation (module_init or hotplug add) */ | 82 | /* register a vterm for hvc tty operation (module_init or hotplug add) */ |
82 | extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, | 83 | extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, |
83 | struct hv_ops *ops, int outbuf_size); | 84 | const struct hv_ops *ops, int outbuf_size); |
84 | /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ | 85 | /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ |
85 | extern int hvc_remove(struct hvc_struct *hp); | 86 | extern int hvc_remove(struct hvc_struct *hp); |
86 | 87 | ||
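This header change, together with the matching one-line updates in every hvc backend below, only adds constness: an hv_ops table is a function-pointer table fixed at build time, so it can be marked const and placed in read-only data. A minimal standalone illustration of the pattern (types and names invented for the example):

    #include <stdio.h>

    struct ops {
            int (*get_chars)(unsigned int vtermno, char *buf, int count);
            int (*put_chars)(unsigned int vtermno, const char *buf, int count);
    };

    static int demo_get(unsigned int vtermno, char *buf, int count)
    {
            (void)vtermno; (void)buf; (void)count;
            return 0;                       /* nothing to read in this demo */
    }

    static int demo_put(unsigned int vtermno, const char *buf, int count)
    {
            (void)vtermno;
            return (int)fwrite(buf, 1, count, stdout);
    }

    /* const: the table never changes, so it can live in .rodata */
    static const struct ops demo_ops = {
            .get_chars = demo_get,
            .put_chars = demo_put,
    };

    static void backend_write(const struct ops *ops, const char *s, int len)
    {
            ops->put_chars(0, s, len);      /* callers only ever need a const pointer */
    }

    int main(void)
    {
            backend_write(&demo_ops, "hello\n", 6);
            return 0;
    }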
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c index 936d05bf37fa..fd0242676a2a 100644 --- a/drivers/char/hvc_iseries.c +++ b/drivers/char/hvc_iseries.c | |||
@@ -197,7 +197,7 @@ done: | |||
197 | return sent; | 197 | return sent; |
198 | } | 198 | } |
199 | 199 | ||
200 | static struct hv_ops hvc_get_put_ops = { | 200 | static const struct hv_ops hvc_get_put_ops = { |
201 | .get_chars = get_chars, | 201 | .get_chars = get_chars, |
202 | .put_chars = put_chars, | 202 | .put_chars = put_chars, |
203 | .notifier_add = notifier_add_irq, | 203 | .notifier_add = notifier_add_irq, |
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index fe62bd0e17b7..21681a81cc35 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c | |||
@@ -922,7 +922,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev) | |||
922 | 922 | ||
923 | 923 | ||
924 | /* HVC operations */ | 924 | /* HVC operations */ |
925 | static struct hv_ops hvc_iucv_ops = { | 925 | static const struct hv_ops hvc_iucv_ops = { |
926 | .get_chars = hvc_iucv_get_chars, | 926 | .get_chars = hvc_iucv_get_chars, |
927 | .put_chars = hvc_iucv_put_chars, | 927 | .put_chars = hvc_iucv_put_chars, |
928 | .notifier_add = hvc_iucv_notifier_add, | 928 | .notifier_add = hvc_iucv_notifier_add, |
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c index 88590d040046..61c4a61558d9 100644 --- a/drivers/char/hvc_rtas.c +++ b/drivers/char/hvc_rtas.c | |||
@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count) | |||
71 | return i; | 71 | return i; |
72 | } | 72 | } |
73 | 73 | ||
74 | static struct hv_ops hvc_rtas_get_put_ops = { | 74 | static const struct hv_ops hvc_rtas_get_put_ops = { |
75 | .get_chars = hvc_rtas_read_console, | 75 | .get_chars = hvc_rtas_read_console, |
76 | .put_chars = hvc_rtas_write_console, | 76 | .put_chars = hvc_rtas_write_console, |
77 | }; | 77 | }; |
diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c index bd63ba878a56..b0957e61a7be 100644 --- a/drivers/char/hvc_udbg.c +++ b/drivers/char/hvc_udbg.c | |||
@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count) | |||
58 | return i; | 58 | return i; |
59 | } | 59 | } |
60 | 60 | ||
61 | static struct hv_ops hvc_udbg_ops = { | 61 | static const struct hv_ops hvc_udbg_ops = { |
62 | .get_chars = hvc_udbg_get, | 62 | .get_chars = hvc_udbg_get, |
63 | .put_chars = hvc_udbg_put, | 63 | .put_chars = hvc_udbg_put, |
64 | }; | 64 | }; |
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c index 10be343d6ae7..27370e99c66f 100644 --- a/drivers/char/hvc_vio.c +++ b/drivers/char/hvc_vio.c | |||
@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count) | |||
77 | return got; | 77 | return got; |
78 | } | 78 | } |
79 | 79 | ||
80 | static struct hv_ops hvc_get_put_ops = { | 80 | static const struct hv_ops hvc_get_put_ops = { |
81 | .get_chars = filtered_get_chars, | 81 | .get_chars = filtered_get_chars, |
82 | .put_chars = hvc_put_chars, | 82 | .put_chars = hvc_put_chars, |
83 | .notifier_add = notifier_add_irq, | 83 | .notifier_add = notifier_add_irq, |
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c index b1a71638c772..60446f82a3fc 100644 --- a/drivers/char/hvc_xen.c +++ b/drivers/char/hvc_xen.c | |||
@@ -122,7 +122,7 @@ static int read_console(uint32_t vtermno, char *buf, int len) | |||
122 | return recv; | 122 | return recv; |
123 | } | 123 | } |
124 | 124 | ||
125 | static struct hv_ops hvc_ops = { | 125 | static const struct hv_ops hvc_ops = { |
126 | .get_chars = read_console, | 126 | .get_chars = read_console, |
127 | .put_chars = write_console, | 127 | .put_chars = write_console, |
128 | .notifier_add = notifier_add_irq, | 128 | .notifier_add = notifier_add_irq, |
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 87060266ef91..6ea1014697d1 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -186,3 +186,15 @@ config HW_RANDOM_MXC_RNGA | |||
186 | module will be called mxc-rnga. | 186 | module will be called mxc-rnga. |
187 | 187 | ||
188 | If unsure, say Y. | 188 | If unsure, say Y. |
189 | |||
190 | config HW_RANDOM_NOMADIK | ||
191 | tristate "ST-Ericsson Nomadik Random Number Generator support" | ||
192 | depends on HW_RANDOM && PLAT_NOMADIK | ||
193 | ---help--- | ||
194 | This driver provides kernel-side support for the Random Number | ||
195 | Generator hardware found on ST-Ericsson SoCs (8815 and 8500). | ||
196 | |||
197 | To compile this driver as a module, choose M here: the | ||
198 | module will be called nomadik-rng. | ||
199 | |||
200 | If unsure, say Y. | ||
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 5eeb1303f0d0..4273308aa1e3 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile | |||
@@ -18,3 +18,4 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o | |||
18 | obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o | 18 | obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o |
19 | obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o | 19 | obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o |
20 | obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o | 20 | obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o |
21 | obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o | ||
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c new file mode 100644 index 000000000000..a8b4c4010144 --- /dev/null +++ b/drivers/char/hw_random/nomadik-rng.c | |||
@@ -0,0 +1,103 @@ | |||
1 | /* | ||
2 | * Nomadik RNG support | ||
3 | * Copyright 2009 Alessandro Rubini | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/amba/bus.h> | ||
16 | #include <linux/hw_random.h> | ||
17 | #include <linux/io.h> | ||
18 | |||
19 | static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) | ||
20 | { | ||
21 | void __iomem *base = (void __iomem *)rng->priv; | ||
22 | |||
23 | /* | ||
24 | * The register is 32 bits and gives 16 random bits (low half). | ||
25 | * A subsequent read will delay the core for 400ns, so we just read | ||
26 | * once and accept the very unlikely very small delay, even if wait==0. | ||
27 | */ | ||
28 | *(u16 *)data = __raw_readl(base + 8) & 0xffff; | ||
29 | return 2; | ||
30 | } | ||
31 | |||
32 | /* we have at most one RNG per machine, granted */ | ||
33 | static struct hwrng nmk_rng = { | ||
34 | .name = "nomadik", | ||
35 | .read = nmk_rng_read, | ||
36 | }; | ||
37 | |||
38 | static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id) | ||
39 | { | ||
40 | void __iomem *base; | ||
41 | int ret; | ||
42 | |||
43 | ret = amba_request_regions(dev, dev->dev.init_name); | ||
44 | if (ret) | ||
45 | return ret; | ||
46 | ret = -ENOMEM; | ||
47 | base = ioremap(dev->res.start, resource_size(&dev->res)); | ||
48 | if (!base) | ||
49 | goto out_release; | ||
50 | nmk_rng.priv = (unsigned long)base; | ||
51 | ret = hwrng_register(&nmk_rng); | ||
52 | if (ret) | ||
53 | goto out_unmap; | ||
54 | return 0; | ||
55 | |||
56 | out_unmap: | ||
57 | iounmap(base); | ||
58 | out_release: | ||
59 | amba_release_regions(dev); | ||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | static int nmk_rng_remove(struct amba_device *dev) | ||
64 | { | ||
65 | void __iomem *base = (void __iomem *)nmk_rng.priv; | ||
66 | hwrng_unregister(&nmk_rng); | ||
67 | iounmap(base); | ||
68 | amba_release_regions(dev); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static struct amba_id nmk_rng_ids[] = { | ||
73 | { | ||
74 | .id = 0x000805e1, | ||
75 | .mask = 0x000fffff, /* top bits are rev and cfg: accept all */ | ||
76 | }, | ||
77 | {0, 0}, | ||
78 | }; | ||
79 | |||
80 | static struct amba_driver nmk_rng_driver = { | ||
81 | .drv = { | ||
82 | .owner = THIS_MODULE, | ||
83 | .name = "rng", | ||
84 | }, | ||
85 | .probe = nmk_rng_probe, | ||
86 | .remove = nmk_rng_remove, | ||
87 | .id_table = nmk_rng_ids, | ||
88 | }; | ||
89 | |||
90 | static int __init nmk_rng_init(void) | ||
91 | { | ||
92 | return amba_driver_register(&nmk_rng_driver); | ||
93 | } | ||
94 | |||
95 | static void __devexit nmk_rng_exit(void) | ||
96 | { | ||
97 | amba_driver_unregister(&nmk_rng_driver); | ||
98 | } | ||
99 | |||
100 | module_init(nmk_rng_init); | ||
101 | module_exit(nmk_rng_exit); | ||
102 | |||
103 | MODULE_LICENSE("GPL"); | ||
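Once a driver such as this registers with the hwrng core, its output is normally consumed through the shared /dev/hwrng character device (for example by rngd). A hedged userspace reader, assuming that node is present and this RNG is the active one:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char buf[16];
            ssize_t n;
            int fd = open("/dev/hwrng", O_RDONLY);

            if (fd < 0) {
                    perror("/dev/hwrng");
                    return 1;
            }
            /* the core refills this from the driver, 2 bytes per hardware read here */
            n = read(fd, buf, sizeof(buf));
            for (ssize_t i = 0; i < n; i++)
                    printf("%02x", buf[i]);
            putchar('\n');
            close(fd);
            return 0;
    }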
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index a035ae39a359..213373b5f17f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -1,18 +1,6 @@ | |||
1 | /*D:300 | 1 | /* |
2 | * The Guest console driver | 2 | * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation |
3 | * | 3 | * Copyright (C) 2009, 2010 Red Hat, Inc. |
4 | * Writing console drivers is one of the few remaining Dark Arts in Linux. | ||
5 | * Fortunately for us, the path of virtual consoles has been well-trodden by | ||
6 | * the PowerPC folks, who wrote "hvc_console.c" to generically support any | ||
7 | * virtual console. We use that infrastructure which only requires us to write | ||
8 | * the basic put_chars and get_chars functions and call the right register | ||
9 | * functions. | ||
10 | :*/ | ||
11 | |||
12 | /*M:002 The console can be flooded: while the Guest is processing input the | ||
13 | * Host can send more. Buffering in the Host could alleviate this, but it is a | ||
14 | * difficult problem in general. :*/ | ||
15 | /* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation | ||
16 | * | 4 | * |
17 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
18 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -28,142 +16,694 @@ | |||
28 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
29 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
30 | */ | 18 | */ |
19 | #include <linux/cdev.h> | ||
20 | #include <linux/debugfs.h> | ||
21 | #include <linux/device.h> | ||
31 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/fs.h> | ||
32 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/list.h> | ||
26 | #include <linux/poll.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/spinlock.h> | ||
33 | #include <linux/virtio.h> | 29 | #include <linux/virtio.h> |
34 | #include <linux/virtio_console.h> | 30 | #include <linux/virtio_console.h> |
31 | #include <linux/wait.h> | ||
32 | #include <linux/workqueue.h> | ||
35 | #include "hvc_console.h" | 33 | #include "hvc_console.h" |
36 | 34 | ||
37 | /*D:340 These represent our input and output console queues, and the virtio | 35 | /* |
38 | * operations for them. */ | 36 | * This is a global struct for storing common data for all the devices |
39 | static struct virtqueue *in_vq, *out_vq; | 37 | * this driver handles. |
40 | static struct virtio_device *vdev; | 38 | * |
39 | * Mainly, it has a linked list for all the consoles in one place so | ||
40 | * that callbacks from hvc for get_chars(), put_chars() work properly | ||
41 | * across multiple devices and multiple ports per device. | ||
42 | */ | ||
43 | struct ports_driver_data { | ||
44 | /* Used for registering chardevs */ | ||
45 | struct class *class; | ||
46 | |||
47 | /* Used for exporting per-port information to debugfs */ | ||
48 | struct dentry *debugfs_dir; | ||
49 | |||
50 | /* Number of devices this driver is handling */ | ||
51 | unsigned int index; | ||
52 | |||
53 | /* | ||
54 | * This is used to keep track of the number of hvc consoles | ||
55 | * spawned by this driver. This number is given as the first | ||
56 | * argument to hvc_alloc(). To correctly map an initial | ||
57 | * console spawned via hvc_instantiate to the console being | ||
58 | * hooked up via hvc_alloc, we need to pass the same vtermno. | ||
59 | * | ||
60 | * We also just assume the first console being initialised was | ||
61 | * the first one that got used as the initial console. | ||
62 | */ | ||
63 | unsigned int next_vtermno; | ||
64 | |||
65 | /* All the console devices handled by this driver */ | ||
66 | struct list_head consoles; | ||
67 | }; | ||
68 | static struct ports_driver_data pdrvdata; | ||
69 | |||
70 | DEFINE_SPINLOCK(pdrvdata_lock); | ||
71 | |||
72 | /* This struct holds information that's relevant only for console ports */ | ||
73 | struct console { | ||
74 | /* We'll place all consoles in a list in the pdrvdata struct */ | ||
75 | struct list_head list; | ||
76 | |||
77 | /* The hvc device associated with this console port */ | ||
78 | struct hvc_struct *hvc; | ||
79 | |||
80 | /* | ||
81 | * This number identifies the number that we used to register | ||
82 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the | ||
83 | * number passed on by the hvc callbacks to us to | ||
84 | * differentiate between the other console ports handled by | ||
85 | * this driver | ||
86 | */ | ||
87 | u32 vtermno; | ||
88 | }; | ||
89 | |||
90 | struct port_buffer { | ||
91 | char *buf; | ||
92 | |||
93 | /* size of the buffer in *buf above */ | ||
94 | size_t size; | ||
95 | |||
96 | /* used length of the buffer */ | ||
97 | size_t len; | ||
98 | /* offset in the buf from which to consume data */ | ||
99 | size_t offset; | ||
100 | }; | ||
101 | |||
102 | /* | ||
103 | * This is a per-device struct that stores data common to all the | ||
104 | * ports for that device (vdev->priv). | ||
105 | */ | ||
106 | struct ports_device { | ||
107 | /* | ||
108 | * Workqueue handlers where we process deferred work after | ||
109 | * notification | ||
110 | */ | ||
111 | struct work_struct control_work; | ||
112 | struct work_struct config_work; | ||
113 | |||
114 | struct list_head ports; | ||
115 | |||
116 | /* To protect the list of ports */ | ||
117 | spinlock_t ports_lock; | ||
118 | |||
119 | /* To protect the vq operations for the control channel */ | ||
120 | spinlock_t cvq_lock; | ||
121 | |||
122 | /* The current config space is stored here */ | ||
123 | struct virtio_console_config config; | ||
124 | |||
125 | /* The virtio device we're associated with */ | ||
126 | struct virtio_device *vdev; | ||
127 | |||
128 | /* | ||
129 | * A couple of virtqueues for the control channel: one for | ||
130 | * guest->host transfers, one for host->guest transfers | ||
131 | */ | ||
132 | struct virtqueue *c_ivq, *c_ovq; | ||
133 | |||
134 | /* Array of per-port IO virtqueues */ | ||
135 | struct virtqueue **in_vqs, **out_vqs; | ||
136 | |||
137 | /* Used for numbering devices for sysfs and debugfs */ | ||
138 | unsigned int drv_index; | ||
139 | |||
140 | /* Major number for this device. Ports will be created as minors. */ | ||
141 | int chr_major; | ||
142 | }; | ||
143 | |||
144 | /* This struct holds the per-port data */ | ||
145 | struct port { | ||
146 | /* Next port in the list, head is in the ports_device */ | ||
147 | struct list_head list; | ||
148 | |||
149 | /* Pointer to the parent virtio_console device */ | ||
150 | struct ports_device *portdev; | ||
151 | |||
152 | /* The current buffer from which data has to be fed to readers */ | ||
153 | struct port_buffer *inbuf; | ||
154 | |||
155 | /* | ||
156 | * To protect the operations on the in_vq associated with this | ||
157 | * port. Has to be a spinlock because it can be called from | ||
158 | * interrupt context (get_char()). | ||
159 | */ | ||
160 | spinlock_t inbuf_lock; | ||
161 | |||
162 | /* The IO vqs for this port */ | ||
163 | struct virtqueue *in_vq, *out_vq; | ||
164 | |||
165 | /* File in the debugfs directory that exposes this port's information */ | ||
166 | struct dentry *debugfs_file; | ||
167 | |||
168 | /* | ||
169 | * The entries in this struct will be valid if this port is | ||
170 | * hooked up to an hvc console | ||
171 | */ | ||
172 | struct console cons; | ||
173 | |||
174 | /* Each port associates with a separate char device */ | ||
175 | struct cdev cdev; | ||
176 | struct device *dev; | ||
177 | |||
178 | /* A waitqueue for poll() or blocking read operations */ | ||
179 | wait_queue_head_t waitqueue; | ||
180 | |||
181 | /* The 'name' of the port that we expose via sysfs properties */ | ||
182 | char *name; | ||
183 | |||
184 | /* The 'id' to identify the port with the Host */ | ||
185 | u32 id; | ||
186 | |||
187 | /* Is the host device open */ | ||
188 | bool host_connected; | ||
189 | |||
190 | /* We should allow only one process to open a port */ | ||
191 | bool guest_connected; | ||
192 | }; | ||
193 | |||
194 | /* This is the very early arch-specified put chars function. */ | ||
195 | static int (*early_put_chars)(u32, const char *, int); | ||
196 | |||
197 | static struct port *find_port_by_vtermno(u32 vtermno) | ||
198 | { | ||
199 | struct port *port; | ||
200 | struct console *cons; | ||
201 | unsigned long flags; | ||
202 | |||
203 | spin_lock_irqsave(&pdrvdata_lock, flags); | ||
204 | list_for_each_entry(cons, &pdrvdata.consoles, list) { | ||
205 | if (cons->vtermno == vtermno) { | ||
206 | port = container_of(cons, struct port, cons); | ||
207 | goto out; | ||
208 | } | ||
209 | } | ||
210 | port = NULL; | ||
211 | out: | ||
212 | spin_unlock_irqrestore(&pdrvdata_lock, flags); | ||
213 | return port; | ||
214 | } | ||
215 | |||
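find_port_by_vtermno() above uses container_of() to step from the embedded struct console back to the struct port that contains it. For readers unfamiliar with the idiom, here is a self-contained userspace equivalent (the macro mirrors the kernel definition in spirit; the structs are invented for the demo):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct console {
            unsigned int vtermno;
    };

    struct port {
            int id;
            struct console cons;    /* embedded member, like port->cons in the driver */
    };

    int main(void)
    {
            struct port p = { .id = 7, .cons = { .vtermno = 3 } };
            struct console *cons = &p.cons;         /* all we have is the inner pointer */
            struct port *port = container_of(cons, struct port, cons);

            printf("recovered port id %d for vtermno %u\n", port->id, cons->vtermno);
            return 0;
    }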
216 | static struct port *find_port_by_id(struct ports_device *portdev, u32 id) | ||
217 | { | ||
218 | struct port *port; | ||
219 | unsigned long flags; | ||
220 | |||
221 | spin_lock_irqsave(&portdev->ports_lock, flags); | ||
222 | list_for_each_entry(port, &portdev->ports, list) | ||
223 | if (port->id == id) | ||
224 | goto out; | ||
225 | port = NULL; | ||
226 | out: | ||
227 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | ||
228 | |||
229 | return port; | ||
230 | } | ||
231 | |||
232 | static struct port *find_port_by_vq(struct ports_device *portdev, | ||
233 | struct virtqueue *vq) | ||
234 | { | ||
235 | struct port *port; | ||
236 | unsigned long flags; | ||
237 | |||
238 | spin_lock_irqsave(&portdev->ports_lock, flags); | ||
239 | list_for_each_entry(port, &portdev->ports, list) | ||
240 | if (port->in_vq == vq || port->out_vq == vq) | ||
241 | goto out; | ||
242 | port = NULL; | ||
243 | out: | ||
244 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | ||
245 | return port; | ||
246 | } | ||
247 | |||
248 | static bool is_console_port(struct port *port) | ||
249 | { | ||
250 | if (port->cons.hvc) | ||
251 | return true; | ||
252 | return false; | ||
253 | } | ||
254 | |||
255 | static inline bool use_multiport(struct ports_device *portdev) | ||
256 | { | ||
257 | /* | ||
258 | * This condition can be true when put_chars is called from | ||
259 | * early_init | ||
260 | */ | ||
261 | if (!portdev->vdev) | ||
262 | return 0; | ||
263 | return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); | ||
264 | } | ||
41 | 265 | ||
42 | /* This is our input buffer, and how much data is left in it. */ | 266 | static void free_buf(struct port_buffer *buf) |
43 | static unsigned int in_len; | 267 | { |
44 | static char *in, *inbuf; | 268 | kfree(buf->buf); |
269 | kfree(buf); | ||
270 | } | ||
271 | |||
272 | static struct port_buffer *alloc_buf(size_t buf_size) | ||
273 | { | ||
274 | struct port_buffer *buf; | ||
45 | 275 | ||
46 | /* The operations for our console. */ | 276 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); |
47 | static struct hv_ops virtio_cons; | 277 | if (!buf) |
278 | goto fail; | ||
279 | buf->buf = kzalloc(buf_size, GFP_KERNEL); | ||
280 | if (!buf->buf) | ||
281 | goto free_buf; | ||
282 | buf->len = 0; | ||
283 | buf->offset = 0; | ||
284 | buf->size = buf_size; | ||
285 | return buf; | ||
286 | |||
287 | free_buf: | ||
288 | kfree(buf); | ||
289 | fail: | ||
290 | return NULL; | ||
291 | } | ||
292 | |||
293 | /* Callers should take appropriate locks */ | ||
294 | static void *get_inbuf(struct port *port) | ||
295 | { | ||
296 | struct port_buffer *buf; | ||
297 | struct virtqueue *vq; | ||
298 | unsigned int len; | ||
48 | 299 | ||
49 | /* The hvc device */ | 300 | vq = port->in_vq; |
50 | static struct hvc_struct *hvc; | 301 | buf = vq->vq_ops->get_buf(vq, &len); |
302 | if (buf) { | ||
303 | buf->len = len; | ||
304 | buf->offset = 0; | ||
305 | } | ||
306 | return buf; | ||
307 | } | ||
51 | 308 | ||
52 | /*D:310 The put_chars() callback is pretty straightforward. | 309 | /* |
310 | * Create a scatter-gather list representing our input buffer and put | ||
311 | * it in the queue. | ||
53 | * | 312 | * |
54 | * We turn the characters into a scatter-gather list, add it to the output | 313 | * Callers should take appropriate locks. |
55 | * queue and then kick the Host. Then we sit here waiting for it to finish: | 314 | */ |
56 | * inefficient in theory, but in practice implementations will do it | 315 | static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) |
57 | * immediately (lguest's Launcher does). */ | ||
58 | static int put_chars(u32 vtermno, const char *buf, int count) | ||
59 | { | 316 | { |
60 | struct scatterlist sg[1]; | 317 | struct scatterlist sg[1]; |
318 | int ret; | ||
319 | |||
320 | sg_init_one(sg, buf->buf, buf->size); | ||
321 | |||
322 | ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); | ||
323 | vq->vq_ops->kick(vq); | ||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | /* Discard any unread data this port has. Callers should take appropriate locks. */ | ||
328 | static void discard_port_data(struct port *port) | ||
329 | { | ||
330 | struct port_buffer *buf; | ||
331 | struct virtqueue *vq; | ||
61 | unsigned int len; | 332 | unsigned int len; |
333 | int ret; | ||
62 | 334 | ||
63 | /* This is a convenient routine to initialize a single-elem sg list */ | 335 | vq = port->in_vq; |
64 | sg_init_one(sg, buf, count); | 336 | if (port->inbuf) |
337 | buf = port->inbuf; | ||
338 | else | ||
339 | buf = vq->vq_ops->get_buf(vq, &len); | ||
65 | 340 | ||
66 | /* add_buf wants a token to identify this buffer: we hand it any | 341 | ret = 0; |
67 | * non-NULL pointer, since there's only ever one buffer. */ | 342 | while (buf) { |
68 | if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) { | 343 | if (add_inbuf(vq, buf) < 0) { |
69 | /* Tell Host to go! */ | 344 | ret++; |
70 | out_vq->vq_ops->kick(out_vq); | 345 | free_buf(buf); |
71 | /* Chill out until it's done with the buffer. */ | 346 | } |
72 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | 347 | buf = vq->vq_ops->get_buf(vq, &len); |
73 | cpu_relax(); | ||
74 | } | 348 | } |
349 | port->inbuf = NULL; | ||
350 | if (ret) | ||
351 | dev_warn(port->dev, "Errors adding %d buffers back to vq\n", | ||
352 | ret); | ||
353 | } | ||
75 | 354 | ||
76 | /* We're expected to return the amount of data we wrote: all of it. */ | 355 | static bool port_has_data(struct port *port) |
77 | return count; | 356 | { |
357 | unsigned long flags; | ||
358 | bool ret; | ||
359 | |||
360 | spin_lock_irqsave(&port->inbuf_lock, flags); | ||
361 | if (port->inbuf) { | ||
362 | ret = true; | ||
363 | goto out; | ||
364 | } | ||
365 | port->inbuf = get_inbuf(port); | ||
366 | if (port->inbuf) { | ||
367 | ret = true; | ||
368 | goto out; | ||
369 | } | ||
370 | ret = false; | ||
371 | out: | ||
372 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | ||
373 | return ret; | ||
78 | } | 374 | } |
79 | 375 | ||
80 | /* Create a scatter-gather list representing our input buffer and put it in the | 376 | static ssize_t send_control_msg(struct port *port, unsigned int event, |
81 | * queue. */ | 377 | unsigned int value) |
82 | static void add_inbuf(void) | ||
83 | { | 378 | { |
84 | struct scatterlist sg[1]; | 379 | struct scatterlist sg[1]; |
85 | sg_init_one(sg, inbuf, PAGE_SIZE); | 380 | struct virtio_console_control cpkt; |
381 | struct virtqueue *vq; | ||
382 | int len; | ||
383 | |||
384 | if (!use_multiport(port->portdev)) | ||
385 | return 0; | ||
386 | |||
387 | cpkt.id = port->id; | ||
388 | cpkt.event = event; | ||
389 | cpkt.value = value; | ||
390 | |||
391 | vq = port->portdev->c_ovq; | ||
86 | 392 | ||
87 | /* We should always be able to add one buffer to an empty queue. */ | 393 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
88 | if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0) | 394 | if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { |
89 | BUG(); | 395 | vq->vq_ops->kick(vq); |
90 | in_vq->vq_ops->kick(in_vq); | 396 | while (!vq->vq_ops->get_buf(vq, &len)) |
397 | cpu_relax(); | ||
398 | } | ||
399 | return 0; | ||
91 | } | 400 | } |
92 | 401 | ||
93 | /*D:350 get_chars() is the callback from the hvc_console infrastructure when | 402 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) |
94 | * an interrupt is received. | ||
95 | * | ||
96 | * Most of the code deals with the fact that the hvc_console() infrastructure | ||
97 | * only asks us for 16 bytes at a time. We keep in_offset and in_used fields | ||
98 | * for partially-filled buffers. */ | ||
99 | static int get_chars(u32 vtermno, char *buf, int count) | ||
100 | { | 403 | { |
101 | /* If we don't have an input queue yet, we can't get input. */ | 404 | struct scatterlist sg[1]; |
102 | BUG_ON(!in_vq); | 405 | struct virtqueue *out_vq; |
406 | ssize_t ret; | ||
407 | unsigned int len; | ||
408 | |||
409 | out_vq = port->out_vq; | ||
410 | |||
411 | sg_init_one(sg, in_buf, in_count); | ||
412 | ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); | ||
413 | |||
414 | /* Tell Host to go! */ | ||
415 | out_vq->vq_ops->kick(out_vq); | ||
416 | |||
417 | if (ret < 0) { | ||
418 | len = 0; | ||
419 | goto fail; | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * Wait till the host acknowledges it pushed out the data we | ||
424 | * sent. Also ensure we return to userspace the number of | ||
425 | * bytes that were successfully consumed by the host. | ||
426 | */ | ||
427 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | ||
428 | cpu_relax(); | ||
429 | fail: | ||
430 | /* We're expected to return the amount of data we wrote */ | ||
431 | return len; | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * Give out the data that's requested from the buffer that we have | ||
436 | * queued up. | ||
437 | */ | ||
438 | static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, | ||
439 | bool to_user) | ||
440 | { | ||
441 | struct port_buffer *buf; | ||
442 | unsigned long flags; | ||
443 | |||
444 | if (!out_count || !port_has_data(port)) | ||
445 | return 0; | ||
446 | |||
447 | buf = port->inbuf; | ||
448 | out_count = min(out_count, buf->len - buf->offset); | ||
449 | |||
450 | if (to_user) { | ||
451 | ssize_t ret; | ||
452 | |||
453 | ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); | ||
454 | if (ret) | ||
455 | return -EFAULT; | ||
456 | } else { | ||
457 | memcpy(out_buf, buf->buf + buf->offset, out_count); | ||
458 | } | ||
459 | |||
460 | buf->offset += out_count; | ||
461 | |||
462 | if (buf->offset == buf->len) { | ||
463 | /* | ||
464 | * We're done using all the data in this buffer. | ||
465 | * Re-queue so that the Host can send us more data. | ||
466 | */ | ||
467 | spin_lock_irqsave(&port->inbuf_lock, flags); | ||
468 | port->inbuf = NULL; | ||
469 | |||
470 | if (add_inbuf(port->in_vq, buf) < 0) | ||
471 | dev_warn(port->dev, "failed add_buf\n"); | ||
472 | |||
473 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | ||
474 | } | ||
475 | /* Return the number of bytes actually copied */ | ||
476 | return out_count; | ||
477 | } | ||
103 | 478 | ||
104 | /* No buffer? Try to get one. */ | 479 | /* The condition that must be true for polling to end */ |
105 | if (!in_len) { | 480 | static bool wait_is_over(struct port *port) |
106 | in = in_vq->vq_ops->get_buf(in_vq, &in_len); | 481 | { |
107 | if (!in) | 482 | return port_has_data(port) || !port->host_connected; |
483 | } | ||
484 | |||
485 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | ||
486 | size_t count, loff_t *offp) | ||
487 | { | ||
488 | struct port *port; | ||
489 | ssize_t ret; | ||
490 | |||
491 | port = filp->private_data; | ||
492 | |||
493 | if (!port_has_data(port)) { | ||
494 | /* | ||
495 | * If nothing's connected on the host side, return 0 | ||
496 | * right away; this tells the userspace app that | ||
497 | * there's no connection | ||
498 | */ | ||
499 | if (!port->host_connected) | ||
108 | return 0; | 500 | return 0; |
501 | if (filp->f_flags & O_NONBLOCK) | ||
502 | return -EAGAIN; | ||
503 | |||
504 | ret = wait_event_interruptible(port->waitqueue, | ||
505 | wait_is_over(port)); | ||
506 | if (ret < 0) | ||
507 | return ret; | ||
508 | } | ||
509 | /* | ||
510 | * We could've received a disconnection message while we were | ||
511 | * waiting for more data. | ||
512 | * | ||
513 | * This check is not folded into the if() statement above: we | ||
514 | * might have received some data and also had the host | ||
515 | * disconnect after we were woken up from our wait. So we | ||
516 | * really want to hand out whatever data we have and only | ||
517 | * then check for host_connected. | ||
518 | */ | ||
519 | if (!port_has_data(port) && !port->host_connected) | ||
520 | return 0; | ||
521 | |||
522 | return fill_readbuf(port, ubuf, count, true); | ||
523 | } | ||
524 | |||
525 | static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | ||
526 | size_t count, loff_t *offp) | ||
527 | { | ||
528 | struct port *port; | ||
529 | char *buf; | ||
530 | ssize_t ret; | ||
531 | |||
532 | port = filp->private_data; | ||
533 | |||
534 | count = min((size_t)(32 * 1024), count); | ||
535 | |||
536 | buf = kmalloc(count, GFP_KERNEL); | ||
537 | if (!buf) | ||
538 | return -ENOMEM; | ||
539 | |||
540 | ret = copy_from_user(buf, ubuf, count); | ||
541 | if (ret) { | ||
542 | ret = -EFAULT; | ||
543 | goto free_buf; | ||
109 | } | 544 | } |
110 | 545 | ||
111 | /* You want more than we have to give? Well, try wanting less! */ | 546 | ret = send_buf(port, buf, count); |
112 | if (in_len < count) | 547 | free_buf: |
113 | count = in_len; | 548 | kfree(buf); |
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | ||
553 | { | ||
554 | struct port *port; | ||
555 | unsigned int ret; | ||
556 | |||
557 | port = filp->private_data; | ||
558 | poll_wait(filp, &port->waitqueue, wait); | ||
559 | |||
560 | ret = 0; | ||
561 | if (port->inbuf) | ||
562 | ret |= POLLIN | POLLRDNORM; | ||
563 | if (port->host_connected) | ||
564 | ret |= POLLOUT; | ||
565 | if (!port->host_connected) | ||
566 | ret |= POLLHUP; | ||
567 | |||
568 | return ret; | ||
569 | } | ||
570 | |||
571 | static int port_fops_release(struct inode *inode, struct file *filp) | ||
572 | { | ||
573 | struct port *port; | ||
574 | |||
575 | port = filp->private_data; | ||
576 | |||
577 | /* Notify host of port being closed */ | ||
578 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | ||
579 | |||
580 | spin_lock_irq(&port->inbuf_lock); | ||
581 | port->guest_connected = false; | ||
582 | |||
583 | discard_port_data(port); | ||
584 | |||
585 | spin_unlock_irq(&port->inbuf_lock); | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int port_fops_open(struct inode *inode, struct file *filp) | ||
591 | { | ||
592 | struct cdev *cdev = inode->i_cdev; | ||
593 | struct port *port; | ||
594 | |||
595 | port = container_of(cdev, struct port, cdev); | ||
596 | filp->private_data = port; | ||
597 | |||
598 | /* | ||
599 | * Don't allow opening of console port devices -- that's done | ||
600 | * via /dev/hvc | ||
601 | */ | ||
602 | if (is_console_port(port)) | ||
603 | return -ENXIO; | ||
604 | |||
605 | /* Allow only one process to open a particular port at a time */ | ||
606 | spin_lock_irq(&port->inbuf_lock); | ||
607 | if (port->guest_connected) { | ||
608 | spin_unlock_irq(&port->inbuf_lock); | ||
609 | return -EMFILE; | ||
610 | } | ||
114 | 611 | ||
115 | /* Copy across to their buffer and increment offset. */ | 612 | port->guest_connected = true; |
116 | memcpy(buf, in, count); | 613 | spin_unlock_irq(&port->inbuf_lock); |
117 | in += count; | ||
118 | in_len -= count; | ||
119 | 614 | ||
120 | /* Finished? Re-register buffer so Host will use it again. */ | 615 | /* Notify host of port being opened */ |
121 | if (in_len == 0) | 616 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); |
122 | add_inbuf(); | ||
123 | 617 | ||
124 | return count; | 618 | return 0; |
125 | } | 619 | } |
126 | /*:*/ | ||
127 | 620 | ||
128 | /*D:320 Console drivers are initialized very early so boot messages can go out, | 621 | /* |
129 | * so we do things slightly differently from the generic virtio initialization | 622 | * The file operations that we support: programs in the guest can open |
130 | * of the net and block drivers. | 623 | * a console device, read from it, write to it, poll for data and |
624 | * close it. The devices are at | ||
625 | * /dev/vport<device number>p<port number> | ||
626 | */ | ||
627 | static const struct file_operations port_fops = { | ||
628 | .owner = THIS_MODULE, | ||
629 | .open = port_fops_open, | ||
630 | .read = port_fops_read, | ||
631 | .write = port_fops_write, | ||
632 | .poll = port_fops_poll, | ||
633 | .release = port_fops_release, | ||
634 | }; | ||
635 | |||
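As a rough illustration of the interface these file operations expose, a guest program could drive one of the generic (non-console) ports along the following lines; the node name /dev/vport0p1 and the message contents are assumptions for this sketch, not part of the patch itself.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char reply[4096];
        ssize_t n;
        int fd;

        /* Only one process may have a given port open at a time. */
        fd = open("/dev/vport0p1", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* write() hands the data to the host via the port's output virtqueue. */
        if (write(fd, "hello host\n", 11) < 0)
                perror("write");

        /*
         * read() blocks until the host sends data; it returns 0 once the
         * host side is disconnected.
         */
        n = read(fd, reply, sizeof(reply));
        if (n > 0)
                printf("received %zd bytes from the host\n", n);

        close(fd);
        return 0;
}
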
636 | /* | ||
637 | * The put_chars() callback is pretty straightforward. | ||
131 | * | 638 | * |
132 | * At this stage, the console is output-only. It's too early to set up a | 639 | * We turn the characters into a scatter-gather list, add it to the |
133 | * virtqueue, so we let the drivers do some boutique early-output thing. */ | 640 | * output queue and then kick the Host. Then we sit here waiting for |
134 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) | 641 | * it to finish: inefficient in theory, but in practice |
642 | * implementations will do it immediately (lguest's Launcher does). | ||
643 | */ | ||
644 | static int put_chars(u32 vtermno, const char *buf, int count) | ||
135 | { | 645 | { |
136 | virtio_cons.put_chars = put_chars; | 646 | struct port *port; |
137 | return hvc_instantiate(0, 0, &virtio_cons); | 647 | |
648 | port = find_port_by_vtermno(vtermno); | ||
649 | if (!port) | ||
650 | return 0; | ||
651 | |||
652 | if (unlikely(early_put_chars)) | ||
653 | return early_put_chars(vtermno, buf, count); | ||
654 | |||
655 | return send_buf(port, (void *)buf, count); | ||
138 | } | 656 | } |
139 | 657 | ||
140 | /* | 658 | /* |
141 | * virtio console configuration. This supports: | 659 | * get_chars() is the callback from the hvc_console infrastructure |
142 | * - console resize | 660 | * when an interrupt is received. |
661 | * | ||
662 | * We call out to fill_readbuf that gets us the required data from the | ||
663 | * buffers that are queued up. | ||
143 | */ | 664 | */ |
144 | static void virtcons_apply_config(struct virtio_device *dev) | 665 | static int get_chars(u32 vtermno, char *buf, int count) |
145 | { | 666 | { |
667 | struct port *port; | ||
668 | |||
669 | port = find_port_by_vtermno(vtermno); | ||
670 | if (!port) | ||
671 | return 0; | ||
672 | |||
673 | /* If we don't have an input queue yet, we can't get input. */ | ||
674 | BUG_ON(!port->in_vq); | ||
675 | |||
676 | return fill_readbuf(port, buf, count, false); | ||
677 | } | ||
678 | |||
679 | static void resize_console(struct port *port) | ||
680 | { | ||
681 | struct virtio_device *vdev; | ||
146 | struct winsize ws; | 682 | struct winsize ws; |
147 | 683 | ||
148 | if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) { | 684 | vdev = port->portdev->vdev; |
149 | dev->config->get(dev, | 685 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { |
150 | offsetof(struct virtio_console_config, cols), | 686 | vdev->config->get(vdev, |
151 | &ws.ws_col, sizeof(u16)); | 687 | offsetof(struct virtio_console_config, cols), |
152 | dev->config->get(dev, | 688 | &ws.ws_col, sizeof(u16)); |
153 | offsetof(struct virtio_console_config, rows), | 689 | vdev->config->get(vdev, |
154 | &ws.ws_row, sizeof(u16)); | 690 | offsetof(struct virtio_console_config, rows), |
155 | hvc_resize(hvc, ws); | 691 | &ws.ws_row, sizeof(u16)); |
692 | hvc_resize(port->cons.hvc, ws); | ||
156 | } | 693 | } |
157 | } | 694 | } |
158 | 695 | ||
159 | /* | 696 | /* We set the configuration at this point, since we now have a tty */ |
160 | * we support only one console, the hvc struct is a global var | ||
161 | * We set the configuration at this point, since we now have a tty | ||
162 | */ | ||
163 | static int notifier_add_vio(struct hvc_struct *hp, int data) | 697 | static int notifier_add_vio(struct hvc_struct *hp, int data) |
164 | { | 698 | { |
699 | struct port *port; | ||
700 | |||
701 | port = find_port_by_vtermno(hp->vtermno); | ||
702 | if (!port) | ||
703 | return -EINVAL; | ||
704 | |||
165 | hp->irq_requested = 1; | 705 | hp->irq_requested = 1; |
166 | virtcons_apply_config(vdev); | 706 | resize_console(port); |
167 | 707 | ||
168 | return 0; | 708 | return 0; |
169 | } | 709 | } |
@@ -173,79 +713,797 @@ static void notifier_del_vio(struct hvc_struct *hp, int data) | |||
173 | hp->irq_requested = 0; | 713 | hp->irq_requested = 0; |
174 | } | 714 | } |
175 | 715 | ||
176 | static void hvc_handle_input(struct virtqueue *vq) | 716 | /* The operations for console ports. */ |
717 | static const struct hv_ops hv_ops = { | ||
718 | .get_chars = get_chars, | ||
719 | .put_chars = put_chars, | ||
720 | .notifier_add = notifier_add_vio, | ||
721 | .notifier_del = notifier_del_vio, | ||
722 | .notifier_hangup = notifier_del_vio, | ||
723 | }; | ||
724 | |||
725 | /* | ||
726 | * Console drivers are initialized very early so boot messages can go | ||
727 | * out, so we do things slightly differently from the generic virtio | ||
728 | * initialization of the net and block drivers. | ||
729 | * | ||
730 | * At this stage, the console is output-only. It's too early to set | ||
731 | * up a virtqueue, so we let the drivers do some boutique early-output | ||
732 | * thing. | ||
733 | */ | ||
734 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) | ||
177 | { | 735 | { |
178 | if (hvc_poll(hvc)) | 736 | early_put_chars = put_chars; |
737 | return hvc_instantiate(0, 0, &hv_ops); | ||
738 | } | ||
739 | |||
740 | int init_port_console(struct port *port) | ||
741 | { | ||
742 | int ret; | ||
743 | |||
744 | /* | ||
745 | * The Host's telling us this port is a console port. Hook it | ||
746 | * up with an hvc console. | ||
747 | * | ||
748 | * To set up and manage our virtual console, we call | ||
749 | * hvc_alloc(). | ||
750 | * | ||
751 | * The first argument of hvc_alloc() is the virtual console | ||
752 | * number. The second argument is the parameter for the | ||
753 | * notification mechanism (like irq number). We currently | ||
754 | * leave this as zero, virtqueues have implicit notifications. | ||
755 | * | ||
756 | * The third argument is a "struct hv_ops" containing the | ||
757 | * put_chars() get_chars(), notifier_add() and notifier_del() | ||
758 | * pointers. The final argument is the output buffer size: we | ||
759 | * can do any size, so we put PAGE_SIZE here. | ||
760 | */ | ||
761 | port->cons.vtermno = pdrvdata.next_vtermno; | ||
762 | |||
763 | port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); | ||
764 | if (IS_ERR(port->cons.hvc)) { | ||
765 | ret = PTR_ERR(port->cons.hvc); | ||
766 | dev_err(port->dev, | ||
767 | "error %d allocating hvc for port\n", ret); | ||
768 | port->cons.hvc = NULL; | ||
769 | return ret; | ||
770 | } | ||
771 | spin_lock_irq(&pdrvdata_lock); | ||
772 | pdrvdata.next_vtermno++; | ||
773 | list_add_tail(&port->cons.list, &pdrvdata.consoles); | ||
774 | spin_unlock_irq(&pdrvdata_lock); | ||
775 | port->guest_connected = true; | ||
776 | |||
777 | /* Notify host of port being opened */ | ||
778 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); | ||
779 | |||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static ssize_t show_port_name(struct device *dev, | ||
784 | struct device_attribute *attr, char *buffer) | ||
785 | { | ||
786 | struct port *port; | ||
787 | |||
788 | port = dev_get_drvdata(dev); | ||
789 | |||
790 | return sprintf(buffer, "%s\n", port->name); | ||
791 | } | ||
792 | |||
793 | static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); | ||
794 | |||
795 | static struct attribute *port_sysfs_entries[] = { | ||
796 | &dev_attr_name.attr, | ||
797 | NULL | ||
798 | }; | ||
799 | |||
800 | static struct attribute_group port_attribute_group = { | ||
801 | .name = NULL, /* put in device directory */ | ||
802 | .attrs = port_sysfs_entries, | ||
803 | }; | ||
804 | |||
805 | static int debugfs_open(struct inode *inode, struct file *filp) | ||
806 | { | ||
807 | filp->private_data = inode->i_private; | ||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | static ssize_t debugfs_read(struct file *filp, char __user *ubuf, | ||
812 | size_t count, loff_t *offp) | ||
813 | { | ||
814 | struct port *port; | ||
815 | char *buf; | ||
816 | ssize_t ret, out_offset, out_count; | ||
817 | |||
818 | out_count = 1024; | ||
819 | buf = kmalloc(out_count, GFP_KERNEL); | ||
820 | if (!buf) | ||
821 | return -ENOMEM; | ||
822 | |||
823 | port = filp->private_data; | ||
824 | out_offset = 0; | ||
825 | out_offset += snprintf(buf + out_offset, out_count, | ||
826 | "name: %s\n", port->name ? port->name : ""); | ||
827 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
828 | "guest_connected: %d\n", port->guest_connected); | ||
829 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
830 | "host_connected: %d\n", port->host_connected); | ||
831 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
832 | "is_console: %s\n", | ||
833 | is_console_port(port) ? "yes" : "no"); | ||
834 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
835 | "console_vtermno: %u\n", port->cons.vtermno); | ||
836 | |||
837 | ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); | ||
838 | kfree(buf); | ||
839 | return ret; | ||
840 | } | ||
841 | |||
842 | static const struct file_operations port_debugfs_ops = { | ||
843 | .owner = THIS_MODULE, | ||
844 | .open = debugfs_open, | ||
845 | .read = debugfs_read, | ||
846 | }; | ||
847 | |||
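With debugfs mounted at the usual /sys/kernel/debug, reading one of these per-port files (vport0p0 here, under the virtio-ports directory created at module init) would give output roughly like the following; the values are illustrative only:

    name: foo
    guest_connected: 1
    host_connected: 1
    is_console: yes
    console_vtermno: 0
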
848 | /* Remove all port-specific data. */ | ||
849 | static int remove_port(struct port *port) | ||
850 | { | ||
851 | struct port_buffer *buf; | ||
852 | |||
853 | spin_lock_irq(&port->portdev->ports_lock); | ||
854 | list_del(&port->list); | ||
855 | spin_unlock_irq(&port->portdev->ports_lock); | ||
856 | |||
857 | if (is_console_port(port)) { | ||
858 | spin_lock_irq(&pdrvdata_lock); | ||
859 | list_del(&port->cons.list); | ||
860 | spin_unlock_irq(&pdrvdata_lock); | ||
861 | hvc_remove(port->cons.hvc); | ||
862 | } | ||
863 | if (port->guest_connected) | ||
864 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | ||
865 | |||
866 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
867 | device_destroy(pdrvdata.class, port->dev->devt); | ||
868 | cdev_del(&port->cdev); | ||
869 | |||
870 | /* Remove unused data this port might have received. */ | ||
871 | discard_port_data(port); | ||
872 | |||
873 | /* Remove buffers we queued up for the Host to send us data in. */ | ||
874 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | ||
875 | free_buf(buf); | ||
876 | |||
877 | kfree(port->name); | ||
878 | |||
879 | debugfs_remove(port->debugfs_file); | ||
880 | |||
881 | kfree(port); | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | /* Any private messages that the Host and Guest want to share */ | ||
886 | static void handle_control_message(struct ports_device *portdev, | ||
887 | struct port_buffer *buf) | ||
888 | { | ||
889 | struct virtio_console_control *cpkt; | ||
890 | struct port *port; | ||
891 | size_t name_size; | ||
892 | int err; | ||
893 | |||
894 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); | ||
895 | |||
896 | port = find_port_by_id(portdev, cpkt->id); | ||
897 | if (!port) { | ||
898 | /* No port with this id exists; drop the packet. */ | ||
899 | dev_dbg(&portdev->vdev->dev, | ||
900 | "Invalid index %u in control packet\n", cpkt->id); | ||
901 | return; | ||
902 | } | ||
903 | |||
904 | switch (cpkt->event) { | ||
905 | case VIRTIO_CONSOLE_CONSOLE_PORT: | ||
906 | if (!cpkt->value) | ||
907 | break; | ||
908 | if (is_console_port(port)) | ||
909 | break; | ||
910 | |||
911 | init_port_console(port); | ||
912 | /* | ||
913 | * Could remove the port here in case init fails - but | ||
914 | * have to notify the host first. | ||
915 | */ | ||
916 | break; | ||
917 | case VIRTIO_CONSOLE_RESIZE: | ||
918 | if (!is_console_port(port)) | ||
919 | break; | ||
920 | port->cons.hvc->irq_requested = 1; | ||
921 | resize_console(port); | ||
922 | break; | ||
923 | case VIRTIO_CONSOLE_PORT_OPEN: | ||
924 | port->host_connected = cpkt->value; | ||
925 | wake_up_interruptible(&port->waitqueue); | ||
926 | break; | ||
927 | case VIRTIO_CONSOLE_PORT_NAME: | ||
928 | /* | ||
929 | * Skip the size of the header and the cpkt to get the size | ||
930 | * of the name that was sent | ||
931 | */ | ||
932 | name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; | ||
933 | |||
934 | port->name = kmalloc(name_size, GFP_KERNEL); | ||
935 | if (!port->name) { | ||
936 | dev_err(port->dev, | ||
937 | "Not enough space to store port name\n"); | ||
938 | break; | ||
939 | } | ||
940 | strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), | ||
941 | name_size - 1); | ||
942 | port->name[name_size - 1] = 0; | ||
943 | |||
944 | /* | ||
945 | * Since we only have one sysfs attribute, 'name', | ||
946 | * create it only if we have a name for the port. | ||
947 | */ | ||
948 | err = sysfs_create_group(&port->dev->kobj, | ||
949 | &port_attribute_group); | ||
950 | if (err) | ||
951 | dev_err(port->dev, | ||
952 | "Error %d creating sysfs device attributes\n", | ||
953 | err); | ||
954 | |||
955 | break; | ||
956 | case VIRTIO_CONSOLE_PORT_REMOVE: | ||
957 | /* | ||
958 | * Hot unplug the port. We don't decrement nr_ports | ||
959 | * since we don't want to deal with extra complexities | ||
960 | * of using the lowest-available port id: We can just | ||
961 | * pick up the nr_ports number as the id and not have | ||
962 | * userspace send it to us. This helps us in two | ||
963 | * ways: | ||
964 | * | ||
965 | * - We don't need to have a 'port_id' field in the | ||
966 | * config space when a port is hot-added. This is a | ||
967 | * good thing as we might queue up multiple hotplug | ||
968 | * requests issued in our workqueue. | ||
969 | * | ||
970 | * - Another way to deal with this would have been to | ||
971 | * use a bitmap of the active ports and select the | ||
972 | * lowest non-active port from that map. That | ||
973 | * bloats the already tight config space and we | ||
974 | * would end up artificially limiting the | ||
975 | * max. number of ports to sizeof(bitmap). Right | ||
976 | * now we can support 2^32 ports (as the port id is | ||
977 | * stored in a u32 type). | ||
978 | * | ||
979 | */ | ||
980 | remove_port(port); | ||
981 | break; | ||
982 | } | ||
983 | } | ||
984 | |||
985 | static void control_work_handler(struct work_struct *work) | ||
986 | { | ||
987 | struct ports_device *portdev; | ||
988 | struct virtqueue *vq; | ||
989 | struct port_buffer *buf; | ||
990 | unsigned int len; | ||
991 | |||
992 | portdev = container_of(work, struct ports_device, control_work); | ||
993 | vq = portdev->c_ivq; | ||
994 | |||
995 | spin_lock(&portdev->cvq_lock); | ||
996 | while ((buf = vq->vq_ops->get_buf(vq, &len))) { | ||
997 | spin_unlock(&portdev->cvq_lock); | ||
998 | |||
999 | buf->len = len; | ||
1000 | buf->offset = 0; | ||
1001 | |||
1002 | handle_control_message(portdev, buf); | ||
1003 | |||
1004 | spin_lock(&portdev->cvq_lock); | ||
1005 | if (add_inbuf(portdev->c_ivq, buf) < 0) { | ||
1006 | dev_warn(&portdev->vdev->dev, | ||
1007 | "Error adding buffer to queue\n"); | ||
1008 | free_buf(buf); | ||
1009 | } | ||
1010 | } | ||
1011 | spin_unlock(&portdev->cvq_lock); | ||
1012 | } | ||
1013 | |||
1014 | static void in_intr(struct virtqueue *vq) | ||
1015 | { | ||
1016 | struct port *port; | ||
1017 | unsigned long flags; | ||
1018 | |||
1019 | port = find_port_by_vq(vq->vdev->priv, vq); | ||
1020 | if (!port) | ||
1021 | return; | ||
1022 | |||
1023 | spin_lock_irqsave(&port->inbuf_lock, flags); | ||
1024 | if (!port->inbuf) | ||
1025 | port->inbuf = get_inbuf(port); | ||
1026 | |||
1027 | /* | ||
1028 | * Don't queue up data when the port is closed. This condition | ||
1029 | * can be reached when a console port is not yet connected (no | ||
1030 | * tty is spawned) and the host sends out data to console | ||
1031 | * ports. For generic serial ports, the host won't | ||
1032 | * (shouldn't) send data till the guest is connected. | ||
1033 | */ | ||
1034 | if (!port->guest_connected) | ||
1035 | discard_port_data(port); | ||
1036 | |||
1037 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | ||
1038 | |||
1039 | wake_up_interruptible(&port->waitqueue); | ||
1040 | |||
1041 | if (is_console_port(port) && hvc_poll(port->cons.hvc)) | ||
179 | hvc_kick(); | 1042 | hvc_kick(); |
180 | } | 1043 | } |
181 | 1044 | ||
182 | /*D:370 Once we're further in boot, we get probed like any other virtio device. | 1045 | static void control_intr(struct virtqueue *vq) |
183 | * At this stage we set up the output virtqueue. | 1046 | { |
184 | * | 1047 | struct ports_device *portdev; |
185 | * To set up and manage our virtual console, we call hvc_alloc(). Since we | 1048 | |
186 | * never remove the console device we never need this pointer again. | 1049 | portdev = vq->vdev->priv; |
1050 | schedule_work(&portdev->control_work); | ||
1051 | } | ||
1052 | |||
1053 | static void config_intr(struct virtio_device *vdev) | ||
1054 | { | ||
1055 | struct ports_device *portdev; | ||
1056 | |||
1057 | portdev = vdev->priv; | ||
1058 | if (use_multiport(portdev)) { | ||
1059 | /* Handle port hot-add */ | ||
1060 | schedule_work(&portdev->config_work); | ||
1061 | } | ||
1062 | /* | ||
1063 | * We'll use this way of resizing only for legacy support. | ||
1064 | * For newer userspace (VIRTIO_CONSOLE_F_MULTIPORT+), use | ||
1065 | * control messages to indicate console size changes so that | ||
1066 | * it can be done per-port. | ||
1067 | */ | ||
1068 | resize_console(find_port_by_id(portdev, 0)); | ||
1069 | } | ||
1070 | |||
1071 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | ||
1072 | { | ||
1073 | struct port_buffer *buf; | ||
1074 | unsigned int ret; | ||
1075 | int err; | ||
1076 | |||
1077 | ret = 0; | ||
1078 | do { | ||
1079 | buf = alloc_buf(PAGE_SIZE); | ||
1080 | if (!buf) | ||
1081 | break; | ||
1082 | |||
1083 | spin_lock_irq(lock); | ||
1084 | err = add_inbuf(vq, buf); | ||
1085 | if (err < 0) { | ||
1086 | spin_unlock_irq(lock); | ||
1087 | free_buf(buf); | ||
1088 | break; | ||
1089 | } | ||
1090 | ret++; | ||
1091 | spin_unlock_irq(lock); | ||
1092 | } while (err > 0); | ||
1093 | |||
1094 | return ret; | ||
1095 | } | ||
1096 | |||
1097 | static int add_port(struct ports_device *portdev, u32 id) | ||
1098 | { | ||
1099 | char debugfs_name[16]; | ||
1100 | struct port *port; | ||
1101 | struct port_buffer *buf; | ||
1102 | dev_t devt; | ||
1103 | int err; | ||
1104 | |||
1105 | port = kmalloc(sizeof(*port), GFP_KERNEL); | ||
1106 | if (!port) { | ||
1107 | err = -ENOMEM; | ||
1108 | goto fail; | ||
1109 | } | ||
1110 | |||
1111 | port->portdev = portdev; | ||
1112 | port->id = id; | ||
1113 | |||
1114 | port->name = NULL; | ||
1115 | port->inbuf = NULL; | ||
1116 | port->cons.hvc = NULL; | ||
1117 | |||
1118 | port->host_connected = port->guest_connected = false; | ||
1119 | |||
1120 | port->in_vq = portdev->in_vqs[port->id]; | ||
1121 | port->out_vq = portdev->out_vqs[port->id]; | ||
1122 | |||
1123 | cdev_init(&port->cdev, &port_fops); | ||
1124 | |||
1125 | devt = MKDEV(portdev->chr_major, id); | ||
1126 | err = cdev_add(&port->cdev, devt, 1); | ||
1127 | if (err < 0) { | ||
1128 | dev_err(&port->portdev->vdev->dev, | ||
1129 | "Error %d adding cdev for port %u\n", err, id); | ||
1130 | goto free_port; | ||
1131 | } | ||
1132 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | ||
1133 | devt, port, "vport%up%u", | ||
1134 | port->portdev->drv_index, id); | ||
1135 | if (IS_ERR(port->dev)) { | ||
1136 | err = PTR_ERR(port->dev); | ||
1137 | dev_err(&port->portdev->vdev->dev, | ||
1138 | "Error %d creating device for port %u\n", | ||
1139 | err, id); | ||
1140 | goto free_cdev; | ||
1141 | } | ||
1142 | |||
1143 | spin_lock_init(&port->inbuf_lock); | ||
1144 | init_waitqueue_head(&port->waitqueue); | ||
1145 | |||
1146 | /* Fill the in_vq with buffers so the host can send us data. */ | ||
1147 | err = fill_queue(port->in_vq, &port->inbuf_lock); | ||
1148 | if (!err) { | ||
1149 | dev_err(port->dev, "Error allocating inbufs\n"); | ||
1150 | err = -ENOMEM; | ||
1151 | goto free_device; | ||
1152 | } | ||
1153 | |||
1154 | /* | ||
1155 | * If we're not using multiport support, this has to be a console port | ||
1156 | */ | ||
1157 | if (!use_multiport(port->portdev)) { | ||
1158 | err = init_port_console(port); | ||
1159 | if (err) | ||
1160 | goto free_inbufs; | ||
1161 | } | ||
1162 | |||
1163 | spin_lock_irq(&portdev->ports_lock); | ||
1164 | list_add_tail(&port->list, &port->portdev->ports); | ||
1165 | spin_unlock_irq(&portdev->ports_lock); | ||
1166 | |||
1167 | /* | ||
1168 | * Tell the Host we're set so that it can send us various | ||
1169 | * configuration parameters for this port (eg, port name, | ||
1170 | * caching, whether this is a console port, etc.) | ||
1171 | */ | ||
1172 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1173 | |||
1174 | if (pdrvdata.debugfs_dir) { | ||
1175 | /* | ||
1176 | * Finally, create the debugfs file that we can use to | ||
1177 | * inspect a port's state at any time | ||
1178 | */ | ||
1179 | sprintf(debugfs_name, "vport%up%u", | ||
1180 | port->portdev->drv_index, id); | ||
1181 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, | ||
1182 | pdrvdata.debugfs_dir, | ||
1183 | port, | ||
1184 | &port_debugfs_ops); | ||
1185 | } | ||
1186 | return 0; | ||
1187 | |||
1188 | free_inbufs: | ||
1189 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) | ||
1190 | free_buf(buf); | ||
1191 | free_device: | ||
1192 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1193 | free_cdev: | ||
1194 | cdev_del(&port->cdev); | ||
1195 | free_port: | ||
1196 | kfree(port); | ||
1197 | fail: | ||
1198 | return err; | ||
1199 | } | ||
1200 | |||
1201 | /* | ||
1202 | * The workhandler for config-space updates. | ||
187 | * | 1203 | * |
188 | * Finally we put our input buffer in the input queue, ready to receive. */ | 1204 | * This is called when ports are hot-added. |
189 | static int __devinit virtcons_probe(struct virtio_device *dev) | 1205 | */ |
1206 | static void config_work_handler(struct work_struct *work) | ||
1207 | { | ||
1208 | struct virtio_console_config virtconconf; | ||
1209 | struct ports_device *portdev; | ||
1210 | struct virtio_device *vdev; | ||
1211 | int err; | ||
1212 | |||
1213 | portdev = container_of(work, struct ports_device, config_work); | ||
1214 | |||
1215 | vdev = portdev->vdev; | ||
1216 | vdev->config->get(vdev, | ||
1217 | offsetof(struct virtio_console_config, nr_ports), | ||
1218 | &virtconconf.nr_ports, | ||
1219 | sizeof(virtconconf.nr_ports)); | ||
1220 | |||
1221 | if (portdev->config.nr_ports == virtconconf.nr_ports) { | ||
1222 | /* | ||
1223 | * Port 0 got hot-added. Since we already did all the | ||
1224 | * other initialisation for it, just tell the Host | ||
1225 | * that the port is ready if we find the port. In | ||
1226 | * case the port was hot-removed earlier, we call | ||
1227 | * add_port to add the port. | ||
1228 | */ | ||
1229 | struct port *port; | ||
1230 | |||
1231 | port = find_port_by_id(portdev, 0); | ||
1232 | if (!port) | ||
1233 | add_port(portdev, 0); | ||
1234 | else | ||
1235 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | ||
1236 | return; | ||
1237 | } | ||
1238 | if (virtconconf.nr_ports > portdev->config.max_nr_ports) { | ||
1239 | dev_warn(&vdev->dev, | ||
1240 | "More ports specified (%u) than allowed (%u)", | ||
1241 | portdev->config.nr_ports + 1, | ||
1242 | portdev->config.max_nr_ports); | ||
1243 | return; | ||
1244 | } | ||
1245 | if (virtconconf.nr_ports < portdev->config.nr_ports) | ||
1246 | return; | ||
1247 | |||
1248 | /* Hot-add ports */ | ||
1249 | while (virtconconf.nr_ports - portdev->config.nr_ports) { | ||
1250 | err = add_port(portdev, portdev->config.nr_ports); | ||
1251 | if (err) | ||
1252 | break; | ||
1253 | portdev->config.nr_ports++; | ||
1254 | } | ||
1255 | } | ||
1256 | |||
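As a worked example of the hot-add path above: if the device previously exposed two ports (portdev->config.nr_ports == 2) and the config space now reports four, the loop calls add_port() for ids 2 and 3, incrementing portdev->config.nr_ports after each successful addition; if add_port() fails, the loop stops at the last port that could be brought up.
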
1257 | static int init_vqs(struct ports_device *portdev) | ||
190 | { | 1258 | { |
191 | vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; | 1259 | vq_callback_t **io_callbacks; |
192 | const char *names[] = { "input", "output" }; | 1260 | char **io_names; |
193 | struct virtqueue *vqs[2]; | 1261 | struct virtqueue **vqs; |
1262 | u32 i, j, nr_ports, nr_queues; | ||
194 | int err; | 1263 | int err; |
195 | 1264 | ||
196 | vdev = dev; | 1265 | nr_ports = portdev->config.max_nr_ports; |
1266 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; | ||
197 | 1267 | ||
198 | /* This is the scratch page we use to receive console input */ | 1268 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); |
199 | inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1269 | if (!vqs) { |
200 | if (!inbuf) { | ||
201 | err = -ENOMEM; | 1270 | err = -ENOMEM; |
202 | goto fail; | 1271 | goto fail; |
203 | } | 1272 | } |
1273 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); | ||
1274 | if (!io_callbacks) { | ||
1275 | err = -ENOMEM; | ||
1276 | goto free_vqs; | ||
1277 | } | ||
1278 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); | ||
1279 | if (!io_names) { | ||
1280 | err = -ENOMEM; | ||
1281 | goto free_callbacks; | ||
1282 | } | ||
1283 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | ||
1284 | GFP_KERNEL); | ||
1285 | if (!portdev->in_vqs) { | ||
1286 | err = -ENOMEM; | ||
1287 | goto free_names; | ||
1288 | } | ||
1289 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | ||
1290 | GFP_KERNEL); | ||
1291 | if (!portdev->out_vqs) { | ||
1292 | err = -ENOMEM; | ||
1293 | goto free_invqs; | ||
1294 | } | ||
1295 | |||
1296 | /* | ||
1297 | * For backward compat (newer host but older guest), the host | ||
1298 | * spawns a console port first and also inits the vqs for port | ||
1299 | * 0 before others. | ||
1300 | */ | ||
1301 | j = 0; | ||
1302 | io_callbacks[j] = in_intr; | ||
1303 | io_callbacks[j + 1] = NULL; | ||
1304 | io_names[j] = "input"; | ||
1305 | io_names[j + 1] = "output"; | ||
1306 | j += 2; | ||
204 | 1307 | ||
1308 | if (use_multiport(portdev)) { | ||
1309 | io_callbacks[j] = control_intr; | ||
1310 | io_callbacks[j + 1] = NULL; | ||
1311 | io_names[j] = "control-i"; | ||
1312 | io_names[j + 1] = "control-o"; | ||
1313 | |||
1314 | for (i = 1; i < nr_ports; i++) { | ||
1315 | j += 2; | ||
1316 | io_callbacks[j] = in_intr; | ||
1317 | io_callbacks[j + 1] = NULL; | ||
1318 | io_names[j] = "input"; | ||
1319 | io_names[j + 1] = "output"; | ||
1320 | } | ||
1321 | } | ||
205 | /* Find the queues. */ | 1322 | /* Find the queues. */ |
206 | /* FIXME: This is why we want to wean off hvc: we do nothing | 1323 | err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, |
207 | * when input comes in. */ | 1324 | io_callbacks, |
208 | err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); | 1325 | (const char **)io_names); |
209 | if (err) | 1326 | if (err) |
1327 | goto free_outvqs; | ||
1328 | |||
1329 | j = 0; | ||
1330 | portdev->in_vqs[0] = vqs[0]; | ||
1331 | portdev->out_vqs[0] = vqs[1]; | ||
1332 | j += 2; | ||
1333 | if (use_multiport(portdev)) { | ||
1334 | portdev->c_ivq = vqs[j]; | ||
1335 | portdev->c_ovq = vqs[j + 1]; | ||
1336 | |||
1337 | for (i = 1; i < nr_ports; i++) { | ||
1338 | j += 2; | ||
1339 | portdev->in_vqs[i] = vqs[j]; | ||
1340 | portdev->out_vqs[i] = vqs[j + 1]; | ||
1341 | } | ||
1342 | } | ||
1343 | kfree(io_callbacks); | ||
1344 | kfree(io_names); | ||
1345 | kfree(vqs); | ||
1346 | |||
1347 | return 0; | ||
1348 | |||
1349 | free_names: | ||
1350 | kfree(io_names); | ||
1351 | free_callbacks: | ||
1352 | kfree(io_callbacks); | ||
1353 | free_outvqs: | ||
1354 | kfree(portdev->out_vqs); | ||
1355 | free_invqs: | ||
1356 | kfree(portdev->in_vqs); | ||
1357 | free_vqs: | ||
1358 | kfree(vqs); | ||
1359 | fail: | ||
1360 | return err; | ||
1361 | } | ||
1362 | |||
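To make the virtqueue layout set up above concrete, assume a multiport device with max_nr_ports = 3, so nr_queues = (3 + 1) * 2 = 8; the vqs[] array handed back by find_vqs() is then split up as:

    vqs[0], vqs[1]  ->  in_vqs[0], out_vqs[0]   (port 0, kept first for compatibility)
    vqs[2], vqs[3]  ->  c_ivq, c_ovq            (control queues)
    vqs[4], vqs[5]  ->  in_vqs[1], out_vqs[1]   (port 1)
    vqs[6], vqs[7]  ->  in_vqs[2], out_vqs[2]   (port 2)
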
1363 | static const struct file_operations portdev_fops = { | ||
1364 | .owner = THIS_MODULE, | ||
1365 | }; | ||
1366 | |||
1367 | /* | ||
1368 | * Once we're further in boot, we get probed like any other virtio | ||
1369 | * device. | ||
1370 | * | ||
1371 | * If the host also supports multiple console ports, we check the | ||
1372 | * config space to see how many ports the host has spawned. We | ||
1373 | * initialize each port found. | ||
1374 | */ | ||
1375 | static int __devinit virtcons_probe(struct virtio_device *vdev) | ||
1376 | { | ||
1377 | struct ports_device *portdev; | ||
1378 | u32 i; | ||
1379 | int err; | ||
1380 | bool multiport; | ||
1381 | |||
1382 | portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); | ||
1383 | if (!portdev) { | ||
1384 | err = -ENOMEM; | ||
1385 | goto fail; | ||
1386 | } | ||
1387 | |||
1388 | /* Attach this portdev to this virtio_device, and vice-versa. */ | ||
1389 | portdev->vdev = vdev; | ||
1390 | vdev->priv = portdev; | ||
1391 | |||
1392 | spin_lock_irq(&pdrvdata_lock); | ||
1393 | portdev->drv_index = pdrvdata.index++; | ||
1394 | spin_unlock_irq(&pdrvdata_lock); | ||
1395 | |||
1396 | portdev->chr_major = register_chrdev(0, "virtio-portsdev", | ||
1397 | &portdev_fops); | ||
1398 | if (portdev->chr_major < 0) { | ||
1399 | dev_err(&vdev->dev, | ||
1400 | "Error %d registering chrdev for device %u\n", | ||
1401 | portdev->chr_major, portdev->drv_index); | ||
1402 | err = portdev->chr_major; | ||
210 | goto free; | 1403 | goto free; |
1404 | } | ||
211 | 1405 | ||
212 | in_vq = vqs[0]; | 1406 | multiport = false; |
213 | out_vq = vqs[1]; | 1407 | portdev->config.nr_ports = 1; |
1408 | portdev->config.max_nr_ports = 1; | ||
1409 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { | ||
1410 | multiport = true; | ||
1411 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; | ||
214 | 1412 | ||
215 | /* Start using the new console output. */ | 1413 | vdev->config->get(vdev, offsetof(struct virtio_console_config, |
216 | virtio_cons.get_chars = get_chars; | 1414 | nr_ports), |
217 | virtio_cons.put_chars = put_chars; | 1415 | &portdev->config.nr_ports, |
218 | virtio_cons.notifier_add = notifier_add_vio; | 1416 | sizeof(portdev->config.nr_ports)); |
219 | virtio_cons.notifier_del = notifier_del_vio; | 1417 | vdev->config->get(vdev, offsetof(struct virtio_console_config, |
220 | virtio_cons.notifier_hangup = notifier_del_vio; | 1418 | max_nr_ports), |
221 | 1419 | &portdev->config.max_nr_ports, | |
222 | /* The first argument of hvc_alloc() is the virtual console number, so | 1420 | sizeof(portdev->config.max_nr_ports)); |
223 | * we use zero. The second argument is the parameter for the | 1421 | if (portdev->config.nr_ports > portdev->config.max_nr_ports) { |
224 | * notification mechanism (like irq number). We currently leave this | 1422 | dev_warn(&vdev->dev, |
225 | * as zero, virtqueues have implicit notifications. | 1423 | "More ports (%u) specified than allowed (%u). Will init %u ports.", |
226 | * | 1424 | portdev->config.nr_ports, |
227 | * The third argument is a "struct hv_ops" containing the put_chars() | 1425 | portdev->config.max_nr_ports, |
228 | * get_chars(), notifier_add() and notifier_del() pointers. | 1426 | portdev->config.max_nr_ports); |
229 | * The final argument is the output buffer size: we can do any size, | 1427 | |
230 | * so we put PAGE_SIZE here. */ | 1428 | portdev->config.nr_ports = portdev->config.max_nr_ports; |
231 | hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); | 1429 | } |
232 | if (IS_ERR(hvc)) { | 1430 | } |
233 | err = PTR_ERR(hvc); | 1431 | |
234 | goto free_vqs; | 1432 | /* Let the Host know we support multiple ports.*/ |
1433 | vdev->config->finalize_features(vdev); | ||
1434 | |||
1435 | err = init_vqs(portdev); | ||
1436 | if (err < 0) { | ||
1437 | dev_err(&vdev->dev, "Error %d initializing vqs\n", err); | ||
1438 | goto free_chrdev; | ||
1439 | } | ||
1440 | |||
1441 | spin_lock_init(&portdev->ports_lock); | ||
1442 | INIT_LIST_HEAD(&portdev->ports); | ||
1443 | |||
1444 | if (multiport) { | ||
1445 | spin_lock_init(&portdev->cvq_lock); | ||
1446 | INIT_WORK(&portdev->control_work, &control_work_handler); | ||
1447 | INIT_WORK(&portdev->config_work, &config_work_handler); | ||
1448 | |||
1449 | err = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | ||
1450 | if (!err) { | ||
1451 | dev_err(&vdev->dev, | ||
1452 | "Error allocating buffers for control queue\n"); | ||
1453 | err = -ENOMEM; | ||
1454 | goto free_vqs; | ||
1455 | } | ||
235 | } | 1456 | } |
236 | 1457 | ||
237 | /* Register the input buffer the first time. */ | 1458 | for (i = 0; i < portdev->config.nr_ports; i++) |
238 | add_inbuf(); | 1459 | add_port(portdev, i); |
1460 | |||
1461 | /* Start using the new console output. */ | ||
1462 | early_put_chars = NULL; | ||
239 | return 0; | 1463 | return 0; |
240 | 1464 | ||
241 | free_vqs: | 1465 | free_vqs: |
242 | vdev->config->del_vqs(vdev); | 1466 | vdev->config->del_vqs(vdev); |
1467 | kfree(portdev->in_vqs); | ||
1468 | kfree(portdev->out_vqs); | ||
1469 | free_chrdev: | ||
1470 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | ||
243 | free: | 1471 | free: |
244 | kfree(inbuf); | 1472 | kfree(portdev); |
245 | fail: | 1473 | fail: |
246 | return err; | 1474 | return err; |
247 | } | 1475 | } |
248 | 1476 | ||
1477 | static void virtcons_remove(struct virtio_device *vdev) | ||
1478 | { | ||
1479 | struct ports_device *portdev; | ||
1480 | struct port *port, *port2; | ||
1481 | struct port_buffer *buf; | ||
1482 | unsigned int len; | ||
1483 | |||
1484 | portdev = vdev->priv; | ||
1485 | |||
1486 | cancel_work_sync(&portdev->control_work); | ||
1487 | cancel_work_sync(&portdev->config_work); | ||
1488 | |||
1489 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | ||
1490 | remove_port(port); | ||
1491 | |||
1492 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | ||
1493 | |||
1494 | while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) | ||
1495 | free_buf(buf); | ||
1496 | |||
1497 | while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) | ||
1498 | free_buf(buf); | ||
1499 | |||
1500 | vdev->config->del_vqs(vdev); | ||
1501 | kfree(portdev->in_vqs); | ||
1502 | kfree(portdev->out_vqs); | ||
1503 | |||
1504 | kfree(portdev); | ||
1505 | } | ||
1506 | |||
249 | static struct virtio_device_id id_table[] = { | 1507 | static struct virtio_device_id id_table[] = { |
250 | { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, | 1508 | { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, |
251 | { 0 }, | 1509 | { 0 }, |
@@ -253,6 +1511,7 @@ static struct virtio_device_id id_table[] = { | |||
253 | 1511 | ||
254 | static unsigned int features[] = { | 1512 | static unsigned int features[] = { |
255 | VIRTIO_CONSOLE_F_SIZE, | 1513 | VIRTIO_CONSOLE_F_SIZE, |
1514 | VIRTIO_CONSOLE_F_MULTIPORT, | ||
256 | }; | 1515 | }; |
257 | 1516 | ||
258 | static struct virtio_driver virtio_console = { | 1517 | static struct virtio_driver virtio_console = { |
@@ -262,14 +1521,41 @@ static struct virtio_driver virtio_console = { | |||
262 | .driver.owner = THIS_MODULE, | 1521 | .driver.owner = THIS_MODULE, |
263 | .id_table = id_table, | 1522 | .id_table = id_table, |
264 | .probe = virtcons_probe, | 1523 | .probe = virtcons_probe, |
265 | .config_changed = virtcons_apply_config, | 1524 | .remove = virtcons_remove, |
1525 | .config_changed = config_intr, | ||
266 | }; | 1526 | }; |
267 | 1527 | ||
268 | static int __init init(void) | 1528 | static int __init init(void) |
269 | { | 1529 | { |
1530 | int err; | ||
1531 | |||
1532 | pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); | ||
1533 | if (IS_ERR(pdrvdata.class)) { | ||
1534 | err = PTR_ERR(pdrvdata.class); | ||
1535 | pr_err("Error %d creating virtio-ports class\n", err); | ||
1536 | return err; | ||
1537 | } | ||
1538 | |||
1539 | pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); | ||
1540 | if (!pdrvdata.debugfs_dir) { | ||
1541 | pr_warning("Error %ld creating debugfs dir for virtio-ports\n", | ||
1542 | PTR_ERR(pdrvdata.debugfs_dir)); | ||
1543 | } | ||
1544 | INIT_LIST_HEAD(&pdrvdata.consoles); | ||
1545 | |||
270 | return register_virtio_driver(&virtio_console); | 1546 | return register_virtio_driver(&virtio_console); |
271 | } | 1547 | } |
1548 | |||
1549 | static void __exit fini(void) | ||
1550 | { | ||
1551 | unregister_virtio_driver(&virtio_console); | ||
1552 | |||
1553 | class_destroy(pdrvdata.class); | ||
1554 | if (pdrvdata.debugfs_dir) | ||
1555 | debugfs_remove_recursive(pdrvdata.debugfs_dir); | ||
1556 | } | ||
272 | module_init(init); | 1557 | module_init(init); |
1558 | module_exit(fini); | ||
273 | 1559 | ||
274 | MODULE_DEVICE_TABLE(virtio, id_table); | 1560 | MODULE_DEVICE_TABLE(virtio, id_table); |
275 | MODULE_DESCRIPTION("Virtio console driver"); | 1561 | MODULE_DESCRIPTION("Virtio console driver"); |
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index 27d20fac19d1..b314a999aabe 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #define DRV_NAME "cs5535-clockevt" | 22 | #define DRV_NAME "cs5535-clockevt" |
23 | 23 | ||
24 | static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; | 24 | static int timer_irq; |
25 | module_param_named(irq, timer_irq, int, 0644); | 25 | module_param_named(irq, timer_irq, int, 0644); |
26 | MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); | 26 | MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); |
27 | 27 | ||
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 46e899ac924e..1c3849f6b7a2 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -1274,7 +1274,7 @@ static int __exit crypto4xx_remove(struct of_device *ofdev) | |||
1274 | return 0; | 1274 | return 0; |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | static struct of_device_id crypto4xx_match[] = { | 1277 | static const struct of_device_id crypto4xx_match[] = { |
1278 | { .compatible = "amcc,ppc4xx-crypto",}, | 1278 | { .compatible = "amcc,ppc4xx-crypto",}, |
1279 | { }, | 1279 | { }, |
1280 | }; | 1280 | }; |
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 4801162919d9..c7a5a43ba691 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c | |||
@@ -135,13 +135,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, | |||
135 | /* | 135 | /* |
136 | * The requested key size is not supported by HW, do a fallback | 136 | * The requested key size is not supported by HW, do a fallback |
137 | */ | 137 | */ |
138 | op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 138 | op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
139 | op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); | 139 | op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); |
140 | 140 | ||
141 | ret = crypto_cipher_setkey(op->fallback.cip, key, len); | 141 | ret = crypto_cipher_setkey(op->fallback.cip, key, len); |
142 | if (ret) { | 142 | if (ret) { |
143 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 143 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
144 | tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); | 144 | tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK); |
145 | } | 145 | } |
146 | return ret; | 146 | return ret; |
147 | } | 147 | } |
@@ -263,7 +263,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm) | |||
263 | 263 | ||
264 | if (IS_ERR(op->fallback.cip)) { | 264 | if (IS_ERR(op->fallback.cip)) { |
265 | printk(KERN_ERR "Error allocating fallback algo %s\n", name); | 265 | printk(KERN_ERR "Error allocating fallback algo %s\n", name); |
266 | return PTR_ERR(op->fallback.blk); | 266 | return PTR_ERR(op->fallback.cip); |
267 | } | 267 | } |
268 | 268 | ||
269 | return 0; | 269 | return 0; |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c47ffe8a73ef..fd529d68c5ba 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1958,7 +1958,7 @@ err_out: | |||
1958 | return err; | 1958 | return err; |
1959 | } | 1959 | } |
1960 | 1960 | ||
1961 | static struct of_device_id talitos_match[] = { | 1961 | static const struct of_device_id talitos_match[] = { |
1962 | { | 1962 | { |
1963 | .compatible = "fsl,sec2.0", | 1963 | .compatible = "fsl,sec2.0", |
1964 | }, | 1964 | }, |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index cbaf420c36c5..2d3dc7ded0a9 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, | |||
893 | 893 | ||
894 | static struct kmem_cache *fwnet_packet_task_cache; | 894 | static struct kmem_cache *fwnet_packet_task_cache; |
895 | 895 | ||
896 | static void fwnet_free_ptask(struct fwnet_packet_task *ptask) | ||
897 | { | ||
898 | dev_kfree_skb_any(ptask->skb); | ||
899 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
900 | } | ||
901 | |||
896 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); | 902 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); |
897 | 903 | ||
898 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | 904 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) |
899 | { | 905 | { |
900 | struct fwnet_device *dev; | 906 | struct fwnet_device *dev = ptask->dev; |
901 | unsigned long flags; | 907 | unsigned long flags; |
902 | 908 | bool free; | |
903 | dev = ptask->dev; | ||
904 | 909 | ||
905 | spin_lock_irqsave(&dev->lock, flags); | 910 | spin_lock_irqsave(&dev->lock, flags); |
906 | list_del(&ptask->pt_link); | ||
907 | spin_unlock_irqrestore(&dev->lock, flags); | ||
908 | 911 | ||
909 | ptask->outstanding_pkts--; /* FIXME access inside lock */ | 912 | ptask->outstanding_pkts--; |
913 | |||
914 | /* Check whether we or the networking TX soft-IRQ is last user. */ | ||
915 | free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link)); | ||
916 | |||
917 | if (ptask->outstanding_pkts == 0) | ||
918 | list_del(&ptask->pt_link); | ||
919 | |||
920 | spin_unlock_irqrestore(&dev->lock, flags); | ||
910 | 921 | ||
911 | if (ptask->outstanding_pkts > 0) { | 922 | if (ptask->outstanding_pkts > 0) { |
912 | u16 dg_size; | 923 | u16 dg_size; |
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |||
951 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; | 962 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; |
952 | } | 963 | } |
953 | fwnet_send_packet(ptask); | 964 | fwnet_send_packet(ptask); |
954 | } else { | ||
955 | dev_kfree_skb_any(ptask->skb); | ||
956 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
957 | } | 965 | } |
966 | |||
967 | if (free) | ||
968 | fwnet_free_ptask(ptask); | ||
958 | } | 969 | } |
959 | 970 | ||
960 | static void fwnet_write_complete(struct fw_card *card, int rcode, | 971 | static void fwnet_write_complete(struct fw_card *card, int rcode, |
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
977 | unsigned tx_len; | 988 | unsigned tx_len; |
978 | struct rfc2734_header *bufhdr; | 989 | struct rfc2734_header *bufhdr; |
979 | unsigned long flags; | 990 | unsigned long flags; |
991 | bool free; | ||
980 | 992 | ||
981 | dev = ptask->dev; | 993 | dev = ptask->dev; |
982 | tx_len = ptask->max_payload; | 994 | tx_len = ptask->max_payload; |
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
1022 | generation, SCODE_100, 0ULL, ptask->skb->data, | 1034 | generation, SCODE_100, 0ULL, ptask->skb->data, |
1023 | tx_len + 8, fwnet_write_complete, ptask); | 1035 | tx_len + 8, fwnet_write_complete, ptask); |
1024 | 1036 | ||
1025 | /* FIXME race? */ | ||
1026 | spin_lock_irqsave(&dev->lock, flags); | 1037 | spin_lock_irqsave(&dev->lock, flags); |
1027 | list_add_tail(&ptask->pt_link, &dev->broadcasted_list); | 1038 | |
1039 | /* If the AT tasklet already ran, we may be last user. */ | ||
1040 | free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); | ||
1041 | if (!free) | ||
1042 | list_add_tail(&ptask->pt_link, &dev->broadcasted_list); | ||
1043 | |||
1028 | spin_unlock_irqrestore(&dev->lock, flags); | 1044 | spin_unlock_irqrestore(&dev->lock, flags); |
1029 | 1045 | ||
1030 | return 0; | 1046 | goto out; |
1031 | } | 1047 | } |
1032 | 1048 | ||
1033 | fw_send_request(dev->card, &ptask->transaction, | 1049 | fw_send_request(dev->card, &ptask->transaction, |
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
1035 | ptask->generation, ptask->speed, ptask->fifo_addr, | 1051 | ptask->generation, ptask->speed, ptask->fifo_addr, |
1036 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); | 1052 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); |
1037 | 1053 | ||
1038 | /* FIXME race? */ | ||
1039 | spin_lock_irqsave(&dev->lock, flags); | 1054 | spin_lock_irqsave(&dev->lock, flags); |
1040 | list_add_tail(&ptask->pt_link, &dev->sent_list); | 1055 | |
1056 | /* If the AT tasklet already ran, we may be last user. */ | ||
1057 | free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); | ||
1058 | if (!free) | ||
1059 | list_add_tail(&ptask->pt_link, &dev->sent_list); | ||
1060 | |||
1041 | spin_unlock_irqrestore(&dev->lock, flags); | 1061 | spin_unlock_irqrestore(&dev->lock, flags); |
1042 | 1062 | ||
1043 | dev->netdev->trans_start = jiffies; | 1063 | dev->netdev->trans_start = jiffies; |
1064 | out: | ||
1065 | if (free) | ||
1066 | fwnet_free_ptask(ptask); | ||
1044 | 1067 | ||
1045 | return 0; | 1068 | return 0; |
1046 | } | 1069 | } |
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) | |||
1298 | spin_unlock_irqrestore(&dev->lock, flags); | 1321 | spin_unlock_irqrestore(&dev->lock, flags); |
1299 | 1322 | ||
1300 | ptask->max_payload = max_payload; | 1323 | ptask->max_payload = max_payload; |
1324 | INIT_LIST_HEAD(&ptask->pt_link); | ||
1325 | |||
1301 | fwnet_send_packet(ptask); | 1326 | fwnet_send_packet(ptask); |
1302 | 1327 | ||
1303 | return NETDEV_TX_OK; | 1328 | return NETDEV_TX_OK; |
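The firewire/net.c change above replaces the unconditional free in the completion path with a "last user frees" handshake: both the completion side and the submitting side check, under dev->lock, whether any fragments are still outstanding and whether the packet task is linked on a list, and only the side that loses the race frees it. Below is a minimal userspace model of that handshake, with a pthread mutex standing in for the spinlock and all names invented for illustration; it is a sketch of the idea, not the driver code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct fwnet_packet_task. */
struct ptask {
	pthread_mutex_t lock;	/* models dev->lock */
	int outstanding_pkts;	/* fragments still in flight */
	bool linked;		/* models !list_empty(&ptask->pt_link) */
};

/* Completion side: a fragment finished.  We may free only if the
 * submitter already linked the task, i.e. finished its half. */
static bool complete_side(struct ptask *p)
{
	bool free_it;

	pthread_mutex_lock(&p->lock);
	p->outstanding_pkts--;
	free_it = (p->outstanding_pkts == 0 && p->linked);
	if (p->outstanding_pkts == 0)
		p->linked = false;	/* models list_del() */
	pthread_mutex_unlock(&p->lock);
	return free_it;
}

/* Submitter side: after handing the request to the hardware.  If the
 * completion already ran and the task was never linked, we are the
 * last user; otherwise link it and let the completion free it. */
static bool submit_side(struct ptask *p)
{
	bool free_it;

	pthread_mutex_lock(&p->lock);
	free_it = (p->outstanding_pkts == 0 && !p->linked);
	if (!free_it)
		p->linked = true;	/* models list_add_tail() */
	pthread_mutex_unlock(&p->lock);
	return free_it;
}

int main(void)
{
	struct ptask p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.outstanding_pkts = 1,
		.linked = false,
	};

	/* Completion races in before the submitter links the task ... */
	bool a = complete_side(&p);
	/* ... so the submitter detects it is the last user and frees. */
	bool b = submit_side(&p);

	printf("completion frees: %d, submitter frees: %d\n", a, b);
	return 0;
}

In either interleaving exactly one of the two sides observes both conditions, so the task is freed exactly once; that is the window the removed "FIXME race?" comments pointed at.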
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 2345d4103fe6..43ebf337b131 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, | |||
2101 | u32 payload_index, payload_end_index, next_page_index; | 2101 | u32 payload_index, payload_end_index, next_page_index; |
2102 | int page, end_page, i, length, offset; | 2102 | int page, end_page, i, length, offset; |
2103 | 2103 | ||
2104 | /* | ||
2105 | * FIXME: Cycle lost behavior should be configurable: lose | ||
2106 | * packet, retransmit or terminate.. | ||
2107 | */ | ||
2108 | |||
2109 | p = packet; | 2104 | p = packet; |
2110 | payload_index = payload; | 2105 | payload_index = payload; |
2111 | 2106 | ||
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, | |||
2135 | if (!p->skip) { | 2130 | if (!p->skip) { |
2136 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); | 2131 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); |
2137 | d[0].req_count = cpu_to_le16(8); | 2132 | d[0].req_count = cpu_to_le16(8); |
2133 | /* | ||
2134 | * Link the skip address to this descriptor itself. This causes | ||
2135 | * a context to skip a cycle whenever lost cycles or FIFO | ||
2136 | * overruns occur, without dropping the data. The application | ||
2137 | * should then decide whether this is an error condition or not. | ||
2138 | * FIXME: Make the context's cycle-lost behaviour configurable? | ||
2139 | */ | ||
2140 | d[0].branch_address = cpu_to_le32(d_bus | z); | ||
2138 | 2141 | ||
2139 | header = (__le32 *) &d[1]; | 2142 | header = (__le32 *) &d[1]; |
2140 | header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | | 2143 | header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f665b05592f3..ab6c97330412 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, | |||
598 | return mode; | 598 | return mode; |
599 | } | 599 | } |
600 | 600 | ||
601 | /* | ||
602 | * EDID is delightfully ambiguous about how interlaced modes are to be | ||
603 | * encoded. Our internal representation is of frame height, but some | ||
604 | * HDTV detailed timings are encoded as field height. | ||
605 | * | ||
606 | * The format list here is from CEA, in frame size. Technically we | ||
607 | * should be checking refresh rate too. Whatever. | ||
608 | */ | ||
609 | static void | ||
610 | drm_mode_do_interlace_quirk(struct drm_display_mode *mode, | ||
611 | struct detailed_pixel_timing *pt) | ||
612 | { | ||
613 | int i; | ||
614 | static const struct { | ||
615 | int w, h; | ||
616 | } cea_interlaced[] = { | ||
617 | { 1920, 1080 }, | ||
618 | { 720, 480 }, | ||
619 | { 1440, 480 }, | ||
620 | { 2880, 480 }, | ||
621 | { 720, 576 }, | ||
622 | { 1440, 576 }, | ||
623 | { 2880, 576 }, | ||
624 | }; | ||
625 | static const int n_sizes = | ||
626 | sizeof(cea_interlaced)/sizeof(cea_interlaced[0]); | ||
627 | |||
628 | if (!(pt->misc & DRM_EDID_PT_INTERLACED)) | ||
629 | return; | ||
630 | |||
631 | for (i = 0; i < n_sizes; i++) { | ||
632 | if ((mode->hdisplay == cea_interlaced[i].w) && | ||
633 | (mode->vdisplay == cea_interlaced[i].h / 2)) { | ||
634 | mode->vdisplay *= 2; | ||
635 | mode->vsync_start *= 2; | ||
636 | mode->vsync_end *= 2; | ||
637 | mode->vtotal *= 2; | ||
638 | mode->vtotal |= 1; | ||
639 | } | ||
640 | } | ||
641 | |||
642 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
643 | } | ||
644 | |||
601 | /** | 645 | /** |
602 | * drm_mode_detailed - create a new mode from an EDID detailed timing section | 646 | * drm_mode_detailed - create a new mode from an EDID detailed timing section |
603 | * @dev: DRM device (needed to create new mode) | 647 | * @dev: DRM device (needed to create new mode) |
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
680 | 724 | ||
681 | drm_mode_set_name(mode); | 725 | drm_mode_set_name(mode); |
682 | 726 | ||
683 | if (pt->misc & DRM_EDID_PT_INTERLACED) | 727 | drm_mode_do_interlace_quirk(mode, pt); |
684 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
685 | 728 | ||
686 | if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { | 729 | if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { |
687 | pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; | 730 | pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; |
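drm_mode_do_interlace_quirk() converts CEA detailed timings reported in field lines into the frame-height form the rest of DRM expects: the vertical display, sync and total values are doubled and vtotal is forced odd. A worked example for a 1080i timing delivered as 1920x540 fields; the vsync/vtotal numbers here are illustrative, not taken from a real EDID.

#include <stdio.h>

int main(void)
{
	/* Field-height timing as a sink might report 1080i. */
	int vdisplay = 540, vsync_start = 542, vsync_end = 547, vtotal = 562;

	/* Same arithmetic as drm_mode_do_interlace_quirk(). */
	vdisplay *= 2;
	vsync_start *= 2;
	vsync_end *= 2;
	vtotal *= 2;
	vtotal |= 1;	/* interlaced frames have an odd total line count */

	printf("%dx%d, vsync %d-%d, vtotal %d\n",
	       1920, vdisplay, vsync_start, vsync_end, vtotal);
	/* -> 1920x1080, vsync 1084-1094, vtotal 1125 */
	return 0;
}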
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index cdec32977129..2ac074c8f5d2 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, | |||
405 | wasted += alignment - tmp; | 405 | wasted += alignment - tmp; |
406 | } | 406 | } |
407 | 407 | ||
408 | if (entry->size >= size + wasted) { | 408 | if (entry->size >= size + wasted && |
409 | (entry->start + wasted + size) <= end) { | ||
409 | if (!best_match) | 410 | if (!best_match) |
410 | return entry; | 411 | return entry; |
411 | if (entry->size < best_size) { | 412 | if (entry->size < best_size) { |
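The added clause in drm_mm_search_free_in_range() rejects holes that are large enough by size alone but would place the allocation past the caller's end boundary. A small numeric illustration of the case the old test accepted (the hole coordinates are made up):

#include <stdio.h>

/* Mirrors the fixed condition in drm_mm_search_free_in_range(). */
static int hole_fits(unsigned long start, unsigned long hole_size,
		     unsigned long size, unsigned long wasted,
		     unsigned long end)
{
	return hole_size >= size + wasted && start + wasted + size <= end;
}

int main(void)
{
	/* A 20-unit hole at offset 10, but the caller only allows up to 16. */
	unsigned long start = 10, hole_size = 20, end = 16;
	unsigned long size = 8, wasted = 0;

	/* Old check: size fits, even though the allocation would span
	 * [10, 18) and overflow the requested range. */
	printf("size check alone: %d\n", hole_size >= size + wasted);
	printf("with range check: %d\n",
	       hole_fits(start, hole_size, size, wasted, end));
	return 0;
}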
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 79beffcf5936..cf4cb3e9a0c2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist); | |||
176 | 176 | ||
177 | static int i915_drm_freeze(struct drm_device *dev) | 177 | static int i915_drm_freeze(struct drm_device *dev) |
178 | { | 178 | { |
179 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
180 | |||
179 | pci_save_state(dev->pdev); | 181 | pci_save_state(dev->pdev); |
180 | 182 | ||
181 | /* If KMS is active, we do the leavevt stuff here */ | 183 | /* If KMS is active, we do the leavevt stuff here */ |
@@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
191 | 193 | ||
192 | i915_save_state(dev); | 194 | i915_save_state(dev); |
193 | 195 | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static void i915_drm_suspend(struct drm_device *dev) | ||
198 | { | ||
199 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
200 | |||
201 | intel_opregion_free(dev, 1); | 196 | intel_opregion_free(dev, 1); |
202 | 197 | ||
203 | /* Modeset on resume, not lid events */ | 198 | /* Modeset on resume, not lid events */ |
204 | dev_priv->modeset_on_lid = 0; | 199 | dev_priv->modeset_on_lid = 0; |
200 | |||
201 | return 0; | ||
205 | } | 202 | } |
206 | 203 | ||
207 | static int i915_suspend(struct drm_device *dev, pm_message_t state) | 204 | static int i915_suspend(struct drm_device *dev, pm_message_t state) |
@@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
221 | if (error) | 218 | if (error) |
222 | return error; | 219 | return error; |
223 | 220 | ||
224 | i915_drm_suspend(dev); | ||
225 | |||
226 | if (state.event == PM_EVENT_SUSPEND) { | 221 | if (state.event == PM_EVENT_SUSPEND) { |
227 | /* Shut down the device */ | 222 | /* Shut down the device */ |
228 | pci_disable_device(dev->pdev); | 223 | pci_disable_device(dev->pdev); |
@@ -237,6 +232,10 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
237 | struct drm_i915_private *dev_priv = dev->dev_private; | 232 | struct drm_i915_private *dev_priv = dev->dev_private; |
238 | int error = 0; | 233 | int error = 0; |
239 | 234 | ||
235 | i915_restore_state(dev); | ||
236 | |||
237 | intel_opregion_init(dev, 1); | ||
238 | |||
240 | /* KMS EnterVT equivalent */ | 239 | /* KMS EnterVT equivalent */ |
241 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 240 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
242 | mutex_lock(&dev->struct_mutex); | 241 | mutex_lock(&dev->struct_mutex); |
@@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev) | |||
263 | 262 | ||
264 | pci_set_master(dev->pdev); | 263 | pci_set_master(dev->pdev); |
265 | 264 | ||
266 | i915_restore_state(dev); | ||
267 | |||
268 | intel_opregion_init(dev, 1); | ||
269 | |||
270 | return i915_drm_thaw(dev); | 265 | return i915_drm_thaw(dev); |
271 | } | 266 | } |
272 | 267 | ||
@@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev) | |||
423 | if (error) | 418 | if (error) |
424 | return error; | 419 | return error; |
425 | 420 | ||
426 | i915_drm_suspend(drm_dev); | ||
427 | |||
428 | pci_disable_device(pdev); | 421 | pci_disable_device(pdev); |
429 | pci_set_power_state(pdev, PCI_D3hot); | 422 | pci_set_power_state(pdev, PCI_D3hot); |
430 | 423 | ||
@@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev) | |||
464 | { | 457 | { |
465 | struct pci_dev *pdev = to_pci_dev(dev); | 458 | struct pci_dev *pdev = to_pci_dev(dev); |
466 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 459 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
467 | int error; | ||
468 | |||
469 | error = i915_drm_freeze(drm_dev); | ||
470 | if (!error) | ||
471 | i915_drm_suspend(drm_dev); | ||
472 | 460 | ||
473 | return error; | 461 | return i915_drm_freeze(drm_dev); |
474 | } | 462 | } |
475 | 463 | ||
476 | const struct dev_pm_ops i915_pm_ops = { | 464 | const struct dev_pm_ops i915_pm_ops = { |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b1d0acbae4e4..c2e8a45780d5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
636 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | 636 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), |
637 | }, | 637 | }, |
638 | }, | 638 | }, |
639 | { | ||
640 | .ident = "Clevo M5x0N", | ||
641 | .matches = { | ||
642 | DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), | ||
643 | DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), | ||
644 | }, | ||
645 | }, | ||
639 | { } | 646 | { } |
640 | }; | 647 | }; |
641 | 648 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 2cd0fad17dac..0e9cd1d49130 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, | |||
5861 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 5861 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
5862 | struct nvbios *bios = &dev_priv->VBIOS; | 5862 | struct nvbios *bios = &dev_priv->VBIOS; |
5863 | struct init_exec iexec = { true, false }; | 5863 | struct init_exec iexec = { true, false }; |
5864 | unsigned long flags; | ||
5865 | 5864 | ||
5866 | spin_lock_irqsave(&bios->lock, flags); | 5865 | mutex_lock(&bios->lock); |
5867 | bios->display.output = dcbent; | 5866 | bios->display.output = dcbent; |
5868 | parse_init_table(bios, table, &iexec); | 5867 | parse_init_table(bios, table, &iexec); |
5869 | bios->display.output = NULL; | 5868 | bios->display.output = NULL; |
5870 | spin_unlock_irqrestore(&bios->lock, flags); | 5869 | mutex_unlock(&bios->lock); |
5871 | } | 5870 | } |
5872 | 5871 | ||
5873 | static bool NVInitVBIOS(struct drm_device *dev) | 5872 | static bool NVInitVBIOS(struct drm_device *dev) |
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev) | |||
5876 | struct nvbios *bios = &dev_priv->VBIOS; | 5875 | struct nvbios *bios = &dev_priv->VBIOS; |
5877 | 5876 | ||
5878 | memset(bios, 0, sizeof(struct nvbios)); | 5877 | memset(bios, 0, sizeof(struct nvbios)); |
5879 | spin_lock_init(&bios->lock); | 5878 | mutex_init(&bios->lock); |
5880 | bios->dev = dev; | 5879 | bios->dev = dev; |
5881 | 5880 | ||
5882 | if (!NVShadowVBIOS(dev, bios->data)) | 5881 | if (!NVShadowVBIOS(dev, bios->data)) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 68446fd4146b..fd94bd6dc264 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
@@ -205,7 +205,7 @@ struct nvbios { | |||
205 | struct drm_device *dev; | 205 | struct drm_device *dev; |
206 | struct nouveau_bios_info pub; | 206 | struct nouveau_bios_info pub; |
207 | 207 | ||
208 | spinlock_t lock; | 208 | struct mutex lock; |
209 | 209 | ||
210 | uint8_t data[NV_PROM_SIZE]; | 210 | uint8_t data[NV_PROM_SIZE]; |
211 | unsigned int length; | 211 | unsigned int length; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 5445cefdd03e..1c15ef37b71c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -583,6 +583,7 @@ struct drm_nouveau_private { | |||
583 | uint64_t vm_end; | 583 | uint64_t vm_end; |
584 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; | 584 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; |
585 | int vm_vram_pt_nr; | 585 | int vm_vram_pt_nr; |
586 | uint64_t vram_sys_base; | ||
586 | 587 | ||
587 | /* the mtrr covering the FB */ | 588 | /* the mtrr covering the FB */ |
588 | int fb_mtrr; | 589 | int fb_mtrr; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 8f3a12f614ed..2dc09dbd817d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
285 | uint32_t flags, uint64_t phys) | 285 | uint32_t flags, uint64_t phys) |
286 | { | 286 | { |
287 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 287 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
288 | struct nouveau_gpuobj **pgt; | 288 | struct nouveau_gpuobj *pgt; |
289 | unsigned psz, pfl, pages; | 289 | unsigned block; |
290 | 290 | int i; | |
291 | if (virt >= dev_priv->vm_gart_base && | ||
292 | (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) { | ||
293 | psz = 12; | ||
294 | pgt = &dev_priv->gart_info.sg_ctxdma; | ||
295 | pfl = 0x21; | ||
296 | virt -= dev_priv->vm_gart_base; | ||
297 | } else | ||
298 | if (virt >= dev_priv->vm_vram_base && | ||
299 | (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) { | ||
300 | psz = 16; | ||
301 | pgt = dev_priv->vm_vram_pt; | ||
302 | pfl = 0x01; | ||
303 | virt -= dev_priv->vm_vram_base; | ||
304 | } else { | ||
305 | NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n", | ||
306 | virt, virt + size - 1); | ||
307 | return -EINVAL; | ||
308 | } | ||
309 | 291 | ||
310 | pages = size >> psz; | 292 | virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; |
293 | size = (size >> 16) << 1; | ||
294 | |||
295 | phys |= ((uint64_t)flags << 32); | ||
296 | phys |= 1; | ||
297 | if (dev_priv->vram_sys_base) { | ||
298 | phys += dev_priv->vram_sys_base; | ||
299 | phys |= 0x30; | ||
300 | } | ||
311 | 301 | ||
312 | dev_priv->engine.instmem.prepare_access(dev, true); | 302 | dev_priv->engine.instmem.prepare_access(dev, true); |
313 | if (flags & 0x80000000) { | 303 | while (size) { |
314 | while (pages--) { | 304 | unsigned offset_h = upper_32_bits(phys); |
315 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | 305 | unsigned offset_l = lower_32_bits(phys); |
316 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | 306 | unsigned pte, end; |
307 | |||
308 | for (i = 7; i >= 0; i--) { | ||
309 | block = 1 << (i + 1); | ||
310 | if (size >= block && !(virt & (block - 1))) | ||
311 | break; | ||
312 | } | ||
313 | offset_l |= (i << 7); | ||
317 | 314 | ||
318 | nv_wo32(dev, pt, pte++, 0x00000000); | 315 | phys += block << 15; |
319 | nv_wo32(dev, pt, pte++, 0x00000000); | 316 | size -= block; |
320 | 317 | ||
321 | virt += (1 << psz); | 318 | while (block) { |
322 | } | 319 | pgt = dev_priv->vm_vram_pt[virt >> 14]; |
323 | } else { | 320 | pte = virt & 0x3ffe; |
324 | while (pages--) { | ||
325 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | ||
326 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | ||
327 | unsigned offset_h = upper_32_bits(phys) & 0xff; | ||
328 | unsigned offset_l = lower_32_bits(phys); | ||
329 | 321 | ||
330 | nv_wo32(dev, pt, pte++, offset_l | pfl); | 322 | end = pte + block; |
331 | nv_wo32(dev, pt, pte++, offset_h | flags); | 323 | if (end > 16384) |
324 | end = 16384; | ||
325 | block -= (end - pte); | ||
326 | virt += (end - pte); | ||
332 | 327 | ||
333 | phys += (1 << psz); | 328 | while (pte < end) { |
334 | virt += (1 << psz); | 329 | nv_wo32(dev, pgt, pte++, offset_l); |
330 | nv_wo32(dev, pgt, pte++, offset_h); | ||
331 | } | ||
335 | } | 332 | } |
336 | } | 333 | } |
337 | dev_priv->engine.instmem.finish_access(dev); | 334 | dev_priv->engine.instmem.finish_access(dev); |
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
356 | void | 353 | void |
357 | nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) | 354 | nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) |
358 | { | 355 | { |
359 | nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); | 356 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
357 | struct nouveau_gpuobj *pgt; | ||
358 | unsigned pages, pte, end; | ||
359 | |||
360 | virt -= dev_priv->vm_vram_base; | ||
361 | pages = (size >> 16) << 1; | ||
362 | |||
363 | dev_priv->engine.instmem.prepare_access(dev, true); | ||
364 | while (pages) { | ||
365 | pgt = dev_priv->vm_vram_pt[virt >> 29]; | ||
366 | pte = (virt & 0x1ffe0000ULL) >> 15; | ||
367 | |||
368 | end = pte + pages; | ||
369 | if (end > 16384) | ||
370 | end = 16384; | ||
371 | pages -= (end - pte); | ||
372 | virt += (end - pte) << 15; | ||
373 | |||
374 | while (pte < end) | ||
375 | nv_wo32(dev, pgt, pte++, 0); | ||
376 | } | ||
377 | dev_priv->engine.instmem.finish_access(dev); | ||
378 | |||
379 | nv_wr32(dev, 0x100c80, 0x00050001); | ||
380 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
381 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
382 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
383 | return; | ||
384 | } | ||
385 | |||
386 | nv_wr32(dev, 0x100c80, 0x00000001); | ||
387 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
388 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
389 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
390 | } | ||
360 | } | 391 | } |
361 | 392 | ||
362 | /* | 393 | /* |
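As the rewritten bind path above sets them, each large-page PTE is a 64-bit value: bit 0 marks the entry present, the caller's flags occupy the upper 32 bits, bits 7 and up of the low word encode how many 64 KiB pages the entry covers as a contiguous block, and on IGP chipsets the stolen-memory base (vram_sys_base) is added to the address with bits 4-5 (0x30) redirecting the target to system memory. The sketch below only mirrors that arithmetic for a single entry so the packing is easier to follow; the input values are arbitrary and the field interpretation is read off the hunk, not from documentation.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the PTE arithmetic in nv50_mem_vm_bind_linear() above.
 * pages_log2 is log2 of the number of contiguous 64 KiB pages the
 * entry covers (the loop in the patch picks the largest that fits). */
static void build_pte(uint64_t phys, uint32_t flags, uint64_t vram_sys_base,
		      unsigned pages_log2, uint32_t *lo, uint32_t *hi)
{
	phys |= (uint64_t)flags << 32;	/* memory-type flags in the high word */
	phys |= 1;			/* present */
	if (vram_sys_base) {
		phys += vram_sys_base;	/* IGP: "VRAM" lives in stolen sysmem */
		phys |= 0x30;		/* target is system memory */
	}

	*lo = (uint32_t)phys | (pages_log2 << 7);	/* block-size field */
	*hi = (uint32_t)(phys >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	/* Arbitrary inputs: a 1 MiB block (16 pages) at 0x20000000 on an
	 * IGP whose stolen memory starts at 0x7c000000. */
	build_pte(0x20000000ULL, 0x0000f000, 0x7c000000ULL, 4, &lo, &hi);
	printf("pte = 0x%08x%08x\n", (unsigned)hi, (unsigned)lo);
	/* -> pte = 0x0000f0009c000231 */
	return 0;
}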
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index d0e038d28948..1d73b15d70da 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c | |||
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
119 | struct drm_connector *connector) | 119 | struct drm_connector *connector) |
120 | { | 120 | { |
121 | struct drm_device *dev = encoder->dev; | 121 | struct drm_device *dev = encoder->dev; |
122 | uint8_t saved_seq1, saved_pi, saved_rpc1; | 122 | uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; |
123 | uint8_t saved_palette0[3], saved_palette_mask; | 123 | uint8_t saved_palette0[3], saved_palette_mask; |
124 | uint32_t saved_rtest_ctrl, saved_rgen_ctrl; | 124 | uint32_t saved_rtest_ctrl, saved_rgen_ctrl; |
125 | int i; | 125 | int i; |
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
135 | /* only implemented for head A for now */ | 135 | /* only implemented for head A for now */ |
136 | NVSetOwner(dev, 0); | 136 | NVSetOwner(dev, 0); |
137 | 137 | ||
138 | saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX); | ||
139 | NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80); | ||
140 | |||
138 | saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); | 141 | saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); |
139 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); | 142 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); |
140 | 143 | ||
@@ -203,6 +206,7 @@ out: | |||
203 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); | 206 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); |
204 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); | 207 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); |
205 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); | 208 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); |
209 | NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); | ||
206 | 210 | ||
207 | if (blue == 0x18) { | 211 | if (blue == 0x18) { |
208 | NV_INFO(dev, "Load detected on head A\n"); | 212 | NV_INFO(dev, "Load detected on head A\n"); |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 58b917c3341b..21ac6e49b6ee 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder) | |||
579 | nouveau_encoder(encoder)->restore.output); | 579 | nouveau_encoder(encoder)->restore.output); |
580 | 580 | ||
581 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); | 581 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); |
582 | |||
583 | nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; | ||
582 | } | 584 | } |
583 | 585 | ||
584 | static int nv17_tv_create_resources(struct drm_encoder *encoder, | 586 | static int nv17_tv_create_resources(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 94400f777e7f..f0dc4e36ef05 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev) | |||
76 | for (i = 0x1700; i <= 0x1710; i += 4) | 76 | for (i = 0x1700; i <= 0x1710; i += 4) |
77 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); | 77 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); |
78 | 78 | ||
79 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) | ||
80 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; | ||
81 | else | ||
82 | dev_priv->vram_sys_base = 0; | ||
83 | |||
79 | /* Reserve the last MiB of VRAM, we should probably try to avoid | 84 | /* Reserve the last MiB of VRAM, we should probably try to avoid |
80 | * setting up the below tables over the top of the VBIOS image at | 85 | * setting up the below tables over the top of the VBIOS image at |
81 | * some point. | 86 | * some point. |
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev) | |||
172 | * We map the entire fake channel into the start of the PRAMIN BAR | 177 | * We map the entire fake channel into the start of the PRAMIN BAR |
173 | */ | 178 | */ |
174 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, | 179 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, |
175 | 0, &priv->pramin_pt); | 180 | 0, &priv->pramin_pt); |
176 | if (ret) | 181 | if (ret) |
177 | return ret; | 182 | return ret; |
178 | 183 | ||
179 | for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { | 184 | v = c_offset | 1; |
180 | if (v < (c_offset + c_size)) | 185 | if (dev_priv->vram_sys_base) { |
181 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); | 186 | v += dev_priv->vram_sys_base; |
182 | else | 187 | v |= 0x30; |
183 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); | 188 | } |
189 | |||
190 | i = 0; | ||
191 | while (v < dev_priv->vram_sys_base + c_offset + c_size) { | ||
192 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); | ||
193 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | ||
194 | v += 0x1000; | ||
195 | i += 8; | ||
196 | } | ||
197 | |||
198 | while (i < pt_size) { | ||
199 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); | ||
184 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | 200 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); |
201 | i += 8; | ||
185 | } | 202 | } |
186 | 203 | ||
187 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); | 204 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); |
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
416 | { | 433 | { |
417 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 434 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
418 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 435 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; |
419 | uint32_t pte, pte_end, vram; | 436 | struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; |
437 | uint32_t pte, pte_end; | ||
438 | uint64_t vram; | ||
420 | 439 | ||
421 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) | 440 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) |
422 | return -EINVAL; | 441 | return -EINVAL; |
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
424 | NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", | 443 | NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", |
425 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); | 444 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); |
426 | 445 | ||
427 | pte = (gpuobj->im_pramin->start >> 12) << 3; | 446 | pte = (gpuobj->im_pramin->start >> 12) << 1; |
428 | pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; | 447 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; |
429 | vram = gpuobj->im_backing_start; | 448 | vram = gpuobj->im_backing_start; |
430 | 449 | ||
431 | NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", | 450 | NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", |
432 | gpuobj->im_pramin->start, pte, pte_end); | 451 | gpuobj->im_pramin->start, pte, pte_end); |
433 | NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); | 452 | NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); |
434 | 453 | ||
454 | vram |= 1; | ||
455 | if (dev_priv->vram_sys_base) { | ||
456 | vram += dev_priv->vram_sys_base; | ||
457 | vram |= 0x30; | ||
458 | } | ||
459 | |||
435 | dev_priv->engine.instmem.prepare_access(dev, true); | 460 | dev_priv->engine.instmem.prepare_access(dev, true); |
436 | while (pte < pte_end) { | 461 | while (pte < pte_end) { |
437 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); | 462 | nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); |
438 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); | 463 | nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); |
439 | |||
440 | pte += 8; | ||
441 | vram += NV50_INSTMEM_PAGE_SIZE; | 464 | vram += NV50_INSTMEM_PAGE_SIZE; |
442 | } | 465 | } |
443 | dev_priv->engine.instmem.finish_access(dev); | 466 | dev_priv->engine.instmem.finish_access(dev); |
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
470 | if (gpuobj->im_bound == 0) | 493 | if (gpuobj->im_bound == 0) |
471 | return -EINVAL; | 494 | return -EINVAL; |
472 | 495 | ||
473 | pte = (gpuobj->im_pramin->start >> 12) << 3; | 496 | pte = (gpuobj->im_pramin->start >> 12) << 1; |
474 | pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; | 497 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; |
475 | 498 | ||
476 | dev_priv->engine.instmem.prepare_access(dev, true); | 499 | dev_priv->engine.instmem.prepare_access(dev, true); |
477 | while (pte < pte_end) { | 500 | while (pte < pte_end) { |
478 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); | 501 | nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); |
479 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); | 502 | nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); |
480 | pte += 8; | ||
481 | } | 503 | } |
482 | dev_priv->engine.instmem.finish_access(dev); | 504 | dev_priv->engine.instmem.finish_access(dev); |
483 | 505 | ||
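Part of the nv50_instmem change is an indexing switch: the PRAMIN page-table entries are now addressed as 32-bit words, two per 4 KiB page (hence the << 1), where the old code computed a byte offset (<< 3) and divided by four at every nv_wo32() call; the written value also picks up the same present/system-memory bits as the VM path above. Both indexing schemes land on the same words; a quick check with a hypothetical im_pramin->start:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x5000;	/* hypothetical im_pramin->start */

	/* Old scheme: byte offset, divided by 4 at write time. */
	unsigned old_pte = (start >> 12) << 3;
	unsigned old_lo = (old_pte + 0) / 4, old_hi = (old_pte + 4) / 4;

	/* New scheme: 32-bit word index, incremented per write. */
	unsigned new_pte = (start >> 12) << 1;
	unsigned new_lo = new_pte, new_hi = new_pte + 1;

	printf("old words: %u,%u  new words: %u,%u\n",
	       old_lo, old_hi, new_lo, new_hi);	/* 10,11  10,11 */
	return 0;
}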
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index e3b44562d265..7f152f66f196 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <asm/unaligned.h> | ||
27 | 28 | ||
28 | #define ATOM_DEBUG | 29 | #define ATOM_DEBUG |
29 | 30 | ||
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
212 | case ATOM_ARG_PS: | 213 | case ATOM_ARG_PS: |
213 | idx = U8(*ptr); | 214 | idx = U8(*ptr); |
214 | (*ptr)++; | 215 | (*ptr)++; |
215 | val = le32_to_cpu(ctx->ps[idx]); | 216 | /* get_unaligned_le32 avoids unaligned accesses from atombios |
217 | * tables, noticed on a DEC Alpha. */ | ||
218 | val = get_unaligned_le32((u32 *)&ctx->ps[idx]); | ||
216 | if (print) | 219 | if (print) |
217 | DEBUG("PS[0x%02X,0x%04X]", idx, val); | 220 | DEBUG("PS[0x%02X,0x%04X]", idx, val); |
218 | break; | 221 | break; |
@@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) | |||
640 | uint8_t count = U8((*ptr)++); | 643 | uint8_t count = U8((*ptr)++); |
641 | SDEBUG(" count: %d\n", count); | 644 | SDEBUG(" count: %d\n", count); |
642 | if (arg == ATOM_UNIT_MICROSEC) | 645 | if (arg == ATOM_UNIT_MICROSEC) |
643 | schedule_timeout_uninterruptible(usecs_to_jiffies(count)); | 646 | udelay(count); |
644 | else | 647 | else |
645 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); | 648 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); |
646 | } | 649 | } |
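Two independent fixes in atom.c: get_unaligned_le32() is used because the parameter block passed to AtomBIOS tables is only byte-aligned, and a plain 32-bit load through a misaligned pointer is unsafe on strict-alignment machines such as the Alpha noted in the comment; and microsecond delays switch to udelay() because usecs_to_jiffies() rounds any sub-jiffy delay up to a whole tick, turning a few microseconds of requested delay into at least a millisecond of sleep. Outside the kernel the unaligned load is usually written with memcpy(), which compilers lower to a safe access; a small stand-alone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable stand-in for the kernel's get_unaligned_le32(). */
static uint32_t load_le32_unaligned(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, 4);	/* byte copies never require alignment */
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* A 1-byte offset into the buffer gives a misaligned 32-bit value. */
	uint8_t buf[8] = { 0xff, 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08x\n", load_le32_unaligned(buf + 1));	/* 0x12345678 */
	return 0;
}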
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index b32eeea5bb8b..99915a682d59 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -350,7 +350,7 @@ retry: | |||
350 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 350 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
351 | 351 | ||
352 | if (args.ucReplyStatus && !args.ucDataOutLen) { | 352 | if (args.ucReplyStatus && !args.ucDataOutLen) { |
353 | if (args.ucReplyStatus == 0x20 && retry_count < 10) | 353 | if (args.ucReplyStatus == 0x20 && retry_count++ < 10) |
354 | goto retry; | 354 | goto retry; |
355 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", | 355 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", |
356 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], | 356 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], |
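The AUX retry fix is a single character but matters: the counter is otherwise never advanced on this path, so a persistent defer reply (0x20) made the goto retry loop spin forever; with the post-increment it gives up after ten retries. A trivial illustration of the difference:

#include <stdio.h>

int main(void)
{
	int retry_count = 0, attempts = 0;

	/* Each pass models one deferred AUX transaction. */
	while (attempts < 1000 && retry_count++ < 10)
		attempts++;

	printf("attempts = %d\n", attempts);	/* 10; without the ++ it hits 1000 */
	return 0;
}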
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index af1c3ca8a4cb..446b765ac72a 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev) | |||
543 | void r600_vb_ib_put(struct radeon_device *rdev) | 543 | void r600_vb_ib_put(struct radeon_device *rdev) |
544 | { | 544 | { |
545 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); | 545 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); |
546 | mutex_lock(&rdev->ib_pool.mutex); | ||
547 | list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); | ||
548 | mutex_unlock(&rdev->ib_pool.mutex); | ||
549 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 546 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
550 | } | 547 | } |
551 | 548 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 6d5a711c2e91..75bcf35a0931 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev, | |||
1428 | 1428 | ||
1429 | gb_tiling_config |= R600_BANK_SWAPS(1); | 1429 | gb_tiling_config |= R600_BANK_SWAPS(1); |
1430 | 1430 | ||
1431 | backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, | 1431 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) |
1432 | dev_priv->r600_max_backends, | 1432 | backend_map = 0x28; |
1433 | (0xff << dev_priv->r600_max_backends) & 0xff); | 1433 | else |
1434 | backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, | ||
1435 | dev_priv->r600_max_backends, | ||
1436 | (0xff << dev_priv->r600_max_backends) & 0xff); | ||
1434 | gb_tiling_config |= R600_BACKEND_MAP(backend_map); | 1437 | gb_tiling_config |= R600_BACKEND_MAP(backend_map); |
1435 | 1438 | ||
1436 | cc_gc_shader_pipe_config = | 1439 | cc_gc_shader_pipe_config = |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index f57480ba1355..c0356bb193e5 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -96,6 +96,7 @@ extern int radeon_audio; | |||
96 | * symbol; | 96 | * symbol; |
97 | */ | 97 | */ |
98 | #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ | 98 | #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
99 | /* RADEON_IB_POOL_SIZE must be a power of 2 */ | ||
99 | #define RADEON_IB_POOL_SIZE 16 | 100 | #define RADEON_IB_POOL_SIZE 16 |
100 | #define RADEON_DEBUGFS_MAX_NUM_FILES 32 | 101 | #define RADEON_DEBUGFS_MAX_NUM_FILES 32 |
101 | #define RADEONFB_CONN_LIMIT 4 | 102 | #define RADEONFB_CONN_LIMIT 4 |
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); | |||
363 | */ | 364 | */ |
364 | struct radeon_ib { | 365 | struct radeon_ib { |
365 | struct list_head list; | 366 | struct list_head list; |
366 | unsigned long idx; | 367 | unsigned idx; |
367 | uint64_t gpu_addr; | 368 | uint64_t gpu_addr; |
368 | struct radeon_fence *fence; | 369 | struct radeon_fence *fence; |
369 | uint32_t *ptr; | 370 | uint32_t *ptr; |
370 | uint32_t length_dw; | 371 | uint32_t length_dw; |
372 | bool free; | ||
371 | }; | 373 | }; |
372 | 374 | ||
373 | /* | 375 | /* |
@@ -377,10 +379,9 @@ struct radeon_ib { | |||
377 | struct radeon_ib_pool { | 379 | struct radeon_ib_pool { |
378 | struct mutex mutex; | 380 | struct mutex mutex; |
379 | struct radeon_bo *robj; | 381 | struct radeon_bo *robj; |
380 | struct list_head scheduled_ibs; | ||
381 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; | 382 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; |
382 | bool ready; | 383 | bool ready; |
383 | DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); | 384 | unsigned head_id; |
384 | }; | 385 | }; |
385 | 386 | ||
386 | struct radeon_cp { | 387 | struct radeon_cp { |
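The indirect-buffer pool bookkeeping changes shape here: the allocation bitmap and the scheduled_ibs list are replaced by a per-IB free flag plus a head_id cursor, and allocation becomes a circular scan over the array. That is also why the new comment insists RADEON_IB_POOL_SIZE stays a power of two, since the scan wraps with i &= (RADEON_IB_POOL_SIZE - 1). A userspace model of just the allocator loop (names invented, locking and fences omitted):

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16	/* must be a power of two for the & mask to wrap */

struct pool {
	bool free[POOL_SIZE];
	unsigned head;
};

/* Scan circularly from head for a free slot, as radeon_ib_get() now does. */
static int pool_alloc(struct pool *p)
{
	unsigned i, c;

	for (i = p->head, c = 0; c < POOL_SIZE; c++, i++) {
		i &= POOL_SIZE - 1;
		if (p->free[i]) {
			p->free[i] = false;
			p->head = (i + 1) & (POOL_SIZE - 1);
			return (int)i;
		}
	}
	return -1;	/* all in flight: the driver returns -EBUSY here */
}

int main(void)
{
	struct pool p = { .head = 0 };
	int n;

	for (n = 0; n < POOL_SIZE; n++)
		p.free[n] = true;

	printf("%d %d\n", pool_alloc(&p), pool_alloc(&p));	/* 0 1 */
	p.free[0] = true;	/* models the fence retiring IB 0 */
	printf("%d\n", pool_alloc(&p));				/* 0 */
	return 0;
}

In the driver an IB goes back to free either when its old fence is waited on at reuse time or, after this patch, as soon as it is scheduled, since from then on the emitted fence protects its contents.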
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 2dcda6115874..4d8831548a5f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
206 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 206 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
207 | } | 207 | } |
208 | 208 | ||
209 | /* Asrock RS600 board lists the DVI port as HDMI */ | ||
210 | if ((dev->pdev->device == 0x7941) && | ||
211 | (dev->pdev->subsystem_vendor == 0x1849) && | ||
212 | (dev->pdev->subsystem_device == 0x7941)) { | ||
213 | if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
214 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
215 | *connector_type = DRM_MODE_CONNECTOR_DVID; | ||
216 | } | ||
217 | |||
209 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 218 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
210 | if ((dev->pdev->device == 0x7941) && | 219 | if ((dev->pdev->device == 0x7941) && |
211 | (dev->pdev->subsystem_vendor == 0x147b) && | 220 | (dev->pdev->subsystem_vendor == 0x147b) && |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e7b19440102e..22d476160d52 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -1279,47 +1279,47 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1279 | rdev->mode_info.connector_table = radeon_connector_table; | 1279 | rdev->mode_info.connector_table = radeon_connector_table; |
1280 | if (rdev->mode_info.connector_table == CT_NONE) { | 1280 | if (rdev->mode_info.connector_table == CT_NONE) { |
1281 | #ifdef CONFIG_PPC_PMAC | 1281 | #ifdef CONFIG_PPC_PMAC |
1282 | if (machine_is_compatible("PowerBook3,3")) { | 1282 | if (of_machine_is_compatible("PowerBook3,3")) { |
1283 | /* powerbook with VGA */ | 1283 | /* powerbook with VGA */ |
1284 | rdev->mode_info.connector_table = CT_POWERBOOK_VGA; | 1284 | rdev->mode_info.connector_table = CT_POWERBOOK_VGA; |
1285 | } else if (machine_is_compatible("PowerBook3,4") || | 1285 | } else if (of_machine_is_compatible("PowerBook3,4") || |
1286 | machine_is_compatible("PowerBook3,5")) { | 1286 | of_machine_is_compatible("PowerBook3,5")) { |
1287 | /* powerbook with internal tmds */ | 1287 | /* powerbook with internal tmds */ |
1288 | rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL; | 1288 | rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL; |
1289 | } else if (machine_is_compatible("PowerBook5,1") || | 1289 | } else if (of_machine_is_compatible("PowerBook5,1") || |
1290 | machine_is_compatible("PowerBook5,2") || | 1290 | of_machine_is_compatible("PowerBook5,2") || |
1291 | machine_is_compatible("PowerBook5,3") || | 1291 | of_machine_is_compatible("PowerBook5,3") || |
1292 | machine_is_compatible("PowerBook5,4") || | 1292 | of_machine_is_compatible("PowerBook5,4") || |
1293 | machine_is_compatible("PowerBook5,5")) { | 1293 | of_machine_is_compatible("PowerBook5,5")) { |
1294 | /* powerbook with external single link tmds (sil164) */ | 1294 | /* powerbook with external single link tmds (sil164) */ |
1295 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; | 1295 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; |
1296 | } else if (machine_is_compatible("PowerBook5,6")) { | 1296 | } else if (of_machine_is_compatible("PowerBook5,6")) { |
1297 | /* powerbook with external dual or single link tmds */ | 1297 | /* powerbook with external dual or single link tmds */ |
1298 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; | 1298 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; |
1299 | } else if (machine_is_compatible("PowerBook5,7") || | 1299 | } else if (of_machine_is_compatible("PowerBook5,7") || |
1300 | machine_is_compatible("PowerBook5,8") || | 1300 | of_machine_is_compatible("PowerBook5,8") || |
1301 | machine_is_compatible("PowerBook5,9")) { | 1301 | of_machine_is_compatible("PowerBook5,9")) { |
1302 | /* PowerBook6,2 ? */ | 1302 | /* PowerBook6,2 ? */ |
1303 | /* powerbook with external dual link tmds (sil1178?) */ | 1303 | /* powerbook with external dual link tmds (sil1178?) */ |
1304 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; | 1304 | rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; |
1305 | } else if (machine_is_compatible("PowerBook4,1") || | 1305 | } else if (of_machine_is_compatible("PowerBook4,1") || |
1306 | machine_is_compatible("PowerBook4,2") || | 1306 | of_machine_is_compatible("PowerBook4,2") || |
1307 | machine_is_compatible("PowerBook4,3") || | 1307 | of_machine_is_compatible("PowerBook4,3") || |
1308 | machine_is_compatible("PowerBook6,3") || | 1308 | of_machine_is_compatible("PowerBook6,3") || |
1309 | machine_is_compatible("PowerBook6,5") || | 1309 | of_machine_is_compatible("PowerBook6,5") || |
1310 | machine_is_compatible("PowerBook6,7")) { | 1310 | of_machine_is_compatible("PowerBook6,7")) { |
1311 | /* ibook */ | 1311 | /* ibook */ |
1312 | rdev->mode_info.connector_table = CT_IBOOK; | 1312 | rdev->mode_info.connector_table = CT_IBOOK; |
1313 | } else if (machine_is_compatible("PowerMac4,4")) { | 1313 | } else if (of_machine_is_compatible("PowerMac4,4")) { |
1314 | /* emac */ | 1314 | /* emac */ |
1315 | rdev->mode_info.connector_table = CT_EMAC; | 1315 | rdev->mode_info.connector_table = CT_EMAC; |
1316 | } else if (machine_is_compatible("PowerMac10,1")) { | 1316 | } else if (of_machine_is_compatible("PowerMac10,1")) { |
1317 | /* mini with internal tmds */ | 1317 | /* mini with internal tmds */ |
1318 | rdev->mode_info.connector_table = CT_MINI_INTERNAL; | 1318 | rdev->mode_info.connector_table = CT_MINI_INTERNAL; |
1319 | } else if (machine_is_compatible("PowerMac10,2")) { | 1319 | } else if (of_machine_is_compatible("PowerMac10,2")) { |
1320 | /* mini with external tmds */ | 1320 | /* mini with external tmds */ |
1321 | rdev->mode_info.connector_table = CT_MINI_EXTERNAL; | 1321 | rdev->mode_info.connector_table = CT_MINI_EXTERNAL; |
1322 | } else if (machine_is_compatible("PowerMac12,1")) { | 1322 | } else if (of_machine_is_compatible("PowerMac12,1")) { |
1323 | /* PowerMac8,1 ? */ | 1323 | /* PowerMac8,1 ? */ |
1324 | /* imac g5 isight */ | 1324 | /* imac g5 isight */ |
1325 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; | 1325 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 238188540017..65f81942f399 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
780 | * connected and the DVI port disconnected. If the edid doesn't | 780 | * connected and the DVI port disconnected. If the edid doesn't |
781 | * say HDMI, vice versa. | 781 | * say HDMI, vice versa. |
782 | */ | 782 | */ |
783 | if (radeon_connector->shared_ddc && connector_status_connected) { | 783 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { |
784 | struct drm_device *dev = connector->dev; | 784 | struct drm_device *dev = connector->dev; |
785 | struct drm_connector *list_connector; | 785 | struct drm_connector *list_connector; |
786 | struct radeon_connector *list_radeon_connector; | 786 | struct radeon_connector *list_radeon_connector; |
@@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1060 | return; | 1060 | return; |
1061 | } | 1061 | } |
1062 | if (radeon_connector->ddc_bus && i2c_bus->valid) { | 1062 | if (radeon_connector->ddc_bus && i2c_bus->valid) { |
1063 | if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, | 1063 | if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { |
1064 | sizeof(struct radeon_i2c_bus_rec)) == 0) { | ||
1065 | radeon_connector->shared_ddc = true; | 1064 | radeon_connector->shared_ddc = true; |
1066 | shared_ddc = true; | 1065 | shared_ddc = true; |
1067 | } | 1066 | } |
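The first radeon_connectors.c hunk fixes a classic truthiness slip: the old condition tested the constant connector_status_connected, which is non-zero, instead of the detect result, so the shared-DDC HDMI/DVI fixup ran no matter what detection returned. The pitfall reduced to a few lines (enum values illustrative):

#include <stdio.h>

enum status { status_connected = 1, status_disconnected = 2 };

int main(void)
{
	enum status ret = status_disconnected;
	int shared_ddc = 1;

	/* Old form: the constant is always truthy, the result is ignored. */
	printf("%d\n", shared_ddc && status_connected);		/* 1 */
	/* Fixed form: actually looks at the detect result. */
	printf("%d\n", shared_ddc && (ret == status_connected));	/* 0 */
	return 0;
}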
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 1190148cf5e6..e9d085021c1f 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
86 | &p->validated); | 86 | &p->validated); |
87 | } | 87 | } |
88 | } | 88 | } |
89 | return radeon_bo_list_validate(&p->validated, p->ib->fence); | 89 | return radeon_bo_list_validate(&p->validated); |
90 | } | 90 | } |
91 | 91 | ||
92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | 92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) |
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
189 | { | 189 | { |
190 | unsigned i; | 190 | unsigned i; |
191 | 191 | ||
192 | if (error && parser->ib) { | 192 | if (!error && parser->ib) { |
193 | radeon_bo_list_unvalidate(&parser->validated, | 193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); |
194 | parser->ib->fence); | ||
195 | } else { | ||
196 | radeon_bo_list_unreserve(&parser->validated); | ||
197 | } | 194 | } |
195 | radeon_bo_list_unreserve(&parser->validated); | ||
198 | for (i = 0; i < parser->nrelocs; i++) { | 196 | for (i = 0; i < parser->nrelocs; i++) { |
199 | if (parser->relocs[i].gobj) { | 197 | if (parser->relocs[i].gobj) { |
200 | mutex_lock(&parser->rdev->ddev->struct_mutex); | 198 | mutex_lock(&parser->rdev->ddev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index e13785282a82..c57ad606504d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -106,9 +106,10 @@ | |||
106 | * 1.29- R500 3D cmd buffer support | 106 | * 1.29- R500 3D cmd buffer support |
107 | * 1.30- Add support for occlusion queries | 107 | * 1.30- Add support for occlusion queries |
108 | * 1.31- Add support for num Z pipes from GET_PARAM | 108 | * 1.31- Add support for num Z pipes from GET_PARAM |
109 | * 1.32- fixes for rv740 setup | ||
109 | */ | 110 | */ |
110 | #define DRIVER_MAJOR 1 | 111 | #define DRIVER_MAJOR 1 |
111 | #define DRIVER_MINOR 31 | 112 | #define DRIVER_MINOR 32 |
112 | #define DRIVER_PATCHLEVEL 0 | 113 | #define DRIVER_PATCHLEVEL 0 |
113 | 114 | ||
114 | enum radeon_cp_microcode_version { | 115 | enum radeon_cp_microcode_version { |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d72a71bff218..f1da370928eb 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head) | |||
306 | } | 306 | } |
307 | } | 307 | } |
308 | 308 | ||
309 | int radeon_bo_list_validate(struct list_head *head, void *fence) | 309 | int radeon_bo_list_validate(struct list_head *head) |
310 | { | 310 | { |
311 | struct radeon_bo_list *lobj; | 311 | struct radeon_bo_list *lobj; |
312 | struct radeon_bo *bo; | 312 | struct radeon_bo *bo; |
313 | struct radeon_fence *old_fence = NULL; | ||
314 | int r; | 313 | int r; |
315 | 314 | ||
316 | r = radeon_bo_list_reserve(head); | 315 | r = radeon_bo_list_reserve(head); |
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence) | |||
334 | } | 333 | } |
335 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); | 334 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); |
336 | lobj->tiling_flags = bo->tiling_flags; | 335 | lobj->tiling_flags = bo->tiling_flags; |
337 | if (fence) { | ||
338 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; | ||
339 | bo->tbo.sync_obj = radeon_fence_ref(fence); | ||
340 | bo->tbo.sync_obj_arg = NULL; | ||
341 | } | ||
342 | if (old_fence) { | ||
343 | radeon_fence_unref(&old_fence); | ||
344 | } | ||
345 | } | 336 | } |
346 | return 0; | 337 | return 0; |
347 | } | 338 | } |
348 | 339 | ||
349 | void radeon_bo_list_unvalidate(struct list_head *head, void *fence) | 340 | void radeon_bo_list_fence(struct list_head *head, void *fence) |
350 | { | 341 | { |
351 | struct radeon_bo_list *lobj; | 342 | struct radeon_bo_list *lobj; |
352 | struct radeon_fence *old_fence; | 343 | struct radeon_bo *bo; |
353 | 344 | struct radeon_fence *old_fence = NULL; | |
354 | if (fence) | 345 | |
355 | list_for_each_entry(lobj, head, list) { | 346 | list_for_each_entry(lobj, head, list) { |
356 | old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); | 347 | bo = lobj->bo; |
357 | if (old_fence == fence) { | 348 | spin_lock(&bo->tbo.lock); |
358 | lobj->bo->tbo.sync_obj = NULL; | 349 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; |
359 | radeon_fence_unref(&old_fence); | 350 | bo->tbo.sync_obj = radeon_fence_ref(fence); |
360 | } | 351 | bo->tbo.sync_obj_arg = NULL; |
352 | spin_unlock(&bo->tbo.lock); | ||
353 | if (old_fence) { | ||
354 | radeon_fence_unref(&old_fence); | ||
361 | } | 355 | } |
362 | radeon_bo_list_unreserve(head); | 356 | } |
363 | } | 357 | } |
364 | 358 | ||
365 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 359 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index a02f18011ad1..7ab43de1e244 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | |||
156 | struct list_head *head); | 156 | struct list_head *head); |
157 | extern int radeon_bo_list_reserve(struct list_head *head); | 157 | extern int radeon_bo_list_reserve(struct list_head *head); |
158 | extern void radeon_bo_list_unreserve(struct list_head *head); | 158 | extern void radeon_bo_list_unreserve(struct list_head *head); |
159 | extern int radeon_bo_list_validate(struct list_head *head, void *fence); | 159 | extern int radeon_bo_list_validate(struct list_head *head); |
160 | extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); | 160 | extern void radeon_bo_list_fence(struct list_head *head, void *fence); |
161 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 161 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
162 | struct vm_area_struct *vma); | 162 | struct vm_area_struct *vma); |
163 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, | 163 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 4d12b2d17b4d..6579eb4c1f28 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) | |||
41 | { | 41 | { |
42 | struct radeon_fence *fence; | 42 | struct radeon_fence *fence; |
43 | struct radeon_ib *nib; | 43 | struct radeon_ib *nib; |
44 | unsigned long i; | 44 | int r = 0, i, c; |
45 | int r = 0; | ||
46 | 45 | ||
47 | *ib = NULL; | 46 | *ib = NULL; |
48 | r = radeon_fence_create(rdev, &fence); | 47 | r = radeon_fence_create(rdev, &fence); |
49 | if (r) { | 48 | if (r) { |
50 | DRM_ERROR("failed to create fence for new IB\n"); | 49 | dev_err(rdev->dev, "failed to create fence for new IB\n"); |
51 | return r; | 50 | return r; |
52 | } | 51 | } |
53 | mutex_lock(&rdev->ib_pool.mutex); | 52 | mutex_lock(&rdev->ib_pool.mutex); |
54 | i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | 53 | for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) { |
55 | if (i < RADEON_IB_POOL_SIZE) { | 54 | i &= (RADEON_IB_POOL_SIZE - 1); |
56 | set_bit(i, rdev->ib_pool.alloc_bm); | 55 | if (rdev->ib_pool.ibs[i].free) { |
57 | rdev->ib_pool.ibs[i].length_dw = 0; | 56 | nib = &rdev->ib_pool.ibs[i]; |
58 | *ib = &rdev->ib_pool.ibs[i]; | 57 | break; |
59 | mutex_unlock(&rdev->ib_pool.mutex); | 58 | } |
60 | goto out; | ||
61 | } | 59 | } |
62 | if (list_empty(&rdev->ib_pool.scheduled_ibs)) { | 60 | if (nib == NULL) { |
63 | /* we go do nothings here */ | 61 | /* This should never happen, it means we allocated all |
62 | * IB and haven't scheduled one yet, return EBUSY to | ||
63 | * userspace hoping that on ioctl recall we get better | ||
64 | * luck | ||
65 | */ | ||
66 | dev_err(rdev->dev, "no free indirect buffer !\n"); | ||
64 | mutex_unlock(&rdev->ib_pool.mutex); | 67 | mutex_unlock(&rdev->ib_pool.mutex); |
65 | DRM_ERROR("all IB allocated none scheduled.\n"); | 68 | radeon_fence_unref(&fence); |
66 | r = -EINVAL; | 69 | return -EBUSY; |
67 | goto out; | ||
68 | } | 70 | } |
69 | /* get the first ib on the scheduled list */ | 71 | rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1); |
70 | nib = list_entry(rdev->ib_pool.scheduled_ibs.next, | 72 | nib->free = false; |
71 | struct radeon_ib, list); | 73 | if (nib->fence) { |
72 | if (nib->fence == NULL) { | ||
73 | /* we go do nothings here */ | ||
74 | mutex_unlock(&rdev->ib_pool.mutex); | 74 | mutex_unlock(&rdev->ib_pool.mutex); |
75 | DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); | 75 | r = radeon_fence_wait(nib->fence, false); |
76 | r = -EINVAL; | 76 | if (r) { |
77 | goto out; | 77 | dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n", |
78 | } | 78 | nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw); |
79 | mutex_unlock(&rdev->ib_pool.mutex); | 79 | mutex_lock(&rdev->ib_pool.mutex); |
80 | 80 | nib->free = true; | |
81 | r = radeon_fence_wait(nib->fence, false); | 81 | mutex_unlock(&rdev->ib_pool.mutex); |
82 | if (r) { | 82 | radeon_fence_unref(&fence); |
83 | DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, | 83 | return r; |
84 | (unsigned long)nib->gpu_addr, nib->length_dw); | 84 | } |
85 | DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); | 85 | mutex_lock(&rdev->ib_pool.mutex); |
86 | goto out; | ||
87 | } | 86 | } |
88 | radeon_fence_unref(&nib->fence); | 87 | radeon_fence_unref(&nib->fence); |
89 | 88 | nib->fence = fence; | |
90 | nib->length_dw = 0; | 89 | nib->length_dw = 0; |
91 | |||
92 | /* scheduled list is accessed here */ | ||
93 | mutex_lock(&rdev->ib_pool.mutex); | ||
94 | list_del(&nib->list); | ||
95 | INIT_LIST_HEAD(&nib->list); | ||
96 | mutex_unlock(&rdev->ib_pool.mutex); | 90 | mutex_unlock(&rdev->ib_pool.mutex); |
97 | |||
98 | *ib = nib; | 91 | *ib = nib; |
99 | out: | 92 | return 0; |
100 | if (r) { | ||
101 | radeon_fence_unref(&fence); | ||
102 | } else { | ||
103 | (*ib)->fence = fence; | ||
104 | } | ||
105 | return r; | ||
106 | } | 93 | } |
107 | 94 | ||
108 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | 95 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) |
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | |||
113 | if (tmp == NULL) { | 100 | if (tmp == NULL) { |
114 | return; | 101 | return; |
115 | } | 102 | } |
116 | mutex_lock(&rdev->ib_pool.mutex); | 103 | if (!tmp->fence->emited) |
117 | if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) { | ||
118 | /* IB is scheduled & not signaled don't do anythings */ | ||
119 | mutex_unlock(&rdev->ib_pool.mutex); | ||
120 | return; | ||
121 | } | ||
122 | list_del(&tmp->list); | ||
123 | INIT_LIST_HEAD(&tmp->list); | ||
124 | if (tmp->fence) | ||
125 | radeon_fence_unref(&tmp->fence); | 104 | radeon_fence_unref(&tmp->fence); |
126 | 105 | mutex_lock(&rdev->ib_pool.mutex); | |
127 | tmp->length_dw = 0; | 106 | tmp->free = true; |
128 | clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); | ||
129 | mutex_unlock(&rdev->ib_pool.mutex); | 107 | mutex_unlock(&rdev->ib_pool.mutex); |
130 | } | 108 | } |
131 | 109 | ||
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | |||
135 | 113 | ||
136 | if (!ib->length_dw || !rdev->cp.ready) { | 114 | if (!ib->length_dw || !rdev->cp.ready) { |
137 | /* TODO: Nothings in the ib we should report. */ | 115 | /* TODO: Nothings in the ib we should report. */ |
138 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); | 116 | DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); |
139 | return -EINVAL; | 117 | return -EINVAL; |
140 | } | 118 | } |
141 | 119 | ||
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | |||
148 | radeon_ring_ib_execute(rdev, ib); | 126 | radeon_ring_ib_execute(rdev, ib); |
149 | radeon_fence_emit(rdev, ib->fence); | 127 | radeon_fence_emit(rdev, ib->fence); |
150 | mutex_lock(&rdev->ib_pool.mutex); | 128 | mutex_lock(&rdev->ib_pool.mutex); |
151 | list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); | 129 | /* once scheduled IB is considered free and protected by the fence */ |
130 | ib->free = true; | ||
152 | mutex_unlock(&rdev->ib_pool.mutex); | 131 | mutex_unlock(&rdev->ib_pool.mutex); |
153 | radeon_ring_unlock_commit(rdev); | 132 | radeon_ring_unlock_commit(rdev); |
154 | return 0; | 133 | return 0; |
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
164 | if (rdev->ib_pool.robj) | 143 | if (rdev->ib_pool.robj) |
165 | return 0; | 144 | return 0; |
166 | /* Allocate 1M object buffer */ | 145 | /* Allocate 1M object buffer */ |
167 | INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); | ||
168 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 146 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, |
169 | true, RADEON_GEM_DOMAIN_GTT, | 147 | true, RADEON_GEM_DOMAIN_GTT, |
170 | &rdev->ib_pool.robj); | 148 | &rdev->ib_pool.robj); |
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
195 | rdev->ib_pool.ibs[i].ptr = ptr + offset; | 173 | rdev->ib_pool.ibs[i].ptr = ptr + offset; |
196 | rdev->ib_pool.ibs[i].idx = i; | 174 | rdev->ib_pool.ibs[i].idx = i; |
197 | rdev->ib_pool.ibs[i].length_dw = 0; | 175 | rdev->ib_pool.ibs[i].length_dw = 0; |
198 | INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); | 176 | rdev->ib_pool.ibs[i].free = true; |
199 | } | 177 | } |
200 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | 178 | rdev->ib_pool.head_id = 0; |
201 | rdev->ib_pool.ready = true; | 179 | rdev->ib_pool.ready = true; |
202 | DRM_INFO("radeon: ib pool ready.\n"); | 180 | DRM_INFO("radeon: ib pool ready.\n"); |
203 | if (radeon_debugfs_ib_init(rdev)) { | 181 | if (radeon_debugfs_ib_init(rdev)) { |
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
214 | return; | 192 | return; |
215 | } | 193 | } |
216 | mutex_lock(&rdev->ib_pool.mutex); | 194 | mutex_lock(&rdev->ib_pool.mutex); |
217 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | ||
218 | if (rdev->ib_pool.robj) { | 195 | if (rdev->ib_pool.robj) { |
219 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); | 196 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); |
220 | if (likely(r == 0)) { | 197 | if (likely(r == 0)) { |
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) | |||
363 | if (ib == NULL) { | 340 | if (ib == NULL) { |
364 | return 0; | 341 | return 0; |
365 | } | 342 | } |
366 | seq_printf(m, "IB %04lu\n", ib->idx); | 343 | seq_printf(m, "IB %04u\n", ib->idx); |
367 | seq_printf(m, "IB fence %p\n", ib->fence); | 344 | seq_printf(m, "IB fence %p\n", ib->fence); |
368 | seq_printf(m, "IB size %05u dwords\n", ib->length_dw); | 345 | seq_printf(m, "IB size %05u dwords\n", ib->length_dw); |
369 | for (i = 0; i < ib->length_dw; i++) { | 346 | for (i = 0; i < ib->length_dw; i++) { |
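
Editor's note on the radeon_ring.c hunks above: the patch drops the scheduled-IB list and allocation bitmap in favour of a plain ring of pool entries. Each IB carries a free flag, allocation starts at head_id, and the index wraps with (idx + 1) & (RADEON_IB_POOL_SIZE - 1), which is why the pool size must stay a power of two; a busy entry is reclaimed by waiting on its fence. A minimal user-space sketch of that allocation loop, assuming illustrative names and a toy main() harness rather than the kernel structures:

/*
 * Standalone model of the round-robin pool the patch introduces (names
 * and the main() harness are illustrative, not the kernel's).
 */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16			/* must stay a power of two */

struct pool_entry {
	bool free;
	int fence;			/* 0 = nothing to wait for */
};

static struct pool_entry pool[POOL_SIZE];
static unsigned int head_id;

/* Returns an index, or -1 when every entry is still busy (the -EBUSY case). */
static int pool_get(void)
{
	unsigned int i, idx = head_id;

	for (i = 0; i < POOL_SIZE; i++) {
		if (pool[idx].free) {
			pool[idx].free = false;
			head_id = (idx + 1) & (POOL_SIZE - 1);
			return (int)idx;
		}
		idx = (idx + 1) & (POOL_SIZE - 1);
	}
	return -1;
}

int main(void)
{
	unsigned int i;
	int a, b;

	for (i = 0; i < POOL_SIZE; i++)
		pool[i].free = true;
	a = pool_get();
	b = pool_get();
	printf("first=%d second=%d\n", a, b);
	return 0;
}
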
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 5943d561fd1e..03021674d097 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
549 | 549 | ||
550 | gb_tiling_config |= BANK_SWAPS(1); | 550 | gb_tiling_config |= BANK_SWAPS(1); |
551 | 551 | ||
552 | backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, | 552 | if (rdev->family == CHIP_RV740) |
553 | rdev->config.rv770.max_backends, | 553 | backend_map = 0x28; |
554 | (0xff << rdev->config.rv770.max_backends) & 0xff); | 554 | else |
555 | backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, | ||
556 | rdev->config.rv770.max_backends, | ||
557 | (0xff << rdev->config.rv770.max_backends) & 0xff); | ||
555 | gb_tiling_config |= BACKEND_MAP(backend_map); | 558 | gb_tiling_config |= BACKEND_MAP(backend_map); |
556 | 559 | ||
557 | cc_gc_shader_pipe_config = | 560 | cc_gc_shader_pipe_config = |
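
The rv770.c hunk only special-cases RV740, which now gets a fixed backend map instead of the computed one. A reduced sketch of that selection, where the helper and every value except 0x28 are placeholders:

/*
 * Sketch of the per-family selection introduced above: RV740 gets a
 * fixed backend map, every other chip keeps the computed value. The
 * helper and the 0x66 constant are stand-ins, not the real r700
 * register math.
 */
#include <stdint.h>
#include <stdio.h>

enum chip_family { CHIP_RV740, CHIP_RV770_OTHER };

static uint32_t compute_backend_map(void)
{
	return 0x66;	/* stand-in for r700_get_tile_pipe_to_backend_map() */
}

static uint32_t pick_backend_map(enum chip_family family)
{
	if (family == CHIP_RV740)
		return 0x28;	/* value hard-coded by the patch */
	return compute_backend_map();
}

int main(void)
{
	printf("0x%02x\n", (unsigned)pick_backend_map(CHIP_RV740));
	return 0;
}
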
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1a3e909b7bba..c7320ce4567d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, | |||
1020 | struct ttm_mem_reg *mem) | 1020 | struct ttm_mem_reg *mem) |
1021 | { | 1021 | { |
1022 | int i; | 1022 | int i; |
1023 | struct drm_mm_node *node = mem->mm_node; | ||
1024 | |||
1025 | if (node && placement->lpfn != 0 && | ||
1026 | (node->start < placement->fpfn || | ||
1027 | node->start + node->size > placement->lpfn)) | ||
1028 | return -1; | ||
1023 | 1029 | ||
1024 | for (i = 0; i < placement->num_placement; i++) { | 1030 | for (i = 0; i < placement->num_placement; i++) { |
1025 | if ((placement->placement[i] & mem->placement & | 1031 | if ((placement->placement[i] & mem->placement & |
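
The ttm_bo.c hunk adds a range test to ttm_bo_mem_compat(): when the placement has an upper pfn limit, the already-allocated node must lie entirely inside [fpfn, lpfn] or the memory is treated as incompatible. A standalone version of that test, with simplified types:

/*
 * Standalone version of the range check added above; "pfn" here is
 * just an unsigned long page index and the struct is simplified.
 */
#include <stdbool.h>
#include <stdio.h>

struct range_node {
	unsigned long start;
	unsigned long size;
};

static bool node_fits_placement(const struct range_node *node,
				unsigned long fpfn, unsigned long lpfn)
{
	if (!node || lpfn == 0)
		return true;	/* no node or no upper bound: nothing to check */
	return node->start >= fpfn && node->start + node->size <= lpfn;
}

int main(void)
{
	struct range_node node = { .start = 16, .size = 8 };

	printf("%d\n", node_fits_placement(&node, 0, 32));	/* 1: fits */
	printf("%d\n", node_fits_placement(&node, 0, 20));	/* 0: crosses lpfn */
	return 0;
}
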
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e2123af7775a..3d47a2c12322 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate); | |||
196 | 196 | ||
197 | #ifdef CONFIG_X86 | 197 | #ifdef CONFIG_X86 |
198 | static inline int ttm_tt_set_page_caching(struct page *p, | 198 | static inline int ttm_tt_set_page_caching(struct page *p, |
199 | enum ttm_caching_state c_state) | 199 | enum ttm_caching_state c_old, |
200 | enum ttm_caching_state c_new) | ||
200 | { | 201 | { |
201 | int ret = 0; | 202 | int ret = 0; |
202 | 203 | ||
203 | if (PageHighMem(p)) | 204 | if (PageHighMem(p)) |
204 | return 0; | 205 | return 0; |
205 | 206 | ||
206 | if (get_page_memtype(p) != -1) { | 207 | if (c_old != tt_cached) { |
207 | /* p isn't in the default caching state, set it to | 208 | /* p isn't in the default caching state, set it to |
208 | * writeback first to free its current memtype. */ | 209 | * writeback first to free its current memtype. */ |
209 | 210 | ||
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p, | |||
212 | return ret; | 213 | return ret; |
213 | } | 214 | } |
214 | 215 | ||
215 | if (c_state == tt_wc) | 216 | if (c_new == tt_wc) |
216 | ret = set_memory_wc((unsigned long) page_address(p), 1); | 217 | ret = set_memory_wc((unsigned long) page_address(p), 1); |
217 | else if (c_state == tt_uncached) | 218 | else if (c_new == tt_uncached) |
218 | ret = set_pages_uc(p, 1); | 219 | ret = set_pages_uc(p, 1); |
219 | 220 | ||
220 | return ret; | 221 | return ret; |
221 | } | 222 | } |
222 | #else /* CONFIG_X86 */ | 223 | #else /* CONFIG_X86 */ |
223 | static inline int ttm_tt_set_page_caching(struct page *p, | 224 | static inline int ttm_tt_set_page_caching(struct page *p, |
224 | enum ttm_caching_state c_state) | 225 | enum ttm_caching_state c_old, |
226 | enum ttm_caching_state c_new) | ||
225 | { | 227 | { |
226 | return 0; | 228 | return 0; |
227 | } | 229 | } |
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, | |||
254 | for (i = 0; i < ttm->num_pages; ++i) { | 256 | for (i = 0; i < ttm->num_pages; ++i) { |
255 | cur_page = ttm->pages[i]; | 257 | cur_page = ttm->pages[i]; |
256 | if (likely(cur_page != NULL)) { | 258 | if (likely(cur_page != NULL)) { |
257 | ret = ttm_tt_set_page_caching(cur_page, c_state); | 259 | ret = ttm_tt_set_page_caching(cur_page, |
260 | ttm->caching_state, | ||
261 | c_state); | ||
258 | if (unlikely(ret != 0)) | 262 | if (unlikely(ret != 0)) |
259 | goto out_err; | 263 | goto out_err; |
260 | } | 264 | } |
@@ -268,7 +272,7 @@ out_err: | |||
268 | for (j = 0; j < i; ++j) { | 272 | for (j = 0; j < i; ++j) { |
269 | cur_page = ttm->pages[j]; | 273 | cur_page = ttm->pages[j]; |
270 | if (likely(cur_page != NULL)) { | 274 | if (likely(cur_page != NULL)) { |
271 | (void)ttm_tt_set_page_caching(cur_page, | 275 | (void)ttm_tt_set_page_caching(cur_page, c_state, |
272 | ttm->caching_state); | 276 | ttm->caching_state); |
273 | } | 277 | } |
274 | } | 278 | } |
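
The ttm_tt.c change passes both the old and the new caching state down to ttm_tt_set_page_caching(), so a page is first returned to the default (cached) state when it is not already there, and only then switched to the requested one. A compile-and-run model of that two-step transition, with stub functions standing in for the x86 set_memory_* helpers:

/*
 * Model of the caching transition implemented above: free the current
 * memtype by going back to writeback first, then apply the new state.
 * The set_* helpers are stubs, not the real x86 calls.
 */
#include <stdio.h>

enum caching_state { tt_cached, tt_wc, tt_uncached };

static int set_writeback(void) { puts("-> writeback"); return 0; }
static int set_wc(void)        { puts("-> write-combined"); return 0; }
static int set_uncached(void)  { puts("-> uncached"); return 0; }

static int set_page_caching(enum caching_state c_old, enum caching_state c_new)
{
	int ret;

	if (c_old != tt_cached) {
		/* page is not in the default state: release it first */
		ret = set_writeback();
		if (ret)
			return ret;
	}
	if (c_new == tt_wc)
		return set_wc();
	if (c_new == tt_uncached)
		return set_uncached();
	return 0;
}

int main(void)
{
	return set_page_caching(tt_wc, tt_uncached);
}
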
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a6e8f687fa64..0c9c0811f42d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
348 | */ | 348 | */ |
349 | 349 | ||
350 | DRM_INFO("It appears like vesafb is loaded. " | 350 | DRM_INFO("It appears like vesafb is loaded. " |
351 | "Ignore above error if any. Entering stealth mode.\n"); | 351 | "Ignore above error if any.\n"); |
352 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); | 352 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); |
353 | if (unlikely(ret != 0)) { | 353 | if (unlikely(ret != 0)) { |
354 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); | 354 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); |
355 | goto out_no_device; | 355 | goto out_no_device; |
356 | } | 356 | } |
357 | vmw_kms_init(dev_priv); | ||
358 | vmw_overlay_init(dev_priv); | ||
359 | } else { | ||
360 | ret = vmw_request_device(dev_priv); | ||
361 | if (unlikely(ret != 0)) | ||
362 | goto out_no_device; | ||
363 | vmw_kms_init(dev_priv); | ||
364 | vmw_overlay_init(dev_priv); | ||
365 | vmw_fb_init(dev_priv); | ||
366 | } | 357 | } |
358 | ret = vmw_request_device(dev_priv); | ||
359 | if (unlikely(ret != 0)) | ||
360 | goto out_no_device; | ||
361 | vmw_kms_init(dev_priv); | ||
362 | vmw_overlay_init(dev_priv); | ||
363 | vmw_fb_init(dev_priv); | ||
367 | 364 | ||
368 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | 365 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
369 | register_pm_notifier(&dev_priv->pm_nb); | 366 | register_pm_notifier(&dev_priv->pm_nb); |
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
406 | 403 | ||
407 | unregister_pm_notifier(&dev_priv->pm_nb); | 404 | unregister_pm_notifier(&dev_priv->pm_nb); |
408 | 405 | ||
409 | if (!dev_priv->stealth) { | 406 | vmw_fb_close(dev_priv); |
410 | vmw_fb_close(dev_priv); | 407 | vmw_kms_close(dev_priv); |
411 | vmw_kms_close(dev_priv); | 408 | vmw_overlay_close(dev_priv); |
412 | vmw_overlay_close(dev_priv); | 409 | vmw_release_device(dev_priv); |
413 | vmw_release_device(dev_priv); | 410 | if (dev_priv->stealth) |
414 | pci_release_regions(dev->pdev); | ||
415 | } else { | ||
416 | vmw_kms_close(dev_priv); | ||
417 | vmw_overlay_close(dev_priv); | ||
418 | pci_release_region(dev->pdev, 2); | 411 | pci_release_region(dev->pdev, 2); |
419 | } | 412 | else |
413 | pci_release_regions(dev->pdev); | ||
414 | |||
420 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 415 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
421 | drm_irq_uninstall(dev_priv->dev); | 416 | drm_irq_uninstall(dev_priv->dev); |
422 | if (dev->devname == vmw_devname) | 417 | if (dev->devname == vmw_devname) |
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev, | |||
585 | int ret = 0; | 580 | int ret = 0; |
586 | 581 | ||
587 | DRM_INFO("Master set.\n"); | 582 | DRM_INFO("Master set.\n"); |
588 | if (dev_priv->stealth) { | ||
589 | ret = vmw_request_device(dev_priv); | ||
590 | if (unlikely(ret != 0)) | ||
591 | return ret; | ||
592 | } | ||
593 | 583 | ||
594 | if (active) { | 584 | if (active) { |
595 | BUG_ON(active != &dev_priv->fbdev_master); | 585 | BUG_ON(active != &dev_priv->fbdev_master); |
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev, | |||
649 | 639 | ||
650 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 640 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
651 | 641 | ||
652 | if (dev_priv->stealth) { | ||
653 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
654 | if (unlikely(ret != 0)) | ||
655 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
656 | vmw_release_device(dev_priv); | ||
657 | } | ||
658 | dev_priv->active_master = &dev_priv->fbdev_master; | 642 | dev_priv->active_master = &dev_priv->fbdev_master; |
659 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 643 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
660 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 644 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
661 | 645 | ||
662 | if (!dev_priv->stealth) | 646 | vmw_fb_on(dev_priv); |
663 | vmw_fb_on(dev_priv); | ||
664 | } | 647 | } |
665 | 648 | ||
666 | 649 | ||
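
The vmwgfx_drv.c hunks collapse the stealth and non-stealth paths: setup and teardown now run the same kms/overlay/fb sequence, and only the PCI region handling still depends on the stealth flag. A minimal sketch of the resulting unload flow, with stubs in place of the driver calls:

/*
 * Shape of the unified teardown after the change: the close sequence is
 * unconditional, only the PCI release differs. All functions below are
 * stubs; only the control flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

static void fb_close(void)       { puts("fb close"); }
static void kms_close(void)      { puts("kms close"); }
static void overlay_close(void)  { puts("overlay close"); }
static void release_device(void) { puts("release device"); }

static void driver_unload(bool stealth)
{
	fb_close();
	kms_close();
	overlay_close();
	release_device();
	if (stealth)
		puts("pci_release_region(pdev, 2)");
	else
		puts("pci_release_regions(pdev)");
}

int main(void)
{
	driver_unload(true);
	return 0;
}
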
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index d69caf92ffe7..0897359b3e4e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
183 | } | 183 | } |
184 | 184 | ||
185 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | 185 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
186 | struct vmw_sw_context *sw_context, | 186 | struct vmw_sw_context *sw_context, |
187 | SVGA3dCmdHeader *header) | 187 | SVGAGuestPtr *ptr, |
188 | struct vmw_dma_buffer **vmw_bo_p) | ||
188 | { | 189 | { |
189 | uint32_t handle; | ||
190 | struct vmw_dma_buffer *vmw_bo = NULL; | 190 | struct vmw_dma_buffer *vmw_bo = NULL; |
191 | struct ttm_buffer_object *bo; | 191 | struct ttm_buffer_object *bo; |
192 | struct vmw_surface *srf = NULL; | 192 | uint32_t handle = ptr->gmrId; |
193 | struct vmw_dma_cmd { | ||
194 | SVGA3dCmdHeader header; | ||
195 | SVGA3dCmdSurfaceDMA dma; | ||
196 | } *cmd; | ||
197 | struct vmw_relocation *reloc; | 193 | struct vmw_relocation *reloc; |
198 | int ret; | ||
199 | uint32_t cur_validate_node; | 194 | uint32_t cur_validate_node; |
200 | struct ttm_validate_buffer *val_buf; | 195 | struct ttm_validate_buffer *val_buf; |
196 | int ret; | ||
201 | 197 | ||
202 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
203 | handle = cmd->dma.guest.ptr.gmrId; | ||
204 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 198 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
205 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
206 | DRM_ERROR("Could not find or use GMR region.\n"); | 200 | DRM_ERROR("Could not find or use GMR region.\n"); |
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
209 | bo = &vmw_bo->base; | 203 | bo = &vmw_bo->base; |
210 | 204 | ||
211 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | 205 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
212 | DRM_ERROR("Max number of DMA commands per submission" | 206 | DRM_ERROR("Max number relocations per submission" |
213 | " exceeded\n"); | 207 | " exceeded\n"); |
214 | ret = -EINVAL; | 208 | ret = -EINVAL; |
215 | goto out_no_reloc; | 209 | goto out_no_reloc; |
216 | } | 210 | } |
217 | 211 | ||
218 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 212 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
219 | reloc->location = &cmd->dma.guest.ptr; | 213 | reloc->location = ptr; |
220 | 214 | ||
221 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 215 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); |
222 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { | 216 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { |
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
234 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 228 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
235 | ++sw_context->cur_val_buf; | 229 | ++sw_context->cur_val_buf; |
236 | } | 230 | } |
231 | *vmw_bo_p = vmw_bo; | ||
232 | return 0; | ||
233 | |||
234 | out_no_reloc: | ||
235 | vmw_dmabuf_unreference(&vmw_bo); | ||
236 | vmw_bo_p = NULL; | ||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | ||
241 | struct vmw_sw_context *sw_context, | ||
242 | SVGA3dCmdHeader *header) | ||
243 | { | ||
244 | struct vmw_dma_buffer *vmw_bo; | ||
245 | struct vmw_query_cmd { | ||
246 | SVGA3dCmdHeader header; | ||
247 | SVGA3dCmdEndQuery q; | ||
248 | } *cmd; | ||
249 | int ret; | ||
250 | |||
251 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
252 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
253 | if (unlikely(ret != 0)) | ||
254 | return ret; | ||
255 | |||
256 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
257 | &cmd->q.guestResult, | ||
258 | &vmw_bo); | ||
259 | if (unlikely(ret != 0)) | ||
260 | return ret; | ||
261 | |||
262 | vmw_dmabuf_unreference(&vmw_bo); | ||
263 | return 0; | ||
264 | } | ||
237 | 265 | ||
266 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | ||
267 | struct vmw_sw_context *sw_context, | ||
268 | SVGA3dCmdHeader *header) | ||
269 | { | ||
270 | struct vmw_dma_buffer *vmw_bo; | ||
271 | struct vmw_query_cmd { | ||
272 | SVGA3dCmdHeader header; | ||
273 | SVGA3dCmdWaitForQuery q; | ||
274 | } *cmd; | ||
275 | int ret; | ||
276 | |||
277 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
278 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
279 | if (unlikely(ret != 0)) | ||
280 | return ret; | ||
281 | |||
282 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
283 | &cmd->q.guestResult, | ||
284 | &vmw_bo); | ||
285 | if (unlikely(ret != 0)) | ||
286 | return ret; | ||
287 | |||
288 | vmw_dmabuf_unreference(&vmw_bo); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | |||
293 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | ||
294 | struct vmw_sw_context *sw_context, | ||
295 | SVGA3dCmdHeader *header) | ||
296 | { | ||
297 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
298 | struct ttm_buffer_object *bo; | ||
299 | struct vmw_surface *srf = NULL; | ||
300 | struct vmw_dma_cmd { | ||
301 | SVGA3dCmdHeader header; | ||
302 | SVGA3dCmdSurfaceDMA dma; | ||
303 | } *cmd; | ||
304 | int ret; | ||
305 | |||
306 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
307 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
308 | &cmd->dma.guest.ptr, | ||
309 | &vmw_bo); | ||
310 | if (unlikely(ret != 0)) | ||
311 | return ret; | ||
312 | |||
313 | bo = &vmw_bo->base; | ||
238 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, | 314 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, |
239 | cmd->dma.host.sid, &srf); | 315 | cmd->dma.host.sid, &srf); |
240 | if (ret) { | 316 | if (ret) { |
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
379 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 455 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
380 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 456 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
381 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | 457 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), |
382 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), | 458 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), |
383 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), | 459 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), |
384 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 460 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), |
385 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 461 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
386 | &vmw_cmd_blt_surf_screen_check) | 462 | &vmw_cmd_blt_surf_screen_check) |
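
The vmwgfx_execbuf.c change factors the guest-pointer handling out of vmw_cmd_dma() into vmw_translate_guest_ptr(), which the new end-query and wait-for-query handlers reuse: look the buffer up by its GMR handle, record a relocation, and hand the buffer back to the caller. A self-contained model of that shared-helper pattern, with the handle table and relocation list reduced to plain arrays:

/*
 * Minimal model of the refactoring above. Handles, buffers and the
 * relocation table are simplified placeholders, not the vmwgfx types.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_RELOCS 4

struct buffer { uint32_t handle; };

static struct buffer buffers[] = { { 1 }, { 2 }, { 3 } };
static uint32_t relocs[MAX_RELOCS];
static unsigned int cur_reloc;

static int translate_guest_ptr(uint32_t handle, struct buffer **out)
{
	unsigned int i;

	if (cur_reloc >= MAX_RELOCS)
		return -1;		/* too many relocations */

	for (i = 0; i < sizeof(buffers) / sizeof(buffers[0]); i++) {
		if (buffers[i].handle == handle) {
			relocs[cur_reloc++] = handle;
			*out = &buffers[i];
			return 0;
		}
	}
	return -1;			/* unknown GMR handle */
}

static int cmd_end_query(uint32_t guest_result_handle)
{
	struct buffer *bo;

	if (translate_guest_ptr(guest_result_handle, &bo))
		return -1;
	printf("query result lands in buffer %u\n", (unsigned)bo->handle);
	return 0;
}

int main(void)
{
	return cmd_end_query(2);
}
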
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 4f4f6432be8b..a93367041cdc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
559 | info->pixmap.scan_align = 1; | 559 | info->pixmap.scan_align = 1; |
560 | #endif | 560 | #endif |
561 | 561 | ||
562 | info->aperture_base = vmw_priv->vram_start; | ||
563 | info->aperture_size = vmw_priv->vram_size; | ||
564 | |||
562 | /* | 565 | /* |
563 | * Dirty & Deferred IO | 566 | * Dirty & Deferred IO |
564 | */ | 567 | */ |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 24b56dc54597..2f6cf69ecb39 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
961 | remaining -= 7; | 961 | remaining -= 7; |
962 | pr_devel("client 0x%p called 'target'\n", priv); | 962 | pr_devel("client 0x%p called 'target'\n", priv); |
963 | /* if target is default */ | 963 | /* if target is default */ |
964 | if (!strncmp(kbuf, "default", 7)) | 964 | if (!strncmp(curr_pos, "default", 7)) |
965 | pdev = pci_dev_get(vga_default_device()); | 965 | pdev = pci_dev_get(vga_default_device()); |
966 | else { | 966 | else { |
967 | if (!vga_pci_str_to_vars(curr_pos, remaining, | 967 | if (!vga_pci_str_to_vars(curr_pos, remaining, |
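
The vgaarb.c fix matters because the parser has already advanced curr_pos past the "target " keyword, so comparing against the start of kbuf could never match "default". A small stand-alone illustration of the difference:

/*
 * Why the one-character change above fixes the "target default" case:
 * only the advanced pointer still sees the word "default".
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char kbuf[] = "target default";
	char *curr_pos = kbuf + 7;	/* skip "target " as the driver does */

	/* old check: always nonzero once the pointer has advanced */
	printf("strncmp(kbuf, ...)     = %d\n", strncmp(kbuf, "default", 7));
	/* fixed check: matches the remaining text */
	printf("strncmp(curr_pos, ...) = %d\n", strncmp(curr_pos, "default", 7));
	return 0;
}
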
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 24d90ea246ce..71d4c0703629 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -55,6 +55,12 @@ source "drivers/hid/usbhid/Kconfig" | |||
55 | menu "Special HID drivers" | 55 | menu "Special HID drivers" |
56 | depends on HID | 56 | depends on HID |
57 | 57 | ||
58 | config HID_3M_PCT | ||
59 | tristate "3M PCT" | ||
60 | depends on USB_HID | ||
61 | ---help--- | ||
62 | Support for 3M PCT touch screens. | ||
63 | |||
58 | config HID_A4TECH | 64 | config HID_A4TECH |
59 | tristate "A4 tech" if EMBEDDED | 65 | tristate "A4 tech" if EMBEDDED |
60 | depends on USB_HID | 66 | depends on USB_HID |
@@ -183,6 +189,23 @@ config LOGIRUMBLEPAD2_FF | |||
183 | Say Y here if you want to enable force feedback support for Logitech | 189 | Say Y here if you want to enable force feedback support for Logitech |
184 | Rumblepad 2 devices. | 190 | Rumblepad 2 devices. |
185 | 191 | ||
192 | config LOGIG940_FF | ||
193 | bool "Logitech Flight System G940 force feedback support" | ||
194 | depends on HID_LOGITECH | ||
195 | select INPUT_FF_MEMLESS | ||
196 | help | ||
197 | Say Y here if you want to enable force feedback support for Logitech | ||
198 | Flight System G940 devices. | ||
199 | |||
200 | config HID_MAGICMOUSE | ||
201 | tristate "Apple MagicMouse multi-touch support" | ||
202 | depends on BT_HIDP | ||
203 | ---help--- | ||
204 | Support for the Apple Magic Mouse multi-touch. | ||
205 | |||
206 | Say Y here if you want support for the multi-touch features of the | ||
207 | Apple Wireless "Magic" Mouse. | ||
208 | |||
186 | config HID_MICROSOFT | 209 | config HID_MICROSOFT |
187 | tristate "Microsoft" if EMBEDDED | 210 | tristate "Microsoft" if EMBEDDED |
188 | depends on USB_HID | 211 | depends on USB_HID |
@@ -190,6 +213,12 @@ config HID_MICROSOFT | |||
190 | ---help--- | 213 | ---help--- |
191 | Support for Microsoft devices that are not fully compliant with HID standard. | 214 | Support for Microsoft devices that are not fully compliant with HID standard. |
192 | 215 | ||
216 | config HID_MOSART | ||
217 | tristate "MosArt" | ||
218 | depends on USB_HID | ||
219 | ---help--- | ||
220 | Support for MosArt dual-touch panels. | ||
221 | |||
193 | config HID_MONTEREY | 222 | config HID_MONTEREY |
194 | tristate "Monterey" if EMBEDDED | 223 | tristate "Monterey" if EMBEDDED |
195 | depends on USB_HID | 224 | depends on USB_HID |
@@ -198,12 +227,18 @@ config HID_MONTEREY | |||
198 | Support for Monterey Genius KB29E. | 227 | Support for Monterey Genius KB29E. |
199 | 228 | ||
200 | config HID_NTRIG | 229 | config HID_NTRIG |
201 | tristate "NTrig" if EMBEDDED | 230 | tristate "NTrig" |
202 | depends on USB_HID | 231 | depends on USB_HID |
203 | default !EMBEDDED | ||
204 | ---help--- | 232 | ---help--- |
205 | Support for N-Trig touch screen. | 233 | Support for N-Trig touch screen. |
206 | 234 | ||
235 | config HID_ORTEK | ||
236 | tristate "Ortek" if EMBEDDED | ||
237 | depends on USB_HID | ||
238 | default !EMBEDDED | ||
239 | ---help--- | ||
240 | Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. | ||
241 | |||
207 | config HID_PANTHERLORD | 242 | config HID_PANTHERLORD |
208 | tristate "Pantherlord support" if EMBEDDED | 243 | tristate "Pantherlord support" if EMBEDDED |
209 | depends on USB_HID | 244 | depends on USB_HID |
@@ -227,6 +262,12 @@ config HID_PETALYNX | |||
227 | ---help--- | 262 | ---help--- |
228 | Support for Petalynx Maxter remote control. | 263 | Support for Petalynx Maxter remote control. |
229 | 264 | ||
265 | config HID_QUANTA | ||
266 | tristate "Quanta Optical Touch" | ||
267 | depends on USB_HID | ||
268 | ---help--- | ||
269 | Support for Quanta Optical Touch dual-touch panels. | ||
270 | |||
230 | config HID_SAMSUNG | 271 | config HID_SAMSUNG |
231 | tristate "Samsung" if EMBEDDED | 272 | tristate "Samsung" if EMBEDDED |
232 | depends on USB_HID | 273 | depends on USB_HID |
@@ -241,6 +282,12 @@ config HID_SONY | |||
241 | ---help--- | 282 | ---help--- |
242 | Support for Sony PS3 controller. | 283 | Support for Sony PS3 controller. |
243 | 284 | ||
285 | config HID_STANTUM | ||
286 | tristate "Stantum" | ||
287 | depends on USB_HID | ||
288 | ---help--- | ||
289 | Support for Stantum multitouch panel. | ||
290 | |||
244 | config HID_SUNPLUS | 291 | config HID_SUNPLUS |
245 | tristate "Sunplus" if EMBEDDED | 292 | tristate "Sunplus" if EMBEDDED |
246 | depends on USB_HID | 293 | depends on USB_HID |
@@ -305,9 +352,8 @@ config THRUSTMASTER_FF | |||
305 | Rumble Force or Force Feedback Wheel. | 352 | Rumble Force or Force Feedback Wheel. |
306 | 353 | ||
307 | config HID_WACOM | 354 | config HID_WACOM |
308 | tristate "Wacom Bluetooth devices support" if EMBEDDED | 355 | tristate "Wacom Bluetooth devices support" |
309 | depends on BT_HIDP | 356 | depends on BT_HIDP |
310 | default !EMBEDDED | ||
311 | ---help--- | 357 | ---help--- |
312 | Support for Wacom Graphire Bluetooth tablet. | 358 | Support for Wacom Graphire Bluetooth tablet. |
313 | 359 | ||
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 0de2dff5542c..0b2618f092ca 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile | |||
@@ -18,7 +18,11 @@ endif | |||
18 | ifdef CONFIG_LOGIRUMBLEPAD2_FF | 18 | ifdef CONFIG_LOGIRUMBLEPAD2_FF |
19 | hid-logitech-objs += hid-lg2ff.o | 19 | hid-logitech-objs += hid-lg2ff.o |
20 | endif | 20 | endif |
21 | ifdef CONFIG_LOGIG940_FF | ||
22 | hid-logitech-objs += hid-lg3ff.o | ||
23 | endif | ||
21 | 24 | ||
25 | obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o | ||
22 | obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o | 26 | obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o |
23 | obj-$(CONFIG_HID_APPLE) += hid-apple.o | 27 | obj-$(CONFIG_HID_APPLE) += hid-apple.o |
24 | obj-$(CONFIG_HID_BELKIN) += hid-belkin.o | 28 | obj-$(CONFIG_HID_BELKIN) += hid-belkin.o |
@@ -31,14 +35,19 @@ obj-$(CONFIG_HID_GYRATION) += hid-gyration.o | |||
31 | obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o | 35 | obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o |
32 | obj-$(CONFIG_HID_KYE) += hid-kye.o | 36 | obj-$(CONFIG_HID_KYE) += hid-kye.o |
33 | obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o | 37 | obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o |
38 | obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o | ||
34 | obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o | 39 | obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o |
35 | obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o | 40 | obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o |
41 | obj-$(CONFIG_HID_MOSART) += hid-mosart.o | ||
36 | obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o | 42 | obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o |
43 | obj-$(CONFIG_HID_ORTEK) += hid-ortek.o | ||
44 | obj-$(CONFIG_HID_QUANTA) += hid-quanta.o | ||
37 | obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o | 45 | obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o |
38 | obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o | 46 | obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o |
39 | obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o | 47 | obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o |
40 | obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o | 48 | obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o |
41 | obj-$(CONFIG_HID_SONY) += hid-sony.o | 49 | obj-$(CONFIG_HID_SONY) += hid-sony.o |
50 | obj-$(CONFIG_HID_STANTUM) += hid-stantum.o | ||
42 | obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o | 51 | obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o |
43 | obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o | 52 | obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o |
44 | obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o | 53 | obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o |
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c new file mode 100644 index 000000000000..2370aefc86b2 --- /dev/null +++ b/drivers/hid/hid-3m-pct.c | |||
@@ -0,0 +1,290 @@ | |||
1 | /* | ||
2 | * HID driver for 3M PCT multitouch panels | ||
3 | * | ||
4 | * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/hid.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/usb.h> | ||
19 | |||
20 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
21 | MODULE_DESCRIPTION("3M PCT multitouch panels"); | ||
22 | MODULE_LICENSE("GPL"); | ||
23 | |||
24 | #include "hid-ids.h" | ||
25 | |||
26 | struct mmm_finger { | ||
27 | __s32 x, y; | ||
28 | __u8 rank; | ||
29 | bool touch, valid; | ||
30 | }; | ||
31 | |||
32 | struct mmm_data { | ||
33 | struct mmm_finger f[10]; | ||
34 | __u8 curid, num; | ||
35 | bool touch, valid; | ||
36 | }; | ||
37 | |||
38 | static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
39 | struct hid_field *field, struct hid_usage *usage, | ||
40 | unsigned long **bit, int *max) | ||
41 | { | ||
42 | switch (usage->hid & HID_USAGE_PAGE) { | ||
43 | |||
44 | case HID_UP_BUTTON: | ||
45 | return -1; | ||
46 | |||
47 | case HID_UP_GENDESK: | ||
48 | switch (usage->hid) { | ||
49 | case HID_GD_X: | ||
50 | hid_map_usage(hi, usage, bit, max, | ||
51 | EV_ABS, ABS_MT_POSITION_X); | ||
52 | /* touchscreen emulation */ | ||
53 | input_set_abs_params(hi->input, ABS_X, | ||
54 | field->logical_minimum, | ||
55 | field->logical_maximum, 0, 0); | ||
56 | return 1; | ||
57 | case HID_GD_Y: | ||
58 | hid_map_usage(hi, usage, bit, max, | ||
59 | EV_ABS, ABS_MT_POSITION_Y); | ||
60 | /* touchscreen emulation */ | ||
61 | input_set_abs_params(hi->input, ABS_Y, | ||
62 | field->logical_minimum, | ||
63 | field->logical_maximum, 0, 0); | ||
64 | return 1; | ||
65 | } | ||
66 | return 0; | ||
67 | |||
68 | case HID_UP_DIGITIZER: | ||
69 | switch (usage->hid) { | ||
70 | /* we do not want to map these: no input-oriented meaning */ | ||
71 | case 0x14: | ||
72 | case 0x23: | ||
73 | case HID_DG_INPUTMODE: | ||
74 | case HID_DG_DEVICEINDEX: | ||
75 | case HID_DG_CONTACTCOUNT: | ||
76 | case HID_DG_CONTACTMAX: | ||
77 | case HID_DG_INRANGE: | ||
78 | case HID_DG_CONFIDENCE: | ||
79 | return -1; | ||
80 | case HID_DG_TIPSWITCH: | ||
81 | /* touchscreen emulation */ | ||
82 | hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); | ||
83 | return 1; | ||
84 | case HID_DG_CONTACTID: | ||
85 | hid_map_usage(hi, usage, bit, max, | ||
86 | EV_ABS, ABS_MT_TRACKING_ID); | ||
87 | return 1; | ||
88 | } | ||
89 | /* let hid-input decide for the others */ | ||
90 | return 0; | ||
91 | |||
92 | case 0xff000000: | ||
93 | /* we do not want to map these: no input-oriented meaning */ | ||
94 | return -1; | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int mmm_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
101 | struct hid_field *field, struct hid_usage *usage, | ||
102 | unsigned long **bit, int *max) | ||
103 | { | ||
104 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
105 | clear_bit(usage->code, *bit); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * this function is called when a whole packet has been received and processed, | ||
112 | * so that it can decide what to send to the input layer. | ||
113 | */ | ||
114 | static void mmm_filter_event(struct mmm_data *md, struct input_dev *input) | ||
115 | { | ||
116 | struct mmm_finger *oldest = 0; | ||
117 | bool pressed = false, released = false; | ||
118 | int i; | ||
119 | |||
120 | /* | ||
121 | * we need to iterate on all fingers to decide if we have a press | ||
122 | * or a release event in our touchscreen emulation. | ||
123 | */ | ||
124 | for (i = 0; i < 10; ++i) { | ||
125 | struct mmm_finger *f = &md->f[i]; | ||
126 | if (!f->valid) { | ||
127 | /* this finger is just placeholder data, ignore */ | ||
128 | } else if (f->touch) { | ||
129 | /* this finger is on the screen */ | ||
130 | input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i); | ||
131 | input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x); | ||
132 | input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y); | ||
133 | input_mt_sync(input); | ||
134 | /* | ||
135 | * touchscreen emulation: maintain the age rank | ||
136 | * of this finger, decide if we have a press | ||
137 | */ | ||
138 | if (f->rank == 0) { | ||
139 | f->rank = ++(md->num); | ||
140 | if (f->rank == 1) | ||
141 | pressed = true; | ||
142 | } | ||
143 | if (f->rank == 1) | ||
144 | oldest = f; | ||
145 | } else { | ||
146 | /* this finger took off the screen */ | ||
147 | /* touchscreen emulation: maintain age rank of others */ | ||
148 | int j; | ||
149 | |||
150 | for (j = 0; j < 10; ++j) { | ||
151 | struct mmm_finger *g = &md->f[j]; | ||
152 | if (g->rank > f->rank) { | ||
153 | g->rank--; | ||
154 | if (g->rank == 1) | ||
155 | oldest = g; | ||
156 | } | ||
157 | } | ||
158 | f->rank = 0; | ||
159 | --(md->num); | ||
160 | if (md->num == 0) | ||
161 | released = true; | ||
162 | } | ||
163 | f->valid = 0; | ||
164 | } | ||
165 | |||
166 | /* touchscreen emulation */ | ||
167 | if (oldest) { | ||
168 | if (pressed) | ||
169 | input_event(input, EV_KEY, BTN_TOUCH, 1); | ||
170 | input_event(input, EV_ABS, ABS_X, oldest->x); | ||
171 | input_event(input, EV_ABS, ABS_Y, oldest->y); | ||
172 | } else if (released) { | ||
173 | input_event(input, EV_KEY, BTN_TOUCH, 0); | ||
174 | } | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * this function is called upon all reports | ||
179 | * so that we can accumulate contact point information, | ||
180 | * and call input_mt_sync after each point. | ||
181 | */ | ||
182 | static int mmm_event(struct hid_device *hid, struct hid_field *field, | ||
183 | struct hid_usage *usage, __s32 value) | ||
184 | { | ||
185 | struct mmm_data *md = hid_get_drvdata(hid); | ||
186 | /* | ||
187 | * strangely, this function can be called before | ||
188 | * field->hidinput is initialized! | ||
189 | */ | ||
190 | if (hid->claimed & HID_CLAIMED_INPUT) { | ||
191 | struct input_dev *input = field->hidinput->input; | ||
192 | switch (usage->hid) { | ||
193 | case HID_DG_TIPSWITCH: | ||
194 | md->touch = value; | ||
195 | break; | ||
196 | case HID_DG_CONFIDENCE: | ||
197 | md->valid = value; | ||
198 | break; | ||
199 | case HID_DG_CONTACTID: | ||
200 | if (md->valid) { | ||
201 | md->curid = value; | ||
202 | md->f[value].touch = md->touch; | ||
203 | md->f[value].valid = 1; | ||
204 | } | ||
205 | break; | ||
206 | case HID_GD_X: | ||
207 | if (md->valid) | ||
208 | md->f[md->curid].x = value; | ||
209 | break; | ||
210 | case HID_GD_Y: | ||
211 | if (md->valid) | ||
212 | md->f[md->curid].y = value; | ||
213 | break; | ||
214 | case HID_DG_CONTACTCOUNT: | ||
215 | mmm_filter_event(md, input); | ||
216 | break; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | /* we have handled the hidinput part, now remains hiddev */ | ||
221 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | ||
222 | hid->hiddev_hid_event(hid, field, usage, value); | ||
223 | |||
224 | return 1; | ||
225 | } | ||
226 | |||
227 | static int mmm_probe(struct hid_device *hdev, const struct hid_device_id *id) | ||
228 | { | ||
229 | int ret; | ||
230 | struct mmm_data *md; | ||
231 | |||
232 | md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL); | ||
233 | if (!md) { | ||
234 | dev_err(&hdev->dev, "cannot allocate 3M data\n"); | ||
235 | return -ENOMEM; | ||
236 | } | ||
237 | hid_set_drvdata(hdev, md); | ||
238 | |||
239 | ret = hid_parse(hdev); | ||
240 | if (!ret) | ||
241 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
242 | |||
243 | if (ret) | ||
244 | kfree(md); | ||
245 | return ret; | ||
246 | } | ||
247 | |||
248 | static void mmm_remove(struct hid_device *hdev) | ||
249 | { | ||
250 | hid_hw_stop(hdev); | ||
251 | kfree(hid_get_drvdata(hdev)); | ||
252 | hid_set_drvdata(hdev, NULL); | ||
253 | } | ||
254 | |||
255 | static const struct hid_device_id mmm_devices[] = { | ||
256 | { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, | ||
257 | { } | ||
258 | }; | ||
259 | MODULE_DEVICE_TABLE(hid, mmm_devices); | ||
260 | |||
261 | static const struct hid_usage_id mmm_grabbed_usages[] = { | ||
262 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | ||
263 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} | ||
264 | }; | ||
265 | |||
266 | static struct hid_driver mmm_driver = { | ||
267 | .name = "3m-pct", | ||
268 | .id_table = mmm_devices, | ||
269 | .probe = mmm_probe, | ||
270 | .remove = mmm_remove, | ||
271 | .input_mapping = mmm_input_mapping, | ||
272 | .input_mapped = mmm_input_mapped, | ||
273 | .usage_table = mmm_grabbed_usages, | ||
274 | .event = mmm_event, | ||
275 | }; | ||
276 | |||
277 | static int __init mmm_init(void) | ||
278 | { | ||
279 | return hid_register_driver(&mmm_driver); | ||
280 | } | ||
281 | |||
282 | static void __exit mmm_exit(void) | ||
283 | { | ||
284 | hid_unregister_driver(&mmm_driver); | ||
285 | } | ||
286 | |||
287 | module_init(mmm_init); | ||
288 | module_exit(mmm_exit); | ||
289 | MODULE_LICENSE("GPL"); | ||
290 | |||
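
The new hid-3m-pct.c driver emulates a single-touch screen on top of the multitouch data by giving every contact an age rank: the first finger down gets rank 1 and is the one reported through ABS_X/ABS_Y, and when a finger lifts, everything ranked behind it moves up. A toy model of that rank bookkeeping, outside the HID framework:

/*
 * Sketch of the age-rank bookkeeping used for the touchscreen
 * emulation above: rank 1 always points at the oldest contact still on
 * the surface. The array indices stand in for HID contact IDs.
 */
#include <stdio.h>

#define NFINGERS 10

static int rank[NFINGERS];	/* 0 = not on the surface */
static int num;

static void finger_down(int i)
{
	if (!rank[i])
		rank[i] = ++num;
}

static void finger_up(int i)
{
	int j;

	for (j = 0; j < NFINGERS; j++)
		if (rank[j] > rank[i])
			rank[j]--;	/* younger contacts age by one */
	rank[i] = 0;
	num--;
}

int main(void)
{
	finger_down(3);		/* oldest contact */
	finger_down(7);
	finger_up(3);		/* finger 7 inherits rank 1 */
	printf("rank of finger 7: %d\n", rank[7]);
	return 0;
}
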
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 5b4d66dc1a05..78286b184ace 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c | |||
@@ -40,6 +40,11 @@ module_param(fnmode, uint, 0644); | |||
40 | MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, " | 40 | MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, " |
41 | "[1] = fkeyslast, 2 = fkeysfirst)"); | 41 | "[1] = fkeyslast, 2 = fkeysfirst)"); |
42 | 42 | ||
43 | static unsigned int iso_layout = 1; | ||
44 | module_param(iso_layout, uint, 0644); | ||
45 | MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. " | ||
46 | "(0 = disabled, [1] = enabled)"); | ||
47 | |||
43 | struct apple_sc { | 48 | struct apple_sc { |
44 | unsigned long quirks; | 49 | unsigned long quirks; |
45 | unsigned int fn_on; | 50 | unsigned int fn_on; |
@@ -199,11 +204,13 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, | |||
199 | } | 204 | } |
200 | } | 205 | } |
201 | 206 | ||
202 | if (asc->quirks & APPLE_ISO_KEYBOARD) { | 207 | if (iso_layout) { |
203 | trans = apple_find_translation(apple_iso_keyboard, usage->code); | 208 | if (asc->quirks & APPLE_ISO_KEYBOARD) { |
204 | if (trans) { | 209 | trans = apple_find_translation(apple_iso_keyboard, usage->code); |
205 | input_event(input, usage->type, trans->to, value); | 210 | if (trans) { |
206 | return 1; | 211 | input_event(input, usage->type, trans->to, value); |
212 | return 1; | ||
213 | } | ||
207 | } | 214 | } |
208 | } | 215 | } |
209 | 216 | ||
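
The hid-apple.c hunk only introduces an iso_layout module parameter (default 1) that gates the existing ISO-keyboard translation. A reduced model of the resulting check, with the quirk flag and translation table replaced by placeholders:

/*
 * Reduced model of the gated translation above; APPLE_ISO_KEYBOARD and
 * the "+ 1" lookup are placeholders for the driver's quirk flag and
 * translation table.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int iso_layout = 1;	/* module_param default in the patch */

#define APPLE_ISO_KEYBOARD 0x1

static bool translate_iso(unsigned long quirks, int code, int *out)
{
	if (!iso_layout)
		return false;		/* user disabled the remapping */
	if (!(quirks & APPLE_ISO_KEYBOARD))
		return false;
	*out = code + 1;		/* stand-in for the lookup table */
	return true;
}

int main(void)
{
	int code = 0;
	bool hit = translate_iso(APPLE_ISO_KEYBOARD, 41, &code);

	printf("translated=%d code=%d\n", (int)hit, code);
	return 0;
}
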
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index eabe5f87c6c1..368fbb0c4ca6 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Copyright (c) 1999 Andreas Gal | 4 | * Copyright (c) 1999 Andreas Gal |
5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> | 5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> |
6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc | 6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc |
7 | * Copyright (c) 2006-2007 Jiri Kosina | 7 | * Copyright (c) 2006-2010 Jiri Kosina |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* | 10 | /* |
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(hid_debug); | |||
51 | * Register a new report for a device. | 51 | * Register a new report for a device. |
52 | */ | 52 | */ |
53 | 53 | ||
54 | static struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id) | 54 | struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id) |
55 | { | 55 | { |
56 | struct hid_report_enum *report_enum = device->report_enum + type; | 56 | struct hid_report_enum *report_enum = device->report_enum + type; |
57 | struct hid_report *report; | 57 | struct hid_report *report; |
@@ -75,6 +75,7 @@ static struct hid_report *hid_register_report(struct hid_device *device, unsigne | |||
75 | 75 | ||
76 | return report; | 76 | return report; |
77 | } | 77 | } |
78 | EXPORT_SYMBOL_GPL(hid_register_report); | ||
78 | 79 | ||
79 | /* | 80 | /* |
80 | * Register a new field for this report. | 81 | * Register a new field for this report. |
@@ -387,7 +388,8 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) | |||
387 | __u32 data; | 388 | __u32 data; |
388 | unsigned n; | 389 | unsigned n; |
389 | 390 | ||
390 | if (item->size == 0) { | 391 | /* Local delimiter could have value 0, which allows size to be 0 */ |
392 | if (item->size == 0 && item->tag != HID_LOCAL_ITEM_TAG_DELIMITER) { | ||
391 | dbg_hid("item data expected for local item\n"); | 393 | dbg_hid("item data expected for local item\n"); |
392 | return -1; | 394 | return -1; |
393 | } | 395 | } |
@@ -1248,11 +1250,13 @@ EXPORT_SYMBOL_GPL(hid_disconnect); | |||
1248 | 1250 | ||
1249 | /* a list of devices for which there is a specialized driver on HID bus */ | 1251 | /* a list of devices for which there is a specialized driver on HID bus */ |
1250 | static const struct hid_device_id hid_blacklist[] = { | 1252 | static const struct hid_device_id hid_blacklist[] = { |
1253 | { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, | ||
1251 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, | 1254 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, |
1252 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, | 1255 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, |
1253 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, | 1256 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, |
1254 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, | 1257 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, |
1255 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, | 1258 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, |
1259 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, | ||
1256 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, | 1260 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, |
1257 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, | 1261 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, |
1258 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, | 1262 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, |
@@ -1324,6 +1328,7 @@ static const struct hid_device_id hid_blacklist[] = { | |||
1324 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, | 1328 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, |
1325 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, | 1329 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, |
1326 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, | 1330 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, |
1331 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) }, | ||
1327 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, | 1332 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, |
1328 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, | 1333 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, |
1329 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, | 1334 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, |
@@ -1337,10 +1342,15 @@ static const struct hid_device_id hid_blacklist[] = { | |||
1337 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, | 1342 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, |
1338 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, | 1343 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, |
1339 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, | 1344 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, |
1345 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | ||
1340 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, | 1346 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, |
1347 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, | ||
1348 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, | ||
1341 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, | 1349 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, |
1342 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | 1350 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, |
1351 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | ||
1343 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, | 1352 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, |
1353 | { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, | ||
1344 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, | 1354 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, |
1345 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, | 1355 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, |
1346 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, | 1356 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, |
@@ -1543,8 +1553,9 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
1543 | { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, | 1553 | { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, |
1544 | { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, | 1554 | { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, |
1545 | { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, | 1555 | { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, |
1546 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)}, | 1556 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, |
1547 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)}, | 1557 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, |
1558 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, | ||
1548 | { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, | 1559 | { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, |
1549 | { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, | 1560 | { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, |
1550 | { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, | 1561 | { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, |
@@ -1661,8 +1672,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
1661 | { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, | 1672 | { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, |
1662 | { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, | 1673 | { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, |
1663 | { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, | 1674 | { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, |
1664 | { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) }, | ||
1665 | { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) }, | ||
1666 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, | 1675 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, |
1667 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, | 1676 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, |
1668 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, | 1677 | { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, |
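
Among the hid-core.c changes, the parser fix allows a zero-size local item only when it is a delimiter, since a delimiter with value 0 is legal. A condensed version of that check; the tag value below follows the HID spec's local-item numbering and is assumed here, not copied from the patch:

/*
 * Condensed form of the local-item size check above: zero-size data is
 * rejected ("item data expected for local item") unless the item is a
 * delimiter.
 */
#include <stdbool.h>
#include <stdio.h>

#define HID_LOCAL_ITEM_TAG_DELIMITER 10	/* assumed per the HID spec */

static bool local_item_ok(unsigned int size, unsigned int tag)
{
	if (size == 0 && tag != HID_LOCAL_ITEM_TAG_DELIMITER)
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n",
	       (int)local_item_ok(0, HID_LOCAL_ITEM_TAG_DELIMITER),	/* 1 */
	       (int)local_item_ok(0, 3));				/* 0 */
	return 0;
}
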
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 6abd0369aedb..cd4ece6fdfb9 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c | |||
@@ -864,13 +864,13 @@ static const char **names[EV_MAX + 1] = { | |||
864 | [EV_SND] = sounds, [EV_REP] = repeats, | 864 | [EV_SND] = sounds, [EV_REP] = repeats, |
865 | }; | 865 | }; |
866 | 866 | ||
867 | void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) { | 867 | static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) |
868 | 868 | { | |
869 | seq_printf(f, "%s.%s", events[type] ? events[type] : "?", | 869 | seq_printf(f, "%s.%s", events[type] ? events[type] : "?", |
870 | names[type] ? (names[type][code] ? names[type][code] : "?") : "?"); | 870 | names[type] ? (names[type][code] ? names[type][code] : "?") : "?"); |
871 | } | 871 | } |
872 | 872 | ||
873 | void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) | 873 | static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) |
874 | { | 874 | { |
875 | int i, j, k; | 875 | int i, j, k; |
876 | struct hid_report *report; | 876 | struct hid_report *report; |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 010368e649ed..72c05f90553c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -18,6 +18,9 @@ | |||
18 | #ifndef HID_IDS_H_FILE | 18 | #ifndef HID_IDS_H_FILE |
19 | #define HID_IDS_H_FILE | 19 | #define HID_IDS_H_FILE |
20 | 20 | ||
21 | #define USB_VENDOR_ID_3M 0x0596 | ||
22 | #define USB_DEVICE_ID_3M1968 0x0500 | ||
23 | |||
21 | #define USB_VENDOR_ID_A4TECH 0x09da | 24 | #define USB_VENDOR_ID_A4TECH 0x09da |
22 | #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 | 25 | #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 |
23 | #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a | 26 | #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a |
@@ -56,6 +59,7 @@ | |||
56 | 59 | ||
57 | #define USB_VENDOR_ID_APPLE 0x05ac | 60 | #define USB_VENDOR_ID_APPLE 0x05ac |
58 | #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 | 61 | #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 |
62 | #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d | ||
59 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e | 63 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e |
60 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f | 64 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f |
61 | #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 | 65 | #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 |
@@ -96,9 +100,12 @@ | |||
96 | #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 | 100 | #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 |
97 | #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 | 101 | #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 |
98 | 102 | ||
99 | #define USB_VENDOR_ID_ASUS 0x0b05 | 103 | #define USB_VENDOR_ID_ASUS 0x0486 |
100 | #define USB_DEVICE_ID_ASUS_LCM 0x1726 | 104 | #define USB_DEVICE_ID_ASUS_T91MT 0x0185 |
101 | #define USB_DEVICE_ID_ASUS_LCM2 0x175b | 105 | |
106 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 | ||
107 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 | ||
108 | #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b | ||
102 | 109 | ||
103 | #define USB_VENDOR_ID_ATEN 0x0557 | 110 | #define USB_VENDOR_ID_ATEN 0x0557 |
104 | #define USB_DEVICE_ID_ATEN_UC100KM 0x2004 | 111 | #define USB_DEVICE_ID_ATEN_UC100KM 0x2004 |
@@ -169,6 +176,9 @@ | |||
169 | #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f | 176 | #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f |
170 | #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 | 177 | #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 |
171 | 178 | ||
179 | #define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 | ||
180 | #define USB_DEVICE_ID_ETURBOTOUCH 0x0006 | ||
181 | |||
172 | #define USB_VENDOR_ID_ETT 0x0664 | 182 | #define USB_VENDOR_ID_ETT 0x0664 |
173 | #define USB_DEVICE_ID_TC5UH 0x0309 | 183 | #define USB_DEVICE_ID_TC5UH 0x0309 |
174 | 184 | ||
@@ -303,6 +313,7 @@ | |||
303 | #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 | 313 | #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 |
304 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283 | 314 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283 |
305 | #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286 | 315 | #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286 |
316 | #define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287 | ||
306 | #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 | 317 | #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 |
307 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 | 318 | #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 |
308 | #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 | 319 | #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 |
@@ -365,6 +376,9 @@ | |||
365 | #define USB_VENDOR_ID_ONTRAK 0x0a07 | 376 | #define USB_VENDOR_ID_ONTRAK 0x0a07 |
366 | #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 | 377 | #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 |
367 | 378 | ||
379 | #define USB_VENDOR_ID_ORTEK 0x05a4 | ||
380 | #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 | ||
381 | |||
368 | #define USB_VENDOR_ID_PANJIT 0x134c | 382 | #define USB_VENDOR_ID_PANJIT 0x134c |
369 | 383 | ||
370 | #define USB_VENDOR_ID_PANTHERLORD 0x0810 | 384 | #define USB_VENDOR_ID_PANTHERLORD 0x0810 |
@@ -382,9 +396,16 @@ | |||
382 | #define USB_VENDOR_ID_POWERCOM 0x0d9f | 396 | #define USB_VENDOR_ID_POWERCOM 0x0d9f |
383 | #define USB_DEVICE_ID_POWERCOM_UPS 0x0002 | 397 | #define USB_DEVICE_ID_POWERCOM_UPS 0x0002 |
384 | 398 | ||
399 | #define USB_VENDOR_ID_PRODIGE 0x05af | ||
400 | #define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062 | ||
401 | |||
385 | #define USB_VENDOR_ID_SAITEK 0x06a3 | 402 | #define USB_VENDOR_ID_SAITEK 0x06a3 |
386 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 | 403 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 |
387 | 404 | ||
405 | #define USB_VENDOR_ID_QUANTA 0x0408 | ||
406 | #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000 | ||
407 | #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001 | ||
408 | |||
388 | #define USB_VENDOR_ID_SAMSUNG 0x0419 | 409 | #define USB_VENDOR_ID_SAMSUNG 0x0419 |
389 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 | 410 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 |
390 | 411 | ||
@@ -396,18 +417,20 @@ | |||
396 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034 | 417 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034 |
397 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046 | 418 | #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046 |
398 | 419 | ||
420 | #define USB_VENDOR_ID_STANTUM 0x1f87 | ||
421 | #define USB_DEVICE_ID_MTP 0x0002 | ||
422 | |||
399 | #define USB_VENDOR_ID_SUN 0x0430 | 423 | #define USB_VENDOR_ID_SUN 0x0430 |
400 | #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab | 424 | #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab |
401 | 425 | ||
402 | #define USB_VENDOR_ID_SUNPLUS 0x04fc | 426 | #define USB_VENDOR_ID_SUNPLUS 0x04fc |
403 | #define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8 | 427 | #define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8 |
404 | 428 | ||
405 | #define USB_VENDOR_ID_TENX 0x1130 | ||
406 | #define USB_DEVICE_ID_TENX_IBUDDY1 0x0001 | ||
407 | #define USB_DEVICE_ID_TENX_IBUDDY2 0x0002 | ||
408 | |||
409 | #define USB_VENDOR_ID_THRUSTMASTER 0x044f | 429 | #define USB_VENDOR_ID_THRUSTMASTER 0x044f |
410 | 430 | ||
431 | #define USB_VENDOR_ID_TOUCHPACK 0x1bfd | ||
432 | #define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688 | ||
433 | |||
411 | #define USB_VENDOR_ID_TOPMAX 0x0663 | 434 | #define USB_VENDOR_ID_TOPMAX 0x0663 |
412 | #define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103 | 435 | #define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103 |
413 | 436 | ||
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 5862b0f3b55d..79d9edd0bdfa 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2001 Vojtech Pavlik | 2 | * Copyright (c) 2000-2001 Vojtech Pavlik |
3 | * Copyright (c) 2006-2007 Jiri Kosina | 3 | * Copyright (c) 2006-2010 Jiri Kosina |
4 | * | 4 | * |
5 | * HID to Linux Input mapping | 5 | * HID to Linux Input mapping |
6 | */ | 6 | */ |
@@ -193,12 +193,17 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
193 | break; | 193 | break; |
194 | 194 | ||
195 | case HID_UP_BUTTON: | 195 | case HID_UP_BUTTON: |
196 | code = ((usage->hid - 1) & 0xf); | 196 | code = ((usage->hid - 1) & HID_USAGE); |
197 | 197 | ||
198 | switch (field->application) { | 198 | switch (field->application) { |
199 | case HID_GD_MOUSE: | 199 | case HID_GD_MOUSE: |
200 | case HID_GD_POINTER: code += 0x110; break; | 200 | case HID_GD_POINTER: code += 0x110; break; |
201 | case HID_GD_JOYSTICK: code += 0x120; break; | 201 | case HID_GD_JOYSTICK: |
202 | if (code <= 0xf) | ||
203 | code += BTN_JOYSTICK; | ||
204 | else | ||
205 | code += BTN_TRIGGER_HAPPY; | ||
206 | break; | ||
202 | case HID_GD_GAMEPAD: code += 0x130; break; | 207 | case HID_GD_GAMEPAD: code += 0x130; break; |
203 | default: | 208 | default: |
204 | switch (field->physical) { | 209 | switch (field->physical) { |
@@ -400,6 +405,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
400 | case 0x192: map_key_clear(KEY_CALC); break; | 405 | case 0x192: map_key_clear(KEY_CALC); break; |
401 | case 0x194: map_key_clear(KEY_FILE); break; | 406 | case 0x194: map_key_clear(KEY_FILE); break; |
402 | case 0x196: map_key_clear(KEY_WWW); break; | 407 | case 0x196: map_key_clear(KEY_WWW); break; |
408 | case 0x199: map_key_clear(KEY_CHAT); break; | ||
403 | case 0x19c: map_key_clear(KEY_LOGOFF); break; | 409 | case 0x19c: map_key_clear(KEY_LOGOFF); break; |
404 | case 0x19e: map_key_clear(KEY_COFFEE); break; | 410 | case 0x19e: map_key_clear(KEY_COFFEE); break; |
405 | case 0x1a6: map_key_clear(KEY_HELP); break; | 411 | case 0x1a6: map_key_clear(KEY_HELP); break; |
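As an aside on the HID_UP_BUTTON hunk above: joystick button indexes beyond the first sixteen now spill into the BTN_TRIGGER_HAPPY range instead of colliding with unrelated codes. A minimal standalone sketch of that mapping, assuming only the standard input-layer constants (BTN_JOYSTICK 0x120, BTN_TRIGGER_HAPPY 0x2c0) and a hypothetical helper name; it is an illustration of the mapping rule, not the driver's actual code path.

#include <stdio.h>

#define BTN_JOYSTICK       0x120
#define BTN_TRIGGER_HAPPY  0x2c0

/* Map a zero-based HID button index from a joystick application to an
 * input-layer key code, mirroring the hunk above: the first 16 buttons
 * land in the BTN_JOYSTICK range, the rest spill into BTN_TRIGGER_HAPPY.
 */
static unsigned int joystick_button_code(unsigned int index)
{
	if (index <= 0xf)
		return BTN_JOYSTICK + index;
	return BTN_TRIGGER_HAPPY + index;
}

int main(void)
{
	printf("button 0  -> 0x%x\n", joystick_button_code(0));   /* 0x120 */
	printf("button 20 -> 0x%x\n", joystick_button_code(20));  /* 0x2d4 */
	return 0;
}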
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 9fcd3d017ab3..3677c9037a11 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #define LG_FF 0x200 | 34 | #define LG_FF 0x200 |
35 | #define LG_FF2 0x400 | 35 | #define LG_FF2 0x400 |
36 | #define LG_RDESC_REL_ABS 0x800 | 36 | #define LG_RDESC_REL_ABS 0x800 |
37 | #define LG_FF3 0x1000 | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Certain Logitech keyboards send in report #3 keys which are far | 40 | * Certain Logitech keyboards send in report #3 keys which are far |
@@ -266,7 +267,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
266 | goto err_free; | 267 | goto err_free; |
267 | } | 268 | } |
268 | 269 | ||
269 | if (quirks & (LG_FF | LG_FF2)) | 270 | if (quirks & (LG_FF | LG_FF2 | LG_FF3)) |
270 | connect_mask &= ~HID_CONNECT_FF; | 271 | connect_mask &= ~HID_CONNECT_FF; |
271 | 272 | ||
272 | ret = hid_hw_start(hdev, connect_mask); | 273 | ret = hid_hw_start(hdev, connect_mask); |
@@ -279,6 +280,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
279 | lgff_init(hdev); | 280 | lgff_init(hdev); |
280 | if (quirks & LG_FF2) | 281 | if (quirks & LG_FF2) |
281 | lg2ff_init(hdev); | 282 | lg2ff_init(hdev); |
283 | if (quirks & LG_FF3) | ||
284 | lg3ff_init(hdev); | ||
282 | 285 | ||
283 | return 0; | 286 | return 0; |
284 | err_free: | 287 | err_free: |
@@ -331,6 +334,8 @@ static const struct hid_device_id lg_devices[] = { | |||
331 | .driver_data = LG_FF }, | 334 | .driver_data = LG_FF }, |
332 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), | 335 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), |
333 | .driver_data = LG_FF2 }, | 336 | .driver_data = LG_FF2 }, |
337 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), | ||
338 | .driver_data = LG_FF3 }, | ||
334 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), | 339 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), |
335 | .driver_data = LG_RDESC_REL_ABS }, | 340 | .driver_data = LG_RDESC_REL_ABS }, |
336 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER), | 341 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER), |
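The probe changes above follow the usual hid-lg pattern of routing per-device quirk bits (carried in driver_data) to the matching force-feedback init routine while masking generic FF setup. A compact sketch of that dispatch, using the flag values from the file and placeholder init functions; it illustrates the pattern rather than reproducing the kernel code.

#include <stdio.h>

#define LG_FF   0x200
#define LG_FF2  0x400
#define LG_FF3  0x1000

/* Placeholder standing in for lgff_init()/lg2ff_init()/lg3ff_init(). */
static int ff_init(const char *name) { printf("init %s\n", name); return 0; }

/* Dispatch force-feedback setup from the quirk bits, mirroring lg_probe():
 * each FF-capable device gets its protocol-specific init routine.
 */
static void lg_setup_ff(unsigned long quirks)
{
	if (quirks & LG_FF)
		ff_init("lgff");
	if (quirks & LG_FF2)
		ff_init("lg2ff");
	if (quirks & LG_FF3)
		ff_init("lg3ff");
}

int main(void)
{
	lg_setup_ff(LG_FF3);	/* Flight System G940 takes the lg3ff path */
	return 0;
}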
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h index bf31592eaf79..ce2ac8672624 100644 --- a/drivers/hid/hid-lg.h +++ b/drivers/hid/hid-lg.h | |||
@@ -13,4 +13,10 @@ int lg2ff_init(struct hid_device *hdev); | |||
13 | static inline int lg2ff_init(struct hid_device *hdev) { return -1; } | 13 | static inline int lg2ff_init(struct hid_device *hdev) { return -1; } |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | #ifdef CONFIG_LOGIG940_FF | ||
17 | int lg3ff_init(struct hid_device *hdev); | ||
18 | #else | ||
19 | static inline int lg3ff_init(struct hid_device *hdev) { return -1; } | ||
20 | #endif | ||
21 | |||
16 | #endif | 22 | #endif |
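The header addition uses the common kernel idiom of a static inline stub when the Kconfig option is off, so lg_probe() can call lg3ff_init() unconditionally. A tiny runnable sketch of the idiom with a made-up option and function name:

#include <stdio.h>

/* Sketch of the Kconfig stub idiom used in hid-lg.h above; the option and
 * function names are hypothetical. With the option unset, call sites still
 * compile and simply receive the -1 stub.
 */
#ifdef CONFIG_EXAMPLE_FEATURE
int example_init(void);                 /* real implementation, built elsewhere */
#else
static inline int example_init(void)    /* no-op stub so callers still build */
{
	return -1;
}
#endif

int main(void)
{
	printf("example_init() -> %d\n", example_init());
	return 0;
}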
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c new file mode 100644 index 000000000000..4002832ee4af --- /dev/null +++ b/drivers/hid/hid-lg3ff.c | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * Force feedback support for Logitech Flight System G940 | ||
3 | * | ||
4 | * Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com> | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | */ | ||
22 | |||
23 | |||
24 | #include <linux/input.h> | ||
25 | #include <linux/usb.h> | ||
26 | #include <linux/hid.h> | ||
27 | |||
28 | #include "usbhid/usbhid.h" | ||
29 | #include "hid-lg.h" | ||
30 | |||
31 | /* | ||
32 | * G940 Theory of Operation (from experimentation) | ||
33 | * | ||
34 | * There are 63 fields (only 3 of them currently used) | ||
35 | * 0 - seems to be command field | ||
36 | * 1 - 30 deal with the x axis | ||
37 | * 31 - 60 deal with the y axis | ||
38 | * | ||
39 | * Field 1 is x axis constant force | ||
40 | * Field 31 is y axis constant force | ||
41 | * | ||
42 | * other interesting fields 1,2,3,4 on x axis | ||
43 | * (same for 31,32,33,34 on y axis) | ||
44 | * | ||
45 | * 0 0 127 127 makes the joystick autocenter hard | ||
46 | * | ||
47 | * 127 0 127 127 makes the joystick loose on the right, | ||
48 | * but stops all movement left | ||
49 | * | ||
50 | * -127 0 -127 -127 makes the joystick loose on the left, | ||
51 | * but stops all movement right | ||
52 | * | ||
53 | * 0 0 -127 -127 makes the joystick rattle very hard | ||
54 | * | ||
55 | * I'm sure these are effects, but I don't know enough about them yet | ||
56 | */ | ||
57 | |||
58 | struct lg3ff_device { | ||
59 | struct hid_report *report; | ||
60 | }; | ||
61 | |||
62 | static int hid_lg3ff_play(struct input_dev *dev, void *data, | ||
63 | struct ff_effect *effect) | ||
64 | { | ||
65 | struct hid_device *hid = input_get_drvdata(dev); | ||
66 | struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; | ||
67 | struct hid_report *report = list_entry(report_list->next, struct hid_report, list); | ||
68 | int x, y; | ||
69 | |||
70 | /* | ||
71 | * Maxusage should always be 63 (maximum fields) | ||
72 | * likely a better way to ensure this data is clean | ||
73 | */ | ||
74 | memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage); | ||
75 | |||
76 | switch (effect->type) { | ||
77 | case FF_CONSTANT: | ||
78 | /* | ||
79 | * Already clamped in ff_memless | ||
80 | * 0 is center (different from other Logitech devices) | ||
81 | */ | ||
82 | x = effect->u.ramp.start_level; | ||
83 | y = effect->u.ramp.end_level; | ||
84 | |||
85 | /* send command byte */ | ||
86 | report->field[0]->value[0] = 0x51; | ||
87 | |||
88 | /* | ||
89 | * Sign is backwards from the other Force3D Pro devices; | ||
90 | * values are recast here as two's complement 8-bit | ||
91 | */ | ||
92 | report->field[0]->value[1] = (unsigned char)(-x); | ||
93 | report->field[0]->value[31] = (unsigned char)(-y); | ||
94 | |||
95 | usbhid_submit_report(hid, report, USB_DIR_OUT); | ||
96 | break; | ||
97 | } | ||
98 | return 0; | ||
99 | } | ||
100 | static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude) | ||
101 | { | ||
102 | struct hid_device *hid = input_get_drvdata(dev); | ||
103 | struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; | ||
104 | struct hid_report *report = list_entry(report_list->next, struct hid_report, list); | ||
105 | |||
106 | /* | ||
107 | * Auto Centering probed from device | ||
108 | * NOTE: deadman's switch on G940 must be covered | ||
109 | * for effects to work | ||
110 | */ | ||
111 | report->field[0]->value[0] = 0x51; | ||
112 | report->field[0]->value[1] = 0x00; | ||
113 | report->field[0]->value[2] = 0x00; | ||
114 | report->field[0]->value[3] = 0x7F; | ||
115 | report->field[0]->value[4] = 0x7F; | ||
116 | report->field[0]->value[31] = 0x00; | ||
117 | report->field[0]->value[32] = 0x00; | ||
118 | report->field[0]->value[33] = 0x7F; | ||
119 | report->field[0]->value[34] = 0x7F; | ||
120 | |||
121 | usbhid_submit_report(hid, report, USB_DIR_OUT); | ||
122 | } | ||
123 | |||
124 | |||
125 | static const signed short ff3_joystick_ac[] = { | ||
126 | FF_CONSTANT, | ||
127 | FF_AUTOCENTER, | ||
128 | -1 | ||
129 | }; | ||
130 | |||
131 | int lg3ff_init(struct hid_device *hid) | ||
132 | { | ||
133 | struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); | ||
134 | struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; | ||
135 | struct input_dev *dev = hidinput->input; | ||
136 | struct hid_report *report; | ||
137 | struct hid_field *field; | ||
138 | const signed short *ff_bits = ff3_joystick_ac; | ||
139 | int error; | ||
140 | int i; | ||
141 | |||
142 | /* Find the report to use */ | ||
143 | if (list_empty(report_list)) { | ||
144 | err_hid("No output report found"); | ||
145 | return -1; | ||
146 | } | ||
147 | |||
148 | /* Check that the report looks ok */ | ||
149 | report = list_entry(report_list->next, struct hid_report, list); | ||
150 | if (!report) { | ||
151 | err_hid("NULL output report"); | ||
152 | return -1; | ||
153 | } | ||
154 | |||
155 | field = report->field[0]; | ||
156 | if (!field) { | ||
157 | err_hid("NULL field"); | ||
158 | return -1; | ||
159 | } | ||
160 | |||
161 | /* Assume single fixed device G940 */ | ||
162 | for (i = 0; ff_bits[i] >= 0; i++) | ||
163 | set_bit(ff_bits[i], dev->ffbit); | ||
164 | |||
165 | error = input_ff_create_memless(dev, NULL, hid_lg3ff_play); | ||
166 | if (error) | ||
167 | return error; | ||
168 | |||
169 | if (test_bit(FF_AUTOCENTER, dev->ffbit)) | ||
170 | dev->ff->set_autocenter = hid_lg3ff_set_autocenter; | ||
171 | |||
172 | dev_info(&hid->dev, "Force feedback for Logitech Flight System G940 by " | ||
173 | "Gary Stein <LordCnidarian@gmail.com>\n"); | ||
174 | return 0; | ||
175 | } | ||
176 | |||
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index 987abebe0829..61142b76a9b1 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c | |||
@@ -67,6 +67,7 @@ static const struct dev_type devices[] = { | |||
67 | { 0x046d, 0xc219, ff_rumble }, | 67 | { 0x046d, 0xc219, ff_rumble }, |
68 | { 0x046d, 0xc283, ff_joystick }, | 68 | { 0x046d, 0xc283, ff_joystick }, |
69 | { 0x046d, 0xc286, ff_joystick_ac }, | 69 | { 0x046d, 0xc286, ff_joystick_ac }, |
70 | { 0x046d, 0xc287, ff_joystick_ac }, | ||
70 | { 0x046d, 0xc293, ff_joystick }, | 71 | { 0x046d, 0xc293, ff_joystick }, |
71 | { 0x046d, 0xc294, ff_wheel }, | 72 | { 0x046d, 0xc294, ff_wheel }, |
72 | { 0x046d, 0xc295, ff_joystick }, | 73 | { 0x046d, 0xc295, ff_joystick }, |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c new file mode 100644 index 000000000000..4a3a94f2b10c --- /dev/null +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | * Apple "Magic" Wireless Mouse driver | ||
3 | * | ||
4 | * Copyright (c) 2010 Michael Poole <mdpoole@troilus.org> | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/hid.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/usb.h> | ||
18 | |||
19 | #include "hid-ids.h" | ||
20 | |||
21 | static bool emulate_3button = true; | ||
22 | module_param(emulate_3button, bool, 0644); | ||
23 | MODULE_PARM_DESC(emulate_3button, "Emulate a middle button"); | ||
24 | |||
25 | static int middle_button_start = -350; | ||
26 | static int middle_button_stop = +350; | ||
27 | |||
28 | static bool emulate_scroll_wheel = true; | ||
29 | module_param(emulate_scroll_wheel, bool, 0644); | ||
30 | MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel"); | ||
31 | |||
32 | static bool report_touches = true; | ||
33 | module_param(report_touches, bool, 0644); | ||
34 | MODULE_PARM_DESC(report_touches, "Emit touch records (otherwise, only use them for emulation)"); | ||
35 | |||
36 | static bool report_undeciphered; | ||
37 | module_param(report_undeciphered, bool, 0644); | ||
38 | MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event"); | ||
39 | |||
40 | #define TOUCH_REPORT_ID 0x29 | ||
41 | /* These definitions are not precise, but they're close enough. (Bits | ||
42 | * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem | ||
43 | * to be some kind of bit mask -- 0x20 may be a near-field reading, | ||
44 | * and 0x40 is actual contact, and 0x10 may be a start/stop or change | ||
45 | * indication.) | ||
46 | */ | ||
47 | #define TOUCH_STATE_MASK 0xf0 | ||
48 | #define TOUCH_STATE_NONE 0x00 | ||
49 | #define TOUCH_STATE_START 0x30 | ||
50 | #define TOUCH_STATE_DRAG 0x40 | ||
51 | |||
52 | /** | ||
53 | * struct magicmouse_sc - Tracks Magic Mouse-specific data. | ||
54 | * @input: Input device through which we report events. | ||
55 | * @quirks: Currently unused. | ||
56 | * @last_timestamp: Timestamp from most recent (18-bit) touch report | ||
57 | * (units of milliseconds over short windows, but seems to | ||
58 | * increase faster when there are no touches). | ||
59 | * @delta_time: 18-bit difference between the two most recent touch | ||
60 | * reports from the mouse. | ||
61 | * @ntouches: Number of touches in most recent touch report. | ||
62 | * @scroll_accel: Number of consecutive scroll motions. | ||
63 | * @scroll_jiffies: Time of last scroll motion. | ||
64 | * @touches: Most recent data for a touch, indexed by tracking ID. | ||
65 | * @tracking_ids: Mapping of current touch input data to @touches. | ||
66 | */ | ||
67 | struct magicmouse_sc { | ||
68 | struct input_dev *input; | ||
69 | unsigned long quirks; | ||
70 | |||
71 | int last_timestamp; | ||
72 | int delta_time; | ||
73 | int ntouches; | ||
74 | int scroll_accel; | ||
75 | unsigned long scroll_jiffies; | ||
76 | |||
77 | struct { | ||
78 | short x; | ||
79 | short y; | ||
80 | short scroll_y; | ||
81 | u8 size; | ||
82 | } touches[16]; | ||
83 | int tracking_ids[16]; | ||
84 | }; | ||
85 | |||
86 | static int magicmouse_firm_touch(struct magicmouse_sc *msc) | ||
87 | { | ||
88 | int touch = -1; | ||
89 | int ii; | ||
90 | |||
91 | /* If there is only one "firm" touch, set touch to its | ||
92 | * tracking ID. | ||
93 | */ | ||
94 | for (ii = 0; ii < msc->ntouches; ii++) { | ||
95 | int idx = msc->tracking_ids[ii]; | ||
96 | if (msc->touches[idx].size < 8) { | ||
97 | /* Ignore this touch. */ | ||
98 | } else if (touch >= 0) { | ||
99 | touch = -1; | ||
100 | break; | ||
101 | } else { | ||
102 | touch = idx; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | return touch; | ||
107 | } | ||
108 | |||
109 | static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state) | ||
110 | { | ||
111 | int last_state = test_bit(BTN_LEFT, msc->input->key) << 0 | | ||
112 | test_bit(BTN_RIGHT, msc->input->key) << 1 | | ||
113 | test_bit(BTN_MIDDLE, msc->input->key) << 2; | ||
114 | |||
115 | if (emulate_3button) { | ||
116 | int id; | ||
117 | |||
118 | /* If some button was pressed before, keep it held | ||
119 | * down. Otherwise, if there's exactly one firm | ||
120 | * touch, use that to override the mouse's guess. | ||
121 | */ | ||
122 | if (state == 0) { | ||
123 | /* The button was released. */ | ||
124 | } else if (last_state != 0) { | ||
125 | state = last_state; | ||
126 | } else if ((id = magicmouse_firm_touch(msc)) >= 0) { | ||
127 | int x = msc->touches[id].x; | ||
128 | if (x < middle_button_start) | ||
129 | state = 1; | ||
130 | else if (x > middle_button_stop) | ||
131 | state = 2; | ||
132 | else | ||
133 | state = 4; | ||
134 | } /* else: we keep the mouse's guess */ | ||
135 | |||
136 | input_report_key(msc->input, BTN_MIDDLE, state & 4); | ||
137 | } | ||
138 | |||
139 | input_report_key(msc->input, BTN_LEFT, state & 1); | ||
140 | input_report_key(msc->input, BTN_RIGHT, state & 2); | ||
141 | |||
142 | if (state != last_state) | ||
143 | msc->scroll_accel = 0; | ||
144 | } | ||
145 | |||
146 | static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata) | ||
147 | { | ||
148 | struct input_dev *input = msc->input; | ||
149 | __s32 x_y = tdata[0] << 8 | tdata[1] << 16 | tdata[2] << 24; | ||
150 | int misc = tdata[5] | tdata[6] << 8; | ||
151 | int id = (misc >> 6) & 15; | ||
152 | int x = x_y << 12 >> 20; | ||
153 | int y = -(x_y >> 20); | ||
154 | |||
155 | /* Store tracking ID and other fields. */ | ||
156 | msc->tracking_ids[raw_id] = id; | ||
157 | msc->touches[id].x = x; | ||
158 | msc->touches[id].y = y; | ||
159 | msc->touches[id].size = misc & 63; | ||
160 | |||
161 | /* If requested, emulate a scroll wheel by detecting small | ||
162 | * vertical touch motions along the middle of the mouse. | ||
163 | */ | ||
164 | if (emulate_scroll_wheel && | ||
165 | middle_button_start < x && x < middle_button_stop) { | ||
166 | static const int accel_profile[] = { | ||
167 | 256, 228, 192, 160, 128, 96, 64, 32, | ||
168 | }; | ||
169 | unsigned long now = jiffies; | ||
170 | int step = msc->touches[id].scroll_y - y; | ||
171 | |||
172 | /* Reset acceleration after half a second. */ | ||
173 | if (time_after(now, msc->scroll_jiffies + HZ / 2)) | ||
174 | msc->scroll_accel = 0; | ||
175 | |||
176 | /* Calculate and apply the scroll motion. */ | ||
177 | switch (tdata[7] & TOUCH_STATE_MASK) { | ||
178 | case TOUCH_STATE_START: | ||
179 | msc->touches[id].scroll_y = y; | ||
180 | msc->scroll_accel = min_t(int, msc->scroll_accel + 1, | ||
181 | ARRAY_SIZE(accel_profile) - 1); | ||
182 | break; | ||
183 | case TOUCH_STATE_DRAG: | ||
184 | step = step / accel_profile[msc->scroll_accel]; | ||
185 | if (step != 0) { | ||
186 | msc->touches[id].scroll_y = y; | ||
187 | msc->scroll_jiffies = now; | ||
188 | input_report_rel(input, REL_WHEEL, step); | ||
189 | } | ||
190 | break; | ||
191 | } | ||
192 | } | ||
193 | |||
194 | /* Generate the input events for this touch. */ | ||
195 | if (report_touches) { | ||
196 | int orientation = (misc >> 10) - 32; | ||
197 | |||
198 | input_report_abs(input, ABS_MT_TRACKING_ID, id); | ||
199 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, tdata[3]); | ||
200 | input_report_abs(input, ABS_MT_TOUCH_MINOR, tdata[4]); | ||
201 | input_report_abs(input, ABS_MT_ORIENTATION, orientation); | ||
202 | input_report_abs(input, ABS_MT_POSITION_X, x); | ||
203 | input_report_abs(input, ABS_MT_POSITION_Y, y); | ||
204 | |||
205 | if (report_undeciphered) | ||
206 | input_event(input, EV_MSC, MSC_RAW, tdata[7]); | ||
207 | |||
208 | input_mt_sync(input); | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static int magicmouse_raw_event(struct hid_device *hdev, | ||
213 | struct hid_report *report, u8 *data, int size) | ||
214 | { | ||
215 | struct magicmouse_sc *msc = hid_get_drvdata(hdev); | ||
216 | struct input_dev *input = msc->input; | ||
217 | int x, y, ts, ii, clicks; | ||
218 | |||
219 | switch (data[0]) { | ||
220 | case 0x10: | ||
221 | if (size != 6) | ||
222 | return 0; | ||
223 | x = (__s16)(data[2] | data[3] << 8); | ||
224 | y = (__s16)(data[4] | data[5] << 8); | ||
225 | clicks = data[1]; | ||
226 | break; | ||
227 | case TOUCH_REPORT_ID: | ||
228 | /* Expect six bytes of prefix, and N*8 bytes of touch data. */ | ||
229 | if (size < 6 || ((size - 6) % 8) != 0) | ||
230 | return 0; | ||
231 | ts = data[3] >> 6 | data[4] << 2 | data[5] << 10; | ||
232 | msc->delta_time = (ts - msc->last_timestamp) & 0x3ffff; | ||
233 | msc->last_timestamp = ts; | ||
234 | msc->ntouches = (size - 6) / 8; | ||
235 | for (ii = 0; ii < msc->ntouches; ii++) | ||
236 | magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); | ||
237 | /* When emulating three-button mode, it is important | ||
238 | * to have the current touch information before | ||
239 | * generating a click event. | ||
240 | */ | ||
241 | x = (signed char)data[1]; | ||
242 | y = (signed char)data[2]; | ||
243 | clicks = data[3]; | ||
244 | break; | ||
245 | case 0x20: /* Theoretically battery status (0-100), but I have | ||
246 | * never seen it -- maybe it is only sent upon request. | ||
247 | */ | ||
248 | case 0x60: /* Unknown, maybe laser on/off. */ | ||
249 | case 0x61: /* Laser reflection status change. | ||
250 | * data[1]: 0 = spotted, 1 = lost | ||
251 | */ | ||
252 | default: | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | magicmouse_emit_buttons(msc, clicks & 3); | ||
257 | input_report_rel(input, REL_X, x); | ||
258 | input_report_rel(input, REL_Y, y); | ||
259 | input_sync(input); | ||
260 | return 1; | ||
261 | } | ||
262 | |||
263 | static int magicmouse_input_open(struct input_dev *dev) | ||
264 | { | ||
265 | struct hid_device *hid = input_get_drvdata(dev); | ||
266 | |||
267 | return hid->ll_driver->open(hid); | ||
268 | } | ||
269 | |||
270 | static void magicmouse_input_close(struct input_dev *dev) | ||
271 | { | ||
272 | struct hid_device *hid = input_get_drvdata(dev); | ||
273 | |||
274 | hid->ll_driver->close(hid); | ||
275 | } | ||
276 | |||
277 | static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev) | ||
278 | { | ||
279 | input_set_drvdata(input, hdev); | ||
280 | input->event = hdev->ll_driver->hidinput_input_event; | ||
281 | input->open = magicmouse_input_open; | ||
282 | input->close = magicmouse_input_close; | ||
283 | |||
284 | input->name = hdev->name; | ||
285 | input->phys = hdev->phys; | ||
286 | input->uniq = hdev->uniq; | ||
287 | input->id.bustype = hdev->bus; | ||
288 | input->id.vendor = hdev->vendor; | ||
289 | input->id.product = hdev->product; | ||
290 | input->id.version = hdev->version; | ||
291 | input->dev.parent = hdev->dev.parent; | ||
292 | |||
293 | __set_bit(EV_KEY, input->evbit); | ||
294 | __set_bit(BTN_LEFT, input->keybit); | ||
295 | __set_bit(BTN_RIGHT, input->keybit); | ||
296 | if (emulate_3button) | ||
297 | __set_bit(BTN_MIDDLE, input->keybit); | ||
298 | __set_bit(BTN_TOOL_FINGER, input->keybit); | ||
299 | |||
300 | __set_bit(EV_REL, input->evbit); | ||
301 | __set_bit(REL_X, input->relbit); | ||
302 | __set_bit(REL_Y, input->relbit); | ||
303 | if (emulate_scroll_wheel) | ||
304 | __set_bit(REL_WHEEL, input->relbit); | ||
305 | |||
306 | if (report_touches) { | ||
307 | __set_bit(EV_ABS, input->evbit); | ||
308 | |||
309 | input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0); | ||
310 | input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); | ||
311 | input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); | ||
312 | input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); | ||
313 | input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 1358, | ||
314 | 4, 0); | ||
315 | /* Note: Touch Y position from the device is inverted relative | ||
316 | * to how pointer motion is reported (and relative to how USB | ||
317 | * HID recommends the coordinates work). This driver keeps | ||
318 | * the origin at the same position, and just uses the additive | ||
319 | * inverse of the reported Y. | ||
320 | */ | ||
321 | input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 2047, | ||
322 | 4, 0); | ||
323 | } | ||
324 | |||
325 | if (report_undeciphered) { | ||
326 | __set_bit(EV_MSC, input->evbit); | ||
327 | __set_bit(MSC_RAW, input->mscbit); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | static int magicmouse_probe(struct hid_device *hdev, | ||
332 | const struct hid_device_id *id) | ||
333 | { | ||
334 | __u8 feature_1[] = { 0xd7, 0x01 }; | ||
335 | __u8 feature_2[] = { 0xf8, 0x01, 0x32 }; | ||
336 | struct input_dev *input; | ||
337 | struct magicmouse_sc *msc; | ||
338 | struct hid_report *report; | ||
339 | int ret; | ||
340 | |||
341 | msc = kzalloc(sizeof(*msc), GFP_KERNEL); | ||
342 | if (msc == NULL) { | ||
343 | dev_err(&hdev->dev, "can't alloc magicmouse descriptor\n"); | ||
344 | return -ENOMEM; | ||
345 | } | ||
346 | |||
347 | msc->quirks = id->driver_data; | ||
348 | hid_set_drvdata(hdev, msc); | ||
349 | |||
350 | ret = hid_parse(hdev); | ||
351 | if (ret) { | ||
352 | dev_err(&hdev->dev, "magicmouse hid parse failed\n"); | ||
353 | goto err_free; | ||
354 | } | ||
355 | |||
356 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
357 | if (ret) { | ||
358 | dev_err(&hdev->dev, "magicmouse hw start failed\n"); | ||
359 | goto err_free; | ||
360 | } | ||
361 | |||
362 | report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID); | ||
363 | if (!report) { | ||
364 | dev_err(&hdev->dev, "unable to register touch report\n"); | ||
365 | ret = -ENOMEM; | ||
366 | goto err_stop_hw; | ||
367 | } | ||
368 | report->size = 6; | ||
369 | |||
370 | ret = hdev->hid_output_raw_report(hdev, feature_1, sizeof(feature_1), | ||
371 | HID_FEATURE_REPORT); | ||
372 | if (ret != sizeof(feature_1)) { | ||
373 | dev_err(&hdev->dev, "unable to request touch data (1:%d)\n", | ||
374 | ret); | ||
375 | goto err_stop_hw; | ||
376 | } | ||
377 | ret = hdev->hid_output_raw_report(hdev, feature_2, | ||
378 | sizeof(feature_2), HID_FEATURE_REPORT); | ||
379 | if (ret != sizeof(feature_2)) { | ||
380 | dev_err(&hdev->dev, "unable to request touch data (2:%d)\n", | ||
381 | ret); | ||
382 | goto err_stop_hw; | ||
383 | } | ||
384 | |||
385 | input = input_allocate_device(); | ||
386 | if (!input) { | ||
387 | dev_err(&hdev->dev, "can't alloc input device\n"); | ||
388 | ret = -ENOMEM; | ||
389 | goto err_stop_hw; | ||
390 | } | ||
391 | magicmouse_setup_input(input, hdev); | ||
392 | |||
393 | ret = input_register_device(input); | ||
394 | if (ret) { | ||
395 | dev_err(&hdev->dev, "input device registration failed\n"); | ||
396 | goto err_input; | ||
397 | } | ||
398 | msc->input = input; | ||
399 | |||
400 | return 0; | ||
401 | err_input: | ||
402 | input_free_device(input); | ||
403 | err_stop_hw: | ||
404 | hid_hw_stop(hdev); | ||
405 | err_free: | ||
406 | kfree(msc); | ||
407 | return ret; | ||
408 | } | ||
409 | |||
410 | static void magicmouse_remove(struct hid_device *hdev) | ||
411 | { | ||
412 | hid_hw_stop(hdev); | ||
413 | kfree(hid_get_drvdata(hdev)); | ||
414 | } | ||
415 | |||
416 | static const struct hid_device_id magic_mice[] = { | ||
417 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE), | ||
418 | .driver_data = 0 }, | ||
419 | { } | ||
420 | }; | ||
421 | MODULE_DEVICE_TABLE(hid, magic_mice); | ||
422 | |||
423 | static struct hid_driver magicmouse_driver = { | ||
424 | .name = "magicmouse", | ||
425 | .id_table = magic_mice, | ||
426 | .probe = magicmouse_probe, | ||
427 | .remove = magicmouse_remove, | ||
428 | .raw_event = magicmouse_raw_event, | ||
429 | }; | ||
430 | |||
431 | static int __init magicmouse_init(void) | ||
432 | { | ||
433 | int ret; | ||
434 | |||
435 | ret = hid_register_driver(&magicmouse_driver); | ||
436 | if (ret) | ||
437 | printk(KERN_ERR "can't register magicmouse driver\n"); | ||
438 | |||
439 | return ret; | ||
440 | } | ||
441 | |||
442 | static void __exit magicmouse_exit(void) | ||
443 | { | ||
444 | hid_unregister_driver(&magicmouse_driver); | ||
445 | } | ||
446 | |||
447 | module_init(magicmouse_init); | ||
448 | module_exit(magicmouse_exit); | ||
449 | MODULE_LICENSE("GPL"); | ||
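For reference, the bit-level decoding that magicmouse_emit_touch() applies to each 8-byte touch record, pulled out into a standalone sketch. The field layout is the one the driver itself assumes (two signed 12-bit X/Y values packed into bytes 0..2, size, tracking ID and orientation in bytes 5..6); the sample record is made up.

#include <stdio.h>
#include <stdint.h>

struct mm_touch {
	int x, y;         /* signed 12-bit positions */
	int id;           /* 4-bit tracking ID */
	int size;         /* 6-bit touch size */
	int orientation;  /* roughly -32..31 */
};

/* Decode one 8-byte touch record the way magicmouse_emit_touch() does. */
static void mm_decode(const uint8_t tdata[8], struct mm_touch *t)
{
	uint32_t raw = (uint32_t)tdata[0] << 8 | (uint32_t)tdata[1] << 16 |
		       (uint32_t)tdata[2] << 24;
	int misc = tdata[5] | tdata[6] << 8;

	t->x = (int32_t)(raw << 12) >> 20;   /* sign-extend the low 12 bits */
	t->y = -((int32_t)raw >> 20);        /* invert the high 12 bits, as the driver does */
	t->id = (misc >> 6) & 15;
	t->size = misc & 63;
	t->orientation = (misc >> 10) - 32;
}

int main(void)
{
	const uint8_t sample[8] = { 0x10, 0x02, 0xf0, 0x40, 0x30, 0x85, 0x01, 0x40 };
	struct mm_touch t;

	mm_decode(sample, &t);
	printf("id=%d x=%d y=%d size=%d orientation=%d\n",
	       t.id, t.x, t.y, t.size, t.orientation);
	return 0;
}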
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c new file mode 100644 index 000000000000..c8718168fe42 --- /dev/null +++ b/drivers/hid/hid-mosart.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * HID driver for the multitouch panel on the ASUS EeePC T91MT | ||
3 | * | ||
4 | * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> | ||
5 | * Copyright (c) 2010 Teemu Tuominen <teemu.tuominen@cybercom.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/hid.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/usb.h> | ||
20 | #include "usbhid/usbhid.h" | ||
21 | |||
22 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
23 | MODULE_DESCRIPTION("MosArt dual-touch panel"); | ||
24 | MODULE_LICENSE("GPL"); | ||
25 | |||
26 | #include "hid-ids.h" | ||
27 | |||
28 | struct mosart_data { | ||
29 | __u16 x, y; | ||
30 | __u8 id; | ||
31 | bool valid; /* valid finger data, or just placeholder? */ | ||
32 | bool first; /* is this the first finger in this frame? */ | ||
33 | bool activity_now; /* at least one active finger in this frame? */ | ||
34 | bool activity; /* at least one active finger previously? */ | ||
35 | }; | ||
36 | |||
37 | static int mosart_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
38 | struct hid_field *field, struct hid_usage *usage, | ||
39 | unsigned long **bit, int *max) | ||
40 | { | ||
41 | switch (usage->hid & HID_USAGE_PAGE) { | ||
42 | |||
43 | case HID_UP_GENDESK: | ||
44 | switch (usage->hid) { | ||
45 | case HID_GD_X: | ||
46 | hid_map_usage(hi, usage, bit, max, | ||
47 | EV_ABS, ABS_MT_POSITION_X); | ||
48 | /* touchscreen emulation */ | ||
49 | input_set_abs_params(hi->input, ABS_X, | ||
50 | field->logical_minimum, | ||
51 | field->logical_maximum, 0, 0); | ||
52 | return 1; | ||
53 | case HID_GD_Y: | ||
54 | hid_map_usage(hi, usage, bit, max, | ||
55 | EV_ABS, ABS_MT_POSITION_Y); | ||
56 | /* touchscreen emulation */ | ||
57 | input_set_abs_params(hi->input, ABS_Y, | ||
58 | field->logical_minimum, | ||
59 | field->logical_maximum, 0, 0); | ||
60 | return 1; | ||
61 | } | ||
62 | return 0; | ||
63 | |||
64 | case HID_UP_DIGITIZER: | ||
65 | switch (usage->hid) { | ||
66 | case HID_DG_CONFIDENCE: | ||
67 | case HID_DG_TIPSWITCH: | ||
68 | case HID_DG_INPUTMODE: | ||
69 | case HID_DG_DEVICEINDEX: | ||
70 | case HID_DG_CONTACTCOUNT: | ||
71 | case HID_DG_CONTACTMAX: | ||
72 | case HID_DG_TIPPRESSURE: | ||
73 | case HID_DG_WIDTH: | ||
74 | case HID_DG_HEIGHT: | ||
75 | return -1; | ||
76 | case HID_DG_INRANGE: | ||
77 | /* touchscreen emulation */ | ||
78 | hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); | ||
79 | return 1; | ||
80 | |||
81 | case HID_DG_CONTACTID: | ||
82 | hid_map_usage(hi, usage, bit, max, | ||
83 | EV_ABS, ABS_MT_TRACKING_ID); | ||
84 | return 1; | ||
85 | |||
86 | } | ||
87 | return 0; | ||
88 | |||
89 | case 0xff000000: | ||
90 | /* ignore HID features */ | ||
91 | return -1; | ||
92 | } | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int mosart_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
98 | struct hid_field *field, struct hid_usage *usage, | ||
99 | unsigned long **bit, int *max) | ||
100 | { | ||
101 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
102 | clear_bit(usage->code, *bit); | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * this function is called when a whole finger has been parsed, | ||
109 | * so that it can decide what to send to the input layer. | ||
110 | */ | ||
111 | static void mosart_filter_event(struct mosart_data *td, struct input_dev *input) | ||
112 | { | ||
113 | td->first = !td->first; /* touchscreen emulation */ | ||
114 | |||
115 | if (!td->valid) { | ||
116 | /* | ||
117 | * touchscreen emulation: if no finger in this frame is valid | ||
118 | * and there previously was finger activity, this is a release | ||
119 | */ | ||
120 | if (!td->first && !td->activity_now && td->activity) { | ||
121 | input_event(input, EV_KEY, BTN_TOUCH, 0); | ||
122 | td->activity = false; | ||
123 | } | ||
124 | return; | ||
125 | } | ||
126 | |||
127 | input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id); | ||
128 | input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); | ||
129 | input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); | ||
130 | |||
131 | input_mt_sync(input); | ||
132 | td->valid = false; | ||
133 | |||
134 | /* touchscreen emulation: if first active finger in this frame... */ | ||
135 | if (!td->activity_now) { | ||
136 | /* if there was no previous activity, emit touch event */ | ||
137 | if (!td->activity) { | ||
138 | input_event(input, EV_KEY, BTN_TOUCH, 1); | ||
139 | td->activity = true; | ||
140 | } | ||
141 | td->activity_now = true; | ||
142 | /* and in any case this is our preferred finger */ | ||
143 | input_event(input, EV_ABS, ABS_X, td->x); | ||
144 | input_event(input, EV_ABS, ABS_Y, td->y); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | |||
149 | static int mosart_event(struct hid_device *hid, struct hid_field *field, | ||
150 | struct hid_usage *usage, __s32 value) | ||
151 | { | ||
152 | struct mosart_data *td = hid_get_drvdata(hid); | ||
153 | |||
154 | if (hid->claimed & HID_CLAIMED_INPUT) { | ||
155 | struct input_dev *input = field->hidinput->input; | ||
156 | switch (usage->hid) { | ||
157 | case HID_DG_INRANGE: | ||
158 | td->valid = !!value; | ||
159 | break; | ||
160 | case HID_GD_X: | ||
161 | td->x = value; | ||
162 | break; | ||
163 | case HID_GD_Y: | ||
164 | td->y = value; | ||
165 | mosart_filter_event(td, input); | ||
166 | break; | ||
167 | case HID_DG_CONTACTID: | ||
168 | td->id = value; | ||
169 | break; | ||
170 | case HID_DG_CONTACTCOUNT: | ||
171 | /* touch emulation: this is the last field in a frame */ | ||
172 | td->first = false; | ||
173 | td->activity_now = false; | ||
174 | break; | ||
175 | case HID_DG_CONFIDENCE: | ||
176 | case HID_DG_TIPSWITCH: | ||
177 | /* avoid interference from generic hidinput handling */ | ||
178 | break; | ||
179 | |||
180 | default: | ||
181 | /* fallback to the generic hidinput handling */ | ||
182 | return 0; | ||
183 | } | ||
184 | } | ||
185 | |||
186 | /* we have handled the hidinput part, now remains hiddev */ | ||
187 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | ||
188 | hid->hiddev_hid_event(hid, field, usage, value); | ||
189 | |||
190 | return 1; | ||
191 | } | ||
192 | |||
193 | static int mosart_probe(struct hid_device *hdev, const struct hid_device_id *id) | ||
194 | { | ||
195 | int ret; | ||
196 | struct mosart_data *td; | ||
197 | |||
198 | |||
199 | td = kmalloc(sizeof(struct mosart_data), GFP_KERNEL); | ||
200 | if (!td) { | ||
201 | dev_err(&hdev->dev, "cannot allocate MosArt data\n"); | ||
202 | return -ENOMEM; | ||
203 | } | ||
204 | td->valid = false; | ||
205 | td->activity = false; | ||
206 | td->activity_now = false; | ||
207 | td->first = false; | ||
208 | hid_set_drvdata(hdev, td); | ||
209 | |||
210 | /* currently, it's better to have one evdev device only */ | ||
211 | #if 0 | ||
212 | hdev->quirks |= HID_QUIRK_MULTI_INPUT; | ||
213 | #endif | ||
214 | |||
215 | ret = hid_parse(hdev); | ||
216 | if (ret == 0) | ||
217 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
218 | |||
219 | if (ret == 0) { | ||
220 | struct hid_report_enum *re = hdev->report_enum | ||
221 | + HID_FEATURE_REPORT; | ||
222 | struct hid_report *r = re->report_id_hash[7]; | ||
223 | |||
224 | r->field[0]->value[0] = 0x02; | ||
225 | usbhid_submit_report(hdev, r, USB_DIR_OUT); | ||
226 | } else | ||
227 | kfree(td); | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static void mosart_remove(struct hid_device *hdev) | ||
233 | { | ||
234 | hid_hw_stop(hdev); | ||
235 | kfree(hid_get_drvdata(hdev)); | ||
236 | hid_set_drvdata(hdev, NULL); | ||
237 | } | ||
238 | |||
239 | static const struct hid_device_id mosart_devices[] = { | ||
240 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, | ||
241 | { } | ||
242 | }; | ||
243 | MODULE_DEVICE_TABLE(hid, mosart_devices); | ||
244 | |||
245 | static const struct hid_usage_id mosart_grabbed_usages[] = { | ||
246 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | ||
247 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} | ||
248 | }; | ||
249 | |||
250 | static struct hid_driver mosart_driver = { | ||
251 | .name = "mosart", | ||
252 | .id_table = mosart_devices, | ||
253 | .probe = mosart_probe, | ||
254 | .remove = mosart_remove, | ||
255 | .input_mapping = mosart_input_mapping, | ||
256 | .input_mapped = mosart_input_mapped, | ||
257 | .usage_table = mosart_grabbed_usages, | ||
258 | .event = mosart_event, | ||
259 | }; | ||
260 | |||
261 | static int __init mosart_init(void) | ||
262 | { | ||
263 | return hid_register_driver(&mosart_driver); | ||
264 | } | ||
265 | |||
266 | static void __exit mosart_exit(void) | ||
267 | { | ||
268 | hid_unregister_driver(&mosart_driver); | ||
269 | } | ||
270 | |||
271 | module_init(mosart_init); | ||
272 | module_exit(mosart_exit); | ||
273 | |||
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index 49ce69d7bba7..3234c729a895 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c | |||
@@ -25,11 +25,16 @@ | |||
25 | EV_KEY, (c)) | 25 | EV_KEY, (c)) |
26 | 26 | ||
27 | struct ntrig_data { | 27 | struct ntrig_data { |
28 | __s32 x, y, id, w, h; | 28 | /* Incoming raw values for a single contact */ |
29 | char reading_a_point, found_contact_id; | 29 | __u16 x, y, w, h; |
30 | char pen_active; | 30 | __u16 id; |
31 | char finger_active; | 31 | __u8 confidence; |
32 | char inverted; | 32 | |
33 | bool reading_mt; | ||
34 | __u8 first_contact_confidence; | ||
35 | |||
36 | __u8 mt_footer[4]; | ||
37 | __u8 mt_foot_count; | ||
33 | }; | 38 | }; |
34 | 39 | ||
35 | /* | 40 | /* |
@@ -42,8 +47,11 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
42 | struct hid_field *field, struct hid_usage *usage, | 47 | struct hid_field *field, struct hid_usage *usage, |
43 | unsigned long **bit, int *max) | 48 | unsigned long **bit, int *max) |
44 | { | 49 | { |
45 | switch (usage->hid & HID_USAGE_PAGE) { | 50 | /* No special mappings needed for the pen and single touch */ |
51 | if (field->physical) | ||
52 | return 0; | ||
46 | 53 | ||
54 | switch (usage->hid & HID_USAGE_PAGE) { | ||
47 | case HID_UP_GENDESK: | 55 | case HID_UP_GENDESK: |
48 | switch (usage->hid) { | 56 | switch (usage->hid) { |
49 | case HID_GD_X: | 57 | case HID_GD_X: |
@@ -66,18 +74,12 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
66 | case HID_UP_DIGITIZER: | 74 | case HID_UP_DIGITIZER: |
67 | switch (usage->hid) { | 75 | switch (usage->hid) { |
68 | /* we do not want to map these for now */ | 76 | /* we do not want to map these for now */ |
69 | case HID_DG_CONTACTID: /* value is useless */ | 77 | case HID_DG_CONTACTID: /* Not trustworthy, squelch for now */ |
70 | case HID_DG_INPUTMODE: | 78 | case HID_DG_INPUTMODE: |
71 | case HID_DG_DEVICEINDEX: | 79 | case HID_DG_DEVICEINDEX: |
72 | case HID_DG_CONTACTCOUNT: | ||
73 | case HID_DG_CONTACTMAX: | 80 | case HID_DG_CONTACTMAX: |
74 | return -1; | 81 | return -1; |
75 | 82 | ||
76 | /* original mapping by Rafi Rubin */ | ||
77 | case HID_DG_CONFIDENCE: | ||
78 | nt_map_key_clear(BTN_TOOL_DOUBLETAP); | ||
79 | return 1; | ||
80 | |||
81 | /* width/height mapped on TouchMajor/TouchMinor/Orientation */ | 83 | /* width/height mapped on TouchMajor/TouchMinor/Orientation */ |
82 | case HID_DG_WIDTH: | 84 | case HID_DG_WIDTH: |
83 | hid_map_usage(hi, usage, bit, max, | 85 | hid_map_usage(hi, usage, bit, max, |
@@ -104,6 +106,10 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi, | |||
104 | struct hid_field *field, struct hid_usage *usage, | 106 | struct hid_field *field, struct hid_usage *usage, |
105 | unsigned long **bit, int *max) | 107 | unsigned long **bit, int *max) |
106 | { | 108 | { |
109 | /* No special mappings needed for the pen and single touch */ | ||
110 | if (field->physical) | ||
111 | return 0; | ||
112 | |||
107 | if (usage->type == EV_KEY || usage->type == EV_REL | 113 | if (usage->type == EV_KEY || usage->type == EV_REL |
108 | || usage->type == EV_ABS) | 114 | || usage->type == EV_ABS) |
109 | clear_bit(usage->code, *bit); | 115 | clear_bit(usage->code, *bit); |
@@ -123,31 +129,30 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
123 | struct input_dev *input = field->hidinput->input; | 129 | struct input_dev *input = field->hidinput->input; |
124 | struct ntrig_data *nd = hid_get_drvdata(hid); | 130 | struct ntrig_data *nd = hid_get_drvdata(hid); |
125 | 131 | ||
132 | /* No special handling needed for the pen */ | ||
133 | if (field->application == HID_DG_PEN) | ||
134 | return 0; | ||
135 | |||
126 | if (hid->claimed & HID_CLAIMED_INPUT) { | 136 | if (hid->claimed & HID_CLAIMED_INPUT) { |
127 | switch (usage->hid) { | 137 | switch (usage->hid) { |
128 | 138 | case 0xff000001: | |
129 | case HID_DG_INRANGE: | 139 | /* Tag indicating the start of a multitouch group */ |
130 | if (field->application & 0x3) | 140 | nd->reading_mt = 1; |
131 | nd->pen_active = (value != 0); | 141 | nd->first_contact_confidence = 0; |
132 | else | 142 | break; |
133 | nd->finger_active = (value != 0); | 143 | case HID_DG_CONFIDENCE: |
134 | return 0; | 144 | nd->confidence = value; |
135 | 145 | break; | |
136 | case HID_DG_INVERT: | ||
137 | nd->inverted = value; | ||
138 | return 0; | ||
139 | |||
140 | case HID_GD_X: | 146 | case HID_GD_X: |
141 | nd->x = value; | 147 | nd->x = value; |
142 | nd->reading_a_point = 1; | 148 | /* Clear the contact footer */ |
149 | nd->mt_foot_count = 0; | ||
143 | break; | 150 | break; |
144 | case HID_GD_Y: | 151 | case HID_GD_Y: |
145 | nd->y = value; | 152 | nd->y = value; |
146 | break; | 153 | break; |
147 | case HID_DG_CONTACTID: | 154 | case HID_DG_CONTACTID: |
148 | nd->id = value; | 155 | nd->id = value; |
149 | /* we receive this only when in multitouch mode */ | ||
150 | nd->found_contact_id = 1; | ||
151 | break; | 156 | break; |
152 | case HID_DG_WIDTH: | 157 | case HID_DG_WIDTH: |
153 | nd->w = value; | 158 | nd->w = value; |
@@ -159,35 +164,13 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
159 | * report received in a finger event. We want | 164 | * report received in a finger event. We want |
160 | * to emit a normal (X, Y) position | 165 | * to emit a normal (X, Y) position |
161 | */ | 166 | */ |
162 | if (!nd->found_contact_id) { | 167 | if (!nd->reading_mt) { |
163 | if (nd->pen_active && nd->finger_active) { | 168 | input_report_key(input, BTN_TOOL_DOUBLETAP, |
164 | input_report_key(input, BTN_TOOL_DOUBLETAP, 0); | 169 | (nd->confidence != 0)); |
165 | input_report_key(input, BTN_TOOL_DOUBLETAP, 1); | ||
166 | } | ||
167 | input_event(input, EV_ABS, ABS_X, nd->x); | 170 | input_event(input, EV_ABS, ABS_X, nd->x); |
168 | input_event(input, EV_ABS, ABS_Y, nd->y); | 171 | input_event(input, EV_ABS, ABS_Y, nd->y); |
169 | } | 172 | } |
170 | break; | 173 | break; |
171 | case HID_DG_TIPPRESSURE: | ||
172 | /* | ||
173 | * when in single touch mode, this is the last | ||
174 | * report received in a pen event. We want | ||
175 | * to emit a normal (X, Y) position | ||
176 | */ | ||
177 | if (! nd->found_contact_id) { | ||
178 | if (nd->pen_active && nd->finger_active) { | ||
179 | input_report_key(input, | ||
180 | nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN | ||
181 | , 0); | ||
182 | input_report_key(input, | ||
183 | nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN | ||
184 | , 1); | ||
185 | } | ||
186 | input_event(input, EV_ABS, ABS_X, nd->x); | ||
187 | input_event(input, EV_ABS, ABS_Y, nd->y); | ||
188 | input_event(input, EV_ABS, ABS_PRESSURE, value); | ||
189 | } | ||
190 | break; | ||
191 | case 0xff000002: | 174 | case 0xff000002: |
192 | /* | 175 | /* |
193 | * we receive this when the device is in multitouch | 176 | * we receive this when the device is in multitouch |
@@ -195,10 +178,34 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
195 | * this usage tells if the contact point is real | 178 | * this usage tells if the contact point is real |
196 | * or a placeholder | 179 | * or a placeholder |
197 | */ | 180 | */ |
198 | if (!nd->reading_a_point || value != 1) | 181 | |
182 | /* Shouldn't get more than 4 footer packets, so skip */ | ||
183 | if (nd->mt_foot_count >= 4) | ||
199 | break; | 184 | break; |
185 | |||
186 | nd->mt_footer[nd->mt_foot_count++] = value; | ||
187 | |||
188 | /* if the footer isn't complete, break */ | ||
189 | if (nd->mt_foot_count != 4) | ||
190 | break; | ||
191 | |||
192 | /* Pen activity signal, trigger end of touch. */ | ||
193 | if (nd->mt_footer[2]) { | ||
194 | nd->confidence = 0; | ||
195 | break; | ||
196 | } | ||
197 | |||
198 | /* If the contact was invalid */ | ||
199 | if (!(nd->confidence && nd->mt_footer[0]) | ||
200 | || nd->w <= 250 | ||
201 | || nd->h <= 190) { | ||
202 | nd->confidence = 0; | ||
203 | break; | ||
204 | } | ||
205 | |||
200 | /* emit a normal (X, Y) for the first point only */ | 206 | /* emit a normal (X, Y) for the first point only */ |
201 | if (nd->id == 0) { | 207 | if (nd->id == 0) { |
208 | nd->first_contact_confidence = nd->confidence; | ||
202 | input_event(input, EV_ABS, ABS_X, nd->x); | 209 | input_event(input, EV_ABS, ABS_X, nd->x); |
203 | input_event(input, EV_ABS, ABS_Y, nd->y); | 210 | input_event(input, EV_ABS, ABS_Y, nd->y); |
204 | } | 211 | } |
@@ -220,8 +227,39 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
220 | ABS_MT_TOUCH_MINOR, nd->w); | 227 | ABS_MT_TOUCH_MINOR, nd->w); |
221 | } | 228 | } |
222 | input_mt_sync(field->hidinput->input); | 229 | input_mt_sync(field->hidinput->input); |
223 | nd->reading_a_point = 0; | 230 | break; |
224 | nd->found_contact_id = 0; | 231 | |
232 | case HID_DG_CONTACTCOUNT: /* End of a multitouch group */ | ||
233 | if (!nd->reading_mt) | ||
234 | break; | ||
235 | |||
236 | nd->reading_mt = 0; | ||
237 | |||
238 | if (nd->first_contact_confidence) { | ||
239 | switch (value) { | ||
240 | case 0: /* for single touch devices */ | ||
241 | case 1: | ||
242 | input_report_key(input, | ||
243 | BTN_TOOL_DOUBLETAP, 1); | ||
244 | break; | ||
245 | case 2: | ||
246 | input_report_key(input, | ||
247 | BTN_TOOL_TRIPLETAP, 1); | ||
248 | break; | ||
249 | case 3: | ||
250 | default: | ||
251 | input_report_key(input, | ||
252 | BTN_TOOL_QUADTAP, 1); | ||
253 | } | ||
254 | input_report_key(input, BTN_TOUCH, 1); | ||
255 | } else { | ||
256 | input_report_key(input, | ||
257 | BTN_TOOL_DOUBLETAP, 0); | ||
258 | input_report_key(input, | ||
259 | BTN_TOOL_TRIPLETAP, 0); | ||
260 | input_report_key(input, | ||
261 | BTN_TOOL_QUADTAP, 0); | ||
262 | } | ||
225 | break; | 263 | break; |
226 | 264 | ||
227 | default: | 265 | default: |
@@ -231,8 +269,8 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
231 | } | 269 | } |
232 | 270 | ||
233 | /* we have handled the hidinput part, now remains hiddev */ | 271 | /* we have handled the hidinput part, now remains hiddev */ |
234 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | 272 | if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_hid_event) |
235 | hid->hiddev_hid_event(hid, field, usage, value); | 273 | hid->hiddev_hid_event(hid, field, usage, value); |
236 | 274 | ||
237 | return 1; | 275 | return 1; |
238 | } | 276 | } |
@@ -241,23 +279,67 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
241 | { | 279 | { |
242 | int ret; | 280 | int ret; |
243 | struct ntrig_data *nd; | 281 | struct ntrig_data *nd; |
282 | struct hid_input *hidinput; | ||
283 | struct input_dev *input; | ||
284 | |||
285 | if (id->driver_data) | ||
286 | hdev->quirks |= HID_QUIRK_MULTI_INPUT; | ||
244 | 287 | ||
245 | nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); | 288 | nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); |
246 | if (!nd) { | 289 | if (!nd) { |
247 | dev_err(&hdev->dev, "cannot allocate N-Trig data\n"); | 290 | dev_err(&hdev->dev, "cannot allocate N-Trig data\n"); |
248 | return -ENOMEM; | 291 | return -ENOMEM; |
249 | } | 292 | } |
250 | nd->reading_a_point = 0; | 293 | |
251 | nd->found_contact_id = 0; | 294 | nd->reading_mt = 0; |
252 | hid_set_drvdata(hdev, nd); | 295 | hid_set_drvdata(hdev, nd); |
253 | 296 | ||
254 | ret = hid_parse(hdev); | 297 | ret = hid_parse(hdev); |
255 | if (!ret) | 298 | if (ret) { |
256 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | 299 | dev_err(&hdev->dev, "parse failed\n"); |
300 | goto err_free; | ||
301 | } | ||
302 | |||
303 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); | ||
304 | if (ret) { | ||
305 | dev_err(&hdev->dev, "hw start failed\n"); | ||
306 | goto err_free; | ||
307 | } | ||
257 | 308 | ||
258 | if (ret) | ||
259 | kfree (nd); | ||
260 | 309 | ||
310 | list_for_each_entry(hidinput, &hdev->inputs, list) { | ||
311 | input = hidinput->input; | ||
312 | switch (hidinput->report->field[0]->application) { | ||
313 | case HID_DG_PEN: | ||
314 | input->name = "N-Trig Pen"; | ||
315 | break; | ||
316 | case HID_DG_TOUCHSCREEN: | ||
317 | __clear_bit(BTN_TOOL_PEN, input->keybit); | ||
318 | /* | ||
319 | * A little something special to enable | ||
320 | * two and three finger taps. | ||
321 | */ | ||
322 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); | ||
323 | __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); | ||
324 | __set_bit(BTN_TOOL_QUADTAP, input->keybit); | ||
325 | /* | ||
326 | * The physical touchscreen (single touch) | ||
327 | * input has a value for physical, whereas | ||
328 | * the multitouch only has logical input | ||
329 | * fields. | ||
330 | */ | ||
331 | input->name = | ||
332 | (hidinput->report->field[0] | ||
333 | ->physical) ? | ||
334 | "N-Trig Touchscreen" : | ||
335 | "N-Trig MultiTouch"; | ||
336 | break; | ||
337 | } | ||
338 | } | ||
339 | |||
340 | return 0; | ||
341 | err_free: | ||
342 | kfree(nd); | ||
261 | return ret; | 343 | return ret; |
262 | } | 344 | } |
263 | 345 | ||
@@ -276,7 +358,7 @@ MODULE_DEVICE_TABLE(hid, ntrig_devices); | |||
276 | 358 | ||
277 | static const struct hid_usage_id ntrig_grabbed_usages[] = { | 359 | static const struct hid_usage_id ntrig_grabbed_usages[] = { |
278 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | 360 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, |
279 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} | 361 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 } |
280 | }; | 362 | }; |
281 | 363 | ||
282 | static struct hid_driver ntrig_driver = { | 364 | static struct hid_driver ntrig_driver = { |
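The new HID_DG_CONTACTCOUNT handling above reports the finger count through the BTN_TOOL_* keys at the end of each multitouch group. A small standalone sketch of that mapping, using the standard input-layer key codes; only the switch logic is taken from the driver.

#include <stdio.h>

#define BTN_TOOL_DOUBLETAP 0x14d
#define BTN_TOOL_TRIPLETAP 0x14e
#define BTN_TOOL_QUADTAP   0x14f

/* Pick the tool key hid-ntrig asserts for a given contact count: zero or
 * one finger maps to DOUBLETAP (zero covers single-touch firmware), two
 * to TRIPLETAP, three or more to QUADTAP.
 */
static int ntrig_tool_for_count(int contact_count)
{
	switch (contact_count) {
	case 0:
	case 1:
		return BTN_TOOL_DOUBLETAP;
	case 2:
		return BTN_TOOL_TRIPLETAP;
	default:
		return BTN_TOOL_QUADTAP;
	}
}

int main(void)
{
	for (int n = 0; n <= 4; n++)
		printf("%d contact(s) -> key 0x%x\n", n, ntrig_tool_for_count(n));
	return 0;
}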
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c new file mode 100644 index 000000000000..aa9a960f73a4 --- /dev/null +++ b/drivers/hid/hid-ortek.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). | ||
3 | * Fixes LogicalMaximum error in USB report descriptor, see | ||
4 | * http://bugzilla.kernel.org/show_bug.cgi?id=14787 | ||
5 | * | ||
6 | * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/hid.h> | ||
18 | #include <linux/module.h> | ||
19 | |||
20 | #include "hid-ids.h" | ||
21 | |||
22 | static void ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, | ||
23 | unsigned int rsize) | ||
24 | { | ||
25 | if (rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { | ||
26 | dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 " | ||
27 | "report descriptor.\n"); | ||
28 | rdesc[55] = 0x92; | ||
29 | } | ||
30 | } | ||
31 | |||
32 | static const struct hid_device_id ortek_devices[] = { | ||
33 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | ||
34 | { } | ||
35 | }; | ||
36 | MODULE_DEVICE_TABLE(hid, ortek_devices); | ||
37 | |||
38 | static struct hid_driver ortek_driver = { | ||
39 | .name = "ortek", | ||
40 | .id_table = ortek_devices, | ||
41 | .report_fixup = ortek_report_fixup | ||
42 | }; | ||
43 | |||
44 | static int __init ortek_init(void) | ||
45 | { | ||
46 | return hid_register_driver(&ortek_driver); | ||
47 | } | ||
48 | |||
49 | static void __exit ortek_exit(void) | ||
50 | { | ||
51 | hid_unregister_driver(&ortek_driver); | ||
52 | } | ||
53 | |||
54 | module_init(ortek_init); | ||
55 | module_exit(ortek_exit); | ||
56 | MODULE_LICENSE("GPL"); | ||
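For context on the two bytes this fixup patches: 0x25 is the one-byte-data form of the HID Logical Maximum item, so rewriting its operand from 0x01 to 0x92 raises the advertised maximum from 1 to 146. Below is a tiny sketch of the same in-place patch applied to a descriptor fragment; the fragment itself is made up, only the item encoding comes from the HID specification.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Patch a "Logical Maximum (1)" short item (0x25 0x01) at a known offset,
 * the way ortek_report_fixup() does at offset 54 of the real descriptor.
 */
static void fixup(uint8_t *rdesc, size_t rsize, size_t off)
{
	if (rsize >= off + 2 && rdesc[off] == 0x25 && rdesc[off + 1] == 0x01)
		rdesc[off + 1] = 0x92;   /* Logical Maximum (146) */
}

int main(void)
{
	uint8_t frag[] = { 0x15, 0x00,   /* Logical Minimum (0) */
			   0x25, 0x01 }; /* Logical Maximum (1)  */

	fixup(frag, sizeof(frag), 2);
	printf("patched Logical Maximum operand: 0x%02x\n", frag[3]);
	return 0;
}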
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c new file mode 100644 index 000000000000..01dd51c4986c --- /dev/null +++ b/drivers/hid/hid-quanta.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * HID driver for Quanta Optical Touch dual-touch panels | ||
3 | * | ||
4 | * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/hid.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
20 | MODULE_DESCRIPTION("Quanta dual-touch panel"); | ||
21 | MODULE_LICENSE("GPL"); | ||
22 | |||
23 | #include "hid-ids.h" | ||
24 | |||
25 | struct quanta_data { | ||
26 | __u16 x, y; | ||
27 | __u8 id; | ||
28 | bool valid; /* valid finger data, or just placeholder? */ | ||
29 | bool first; /* is this the first finger in this frame? */ | ||
30 | bool activity_now; /* at least one active finger in this frame? */ | ||
31 | bool activity; /* at least one active finger previously? */ | ||
32 | }; | ||
33 | |||
34 | static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
35 | struct hid_field *field, struct hid_usage *usage, | ||
36 | unsigned long **bit, int *max) | ||
37 | { | ||
38 | switch (usage->hid & HID_USAGE_PAGE) { | ||
39 | |||
40 | case HID_UP_GENDESK: | ||
41 | switch (usage->hid) { | ||
42 | case HID_GD_X: | ||
43 | hid_map_usage(hi, usage, bit, max, | ||
44 | EV_ABS, ABS_MT_POSITION_X); | ||
45 | /* touchscreen emulation */ | ||
46 | input_set_abs_params(hi->input, ABS_X, | ||
47 | field->logical_minimum, | ||
48 | field->logical_maximum, 0, 0); | ||
49 | return 1; | ||
50 | case HID_GD_Y: | ||
51 | hid_map_usage(hi, usage, bit, max, | ||
52 | EV_ABS, ABS_MT_POSITION_Y); | ||
53 | /* touchscreen emulation */ | ||
54 | input_set_abs_params(hi->input, ABS_Y, | ||
55 | field->logical_minimum, | ||
56 | field->logical_maximum, 0, 0); | ||
57 | return 1; | ||
58 | } | ||
59 | return 0; | ||
60 | |||
61 | case HID_UP_DIGITIZER: | ||
62 | switch (usage->hid) { | ||
63 | case HID_DG_CONFIDENCE: | ||
64 | case HID_DG_TIPSWITCH: | ||
65 | case HID_DG_INPUTMODE: | ||
66 | case HID_DG_DEVICEINDEX: | ||
67 | case HID_DG_CONTACTCOUNT: | ||
68 | case HID_DG_CONTACTMAX: | ||
69 | case HID_DG_TIPPRESSURE: | ||
70 | case HID_DG_WIDTH: | ||
71 | case HID_DG_HEIGHT: | ||
72 | return -1; | ||
73 | case HID_DG_INRANGE: | ||
74 | /* touchscreen emulation */ | ||
75 | hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); | ||
76 | return 1; | ||
77 | case HID_DG_CONTACTID: | ||
78 | hid_map_usage(hi, usage, bit, max, | ||
79 | EV_ABS, ABS_MT_TRACKING_ID); | ||
80 | return 1; | ||
81 | } | ||
82 | return 0; | ||
83 | |||
84 | case 0xff000000: | ||
85 | /* ignore vendor-specific features */ | ||
86 | return -1; | ||
87 | } | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
93 | struct hid_field *field, struct hid_usage *usage, | ||
94 | unsigned long **bit, int *max) | ||
95 | { | ||
96 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
97 | clear_bit(usage->code, *bit); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * this function is called when a whole finger has been parsed, | ||
104 | * so that it can decide what to send to the input layer. | ||
105 | */ | ||
106 | static void quanta_filter_event(struct quanta_data *td, struct input_dev *input) | ||
107 | { | ||
108 | |||
109 | td->first = !td->first; /* touchscreen emulation */ | ||
110 | |||
111 | if (!td->valid) { | ||
112 | /* | ||
113 | * touchscreen emulation: if no finger in this frame is valid | ||
114 | * and there previously was finger activity, this is a release | ||
115 | */ | ||
116 | if (!td->first && !td->activity_now && td->activity) { | ||
117 | input_event(input, EV_KEY, BTN_TOUCH, 0); | ||
118 | td->activity = false; | ||
119 | } | ||
120 | return; | ||
121 | } | ||
122 | |||
123 | input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id); | ||
124 | input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); | ||
125 | input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); | ||
126 | |||
127 | input_mt_sync(input); | ||
128 | td->valid = false; | ||
129 | |||
130 | /* touchscreen emulation: if first active finger in this frame... */ | ||
131 | if (!td->activity_now) { | ||
132 | /* if there was no previous activity, emit touch event */ | ||
133 | if (!td->activity) { | ||
134 | input_event(input, EV_KEY, BTN_TOUCH, 1); | ||
135 | td->activity = true; | ||
136 | } | ||
137 | td->activity_now = true; | ||
138 | /* and in any case this is our preferred finger */ | ||
139 | input_event(input, EV_ABS, ABS_X, td->x); | ||
140 | input_event(input, EV_ABS, ABS_Y, td->y); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | |||
145 | static int quanta_event(struct hid_device *hid, struct hid_field *field, | ||
146 | struct hid_usage *usage, __s32 value) | ||
147 | { | ||
148 | struct quanta_data *td = hid_get_drvdata(hid); | ||
149 | |||
150 | if (hid->claimed & HID_CLAIMED_INPUT) { | ||
151 | struct input_dev *input = field->hidinput->input; | ||
152 | |||
153 | switch (usage->hid) { | ||
154 | case HID_DG_INRANGE: | ||
155 | td->valid = !!value; | ||
156 | break; | ||
157 | case HID_GD_X: | ||
158 | td->x = value; | ||
159 | break; | ||
160 | case HID_GD_Y: | ||
161 | td->y = value; | ||
162 | quanta_filter_event(td, input); | ||
163 | break; | ||
164 | case HID_DG_CONTACTID: | ||
165 | td->id = value; | ||
166 | break; | ||
167 | case HID_DG_CONTACTCOUNT: | ||
168 | /* touch emulation: this is the last field in a frame */ | ||
169 | td->first = false; | ||
170 | td->activity_now = false; | ||
171 | break; | ||
172 | case HID_DG_CONFIDENCE: | ||
173 | case HID_DG_TIPSWITCH: | ||
174 | /* avoid interference from generic hidinput handling */ | ||
175 | break; | ||
176 | |||
177 | default: | ||
178 | /* fallback to the generic hidinput handling */ | ||
179 | return 0; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | /* we have handled the hidinput part, now remains hiddev */ | ||
184 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | ||
185 | hid->hiddev_hid_event(hid, field, usage, value); | ||
186 | |||
187 | return 1; | ||
188 | } | ||
189 | |||
190 | static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id) | ||
191 | { | ||
192 | int ret; | ||
193 | struct quanta_data *td; | ||
194 | |||
195 | td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL); | ||
196 | if (!td) { | ||
197 | dev_err(&hdev->dev, "cannot allocate Quanta Touch data\n"); | ||
198 | return -ENOMEM; | ||
199 | } | ||
200 | td->valid = false; | ||
201 | td->activity = false; | ||
202 | td->activity_now = false; | ||
203 | td->first = false; | ||
204 | hid_set_drvdata(hdev, td); | ||
205 | |||
206 | ret = hid_parse(hdev); | ||
207 | if (!ret) | ||
208 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
209 | |||
210 | if (ret) | ||
211 | kfree(td); | ||
212 | |||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | static void quanta_remove(struct hid_device *hdev) | ||
217 | { | ||
218 | hid_hw_stop(hdev); | ||
219 | kfree(hid_get_drvdata(hdev)); | ||
220 | hid_set_drvdata(hdev, NULL); | ||
221 | } | ||
222 | |||
223 | static const struct hid_device_id quanta_devices[] = { | ||
224 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, | ||
225 | USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, | ||
226 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, | ||
227 | USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, | ||
228 | { } | ||
229 | }; | ||
230 | MODULE_DEVICE_TABLE(hid, quanta_devices); | ||
231 | |||
232 | static const struct hid_usage_id quanta_grabbed_usages[] = { | ||
233 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | ||
234 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 } | ||
235 | }; | ||
236 | |||
237 | static struct hid_driver quanta_driver = { | ||
238 | .name = "quanta-touch", | ||
239 | .id_table = quanta_devices, | ||
240 | .probe = quanta_probe, | ||
241 | .remove = quanta_remove, | ||
242 | .input_mapping = quanta_input_mapping, | ||
243 | .input_mapped = quanta_input_mapped, | ||
244 | .usage_table = quanta_grabbed_usages, | ||
245 | .event = quanta_event, | ||
246 | }; | ||
247 | |||
248 | static int __init quanta_init(void) | ||
249 | { | ||
250 | return hid_register_driver(&quanta_driver); | ||
251 | } | ||
252 | |||
253 | static void __exit quanta_exit(void) | ||
254 | { | ||
255 | hid_unregister_driver(&quanta_driver); | ||
256 | } | ||
257 | |||
258 | module_init(quanta_init); | ||
259 | module_exit(quanta_exit); | ||
260 | |||
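quanta_filter_event() above implements the kernel's type-A multitouch protocol: every valid contact is reported as an ABS_MT_TRACKING_ID / ABS_MT_POSITION_X / ABS_MT_POSITION_Y triplet terminated by input_mt_sync(), while BTN_TOUCH and plain ABS_X/ABS_Y are maintained in parallel for single-touch ("touchscreen emulation") clients. A minimal sketch of the per-contact sequence; the helper name is hypothetical and does not exist in the driver:

#include <linux/input.h>

/* Sketch only: report one contact with the type-A MT protocol, the way
 * quanta_filter_event() does for each valid finger. The end-of-frame
 * input_sync() is left to the HID input core once the report is done.
 */
static void report_mt_contact(struct input_dev *input, int id, int x, int y)
{
	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, id);
	input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
	input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
	input_mt_sync(input);		/* terminates this contact's data */
}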
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 4e8450228a24..9bf00d77d92b 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
@@ -48,7 +48,7 @@ static void sony_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
48 | * to "operational". Without this, the ps3 controller will not report any | 48 | * to "operational". Without this, the ps3 controller will not report any |
49 | * events. | 49 | * events. |
50 | */ | 50 | */ |
51 | static int sony_set_operational(struct hid_device *hdev) | 51 | static int sony_set_operational_usb(struct hid_device *hdev) |
52 | { | 52 | { |
53 | struct usb_interface *intf = to_usb_interface(hdev->dev.parent); | 53 | struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
54 | struct usb_device *dev = interface_to_usbdev(intf); | 54 | struct usb_device *dev = interface_to_usbdev(intf); |
@@ -73,6 +73,12 @@ static int sony_set_operational(struct hid_device *hdev) | |||
73 | return ret; | 73 | return ret; |
74 | } | 74 | } |
75 | 75 | ||
76 | static int sony_set_operational_bt(struct hid_device *hdev) | ||
77 | { | ||
78 | unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 }; | ||
79 | return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); | ||
80 | } | ||
81 | |||
76 | static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | 82 | static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) |
77 | { | 83 | { |
78 | int ret; | 84 | int ret; |
@@ -81,7 +87,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
81 | 87 | ||
82 | sc = kzalloc(sizeof(*sc), GFP_KERNEL); | 88 | sc = kzalloc(sizeof(*sc), GFP_KERNEL); |
83 | if (sc == NULL) { | 89 | if (sc == NULL) { |
84 | dev_err(&hdev->dev, "can't alloc apple descriptor\n"); | 90 | dev_err(&hdev->dev, "can't alloc sony descriptor\n"); |
85 | return -ENOMEM; | 91 | return -ENOMEM; |
86 | } | 92 | } |
87 | 93 | ||
@@ -101,7 +107,17 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
101 | goto err_free; | 107 | goto err_free; |
102 | } | 108 | } |
103 | 109 | ||
104 | ret = sony_set_operational(hdev); | 110 | switch (hdev->bus) { |
111 | case BUS_USB: | ||
112 | ret = sony_set_operational_usb(hdev); | ||
113 | break; | ||
114 | case BUS_BLUETOOTH: | ||
115 | ret = sony_set_operational_bt(hdev); | ||
116 | break; | ||
117 | default: | ||
118 | ret = 0; | ||
119 | } | ||
120 | |||
105 | if (ret < 0) | 121 | if (ret < 0) |
106 | goto err_stop; | 122 | goto err_stop; |
107 | 123 | ||
@@ -121,6 +137,7 @@ static void sony_remove(struct hid_device *hdev) | |||
121 | 137 | ||
122 | static const struct hid_device_id sony_devices[] = { | 138 | static const struct hid_device_id sony_devices[] = { |
123 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | 139 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, |
140 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, | ||
124 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), | 141 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), |
125 | .driver_data = VAIO_RDESC_CONSTANT }, | 142 | .driver_data = VAIO_RDESC_CONSTANT }, |
126 | { } | 143 | { } |
diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c new file mode 100644 index 000000000000..2e592a06654e --- /dev/null +++ b/drivers/hid/hid-stantum.c | |||
@@ -0,0 +1,283 @@ | |||
1 | /* | ||
2 | * HID driver for Stantum multitouch panels | ||
3 | * | ||
4 | * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/hid.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); | ||
20 | MODULE_DESCRIPTION("Stantum HID multitouch panels"); | ||
21 | MODULE_LICENSE("GPL"); | ||
22 | |||
23 | #include "hid-ids.h" | ||
24 | |||
25 | struct stantum_data { | ||
26 | __s32 x, y, z, w, h; /* x, y, pressure, width, height */ | ||
27 | __u16 id; /* touch id */ | ||
28 | bool valid; /* valid finger data, or just placeholder? */ | ||
29 | bool first; /* first finger in the HID packet? */ | ||
30 | bool activity; /* at least one active finger so far? */ | ||
31 | }; | ||
32 | |||
33 | static int stantum_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
34 | struct hid_field *field, struct hid_usage *usage, | ||
35 | unsigned long **bit, int *max) | ||
36 | { | ||
37 | switch (usage->hid & HID_USAGE_PAGE) { | ||
38 | |||
39 | case HID_UP_GENDESK: | ||
40 | switch (usage->hid) { | ||
41 | case HID_GD_X: | ||
42 | hid_map_usage(hi, usage, bit, max, | ||
43 | EV_ABS, ABS_MT_POSITION_X); | ||
44 | /* touchscreen emulation */ | ||
45 | input_set_abs_params(hi->input, ABS_X, | ||
46 | field->logical_minimum, | ||
47 | field->logical_maximum, 0, 0); | ||
48 | return 1; | ||
49 | case HID_GD_Y: | ||
50 | hid_map_usage(hi, usage, bit, max, | ||
51 | EV_ABS, ABS_MT_POSITION_Y); | ||
52 | /* touchscreen emulation */ | ||
53 | input_set_abs_params(hi->input, ABS_Y, | ||
54 | field->logical_minimum, | ||
55 | field->logical_maximum, 0, 0); | ||
56 | return 1; | ||
57 | } | ||
58 | return 0; | ||
59 | |||
60 | case HID_UP_DIGITIZER: | ||
61 | switch (usage->hid) { | ||
62 | case HID_DG_INRANGE: | ||
63 | case HID_DG_CONFIDENCE: | ||
64 | case HID_DG_INPUTMODE: | ||
65 | case HID_DG_DEVICEINDEX: | ||
66 | case HID_DG_CONTACTCOUNT: | ||
67 | case HID_DG_CONTACTMAX: | ||
68 | return -1; | ||
69 | |||
70 | case HID_DG_TIPSWITCH: | ||
71 | /* touchscreen emulation */ | ||
72 | hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); | ||
73 | return 1; | ||
74 | |||
75 | case HID_DG_WIDTH: | ||
76 | hid_map_usage(hi, usage, bit, max, | ||
77 | EV_ABS, ABS_MT_TOUCH_MAJOR); | ||
78 | return 1; | ||
79 | case HID_DG_HEIGHT: | ||
80 | hid_map_usage(hi, usage, bit, max, | ||
81 | EV_ABS, ABS_MT_TOUCH_MINOR); | ||
82 | input_set_abs_params(hi->input, ABS_MT_ORIENTATION, | ||
83 | 1, 1, 0, 0); | ||
84 | return 1; | ||
85 | case HID_DG_TIPPRESSURE: | ||
86 | hid_map_usage(hi, usage, bit, max, | ||
87 | EV_ABS, ABS_MT_PRESSURE); | ||
88 | return 1; | ||
89 | |||
90 | case HID_DG_CONTACTID: | ||
91 | hid_map_usage(hi, usage, bit, max, | ||
92 | EV_ABS, ABS_MT_TRACKING_ID); | ||
93 | return 1; | ||
94 | |||
95 | } | ||
96 | return 0; | ||
97 | |||
98 | case 0xff000000: | ||
99 | /* no input-oriented meaning */ | ||
100 | return -1; | ||
101 | } | ||
102 | |||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static int stantum_input_mapped(struct hid_device *hdev, struct hid_input *hi, | ||
107 | struct hid_field *field, struct hid_usage *usage, | ||
108 | unsigned long **bit, int *max) | ||
109 | { | ||
110 | if (usage->type == EV_KEY || usage->type == EV_ABS) | ||
111 | clear_bit(usage->code, *bit); | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * this function is called when a whole finger has been parsed, | ||
118 | * so that it can decide what to send to the input layer. | ||
119 | */ | ||
120 | static void stantum_filter_event(struct stantum_data *sd, | ||
121 | struct input_dev *input) | ||
122 | { | ||
123 | bool wide; | ||
124 | |||
125 | if (!sd->valid) { | ||
126 | /* | ||
127 | * touchscreen emulation: if the first finger is not valid and | ||
128 | * there previously was finger activity, this is a release | ||
129 | */ | ||
130 | if (sd->first && sd->activity) { | ||
131 | input_event(input, EV_KEY, BTN_TOUCH, 0); | ||
132 | sd->activity = false; | ||
133 | } | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | input_event(input, EV_ABS, ABS_MT_TRACKING_ID, sd->id); | ||
138 | input_event(input, EV_ABS, ABS_MT_POSITION_X, sd->x); | ||
139 | input_event(input, EV_ABS, ABS_MT_POSITION_Y, sd->y); | ||
140 | |||
141 | wide = (sd->w > sd->h); | ||
142 | input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide); | ||
143 | input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, wide ? sd->w : sd->h); | ||
144 | input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, wide ? sd->h : sd->w); | ||
145 | |||
146 | input_event(input, EV_ABS, ABS_MT_PRESSURE, sd->z); | ||
147 | |||
148 | input_mt_sync(input); | ||
149 | sd->valid = false; | ||
150 | |||
151 | /* touchscreen emulation */ | ||
152 | if (sd->first) { | ||
153 | if (!sd->activity) { | ||
154 | input_event(input, EV_KEY, BTN_TOUCH, 1); | ||
155 | sd->activity = true; | ||
156 | } | ||
157 | input_event(input, EV_ABS, ABS_X, sd->x); | ||
158 | input_event(input, EV_ABS, ABS_Y, sd->y); | ||
159 | } | ||
160 | sd->first = false; | ||
161 | } | ||
162 | |||
163 | |||
164 | static int stantum_event(struct hid_device *hid, struct hid_field *field, | ||
165 | struct hid_usage *usage, __s32 value) | ||
166 | { | ||
167 | struct stantum_data *sd = hid_get_drvdata(hid); | ||
168 | |||
169 | if (hid->claimed & HID_CLAIMED_INPUT) { | ||
170 | struct input_dev *input = field->hidinput->input; | ||
171 | |||
172 | switch (usage->hid) { | ||
173 | case HID_DG_INRANGE: | ||
174 | /* this is the last field in a finger */ | ||
175 | stantum_filter_event(sd, input); | ||
176 | break; | ||
177 | case HID_DG_WIDTH: | ||
178 | sd->w = value; | ||
179 | break; | ||
180 | case HID_DG_HEIGHT: | ||
181 | sd->h = value; | ||
182 | break; | ||
183 | case HID_GD_X: | ||
184 | sd->x = value; | ||
185 | break; | ||
186 | case HID_GD_Y: | ||
187 | sd->y = value; | ||
188 | break; | ||
189 | case HID_DG_TIPPRESSURE: | ||
190 | sd->z = value; | ||
191 | break; | ||
192 | case HID_DG_CONTACTID: | ||
193 | sd->id = value; | ||
194 | break; | ||
195 | case HID_DG_CONFIDENCE: | ||
196 | sd->valid = !!value; | ||
197 | break; | ||
198 | case 0xff000002: | ||
199 | /* this comes only before the first finger */ | ||
200 | sd->first = true; | ||
201 | break; | ||
202 | |||
203 | default: | ||
204 | /* ignore the others */ | ||
205 | return 1; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | /* we have handled the hidinput part, now remains hiddev */ | ||
210 | if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) | ||
211 | hid->hiddev_hid_event(hid, field, usage, value); | ||
212 | |||
213 | return 1; | ||
214 | } | ||
215 | |||
216 | static int stantum_probe(struct hid_device *hdev, | ||
217 | const struct hid_device_id *id) | ||
218 | { | ||
219 | int ret; | ||
220 | struct stantum_data *sd; | ||
221 | |||
222 | sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL); | ||
223 | if (!sd) { | ||
224 | dev_err(&hdev->dev, "cannot allocate Stantum data\n"); | ||
225 | return -ENOMEM; | ||
226 | } | ||
227 | sd->valid = false; | ||
228 | sd->first = false; | ||
229 | sd->activity = false; | ||
230 | hid_set_drvdata(hdev, sd); | ||
231 | |||
232 | ret = hid_parse(hdev); | ||
233 | if (!ret) | ||
234 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); | ||
235 | |||
236 | if (ret) | ||
237 | kfree(sd); | ||
238 | |||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | static void stantum_remove(struct hid_device *hdev) | ||
243 | { | ||
244 | hid_hw_stop(hdev); | ||
245 | kfree(hid_get_drvdata(hdev)); | ||
246 | hid_set_drvdata(hdev, NULL); | ||
247 | } | ||
248 | |||
249 | static const struct hid_device_id stantum_devices[] = { | ||
250 | { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, | ||
251 | { } | ||
252 | }; | ||
253 | MODULE_DEVICE_TABLE(hid, stantum_devices); | ||
254 | |||
255 | static const struct hid_usage_id stantum_grabbed_usages[] = { | ||
256 | { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, | ||
257 | { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 } | ||
258 | }; | ||
259 | |||
260 | static struct hid_driver stantum_driver = { | ||
261 | .name = "stantum", | ||
262 | .id_table = stantum_devices, | ||
263 | .probe = stantum_probe, | ||
264 | .remove = stantum_remove, | ||
265 | .input_mapping = stantum_input_mapping, | ||
266 | .input_mapped = stantum_input_mapped, | ||
267 | .usage_table = stantum_grabbed_usages, | ||
268 | .event = stantum_event, | ||
269 | }; | ||
270 | |||
271 | static int __init stantum_init(void) | ||
272 | { | ||
273 | return hid_register_driver(&stantum_driver); | ||
274 | } | ||
275 | |||
276 | static void __exit stantum_exit(void) | ||
277 | { | ||
278 | hid_unregister_driver(&stantum_driver); | ||
279 | } | ||
280 | |||
281 | module_init(stantum_init); | ||
282 | module_exit(stantum_exit); | ||
283 | |||
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index 12dcda529201..8d3b46f5d149 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c | |||
@@ -156,7 +156,9 @@ static int wacom_probe(struct hid_device *hdev, | |||
156 | struct hid_input *hidinput; | 156 | struct hid_input *hidinput; |
157 | struct input_dev *input; | 157 | struct input_dev *input; |
158 | struct wacom_data *wdata; | 158 | struct wacom_data *wdata; |
159 | char rep_data[2]; | ||
159 | int ret; | 160 | int ret; |
161 | int limit; | ||
160 | 162 | ||
161 | wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); | 163 | wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); |
162 | if (wdata == NULL) { | 164 | if (wdata == NULL) { |
@@ -166,6 +168,7 @@ static int wacom_probe(struct hid_device *hdev, | |||
166 | 168 | ||
167 | hid_set_drvdata(hdev, wdata); | 169 | hid_set_drvdata(hdev, wdata); |
168 | 170 | ||
171 | /* Parse the HID report now */ | ||
169 | ret = hid_parse(hdev); | 172 | ret = hid_parse(hdev); |
170 | if (ret) { | 173 | if (ret) { |
171 | dev_err(&hdev->dev, "parse failed\n"); | 174 | dev_err(&hdev->dev, "parse failed\n"); |
@@ -178,6 +181,31 @@ static int wacom_probe(struct hid_device *hdev, | |||
178 | goto err_free; | 181 | goto err_free; |
179 | } | 182 | } |
180 | 183 | ||
184 | /* | ||
185 | * Note that if the raw queries fail, it's not a hard failure and it | ||
186 | * is safe to continue. | ||
187 | */ | ||
188 | |||
189 | /* Set Wacom mode2 */ | ||
190 | rep_data[0] = 0x03; rep_data[1] = 0x00; | ||
191 | limit = 3; | ||
192 | do { | ||
193 | ret = hdev->hid_output_raw_report(hdev, rep_data, 2, | ||
194 | HID_FEATURE_REPORT); | ||
195 | } while (ret < 0 && limit-- > 0); | ||
196 | if (ret < 0) | ||
197 | dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret); | ||
198 | |||
199 | /* 0x06 - high reporting speed, 0x05 - low speed */ | ||
200 | rep_data[0] = 0x06; rep_data[1] = 0x00; | ||
201 | limit = 3; | ||
202 | do { | ||
203 | ret = hdev->hid_output_raw_report(hdev, rep_data, 2, | ||
204 | HID_FEATURE_REPORT); | ||
205 | } while (ret < 0 && limit-- > 0); | ||
206 | if (ret < 0) | ||
207 | dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret); | ||
208 | |||
181 | hidinput = list_entry(hdev->inputs.next, struct hid_input, list); | 209 | hidinput = list_entry(hdev->inputs.next, struct hid_input, list); |
182 | input = hidinput->input; | 210 | input = hidinput->input; |
183 | 211 | ||
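The probe path above pokes the tablet with two feature reports over the raw-report callback, retrying each up to three times and only warning on failure, since the comment notes a failed query is not fatal. The same bounded-retry logic, factored into one place for illustration (the helper does not exist in the driver and assumes the driver's existing includes):

/* Sketch, not part of the patch: the retry pattern used in wacom_probe().
 * wacom_poke(hdev, 0x03, 0x00) would select mode 2,
 * wacom_poke(hdev, 0x06, 0x00) the high reporting speed.
 */
static void wacom_poke(struct hid_device *hdev, u8 b0, u8 b1)
{
	char rep_data[2] = { b0, b1 };
	int limit = 3;
	int ret;

	do {
		ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
						HID_FEATURE_REPORT);
	} while (ret < 0 && limit-- > 0);

	if (ret < 0)
		dev_warn(&hdev->dev, "failed to poke device, %d\n", ret);
}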
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index cdd136942bca..d04476700b7b 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -134,7 +134,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t | |||
134 | goto out; | 134 | goto out; |
135 | } | 135 | } |
136 | 136 | ||
137 | ret = dev->hid_output_raw_report(dev, buf, count); | 137 | ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT); |
138 | out: | 138 | out: |
139 | kfree(buf); | 139 | kfree(buf); |
140 | return ret; | 140 | return ret; |
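The one-line hidraw change above follows an interface change that runs through this series: the hid_output_raw_report() callback now takes the report type as its last argument, so hidraw keeps its old behaviour by passing HID_OUTPUT_REPORT, while hid-sony and hid-wacom pass HID_FEATURE_REPORT. In the USB transport (next file) that constant selects the high byte of the SET_REPORT wValue. A standalone sketch of the mapping, using the constants' values from include/linux/hid.h (HID_OUTPUT_REPORT is 1, HID_FEATURE_REPORT is 2):

#include <stdio.h>

/* Sketch: how usbhid_output_raw_report() (below) builds the SET_REPORT
 * wValue. USB HID report types are 1 = Input, 2 = Output, 3 = Feature,
 * hence the "+ 1"; the low byte carries the report ID.
 */
int main(void)
{
	unsigned char report_type = 1;	/* HID_OUTPUT_REPORT */
	unsigned char report_id = 0;	/* first byte of the caller's buffer */
	unsigned int wvalue = ((report_type + 1) << 8) | report_id;

	printf("wValue = 0x%04x\n", wvalue);	/* prints 0x0200 */
	return 0;
}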
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index e2997a8d5e1b..56d06cd8075b 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> | 5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> |
6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc | 6 | * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc |
7 | * Copyright (c) 2007-2008 Oliver Neukum | 7 | * Copyright (c) 2007-2008 Oliver Neukum |
8 | * Copyright (c) 2006-2009 Jiri Kosina | 8 | * Copyright (c) 2006-2010 Jiri Kosina |
9 | */ | 9 | */ |
10 | 10 | ||
11 | /* | 11 | /* |
@@ -316,6 +316,7 @@ static int hid_submit_out(struct hid_device *hid) | |||
316 | err_hid("usb_submit_urb(out) failed"); | 316 | err_hid("usb_submit_urb(out) failed"); |
317 | return -1; | 317 | return -1; |
318 | } | 318 | } |
319 | usbhid->last_out = jiffies; | ||
319 | } else { | 320 | } else { |
320 | /* | 321 | /* |
321 | * queue work to wake up the device. | 322 | * queue work to wake up the device. |
@@ -377,6 +378,7 @@ static int hid_submit_ctrl(struct hid_device *hid) | |||
377 | err_hid("usb_submit_urb(ctrl) failed"); | 378 | err_hid("usb_submit_urb(ctrl) failed"); |
378 | return -1; | 379 | return -1; |
379 | } | 380 | } |
381 | usbhid->last_ctrl = jiffies; | ||
380 | } else { | 382 | } else { |
381 | /* | 383 | /* |
382 | * queue work to wake up the device. | 384 | * queue work to wake up the device. |
@@ -512,9 +514,20 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re | |||
512 | usbhid->out[usbhid->outhead].report = report; | 514 | usbhid->out[usbhid->outhead].report = report; |
513 | usbhid->outhead = head; | 515 | usbhid->outhead = head; |
514 | 516 | ||
515 | if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) | 517 | if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) { |
516 | if (hid_submit_out(hid)) | 518 | if (hid_submit_out(hid)) |
517 | clear_bit(HID_OUT_RUNNING, &usbhid->iofl); | 519 | clear_bit(HID_OUT_RUNNING, &usbhid->iofl); |
520 | } else { | ||
521 | /* | ||
522 | * The queue is known to be running, | ||
523 | * but an earlier request may be stuck; | ||
524 | * time it out if it has waited too long. | ||
525 | * No race: this is called under the | ||
526 | * fifo spinlock. | ||
527 | */ | ||
528 | if (time_after(jiffies, usbhid->last_out + HZ * 5)) | ||
529 | usb_unlink_urb(usbhid->urbout); | ||
530 | } | ||
518 | return; | 531 | return; |
519 | } | 532 | } |
520 | 533 | ||
@@ -535,9 +548,20 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re | |||
535 | usbhid->ctrl[usbhid->ctrlhead].dir = dir; | 548 | usbhid->ctrl[usbhid->ctrlhead].dir = dir; |
536 | usbhid->ctrlhead = head; | 549 | usbhid->ctrlhead = head; |
537 | 550 | ||
538 | if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) | 551 | if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) { |
539 | if (hid_submit_ctrl(hid)) | 552 | if (hid_submit_ctrl(hid)) |
540 | clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); | 553 | clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); |
554 | } else { | ||
555 | /* | ||
556 | * The queue is known to be running, | ||
557 | * but an earlier request may be stuck; | ||
558 | * time it out if it has waited too long. | ||
559 | * No race: this is called under the | ||
560 | * fifo spinlock. | ||
561 | */ | ||
562 | if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) | ||
563 | usb_unlink_urb(usbhid->urbctrl); | ||
564 | } | ||
541 | } | 565 | } |
542 | 566 | ||
543 | void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) | 567 | void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) |
@@ -774,7 +798,8 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid) | |||
774 | return 0; | 798 | return 0; |
775 | } | 799 | } |
776 | 800 | ||
777 | static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count) | 801 | static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, |
802 | unsigned char report_type) | ||
778 | { | 803 | { |
779 | struct usbhid_device *usbhid = hid->driver_data; | 804 | struct usbhid_device *usbhid = hid->driver_data; |
780 | struct usb_device *dev = hid_to_usb_dev(hid); | 805 | struct usb_device *dev = hid_to_usb_dev(hid); |
@@ -785,7 +810,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co | |||
785 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 810 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
786 | HID_REQ_SET_REPORT, | 811 | HID_REQ_SET_REPORT, |
787 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | 812 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, |
788 | ((HID_OUTPUT_REPORT + 1) << 8) | *buf, | 813 | ((report_type + 1) << 8) | *buf, |
789 | interface->desc.bInterfaceNumber, buf + 1, count - 1, | 814 | interface->desc.bInterfaceNumber, buf + 1, count - 1, |
790 | USB_CTRL_SET_TIMEOUT); | 815 | USB_CTRL_SET_TIMEOUT); |
791 | 816 | ||
@@ -981,9 +1006,6 @@ static int usbhid_start(struct hid_device *hid) | |||
981 | 1006 | ||
982 | spin_lock_init(&usbhid->lock); | 1007 | spin_lock_init(&usbhid->lock); |
983 | 1008 | ||
984 | usbhid->intf = intf; | ||
985 | usbhid->ifnum = interface->desc.bInterfaceNumber; | ||
986 | |||
987 | usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); | 1009 | usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); |
988 | if (!usbhid->urbctrl) { | 1010 | if (!usbhid->urbctrl) { |
989 | ret = -ENOMEM; | 1011 | ret = -ENOMEM; |
@@ -1154,6 +1176,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * | |||
1154 | 1176 | ||
1155 | hid->driver_data = usbhid; | 1177 | hid->driver_data = usbhid; |
1156 | usbhid->hid = hid; | 1178 | usbhid->hid = hid; |
1179 | usbhid->intf = intf; | ||
1180 | usbhid->ifnum = interface->desc.bInterfaceNumber; | ||
1157 | 1181 | ||
1158 | ret = hid_add_device(hid); | 1182 | ret = hid_add_device(hid); |
1159 | if (ret) { | 1183 | if (ret) { |
@@ -1342,7 +1366,7 @@ static int hid_reset_resume(struct usb_interface *intf) | |||
1342 | 1366 | ||
1343 | #endif /* CONFIG_PM */ | 1367 | #endif /* CONFIG_PM */ |
1344 | 1368 | ||
1345 | static struct usb_device_id hid_usb_ids [] = { | 1369 | static const struct usb_device_id hid_usb_ids[] = { |
1346 | { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, | 1370 | { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, |
1347 | .bInterfaceClass = USB_INTERFACE_CLASS_HID }, | 1371 | .bInterfaceClass = USB_INTERFACE_CLASS_HID }, |
1348 | { } /* Terminating entry */ | 1372 | { } /* Terminating entry */ |
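Both submission paths above now record the jiffies timestamp of the last submitted URB (usbhid->last_out, usbhid->last_ctrl) and, when the queue is already marked running but the oldest request has been pending for more than five seconds, unlink that URB so the queue can make progress again. A self-contained sketch of the jiffies timeout idiom; the function name is illustrative:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Sketch of the timeout test used above: true once more than five
 * seconds' worth of jiffies have elapsed since 'last'.
 */
static bool request_timed_out(unsigned long last)
{
	return time_after(jiffies, last + 5 * HZ);
}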
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 38773dc2821b..7844280897d1 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -43,8 +43,10 @@ static const struct hid_blacklist { | |||
43 | 43 | ||
44 | { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, | 44 | { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, |
45 | 45 | ||
46 | { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, | ||
46 | { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, | 47 | { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, |
47 | { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, | 48 | { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, |
49 | { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, | ||
48 | 50 | ||
49 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, | 51 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, |
50 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, | 52 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, |
@@ -57,6 +59,7 @@ static const struct hid_blacklist { | |||
57 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, | 59 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, |
58 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 60 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
59 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, | 61 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
62 | { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, | ||
60 | { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, | 63 | { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, |
61 | { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, | 64 | { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, |
62 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, | 65 | { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, |
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index 08f505ca2e3d..ec20400c7f29 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h | |||
@@ -80,12 +80,14 @@ struct usbhid_device { | |||
80 | unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */ | 80 | unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */ |
81 | char *ctrlbuf; /* Control buffer */ | 81 | char *ctrlbuf; /* Control buffer */ |
82 | dma_addr_t ctrlbuf_dma; /* Control buffer dma */ | 82 | dma_addr_t ctrlbuf_dma; /* Control buffer dma */ |
83 | unsigned long last_ctrl; /* record of last output for timeouts */ | ||
83 | 84 | ||
84 | struct urb *urbout; /* Output URB */ | 85 | struct urb *urbout; /* Output URB */ |
85 | struct hid_output_fifo out[HID_CONTROL_FIFO_SIZE]; /* Output pipe fifo */ | 86 | struct hid_output_fifo out[HID_CONTROL_FIFO_SIZE]; /* Output pipe fifo */ |
86 | unsigned char outhead, outtail; /* Output pipe fifo head & tail */ | 87 | unsigned char outhead, outtail; /* Output pipe fifo head & tail */ |
87 | char *outbuf; /* Output buffer */ | 88 | char *outbuf; /* Output buffer */ |
88 | dma_addr_t outbuf_dma; /* Output buffer dma */ | 89 | dma_addr_t outbuf_dma; /* Output buffer dma */ |
90 | unsigned long last_out; /* record of last output for timeouts */ | ||
89 | 91 | ||
90 | spinlock_t lock; /* fifo spinlock */ | 92 | spinlock_t lock; /* fifo spinlock */ |
91 | unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ | 93 | unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ |
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c index aa6713b4a988..291d9393d359 100644 --- a/drivers/input/input-polldev.c +++ b/drivers/input/input-polldev.c | |||
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input) | |||
100 | struct input_polled_dev *dev = input_get_drvdata(input); | 100 | struct input_polled_dev *dev = input_get_drvdata(input); |
101 | 101 | ||
102 | cancel_delayed_work_sync(&dev->work); | 102 | cancel_delayed_work_sync(&dev->work); |
103 | /* | ||
104 | * Clean up the work struct to drop its reference to the workqueue, | ||
105 | * which the call below may destroy. A stale reference causes problems | ||
106 | * on the next device open/close cycle when poll_interval == 0. | ||
107 | */ | ||
108 | INIT_DELAYED_WORK(&dev->work, dev->work.work.func); | ||
103 | input_polldev_stop_workqueue(); | 109 | input_polldev_stop_workqueue(); |
104 | 110 | ||
105 | if (dev->close) | 111 | if (dev->close) |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index d84a36e545f6..b54aee7cd9e3 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev) | |||
1161 | return 0; | 1161 | return 0; |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | static int i8042_pm_thaw(struct device *dev) | ||
1165 | { | ||
1166 | i8042_interrupt(0, NULL); | ||
1167 | |||
1168 | return 0; | ||
1169 | } | ||
1170 | |||
1164 | static const struct dev_pm_ops i8042_pm_ops = { | 1171 | static const struct dev_pm_ops i8042_pm_ops = { |
1165 | .suspend = i8042_pm_reset, | 1172 | .suspend = i8042_pm_reset, |
1166 | .resume = i8042_pm_restore, | 1173 | .resume = i8042_pm_restore, |
1174 | .thaw = i8042_pm_thaw, | ||
1167 | .poweroff = i8042_pm_reset, | 1175 | .poweroff = i8042_pm_reset, |
1168 | .restore = i8042_pm_restore, | 1176 | .restore = i8042_pm_restore, |
1169 | }; | 1177 | }; |
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 09a5e7341bd5..5256123a5228 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c | |||
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt) | |||
618 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH | 618 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH |
619 | static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) | 619 | static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) |
620 | { | 620 | { |
621 | dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ; | 621 | dev->x = (pkt[2] << 8) | pkt[1]; |
622 | dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ; | 622 | dev->y = (pkt[4] << 8) | pkt[3]; |
623 | dev->press = pkt[5] & 0xff; | 623 | dev->press = pkt[5] & 0xff; |
624 | dev->touch = pkt[0] & 0x01; | 624 | dev->touch = pkt[0] & 0x01; |
625 | 625 | ||
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { | |||
809 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH | 809 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH |
810 | [DEVTYPE_GENERAL_TOUCH] = { | 810 | [DEVTYPE_GENERAL_TOUCH] = { |
811 | .min_xc = 0x0, | 811 | .min_xc = 0x0, |
812 | .max_xc = 0x0500, | 812 | .max_xc = 0x7fff, |
813 | .min_yc = 0x0, | 813 | .min_yc = 0x0, |
814 | .max_yc = 0x0500, | 814 | .max_yc = 0x7fff, |
815 | .rept_size = 7, | 815 | .rept_size = 7, |
816 | .read_data = general_touch_read_data, | 816 | .read_data = general_touch_read_data, |
817 | }, | 817 | }, |
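The GeneralTouch change above stops masking the coordinate high byte down to four bits, so X and Y are decoded as full little-endian 16-bit values, and the advertised axis maximum grows from 0x0500 to 0x7fff to match. A standalone sketch of the decode; the packet bytes are made up for illustration:

#include <stdio.h>

/* Sketch: decoding a packet the way the updated general_touch_read_data()
 * does. The bytes below are illustrative, not captured from hardware.
 */
int main(void)
{
	unsigned char pkt[7] = { 0x01, 0x34, 0x12, 0x78, 0x56, 0x40, 0x00 };
	int x = (pkt[2] << 8) | pkt[1];	/* 0x1234 */
	int y = (pkt[4] << 8) | pkt[3];	/* 0x5678 */
	int touch = pkt[0] & 0x01;
	int press = pkt[5] & 0xff;

	printf("x=%d y=%d touch=%d press=%d\n", x, y, touch, press);
	return 0;
}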
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig index 3464ebc4cdbc..452fde9edf86 100644 --- a/drivers/isdn/hisax/Kconfig +++ b/drivers/isdn/hisax/Kconfig | |||
@@ -109,7 +109,7 @@ config HISAX_16_3 | |||
109 | 109 | ||
110 | config HISAX_TELESPCI | 110 | config HISAX_TELESPCI |
111 | bool "Teles PCI" | 111 | bool "Teles PCI" |
112 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 112 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
113 | help | 113 | help |
114 | This enables HiSax support for the Teles PCI. | 114 | This enables HiSax support for the Teles PCI. |
115 | See <file:Documentation/isdn/README.HiSax> on how to configure it. | 115 | See <file:Documentation/isdn/README.HiSax> on how to configure it. |
@@ -237,7 +237,7 @@ config HISAX_MIC | |||
237 | 237 | ||
238 | config HISAX_NETJET | 238 | config HISAX_NETJET |
239 | bool "NETjet card" | 239 | bool "NETjet card" |
240 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 240 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
241 | help | 241 | help |
242 | This enables HiSax support for the NetJet from Traverse | 242 | This enables HiSax support for the NetJet from Traverse |
243 | Technologies. | 243 | Technologies. |
@@ -248,7 +248,7 @@ config HISAX_NETJET | |||
248 | 248 | ||
249 | config HISAX_NETJET_U | 249 | config HISAX_NETJET_U |
250 | bool "NETspider U card" | 250 | bool "NETspider U card" |
251 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 251 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
252 | help | 252 | help |
253 | This enables HiSax support for the Netspider U interface ISDN card | 253 | This enables HiSax support for the Netspider U interface ISDN card |
254 | from Traverse Technologies. | 254 | from Traverse Technologies. |
@@ -287,7 +287,7 @@ config HISAX_HSTSAPHIR | |||
287 | 287 | ||
288 | config HISAX_BKM_A4T | 288 | config HISAX_BKM_A4T |
289 | bool "Telekom A4T card" | 289 | bool "Telekom A4T card" |
290 | depends on PCI && PCI_LEGACY | 290 | depends on PCI |
291 | help | 291 | help |
292 | This enables HiSax support for the Telekom A4T card. | 292 | This enables HiSax support for the Telekom A4T card. |
293 | 293 | ||
@@ -297,7 +297,7 @@ config HISAX_BKM_A4T | |||
297 | 297 | ||
298 | config HISAX_SCT_QUADRO | 298 | config HISAX_SCT_QUADRO |
299 | bool "Scitel Quadro card" | 299 | bool "Scitel Quadro card" |
300 | depends on PCI && PCI_LEGACY | 300 | depends on PCI |
301 | help | 301 | help |
302 | This enables HiSax support for the Scitel Quadro card. | 302 | This enables HiSax support for the Scitel Quadro card. |
303 | 303 | ||
@@ -316,7 +316,7 @@ config HISAX_GAZEL | |||
316 | 316 | ||
317 | config HISAX_HFC_PCI | 317 | config HISAX_HFC_PCI |
318 | bool "HFC PCI-Bus cards" | 318 | bool "HFC PCI-Bus cards" |
319 | depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 319 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
320 | help | 320 | help |
321 | This enables HiSax support for the HFC-S PCI 2BDS0 based cards. | 321 | This enables HiSax support for the HFC-S PCI 2BDS0 based cards. |
322 | 322 | ||
@@ -325,7 +325,7 @@ config HISAX_HFC_PCI | |||
325 | 325 | ||
326 | config HISAX_W6692 | 326 | config HISAX_W6692 |
327 | bool "Winbond W6692 based cards" | 327 | bool "Winbond W6692 based cards" |
328 | depends on PCI && PCI_LEGACY | 328 | depends on PCI |
329 | help | 329 | help |
330 | This enables HiSax support for Winbond W6692 based PCI ISDN cards. | 330 | This enables HiSax support for Winbond W6692 based PCI ISDN cards. |
331 | 331 | ||
@@ -341,7 +341,7 @@ config HISAX_HFC_SX | |||
341 | 341 | ||
342 | config HISAX_ENTERNOW_PCI | 342 | config HISAX_ENTERNOW_PCI |
343 | bool "Formula-n enter:now PCI card" | 343 | bool "Formula-n enter:now PCI card" |
344 | depends on HISAX_NETJET && PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) | 344 | depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) |
345 | help | 345 | help |
346 | This enables HiSax support for the Formula-n enter:now PCI | 346 | This enables HiSax support for the Formula-n enter:now PCI |
347 | ISDN card. | 347 | ISDN card. |
@@ -412,7 +412,7 @@ config HISAX_HFC4S8S | |||
412 | 412 | ||
413 | config HISAX_FRITZ_PCIPNP | 413 | config HISAX_FRITZ_PCIPNP |
414 | tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)" | 414 | tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)" |
415 | depends on PCI && PCI_LEGACY && EXPERIMENTAL | 415 | depends on PCI && EXPERIMENTAL |
416 | help | 416 | help |
417 | This enables the driver for the AVM Fritz!Card PCI, | 417 | This enables the driver for the AVM Fritz!Card PCI, |
418 | Fritz!Card PCI v2 and Fritz!Card PnP. | 418 | Fritz!Card PCI v2 and Fritz!Card PnP. |
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c index 7cabc5a19492..14295a155e71 100644 --- a/drivers/isdn/hisax/avm_pci.c +++ b/drivers/isdn/hisax/avm_pci.c | |||
@@ -822,7 +822,7 @@ static int __devinit avm_pnp_setup(struct IsdnCardState *cs) | |||
822 | 822 | ||
823 | #endif /* __ISAPNP__ */ | 823 | #endif /* __ISAPNP__ */ |
824 | 824 | ||
825 | #ifndef CONFIG_PCI_LEGACY | 825 | #ifndef CONFIG_PCI |
826 | 826 | ||
827 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) | 827 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) |
828 | { | 828 | { |
@@ -835,7 +835,7 @@ static struct pci_dev *dev_avm __devinitdata = NULL; | |||
835 | 835 | ||
836 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) | 836 | static int __devinit avm_pci_setup(struct IsdnCardState *cs) |
837 | { | 837 | { |
838 | if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM, | 838 | if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM, |
839 | PCI_DEVICE_ID_AVM_A1, dev_avm))) { | 839 | PCI_DEVICE_ID_AVM_A1, dev_avm))) { |
840 | 840 | ||
841 | if (pci_enable_device(dev_avm)) | 841 | if (pci_enable_device(dev_avm)) |
@@ -864,7 +864,7 @@ static int __devinit avm_pci_setup(struct IsdnCardState *cs) | |||
864 | return (1); | 864 | return (1); |
865 | } | 865 | } |
866 | 866 | ||
867 | #endif /* CONFIG_PCI_LEGACY */ | 867 | #endif /* CONFIG_PCI */ |
868 | 868 | ||
869 | int __devinit | 869 | int __devinit |
870 | setup_avm_pcipnp(struct IsdnCard *card) | 870 | setup_avm_pcipnp(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c index 9ca2ee54cc94..9f2009c0b69c 100644 --- a/drivers/isdn/hisax/bkm_a4t.c +++ b/drivers/isdn/hisax/bkm_a4t.c | |||
@@ -340,7 +340,7 @@ setup_bkm_a4t(struct IsdnCard *card) | |||
340 | } else | 340 | } else |
341 | return (0); | 341 | return (0); |
342 | 342 | ||
343 | while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN, | 343 | while ((dev_a4t = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN, |
344 | PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) { | 344 | PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) { |
345 | ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr); | 345 | ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr); |
346 | if (!ret) | 346 | if (!ret) |
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c index e1ff4717a8a6..e775706c60e3 100644 --- a/drivers/isdn/hisax/bkm_a8.c +++ b/drivers/isdn/hisax/bkm_a8.c | |||
@@ -301,7 +301,7 @@ setup_sct_quadro(struct IsdnCard *card) | |||
301 | (sub_vendor_id != PCI_VENDOR_ID_BERKOM))) | 301 | (sub_vendor_id != PCI_VENDOR_ID_BERKOM))) |
302 | return (0); | 302 | return (0); |
303 | if (cs->subtyp == SCT_1) { | 303 | if (cs->subtyp == SCT_1) { |
304 | while ((dev_a8 = pci_find_device(PCI_VENDOR_ID_PLX, | 304 | while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX, |
305 | PCI_DEVICE_ID_PLX_9050, dev_a8))) { | 305 | PCI_DEVICE_ID_PLX_9050, dev_a8))) { |
306 | 306 | ||
307 | sub_vendor_id = dev_a8->subsystem_vendor; | 307 | sub_vendor_id = dev_a8->subsystem_vendor; |
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c index 0b0c2e5d806b..780da9bda915 100644 --- a/drivers/isdn/hisax/diva.c +++ b/drivers/isdn/hisax/diva.c | |||
@@ -1148,7 +1148,7 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card) | |||
1148 | 1148 | ||
1149 | #endif /* ISAPNP */ | 1149 | #endif /* ISAPNP */ |
1150 | 1150 | ||
1151 | #ifdef CONFIG_PCI_LEGACY | 1151 | #ifdef CONFIG_PCI |
1152 | static struct pci_dev *dev_diva __devinitdata = NULL; | 1152 | static struct pci_dev *dev_diva __devinitdata = NULL; |
1153 | static struct pci_dev *dev_diva_u __devinitdata = NULL; | 1153 | static struct pci_dev *dev_diva_u __devinitdata = NULL; |
1154 | static struct pci_dev *dev_diva201 __devinitdata = NULL; | 1154 | static struct pci_dev *dev_diva201 __devinitdata = NULL; |
@@ -1159,21 +1159,21 @@ static int __devinit setup_diva_pci(struct IsdnCard *card) | |||
1159 | struct IsdnCardState *cs = card->cs; | 1159 | struct IsdnCardState *cs = card->cs; |
1160 | 1160 | ||
1161 | cs->subtyp = 0; | 1161 | cs->subtyp = 0; |
1162 | if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON, | 1162 | if ((dev_diva = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1163 | PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) { | 1163 | PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) { |
1164 | if (pci_enable_device(dev_diva)) | 1164 | if (pci_enable_device(dev_diva)) |
1165 | return(0); | 1165 | return(0); |
1166 | cs->subtyp = DIVA_PCI; | 1166 | cs->subtyp = DIVA_PCI; |
1167 | cs->irq = dev_diva->irq; | 1167 | cs->irq = dev_diva->irq; |
1168 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2); | 1168 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2); |
1169 | } else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON, | 1169 | } else if ((dev_diva_u = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1170 | PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) { | 1170 | PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) { |
1171 | if (pci_enable_device(dev_diva_u)) | 1171 | if (pci_enable_device(dev_diva_u)) |
1172 | return(0); | 1172 | return(0); |
1173 | cs->subtyp = DIVA_PCI; | 1173 | cs->subtyp = DIVA_PCI; |
1174 | cs->irq = dev_diva_u->irq; | 1174 | cs->irq = dev_diva_u->irq; |
1175 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2); | 1175 | cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2); |
1176 | } else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON, | 1176 | } else if ((dev_diva201 = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1177 | PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) { | 1177 | PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) { |
1178 | if (pci_enable_device(dev_diva201)) | 1178 | if (pci_enable_device(dev_diva201)) |
1179 | return(0); | 1179 | return(0); |
@@ -1183,7 +1183,7 @@ static int __devinit setup_diva_pci(struct IsdnCard *card) | |||
1183 | (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096); | 1183 | (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096); |
1184 | cs->hw.diva.cfg_reg = | 1184 | cs->hw.diva.cfg_reg = |
1185 | (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096); | 1185 | (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096); |
1186 | } else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON, | 1186 | } else if ((dev_diva202 = hisax_find_pci_device(PCI_VENDOR_ID_EICON, |
1187 | PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) { | 1187 | PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) { |
1188 | if (pci_enable_device(dev_diva202)) | 1188 | if (pci_enable_device(dev_diva202)) |
1189 | return(0); | 1189 | return(0); |
@@ -1229,14 +1229,14 @@ static int __devinit setup_diva_pci(struct IsdnCard *card) | |||
1229 | return (1); /* card found */ | 1229 | return (1); /* card found */ |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | #else /* if !CONFIG_PCI_LEGACY */ | 1232 | #else /* if !CONFIG_PCI */ |
1233 | 1233 | ||
1234 | static int __devinit setup_diva_pci(struct IsdnCard *card) | 1234 | static int __devinit setup_diva_pci(struct IsdnCard *card) |
1235 | { | 1235 | { |
1236 | return (-1); /* card not found; continue search */ | 1236 | return (-1); /* card not found; continue search */ |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | #endif /* CONFIG_PCI_LEGACY */ | 1239 | #endif /* CONFIG_PCI */ |
1240 | 1240 | ||
1241 | int __devinit | 1241 | int __devinit |
1242 | setup_diva(struct IsdnCard *card) | 1242 | setup_diva(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c index aa29d1cf16af..23c41fcd864e 100644 --- a/drivers/isdn/hisax/elsa.c +++ b/drivers/isdn/hisax/elsa.c | |||
@@ -1025,7 +1025,7 @@ setup_elsa_pcmcia(struct IsdnCard *card) | |||
1025 | cs->irq); | 1025 | cs->irq); |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | #ifdef CONFIG_PCI_LEGACY | 1028 | #ifdef CONFIG_PCI |
1029 | static struct pci_dev *dev_qs1000 __devinitdata = NULL; | 1029 | static struct pci_dev *dev_qs1000 __devinitdata = NULL; |
1030 | static struct pci_dev *dev_qs3000 __devinitdata = NULL; | 1030 | static struct pci_dev *dev_qs3000 __devinitdata = NULL; |
1031 | 1031 | ||
@@ -1035,7 +1035,7 @@ setup_elsa_pci(struct IsdnCard *card) | |||
1035 | struct IsdnCardState *cs = card->cs; | 1035 | struct IsdnCardState *cs = card->cs; |
1036 | 1036 | ||
1037 | cs->subtyp = 0; | 1037 | cs->subtyp = 0; |
1038 | if ((dev_qs1000 = pci_find_device(PCI_VENDOR_ID_ELSA, | 1038 | if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, |
1039 | PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) { | 1039 | PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) { |
1040 | if (pci_enable_device(dev_qs1000)) | 1040 | if (pci_enable_device(dev_qs1000)) |
1041 | return(0); | 1041 | return(0); |
@@ -1043,7 +1043,7 @@ setup_elsa_pci(struct IsdnCard *card) | |||
1043 | cs->irq = dev_qs1000->irq; | 1043 | cs->irq = dev_qs1000->irq; |
1044 | cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1); | 1044 | cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1); |
1045 | cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3); | 1045 | cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3); |
1046 | } else if ((dev_qs3000 = pci_find_device(PCI_VENDOR_ID_ELSA, | 1046 | } else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, |
1047 | PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) { | 1047 | PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) { |
1048 | if (pci_enable_device(dev_qs3000)) | 1048 | if (pci_enable_device(dev_qs3000)) |
1049 | return(0); | 1049 | return(0); |
@@ -1093,7 +1093,7 @@ setup_elsa_pci(struct IsdnCard *card) | |||
1093 | { | 1093 | { |
1094 | return (1); | 1094 | return (1); |
1095 | } | 1095 | } |
1096 | #endif /* CONFIG_PCI_LEGACY */ | 1096 | #endif /* CONFIG_PCI */ |
1097 | 1097 | ||
1098 | static int __devinit | 1098 | static int __devinit |
1099 | setup_elsa_common(struct IsdnCard *card) | 1099 | setup_elsa_common(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c index 39f421ed8de8..26264abf1f58 100644 --- a/drivers/isdn/hisax/enternow_pci.c +++ b/drivers/isdn/hisax/enternow_pci.c | |||
@@ -406,7 +406,7 @@ setup_enternow_pci(struct IsdnCard *card) | |||
406 | 406 | ||
407 | for ( ;; ) | 407 | for ( ;; ) |
408 | { | 408 | { |
409 | if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 409 | if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
410 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { | 410 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { |
411 | ret = en_pci_probe(dev_netjet, cs); | 411 | ret = en_pci_probe(dev_netjet, cs); |
412 | if (!ret) | 412 | if (!ret) |
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c index 0ea3b4607680..353982fc1436 100644 --- a/drivers/isdn/hisax/gazel.c +++ b/drivers/isdn/hisax/gazel.c | |||
@@ -531,7 +531,7 @@ setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs) | |||
531 | return (0); | 531 | return (0); |
532 | } | 532 | } |
533 | 533 | ||
534 | #ifdef CONFIG_PCI_LEGACY | 534 | #ifdef CONFIG_PCI |
535 | static struct pci_dev *dev_tel __devinitdata = NULL; | 535 | static struct pci_dev *dev_tel __devinitdata = NULL; |
536 | 536 | ||
537 | static int __devinit | 537 | static int __devinit |
@@ -546,7 +546,7 @@ setup_gazelpci(struct IsdnCardState *cs) | |||
546 | found = 0; | 546 | found = 0; |
547 | seekcard = PCI_DEVICE_ID_PLX_R685; | 547 | seekcard = PCI_DEVICE_ID_PLX_R685; |
548 | for (nbseek = 0; nbseek < 4; nbseek++) { | 548 | for (nbseek = 0; nbseek < 4; nbseek++) { |
549 | if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX, | 549 | if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_PLX, |
550 | seekcard, dev_tel))) { | 550 | seekcard, dev_tel))) { |
551 | if (pci_enable_device(dev_tel)) | 551 | if (pci_enable_device(dev_tel)) |
552 | return 1; | 552 | return 1; |
@@ -620,7 +620,7 @@ setup_gazelpci(struct IsdnCardState *cs) | |||
620 | 620 | ||
621 | return (0); | 621 | return (0); |
622 | } | 622 | } |
623 | #endif /* CONFIG_PCI_LEGACY */ | 623 | #endif /* CONFIG_PCI */ |
624 | 624 | ||
625 | int __devinit | 625 | int __devinit |
626 | setup_gazel(struct IsdnCard *card) | 626 | setup_gazel(struct IsdnCard *card) |
@@ -640,7 +640,7 @@ setup_gazel(struct IsdnCard *card) | |||
640 | return (0); | 640 | return (0); |
641 | } else { | 641 | } else { |
642 | 642 | ||
643 | #ifdef CONFIG_PCI_LEGACY | 643 | #ifdef CONFIG_PCI |
644 | if (setup_gazelpci(cs)) | 644 | if (setup_gazelpci(cs)) |
645 | return (0); | 645 | return (0); |
646 | #else | 646 | #else |
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 10914731b304..917cc84065bd 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c | |||
@@ -1658,7 +1658,7 @@ setup_hfcpci(struct IsdnCard *card) | |||
1658 | 1658 | ||
1659 | i = 0; | 1659 | i = 0; |
1660 | while (id_list[i].vendor_id) { | 1660 | while (id_list[i].vendor_id) { |
1661 | tmp_hfcpci = pci_find_device(id_list[i].vendor_id, | 1661 | tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id, |
1662 | id_list[i].device_id, | 1662 | id_list[i].device_id, |
1663 | dev_hfcpci); | 1663 | dev_hfcpci); |
1664 | i++; | 1664 | i++; |
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 0685c1946969..832a87855ffb 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h | |||
@@ -1323,3 +1323,26 @@ void release_tei(struct IsdnCardState *cs); | |||
1323 | char *HiSax_getrev(const char *revision); | 1323 | char *HiSax_getrev(const char *revision); |
1324 | int TeiNew(void); | 1324 | int TeiNew(void); |
1325 | void TeiFree(void); | 1325 | void TeiFree(void); |
1326 | |||
1327 | #ifdef CONFIG_PCI | ||
1328 | |||
1329 | #include <linux/pci.h> | ||
1330 | |||
1331 | /* adaptation wrapper for old usage | ||
1332 | * WARNING! This is unfit for use in a PCI hotplug environment, | ||
1333 | * as the returned PCI device can disappear at any moment in time. | ||
1334 | * Callers should be converted to use pci_get_device() instead. | ||
1335 | */ | ||
1336 | static inline struct pci_dev *hisax_find_pci_device(unsigned int vendor, | ||
1337 | unsigned int device, | ||
1338 | struct pci_dev *from) | ||
1339 | { | ||
1340 | struct pci_dev *pdev; | ||
1341 | |||
1342 | pci_dev_get(from); | ||
1343 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
1344 | pci_dev_put(pdev); | ||
1345 | return pdev; | ||
1346 | } | ||
1347 | |||
1348 | #endif | ||
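The wrapper above keeps the old pci_find_device() calling convention by immediately dropping the reference that pci_get_subsys() took, which is exactly why its own comment flags it as unsafe under hotplug. A minimal sketch of what a converted caller could look like, holding the reference for as long as the card is in use; the helper name and shape are illustrative, not part of this patch:

#include <linux/pci.h>

/*
 * Sketch: find the next matching card and keep the reference until the
 * driver is finished with it.  pci_get_device() drops the reference it
 * was handed in 'from' and returns a new one for the device it found.
 */
static struct pci_dev *probe_next_card(unsigned int vendor,
                                       unsigned int device,
                                       struct pci_dev *from)
{
        struct pci_dev *pdev = pci_get_device(vendor, device, from);

        if (!pdev)
                return NULL;                    /* no (further) card */

        if (pci_enable_device(pdev)) {
                pci_dev_put(pdev);              /* release on failure */
                return NULL;
        }

        return pdev;    /* caller owns one reference; pci_dev_put() later */
}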
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c index ef00633e1d2a..ccaa6e13310f 100644 --- a/drivers/isdn/hisax/niccy.c +++ b/drivers/isdn/hisax/niccy.c | |||
@@ -297,12 +297,12 @@ int __devinit setup_niccy(struct IsdnCard *card) | |||
297 | return 0; | 297 | return 0; |
298 | } | 298 | } |
299 | } else { | 299 | } else { |
300 | #ifdef CONFIG_PCI_LEGACY | 300 | #ifdef CONFIG_PCI |
301 | static struct pci_dev *niccy_dev __devinitdata; | 301 | static struct pci_dev *niccy_dev __devinitdata; |
302 | 302 | ||
303 | u_int pci_ioaddr; | 303 | u_int pci_ioaddr; |
304 | cs->subtyp = 0; | 304 | cs->subtyp = 0; |
305 | if ((niccy_dev = pci_find_device(PCI_VENDOR_ID_SATSAGEM, | 305 | if ((niccy_dev = hisax_find_pci_device(PCI_VENDOR_ID_SATSAGEM, |
306 | PCI_DEVICE_ID_SATSAGEM_NICCY, | 306 | PCI_DEVICE_ID_SATSAGEM_NICCY, |
307 | niccy_dev))) { | 307 | niccy_dev))) { |
308 | if (pci_enable_device(niccy_dev)) | 308 | if (pci_enable_device(niccy_dev)) |
@@ -354,7 +354,7 @@ int __devinit setup_niccy(struct IsdnCard *card) | |||
354 | printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n"); | 354 | printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n"); |
355 | printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n"); | 355 | printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n"); |
356 | return 0; | 356 | return 0; |
357 | #endif /* CONFIG_PCI_LEGACY */ | 357 | #endif /* CONFIG_PCI */ |
358 | } | 358 | } |
359 | printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n", | 359 | printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n", |
360 | (cs->subtyp == 1) ? "PnP" : "PCI", | 360 | (cs->subtyp == 1) ? "PnP" : "PCI", |
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c index 8d36ccc87d81..2344e7b33448 100644 --- a/drivers/isdn/hisax/nj_s.c +++ b/drivers/isdn/hisax/nj_s.c | |||
@@ -276,7 +276,7 @@ setup_netjet_s(struct IsdnCard *card) | |||
276 | 276 | ||
277 | for ( ;; ) | 277 | for ( ;; ) |
278 | { | 278 | { |
279 | if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 279 | if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
280 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { | 280 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { |
281 | ret = njs_pci_probe(dev_netjet, cs); | 281 | ret = njs_pci_probe(dev_netjet, cs); |
282 | if (!ret) | 282 | if (!ret) |
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c index d306c946ffba..095e974aed80 100644 --- a/drivers/isdn/hisax/nj_u.c +++ b/drivers/isdn/hisax/nj_u.c | |||
@@ -240,7 +240,7 @@ setup_netjet_u(struct IsdnCard *card) | |||
240 | 240 | ||
241 | for ( ;; ) | 241 | for ( ;; ) |
242 | { | 242 | { |
243 | if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 243 | if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
244 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { | 244 | PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { |
245 | ret = nju_pci_probe(dev_netjet, cs); | 245 | ret = nju_pci_probe(dev_netjet, cs); |
246 | if (!ret) | 246 | if (!ret) |
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c index 5569a522e2a1..69dfc8d29017 100644 --- a/drivers/isdn/hisax/sedlbauer.c +++ b/drivers/isdn/hisax/sedlbauer.c | |||
@@ -598,7 +598,7 @@ setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt) | |||
598 | } | 598 | } |
599 | #endif /* __ISAPNP__ */ | 599 | #endif /* __ISAPNP__ */ |
600 | 600 | ||
601 | #ifdef CONFIG_PCI_LEGACY | 601 | #ifdef CONFIG_PCI |
602 | static struct pci_dev *dev_sedl __devinitdata = NULL; | 602 | static struct pci_dev *dev_sedl __devinitdata = NULL; |
603 | 603 | ||
604 | static int __devinit | 604 | static int __devinit |
@@ -607,7 +607,7 @@ setup_sedlbauer_pci(struct IsdnCard *card) | |||
607 | struct IsdnCardState *cs = card->cs; | 607 | struct IsdnCardState *cs = card->cs; |
608 | u16 sub_vendor_id, sub_id; | 608 | u16 sub_vendor_id, sub_id; |
609 | 609 | ||
610 | if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET, | 610 | if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, |
611 | PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) { | 611 | PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) { |
612 | if (pci_enable_device(dev_sedl)) | 612 | if (pci_enable_device(dev_sedl)) |
613 | return(0); | 613 | return(0); |
@@ -673,7 +673,7 @@ setup_sedlbauer_pci(struct IsdnCard *card) | |||
673 | return (1); | 673 | return (1); |
674 | } | 674 | } |
675 | 675 | ||
676 | #endif /* CONFIG_PCI_LEGACY */ | 676 | #endif /* CONFIG_PCI */ |
677 | 677 | ||
678 | int __devinit | 678 | int __devinit |
679 | setup_sedlbauer(struct IsdnCard *card) | 679 | setup_sedlbauer(struct IsdnCard *card) |
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c index 28b08de4673d..b85ceb3746ce 100644 --- a/drivers/isdn/hisax/telespci.c +++ b/drivers/isdn/hisax/telespci.c | |||
@@ -300,7 +300,7 @@ setup_telespci(struct IsdnCard *card) | |||
300 | if (cs->typ != ISDN_CTYPE_TELESPCI) | 300 | if (cs->typ != ISDN_CTYPE_TELESPCI) |
301 | return (0); | 301 | return (0); |
302 | 302 | ||
303 | if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) { | 303 | if ((dev_tel = hisax_find_pci_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) { |
304 | if (pci_enable_device(dev_tel)) | 304 | if (pci_enable_device(dev_tel)) |
305 | return(0); | 305 | return(0); |
306 | cs->irq = dev_tel->irq; | 306 | cs->irq = dev_tel->irq; |
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c index c4d862c11a60..9d6e864023fe 100644 --- a/drivers/isdn/hisax/w6692.c +++ b/drivers/isdn/hisax/w6692.c | |||
@@ -1007,7 +1007,7 @@ setup_w6692(struct IsdnCard *card) | |||
1007 | return (0); | 1007 | return (0); |
1008 | 1008 | ||
1009 | while (id_list[id_idx].vendor_id) { | 1009 | while (id_list[id_idx].vendor_id) { |
1010 | dev_w6692 = pci_find_device(id_list[id_idx].vendor_id, | 1010 | dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id, |
1011 | id_list[id_idx].device_id, | 1011 | id_list[id_idx].device_id, |
1012 | dev_w6692); | 1012 | dev_w6692); |
1013 | if (dev_w6692) { | 1013 | if (dev_w6692) { |
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 23741cec45e3..d840a109f833 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c | |||
@@ -322,8 +322,8 @@ static int __init adb_init(void) | |||
322 | adb_controller = NULL; | 322 | adb_controller = NULL; |
323 | } else { | 323 | } else { |
324 | #ifdef CONFIG_PPC | 324 | #ifdef CONFIG_PPC |
325 | if (machine_is_compatible("AAPL,PowerBook1998") || | 325 | if (of_machine_is_compatible("AAPL,PowerBook1998") || |
326 | machine_is_compatible("PowerBook1,1")) | 326 | of_machine_is_compatible("PowerBook1,1")) |
327 | sleepy_trackpad = 1; | 327 | sleepy_trackpad = 1; |
328 | #endif /* CONFIG_PPC */ | 328 | #endif /* CONFIG_PPC */ |
329 | 329 | ||
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index 454bc501df3c..5738d8bf2d97 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c | |||
@@ -1899,7 +1899,7 @@ static int create_control_loops(void) | |||
1899 | */ | 1899 | */ |
1900 | if (rackmac) | 1900 | if (rackmac) |
1901 | cpu_pid_type = CPU_PID_TYPE_RACKMAC; | 1901 | cpu_pid_type = CPU_PID_TYPE_RACKMAC; |
1902 | else if (machine_is_compatible("PowerMac7,3") | 1902 | else if (of_machine_is_compatible("PowerMac7,3") |
1903 | && (cpu_count > 1) | 1903 | && (cpu_count > 1) |
1904 | && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID | 1904 | && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID |
1905 | && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) { | 1905 | && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) { |
@@ -2234,10 +2234,10 @@ static int __init therm_pm72_init(void) | |||
2234 | { | 2234 | { |
2235 | struct device_node *np; | 2235 | struct device_node *np; |
2236 | 2236 | ||
2237 | rackmac = machine_is_compatible("RackMac3,1"); | 2237 | rackmac = of_machine_is_compatible("RackMac3,1"); |
2238 | 2238 | ||
2239 | if (!machine_is_compatible("PowerMac7,2") && | 2239 | if (!of_machine_is_compatible("PowerMac7,2") && |
2240 | !machine_is_compatible("PowerMac7,3") && | 2240 | !of_machine_is_compatible("PowerMac7,3") && |
2241 | !rackmac) | 2241 | !rackmac) |
2242 | return -ENODEV; | 2242 | return -ENODEV; |
2243 | 2243 | ||
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index ba48fd76396e..7fb8b4da35a7 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c | |||
@@ -490,7 +490,7 @@ g4fan_init( void ) | |||
490 | info = of_get_property(np, "thermal-info", NULL); | 490 | info = of_get_property(np, "thermal-info", NULL); |
491 | of_node_put(np); | 491 | of_node_put(np); |
492 | 492 | ||
493 | if( !info || !machine_is_compatible("PowerMac3,6") ) | 493 | if( !info || !of_machine_is_compatible("PowerMac3,6") ) |
494 | return -ENODEV; | 494 | return -ENODEV; |
495 | 495 | ||
496 | if( info->id != 3 ) { | 496 | if( info->id != 3 ) { |
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index a348bb0791d3..4f3c4479c16a 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c | |||
@@ -150,13 +150,13 @@ void __init pmu_backlight_init() | |||
150 | 150 | ||
151 | /* Special case for the old PowerBook since I can't test on it */ | 151 | /* Special case for the old PowerBook since I can't test on it */ |
152 | autosave = | 152 | autosave = |
153 | machine_is_compatible("AAPL,3400/2400") || | 153 | of_machine_is_compatible("AAPL,3400/2400") || |
154 | machine_is_compatible("AAPL,3500"); | 154 | of_machine_is_compatible("AAPL,3500"); |
155 | 155 | ||
156 | if (!autosave && | 156 | if (!autosave && |
157 | !pmac_has_backlight_type("pmu") && | 157 | !pmac_has_backlight_type("pmu") && |
158 | !machine_is_compatible("AAPL,PowerBook1998") && | 158 | !of_machine_is_compatible("AAPL,PowerBook1998") && |
159 | !machine_is_compatible("PowerBook1,1")) | 159 | !of_machine_is_compatible("PowerBook1,1")) |
160 | return; | 160 | return; |
161 | 161 | ||
162 | snprintf(name, sizeof(name), "pmubl"); | 162 | snprintf(name, sizeof(name), "pmubl"); |
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index db379c381432..42764849eb78 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -463,8 +463,8 @@ static int __init via_pmu_dev_init(void) | |||
463 | #endif | 463 | #endif |
464 | 464 | ||
465 | #ifdef CONFIG_PPC32 | 465 | #ifdef CONFIG_PPC32 |
466 | if (machine_is_compatible("AAPL,3400/2400") || | 466 | if (of_machine_is_compatible("AAPL,3400/2400") || |
467 | machine_is_compatible("AAPL,3500")) { | 467 | of_machine_is_compatible("AAPL,3500")) { |
468 | int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, | 468 | int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, |
469 | NULL, PMAC_MB_INFO_MODEL, 0); | 469 | NULL, PMAC_MB_INFO_MODEL, 0); |
470 | pmu_battery_count = 1; | 470 | pmu_battery_count = 1; |
@@ -472,8 +472,8 @@ static int __init via_pmu_dev_init(void) | |||
472 | pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; | 472 | pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; |
473 | else | 473 | else |
474 | pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; | 474 | pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; |
475 | } else if (machine_is_compatible("AAPL,PowerBook1998") || | 475 | } else if (of_machine_is_compatible("AAPL,PowerBook1998") || |
476 | machine_is_compatible("PowerBook1,1")) { | 476 | of_machine_is_compatible("PowerBook1,1")) { |
477 | pmu_battery_count = 2; | 477 | pmu_battery_count = 2; |
478 | pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; | 478 | pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; |
479 | pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; | 479 | pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; |
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c index 075b4d99e354..437f55c5d18d 100644 --- a/drivers/macintosh/windfarm_core.c +++ b/drivers/macintosh/windfarm_core.c | |||
@@ -468,9 +468,9 @@ static int __init windfarm_core_init(void) | |||
468 | DBG("wf: core loaded\n"); | 468 | DBG("wf: core loaded\n"); |
469 | 469 | ||
470 | /* Don't register on old machines that use therm_pm72 for now */ | 470 | /* Don't register on old machines that use therm_pm72 for now */ |
471 | if (machine_is_compatible("PowerMac7,2") || | 471 | if (of_machine_is_compatible("PowerMac7,2") || |
472 | machine_is_compatible("PowerMac7,3") || | 472 | of_machine_is_compatible("PowerMac7,3") || |
473 | machine_is_compatible("RackMac3,1")) | 473 | of_machine_is_compatible("RackMac3,1")) |
474 | return -ENODEV; | 474 | return -ENODEV; |
475 | platform_device_register(&wf_platform_device); | 475 | platform_device_register(&wf_platform_device); |
476 | return 0; | 476 | return 0; |
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c index 900aade06198..1a77a7c97d0e 100644 --- a/drivers/macintosh/windfarm_cpufreq_clamp.c +++ b/drivers/macintosh/windfarm_cpufreq_clamp.c | |||
@@ -76,9 +76,9 @@ static int __init wf_cpufreq_clamp_init(void) | |||
76 | struct wf_control *clamp; | 76 | struct wf_control *clamp; |
77 | 77 | ||
78 | /* Don't register on old machines that use therm_pm72 for now */ | 78 | /* Don't register on old machines that use therm_pm72 for now */ |
79 | if (machine_is_compatible("PowerMac7,2") || | 79 | if (of_machine_is_compatible("PowerMac7,2") || |
80 | machine_is_compatible("PowerMac7,3") || | 80 | of_machine_is_compatible("PowerMac7,3") || |
81 | machine_is_compatible("RackMac3,1")) | 81 | of_machine_is_compatible("RackMac3,1")) |
82 | return -ENODEV; | 82 | return -ENODEV; |
83 | 83 | ||
84 | clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); | 84 | clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); |
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index ed6426a10773..d8257d35afde 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c | |||
@@ -239,9 +239,9 @@ static struct i2c_driver wf_lm75_driver = { | |||
239 | static int __init wf_lm75_sensor_init(void) | 239 | static int __init wf_lm75_sensor_init(void) |
240 | { | 240 | { |
241 | /* Don't register on old machines that use therm_pm72 for now */ | 241 | /* Don't register on old machines that use therm_pm72 for now */ |
242 | if (machine_is_compatible("PowerMac7,2") || | 242 | if (of_machine_is_compatible("PowerMac7,2") || |
243 | machine_is_compatible("PowerMac7,3") || | 243 | of_machine_is_compatible("PowerMac7,3") || |
244 | machine_is_compatible("RackMac3,1")) | 244 | of_machine_is_compatible("RackMac3,1")) |
245 | return -ENODEV; | 245 | return -ENODEV; |
246 | return i2c_add_driver(&wf_lm75_driver); | 246 | return i2c_add_driver(&wf_lm75_driver); |
247 | } | 247 | } |
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index a67b349319e9..b486eb929fde 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c | |||
@@ -188,9 +188,9 @@ static struct i2c_driver wf_max6690_driver = { | |||
188 | static int __init wf_max6690_sensor_init(void) | 188 | static int __init wf_max6690_sensor_init(void) |
189 | { | 189 | { |
190 | /* Don't register on old machines that use therm_pm72 for now */ | 190 | /* Don't register on old machines that use therm_pm72 for now */ |
191 | if (machine_is_compatible("PowerMac7,2") || | 191 | if (of_machine_is_compatible("PowerMac7,2") || |
192 | machine_is_compatible("PowerMac7,3") || | 192 | of_machine_is_compatible("PowerMac7,3") || |
193 | machine_is_compatible("RackMac3,1")) | 193 | of_machine_is_compatible("RackMac3,1")) |
194 | return -ENODEV; | 194 | return -ENODEV; |
195 | return i2c_add_driver(&wf_max6690_driver); | 195 | return i2c_add_driver(&wf_max6690_driver); |
196 | } | 196 | } |
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index 73d695dc9e50..e0ee80700cde 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c | |||
@@ -676,7 +676,7 @@ static int __init wf_pm112_init(void) | |||
676 | { | 676 | { |
677 | struct device_node *cpu; | 677 | struct device_node *cpu; |
678 | 678 | ||
679 | if (!machine_is_compatible("PowerMac11,2")) | 679 | if (!of_machine_is_compatible("PowerMac11,2")) |
680 | return -ENODEV; | 680 | return -ENODEV; |
681 | 681 | ||
682 | /* Count the number of CPU cores */ | 682 | /* Count the number of CPU cores */ |
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c index 66ec4fb115bb..947d4afa25ca 100644 --- a/drivers/macintosh/windfarm_pm121.c +++ b/drivers/macintosh/windfarm_pm121.c | |||
@@ -1008,7 +1008,7 @@ static int __init pm121_init(void) | |||
1008 | { | 1008 | { |
1009 | int rc = -ENODEV; | 1009 | int rc = -ENODEV; |
1010 | 1010 | ||
1011 | if (machine_is_compatible("PowerMac12,1")) | 1011 | if (of_machine_is_compatible("PowerMac12,1")) |
1012 | rc = pm121_init_pm(); | 1012 | rc = pm121_init_pm(); |
1013 | 1013 | ||
1014 | if (rc == 0) { | 1014 | if (rc == 0) { |
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c index abbe206474f5..565d5b2adc95 100644 --- a/drivers/macintosh/windfarm_pm81.c +++ b/drivers/macintosh/windfarm_pm81.c | |||
@@ -779,8 +779,8 @@ static int __init wf_smu_init(void) | |||
779 | { | 779 | { |
780 | int rc = -ENODEV; | 780 | int rc = -ENODEV; |
781 | 781 | ||
782 | if (machine_is_compatible("PowerMac8,1") || | 782 | if (of_machine_is_compatible("PowerMac8,1") || |
783 | machine_is_compatible("PowerMac8,2")) | 783 | of_machine_is_compatible("PowerMac8,2")) |
784 | rc = wf_init_pm(); | 784 | rc = wf_init_pm(); |
785 | 785 | ||
786 | if (rc == 0) { | 786 | if (rc == 0) { |
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c index 764c525b2117..bea99168ff35 100644 --- a/drivers/macintosh/windfarm_pm91.c +++ b/drivers/macintosh/windfarm_pm91.c | |||
@@ -711,7 +711,7 @@ static int __init wf_smu_init(void) | |||
711 | { | 711 | { |
712 | int rc = -ENODEV; | 712 | int rc = -ENODEV; |
713 | 713 | ||
714 | if (machine_is_compatible("PowerMac9,1")) | 714 | if (of_machine_is_compatible("PowerMac9,1")) |
715 | rc = wf_init_pm(); | 715 | rc = wf_init_pm(); |
716 | 716 | ||
717 | if (rc == 0) { | 717 | if (rc == 0) { |
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index 9c567b93f417..3c193504bb80 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ b/drivers/macintosh/windfarm_smu_sensors.c | |||
@@ -363,9 +363,9 @@ smu_cpu_power_create(struct wf_sensor *volts, struct wf_sensor *amps) | |||
363 | * I yet have to figure out what's up with 8,2 and will have to | 363 | * I yet have to figure out what's up with 8,2 and will have to |
364 | * adjust for later, unless we can 100% trust the SDB partition... | 364 | * adjust for later, unless we can 100% trust the SDB partition... |
365 | */ | 365 | */ |
366 | if ((machine_is_compatible("PowerMac8,1") || | 366 | if ((of_machine_is_compatible("PowerMac8,1") || |
367 | machine_is_compatible("PowerMac8,2") || | 367 | of_machine_is_compatible("PowerMac8,2") || |
368 | machine_is_compatible("PowerMac9,1")) && | 368 | of_machine_is_compatible("PowerMac9,1")) && |
369 | cpuvcp_version >= 2) { | 369 | cpuvcp_version >= 2) { |
370 | pow->quadratic = 1; | 370 | pow->quadratic = 1; |
371 | DBG("windfarm: CPU Power using quadratic transform\n"); | 371 | DBG("windfarm: CPU Power using quadratic transform\n"); |
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 54abf9e303b7..f1c8cae70b4b 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c | |||
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, | |||
172 | { | 172 | { |
173 | int r = 0; | 173 | int r = 0; |
174 | size_t dummy = 0; | 174 | size_t dummy = 0; |
175 | int overhead_size = | 175 | int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg); |
176 | sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg); | ||
177 | struct dm_ulog_request *tfr = prealloced_ulog_tfr; | 176 | struct dm_ulog_request *tfr = prealloced_ulog_tfr; |
178 | struct receiving_pkg pkg; | 177 | struct receiving_pkg pkg; |
179 | 178 | ||
179 | /* | ||
180 | * Given the space needed to hold the 'struct cn_msg' and | ||
181 | * 'struct dm_ulog_request' - do we have enough payload | ||
182 | * space remaining? | ||
183 | */ | ||
180 | if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { | 184 | if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { |
181 | DMINFO("Size of tfr exceeds preallocated size"); | 185 | DMINFO("Size of tfr exceeds preallocated size"); |
182 | return -EINVAL; | 186 | return -EINVAL; |
@@ -191,7 +195,7 @@ resend: | |||
191 | */ | 195 | */ |
192 | mutex_lock(&dm_ulog_lock); | 196 | mutex_lock(&dm_ulog_lock); |
193 | 197 | ||
194 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); | 198 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); |
195 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); | 199 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); |
196 | tfr->luid = luid; | 200 | tfr->luid = luid; |
197 | tfr->seq = dm_ulog_seq++; | 201 | tfr->seq = dm_ulog_seq++; |
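The one-line change to overhead_size is the actual bug fix: sizeof(struct dm_ulog_request *) measures a pointer (4 or 8 bytes), not the request header, so the old code under-counted the overhead. A tiny sketch of the pitfall with a hypothetical header type, not the real dm_ulog_request layout:

#include <linux/kernel.h>

/* Hypothetical header type; stands in for the real request structure. */
struct hdr {
        char uuid[129];
        unsigned long long seq;
};

/*
 * sizeof(struct hdr *) is the size of a pointer (4 or 8 bytes);
 * sizeof(struct hdr) is the size of the header itself (~144 bytes here).
 * Budgeting the overhead with the pointer size silently under-reserves.
 */
static inline size_t usable_payload(size_t prealloced)
{
        return prealloced - sizeof(struct hdr); /* not sizeof(struct hdr *) */
}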
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ad779bd13aec..6c1046df81f6 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
724 | /* | 724 | /* |
725 | * Dispatch io. | 725 | * Dispatch io. |
726 | */ | 726 | */ |
727 | if (unlikely(ms->log_failure)) { | 727 | if (unlikely(ms->log_failure) && errors_handled(ms)) { |
728 | spin_lock_irq(&ms->lock); | 728 | spin_lock_irq(&ms->lock); |
729 | bio_list_merge(&ms->failures, &sync); | 729 | bio_list_merge(&ms->failures, &sync); |
730 | spin_unlock_irq(&ms->lock); | 730 | spin_unlock_irq(&ms->lock); |
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 5f19ceb6fe91..168bd38f5006 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c | |||
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success) | |||
660 | spin_lock_irq(&rh->region_lock); | 660 | spin_lock_irq(&rh->region_lock); |
661 | if (success) | 661 | if (success) |
662 | list_add(®->list, ®->rh->recovered_regions); | 662 | list_add(®->list, ®->rh->recovered_regions); |
663 | else { | 663 | else |
664 | reg->state = DM_RH_NOSYNC; | ||
665 | list_add(®->list, ®->rh->failed_recovered_regions); | 664 | list_add(®->list, ®->rh->failed_recovered_regions); |
666 | } | 665 | |
667 | spin_unlock_irq(&rh->region_lock); | 666 | spin_unlock_irq(&rh->region_lock); |
668 | 667 | ||
669 | rh->wakeup_workers(rh->context); | 668 | rh->wakeup_workers(rh->context); |
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 7d08879689ac..c097d8a4823d 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, | |||
254 | * Issue the synchronous I/O from a different thread | 254 | * Issue the synchronous I/O from a different thread |
255 | * to avoid generic_make_request recursion. | 255 | * to avoid generic_make_request recursion. |
256 | */ | 256 | */ |
257 | INIT_WORK(&req.work, do_metadata); | 257 | INIT_WORK_ON_STACK(&req.work, do_metadata); |
258 | queue_work(ps->metadata_wq, &req.work); | 258 | queue_work(ps->metadata_wq, &req.work); |
259 | flush_workqueue(ps->metadata_wq); | 259 | flush_workqueue(ps->metadata_wq); |
260 | 260 | ||
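INIT_WORK_ON_STACK() is needed here because the work_struct lives on the caller's stack; with work-queue object debugging enabled, initializing a stack object with plain INIT_WORK() trips a warning. A minimal sketch of the synchronous on-stack work pattern, with made-up names and assuming the INIT_WORK_ON_STACK() macro of this kernel generation:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_req {
        struct work_struct work;
        int result;
};

static void my_worker(struct work_struct *work)
{
        struct my_req *req = container_of(work, struct my_req, work);

        req->result = 0;        /* the real I/O would happen here */
}

/* Run my_worker() synchronously with the request living on the stack. */
static int my_sync_op(struct workqueue_struct *wq)
{
        struct my_req req;

        INIT_WORK_ON_STACK(&req.work, my_worker);
        queue_work(wq, &req.work);
        flush_workqueue(wq);    /* wait until the work item has executed */
        return req.result;
}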
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index e0efc1adcaff..bd58703ee8f6 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
110 | } | 110 | } |
111 | 111 | ||
112 | stripes = simple_strtoul(argv[0], &end, 10); | 112 | stripes = simple_strtoul(argv[0], &end, 10); |
113 | if (*end) { | 113 | if (!stripes || *end) { |
114 | ti->error = "Invalid stripe count"; | 114 | ti->error = "Invalid stripe count"; |
115 | return -EINVAL; | 115 | return -EINVAL; |
116 | } | 116 | } |
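Adding !stripes to the check rejects a zero stripe count as well as trailing junk, which closes off a later division by zero when the stripe width is derived from the count. A small sketch of the same strict-parse idiom, using a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/errno.h>

/* Parse a strictly positive decimal count; 0 on success, -EINVAL otherwise. */
static int parse_count(const char *arg, unsigned int *count)
{
        char *end;
        unsigned long v = simple_strtoul(arg, &end, 10);

        if (!v || *end)         /* zero, empty string, or trailing junk */
                return -EINVAL;

        *count = v;
        return 0;
}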
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index f53392df7b97..f91b40942e07 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c | |||
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = { | |||
80 | }; | 80 | }; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * The sysfs structure is embedded in md struct, nothing to do here | ||
84 | */ | ||
85 | static void dm_sysfs_release(struct kobject *kobj) | ||
86 | { | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * dm kobject is embedded in mapped_device structure | 83 | * dm kobject is embedded in mapped_device structure |
91 | * no need to define release function here | 84 | * no need to define release function here |
92 | */ | 85 | */ |
93 | static struct kobj_type dm_ktype = { | 86 | static struct kobj_type dm_ktype = { |
94 | .sysfs_ops = &dm_sysfs_ops, | 87 | .sysfs_ops = &dm_sysfs_ops, |
95 | .default_attrs = dm_attrs, | 88 | .default_attrs = dm_attrs, |
96 | .release = dm_sysfs_release | ||
97 | }; | 89 | }; |
98 | 90 | ||
99 | /* | 91 | /* |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3167480b532c..aa4e2aa86d49 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq) | |||
1595 | return BLKPREP_OK; | 1595 | return BLKPREP_OK; |
1596 | } | 1596 | } |
1597 | 1597 | ||
1598 | static void map_request(struct dm_target *ti, struct request *clone, | 1598 | /* |
1599 | struct mapped_device *md) | 1599 | * Returns: |
1600 | * 0 : the request has been processed (not requeued) | ||
1601 | * !0 : the request has been requeued | ||
1602 | */ | ||
1603 | static int map_request(struct dm_target *ti, struct request *clone, | ||
1604 | struct mapped_device *md) | ||
1600 | { | 1605 | { |
1601 | int r; | 1606 | int r, requeued = 0; |
1602 | struct dm_rq_target_io *tio = clone->end_io_data; | 1607 | struct dm_rq_target_io *tio = clone->end_io_data; |
1603 | 1608 | ||
1604 | /* | 1609 | /* |
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone, | |||
1625 | case DM_MAPIO_REQUEUE: | 1630 | case DM_MAPIO_REQUEUE: |
1626 | /* The target wants to requeue the I/O */ | 1631 | /* The target wants to requeue the I/O */ |
1627 | dm_requeue_unmapped_request(clone); | 1632 | dm_requeue_unmapped_request(clone); |
1633 | requeued = 1; | ||
1628 | break; | 1634 | break; |
1629 | default: | 1635 | default: |
1630 | if (r > 0) { | 1636 | if (r > 0) { |
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone, | |||
1636 | dm_kill_unmapped_request(clone, r); | 1642 | dm_kill_unmapped_request(clone, r); |
1637 | break; | 1643 | break; |
1638 | } | 1644 | } |
1645 | |||
1646 | return requeued; | ||
1639 | } | 1647 | } |
1640 | 1648 | ||
1641 | /* | 1649 | /* |
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q) | |||
1677 | atomic_inc(&md->pending[rq_data_dir(clone)]); | 1685 | atomic_inc(&md->pending[rq_data_dir(clone)]); |
1678 | 1686 | ||
1679 | spin_unlock(q->queue_lock); | 1687 | spin_unlock(q->queue_lock); |
1680 | map_request(ti, clone, md); | 1688 | if (map_request(ti, clone, md)) |
1689 | goto requeued; | ||
1690 | |||
1681 | spin_lock_irq(q->queue_lock); | 1691 | spin_lock_irq(q->queue_lock); |
1682 | } | 1692 | } |
1683 | 1693 | ||
1684 | goto out; | 1694 | goto out; |
1685 | 1695 | ||
1696 | requeued: | ||
1697 | spin_lock_irq(q->queue_lock); | ||
1698 | |||
1686 | plug_and_out: | 1699 | plug_and_out: |
1687 | if (!elv_queue_empty(q)) | 1700 | if (!elv_queue_empty(q)) |
1688 | /* Some requests still remain, retry later */ | 1701 | /* Some requests still remain, retry later */ |
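The new int return from map_request() exists so the dispatch loop can tell a requeued clone apart from a processed one and re-acquire the queue lock on a dedicated label. A schematic sketch of that 0 / !0 contract with invented constants, not the real DM_MAPIO_* values:

/* Invented return codes; schematic only, not the real DM_MAPIO_* values. */
enum { MAP_OK, MAP_REQUEUE, MAP_ERROR };

/* Returns 0 when the clone was processed, !0 when it was requeued. */
static int dispatch_one(int (*map_fn)(void *clone), void *clone)
{
        int requeued = 0;

        switch (map_fn(clone)) {
        case MAP_OK:            /* submitted or completed in-line */
                break;
        case MAP_REQUEUE:       /* target asked us to try again later */
                requeued = 1;
                break;
        default:                /* treat anything else as an error */
                break;
        }

        return requeued;        /* caller re-takes the queue lock if !0 */
}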
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig index 1b249897c9fb..465295b1d14b 100644 --- a/drivers/media/dvb/dvb-usb/Kconfig +++ b/drivers/media/dvb/dvb-usb/Kconfig | |||
@@ -112,11 +112,13 @@ config DVB_USB_CXUSB | |||
112 | select DVB_MT352 if !DVB_FE_CUSTOMISE | 112 | select DVB_MT352 if !DVB_FE_CUSTOMISE |
113 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 113 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
114 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE | 114 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE |
115 | select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE | ||
116 | select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE | 115 | select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE |
116 | select DVB_ATBM8830 if !DVB_FE_CUSTOMISE | ||
117 | select DVB_LGS8GXX if !DVB_FE_CUSTOMISE | ||
117 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE | 118 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE |
118 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE | 119 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE |
119 | select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE | 120 | select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE |
121 | select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE | ||
120 | help | 122 | help |
121 | Say Y here to support the Conexant USB2.0 hybrid reference design. | 123 | Say Y here to support the Conexant USB2.0 hybrid reference design. |
122 | Currently, only DVB and ATSC modes are supported, analog mode | 124 | Currently, only DVB and ATSC modes are supported, analog mode |
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c index 3051b64aa17c..445fa1068064 100644 --- a/drivers/media/dvb/frontends/l64781.c +++ b/drivers/media/dvb/frontends/l64781.c | |||
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa | |||
192 | spi_bias *= qam_tab[p->constellation]; | 192 | spi_bias *= qam_tab[p->constellation]; |
193 | spi_bias /= p->code_rate_HP + 1; | 193 | spi_bias /= p->code_rate_HP + 1; |
194 | spi_bias /= (guard_tab[p->guard_interval] + 32); | 194 | spi_bias /= (guard_tab[p->guard_interval] + 32); |
195 | spi_bias *= 1000ULL; | 195 | spi_bias *= 1000; |
196 | spi_bias /= 1000ULL + ppm/1000; | 196 | spi_bias /= 1000 + ppm/1000; |
197 | spi_bias *= p->code_rate_HP; | 197 | spi_bias *= p->code_rate_HP; |
198 | 198 | ||
199 | val0x04 = (p->transmission_mode << 2) | p->guard_interval; | 199 | val0x04 = (p->transmission_mode << 2) | p->guard_interval; |
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index 3182a406bdd1..ae08b077fd04 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c | |||
@@ -4461,6 +4461,7 @@ static int __devinit bttv_probe(struct pci_dev *dev, | |||
4461 | request_modules(btv); | 4461 | request_modules(btv); |
4462 | } | 4462 | } |
4463 | 4463 | ||
4464 | init_bttv_i2c_ir(btv); | ||
4464 | bttv_input_init(btv); | 4465 | bttv_input_init(btv); |
4465 | 4466 | ||
4466 | /* everything is fine */ | 4467 | /* everything is fine */ |
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c index 63aa31a041e8..407fa61e4cda 100644 --- a/drivers/media/video/bt8xx/bttv-i2c.c +++ b/drivers/media/video/bt8xx/bttv-i2c.c | |||
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv *btv) | |||
388 | if (0 == btv->i2c_rc && i2c_scan) | 388 | if (0 == btv->i2c_rc && i2c_scan) |
389 | do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); | 389 | do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); |
390 | 390 | ||
391 | /* Instantiate the IR receiver device, if present */ | 391 | return btv->i2c_rc; |
392 | } | ||
393 | |||
394 | /* Instantiate the I2C IR receiver device, if present */ | ||
395 | void __devinit init_bttv_i2c_ir(struct bttv *btv) | ||
396 | { | ||
392 | if (0 == btv->i2c_rc) { | 397 | if (0 == btv->i2c_rc) { |
393 | struct i2c_board_info info; | 398 | struct i2c_board_info info; |
394 | /* The external IR receiver is at i2c address 0x34 (0x35 for | 399 | /* The external IR receiver is at i2c address 0x34 (0x35 for |
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv *btv) | |||
408 | strlcpy(info.type, "ir_video", I2C_NAME_SIZE); | 413 | strlcpy(info.type, "ir_video", I2C_NAME_SIZE); |
409 | i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list); | 414 | i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list); |
410 | } | 415 | } |
411 | return btv->i2c_rc; | ||
412 | } | 416 | } |
413 | 417 | ||
414 | int __devexit fini_bttv_i2c(struct bttv *btv) | 418 | int __devexit fini_bttv_i2c(struct bttv *btv) |
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h index a1d0e9c9f286..6cccc2a17eee 100644 --- a/drivers/media/video/bt8xx/bttvp.h +++ b/drivers/media/video/bt8xx/bttvp.h | |||
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug; | |||
279 | extern unsigned int bttv_gpio; | 279 | extern unsigned int bttv_gpio; |
280 | extern void bttv_gpio_tracking(struct bttv *btv, char *comment); | 280 | extern void bttv_gpio_tracking(struct bttv *btv, char *comment); |
281 | extern int init_bttv_i2c(struct bttv *btv); | 281 | extern int init_bttv_i2c(struct bttv *btv); |
282 | extern void init_bttv_i2c_ir(struct bttv *btv); | ||
282 | extern int fini_bttv_i2c(struct bttv *btv); | 283 | extern int fini_bttv_i2c(struct bttv *btv); |
283 | 284 | ||
284 | #define bttv_printk if (bttv_verbose) printk | 285 | #define bttv_printk if (bttv_verbose) printk |
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c index fc4dd6045720..7438f8d775ba 100644 --- a/drivers/media/video/mt9t112.c +++ b/drivers/media/video/mt9t112.c | |||
@@ -514,7 +514,7 @@ static int mt9t112_init_pll(const struct i2c_client *client) | |||
514 | /* poll to verify out of standby. Must Poll this bit */ | 514 | /* poll to verify out of standby. Must Poll this bit */ |
515 | for (i = 0; i < 100; i++) { | 515 | for (i = 0; i < 100; i++) { |
516 | mt9t112_reg_read(data, client, 0x0018); | 516 | mt9t112_reg_read(data, client, 0x0018); |
517 | if (0x4000 & data) | 517 | if (!(0x4000 & data)) |
518 | break; | 518 | break; |
519 | 519 | ||
520 | mdelay(10); | 520 | mdelay(10); |
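The mt9t112 fix inverts the test so the loop breaks only once bit 0x4000 has cleared, that is, once the sensor has actually left standby. A generic sketch of a bounded register-poll loop of that shape; the read_reg callback and bit name are placeholders:

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define STANDBY_BIT     0x4000

/* Poll until STANDBY_BIT clears; 0 on success, -ETIMEDOUT otherwise. */
static int wait_out_of_standby(u16 (*read_reg)(void), int tries)
{
        while (tries--) {
                if (!(read_reg() & STANDBY_BIT))
                        return 0;       /* bit cleared: out of standby */
                mdelay(10);
        }
        return -ETIMEDOUT;
}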
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c index 50b415e07eda..f7f7e04cf485 100644 --- a/drivers/media/video/pwc/pwc-ctrl.c +++ b/drivers/media/video/pwc/pwc-ctrl.c | |||
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value) | |||
753 | buf[0] = 0xff; /* fixed */ | 753 | buf[0] = 0xff; /* fixed */ |
754 | 754 | ||
755 | ret = send_control_msg(pdev, | 755 | ret = send_control_msg(pdev, |
756 | SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf)); | 756 | SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1); |
757 | 757 | ||
758 | if (!mode && ret >= 0) { | 758 | if (!mode && ret >= 0) { |
759 | if (value < 0) | 759 | if (value < 0) |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index fee6eee7ae5b..006cb2efcd22 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, | |||
296 | req_hdr->opcode = opcode; | 296 | req_hdr->opcode = opcode; |
297 | req_hdr->subsystem = subsystem; | 297 | req_hdr->subsystem = subsystem; |
298 | req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); | 298 | req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); |
299 | req_hdr->version = 0; | ||
299 | } | 300 | } |
300 | 301 | ||
301 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, | 302 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index d29bb532eccf..765543663a4f 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -4006,11 +4006,21 @@ check_page: | |||
4006 | } | 4006 | } |
4007 | } | 4007 | } |
4008 | 4008 | ||
4009 | if (!buffer_info->dma) | 4009 | if (!buffer_info->dma) { |
4010 | buffer_info->dma = pci_map_page(pdev, | 4010 | buffer_info->dma = pci_map_page(pdev, |
4011 | buffer_info->page, 0, | 4011 | buffer_info->page, 0, |
4012 | buffer_info->length, | 4012 | buffer_info->length, |
4013 | PCI_DMA_FROMDEVICE); | 4013 | PCI_DMA_FROMDEVICE); |
4014 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { | ||
4015 | put_page(buffer_info->page); | ||
4016 | dev_kfree_skb(skb); | ||
4017 | buffer_info->page = NULL; | ||
4018 | buffer_info->skb = NULL; | ||
4019 | buffer_info->dma = 0; | ||
4020 | adapter->alloc_rx_buff_failed++; | ||
4021 | break; /* while !buffer_info->skb */ | ||
4022 | } | ||
4023 | } | ||
4014 | 4024 | ||
4015 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 4025 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
4016 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 4026 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
@@ -4101,6 +4111,13 @@ map_skb: | |||
4101 | skb->data, | 4111 | skb->data, |
4102 | buffer_info->length, | 4112 | buffer_info->length, |
4103 | PCI_DMA_FROMDEVICE); | 4113 | PCI_DMA_FROMDEVICE); |
4114 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { | ||
4115 | dev_kfree_skb(skb); | ||
4116 | buffer_info->skb = NULL; | ||
4117 | buffer_info->dma = 0; | ||
4118 | adapter->alloc_rx_buff_failed++; | ||
4119 | break; /* while !buffer_info->skb */ | ||
4120 | } | ||
4104 | 4121 | ||
4105 | /* | 4122 | /* |
4106 | * XXX if it was allocated cleanly it will never map to a | 4123 | * XXX if it was allocated cleanly it will never map to a |
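Both e1000 hunks add the same missing step: after pci_map_page()/pci_map_single(), the driver must check pci_dma_mapping_error() before handing the address to hardware, and unwind the buffer if the mapping failed. A condensed sketch of the pattern with a hypothetical refill helper, using the legacy PCI DMA API of this era:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map one receive skb; 0 on success, -ENOMEM if the mapping failed. */
static int map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                      unsigned int len, dma_addr_t *dma)
{
        *dma = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, *dma)) {
                dev_kfree_skb(skb);     /* the skb must not be reused */
                *dma = 0;
                return -ENOMEM;
        }
        return 0;
}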
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 3103f4165311..35a06b47587b 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) | |||
357 | u32 fctrl_reg; | 357 | u32 fctrl_reg; |
358 | u32 rmcs_reg; | 358 | u32 rmcs_reg; |
359 | u32 reg; | 359 | u32 reg; |
360 | u32 link_speed = 0; | ||
361 | bool link_up; | ||
360 | 362 | ||
361 | #ifdef CONFIG_DCB | 363 | #ifdef CONFIG_DCB |
362 | if (hw->fc.requested_mode == ixgbe_fc_pfc) | 364 | if (hw->fc.requested_mode == ixgbe_fc_pfc) |
363 | goto out; | 365 | goto out; |
364 | 366 | ||
365 | #endif /* CONFIG_DCB */ | 367 | #endif /* CONFIG_DCB */ |
368 | /* | ||
369 | * On 82598 having Rx FC on causes resets while doing 1G | ||
370 | * so if it's on turn it off once we know link_speed. For | ||
371 | * more details see 82598 Specification update. | ||
372 | */ | ||
373 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | ||
374 | if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { | ||
375 | switch (hw->fc.requested_mode) { | ||
376 | case ixgbe_fc_full: | ||
377 | hw->fc.requested_mode = ixgbe_fc_tx_pause; | ||
378 | break; | ||
379 | case ixgbe_fc_rx_pause: | ||
380 | hw->fc.requested_mode = ixgbe_fc_none; | ||
381 | break; | ||
382 | default: | ||
383 | /* no change */ | ||
384 | break; | ||
385 | } | ||
386 | } | ||
387 | |||
366 | /* Negotiate the fc mode to use */ | 388 | /* Negotiate the fc mode to use */ |
367 | ret_val = ixgbe_fc_autoneg(hw); | 389 | ret_val = ixgbe_fc_autoneg(hw); |
368 | if (ret_val) | 390 | if (ret_val) |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 7b7c8486c0bf..951b73cf5ca2 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -5763,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5763 | if (err) | 5763 | if (err) |
5764 | goto err_sw_init; | 5764 | goto err_sw_init; |
5765 | 5765 | ||
5766 | /* Make it possible for the adapter to be woken up via WOL */ | ||
5767 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | ||
5768 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | ||
5769 | |||
5766 | /* | 5770 | /* |
5767 | * If there is a fan on this device and it has failed log the | 5771 | * If there is a fan on this device and it has failed log the |
5768 | * failure. | 5772 | * failure. |
diff --git a/drivers/net/mace.c b/drivers/net/mace.c index d9fbad386389..43aea91e3369 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c | |||
@@ -206,7 +206,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i | |||
206 | mp->port_aaui = port_aaui; | 206 | mp->port_aaui = port_aaui; |
207 | else { | 207 | else { |
208 | /* Apple Network Server uses the AAUI port */ | 208 | /* Apple Network Server uses the AAUI port */ |
209 | if (machine_is_compatible("AAPL,ShinerESB")) | 209 | if (of_machine_is_compatible("AAPL,ShinerESB")) |
210 | mp->port_aaui = 1; | 210 | mp->port_aaui = 1; |
211 | else { | 211 | else { |
212 | #ifdef CONFIG_MACE_AAUI_PORT | 212 | #ifdef CONFIG_MACE_AAUI_PORT |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 103e8b0e2a0d..46997e177ee3 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2284 | fail2: | 2284 | fail2: |
2285 | efx_fini_struct(efx); | 2285 | efx_fini_struct(efx); |
2286 | fail1: | 2286 | fail1: |
2287 | WARN_ON(rc > 0); | ||
2287 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); | 2288 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); |
2288 | free_netdev(net_dev); | 2289 | free_netdev(net_dev); |
2289 | return rc; | 2290 | return rc; |
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index bf0b96af5334..5712fddd72f2 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c | |||
@@ -29,6 +29,15 @@ | |||
29 | #define FALCON_BOARD_SFN4111T 0x51 | 29 | #define FALCON_BOARD_SFN4111T 0x51 |
30 | #define FALCON_BOARD_SFN4112F 0x52 | 30 | #define FALCON_BOARD_SFN4112F 0x52 |
31 | 31 | ||
32 | /* Board temperature is about 15°C above ambient when air flow is | ||
33 | * limited. */ | ||
34 | #define FALCON_BOARD_TEMP_BIAS 15 | ||
35 | |||
36 | /* SFC4000 datasheet says: 'The maximum permitted junction temperature | ||
37 | * is 125°C; the thermal design of the environment for the SFC4000 | ||
38 | * should aim to keep this well below 100°C.' */ | ||
39 | #define FALCON_JUNC_TEMP_MAX 90 | ||
40 | |||
32 | /***************************************************************************** | 41 | /***************************************************************************** |
33 | * Support for LM87 sensor chip used on several boards | 42 | * Support for LM87 sensor chip used on several boards |
34 | */ | 43 | */ |
@@ -548,16 +557,16 @@ fail_hwmon: | |||
548 | static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ | 557 | static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ |
549 | 558 | ||
550 | static const u8 sfe4002_lm87_regs[] = { | 559 | static const u8 sfe4002_lm87_regs[] = { |
551 | LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ | 560 | LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ |
552 | LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ | 561 | LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ |
553 | LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ | 562 | LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ |
554 | LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */ | 563 | LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */ |
555 | LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ | 564 | LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ |
556 | LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ | 565 | LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ |
557 | LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */ | 566 | LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */ |
558 | LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ | 567 | LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ |
559 | LM87_TEMP_INT_LIMITS(10, 60), /* board */ | 568 | LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS), |
560 | LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ | 569 | LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), |
561 | 0 | 570 | 0 |
562 | }; | 571 | }; |
563 | 572 | ||
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx) | |||
619 | static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ | 628 | static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ |
620 | 629 | ||
621 | static const u8 sfn4112f_lm87_regs[] = { | 630 | static const u8 sfn4112f_lm87_regs[] = { |
622 | LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ | 631 | LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ |
623 | LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ | 632 | LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ |
624 | LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ | 633 | LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ |
625 | LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ | 634 | LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ |
626 | LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ | 635 | LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ |
627 | LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ | 636 | LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ |
628 | LM87_TEMP_INT_LIMITS(10, 60), /* board */ | 637 | LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS), |
629 | LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ | 638 | LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), |
630 | 0 | 639 | 0 |
631 | }; | 640 | }; |
632 | 641 | ||
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 9f035b9f0350..f66b3da6ddff 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c | |||
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
127 | efx_dword_t reg; | 127 | efx_dword_t reg; |
128 | 128 | ||
129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ | 129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ |
130 | rc = efx_mcdi_poll_reboot(efx); | 130 | rc = -efx_mcdi_poll_reboot(efx); |
131 | if (rc) | 131 | if (rc) |
132 | goto out; | 132 | goto out; |
133 | 133 | ||
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index e0d13a451019..67eec7a6e487 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c | |||
@@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx) | |||
320 | 320 | ||
321 | falcon_board(efx)->type->init_phy(efx); | 321 | falcon_board(efx)->type->init_phy(efx); |
322 | 322 | ||
323 | return rc; | 323 | return 0; |
324 | 324 | ||
325 | fail: | 325 | fail: |
326 | EFX_ERR(efx, "PHY reset timed out\n"); | 326 | EFX_ERR(efx, "PHY reset timed out\n"); |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 75a669d48e5e..d71c1976072e 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) | |||
1437 | /* Transmit complete. */ | 1437 | /* Transmit complete. */ |
1438 | lp->lstats.tx_ints++; | 1438 | lp->lstats.tx_ints++; |
1439 | tc35815_txdone(dev); | 1439 | tc35815_txdone(dev); |
1440 | netif_wake_queue(dev); | ||
1441 | if (ret < 0) | 1440 | if (ret < 0) |
1442 | ret = 0; | 1441 | ret = 0; |
1443 | } | 1442 | } |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 4f27f022fbf7..5f3b9eaeb04f 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -584,6 +584,11 @@ static const struct usb_device_id products [] = { | |||
584 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | 584 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), |
585 | .driver_info = (unsigned long) &mbm_info, | 585 | .driver_info = (unsigned long) &mbm_info, |
586 | }, { | 586 | }, { |
587 | /* Ericsson C3607w ver 2 */ | ||
588 | USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM, | ||
589 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | ||
590 | .driver_info = (unsigned long) &mbm_info, | ||
591 | }, { | ||
587 | /* Toshiba F3507g */ | 592 | /* Toshiba F3507g */ |
588 | USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, | 593 | USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, |
589 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | 594 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index c93f58f5c6f2..317aa34b21cf 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status) | |||
1877 | /** | 1877 | /** |
1878 | * tx_srv - transmit interrupt service | 1878 | * tx_srv - transmit interrupt service |
1879 | * @vptr: Velocity | 1879 | * @vptr: Velocity |
1880 | * @status: | ||
1881 | * | 1880 | * |
1882 | * Scan the queues looking for transmitted packets that | 1881 | * Scan the queues looking for transmitted packets that |
1883 | * we can complete and clean up. Update any statistics as | 1882 | * we can complete and clean up. Update any statistics as |
1884 | * necessary. | 1883 | * necessary. |
1885 | */ | 1884 | */ |
1886 | static int velocity_tx_srv(struct velocity_info *vptr, u32 status) | 1885 | static int velocity_tx_srv(struct velocity_info *vptr) |
1887 | { | 1886 | { |
1888 | struct tx_desc *td; | 1887 | struct tx_desc *td; |
1889 | int qnum; | 1888 | int qnum; |
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
2090 | /** | 2089 | /** |
2091 | * velocity_rx_srv - service RX interrupt | 2090 | * velocity_rx_srv - service RX interrupt |
2092 | * @vptr: velocity | 2091 | * @vptr: velocity |
2093 | * @status: adapter status (unused) | ||
2094 | * | 2092 | * |
2095 | * Walk the receive ring of the velocity adapter and remove | 2093 | * Walk the receive ring of the velocity adapter and remove |
2096 | * any received packets from the receive queue. Hand the ring | 2094 | * any received packets from the receive queue. Hand the ring |
2097 | * slots back to the adapter for reuse. | 2095 | * slots back to the adapter for reuse. |
2098 | */ | 2096 | */ |
2099 | static int velocity_rx_srv(struct velocity_info *vptr, int status, | 2097 | static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) |
2100 | int budget_left) | ||
2101 | { | 2098 | { |
2102 | struct net_device_stats *stats = &vptr->dev->stats; | 2099 | struct net_device_stats *stats = &vptr->dev->stats; |
2103 | int rd_curr = vptr->rx.curr; | 2100 | int rd_curr = vptr->rx.curr; |
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget) | |||
2151 | struct velocity_info *vptr = container_of(napi, | 2148 | struct velocity_info *vptr = container_of(napi, |
2152 | struct velocity_info, napi); | 2149 | struct velocity_info, napi); |
2153 | unsigned int rx_done; | 2150 | unsigned int rx_done; |
2154 | u32 isr_status; | 2151 | unsigned long flags; |
2155 | |||
2156 | spin_lock(&vptr->lock); | ||
2157 | isr_status = mac_read_isr(vptr->mac_regs); | ||
2158 | |||
2159 | /* Ack the interrupt */ | ||
2160 | mac_write_isr(vptr->mac_regs, isr_status); | ||
2161 | if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) | ||
2162 | velocity_error(vptr, isr_status); | ||
2163 | 2152 | ||
2153 | spin_lock_irqsave(&vptr->lock, flags); | ||
2164 | /* | 2154 | /* |
2165 | * Do rx and tx twice for performance (taken from the VIA | 2155 | * Do rx and tx twice for performance (taken from the VIA |
2166 | * out-of-tree driver). | 2156 | * out-of-tree driver). |
2167 | */ | 2157 | */ |
2168 | rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); | 2158 | rx_done = velocity_rx_srv(vptr, budget / 2); |
2169 | velocity_tx_srv(vptr, isr_status); | 2159 | velocity_tx_srv(vptr); |
2170 | rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); | 2160 | rx_done += velocity_rx_srv(vptr, budget - rx_done); |
2171 | velocity_tx_srv(vptr, isr_status); | 2161 | velocity_tx_srv(vptr); |
2172 | |||
2173 | spin_unlock(&vptr->lock); | ||
2174 | 2162 | ||
2175 | /* If budget not fully consumed, exit the polling mode */ | 2163 | /* If budget not fully consumed, exit the polling mode */ |
2176 | if (rx_done < budget) { | 2164 | if (rx_done < budget) { |
2177 | napi_complete(napi); | 2165 | napi_complete(napi); |
2178 | mac_enable_int(vptr->mac_regs); | 2166 | mac_enable_int(vptr->mac_regs); |
2179 | } | 2167 | } |
2168 | spin_unlock_irqrestore(&vptr->lock, flags); | ||
2180 | 2169 | ||
2181 | return rx_done; | 2170 | return rx_done; |
2182 | } | 2171 | } |
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance) | |||
2206 | return IRQ_NONE; | 2195 | return IRQ_NONE; |
2207 | } | 2196 | } |
2208 | 2197 | ||
2198 | /* Ack the interrupt */ | ||
2199 | mac_write_isr(vptr->mac_regs, isr_status); | ||
2200 | |||
2209 | if (likely(napi_schedule_prep(&vptr->napi))) { | 2201 | if (likely(napi_schedule_prep(&vptr->napi))) { |
2210 | mac_disable_int(vptr->mac_regs); | 2202 | mac_disable_int(vptr->mac_regs); |
2211 | __napi_schedule(&vptr->napi); | 2203 | __napi_schedule(&vptr->napi); |
2212 | } | 2204 | } |
2205 | |||
2206 | if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) | ||
2207 | velocity_error(vptr, isr_status); | ||
2208 | |||
2213 | spin_unlock(&vptr->lock); | 2209 | spin_unlock(&vptr->lock); |
2214 | 2210 | ||
2215 | return IRQ_HANDLED; | 2211 | return IRQ_HANDLED; |
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev) | |||
3100 | velocity_init_registers(vptr, VELOCITY_INIT_WOL); | 3096 | velocity_init_registers(vptr, VELOCITY_INIT_WOL); |
3101 | mac_disable_int(vptr->mac_regs); | 3097 | mac_disable_int(vptr->mac_regs); |
3102 | 3098 | ||
3103 | velocity_tx_srv(vptr, 0); | 3099 | velocity_tx_srv(vptr); |
3104 | 3100 | ||
3105 | for (i = 0; i < vptr->tx.numq; i++) { | 3101 | for (i = 0; i < vptr->tx.numq; i++) { |
3106 | if (vptr->tx.used[i]) | 3102 | if (vptr->tx.used[i]) |
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev, | |||
3344 | { | 3340 | { |
3345 | struct velocity_info *vptr = netdev_priv(dev); | 3341 | struct velocity_info *vptr = netdev_priv(dev); |
3346 | int max_us = 0x3f * 64; | 3342 | int max_us = 0x3f * 64; |
3343 | unsigned long flags; | ||
3347 | 3344 | ||
3348 | /* 6 bits of */ | 3345 | /* 6 bits of */ |
3349 | if (ecmd->tx_coalesce_usecs > max_us) | 3346 | if (ecmd->tx_coalesce_usecs > max_us) |
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev, | |||
3365 | ecmd->tx_coalesce_usecs); | 3362 | ecmd->tx_coalesce_usecs); |
3366 | 3363 | ||
3367 | /* Setup the interrupt suppression and queue timers */ | 3364 | /* Setup the interrupt suppression and queue timers */ |
3365 | spin_lock_irqsave(&vptr->lock, flags); | ||
3368 | mac_disable_int(vptr->mac_regs); | 3366 | mac_disable_int(vptr->mac_regs); |
3369 | setup_adaptive_interrupts(vptr); | 3367 | setup_adaptive_interrupts(vptr); |
3370 | setup_queue_timers(vptr); | 3368 | setup_queue_timers(vptr); |
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev, | |||
3372 | mac_write_int_mask(vptr->int_mask, vptr->mac_regs); | 3370 | mac_write_int_mask(vptr->int_mask, vptr->mac_regs); |
3373 | mac_clear_isr(vptr->mac_regs); | 3371 | mac_clear_isr(vptr->mac_regs); |
3374 | mac_enable_int(vptr->mac_regs); | 3372 | mac_enable_int(vptr->mac_regs); |
3373 | spin_unlock_irqrestore(&vptr->lock, flags); | ||
3375 | 3374 | ||
3376 | return 0; | 3375 | return 0; |
3377 | } | 3376 | } |
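For readers tracing the velocity locking change above: the NAPI poll routine now holds the driver lock with local interrupts disabled for the whole RX/TX service pass and only re-arms the chip interrupt before dropping it, while the hard IRQ handler acks the ISR bits up front. The sketch below is a minimal illustration of that pattern only; example_priv, example_rx, example_tx and example_enable_irqs are invented stand-ins for the driver's own symbols, not code from this patch.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t lock;
	struct napi_struct napi;
	/* ... rest of the driver state ... */
};

/* Implemented elsewhere in the hypothetical driver. */
static int example_rx(struct example_priv *priv, int budget);
static void example_tx(struct example_priv *priv);
static void example_enable_irqs(struct example_priv *priv);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	unsigned long flags;
	int done;

	/* Disable local IRQs for the whole service pass so the hard IRQ
	 * handler cannot race with RX/TX reclaim on the same lock. */
	spin_lock_irqsave(&priv->lock, flags);
	done = example_rx(priv, budget);
	example_tx(priv);

	if (done < budget) {
		/* Budget not exhausted: leave polling mode and re-arm the
		 * device interrupt before releasing the lock. */
		napi_complete(napi);
		example_enable_irqs(priv);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return done;
}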
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index fa12b9060b0b..29bf33692f71 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, | |||
1615 | bf->bf_frmlen -= padsize; | 1615 | bf->bf_frmlen -= padsize; |
1616 | } | 1616 | } |
1617 | 1617 | ||
1618 | if (conf_is_ht(&hw->conf) && !is_pae(skb)) | 1618 | if (conf_is_ht(&hw->conf)) |
1619 | bf->bf_state.bf_type |= BUF_HT; | 1619 | bf->bf_state.bf_type |= BUF_HT; |
1620 | 1620 | ||
1621 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); | 1621 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); |
@@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1701 | goto tx_done; | 1701 | goto tx_done; |
1702 | } | 1702 | } |
1703 | 1703 | ||
1704 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { | 1704 | if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { |
1705 | /* | 1705 | /* |
1706 | * Try aggregation if it's a unicast data frame | 1706 | * Try aggregation if it's a unicast data frame |
1707 | * and the destination is HT capable. | 1707 | * and the destination is HT capable. |
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index fe3bf9491997..c484cc253892 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -115,6 +115,7 @@ | |||
115 | #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ | 115 | #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ |
116 | #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ | 116 | #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ |
117 | #define B43_MMIO_RNG 0x65A | 117 | #define B43_MMIO_RNG 0x65A |
118 | #define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ | ||
118 | #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ | 119 | #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ |
119 | #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 | 120 | #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 |
120 | #define B43_MMIO_POWERUP_DELAY 0x6A8 | 121 | #define B43_MMIO_POWERUP_DELAY 0x6A8 |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 4c41cfe44f26..490fb45d1d05 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev) | |||
628 | static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) | 628 | static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) |
629 | { | 629 | { |
630 | /* slot_time is in usec. */ | 630 | /* slot_time is in usec. */ |
631 | if (dev->phy.type != B43_PHYTYPE_G) | 631 | /* This test used to exit for all but a G PHY. */ |
632 | if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) | ||
632 | return; | 633 | return; |
633 | b43_write16(dev, 0x684, 510 + slot_time); | 634 | b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); |
634 | b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); | 635 | /* Shared memory location 0x0010 is the slot time and should be |
636 | * set to slot_time; however, this register is initially 0 and changing | ||
637 | * the value adversely affects the transmit rate for BCM4311 | ||
638 | * devices. Until this behavior is understood, delete this step | ||
639 | * | ||
640 | * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); | ||
641 | */ | ||
635 | } | 642 | } |
636 | 643 | ||
637 | static void b43_short_slot_timing_enable(struct b43_wldev *dev) | 644 | static void b43_short_slot_timing_enable(struct b43_wldev *dev) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 9b4b8b5c7574..31462813bac0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2008 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " | 2008 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " |
2009 | "%d index %d\n", scd_ssn , index); | 2009 | "%d index %d\n", scd_ssn , index); |
2010 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2010 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2011 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 2011 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
2012 | 2012 | ||
2013 | if (priv->mac80211_registered && | 2013 | if (priv->mac80211_registered && |
2014 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 2014 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index de45f308b744..cffaae772d51 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv, | |||
1125 | scd_ssn , index, txq_id, txq->swq_id); | 1125 | scd_ssn , index, txq_id, txq->swq_id); |
1126 | 1126 | ||
1127 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 1127 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
1128 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 1128 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
1129 | 1129 | ||
1130 | if (priv->mac80211_registered && | 1130 | if (priv->mac80211_registered && |
1131 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 1131 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv, | |||
1153 | tx_resp->failure_frame); | 1153 | tx_resp->failure_frame); |
1154 | 1154 | ||
1155 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 1155 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
1156 | if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) | 1156 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
1157 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | ||
1158 | 1157 | ||
1159 | if (priv->mac80211_registered && | 1158 | if (priv->mac80211_registered && |
1160 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | 1159 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) |
1161 | iwl_wake_queue(priv, txq_id); | 1160 | iwl_wake_queue(priv, txq_id); |
1162 | } | 1161 | } |
1163 | 1162 | ||
1164 | if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) | 1163 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); |
1165 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | ||
1166 | 1164 | ||
1167 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | 1165 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) |
1168 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); | 1166 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 5461f105bd2d..f36f804804fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -2745,6 +2745,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) | |||
2745 | priv->staging_rxon.flags = 0; | 2745 | priv->staging_rxon.flags = 0; |
2746 | 2746 | ||
2747 | iwl_set_rxon_channel(priv, conf->channel); | 2747 | iwl_set_rxon_channel(priv, conf->channel); |
2748 | iwl_set_rxon_ht(priv, ht_conf); | ||
2748 | 2749 | ||
2749 | iwl_set_flags_for_band(priv, conf->channel->band); | 2750 | iwl_set_flags_for_band(priv, conf->channel->band); |
2750 | spin_unlock_irqrestore(&priv->lock, flags); | 2751 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 27ca859e7453..b69e972671b2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv); | |||
446 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, | 446 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, |
447 | struct iwl_tx_queue *txq); | 447 | struct iwl_tx_queue *txq); |
448 | int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 448 | int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
449 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, | ||
450 | int sta_id, int tid, int freed); | ||
449 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | 451 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
450 | int slots_num, u32 txq_id); | 452 | int slots_num, u32 txq_id); |
451 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); | 453 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 6f36b6e79f5e..2dbce85404aa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c | |||
@@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, | |||
928 | if (ieee80211_is_mgmt(fc) || | 928 | if (ieee80211_is_mgmt(fc) || |
929 | ieee80211_has_protected(fc) || | 929 | ieee80211_has_protected(fc) || |
930 | ieee80211_has_morefrags(fc) || | 930 | ieee80211_has_morefrags(fc) || |
931 | le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) | 931 | le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG || |
932 | (ieee80211_is_data_qos(fc) && | ||
933 | *ieee80211_get_qos_ctl(hdr) & | ||
934 | IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)) | ||
932 | ret = skb_linearize(skb); | 935 | ret = skb_linearize(skb); |
933 | else | 936 | else |
934 | ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? | 937 | ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 87ce2bd292c7..8f4071562857 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) | |||
120 | EXPORT_SYMBOL(iwl_txq_update_write_ptr); | 120 | EXPORT_SYMBOL(iwl_txq_update_write_ptr); |
121 | 121 | ||
122 | 122 | ||
123 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, | ||
124 | int sta_id, int tid, int freed) | ||
125 | { | ||
126 | if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) | ||
127 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | ||
128 | else { | ||
129 | IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n", | ||
130 | priv->stations[sta_id].tid[tid].tfds_in_queue, | ||
131 | freed); | ||
132 | priv->stations[sta_id].tid[tid].tfds_in_queue = 0; | ||
133 | } | ||
134 | } | ||
135 | EXPORT_SYMBOL(iwl_free_tfds_in_queue); | ||
136 | |||
123 | /** | 137 | /** |
124 | * iwl_tx_queue_free - Deallocate DMA queue. | 138 | * iwl_tx_queue_free - Deallocate DMA queue. |
125 | * @txq: Transmit queue to deallocate. | 139 | * @txq: Transmit queue to deallocate. |
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | |||
1131 | struct iwl_queue *q = &txq->q; | 1145 | struct iwl_queue *q = &txq->q; |
1132 | struct iwl_tx_info *tx_info; | 1146 | struct iwl_tx_info *tx_info; |
1133 | int nfreed = 0; | 1147 | int nfreed = 0; |
1148 | struct ieee80211_hdr *hdr; | ||
1134 | 1149 | ||
1135 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { | 1150 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { |
1136 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " | 1151 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " |
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | |||
1145 | 1160 | ||
1146 | tx_info = &txq->txb[txq->q.read_ptr]; | 1161 | tx_info = &txq->txb[txq->q.read_ptr]; |
1147 | iwl_tx_status(priv, tx_info->skb[0]); | 1162 | iwl_tx_status(priv, tx_info->skb[0]); |
1163 | |||
1164 | hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; | ||
1165 | if (hdr && ieee80211_is_data_qos(hdr->frame_control)) | ||
1166 | nfreed++; | ||
1148 | tx_info->skb[0] = NULL; | 1167 | tx_info->skb[0] = NULL; |
1149 | 1168 | ||
1150 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) | 1169 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) |
1151 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); | 1170 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); |
1152 | 1171 | ||
1153 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); | 1172 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); |
1154 | nfreed++; | ||
1155 | } | 1173 | } |
1156 | return nfreed; | 1174 | return nfreed; |
1157 | } | 1175 | } |
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | |||
1559 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | 1577 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { |
1560 | /* calculate mac80211 ampdu sw queue to wake */ | 1578 | /* calculate mac80211 ampdu sw queue to wake */ |
1561 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); | 1579 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); |
1562 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 1580 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
1563 | 1581 | ||
1564 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && | 1582 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && |
1565 | priv->mac80211_registered && | 1583 | priv->mac80211_registered && |
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index 6d6ed7485175..f727b4a83196 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c | |||
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, | |||
794 | } | 794 | } |
795 | 795 | ||
796 | bss->bss = kzalloc(bss_len, GFP_KERNEL); | 796 | bss->bss = kzalloc(bss_len, GFP_KERNEL); |
797 | if (!bss) { | 797 | if (!bss->bss) { |
798 | kfree(bss); | 798 | kfree(bss); |
799 | IWM_ERR(iwm, "Couldn't allocate bss\n"); | 799 | IWM_ERR(iwm, "Couldn't allocate bss\n"); |
800 | return -ENOMEM; | 800 | return -ENOMEM; |
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index bc5726dd5fe4..7ba3052b0708 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c | |||
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { | |||
65 | /* Sitecom */ | 65 | /* Sitecom */ |
66 | {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, | 66 | {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, |
67 | {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, | 67 | {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, |
68 | {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B}, | ||
68 | /* Sphairon Access Systems GmbH */ | 69 | /* Sphairon Access Systems GmbH */ |
69 | {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, | 70 | {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, |
70 | /* Dick Smith Electronics */ | 71 | /* Dick Smith Electronics */ |
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index d2fa27c5c1b2..7cecc8fea9bd 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
@@ -1,3 +1,11 @@ | |||
1 | config OF_FLATTREE | ||
2 | bool | ||
3 | depends on OF | ||
4 | |||
5 | config OF_DYNAMIC | ||
6 | def_bool y | ||
7 | depends on OF && PPC_OF | ||
8 | |||
1 | config OF_DEVICE | 9 | config OF_DEVICE |
2 | def_bool y | 10 | def_bool y |
3 | depends on OF && (SPARC || PPC_OF || MICROBLAZE) | 11 | depends on OF && (SPARC || PPC_OF || MICROBLAZE) |
diff --git a/drivers/of/Makefile b/drivers/of/Makefile index bdfb5f5d4b06..f232cc98ce00 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | obj-y = base.o | 1 | obj-y = base.o |
2 | obj-$(CONFIG_OF_FLATTREE) += fdt.o | ||
2 | obj-$(CONFIG_OF_DEVICE) += device.o platform.o | 3 | obj-$(CONFIG_OF_DEVICE) += device.o platform.o |
3 | obj-$(CONFIG_OF_GPIO) += gpio.o | 4 | obj-$(CONFIG_OF_GPIO) += gpio.o |
4 | obj-$(CONFIG_OF_I2C) += of_i2c.o | 5 | obj-$(CONFIG_OF_I2C) += of_i2c.o |
diff --git a/drivers/of/base.c b/drivers/of/base.c index e6627b2320f1..cb96888d1427 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -20,8 +20,10 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/of.h> | 21 | #include <linux/of.h> |
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/proc_fs.h> | ||
23 | 24 | ||
24 | struct device_node *allnodes; | 25 | struct device_node *allnodes; |
26 | struct device_node *of_chosen; | ||
25 | 27 | ||
26 | /* use when traversing tree through the allnext, child, sibling, | 28 | /* use when traversing tree through the allnext, child, sibling, |
27 | * or parent members of struct device_node. | 29 | * or parent members of struct device_node. |
@@ -37,7 +39,7 @@ int of_n_addr_cells(struct device_node *np) | |||
37 | np = np->parent; | 39 | np = np->parent; |
38 | ip = of_get_property(np, "#address-cells", NULL); | 40 | ip = of_get_property(np, "#address-cells", NULL); |
39 | if (ip) | 41 | if (ip) |
40 | return *ip; | 42 | return be32_to_cpup(ip); |
41 | } while (np->parent); | 43 | } while (np->parent); |
42 | /* No #address-cells property for the root node */ | 44 | /* No #address-cells property for the root node */ |
43 | return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; | 45 | return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; |
@@ -53,13 +55,88 @@ int of_n_size_cells(struct device_node *np) | |||
53 | np = np->parent; | 55 | np = np->parent; |
54 | ip = of_get_property(np, "#size-cells", NULL); | 56 | ip = of_get_property(np, "#size-cells", NULL); |
55 | if (ip) | 57 | if (ip) |
56 | return *ip; | 58 | return be32_to_cpup(ip); |
57 | } while (np->parent); | 59 | } while (np->parent); |
58 | /* No #size-cells property for the root node */ | 60 | /* No #size-cells property for the root node */ |
59 | return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; | 61 | return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; |
60 | } | 62 | } |
61 | EXPORT_SYMBOL(of_n_size_cells); | 63 | EXPORT_SYMBOL(of_n_size_cells); |
62 | 64 | ||
65 | #if !defined(CONFIG_SPARC) /* SPARC doesn't do ref counting (yet) */ | ||
66 | /** | ||
67 | * of_node_get - Increment refcount of a node | ||
68 | * @node: Node to inc refcount, NULL is supported to | ||
69 | * simplify writing of callers | ||
70 | * | ||
71 | * Returns node. | ||
72 | */ | ||
73 | struct device_node *of_node_get(struct device_node *node) | ||
74 | { | ||
75 | if (node) | ||
76 | kref_get(&node->kref); | ||
77 | return node; | ||
78 | } | ||
79 | EXPORT_SYMBOL(of_node_get); | ||
80 | |||
81 | static inline struct device_node *kref_to_device_node(struct kref *kref) | ||
82 | { | ||
83 | return container_of(kref, struct device_node, kref); | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * of_node_release - release a dynamically allocated node | ||
88 | * @kref: kref element of the node to be released | ||
89 | * | ||
90 | * In of_node_put() this function is passed to kref_put() | ||
91 | * as the destructor. | ||
92 | */ | ||
93 | static void of_node_release(struct kref *kref) | ||
94 | { | ||
95 | struct device_node *node = kref_to_device_node(kref); | ||
96 | struct property *prop = node->properties; | ||
97 | |||
98 | /* We should never be releasing nodes that haven't been detached. */ | ||
99 | if (!of_node_check_flag(node, OF_DETACHED)) { | ||
100 | pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); | ||
101 | dump_stack(); | ||
102 | kref_init(&node->kref); | ||
103 | return; | ||
104 | } | ||
105 | |||
106 | if (!of_node_check_flag(node, OF_DYNAMIC)) | ||
107 | return; | ||
108 | |||
109 | while (prop) { | ||
110 | struct property *next = prop->next; | ||
111 | kfree(prop->name); | ||
112 | kfree(prop->value); | ||
113 | kfree(prop); | ||
114 | prop = next; | ||
115 | |||
116 | if (!prop) { | ||
117 | prop = node->deadprops; | ||
118 | node->deadprops = NULL; | ||
119 | } | ||
120 | } | ||
121 | kfree(node->full_name); | ||
122 | kfree(node->data); | ||
123 | kfree(node); | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * of_node_put - Decrement refcount of a node | ||
128 | * @node: Node to dec refcount, NULL is supported to | ||
129 | * simplify writing of callers | ||
130 | * | ||
131 | */ | ||
132 | void of_node_put(struct device_node *node) | ||
133 | { | ||
134 | if (node) | ||
135 | kref_put(&node->kref, of_node_release); | ||
136 | } | ||
137 | EXPORT_SYMBOL(of_node_put); | ||
138 | #endif /* !CONFIG_SPARC */ | ||
139 | |||
63 | struct property *of_find_property(const struct device_node *np, | 140 | struct property *of_find_property(const struct device_node *np, |
64 | const char *name, | 141 | const char *name, |
65 | int *lenp) | 142 | int *lenp) |
@@ -144,6 +221,27 @@ int of_device_is_compatible(const struct device_node *device, | |||
144 | EXPORT_SYMBOL(of_device_is_compatible); | 221 | EXPORT_SYMBOL(of_device_is_compatible); |
145 | 222 | ||
146 | /** | 223 | /** |
224 | * of_machine_is_compatible - Test root of device tree for a given compatible value | ||
225 | * @compat: compatible string to look for in root node's compatible property. | ||
226 | * | ||
227 | * Returns true if the root node has the given value in its | ||
228 | * compatible property. | ||
229 | */ | ||
230 | int of_machine_is_compatible(const char *compat) | ||
231 | { | ||
232 | struct device_node *root; | ||
233 | int rc = 0; | ||
234 | |||
235 | root = of_find_node_by_path("/"); | ||
236 | if (root) { | ||
237 | rc = of_device_is_compatible(root, compat); | ||
238 | of_node_put(root); | ||
239 | } | ||
240 | return rc; | ||
241 | } | ||
242 | EXPORT_SYMBOL(of_machine_is_compatible); | ||
243 | |||
244 | /** | ||
147 | * of_device_is_available - check if a device is available for use | 245 | * of_device_is_available - check if a device is available for use |
148 | * | 246 | * |
149 | * @device: Node to check for availability | 247 | * @device: Node to check for availability |
@@ -519,6 +617,27 @@ int of_modalias_node(struct device_node *node, char *modalias, int len) | |||
519 | EXPORT_SYMBOL_GPL(of_modalias_node); | 617 | EXPORT_SYMBOL_GPL(of_modalias_node); |
520 | 618 | ||
521 | /** | 619 | /** |
620 | * of_find_node_by_phandle - Find a node given a phandle | ||
621 | * @handle: phandle of the node to find | ||
622 | * | ||
623 | * Returns a node pointer with refcount incremented, use | ||
624 | * of_node_put() on it when done. | ||
625 | */ | ||
626 | struct device_node *of_find_node_by_phandle(phandle handle) | ||
627 | { | ||
628 | struct device_node *np; | ||
629 | |||
630 | read_lock(&devtree_lock); | ||
631 | for (np = allnodes; np; np = np->allnext) | ||
632 | if (np->phandle == handle) | ||
633 | break; | ||
634 | of_node_get(np); | ||
635 | read_unlock(&devtree_lock); | ||
636 | return np; | ||
637 | } | ||
638 | EXPORT_SYMBOL(of_find_node_by_phandle); | ||
639 | |||
640 | /** | ||
522 | * of_parse_phandle - Resolve a phandle property to a device_node pointer | 641 | * of_parse_phandle - Resolve a phandle property to a device_node pointer |
523 | * @np: Pointer to device node holding phandle property | 642 | * @np: Pointer to device node holding phandle property |
524 | * @phandle_name: Name of property holding a phandle value | 643 | * @phandle_name: Name of property holding a phandle value |
@@ -578,8 +697,8 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, | |||
578 | const void **out_args) | 697 | const void **out_args) |
579 | { | 698 | { |
580 | int ret = -EINVAL; | 699 | int ret = -EINVAL; |
581 | const u32 *list; | 700 | const __be32 *list; |
582 | const u32 *list_end; | 701 | const __be32 *list_end; |
583 | int size; | 702 | int size; |
584 | int cur_index = 0; | 703 | int cur_index = 0; |
585 | struct device_node *node = NULL; | 704 | struct device_node *node = NULL; |
@@ -593,7 +712,7 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, | |||
593 | list_end = list + size / sizeof(*list); | 712 | list_end = list + size / sizeof(*list); |
594 | 713 | ||
595 | while (list < list_end) { | 714 | while (list < list_end) { |
596 | const u32 *cells; | 715 | const __be32 *cells; |
597 | const phandle *phandle; | 716 | const phandle *phandle; |
598 | 717 | ||
599 | phandle = list++; | 718 | phandle = list++; |
@@ -617,7 +736,7 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, | |||
617 | goto err1; | 736 | goto err1; |
618 | } | 737 | } |
619 | 738 | ||
620 | list += *cells; | 739 | list += be32_to_cpup(cells); |
621 | if (list > list_end) { | 740 | if (list > list_end) { |
622 | pr_debug("%s: insufficient arguments length\n", | 741 | pr_debug("%s: insufficient arguments length\n", |
623 | np->full_name); | 742 | np->full_name); |
@@ -658,3 +777,190 @@ err0: | |||
658 | return ret; | 777 | return ret; |
659 | } | 778 | } |
660 | EXPORT_SYMBOL(of_parse_phandles_with_args); | 779 | EXPORT_SYMBOL(of_parse_phandles_with_args); |
780 | |||
781 | /** | ||
782 | * prom_add_property - Add a property to a node | ||
783 | */ | ||
784 | int prom_add_property(struct device_node *np, struct property *prop) | ||
785 | { | ||
786 | struct property **next; | ||
787 | unsigned long flags; | ||
788 | |||
789 | prop->next = NULL; | ||
790 | write_lock_irqsave(&devtree_lock, flags); | ||
791 | next = &np->properties; | ||
792 | while (*next) { | ||
793 | if (strcmp(prop->name, (*next)->name) == 0) { | ||
794 | /* duplicate ! don't insert it */ | ||
795 | write_unlock_irqrestore(&devtree_lock, flags); | ||
796 | return -1; | ||
797 | } | ||
798 | next = &(*next)->next; | ||
799 | } | ||
800 | *next = prop; | ||
801 | write_unlock_irqrestore(&devtree_lock, flags); | ||
802 | |||
803 | #ifdef CONFIG_PROC_DEVICETREE | ||
804 | /* try to add to proc as well if it was initialized */ | ||
805 | if (np->pde) | ||
806 | proc_device_tree_add_prop(np->pde, prop); | ||
807 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
808 | |||
809 | return 0; | ||
810 | } | ||
811 | |||
812 | /** | ||
813 | * prom_remove_property - Remove a property from a node. | ||
814 | * | ||
815 | * Note that we don't actually remove it, since we have given out | ||
816 | * who-knows-how-many pointers to the data using get-property. | ||
817 | * Instead we just move the property to the "dead properties" | ||
818 | * list, so it won't be found any more. | ||
819 | */ | ||
820 | int prom_remove_property(struct device_node *np, struct property *prop) | ||
821 | { | ||
822 | struct property **next; | ||
823 | unsigned long flags; | ||
824 | int found = 0; | ||
825 | |||
826 | write_lock_irqsave(&devtree_lock, flags); | ||
827 | next = &np->properties; | ||
828 | while (*next) { | ||
829 | if (*next == prop) { | ||
830 | /* found the node */ | ||
831 | *next = prop->next; | ||
832 | prop->next = np->deadprops; | ||
833 | np->deadprops = prop; | ||
834 | found = 1; | ||
835 | break; | ||
836 | } | ||
837 | next = &(*next)->next; | ||
838 | } | ||
839 | write_unlock_irqrestore(&devtree_lock, flags); | ||
840 | |||
841 | if (!found) | ||
842 | return -ENODEV; | ||
843 | |||
844 | #ifdef CONFIG_PROC_DEVICETREE | ||
845 | /* try to remove the proc node as well */ | ||
846 | if (np->pde) | ||
847 | proc_device_tree_remove_prop(np->pde, prop); | ||
848 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
849 | |||
850 | return 0; | ||
851 | } | ||
852 | |||
853 | /* | ||
854 | * prom_update_property - Update a property in a node. | ||
855 | * | ||
856 | * Note that we don't actually remove it, since we have given out | ||
857 | * who-knows-how-many pointers to the data using get-property. | ||
858 | * Instead we just move the property to the "dead properties" list, | ||
859 | * and add the new property to the property list | ||
860 | */ | ||
861 | int prom_update_property(struct device_node *np, | ||
862 | struct property *newprop, | ||
863 | struct property *oldprop) | ||
864 | { | ||
865 | struct property **next; | ||
866 | unsigned long flags; | ||
867 | int found = 0; | ||
868 | |||
869 | write_lock_irqsave(&devtree_lock, flags); | ||
870 | next = &np->properties; | ||
871 | while (*next) { | ||
872 | if (*next == oldprop) { | ||
873 | /* found the node */ | ||
874 | newprop->next = oldprop->next; | ||
875 | *next = newprop; | ||
876 | oldprop->next = np->deadprops; | ||
877 | np->deadprops = oldprop; | ||
878 | found = 1; | ||
879 | break; | ||
880 | } | ||
881 | next = &(*next)->next; | ||
882 | } | ||
883 | write_unlock_irqrestore(&devtree_lock, flags); | ||
884 | |||
885 | if (!found) | ||
886 | return -ENODEV; | ||
887 | |||
888 | #ifdef CONFIG_PROC_DEVICETREE | ||
889 | /* try to add to proc as well if it was initialized */ | ||
890 | if (np->pde) | ||
891 | proc_device_tree_update_prop(np->pde, newprop, oldprop); | ||
892 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
893 | |||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | #if defined(CONFIG_OF_DYNAMIC) | ||
898 | /* | ||
899 | * Support for dynamic device trees. | ||
900 | * | ||
901 | * On some platforms, the device tree can be manipulated at runtime. | ||
902 | * The routines in this section support adding, removing and changing | ||
903 | * device tree nodes. | ||
904 | */ | ||
905 | |||
906 | /** | ||
907 | * of_attach_node - Plug a device node into the tree and global list. | ||
908 | */ | ||
909 | void of_attach_node(struct device_node *np) | ||
910 | { | ||
911 | unsigned long flags; | ||
912 | |||
913 | write_lock_irqsave(&devtree_lock, flags); | ||
914 | np->sibling = np->parent->child; | ||
915 | np->allnext = allnodes; | ||
916 | np->parent->child = np; | ||
917 | allnodes = np; | ||
918 | write_unlock_irqrestore(&devtree_lock, flags); | ||
919 | } | ||
920 | |||
921 | /** | ||
922 | * of_detach_node - "Unplug" a node from the device tree. | ||
923 | * | ||
924 | * The caller must hold a reference to the node. The memory associated with | ||
925 | * the node is not freed until its refcount goes to zero. | ||
926 | */ | ||
927 | void of_detach_node(struct device_node *np) | ||
928 | { | ||
929 | struct device_node *parent; | ||
930 | unsigned long flags; | ||
931 | |||
932 | write_lock_irqsave(&devtree_lock, flags); | ||
933 | |||
934 | parent = np->parent; | ||
935 | if (!parent) | ||
936 | goto out_unlock; | ||
937 | |||
938 | if (allnodes == np) | ||
939 | allnodes = np->allnext; | ||
940 | else { | ||
941 | struct device_node *prev; | ||
942 | for (prev = allnodes; | ||
943 | prev->allnext != np; | ||
944 | prev = prev->allnext) | ||
945 | ; | ||
946 | prev->allnext = np->allnext; | ||
947 | } | ||
948 | |||
949 | if (parent->child == np) | ||
950 | parent->child = np->sibling; | ||
951 | else { | ||
952 | struct device_node *prevsib; | ||
953 | for (prevsib = np->parent->child; | ||
954 | prevsib->sibling != np; | ||
955 | prevsib = prevsib->sibling) | ||
956 | ; | ||
957 | prevsib->sibling = np->sibling; | ||
958 | } | ||
959 | |||
960 | of_node_set_flag(np, OF_DETACHED); | ||
961 | |||
962 | out_unlock: | ||
963 | write_unlock_irqrestore(&devtree_lock, flags); | ||
964 | } | ||
965 | #endif /* defined(CONFIG_OF_DYNAMIC) */ | ||
966 | |||
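As a usage illustration of the new of_machine_is_compatible() helper added above: the compatible string "acme,example-board" and the quirk routine are hypothetical examples, not taken from this patch.

#include <linux/kernel.h>
#include <linux/of.h>

static void __init apply_board_quirks(void)
{
	/* Test the root node's compatible property; the helper gets and
	 * puts its own reference on the root node internally. */
	if (of_machine_is_compatible("acme,example-board"))
		pr_info("enabling acme,example-board workarounds\n");
}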
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c new file mode 100644 index 000000000000..406757a9d7ea --- /dev/null +++ b/drivers/of/fdt.c | |||
@@ -0,0 +1,590 @@ | |||
1 | /* | ||
2 | * Functions for working with the Flattened Device Tree data format | ||
3 | * | ||
4 | * Copyright 2009 Benjamin Herrenschmidt, IBM Corp | ||
5 | * benh@kernel.crashing.org | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/initrd.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_fdt.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/errno.h> | ||
18 | |||
19 | #ifdef CONFIG_PPC | ||
20 | #include <asm/machdep.h> | ||
21 | #endif /* CONFIG_PPC */ | ||
22 | |||
23 | #include <asm/page.h> | ||
24 | |||
25 | int __initdata dt_root_addr_cells; | ||
26 | int __initdata dt_root_size_cells; | ||
27 | |||
28 | struct boot_param_header *initial_boot_params; | ||
29 | |||
30 | char *find_flat_dt_string(u32 offset) | ||
31 | { | ||
32 | return ((char *)initial_boot_params) + | ||
33 | be32_to_cpu(initial_boot_params->off_dt_strings) + offset; | ||
34 | } | ||
35 | |||
36 | /** | ||
37 | * of_scan_flat_dt - scan flattened tree blob and call callback on each. | ||
38 | * @it: callback function | ||
39 | * @data: context data pointer | ||
40 | * | ||
41 | * This function is used to scan the flattened device-tree, it is | ||
42 | * used to extract the memory information at boot before we can | ||
43 | * unflatten the tree | ||
44 | */ | ||
45 | int __init of_scan_flat_dt(int (*it)(unsigned long node, | ||
46 | const char *uname, int depth, | ||
47 | void *data), | ||
48 | void *data) | ||
49 | { | ||
50 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
51 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
52 | int rc = 0; | ||
53 | int depth = -1; | ||
54 | |||
55 | do { | ||
56 | u32 tag = be32_to_cpup((__be32 *)p); | ||
57 | char *pathp; | ||
58 | |||
59 | p += 4; | ||
60 | if (tag == OF_DT_END_NODE) { | ||
61 | depth--; | ||
62 | continue; | ||
63 | } | ||
64 | if (tag == OF_DT_NOP) | ||
65 | continue; | ||
66 | if (tag == OF_DT_END) | ||
67 | break; | ||
68 | if (tag == OF_DT_PROP) { | ||
69 | u32 sz = be32_to_cpup((__be32 *)p); | ||
70 | p += 8; | ||
71 | if (be32_to_cpu(initial_boot_params->version) < 0x10) | ||
72 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
73 | p += sz; | ||
74 | p = _ALIGN(p, 4); | ||
75 | continue; | ||
76 | } | ||
77 | if (tag != OF_DT_BEGIN_NODE) { | ||
78 | pr_err("Invalid tag %x in flat device tree!\n", tag); | ||
79 | return -EINVAL; | ||
80 | } | ||
81 | depth++; | ||
82 | pathp = (char *)p; | ||
83 | p = _ALIGN(p + strlen(pathp) + 1, 4); | ||
84 | if ((*pathp) == '/') { | ||
85 | char *lp, *np; | ||
86 | for (lp = NULL, np = pathp; *np; np++) | ||
87 | if ((*np) == '/') | ||
88 | lp = np+1; | ||
89 | if (lp != NULL) | ||
90 | pathp = lp; | ||
91 | } | ||
92 | rc = it(p, pathp, depth, data); | ||
93 | if (rc != 0) | ||
94 | break; | ||
95 | } while (1); | ||
96 | |||
97 | return rc; | ||
98 | } | ||
99 | |||
100 | /** | ||
101 | * of_get_flat_dt_root - find the root node in the flat blob | ||
102 | */ | ||
103 | unsigned long __init of_get_flat_dt_root(void) | ||
104 | { | ||
105 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
106 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
107 | |||
108 | while (be32_to_cpup((__be32 *)p) == OF_DT_NOP) | ||
109 | p += 4; | ||
110 | BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE); | ||
111 | p += 4; | ||
112 | return _ALIGN(p + strlen((char *)p) + 1, 4); | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr | ||
117 | * | ||
118 | * This function can be used within scan_flattened_dt callback to get | ||
119 | * access to properties | ||
120 | */ | ||
121 | void *__init of_get_flat_dt_prop(unsigned long node, const char *name, | ||
122 | unsigned long *size) | ||
123 | { | ||
124 | unsigned long p = node; | ||
125 | |||
126 | do { | ||
127 | u32 tag = be32_to_cpup((__be32 *)p); | ||
128 | u32 sz, noff; | ||
129 | const char *nstr; | ||
130 | |||
131 | p += 4; | ||
132 | if (tag == OF_DT_NOP) | ||
133 | continue; | ||
134 | if (tag != OF_DT_PROP) | ||
135 | return NULL; | ||
136 | |||
137 | sz = be32_to_cpup((__be32 *)p); | ||
138 | noff = be32_to_cpup((__be32 *)(p + 4)); | ||
139 | p += 8; | ||
140 | if (be32_to_cpu(initial_boot_params->version) < 0x10) | ||
141 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
142 | |||
143 | nstr = find_flat_dt_string(noff); | ||
144 | if (nstr == NULL) { | ||
145 | pr_warning("Can't find property index name !\n"); | ||
146 | return NULL; | ||
147 | } | ||
148 | if (strcmp(name, nstr) == 0) { | ||
149 | if (size) | ||
150 | *size = sz; | ||
151 | return (void *)p; | ||
152 | } | ||
153 | p += sz; | ||
154 | p = _ALIGN(p, 4); | ||
155 | } while (1); | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * of_flat_dt_is_compatible - Return true if given node has compat in compatible list | ||
160 | * @node: node to test | ||
161 | * @compat: compatible string to compare with compatible list. | ||
162 | */ | ||
163 | int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) | ||
164 | { | ||
165 | const char *cp; | ||
166 | unsigned long cplen, l; | ||
167 | |||
168 | cp = of_get_flat_dt_prop(node, "compatible", &cplen); | ||
169 | if (cp == NULL) | ||
170 | return 0; | ||
171 | while (cplen > 0) { | ||
172 | if (strncasecmp(cp, compat, strlen(compat)) == 0) | ||
173 | return 1; | ||
174 | l = strlen(cp) + 1; | ||
175 | cp += l; | ||
176 | cplen -= l; | ||
177 | } | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, | ||
183 | unsigned long align) | ||
184 | { | ||
185 | void *res; | ||
186 | |||
187 | *mem = _ALIGN(*mem, align); | ||
188 | res = (void *)*mem; | ||
189 | *mem += size; | ||
190 | |||
191 | return res; | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * unflatten_dt_node - Alloc and populate a device_node from the flat tree | ||
196 | * @p: pointer to node in flat tree | ||
197 | * @dad: Parent struct device_node | ||
198 | * @allnextpp: pointer to ->allnext from last allocated device_node | ||
199 | * @fpsize: Size of the node path up at the current depth. | ||
200 | */ | ||
201 | unsigned long __init unflatten_dt_node(unsigned long mem, | ||
202 | unsigned long *p, | ||
203 | struct device_node *dad, | ||
204 | struct device_node ***allnextpp, | ||
205 | unsigned long fpsize) | ||
206 | { | ||
207 | struct device_node *np; | ||
208 | struct property *pp, **prev_pp = NULL; | ||
209 | char *pathp; | ||
210 | u32 tag; | ||
211 | unsigned int l, allocl; | ||
212 | int has_name = 0; | ||
213 | int new_format = 0; | ||
214 | |||
215 | tag = be32_to_cpup((__be32 *)(*p)); | ||
216 | if (tag != OF_DT_BEGIN_NODE) { | ||
217 | pr_err("Weird tag at start of node: %x\n", tag); | ||
218 | return mem; | ||
219 | } | ||
220 | *p += 4; | ||
221 | pathp = (char *)*p; | ||
222 | l = allocl = strlen(pathp) + 1; | ||
223 | *p = _ALIGN(*p + l, 4); | ||
224 | |||
225 | /* version 0x10 has a more compact unit name here instead of the full | ||
226 | * path. we accumulate the full path size using "fpsize", we'll rebuild | ||
227 | * it later. We detect this because the first character of the name is | ||
228 | * not '/'. | ||
229 | */ | ||
230 | if ((*pathp) != '/') { | ||
231 | new_format = 1; | ||
232 | if (fpsize == 0) { | ||
233 | /* root node: special case. fpsize accounts for path | ||
234 | * plus terminating zero. root node only has '/', so | ||
235 | * fpsize should be 2, but we want to avoid the first | ||
236 | * level nodes to have two '/' so we use fpsize 1 here | ||
237 | */ | ||
238 | fpsize = 1; | ||
239 | allocl = 2; | ||
240 | } else { | ||
241 | /* account for '/' and path size minus terminal 0 | ||
242 | * already in 'l' | ||
243 | */ | ||
244 | fpsize += l; | ||
245 | allocl = fpsize; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, | ||
250 | __alignof__(struct device_node)); | ||
251 | if (allnextpp) { | ||
252 | memset(np, 0, sizeof(*np)); | ||
253 | np->full_name = ((char *)np) + sizeof(struct device_node); | ||
254 | if (new_format) { | ||
255 | char *fn = np->full_name; | ||
256 | /* rebuild full path for new format */ | ||
257 | if (dad && dad->parent) { | ||
258 | strcpy(fn, dad->full_name); | ||
259 | #ifdef DEBUG | ||
260 | if ((strlen(fn) + l + 1) != allocl) { | ||
261 | pr_debug("%s: p: %d, l: %d, a: %d\n", | ||
262 | pathp, (int)strlen(fn), | ||
263 | l, allocl); | ||
264 | } | ||
265 | #endif | ||
266 | fn += strlen(fn); | ||
267 | } | ||
268 | *(fn++) = '/'; | ||
269 | memcpy(fn, pathp, l); | ||
270 | } else | ||
271 | memcpy(np->full_name, pathp, l); | ||
272 | prev_pp = &np->properties; | ||
273 | **allnextpp = np; | ||
274 | *allnextpp = &np->allnext; | ||
275 | if (dad != NULL) { | ||
276 | np->parent = dad; | ||
277 | /* we temporarily use the next field as `last_child'*/ | ||
278 | if (dad->next == NULL) | ||
279 | dad->child = np; | ||
280 | else | ||
281 | dad->next->sibling = np; | ||
282 | dad->next = np; | ||
283 | } | ||
284 | kref_init(&np->kref); | ||
285 | } | ||
286 | while (1) { | ||
287 | u32 sz, noff; | ||
288 | char *pname; | ||
289 | |||
290 | tag = be32_to_cpup((__be32 *)(*p)); | ||
291 | if (tag == OF_DT_NOP) { | ||
292 | *p += 4; | ||
293 | continue; | ||
294 | } | ||
295 | if (tag != OF_DT_PROP) | ||
296 | break; | ||
297 | *p += 4; | ||
298 | sz = be32_to_cpup((__be32 *)(*p)); | ||
299 | noff = be32_to_cpup((__be32 *)((*p) + 4)); | ||
300 | *p += 8; | ||
301 | if (be32_to_cpu(initial_boot_params->version) < 0x10) | ||
302 | *p = _ALIGN(*p, sz >= 8 ? 8 : 4); | ||
303 | |||
304 | pname = find_flat_dt_string(noff); | ||
305 | if (pname == NULL) { | ||
306 | pr_info("Can't find property name in list !\n"); | ||
307 | break; | ||
308 | } | ||
309 | if (strcmp(pname, "name") == 0) | ||
310 | has_name = 1; | ||
311 | l = strlen(pname) + 1; | ||
312 | pp = unflatten_dt_alloc(&mem, sizeof(struct property), | ||
313 | __alignof__(struct property)); | ||
314 | if (allnextpp) { | ||
315 | /* We accept flattened tree phandles either in | ||
316 | * ePAPR-style "phandle" properties, or the | ||
317 | * legacy "linux,phandle" properties. If both | ||
318 | * appear and have different values, things | ||
319 | * will get weird. Don't do that. */ | ||
320 | if ((strcmp(pname, "phandle") == 0) || | ||
321 | (strcmp(pname, "linux,phandle") == 0)) { | ||
322 | if (np->phandle == 0) | ||
323 | np->phandle = *((u32 *)*p); | ||
324 | } | ||
325 | /* And we process the "ibm,phandle" property | ||
326 | * used in pSeries dynamic device tree | ||
327 | * stuff */ | ||
328 | if (strcmp(pname, "ibm,phandle") == 0) | ||
329 | np->phandle = *((u32 *)*p); | ||
330 | pp->name = pname; | ||
331 | pp->length = sz; | ||
332 | pp->value = (void *)*p; | ||
333 | *prev_pp = pp; | ||
334 | prev_pp = &pp->next; | ||
335 | } | ||
336 | *p = _ALIGN((*p) + sz, 4); | ||
337 | } | ||
338 | /* with version 0x10 we may not have the name property, recreate | ||
339 | * it here from the unit name if absent | ||
340 | */ | ||
341 | if (!has_name) { | ||
342 | char *p1 = pathp, *ps = pathp, *pa = NULL; | ||
343 | int sz; | ||
344 | |||
345 | while (*p1) { | ||
346 | if ((*p1) == '@') | ||
347 | pa = p1; | ||
348 | if ((*p1) == '/') | ||
349 | ps = p1 + 1; | ||
350 | p1++; | ||
351 | } | ||
352 | if (pa < ps) | ||
353 | pa = p1; | ||
354 | sz = (pa - ps) + 1; | ||
355 | pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, | ||
356 | __alignof__(struct property)); | ||
357 | if (allnextpp) { | ||
358 | pp->name = "name"; | ||
359 | pp->length = sz; | ||
360 | pp->value = pp + 1; | ||
361 | *prev_pp = pp; | ||
362 | prev_pp = &pp->next; | ||
363 | memcpy(pp->value, ps, sz - 1); | ||
364 | ((char *)pp->value)[sz - 1] = 0; | ||
365 | pr_debug("fixed up name for %s -> %s\n", pathp, | ||
366 | (char *)pp->value); | ||
367 | } | ||
368 | } | ||
369 | if (allnextpp) { | ||
370 | *prev_pp = NULL; | ||
371 | np->name = of_get_property(np, "name", NULL); | ||
372 | np->type = of_get_property(np, "device_type", NULL); | ||
373 | |||
374 | if (!np->name) | ||
375 | np->name = "<NULL>"; | ||
376 | if (!np->type) | ||
377 | np->type = "<NULL>"; | ||
378 | } | ||
379 | while (tag == OF_DT_BEGIN_NODE) { | ||
380 | mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); | ||
381 | tag = be32_to_cpup((__be32 *)(*p)); | ||
382 | } | ||
383 | if (tag != OF_DT_END_NODE) { | ||
384 | pr_err("Weird tag at end of node: %x\n", tag); | ||
385 | return mem; | ||
386 | } | ||
387 | *p += 4; | ||
388 | return mem; | ||
389 | } | ||
390 | |||
391 | #ifdef CONFIG_BLK_DEV_INITRD | ||
392 | /** | ||
393 | * early_init_dt_check_for_initrd - Decode initrd location from flat tree | ||
394 | * @node: reference to node containing initrd location ('chosen') | ||
395 | */ | ||
396 | void __init early_init_dt_check_for_initrd(unsigned long node) | ||
397 | { | ||
398 | unsigned long start, end, len; | ||
399 | __be32 *prop; | ||
400 | |||
401 | pr_debug("Looking for initrd properties... "); | ||
402 | |||
403 | prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len); | ||
404 | if (!prop) | ||
405 | return; | ||
406 | start = of_read_ulong(prop, len/4); | ||
407 | |||
408 | prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len); | ||
409 | if (!prop) | ||
410 | return; | ||
411 | end = of_read_ulong(prop, len/4); | ||
412 | |||
413 | early_init_dt_setup_initrd_arch(start, end); | ||
414 | pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end); | ||
415 | } | ||
416 | #else | ||
417 | inline void early_init_dt_check_for_initrd(unsigned long node) | ||
418 | { | ||
419 | } | ||
420 | #endif /* CONFIG_BLK_DEV_INITRD */ | ||
421 | |||
422 | /** | ||
423 | * early_init_dt_scan_root - fetch the top level address and size cells | ||
424 | */ | ||
425 | int __init early_init_dt_scan_root(unsigned long node, const char *uname, | ||
426 | int depth, void *data) | ||
427 | { | ||
428 | __be32 *prop; | ||
429 | |||
430 | if (depth != 0) | ||
431 | return 0; | ||
432 | |||
433 | dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT; | ||
434 | dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT; | ||
435 | |||
436 | prop = of_get_flat_dt_prop(node, "#size-cells", NULL); | ||
437 | if (prop) | ||
438 | dt_root_size_cells = be32_to_cpup(prop); | ||
439 | pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); | ||
440 | |||
441 | prop = of_get_flat_dt_prop(node, "#address-cells", NULL); | ||
442 | if (prop) | ||
443 | dt_root_addr_cells = be32_to_cpup(prop); | ||
444 | pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); | ||
445 | |||
446 | /* break now */ | ||
447 | return 1; | ||
448 | } | ||
449 | |||
450 | u64 __init dt_mem_next_cell(int s, __be32 **cellp) | ||
451 | { | ||
452 | __be32 *p = *cellp; | ||
453 | |||
454 | *cellp = p + s; | ||
455 | return of_read_number(p, s); | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * early_init_dt_scan_memory - Look for and parse memory nodes | ||
460 | */ | ||
461 | int __init early_init_dt_scan_memory(unsigned long node, const char *uname, | ||
462 | int depth, void *data) | ||
463 | { | ||
464 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); | ||
465 | __be32 *reg, *endp; | ||
466 | unsigned long l; | ||
467 | |||
468 | /* We are scanning "memory" nodes only */ | ||
469 | if (type == NULL) { | ||
470 | /* | ||
471 | * The longtrail doesn't have a device_type on the | ||
472 | * /memory node, so look for the node called /memory@0. | ||
473 | */ | ||
474 | if (depth != 1 || strcmp(uname, "memory@0") != 0) | ||
475 | return 0; | ||
476 | } else if (strcmp(type, "memory") != 0) | ||
477 | return 0; | ||
478 | |||
479 | reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); | ||
480 | if (reg == NULL) | ||
481 | reg = of_get_flat_dt_prop(node, "reg", &l); | ||
482 | if (reg == NULL) | ||
483 | return 0; | ||
484 | |||
485 | endp = reg + (l / sizeof(__be32)); | ||
486 | |||
487 | pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", | ||
488 | uname, l, reg[0], reg[1], reg[2], reg[3]); | ||
489 | |||
490 | while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { | ||
491 | u64 base, size; | ||
492 | |||
493 | base = dt_mem_next_cell(dt_root_addr_cells, ®); | ||
494 | size = dt_mem_next_cell(dt_root_size_cells, ®); | ||
495 | |||
496 | if (size == 0) | ||
497 | continue; | ||
498 | pr_debug(" - %llx , %llx\n", (unsigned long long)base, | ||
499 | (unsigned long long)size); | ||
500 | |||
501 | early_init_dt_add_memory_arch(base, size); | ||
502 | } | ||
503 | |||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, | ||
508 | int depth, void *data) | ||
509 | { | ||
510 | unsigned long l; | ||
511 | char *p; | ||
512 | |||
513 | pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); | ||
514 | |||
515 | if (depth != 1 || | ||
516 | (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) | ||
517 | return 0; | ||
518 | |||
519 | early_init_dt_check_for_initrd(node); | ||
520 | |||
521 | /* Retrieve command line */ | ||
522 | p = of_get_flat_dt_prop(node, "bootargs", &l); | ||
523 | if (p != NULL && l > 0) | ||
524 | strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); | ||
525 | |||
526 | #ifdef CONFIG_CMDLINE | ||
527 | #ifndef CONFIG_CMDLINE_FORCE | ||
528 | if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) | ||
529 | #endif | ||
530 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
531 | #endif /* CONFIG_CMDLINE */ | ||
532 | |||
533 | early_init_dt_scan_chosen_arch(node); | ||
534 | |||
535 | pr_debug("Command line is: %s\n", cmd_line); | ||
536 | |||
537 | /* break now */ | ||
538 | return 1; | ||
539 | } | ||
540 | |||
541 | /** | ||
542 | * unflatten_device_tree - create tree of device_nodes from flat blob | ||
543 | * | ||
544 | * unflattens the device-tree passed by the firmware, creating the | ||
545 | * tree of struct device_node. It also fills the "name" and "type" | ||
546 | * pointers of the nodes so the normal device-tree walking functions | ||
547 | * can be used. | ||
548 | */ | ||
549 | void __init unflatten_device_tree(void) | ||
550 | { | ||
551 | unsigned long start, mem, size; | ||
552 | struct device_node **allnextp = &allnodes; | ||
553 | |||
554 | pr_debug(" -> unflatten_device_tree()\n"); | ||
555 | |||
556 | /* First pass, scan for size */ | ||
557 | start = ((unsigned long)initial_boot_params) + | ||
558 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
559 | size = unflatten_dt_node(0, &start, NULL, NULL, 0); | ||
560 | size = (size | 3) + 1; | ||
561 | |||
562 | pr_debug(" size is %lx, allocating...\n", size); | ||
563 | |||
564 | /* Allocate memory for the expanded device tree */ | ||
565 | mem = early_init_dt_alloc_memory_arch(size + 4, | ||
566 | __alignof__(struct device_node)); | ||
567 | mem = (unsigned long) __va(mem); | ||
568 | |||
569 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); | ||
570 | |||
571 | pr_debug(" unflattening %lx...\n", mem); | ||
572 | |||
573 | /* Second pass, do actual unflattening */ | ||
574 | start = ((unsigned long)initial_boot_params) + | ||
575 | be32_to_cpu(initial_boot_params->off_dt_struct); | ||
576 | unflatten_dt_node(mem, &start, NULL, &allnextp, 0); | ||
577 | if (be32_to_cpup((__be32 *)start) != OF_DT_END) | ||
578 | pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start)); | ||
579 | if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef) | ||
580 | pr_warning("End of tree marker overwritten: %08x\n", | ||
581 | be32_to_cpu(((__be32 *)mem)[size / 4])); | ||
582 | *allnextp = NULL; | ||
583 | |||
584 | /* Get pointer to OF "/chosen" node for use everywhere */ | ||
585 | of_chosen = of_find_node_by_path("/chosen"); | ||
586 | if (of_chosen == NULL) | ||
587 | of_chosen = of_find_node_by_path("/chosen@0"); | ||
588 | |||
589 | pr_debug(" <- unflatten_device_tree()\n"); | ||
590 | } | ||
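A rough sketch of how early boot code might drive the of_scan_flat_dt() iterator added above, mirroring its callback contract (a non-zero return stops the walk). The node-counting callback is a hypothetical example under those assumptions, not code from this patch.

#include <linux/kernel.h>
#include <linux/of_fdt.h>

static int __init count_fdt_nodes(unsigned long node, const char *uname,
				  int depth, void *data)
{
	/* 'node' points at the first property of the current node; this
	 * callback only counts nodes, so the payload is ignored. */
	(*(int *)data)++;
	return 0;		/* 0 = keep scanning, non-zero = stop early */
}

static void __init report_fdt_size(void)
{
	int count = 0;

	of_scan_flat_dt(count_fdt_nodes, &count);
	pr_debug("flat device tree contains %d nodes\n", count);
}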
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c index 6eea601a9204..24c3606217f8 100644 --- a/drivers/of/gpio.c +++ b/drivers/of/gpio.c | |||
@@ -36,7 +36,7 @@ int of_get_gpio_flags(struct device_node *np, int index, | |||
36 | struct of_gpio_chip *of_gc = NULL; | 36 | struct of_gpio_chip *of_gc = NULL; |
37 | int size; | 37 | int size; |
38 | const void *gpio_spec; | 38 | const void *gpio_spec; |
39 | const u32 *gpio_cells; | 39 | const __be32 *gpio_cells; |
40 | 40 | ||
41 | ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, | 41 | ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, |
42 | &gc, &gpio_spec); | 42 | &gc, &gpio_spec); |
@@ -55,7 +55,7 @@ int of_get_gpio_flags(struct device_node *np, int index, | |||
55 | 55 | ||
56 | gpio_cells = of_get_property(gc, "#gpio-cells", &size); | 56 | gpio_cells = of_get_property(gc, "#gpio-cells", &size); |
57 | if (!gpio_cells || size != sizeof(*gpio_cells) || | 57 | if (!gpio_cells || size != sizeof(*gpio_cells) || |
58 | *gpio_cells != of_gc->gpio_cells) { | 58 | be32_to_cpup(gpio_cells) != of_gc->gpio_cells) { |
59 | pr_debug("%s: wrong #gpio-cells for %s\n", | 59 | pr_debug("%s: wrong #gpio-cells for %s\n", |
60 | np->full_name, gc->full_name); | 60 | np->full_name, gc->full_name); |
61 | ret = -EINVAL; | 61 | ret = -EINVAL; |
@@ -127,7 +127,8 @@ EXPORT_SYMBOL(of_gpio_count); | |||
127 | int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, | 127 | int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, |
128 | const void *gpio_spec, enum of_gpio_flags *flags) | 128 | const void *gpio_spec, enum of_gpio_flags *flags) |
129 | { | 129 | { |
130 | const u32 *gpio = gpio_spec; | 130 | const __be32 *gpio = gpio_spec; |
131 | const u32 n = be32_to_cpup(gpio); | ||
131 | 132 | ||
132 | /* | 133 | /* |
133 | * We're discouraging gpio_cells < 2, since that way you'll have to | 134 | * We're discouraging gpio_cells < 2, since that way you'll have to |
@@ -140,13 +141,13 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, | |||
140 | return -EINVAL; | 141 | return -EINVAL; |
141 | } | 142 | } |
142 | 143 | ||
143 | if (*gpio > of_gc->gc.ngpio) | 144 | if (n > of_gc->gc.ngpio) |
144 | return -EINVAL; | 145 | return -EINVAL; |
145 | 146 | ||
146 | if (flags) | 147 | if (flags) |
147 | *flags = gpio[1]; | 148 | *flags = be32_to_cpu(gpio[1]); |
148 | 149 | ||
149 | return *gpio; | 150 | return n; |
150 | } | 151 | } |
151 | EXPORT_SYMBOL(of_gpio_simple_xlate); | 152 | EXPORT_SYMBOL(of_gpio_simple_xlate); |
152 | 153 | ||
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c index fa65a2b2ae2e..a3a708e590d0 100644 --- a/drivers/of/of_i2c.c +++ b/drivers/of/of_i2c.c | |||
@@ -25,7 +25,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap, | |||
25 | for_each_child_of_node(adap_node, node) { | 25 | for_each_child_of_node(adap_node, node) { |
26 | struct i2c_board_info info = {}; | 26 | struct i2c_board_info info = {}; |
27 | struct dev_archdata dev_ad = {}; | 27 | struct dev_archdata dev_ad = {}; |
28 | const u32 *addr; | 28 | const __be32 *addr; |
29 | int len; | 29 | int len; |
30 | 30 | ||
31 | if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) | 31 | if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) |
@@ -40,7 +40,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap, | |||
40 | 40 | ||
41 | info.irq = irq_of_parse_and_map(node, 0); | 41 | info.irq = irq_of_parse_and_map(node, 0); |
42 | 42 | ||
43 | info.addr = *addr; | 43 | info.addr = be32_to_cpup(addr); |
44 | 44 | ||
45 | dev_archdata_set_node(&dev_ad, node); | 45 | dev_archdata_set_node(&dev_ad, node); |
46 | info.archdata = &dev_ad; | 46 | info.archdata = &dev_ad; |
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 4b22ba568b19..18ecae4a4375 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
@@ -51,7 +51,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
51 | 51 | ||
52 | /* Loop over the child nodes and register a phy_device for each one */ | 52 | /* Loop over the child nodes and register a phy_device for each one */ |
53 | for_each_child_of_node(np, child) { | 53 | for_each_child_of_node(np, child) { |
54 | const u32 *addr; | 54 | const __be32 *addr; |
55 | int len; | 55 | int len; |
56 | 56 | ||
57 | /* A PHY must have a reg property in the range [0-31] */ | 57 | /* A PHY must have a reg property in the range [0-31] */ |
@@ -68,7 +68,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
68 | mdio->irq[*addr] = PHY_POLL; | 68 | mdio->irq[*addr] = PHY_POLL; |
69 | } | 69 | } |
70 | 70 | ||
71 | phy = get_phy_device(mdio, *addr); | 71 | phy = get_phy_device(mdio, be32_to_cpup(addr)); |
72 | if (!phy) { | 72 | if (!phy) { |
73 | dev_err(&mdio->dev, "error probing PHY at address %i\n", | 73 | dev_err(&mdio->dev, "error probing PHY at address %i\n", |
74 | *addr); | 74 | *addr); |
@@ -160,7 +160,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | |||
160 | struct device_node *net_np; | 160 | struct device_node *net_np; |
161 | char bus_id[MII_BUS_ID_SIZE + 3]; | 161 | char bus_id[MII_BUS_ID_SIZE + 3]; |
162 | struct phy_device *phy; | 162 | struct phy_device *phy; |
163 | const u32 *phy_id; | 163 | const __be32 *phy_id; |
164 | int sz; | 164 | int sz; |
165 | 165 | ||
166 | if (!dev->dev.parent) | 166 | if (!dev->dev.parent) |
@@ -174,7 +174,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | |||
174 | if (!phy_id || sz < sizeof(*phy_id)) | 174 | if (!phy_id || sz < sizeof(*phy_id)) |
175 | return NULL; | 175 | return NULL; |
176 | 176 | ||
177 | sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]); | 177 | sprintf(bus_id, PHY_ID_FMT, "0", be32_to_cpu(phy_id[0])); |
178 | 178 | ||
179 | phy = phy_connect(dev, bus_id, hndlr, 0, iface); | 179 | phy = phy_connect(dev, bus_id, hndlr, 0, iface); |
180 | return IS_ERR(phy) ? NULL : phy; | 180 | return IS_ERR(phy) ? NULL : phy; |
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c index bed0ed6dcdc1..f65f48b98448 100644 --- a/drivers/of/of_spi.c +++ b/drivers/of/of_spi.c | |||
@@ -23,7 +23,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) | |||
23 | { | 23 | { |
24 | struct spi_device *spi; | 24 | struct spi_device *spi; |
25 | struct device_node *nc; | 25 | struct device_node *nc; |
26 | const u32 *prop; | 26 | const __be32 *prop; |
27 | int rc; | 27 | int rc; |
28 | int len; | 28 | int len; |
29 | 29 | ||
@@ -54,7 +54,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) | |||
54 | spi_dev_put(spi); | 54 | spi_dev_put(spi); |
55 | continue; | 55 | continue; |
56 | } | 56 | } |
57 | spi->chip_select = *prop; | 57 | spi->chip_select = be32_to_cpup(prop); |
58 | 58 | ||
59 | /* Mode (clock phase/polarity/etc.) */ | 59 | /* Mode (clock phase/polarity/etc.) */ |
60 | if (of_find_property(nc, "spi-cpha", NULL)) | 60 | if (of_find_property(nc, "spi-cpha", NULL)) |
@@ -72,7 +72,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) | |||
72 | spi_dev_put(spi); | 72 | spi_dev_put(spi); |
73 | continue; | 73 | continue; |
74 | } | 74 | } |
75 | spi->max_speed_hz = *prop; | 75 | spi->max_speed_hz = be32_to_cpup(prop); |
76 | 76 | ||
77 | /* IRQ */ | 77 | /* IRQ */ |
78 | spi->irq = irq_of_parse_and_map(nc, 0); | 78 | spi->irq = irq_of_parse_and_map(nc, 0); |
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b1ecefa2a23d..7858a117e80b 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -21,17 +21,6 @@ config PCI_MSI | |||
21 | 21 | ||
22 | If you don't know what to do here, say N. | 22 | If you don't know what to do here, say N. |
23 | 23 | ||
24 | config PCI_LEGACY | ||
25 | bool "Enable deprecated pci_find_* API" | ||
26 | depends on PCI | ||
27 | default y | ||
28 | help | ||
29 | Say Y here if you want to include support for the deprecated | ||
30 | pci_find_device() API. Most drivers have been converted over | ||
31 | to using the proper hotplug APIs, so this option serves to | ||
32 | include/exclude only a few drivers that are still using this | ||
33 | API. | ||
34 | |||
35 | config PCI_DEBUG | 24 | config PCI_DEBUG |
36 | bool "PCI Debugging" | 25 | bool "PCI Debugging" |
37 | depends on PCI && DEBUG_KERNEL | 26 | depends on PCI && DEBUG_KERNEL |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 4df48d58eaa6..8674c1ebe979 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -2,14 +2,13 @@ | |||
2 | # Makefile for the PCI bus specific drivers. | 2 | # Makefile for the PCI bus specific drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ | 5 | obj-y += access.o bus.o probe.o remove.o pci.o \ |
6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ | 6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ |
7 | irq.o | 7 | irq.o |
8 | obj-$(CONFIG_PROC_FS) += proc.o | 8 | obj-$(CONFIG_PROC_FS) += proc.o |
9 | obj-$(CONFIG_SYSFS) += slot.o | 9 | obj-$(CONFIG_SYSFS) += slot.o |
10 | 10 | ||
11 | obj-$(CONFIG_PCI_LEGACY) += legacy.o | 11 | obj-$(CONFIG_PCI_QUIRKS) += quirks.o |
12 | CFLAGS_legacy.o += -Wno-deprecated-declarations | ||
13 | 12 | ||
14 | # Build PCI Express stuff if needed | 13 | # Build PCI Express stuff if needed |
15 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 14 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index cef28a79103f..712250f5874a 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -17,6 +17,52 @@ | |||
17 | 17 | ||
18 | #include "pci.h" | 18 | #include "pci.h" |
19 | 19 | ||
20 | void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, | ||
21 | unsigned int flags) | ||
22 | { | ||
23 | struct pci_bus_resource *bus_res; | ||
24 | |||
25 | bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL); | ||
26 | if (!bus_res) { | ||
27 | dev_err(&bus->dev, "can't add %pR resource\n", res); | ||
28 | return; | ||
29 | } | ||
30 | |||
31 | bus_res->res = res; | ||
32 | bus_res->flags = flags; | ||
33 | list_add_tail(&bus_res->list, &bus->resources); | ||
34 | } | ||
35 | |||
36 | struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) | ||
37 | { | ||
38 | struct pci_bus_resource *bus_res; | ||
39 | |||
40 | if (n < PCI_BRIDGE_RESOURCE_NUM) | ||
41 | return bus->resource[n]; | ||
42 | |||
43 | n -= PCI_BRIDGE_RESOURCE_NUM; | ||
44 | list_for_each_entry(bus_res, &bus->resources, list) { | ||
45 | if (n-- == 0) | ||
46 | return bus_res->res; | ||
47 | } | ||
48 | return NULL; | ||
49 | } | ||
50 | EXPORT_SYMBOL_GPL(pci_bus_resource_n); | ||
51 | |||
52 | void pci_bus_remove_resources(struct pci_bus *bus) | ||
53 | { | ||
54 | struct pci_bus_resource *bus_res, *tmp; | ||
55 | int i; | ||
56 | |||
57 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) | ||
57 | bus->resource[i] = NULL; | ||
59 | |||
60 | list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { | ||
61 | list_del(&bus_res->list); | ||
62 | kfree(bus_res); | ||
63 | } | ||
64 | } | ||
65 | |||
20 | /** | 66 | /** |
21 | * pci_bus_alloc_resource - allocate a resource from a parent bus | 67 | * pci_bus_alloc_resource - allocate a resource from a parent bus |
22 | * @bus: PCI bus | 68 | * @bus: PCI bus |
@@ -36,11 +82,14 @@ int | |||
36 | pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | 82 | pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, |
37 | resource_size_t size, resource_size_t align, | 83 | resource_size_t size, resource_size_t align, |
38 | resource_size_t min, unsigned int type_mask, | 84 | resource_size_t min, unsigned int type_mask, |
39 | void (*alignf)(void *, struct resource *, resource_size_t, | 85 | resource_size_t (*alignf)(void *, |
40 | resource_size_t), | 86 | const struct resource *, |
87 | resource_size_t, | ||
88 | resource_size_t), | ||
41 | void *alignf_data) | 89 | void *alignf_data) |
42 | { | 90 | { |
43 | int i, ret = -ENOMEM; | 91 | int i, ret = -ENOMEM; |
92 | struct resource *r; | ||
44 | resource_size_t max = -1; | 93 | resource_size_t max = -1; |
45 | 94 | ||
46 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; | 95 | type_mask |= IORESOURCE_IO | IORESOURCE_MEM; |
@@ -49,8 +98,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
49 | if (!(res->flags & IORESOURCE_MEM_64)) | 98 | if (!(res->flags & IORESOURCE_MEM_64)) |
50 | max = PCIBIOS_MAX_MEM_32; | 99 | max = PCIBIOS_MAX_MEM_32; |
51 | 100 | ||
52 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 101 | pci_bus_for_each_resource(bus, r, i) { |
53 | struct resource *r = bus->resource[i]; | ||
54 | if (!r) | 102 | if (!r) |
55 | continue; | 103 | continue; |
56 | 104 | ||
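With the bus resource table now augmented by a list, callers stop indexing bus->resource[] directly and instead walk all resources (fixed table slots plus list entries) via pci_bus_for_each_resource(), as the pci_bus_alloc_resource() hunk above does. A minimal sketch of a consumer using only the helpers added here (the function name is illustrative):

#include <linux/pci.h>

/* Print every resource known to a bus, including list-based ones. */
static void example_dump_bus_resources(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;		/* fixed-table slots may be empty */
		dev_info(&bus->dev, "resource %d: %pR\n", i, res);
	}
}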
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 4dd7114964ac..efa9f2de51c1 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c | |||
@@ -332,8 +332,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) | |||
332 | slot->hotplug_slot->info->attention_status = 0; | 332 | slot->hotplug_slot->info->attention_status = 0; |
333 | slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); | 333 | slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); |
334 | slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); | 334 | slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); |
335 | slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; | ||
336 | slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; | ||
337 | 335 | ||
338 | acpiphp_slot->slot = slot; | 336 | acpiphp_slot->slot = slot; |
339 | snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); | 337 | snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun); |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 8e952fdab764..cb2fd01eddae 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func) | |||
720 | -ret_val); | 720 | -ret_val); |
721 | goto acpiphp_bus_add_out; | 721 | goto acpiphp_bus_add_out; |
722 | } | 722 | } |
723 | /* | ||
724 | * try to start anyway. We could have failed to add | ||
725 | * simply because this bus had previously been added | ||
726 | * on another add. Don't bother with the return value | ||
727 | * we just keep going. | ||
728 | */ | ||
729 | ret_val = acpi_bus_start(device); | 723 | ret_val = acpi_bus_start(device); |
730 | 724 | ||
731 | acpiphp_bus_add_out: | 725 | acpiphp_bus_add_out: |
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c index 148fb463b81c..fb3f84661bdc 100644 --- a/drivers/pci/hotplug/cpcihp_generic.c +++ b/drivers/pci/hotplug/cpcihp_generic.c | |||
@@ -162,6 +162,7 @@ static int __init cpcihp_generic_init(void) | |||
162 | dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0)); | 162 | dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0)); |
163 | if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { | 163 | if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { |
164 | err("Invalid bridge device %s", bridge); | 164 | err("Invalid bridge device %s", bridge); |
165 | pci_dev_put(dev); | ||
165 | return -EINVAL; | 166 | return -EINVAL; |
166 | } | 167 | } |
167 | bus = dev->subordinate; | 168 | bus = dev->subordinate; |
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index 9c6a9fd26812..d8ffc7366801 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h | |||
@@ -310,8 +310,6 @@ struct controller { | |||
310 | u8 first_slot; | 310 | u8 first_slot; |
311 | u8 add_support; | 311 | u8 add_support; |
312 | u8 push_flag; | 312 | u8 push_flag; |
313 | enum pci_bus_speed speed; | ||
314 | enum pci_bus_speed speed_capability; | ||
315 | u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */ | 313 | u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */ |
316 | u8 slot_switch_type; /* 0 = no switch, 1 = switch present */ | 314 | u8 slot_switch_type; /* 0 = no switch, 1 = switch present */ |
317 | u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */ | 315 | u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */ |
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 075b4f4b6e0d..f184d1d2ecbe 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c | |||
@@ -583,30 +583,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
583 | return 0; | 583 | return 0; |
584 | } | 584 | } |
585 | 585 | ||
586 | static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
587 | { | ||
588 | struct slot *slot = hotplug_slot->private; | ||
589 | struct controller *ctrl = slot->ctrl; | ||
590 | |||
591 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | ||
592 | |||
593 | *value = ctrl->speed_capability; | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
599 | { | ||
600 | struct slot *slot = hotplug_slot->private; | ||
601 | struct controller *ctrl = slot->ctrl; | ||
602 | |||
603 | dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); | ||
604 | |||
605 | *value = ctrl->speed; | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { | 586 | static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { |
611 | .set_attention_status = set_attention_status, | 587 | .set_attention_status = set_attention_status, |
612 | .enable_slot = process_SI, | 588 | .enable_slot = process_SI, |
@@ -616,8 +592,6 @@ static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { | |||
616 | .get_attention_status = get_attention_status, | 592 | .get_attention_status = get_attention_status, |
617 | .get_latch_status = get_latch_status, | 593 | .get_latch_status = get_latch_status, |
618 | .get_adapter_status = get_adapter_status, | 594 | .get_adapter_status = get_adapter_status, |
619 | .get_max_bus_speed = get_max_bus_speed, | ||
620 | .get_cur_bus_speed = get_cur_bus_speed, | ||
621 | }; | 595 | }; |
622 | 596 | ||
623 | #define SLOT_NAME_SIZE 10 | 597 | #define SLOT_NAME_SIZE 10 |
@@ -629,6 +603,7 @@ static int ctrl_slot_setup(struct controller *ctrl, | |||
629 | struct slot *slot; | 603 | struct slot *slot; |
630 | struct hotplug_slot *hotplug_slot; | 604 | struct hotplug_slot *hotplug_slot; |
631 | struct hotplug_slot_info *hotplug_slot_info; | 605 | struct hotplug_slot_info *hotplug_slot_info; |
606 | struct pci_bus *bus = ctrl->pci_bus; | ||
632 | u8 number_of_slots; | 607 | u8 number_of_slots; |
633 | u8 slot_device; | 608 | u8 slot_device; |
634 | u8 slot_number; | 609 | u8 slot_number; |
@@ -694,7 +669,7 @@ static int ctrl_slot_setup(struct controller *ctrl, | |||
694 | slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; | 669 | slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; |
695 | if (is_slot66mhz(slot)) | 670 | if (is_slot66mhz(slot)) |
696 | slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; | 671 | slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; |
697 | if (ctrl->speed == PCI_SPEED_66MHz) | 672 | if (bus->cur_bus_speed == PCI_SPEED_66MHz) |
698 | slot->capabilities |= PCISLOT_66_MHZ_OPERATION; | 673 | slot->capabilities |= PCISLOT_66_MHZ_OPERATION; |
699 | 674 | ||
700 | ctrl_slot = | 675 | ctrl_slot = |
@@ -844,6 +819,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
844 | u32 rc; | 819 | u32 rc; |
845 | struct controller *ctrl; | 820 | struct controller *ctrl; |
846 | struct pci_func *func; | 821 | struct pci_func *func; |
822 | struct pci_bus *bus; | ||
847 | int err; | 823 | int err; |
848 | 824 | ||
849 | err = pci_enable_device(pdev); | 825 | err = pci_enable_device(pdev); |
@@ -852,6 +828,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
852 | pci_name(pdev), err); | 828 | pci_name(pdev), err); |
853 | return err; | 829 | return err; |
854 | } | 830 | } |
831 | bus = pdev->subordinate; | ||
855 | 832 | ||
856 | /* Need to read VID early b/c it's used to differentiate CPQ and INTC | 833 | /* Need to read VID early b/c it's used to differentiate CPQ and INTC |
857 | * discovery | 834 | * discovery |
@@ -929,22 +906,22 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
929 | pci_read_config_byte(pdev, 0x41, &bus_cap); | 906 | pci_read_config_byte(pdev, 0x41, &bus_cap); |
930 | if (bus_cap & 0x80) { | 907 | if (bus_cap & 0x80) { |
931 | dbg("bus max supports 133MHz PCI-X\n"); | 908 | dbg("bus max supports 133MHz PCI-X\n"); |
932 | ctrl->speed_capability = PCI_SPEED_133MHz_PCIX; | 909 | bus->max_bus_speed = PCI_SPEED_133MHz_PCIX; |
933 | break; | 910 | break; |
934 | } | 911 | } |
935 | if (bus_cap & 0x40) { | 912 | if (bus_cap & 0x40) { |
936 | dbg("bus max supports 100MHz PCI-X\n"); | 913 | dbg("bus max supports 100MHz PCI-X\n"); |
937 | ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; | 914 | bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; |
938 | break; | 915 | break; |
939 | } | 916 | } |
940 | if (bus_cap & 0x20) { | 917 | if (bus_cap & 0x20) { |
941 | dbg("bus max supports 66MHz PCI-X\n"); | 918 | dbg("bus max supports 66MHz PCI-X\n"); |
942 | ctrl->speed_capability = PCI_SPEED_66MHz_PCIX; | 919 | bus->max_bus_speed = PCI_SPEED_66MHz_PCIX; |
943 | break; | 920 | break; |
944 | } | 921 | } |
945 | if (bus_cap & 0x10) { | 922 | if (bus_cap & 0x10) { |
946 | dbg("bus max supports 66MHz PCI\n"); | 923 | dbg("bus max supports 66MHz PCI\n"); |
947 | ctrl->speed_capability = PCI_SPEED_66MHz; | 924 | bus->max_bus_speed = PCI_SPEED_66MHz; |
948 | break; | 925 | break; |
949 | } | 926 | } |
950 | 927 | ||
@@ -955,7 +932,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
955 | case PCI_SUB_HPC_ID: | 932 | case PCI_SUB_HPC_ID: |
956 | /* Original 6500/7000 implementation */ | 933 | /* Original 6500/7000 implementation */ |
957 | ctrl->slot_switch_type = 1; | 934 | ctrl->slot_switch_type = 1; |
958 | ctrl->speed_capability = PCI_SPEED_33MHz; | 935 | bus->max_bus_speed = PCI_SPEED_33MHz; |
959 | ctrl->push_button = 0; | 936 | ctrl->push_button = 0; |
960 | ctrl->pci_config_space = 1; | 937 | ctrl->pci_config_space = 1; |
961 | ctrl->defeature_PHP = 1; | 938 | ctrl->defeature_PHP = 1; |
@@ -966,7 +943,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
966 | /* First Pushbutton implementation */ | 943 | /* First Pushbutton implementation */ |
967 | ctrl->push_flag = 1; | 944 | ctrl->push_flag = 1; |
968 | ctrl->slot_switch_type = 1; | 945 | ctrl->slot_switch_type = 1; |
969 | ctrl->speed_capability = PCI_SPEED_33MHz; | 946 | bus->max_bus_speed = PCI_SPEED_33MHz; |
970 | ctrl->push_button = 1; | 947 | ctrl->push_button = 1; |
971 | ctrl->pci_config_space = 1; | 948 | ctrl->pci_config_space = 1; |
972 | ctrl->defeature_PHP = 1; | 949 | ctrl->defeature_PHP = 1; |
@@ -976,7 +953,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
976 | case PCI_SUB_HPC_ID_INTC: | 953 | case PCI_SUB_HPC_ID_INTC: |
977 | /* Third party (6500/7000) */ | 954 | /* Third party (6500/7000) */ |
978 | ctrl->slot_switch_type = 1; | 955 | ctrl->slot_switch_type = 1; |
979 | ctrl->speed_capability = PCI_SPEED_33MHz; | 956 | bus->max_bus_speed = PCI_SPEED_33MHz; |
980 | ctrl->push_button = 0; | 957 | ctrl->push_button = 0; |
981 | ctrl->pci_config_space = 1; | 958 | ctrl->pci_config_space = 1; |
982 | ctrl->defeature_PHP = 1; | 959 | ctrl->defeature_PHP = 1; |
@@ -987,7 +964,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
987 | /* First 66 Mhz implementation */ | 964 | /* First 66 Mhz implementation */ |
988 | ctrl->push_flag = 1; | 965 | ctrl->push_flag = 1; |
989 | ctrl->slot_switch_type = 1; | 966 | ctrl->slot_switch_type = 1; |
990 | ctrl->speed_capability = PCI_SPEED_66MHz; | 967 | bus->max_bus_speed = PCI_SPEED_66MHz; |
991 | ctrl->push_button = 1; | 968 | ctrl->push_button = 1; |
992 | ctrl->pci_config_space = 1; | 969 | ctrl->pci_config_space = 1; |
993 | ctrl->defeature_PHP = 1; | 970 | ctrl->defeature_PHP = 1; |
@@ -998,7 +975,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
998 | /* First PCI-X implementation, 100MHz */ | 975 | /* First PCI-X implementation, 100MHz */ |
999 | ctrl->push_flag = 1; | 976 | ctrl->push_flag = 1; |
1000 | ctrl->slot_switch_type = 1; | 977 | ctrl->slot_switch_type = 1; |
1001 | ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; | 978 | bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; |
1002 | ctrl->push_button = 1; | 979 | ctrl->push_button = 1; |
1003 | ctrl->pci_config_space = 1; | 980 | ctrl->pci_config_space = 1; |
1004 | ctrl->defeature_PHP = 1; | 981 | ctrl->defeature_PHP = 1; |
@@ -1015,9 +992,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1015 | case PCI_VENDOR_ID_INTEL: | 992 | case PCI_VENDOR_ID_INTEL: |
1016 | /* Check for speed capability (0=33, 1=66) */ | 993 | /* Check for speed capability (0=33, 1=66) */ |
1017 | if (subsystem_deviceid & 0x0001) | 994 | if (subsystem_deviceid & 0x0001) |
1018 | ctrl->speed_capability = PCI_SPEED_66MHz; | 995 | bus->max_bus_speed = PCI_SPEED_66MHz; |
1019 | else | 996 | else |
1020 | ctrl->speed_capability = PCI_SPEED_33MHz; | 997 | bus->max_bus_speed = PCI_SPEED_33MHz; |
1021 | 998 | ||
1022 | /* Check for push button */ | 999 | /* Check for push button */ |
1023 | if (subsystem_deviceid & 0x0002) | 1000 | if (subsystem_deviceid & 0x0002) |
@@ -1079,7 +1056,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1079 | pdev->bus->number); | 1056 | pdev->bus->number); |
1080 | 1057 | ||
1081 | dbg("Hotplug controller capabilities:\n"); | 1058 | dbg("Hotplug controller capabilities:\n"); |
1082 | dbg(" speed_capability %d\n", ctrl->speed_capability); | 1059 | dbg(" speed_capability %d\n", bus->max_bus_speed); |
1083 | dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? | 1060 | dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? |
1084 | "switch present" : "no switch"); | 1061 | "switch present" : "no switch"); |
1085 | dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? | 1062 | dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? |
@@ -1142,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1142 | } | 1119 | } |
1143 | 1120 | ||
1144 | /* Check for 66Mhz operation */ | 1121 | /* Check for 66Mhz operation */ |
1145 | ctrl->speed = get_controller_speed(ctrl); | 1122 | bus->cur_bus_speed = get_controller_speed(ctrl); |
1146 | 1123 | ||
1147 | 1124 | ||
1148 | /******************************************************** | 1125 | /******************************************************** |
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index 0ff689afa757..e43908d9b5df 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c | |||
@@ -1130,12 +1130,13 @@ static int is_bridge(struct pci_func * func) | |||
1130 | static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) | 1130 | static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) |
1131 | { | 1131 | { |
1132 | struct slot *slot; | 1132 | struct slot *slot; |
1133 | struct pci_bus *bus = ctrl->pci_bus; | ||
1133 | u8 reg; | 1134 | u8 reg; |
1134 | u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); | 1135 | u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); |
1135 | u16 reg16; | 1136 | u16 reg16; |
1136 | u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); | 1137 | u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); |
1137 | 1138 | ||
1138 | if (ctrl->speed == adapter_speed) | 1139 | if (bus->cur_bus_speed == adapter_speed) |
1139 | return 0; | 1140 | return 0; |
1140 | 1141 | ||
1141 | /* We don't allow freq/mode changes if we find another adapter running | 1142 | /* We don't allow freq/mode changes if we find another adapter running |
@@ -1152,7 +1153,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1152 | * lower speed/mode, we allow the new adapter to function at | 1153 | * lower speed/mode, we allow the new adapter to function at |
1153 | * this rate if supported | 1154 | * this rate if supported |
1154 | */ | 1155 | */ |
1155 | if (ctrl->speed < adapter_speed) | 1156 | if (bus->cur_bus_speed < adapter_speed) |
1156 | return 0; | 1157 | return 0; |
1157 | 1158 | ||
1158 | return 1; | 1159 | return 1; |
@@ -1161,20 +1162,20 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1161 | /* If the controller doesn't support freq/mode changes and the | 1162 | /* If the controller doesn't support freq/mode changes and the |
1162 | * controller is running at a higher mode, we bail | 1163 | * controller is running at a higher mode, we bail |
1163 | */ | 1164 | */ |
1164 | if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability)) | 1165 | if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) |
1165 | return 1; | 1166 | return 1; |
1166 | 1167 | ||
1167 | /* But we allow the adapter to run at a lower rate if possible */ | 1168 | /* But we allow the adapter to run at a lower rate if possible */ |
1168 | if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability)) | 1169 | if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) |
1169 | return 0; | 1170 | return 0; |
1170 | 1171 | ||
1171 | /* We try to set the max speed supported by both the adapter and | 1172 | /* We try to set the max speed supported by both the adapter and |
1172 | * controller | 1173 | * controller |
1173 | */ | 1174 | */ |
1174 | if (ctrl->speed_capability < adapter_speed) { | 1175 | if (bus->max_bus_speed < adapter_speed) { |
1175 | if (ctrl->speed == ctrl->speed_capability) | 1176 | if (bus->cur_bus_speed == bus->max_bus_speed) |
1176 | return 0; | 1177 | return 0; |
1177 | adapter_speed = ctrl->speed_capability; | 1178 | adapter_speed = bus->max_bus_speed; |
1178 | } | 1179 | } |
1179 | 1180 | ||
1180 | writel(0x0L, ctrl->hpc_reg + LED_CONTROL); | 1181 | writel(0x0L, ctrl->hpc_reg + LED_CONTROL); |
@@ -1229,8 +1230,8 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1229 | pci_write_config_byte(ctrl->pci_dev, 0x43, reg); | 1230 | pci_write_config_byte(ctrl->pci_dev, 0x43, reg); |
1230 | 1231 | ||
1231 | /* Only if mode change...*/ | 1232 | /* Only if mode change...*/ |
1232 | if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || | 1233 | if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || |
1233 | ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) | 1234 | ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) |
1234 | set_SOGO(ctrl); | 1235 | set_SOGO(ctrl); |
1235 | 1236 | ||
1236 | wait_for_ctrl_irq(ctrl); | 1237 | wait_for_ctrl_irq(ctrl); |
@@ -1243,7 +1244,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1243 | set_SOGO(ctrl); | 1244 | set_SOGO(ctrl); |
1244 | wait_for_ctrl_irq(ctrl); | 1245 | wait_for_ctrl_irq(ctrl); |
1245 | 1246 | ||
1246 | ctrl->speed = adapter_speed; | 1247 | bus->cur_bus_speed = adapter_speed; |
1247 | slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); | 1248 | slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); |
1248 | 1249 | ||
1249 | info("Successfully changed frequency/mode for adapter in slot %d\n", | 1250 | info("Successfully changed frequency/mode for adapter in slot %d\n", |
@@ -1269,6 +1270,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ | |||
1269 | */ | 1270 | */ |
1270 | static u32 board_replaced(struct pci_func *func, struct controller *ctrl) | 1271 | static u32 board_replaced(struct pci_func *func, struct controller *ctrl) |
1271 | { | 1272 | { |
1273 | struct pci_bus *bus = ctrl->pci_bus; | ||
1272 | u8 hp_slot; | 1274 | u8 hp_slot; |
1273 | u8 temp_byte; | 1275 | u8 temp_byte; |
1274 | u8 adapter_speed; | 1276 | u8 adapter_speed; |
@@ -1309,7 +1311,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) | |||
1309 | wait_for_ctrl_irq (ctrl); | 1311 | wait_for_ctrl_irq (ctrl); |
1310 | 1312 | ||
1311 | adapter_speed = get_adapter_speed(ctrl, hp_slot); | 1313 | adapter_speed = get_adapter_speed(ctrl, hp_slot); |
1312 | if (ctrl->speed != adapter_speed) | 1314 | if (bus->cur_bus_speed != adapter_speed) |
1313 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) | 1315 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) |
1314 | rc = WRONG_BUS_FREQUENCY; | 1316 | rc = WRONG_BUS_FREQUENCY; |
1315 | 1317 | ||
@@ -1426,6 +1428,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) | |||
1426 | u32 temp_register = 0xFFFFFFFF; | 1428 | u32 temp_register = 0xFFFFFFFF; |
1427 | u32 rc = 0; | 1429 | u32 rc = 0; |
1428 | struct pci_func *new_slot = NULL; | 1430 | struct pci_func *new_slot = NULL; |
1431 | struct pci_bus *bus = ctrl->pci_bus; | ||
1429 | struct slot *p_slot; | 1432 | struct slot *p_slot; |
1430 | struct resource_lists res_lists; | 1433 | struct resource_lists res_lists; |
1431 | 1434 | ||
@@ -1456,7 +1459,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) | |||
1456 | wait_for_ctrl_irq (ctrl); | 1459 | wait_for_ctrl_irq (ctrl); |
1457 | 1460 | ||
1458 | adapter_speed = get_adapter_speed(ctrl, hp_slot); | 1461 | adapter_speed = get_adapter_speed(ctrl, hp_slot); |
1459 | if (ctrl->speed != adapter_speed) | 1462 | if (bus->cur_bus_speed != adapter_speed) |
1460 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) | 1463 | if (set_controller_speed(ctrl, adapter_speed, hp_slot)) |
1461 | rc = WRONG_BUS_FREQUENCY; | 1464 | rc = WRONG_BUS_FREQUENCY; |
1462 | 1465 | ||
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 7485ffda950c..d934dd4fa873 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
@@ -395,89 +395,40 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value) | |||
395 | return rc; | 395 | return rc; |
396 | } | 396 | } |
397 | 397 | ||
398 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 398 | static int get_max_bus_speed(struct slot *slot) |
399 | { | 399 | { |
400 | int rc = -ENODEV; | 400 | int rc = 0; |
401 | struct slot *pslot; | ||
402 | u8 mode = 0; | 401 | u8 mode = 0; |
402 | enum pci_bus_speed speed; | ||
403 | struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus; | ||
403 | 404 | ||
404 | debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, | 405 | debug("%s - Entry slot[%p]\n", __func__, slot); |
405 | hotplug_slot, value); | ||
406 | 406 | ||
407 | ibmphp_lock_operations(); | 407 | ibmphp_lock_operations(); |
408 | 408 | mode = slot->supported_bus_mode; | |
409 | if (hotplug_slot) { | 409 | speed = slot->supported_speed; |
410 | pslot = hotplug_slot->private; | ||
411 | if (pslot) { | ||
412 | rc = 0; | ||
413 | mode = pslot->supported_bus_mode; | ||
414 | *value = pslot->supported_speed; | ||
415 | switch (*value) { | ||
416 | case BUS_SPEED_33: | ||
417 | break; | ||
418 | case BUS_SPEED_66: | ||
419 | if (mode == BUS_MODE_PCIX) | ||
420 | *value += 0x01; | ||
421 | break; | ||
422 | case BUS_SPEED_100: | ||
423 | case BUS_SPEED_133: | ||
424 | *value = pslot->supported_speed + 0x01; | ||
425 | break; | ||
426 | default: | ||
427 | /* Note (will need to change): there would be soon 256, 512 also */ | ||
428 | rc = -ENODEV; | ||
429 | } | ||
430 | } | ||
431 | } | ||
432 | |||
433 | ibmphp_unlock_operations(); | 410 | ibmphp_unlock_operations(); |
434 | debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); | ||
435 | return rc; | ||
436 | } | ||
437 | 411 | ||
438 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 412 | switch (speed) { |
439 | { | 413 | case BUS_SPEED_33: |
440 | int rc = -ENODEV; | 414 | break; |
441 | struct slot *pslot; | 415 | case BUS_SPEED_66: |
442 | u8 mode = 0; | 416 | if (mode == BUS_MODE_PCIX) |
443 | 417 | speed += 0x01; | |
444 | debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, | 418 | break; |
445 | hotplug_slot, value); | 419 | case BUS_SPEED_100: |
446 | 420 | case BUS_SPEED_133: | |
447 | ibmphp_lock_operations(); | 421 | speed += 0x01; |
448 | 422 | break; | |
449 | if (hotplug_slot) { | 423 | default: |
450 | pslot = hotplug_slot->private; | 424 | /* Note (will need to change): there would be soon 256, 512 also */ |
451 | if (pslot) { | 425 | rc = -ENODEV; |
452 | rc = get_cur_bus_info(&pslot); | ||
453 | if (!rc) { | ||
454 | mode = pslot->bus_on->current_bus_mode; | ||
455 | *value = pslot->bus_on->current_speed; | ||
456 | switch (*value) { | ||
457 | case BUS_SPEED_33: | ||
458 | break; | ||
459 | case BUS_SPEED_66: | ||
460 | if (mode == BUS_MODE_PCIX) | ||
461 | *value += 0x01; | ||
462 | else if (mode == BUS_MODE_PCI) | ||
463 | ; | ||
464 | else | ||
465 | *value = PCI_SPEED_UNKNOWN; | ||
466 | break; | ||
467 | case BUS_SPEED_100: | ||
468 | case BUS_SPEED_133: | ||
469 | *value += 0x01; | ||
470 | break; | ||
471 | default: | ||
472 | /* Note of change: there would also be 256, 512 soon */ | ||
473 | rc = -ENODEV; | ||
474 | } | ||
475 | } | ||
476 | } | ||
477 | } | 426 | } |
478 | 427 | ||
479 | ibmphp_unlock_operations(); | 428 | if (!rc) |
480 | debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); | 429 | bus->max_bus_speed = speed; |
430 | |||
431 | debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed); | ||
481 | return rc; | 432 | return rc; |
482 | } | 433 | } |
483 | 434 | ||
@@ -572,6 +523,7 @@ static int __init init_ops(void) | |||
572 | if (slot_cur->bus_on->current_speed == 0xFF) | 523 | if (slot_cur->bus_on->current_speed == 0xFF) |
573 | if (get_cur_bus_info(&slot_cur)) | 524 | if (get_cur_bus_info(&slot_cur)) |
574 | return -1; | 525 | return -1; |
526 | get_max_bus_speed(slot_cur); | ||
575 | 527 | ||
576 | if (slot_cur->ctrl->options == 0xFF) | 528 | if (slot_cur->ctrl->options == 0xFF) |
577 | if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) | 529 | if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) |
@@ -655,6 +607,7 @@ static int validate(struct slot *slot_cur, int opn) | |||
655 | int ibmphp_update_slot_info(struct slot *slot_cur) | 607 | int ibmphp_update_slot_info(struct slot *slot_cur) |
656 | { | 608 | { |
657 | struct hotplug_slot_info *info; | 609 | struct hotplug_slot_info *info; |
610 | struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus; | ||
658 | int rc; | 611 | int rc; |
659 | u8 bus_speed; | 612 | u8 bus_speed; |
660 | u8 mode; | 613 | u8 mode; |
@@ -700,8 +653,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur) | |||
700 | bus_speed = PCI_SPEED_UNKNOWN; | 653 | bus_speed = PCI_SPEED_UNKNOWN; |
701 | } | 654 | } |
702 | 655 | ||
703 | info->cur_bus_speed = bus_speed; | 656 | bus->cur_bus_speed = bus_speed; |
704 | info->max_bus_speed = slot_cur->hotplug_slot->info->max_bus_speed; | ||
705 | // To do: bus_names | 657 | // To do: bus_names |
706 | 658 | ||
707 | rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); | 659 | rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); |
@@ -1326,8 +1278,6 @@ struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { | |||
1326 | .get_attention_status = get_attention_status, | 1278 | .get_attention_status = get_attention_status, |
1327 | .get_latch_status = get_latch_status, | 1279 | .get_latch_status = get_latch_status, |
1328 | .get_adapter_status = get_adapter_present, | 1280 | .get_adapter_status = get_adapter_present, |
1329 | .get_max_bus_speed = get_max_bus_speed, | ||
1330 | .get_cur_bus_speed = get_cur_bus_speed, | ||
1331 | /* .get_max_adapter_speed = get_max_adapter_speed, | 1281 | /* .get_max_adapter_speed = get_max_adapter_speed, |
1332 | .get_bus_name_status = get_bus_name, | 1282 | .get_bus_name_status = get_bus_name, |
1333 | */ | 1283 | */ |
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index c1abac8ab5c3..5becbdee4027 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
@@ -245,7 +245,7 @@ static void __init print_ebda_hpc (void) | |||
245 | 245 | ||
246 | int __init ibmphp_access_ebda (void) | 246 | int __init ibmphp_access_ebda (void) |
247 | { | 247 | { |
248 | u8 format, num_ctlrs, rio_complete, hs_complete; | 248 | u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz; |
249 | u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; | 249 | u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; |
250 | int rc = 0; | 250 | int rc = 0; |
251 | 251 | ||
@@ -260,7 +260,16 @@ int __init ibmphp_access_ebda (void) | |||
260 | iounmap (io_mem); | 260 | iounmap (io_mem); |
261 | debug ("returned ebda segment: %x\n", ebda_seg); | 261 | debug ("returned ebda segment: %x\n", ebda_seg); |
262 | 262 | ||
263 | io_mem = ioremap(ebda_seg<<4, 1024); | 263 | io_mem = ioremap(ebda_seg<<4, 1); |
264 | if (!io_mem) | ||
265 | return -ENOMEM; | ||
266 | ebda_sz = readb(io_mem); | ||
267 | iounmap(io_mem); | ||
268 | debug("ebda size: %d(KiB)\n", ebda_sz); | ||
269 | if (ebda_sz == 0) | ||
270 | return -ENOMEM; | ||
271 | |||
272 | io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024)); | ||
264 | if (!io_mem ) | 273 | if (!io_mem ) |
265 | return -ENOMEM; | 274 | return -ENOMEM; |
266 | next_offset = 0x180; | 275 | next_offset = 0x180; |
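The ibmphp_ebda.c hunk replaces a hard-coded 1 KiB mapping with a two-stage one: map a single byte to read the EBDA size (stored in KiB at its first byte), then remap exactly that many bytes. A minimal sketch of the same pattern as a standalone helper (illustrative only, not part of the patch):

#include <linux/io.h>

/* Map the EBDA after discovering its real size from its first byte. */
static void __iomem *example_map_ebda(u16 ebda_seg)
{
	void __iomem *io;
	u8 size_kib;

	io = ioremap(ebda_seg << 4, 1);
	if (!io)
		return NULL;
	size_kib = readb(io);		/* EBDA length in KiB */
	iounmap(io);
	if (!size_kib)
		return NULL;

	return ioremap(ebda_seg << 4, size_kib * 1024);
}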
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index c7084f0eca5a..1aaf3f32d3cd 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/semaphore.h> | ||
38 | #include <linux/kthread.h> | 39 | #include <linux/kthread.h> |
39 | #include "ibmphp.h" | 40 | #include "ibmphp.h" |
40 | 41 | ||
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 38183a534b65..728b119f71ad 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c | |||
@@ -64,32 +64,6 @@ static int debug; | |||
64 | static LIST_HEAD(pci_hotplug_slot_list); | 64 | static LIST_HEAD(pci_hotplug_slot_list); |
65 | static DEFINE_MUTEX(pci_hp_mutex); | 65 | static DEFINE_MUTEX(pci_hp_mutex); |
66 | 66 | ||
67 | /* these strings match up with the values in pci_bus_speed */ | ||
68 | static char *pci_bus_speed_strings[] = { | ||
69 | "33 MHz PCI", /* 0x00 */ | ||
70 | "66 MHz PCI", /* 0x01 */ | ||
71 | "66 MHz PCI-X", /* 0x02 */ | ||
72 | "100 MHz PCI-X", /* 0x03 */ | ||
73 | "133 MHz PCI-X", /* 0x04 */ | ||
74 | NULL, /* 0x05 */ | ||
75 | NULL, /* 0x06 */ | ||
76 | NULL, /* 0x07 */ | ||
77 | NULL, /* 0x08 */ | ||
78 | "66 MHz PCI-X 266", /* 0x09 */ | ||
79 | "100 MHz PCI-X 266", /* 0x0a */ | ||
80 | "133 MHz PCI-X 266", /* 0x0b */ | ||
81 | NULL, /* 0x0c */ | ||
82 | NULL, /* 0x0d */ | ||
83 | NULL, /* 0x0e */ | ||
84 | NULL, /* 0x0f */ | ||
85 | NULL, /* 0x10 */ | ||
86 | "66 MHz PCI-X 533", /* 0x11 */ | ||
87 | "100 MHz PCI-X 533", /* 0x12 */ | ||
88 | "133 MHz PCI-X 533", /* 0x13 */ | ||
89 | "2.5 GT/s PCIe", /* 0x14 */ | ||
90 | "5.0 GT/s PCIe", /* 0x15 */ | ||
91 | }; | ||
92 | |||
93 | #ifdef CONFIG_HOTPLUG_PCI_CPCI | 67 | #ifdef CONFIG_HOTPLUG_PCI_CPCI |
94 | extern int cpci_hotplug_init(int debug); | 68 | extern int cpci_hotplug_init(int debug); |
95 | extern void cpci_hotplug_exit(void); | 69 | extern void cpci_hotplug_exit(void); |
@@ -118,8 +92,6 @@ GET_STATUS(power_status, u8) | |||
118 | GET_STATUS(attention_status, u8) | 92 | GET_STATUS(attention_status, u8) |
119 | GET_STATUS(latch_status, u8) | 93 | GET_STATUS(latch_status, u8) |
120 | GET_STATUS(adapter_status, u8) | 94 | GET_STATUS(adapter_status, u8) |
121 | GET_STATUS(max_bus_speed, enum pci_bus_speed) | ||
122 | GET_STATUS(cur_bus_speed, enum pci_bus_speed) | ||
123 | 95 | ||
124 | static ssize_t power_read_file(struct pci_slot *slot, char *buf) | 96 | static ssize_t power_read_file(struct pci_slot *slot, char *buf) |
125 | { | 97 | { |
@@ -263,60 +235,6 @@ static struct pci_slot_attribute hotplug_slot_attr_presence = { | |||
263 | .show = presence_read_file, | 235 | .show = presence_read_file, |
264 | }; | 236 | }; |
265 | 237 | ||
266 | static char *unknown_speed = "Unknown bus speed"; | ||
267 | |||
268 | static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf) | ||
269 | { | ||
270 | char *speed_string; | ||
271 | int retval; | ||
272 | enum pci_bus_speed value; | ||
273 | |||
274 | retval = get_max_bus_speed(slot->hotplug, &value); | ||
275 | if (retval) | ||
276 | goto exit; | ||
277 | |||
278 | if (value == PCI_SPEED_UNKNOWN) | ||
279 | speed_string = unknown_speed; | ||
280 | else | ||
281 | speed_string = pci_bus_speed_strings[value]; | ||
282 | |||
283 | retval = sprintf (buf, "%s\n", speed_string); | ||
284 | |||
285 | exit: | ||
286 | return retval; | ||
287 | } | ||
288 | |||
289 | static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = { | ||
290 | .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, | ||
291 | .show = max_bus_speed_read_file, | ||
292 | }; | ||
293 | |||
294 | static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf) | ||
295 | { | ||
296 | char *speed_string; | ||
297 | int retval; | ||
298 | enum pci_bus_speed value; | ||
299 | |||
300 | retval = get_cur_bus_speed(slot->hotplug, &value); | ||
301 | if (retval) | ||
302 | goto exit; | ||
303 | |||
304 | if (value == PCI_SPEED_UNKNOWN) | ||
305 | speed_string = unknown_speed; | ||
306 | else | ||
307 | speed_string = pci_bus_speed_strings[value]; | ||
308 | |||
309 | retval = sprintf (buf, "%s\n", speed_string); | ||
310 | |||
311 | exit: | ||
312 | return retval; | ||
313 | } | ||
314 | |||
315 | static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = { | ||
316 | .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, | ||
317 | .show = cur_bus_speed_read_file, | ||
318 | }; | ||
319 | |||
320 | static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, | 238 | static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, |
321 | size_t count) | 239 | size_t count) |
322 | { | 240 | { |
@@ -391,26 +309,6 @@ static bool has_adapter_file(struct pci_slot *pci_slot) | |||
391 | return false; | 309 | return false; |
392 | } | 310 | } |
393 | 311 | ||
394 | static bool has_max_bus_speed_file(struct pci_slot *pci_slot) | ||
395 | { | ||
396 | struct hotplug_slot *slot = pci_slot->hotplug; | ||
397 | if ((!slot) || (!slot->ops)) | ||
398 | return false; | ||
399 | if (slot->ops->get_max_bus_speed) | ||
400 | return true; | ||
401 | return false; | ||
402 | } | ||
403 | |||
404 | static bool has_cur_bus_speed_file(struct pci_slot *pci_slot) | ||
405 | { | ||
406 | struct hotplug_slot *slot = pci_slot->hotplug; | ||
407 | if ((!slot) || (!slot->ops)) | ||
408 | return false; | ||
409 | if (slot->ops->get_cur_bus_speed) | ||
410 | return true; | ||
411 | return false; | ||
412 | } | ||
413 | |||
414 | static bool has_test_file(struct pci_slot *pci_slot) | 312 | static bool has_test_file(struct pci_slot *pci_slot) |
415 | { | 313 | { |
416 | struct hotplug_slot *slot = pci_slot->hotplug; | 314 | struct hotplug_slot *slot = pci_slot->hotplug; |
@@ -456,20 +354,6 @@ static int fs_add_slot(struct pci_slot *slot) | |||
456 | goto exit_adapter; | 354 | goto exit_adapter; |
457 | } | 355 | } |
458 | 356 | ||
459 | if (has_max_bus_speed_file(slot)) { | ||
460 | retval = sysfs_create_file(&slot->kobj, | ||
461 | &hotplug_slot_attr_max_bus_speed.attr); | ||
462 | if (retval) | ||
463 | goto exit_max_speed; | ||
464 | } | ||
465 | |||
466 | if (has_cur_bus_speed_file(slot)) { | ||
467 | retval = sysfs_create_file(&slot->kobj, | ||
468 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
469 | if (retval) | ||
470 | goto exit_cur_speed; | ||
471 | } | ||
472 | |||
473 | if (has_test_file(slot)) { | 357 | if (has_test_file(slot)) { |
474 | retval = sysfs_create_file(&slot->kobj, | 358 | retval = sysfs_create_file(&slot->kobj, |
475 | &hotplug_slot_attr_test.attr); | 359 | &hotplug_slot_attr_test.attr); |
@@ -480,14 +364,6 @@ static int fs_add_slot(struct pci_slot *slot) | |||
480 | goto exit; | 364 | goto exit; |
481 | 365 | ||
482 | exit_test: | 366 | exit_test: |
483 | if (has_cur_bus_speed_file(slot)) | ||
484 | sysfs_remove_file(&slot->kobj, | ||
485 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
486 | exit_cur_speed: | ||
487 | if (has_max_bus_speed_file(slot)) | ||
488 | sysfs_remove_file(&slot->kobj, | ||
489 | &hotplug_slot_attr_max_bus_speed.attr); | ||
490 | exit_max_speed: | ||
491 | if (has_adapter_file(slot)) | 367 | if (has_adapter_file(slot)) |
492 | sysfs_remove_file(&slot->kobj, | 368 | sysfs_remove_file(&slot->kobj, |
493 | &hotplug_slot_attr_presence.attr); | 369 | &hotplug_slot_attr_presence.attr); |
@@ -523,14 +399,6 @@ static void fs_remove_slot(struct pci_slot *slot) | |||
523 | sysfs_remove_file(&slot->kobj, | 399 | sysfs_remove_file(&slot->kobj, |
524 | &hotplug_slot_attr_presence.attr); | 400 | &hotplug_slot_attr_presence.attr); |
525 | 401 | ||
526 | if (has_max_bus_speed_file(slot)) | ||
527 | sysfs_remove_file(&slot->kobj, | ||
528 | &hotplug_slot_attr_max_bus_speed.attr); | ||
529 | |||
530 | if (has_cur_bus_speed_file(slot)) | ||
531 | sysfs_remove_file(&slot->kobj, | ||
532 | &hotplug_slot_attr_cur_bus_speed.attr); | ||
533 | |||
534 | if (has_test_file(slot)) | 402 | if (has_test_file(slot)) |
535 | sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); | 403 | sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr); |
536 | 404 | ||
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 5674b2075bdc..920f820edf87 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
@@ -69,8 +69,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); | |||
69 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); | 69 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); |
70 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); | 70 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); |
71 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); | 71 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); |
72 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
73 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
74 | 72 | ||
75 | /** | 73 | /** |
76 | * release_slot - free up the memory used by a slot | 74 | * release_slot - free up the memory used by a slot |
@@ -113,8 +111,6 @@ static int init_slot(struct controller *ctrl) | |||
113 | ops->disable_slot = disable_slot; | 111 | ops->disable_slot = disable_slot; |
114 | ops->get_power_status = get_power_status; | 112 | ops->get_power_status = get_power_status; |
115 | ops->get_adapter_status = get_adapter_status; | 113 | ops->get_adapter_status = get_adapter_status; |
116 | ops->get_max_bus_speed = get_max_bus_speed; | ||
117 | ops->get_cur_bus_speed = get_cur_bus_speed; | ||
118 | if (MRL_SENS(ctrl)) | 114 | if (MRL_SENS(ctrl)) |
119 | ops->get_latch_status = get_latch_status; | 115 | ops->get_latch_status = get_latch_status; |
120 | if (ATTN_LED(ctrl)) { | 116 | if (ATTN_LED(ctrl)) { |
@@ -227,27 +223,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | |||
227 | return pciehp_get_adapter_status(slot, value); | 223 | return pciehp_get_adapter_status(slot, value); |
228 | } | 224 | } |
229 | 225 | ||
230 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | ||
231 | enum pci_bus_speed *value) | ||
232 | { | ||
233 | struct slot *slot = hotplug_slot->private; | ||
234 | |||
235 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
236 | __func__, slot_name(slot)); | ||
237 | |||
238 | return pciehp_get_max_link_speed(slot, value); | ||
239 | } | ||
240 | |||
241 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
242 | { | ||
243 | struct slot *slot = hotplug_slot->private; | ||
244 | |||
245 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
246 | __func__, slot_name(slot)); | ||
247 | |||
248 | return pciehp_get_cur_link_speed(slot, value); | ||
249 | } | ||
250 | |||
251 | static int pciehp_probe(struct pcie_device *dev) | 226 | static int pciehp_probe(struct pcie_device *dev) |
252 | { | 227 | { |
253 | int rc; | 228 | int rc; |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index d6ac1b261dd9..9a7f247e8ac1 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -341,6 +341,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) | |||
341 | p_slot->state = POWERON_STATE; | 341 | p_slot->state = POWERON_STATE; |
342 | break; | 342 | break; |
343 | default: | 343 | default: |
344 | kfree(info); | ||
344 | goto out; | 345 | goto out; |
345 | } | 346 | } |
346 | queue_work(pciehp_wq, &info->work); | 347 | queue_work(pciehp_wq, &info->work); |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 10040d58c8ef..40b48f569b1e 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -492,6 +492,7 @@ int pciehp_power_on_slot(struct slot * slot) | |||
492 | u16 slot_cmd; | 492 | u16 slot_cmd; |
493 | u16 cmd_mask; | 493 | u16 cmd_mask; |
494 | u16 slot_status; | 494 | u16 slot_status; |
495 | u16 lnk_status; | ||
495 | int retval = 0; | 496 | int retval = 0; |
496 | 497 | ||
497 | /* Clear sticky power-fault bit from previous power failures */ | 498 | /* Clear sticky power-fault bit from previous power failures */ |
@@ -523,6 +524,14 @@ int pciehp_power_on_slot(struct slot * slot) | |||
523 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, | 524 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
524 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); | 525 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
525 | 526 | ||
527 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | ||
528 | if (retval) { | ||
529 | ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n", | ||
530 | __func__); | ||
531 | return retval; | ||
532 | } | ||
533 | pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); | ||
534 | |||
526 | return retval; | 535 | return retval; |
527 | } | 536 | } |
528 | 537 | ||
@@ -610,37 +619,6 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
610 | return IRQ_HANDLED; | 619 | return IRQ_HANDLED; |
611 | } | 620 | } |
612 | 621 | ||
613 | int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value) | ||
614 | { | ||
615 | struct controller *ctrl = slot->ctrl; | ||
616 | enum pcie_link_speed lnk_speed; | ||
617 | u32 lnk_cap; | ||
618 | int retval = 0; | ||
619 | |||
620 | retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); | ||
621 | if (retval) { | ||
622 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); | ||
623 | return retval; | ||
624 | } | ||
625 | |||
626 | switch (lnk_cap & 0x000F) { | ||
627 | case 1: | ||
628 | lnk_speed = PCIE_2_5GB; | ||
629 | break; | ||
630 | case 2: | ||
631 | lnk_speed = PCIE_5_0GB; | ||
632 | break; | ||
633 | default: | ||
634 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | *value = lnk_speed; | ||
639 | ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed); | ||
640 | |||
641 | return retval; | ||
642 | } | ||
643 | |||
644 | int pciehp_get_max_lnk_width(struct slot *slot, | 622 | int pciehp_get_max_lnk_width(struct slot *slot, |
645 | enum pcie_link_width *value) | 623 | enum pcie_link_width *value) |
646 | { | 624 | { |
@@ -691,38 +669,6 @@ int pciehp_get_max_lnk_width(struct slot *slot, | |||
691 | return retval; | 669 | return retval; |
692 | } | 670 | } |
693 | 671 | ||
694 | int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value) | ||
695 | { | ||
696 | struct controller *ctrl = slot->ctrl; | ||
697 | enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; | ||
698 | int retval = 0; | ||
699 | u16 lnk_status; | ||
700 | |||
701 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); | ||
702 | if (retval) { | ||
703 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", | ||
704 | __func__); | ||
705 | return retval; | ||
706 | } | ||
707 | |||
708 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { | ||
709 | case 1: | ||
710 | lnk_speed = PCIE_2_5GB; | ||
711 | break; | ||
712 | case 2: | ||
713 | lnk_speed = PCIE_5_0GB; | ||
714 | break; | ||
715 | default: | ||
716 | lnk_speed = PCIE_LNK_SPEED_UNKNOWN; | ||
717 | break; | ||
718 | } | ||
719 | |||
720 | *value = lnk_speed; | ||
721 | ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed); | ||
722 | |||
723 | return retval; | ||
724 | } | ||
725 | |||
726 | int pciehp_get_cur_lnk_width(struct slot *slot, | 672 | int pciehp_get_cur_lnk_width(struct slot *slot, |
727 | enum pcie_link_width *value) | 673 | enum pcie_link_width *value) |
728 | { | 674 | { |
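Rather than having each hotplug driver decode LNKCAP/LNKSTA into an enum pci_bus_speed, pciehp now reads the link status once after power-on and hands it to the PCI core, which caches the current speed on the subordinate bus. A minimal sketch of that step, reusing the names from the hunks above (a sketch only, assuming pciehp's internal pciehp_readw() helper and headers):

/* Refresh the core's cached link speed after bringing up the slot. */
static int example_refresh_link_speed(struct controller *ctrl)
{
	u16 lnk_status;
	int retval;

	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
	if (retval)
		return retval;

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
	return 0;
}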
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 21733108adde..0a16444c14c9 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -53,17 +53,15 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev) | |||
53 | busnr = pci_scan_bridge(parent, dev, busnr, pass); | 53 | busnr = pci_scan_bridge(parent, dev, busnr, pass); |
54 | if (!dev->subordinate) | 54 | if (!dev->subordinate) |
55 | return -1; | 55 | return -1; |
56 | pci_bus_size_bridges(dev->subordinate); | 56 | |
57 | pci_bus_assign_resources(parent); | ||
58 | pci_enable_bridges(parent); | ||
59 | pci_bus_add_devices(parent); | ||
60 | return 0; | 57 | return 0; |
61 | } | 58 | } |
62 | 59 | ||
63 | int pciehp_configure_device(struct slot *p_slot) | 60 | int pciehp_configure_device(struct slot *p_slot) |
64 | { | 61 | { |
65 | struct pci_dev *dev; | 62 | struct pci_dev *dev; |
66 | struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; | 63 | struct pci_dev *bridge = p_slot->ctrl->pcie->port; |
64 | struct pci_bus *parent = bridge->subordinate; | ||
67 | int num, fn; | 65 | int num, fn; |
68 | struct controller *ctrl = p_slot->ctrl; | 66 | struct controller *ctrl = p_slot->ctrl; |
69 | 67 | ||
@@ -96,12 +94,25 @@ int pciehp_configure_device(struct slot *p_slot) | |||
96 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { | 94 | (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { |
97 | pciehp_add_bridge(dev); | 95 | pciehp_add_bridge(dev); |
98 | } | 96 | } |
97 | pci_dev_put(dev); | ||
98 | } | ||
99 | |||
100 | pci_assign_unassigned_bridge_resources(bridge); | ||
101 | |||
102 | for (fn = 0; fn < 8; fn++) { | ||
103 | dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); | ||
104 | if (!dev) | ||
105 | continue; | ||
106 | if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { | ||
107 | pci_dev_put(dev); | ||
108 | continue; | ||
109 | } | ||
99 | pci_configure_slot(dev); | 110 | pci_configure_slot(dev); |
100 | pci_dev_put(dev); | 111 | pci_dev_put(dev); |
101 | } | 112 | } |
102 | 113 | ||
103 | pci_bus_assign_resources(parent); | ||
104 | pci_bus_add_devices(parent); | 114 | pci_bus_add_devices(parent); |
115 | |||
105 | return 0; | 116 | return 0; |
106 | } | 117 | } |
107 | 118 | ||
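The reworked pciehp_configure_device() sizes and assigns bridge windows in one pass with pci_assign_unassigned_bridge_resources() before configuring each function, instead of sizing and assigning per discovered bridge. A condensed sketch of the resulting flow, assuming the same helpers used in the hunk (illustrative; error handling and the display-class check are omitted here):

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

/* Scan, let the core assign bridge resources, then configure and add. */
static void example_configure_flow(struct pci_dev *bridge, struct pci_bus *parent)
{
	struct pci_dev *dev;
	int fn;

	pci_scan_slot(parent, PCI_DEVFN(0, 0));
	pci_assign_unassigned_bridge_resources(bridge);

	for (fn = 0; fn < 8; fn++) {
		dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
		if (!dev)
			continue;
		pci_configure_slot(dev);	/* real code skips display devices */
		pci_dev_put(dev);
	}
	pci_bus_add_devices(parent);
}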
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index c159223389ec..dcaae725fd79 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c | |||
@@ -130,10 +130,9 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value) | |||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 133 | static enum pci_bus_speed get_max_bus_speed(struct slot *slot) |
134 | { | 134 | { |
135 | struct slot *slot = (struct slot *)hotplug_slot->private; | 135 | enum pci_bus_speed speed; |
136 | |||
137 | switch (slot->type) { | 136 | switch (slot->type) { |
138 | case 1: | 137 | case 1: |
139 | case 2: | 138 | case 2: |
@@ -141,30 +140,30 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe | |||
141 | case 4: | 140 | case 4: |
142 | case 5: | 141 | case 5: |
143 | case 6: | 142 | case 6: |
144 | *value = PCI_SPEED_33MHz; /* speed for case 1-6 */ | 143 | speed = PCI_SPEED_33MHz; /* speed for case 1-6 */ |
145 | break; | 144 | break; |
146 | case 7: | 145 | case 7: |
147 | case 8: | 146 | case 8: |
148 | *value = PCI_SPEED_66MHz; | 147 | speed = PCI_SPEED_66MHz; |
149 | break; | 148 | break; |
150 | case 11: | 149 | case 11: |
151 | case 14: | 150 | case 14: |
152 | *value = PCI_SPEED_66MHz_PCIX; | 151 | speed = PCI_SPEED_66MHz_PCIX; |
153 | break; | 152 | break; |
154 | case 12: | 153 | case 12: |
155 | case 15: | 154 | case 15: |
156 | *value = PCI_SPEED_100MHz_PCIX; | 155 | speed = PCI_SPEED_100MHz_PCIX; |
157 | break; | 156 | break; |
158 | case 13: | 157 | case 13: |
159 | case 16: | 158 | case 16: |
160 | *value = PCI_SPEED_133MHz_PCIX; | 159 | speed = PCI_SPEED_133MHz_PCIX; |
161 | break; | 160 | break; |
162 | default: | 161 | default: |
163 | *value = PCI_SPEED_UNKNOWN; | 162 | speed = PCI_SPEED_UNKNOWN; |
164 | break; | 163 | break; |
165 | |||
166 | } | 164 | } |
167 | return 0; | 165 | |
166 | return speed; | ||
168 | } | 167 | } |
169 | 168 | ||
170 | static int get_children_props(struct device_node *dn, const int **drc_indexes, | 169 | static int get_children_props(struct device_node *dn, const int **drc_indexes, |
@@ -408,6 +407,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) | |||
408 | slot->state = NOT_VALID; | 407 | slot->state = NOT_VALID; |
409 | return -EINVAL; | 408 | return -EINVAL; |
410 | } | 409 | } |
410 | |||
411 | slot->bus->max_bus_speed = get_max_bus_speed(slot); | ||
411 | return 0; | 412 | return 0; |
412 | } | 413 | } |
413 | 414 | ||
@@ -429,7 +430,6 @@ struct hotplug_slot_ops rpaphp_hotplug_slot_ops = { | |||
429 | .get_power_status = get_power_status, | 430 | .get_power_status = get_power_status, |
430 | .get_attention_status = get_attention_status, | 431 | .get_attention_status = get_attention_status, |
431 | .get_adapter_status = get_adapter_status, | 432 | .get_adapter_status = get_adapter_status, |
432 | .get_max_bus_speed = get_max_bus_speed, | ||
433 | }; | 433 | }; |
434 | 434 | ||
435 | module_init(rpaphp_init); | 435 | module_init(rpaphp_init); |
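With the ->get_max_bus_speed callback removed, rpaphp records the detected speed directly in the pci_bus at enable_slot() time, so generic code can read the cached field instead of going through hotplug_slot_ops. A minimal consumer sketch, assuming the bus pointer is valid:

    /* Sketch: read the speed cached by the hotplug driver;
     * PCI_SPEED_UNKNOWN means the platform did not report one. */
    static enum pci_bus_speed slot_speed(struct pci_bus *bus)
    {
    	return bus ? bus->max_bus_speed : PCI_SPEED_UNKNOWN;
    }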
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 8e210cd76e55..d2627e1c3ac1 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -333,8 +333,6 @@ struct hpc_ops { | |||
333 | int (*set_attention_status)(struct slot *slot, u8 status); | 333 | int (*set_attention_status)(struct slot *slot, u8 status); |
334 | int (*get_latch_status)(struct slot *slot, u8 *status); | 334 | int (*get_latch_status)(struct slot *slot, u8 *status); |
335 | int (*get_adapter_status)(struct slot *slot, u8 *status); | 335 | int (*get_adapter_status)(struct slot *slot, u8 *status); |
336 | int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
337 | int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); | ||
338 | int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); | 336 | int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); |
339 | int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); | 337 | int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); |
340 | int (*get_prog_int)(struct slot *slot, u8 *prog_int); | 338 | int (*get_prog_int)(struct slot *slot, u8 *prog_int); |
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 8a520a3d0f59..a5062297f488 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -65,8 +65,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); | |||
65 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); | 65 | static int get_attention_status (struct hotplug_slot *slot, u8 *value); |
66 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); | 66 | static int get_latch_status (struct hotplug_slot *slot, u8 *value); |
67 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); | 67 | static int get_adapter_status (struct hotplug_slot *slot, u8 *value); |
68 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
69 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | ||
70 | 68 | ||
71 | static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { | 69 | static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { |
72 | .set_attention_status = set_attention_status, | 70 | .set_attention_status = set_attention_status, |
@@ -76,8 +74,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { | |||
76 | .get_attention_status = get_attention_status, | 74 | .get_attention_status = get_attention_status, |
77 | .get_latch_status = get_latch_status, | 75 | .get_latch_status = get_latch_status, |
78 | .get_adapter_status = get_adapter_status, | 76 | .get_adapter_status = get_adapter_status, |
79 | .get_max_bus_speed = get_max_bus_speed, | ||
80 | .get_cur_bus_speed = get_cur_bus_speed, | ||
81 | }; | 77 | }; |
82 | 78 | ||
83 | /** | 79 | /** |
@@ -279,37 +275,6 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) | |||
279 | return 0; | 275 | return 0; |
280 | } | 276 | } |
281 | 277 | ||
282 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | ||
283 | enum pci_bus_speed *value) | ||
284 | { | ||
285 | struct slot *slot = get_slot(hotplug_slot); | ||
286 | int retval; | ||
287 | |||
288 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
289 | __func__, slot_name(slot)); | ||
290 | |||
291 | retval = slot->hpc_ops->get_max_bus_speed(slot, value); | ||
292 | if (retval < 0) | ||
293 | *value = PCI_SPEED_UNKNOWN; | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | ||
299 | { | ||
300 | struct slot *slot = get_slot(hotplug_slot); | ||
301 | int retval; | ||
302 | |||
303 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | ||
304 | __func__, slot_name(slot)); | ||
305 | |||
306 | retval = slot->hpc_ops->get_cur_bus_speed(slot, value); | ||
307 | if (retval < 0) | ||
308 | *value = PCI_SPEED_UNKNOWN; | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static int is_shpc_capable(struct pci_dev *dev) | 278 | static int is_shpc_capable(struct pci_dev *dev) |
314 | { | 279 | { |
315 | if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == | 280 | if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == |
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index b8ab2796e66a..3bba0c0888ff 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
@@ -285,17 +285,8 @@ static int board_added(struct slot *p_slot) | |||
285 | return WRONG_BUS_FREQUENCY; | 285 | return WRONG_BUS_FREQUENCY; |
286 | } | 286 | } |
287 | 287 | ||
288 | rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); | 288 | bsp = ctrl->pci_dev->bus->cur_bus_speed; |
289 | if (rc) { | 289 | msp = ctrl->pci_dev->bus->max_bus_speed; |
290 | ctrl_err(ctrl, "Can't get bus operation speed\n"); | ||
291 | return WRONG_BUS_FREQUENCY; | ||
292 | } | ||
293 | |||
294 | rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); | ||
295 | if (rc) { | ||
296 | ctrl_err(ctrl, "Can't get max bus operation speed\n"); | ||
297 | msp = bsp; | ||
298 | } | ||
299 | 290 | ||
300 | /* Check if there are other slots or devices on the same bus */ | 291 | /* Check if there are other slots or devices on the same bus */ |
301 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) | 292 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) |
@@ -462,6 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) | |||
462 | p_slot->state = POWERON_STATE; | 453 | p_slot->state = POWERON_STATE; |
463 | break; | 454 | break; |
464 | default: | 455 | default: |
456 | kfree(info); | ||
465 | goto out; | 457 | goto out; |
466 | } | 458 | } |
467 | queue_work(shpchp_wq, &info->work); | 459 | queue_work(shpchp_wq, &info->work); |
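The added kfree(info) plugs a leak: the work item allocated by the caller was dropped without being freed when the slot was in an unexpected state. A generic sketch of the ownership rule, using a hypothetical my_work wrapper:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_work {
    	struct work_struct work;
    };

    /* Ownership passes to the workqueue only on a successful queue_work();
     * every other exit path must free the allocation itself. */
    static int queue_or_free(struct workqueue_struct *wq, struct my_work *info,
    			 bool state_ok)
    {
    	if (!state_ok) {
    		kfree(info);
    		return -EINVAL;
    	}
    	queue_work(wq, &info->work);
    	return 0;
    }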
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 86dc39847769..5f5e8d2e3552 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c | |||
@@ -660,6 +660,75 @@ static int hpc_slot_disable(struct slot * slot) | |||
660 | return retval; | 660 | return retval; |
661 | } | 661 | } |
662 | 662 | ||
663 | static int shpc_get_cur_bus_speed(struct controller *ctrl) | ||
664 | { | ||
665 | int retval = 0; | ||
666 | struct pci_bus *bus = ctrl->pci_dev->subordinate; | ||
667 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | ||
668 | u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); | ||
669 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | ||
670 | u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); | ||
671 | |||
672 | if ((pi == 1) && (speed_mode > 4)) { | ||
673 | retval = -ENODEV; | ||
674 | goto out; | ||
675 | } | ||
676 | |||
677 | switch (speed_mode) { | ||
678 | case 0x0: | ||
679 | bus_speed = PCI_SPEED_33MHz; | ||
680 | break; | ||
681 | case 0x1: | ||
682 | bus_speed = PCI_SPEED_66MHz; | ||
683 | break; | ||
684 | case 0x2: | ||
685 | bus_speed = PCI_SPEED_66MHz_PCIX; | ||
686 | break; | ||
687 | case 0x3: | ||
688 | bus_speed = PCI_SPEED_100MHz_PCIX; | ||
689 | break; | ||
690 | case 0x4: | ||
691 | bus_speed = PCI_SPEED_133MHz_PCIX; | ||
692 | break; | ||
693 | case 0x5: | ||
694 | bus_speed = PCI_SPEED_66MHz_PCIX_ECC; | ||
695 | break; | ||
696 | case 0x6: | ||
697 | bus_speed = PCI_SPEED_100MHz_PCIX_ECC; | ||
698 | break; | ||
699 | case 0x7: | ||
700 | bus_speed = PCI_SPEED_133MHz_PCIX_ECC; | ||
701 | break; | ||
702 | case 0x8: | ||
703 | bus_speed = PCI_SPEED_66MHz_PCIX_266; | ||
704 | break; | ||
705 | case 0x9: | ||
706 | bus_speed = PCI_SPEED_100MHz_PCIX_266; | ||
707 | break; | ||
708 | case 0xa: | ||
709 | bus_speed = PCI_SPEED_133MHz_PCIX_266; | ||
710 | break; | ||
711 | case 0xb: | ||
712 | bus_speed = PCI_SPEED_66MHz_PCIX_533; | ||
713 | break; | ||
714 | case 0xc: | ||
715 | bus_speed = PCI_SPEED_100MHz_PCIX_533; | ||
716 | break; | ||
717 | case 0xd: | ||
718 | bus_speed = PCI_SPEED_133MHz_PCIX_533; | ||
719 | break; | ||
720 | default: | ||
721 | retval = -ENODEV; | ||
722 | break; | ||
723 | } | ||
724 | |||
725 | out: | ||
726 | bus->cur_bus_speed = bus_speed; | ||
727 | dbg("Current bus speed = %d\n", bus_speed); | ||
728 | return retval; | ||
729 | } | ||
730 | |||
731 | |||
663 | static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) | 732 | static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) |
664 | { | 733 | { |
665 | int retval; | 734 | int retval; |
@@ -720,6 +789,8 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) | |||
720 | retval = shpc_write_cmd(slot, 0, cmd); | 789 | retval = shpc_write_cmd(slot, 0, cmd); |
721 | if (retval) | 790 | if (retval) |
722 | ctrl_err(ctrl, "%s: Write command failed!\n", __func__); | 791 | ctrl_err(ctrl, "%s: Write command failed!\n", __func__); |
792 | else | ||
793 | shpc_get_cur_bus_speed(ctrl); | ||
723 | 794 | ||
724 | return retval; | 795 | return retval; |
725 | } | 796 | } |
@@ -803,10 +874,10 @@ static irqreturn_t shpc_isr(int irq, void *dev_id) | |||
803 | return IRQ_HANDLED; | 874 | return IRQ_HANDLED; |
804 | } | 875 | } |
805 | 876 | ||
806 | static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) | 877 | static int shpc_get_max_bus_speed(struct controller *ctrl) |
807 | { | 878 | { |
808 | int retval = 0; | 879 | int retval = 0; |
809 | struct controller *ctrl = slot->ctrl; | 880 | struct pci_bus *bus = ctrl->pci_dev->subordinate; |
810 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | 881 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; |
811 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | 882 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); |
812 | u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); | 883 | u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); |
@@ -842,79 +913,12 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) | |||
842 | retval = -ENODEV; | 913 | retval = -ENODEV; |
843 | } | 914 | } |
844 | 915 | ||
845 | *value = bus_speed; | 916 | bus->max_bus_speed = bus_speed; |
846 | ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); | 917 | ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); |
847 | 918 | ||
848 | return retval; | 919 | return retval; |
849 | } | 920 | } |
850 | 921 | ||
851 | static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value) | ||
852 | { | ||
853 | int retval = 0; | ||
854 | struct controller *ctrl = slot->ctrl; | ||
855 | enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; | ||
856 | u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); | ||
857 | u8 pi = shpc_readb(ctrl, PROG_INTERFACE); | ||
858 | u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); | ||
859 | |||
860 | if ((pi == 1) && (speed_mode > 4)) { | ||
861 | *value = PCI_SPEED_UNKNOWN; | ||
862 | return -ENODEV; | ||
863 | } | ||
864 | |||
865 | switch (speed_mode) { | ||
866 | case 0x0: | ||
867 | *value = PCI_SPEED_33MHz; | ||
868 | break; | ||
869 | case 0x1: | ||
870 | *value = PCI_SPEED_66MHz; | ||
871 | break; | ||
872 | case 0x2: | ||
873 | *value = PCI_SPEED_66MHz_PCIX; | ||
874 | break; | ||
875 | case 0x3: | ||
876 | *value = PCI_SPEED_100MHz_PCIX; | ||
877 | break; | ||
878 | case 0x4: | ||
879 | *value = PCI_SPEED_133MHz_PCIX; | ||
880 | break; | ||
881 | case 0x5: | ||
882 | *value = PCI_SPEED_66MHz_PCIX_ECC; | ||
883 | break; | ||
884 | case 0x6: | ||
885 | *value = PCI_SPEED_100MHz_PCIX_ECC; | ||
886 | break; | ||
887 | case 0x7: | ||
888 | *value = PCI_SPEED_133MHz_PCIX_ECC; | ||
889 | break; | ||
890 | case 0x8: | ||
891 | *value = PCI_SPEED_66MHz_PCIX_266; | ||
892 | break; | ||
893 | case 0x9: | ||
894 | *value = PCI_SPEED_100MHz_PCIX_266; | ||
895 | break; | ||
896 | case 0xa: | ||
897 | *value = PCI_SPEED_133MHz_PCIX_266; | ||
898 | break; | ||
899 | case 0xb: | ||
900 | *value = PCI_SPEED_66MHz_PCIX_533; | ||
901 | break; | ||
902 | case 0xc: | ||
903 | *value = PCI_SPEED_100MHz_PCIX_533; | ||
904 | break; | ||
905 | case 0xd: | ||
906 | *value = PCI_SPEED_133MHz_PCIX_533; | ||
907 | break; | ||
908 | default: | ||
909 | *value = PCI_SPEED_UNKNOWN; | ||
910 | retval = -ENODEV; | ||
911 | break; | ||
912 | } | ||
913 | |||
914 | ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed); | ||
915 | return retval; | ||
916 | } | ||
917 | |||
918 | static struct hpc_ops shpchp_hpc_ops = { | 922 | static struct hpc_ops shpchp_hpc_ops = { |
919 | .power_on_slot = hpc_power_on_slot, | 923 | .power_on_slot = hpc_power_on_slot, |
920 | .slot_enable = hpc_slot_enable, | 924 | .slot_enable = hpc_slot_enable, |
@@ -926,8 +930,6 @@ static struct hpc_ops shpchp_hpc_ops = { | |||
926 | .get_latch_status = hpc_get_latch_status, | 930 | .get_latch_status = hpc_get_latch_status, |
927 | .get_adapter_status = hpc_get_adapter_status, | 931 | .get_adapter_status = hpc_get_adapter_status, |
928 | 932 | ||
929 | .get_max_bus_speed = hpc_get_max_bus_speed, | ||
930 | .get_cur_bus_speed = hpc_get_cur_bus_speed, | ||
931 | .get_adapter_speed = hpc_get_adapter_speed, | 933 | .get_adapter_speed = hpc_get_adapter_speed, |
932 | .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, | 934 | .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, |
933 | .get_prog_int = hpc_get_prog_int, | 935 | .get_prog_int = hpc_get_prog_int, |
@@ -1086,6 +1088,9 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) | |||
1086 | } | 1088 | } |
1087 | ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); | 1089 | ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); |
1088 | 1090 | ||
1091 | shpc_get_max_bus_speed(ctrl); | ||
1092 | shpc_get_cur_bus_speed(ctrl); | ||
1093 | |||
1089 | /* | 1094 | /* |
1090 | * If this is the first controller to be initialized, | 1095 | * If this is the first controller to be initialized, |
1091 | * initialize the shpchpd work queue | 1096 | * initialize the shpchpd work queue |
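shpc_get_cur_bus_speed() and shpc_get_max_bus_speed() now run at controller init (and again after a successful speed-mode change) and cache their results in the pci_bus. The 4-bit speed-mode decode above could equally be table-driven; a sketch under the same SEC_BUS_CONFIG encoding shown in the switch statement:

    /* Sketch: table index corresponds to the speed mode field decoded above
     * (needs <linux/pci.h> for the enum and <linux/kernel.h> for ARRAY_SIZE). */
    static const enum pci_bus_speed shpc_speeds[] = {
    	PCI_SPEED_33MHz,	   PCI_SPEED_66MHz,
    	PCI_SPEED_66MHz_PCIX,	   PCI_SPEED_100MHz_PCIX,
    	PCI_SPEED_133MHz_PCIX,	   PCI_SPEED_66MHz_PCIX_ECC,
    	PCI_SPEED_100MHz_PCIX_ECC, PCI_SPEED_133MHz_PCIX_ECC,
    	PCI_SPEED_66MHz_PCIX_266,  PCI_SPEED_100MHz_PCIX_266,
    	PCI_SPEED_133MHz_PCIX_266, PCI_SPEED_66MHz_PCIX_533,
    	PCI_SPEED_100MHz_PCIX_533, PCI_SPEED_133MHz_PCIX_533,
    };

    static enum pci_bus_speed decode_speed_mode(u8 mode)
    {
    	return mode < ARRAY_SIZE(shpc_speeds) ? shpc_speeds[mode]
    					      : PCI_SPEED_UNKNOWN;
    }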
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c index 29fa9d26adae..071b7dc0094b 100644 --- a/drivers/pci/hotplug/shpchp_sysfs.c +++ b/drivers/pci/hotplug/shpchp_sysfs.c | |||
@@ -47,8 +47,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
47 | bus = pdev->subordinate; | 47 | bus = pdev->subordinate; |
48 | 48 | ||
49 | out += sprintf(buf, "Free resources: memory\n"); | 49 | out += sprintf(buf, "Free resources: memory\n"); |
50 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 50 | pci_bus_for_each_resource(bus, res, index) { |
51 | res = bus->resource[index]; | ||
52 | if (res && (res->flags & IORESOURCE_MEM) && | 51 | if (res && (res->flags & IORESOURCE_MEM) && |
53 | !(res->flags & IORESOURCE_PREFETCH)) { | 52 | !(res->flags & IORESOURCE_PREFETCH)) { |
54 | out += sprintf(out, "start = %8.8llx, " | 53 | out += sprintf(out, "start = %8.8llx, " |
@@ -58,8 +57,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
58 | } | 57 | } |
59 | } | 58 | } |
60 | out += sprintf(out, "Free resources: prefetchable memory\n"); | 59 | out += sprintf(out, "Free resources: prefetchable memory\n"); |
61 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 60 | pci_bus_for_each_resource(bus, res, index) { |
62 | res = bus->resource[index]; | ||
63 | if (res && (res->flags & IORESOURCE_MEM) && | 61 | if (res && (res->flags & IORESOURCE_MEM) && |
64 | (res->flags & IORESOURCE_PREFETCH)) { | 62 | (res->flags & IORESOURCE_PREFETCH)) { |
65 | out += sprintf(out, "start = %8.8llx, " | 63 | out += sprintf(out, "start = %8.8llx, " |
@@ -69,8 +67,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha | |||
69 | } | 67 | } |
70 | } | 68 | } |
71 | out += sprintf(out, "Free resources: IO\n"); | 69 | out += sprintf(out, "Free resources: IO\n"); |
72 | for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { | 70 | pci_bus_for_each_resource(bus, res, index) { |
73 | res = bus->resource[index]; | ||
74 | if (res && (res->flags & IORESOURCE_IO)) { | 71 | if (res && (res->flags & IORESOURCE_IO)) { |
75 | out += sprintf(out, "start = %8.8llx, " | 72 | out += sprintf(out, "start = %8.8llx, " |
76 | "length = %8.8llx\n", | 73 | "length = %8.8llx\n", |
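The three sysfs loops now use pci_bus_for_each_resource(), which hides how many windows a bridge actually has instead of hard-coding PCI_BUS_NUM_RESOURCES. A minimal sketch of the iterator:

    #include <linux/ioport.h>
    #include <linux/pci.h>

    /* Sketch: count the I/O windows of a bus with the new iterator. */
    static int count_io_windows(struct pci_bus *bus)
    {
    	struct resource *res;
    	int i, n = 0;

    	pci_bus_for_each_resource(bus, res, i)
    		if (res && (res->flags & IORESOURCE_IO))
    			n++;
    	return n;
    }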
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c deleted file mode 100644 index 871f65c15936..000000000000 --- a/drivers/pci/legacy.c +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/pci.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/interrupt.h> | ||
5 | #include "pci.h" | ||
6 | |||
7 | /** | ||
8 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | ||
9 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | ||
10 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | ||
11 | * @from: Previous PCI device found in search, or %NULL for new search. | ||
12 | * | ||
13 | * Iterates through the list of known PCI devices. If a PCI device is found | ||
14 | * with a matching @vendor and @device, a pointer to its device structure is | ||
15 | * returned. Otherwise, %NULL is returned. | ||
16 | * A new search is initiated by passing %NULL as the @from argument. | ||
17 | * Otherwise if @from is not %NULL, searches continue from next device | ||
18 | * on the global list. | ||
19 | * | ||
20 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | ||
21 | * the PCI device returned by this function can disappear at any moment in | ||
22 | * time. | ||
23 | */ | ||
24 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | ||
25 | struct pci_dev *from) | ||
26 | { | ||
27 | struct pci_dev *pdev; | ||
28 | |||
29 | pci_dev_get(from); | ||
30 | pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | ||
31 | pci_dev_put(pdev); | ||
32 | return pdev; | ||
33 | } | ||
34 | EXPORT_SYMBOL(pci_find_device); | ||
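pci_find_device() is gone; the replacement is the reference-counted pci_get_device() iterator, which keeps the returned device pinned between iterations. A sketch of the usual conversion, with an illustrative vendor ID:

    #include <linux/pci.h>

    static void count_vendor_devices(void)
    {
    	struct pci_dev *pdev = NULL;
    	int count = 0;

    	/* pci_get_device() drops the reference on 'from' and takes one
    	 * on the device it returns, so the loop is hotplug-safe. */
    	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev)))
    		count++;

    	printk(KERN_INFO "found %d matching devices\n", count);
    }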
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 7e2829538a4c..c0c73913833d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -16,8 +16,144 @@ | |||
16 | #include <acpi/acpi_bus.h> | 16 | #include <acpi/acpi_bus.h> |
17 | 17 | ||
18 | #include <linux/pci-acpi.h> | 18 | #include <linux/pci-acpi.h> |
19 | #include <linux/pm_runtime.h> | ||
19 | #include "pci.h" | 20 | #include "pci.h" |
20 | 21 | ||
22 | static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); | ||
23 | |||
24 | /** | ||
25 | * pci_acpi_wake_bus - Wake-up notification handler for root buses. | ||
26 | * @handle: ACPI handle of a device the notification is for. | ||
27 | * @event: Type of the signaled event. | ||
28 | * @context: PCI root bus to wake up devices on. | ||
29 | */ | ||
30 | static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) | ||
31 | { | ||
32 | struct pci_bus *pci_bus = context; | ||
33 | |||
34 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) | ||
35 | pci_pme_wakeup_bus(pci_bus); | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. | ||
40 | * @handle: ACPI handle of a device the notification is for. | ||
41 | * @event: Type of the signaled event. | ||
42 | * @context: PCI device object to wake up. | ||
43 | */ | ||
44 | static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | ||
45 | { | ||
46 | struct pci_dev *pci_dev = context; | ||
47 | |||
48 | if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { | ||
49 | pci_check_pme_status(pci_dev); | ||
50 | pm_runtime_resume(&pci_dev->dev); | ||
51 | if (pci_dev->subordinate) | ||
52 | pci_pme_wakeup_bus(pci_dev->subordinate); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * add_pm_notifier - Register PM notifier for given ACPI device. | ||
58 | * @dev: ACPI device to add the notifier for. | ||
59 | * @context: PCI device or bus to check for PME status if an event is signaled. | ||
60 | * | ||
61 | * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of | ||
62 | * PM wake-up events. For example, wake-up events may be generated for bridges | ||
63 | * if one of the devices below the bridge is signaling PME, even if the bridge | ||
64 | * itself doesn't have a wake-up GPE associated with it. | ||
65 | */ | ||
66 | static acpi_status add_pm_notifier(struct acpi_device *dev, | ||
67 | acpi_notify_handler handler, | ||
68 | void *context) | ||
69 | { | ||
70 | acpi_status status = AE_ALREADY_EXISTS; | ||
71 | |||
72 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
73 | |||
74 | if (dev->wakeup.flags.notifier_present) | ||
75 | goto out; | ||
76 | |||
77 | status = acpi_install_notify_handler(dev->handle, | ||
78 | ACPI_SYSTEM_NOTIFY, | ||
79 | handler, context); | ||
80 | if (ACPI_FAILURE(status)) | ||
81 | goto out; | ||
82 | |||
83 | dev->wakeup.flags.notifier_present = true; | ||
84 | |||
85 | out: | ||
86 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
87 | return status; | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * remove_pm_notifier - Unregister PM notifier from given ACPI device. | ||
92 | * @dev: ACPI device to remove the notifier from. | ||
93 | */ | ||
94 | static acpi_status remove_pm_notifier(struct acpi_device *dev, | ||
95 | acpi_notify_handler handler) | ||
96 | { | ||
97 | acpi_status status = AE_BAD_PARAMETER; | ||
98 | |||
99 | mutex_lock(&pci_acpi_pm_notify_mtx); | ||
100 | |||
101 | if (!dev->wakeup.flags.notifier_present) | ||
102 | goto out; | ||
103 | |||
104 | status = acpi_remove_notify_handler(dev->handle, | ||
105 | ACPI_SYSTEM_NOTIFY, | ||
106 | handler); | ||
107 | if (ACPI_FAILURE(status)) | ||
108 | goto out; | ||
109 | |||
110 | dev->wakeup.flags.notifier_present = false; | ||
111 | |||
112 | out: | ||
113 | mutex_unlock(&pci_acpi_pm_notify_mtx); | ||
114 | return status; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. | ||
119 | * @dev: ACPI device to add the notifier for. | ||
120 | * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. | ||
121 | */ | ||
122 | acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, | ||
123 | struct pci_bus *pci_bus) | ||
124 | { | ||
125 | return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. | ||
130 | * @dev: ACPI device to remove the notifier from. | ||
131 | */ | ||
132 | acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) | ||
133 | { | ||
134 | return remove_pm_notifier(dev, pci_acpi_wake_bus); | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. | ||
139 | * @dev: ACPI device to add the notifier for. | ||
140 | * @pci_dev: PCI device to check for the PME status if an event is signaled. | ||
141 | */ | ||
142 | acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, | ||
143 | struct pci_dev *pci_dev) | ||
144 | { | ||
145 | return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. | ||
150 | * @dev: ACPI device to remove the notifier from. | ||
151 | */ | ||
152 | acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) | ||
153 | { | ||
154 | return remove_pm_notifier(dev, pci_acpi_wake_dev); | ||
155 | } | ||
156 | |||
21 | /* | 157 | /* |
22 | * _SxD returns the D-state with the highest power | 158 | * _SxD returns the D-state with the highest power |
23 | * (lowest D-state number) supported in the S-state "x". | 159 | * (lowest D-state number) supported in the S-state "x". |
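The notifier helpers above give the ACPI/PCI glue a single place to turn ACPI_NOTIFY_DEVICE_WAKE events into PME scans. A sketch of how a root-bridge binding might use the bus-level pair, assuming the acpi_device and pci_bus have already been matched elsewhere:

    /* Sketch: install the wake-up notifier when the root bridge is bound
     * and remove it again on unbind; errors are only logged here. */
    static void example_bind_root(struct acpi_device *adev, struct pci_bus *bus)
    {
    	if (ACPI_FAILURE(pci_acpi_add_bus_pm_notifier(adev, bus)))
    		printk(KERN_WARNING "PCI: wake-up notifier not installed\n");
    }

    static void example_unbind_root(struct acpi_device *adev)
    {
    	pci_acpi_remove_bus_pm_notifier(adev);
    }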
@@ -131,12 +267,87 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
131 | return 0; | 267 | return 0; |
132 | } | 268 | } |
133 | 269 | ||
270 | /** | ||
271 | * acpi_dev_run_wake - Enable/disable wake-up for given device. | ||
272 | * @phys_dev: Device for which to enable/disable platform wake-up. | ||
273 | * @enable: Whether to enable or disable the wake-up functionality. | ||
274 | * | ||
275 | * Find the ACPI device object corresponding to @phys_dev and try to | ||
276 | * enable/disable the GPE associated with it. | ||
277 | */ | ||
278 | static int acpi_dev_run_wake(struct device *phys_dev, bool enable) | ||
279 | { | ||
280 | struct acpi_device *dev; | ||
281 | acpi_handle handle; | ||
282 | int error = -ENODEV; | ||
283 | |||
284 | if (!device_run_wake(phys_dev)) | ||
285 | return -EINVAL; | ||
286 | |||
287 | handle = DEVICE_ACPI_HANDLE(phys_dev); | ||
288 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { | ||
289 | dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", | ||
290 | __func__); | ||
291 | return -ENODEV; | ||
292 | } | ||
293 | |||
294 | if (enable) { | ||
295 | if (!dev->wakeup.run_wake_count++) { | ||
296 | acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); | ||
297 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
298 | dev->wakeup.gpe_number, | ||
299 | ACPI_GPE_TYPE_RUNTIME); | ||
300 | } | ||
301 | } else if (dev->wakeup.run_wake_count > 0) { | ||
302 | if (!--dev->wakeup.run_wake_count) { | ||
303 | acpi_disable_gpe(dev->wakeup.gpe_device, | ||
304 | dev->wakeup.gpe_number, | ||
305 | ACPI_GPE_TYPE_RUNTIME); | ||
306 | acpi_disable_wakeup_device_power(dev); | ||
307 | } | ||
308 | } else { | ||
309 | error = -EALREADY; | ||
310 | } | ||
311 | |||
312 | return error; | ||
313 | } | ||
314 | |||
315 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) | ||
316 | { | ||
317 | while (bus->parent) { | ||
318 | struct pci_dev *bridge = bus->self; | ||
319 | |||
320 | if (bridge->pme_interrupt) | ||
321 | return; | ||
322 | if (!acpi_dev_run_wake(&bridge->dev, enable)) | ||
323 | return; | ||
324 | bus = bus->parent; | ||
325 | } | ||
326 | |||
327 | /* We have reached the root bus. */ | ||
328 | if (bus->bridge) | ||
329 | acpi_dev_run_wake(bus->bridge, enable); | ||
330 | } | ||
331 | |||
332 | static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) | ||
333 | { | ||
334 | if (dev->pme_interrupt) | ||
335 | return 0; | ||
336 | |||
337 | if (!acpi_dev_run_wake(&dev->dev, enable)) | ||
338 | return 0; | ||
339 | |||
340 | acpi_pci_propagate_run_wake(dev->bus, enable); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
134 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { | 344 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { |
135 | .is_manageable = acpi_pci_power_manageable, | 345 | .is_manageable = acpi_pci_power_manageable, |
136 | .set_state = acpi_pci_set_power_state, | 346 | .set_state = acpi_pci_set_power_state, |
137 | .choose_state = acpi_pci_choose_state, | 347 | .choose_state = acpi_pci_choose_state, |
138 | .can_wakeup = acpi_pci_can_wakeup, | 348 | .can_wakeup = acpi_pci_can_wakeup, |
139 | .sleep_wake = acpi_pci_sleep_wake, | 349 | .sleep_wake = acpi_pci_sleep_wake, |
350 | .run_wake = acpi_pci_run_wake, | ||
140 | }; | 351 | }; |
141 | 352 | ||
142 | /* ACPI bus type */ | 353 | /* ACPI bus type */ |
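acpi_dev_run_wake() reference-counts wakeup.run_wake_count so that a GPE shared by several devices is only armed on the first enable and disarmed on the last disable. A generic sketch of that counting pattern (callers are assumed to serialize access; the ACPI-specific calls are omitted):

    struct wake_source {
    	unsigned int users;	/* number of active enable requests */
    };

    static void wake_source_get(struct wake_source *ws)
    {
    	if (!ws->users++) {
    		/* 0 -> 1 transition: arm the hardware wake source here */
    	}
    }

    static void wake_source_put(struct wake_source *ws)
    {
    	if (ws->users && !--ws->users) {
    		/* 1 -> 0 transition: disarm the hardware wake source here */
    	}
    }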
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index e5d47be3c6d7..f9a0aec3abcf 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/pm_runtime.h> | ||
20 | #include "pci.h" | 21 | #include "pci.h" |
21 | 22 | ||
22 | struct pci_dynid { | 23 | struct pci_dynid { |
@@ -404,6 +405,35 @@ static void pci_device_shutdown(struct device *dev) | |||
404 | pci_msix_shutdown(pci_dev); | 405 | pci_msix_shutdown(pci_dev); |
405 | } | 406 | } |
406 | 407 | ||
408 | #ifdef CONFIG_PM_OPS | ||
409 | |||
410 | /* Auxiliary functions used for system resume and run-time resume. */ | ||
411 | |||
412 | /** | ||
413 | * pci_restore_standard_config - restore standard config registers of PCI device | ||
414 | * @pci_dev: PCI device to handle | ||
415 | */ | ||
416 | static int pci_restore_standard_config(struct pci_dev *pci_dev) | ||
417 | { | ||
418 | pci_update_current_state(pci_dev, PCI_UNKNOWN); | ||
419 | |||
420 | if (pci_dev->current_state != PCI_D0) { | ||
421 | int error = pci_set_power_state(pci_dev, PCI_D0); | ||
422 | if (error) | ||
423 | return error; | ||
424 | } | ||
425 | |||
426 | return pci_restore_state(pci_dev); | ||
427 | } | ||
428 | |||
429 | static void pci_pm_default_resume_early(struct pci_dev *pci_dev) | ||
430 | { | ||
431 | pci_restore_standard_config(pci_dev); | ||
432 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
433 | } | ||
434 | |||
435 | #endif | ||
436 | |||
407 | #ifdef CONFIG_PM_SLEEP | 437 | #ifdef CONFIG_PM_SLEEP |
408 | 438 | ||
409 | /* | 439 | /* |
@@ -520,29 +550,6 @@ static int pci_legacy_resume(struct device *dev) | |||
520 | 550 | ||
521 | /* Auxiliary functions used by the new power management framework */ | 551 | /* Auxiliary functions used by the new power management framework */ |
522 | 552 | ||
523 | /** | ||
524 | * pci_restore_standard_config - restore standard config registers of PCI device | ||
525 | * @pci_dev: PCI device to handle | ||
526 | */ | ||
527 | static int pci_restore_standard_config(struct pci_dev *pci_dev) | ||
528 | { | ||
529 | pci_update_current_state(pci_dev, PCI_UNKNOWN); | ||
530 | |||
531 | if (pci_dev->current_state != PCI_D0) { | ||
532 | int error = pci_set_power_state(pci_dev, PCI_D0); | ||
533 | if (error) | ||
534 | return error; | ||
535 | } | ||
536 | |||
537 | return pci_restore_state(pci_dev); | ||
538 | } | ||
539 | |||
540 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) | ||
541 | { | ||
542 | pci_restore_standard_config(pci_dev); | ||
543 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
544 | } | ||
545 | |||
546 | static void pci_pm_default_resume(struct pci_dev *pci_dev) | 553 | static void pci_pm_default_resume(struct pci_dev *pci_dev) |
547 | { | 554 | { |
548 | pci_fixup_device(pci_fixup_resume, pci_dev); | 555 | pci_fixup_device(pci_fixup_resume, pci_dev); |
@@ -581,6 +588,17 @@ static int pci_pm_prepare(struct device *dev) | |||
581 | struct device_driver *drv = dev->driver; | 588 | struct device_driver *drv = dev->driver; |
582 | int error = 0; | 589 | int error = 0; |
583 | 590 | ||
591 | /* | ||
592 | * PCI devices suspended at run time need to be resumed at this | ||
593 | * point, because in general it is necessary to reconfigure them for | ||
594 | * system suspend. Namely, if the device is supposed to wake up the | ||
595 | * system from the sleep state, we may need to reconfigure it for this | ||
596 | * purpose. In turn, if the device is not supposed to wake up the | ||
597 | * system from the sleep state, we'll have to prevent it from signaling | ||
598 | * wake-up. | ||
599 | */ | ||
600 | pm_runtime_resume(dev); | ||
601 | |||
584 | if (drv && drv->pm && drv->pm->prepare) | 602 | if (drv && drv->pm && drv->pm->prepare) |
585 | error = drv->pm->prepare(dev); | 603 | error = drv->pm->prepare(dev); |
586 | 604 | ||
@@ -595,6 +613,13 @@ static void pci_pm_complete(struct device *dev) | |||
595 | drv->pm->complete(dev); | 613 | drv->pm->complete(dev); |
596 | } | 614 | } |
597 | 615 | ||
616 | #else /* !CONFIG_PM_SLEEP */ | ||
617 | |||
618 | #define pci_pm_prepare NULL | ||
619 | #define pci_pm_complete NULL | ||
620 | |||
621 | #endif /* !CONFIG_PM_SLEEP */ | ||
622 | |||
598 | #ifdef CONFIG_SUSPEND | 623 | #ifdef CONFIG_SUSPEND |
599 | 624 | ||
600 | static int pci_pm_suspend(struct device *dev) | 625 | static int pci_pm_suspend(struct device *dev) |
@@ -681,7 +706,7 @@ static int pci_pm_resume_noirq(struct device *dev) | |||
681 | struct device_driver *drv = dev->driver; | 706 | struct device_driver *drv = dev->driver; |
682 | int error = 0; | 707 | int error = 0; |
683 | 708 | ||
684 | pci_pm_default_resume_noirq(pci_dev); | 709 | pci_pm_default_resume_early(pci_dev); |
685 | 710 | ||
686 | if (pci_has_legacy_pm_support(pci_dev)) | 711 | if (pci_has_legacy_pm_support(pci_dev)) |
687 | return pci_legacy_resume_early(dev); | 712 | return pci_legacy_resume_early(dev); |
@@ -879,7 +904,7 @@ static int pci_pm_restore_noirq(struct device *dev) | |||
879 | struct device_driver *drv = dev->driver; | 904 | struct device_driver *drv = dev->driver; |
880 | int error = 0; | 905 | int error = 0; |
881 | 906 | ||
882 | pci_pm_default_resume_noirq(pci_dev); | 907 | pci_pm_default_resume_early(pci_dev); |
883 | 908 | ||
884 | if (pci_has_legacy_pm_support(pci_dev)) | 909 | if (pci_has_legacy_pm_support(pci_dev)) |
885 | return pci_legacy_resume_early(dev); | 910 | return pci_legacy_resume_early(dev); |
@@ -931,6 +956,84 @@ static int pci_pm_restore(struct device *dev) | |||
931 | 956 | ||
932 | #endif /* !CONFIG_HIBERNATION */ | 957 | #endif /* !CONFIG_HIBERNATION */ |
933 | 958 | ||
959 | #ifdef CONFIG_PM_RUNTIME | ||
960 | |||
961 | static int pci_pm_runtime_suspend(struct device *dev) | ||
962 | { | ||
963 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
964 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
965 | pci_power_t prev = pci_dev->current_state; | ||
966 | int error; | ||
967 | |||
968 | if (!pm || !pm->runtime_suspend) | ||
969 | return -ENOSYS; | ||
970 | |||
971 | error = pm->runtime_suspend(dev); | ||
972 | suspend_report_result(pm->runtime_suspend, error); | ||
973 | if (error) | ||
974 | return error; | ||
975 | |||
976 | pci_fixup_device(pci_fixup_suspend, pci_dev); | ||
977 | |||
978 | if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 | ||
979 | && pci_dev->current_state != PCI_UNKNOWN) { | ||
980 | WARN_ONCE(pci_dev->current_state != prev, | ||
981 | "PCI PM: State of device not saved by %pF\n", | ||
982 | pm->runtime_suspend); | ||
983 | return 0; | ||
984 | } | ||
985 | |||
986 | if (!pci_dev->state_saved) | ||
987 | pci_save_state(pci_dev); | ||
988 | |||
989 | pci_finish_runtime_suspend(pci_dev); | ||
990 | |||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int pci_pm_runtime_resume(struct device *dev) | ||
995 | { | ||
996 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
997 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
998 | |||
999 | if (!pm || !pm->runtime_resume) | ||
1000 | return -ENOSYS; | ||
1001 | |||
1002 | pci_pm_default_resume_early(pci_dev); | ||
1003 | __pci_enable_wake(pci_dev, PCI_D0, true, false); | ||
1004 | pci_fixup_device(pci_fixup_resume, pci_dev); | ||
1005 | |||
1006 | return pm->runtime_resume(dev); | ||
1007 | } | ||
1008 | |||
1009 | static int pci_pm_runtime_idle(struct device *dev) | ||
1010 | { | ||
1011 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
1012 | |||
1013 | if (!pm) | ||
1014 | return -ENOSYS; | ||
1015 | |||
1016 | if (pm->runtime_idle) { | ||
1017 | int ret = pm->runtime_idle(dev); | ||
1018 | if (ret) | ||
1019 | return ret; | ||
1020 | } | ||
1021 | |||
1022 | pm_runtime_suspend(dev); | ||
1023 | |||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | #else /* !CONFIG_PM_RUNTIME */ | ||
1028 | |||
1029 | #define pci_pm_runtime_suspend NULL | ||
1030 | #define pci_pm_runtime_resume NULL | ||
1031 | #define pci_pm_runtime_idle NULL | ||
1032 | |||
1033 | #endif /* !CONFIG_PM_RUNTIME */ | ||
1034 | |||
1035 | #ifdef CONFIG_PM_OPS | ||
1036 | |||
934 | const struct dev_pm_ops pci_dev_pm_ops = { | 1037 | const struct dev_pm_ops pci_dev_pm_ops = { |
935 | .prepare = pci_pm_prepare, | 1038 | .prepare = pci_pm_prepare, |
936 | .complete = pci_pm_complete, | 1039 | .complete = pci_pm_complete, |
@@ -946,15 +1049,18 @@ const struct dev_pm_ops pci_dev_pm_ops = { | |||
946 | .thaw_noirq = pci_pm_thaw_noirq, | 1049 | .thaw_noirq = pci_pm_thaw_noirq, |
947 | .poweroff_noirq = pci_pm_poweroff_noirq, | 1050 | .poweroff_noirq = pci_pm_poweroff_noirq, |
948 | .restore_noirq = pci_pm_restore_noirq, | 1051 | .restore_noirq = pci_pm_restore_noirq, |
1052 | .runtime_suspend = pci_pm_runtime_suspend, | ||
1053 | .runtime_resume = pci_pm_runtime_resume, | ||
1054 | .runtime_idle = pci_pm_runtime_idle, | ||
949 | }; | 1055 | }; |
950 | 1056 | ||
951 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) | 1057 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) |
952 | 1058 | ||
953 | #else /* !CONFIG_PM_SLEEP */ | 1059 | #else /* !CONFIG_PM_OPS */ |
954 | 1060 | ||
955 | #define PCI_PM_OPS_PTR NULL | 1061 | #define PCI_PM_OPS_PTR NULL |
956 | 1062 | ||
957 | #endif /* !CONFIG_PM_SLEEP */ | 1063 | #endif /* !CONFIG_PM_OPS */ |
958 | 1064 | ||
959 | /** | 1065 | /** |
960 | * __pci_register_driver - register a new pci driver | 1066 | * __pci_register_driver - register a new pci driver |
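The new pci_pm_runtime_* bus callbacks only handle the PCI-generic half (config-space save/restore, wake-up setup, power-state selection); a driver opts in by providing runtime callbacks in its dev_pm_ops. A minimal sketch for a hypothetical "foo" driver:

    #include <linux/pci.h>
    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev)
    {
    	/* Quiesce device-specific activity; the PCI core then saves
    	 * state and selects a low-power state. */
    	return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
    	/* Config space has already been restored by the PCI core. */
    	return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
    	.runtime_suspend = foo_runtime_suspend,
    	.runtime_resume	 = foo_runtime_resume,
    };

    static struct pci_driver foo_driver = {
    	.name	= "foo",
    	/* .id_table, .probe and .remove omitted from this sketch */
    	.driver	= { .pm = &foo_pm_ops, },
    };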
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 315fea47e784..f4a2738bf0bf 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -19,8 +19,8 @@ | |||
19 | #include <linux/pci-aspm.h> | 19 | #include <linux/pci-aspm.h> |
20 | #include <linux/pm_wakeup.h> | 20 | #include <linux/pm_wakeup.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <asm/dma.h> /* isa_dma_bridge_buggy */ | ||
23 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/pm_runtime.h> | ||
24 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
25 | #include "pci.h" | 25 | #include "pci.h" |
26 | 26 | ||
@@ -29,6 +29,12 @@ const char *pci_power_names[] = { | |||
29 | }; | 29 | }; |
30 | EXPORT_SYMBOL_GPL(pci_power_names); | 30 | EXPORT_SYMBOL_GPL(pci_power_names); |
31 | 31 | ||
32 | int isa_dma_bridge_buggy; | ||
33 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
34 | |||
35 | int pci_pci_problems; | ||
36 | EXPORT_SYMBOL(pci_pci_problems); | ||
37 | |||
32 | unsigned int pci_pm_d3_delay; | 38 | unsigned int pci_pm_d3_delay; |
33 | 39 | ||
34 | static void pci_dev_d3_sleep(struct pci_dev *dev) | 40 | static void pci_dev_d3_sleep(struct pci_dev *dev) |
@@ -380,10 +386,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) | |||
380 | { | 386 | { |
381 | const struct pci_bus *bus = dev->bus; | 387 | const struct pci_bus *bus = dev->bus; |
382 | int i; | 388 | int i; |
383 | struct resource *best = NULL; | 389 | struct resource *best = NULL, *r; |
384 | 390 | ||
385 | for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 391 | pci_bus_for_each_resource(bus, r, i) { |
386 | struct resource *r = bus->resource[i]; | ||
387 | if (!r) | 392 | if (!r) |
388 | continue; | 393 | continue; |
389 | if (res->start && !(res->start >= r->start && res->end <= r->end)) | 394 | if (res->start && !(res->start >= r->start && res->end <= r->end)) |
@@ -457,6 +462,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
457 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; | 462 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; |
458 | } | 463 | } |
459 | 464 | ||
465 | static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) | ||
466 | { | ||
467 | return pci_platform_pm ? | ||
468 | pci_platform_pm->run_wake(dev, enable) : -ENODEV; | ||
469 | } | ||
470 | |||
460 | /** | 471 | /** |
461 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of | 472 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of |
462 | * given PCI device | 473 | * given PCI device |
@@ -1190,6 +1201,66 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) | |||
1190 | } | 1201 | } |
1191 | 1202 | ||
1192 | /** | 1203 | /** |
1204 | * pci_check_pme_status - Check if given device has generated PME. | ||
1205 | * @dev: Device to check. | ||
1206 | * | ||
1207 | * Check the PME status of the device and if set, clear it and clear PME enable | ||
1208 | * (if set). Return 'true' if PME status and PME enable were both set or | ||
1209 | * 'false' otherwise. | ||
1210 | */ | ||
1211 | bool pci_check_pme_status(struct pci_dev *dev) | ||
1212 | { | ||
1213 | int pmcsr_pos; | ||
1214 | u16 pmcsr; | ||
1215 | bool ret = false; | ||
1216 | |||
1217 | if (!dev->pm_cap) | ||
1218 | return false; | ||
1219 | |||
1220 | pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; | ||
1221 | pci_read_config_word(dev, pmcsr_pos, &pmcsr); | ||
1222 | if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) | ||
1223 | return false; | ||
1224 | |||
1225 | /* Clear PME status. */ | ||
1226 | pmcsr |= PCI_PM_CTRL_PME_STATUS; | ||
1227 | if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { | ||
1228 | /* Disable PME to avoid interrupt flood. */ | ||
1229 | pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; | ||
1230 | ret = true; | ||
1231 | } | ||
1232 | |||
1233 | pci_write_config_word(dev, pmcsr_pos, pmcsr); | ||
1234 | |||
1235 | return ret; | ||
1236 | } | ||
1237 | |||
1238 | /** | ||
1239 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. | ||
1240 | * @dev: Device to handle. | ||
1241 | * @ign: Ignored. | ||
1242 | * | ||
1243 | * Check if @dev has generated PME and queue a resume request for it in that | ||
1244 | * case. | ||
1245 | */ | ||
1246 | static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | ||
1247 | { | ||
1248 | if (pci_check_pme_status(dev)) | ||
1249 | pm_request_resume(&dev->dev); | ||
1250 | return 0; | ||
1251 | } | ||
1252 | |||
1253 | /** | ||
1254 | * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. | ||
1255 | * @bus: Top bus of the subtree to walk. | ||
1256 | */ | ||
1257 | void pci_pme_wakeup_bus(struct pci_bus *bus) | ||
1258 | { | ||
1259 | if (bus) | ||
1260 | pci_walk_bus(bus, pci_pme_wakeup, NULL); | ||
1261 | } | ||
1262 | |||
1263 | /** | ||
1193 | * pci_pme_capable - check the capability of PCI device to generate PME# | 1264 | * pci_pme_capable - check the capability of PCI device to generate PME# |
1194 | * @dev: PCI device to handle. | 1265 | * @dev: PCI device to handle. |
1195 | * @state: PCI state from which device will issue PME#. | 1266 | * @state: PCI state from which device will issue PME#. |
@@ -1230,9 +1301,10 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1230 | } | 1301 | } |
1231 | 1302 | ||
1232 | /** | 1303 | /** |
1233 | * pci_enable_wake - enable PCI device as wakeup event source | 1304 | * __pci_enable_wake - enable PCI device as wakeup event source |
1234 | * @dev: PCI device affected | 1305 | * @dev: PCI device affected |
1235 | * @state: PCI state from which device will issue wakeup events | 1306 | * @state: PCI state from which device will issue wakeup events |
1307 | * @runtime: True if the events are to be generated at run time | ||
1236 | * @enable: True to enable event generation; false to disable | 1308 | * @enable: True to enable event generation; false to disable |
1237 | * | 1309 | * |
1238 | * This enables the device as a wakeup event source, or disables it. | 1310 | * This enables the device as a wakeup event source, or disables it. |
@@ -1248,11 +1320,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1248 | * Error code depending on the platform is returned if both the platform and | 1320 | * Error code depending on the platform is returned if both the platform and |
1249 | * the native mechanism fail to enable the generation of wake-up events | 1321 | * the native mechanism fail to enable the generation of wake-up events |
1250 | */ | 1322 | */ |
1251 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | 1323 | int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, |
1324 | bool runtime, bool enable) | ||
1252 | { | 1325 | { |
1253 | int ret = 0; | 1326 | int ret = 0; |
1254 | 1327 | ||
1255 | if (enable && !device_may_wakeup(&dev->dev)) | 1328 | if (enable && !runtime && !device_may_wakeup(&dev->dev)) |
1256 | return -EINVAL; | 1329 | return -EINVAL; |
1257 | 1330 | ||
1258 | /* Don't do the same thing twice in a row for one device. */ | 1331 | /* Don't do the same thing twice in a row for one device. */ |
@@ -1272,19 +1345,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | |||
1272 | pci_pme_active(dev, true); | 1345 | pci_pme_active(dev, true); |
1273 | else | 1346 | else |
1274 | ret = 1; | 1347 | ret = 1; |
1275 | error = platform_pci_sleep_wake(dev, true); | 1348 | error = runtime ? platform_pci_run_wake(dev, true) : |
1349 | platform_pci_sleep_wake(dev, true); | ||
1276 | if (ret) | 1350 | if (ret) |
1277 | ret = error; | 1351 | ret = error; |
1278 | if (!ret) | 1352 | if (!ret) |
1279 | dev->wakeup_prepared = true; | 1353 | dev->wakeup_prepared = true; |
1280 | } else { | 1354 | } else { |
1281 | platform_pci_sleep_wake(dev, false); | 1355 | if (runtime) |
1356 | platform_pci_run_wake(dev, false); | ||
1357 | else | ||
1358 | platform_pci_sleep_wake(dev, false); | ||
1282 | pci_pme_active(dev, false); | 1359 | pci_pme_active(dev, false); |
1283 | dev->wakeup_prepared = false; | 1360 | dev->wakeup_prepared = false; |
1284 | } | 1361 | } |
1285 | 1362 | ||
1286 | return ret; | 1363 | return ret; |
1287 | } | 1364 | } |
1365 | EXPORT_SYMBOL(__pci_enable_wake); | ||
1288 | 1366 | ||
1289 | /** | 1367 | /** |
1290 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold | 1368 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold |
@@ -1394,6 +1472,66 @@ int pci_back_from_sleep(struct pci_dev *dev) | |||
1394 | } | 1472 | } |
1395 | 1473 | ||
1396 | /** | 1474 | /** |
1475 | * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. | ||
1476 | * @dev: PCI device being suspended. | ||
1477 | * | ||
1478 | * Prepare @dev to generate wake-up events at run time and put it into a low | ||
1479 | * power state. | ||
1480 | */ | ||
1481 | int pci_finish_runtime_suspend(struct pci_dev *dev) | ||
1482 | { | ||
1483 | pci_power_t target_state = pci_target_state(dev); | ||
1484 | int error; | ||
1485 | |||
1486 | if (target_state == PCI_POWER_ERROR) | ||
1487 | return -EIO; | ||
1488 | |||
1489 | __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev)); | ||
1490 | |||
1491 | error = pci_set_power_state(dev, target_state); | ||
1492 | |||
1493 | if (error) | ||
1494 | __pci_enable_wake(dev, target_state, true, false); | ||
1495 | |||
1496 | return error; | ||
1497 | } | ||
1498 | |||
1499 | /** | ||
1500 | * pci_dev_run_wake - Check if device can generate run-time wake-up events. | ||
1501 | * @dev: Device to check. | ||
1502 | * | ||
1503 | * Return true if the device itself is capable of generating wake-up events | ||
1504 | * (through the platform or using the native PCIe PME) or if the device supports | ||
1505 | * PME and one of its upstream bridges can generate wake-up events. | ||
1506 | */ | ||
1507 | bool pci_dev_run_wake(struct pci_dev *dev) | ||
1508 | { | ||
1509 | struct pci_bus *bus = dev->bus; | ||
1510 | |||
1511 | if (device_run_wake(&dev->dev)) | ||
1512 | return true; | ||
1513 | |||
1514 | if (!dev->pme_support) | ||
1515 | return false; | ||
1516 | |||
1517 | while (bus->parent) { | ||
1518 | struct pci_dev *bridge = bus->self; | ||
1519 | |||
1520 | if (device_run_wake(&bridge->dev)) | ||
1521 | return true; | ||
1522 | |||
1523 | bus = bus->parent; | ||
1524 | } | ||
1525 | |||
1526 | /* We have reached the root bus. */ | ||
1527 | if (bus->bridge) | ||
1528 | return device_run_wake(bus->bridge); | ||
1529 | |||
1530 | return false; | ||
1531 | } | ||
1532 | EXPORT_SYMBOL_GPL(pci_dev_run_wake); | ||
1533 | |||
1534 | /** | ||
1397 | * pci_pm_init - Initialize PM functions of given PCI device | 1535 | * pci_pm_init - Initialize PM functions of given PCI device |
1398 | * @dev: PCI device to handle. | 1536 | * @dev: PCI device to handle. |
1399 | */ | 1537 | */ |
@@ -2871,7 +3009,6 @@ EXPORT_SYMBOL(pci_save_state); | |||
2871 | EXPORT_SYMBOL(pci_restore_state); | 3009 | EXPORT_SYMBOL(pci_restore_state); |
2872 | EXPORT_SYMBOL(pci_pme_capable); | 3010 | EXPORT_SYMBOL(pci_pme_capable); |
2873 | EXPORT_SYMBOL(pci_pme_active); | 3011 | EXPORT_SYMBOL(pci_pme_active); |
2874 | EXPORT_SYMBOL(pci_enable_wake); | ||
2875 | EXPORT_SYMBOL(pci_wake_from_d3); | 3012 | EXPORT_SYMBOL(pci_wake_from_d3); |
2876 | EXPORT_SYMBOL(pci_target_state); | 3013 | EXPORT_SYMBOL(pci_target_state); |
2877 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 3014 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
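pci_enable_wake() is no longer exported from pci.c; system-sleep callers presumably keep the historical three-argument form through a thin wrapper (its exact home in the headers is outside this hunk), while runtime PM code calls __pci_enable_wake() with the runtime flag set. A sketch of such a wrapper:

    /* Sketch: preserve the old pci_enable_wake() signature on top of the
     * new four-argument helper; runtime is false for system sleep. */
    static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
    				  bool enable)
    {
    	return __pci_enable_wake(dev, state, false, enable);
    }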
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fbd0e3adbca3..4eb10f48d270 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -35,6 +35,10 @@ int pci_probe_reset_function(struct pci_dev *dev); | |||
35 | * | 35 | * |
36 | * @sleep_wake: enables/disables the system wake up capability of given device | 36 | * @sleep_wake: enables/disables the system wake up capability of given device |
37 | * | 37 | * |
38 | * @run_wake: enables/disables the platform's generation of run-time wake-up events | ||
39 | * for given device (the device's wake-up capability has to be | ||
40 | * enabled by @sleep_wake for this feature to work) | ||
41 | * | ||
38 | * If given platform is generally capable of power managing PCI devices, all of | 42 | * If given platform is generally capable of power managing PCI devices, all of |
39 | * these callbacks are mandatory. | 43 | * these callbacks are mandatory. |
40 | */ | 44 | */ |
@@ -44,11 +48,16 @@ struct pci_platform_pm_ops { | |||
44 | pci_power_t (*choose_state)(struct pci_dev *dev); | 48 | pci_power_t (*choose_state)(struct pci_dev *dev); |
45 | bool (*can_wakeup)(struct pci_dev *dev); | 49 | bool (*can_wakeup)(struct pci_dev *dev); |
46 | int (*sleep_wake)(struct pci_dev *dev, bool enable); | 50 | int (*sleep_wake)(struct pci_dev *dev, bool enable); |
51 | int (*run_wake)(struct pci_dev *dev, bool enable); | ||
47 | }; | 52 | }; |
48 | 53 | ||
49 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); | 54 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); |
50 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); | 55 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); |
51 | extern void pci_disable_enabled_device(struct pci_dev *dev); | 56 | extern void pci_disable_enabled_device(struct pci_dev *dev); |
57 | extern bool pci_check_pme_status(struct pci_dev *dev); | ||
58 | extern int pci_finish_runtime_suspend(struct pci_dev *dev); | ||
59 | extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); | ||
60 | extern void pci_pme_wakeup_bus(struct pci_bus *bus); | ||
52 | extern void pci_pm_init(struct pci_dev *dev); | 61 | extern void pci_pm_init(struct pci_dev *dev); |
53 | extern void platform_pci_wakeup_init(struct pci_dev *dev); | 62 | extern void platform_pci_wakeup_init(struct pci_dev *dev); |
54 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); | 63 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); |
@@ -319,6 +328,13 @@ struct pci_dev_reset_methods { | |||
319 | int (*reset)(struct pci_dev *dev, int probe); | 328 | int (*reset)(struct pci_dev *dev, int probe); |
320 | }; | 329 | }; |
321 | 330 | ||
331 | #ifdef CONFIG_PCI_QUIRKS | ||
322 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); | 332 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); |
333 | #else | ||
334 | static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) | ||
335 | { | ||
336 | return -ENOTTY; | ||
337 | } | ||
338 | #endif | ||
323 | 339 | ||
324 | #endif /* DRIVERS_PCI_H */ | 340 | #endif /* DRIVERS_PCI_H */ |
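With run_wake added, a platform PM layer has to populate every member for pci_set_platform_pm() to accept its ops table (the comment above says all callbacks are mandatory when the platform is capable of power managing PCI devices). A sketch with do-nothing stubs and hypothetical names:

    #include <linux/pci.h>
    #include "pci.h"	/* struct pci_platform_pm_ops, pci_set_platform_pm() */

    static bool ex_manageable(struct pci_dev *d) { return false; }
    static int ex_set_state(struct pci_dev *d, pci_power_t s) { return -ENODEV; }
    static pci_power_t ex_choose_state(struct pci_dev *d) { return PCI_POWER_ERROR; }
    static bool ex_can_wakeup(struct pci_dev *d) { return false; }
    static int ex_sleep_wake(struct pci_dev *d, bool e) { return -ENODEV; }
    static int ex_run_wake(struct pci_dev *d, bool e) { return -ENODEV; }

    static struct pci_platform_pm_ops example_platform_pm = {
    	.is_manageable	= ex_manageable,
    	.set_state	= ex_set_state,
    	.choose_state	= ex_choose_state,
    	.can_wakeup	= ex_can_wakeup,
    	.sleep_wake	= ex_sleep_wake,
    	.run_wake	= ex_run_wake,	/* new callback */
    };

    /* pci_set_platform_pm(&example_platform_pm) would then be called once
     * during early platform initialization. */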
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 5a0c6ad53f8e..b8b494b3e0d0 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig | |||
@@ -46,3 +46,7 @@ config PCIEASPM_DEBUG | |||
46 | help | 46 | help |
47 | This enables PCI Express ASPM debug support. It will add per-device | 47 | This enables PCI Express ASPM debug support. It will add per-device |
48 | interface to control ASPM. | 48 | interface to control ASPM. |
49 | |||
50 | config PCIE_PME | ||
51 | def_bool y | ||
52 | depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI | ||
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 11f6bb1eae24..ea654545e7c4 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile | |||
@@ -11,3 +11,5 @@ obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o | |||
11 | 11 | ||
12 | # Build PCI Express AER if needed | 12 | # Build PCI Express AER if needed |
13 | obj-$(CONFIG_PCIEAER) += aer/ | 13 | obj-$(CONFIG_PCIEAER) += aer/ |
14 | |||
15 | obj-$(CONFIG_PCIE_PME) += pme/ | ||
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile new file mode 100644 index 000000000000..8b9238053080 --- /dev/null +++ b/drivers/pci/pcie/pme/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for PCI-Express Root Port PME signaling driver | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_PCIE_PME) += pmedriver.o | ||
6 | |||
7 | pmedriver-objs := pcie_pme.o | ||
8 | pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme/pcie_pme.c new file mode 100644 index 000000000000..7b3cbff547ee --- /dev/null +++ b/drivers/pci/pcie/pme/pcie_pme.c | |||
@@ -0,0 +1,505 @@ | |||
1 | /* | ||
2 | * PCIe Native PME support | ||
3 | * | ||
4 | * Copyright (C) 2007 - 2009 Intel Corp | ||
5 | * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com> | ||
6 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License V2. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <linux/pcieport_if.h> | ||
21 | #include <linux/acpi.h> | ||
22 | #include <linux/pci-acpi.h> | ||
23 | #include <linux/pm_runtime.h> | ||
24 | |||
25 | #include "../../pci.h" | ||
26 | #include "pcie_pme.h" | ||
27 | |||
28 | #define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ | ||
29 | #define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ | ||
30 | |||
31 | /* | ||
32 | * If set, this switch will prevent the PCIe root port PME service driver from | ||
33 | * being registered. Consequently, the interrupt-based PCIe PME signaling will | ||
34 | * not be used by any PCIe root ports in that case. | ||
35 | */ | ||
36 | static bool pcie_pme_disabled; | ||
37 | |||
38 | /* | ||
39 | * The PCI Express Base Specification 2.0, Section 6.1.8, states the following: | ||
40 | * "In order to maintain compatibility with non-PCI Express-aware system | ||
41 | * software, system power management logic must be configured by firmware to use | ||
42 | * the legacy mechanism of signaling PME by default. PCI Express-aware system | ||
43 | * software must notify the firmware prior to enabling native, interrupt-based | ||
44 | * PME signaling." However, if the platform doesn't provide us with a suitable | ||
45 | * notification mechanism or the notification fails, it is not clear whether or | ||
46 | * not we are supposed to use the interrupt-based PCIe PME signaling. The | ||
47 | * switch below can be used to indicate the desired behaviour. When set, it | ||
48 | * will make the kernel use the interrupt-based PCIe PME signaling regardless of | ||
49 | * the platform notification status, although the kernel will attempt to notify | ||
50 | * the platform anyway. When unset, it will prevent the kernel from using | ||
51 | * the interrupt-based PCIe PME signaling if the platform notification fails, | ||
52 | * which is the default. | ||
53 | */ | ||
54 | static bool pcie_pme_force_enable; | ||
55 | |||
56 | /* | ||
57 | * If this switch is set, MSI will not be used for PCIe PME signaling. This | ||
58 | * causes the PCIe port driver to use INTx interrupts only, but it turns out | ||
59 | * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based | ||
60 | * wake-up from system sleep states. | ||
61 | */ | ||
62 | bool pcie_pme_msi_disabled; | ||
63 | |||
64 | static int __init pcie_pme_setup(char *str) | ||
65 | { | ||
66 | if (!strcmp(str, "off")) | ||
67 | pcie_pme_disabled = true; | ||
68 | else if (!strcmp(str, "force")) | ||
69 | pcie_pme_force_enable = true; | ||
70 | else if (!strcmp(str, "nomsi")) | ||
71 | pcie_pme_msi_disabled = true; | ||
72 | return 1; | ||
73 | } | ||
74 | __setup("pcie_pme=", pcie_pme_setup); | ||
75 | |||
76 | /** | ||
77 | * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME. | ||
78 | * @srv: PCIe PME root port service to use for carrying out the check. | ||
79 | * | ||
80 | * Notify the platform that the native PCIe PME is going to be used and return | ||
81 | * 'true' if the control of the PCIe PME registers has been acquired from the | ||
82 | * platform. | ||
83 | */ | ||
84 | static bool pcie_pme_platform_setup(struct pcie_device *srv) | ||
85 | { | ||
86 | if (!pcie_pme_platform_notify(srv)) | ||
87 | return true; | ||
88 | return pcie_pme_force_enable; | ||
89 | } | ||
90 | |||
91 | struct pcie_pme_service_data { | ||
92 | spinlock_t lock; | ||
93 | struct pcie_device *srv; | ||
94 | struct work_struct work; | ||
95 | bool noirq; /* Don't enable the PME interrupt used by this service. */ | ||
96 | }; | ||
97 | |||
98 | /** | ||
99 | * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation. | ||
100 | * @dev: PCIe root port or event collector. | ||
101 | * @enable: Enable or disable the interrupt. | ||
102 | */ | ||
103 | static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) | ||
104 | { | ||
105 | int rtctl_pos; | ||
106 | u16 rtctl; | ||
107 | |||
108 | rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL; | ||
109 | |||
110 | pci_read_config_word(dev, rtctl_pos, &rtctl); | ||
111 | if (enable) | ||
112 | rtctl |= PCI_EXP_RTCTL_PMEIE; | ||
113 | else | ||
114 | rtctl &= ~PCI_EXP_RTCTL_PMEIE; | ||
115 | pci_write_config_word(dev, rtctl_pos, rtctl); | ||
116 | } | ||
117 | |||
118 | /** | ||
119 | * pcie_pme_clear_status - Clear root port PME interrupt status. | ||
120 | * @dev: PCIe root port or event collector. | ||
121 | */ | ||
122 | static void pcie_pme_clear_status(struct pci_dev *dev) | ||
123 | { | ||
124 | int rtsta_pos; | ||
125 | u32 rtsta; | ||
126 | |||
127 | rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; | ||
128 | |||
129 | pci_read_config_dword(dev, rtsta_pos, &rtsta); | ||
130 | rtsta |= PCI_EXP_RTSTA_PME; | ||
131 | pci_write_config_dword(dev, rtsta_pos, rtsta); | ||
132 | } | ||
133 | |||
134 | /** | ||
135 | * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#. | ||
136 | * @bus: PCI bus to scan. | ||
137 | * | ||
138 | * Scan given PCI bus and all buses under it for devices asserting PME#. | ||
139 | */ | ||
140 | static bool pcie_pme_walk_bus(struct pci_bus *bus) | ||
141 | { | ||
142 | struct pci_dev *dev; | ||
143 | bool ret = false; | ||
144 | |||
145 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
146 | /* Skip PCIe devices in case we started from a root port. */ | ||
147 | if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { | ||
148 | pm_request_resume(&dev->dev); | ||
149 | ret = true; | ||
150 | } | ||
151 | |||
152 | if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate)) | ||
153 | ret = true; | ||
154 | } | ||
155 | |||
156 | return ret; | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME. | ||
161 | * @bus: Secondary bus of the bridge. | ||
162 | * @devfn: Device/function number to check. | ||
163 | * | ||
164 | * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band | ||
165 | * PCIe PME message. In that case the bridge should use the Requester ID | ||
166 | * of device/function number 0 on its secondary bus. | ||
167 | */ | ||
168 | static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn) | ||
169 | { | ||
170 | struct pci_dev *dev; | ||
171 | bool found = false; | ||
172 | |||
173 | if (devfn) | ||
174 | return false; | ||
175 | |||
176 | dev = pci_dev_get(bus->self); | ||
177 | if (!dev) | ||
178 | return false; | ||
179 | |||
180 | if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | ||
181 | down_read(&pci_bus_sem); | ||
182 | if (pcie_pme_walk_bus(bus)) | ||
183 | found = true; | ||
184 | up_read(&pci_bus_sem); | ||
185 | } | ||
186 | |||
187 | pci_dev_put(dev); | ||
188 | return found; | ||
189 | } | ||
190 | |||
191 | /** | ||
192 | * pcie_pme_handle_request - Find device that generated PME and handle it. | ||
193 | * @port: Root port or event collector that generated the PME interrupt. | ||
194 | * @req_id: PCIe Requester ID of the device that generated the PME. | ||
195 | */ | ||
196 | static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | ||
197 | { | ||
198 | u8 busnr = req_id >> 8, devfn = req_id & 0xff; | ||
199 | struct pci_bus *bus; | ||
200 | struct pci_dev *dev; | ||
201 | bool found = false; | ||
202 | |||
203 | /* First, check if the PME is from the root port itself. */ | ||
204 | if (port->devfn == devfn && port->bus->number == busnr) { | ||
205 | if (pci_check_pme_status(port)) { | ||
206 | pm_request_resume(&port->dev); | ||
207 | found = true; | ||
208 | } else { | ||
209 | /* | ||
210 | * Apparently, the root port generated the PME on behalf | ||
211 | * of a non-PCIe device downstream. If this is done by | ||
212 | * a root port, the Requester ID field in its status | ||
213 | * register may contain either the root port's, or the | ||
214 | * source device's information (PCI Express Base | ||
215 | * Specification, Rev. 2.0, Section 6.1.9). | ||
216 | */ | ||
217 | down_read(&pci_bus_sem); | ||
218 | found = pcie_pme_walk_bus(port->subordinate); | ||
219 | up_read(&pci_bus_sem); | ||
220 | } | ||
221 | goto out; | ||
222 | } | ||
223 | |||
224 | /* Second, find the bus the source device is on. */ | ||
225 | bus = pci_find_bus(pci_domain_nr(port->bus), busnr); | ||
226 | if (!bus) | ||
227 | goto out; | ||
228 | |||
229 | /* Next, check if the PME is from a PCIe-PCI bridge. */ | ||
230 | found = pcie_pme_from_pci_bridge(bus, devfn); | ||
231 | if (found) | ||
232 | goto out; | ||
233 | |||
234 | /* Finally, try to find the PME source on the bus. */ | ||
235 | down_read(&pci_bus_sem); | ||
236 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
237 | pci_dev_get(dev); | ||
238 | if (dev->devfn == devfn) { | ||
239 | found = true; | ||
240 | break; | ||
241 | } | ||
242 | pci_dev_put(dev); | ||
243 | } | ||
244 | up_read(&pci_bus_sem); | ||
245 | |||
246 | if (found) { | ||
247 | /* The device is there, but we have to check its PME status. */ | ||
248 | found = pci_check_pme_status(dev); | ||
249 | if (found) | ||
250 | pm_request_resume(&dev->dev); | ||
251 | pci_dev_put(dev); | ||
252 | } else if (devfn) { | ||
253 | /* | ||
254 | * The device is not there, but we can still try to recover by | ||
255 | * assuming that the PME was reported by a PCIe-PCI bridge that | ||
256 | * used devfn different from zero. | ||
257 | */ | ||
258 | dev_dbg(&port->dev, "PME interrupt generated for " | ||
259 | "non-existent device %02x:%02x.%d\n", | ||
260 | busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); | ||
261 | found = pcie_pme_from_pci_bridge(bus, 0); | ||
262 | } | ||
263 | |||
264 | out: | ||
265 | if (!found) | ||
266 | dev_dbg(&port->dev, "Spurious native PME interrupt!\n"); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * pcie_pme_work_fn - Work handler for PCIe PME interrupt. | ||
271 | * @work: Work structure giving access to service data. | ||
272 | */ | ||
273 | static void pcie_pme_work_fn(struct work_struct *work) | ||
274 | { | ||
275 | struct pcie_pme_service_data *data = | ||
276 | container_of(work, struct pcie_pme_service_data, work); | ||
277 | struct pci_dev *port = data->srv->port; | ||
278 | int rtsta_pos; | ||
279 | u32 rtsta; | ||
280 | |||
281 | rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; | ||
282 | |||
283 | spin_lock_irq(&data->lock); | ||
284 | |||
285 | for (;;) { | ||
286 | if (data->noirq) | ||
287 | break; | ||
288 | |||
289 | pci_read_config_dword(port, rtsta_pos, &rtsta); | ||
290 | if (rtsta & PCI_EXP_RTSTA_PME) { | ||
291 | /* | ||
292 | * Clear PME status of the port. If there are other | ||
293 | * pending PMEs, the status will be set again. | ||
294 | */ | ||
295 | pcie_pme_clear_status(port); | ||
296 | |||
297 | spin_unlock_irq(&data->lock); | ||
298 | pcie_pme_handle_request(port, rtsta & 0xffff); | ||
299 | spin_lock_irq(&data->lock); | ||
300 | |||
301 | continue; | ||
302 | } | ||
303 | |||
304 | /* No need to loop if there are no more PMEs pending. */ | ||
305 | if (!(rtsta & PCI_EXP_RTSTA_PENDING)) | ||
306 | break; | ||
307 | |||
308 | spin_unlock_irq(&data->lock); | ||
309 | cpu_relax(); | ||
310 | spin_lock_irq(&data->lock); | ||
311 | } | ||
312 | |||
313 | if (!data->noirq) | ||
314 | pcie_pme_interrupt_enable(port, true); | ||
315 | |||
316 | spin_unlock_irq(&data->lock); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt. | ||
321 | * @irq: Interrupt vector. | ||
322 | * @context: Interrupt context pointer. | ||
323 | */ | ||
324 | static irqreturn_t pcie_pme_irq(int irq, void *context) | ||
325 | { | ||
326 | struct pci_dev *port; | ||
327 | struct pcie_pme_service_data *data; | ||
328 | int rtsta_pos; | ||
329 | u32 rtsta; | ||
330 | unsigned long flags; | ||
331 | |||
332 | port = ((struct pcie_device *)context)->port; | ||
333 | data = get_service_data((struct pcie_device *)context); | ||
334 | |||
335 | rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; | ||
336 | |||
337 | spin_lock_irqsave(&data->lock, flags); | ||
338 | pci_read_config_dword(port, rtsta_pos, &rtsta); | ||
339 | |||
340 | if (!(rtsta & PCI_EXP_RTSTA_PME)) { | ||
341 | spin_unlock_irqrestore(&data->lock, flags); | ||
342 | return IRQ_NONE; | ||
343 | } | ||
344 | |||
345 | pcie_pme_interrupt_enable(port, false); | ||
346 | spin_unlock_irqrestore(&data->lock, flags); | ||
347 | |||
348 | /* We don't use pm_wq, because it's freezable. */ | ||
349 | schedule_work(&data->work); | ||
350 | |||
351 | return IRQ_HANDLED; | ||
352 | } | ||
353 | |||
354 | /** | ||
355 | * pcie_pme_set_native - Set the PME interrupt flag for given device. | ||
356 | * @dev: PCI device to handle. | ||
357 | * @ign: Ignored. | ||
358 | */ | ||
359 | static int pcie_pme_set_native(struct pci_dev *dev, void *ign) | ||
360 | { | ||
361 | dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n"); | ||
362 | |||
363 | device_set_run_wake(&dev->dev, true); | ||
364 | dev->pme_interrupt = true; | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port. | ||
370 | * @port: PCIe root port or event collector to handle. | ||
371 | * | ||
372 | * For each device below the given root port, including the port itself (or for | ||
373 | * each root complex integrated endpoint if @port is a root complex event collector), | ||
374 | * set the flag indicating that it can signal run-time wake-up events via PCIe | ||
375 | * PME interrupts. | ||
376 | */ | ||
377 | static void pcie_pme_mark_devices(struct pci_dev *port) | ||
378 | { | ||
379 | pcie_pme_set_native(port, NULL); | ||
380 | if (port->subordinate) { | ||
381 | pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL); | ||
382 | } else { | ||
383 | struct pci_bus *bus = port->bus; | ||
384 | struct pci_dev *dev; | ||
385 | |||
386 | /* Check if this is a root complex event collector. */ | ||
387 | if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) | ||
388 | return; | ||
389 | |||
390 | down_read(&pci_bus_sem); | ||
391 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
392 | if (pci_is_pcie(dev) | ||
393 | && dev->pcie_type == PCI_EXP_TYPE_RC_END) | ||
394 | pcie_pme_set_native(dev, NULL); | ||
395 | up_read(&pci_bus_sem); | ||
396 | } | ||
397 | } | ||
398 | |||
399 | /** | ||
400 | * pcie_pme_probe - Initialize PCIe PME service for given root port. | ||
401 | * @srv: PCIe service to initialize. | ||
402 | */ | ||
403 | static int pcie_pme_probe(struct pcie_device *srv) | ||
404 | { | ||
405 | struct pci_dev *port; | ||
406 | struct pcie_pme_service_data *data; | ||
407 | int ret; | ||
408 | |||
409 | if (!pcie_pme_platform_setup(srv)) | ||
410 | return -EACCES; | ||
411 | |||
412 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
413 | if (!data) | ||
414 | return -ENOMEM; | ||
415 | |||
416 | spin_lock_init(&data->lock); | ||
417 | INIT_WORK(&data->work, pcie_pme_work_fn); | ||
418 | data->srv = srv; | ||
419 | set_service_data(srv, data); | ||
420 | |||
421 | port = srv->port; | ||
422 | pcie_pme_interrupt_enable(port, false); | ||
423 | pcie_pme_clear_status(port); | ||
424 | |||
425 | ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); | ||
426 | if (ret) { | ||
427 | kfree(data); | ||
428 | } else { | ||
429 | pcie_pme_mark_devices(port); | ||
430 | pcie_pme_interrupt_enable(port, true); | ||
431 | } | ||
432 | |||
433 | return ret; | ||
434 | } | ||
435 | |||
436 | /** | ||
437 | * pcie_pme_suspend - Suspend PCIe PME service device. | ||
438 | * @srv: PCIe service device to suspend. | ||
439 | */ | ||
440 | static int pcie_pme_suspend(struct pcie_device *srv) | ||
441 | { | ||
442 | struct pcie_pme_service_data *data = get_service_data(srv); | ||
443 | struct pci_dev *port = srv->port; | ||
444 | |||
445 | spin_lock_irq(&data->lock); | ||
446 | pcie_pme_interrupt_enable(port, false); | ||
447 | pcie_pme_clear_status(port); | ||
448 | data->noirq = true; | ||
449 | spin_unlock_irq(&data->lock); | ||
450 | |||
451 | synchronize_irq(srv->irq); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | /** | ||
457 | * pcie_pme_resume - Resume PCIe PME service device. | ||
458 | * @srv: PCIe service device to resume. | ||
459 | */ | ||
460 | static int pcie_pme_resume(struct pcie_device *srv) | ||
461 | { | ||
462 | struct pcie_pme_service_data *data = get_service_data(srv); | ||
463 | struct pci_dev *port = srv->port; | ||
464 | |||
465 | spin_lock_irq(&data->lock); | ||
466 | data->noirq = false; | ||
467 | pcie_pme_clear_status(port); | ||
468 | pcie_pme_interrupt_enable(port, true); | ||
469 | spin_unlock_irq(&data->lock); | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | /** | ||
475 | * pcie_pme_remove - Prepare PCIe PME service device for removal. | ||
476 | * @srv: PCIe service device to remove. | ||
477 | */ | ||
478 | static void pcie_pme_remove(struct pcie_device *srv) | ||
479 | { | ||
480 | pcie_pme_suspend(srv); | ||
481 | free_irq(srv->irq, srv); | ||
482 | kfree(get_service_data(srv)); | ||
483 | } | ||
484 | |||
485 | static struct pcie_port_service_driver pcie_pme_driver = { | ||
486 | .name = "pcie_pme", | ||
487 | .port_type = PCI_EXP_TYPE_ROOT_PORT, | ||
488 | .service = PCIE_PORT_SERVICE_PME, | ||
489 | |||
490 | .probe = pcie_pme_probe, | ||
491 | .suspend = pcie_pme_suspend, | ||
492 | .resume = pcie_pme_resume, | ||
493 | .remove = pcie_pme_remove, | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * pcie_pme_service_init - Register the PCIe PME service driver. | ||
498 | */ | ||
499 | static int __init pcie_pme_service_init(void) | ||
500 | { | ||
501 | return pcie_pme_disabled ? | ||
502 | -ENODEV : pcie_port_service_register(&pcie_pme_driver); | ||
503 | } | ||
504 | |||
505 | module_init(pcie_pme_service_init); | ||
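
As a reading aid (not part of the patch): pcie_pme_handle_request() above splits the Requester ID taken from the lower 16 bits of the Root Status register into a bus number and a devfn. A minimal sketch of that decoding follows; the function name is illustrative only.

	#include <linux/pci.h>

	/* Illustrative helper, not part of the patch: decode the Requester ID
	 * reported in the lower 16 bits of PCI_EXP_RTSTA, as done at the top
	 * of pcie_pme_handle_request(). */
	static void example_decode_pme_requester(struct pci_dev *port, u32 rtsta)
	{
		u16 req_id = rtsta & 0xffff;	/* Requester ID of the PME source */
		u8 busnr = req_id >> 8;		/* bus number in the upper byte */
		u8 devfn = req_id & 0xff;	/* device/function in the lower byte */

		dev_dbg(&port->dev, "PME reported by %02x:%02x.%d\n",
			busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
	}
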
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h new file mode 100644 index 000000000000..b30d2b7c9775 --- /dev/null +++ b/drivers/pci/pcie/pme/pcie_pme.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * drivers/pci/pcie/pme/pcie_pme.h | ||
3 | * | ||
4 | * PCI Express Root Port PME signaling support | ||
5 | * | ||
6 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
7 | */ | ||
8 | |||
9 | #ifndef _PCIE_PME_H_ | ||
10 | #define _PCIE_PME_H_ | ||
11 | |||
12 | struct pcie_device; | ||
13 | |||
14 | #ifdef CONFIG_ACPI | ||
15 | extern int pcie_pme_acpi_setup(struct pcie_device *srv); | ||
16 | |||
17 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
18 | { | ||
19 | return pcie_pme_acpi_setup(srv); | ||
20 | } | ||
21 | #else /* !CONFIG_ACPI */ | ||
22 | static inline int pcie_pme_platform_notify(struct pcie_device *srv) | ||
23 | { | ||
24 | return 0; | ||
25 | } | ||
26 | #endif /* !CONFIG_ACPI */ | ||
27 | |||
28 | #endif | ||
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c new file mode 100644 index 000000000000..83ab2287ae3f --- /dev/null +++ b/drivers/pci/pcie/pme/pcie_pme_acpi.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * PCIe Native PME support, ACPI-related part | ||
3 | * | ||
4 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License V2. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/pci-acpi.h> | ||
16 | #include <linux/pcieport_if.h> | ||
17 | |||
18 | /** | ||
19 | * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME. | ||
20 | * @srv: PCIe PME service for a root port or event collector. | ||
21 | * | ||
22 | * Invoked when the PCIe bus type loads the PCIe PME service driver. To avoid | ||
23 | * conflicts with the BIOS, PCIe PME support requires the BIOS to yield PCIe | ||
24 | * PME control to the kernel. | ||
25 | */ | ||
26 | int pcie_pme_acpi_setup(struct pcie_device *srv) | ||
27 | { | ||
28 | acpi_status status = AE_NOT_FOUND; | ||
29 | struct pci_dev *port = srv->port; | ||
30 | acpi_handle handle; | ||
31 | int error = 0; | ||
32 | |||
33 | if (acpi_pci_disabled) | ||
34 | return -ENOSYS; | ||
35 | |||
36 | dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n"); | ||
37 | |||
38 | handle = acpi_find_root_bridge_handle(port); | ||
39 | if (!handle) | ||
40 | return -EINVAL; | ||
41 | |||
42 | status = acpi_pci_osc_control_set(handle, | ||
43 | OSC_PCI_EXPRESS_PME_CONTROL | | ||
44 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | ||
45 | if (ACPI_FAILURE(status)) { | ||
46 | dev_info(&port->dev, | ||
47 | "Failed to receive control of PCIe PME service: %s\n", | ||
48 | (status == AE_SUPPORT || status == AE_NOT_FOUND) ? | ||
49 | "no _OSC support" : "ACPI _OSC failed"); | ||
50 | error = -ENODEV; | ||
51 | } | ||
52 | |||
53 | return error; | ||
54 | } | ||
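
For context, a sketch (not part of the patch) of how the return value of pcie_pme_acpi_setup() is consumed: it reaches pcie_pme_probe() through pcie_pme_platform_setup() in pcie_pme.c, and the resulting decision, with the pcie_pme=force case made explicit, amounts to the following. The function name is illustrative; the real logic lives in the listings above.

	#include <linux/pcieport_if.h>
	#include "pcie_pme.h"

	/* Illustrative restatement only: 0 from pcie_pme_platform_notify() means
	 * the firmware granted PCIe PME control; -ENOSYS, -EINVAL or -ENODEV
	 * mean the handshake was unavailable or refused, in which case native
	 * PME is used only if the user booted with pcie_pme=force. */
	static bool example_use_native_pme(struct pcie_device *srv, bool force)
	{
		if (!pcie_pme_platform_notify(srv))
			return true;

		return force;
	}
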
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index aaeb9d21cba5..813a5c3427b6 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
@@ -30,4 +30,21 @@ extern void pcie_port_device_remove(struct pci_dev *dev); | |||
30 | extern int __must_check pcie_port_bus_register(void); | 30 | extern int __must_check pcie_port_bus_register(void); |
31 | extern void pcie_port_bus_unregister(void); | 31 | extern void pcie_port_bus_unregister(void); |
32 | 32 | ||
33 | #ifdef CONFIG_PCIE_PME | ||
34 | extern bool pcie_pme_msi_disabled; | ||
35 | |||
36 | static inline void pcie_pme_disable_msi(void) | ||
37 | { | ||
38 | pcie_pme_msi_disabled = true; | ||
39 | } | ||
40 | |||
41 | static inline bool pcie_pme_no_msi(void) | ||
42 | { | ||
43 | return pcie_pme_msi_disabled; | ||
44 | } | ||
45 | #else /* !CONFIG_PCIE_PME */ | ||
46 | static inline void pcie_pme_disable_msi(void) {} | ||
47 | static inline bool pcie_pme_no_msi(void) { return false; } | ||
48 | #endif /* !CONFIG_PCIE_PME */ | ||
49 | |||
33 | #endif /* _PORTDRV_H_ */ | 50 | #endif /* _PORTDRV_H_ */ |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index b174188ac121..0d34ff415399 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -186,16 +186,24 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
186 | */ | 186 | */ |
187 | static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) | 187 | static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) |
188 | { | 188 | { |
189 | int i, irq; | 189 | int i, irq = -1; |
190 | |||
191 | /* We have to use INTx if MSI cannot be used for PCIe PME. */ | ||
192 | if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) { | ||
193 | if (dev->pin) | ||
194 | irq = dev->irq; | ||
195 | goto no_msi; | ||
196 | } | ||
190 | 197 | ||
191 | /* Try to use MSI-X if supported */ | 198 | /* Try to use MSI-X if supported */ |
192 | if (!pcie_port_enable_msix(dev, irqs, mask)) | 199 | if (!pcie_port_enable_msix(dev, irqs, mask)) |
193 | return 0; | 200 | return 0; |
201 | |||
194 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ | 202 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ |
195 | irq = -1; | ||
196 | if (!pci_enable_msi(dev) || dev->pin) | 203 | if (!pci_enable_msi(dev) || dev->pin) |
197 | irq = dev->irq; | 204 | irq = dev->irq; |
198 | 205 | ||
206 | no_msi: | ||
199 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) | 207 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) |
200 | irqs[i] = irq; | 208 | irqs[i] = irq; |
201 | irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; | 209 | irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 13c8972886e6..127e8f169d9c 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/pcieport_if.h> | 16 | #include <linux/pcieport_if.h> |
17 | #include <linux/aer.h> | 17 | #include <linux/aer.h> |
18 | #include <linux/dmi.h> | ||
18 | 19 | ||
19 | #include "portdrv.h" | 20 | #include "portdrv.h" |
20 | #include "aer/aerdrv.h" | 21 | #include "aer/aerdrv.h" |
@@ -273,10 +274,36 @@ static struct pci_driver pcie_portdriver = { | |||
273 | .driver.pm = PCIE_PORTDRV_PM_OPS, | 274 | .driver.pm = PCIE_PORTDRV_PM_OPS, |
274 | }; | 275 | }; |
275 | 276 | ||
277 | static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) | ||
278 | { | ||
279 | pr_notice("%s detected: will not use MSI for PCIe PME signaling\n", | ||
280 | d->ident); | ||
281 | pcie_pme_disable_msi(); | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { | ||
286 | /* | ||
287 | * Boxes that should not use MSI for PCIe PME signaling. | ||
288 | */ | ||
289 | { | ||
290 | .callback = dmi_pcie_pme_disable_msi, | ||
291 | .ident = "MSI Wind U-100", | ||
292 | .matches = { | ||
293 | DMI_MATCH(DMI_SYS_VENDOR, | ||
294 | "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
295 | DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), | ||
296 | }, | ||
297 | }, | ||
298 | {} | ||
299 | }; | ||
300 | |||
276 | static int __init pcie_portdrv_init(void) | 301 | static int __init pcie_portdrv_init(void) |
277 | { | 302 | { |
278 | int retval; | 303 | int retval; |
279 | 304 | ||
305 | dmi_check_system(pcie_portdrv_dmi_table); | ||
306 | |||
280 | retval = pcie_port_bus_register(); | 307 | retval = pcie_port_bus_register(); |
281 | if (retval) { | 308 | if (retval) { |
282 | printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval); | 309 | printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval); |
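
If more machines turn out to need the same workaround, the DMI blacklist above is extended with further entries. A hypothetical example of the shape of such an entry is shown below; the vendor and product strings, and the table name, are made up for illustration, and in practice the entry would simply be added before the terminating {} of pcie_portdrv_dmi_table[].

	#include <linux/dmi.h>

	/* Hypothetical additional blacklist entry; strings are illustrative. */
	static struct dmi_system_id __initdata example_pme_nomsi_table[] = {
		{
			.callback = dmi_pcie_pme_disable_msi,
			.ident = "Example Vendor Example-Box",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example-Box"),
			},
		},
		{}
	};
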
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 446e4a94d7d3..270d069819f7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -89,6 +89,7 @@ static void release_pcibus_dev(struct device *dev) | |||
89 | 89 | ||
90 | if (pci_bus->bridge) | 90 | if (pci_bus->bridge) |
91 | put_device(pci_bus->bridge); | 91 | put_device(pci_bus->bridge); |
92 | pci_bus_remove_resources(pci_bus); | ||
92 | kfree(pci_bus); | 93 | kfree(pci_bus); |
93 | } | 94 | } |
94 | 95 | ||
@@ -281,26 +282,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
281 | } | 282 | } |
282 | } | 283 | } |
283 | 284 | ||
284 | void __devinit pci_read_bridge_bases(struct pci_bus *child) | 285 | static void __devinit pci_read_bridge_io(struct pci_bus *child) |
285 | { | 286 | { |
286 | struct pci_dev *dev = child->self; | 287 | struct pci_dev *dev = child->self; |
287 | u8 io_base_lo, io_limit_lo; | 288 | u8 io_base_lo, io_limit_lo; |
288 | u16 mem_base_lo, mem_limit_lo; | ||
289 | unsigned long base, limit; | 289 | unsigned long base, limit; |
290 | struct resource *res; | 290 | struct resource *res; |
291 | int i; | ||
292 | |||
293 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ | ||
294 | return; | ||
295 | |||
296 | dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", | ||
297 | child->secondary, child->subordinate, | ||
298 | dev->transparent ? " (subtractive decode)": ""); | ||
299 | |||
300 | if (dev->transparent) { | ||
301 | for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) | ||
302 | child->resource[i] = child->parent->resource[i - 3]; | ||
303 | } | ||
304 | 291 | ||
305 | res = child->resource[0]; | 292 | res = child->resource[0]; |
306 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); | 293 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); |
@@ -316,26 +303,50 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
316 | limit |= (io_limit_hi << 16); | 303 | limit |= (io_limit_hi << 16); |
317 | } | 304 | } |
318 | 305 | ||
319 | if (base <= limit) { | 306 | if (base && base <= limit) { |
320 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; | 307 | res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; |
321 | if (!res->start) | 308 | if (!res->start) |
322 | res->start = base; | 309 | res->start = base; |
323 | if (!res->end) | 310 | if (!res->end) |
324 | res->end = limit + 0xfff; | 311 | res->end = limit + 0xfff; |
325 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 312 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
313 | } else { | ||
314 | dev_printk(KERN_DEBUG, &dev->dev, | ||
315 | " bridge window [io %04lx - %04lx] reg reading\n", | ||
316 | base, limit); | ||
326 | } | 317 | } |
318 | } | ||
319 | |||
320 | static void __devinit pci_read_bridge_mmio(struct pci_bus *child) | ||
321 | { | ||
322 | struct pci_dev *dev = child->self; | ||
323 | u16 mem_base_lo, mem_limit_lo; | ||
324 | unsigned long base, limit; | ||
325 | struct resource *res; | ||
327 | 326 | ||
328 | res = child->resource[1]; | 327 | res = child->resource[1]; |
329 | pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); | 328 | pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); |
330 | pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); | 329 | pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); |
331 | base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; | 330 | base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; |
332 | limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; | 331 | limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; |
333 | if (base <= limit) { | 332 | if (base && base <= limit) { |
334 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; | 333 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; |
335 | res->start = base; | 334 | res->start = base; |
336 | res->end = limit + 0xfffff; | 335 | res->end = limit + 0xfffff; |
337 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 336 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
337 | } else { | ||
338 | dev_printk(KERN_DEBUG, &dev->dev, | ||
339 | " bridge window [mem 0x%08lx - 0x%08lx] reg reading\n", | ||
340 | base, limit + 0xfffff); | ||
338 | } | 341 | } |
342 | } | ||
343 | |||
344 | static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child) | ||
345 | { | ||
346 | struct pci_dev *dev = child->self; | ||
347 | u16 mem_base_lo, mem_limit_lo; | ||
348 | unsigned long base, limit; | ||
349 | struct resource *res; | ||
339 | 350 | ||
340 | res = child->resource[2]; | 351 | res = child->resource[2]; |
341 | pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); | 352 | pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); |
@@ -366,7 +377,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
366 | #endif | 377 | #endif |
367 | } | 378 | } |
368 | } | 379 | } |
369 | if (base <= limit) { | 380 | if (base && base <= limit) { |
370 | res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | | 381 | res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | |
371 | IORESOURCE_MEM | IORESOURCE_PREFETCH; | 382 | IORESOURCE_MEM | IORESOURCE_PREFETCH; |
372 | if (res->flags & PCI_PREF_RANGE_TYPE_64) | 383 | if (res->flags & PCI_PREF_RANGE_TYPE_64) |
@@ -374,6 +385,44 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
374 | res->start = base; | 385 | res->start = base; |
375 | res->end = limit + 0xfffff; | 386 | res->end = limit + 0xfffff; |
376 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); | 387 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
388 | } else { | ||
389 | dev_printk(KERN_DEBUG, &dev->dev, | ||
390 | " bridge window [mem 0x%08lx - %08lx pref] reg reading\n", | ||
391 | base, limit + 0xfffff); | ||
392 | } | ||
393 | } | ||
394 | |||
395 | void __devinit pci_read_bridge_bases(struct pci_bus *child) | ||
396 | { | ||
397 | struct pci_dev *dev = child->self; | ||
398 | struct resource *res; | ||
399 | int i; | ||
400 | |||
401 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ | ||
402 | return; | ||
403 | |||
404 | dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", | ||
405 | child->secondary, child->subordinate, | ||
406 | dev->transparent ? " (subtractive decode)" : ""); | ||
407 | |||
408 | pci_bus_remove_resources(child); | ||
409 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) | ||
410 | child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; | ||
411 | |||
412 | pci_read_bridge_io(child); | ||
413 | pci_read_bridge_mmio(child); | ||
414 | pci_read_bridge_mmio_pref(child); | ||
415 | |||
416 | if (dev->transparent) { | ||
417 | pci_bus_for_each_resource(child->parent, res, i) { | ||
418 | if (res) { | ||
419 | pci_bus_add_resource(child, res, | ||
420 | PCI_SUBTRACTIVE_DECODE); | ||
421 | dev_printk(KERN_DEBUG, &dev->dev, | ||
422 | " bridge window %pR (subtractive decode)\n", | ||
423 | res); | ||
424 | } | ||
425 | } | ||
377 | } | 426 | } |
378 | } | 427 | } |
379 | 428 | ||
@@ -387,10 +436,147 @@ static struct pci_bus * pci_alloc_bus(void) | |||
387 | INIT_LIST_HEAD(&b->children); | 436 | INIT_LIST_HEAD(&b->children); |
388 | INIT_LIST_HEAD(&b->devices); | 437 | INIT_LIST_HEAD(&b->devices); |
389 | INIT_LIST_HEAD(&b->slots); | 438 | INIT_LIST_HEAD(&b->slots); |
439 | INIT_LIST_HEAD(&b->resources); | ||
440 | b->max_bus_speed = PCI_SPEED_UNKNOWN; | ||
441 | b->cur_bus_speed = PCI_SPEED_UNKNOWN; | ||
390 | } | 442 | } |
391 | return b; | 443 | return b; |
392 | } | 444 | } |
393 | 445 | ||
446 | static unsigned char pcix_bus_speed[] = { | ||
447 | PCI_SPEED_UNKNOWN, /* 0 */ | ||
448 | PCI_SPEED_66MHz_PCIX, /* 1 */ | ||
449 | PCI_SPEED_100MHz_PCIX, /* 2 */ | ||
450 | PCI_SPEED_133MHz_PCIX, /* 3 */ | ||
451 | PCI_SPEED_UNKNOWN, /* 4 */ | ||
452 | PCI_SPEED_66MHz_PCIX_ECC, /* 5 */ | ||
453 | PCI_SPEED_100MHz_PCIX_ECC, /* 6 */ | ||
454 | PCI_SPEED_133MHz_PCIX_ECC, /* 7 */ | ||
455 | PCI_SPEED_UNKNOWN, /* 8 */ | ||
456 | PCI_SPEED_66MHz_PCIX_266, /* 9 */ | ||
457 | PCI_SPEED_100MHz_PCIX_266, /* A */ | ||
458 | PCI_SPEED_133MHz_PCIX_266, /* B */ | ||
459 | PCI_SPEED_UNKNOWN, /* C */ | ||
460 | PCI_SPEED_66MHz_PCIX_533, /* D */ | ||
461 | PCI_SPEED_100MHz_PCIX_533, /* E */ | ||
462 | PCI_SPEED_133MHz_PCIX_533 /* F */ | ||
463 | }; | ||
464 | |||
465 | static unsigned char pcie_link_speed[] = { | ||
466 | PCI_SPEED_UNKNOWN, /* 0 */ | ||
467 | PCIE_SPEED_2_5GT, /* 1 */ | ||
468 | PCIE_SPEED_5_0GT, /* 2 */ | ||
469 | PCIE_SPEED_8_0GT, /* 3 */ | ||
470 | PCI_SPEED_UNKNOWN, /* 4 */ | ||
471 | PCI_SPEED_UNKNOWN, /* 5 */ | ||
472 | PCI_SPEED_UNKNOWN, /* 6 */ | ||
473 | PCI_SPEED_UNKNOWN, /* 7 */ | ||
474 | PCI_SPEED_UNKNOWN, /* 8 */ | ||
475 | PCI_SPEED_UNKNOWN, /* 9 */ | ||
476 | PCI_SPEED_UNKNOWN, /* A */ | ||
477 | PCI_SPEED_UNKNOWN, /* B */ | ||
478 | PCI_SPEED_UNKNOWN, /* C */ | ||
479 | PCI_SPEED_UNKNOWN, /* D */ | ||
480 | PCI_SPEED_UNKNOWN, /* E */ | ||
481 | PCI_SPEED_UNKNOWN /* F */ | ||
482 | }; | ||
483 | |||
484 | void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) | ||
485 | { | ||
486 | bus->cur_bus_speed = pcie_link_speed[linksta & 0xf]; | ||
487 | } | ||
488 | EXPORT_SYMBOL_GPL(pcie_update_link_speed); | ||
489 | |||
490 | static unsigned char agp_speeds[] = { | ||
491 | AGP_UNKNOWN, | ||
492 | AGP_1X, | ||
493 | AGP_2X, | ||
494 | AGP_4X, | ||
495 | AGP_8X | ||
496 | }; | ||
497 | |||
498 | static enum pci_bus_speed agp_speed(int agp3, int agpstat) | ||
499 | { | ||
500 | int index = 0; | ||
501 | |||
502 | if (agpstat & 4) | ||
503 | index = 3; | ||
504 | else if (agpstat & 2) | ||
505 | index = 2; | ||
506 | else if (agpstat & 1) | ||
507 | index = 1; | ||
508 | else | ||
509 | goto out; | ||
510 | |||
511 | if (agp3) { | ||
512 | index += 2; | ||
513 | if (index == 5) | ||
514 | index = 0; | ||
515 | } | ||
516 | |||
517 | out: | ||
518 | return agp_speeds[index]; | ||
519 | } | ||
520 | |||
521 | |||
522 | static void pci_set_bus_speed(struct pci_bus *bus) | ||
523 | { | ||
524 | struct pci_dev *bridge = bus->self; | ||
525 | int pos; | ||
526 | |||
527 | pos = pci_find_capability(bridge, PCI_CAP_ID_AGP); | ||
528 | if (!pos) | ||
529 | pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3); | ||
530 | if (pos) { | ||
531 | u32 agpstat, agpcmd; | ||
532 | |||
533 | pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat); | ||
534 | bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7); | ||
535 | |||
536 | pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd); | ||
537 | bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7); | ||
538 | } | ||
539 | |||
540 | pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); | ||
541 | if (pos) { | ||
542 | u16 status; | ||
543 | enum pci_bus_speed max; | ||
544 | pci_read_config_word(bridge, pos + 2, &status); | ||
545 | |||
546 | if (status & 0x8000) { | ||
547 | max = PCI_SPEED_133MHz_PCIX_533; | ||
548 | } else if (status & 0x4000) { | ||
549 | max = PCI_SPEED_133MHz_PCIX_266; | ||
550 | } else if (status & 0x0002) { | ||
551 | if (((status >> 12) & 0x3) == 2) { | ||
552 | max = PCI_SPEED_133MHz_PCIX_ECC; | ||
553 | } else { | ||
554 | max = PCI_SPEED_133MHz_PCIX; | ||
555 | } | ||
556 | } else { | ||
557 | max = PCI_SPEED_66MHz_PCIX; | ||
558 | } | ||
559 | |||
560 | bus->max_bus_speed = max; | ||
561 | bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf]; | ||
562 | |||
563 | return; | ||
564 | } | ||
565 | |||
566 | pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); | ||
567 | if (pos) { | ||
568 | u32 linkcap; | ||
569 | u16 linksta; | ||
570 | |||
571 | pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); | ||
572 | bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; | ||
573 | |||
574 | pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); | ||
575 | pcie_update_link_speed(bus, linksta); | ||
576 | } | ||
577 | } | ||
578 | |||
579 | |||
394 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | 580 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, |
395 | struct pci_dev *bridge, int busnr) | 581 | struct pci_dev *bridge, int busnr) |
396 | { | 582 | { |
@@ -430,6 +616,8 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | |||
430 | child->self = bridge; | 616 | child->self = bridge; |
431 | child->bridge = get_device(&bridge->dev); | 617 | child->bridge = get_device(&bridge->dev); |
432 | 618 | ||
619 | pci_set_bus_speed(child); | ||
620 | |||
433 | /* Set up default resource pointers and names.. */ | 621 | /* Set up default resource pointers and names.. */ |
434 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { | 622 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { |
435 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; | 623 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; |
@@ -1081,6 +1269,45 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) | |||
1081 | } | 1269 | } |
1082 | EXPORT_SYMBOL(pci_scan_single_device); | 1270 | EXPORT_SYMBOL(pci_scan_single_device); |
1083 | 1271 | ||
1272 | static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn) | ||
1273 | { | ||
1274 | u16 cap; | ||
1275 | unsigned pos, next_fn; | ||
1276 | |||
1277 | if (!dev) | ||
1278 | return 0; | ||
1279 | |||
1280 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); | ||
1281 | if (!pos) | ||
1282 | return 0; | ||
1283 | pci_read_config_word(dev, pos + 4, &cap); | ||
1284 | next_fn = cap >> 8; | ||
1285 | if (next_fn <= fn) | ||
1286 | return 0; | ||
1287 | return next_fn; | ||
1288 | } | ||
1289 | |||
1290 | static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn) | ||
1291 | { | ||
1292 | return (fn + 1) % 8; | ||
1293 | } | ||
1294 | |||
1295 | static unsigned no_next_fn(struct pci_dev *dev, unsigned fn) | ||
1296 | { | ||
1297 | return 0; | ||
1298 | } | ||
1299 | |||
1300 | static int only_one_child(struct pci_bus *bus) | ||
1301 | { | ||
1302 | struct pci_dev *parent = bus->self; | ||
1303 | if (!parent || !pci_is_pcie(parent)) | ||
1304 | return 0; | ||
1305 | if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT || | ||
1306 | parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) | ||
1307 | return 1; | ||
1308 | return 0; | ||
1309 | } | ||
1310 | |||
1084 | /** | 1311 | /** |
1085 | * pci_scan_slot - scan a PCI slot on a bus for devices. | 1312 | * pci_scan_slot - scan a PCI slot on a bus for devices. |
1086 | * @bus: PCI bus to scan | 1313 | * @bus: PCI bus to scan |
@@ -1094,21 +1321,30 @@ EXPORT_SYMBOL(pci_scan_single_device); | |||
1094 | */ | 1321 | */ |
1095 | int pci_scan_slot(struct pci_bus *bus, int devfn) | 1322 | int pci_scan_slot(struct pci_bus *bus, int devfn) |
1096 | { | 1323 | { |
1097 | int fn, nr = 0; | 1324 | unsigned fn, nr = 0; |
1098 | struct pci_dev *dev; | 1325 | struct pci_dev *dev; |
1326 | unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn; | ||
1327 | |||
1328 | if (only_one_child(bus) && (devfn > 0)) | ||
1329 | return 0; /* Already scanned the entire slot */ | ||
1099 | 1330 | ||
1100 | dev = pci_scan_single_device(bus, devfn); | 1331 | dev = pci_scan_single_device(bus, devfn); |
1101 | if (dev && !dev->is_added) /* new device? */ | 1332 | if (!dev) |
1333 | return 0; | ||
1334 | if (!dev->is_added) | ||
1102 | nr++; | 1335 | nr++; |
1103 | 1336 | ||
1104 | if (dev && dev->multifunction) { | 1337 | if (pci_ari_enabled(bus)) |
1105 | for (fn = 1; fn < 8; fn++) { | 1338 | next_fn = next_ari_fn; |
1106 | dev = pci_scan_single_device(bus, devfn + fn); | 1339 | else if (dev->multifunction) |
1107 | if (dev) { | 1340 | next_fn = next_trad_fn; |
1108 | if (!dev->is_added) | 1341 | |
1109 | nr++; | 1342 | for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) { |
1110 | dev->multifunction = 1; | 1343 | dev = pci_scan_single_device(bus, devfn + fn); |
1111 | } | 1344 | if (dev) { |
1345 | if (!dev->is_added) | ||
1346 | nr++; | ||
1347 | dev->multifunction = 1; | ||
1112 | } | 1348 | } |
1113 | } | 1349 | } |
1114 | 1350 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index d58b94030ef3..790eb69a4aa9 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -25,14 +25,9 @@ | |||
25 | #include <linux/dmi.h> | 25 | #include <linux/dmi.h> |
26 | #include <linux/pci-aspm.h> | 26 | #include <linux/pci-aspm.h> |
27 | #include <linux/ioport.h> | 27 | #include <linux/ioport.h> |
28 | #include <asm/dma.h> /* isa_dma_bridge_buggy */ | ||
28 | #include "pci.h" | 29 | #include "pci.h" |
29 | 30 | ||
30 | int isa_dma_bridge_buggy; | ||
31 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
32 | int pci_pci_problems; | ||
33 | EXPORT_SYMBOL(pci_pci_problems); | ||
34 | |||
35 | #ifdef CONFIG_PCI_QUIRKS | ||
36 | /* | 31 | /* |
37 | * This quirk function disables memory decoding and releases memory resources | 32 | * This quirk function disables memory decoding and releases memory resources |
38 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. | 33 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. |
@@ -2612,6 +2607,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) | |||
2612 | } | 2607 | } |
2613 | pci_do_fixups(dev, start, end); | 2608 | pci_do_fixups(dev, start, end); |
2614 | } | 2609 | } |
2610 | EXPORT_SYMBOL(pci_fixup_device); | ||
2615 | 2611 | ||
2616 | static int __init pci_apply_final_quirks(void) | 2612 | static int __init pci_apply_final_quirks(void) |
2617 | { | 2613 | { |
@@ -2723,9 +2719,3 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) | |||
2723 | 2719 | ||
2724 | return -ENOTTY; | 2720 | return -ENOTTY; |
2725 | } | 2721 | } |
2726 | |||
2727 | #else | ||
2728 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} | ||
2729 | int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; } | ||
2730 | #endif | ||
2731 | EXPORT_SYMBOL(pci_fixup_device); | ||
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index c48cd377b3f5..bf32f07c4efb 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -27,37 +27,83 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include "pci.h" | 28 | #include "pci.h" |
29 | 29 | ||
30 | static void pbus_assign_resources_sorted(const struct pci_bus *bus) | 30 | struct resource_list_x { |
31 | { | 31 | struct resource_list_x *next; |
32 | struct pci_dev *dev; | ||
33 | struct resource *res; | 32 | struct resource *res; |
34 | struct resource_list head, *list, *tmp; | 33 | struct pci_dev *dev; |
35 | int idx; | 34 | resource_size_t start; |
35 | resource_size_t end; | ||
36 | unsigned long flags; | ||
37 | }; | ||
36 | 38 | ||
37 | head.next = NULL; | 39 | static void add_to_failed_list(struct resource_list_x *head, |
38 | list_for_each_entry(dev, &bus->devices, bus_list) { | 40 | struct pci_dev *dev, struct resource *res) |
39 | u16 class = dev->class >> 8; | 41 | { |
42 | struct resource_list_x *list = head; | ||
43 | struct resource_list_x *ln = list->next; | ||
44 | struct resource_list_x *tmp; | ||
40 | 45 | ||
41 | /* Don't touch classless devices or host bridges or ioapics. */ | 46 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); |
42 | if (class == PCI_CLASS_NOT_DEFINED || | 47 | if (!tmp) { |
43 | class == PCI_CLASS_BRIDGE_HOST) | 48 | pr_warning("add_to_failed_list: kmalloc() failed!\n"); |
44 | continue; | 49 | return; |
50 | } | ||
45 | 51 | ||
46 | /* Don't touch ioapic devices already enabled by firmware */ | 52 | tmp->next = ln; |
47 | if (class == PCI_CLASS_SYSTEM_PIC) { | 53 | tmp->res = res; |
48 | u16 command; | 54 | tmp->dev = dev; |
49 | pci_read_config_word(dev, PCI_COMMAND, &command); | 55 | tmp->start = res->start; |
50 | if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | 56 | tmp->end = res->end; |
51 | continue; | 57 | tmp->flags = res->flags; |
52 | } | 58 | list->next = tmp; |
59 | } | ||
60 | |||
61 | static void free_failed_list(struct resource_list_x *head) | ||
62 | { | ||
63 | struct resource_list_x *list, *tmp; | ||
53 | 64 | ||
54 | pdev_sort_resources(dev, &head); | 65 | for (list = head->next; list;) { |
66 | tmp = list; | ||
67 | list = list->next; | ||
68 | kfree(tmp); | ||
55 | } | 69 | } |
56 | 70 | ||
57 | for (list = head.next; list;) { | 71 | head->next = NULL; |
72 | } | ||
73 | |||
74 | static void __dev_sort_resources(struct pci_dev *dev, | ||
75 | struct resource_list *head) | ||
76 | { | ||
77 | u16 class = dev->class >> 8; | ||
78 | |||
79 | /* Don't touch classless devices or host bridges or ioapics. */ | ||
80 | if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) | ||
81 | return; | ||
82 | |||
83 | /* Don't touch ioapic devices already enabled by firmware */ | ||
84 | if (class == PCI_CLASS_SYSTEM_PIC) { | ||
85 | u16 command; | ||
86 | pci_read_config_word(dev, PCI_COMMAND, &command); | ||
87 | if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | ||
88 | return; | ||
89 | } | ||
90 | |||
91 | pdev_sort_resources(dev, head); | ||
92 | } | ||
93 | |||
94 | static void __assign_resources_sorted(struct resource_list *head, | ||
95 | struct resource_list_x *fail_head) | ||
96 | { | ||
97 | struct resource *res; | ||
98 | struct resource_list *list, *tmp; | ||
99 | int idx; | ||
100 | |||
101 | for (list = head->next; list;) { | ||
58 | res = list->res; | 102 | res = list->res; |
59 | idx = res - &list->dev->resource[0]; | 103 | idx = res - &list->dev->resource[0]; |
60 | if (pci_assign_resource(list->dev, idx)) { | 104 | if (pci_assign_resource(list->dev, idx)) { |
105 | if (fail_head && !pci_is_root_bus(list->dev->bus)) | ||
106 | add_to_failed_list(fail_head, list->dev, res); | ||
61 | res->start = 0; | 107 | res->start = 0; |
62 | res->end = 0; | 108 | res->end = 0; |
63 | res->flags = 0; | 109 | res->flags = 0; |
@@ -68,6 +114,30 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus) | |||
68 | } | 114 | } |
69 | } | 115 | } |
70 | 116 | ||
117 | static void pdev_assign_resources_sorted(struct pci_dev *dev, | ||
118 | struct resource_list_x *fail_head) | ||
119 | { | ||
120 | struct resource_list head; | ||
121 | |||
122 | head.next = NULL; | ||
123 | __dev_sort_resources(dev, &head); | ||
124 | __assign_resources_sorted(&head, fail_head); | ||
125 | |||
126 | } | ||
127 | |||
128 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, | ||
129 | struct resource_list_x *fail_head) | ||
130 | { | ||
131 | struct pci_dev *dev; | ||
132 | struct resource_list head; | ||
133 | |||
134 | head.next = NULL; | ||
135 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
136 | __dev_sort_resources(dev, &head); | ||
137 | |||
138 | __assign_resources_sorted(&head, fail_head); | ||
139 | } | ||
140 | |||
71 | void pci_setup_cardbus(struct pci_bus *bus) | 141 | void pci_setup_cardbus(struct pci_bus *bus) |
72 | { | 142 | { |
73 | struct pci_dev *bridge = bus->self; | 143 | struct pci_dev *bridge = bus->self; |
@@ -134,18 +204,12 @@ EXPORT_SYMBOL(pci_setup_cardbus); | |||
134 | config space writes, so it's quite possible that an I/O window of | 204 | config space writes, so it's quite possible that an I/O window of |
135 | the bridge will have some undesirable address (e.g. 0) after the | 205 | the bridge will have some undesirable address (e.g. 0) after the |
136 | first write. Ditto 64-bit prefetchable MMIO. */ | 206 | first write. Ditto 64-bit prefetchable MMIO. */ |
137 | static void pci_setup_bridge(struct pci_bus *bus) | 207 | static void pci_setup_bridge_io(struct pci_bus *bus) |
138 | { | 208 | { |
139 | struct pci_dev *bridge = bus->self; | 209 | struct pci_dev *bridge = bus->self; |
140 | struct resource *res; | 210 | struct resource *res; |
141 | struct pci_bus_region region; | 211 | struct pci_bus_region region; |
142 | u32 l, bu, lu, io_upper16; | 212 | u32 l, io_upper16; |
143 | |||
144 | if (pci_is_enabled(bridge)) | ||
145 | return; | ||
146 | |||
147 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", | ||
148 | bus->secondary, bus->subordinate); | ||
149 | 213 | ||
150 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ | 214 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ |
151 | res = bus->resource[0]; | 215 | res = bus->resource[0]; |
@@ -158,8 +222,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
158 | /* Set up upper 16 bits of I/O base/limit. */ | 222 | /* Set up upper 16 bits of I/O base/limit. */ |
159 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); | 223 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); |
160 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 224 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
161 | } | 225 | } else { |
162 | else { | ||
163 | /* Clear upper 16 bits of I/O base/limit. */ | 226 | /* Clear upper 16 bits of I/O base/limit. */ |
164 | io_upper16 = 0; | 227 | io_upper16 = 0; |
165 | l = 0x00f0; | 228 | l = 0x00f0; |
@@ -171,21 +234,35 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
171 | pci_write_config_dword(bridge, PCI_IO_BASE, l); | 234 | pci_write_config_dword(bridge, PCI_IO_BASE, l); |
172 | /* Update upper 16 bits of I/O base/limit. */ | 235 | /* Update upper 16 bits of I/O base/limit. */ |
173 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); | 236 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); |
237 | } | ||
174 | 238 | ||
175 | /* Set up the top and bottom of the PCI Memory segment | 239 | static void pci_setup_bridge_mmio(struct pci_bus *bus) |
176 | for this bus. */ | 240 | { |
241 | struct pci_dev *bridge = bus->self; | ||
242 | struct resource *res; | ||
243 | struct pci_bus_region region; | ||
244 | u32 l; | ||
245 | |||
246 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ | ||
177 | res = bus->resource[1]; | 247 | res = bus->resource[1]; |
178 | pcibios_resource_to_bus(bridge, ®ion, res); | 248 | pcibios_resource_to_bus(bridge, ®ion, res); |
179 | if (res->flags & IORESOURCE_MEM) { | 249 | if (res->flags & IORESOURCE_MEM) { |
180 | l = (region.start >> 16) & 0xfff0; | 250 | l = (region.start >> 16) & 0xfff0; |
181 | l |= region.end & 0xfff00000; | 251 | l |= region.end & 0xfff00000; |
182 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 252 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
183 | } | 253 | } else { |
184 | else { | ||
185 | l = 0x0000fff0; | 254 | l = 0x0000fff0; |
186 | dev_info(&bridge->dev, " bridge window [mem disabled]\n"); | 255 | dev_info(&bridge->dev, " bridge window [mem disabled]\n"); |
187 | } | 256 | } |
188 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); | 257 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); |
258 | } | ||
259 | |||
260 | static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) | ||
261 | { | ||
262 | struct pci_dev *bridge = bus->self; | ||
263 | struct resource *res; | ||
264 | struct pci_bus_region region; | ||
265 | u32 l, bu, lu; | ||
189 | 266 | ||
190 | /* Clear out the upper 32 bits of PREF limit. | 267 | /* Clear out the upper 32 bits of PREF limit. |
191 | If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily | 268 | If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily |
@@ -204,8 +281,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
204 | lu = upper_32_bits(region.end); | 281 | lu = upper_32_bits(region.end); |
205 | } | 282 | } |
206 | dev_info(&bridge->dev, " bridge window %pR\n", res); | 283 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
207 | } | 284 | } else { |
208 | else { | ||
209 | l = 0x0000fff0; | 285 | l = 0x0000fff0; |
210 | dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); | 286 | dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); |
211 | } | 287 | } |
@@ -214,10 +290,35 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
214 | /* Set the upper 32 bits of PREF base & limit. */ | 290 | /* Set the upper 32 bits of PREF base & limit. */ |
215 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); | 291 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); |
216 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); | 292 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); |
293 | } | ||
294 | |||
295 | static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) | ||
296 | { | ||
297 | struct pci_dev *bridge = bus->self; | ||
298 | |||
299 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", | ||
300 | bus->secondary, bus->subordinate); | ||
301 | |||
302 | if (type & IORESOURCE_IO) | ||
303 | pci_setup_bridge_io(bus); | ||
304 | |||
305 | if (type & IORESOURCE_MEM) | ||
306 | pci_setup_bridge_mmio(bus); | ||
307 | |||
308 | if (type & IORESOURCE_PREFETCH) | ||
309 | pci_setup_bridge_mmio_pref(bus); | ||
217 | 310 | ||
218 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | 311 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); |
219 | } | 312 | } |
220 | 313 | ||
314 | static void pci_setup_bridge(struct pci_bus *bus) | ||
315 | { | ||
316 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | | ||
317 | IORESOURCE_PREFETCH; | ||
318 | |||
319 | __pci_setup_bridge(bus, type); | ||
320 | } | ||
321 | |||
221 | /* Check whether the bridge supports optional I/O and | 322 | /* Check whether the bridge supports optional I/O and |
222 | prefetchable memory ranges. If not, the respective | 323 | prefetchable memory ranges. If not, the respective |
223 | base/limit registers must be read-only and read as 0. */ | 324 | base/limit registers must be read-only and read as 0. */ |
@@ -253,8 +354,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) | |||
253 | } | 354 | } |
254 | if (pmem) { | 355 | if (pmem) { |
255 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; | 356 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; |
256 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) | 357 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == |
358 | PCI_PREF_RANGE_TYPE_64) { | ||
257 | b_res[2].flags |= IORESOURCE_MEM_64; | 359 | b_res[2].flags |= IORESOURCE_MEM_64; |
360 | b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; | ||
361 | } | ||
258 | } | 362 | } |
259 | 363 | ||
260 | /* double check if bridge does support 64 bit pref */ | 364 | /* double check if bridge does support 64 bit pref */ |
@@ -283,8 +387,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon | |||
283 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | 387 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | |
284 | IORESOURCE_PREFETCH; | 388 | IORESOURCE_PREFETCH; |
285 | 389 | ||
286 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 390 | pci_bus_for_each_resource(bus, r, i) { |
287 | r = bus->resource[i]; | ||
288 | if (r == &ioport_resource || r == &iomem_resource) | 391 | if (r == &ioport_resource || r == &iomem_resource) |
289 | continue; | 392 | continue; |
290 | if (r && (r->flags & type_mask) == type && !r->parent) | 393 | if (r && (r->flags & type_mask) == type && !r->parent) |
@@ -301,7 +404,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) | |||
301 | { | 404 | { |
302 | struct pci_dev *dev; | 405 | struct pci_dev *dev; |
303 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); | 406 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); |
304 | unsigned long size = 0, size1 = 0; | 407 | unsigned long size = 0, size1 = 0, old_size; |
305 | 408 | ||
306 | if (!b_res) | 409 | if (!b_res) |
307 | return; | 410 | return; |
@@ -326,12 +429,17 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) | |||
326 | } | 429 | } |
327 | if (size < min_size) | 430 | if (size < min_size) |
328 | size = min_size; | 431 | size = min_size; |
432 | old_size = resource_size(b_res); | ||
433 | if (old_size == 1) | ||
434 | old_size = 0; | ||
329 | /* To be fixed in 2.5: we should have sort of HAVE_ISA | 435 | /* To be fixed in 2.5: we should have sort of HAVE_ISA |
330 | flag in the struct pci_bus. */ | 436 | flag in the struct pci_bus. */ |
331 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | 437 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
332 | size = (size & 0xff) + ((size & ~0xffUL) << 2); | 438 | size = (size & 0xff) + ((size & ~0xffUL) << 2); |
333 | #endif | 439 | #endif |
334 | size = ALIGN(size + size1, 4096); | 440 | size = ALIGN(size + size1, 4096); |
441 | if (size < old_size) | ||
442 | size = old_size; | ||
335 | if (!size) { | 443 | if (!size) { |
336 | if (b_res->start || b_res->end) | 444 | if (b_res->start || b_res->end) |
337 | dev_info(&bus->self->dev, "disabling bridge window " | 445 | dev_info(&bus->self->dev, "disabling bridge window " |
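
pbus_size_io() above (and pbus_size_mem() in the next hunk) now refuses to shrink a bridge window that was already programmed: resource_size() is end - start + 1, so a zero-initialised struct resource reports a size of 1, which is why an old_size of 1 is treated as "no previous window". A simplified userspace mimic of that check, with struct resource and resource_size() reduced to the bare minimum (the real definitions live in <linux/ioport.h>):

/* Simplified mimic of the old_size logic above. */
#include <stdio.h>

struct resource { unsigned long long start, end; };

static unsigned long long resource_size(const struct resource *r)
{
	return r->end - r->start + 1;	/* inclusive range */
}

int main(void)
{
	struct resource empty = { 0, 0 };		/* never programmed */
	struct resource window = { 0x1000, 0x1fff };
	unsigned long long old_size;

	old_size = resource_size(&empty);		/* == 1 */
	if (old_size == 1)
		old_size = 0;				/* treat as "no old window" */
	printf("empty window old_size = %llu\n", old_size);

	old_size = resource_size(&window);		/* == 0x1000 */
	printf("real window old_size  = %llu\n", old_size);
	return 0;
}
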
@@ -352,7 +460,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
352 | unsigned long type, resource_size_t min_size) | 460 | unsigned long type, resource_size_t min_size) |
353 | { | 461 | { |
354 | struct pci_dev *dev; | 462 | struct pci_dev *dev; |
355 | resource_size_t min_align, align, size; | 463 | resource_size_t min_align, align, size, old_size; |
356 | resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ | 464 | resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ |
357 | int order, max_order; | 465 | int order, max_order; |
358 | struct resource *b_res = find_free_bus_resource(bus, type); | 466 | struct resource *b_res = find_free_bus_resource(bus, type); |
@@ -402,6 +510,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
402 | } | 510 | } |
403 | if (size < min_size) | 511 | if (size < min_size) |
404 | size = min_size; | 512 | size = min_size; |
513 | old_size = resource_size(b_res); | ||
514 | if (old_size == 1) | ||
515 | old_size = 0; | ||
516 | if (size < old_size) | ||
517 | size = old_size; | ||
405 | 518 | ||
406 | align = 0; | 519 | align = 0; |
407 | min_align = 0; | 520 | min_align = 0; |
@@ -538,23 +651,25 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) | |||
538 | } | 651 | } |
539 | EXPORT_SYMBOL(pci_bus_size_bridges); | 652 | EXPORT_SYMBOL(pci_bus_size_bridges); |
540 | 653 | ||
541 | void __ref pci_bus_assign_resources(const struct pci_bus *bus) | 654 | static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, |
655 | struct resource_list_x *fail_head) | ||
542 | { | 656 | { |
543 | struct pci_bus *b; | 657 | struct pci_bus *b; |
544 | struct pci_dev *dev; | 658 | struct pci_dev *dev; |
545 | 659 | ||
546 | pbus_assign_resources_sorted(bus); | 660 | pbus_assign_resources_sorted(bus, fail_head); |
547 | 661 | ||
548 | list_for_each_entry(dev, &bus->devices, bus_list) { | 662 | list_for_each_entry(dev, &bus->devices, bus_list) { |
549 | b = dev->subordinate; | 663 | b = dev->subordinate; |
550 | if (!b) | 664 | if (!b) |
551 | continue; | 665 | continue; |
552 | 666 | ||
553 | pci_bus_assign_resources(b); | 667 | __pci_bus_assign_resources(b, fail_head); |
554 | 668 | ||
555 | switch (dev->class >> 8) { | 669 | switch (dev->class >> 8) { |
556 | case PCI_CLASS_BRIDGE_PCI: | 670 | case PCI_CLASS_BRIDGE_PCI: |
557 | pci_setup_bridge(b); | 671 | if (!pci_is_enabled(dev)) |
672 | pci_setup_bridge(b); | ||
558 | break; | 673 | break; |
559 | 674 | ||
560 | case PCI_CLASS_BRIDGE_CARDBUS: | 675 | case PCI_CLASS_BRIDGE_CARDBUS: |
@@ -568,15 +683,130 @@ void __ref pci_bus_assign_resources(const struct pci_bus *bus) | |||
568 | } | 683 | } |
569 | } | 684 | } |
570 | } | 685 | } |
686 | |||
687 | void __ref pci_bus_assign_resources(const struct pci_bus *bus) | ||
688 | { | ||
689 | __pci_bus_assign_resources(bus, NULL); | ||
690 | } | ||
571 | EXPORT_SYMBOL(pci_bus_assign_resources); | 691 | EXPORT_SYMBOL(pci_bus_assign_resources); |
572 | 692 | ||
693 | static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge, | ||
694 | struct resource_list_x *fail_head) | ||
695 | { | ||
696 | struct pci_bus *b; | ||
697 | |||
698 | pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head); | ||
699 | |||
700 | b = bridge->subordinate; | ||
701 | if (!b) | ||
702 | return; | ||
703 | |||
704 | __pci_bus_assign_resources(b, fail_head); | ||
705 | |||
706 | switch (bridge->class >> 8) { | ||
707 | case PCI_CLASS_BRIDGE_PCI: | ||
708 | pci_setup_bridge(b); | ||
709 | break; | ||
710 | |||
711 | case PCI_CLASS_BRIDGE_CARDBUS: | ||
712 | pci_setup_cardbus(b); | ||
713 | break; | ||
714 | |||
715 | default: | ||
716 | dev_info(&bridge->dev, "not setting up bridge for bus " | ||
717 | "%04x:%02x\n", pci_domain_nr(b), b->number); | ||
718 | break; | ||
719 | } | ||
720 | } | ||
721 | static void pci_bridge_release_resources(struct pci_bus *bus, | ||
722 | unsigned long type) | ||
723 | { | ||
724 | int idx; | ||
725 | bool changed = false; | ||
726 | struct pci_dev *dev; | ||
727 | struct resource *r; | ||
728 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
729 | IORESOURCE_PREFETCH; | ||
730 | |||
731 | dev = bus->self; | ||
732 | for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END; | ||
733 | idx++) { | ||
734 | r = &dev->resource[idx]; | ||
735 | if ((r->flags & type_mask) != type) | ||
736 | continue; | ||
737 | if (!r->parent) | ||
738 | continue; | ||
739 | /* | ||
740 | * If there are child resources under this window, | ||
741 | * release them all first. | ||
742 | */ | ||
743 | release_child_resources(r); | ||
744 | if (!release_resource(r)) { | ||
745 | dev_printk(KERN_DEBUG, &dev->dev, | ||
746 | "resource %d %pR released\n", idx, r); | ||
747 | /* keep the old size */ | ||
748 | r->end = resource_size(r) - 1; | ||
749 | r->start = 0; | ||
750 | r->flags = 0; | ||
751 | changed = true; | ||
752 | } | ||
753 | } | ||
754 | |||
755 | if (changed) { | ||
756 | /* only reprogram the prefetchable window; don't touch the others */ | ||
757 | if (type & IORESOURCE_PREFETCH) | ||
758 | type = IORESOURCE_PREFETCH; | ||
759 | __pci_setup_bridge(bus, type); | ||
760 | } | ||
761 | } | ||
762 | |||
763 | enum release_type { | ||
764 | leaf_only, | ||
765 | whole_subtree, | ||
766 | }; | ||
767 | /* | ||
768 | * Try to release PCI bridge resources from leaf bridges, | ||
769 | * so that larger windows can be allocated later. | ||
770 | */ | ||
771 | static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus, | ||
772 | unsigned long type, | ||
773 | enum release_type rel_type) | ||
774 | { | ||
775 | struct pci_dev *dev; | ||
776 | bool is_leaf_bridge = true; | ||
777 | |||
778 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
779 | struct pci_bus *b = dev->subordinate; | ||
780 | if (!b) | ||
781 | continue; | ||
782 | |||
783 | is_leaf_bridge = false; | ||
784 | |||
785 | if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||
786 | continue; | ||
787 | |||
788 | if (rel_type == whole_subtree) | ||
789 | pci_bus_release_bridge_resources(b, type, | ||
790 | whole_subtree); | ||
791 | } | ||
792 | |||
793 | if (pci_is_root_bus(bus)) | ||
794 | return; | ||
795 | |||
796 | if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||
797 | return; | ||
798 | |||
799 | if ((rel_type == whole_subtree) || is_leaf_bridge) | ||
800 | pci_bridge_release_resources(bus, type); | ||
801 | } | ||
802 | |||
573 | static void pci_bus_dump_res(struct pci_bus *bus) | 803 | static void pci_bus_dump_res(struct pci_bus *bus) |
574 | { | 804 | { |
575 | int i; | 805 | struct resource *res; |
806 | int i; | ||
576 | 807 | ||
577 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 808 | pci_bus_for_each_resource(bus, res, i) { |
578 | struct resource *res = bus->resource[i]; | 809 | if (!res || !res->end || !res->flags) |
579 | if (!res || !res->end) | ||
580 | continue; | 810 | continue; |
581 | 811 | ||
582 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); | 812 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); |
@@ -600,11 +830,65 @@ static void pci_bus_dump_resources(struct pci_bus *bus) | |||
600 | } | 830 | } |
601 | } | 831 | } |
602 | 832 | ||
833 | static int __init pci_bus_get_depth(struct pci_bus *bus) | ||
834 | { | ||
835 | int depth = 0; | ||
836 | struct pci_dev *dev; | ||
837 | |||
838 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
839 | int ret; | ||
840 | struct pci_bus *b = dev->subordinate; | ||
841 | if (!b) | ||
842 | continue; | ||
843 | |||
844 | ret = pci_bus_get_depth(b); | ||
845 | if (ret + 1 > depth) | ||
846 | depth = ret + 1; | ||
847 | } | ||
848 | |||
849 | return depth; | ||
850 | } | ||
851 | static int __init pci_get_max_depth(void) | ||
852 | { | ||
853 | int depth = 0; | ||
854 | struct pci_bus *bus; | ||
855 | |||
856 | list_for_each_entry(bus, &pci_root_buses, node) { | ||
857 | int ret; | ||
858 | |||
859 | ret = pci_bus_get_depth(bus); | ||
860 | if (ret > depth) | ||
861 | depth = ret; | ||
862 | } | ||
863 | |||
864 | return depth; | ||
865 | } | ||
866 | |||
867 | /* | ||
868 | * The first try does not touch PCI bridge resources. | ||
869 | * The second and later tries clear small leaf bridge resources. | ||
870 | * We stop at the maximum bus depth if no good assignment is found. | ||
871 | */ | ||
603 | void __init | 872 | void __init |
604 | pci_assign_unassigned_resources(void) | 873 | pci_assign_unassigned_resources(void) |
605 | { | 874 | { |
606 | struct pci_bus *bus; | 875 | struct pci_bus *bus; |
876 | int tried_times = 0; | ||
877 | enum release_type rel_type = leaf_only; | ||
878 | struct resource_list_x head, *list; | ||
879 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
880 | IORESOURCE_PREFETCH; | ||
881 | unsigned long failed_type; | ||
882 | int max_depth = pci_get_max_depth(); | ||
883 | int pci_try_num; | ||
607 | 884 | ||
885 | head.next = NULL; | ||
886 | |||
887 | pci_try_num = max_depth + 1; | ||
888 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", | ||
889 | max_depth, pci_try_num); | ||
890 | |||
891 | again: | ||
608 | /* Depth first, calculate sizes and alignments of all | 892 | /* Depth first, calculate sizes and alignments of all |
609 | subordinate buses. */ | 893 | subordinate buses. */ |
610 | list_for_each_entry(bus, &pci_root_buses, node) { | 894 | list_for_each_entry(bus, &pci_root_buses, node) { |
@@ -612,12 +896,130 @@ pci_assign_unassigned_resources(void) | |||
612 | } | 896 | } |
613 | /* Depth last, allocate resources and update the hardware. */ | 897 | /* Depth last, allocate resources and update the hardware. */ |
614 | list_for_each_entry(bus, &pci_root_buses, node) { | 898 | list_for_each_entry(bus, &pci_root_buses, node) { |
615 | pci_bus_assign_resources(bus); | 899 | __pci_bus_assign_resources(bus, &head); |
616 | pci_enable_bridges(bus); | ||
617 | } | 900 | } |
901 | tried_times++; | ||
902 | |||
903 | /* did any device fail to get its resources? */ | ||
904 | if (!head.next) | ||
905 | goto enable_and_dump; | ||
906 | failed_type = 0; | ||
907 | for (list = head.next; list;) { | ||
908 | failed_type |= list->flags; | ||
909 | list = list->next; | ||
910 | } | ||
911 | /* | ||
912 | * I/O ports are tight, so don't try to free more for them; | ||
913 | * also stop once the retry limit has been reached. | ||
914 | */ | ||
915 | failed_type &= type_mask; | ||
916 | if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) { | ||
917 | free_failed_list(&head); | ||
918 | goto enable_and_dump; | ||
919 | } | ||
920 | |||
921 | printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", | ||
922 | tried_times + 1); | ||
923 | |||
924 | /* from the third try on, release whole subtrees, not just leaf bridges */ | ||
925 | if ((tried_times + 1) > 2) | ||
926 | rel_type = whole_subtree; | ||
927 | |||
928 | /* | ||
929 | * Try to release leaf bridge resources that are too small for the | ||
930 | * resources of the child devices under that bridge. | ||
931 | */ | ||
932 | for (list = head.next; list;) { | ||
933 | bus = list->dev->bus; | ||
934 | pci_bus_release_bridge_resources(bus, list->flags & type_mask, | ||
935 | rel_type); | ||
936 | list = list->next; | ||
937 | } | ||
938 | /* restore size and flags */ | ||
939 | for (list = head.next; list;) { | ||
940 | struct resource *res = list->res; | ||
941 | |||
942 | res->start = list->start; | ||
943 | res->end = list->end; | ||
944 | res->flags = list->flags; | ||
945 | if (list->dev->subordinate) | ||
946 | res->flags = 0; | ||
947 | |||
948 | list = list->next; | ||
949 | } | ||
950 | free_failed_list(&head); | ||
951 | |||
952 | goto again; | ||
953 | |||
954 | enable_and_dump: | ||
955 | /* Depth last, update the hardware. */ | ||
956 | list_for_each_entry(bus, &pci_root_buses, node) | ||
957 | pci_enable_bridges(bus); | ||
618 | 958 | ||
619 | /* dump the resource on buses */ | 959 | /* dump the resource on buses */ |
620 | list_for_each_entry(bus, &pci_root_buses, node) { | 960 | list_for_each_entry(bus, &pci_root_buses, node) { |
621 | pci_bus_dump_resources(bus); | 961 | pci_bus_dump_resources(bus); |
622 | } | 962 | } |
623 | } | 963 | } |
964 | |||
965 | void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) | ||
966 | { | ||
967 | struct pci_bus *parent = bridge->subordinate; | ||
968 | int tried_times = 0; | ||
969 | struct resource_list_x head, *list; | ||
970 | int retval; | ||
971 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
972 | IORESOURCE_PREFETCH; | ||
973 | |||
974 | head.next = NULL; | ||
975 | |||
976 | again: | ||
977 | pci_bus_size_bridges(parent); | ||
978 | __pci_bridge_assign_resources(bridge, &head); | ||
979 | retval = pci_reenable_device(bridge); | ||
980 | pci_set_master(bridge); | ||
981 | pci_enable_bridges(parent); | ||
982 | |||
983 | tried_times++; | ||
984 | |||
985 | if (!head.next) | ||
986 | return; | ||
987 | |||
988 | if (tried_times >= 2) { | ||
989 | /* still failing; no point in trying again */ | ||
990 | free_failed_list(&head); | ||
991 | return; | ||
992 | } | ||
993 | |||
994 | printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", | ||
995 | tried_times + 1); | ||
996 | |||
997 | /* | ||
998 | * Try to release leaf bridge resources that are too small for the | ||
999 | * resources of the child devices under that bridge. | ||
1000 | */ | ||
1001 | for (list = head.next; list;) { | ||
1002 | struct pci_bus *bus = list->dev->bus; | ||
1003 | unsigned long flags = list->flags; | ||
1004 | |||
1005 | pci_bus_release_bridge_resources(bus, flags & type_mask, | ||
1006 | whole_subtree); | ||
1007 | list = list->next; | ||
1008 | } | ||
1009 | /* restore size and flags */ | ||
1010 | for (list = head.next; list;) { | ||
1011 | struct resource *res = list->res; | ||
1012 | |||
1013 | res->start = list->start; | ||
1014 | res->end = list->end; | ||
1015 | res->flags = list->flags; | ||
1016 | if (list->dev->subordinate) | ||
1017 | res->flags = 0; | ||
1018 | |||
1019 | list = list->next; | ||
1020 | } | ||
1021 | free_failed_list(&head); | ||
1022 | |||
1023 | goto again; | ||
1024 | } | ||
1025 | EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources); | ||
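
Taken together, the setup-bus.c changes turn resource assignment into a retry loop: failures are recorded on a resource_list_x list, the bridge windows above the failing devices are released (leaf bridges first, whole subtrees on later passes), the requested sizes are restored, and assignment is repeated up to max-bus-depth + 1 times. A toy, userspace-only illustration of that control flow; every name below is invented for the illustration, and only the loop shape mirrors the patch:

/* Toy illustration of "assign, collect failures, release, retry". */
#include <stdbool.h>
#include <stdio.h>

static int window = 4;		/* pretend bridge window, in "slots" */
static const int need = 6;	/* slots the devices below really need */

static bool assign_resources(void)
{
	return window >= need;	/* fails while the window is too small */
}

static void release_bridge_windows(void)
{
	window += 2;		/* stand-in for freeing/regrowing bridge windows */
}

int main(void)
{
	int max_tries = 3;	/* the kernel uses max bus depth + 1 */

	for (int try = 1; try <= max_tries; try++) {
		if (assign_resources()) {
			printf("assigned on try %d (window=%d)\n", try, window);
			return 0;
		}
		printf("try %d failed, releasing bridge windows\n", try);
		release_bridge_windows();
	}
	printf("gave up after %d tries\n", max_tries);
	return 1;
}
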
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 8c02b6c53bdb..49c9e6c9779a 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
@@ -47,6 +47,55 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) | |||
47 | slot->number); | 47 | slot->number); |
48 | } | 48 | } |
49 | 49 | ||
50 | /* these strings match up with the values in pci_bus_speed */ | ||
51 | static char *pci_bus_speed_strings[] = { | ||
52 | "33 MHz PCI", /* 0x00 */ | ||
53 | "66 MHz PCI", /* 0x01 */ | ||
54 | "66 MHz PCI-X", /* 0x02 */ | ||
55 | "100 MHz PCI-X", /* 0x03 */ | ||
56 | "133 MHz PCI-X", /* 0x04 */ | ||
57 | NULL, /* 0x05 */ | ||
58 | NULL, /* 0x06 */ | ||
59 | NULL, /* 0x07 */ | ||
60 | NULL, /* 0x08 */ | ||
61 | "66 MHz PCI-X 266", /* 0x09 */ | ||
62 | "100 MHz PCI-X 266", /* 0x0a */ | ||
63 | "133 MHz PCI-X 266", /* 0x0b */ | ||
64 | "Unknown AGP", /* 0x0c */ | ||
65 | "1x AGP", /* 0x0d */ | ||
66 | "2x AGP", /* 0x0e */ | ||
67 | "4x AGP", /* 0x0f */ | ||
68 | "8x AGP", /* 0x10 */ | ||
69 | "66 MHz PCI-X 533", /* 0x11 */ | ||
70 | "100 MHz PCI-X 533", /* 0x12 */ | ||
71 | "133 MHz PCI-X 533", /* 0x13 */ | ||
72 | "2.5 GT/s PCIe", /* 0x14 */ | ||
73 | "5.0 GT/s PCIe", /* 0x15 */ | ||
74 | "8.0 GT/s PCIe", /* 0x16 */ | ||
75 | }; | ||
76 | |||
77 | static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) | ||
78 | { | ||
79 | const char *speed_string; | ||
80 | |||
81 | if (speed < ARRAY_SIZE(pci_bus_speed_strings)) | ||
82 | speed_string = pci_bus_speed_strings[speed]; | ||
83 | else | ||
84 | speed_string = "Unknown"; | ||
85 | |||
86 | return sprintf(buf, "%s\n", speed_string); | ||
87 | } | ||
88 | |||
89 | static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf) | ||
90 | { | ||
91 | return bus_speed_read(slot->bus->max_bus_speed, buf); | ||
92 | } | ||
93 | |||
94 | static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf) | ||
95 | { | ||
96 | return bus_speed_read(slot->bus->cur_bus_speed, buf); | ||
97 | } | ||
98 | |||
50 | static void pci_slot_release(struct kobject *kobj) | 99 | static void pci_slot_release(struct kobject *kobj) |
51 | { | 100 | { |
52 | struct pci_dev *dev; | 101 | struct pci_dev *dev; |
@@ -66,9 +115,15 @@ static void pci_slot_release(struct kobject *kobj) | |||
66 | 115 | ||
67 | static struct pci_slot_attribute pci_slot_attr_address = | 116 | static struct pci_slot_attribute pci_slot_attr_address = |
68 | __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); | 117 | __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); |
118 | static struct pci_slot_attribute pci_slot_attr_max_speed = | ||
119 | __ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL); | ||
120 | static struct pci_slot_attribute pci_slot_attr_cur_speed = | ||
121 | __ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL); | ||
69 | 122 | ||
70 | static struct attribute *pci_slot_default_attrs[] = { | 123 | static struct attribute *pci_slot_default_attrs[] = { |
71 | &pci_slot_attr_address.attr, | 124 | &pci_slot_attr_address.attr, |
125 | &pci_slot_attr_max_speed.attr, | ||
126 | &pci_slot_attr_cur_speed.attr, | ||
72 | NULL, | 127 | NULL, |
73 | }; | 128 | }; |
74 | 129 | ||
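
The new max_bus_speed/cur_bus_speed slot attributes map enum pci_bus_speed values to strings through a bounds-checked table. A small userspace mimic of the lookup; ARRAY_SIZE is defined locally here (the kernel version comes from <linux/kernel.h>), the table entries are illustrative, and the explicit NULL check for reserved slots is an addition of this sketch:

/* Mimic of the bounds-checked speed-string lookup. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *speed_strings[] = {
	"33 MHz PCI",		/* 0x00 */
	"66 MHz PCI",		/* 0x01 */
	"66 MHz PCI-X",		/* 0x02 */
	NULL,			/* reserved slot */
	"2.5 GT/s PCIe",	/* illustrative tail entry */
};

static const char *speed_name(unsigned int speed)
{
	if (speed < ARRAY_SIZE(speed_strings) && speed_strings[speed])
		return speed_strings[speed];
	return "Unknown";
}

int main(void)
{
	printf("%s\n", speed_name(1));	/* 66 MHz PCI */
	printf("%s\n", speed_name(3));	/* Unknown (reserved slot) */
	printf("%s\n", speed_name(99));	/* Unknown (out of range) */
	return 0;
}
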
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c index 52db17263d8b..f8401a0ef89b 100644 --- a/drivers/pcmcia/rsrc_mgr.c +++ b/drivers/pcmcia/rsrc_mgr.c | |||
@@ -114,22 +114,21 @@ struct pcmcia_align_data { | |||
114 | unsigned long offset; | 114 | unsigned long offset; |
115 | }; | 115 | }; |
116 | 116 | ||
117 | static void pcmcia_align(void *align_data, struct resource *res, | 117 | static resource_size_t pcmcia_align(void *align_data, |
118 | unsigned long size, unsigned long align) | 118 | const struct resource *res, |
119 | resource_size_t size, resource_size_t align) | ||
119 | { | 120 | { |
120 | struct pcmcia_align_data *data = align_data; | 121 | struct pcmcia_align_data *data = align_data; |
121 | unsigned long start; | 122 | resource_size_t start; |
122 | 123 | ||
123 | start = (res->start & ~data->mask) + data->offset; | 124 | start = (res->start & ~data->mask) + data->offset; |
124 | if (start < res->start) | 125 | if (start < res->start) |
125 | start += data->mask + 1; | 126 | start += data->mask + 1; |
126 | res->start = start; | ||
127 | 127 | ||
128 | #ifdef CONFIG_X86 | 128 | #ifdef CONFIG_X86 |
129 | if (res->flags & IORESOURCE_IO) { | 129 | if (res->flags & IORESOURCE_IO) { |
130 | if (start & 0x300) { | 130 | if (start & 0x300) { |
131 | start = (start + 0x3ff) & ~0x3ff; | 131 | start = (start + 0x3ff) & ~0x3ff; |
132 | res->start = start; | ||
133 | } | 132 | } |
134 | } | 133 | } |
135 | #endif | 134 | #endif |
@@ -137,9 +136,11 @@ static void pcmcia_align(void *align_data, struct resource *res, | |||
137 | #ifdef CONFIG_M68K | 136 | #ifdef CONFIG_M68K |
138 | if (res->flags & IORESOURCE_IO) { | 137 | if (res->flags & IORESOURCE_IO) { |
139 | if ((res->start + size - 1) >= 1024) | 138 | if ((res->start + size - 1) >= 1024) |
140 | res->start = res->end; | 139 | start = res->end; |
141 | } | 140 | } |
142 | #endif | 141 | #endif |
142 | |||
143 | return start; | ||
143 | } | 144 | } |
144 | 145 | ||
145 | 146 | ||
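
This hunk, like the rsrc_nonstatic.c one below, adapts the PCMCIA alignment callbacks to the new convention visible in the diff: the callback receives a const struct resource and returns the proposed start address instead of writing res->start. A hedged kernel-context sketch of such a callback; example_align and its rounding policy are illustrative, not part of the patch, and <linux/ioport.h> plus <linux/kernel.h> are assumed:

/* Sketch of an alignment callback under the "return the start" convention. */
static resource_size_t example_align(void *data, const struct resource *res,
				     resource_size_t size,
				     resource_size_t align)
{
	resource_size_t start = ALIGN(res->start, align);

	/* returning a value past the window signals "nothing suitable here" */
	if (start + size - 1 > res->end)
		return res->end;

	return start;
}
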
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 9b0dc433a8c3..c67638fe6914 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c | |||
@@ -533,8 +533,8 @@ struct pcmcia_align_data { | |||
533 | struct resource_map *map; | 533 | struct resource_map *map; |
534 | }; | 534 | }; |
535 | 535 | ||
536 | static void | 536 | static resource_size_t |
537 | pcmcia_common_align(void *align_data, struct resource *res, | 537 | pcmcia_common_align(void *align_data, const struct resource *res, |
538 | resource_size_t size, resource_size_t align) | 538 | resource_size_t size, resource_size_t align) |
539 | { | 539 | { |
540 | struct pcmcia_align_data *data = align_data; | 540 | struct pcmcia_align_data *data = align_data; |
@@ -545,17 +545,18 @@ pcmcia_common_align(void *align_data, struct resource *res, | |||
545 | start = (res->start & ~data->mask) + data->offset; | 545 | start = (res->start & ~data->mask) + data->offset; |
546 | if (start < res->start) | 546 | if (start < res->start) |
547 | start += data->mask + 1; | 547 | start += data->mask + 1; |
548 | res->start = start; | 548 | return start; |
549 | } | 549 | } |
550 | 550 | ||
551 | static void | 551 | static resource_size_t |
552 | pcmcia_align(void *align_data, struct resource *res, resource_size_t size, | 552 | pcmcia_align(void *align_data, const struct resource *res, |
553 | resource_size_t align) | 553 | resource_size_t size, resource_size_t align) |
554 | { | 554 | { |
555 | struct pcmcia_align_data *data = align_data; | 555 | struct pcmcia_align_data *data = align_data; |
556 | struct resource_map *m; | 556 | struct resource_map *m; |
557 | resource_size_t start; | ||
557 | 558 | ||
558 | pcmcia_common_align(data, res, size, align); | 559 | start = pcmcia_common_align(data, res, size, align); |
559 | 560 | ||
560 | for (m = data->map->next; m != data->map; m = m->next) { | 561 | for (m = data->map->next; m != data->map; m = m->next) { |
561 | unsigned long start = m->base; | 562 | unsigned long start = m->base; |
@@ -567,8 +568,7 @@ pcmcia_align(void *align_data, struct resource *res, resource_size_t size, | |||
567 | * fit here. | 568 | * fit here. |
568 | */ | 569 | */ |
569 | if (res->start < start) { | 570 | if (res->start < start) { |
570 | res->start = start; | 571 | start = pcmcia_common_align(data, res, size, align); |
571 | pcmcia_common_align(data, res, size, align); | ||
572 | } | 572 | } |
573 | 573 | ||
574 | /* | 574 | /* |
@@ -586,7 +586,9 @@ pcmcia_align(void *align_data, struct resource *res, resource_size_t size, | |||
586 | * If we failed to find something suitable, ensure we fail. | 586 | * If we failed to find something suitable, ensure we fail. |
587 | */ | 587 | */ |
588 | if (m == data->map) | 588 | if (m == data->map) |
589 | res->start = res->end; | 589 | start = res->end; |
590 | |||
591 | return start; | ||
590 | } | 592 | } |
591 | 593 | ||
592 | /* | 594 | /* |
@@ -801,8 +803,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) | |||
801 | return -EINVAL; | 803 | return -EINVAL; |
802 | #endif | 804 | #endif |
803 | 805 | ||
804 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 806 | pci_bus_for_each_resource(s->cb_dev->bus, res, i) { |
805 | res = s->cb_dev->bus->resource[i]; | ||
806 | if (!res) | 807 | if (!res) |
807 | continue; | 808 | continue; |
808 | 809 | ||
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index e4d12acdd525..1f2039d5e966 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c | |||
@@ -649,9 +649,10 @@ static int yenta_search_one_res(struct resource *root, struct resource *res, | |||
649 | static int yenta_search_res(struct yenta_socket *socket, struct resource *res, | 649 | static int yenta_search_res(struct yenta_socket *socket, struct resource *res, |
650 | u32 min) | 650 | u32 min) |
651 | { | 651 | { |
652 | struct resource *root; | ||
652 | int i; | 653 | int i; |
653 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 654 | |
654 | struct resource *root = socket->dev->bus->resource[i]; | 655 | pci_bus_for_each_resource(socket->dev->bus, root, i) { |
655 | if (!root) | 656 | if (!root) |
656 | continue; | 657 | continue; |
657 | 658 | ||
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 07d14dfdf0b4..226b3e93498c 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev) | |||
934 | acer_backlight_device = bd; | 934 | acer_backlight_device = bd; |
935 | 935 | ||
936 | bd->props.power = FB_BLANK_UNBLANK; | 936 | bd->props.power = FB_BLANK_UNBLANK; |
937 | bd->props.brightness = max_brightness; | 937 | bd->props.brightness = read_brightness(bd); |
938 | bd->props.max_brightness = max_brightness; | 938 | bd->props.max_brightness = max_brightness; |
939 | backlight_update_status(bd); | 939 | backlight_update_status(bd); |
940 | return 0; | 940 | return 0; |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e67e4feb35cb..eb603f1d55ca 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -5771,7 +5771,7 @@ static void thermal_exit(void) | |||
5771 | case TPACPI_THERMAL_ACPI_TMP07: | 5771 | case TPACPI_THERMAL_ACPI_TMP07: |
5772 | case TPACPI_THERMAL_ACPI_UPDT: | 5772 | case TPACPI_THERMAL_ACPI_UPDT: |
5773 | sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, | 5773 | sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, |
5774 | &thermal_temp_input16_group); | 5774 | &thermal_temp_input8_group); |
5775 | break; | 5775 | break; |
5776 | case TPACPI_THERMAL_NONE: | 5776 | case TPACPI_THERMAL_NONE: |
5777 | default: | 5777 | default: |
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 75ac19b1192f..fc2f676e984d 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c | |||
@@ -233,7 +233,7 @@ static int opromnext(void __user *argp, unsigned int cmd, struct device_node *dp | |||
233 | 233 | ||
234 | ph = 0; | 234 | ph = 0; |
235 | if (dp) | 235 | if (dp) |
236 | ph = dp->node; | 236 | ph = dp->phandle; |
237 | 237 | ||
238 | data->current_node = dp; | 238 | data->current_node = dp; |
239 | *((int *) op->oprom_array) = ph; | 239 | *((int *) op->oprom_array) = ph; |
@@ -256,7 +256,7 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp | |||
256 | 256 | ||
257 | dp = pci_device_to_OF_node(pdev); | 257 | dp = pci_device_to_OF_node(pdev); |
258 | data->current_node = dp; | 258 | data->current_node = dp; |
259 | *((int *)op->oprom_array) = dp->node; | 259 | *((int *)op->oprom_array) = dp->phandle; |
260 | op->oprom_size = sizeof(int); | 260 | op->oprom_size = sizeof(int); |
261 | err = copyout(argp, op, bufsize + sizeof(int)); | 261 | err = copyout(argp, op, bufsize + sizeof(int)); |
262 | 262 | ||
@@ -273,7 +273,7 @@ static int oprompath2node(void __user *argp, struct device_node *dp, struct open | |||
273 | 273 | ||
274 | dp = of_find_node_by_path(op->oprom_array); | 274 | dp = of_find_node_by_path(op->oprom_array); |
275 | if (dp) | 275 | if (dp) |
276 | ph = dp->node; | 276 | ph = dp->phandle; |
277 | data->current_node = dp; | 277 | data->current_node = dp; |
278 | *((int *)op->oprom_array) = ph; | 278 | *((int *)op->oprom_array) = ph; |
279 | op->oprom_size = sizeof(int); | 279 | op->oprom_size = sizeof(int); |
@@ -540,7 +540,7 @@ static int opiocgetnext(unsigned int cmd, void __user *argp) | |||
540 | } | 540 | } |
541 | } | 541 | } |
542 | if (dp) | 542 | if (dp) |
543 | nd = dp->node; | 543 | nd = dp->phandle; |
544 | if (copy_to_user(argp, &nd, sizeof(phandle))) | 544 | if (copy_to_user(argp, &nd, sizeof(phandle))) |
545 | return -EFAULT; | 545 | return -EFAULT; |
546 | 546 | ||
@@ -570,7 +570,7 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file, | |||
570 | case OPIOCGETOPTNODE: | 570 | case OPIOCGETOPTNODE: |
571 | BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); | 571 | BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); |
572 | 572 | ||
573 | if (copy_to_user(argp, &options_node->node, sizeof(phandle))) | 573 | if (copy_to_user(argp, &options_node->phandle, sizeof(phandle))) |
574 | return -EFAULT; | 574 | return -EFAULT; |
575 | 575 | ||
576 | return 0; | 576 | return 0; |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 477542602284..9e71ac611146 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) | |||
2516 | if (info->scsi.phase == PHASE_IDLE) | 2516 | if (info->scsi.phase == PHASE_IDLE) |
2517 | fas216_kick(info); | 2517 | fas216_kick(info); |
2518 | 2518 | ||
2519 | mod_timer(&info->eh_timer, 30 * HZ); | 2519 | mod_timer(&info->eh_timer, jiffies + 30 * HZ); |
2520 | spin_unlock_irqrestore(&info->host_lock, flags); | 2520 | spin_unlock_irqrestore(&info->host_lock, flags); |
2521 | 2521 | ||
2522 | /* | 2522 | /* |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 10be9f36a4cc..2f47ae7cce91 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -2009,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) | |||
2009 | fcoe_interface_cleanup(fcoe); | 2009 | fcoe_interface_cleanup(fcoe); |
2010 | rtnl_unlock(); | 2010 | rtnl_unlock(); |
2011 | fcoe_if_destroy(fcoe->ctlr.lp); | 2011 | fcoe_if_destroy(fcoe->ctlr.lp); |
2012 | module_put(THIS_MODULE); | ||
2013 | |||
2012 | out_putdev: | 2014 | out_putdev: |
2013 | dev_put(netdev); | 2015 | dev_put(netdev); |
2014 | out_nodev: | 2016 | out_nodev: |
@@ -2059,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) | |||
2059 | } | 2061 | } |
2060 | #endif | 2062 | #endif |
2061 | 2063 | ||
2064 | if (!try_module_get(THIS_MODULE)) { | ||
2065 | rc = -EINVAL; | ||
2066 | goto out_nomod; | ||
2067 | } | ||
2068 | |||
2062 | rtnl_lock(); | 2069 | rtnl_lock(); |
2063 | netdev = fcoe_if_to_netdev(buffer); | 2070 | netdev = fcoe_if_to_netdev(buffer); |
2064 | if (!netdev) { | 2071 | if (!netdev) { |
@@ -2099,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) | |||
2099 | if (!fcoe_link_ok(lport)) | 2106 | if (!fcoe_link_ok(lport)) |
2100 | fcoe_ctlr_link_up(&fcoe->ctlr); | 2107 | fcoe_ctlr_link_up(&fcoe->ctlr); |
2101 | 2108 | ||
2102 | rc = 0; | ||
2103 | out_free: | ||
2104 | /* | 2109 | /* |
2105 | * Release from init in fcoe_interface_create(), on success lport | 2110 | * Release from init in fcoe_interface_create(), on success lport |
2106 | * should be holding a reference taken in fcoe_if_create(). | 2111 | * should be holding a reference taken in fcoe_if_create(). |
2107 | */ | 2112 | */ |
2108 | fcoe_interface_put(fcoe); | 2113 | fcoe_interface_put(fcoe); |
2114 | dev_put(netdev); | ||
2115 | rtnl_unlock(); | ||
2116 | mutex_unlock(&fcoe_config_mutex); | ||
2117 | |||
2118 | return 0; | ||
2119 | out_free: | ||
2120 | fcoe_interface_put(fcoe); | ||
2109 | out_putdev: | 2121 | out_putdev: |
2110 | dev_put(netdev); | 2122 | dev_put(netdev); |
2111 | out_nodev: | 2123 | out_nodev: |
2112 | rtnl_unlock(); | 2124 | rtnl_unlock(); |
2125 | module_put(THIS_MODULE); | ||
2126 | out_nomod: | ||
2113 | mutex_unlock(&fcoe_config_mutex); | 2127 | mutex_unlock(&fcoe_config_mutex); |
2114 | return rc; | 2128 | return rc; |
2115 | } | 2129 | } |
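
fcoe_create()/fcoe_destroy() now pin the module for as long as a created interface exists, so the module cannot be unloaded underneath it. A minimal kernel-context sketch of that pattern (assumes <linux/module.h>; the function names and error code are illustrative):

/* Sketch of the module-pinning pattern used above. */
static int example_create(void)
{
	if (!try_module_get(THIS_MODULE))	/* module is on its way out */
		return -ENODEV;

	/* ... create the object that must keep the module alive ... */

	return 0;
}

static void example_destroy(void)
{
	/* ... tear the object down ... */

	module_put(THIS_MODULE);		/* drop the reference */
}
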
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 9823291395ad..511cb6b371ee 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
@@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
1187 | next_timer = fip->ctlr_ka_time; | 1187 | next_timer = fip->ctlr_ka_time; |
1188 | 1188 | ||
1189 | if (time_after_eq(jiffies, fip->port_ka_time)) { | 1189 | if (time_after_eq(jiffies, fip->port_ka_time)) { |
1190 | fip->port_ka_time += jiffies + | 1190 | fip->port_ka_time = jiffies + |
1191 | msecs_to_jiffies(FIP_VN_KA_PERIOD); | 1191 | msecs_to_jiffies(FIP_VN_KA_PERIOD); |
1192 | fip->send_port_ka = 1; | 1192 | fip->send_port_ka = 1; |
1193 | } | 1193 | } |
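
Both this keep-alive fix and the fas216 mod_timer() fix above rely on timer deadlines being absolute jiffies values rather than deltas; the old "+=" kept adding jiffies into the deadline. A kernel-context sketch (assumes <linux/timer.h> and <linux/jiffies.h>); example_rearm, my_timer and the 90 s/30 s periods are illustrative:

/* Sketch: deadlines are absolute jiffies, timers expire at jiffies + delta. */
static void example_rearm(struct timer_list *my_timer, unsigned long *deadline)
{
	if (time_after_eq(jiffies, *deadline))
		*deadline = jiffies + msecs_to_jiffies(90 * 1000); /* absolute */

	mod_timer(my_timer, jiffies + 30 * HZ);	/* expires 30 s from now */
}
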
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 19d711cb938c..7f4364770e4a 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, | |||
1890 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); | 1890 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); |
1891 | sp->cnt++; | 1891 | sp->cnt++; |
1892 | 1892 | ||
1893 | if (ep->xid <= lport->lro_xid) | 1893 | if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) |
1894 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); | 1894 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); |
1895 | 1895 | ||
1896 | if (unlikely(lport->tt.frame_send(lport, fp))) | 1896 | if (unlikely(lport->tt.frame_send(lport, fp))) |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 881d5dfe8c74..6fde2fabfd9b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -298,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) | |||
298 | { | 298 | { |
299 | struct fc_lport *lport; | 299 | struct fc_lport *lport; |
300 | 300 | ||
301 | if (!fsp) | ||
302 | return; | ||
303 | |||
304 | lport = fsp->lp; | 301 | lport = fsp->lp; |
305 | if ((fsp->req_flags & FC_SRB_READ) && | 302 | if ((fsp->req_flags & FC_SRB_READ) && |
306 | (lport->lro_enabled) && (lport->tt.ddp_setup)) { | 303 | (lport->lro_enabled) && (lport->tt.ddp_setup)) { |
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 0b165024a219..7ec8ce75007c 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -1800,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) | |||
1800 | u32 did; | 1800 | u32 did; |
1801 | 1801 | ||
1802 | job->reply->reply_payload_rcv_len = 0; | 1802 | job->reply->reply_payload_rcv_len = 0; |
1803 | rsp->resid_len = job->reply_payload.payload_len; | 1803 | if (rsp) |
1804 | rsp->resid_len = job->reply_payload.payload_len; | ||
1804 | 1805 | ||
1805 | mutex_lock(&lport->lp_mutex); | 1806 | mutex_lock(&lport->lp_mutex); |
1806 | 1807 | ||
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 02300523b234..97923bb07765 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -623,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
623 | 623 | ||
624 | tov = ntohl(plp->fl_csp.sp_e_d_tov); | 624 | tov = ntohl(plp->fl_csp.sp_e_d_tov); |
625 | if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) | 625 | if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) |
626 | tov /= 1000; | 626 | tov /= 1000000; |
627 | if (tov > rdata->e_d_tov) | 627 | if (tov > rdata->e_d_tov) |
628 | rdata->e_d_tov = tov; | 628 | rdata->e_d_tov = tov; |
629 | csp_seq = ntohs(plp->fl_csp.sp_tot_seq); | 629 | csp_seq = ntohs(plp->fl_csp.sp_tot_seq); |
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index db6856c138fc..4ad87fd74ddd 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -992,12 +992,10 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) | |||
992 | if (r2t == NULL) { | 992 | if (r2t == NULL) { |
993 | if (kfifo_out(&tcp_task->r2tqueue, | 993 | if (kfifo_out(&tcp_task->r2tqueue, |
994 | (void *)&tcp_task->r2t, sizeof(void *)) != | 994 | (void *)&tcp_task->r2t, sizeof(void *)) != |
995 | sizeof(void *)) { | 995 | sizeof(void *)) |
996 | WARN_ONCE(1, "unexpected fifo state"); | ||
997 | r2t = NULL; | 996 | r2t = NULL; |
998 | } | 997 | else |
999 | 998 | r2t = tcp_task->r2t; | |
1000 | r2t = tcp_task->r2t; | ||
1001 | } | 999 | } |
1002 | spin_unlock_bh(&session->lock); | 1000 | spin_unlock_bh(&session->lock); |
1003 | } | 1001 | } |
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 708ea3157b60..d9b8ca5116bc 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -3781,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) | |||
3781 | compat_alloc_user_space(sizeof(struct megasas_iocpacket)); | 3781 | compat_alloc_user_space(sizeof(struct megasas_iocpacket)); |
3782 | int i; | 3782 | int i; |
3783 | int error = 0; | 3783 | int error = 0; |
3784 | compat_uptr_t ptr; | ||
3784 | 3785 | ||
3785 | if (clear_user(ioc, sizeof(*ioc))) | 3786 | if (clear_user(ioc, sizeof(*ioc))) |
3786 | return -EFAULT; | 3787 | return -EFAULT; |
@@ -3793,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) | |||
3793 | copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) | 3794 | copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) |
3794 | return -EFAULT; | 3795 | return -EFAULT; |
3795 | 3796 | ||
3796 | for (i = 0; i < MAX_IOCTL_SGE; i++) { | 3797 | /* |
3797 | compat_uptr_t ptr; | 3798 | * The sense_ptr is used in megasas_mgmt_fw_ioctl only when |
3799 | * sense_len is not null, so prepare the 64bit value under | ||
3800 | * the same condition. | ||
3801 | */ | ||
3802 | if (ioc->sense_len) { | ||
3803 | void __user **sense_ioc_ptr = | ||
3804 | (void __user **)(ioc->frame.raw + ioc->sense_off); | ||
3805 | compat_uptr_t *sense_cioc_ptr = | ||
3806 | (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); | ||
3807 | if (get_user(ptr, sense_cioc_ptr) || | ||
3808 | put_user(compat_ptr(ptr), sense_ioc_ptr)) | ||
3809 | return -EFAULT; | ||
3810 | } | ||
3798 | 3811 | ||
3812 | for (i = 0; i < MAX_IOCTL_SGE; i++) { | ||
3799 | if (get_user(ptr, &cioc->sgl[i].iov_base) || | 3813 | if (get_user(ptr, &cioc->sgl[i].iov_base) || |
3800 | put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || | 3814 | put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || |
3801 | copy_in_user(&ioc->sgl[i].iov_len, | 3815 | copy_in_user(&ioc->sgl[i].iov_len, |
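
The megaraid compat-ioctl fix copies the 32-bit sense-buffer pointer into the native frame with the usual get_user()/compat_ptr()/put_user() sequence. A minimal kernel-context sketch of that pattern (assumes <linux/compat.h> and <linux/uaccess.h>); widen_user_ptr and its parameters are illustrative, not megaraid's real layout:

/* Sketch: widen one 32-bit user pointer into a native user pointer slot. */
static int widen_user_ptr(compat_uptr_t __user *src32,
			  void __user * __user *dst64)
{
	compat_uptr_t uptr;

	if (get_user(uptr, src32))		/* fetch the 32-bit pointer */
		return -EFAULT;
	if (put_user(compat_ptr(uptr), dst64))	/* store it widened */
		return -EFAULT;
	return 0;
}
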
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index c3e37c8e7e26..e9b15c3746fa 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ | |||
83 | 83 | ||
84 | #define PASS_LIMIT 256 | 84 | #define PASS_LIMIT 256 |
85 | 85 | ||
86 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
87 | |||
88 | |||
86 | /* | 89 | /* |
87 | * We default to IRQ0 for the "no irq" hack. Some | 90 | * We default to IRQ0 for the "no irq" hack. Some |
88 | * machine types want others as well - they're free | 91 | * machine types want others as well - they're free |
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) | |||
1792 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | 1795 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; |
1793 | spin_unlock_irqrestore(&up->port.lock, flags); | 1796 | spin_unlock_irqrestore(&up->port.lock, flags); |
1794 | 1797 | ||
1795 | return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; | 1798 | return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; |
1796 | } | 1799 | } |
1797 | 1800 | ||
1798 | static unsigned int serial8250_get_mctrl(struct uart_port *port) | 1801 | static unsigned int serial8250_get_mctrl(struct uart_port *port) |
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) | |||
1850 | spin_unlock_irqrestore(&up->port.lock, flags); | 1853 | spin_unlock_irqrestore(&up->port.lock, flags); |
1851 | } | 1854 | } |
1852 | 1855 | ||
1853 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
1854 | |||
1855 | /* | 1856 | /* |
1856 | * Wait for transmitter & holding register to empty | 1857 | * Wait for transmitter & holding register to empty |
1857 | */ | 1858 | */ |
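
serial8250_tx_empty() now reports the transmitter as empty only when both TEMT (shift register empty) and THRE (holding register empty) are set. A tiny userspace mimic showing why the combined mask must be compared with "==" rather than merely tested for truth; the bit values match the standard 16550 LSR layout used by <linux/serial_reg.h>:

/* Mimic of the BOTH_EMPTY test above. */
#include <stdio.h>

#define UART_LSR_THRE	0x20	/* transmit-hold register empty */
#define UART_LSR_TEMT	0x40	/* transmitter completely empty */
#define BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

static int tx_empty(unsigned int lsr)
{
	/* both bits must be set; a bare "lsr & BOTH_EMPTY" would claim
	 * empty as soon as either bit is set */
	return (lsr & BOTH_EMPTY) == BOTH_EMPTY;
}

int main(void)
{
	printf("%d\n", tx_empty(UART_LSR_THRE));			/* 0 */
	printf("%d\n", tx_empty(UART_LSR_THRE | UART_LSR_TEMT));	/* 1 */
	return 0;
}
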
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index 683e66f18e8c..3e2ae4807ae2 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c | |||
@@ -2031,9 +2031,9 @@ static int __init pmz_console_setup(struct console *co, char *options) | |||
2031 | /* | 2031 | /* |
2032 | * XServe's default to 57600 bps | 2032 | * XServe's default to 57600 bps |
2033 | */ | 2033 | */ |
2034 | if (machine_is_compatible("RackMac1,1") | 2034 | if (of_machine_is_compatible("RackMac1,1") |
2035 | || machine_is_compatible("RackMac1,2") | 2035 | || of_machine_is_compatible("RackMac1,2") |
2036 | || machine_is_compatible("MacRISC4")) | 2036 | || of_machine_is_compatible("MacRISC4")) |
2037 | baud = 57600; | 2037 | baud = 57600; |
2038 | 2038 | ||
2039 | /* | 2039 | /* |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f55eb0107336..0fee95cd9a49 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -100,6 +100,23 @@ config SPI_BUTTERFLY | |||
100 | inexpensive battery powered microcontroller evaluation board. | 100 | inexpensive battery powered microcontroller evaluation board. |
101 | This same cable can be used to flash new firmware. | 101 | This same cable can be used to flash new firmware. |
102 | 102 | ||
103 | config SPI_COLDFIRE_QSPI | ||
104 | tristate "Freescale Coldfire QSPI controller" | ||
105 | depends on (M520x || M523x || M5249 || M527x || M528x || M532x) | ||
106 | help | ||
107 | This enables support for the Coldfire QSPI controller in master | ||
108 | mode. | ||
109 | |||
110 | This driver can also be built as a module. If so, the module | ||
111 | will be called coldfire_qspi. | ||
112 | |||
113 | config SPI_DAVINCI | ||
114 | tristate "SPI controller driver for DaVinci/DA8xx SoC's" | ||
115 | depends on SPI_MASTER && ARCH_DAVINCI | ||
116 | select SPI_BITBANG | ||
117 | help | ||
118 | SPI master controller for DaVinci and DA8xx SPI modules. | ||
119 | |||
103 | config SPI_GPIO | 120 | config SPI_GPIO |
104 | tristate "GPIO-based bitbanging SPI Master" | 121 | tristate "GPIO-based bitbanging SPI Master" |
105 | depends on GENERIC_GPIO | 122 | depends on GENERIC_GPIO |
@@ -308,7 +325,7 @@ config SPI_NUC900 | |||
308 | # | 325 | # |
309 | 326 | ||
310 | config SPI_DESIGNWARE | 327 | config SPI_DESIGNWARE |
311 | bool "DesignWare SPI controller core support" | 328 | tristate "DesignWare SPI controller core support" |
312 | depends on SPI_MASTER | 329 | depends on SPI_MASTER |
313 | help | 330 | help |
314 | general driver for SPI controller core from DesignWare | 331 | general driver for SPI controller core from DesignWare |
@@ -317,6 +334,10 @@ config SPI_DW_PCI | |||
317 | tristate "PCI interface driver for DW SPI core" | 334 | tristate "PCI interface driver for DW SPI core" |
318 | depends on SPI_DESIGNWARE && PCI | 335 | depends on SPI_DESIGNWARE && PCI |
319 | 336 | ||
337 | config SPI_DW_MMIO | ||
338 | tristate "Memory-mapped io interface driver for DW SPI core" | ||
339 | depends on SPI_DESIGNWARE && HAVE_CLK | ||
340 | |||
320 | # | 341 | # |
321 | # There are lots of SPI device types, with sensors and memory | 342 | # There are lots of SPI device types, with sensors and memory |
322 | # being probably the most widely used ones. | 343 | # being probably the most widely used ones. |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index f3d2810ba11c..d7d0f89b797b 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -16,8 +16,11 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o | |||
16 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o | 16 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o |
17 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o | 17 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o |
18 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o | 18 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o |
19 | obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o | ||
20 | obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o | ||
19 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o | 21 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o |
20 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o | 22 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o |
23 | obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o | ||
21 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o | 24 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o |
22 | obj-$(CONFIG_SPI_IMX) += spi_imx.o | 25 | obj-$(CONFIG_SPI_IMX) += spi_imx.o |
23 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o | 26 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o |
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c new file mode 100644 index 000000000000..59be3efe0636 --- /dev/null +++ b/drivers/spi/coldfire_qspi.c | |||
@@ -0,0 +1,640 @@ | |||
1 | /* | ||
2 | * Freescale/Motorola Coldfire Queued SPI driver | ||
3 | * | ||
4 | * Copyright 2010 Steven King <sfking@fdwdc.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/clk.h> | ||
31 | #include <linux/err.h> | ||
32 | #include <linux/spi/spi.h> | ||
33 | |||
34 | #include <asm/coldfire.h> | ||
35 | #include <asm/mcfqspi.h> | ||
36 | |||
37 | #define DRIVER_NAME "mcfqspi" | ||
38 | |||
39 | #define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) | ||
40 | |||
41 | #define MCFQSPI_QMR 0x00 | ||
42 | #define MCFQSPI_QMR_MSTR 0x8000 | ||
43 | #define MCFQSPI_QMR_CPOL 0x0200 | ||
44 | #define MCFQSPI_QMR_CPHA 0x0100 | ||
45 | #define MCFQSPI_QDLYR 0x04 | ||
46 | #define MCFQSPI_QDLYR_SPE 0x8000 | ||
47 | #define MCFQSPI_QWR 0x08 | ||
48 | #define MCFQSPI_QWR_HALT 0x8000 | ||
49 | #define MCFQSPI_QWR_WREN 0x4000 | ||
50 | #define MCFQSPI_QWR_CSIV 0x1000 | ||
51 | #define MCFQSPI_QIR 0x0C | ||
52 | #define MCFQSPI_QIR_WCEFB 0x8000 | ||
53 | #define MCFQSPI_QIR_ABRTB 0x4000 | ||
54 | #define MCFQSPI_QIR_ABRTL 0x1000 | ||
55 | #define MCFQSPI_QIR_WCEFE 0x0800 | ||
56 | #define MCFQSPI_QIR_ABRTE 0x0400 | ||
57 | #define MCFQSPI_QIR_SPIFE 0x0100 | ||
58 | #define MCFQSPI_QIR_WCEF 0x0008 | ||
59 | #define MCFQSPI_QIR_ABRT 0x0004 | ||
60 | #define MCFQSPI_QIR_SPIF 0x0001 | ||
61 | #define MCFQSPI_QAR 0x010 | ||
62 | #define MCFQSPI_QAR_TXBUF 0x00 | ||
63 | #define MCFQSPI_QAR_RXBUF 0x10 | ||
64 | #define MCFQSPI_QAR_CMDBUF 0x20 | ||
65 | #define MCFQSPI_QDR 0x014 | ||
66 | #define MCFQSPI_QCR 0x014 | ||
67 | #define MCFQSPI_QCR_CONT 0x8000 | ||
68 | #define MCFQSPI_QCR_BITSE 0x4000 | ||
69 | #define MCFQSPI_QCR_DT 0x2000 | ||
70 | |||
71 | struct mcfqspi { | ||
72 | void __iomem *iobase; | ||
73 | int irq; | ||
74 | struct clk *clk; | ||
75 | struct mcfqspi_cs_control *cs_control; | ||
76 | |||
77 | wait_queue_head_t waitq; | ||
78 | |||
79 | struct work_struct work; | ||
80 | struct workqueue_struct *workq; | ||
81 | spinlock_t lock; | ||
82 | struct list_head msgq; | ||
83 | }; | ||
84 | |||
85 | static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) | ||
86 | { | ||
87 | writew(val, mcfqspi->iobase + MCFQSPI_QMR); | ||
88 | } | ||
89 | |||
90 | static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) | ||
91 | { | ||
92 | writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); | ||
93 | } | ||
94 | |||
95 | static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) | ||
96 | { | ||
97 | return readw(mcfqspi->iobase + MCFQSPI_QDLYR); | ||
98 | } | ||
99 | |||
100 | static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) | ||
101 | { | ||
102 | writew(val, mcfqspi->iobase + MCFQSPI_QWR); | ||
103 | } | ||
104 | |||
105 | static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) | ||
106 | { | ||
107 | writew(val, mcfqspi->iobase + MCFQSPI_QIR); | ||
108 | } | ||
109 | |||
110 | static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) | ||
111 | { | ||
112 | writew(val, mcfqspi->iobase + MCFQSPI_QAR); | ||
113 | } | ||
114 | |||
115 | static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) | ||
116 | { | ||
117 | writew(val, mcfqspi->iobase + MCFQSPI_QDR); | ||
118 | } | ||
119 | |||
120 | static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) | ||
121 | { | ||
122 | return readw(mcfqspi->iobase + MCFQSPI_QDR); | ||
123 | } | ||
124 | |||
125 | static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, | ||
126 | bool cs_high) | ||
127 | { | ||
128 | mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high); | ||
129 | } | ||
130 | |||
131 | static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, | ||
132 | bool cs_high) | ||
133 | { | ||
134 | mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high); | ||
135 | } | ||
136 | |||
137 | static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) | ||
138 | { | ||
139 | return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? | ||
140 | mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; | ||
141 | } | ||
142 | |||
143 | static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) | ||
144 | { | ||
145 | if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) | ||
146 | mcfqspi->cs_control->teardown(mcfqspi->cs_control); | ||
147 | } | ||
148 | |||
149 | static u8 mcfqspi_qmr_baud(u32 speed_hz) | ||
150 | { | ||
151 | return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); | ||
152 | } | ||
153 | |||
154 | static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) | ||
155 | { | ||
156 | return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; | ||
157 | } | ||
158 | |||
159 | static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) | ||
160 | { | ||
161 | struct mcfqspi *mcfqspi = dev_id; | ||
162 | |||
163 | /* clear interrupt */ | ||
164 | mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); | ||
165 | wake_up(&mcfqspi->waitq); | ||
166 | |||
167 | return IRQ_HANDLED; | ||
168 | } | ||
169 | |||
170 | static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, | ||
171 | const u8 *txbuf, u8 *rxbuf) | ||
172 | { | ||
173 | unsigned i, n, offset = 0; | ||
174 | |||
175 | n = min(count, 16u); | ||
176 | |||
177 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); | ||
178 | for (i = 0; i < n; ++i) | ||
179 | mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); | ||
180 | |||
181 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); | ||
182 | if (txbuf) | ||
183 | for (i = 0; i < n; ++i) | ||
184 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
185 | else | ||
186 | for (i = 0; i < count; ++i) | ||
187 | mcfqspi_wr_qdr(mcfqspi, 0); | ||
188 | |||
189 | count -= n; | ||
190 | if (count) { | ||
191 | u16 qwr = 0xf08; | ||
192 | mcfqspi_wr_qwr(mcfqspi, 0x700); | ||
193 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
194 | |||
195 | do { | ||
196 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
197 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
198 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
199 | if (rxbuf) { | ||
200 | mcfqspi_wr_qar(mcfqspi, | ||
201 | MCFQSPI_QAR_RXBUF + offset); | ||
202 | for (i = 0; i < 8; ++i) | ||
203 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
204 | } | ||
205 | n = min(count, 8u); | ||
206 | if (txbuf) { | ||
207 | mcfqspi_wr_qar(mcfqspi, | ||
208 | MCFQSPI_QAR_TXBUF + offset); | ||
209 | for (i = 0; i < n; ++i) | ||
210 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
211 | } | ||
212 | qwr = (offset ? 0x808 : 0) + ((n - 1) << 8); | ||
213 | offset ^= 8; | ||
214 | count -= n; | ||
215 | } while (count); | ||
216 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
217 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
218 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
219 | if (rxbuf) { | ||
220 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
221 | for (i = 0; i < 8; ++i) | ||
222 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
223 | offset ^= 8; | ||
224 | } | ||
225 | } else { | ||
226 | mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); | ||
227 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
228 | } | ||
229 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
230 | if (rxbuf) { | ||
231 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
232 | for (i = 0; i < n; ++i) | ||
233 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, | ||
238 | const u16 *txbuf, u16 *rxbuf) | ||
239 | { | ||
240 | unsigned i, n, offset = 0; | ||
241 | |||
242 | n = min(count, 16u); | ||
243 | |||
244 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); | ||
245 | for (i = 0; i < n; ++i) | ||
246 | mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); | ||
247 | |||
248 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); | ||
249 | if (txbuf) | ||
250 | for (i = 0; i < n; ++i) | ||
251 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
252 | else | ||
253 | for (i = 0; i < count; ++i) | ||
254 | mcfqspi_wr_qdr(mcfqspi, 0); | ||
255 | |||
256 | count -= n; | ||
257 | if (count) { | ||
258 | u16 qwr = 0xf08; | ||
259 | mcfqspi_wr_qwr(mcfqspi, 0x700); | ||
260 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
261 | |||
262 | do { | ||
263 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
264 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
265 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
266 | if (rxbuf) { | ||
267 | mcfqspi_wr_qar(mcfqspi, | ||
268 | MCFQSPI_QAR_RXBUF + offset); | ||
269 | for (i = 0; i < 8; ++i) | ||
270 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
271 | } | ||
272 | n = min(count, 8u); | ||
273 | if (txbuf) { | ||
274 | mcfqspi_wr_qar(mcfqspi, | ||
275 | MCFQSPI_QAR_TXBUF + offset); | ||
276 | for (i = 0; i < n; ++i) | ||
277 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
278 | } | ||
279 | qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); | ||
280 | offset ^= 8; | ||
281 | count -= n; | ||
282 | } while (count); | ||
283 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
284 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
285 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
286 | if (rxbuf) { | ||
287 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
288 | for (i = 0; i < 8; ++i) | ||
289 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
290 | offset ^= 8; | ||
291 | } | ||
292 | } else { | ||
293 | mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); | ||
294 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
295 | } | ||
296 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
297 | if (rxbuf) { | ||
298 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
299 | for (i = 0; i < n; ++i) | ||
300 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | static void mcfqspi_work(struct work_struct *work) | ||
305 | { | ||
306 | struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); | ||
307 | unsigned long flags; | ||
308 | |||
309 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
310 | while (!list_empty(&mcfqspi->msgq)) { | ||
311 | struct spi_message *msg; | ||
312 | struct spi_device *spi; | ||
313 | struct spi_transfer *xfer; | ||
314 | int status = 0; | ||
315 | |||
316 | msg = container_of(mcfqspi->msgq.next, struct spi_message, | ||
317 | queue); | ||
318 | |||
319 | list_del_init(&mcfqspi->msgq); | ||
320 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
321 | |||
322 | spi = msg->spi; | ||
323 | |||
324 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
325 | bool cs_high = spi->mode & SPI_CS_HIGH; | ||
326 | u16 qmr = MCFQSPI_QMR_MSTR; | ||
327 | |||
328 | if (xfer->bits_per_word) | ||
329 | qmr |= xfer->bits_per_word << 10; | ||
330 | else | ||
331 | qmr |= spi->bits_per_word << 10; | ||
332 | if (spi->mode & SPI_CPHA) | ||
333 | qmr |= MCFQSPI_QMR_CPHA; | ||
334 | if (spi->mode & SPI_CPOL) | ||
335 | qmr |= MCFQSPI_QMR_CPOL; | ||
336 | if (xfer->speed_hz) | ||
337 | qmr |= mcfqspi_qmr_baud(xfer->speed_hz); | ||
338 | else | ||
339 | qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); | ||
340 | mcfqspi_wr_qmr(mcfqspi, qmr); | ||
341 | |||
342 | mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); | ||
343 | |||
344 | mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); | ||
345 | if ((xfer->bits_per_word ? xfer->bits_per_word : | ||
346 | spi->bits_per_word) == 8) | ||
347 | mcfqspi_transfer_msg8(mcfqspi, xfer->len, | ||
348 | xfer->tx_buf, | ||
349 | xfer->rx_buf); | ||
350 | else | ||
351 | mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, | ||
352 | xfer->tx_buf, | ||
353 | xfer->rx_buf); | ||
354 | mcfqspi_wr_qir(mcfqspi, 0); | ||
355 | |||
356 | if (xfer->delay_usecs) | ||
357 | udelay(xfer->delay_usecs); | ||
358 | if (xfer->cs_change) { | ||
359 | if (!list_is_last(&xfer->transfer_list, | ||
360 | &msg->transfers)) | ||
361 | mcfqspi_cs_deselect(mcfqspi, | ||
362 | spi->chip_select, | ||
363 | cs_high); | ||
364 | } else { | ||
365 | if (list_is_last(&xfer->transfer_list, | ||
366 | &msg->transfers)) | ||
367 | mcfqspi_cs_deselect(mcfqspi, | ||
368 | spi->chip_select, | ||
369 | cs_high); | ||
370 | } | ||
371 | msg->actual_length += xfer->len; | ||
372 | } | ||
373 | msg->status = status; | ||
374 | msg->complete(msg->context); | ||
375 | |||
376 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
377 | } | ||
378 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
379 | } | ||
380 | |||
381 | static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) | ||
382 | { | ||
383 | struct mcfqspi *mcfqspi; | ||
384 | struct spi_transfer *xfer; | ||
385 | unsigned long flags; | ||
386 | |||
387 | mcfqspi = spi_master_get_devdata(spi->master); | ||
388 | |||
389 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
390 | if (xfer->bits_per_word && ((xfer->bits_per_word < 8) | ||
391 | || (xfer->bits_per_word > 16))) { | ||
392 | dev_dbg(&spi->dev, | ||
393 | "%d bits per word is not supported\n", | ||
394 | xfer->bits_per_word); | ||
395 | goto fail; | ||
396 | } | ||
397 | if (xfer->speed_hz) { | ||
398 | u32 real_speed = MCFQSPI_BUSCLK / | ||
399 | mcfqspi_qmr_baud(xfer->speed_hz); | ||
400 | if (real_speed != xfer->speed_hz) | ||
401 | dev_dbg(&spi->dev, | ||
402 | "using speed %d instead of %d\n", | ||
403 | real_speed, xfer->speed_hz); | ||
404 | } | ||
405 | } | ||
406 | msg->status = -EINPROGRESS; | ||
407 | msg->actual_length = 0; | ||
408 | |||
409 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
410 | list_add_tail(&msg->queue, &mcfqspi->msgq); | ||
411 | queue_work(mcfqspi->workq, &mcfqspi->work); | ||
412 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
413 | |||
414 | return 0; | ||
415 | fail: | ||
416 | msg->status = -EINVAL; | ||
417 | return -EINVAL; | ||
418 | } | ||
419 | |||
420 | static int mcfqspi_setup(struct spi_device *spi) | ||
421 | { | ||
422 | if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { | ||
423 | dev_dbg(&spi->dev, "%d bits per word is not supported\n", | ||
424 | spi->bits_per_word); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | if (spi->chip_select >= spi->master->num_chipselect) { | ||
428 | dev_dbg(&spi->dev, "%d chip select is out of range\n", | ||
429 | spi->chip_select); | ||
430 | return -EINVAL; | ||
431 | } | ||
432 | |||
433 | mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), | ||
434 | spi->chip_select, spi->mode & SPI_CS_HIGH); | ||
435 | |||
436 | dev_dbg(&spi->dev, | ||
437 | "bits per word %d, chip select %d, speed %d kHz\n", | ||
438 | spi->bits_per_word, spi->chip_select, | ||
439 | (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) | ||
440 | / 1000); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
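/*
 * Editor's note -- illustrative sketch, not part of the patch: setup()
 * above reports the effective SCLK as MCFQSPI_BUSCLK divided by the
 * integer returned from mcfqspi_qmr_baud(), and transfer() uses the same
 * arithmetic for its rounding warning.  Both names are defined earlier in
 * this driver; the helper below only restates the division to make the
 * rounding visible, and assumes mcfqspi_qmr_baud() takes and returns u32.
 */
static u32 mcfqspi_effective_hz(u32 requested_hz)
{
	/* may be lower than requested_hz when the division is not exact */
	return MCFQSPI_BUSCLK / mcfqspi_qmr_baud(requested_hz);
}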
444 | |||
445 | static int __devinit mcfqspi_probe(struct platform_device *pdev) | ||
446 | { | ||
447 | struct spi_master *master; | ||
448 | struct mcfqspi *mcfqspi; | ||
449 | struct resource *res; | ||
450 | struct mcfqspi_platform_data *pdata; | ||
451 | int status; | ||
452 | |||
453 | master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); | ||
454 | if (master == NULL) { | ||
455 | dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); | ||
456 | return -ENOMEM; | ||
457 | } | ||
458 | |||
459 | mcfqspi = spi_master_get_devdata(master); | ||
460 | |||
461 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
462 | if (!res) { | ||
463 | dev_dbg(&pdev->dev, "platform_get_resource failed\n"); | ||
464 | status = -ENXIO; | ||
465 | goto fail0; | ||
466 | } | ||
467 | |||
468 | if (!request_mem_region(res->start, resource_size(res), pdev->name)) { | ||
469 | dev_dbg(&pdev->dev, "request_mem_region failed\n"); | ||
470 | status = -EBUSY; | ||
471 | goto fail0; | ||
472 | } | ||
473 | |||
474 | mcfqspi->iobase = ioremap(res->start, resource_size(res)); | ||
475 | if (!mcfqspi->iobase) { | ||
476 | dev_dbg(&pdev->dev, "ioremap failed\n"); | ||
477 | status = -ENOMEM; | ||
478 | goto fail1; | ||
479 | } | ||
480 | |||
481 | mcfqspi->irq = platform_get_irq(pdev, 0); | ||
482 | if (mcfqspi->irq < 0) { | ||
483 | dev_dbg(&pdev->dev, "platform_get_irq failed\n"); | ||
484 | status = -ENXIO; | ||
485 | goto fail2; | ||
486 | } | ||
487 | |||
488 | status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, | ||
489 | pdev->name, mcfqspi); | ||
490 | if (status) { | ||
491 | dev_dbg(&pdev->dev, "request_irq failed\n"); | ||
492 | goto fail2; | ||
493 | } | ||
494 | |||
495 | mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); | ||
496 | if (IS_ERR(mcfqspi->clk)) { | ||
497 | dev_dbg(&pdev->dev, "clk_get failed\n"); | ||
498 | status = PTR_ERR(mcfqspi->clk); | ||
499 | goto fail3; | ||
500 | } | ||
501 | clk_enable(mcfqspi->clk); | ||
502 | |||
503 | mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); | ||
504 | if (!mcfqspi->workq) { | ||
505 | dev_dbg(&pdev->dev, "create_workqueue failed\n"); | ||
506 | status = -ENOMEM; | ||
507 | goto fail4; | ||
508 | } | ||
509 | INIT_WORK(&mcfqspi->work, mcfqspi_work); | ||
510 | spin_lock_init(&mcfqspi->lock); | ||
511 | INIT_LIST_HEAD(&mcfqspi->msgq); | ||
512 | init_waitqueue_head(&mcfqspi->waitq); | ||
513 | |||
514 | pdata = pdev->dev.platform_data; | ||
515 | if (!pdata) { | ||
516 | dev_dbg(&pdev->dev, "platform data is missing\n"); | ||
517 | goto fail5; | ||
518 | } | ||
519 | master->bus_num = pdata->bus_num; | ||
520 | master->num_chipselect = pdata->num_chipselect; | ||
521 | |||
522 | mcfqspi->cs_control = pdata->cs_control; | ||
523 | status = mcfqspi_cs_setup(mcfqspi); | ||
524 | if (status) { | ||
525 | dev_dbg(&pdev->dev, "error initializing cs_control\n"); | ||
526 | goto fail5; | ||
527 | } | ||
528 | |||
529 | master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; | ||
530 | master->setup = mcfqspi_setup; | ||
531 | master->transfer = mcfqspi_transfer; | ||
532 | |||
533 | platform_set_drvdata(pdev, master); | ||
534 | |||
535 | status = spi_register_master(master); | ||
536 | if (status) { | ||
537 | dev_dbg(&pdev->dev, "spi_register_master failed\n"); | ||
538 | goto fail6; | ||
539 | } | ||
540 | dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); | ||
541 | |||
542 | return 0; | ||
543 | |||
544 | fail6: | ||
545 | mcfqspi_cs_teardown(mcfqspi); | ||
546 | fail5: | ||
547 | destroy_workqueue(mcfqspi->workq); | ||
548 | fail4: | ||
549 | clk_disable(mcfqspi->clk); | ||
550 | clk_put(mcfqspi->clk); | ||
551 | fail3: | ||
552 | free_irq(mcfqspi->irq, mcfqspi); | ||
553 | fail2: | ||
554 | iounmap(mcfqspi->iobase); | ||
555 | fail1: | ||
556 | release_mem_region(res->start, resource_size(res)); | ||
557 | fail0: | ||
558 | spi_master_put(master); | ||
559 | |||
560 | dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); | ||
561 | |||
562 | return status; | ||
563 | } | ||
564 | |||
565 | static int __devexit mcfqspi_remove(struct platform_device *pdev) | ||
566 | { | ||
567 | struct spi_master *master = platform_get_drvdata(pdev); | ||
568 | struct mcfqspi *mcfqspi = spi_master_get_devdata(master); | ||
569 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
570 | |||
571 | /* disable the hardware (set the baud rate to 0) */ | ||
572 | mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); | ||
573 | |||
574 | platform_set_drvdata(pdev, NULL); | ||
575 | mcfqspi_cs_teardown(mcfqspi); | ||
576 | destroy_workqueue(mcfqspi->workq); | ||
577 | clk_disable(mcfqspi->clk); | ||
578 | clk_put(mcfqspi->clk); | ||
579 | free_irq(mcfqspi->irq, mcfqspi); | ||
580 | iounmap(mcfqspi->iobase); | ||
581 | release_mem_region(res->start, resource_size(res)); | ||
582 | spi_unregister_master(master); | ||
583 | spi_master_put(master); | ||
584 | |||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | #ifdef CONFIG_PM | ||
589 | |||
590 | static int mcfqspi_suspend(struct device *dev) | ||
591 | { | ||
592 | struct mcfqspi *mcfqspi = spi_master_get_devdata(dev_get_drvdata(dev)); | ||
593 | |||
594 | clk_disable(mcfqspi->clk); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static int mcfqspi_resume(struct device *dev) | ||
600 | { | ||
601 | struct mcfqspi *mcfqspi = spi_master_get_devdata(dev_get_drvdata(dev)); | ||
602 | |||
603 | clk_enable(mcfqspi->clk); | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | static struct dev_pm_ops mcfqspi_dev_pm_ops = { | ||
609 | .suspend = mcfqspi_suspend, | ||
610 | .resume = mcfqspi_resume, | ||
611 | }; | ||
612 | |||
613 | #define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) | ||
614 | #else | ||
615 | #define MCFQSPI_DEV_PM_OPS NULL | ||
616 | #endif | ||
617 | |||
618 | static struct platform_driver mcfqspi_driver = { | ||
619 | .driver.name = DRIVER_NAME, | ||
620 | .driver.owner = THIS_MODULE, | ||
621 | .driver.pm = MCFQSPI_DEV_PM_OPS, | ||
622 | .remove = __devexit_p(mcfqspi_remove), | ||
623 | }; | ||
624 | |||
625 | static int __init mcfqspi_init(void) | ||
626 | { | ||
627 | return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); | ||
628 | } | ||
629 | module_init(mcfqspi_init); | ||
630 | |||
631 | static void __exit mcfqspi_exit(void) | ||
632 | { | ||
633 | platform_driver_unregister(&mcfqspi_driver); | ||
634 | } | ||
635 | module_exit(mcfqspi_exit); | ||
636 | |||
637 | MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); | ||
638 | MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); | ||
639 | MODULE_LICENSE("GPL"); | ||
640 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
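/*
 * Editor's note -- illustrative sketch, not part of the patch: transfer()
 * above only validates and queues the spi_message and kicks the workqueue;
 * mcfqspi_work() then walks the queued transfers.  Client drivers therefore
 * use the ordinary SPI core entry points.  The opcode and reply length
 * below are hypothetical.
 */
static int example_read_chip_id(struct spi_device *spi)
{
	u8 cmd = 0x9f;	/* hypothetical "read id" opcode */
	u8 id[3];

	/* builds a two-transfer spi_message that ends up on mcfqspi->msgq */
	return spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
}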
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c new file mode 100644 index 000000000000..225ab60b02c4 --- /dev/null +++ b/drivers/spi/davinci_spi.c | |||
@@ -0,0 +1,1255 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Texas Instruments. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/clk.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/spi/spi.h> | ||
29 | #include <linux/spi/spi_bitbang.h> | ||
30 | |||
31 | #include <mach/spi.h> | ||
32 | #include <mach/edma.h> | ||
33 | |||
34 | #define SPI_NO_RESOURCE ((resource_size_t)-1) | ||
35 | |||
36 | #define SPI_MAX_CHIPSELECT 2 | ||
37 | |||
38 | #define CS_DEFAULT 0xFF | ||
39 | |||
40 | #define SPI_BUFSIZ (SMP_CACHE_BYTES + 1) | ||
41 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | ||
42 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | ||
43 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | ||
44 | |||
45 | #define SPIFMT_PHASE_MASK BIT(16) | ||
46 | #define SPIFMT_POLARITY_MASK BIT(17) | ||
47 | #define SPIFMT_DISTIMER_MASK BIT(18) | ||
48 | #define SPIFMT_SHIFTDIR_MASK BIT(20) | ||
49 | #define SPIFMT_WAITENA_MASK BIT(21) | ||
50 | #define SPIFMT_PARITYENA_MASK BIT(22) | ||
51 | #define SPIFMT_ODD_PARITY_MASK BIT(23) | ||
52 | #define SPIFMT_WDELAY_MASK 0x3f000000u | ||
53 | #define SPIFMT_WDELAY_SHIFT 24 | ||
54 | #define SPIFMT_CHARLEN_MASK 0x0000001Fu | ||
55 | |||
56 | /* SPIGCR1 */ | ||
57 | #define SPIGCR1_SPIENA_MASK 0x01000000u | ||
58 | |||
59 | /* SPIPC0 */ | ||
60 | #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ | ||
61 | #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ | ||
62 | #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ | ||
63 | #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ | ||
64 | #define SPIPC0_EN1FUN_MASK BIT(1) | ||
65 | #define SPIPC0_EN0FUN_MASK BIT(0) | ||
66 | |||
67 | #define SPIINT_MASKALL 0x0101035F | ||
68 | #define SPI_INTLVL_1 0x000001FFu | ||
69 | #define SPI_INTLVL_0 0x00000000u | ||
70 | |||
71 | /* SPIDAT1 */ | ||
72 | #define SPIDAT1_CSHOLD_SHIFT 28 | ||
73 | #define SPIDAT1_CSNR_SHIFT 16 | ||
74 | #define SPIGCR1_CLKMOD_MASK BIT(1) | ||
75 | #define SPIGCR1_MASTER_MASK BIT(0) | ||
76 | #define SPIGCR1_LOOPBACK_MASK BIT(16) | ||
77 | |||
78 | /* SPIBUF */ | ||
79 | #define SPIBUF_TXFULL_MASK BIT(29) | ||
80 | #define SPIBUF_RXEMPTY_MASK BIT(31) | ||
81 | |||
82 | /* Error Masks */ | ||
83 | #define SPIFLG_DLEN_ERR_MASK BIT(0) | ||
84 | #define SPIFLG_TIMEOUT_MASK BIT(1) | ||
85 | #define SPIFLG_PARERR_MASK BIT(2) | ||
86 | #define SPIFLG_DESYNC_MASK BIT(3) | ||
87 | #define SPIFLG_BITERR_MASK BIT(4) | ||
88 | #define SPIFLG_OVRRUN_MASK BIT(6) | ||
89 | #define SPIFLG_RX_INTR_MASK BIT(8) | ||
90 | #define SPIFLG_TX_INTR_MASK BIT(9) | ||
91 | #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) | ||
92 | #define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ | ||
93 | | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ | ||
94 | | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ | ||
95 | | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \ | ||
96 | | SPIFLG_TX_INTR_MASK \ | ||
97 | | SPIFLG_BUF_INIT_ACTIVE_MASK) | ||
98 | |||
99 | #define SPIINT_DLEN_ERR_INTR BIT(0) | ||
100 | #define SPIINT_TIMEOUT_INTR BIT(1) | ||
101 | #define SPIINT_PARERR_INTR BIT(2) | ||
102 | #define SPIINT_DESYNC_INTR BIT(3) | ||
103 | #define SPIINT_BITERR_INTR BIT(4) | ||
104 | #define SPIINT_OVRRUN_INTR BIT(6) | ||
105 | #define SPIINT_RX_INTR BIT(8) | ||
106 | #define SPIINT_TX_INTR BIT(9) | ||
107 | #define SPIINT_DMA_REQ_EN BIT(16) | ||
108 | #define SPIINT_ENABLE_HIGHZ BIT(24) | ||
109 | |||
110 | #define SPI_T2CDELAY_SHIFT 16 | ||
111 | #define SPI_C2TDELAY_SHIFT 24 | ||
112 | |||
113 | /* SPI Controller registers */ | ||
114 | #define SPIGCR0 0x00 | ||
115 | #define SPIGCR1 0x04 | ||
116 | #define SPIINT 0x08 | ||
117 | #define SPILVL 0x0c | ||
118 | #define SPIFLG 0x10 | ||
119 | #define SPIPC0 0x14 | ||
120 | #define SPIPC1 0x18 | ||
121 | #define SPIPC2 0x1c | ||
122 | #define SPIPC3 0x20 | ||
123 | #define SPIPC4 0x24 | ||
124 | #define SPIPC5 0x28 | ||
125 | #define SPIPC6 0x2c | ||
126 | #define SPIPC7 0x30 | ||
127 | #define SPIPC8 0x34 | ||
128 | #define SPIDAT0 0x38 | ||
129 | #define SPIDAT1 0x3c | ||
130 | #define SPIBUF 0x40 | ||
131 | #define SPIEMU 0x44 | ||
132 | #define SPIDELAY 0x48 | ||
133 | #define SPIDEF 0x4c | ||
134 | #define SPIFMT0 0x50 | ||
135 | #define SPIFMT1 0x54 | ||
136 | #define SPIFMT2 0x58 | ||
137 | #define SPIFMT3 0x5c | ||
138 | #define TGINTVEC0 0x60 | ||
139 | #define TGINTVEC1 0x64 | ||
140 | |||
141 | struct davinci_spi_slave { | ||
142 | u32 cmd_to_write; | ||
143 | u32 clk_ctrl_to_write; | ||
144 | u32 bytes_per_word; | ||
145 | u8 active_cs; | ||
146 | }; | ||
147 | |||
148 | /* We have 2 DMA channels per CS, one for RX and one for TX */ | ||
149 | struct davinci_spi_dma { | ||
150 | int dma_tx_channel; | ||
151 | int dma_rx_channel; | ||
152 | int dma_tx_sync_dev; | ||
153 | int dma_rx_sync_dev; | ||
154 | enum dma_event_q eventq; | ||
155 | |||
156 | struct completion dma_tx_completion; | ||
157 | struct completion dma_rx_completion; | ||
158 | }; | ||
159 | |||
160 | /* SPI Controller driver's private data. */ | ||
161 | struct davinci_spi { | ||
162 | struct spi_bitbang bitbang; | ||
163 | struct clk *clk; | ||
164 | |||
165 | u8 version; | ||
166 | resource_size_t pbase; | ||
167 | void __iomem *base; | ||
168 | size_t region_size; | ||
169 | u32 irq; | ||
170 | struct completion done; | ||
171 | |||
172 | const void *tx; | ||
173 | void *rx; | ||
174 | u8 *tmp_buf; | ||
175 | int count; | ||
176 | struct davinci_spi_dma *dma_channels; | ||
177 | struct davinci_spi_platform_data *pdata; | ||
178 | |||
179 | void (*get_rx)(u32 rx_data, struct davinci_spi *); | ||
180 | u32 (*get_tx)(struct davinci_spi *); | ||
181 | |||
182 | struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; | ||
183 | }; | ||
184 | |||
185 | static unsigned use_dma; | ||
186 | |||
187 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) | ||
188 | { | ||
189 | u8 *rx = davinci_spi->rx; | ||
190 | |||
191 | *rx++ = (u8)data; | ||
192 | davinci_spi->rx = rx; | ||
193 | } | ||
194 | |||
195 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) | ||
196 | { | ||
197 | u16 *rx = davinci_spi->rx; | ||
198 | |||
199 | *rx++ = (u16)data; | ||
200 | davinci_spi->rx = rx; | ||
201 | } | ||
202 | |||
203 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) | ||
204 | { | ||
205 | u32 data; | ||
206 | const u8 *tx = davinci_spi->tx; | ||
207 | |||
208 | data = *tx++; | ||
209 | davinci_spi->tx = tx; | ||
210 | return data; | ||
211 | } | ||
212 | |||
213 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) | ||
214 | { | ||
215 | u32 data; | ||
216 | const u16 *tx = davinci_spi->tx; | ||
217 | |||
218 | data = *tx++; | ||
219 | davinci_spi->tx = tx; | ||
220 | return data; | ||
221 | } | ||
222 | |||
223 | static inline void set_io_bits(void __iomem *addr, u32 bits) | ||
224 | { | ||
225 | u32 v = ioread32(addr); | ||
226 | |||
227 | v |= bits; | ||
228 | iowrite32(v, addr); | ||
229 | } | ||
230 | |||
231 | static inline void clear_io_bits(void __iomem *addr, u32 bits) | ||
232 | { | ||
233 | u32 v = ioread32(addr); | ||
234 | |||
235 | v &= ~bits; | ||
236 | iowrite32(v, addr); | ||
237 | } | ||
238 | |||
239 | static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
240 | { | ||
241 | set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
242 | } | ||
243 | |||
244 | static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
245 | { | ||
246 | clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
247 | } | ||
248 | |||
249 | static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) | ||
250 | { | ||
251 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
252 | |||
253 | if (enable) | ||
254 | set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
255 | else | ||
256 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Interface to control the chip select signal | ||
261 | */ | ||
262 | static void davinci_spi_chipselect(struct spi_device *spi, int value) | ||
263 | { | ||
264 | struct davinci_spi *davinci_spi; | ||
265 | struct davinci_spi_platform_data *pdata; | ||
266 | u32 data1_reg_val = 0; | ||
267 | |||
268 | davinci_spi = spi_master_get_devdata(spi->master); | ||
269 | pdata = davinci_spi->pdata; | ||
270 | |||
271 | /* | ||
272 | * Board specific chip select logic decides the polarity and cs | ||
273 | * line for the controller | ||
274 | */ | ||
275 | if (value == BITBANG_CS_INACTIVE) { | ||
276 | set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); | ||
277 | |||
278 | data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; | ||
279 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
280 | |||
281 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
282 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
283 | cpu_relax(); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * davinci_spi_setup_transfer - This function will determine the transfer method | ||
289 | * @spi: spi device on which data transfer is to be done | ||
290 | * @t: spi transfer in which transfer info is filled | ||
291 | * | ||
292 | * This function determines the data transfer method (8/16/32-bit transfer). | ||
293 | * It will also set the SPI Clock Control register according to | ||
294 | * the SPI slave device frequency. | ||
295 | */ | ||
296 | static int davinci_spi_setup_transfer(struct spi_device *spi, | ||
297 | struct spi_transfer *t) | ||
298 | { | ||
299 | |||
300 | struct davinci_spi *davinci_spi; | ||
301 | struct davinci_spi_platform_data *pdata; | ||
302 | u8 bits_per_word = 0; | ||
303 | u32 hz = 0, prescale; | ||
304 | |||
305 | davinci_spi = spi_master_get_devdata(spi->master); | ||
306 | pdata = davinci_spi->pdata; | ||
307 | |||
308 | if (t) { | ||
309 | bits_per_word = t->bits_per_word; | ||
310 | hz = t->speed_hz; | ||
311 | } | ||
312 | |||
313 | /* if bits_per_word is not set then use the default */ | ||
314 | if (!bits_per_word) | ||
315 | bits_per_word = spi->bits_per_word; | ||
316 | |||
317 | /* | ||
318 | * Assign function pointer to appropriate transfer method | ||
319 | * 8bit, 16bit or 32bit transfer | ||
320 | */ | ||
321 | if (bits_per_word <= 8 && bits_per_word >= 2) { | ||
322 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | ||
323 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | ||
324 | davinci_spi->slave[spi->chip_select].bytes_per_word = 1; | ||
325 | } else if (bits_per_word <= 16 && bits_per_word >= 2) { | ||
326 | davinci_spi->get_rx = davinci_spi_rx_buf_u16; | ||
327 | davinci_spi->get_tx = davinci_spi_tx_buf_u16; | ||
328 | davinci_spi->slave[spi->chip_select].bytes_per_word = 2; | ||
329 | } else | ||
330 | return -EINVAL; | ||
331 | |||
332 | if (!hz) | ||
333 | hz = spi->max_speed_hz; | ||
334 | |||
335 | clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, | ||
336 | spi->chip_select); | ||
337 | set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, | ||
338 | spi->chip_select); | ||
339 | |||
340 | prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff; | ||
341 | |||
342 | clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); | ||
343 | set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select); | ||
344 | |||
345 | return 0; | ||
346 | } | ||
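/*
 * Editor's note -- illustrative sketch, not part of the patch: the
 * prescaler written above implies SPIn_CLK = clk_get_rate(clk) /
 * (prescale + 1), so requested rates that do not divide the module clock
 * evenly are rounded.  The 150 MHz module clock below is assumed purely
 * for the worked example.
 */
static u32 davinci_spi_effective_hz(u32 clk_rate, u32 requested_hz)
{
	u32 prescale = ((clk_rate / requested_hz) - 1) & 0xff;

	return clk_rate / (prescale + 1);
}
/*
 * davinci_spi_effective_hz(150000000, 10000000) == 10000000  (exact)
 * davinci_spi_effective_hz(150000000,  4000000) == 4054054   (rounded)
 */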
347 | |||
348 | static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) | ||
349 | { | ||
350 | struct spi_device *spi = (struct spi_device *)data; | ||
351 | struct davinci_spi *davinci_spi; | ||
352 | struct davinci_spi_dma *davinci_spi_dma; | ||
353 | struct davinci_spi_platform_data *pdata; | ||
354 | |||
355 | davinci_spi = spi_master_get_devdata(spi->master); | ||
356 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | ||
357 | pdata = davinci_spi->pdata; | ||
358 | |||
359 | if (ch_status == DMA_COMPLETE) | ||
360 | edma_stop(davinci_spi_dma->dma_rx_channel); | ||
361 | else | ||
362 | edma_clean_channel(davinci_spi_dma->dma_rx_channel); | ||
363 | |||
364 | complete(&davinci_spi_dma->dma_rx_completion); | ||
365 | /* We must disable the DMA RX request */ | ||
366 | davinci_spi_set_dma_req(spi, 0); | ||
367 | } | ||
368 | |||
369 | static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) | ||
370 | { | ||
371 | struct spi_device *spi = (struct spi_device *)data; | ||
372 | struct davinci_spi *davinci_spi; | ||
373 | struct davinci_spi_dma *davinci_spi_dma; | ||
374 | struct davinci_spi_platform_data *pdata; | ||
375 | |||
376 | davinci_spi = spi_master_get_devdata(spi->master); | ||
377 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | ||
378 | pdata = davinci_spi->pdata; | ||
379 | |||
380 | if (ch_status == DMA_COMPLETE) | ||
381 | edma_stop(davinci_spi_dma->dma_tx_channel); | ||
382 | else | ||
383 | edma_clean_channel(davinci_spi_dma->dma_tx_channel); | ||
384 | |||
385 | complete(&davinci_spi_dma->dma_tx_completion); | ||
386 | /* We must disable the DMA TX request */ | ||
387 | davinci_spi_set_dma_req(spi, 0); | ||
388 | } | ||
389 | |||
390 | static int davinci_spi_request_dma(struct spi_device *spi) | ||
391 | { | ||
392 | struct davinci_spi *davinci_spi; | ||
393 | struct davinci_spi_dma *davinci_spi_dma; | ||
394 | struct davinci_spi_platform_data *pdata; | ||
395 | struct device *sdev; | ||
396 | int r; | ||
397 | |||
398 | davinci_spi = spi_master_get_devdata(spi->master); | ||
399 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
400 | pdata = davinci_spi->pdata; | ||
401 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
402 | |||
403 | r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, | ||
404 | davinci_spi_dma_rx_callback, spi, | ||
405 | davinci_spi_dma->eventq); | ||
406 | if (r < 0) { | ||
407 | dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); | ||
408 | return -EAGAIN; | ||
409 | } | ||
410 | davinci_spi_dma->dma_rx_channel = r; | ||
411 | r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, | ||
412 | davinci_spi_dma_tx_callback, spi, | ||
413 | davinci_spi_dma->eventq); | ||
414 | if (r < 0) { | ||
415 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
416 | davinci_spi_dma->dma_rx_channel = -1; | ||
417 | dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); | ||
418 | return -EAGAIN; | ||
419 | } | ||
420 | davinci_spi_dma->dma_tx_channel = r; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | /** | ||
426 | * davinci_spi_setup - This function will set the default transfer method | ||
427 | * @spi: spi device on which data transfer is to be done | ||
428 | * | ||
429 | * This function sets the default transfer method. | ||
430 | */ | ||
431 | |||
432 | static int davinci_spi_setup(struct spi_device *spi) | ||
433 | { | ||
434 | int retval; | ||
435 | struct davinci_spi *davinci_spi; | ||
436 | struct davinci_spi_dma *davinci_spi_dma; | ||
437 | struct device *sdev; | ||
438 | |||
439 | davinci_spi = spi_master_get_devdata(spi->master); | ||
440 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
441 | |||
442 | /* if bits_per_word is zero then default to 8 */ | ||
443 | if (!spi->bits_per_word) | ||
444 | spi->bits_per_word = 8; | ||
445 | |||
446 | davinci_spi->slave[spi->chip_select].cmd_to_write = 0; | ||
447 | |||
448 | if (use_dma && davinci_spi->dma_channels) { | ||
449 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
450 | |||
451 | if ((davinci_spi_dma->dma_rx_channel == -1) | ||
452 | || (davinci_spi_dma->dma_tx_channel == -1)) { | ||
453 | retval = davinci_spi_request_dma(spi); | ||
454 | if (retval < 0) | ||
455 | return retval; | ||
456 | } | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * SPI in DaVinci and DA8xx operate between | ||
461 | * 600 kHz and 50 MHz | ||
462 | */ | ||
463 | if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { | ||
464 | dev_dbg(sdev, "Operating frequency is not in acceptable " | ||
465 | "range\n"); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * Set up SPIFMTn register, unique to this chipselect. | ||
471 | * | ||
472 | * NOTE: we could do all of these with one write. Also, some | ||
473 | * of the "version 2" features are found in chips that don't | ||
474 | * support all of them... | ||
475 | */ | ||
476 | if (spi->mode & SPI_LSB_FIRST) | ||
477 | set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
478 | spi->chip_select); | ||
479 | else | ||
480 | clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
481 | spi->chip_select); | ||
482 | |||
483 | if (spi->mode & SPI_CPOL) | ||
484 | set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
485 | spi->chip_select); | ||
486 | else | ||
487 | clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
488 | spi->chip_select); | ||
489 | |||
490 | if (!(spi->mode & SPI_CPHA)) | ||
491 | set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
492 | spi->chip_select); | ||
493 | else | ||
494 | clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
495 | spi->chip_select); | ||
496 | |||
497 | /* | ||
498 | * Version 1 hardware supports two basic SPI modes: | ||
499 | * - Standard SPI mode uses 4 pins, with chipselect | ||
500 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) | ||
501 | * (distinct from SPI_3WIRE, with just one data wire; | ||
502 | * or similar variants without MOSI or without MISO) | ||
503 | * | ||
504 | * Version 2 hardware supports an optional handshaking signal, | ||
505 | * so it can support two more modes: | ||
506 | * - 5 pin SPI variant is standard SPI plus SPI_READY | ||
507 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) | ||
508 | */ | ||
509 | |||
510 | if (davinci_spi->version == SPI_VERSION_2) { | ||
511 | clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, | ||
512 | spi->chip_select); | ||
513 | set_fmt_bits(davinci_spi->base, | ||
514 | (davinci_spi->pdata->wdelay | ||
515 | << SPIFMT_WDELAY_SHIFT) | ||
516 | & SPIFMT_WDELAY_MASK, | ||
517 | spi->chip_select); | ||
518 | |||
519 | if (davinci_spi->pdata->odd_parity) | ||
520 | set_fmt_bits(davinci_spi->base, | ||
521 | SPIFMT_ODD_PARITY_MASK, | ||
522 | spi->chip_select); | ||
523 | else | ||
524 | clear_fmt_bits(davinci_spi->base, | ||
525 | SPIFMT_ODD_PARITY_MASK, | ||
526 | spi->chip_select); | ||
527 | |||
528 | if (davinci_spi->pdata->parity_enable) | ||
529 | set_fmt_bits(davinci_spi->base, | ||
530 | SPIFMT_PARITYENA_MASK, | ||
531 | spi->chip_select); | ||
532 | else | ||
533 | clear_fmt_bits(davinci_spi->base, | ||
534 | SPIFMT_PARITYENA_MASK, | ||
535 | spi->chip_select); | ||
536 | |||
537 | if (davinci_spi->pdata->wait_enable) | ||
538 | set_fmt_bits(davinci_spi->base, | ||
539 | SPIFMT_WAITENA_MASK, | ||
540 | spi->chip_select); | ||
541 | else | ||
542 | clear_fmt_bits(davinci_spi->base, | ||
543 | SPIFMT_WAITENA_MASK, | ||
544 | spi->chip_select); | ||
545 | |||
546 | if (davinci_spi->pdata->timer_disable) | ||
547 | set_fmt_bits(davinci_spi->base, | ||
548 | SPIFMT_DISTIMER_MASK, | ||
549 | spi->chip_select); | ||
550 | else | ||
551 | clear_fmt_bits(davinci_spi->base, | ||
552 | SPIFMT_DISTIMER_MASK, | ||
553 | spi->chip_select); | ||
554 | } | ||
555 | |||
556 | retval = davinci_spi_setup_transfer(spi, NULL); | ||
557 | |||
558 | return retval; | ||
559 | } | ||
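/*
 * Editor's note -- illustrative sketch, not part of the patch: a board
 * file can request the pin variants described in the comment above
 * through spi_board_info.mode.  The modalias, chip select and speed are
 * hypothetical; SPI_READY is only honoured on SPI_VERSION_2 controllers.
 */
static struct spi_board_info example_spi_info[] __initdata = {
	{
		.modalias	= "example-dev",	/* hypothetical slave */
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 10000000,
		/* "4 pin with enable": handshake line, no chipselect */
		.mode		= SPI_MODE_0 | SPI_READY | SPI_NO_CS,
	},
};
/* registered from board init via spi_register_board_info(example_spi_info, ARRAY_SIZE(example_spi_info)) */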
560 | |||
561 | static void davinci_spi_cleanup(struct spi_device *spi) | ||
562 | { | ||
563 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
564 | struct davinci_spi_dma *davinci_spi_dma; | ||
565 | |||
566 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
567 | |||
568 | if (use_dma && davinci_spi->dma_channels) { | ||
569 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
570 | |||
571 | if ((davinci_spi_dma->dma_rx_channel != -1) | ||
572 | && (davinci_spi_dma->dma_tx_channel != -1)) { | ||
573 | edma_free_channel(davinci_spi_dma->dma_tx_channel); | ||
574 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
575 | } | ||
576 | } | ||
577 | } | ||
578 | |||
579 | static int davinci_spi_bufs_prep(struct spi_device *spi, | ||
580 | struct davinci_spi *davinci_spi) | ||
581 | { | ||
582 | int op_mode = 0; | ||
583 | |||
584 | /* | ||
585 | * REVISIT unless devices disagree about SPI_LOOP or | ||
586 | * SPI_READY (SPI_NO_CS only allows one device!), this | ||
587 | * should not need to be done before each message... | ||
588 | * optimize for both flags staying cleared. | ||
589 | */ | ||
590 | |||
591 | op_mode = SPIPC0_DIFUN_MASK | ||
592 | | SPIPC0_DOFUN_MASK | ||
593 | | SPIPC0_CLKFUN_MASK; | ||
594 | if (!(spi->mode & SPI_NO_CS)) | ||
595 | op_mode |= 1 << spi->chip_select; | ||
596 | if (spi->mode & SPI_READY) | ||
597 | op_mode |= SPIPC0_SPIENA_MASK; | ||
598 | |||
599 | iowrite32(op_mode, davinci_spi->base + SPIPC0); | ||
600 | |||
601 | if (spi->mode & SPI_LOOP) | ||
602 | set_io_bits(davinci_spi->base + SPIGCR1, | ||
603 | SPIGCR1_LOOPBACK_MASK); | ||
604 | else | ||
605 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
606 | SPIGCR1_LOOPBACK_MASK); | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | ||
612 | int int_status) | ||
613 | { | ||
614 | struct device *sdev = davinci_spi->bitbang.master->dev.parent; | ||
615 | |||
616 | if (int_status & SPIFLG_TIMEOUT_MASK) { | ||
617 | dev_dbg(sdev, "SPI Time-out Error\n"); | ||
618 | return -ETIMEDOUT; | ||
619 | } | ||
620 | if (int_status & SPIFLG_DESYNC_MASK) { | ||
621 | dev_dbg(sdev, "SPI Desynchronization Error\n"); | ||
622 | return -EIO; | ||
623 | } | ||
624 | if (int_status & SPIFLG_BITERR_MASK) { | ||
625 | dev_dbg(sdev, "SPI Bit error\n"); | ||
626 | return -EIO; | ||
627 | } | ||
628 | |||
629 | if (davinci_spi->version == SPI_VERSION_2) { | ||
630 | if (int_status & SPIFLG_DLEN_ERR_MASK) { | ||
631 | dev_dbg(sdev, "SPI Data Length Error\n"); | ||
632 | return -EIO; | ||
633 | } | ||
634 | if (int_status & SPIFLG_PARERR_MASK) { | ||
635 | dev_dbg(sdev, "SPI Parity Error\n"); | ||
636 | return -EIO; | ||
637 | } | ||
638 | if (int_status & SPIFLG_OVRRUN_MASK) { | ||
639 | dev_dbg(sdev, "SPI Data Overrun error\n"); | ||
640 | return -EIO; | ||
641 | } | ||
642 | if (int_status & SPIFLG_TX_INTR_MASK) { | ||
643 | dev_dbg(sdev, "SPI TX intr bit set\n"); | ||
644 | return -EIO; | ||
645 | } | ||
646 | if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { | ||
647 | dev_dbg(sdev, "SPI Buffer Init Active\n"); | ||
648 | return -EBUSY; | ||
649 | } | ||
650 | } | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * davinci_spi_bufs - functions which handle the data transfer | ||
657 | * @spi: spi device on which data transfer is to be done | ||
658 | * @t: spi transfer in which transfer info is filled | ||
659 | * | ||
660 | * This function will put the data to be transferred into the data register | ||
661 | * of the SPI controller and then wait until the completion is marked | ||
662 | * by the IRQ handler. | ||
663 | */ | ||
664 | static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) | ||
665 | { | ||
666 | struct davinci_spi *davinci_spi; | ||
667 | int int_status, count, ret; | ||
668 | u8 conv, tmp; | ||
669 | u32 tx_data, data1_reg_val; | ||
670 | u32 buf_val, flg_val; | ||
671 | struct davinci_spi_platform_data *pdata; | ||
672 | |||
673 | davinci_spi = spi_master_get_devdata(spi->master); | ||
674 | pdata = davinci_spi->pdata; | ||
675 | |||
676 | davinci_spi->tx = t->tx_buf; | ||
677 | davinci_spi->rx = t->rx_buf; | ||
678 | |||
679 | /* convert len to words based on bits_per_word */ | ||
680 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | ||
681 | davinci_spi->count = t->len / conv; | ||
682 | |||
683 | INIT_COMPLETION(davinci_spi->done); | ||
684 | |||
685 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
686 | if (ret) | ||
687 | return ret; | ||
688 | |||
689 | /* Enable SPI */ | ||
690 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
691 | |||
692 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
693 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
694 | davinci_spi->base + SPIDELAY); | ||
695 | |||
696 | count = davinci_spi->count; | ||
697 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
698 | tmp = ~(0x1 << spi->chip_select); | ||
699 | |||
700 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
701 | |||
702 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
703 | |||
704 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
705 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
706 | cpu_relax(); | ||
707 | |||
708 | /* Determine the command to execute: READ or WRITE */ | ||
709 | if (t->tx_buf) { | ||
710 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | ||
711 | |||
712 | while (1) { | ||
713 | tx_data = davinci_spi->get_tx(davinci_spi); | ||
714 | |||
715 | data1_reg_val &= ~(0xFFFF); | ||
716 | data1_reg_val |= (0xFFFF & tx_data); | ||
717 | |||
718 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
719 | if ((buf_val & SPIBUF_TXFULL_MASK) == 0) { | ||
720 | iowrite32(data1_reg_val, | ||
721 | davinci_spi->base + SPIDAT1); | ||
722 | |||
723 | count--; | ||
724 | } | ||
725 | while (ioread32(davinci_spi->base + SPIBUF) | ||
726 | & SPIBUF_RXEMPTY_MASK) | ||
727 | cpu_relax(); | ||
728 | |||
729 | /* getting the returned byte */ | ||
730 | if (t->rx_buf) { | ||
731 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
732 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
733 | } | ||
734 | if (count <= 0) | ||
735 | break; | ||
736 | } | ||
737 | } else { | ||
738 | if (pdata->poll_mode) { | ||
739 | while (1) { | ||
740 | /* keeps the serial clock going */ | ||
741 | if ((ioread32(davinci_spi->base + SPIBUF) | ||
742 | & SPIBUF_TXFULL_MASK) == 0) | ||
743 | iowrite32(data1_reg_val, | ||
744 | davinci_spi->base + SPIDAT1); | ||
745 | |||
746 | while (ioread32(davinci_spi->base + SPIBUF) & | ||
747 | SPIBUF_RXEMPTY_MASK) | ||
748 | cpu_relax(); | ||
749 | |||
750 | flg_val = ioread32(davinci_spi->base + SPIFLG); | ||
751 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
752 | |||
753 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
754 | |||
755 | count--; | ||
756 | if (count <= 0) | ||
757 | break; | ||
758 | } | ||
759 | } else { /* Receive in Interrupt mode */ | ||
760 | int i; | ||
761 | |||
762 | for (i = 0; i < davinci_spi->count; i++) { | ||
763 | set_io_bits(davinci_spi->base + SPIINT, | ||
764 | SPIINT_BITERR_INTR | ||
765 | | SPIINT_OVRRUN_INTR | ||
766 | | SPIINT_RX_INTR); | ||
767 | |||
768 | iowrite32(data1_reg_val, | ||
769 | davinci_spi->base + SPIDAT1); | ||
770 | |||
771 | while (ioread32(davinci_spi->base + SPIINT) & | ||
772 | SPIINT_RX_INTR) | ||
773 | cpu_relax(); | ||
774 | } | ||
775 | iowrite32((data1_reg_val & 0x0ffcffff), | ||
776 | davinci_spi->base + SPIDAT1); | ||
777 | } | ||
778 | } | ||
779 | |||
780 | /* | ||
781 | * Check for bit error, desync error, parity error, timeout error and | ||
782 | * receive overflow errors | ||
783 | */ | ||
784 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
785 | |||
786 | ret = davinci_spi_check_error(davinci_spi, int_status); | ||
787 | if (ret != 0) | ||
788 | return ret; | ||
789 | |||
790 | /* SPI Framework maintains the count only in bytes so convert back */ | ||
791 | davinci_spi->count *= conv; | ||
792 | |||
793 | return t->len; | ||
794 | } | ||
795 | |||
796 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | ||
797 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | ||
798 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | ||
799 | |||
800 | static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) | ||
801 | { | ||
802 | struct davinci_spi *davinci_spi; | ||
803 | int int_status = 0; | ||
804 | int count, temp_count; | ||
805 | u8 conv = 1; | ||
806 | u8 tmp; | ||
807 | u32 data1_reg_val; | ||
808 | struct davinci_spi_dma *davinci_spi_dma; | ||
809 | int word_len, data_type, ret; | ||
810 | unsigned long tx_reg, rx_reg; | ||
811 | struct davinci_spi_platform_data *pdata; | ||
812 | struct device *sdev; | ||
813 | |||
814 | davinci_spi = spi_master_get_devdata(spi->master); | ||
815 | pdata = davinci_spi->pdata; | ||
816 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
817 | |||
818 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
819 | |||
820 | tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; | ||
821 | rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; | ||
822 | |||
823 | davinci_spi->tx = t->tx_buf; | ||
824 | davinci_spi->rx = t->rx_buf; | ||
825 | |||
826 | /* convert len to words based on bits_per_word */ | ||
827 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | ||
828 | davinci_spi->count = t->len / conv; | ||
829 | |||
830 | INIT_COMPLETION(davinci_spi->done); | ||
831 | |||
832 | init_completion(&davinci_spi_dma->dma_rx_completion); | ||
833 | init_completion(&davinci_spi_dma->dma_tx_completion); | ||
834 | |||
835 | word_len = conv * 8; | ||
836 | |||
837 | if (word_len <= 8) | ||
838 | data_type = DAVINCI_DMA_DATA_TYPE_S8; | ||
839 | else if (word_len <= 16) | ||
840 | data_type = DAVINCI_DMA_DATA_TYPE_S16; | ||
841 | else if (word_len <= 32) | ||
842 | data_type = DAVINCI_DMA_DATA_TYPE_S32; | ||
843 | else | ||
844 | return -EINVAL; | ||
845 | |||
846 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
847 | if (ret) | ||
848 | return ret; | ||
849 | |||
850 | /* Program the c2t/t2c delay values if required */ | ||
851 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
852 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
853 | davinci_spi->base + SPIDELAY); | ||
854 | |||
855 | count = davinci_spi->count; /* the number of elements */ | ||
856 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
857 | |||
858 | /* CS default = 0xFF */ | ||
859 | tmp = ~(0x1 << spi->chip_select); | ||
860 | |||
861 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
862 | |||
863 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
864 | |||
865 | /* disable all interrupts for dma transfers */ | ||
866 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | ||
867 | /* Disable SPI to write configuration bits in SPIDAT */ | ||
868 | clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
869 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
870 | /* Enable SPI */ | ||
871 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
872 | |||
873 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
874 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
875 | cpu_relax(); | ||
876 | |||
877 | |||
878 | if (t->tx_buf) { | ||
879 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, | ||
880 | DMA_TO_DEVICE); | ||
881 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
882 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | ||
883 | " TX buffer\n", count); | ||
884 | return -ENOMEM; | ||
885 | } | ||
886 | temp_count = count; | ||
887 | } else { | ||
888 | /* We need TX clocking for RX transaction */ | ||
889 | t->tx_dma = dma_map_single(&spi->dev, | ||
890 | (void *)davinci_spi->tmp_buf, count + 1, | ||
891 | DMA_TO_DEVICE); | ||
892 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
893 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | ||
894 | " TX tmp buffer\n", count); | ||
895 | return -ENOMEM; | ||
896 | } | ||
897 | temp_count = count + 1; | ||
898 | } | ||
899 | |||
900 | edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, | ||
901 | data_type, temp_count, 1, 0, ASYNC); | ||
902 | edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); | ||
903 | edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); | ||
904 | edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); | ||
905 | edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); | ||
906 | |||
907 | if (t->rx_buf) { | ||
908 | /* initiate transaction */ | ||
909 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
910 | |||
911 | t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, | ||
912 | DMA_FROM_DEVICE); | ||
913 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | ||
914 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | ||
915 | count); | ||
916 | if (t->tx_buf != NULL) | ||
917 | dma_unmap_single(NULL, t->tx_dma, | ||
918 | count, DMA_TO_DEVICE); | ||
919 | return -ENOMEM; | ||
920 | } | ||
921 | edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, | ||
922 | data_type, count, 1, 0, ASYNC); | ||
923 | edma_set_src(davinci_spi_dma->dma_rx_channel, | ||
924 | rx_reg, INCR, W8BIT); | ||
925 | edma_set_dest(davinci_spi_dma->dma_rx_channel, | ||
926 | t->rx_dma, INCR, W8BIT); | ||
927 | edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); | ||
928 | edma_set_dest_index(davinci_spi_dma->dma_rx_channel, | ||
929 | data_type, 0); | ||
930 | } | ||
931 | |||
932 | if ((t->tx_buf) || (t->rx_buf)) | ||
933 | edma_start(davinci_spi_dma->dma_tx_channel); | ||
934 | |||
935 | if (t->rx_buf) | ||
936 | edma_start(davinci_spi_dma->dma_rx_channel); | ||
937 | |||
938 | if ((t->rx_buf) || (t->tx_buf)) | ||
939 | davinci_spi_set_dma_req(spi, 1); | ||
940 | |||
941 | if (t->tx_buf) | ||
942 | wait_for_completion_interruptible( | ||
943 | &davinci_spi_dma->dma_tx_completion); | ||
944 | |||
945 | if (t->rx_buf) | ||
946 | wait_for_completion_interruptible( | ||
947 | &davinci_spi_dma->dma_rx_completion); | ||
948 | |||
949 | dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); | ||
950 | |||
951 | if (t->rx_buf) | ||
952 | dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); | ||
953 | |||
954 | /* | ||
955 | * Check for bit error, desync error, parity error, timeout error and | ||
956 | * receive overflow errors | ||
957 | */ | ||
958 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
959 | |||
960 | ret = davinci_spi_check_error(davinci_spi, int_status); | ||
961 | if (ret != 0) | ||
962 | return ret; | ||
963 | |||
964 | /* SPI Framework maintains the count only in bytes so convert back */ | ||
965 | davinci_spi->count *= conv; | ||
966 | |||
967 | return t->len; | ||
968 | } | ||
969 | |||
970 | /** | ||
971 | * davinci_spi_irq - IRQ handler for DaVinci SPI | ||
972 | * @irq: IRQ number for this SPI Master | ||
973 | * @context_data: pointer to this SPI master controller's davinci_spi structure | ||
974 | */ | ||
975 | static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) | ||
976 | { | ||
977 | struct davinci_spi *davinci_spi = context_data; | ||
978 | u32 int_status, rx_data = 0; | ||
979 | irqreturn_t ret = IRQ_NONE; | ||
980 | |||
981 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
982 | |||
983 | while ((int_status & SPIFLG_RX_INTR_MASK)) { | ||
984 | if (likely(int_status & SPIFLG_RX_INTR_MASK)) { | ||
985 | ret = IRQ_HANDLED; | ||
986 | |||
987 | rx_data = ioread32(davinci_spi->base + SPIBUF); | ||
988 | davinci_spi->get_rx(rx_data, davinci_spi); | ||
989 | |||
990 | /* Disable Receive Interrupt */ | ||
991 | iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), | ||
992 | davinci_spi->base + SPIINT); | ||
993 | } else | ||
994 | (void)davinci_spi_check_error(davinci_spi, int_status); | ||
995 | |||
996 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
997 | } | ||
998 | |||
999 | return ret; | ||
1000 | } | ||
1001 | |||
1002 | /** | ||
1003 | * davinci_spi_probe - probe function for SPI Master Controller | ||
1004 | * @pdev: platform_device structure which contains platform-specific data | ||
1005 | */ | ||
1006 | static int davinci_spi_probe(struct platform_device *pdev) | ||
1007 | { | ||
1008 | struct spi_master *master; | ||
1009 | struct davinci_spi *davinci_spi; | ||
1010 | struct davinci_spi_platform_data *pdata; | ||
1011 | struct resource *r, *mem; | ||
1012 | resource_size_t dma_rx_chan = SPI_NO_RESOURCE; | ||
1013 | resource_size_t dma_tx_chan = SPI_NO_RESOURCE; | ||
1014 | resource_size_t dma_eventq = SPI_NO_RESOURCE; | ||
1015 | int i = 0, ret = 0; | ||
1016 | |||
1017 | pdata = pdev->dev.platform_data; | ||
1018 | if (pdata == NULL) { | ||
1019 | ret = -ENODEV; | ||
1020 | goto err; | ||
1021 | } | ||
1022 | |||
1023 | master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); | ||
1024 | if (master == NULL) { | ||
1025 | ret = -ENOMEM; | ||
1026 | goto err; | ||
1027 | } | ||
1028 | |||
1029 | dev_set_drvdata(&pdev->dev, master); | ||
1030 | |||
1031 | davinci_spi = spi_master_get_devdata(master); | ||
1032 | if (davinci_spi == NULL) { | ||
1033 | ret = -ENOENT; | ||
1034 | goto free_master; | ||
1035 | } | ||
1036 | |||
1037 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1038 | if (r == NULL) { | ||
1039 | ret = -ENOENT; | ||
1040 | goto free_master; | ||
1041 | } | ||
1042 | |||
1043 | davinci_spi->pbase = r->start; | ||
1044 | davinci_spi->region_size = resource_size(r); | ||
1045 | davinci_spi->pdata = pdata; | ||
1046 | |||
1047 | mem = request_mem_region(r->start, davinci_spi->region_size, | ||
1048 | pdev->name); | ||
1049 | if (mem == NULL) { | ||
1050 | ret = -EBUSY; | ||
1051 | goto free_master; | ||
1052 | } | ||
1053 | |||
1054 | davinci_spi->base = (struct davinci_spi_reg __iomem *) | ||
1055 | ioremap(r->start, davinci_spi->region_size); | ||
1056 | if (davinci_spi->base == NULL) { | ||
1057 | ret = -ENOMEM; | ||
1058 | goto release_region; | ||
1059 | } | ||
1060 | |||
1061 | davinci_spi->irq = platform_get_irq(pdev, 0); | ||
1062 | if (davinci_spi->irq <= 0) { | ||
1063 | ret = -EINVAL; | ||
1064 | goto unmap_io; | ||
1065 | } | ||
1066 | |||
1067 | ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, | ||
1068 | dev_name(&pdev->dev), davinci_spi); | ||
1069 | if (ret) | ||
1070 | goto unmap_io; | ||
1071 | |||
1072 | /* Allocate tmp_buf for tx_buf */ | ||
1073 | davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); | ||
1074 | if (davinci_spi->tmp_buf == NULL) { | ||
1075 | ret = -ENOMEM; | ||
1076 | goto irq_free; | ||
1077 | } | ||
1078 | |||
1079 | davinci_spi->bitbang.master = spi_master_get(master); | ||
1080 | if (davinci_spi->bitbang.master == NULL) { | ||
1081 | ret = -ENODEV; | ||
1082 | goto free_tmp_buf; | ||
1083 | } | ||
1084 | |||
1085 | davinci_spi->clk = clk_get(&pdev->dev, NULL); | ||
1086 | if (IS_ERR(davinci_spi->clk)) { | ||
1087 | ret = -ENODEV; | ||
1088 | goto put_master; | ||
1089 | } | ||
1090 | clk_enable(davinci_spi->clk); | ||
1091 | |||
1092 | |||
1093 | master->bus_num = pdev->id; | ||
1094 | master->num_chipselect = pdata->num_chipselect; | ||
1095 | master->setup = davinci_spi_setup; | ||
1096 | master->cleanup = davinci_spi_cleanup; | ||
1097 | |||
1098 | davinci_spi->bitbang.chipselect = davinci_spi_chipselect; | ||
1099 | davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; | ||
1100 | |||
1101 | davinci_spi->version = pdata->version; | ||
1102 | use_dma = pdata->use_dma; | ||
1103 | |||
1104 | davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; | ||
1105 | if (davinci_spi->version == SPI_VERSION_2) | ||
1106 | davinci_spi->bitbang.flags |= SPI_READY; | ||
1107 | |||
1108 | if (use_dma) { | ||
1109 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1110 | if (r) | ||
1111 | dma_rx_chan = r->start; | ||
1112 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
1113 | if (r) | ||
1114 | dma_tx_chan = r->start; | ||
1115 | r = platform_get_resource(pdev, IORESOURCE_DMA, 2); | ||
1116 | if (r) | ||
1117 | dma_eventq = r->start; | ||
1118 | } | ||
1119 | |||
1120 | if (!use_dma || | ||
1121 | dma_rx_chan == SPI_NO_RESOURCE || | ||
1122 | dma_tx_chan == SPI_NO_RESOURCE || | ||
1123 | dma_eventq == SPI_NO_RESOURCE) { | ||
1124 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; | ||
1125 | use_dma = 0; | ||
1126 | } else { | ||
1127 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; | ||
1128 | davinci_spi->dma_channels = kzalloc(master->num_chipselect | ||
1129 | * sizeof(struct davinci_spi_dma), GFP_KERNEL); | ||
1130 | if (davinci_spi->dma_channels == NULL) { | ||
1131 | ret = -ENOMEM; | ||
1132 | goto free_clk; | ||
1133 | } | ||
1134 | |||
1135 | for (i = 0; i < master->num_chipselect; i++) { | ||
1136 | davinci_spi->dma_channels[i].dma_rx_channel = -1; | ||
1137 | davinci_spi->dma_channels[i].dma_rx_sync_dev = | ||
1138 | dma_rx_chan; | ||
1139 | davinci_spi->dma_channels[i].dma_tx_channel = -1; | ||
1140 | davinci_spi->dma_channels[i].dma_tx_sync_dev = | ||
1141 | dma_tx_chan; | ||
1142 | davinci_spi->dma_channels[i].eventq = dma_eventq; | ||
1143 | } | ||
1144 | dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" | ||
1145 | "Using RX channel = %d, TX channel = %d and " | ||
1146 | "event queue = %d\n", dma_rx_chan, dma_tx_chan, | ||
1147 | dma_eventq); | ||
1148 | } | ||
1149 | |||
1150 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | ||
1151 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | ||
1152 | |||
1153 | init_completion(&davinci_spi->done); | ||
1154 | |||
1155 | /* Reset In/OUT SPI module */ | ||
1156 | iowrite32(0, davinci_spi->base + SPIGCR0); | ||
1157 | udelay(100); | ||
1158 | iowrite32(1, davinci_spi->base + SPIGCR0); | ||
1159 | |||
1160 | /* Clock internal */ | ||
1161 | if (davinci_spi->pdata->clk_internal) | ||
1162 | set_io_bits(davinci_spi->base + SPIGCR1, | ||
1163 | SPIGCR1_CLKMOD_MASK); | ||
1164 | else | ||
1165 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
1166 | SPIGCR1_CLKMOD_MASK); | ||
1167 | |||
1168 | /* master mode default */ | ||
1169 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); | ||
1170 | |||
1171 | if (davinci_spi->pdata->intr_level) | ||
1172 | iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); | ||
1173 | else | ||
1174 | iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); | ||
1175 | |||
1176 | ret = spi_bitbang_start(&davinci_spi->bitbang); | ||
1177 | if (ret) | ||
1178 | goto free_clk; | ||
1179 | |||
1180 | dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base); | ||
1181 | |||
1182 | if (!pdata->poll_mode) | ||
1183 | dev_info(&pdev->dev, "Operating in interrupt mode" | ||
1184 | " using IRQ %d\n", davinci_spi->irq); | ||
1185 | |||
1186 | return ret; | ||
1187 | |||
1188 | free_clk: | ||
1189 | clk_disable(davinci_spi->clk); | ||
1190 | clk_put(davinci_spi->clk); | ||
1191 | put_master: | ||
1192 | spi_master_put(master); | ||
1193 | free_tmp_buf: | ||
1194 | kfree(davinci_spi->tmp_buf); | ||
1195 | irq_free: | ||
1196 | free_irq(davinci_spi->irq, davinci_spi); | ||
1197 | unmap_io: | ||
1198 | iounmap(davinci_spi->base); | ||
1199 | release_region: | ||
1200 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | ||
1201 | free_master: | ||
1202 | kfree(master); | ||
1203 | err: | ||
1204 | return ret; | ||
1205 | } | ||
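/*
 * Editor's note -- illustrative sketch, not part of the patch: the probe
 * above consumes davinci_spi_platform_data supplied by the board file.
 * Only fields actually read by this driver are shown; the values are
 * hypothetical examples, not recommendations.
 */
static struct davinci_spi_platform_data example_spi_pdata = {
	.version	= SPI_VERSION_2,
	.num_chipselect	= 2,
	.use_dma	= 1,
	.clk_internal	= 1,	/* SPIGCR1 CLKMOD: clock generated internally */
	.intr_level	= 0,	/* route interrupts to level 0 via SPILVL */
	.poll_mode	= 0,	/* RX-only PIO path uses interrupts, not polling */
	.cs_hold	= 1,	/* written to the SPIDAT1 CSHOLD field */
	.c2tdelay	= 8,	/* chip-select-to-transmit delay (SPIDELAY) */
	.t2cdelay	= 8,	/* transmit-to-chip-select delay (SPIDELAY) */
	.wdelay		= 0,	/* SPIFMTn WDELAY, version 2 only */
};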
1206 | |||
1207 | /** | ||
1208 | * davinci_spi_remove - remove function for SPI Master Controller | ||
1209 | * @pdev: platform_device structure which contains platform-specific data | ||
1210 | * | ||
1211 | * This function will do the reverse of what davinci_spi_probe does. | ||
1212 | * It will free the IRQ and the SPI controller's memory region. | ||
1213 | * It will also call spi_bitbang_stop to destroy the work queue which was | ||
1214 | * created by spi_bitbang_start. | ||
1215 | */ | ||
1216 | static int __exit davinci_spi_remove(struct platform_device *pdev) | ||
1217 | { | ||
1218 | struct davinci_spi *davinci_spi; | ||
1219 | struct spi_master *master; | ||
1220 | |||
1221 | master = dev_get_drvdata(&pdev->dev); | ||
1222 | davinci_spi = spi_master_get_devdata(master); | ||
1223 | |||
1224 | spi_bitbang_stop(&davinci_spi->bitbang); | ||
1225 | |||
1226 | clk_disable(davinci_spi->clk); | ||
1227 | clk_put(davinci_spi->clk); | ||
1228 | spi_master_put(master); | ||
1229 | kfree(davinci_spi->tmp_buf); | ||
1230 | free_irq(davinci_spi->irq, davinci_spi); | ||
1231 | iounmap(davinci_spi->base); | ||
1232 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | static struct platform_driver davinci_spi_driver = { | ||
1238 | .driver.name = "spi_davinci", | ||
1239 | .remove = __exit_p(davinci_spi_remove), | ||
1240 | }; | ||
1241 | |||
1242 | static int __init davinci_spi_init(void) | ||
1243 | { | ||
1244 | return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); | ||
1245 | } | ||
1246 | module_init(davinci_spi_init); | ||
1247 | |||
1248 | static void __exit davinci_spi_exit(void) | ||
1249 | { | ||
1250 | platform_driver_unregister(&davinci_spi_driver); | ||
1251 | } | ||
1252 | module_exit(davinci_spi_exit); | ||
1253 | |||
1254 | MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); | ||
1255 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 31620fae77be..8ed38f1d6c18 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c | |||
@@ -152,6 +152,7 @@ static void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
152 | #else | 152 | #else |
153 | static inline int mrst_spi_debugfs_init(struct dw_spi *dws) | 153 | static inline int mrst_spi_debugfs_init(struct dw_spi *dws) |
154 | { | 154 | { |
155 | return 0; | ||
155 | } | 156 | } |
156 | 157 | ||
157 | static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | 158 | static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) |
@@ -161,14 +162,14 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
161 | 162 | ||
162 | static void wait_till_not_busy(struct dw_spi *dws) | 163 | static void wait_till_not_busy(struct dw_spi *dws) |
163 | { | 164 | { |
164 | unsigned long end = jiffies + usecs_to_jiffies(1000); | 165 | unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); |
165 | 166 | ||
166 | while (time_before(jiffies, end)) { | 167 | while (time_before(jiffies, end)) { |
167 | if (!(dw_readw(dws, sr) & SR_BUSY)) | 168 | if (!(dw_readw(dws, sr) & SR_BUSY)) |
168 | return; | 169 | return; |
169 | } | 170 | } |
170 | dev_err(&dws->master->dev, | 171 | dev_err(&dws->master->dev, |
171 | "DW SPI: Stutus keeps busy for 1000us after a read/write!\n"); | 172 | "DW SPI: Status keeps busy for 1000us after a read/write!\n"); |
172 | } | 173 | } |
173 | 174 | ||
174 | static void flush(struct dw_spi *dws) | 175 | static void flush(struct dw_spi *dws) |
@@ -358,6 +359,8 @@ static void transfer_complete(struct dw_spi *dws) | |||
358 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | 359 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) |
359 | { | 360 | { |
360 | u16 irq_status, irq_mask = 0x3f; | 361 | u16 irq_status, irq_mask = 0x3f; |
362 | u32 int_level = dws->fifo_len / 2; | ||
363 | u32 left; | ||
361 | 364 | ||
362 | irq_status = dw_readw(dws, isr) & irq_mask; | 365 | irq_status = dw_readw(dws, isr) & irq_mask; |
363 | /* Error handling */ | 366 | /* Error handling */ |
@@ -369,22 +372,23 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
369 | return IRQ_HANDLED; | 372 | return IRQ_HANDLED; |
370 | } | 373 | } |
371 | 374 | ||
372 | /* INT comes from tx */ | 375 | if (irq_status & SPI_INT_TXEI) { |
373 | if (dws->tx && (irq_status & SPI_INT_TXEI)) { | 376 | spi_mask_intr(dws, SPI_INT_TXEI); |
374 | while (dws->tx < dws->tx_end) | 377 | |
378 | left = (dws->tx_end - dws->tx) / dws->n_bytes; | ||
379 | left = (left > int_level) ? int_level : left; | ||
380 | |||
381 | while (left--) | ||
375 | dws->write(dws); | 382 | dws->write(dws); |
383 | dws->read(dws); | ||
376 | 384 | ||
377 | if (dws->tx == dws->tx_end) { | 385 | /* Re-enable the IRQ if there is still data left to tx */ |
378 | spi_mask_intr(dws, SPI_INT_TXEI); | 386 | if (dws->tx_end > dws->tx) |
387 | spi_umask_intr(dws, SPI_INT_TXEI); | ||
388 | else | ||
379 | transfer_complete(dws); | 389 | transfer_complete(dws); |
380 | } | ||
381 | } | 390 | } |
382 | 391 | ||
383 | /* INT comes from rx */ | ||
384 | if (dws->rx && (irq_status & SPI_INT_RXFI)) { | ||
385 | if (dws->read(dws)) | ||
386 | transfer_complete(dws); | ||
387 | } | ||
388 | return IRQ_HANDLED; | 392 | return IRQ_HANDLED; |
389 | } | 393 | } |
390 | 394 | ||
@@ -404,12 +408,9 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) | |||
404 | /* Must be called inside pump_transfers() */ | 408 | /* Must be called inside pump_transfers() */ |
405 | static void poll_transfer(struct dw_spi *dws) | 409 | static void poll_transfer(struct dw_spi *dws) |
406 | { | 410 | { |
407 | if (dws->tx) { | 411 | while (dws->write(dws)) |
408 | while (dws->write(dws)) | 412 | dws->read(dws); |
409 | dws->read(dws); | ||
410 | } | ||
411 | 413 | ||
412 | dws->read(dws); | ||
413 | transfer_complete(dws); | 414 | transfer_complete(dws); |
414 | } | 415 | } |
415 | 416 | ||
@@ -428,6 +429,7 @@ static void pump_transfers(unsigned long data) | |||
428 | u8 bits = 0; | 429 | u8 bits = 0; |
429 | u8 imask = 0; | 430 | u8 imask = 0; |
430 | u8 cs_change = 0; | 431 | u8 cs_change = 0; |
432 | u16 txint_level = 0; | ||
431 | u16 clk_div = 0; | 433 | u16 clk_div = 0; |
432 | u32 speed = 0; | 434 | u32 speed = 0; |
433 | u32 cr0 = 0; | 435 | u32 cr0 = 0; |
@@ -438,6 +440,9 @@ static void pump_transfers(unsigned long data) | |||
438 | chip = dws->cur_chip; | 440 | chip = dws->cur_chip; |
439 | spi = message->spi; | 441 | spi = message->spi; |
440 | 442 | ||
443 | if (unlikely(!chip->clk_div)) | ||
444 | chip->clk_div = dws->max_freq / chip->speed_hz; | ||
445 | |||
441 | if (message->state == ERROR_STATE) { | 446 | if (message->state == ERROR_STATE) { |
442 | message->status = -EIO; | 447 | message->status = -EIO; |
443 | goto early_exit; | 448 | goto early_exit; |
@@ -492,7 +497,7 @@ static void pump_transfers(unsigned long data) | |||
492 | 497 | ||
493 | /* clk_div doesn't support odd number */ | 498 | /* clk_div doesn't support odd number */ |
494 | clk_div = dws->max_freq / speed; | 499 | clk_div = dws->max_freq / speed; |
495 | clk_div = (clk_div >> 1) << 1; | 500 | clk_div = (clk_div + 1) & 0xfffe; |
496 | 501 | ||
497 | chip->speed_hz = speed; | 502 | chip->speed_hz = speed; |
498 | chip->clk_div = clk_div; | 503 | chip->clk_div = clk_div; |
@@ -532,14 +537,35 @@ static void pump_transfers(unsigned long data) | |||
532 | } | 537 | } |
533 | message->state = RUNNING_STATE; | 538 | message->state = RUNNING_STATE; |
534 | 539 | ||
540 | /* | ||
541 | * Adjust transfer mode if necessary. Requires platform dependent | ||
542 | * chipselect mechanism. | ||
543 | */ | ||
544 | if (dws->cs_control) { | ||
545 | if (dws->rx && dws->tx) | ||
546 | chip->tmode = 0x00; | ||
547 | else if (dws->rx) | ||
548 | chip->tmode = 0x02; | ||
549 | else | ||
550 | chip->tmode = 0x01; | ||
551 | |||
552 | cr0 &= ~(0x3 << SPI_MODE_OFFSET); | ||
553 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); | ||
554 | } | ||
555 | |||
535 | /* Check if current transfer is a DMA transaction */ | 556 | /* Check if current transfer is a DMA transaction */ |
536 | dws->dma_mapped = map_dma_buffers(dws); | 557 | dws->dma_mapped = map_dma_buffers(dws); |
537 | 558 | ||
559 | /* | ||
560 | * Interrupt mode | ||
561 | * we only need to set the TXEI IRQ, as TX/RX always happen synchronously | ||
562 | */ | ||
538 | if (!dws->dma_mapped && !chip->poll_mode) { | 563 | if (!dws->dma_mapped && !chip->poll_mode) { |
539 | if (dws->rx) | 564 | int templen = dws->len / dws->n_bytes; |
540 | imask |= SPI_INT_RXFI; | 565 | txint_level = dws->fifo_len / 2; |
541 | if (dws->tx) | 566 | txint_level = (templen > txint_level) ? txint_level : templen; |
542 | imask |= SPI_INT_TXEI; | 567 | |
568 | imask |= SPI_INT_TXEI; | ||
543 | dws->transfer_handler = interrupt_transfer; | 569 | dws->transfer_handler = interrupt_transfer; |
544 | } | 570 | } |
545 | 571 | ||
@@ -549,21 +575,23 @@ static void pump_transfers(unsigned long data) | |||
549 | * 2. clk_div is changed | 575 | * 2. clk_div is changed |
550 | * 3. control value changes | 576 | * 3. control value changes |
551 | */ | 577 | */ |
552 | if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) { | 578 | if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { |
553 | spi_enable_chip(dws, 0); | 579 | spi_enable_chip(dws, 0); |
554 | 580 | ||
555 | if (dw_readw(dws, ctrl0) != cr0) | 581 | if (dw_readw(dws, ctrl0) != cr0) |
556 | dw_writew(dws, ctrl0, cr0); | 582 | dw_writew(dws, ctrl0, cr0); |
557 | 583 | ||
584 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
585 | spi_chip_sel(dws, spi->chip_select); | ||
586 | |||
558 | /* Set the interrupt mask, for poll mode just disable all int */ | 587 | /* Set the interrupt mask, for poll mode just disable all int */ |
559 | spi_mask_intr(dws, 0xff); | 588 | spi_mask_intr(dws, 0xff); |
560 | if (!chip->poll_mode) | 589 | if (imask) |
561 | spi_umask_intr(dws, imask); | 590 | spi_umask_intr(dws, imask); |
591 | if (txint_level) | ||
592 | dw_writew(dws, txfltr, txint_level); | ||
562 | 593 | ||
563 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
564 | spi_chip_sel(dws, spi->chip_select); | ||
565 | spi_enable_chip(dws, 1); | 594 | spi_enable_chip(dws, 1); |
566 | |||
567 | if (cs_change) | 595 | if (cs_change) |
568 | dws->prev_chip = chip; | 596 | dws->prev_chip = chip; |
569 | } | 597 | } |
@@ -712,11 +740,11 @@ static int dw_spi_setup(struct spi_device *spi) | |||
712 | } | 740 | } |
713 | chip->bits_per_word = spi->bits_per_word; | 741 | chip->bits_per_word = spi->bits_per_word; |
714 | 742 | ||
743 | if (!spi->max_speed_hz) { | ||
744 | dev_err(&spi->dev, "No max speed HZ parameter\n"); | ||
745 | return -EINVAL; | ||
746 | } | ||
715 | chip->speed_hz = spi->max_speed_hz; | 747 | chip->speed_hz = spi->max_speed_hz; |
716 | if (chip->speed_hz) | ||
717 | chip->clk_div = 25000000 / chip->speed_hz; | ||
718 | else | ||
719 | chip->clk_div = 8; /* default value */ | ||
720 | 748 | ||
721 | chip->tmode = 0; /* Tx & Rx */ | 749 | chip->tmode = 0; /* Tx & Rx */ |
722 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ | 750 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ |
@@ -735,7 +763,7 @@ static void dw_spi_cleanup(struct spi_device *spi) | |||
735 | kfree(chip); | 763 | kfree(chip); |
736 | } | 764 | } |
737 | 765 | ||
738 | static int __init init_queue(struct dw_spi *dws) | 766 | static int __devinit init_queue(struct dw_spi *dws) |
739 | { | 767 | { |
740 | INIT_LIST_HEAD(&dws->queue); | 768 | INIT_LIST_HEAD(&dws->queue); |
741 | spin_lock_init(&dws->lock); | 769 | spin_lock_init(&dws->lock); |
@@ -817,6 +845,22 @@ static void spi_hw_init(struct dw_spi *dws) | |||
817 | spi_mask_intr(dws, 0xff); | 845 | spi_mask_intr(dws, 0xff); |
818 | spi_enable_chip(dws, 1); | 846 | spi_enable_chip(dws, 1); |
819 | flush(dws); | 847 | flush(dws); |
848 | |||
849 | /* | ||
850 | * Try to detect the FIFO depth if not set by the interface driver; | ||
851 | * the HW spec allows a depth from 2 to 256 | ||
852 | */ | ||
853 | if (!dws->fifo_len) { | ||
854 | u32 fifo; | ||
855 | for (fifo = 2; fifo <= 257; fifo++) { | ||
856 | dw_writew(dws, txfltr, fifo); | ||
857 | if (fifo != dw_readw(dws, txfltr)) | ||
858 | break; | ||
859 | } | ||
860 | |||
861 | dws->fifo_len = (fifo == 257) ? 0 : fifo; | ||
862 | dw_writew(dws, txfltr, 0); | ||
863 | } | ||
820 | } | 864 | } |
821 | 865 | ||
822 | int __devinit dw_spi_add_host(struct dw_spi *dws) | 866 | int __devinit dw_spi_add_host(struct dw_spi *dws) |
@@ -913,6 +957,7 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws) | |||
913 | /* Disconnect from the SPI framework */ | 957 | /* Disconnect from the SPI framework */ |
914 | spi_unregister_master(dws->master); | 958 | spi_unregister_master(dws->master); |
915 | } | 959 | } |
960 | EXPORT_SYMBOL(dw_spi_remove_host); | ||
916 | 961 | ||
917 | int dw_spi_suspend_host(struct dw_spi *dws) | 962 | int dw_spi_suspend_host(struct dw_spi *dws) |
918 | { | 963 | { |
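
A quick illustration of two of the dw_spi.c changes above: the new divider rounding (clk_div + 1) & 0xfffe rounds an odd divider up to the next even value so the generated clock never exceeds the requested speed, and the probe loop added to spi_hw_init() keeps writing larger values to the TX FIFO threshold register until one no longer reads back, which reveals the FIFO depth. The plain-C sketch below only mimics that behaviour; the simulated register and helper names are illustrative, not part of the driver.

/*
 * Plain-C sketch (not the driver): even-divider rounding and the FIFO
 * depth probe.  The txfltr register is simulated here; the real driver
 * uses dw_readw()/dw_writew() on the hardware register.
 */
#include <stdio.h>

static unsigned even_clk_div(unsigned max_freq, unsigned speed)
{
	unsigned div = max_freq / speed;

	return (div + 1) & 0xfffe;	/* round odd up, keep even */
}

/* Simulated TX FIFO threshold register: valid values are 0..depth-1. */
static unsigned sim_depth = 40;
static unsigned sim_txfltr;

static void write_txfltr(unsigned v)
{
	if (v < sim_depth)
		sim_txfltr = v;		/* out-of-range writes do not stick */
}

static unsigned read_txfltr(void)
{
	return sim_txfltr;
}

static unsigned detect_fifo_len(void)
{
	unsigned fifo;

	for (fifo = 2; fifo <= 257; fifo++) {
		write_txfltr(fifo);
		if (fifo != read_txfltr())
			break;		/* first value that did not stick */
	}
	write_txfltr(0);
	return (fifo == 257) ? 0 : fifo;
}

int main(void)
{
	/* 25 MHz input, 5 MHz requested: divider 5 becomes 6 (4.17 MHz)
	 * instead of being truncated to 4 (6.25 MHz, too fast). */
	printf("clk_div  = %u\n", even_clk_div(25000000, 5000000));
	printf("fifo_len = %u\n", detect_fifo_len());
	return 0;
}
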
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c new file mode 100644 index 000000000000..e35b45ac5174 --- /dev/null +++ b/drivers/spi/dw_spi_mmio.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core | ||
3 | * | ||
4 | * Copyright (c) 2010, Octasic semiconductor. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/spi/dw_spi.h> | ||
15 | #include <linux/spi/spi.h> | ||
16 | |||
17 | #define DRIVER_NAME "dw_spi_mmio" | ||
18 | |||
19 | struct dw_spi_mmio { | ||
20 | struct dw_spi dws; | ||
21 | struct clk *clk; | ||
22 | }; | ||
23 | |||
24 | static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) | ||
25 | { | ||
26 | struct dw_spi_mmio *dwsmmio; | ||
27 | struct dw_spi *dws; | ||
28 | struct resource *mem, *ioarea; | ||
29 | int ret; | ||
30 | |||
31 | dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); | ||
32 | if (!dwsmmio) { | ||
33 | ret = -ENOMEM; | ||
34 | goto err_end; | ||
35 | } | ||
36 | |||
37 | dws = &dwsmmio->dws; | ||
38 | |||
39 | /* Get basic io resource and map it */ | ||
40 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
41 | if (!mem) { | ||
42 | dev_err(&pdev->dev, "no mem resource?\n"); | ||
43 | ret = -EINVAL; | ||
44 | goto err_kfree; | ||
45 | } | ||
46 | |||
47 | ioarea = request_mem_region(mem->start, resource_size(mem), | ||
48 | pdev->name); | ||
49 | if (!ioarea) { | ||
50 | dev_err(&pdev->dev, "SPI region already claimed\n"); | ||
51 | ret = -EBUSY; | ||
52 | goto err_kfree; | ||
53 | } | ||
54 | |||
55 | dws->regs = ioremap_nocache(mem->start, resource_size(mem)); | ||
56 | if (!dws->regs) { | ||
57 | dev_err(&pdev->dev, "SPI region already mapped\n"); | ||
58 | ret = -ENOMEM; | ||
59 | goto err_release_reg; | ||
60 | } | ||
61 | |||
62 | dws->irq = platform_get_irq(pdev, 0); | ||
63 | if (dws->irq < 0) { | ||
64 | dev_err(&pdev->dev, "no irq resource?\n"); | ||
65 | ret = dws->irq; /* -ENXIO */ | ||
66 | goto err_unmap; | ||
67 | } | ||
68 | |||
69 | dwsmmio->clk = clk_get(&pdev->dev, NULL); | ||
70 | if (!dwsmmio->clk) { | ||
71 | ret = -ENODEV; | ||
72 | goto err_irq; | ||
73 | } | ||
74 | clk_enable(dwsmmio->clk); | ||
75 | |||
76 | dws->parent_dev = &pdev->dev; | ||
77 | dws->bus_num = 0; | ||
78 | dws->num_cs = 4; | ||
79 | dws->max_freq = clk_get_rate(dwsmmio->clk); | ||
80 | |||
81 | ret = dw_spi_add_host(dws); | ||
82 | if (ret) | ||
83 | goto err_clk; | ||
84 | |||
85 | platform_set_drvdata(pdev, dwsmmio); | ||
86 | return 0; | ||
87 | |||
88 | err_clk: | ||
89 | clk_disable(dwsmmio->clk); | ||
90 | clk_put(dwsmmio->clk); | ||
91 | dwsmmio->clk = NULL; | ||
92 | err_irq: | ||
93 | free_irq(dws->irq, dws); | ||
94 | err_unmap: | ||
95 | iounmap(dws->regs); | ||
96 | err_release_reg: | ||
97 | release_mem_region(mem->start, resource_size(mem)); | ||
98 | err_kfree: | ||
99 | kfree(dwsmmio); | ||
100 | err_end: | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) | ||
105 | { | ||
106 | struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); | ||
107 | struct resource *mem; | ||
108 | |||
109 | platform_set_drvdata(pdev, NULL); | ||
110 | |||
111 | clk_disable(dwsmmio->clk); | ||
112 | clk_put(dwsmmio->clk); | ||
113 | dwsmmio->clk = NULL; | ||
114 | |||
115 | free_irq(dwsmmio->dws.irq, &dwsmmio->dws); | ||
116 | dw_spi_remove_host(&dwsmmio->dws); | ||
117 | iounmap(dwsmmio->dws.regs); | ||
118 | kfree(dwsmmio); | ||
119 | |||
120 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
121 | release_mem_region(mem->start, resource_size(mem)); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static struct platform_driver dw_spi_mmio_driver = { | ||
126 | .remove = __devexit_p(dw_spi_mmio_remove), | ||
127 | .driver = { | ||
128 | .name = DRIVER_NAME, | ||
129 | .owner = THIS_MODULE, | ||
130 | }, | ||
131 | }; | ||
132 | |||
133 | static int __init dw_spi_mmio_init(void) | ||
134 | { | ||
135 | return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); | ||
136 | } | ||
137 | module_init(dw_spi_mmio_init); | ||
138 | |||
139 | static void __exit dw_spi_mmio_exit(void) | ||
140 | { | ||
141 | platform_driver_unregister(&dw_spi_mmio_driver); | ||
142 | } | ||
143 | module_exit(dw_spi_mmio_exit); | ||
144 | |||
145 | MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>"); | ||
146 | MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); | ||
147 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c index 34ba69161734..1f0735f9cc76 100644 --- a/drivers/spi/dw_spi_pci.c +++ b/drivers/spi/dw_spi_pci.c | |||
@@ -73,6 +73,7 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev, | |||
73 | dws->num_cs = 4; | 73 | dws->num_cs = 4; |
74 | dws->max_freq = 25000000; /* for Moorestown */ | 74 | dws->max_freq = 25000000; /* for Moorestown */ |
75 | dws->irq = pdev->irq; | 75 | dws->irq = pdev->irq; |
76 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | ||
76 | 77 | ||
77 | ret = dw_spi_add_host(dws); | 78 | ret = dw_spi_add_host(dws); |
78 | if (ret) | 79 | if (ret) |
@@ -98,6 +99,7 @@ static void __devexit spi_pci_remove(struct pci_dev *pdev) | |||
98 | struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); | 99 | struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); |
99 | 100 | ||
100 | pci_set_drvdata(pdev, NULL); | 101 | pci_set_drvdata(pdev, NULL); |
102 | dw_spi_remove_host(&dwpci->dws); | ||
101 | iounmap(dwpci->dws.regs); | 103 | iounmap(dwpci->dws.regs); |
102 | pci_release_region(pdev, 0); | 104 | pci_release_region(pdev, 0); |
103 | kfree(dwpci); | 105 | kfree(dwpci); |
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index f50c81df336a..04747868d6c4 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -503,7 +503,7 @@ static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) | |||
503 | return mpc52xx_psc_spi_do_remove(&op->dev); | 503 | return mpc52xx_psc_spi_do_remove(&op->dev); |
504 | } | 504 | } |
505 | 505 | ||
506 | static struct of_device_id mpc52xx_psc_spi_of_match[] = { | 506 | static const struct of_device_id mpc52xx_psc_spi_of_match[] = { |
507 | { .compatible = "fsl,mpc5200-psc-spi", }, | 507 | { .compatible = "fsl,mpc5200-psc-spi", }, |
508 | { .compatible = "mpc5200-psc-spi", }, /* old */ | 508 | { .compatible = "mpc5200-psc-spi", }, /* old */ |
509 | {} | 509 | {} |
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index 45bfe6458173..6eab46537a0a 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c | |||
@@ -550,7 +550,7 @@ static int __devexit mpc52xx_spi_remove(struct of_device *op) | |||
550 | return 0; | 550 | return 0; |
551 | } | 551 | } |
552 | 552 | ||
553 | static struct of_device_id mpc52xx_spi_match[] __devinitdata = { | 553 | static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { |
554 | { .compatible = "fsl,mpc5200-spi", }, | 554 | { .compatible = "fsl,mpc5200-spi", }, |
555 | {} | 555 | {} |
556 | }; | 556 | }; |
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 1893f1e96dc4..0ddbbe45e834 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c | |||
@@ -469,7 +469,7 @@ static int spi_imx_setup(struct spi_device *spi) | |||
469 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); | 469 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); |
470 | int gpio = spi_imx->chipselect[spi->chip_select]; | 470 | int gpio = spi_imx->chipselect[spi->chip_select]; |
471 | 471 | ||
472 | pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, | 472 | dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, |
473 | spi->mode, spi->bits_per_word, spi->max_speed_hz); | 473 | spi->mode, spi->bits_per_word, spi->max_speed_hz); |
474 | 474 | ||
475 | if (gpio >= 0) | 475 | if (gpio >= 0) |
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index 1fb2a6ea328c..4f0cc9d457e0 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c | |||
@@ -365,7 +365,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
365 | 365 | ||
366 | if ((mpc8xxx_spi->spibrg / hz) > 64) { | 366 | if ((mpc8xxx_spi->spibrg / hz) > 64) { |
367 | cs->hw_mode |= SPMODE_DIV16; | 367 | cs->hw_mode |= SPMODE_DIV16; |
368 | pm = mpc8xxx_spi->spibrg / (hz * 64); | 368 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; |
369 | 369 | ||
370 | WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " | 370 | WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " |
371 | "Will use %d Hz instead.\n", dev_name(&spi->dev), | 371 | "Will use %d Hz instead.\n", dev_name(&spi->dev), |
@@ -373,7 +373,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
373 | if (pm > 16) | 373 | if (pm > 16) |
374 | pm = 16; | 374 | pm = 16; |
375 | } else | 375 | } else |
376 | pm = mpc8xxx_spi->spibrg / (hz * 4); | 376 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; |
377 | if (pm) | 377 | if (pm) |
378 | pm--; | 378 | pm--; |
379 | 379 | ||
@@ -1328,7 +1328,7 @@ static struct of_platform_driver of_mpc8xxx_spi_driver = { | |||
1328 | static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) | 1328 | static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) |
1329 | { | 1329 | { |
1330 | struct resource *mem; | 1330 | struct resource *mem; |
1331 | unsigned int irq; | 1331 | int irq; |
1332 | struct spi_master *master; | 1332 | struct spi_master *master; |
1333 | 1333 | ||
1334 | if (!pdev->dev.platform_data) | 1334 | if (!pdev->dev.platform_data) |
@@ -1339,7 +1339,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) | |||
1339 | return -EINVAL; | 1339 | return -EINVAL; |
1340 | 1340 | ||
1341 | irq = platform_get_irq(pdev, 0); | 1341 | irq = platform_get_irq(pdev, 0); |
1342 | if (!irq) | 1342 | if (irq <= 0) |
1343 | return -EINVAL; | 1343 | return -EINVAL; |
1344 | 1344 | ||
1345 | master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); | 1345 | master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); |
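
For the spi_mpc8xxx.c prescaler hunks above, the change from spibrg / (hz * N) to (spibrg - 1) / (hz * N) + 1 is a switch from floor to ceiling division, so the derived SPI clock can only land at or below the requested rate. A small stand-alone C example of the effect; the numbers are made up, and the effective-rate formula is approximate (it ignores the later pm-- bookkeeping).

#include <stdio.h>

/* Ceiling division in the same form as the patch; requires a > 0. */
static unsigned ceil_div(unsigned a, unsigned b)
{
	return (a - 1) / b + 1;
}

int main(void)
{
	unsigned spibrg = 66000000;	/* example bus clock, 66 MHz */
	unsigned hz = 10000000;		/* requested SPI clock, 10 MHz */

	unsigned pm_floor = spibrg / (hz * 4);		/* old: 1 */
	unsigned pm_ceil  = ceil_div(spibrg, hz * 4);	/* new: 2 */

	/* old: 16.5 MHz, above the request; new: 8.25 MHz, within it */
	printf("floor: pm=%u -> %u Hz\n", pm_floor, spibrg / (4 * pm_floor));
	printf("ceil : pm=%u -> %u Hz\n", pm_ceil, spibrg / (4 * pm_ceil));
	return 0;
}
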
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index 140a18d6cf3e..6d8d4026a07a 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c | |||
@@ -578,7 +578,7 @@ static int __exit spi_ppc4xx_of_remove(struct of_device *op) | |||
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static struct of_device_id spi_ppc4xx_of_match[] = { | 581 | static const struct of_device_id spi_ppc4xx_of_match[] = { |
582 | { .compatible = "ibm,ppc4xx-spi", }, | 582 | { .compatible = "ibm,ppc4xx-spi", }, |
583 | {}, | 583 | {}, |
584 | }; | 584 | }; |
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index 88a456dba967..97365815a729 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
29 | 29 | ||
30 | #include <mach/dma.h> | 30 | #include <mach/dma.h> |
31 | #include <plat/spi.h> | 31 | #include <plat/s3c64xx-spi.h> |
32 | 32 | ||
33 | /* Registers and bit-fields */ | 33 | /* Registers and bit-fields */ |
34 | 34 | ||
@@ -137,6 +137,7 @@ | |||
137 | /** | 137 | /** |
138 | * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. | 138 | * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. |
139 | * @clk: Pointer to the spi clock. | 139 | * @clk: Pointer to the spi clock. |
140 | * @src_clk: Pointer to the clock used to generate SPI signals. | ||
140 | * @master: Pointer to the SPI Protocol master. | 141 | * @master: Pointer to the SPI Protocol master. |
141 | * @workqueue: Work queue for the SPI xfer requests. | 142 | * @workqueue: Work queue for the SPI xfer requests. |
142 | * @cntrlr_info: Platform specific data for the controller this driver manages. | 143 | * @cntrlr_info: Platform specific data for the controller this driver manages. |
@@ -157,10 +158,11 @@ | |||
157 | struct s3c64xx_spi_driver_data { | 158 | struct s3c64xx_spi_driver_data { |
158 | void __iomem *regs; | 159 | void __iomem *regs; |
159 | struct clk *clk; | 160 | struct clk *clk; |
161 | struct clk *src_clk; | ||
160 | struct platform_device *pdev; | 162 | struct platform_device *pdev; |
161 | struct spi_master *master; | 163 | struct spi_master *master; |
162 | struct workqueue_struct *workqueue; | 164 | struct workqueue_struct *workqueue; |
163 | struct s3c64xx_spi_cntrlr_info *cntrlr_info; | 165 | struct s3c64xx_spi_info *cntrlr_info; |
164 | struct spi_device *tgl_spi; | 166 | struct spi_device *tgl_spi; |
165 | struct work_struct work; | 167 | struct work_struct work; |
166 | struct list_head queue; | 168 | struct list_head queue; |
@@ -180,7 +182,7 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = { | |||
180 | 182 | ||
181 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) | 183 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) |
182 | { | 184 | { |
183 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 185 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
184 | void __iomem *regs = sdd->regs; | 186 | void __iomem *regs = sdd->regs; |
185 | unsigned long loops; | 187 | unsigned long loops; |
186 | u32 val; | 188 | u32 val; |
@@ -225,7 +227,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, | |||
225 | struct spi_device *spi, | 227 | struct spi_device *spi, |
226 | struct spi_transfer *xfer, int dma_mode) | 228 | struct spi_transfer *xfer, int dma_mode) |
227 | { | 229 | { |
228 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 230 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
229 | void __iomem *regs = sdd->regs; | 231 | void __iomem *regs = sdd->regs; |
230 | u32 modecfg, chcfg; | 232 | u32 modecfg, chcfg; |
231 | 233 | ||
@@ -298,19 +300,20 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, | |||
298 | if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ | 300 | if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ |
299 | /* Deselect the last toggled device */ | 301 | /* Deselect the last toggled device */ |
300 | cs = sdd->tgl_spi->controller_data; | 302 | cs = sdd->tgl_spi->controller_data; |
301 | cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); | 303 | cs->set_level(cs->line, |
304 | spi->mode & SPI_CS_HIGH ? 0 : 1); | ||
302 | } | 305 | } |
303 | sdd->tgl_spi = NULL; | 306 | sdd->tgl_spi = NULL; |
304 | } | 307 | } |
305 | 308 | ||
306 | cs = spi->controller_data; | 309 | cs = spi->controller_data; |
307 | cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0); | 310 | cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); |
308 | } | 311 | } |
309 | 312 | ||
310 | static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, | 313 | static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, |
311 | struct spi_transfer *xfer, int dma_mode) | 314 | struct spi_transfer *xfer, int dma_mode) |
312 | { | 315 | { |
313 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 316 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
314 | void __iomem *regs = sdd->regs; | 317 | void __iomem *regs = sdd->regs; |
315 | unsigned long val; | 318 | unsigned long val; |
316 | int ms; | 319 | int ms; |
@@ -384,12 +387,11 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, | |||
384 | if (sdd->tgl_spi == spi) | 387 | if (sdd->tgl_spi == spi) |
385 | sdd->tgl_spi = NULL; | 388 | sdd->tgl_spi = NULL; |
386 | 389 | ||
387 | cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); | 390 | cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); |
388 | } | 391 | } |
389 | 392 | ||
390 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | 393 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) |
391 | { | 394 | { |
392 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
393 | void __iomem *regs = sdd->regs; | 395 | void __iomem *regs = sdd->regs; |
394 | u32 val; | 396 | u32 val; |
395 | 397 | ||
@@ -435,7 +437,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | |||
435 | /* Configure Clock */ | 437 | /* Configure Clock */ |
436 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | 438 | val = readl(regs + S3C64XX_SPI_CLK_CFG); |
437 | val &= ~S3C64XX_SPI_PSR_MASK; | 439 | val &= ~S3C64XX_SPI_PSR_MASK; |
438 | val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1) | 440 | val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) |
439 | & S3C64XX_SPI_PSR_MASK); | 441 | & S3C64XX_SPI_PSR_MASK); |
440 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | 442 | writel(val, regs + S3C64XX_SPI_CLK_CFG); |
441 | 443 | ||
@@ -558,7 +560,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, | |||
558 | static void handle_msg(struct s3c64xx_spi_driver_data *sdd, | 560 | static void handle_msg(struct s3c64xx_spi_driver_data *sdd, |
559 | struct spi_message *msg) | 561 | struct spi_message *msg) |
560 | { | 562 | { |
561 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 563 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
562 | struct spi_device *spi = msg->spi; | 564 | struct spi_device *spi = msg->spi; |
563 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; | 565 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; |
564 | struct spi_transfer *xfer; | 566 | struct spi_transfer *xfer; |
@@ -632,8 +634,8 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, | |||
632 | S3C64XX_SPI_DEACT(sdd); | 634 | S3C64XX_SPI_DEACT(sdd); |
633 | 635 | ||
634 | if (status) { | 636 | if (status) { |
635 | dev_err(&spi->dev, "I/O Error: \ | 637 | dev_err(&spi->dev, "I/O Error: " |
636 | rx-%d tx-%d res:rx-%c tx-%c len-%d\n", | 638 | "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", |
637 | xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, | 639 | xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, |
638 | (sdd->state & RXBUSY) ? 'f' : 'p', | 640 | (sdd->state & RXBUSY) ? 'f' : 'p', |
639 | (sdd->state & TXBUSY) ? 'f' : 'p', | 641 | (sdd->state & TXBUSY) ? 'f' : 'p', |
@@ -786,7 +788,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
786 | { | 788 | { |
787 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; | 789 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; |
788 | struct s3c64xx_spi_driver_data *sdd; | 790 | struct s3c64xx_spi_driver_data *sdd; |
789 | struct s3c64xx_spi_cntrlr_info *sci; | 791 | struct s3c64xx_spi_info *sci; |
790 | struct spi_message *msg; | 792 | struct spi_message *msg; |
791 | u32 psr, speed; | 793 | u32 psr, speed; |
792 | unsigned long flags; | 794 | unsigned long flags; |
@@ -831,17 +833,17 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
831 | } | 833 | } |
832 | 834 | ||
833 | /* Check if we can provide the requested rate */ | 835 | /* Check if we can provide the requested rate */ |
834 | speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */ | 836 | speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ |
835 | 837 | ||
836 | if (spi->max_speed_hz > speed) | 838 | if (spi->max_speed_hz > speed) |
837 | spi->max_speed_hz = speed; | 839 | spi->max_speed_hz = speed; |
838 | 840 | ||
839 | psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1; | 841 | psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; |
840 | psr &= S3C64XX_SPI_PSR_MASK; | 842 | psr &= S3C64XX_SPI_PSR_MASK; |
841 | if (psr == S3C64XX_SPI_PSR_MASK) | 843 | if (psr == S3C64XX_SPI_PSR_MASK) |
842 | psr--; | 844 | psr--; |
843 | 845 | ||
844 | speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); | 846 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); |
845 | if (spi->max_speed_hz < speed) { | 847 | if (spi->max_speed_hz < speed) { |
846 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { | 848 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { |
847 | psr++; | 849 | psr++; |
@@ -851,7 +853,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
851 | } | 853 | } |
852 | } | 854 | } |
853 | 855 | ||
854 | speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); | 856 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); |
855 | if (spi->max_speed_hz >= speed) | 857 | if (spi->max_speed_hz >= speed) |
856 | spi->max_speed_hz = speed; | 858 | spi->max_speed_hz = speed; |
857 | else | 859 | else |
@@ -867,7 +869,7 @@ setup_exit: | |||
867 | 869 | ||
868 | static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | 870 | static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) |
869 | { | 871 | { |
870 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 872 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
871 | void __iomem *regs = sdd->regs; | 873 | void __iomem *regs = sdd->regs; |
872 | unsigned int val; | 874 | unsigned int val; |
873 | 875 | ||
@@ -902,7 +904,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
902 | { | 904 | { |
903 | struct resource *mem_res, *dmatx_res, *dmarx_res; | 905 | struct resource *mem_res, *dmatx_res, *dmarx_res; |
904 | struct s3c64xx_spi_driver_data *sdd; | 906 | struct s3c64xx_spi_driver_data *sdd; |
905 | struct s3c64xx_spi_cntrlr_info *sci; | 907 | struct s3c64xx_spi_info *sci; |
906 | struct spi_master *master; | 908 | struct spi_master *master; |
907 | int ret; | 909 | int ret; |
908 | 910 | ||
@@ -1000,18 +1002,15 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1000 | goto err4; | 1002 | goto err4; |
1001 | } | 1003 | } |
1002 | 1004 | ||
1003 | if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK) | 1005 | sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); |
1004 | sci->src_clk = sdd->clk; | 1006 | if (IS_ERR(sdd->src_clk)) { |
1005 | else | ||
1006 | sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name); | ||
1007 | if (IS_ERR(sci->src_clk)) { | ||
1008 | dev_err(&pdev->dev, | 1007 | dev_err(&pdev->dev, |
1009 | "Unable to acquire clock '%s'\n", sci->src_clk_name); | 1008 | "Unable to acquire clock '%s'\n", sci->src_clk_name); |
1010 | ret = PTR_ERR(sci->src_clk); | 1009 | ret = PTR_ERR(sdd->src_clk); |
1011 | goto err5; | 1010 | goto err5; |
1012 | } | 1011 | } |
1013 | 1012 | ||
1014 | if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) { | 1013 | if (clk_enable(sdd->src_clk)) { |
1015 | dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", | 1014 | dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", |
1016 | sci->src_clk_name); | 1015 | sci->src_clk_name); |
1017 | ret = -EBUSY; | 1016 | ret = -EBUSY; |
@@ -1040,11 +1039,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1040 | goto err8; | 1039 | goto err8; |
1041 | } | 1040 | } |
1042 | 1041 | ||
1043 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \ | 1042 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " |
1044 | with %d Slaves attached\n", | 1043 | "with %d Slaves attached\n", |
1045 | pdev->id, master->num_chipselect); | 1044 | pdev->id, master->num_chipselect); |
1046 | dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\ | 1045 | dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", |
1047 | \tDMA=[Rx-%d, Tx-%d]\n", | ||
1048 | mem_res->end, mem_res->start, | 1046 | mem_res->end, mem_res->start, |
1049 | sdd->rx_dmach, sdd->tx_dmach); | 1047 | sdd->rx_dmach, sdd->tx_dmach); |
1050 | 1048 | ||
@@ -1053,11 +1051,9 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1053 | err8: | 1051 | err8: |
1054 | destroy_workqueue(sdd->workqueue); | 1052 | destroy_workqueue(sdd->workqueue); |
1055 | err7: | 1053 | err7: |
1056 | if (sci->src_clk != sdd->clk) | 1054 | clk_disable(sdd->src_clk); |
1057 | clk_disable(sci->src_clk); | ||
1058 | err6: | 1055 | err6: |
1059 | if (sci->src_clk != sdd->clk) | 1056 | clk_put(sdd->src_clk); |
1060 | clk_put(sci->src_clk); | ||
1061 | err5: | 1057 | err5: |
1062 | clk_disable(sdd->clk); | 1058 | clk_disable(sdd->clk); |
1063 | err4: | 1059 | err4: |
@@ -1078,7 +1074,6 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1078 | { | 1074 | { |
1079 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1075 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1080 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1076 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1081 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
1082 | struct resource *mem_res; | 1077 | struct resource *mem_res; |
1083 | unsigned long flags; | 1078 | unsigned long flags; |
1084 | 1079 | ||
@@ -1093,11 +1088,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1093 | 1088 | ||
1094 | destroy_workqueue(sdd->workqueue); | 1089 | destroy_workqueue(sdd->workqueue); |
1095 | 1090 | ||
1096 | if (sci->src_clk != sdd->clk) | 1091 | clk_disable(sdd->src_clk); |
1097 | clk_disable(sci->src_clk); | 1092 | clk_put(sdd->src_clk); |
1098 | |||
1099 | if (sci->src_clk != sdd->clk) | ||
1100 | clk_put(sci->src_clk); | ||
1101 | 1093 | ||
1102 | clk_disable(sdd->clk); | 1094 | clk_disable(sdd->clk); |
1103 | clk_put(sdd->clk); | 1095 | clk_put(sdd->clk); |
@@ -1105,7 +1097,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1105 | iounmap((void *) sdd->regs); | 1097 | iounmap((void *) sdd->regs); |
1106 | 1098 | ||
1107 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1099 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1108 | release_mem_region(mem_res->start, resource_size(mem_res)); | 1100 | if (mem_res != NULL) |
1101 | release_mem_region(mem_res->start, resource_size(mem_res)); | ||
1109 | 1102 | ||
1110 | platform_set_drvdata(pdev, NULL); | 1103 | platform_set_drvdata(pdev, NULL); |
1111 | spi_master_put(master); | 1104 | spi_master_put(master); |
@@ -1118,8 +1111,6 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | |||
1118 | { | 1111 | { |
1119 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1112 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1120 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1113 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1121 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
1122 | struct s3c64xx_spi_csinfo *cs; | ||
1123 | unsigned long flags; | 1114 | unsigned long flags; |
1124 | 1115 | ||
1125 | spin_lock_irqsave(&sdd->lock, flags); | 1116 | spin_lock_irqsave(&sdd->lock, flags); |
@@ -1130,9 +1121,7 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | |||
1130 | msleep(10); | 1121 | msleep(10); |
1131 | 1122 | ||
1132 | /* Disable the clock */ | 1123 | /* Disable the clock */ |
1133 | if (sci->src_clk != sdd->clk) | 1124 | clk_disable(sdd->src_clk); |
1134 | clk_disable(sci->src_clk); | ||
1135 | |||
1136 | clk_disable(sdd->clk); | 1125 | clk_disable(sdd->clk); |
1137 | 1126 | ||
1138 | sdd->cur_speed = 0; /* Output Clock is stopped */ | 1127 | sdd->cur_speed = 0; /* Output Clock is stopped */ |
@@ -1144,15 +1133,13 @@ static int s3c64xx_spi_resume(struct platform_device *pdev) | |||
1144 | { | 1133 | { |
1145 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1134 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1146 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1135 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1147 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 1136 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
1148 | unsigned long flags; | 1137 | unsigned long flags; |
1149 | 1138 | ||
1150 | sci->cfg_gpio(pdev); | 1139 | sci->cfg_gpio(pdev); |
1151 | 1140 | ||
1152 | /* Enable the clock */ | 1141 | /* Enable the clock */ |
1153 | if (sci->src_clk != sdd->clk) | 1142 | clk_enable(sdd->src_clk); |
1154 | clk_enable(sci->src_clk); | ||
1155 | |||
1156 | clk_enable(sdd->clk); | 1143 | clk_enable(sdd->clk); |
1157 | 1144 | ||
1158 | s3c64xx_spi_hwinit(sdd, pdev->id); | 1145 | s3c64xx_spi_hwinit(sdd, pdev->id); |
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index 30973ec16a93..d93b66743ba7 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c | |||
@@ -20,12 +20,12 @@ | |||
20 | #include <linux/bitmap.h> | 20 | #include <linux/bitmap.h> |
21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/err.h> | ||
23 | 24 | ||
24 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
25 | #include <linux/spi/spi_bitbang.h> | 26 | #include <linux/spi/spi_bitbang.h> |
26 | #include <linux/spi/sh_msiof.h> | 27 | #include <linux/spi/sh_msiof.h> |
27 | 28 | ||
28 | #include <asm/spi.h> | ||
29 | #include <asm/unaligned.h> | 29 | #include <asm/unaligned.h> |
30 | 30 | ||
31 | struct sh_msiof_spi_priv { | 31 | struct sh_msiof_spi_priv { |
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c index 2552bb364005..fadff76eb7e0 100644 --- a/drivers/spi/spi_stmp.c +++ b/drivers/spi/spi_stmp.c | |||
@@ -76,7 +76,7 @@ struct stmp_spi { | |||
76 | break; \ | 76 | break; \ |
77 | } \ | 77 | } \ |
78 | cpu_relax(); \ | 78 | cpu_relax(); \ |
79 | } while (time_before(end_jiffies, jiffies)); \ | 79 | } while (time_before(jiffies, end_jiffies)); \ |
80 | succeeded; \ | 80 | succeeded; \ |
81 | }) | 81 | }) |
82 | 82 | ||
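
The one-character spi_stmp.c fix above is easy to misread: with time_before(end_jiffies, jiffies) the loop only continues once the deadline has already passed, so the body effectively ran a single time; the corrected test keeps polling while the current time is still before the deadline. A minimal user-space C sketch of the intended shape, where stop_condition() is a placeholder rather than driver code:

#include <stdbool.h>
#include <time.h>

static bool stop_condition(void)
{
	return false;		/* placeholder for a hardware status check */
}

static double now_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* Poll until stop_condition() holds or the deadline expires. */
static bool poll_with_timeout(double timeout_s)
{
	double deadline = now_seconds() + timeout_s;

	do {
		if (stop_condition())
			return true;		/* done before the deadline */
	} while (now_seconds() < deadline);	/* "now before end", as in the fix */

	return false;				/* timed out */
}

int main(void)
{
	return poll_with_timeout(0.01) ? 0 : 1;
}
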
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 9f386379c169..1b47363cb73f 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c | |||
@@ -93,6 +93,26 @@ struct xilinx_spi { | |||
93 | void (*rx_fn) (struct xilinx_spi *); | 93 | void (*rx_fn) (struct xilinx_spi *); |
94 | }; | 94 | }; |
95 | 95 | ||
96 | static void xspi_write32(u32 val, void __iomem *addr) | ||
97 | { | ||
98 | iowrite32(val, addr); | ||
99 | } | ||
100 | |||
101 | static unsigned int xspi_read32(void __iomem *addr) | ||
102 | { | ||
103 | return ioread32(addr); | ||
104 | } | ||
105 | |||
106 | static void xspi_write32_be(u32 val, void __iomem *addr) | ||
107 | { | ||
108 | iowrite32be(val, addr); | ||
109 | } | ||
110 | |||
111 | static unsigned int xspi_read32_be(void __iomem *addr) | ||
112 | { | ||
113 | return ioread32be(addr); | ||
114 | } | ||
115 | |||
96 | static void xspi_tx8(struct xilinx_spi *xspi) | 116 | static void xspi_tx8(struct xilinx_spi *xspi) |
97 | { | 117 | { |
98 | xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); | 118 | xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); |
@@ -374,11 +394,11 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | |||
374 | xspi->mem = *mem; | 394 | xspi->mem = *mem; |
375 | xspi->irq = irq; | 395 | xspi->irq = irq; |
376 | if (pdata->little_endian) { | 396 | if (pdata->little_endian) { |
377 | xspi->read_fn = ioread32; | 397 | xspi->read_fn = xspi_read32; |
378 | xspi->write_fn = iowrite32; | 398 | xspi->write_fn = xspi_write32; |
379 | } else { | 399 | } else { |
380 | xspi->read_fn = ioread32be; | 400 | xspi->read_fn = xspi_read32_be; |
381 | xspi->write_fn = iowrite32be; | 401 | xspi->write_fn = xspi_write32_be; |
382 | } | 402 | } |
383 | xspi->bits_per_word = pdata->bits_per_word; | 403 | xspi->bits_per_word = pdata->bits_per_word; |
384 | if (xspi->bits_per_word == 8) { | 404 | if (xspi->bits_per_word == 8) { |
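
The xspi_read32()/xspi_write32() wrappers added above give the driver addressable functions with one fixed prototype; the likely reason is that ioread32()/iowrite32() and their big-endian variants are not guaranteed to be plain functions with identical signatures on every architecture (they can be macros), so their addresses cannot always be stored directly in read_fn/write_fn. That rationale is my reading of the hunk rather than something stated in it. A tiny stand-alone C illustration of the general idea:

#include <stdint.h>
#include <stdio.h>

/* Pretend accessor that happens to be a macro, like ioread32 can be. */
#define RAW_WRITE32(val, addr)	(*(volatile uint32_t *)(addr) = (val))

/* A thin wrapper is a real function, so its address can be taken. */
static void wrapped_write32(uint32_t val, void *addr)
{
	RAW_WRITE32(val, addr);
}

typedef void (*write_fn)(uint32_t val, void *addr);

int main(void)
{
	uint32_t reg = 0;
	write_fn fn = wrapped_write32;	/* &RAW_WRITE32 would not compile */

	fn(0x1234, &reg);
	printf("reg = 0x%x\n", (unsigned)reg);
	return 0;
}
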
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c index 71dc3adc0495..ed34a8d419c7 100644 --- a/drivers/spi/xilinx_spi_of.c +++ b/drivers/spi/xilinx_spi_of.c | |||
@@ -99,7 +99,7 @@ static int __exit xilinx_spi_of_remove(struct of_device *op) | |||
99 | return xilinx_spi_remove(op); | 99 | return xilinx_spi_remove(op); |
100 | } | 100 | } |
101 | 101 | ||
102 | static struct of_device_id xilinx_spi_of_match[] = { | 102 | static const struct of_device_id xilinx_spi_of_match[] = { |
103 | { .compatible = "xlnx,xps-spi-2.00.a", }, | 103 | { .compatible = "xlnx,xps-spi-2.00.a", }, |
104 | { .compatible = "xlnx,xps-spi-2.00.b", }, | 104 | { .compatible = "xlnx,xps-spi-2.00.b", }, |
105 | {} | 105 | {} |
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 5681ebed9c65..03dfd27c4bfb 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c | |||
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus) | |||
494 | #endif | 494 | #endif |
495 | break; | 495 | break; |
496 | case SSB_BUSTYPE_SDIO: | 496 | case SSB_BUSTYPE_SDIO: |
497 | #ifdef CONFIG_SSB_SDIO | 497 | #ifdef CONFIG_SSB_SDIOHOST |
498 | sdev->irq = bus->host_sdio->dev.irq; | ||
499 | dev->parent = &bus->host_sdio->dev; | 498 | dev->parent = &bus->host_sdio->dev; |
500 | #endif | 499 | #endif |
501 | break; | 500 | break; |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6e8bcdfd23b4..a678186f218f 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg) | |||
1312 | void __user *addr = as->userurb; | 1312 | void __user *addr = as->userurb; |
1313 | unsigned int i; | 1313 | unsigned int i; |
1314 | 1314 | ||
1315 | if (as->userbuffer) | 1315 | if (as->userbuffer && urb->actual_length) |
1316 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, | 1316 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, |
1317 | urb->transfer_buffer_length)) | 1317 | urb->actual_length)) |
1318 | goto err_out; | 1318 | goto err_out; |
1319 | if (put_user(as->status, &userurb->status)) | 1319 | if (put_user(as->status, &userurb->status)) |
1320 | goto err_out; | 1320 | goto err_out; |
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg) | |||
1334 | } | 1334 | } |
1335 | } | 1335 | } |
1336 | 1336 | ||
1337 | free_async(as); | ||
1338 | |||
1339 | if (put_user(addr, (void __user * __user *)arg)) | 1337 | if (put_user(addr, (void __user * __user *)arg)) |
1340 | return -EFAULT; | 1338 | return -EFAULT; |
1341 | return 0; | 1339 | return 0; |
1342 | 1340 | ||
1343 | err_out: | 1341 | err_out: |
1344 | free_async(as); | ||
1345 | return -EFAULT; | 1342 | return -EFAULT; |
1346 | } | 1343 | } |
1347 | 1344 | ||
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps) | |||
1371 | static int proc_reapurb(struct dev_state *ps, void __user *arg) | 1368 | static int proc_reapurb(struct dev_state *ps, void __user *arg) |
1372 | { | 1369 | { |
1373 | struct async *as = reap_as(ps); | 1370 | struct async *as = reap_as(ps); |
1374 | if (as) | 1371 | if (as) { |
1375 | return processcompl(as, (void __user * __user *)arg); | 1372 | int retval = processcompl(as, (void __user * __user *)arg); |
1373 | free_async(as); | ||
1374 | return retval; | ||
1375 | } | ||
1376 | if (signal_pending(current)) | 1376 | if (signal_pending(current)) |
1377 | return -EINTR; | 1377 | return -EINTR; |
1378 | return -EIO; | 1378 | return -EIO; |
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg) | |||
1380 | 1380 | ||
1381 | static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) | 1381 | static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) |
1382 | { | 1382 | { |
1383 | int retval; | ||
1383 | struct async *as; | 1384 | struct async *as; |
1384 | 1385 | ||
1385 | if (!(as = async_getcompleted(ps))) | 1386 | as = async_getcompleted(ps); |
1386 | return -EAGAIN; | 1387 | retval = -EAGAIN; |
1387 | return processcompl(as, (void __user * __user *)arg); | 1388 | if (as) { |
1389 | retval = processcompl(as, (void __user * __user *)arg); | ||
1390 | free_async(as); | ||
1391 | } | ||
1392 | return retval; | ||
1388 | } | 1393 | } |
1389 | 1394 | ||
1390 | #ifdef CONFIG_COMPAT | 1395 | #ifdef CONFIG_COMPAT |
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | |||
1475 | void __user *addr = as->userurb; | 1480 | void __user *addr = as->userurb; |
1476 | unsigned int i; | 1481 | unsigned int i; |
1477 | 1482 | ||
1478 | if (as->userbuffer) | 1483 | if (as->userbuffer && urb->actual_length) |
1479 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, | 1484 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, |
1480 | urb->transfer_buffer_length)) | 1485 | urb->actual_length)) |
1481 | return -EFAULT; | 1486 | return -EFAULT; |
1482 | if (put_user(as->status, &userurb->status)) | 1487 | if (put_user(as->status, &userurb->status)) |
1483 | return -EFAULT; | 1488 | return -EFAULT; |
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | |||
1497 | } | 1502 | } |
1498 | } | 1503 | } |
1499 | 1504 | ||
1500 | free_async(as); | ||
1501 | if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) | 1505 | if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) |
1502 | return -EFAULT; | 1506 | return -EFAULT; |
1503 | return 0; | 1507 | return 0; |
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | |||
1506 | static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) | 1510 | static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) |
1507 | { | 1511 | { |
1508 | struct async *as = reap_as(ps); | 1512 | struct async *as = reap_as(ps); |
1509 | if (as) | 1513 | if (as) { |
1510 | return processcompl_compat(as, (void __user * __user *)arg); | 1514 | int retval = processcompl_compat(as, (void __user * __user *)arg); |
1515 | free_async(as); | ||
1516 | return retval; | ||
1517 | } | ||
1511 | if (signal_pending(current)) | 1518 | if (signal_pending(current)) |
1512 | return -EINTR; | 1519 | return -EINTR; |
1513 | return -EIO; | 1520 | return -EIO; |
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) | |||
1515 | 1522 | ||
1516 | static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) | 1523 | static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) |
1517 | { | 1524 | { |
1525 | int retval; | ||
1518 | struct async *as; | 1526 | struct async *as; |
1519 | 1527 | ||
1520 | if (!(as = async_getcompleted(ps))) | 1528 | retval = -EAGAIN; |
1521 | return -EAGAIN; | 1529 | as = async_getcompleted(ps); |
1522 | return processcompl_compat(as, (void __user * __user *)arg); | 1530 | if (as) { |
1531 | retval = processcompl_compat(as, (void __user * __user *)arg); | ||
1532 | free_async(as); | ||
1533 | } | ||
1534 | return retval; | ||
1523 | } | 1535 | } |
1524 | 1536 | ||
1525 | 1537 | ||
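
The devio.c hunks above move free_async() out of processcompl() and processcompl_compat() and into the reap ioctl handlers, so the completed request is owned by the caller and freed exactly once whether the copy-out succeeds or fails. A compact C sketch of that ownership pattern; all names here are invented and are not the USB core API.

#include <stdlib.h>

struct req {
	int status;
};

/* Stand-in for the copy-to-user step; it reports results, frees nothing. */
static int process_one(const struct req *r)
{
	return (r->status < 0) ? -1 : 0;
}

/* The caller owns the request: it is freed on every return path. */
static int reap_one(struct req *r)
{
	int ret = -1;		/* e.g. "try again" when nothing has completed */

	if (r) {
		ret = process_one(r);
		free(r);	/* freed exactly once, success or error */
	}
	return ret;
}

int main(void)
{
	struct req *r = calloc(1, sizeof(*r));

	return r ? reap_one(r) : 1;
}
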
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 0a577d5694fd..d4f0db58a8ad 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c | |||
@@ -358,7 +358,7 @@ done: | |||
358 | * b15: bmType (0 == data) | 358 | * b15: bmType (0 == data) |
359 | */ | 359 | */ |
360 | len = skb->len; | 360 | len = skb->len; |
361 | put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2)); | 361 | put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); |
362 | 362 | ||
363 | /* add a zero-length EEM packet, if needed */ | 363 | /* add a zero-length EEM packet, if needed */ |
364 | if (padlen) | 364 | if (padlen) |
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port, | |||
464 | } | 464 | } |
465 | 465 | ||
466 | /* validate CRC */ | 466 | /* validate CRC */ |
467 | crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); | ||
468 | if (header & BIT(14)) { | 467 | if (header & BIT(14)) { |
469 | crc = get_unaligned_le32(skb->data + len | 468 | crc = get_unaligned_le32(skb->data + len |
470 | - ETH_FCS_LEN); | 469 | - ETH_FCS_LEN); |
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c index 429560100b10..76496f5d272c 100644 --- a/drivers/usb/gadget/multi.c +++ b/drivers/usb/gadget/multi.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #if defined USB_ETH_RNDIS | 29 | #if defined USB_ETH_RNDIS |
30 | # undef USB_ETH_RNDIS | 30 | # undef USB_ETH_RNDIS |
31 | #endif | 31 | #endif |
32 | #ifdef CONFIG_USB_ETH_RNDIS | 32 | #ifdef CONFIG_USB_G_MULTI_RNDIS |
33 | # define USB_ETH_RNDIS y | 33 | # define USB_ETH_RNDIS y |
34 | #endif | 34 | #endif |
35 | 35 | ||
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index e220fb8091a3..8b45145b9136 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/io.h> | 26 | #include <linux/io.h> |
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
29 | #include <linux/err.h> | ||
29 | 30 | ||
30 | #include <linux/usb/ch9.h> | 31 | #include <linux/usb/ch9.h> |
31 | #include <linux/usb/gadget.h> | 32 | #include <linux/usb/gadget.h> |
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 4b5dbd0127f5..5fc80a104150 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c | |||
@@ -2582,6 +2582,7 @@ err: | |||
2582 | hsotg->gadget.dev.driver = NULL; | 2582 | hsotg->gadget.dev.driver = NULL; |
2583 | return ret; | 2583 | return ret; |
2584 | } | 2584 | } |
2585 | EXPORT_SYMBOL(usb_gadget_register_driver); | ||
2585 | 2586 | ||
2586 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 2587 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) |
2587 | { | 2588 | { |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index c75d9270c752..19372673bf09 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
196 | if (hostpc_reg) { | 196 | if (hostpc_reg) { |
197 | u32 t3; | 197 | u32 t3; |
198 | 198 | ||
199 | spin_unlock_irq(&ehci->lock); | ||
199 | msleep(5);/* 5ms for HCD enter low pwr mode */ | 200 | msleep(5);/* 5ms for HCD enter low pwr mode */ |
201 | spin_lock_irq(&ehci->lock); | ||
200 | t3 = ehci_readl(ehci, hostpc_reg); | 202 | t3 = ehci_readl(ehci, hostpc_reg); |
201 | ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); | 203 | ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); |
202 | t3 = ehci_readl(ehci, hostpc_reg); | 204 | t3 = ehci_readl(ehci, hostpc_reg); |
@@ -904,17 +906,18 @@ static int ehci_hub_control ( | |||
904 | if ((temp & PORT_PE) == 0 | 906 | if ((temp & PORT_PE) == 0 |
905 | || (temp & PORT_RESET) != 0) | 907 | || (temp & PORT_RESET) != 0) |
906 | goto error; | 908 | goto error; |
907 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | 909 | |
908 | /* After above check the port must be connected. | 910 | /* After above check the port must be connected. |
909 | * Set appropriate bit thus could put phy into low power | 911 | * Set appropriate bit thus could put phy into low power |
910 | * mode if we have hostpc feature | 912 | * mode if we have hostpc feature |
911 | */ | 913 | */ |
914 | temp &= ~PORT_WKCONN_E; | ||
915 | temp |= PORT_WKDISC_E | PORT_WKOC_E; | ||
916 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | ||
912 | if (hostpc_reg) { | 917 | if (hostpc_reg) { |
913 | temp &= ~PORT_WKCONN_E; | 918 | spin_unlock_irqrestore(&ehci->lock, flags); |
914 | temp |= (PORT_WKDISC_E | PORT_WKOC_E); | ||
915 | ehci_writel(ehci, temp | PORT_SUSPEND, | ||
916 | status_reg); | ||
917 | msleep(5);/* 5ms for HCD enter low pwr mode */ | 919 | msleep(5);/* 5ms for HCD enter low pwr mode */ |
920 | spin_lock_irqsave(&ehci->lock, flags); | ||
918 | temp1 = ehci_readl(ehci, hostpc_reg); | 921 | temp1 = ehci_readl(ehci, hostpc_reg); |
919 | ehci_writel(ehci, temp1 | HOSTPC_PHCD, | 922 | ehci_writel(ehci, temp1 | HOSTPC_PHCD, |
920 | hostpc_reg); | 923 | hostpc_reg); |
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index d224ab467a40..e1232890c78b 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c | |||
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb) | |||
105 | if (ep->td_base) | 105 | if (ep->td_base) |
106 | cpm_muram_free(cpm_muram_offset(ep->td_base)); | 106 | cpm_muram_free(cpm_muram_offset(ep->td_base)); |
107 | 107 | ||
108 | if (ep->conf_frame_Q) { | 108 | if (kfifo_initialized(&ep->conf_frame_Q)) { |
109 | size = cq_howmany(&ep->conf_frame_Q); | 109 | size = cq_howmany(&ep->conf_frame_Q); |
110 | for (; size; size--) { | 110 | for (; size; size--) { |
111 | struct packet *pkt = cq_get(&ep->conf_frame_Q); | 111 | struct packet *pkt = cq_get(&ep->conf_frame_Q); |
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb) | |||
115 | cq_delete(&ep->conf_frame_Q); | 115 | cq_delete(&ep->conf_frame_Q); |
116 | } | 116 | } |
117 | 117 | ||
118 | if (ep->empty_frame_Q) { | 118 | if (kfifo_initialized(&ep->empty_frame_Q)) { |
119 | size = cq_howmany(&ep->empty_frame_Q); | 119 | size = cq_howmany(&ep->empty_frame_Q); |
120 | for (; size; size--) { | 120 | for (; size; size--) { |
121 | struct packet *pkt = cq_get(&ep->empty_frame_Q); | 121 | struct packet *pkt = cq_get(&ep->empty_frame_Q); |
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb) | |||
125 | cq_delete(&ep->empty_frame_Q); | 125 | cq_delete(&ep->empty_frame_Q); |
126 | } | 126 | } |
127 | 127 | ||
128 | if (ep->dummy_packets_Q) { | 128 | if (kfifo_initialized(&ep->dummy_packets_Q)) { |
129 | size = cq_howmany(&ep->dummy_packets_Q); | 129 | size = cq_howmany(&ep->dummy_packets_Q); |
130 | for (; size; size--) { | 130 | for (; size; size--) { |
131 | u8 *buff = cq_get(&ep->dummy_packets_Q); | 131 | u8 *buff = cq_get(&ep->dummy_packets_Q); |
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 0025847743f3..8b37a4b9839e 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c | |||
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = { | |||
3245 | { USB_DEVICE(0x0711, 0x0902) }, | 3245 | { USB_DEVICE(0x0711, 0x0902) }, |
3246 | { USB_DEVICE(0x0711, 0x0903) }, | 3246 | { USB_DEVICE(0x0711, 0x0903) }, |
3247 | { USB_DEVICE(0x0711, 0x0918) }, | 3247 | { USB_DEVICE(0x0711, 0x0918) }, |
3248 | { USB_DEVICE(0x0711, 0x0920) }, | ||
3248 | { USB_DEVICE(0x182d, 0x021c) }, | 3249 | { USB_DEVICE(0x182d, 0x021c) }, |
3249 | { USB_DEVICE(0x182d, 0x0269) }, | 3250 | { USB_DEVICE(0x182d, 0x0269) }, |
3250 | { } | 3251 | { } |
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index de56b3d743d7..3d2d3e549bd1 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig | |||
@@ -44,6 +44,7 @@ config ISP1301_OMAP | |||
44 | config USB_ULPI | 44 | config USB_ULPI |
45 | bool "Generic ULPI Transceiver Driver" | 45 | bool "Generic ULPI Transceiver Driver" |
46 | depends on ARM | 46 | depends on ARM |
47 | select USB_OTG_UTILS | ||
47 | help | 48 | help |
48 | Enable this to support ULPI connected USB OTG transceivers which | 49 | Enable this to support ULPI connected USB OTG transceivers which |
49 | are likely found on embedded boards. | 50 | are likely found on embedded boards. |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 216f187582ab..7638828e7317 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -50,7 +50,7 @@ | |||
50 | * Version Information | 50 | * Version Information |
51 | */ | 51 | */ |
52 | #define DRIVER_VERSION "v1.5.0" | 52 | #define DRIVER_VERSION "v1.5.0" |
53 | #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" | 53 | #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr" |
54 | #define DRIVER_DESC "USB FTDI Serial Converters Driver" | 54 | #define DRIVER_DESC "USB FTDI Serial Converters Driver" |
55 | 55 | ||
56 | static int debug; | 56 | static int debug; |
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { | |||
145 | 145 | ||
146 | 146 | ||
147 | 147 | ||
148 | /* | ||
149 | * Device ID not listed? Test via module params product/vendor or | ||
150 | * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! | ||
151 | */ | ||
148 | static struct usb_device_id id_table_combined [] = { | 152 | static struct usb_device_id id_table_combined [] = { |
149 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, | 153 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, |
150 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, | 154 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, |
151 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, | 155 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, |
156 | { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, | ||
152 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, | 157 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, |
153 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, | 158 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, |
154 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, | 159 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, |
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = { | |||
552 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, | 557 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, |
553 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, | 558 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, |
554 | /* | 559 | /* |
555 | * Due to many user requests for multiple ELV devices we enable | 560 | * ELV devices: |
556 | * them by default. | ||
557 | */ | 561 | */ |
562 | { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, | ||
563 | { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, | ||
564 | { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, | ||
565 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) }, | ||
566 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) }, | ||
567 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) }, | ||
568 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) }, | ||
569 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) }, | ||
558 | { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, | 570 | { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, |
559 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, | 571 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, |
560 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, | 572 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, |
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = { | |||
571 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, | 583 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, |
572 | { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, | 584 | { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, |
573 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, | 585 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, |
586 | { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) }, | ||
574 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, | 587 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, |
588 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) }, | ||
575 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 589 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
576 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | 590 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, |
577 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 591 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
578 | { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, | 592 | { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, |
593 | { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) }, | ||
594 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, | ||
595 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, | ||
596 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, | ||
579 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 597 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
580 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 598 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
581 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, | 599 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, |
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = { | |||
697 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, | 715 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, |
698 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, | 716 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, |
699 | { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, | 717 | { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, |
718 | { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, | ||
700 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, | 719 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, |
701 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, | 720 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, |
702 | { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, | 721 | { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index da92b4952ffb..c8951aeed983 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -38,6 +38,8 @@ | |||
38 | /* www.candapter.com Ewert Energy Systems CANdapter device */ | 38 | /* www.candapter.com Ewert Energy Systems CANdapter device */ |
39 | #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ | 39 | #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ |
40 | 40 | ||
41 | #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ | ||
42 | |||
41 | /* OOCDlink by Joern Kaipf <joernk@web.de> | 43 | /* OOCDlink by Joern Kaipf <joernk@web.de> |
42 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ | 44 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ |
43 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ | 45 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ |
@@ -161,22 +163,37 @@ | |||
161 | /* | 163 | /* |
162 | * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). | 164 | * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). |
163 | * All of these devices use FTDI's vendor ID (0x0403). | 165 | * All of these devices use FTDI's vendor ID (0x0403). |
166 | * Further IDs taken from ELV Windows .inf file. | ||
164 | * | 167 | * |
165 | * The previously included PID for the UO 100 module was incorrect. | 168 | * The previously included PID for the UO 100 module was incorrect. |
166 | * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). | 169 | * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). |
167 | * | 170 | * |
168 | * Armin Laeuger originally sent the PID for the UM 100 module. | 171 | * Armin Laeuger originally sent the PID for the UM 100 module. |
169 | */ | 172 | */ |
173 | #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ | ||
174 | #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ | ||
175 | #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ | ||
176 | #define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */ | ||
177 | #define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */ | ||
178 | #define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */ | ||
179 | #define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */ | ||
180 | #define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */ | ||
170 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ | 181 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ |
171 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ | 182 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ |
172 | #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ | 183 | #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ |
184 | #define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */ | ||
185 | #define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ | ||
186 | #define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ | ||
187 | #define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ | ||
173 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ | 188 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ |
174 | #define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ | 189 | #define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ |
175 | #define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ | 190 | #define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ |
176 | #define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ | 191 | #define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ |
177 | #define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ | 192 | #define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ |
178 | #define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ | 193 | #define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ |
194 | #define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */ | ||
179 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ | 195 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ |
196 | #define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */ | ||
180 | #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ | 197 | #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ |
181 | #define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ | 198 | #define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ |
182 | #define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ | 199 | #define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ |
@@ -968,6 +985,7 @@ | |||
968 | #define PAPOUCH_VID 0x5050 /* Vendor ID */ | 985 | #define PAPOUCH_VID 0x5050 /* Vendor ID */ |
969 | #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ | 986 | #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ |
970 | #define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ | 987 | #define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ |
988 | #define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */ | ||
971 | 989 | ||
972 | /* | 990 | /* |
973 | * Marvell SheevaPlug | 991 | * Marvell SheevaPlug |
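The FTDI additions are pure table data: each new PID macro pairs with a USB_DEVICE() entry in id_table_combined, and USB_DEVICE() just fills a struct usb_device_id that the USB core matches on vendor/product (the new_id interface mentioned in the comment near the top of the table lets unlisted IDs be tried at runtime before such an entry is merged). A sketch of what one entry boils down to, with example values rather than the driver's actual table:

	#include <linux/module.h>
	#include <linux/usb.h>

	static struct usb_device_id example_table[] = {
		{	/* roughly what USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) expands to */
			.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
			.idVendor    = 0x0403,	/* FTDI_VID */
			.idProduct   = 0xE000,	/* FTDI_ELV_USR_PID */
		},
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_table);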
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index ac1b6449fb6a..3eb6143bb646 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = { | |||
298 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ | 298 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ |
299 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 299 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
300 | }, | 300 | }, |
301 | { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ | ||
301 | 302 | ||
302 | { } | 303 | { } |
303 | }; | 304 | }; |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index c932f9053188..49575fba3756 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999, | |||
941 | UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, | 941 | UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, |
942 | "Microtech", | 942 | "Microtech", |
943 | "USB-SCSI-DB25", | 943 | "USB-SCSI-DB25", |
944 | US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, | 944 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init, |
945 | US_FL_SCM_MULT_TARG ), | 945 | US_FL_SCM_MULT_TARG ), |
946 | 946 | ||
947 | UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, | 947 | UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index e9f995486ec1..bbeeb92a2131 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -78,7 +78,7 @@ MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); | |||
78 | MODULE_DESCRIPTION("USB Mass Storage driver for Linux"); | 78 | MODULE_DESCRIPTION("USB Mass Storage driver for Linux"); |
79 | MODULE_LICENSE("GPL"); | 79 | MODULE_LICENSE("GPL"); |
80 | 80 | ||
81 | static unsigned int delay_use = 5; | 81 | static unsigned int delay_use = 1; |
82 | module_param(delay_use, uint, S_IRUGO | S_IWUSR); | 82 | module_param(delay_use, uint, S_IRUGO | S_IWUSR); |
83 | MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device"); | 83 | MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device"); |
84 | 84 | ||
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index e4e4d433b007..9ee67d6da710 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c | |||
@@ -1931,22 +1931,22 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i | |||
1931 | * PowerMac2,2 summer 2000 iMacs | 1931 | * PowerMac2,2 summer 2000 iMacs |
1932 | * PowerMac4,1 january 2001 iMacs "flower power" | 1932 | * PowerMac4,1 january 2001 iMacs "flower power" |
1933 | */ | 1933 | */ |
1934 | if (machine_is_compatible("PowerMac2,1") || | 1934 | if (of_machine_is_compatible("PowerMac2,1") || |
1935 | machine_is_compatible("PowerMac2,2") || | 1935 | of_machine_is_compatible("PowerMac2,2") || |
1936 | machine_is_compatible("PowerMac4,1")) | 1936 | of_machine_is_compatible("PowerMac4,1")) |
1937 | default_vmode = VMODE_1024_768_75; | 1937 | default_vmode = VMODE_1024_768_75; |
1938 | 1938 | ||
1939 | /* iBook SE */ | 1939 | /* iBook SE */ |
1940 | if (machine_is_compatible("PowerBook2,2")) | 1940 | if (of_machine_is_compatible("PowerBook2,2")) |
1941 | default_vmode = VMODE_800_600_60; | 1941 | default_vmode = VMODE_800_600_60; |
1942 | 1942 | ||
1943 | /* PowerBook Firewire (Pismo), iBook Dual USB */ | 1943 | /* PowerBook Firewire (Pismo), iBook Dual USB */ |
1944 | if (machine_is_compatible("PowerBook3,1") || | 1944 | if (of_machine_is_compatible("PowerBook3,1") || |
1945 | machine_is_compatible("PowerBook4,1")) | 1945 | of_machine_is_compatible("PowerBook4,1")) |
1946 | default_vmode = VMODE_1024_768_60; | 1946 | default_vmode = VMODE_1024_768_60; |
1947 | 1947 | ||
1948 | /* PowerBook Titanium */ | 1948 | /* PowerBook Titanium */ |
1949 | if (machine_is_compatible("PowerBook3,2")) | 1949 | if (of_machine_is_compatible("PowerBook3,2")) |
1950 | default_vmode = VMODE_1152_768_60; | 1950 | default_vmode = VMODE_1152_768_60; |
1951 | 1951 | ||
1952 | if (default_cmode > 16) | 1952 | if (default_cmode > 16) |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 1ddeb4c34763..e45ab8db2ddc 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -2439,7 +2439,7 @@ static int __devinit aty_init(struct fb_info *info) | |||
2439 | * The Apple iBook1 uses non-standard memory frequencies. | 2439 | * The Apple iBook1 uses non-standard memory frequencies. |
2440 | * We detect it and set the frequency manually. | 2440 | * We detect it and set the frequency manually. |
2441 | */ | 2441 | */ |
2442 | if (machine_is_compatible("PowerBook2,1")) { | 2442 | if (of_machine_is_compatible("PowerBook2,1")) { |
2443 | par->pll_limits.mclk = 70; | 2443 | par->pll_limits.mclk = 70; |
2444 | par->pll_limits.xclk = 53; | 2444 | par->pll_limits.xclk = 53; |
2445 | } | 2445 | } |
@@ -2659,7 +2659,7 @@ static int __devinit aty_init(struct fb_info *info) | |||
2659 | FBINFO_HWACCEL_YPAN; | 2659 | FBINFO_HWACCEL_YPAN; |
2660 | 2660 | ||
2661 | #ifdef CONFIG_PMAC_BACKLIGHT | 2661 | #ifdef CONFIG_PMAC_BACKLIGHT |
2662 | if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) { | 2662 | if (M64_HAS(G3_PB_1_1) && of_machine_is_compatible("PowerBook1,1")) { |
2663 | /* | 2663 | /* |
2664 | * these bits let the 101 powerbook | 2664 | * these bits let the 101 powerbook |
2665 | * wake up from sleep -- paulus | 2665 | * wake up from sleep -- paulus |
@@ -2690,9 +2690,9 @@ static int __devinit aty_init(struct fb_info *info) | |||
2690 | if (M64_HAS(G3_PB_1024x768)) | 2690 | if (M64_HAS(G3_PB_1024x768)) |
2691 | /* G3 PowerBook with 1024x768 LCD */ | 2691 | /* G3 PowerBook with 1024x768 LCD */ |
2692 | default_vmode = VMODE_1024_768_60; | 2692 | default_vmode = VMODE_1024_768_60; |
2693 | else if (machine_is_compatible("iMac")) | 2693 | else if (of_machine_is_compatible("iMac")) |
2694 | default_vmode = VMODE_1024_768_75; | 2694 | default_vmode = VMODE_1024_768_75; |
2695 | else if (machine_is_compatible("PowerBook2,1")) | 2695 | else if (of_machine_is_compatible("PowerBook2,1")) |
2696 | /* iBook with 800x600 LCD */ | 2696 | /* iBook with 800x600 LCD */ |
2697 | default_vmode = VMODE_800_600_60; | 2697 | default_vmode = VMODE_800_600_60; |
2698 | else | 2698 | else |
@@ -3104,7 +3104,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
3104 | } | 3104 | } |
3105 | 3105 | ||
3106 | dp = pci_device_to_OF_node(pdev); | 3106 | dp = pci_device_to_OF_node(pdev); |
3107 | if (node == dp->node) { | 3107 | if (node == dp->phandle) { |
3108 | struct fb_var_screeninfo *var = &default_var; | 3108 | struct fb_var_screeninfo *var = &default_var; |
3109 | unsigned int N, P, Q, M, T, R; | 3109 | unsigned int N, P, Q, M, T, R; |
3110 | u32 v_total, h_total; | 3110 | u32 v_total, h_total; |
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c index 1a056adb61c8..fa1198c4ccc5 100644 --- a/drivers/video/aty/radeon_backlight.c +++ b/drivers/video/aty/radeon_backlight.c | |||
@@ -175,9 +175,9 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo) | |||
175 | 175 | ||
176 | #ifdef CONFIG_PMAC_BACKLIGHT | 176 | #ifdef CONFIG_PMAC_BACKLIGHT |
177 | pdata->negative = pdata->negative || | 177 | pdata->negative = pdata->negative || |
178 | machine_is_compatible("PowerBook4,3") || | 178 | of_machine_is_compatible("PowerBook4,3") || |
179 | machine_is_compatible("PowerBook6,3") || | 179 | of_machine_is_compatible("PowerBook6,3") || |
180 | machine_is_compatible("PowerBook6,5"); | 180 | of_machine_is_compatible("PowerBook6,5"); |
181 | #endif | 181 | #endif |
182 | 182 | ||
183 | rinfo->info->bl_dev = bd; | 183 | rinfo->info->bl_dev = bd; |
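The framebuffer hunks above are part of a tree-wide rename: machine_is_compatible() became of_machine_is_compatible() when it moved into the common OF code, with unchanged behaviour (it tests the root device-tree node's "compatible" property). A minimal sketch of a call, assuming <linux/of.h> provides it:

	#include <linux/of.h>

	static int is_flower_power_imac(void)
	{
		/* Non-zero when the root node's "compatible" matches the string,
		 * exactly like the old machine_is_compatible() helper. */
		return of_machine_is_compatible("PowerMac4,1");
	}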
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index eb12182b2059..d25df51bb0d2 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, | |||
161 | return 0; | 161 | return 0; |
162 | } | 162 | } |
163 | 163 | ||
164 | static void efifb_destroy(struct fb_info *info) | ||
165 | { | ||
166 | if (info->screen_base) | ||
167 | iounmap(info->screen_base); | ||
168 | release_mem_region(info->aperture_base, info->aperture_size); | ||
169 | framebuffer_release(info); | ||
170 | } | ||
171 | |||
164 | static struct fb_ops efifb_ops = { | 172 | static struct fb_ops efifb_ops = { |
165 | .owner = THIS_MODULE, | 173 | .owner = THIS_MODULE, |
174 | .fb_destroy = efifb_destroy, | ||
166 | .fb_setcolreg = efifb_setcolreg, | 175 | .fb_setcolreg = efifb_setcolreg, |
167 | .fb_fillrect = cfb_fillrect, | 176 | .fb_fillrect = cfb_fillrect, |
168 | .fb_copyarea = cfb_copyarea, | 177 | .fb_copyarea = cfb_copyarea, |
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev) | |||
281 | info->par = NULL; | 290 | info->par = NULL; |
282 | 291 | ||
283 | info->aperture_base = efifb_fix.smem_start; | 292 | info->aperture_base = efifb_fix.smem_start; |
284 | info->aperture_size = size_total; | 293 | info->aperture_size = size_remap; |
285 | 294 | ||
286 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); | 295 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); |
287 | if (!info->screen_base) { | 296 | if (!info->screen_base) { |
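The efifb hunks add a .fb_destroy handler so that when the generic framebuffer is unregistered (for example, handed off once a native driver claims the same aperture) its ioremap() mapping, mem region, and fb_info are actually released; recording size_remap instead of size_total in aperture_size keeps that later release consistent with what probe set up. A sketch of wiring such a hook, with hypothetical names:

	#include <linux/module.h>
	#include <linux/fb.h>
	#include <linux/io.h>
	#include <linux/ioport.h>

	/* Hypothetical driver teardown, mirroring efifb_destroy above. */
	static void example_fb_destroy(struct fb_info *info)
	{
		if (info->screen_base)
			iounmap(info->screen_base);	/* undo probe's ioremap() */
		release_mem_region(info->aperture_base, info->aperture_size);
		framebuffer_release(info);		/* frees the fb_info itself */
	}

	static struct fb_ops example_fb_ops = {
		.owner      = THIS_MODULE,
		.fb_destroy = example_fb_destroy,	/* called by the fb core on unregister */
	};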
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 505be88c82ae..369f2eebbad1 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -28,7 +28,7 @@ | |||
28 | struct virtio_balloon | 28 | struct virtio_balloon |
29 | { | 29 | { |
30 | struct virtio_device *vdev; | 30 | struct virtio_device *vdev; |
31 | struct virtqueue *inflate_vq, *deflate_vq; | 31 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; |
32 | 32 | ||
33 | /* Where the ballooning thread waits for config to change. */ | 33 | /* Where the ballooning thread waits for config to change. */ |
34 | wait_queue_head_t config_change; | 34 | wait_queue_head_t config_change; |
@@ -49,6 +49,10 @@ struct virtio_balloon | |||
49 | /* The array of pfns we tell the Host about. */ | 49 | /* The array of pfns we tell the Host about. */ |
50 | unsigned int num_pfns; | 50 | unsigned int num_pfns; |
51 | u32 pfns[256]; | 51 | u32 pfns[256]; |
52 | |||
53 | /* Memory statistics */ | ||
54 | int need_stats_update; | ||
55 | struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; | ||
52 | }; | 56 | }; |
53 | 57 | ||
54 | static struct virtio_device_id id_table[] = { | 58 | static struct virtio_device_id id_table[] = { |
@@ -154,6 +158,72 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) | |||
154 | } | 158 | } |
155 | } | 159 | } |
156 | 160 | ||
161 | static inline void update_stat(struct virtio_balloon *vb, int idx, | ||
162 | u16 tag, u64 val) | ||
163 | { | ||
164 | BUG_ON(idx >= VIRTIO_BALLOON_S_NR); | ||
165 | vb->stats[idx].tag = tag; | ||
166 | vb->stats[idx].val = val; | ||
167 | } | ||
168 | |||
169 | #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) | ||
170 | |||
171 | static void update_balloon_stats(struct virtio_balloon *vb) | ||
172 | { | ||
173 | unsigned long events[NR_VM_EVENT_ITEMS]; | ||
174 | struct sysinfo i; | ||
175 | int idx = 0; | ||
176 | |||
177 | all_vm_events(events); | ||
178 | si_meminfo(&i); | ||
179 | |||
180 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, | ||
181 | pages_to_bytes(events[PSWPIN])); | ||
182 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, | ||
183 | pages_to_bytes(events[PSWPOUT])); | ||
184 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); | ||
185 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); | ||
186 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, | ||
187 | pages_to_bytes(i.freeram)); | ||
188 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, | ||
189 | pages_to_bytes(i.totalram)); | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * While most virtqueues communicate guest-initiated requests to the hypervisor, | ||
194 | * the stats queue operates in reverse. The driver initializes the virtqueue | ||
195 | * with a single buffer. From that point forward, all conversations consist of | ||
196 | * a hypervisor request (a call to this function) which directs us to refill | ||
197 | * the virtqueue with a fresh stats buffer. Since stats collection can sleep, | ||
198 | * we notify our kthread which does the actual work via stats_handle_request(). | ||
199 | */ | ||
200 | static void stats_request(struct virtqueue *vq) | ||
201 | { | ||
202 | struct virtio_balloon *vb; | ||
203 | unsigned int len; | ||
204 | |||
205 | vb = vq->vq_ops->get_buf(vq, &len); | ||
206 | if (!vb) | ||
207 | return; | ||
208 | vb->need_stats_update = 1; | ||
209 | wake_up(&vb->config_change); | ||
210 | } | ||
211 | |||
212 | static void stats_handle_request(struct virtio_balloon *vb) | ||
213 | { | ||
214 | struct virtqueue *vq; | ||
215 | struct scatterlist sg; | ||
216 | |||
217 | vb->need_stats_update = 0; | ||
218 | update_balloon_stats(vb); | ||
219 | |||
220 | vq = vb->stats_vq; | ||
221 | sg_init_one(&sg, vb->stats, sizeof(vb->stats)); | ||
222 | if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) | ||
223 | BUG(); | ||
224 | vq->vq_ops->kick(vq); | ||
225 | } | ||
226 | |||
157 | static void virtballoon_changed(struct virtio_device *vdev) | 227 | static void virtballoon_changed(struct virtio_device *vdev) |
158 | { | 228 | { |
159 | struct virtio_balloon *vb = vdev->priv; | 229 | struct virtio_balloon *vb = vdev->priv; |
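update_stat() and the sg_init_one() calls above treat vb->stats as the wire format shared with the host: an array of VIRTIO_BALLOON_S_NR tag/value records. The authoritative definition lives in the virtio_balloon header; roughly, each entry is the packed pair sketched below (shown only for orientation, not as new code in this patch):

	#include <linux/types.h>

	/* Sketch of the per-entry layout the stats buffer is built from. */
	struct virtio_balloon_stat {
		__u16 tag;	/* a VIRTIO_BALLOON_S_* identifier */
		__u64 val;	/* bytes for memory figures, counts for events */
	} __attribute__((packed));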
@@ -190,8 +260,11 @@ static int balloon(void *_vballoon) | |||
190 | try_to_freeze(); | 260 | try_to_freeze(); |
191 | wait_event_interruptible(vb->config_change, | 261 | wait_event_interruptible(vb->config_change, |
192 | (diff = towards_target(vb)) != 0 | 262 | (diff = towards_target(vb)) != 0 |
263 | || vb->need_stats_update | ||
193 | || kthread_should_stop() | 264 | || kthread_should_stop() |
194 | || freezing(current)); | 265 | || freezing(current)); |
266 | if (vb->need_stats_update) | ||
267 | stats_handle_request(vb); | ||
195 | if (diff > 0) | 268 | if (diff > 0) |
196 | fill_balloon(vb, diff); | 269 | fill_balloon(vb, diff); |
197 | else if (diff < 0) | 270 | else if (diff < 0) |
@@ -204,10 +277,10 @@ static int balloon(void *_vballoon) | |||
204 | static int virtballoon_probe(struct virtio_device *vdev) | 277 | static int virtballoon_probe(struct virtio_device *vdev) |
205 | { | 278 | { |
206 | struct virtio_balloon *vb; | 279 | struct virtio_balloon *vb; |
207 | struct virtqueue *vqs[2]; | 280 | struct virtqueue *vqs[3]; |
208 | vq_callback_t *callbacks[] = { balloon_ack, balloon_ack }; | 281 | vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; |
209 | const char *names[] = { "inflate", "deflate" }; | 282 | const char *names[] = { "inflate", "deflate", "stats" }; |
210 | int err; | 283 | int err, nvqs; |
211 | 284 | ||
212 | vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); | 285 | vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); |
213 | if (!vb) { | 286 | if (!vb) { |
@@ -219,14 +292,31 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
219 | vb->num_pages = 0; | 292 | vb->num_pages = 0; |
220 | init_waitqueue_head(&vb->config_change); | 293 | init_waitqueue_head(&vb->config_change); |
221 | vb->vdev = vdev; | 294 | vb->vdev = vdev; |
295 | vb->need_stats_update = 0; | ||
222 | 296 | ||
223 | /* We expect two virtqueues. */ | 297 | /* We expect two virtqueues: inflate and deflate, |
224 | err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); | 298 | * and optionally stat. */ |
299 | nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; | ||
300 | err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); | ||
225 | if (err) | 301 | if (err) |
226 | goto out_free_vb; | 302 | goto out_free_vb; |
227 | 303 | ||
228 | vb->inflate_vq = vqs[0]; | 304 | vb->inflate_vq = vqs[0]; |
229 | vb->deflate_vq = vqs[1]; | 305 | vb->deflate_vq = vqs[1]; |
306 | if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { | ||
307 | struct scatterlist sg; | ||
308 | vb->stats_vq = vqs[2]; | ||
309 | |||
310 | /* | ||
311 | * Prime this virtqueue with one buffer so the hypervisor can | ||
312 | * use it to signal us later. | ||
313 | */ | ||
314 | sg_init_one(&sg, vb->stats, sizeof vb->stats); | ||
315 | if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq, | ||
316 | &sg, 1, 0, vb) < 0) | ||
317 | BUG(); | ||
318 | vb->stats_vq->vq_ops->kick(vb->stats_vq); | ||
319 | } | ||
230 | 320 | ||
231 | vb->thread = kthread_run(balloon, vb, "vballoon"); | 321 | vb->thread = kthread_run(balloon, vb, "vballoon"); |
232 | if (IS_ERR(vb->thread)) { | 322 | if (IS_ERR(vb->thread)) { |
@@ -264,7 +354,10 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev) | |||
264 | kfree(vb); | 354 | kfree(vb); |
265 | } | 355 | } |
266 | 356 | ||
267 | static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST }; | 357 | static unsigned int features[] = { |
358 | VIRTIO_BALLOON_F_MUST_TELL_HOST, | ||
359 | VIRTIO_BALLOON_F_STATS_VQ, | ||
360 | }; | ||
268 | 361 | ||
269 | static struct virtio_driver virtio_balloon_driver = { | 362 | static struct virtio_driver virtio_balloon_driver = { |
270 | .feature_table = features, | 363 | .feature_table = features, |
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 28d9cf7cf72f..1d5191fab62e 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -702,7 +702,7 @@ static struct pci_driver virtio_pci_driver = { | |||
702 | .name = "virtio-pci", | 702 | .name = "virtio-pci", |
703 | .id_table = virtio_pci_id_table, | 703 | .id_table = virtio_pci_id_table, |
704 | .probe = virtio_pci_probe, | 704 | .probe = virtio_pci_probe, |
705 | .remove = virtio_pci_remove, | 705 | .remove = __devexit_p(virtio_pci_remove), |
706 | #ifdef CONFIG_PM | 706 | #ifdef CONFIG_PM |
707 | .suspend = virtio_pci_suspend, | 707 | .suspend = virtio_pci_suspend, |
708 | .resume = virtio_pci_resume, | 708 | .resume = virtio_pci_resume, |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index fbd2ecde93e4..0db906b3c95d 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -21,6 +21,24 @@ | |||
21 | #include <linux/virtio_config.h> | 21 | #include <linux/virtio_config.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | 23 | ||
24 | /* virtio guest is communicating with a virtual "device" that actually runs on | ||
25 | * a host processor. Memory barriers are used to control SMP effects. */ | ||
26 | #ifdef CONFIG_SMP | ||
27 | /* Where possible, use SMP barriers which are more lightweight than mandatory | ||
28 | * barriers, because mandatory barriers control MMIO effects on accesses | ||
29 | * through relaxed memory I/O windows (which virtio does not use). */ | ||
30 | #define virtio_mb() smp_mb() | ||
31 | #define virtio_rmb() smp_rmb() | ||
32 | #define virtio_wmb() smp_wmb() | ||
33 | #else | ||
34 | /* We must force memory ordering even if guest is UP since host could be | ||
35 | * running on another CPU, but SMP barriers are defined to barrier() in that | ||
36 | * configuration. So fall back to mandatory barriers instead. */ | ||
37 | #define virtio_mb() mb() | ||
38 | #define virtio_rmb() rmb() | ||
39 | #define virtio_wmb() wmb() | ||
40 | #endif | ||
41 | |||
24 | #ifdef DEBUG | 42 | #ifdef DEBUG |
25 | /* For development, we want to crash whenever the ring is screwed. */ | 43 | /* For development, we want to crash whenever the ring is screwed. */ |
26 | #define BAD_RING(_vq, fmt, args...) \ | 44 | #define BAD_RING(_vq, fmt, args...) \ |
@@ -36,10 +54,9 @@ | |||
36 | panic("%s:in_use = %i\n", \ | 54 | panic("%s:in_use = %i\n", \ |
37 | (_vq)->vq.name, (_vq)->in_use); \ | 55 | (_vq)->vq.name, (_vq)->in_use); \ |
38 | (_vq)->in_use = __LINE__; \ | 56 | (_vq)->in_use = __LINE__; \ |
39 | mb(); \ | ||
40 | } while (0) | 57 | } while (0) |
41 | #define END_USE(_vq) \ | 58 | #define END_USE(_vq) \ |
42 | do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) | 59 | do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) |
43 | #else | 60 | #else |
44 | #define BAD_RING(_vq, fmt, args...) \ | 61 | #define BAD_RING(_vq, fmt, args...) \ |
45 | do { \ | 62 | do { \ |
@@ -221,13 +238,13 @@ static void vring_kick(struct virtqueue *_vq) | |||
221 | START_USE(vq); | 238 | START_USE(vq); |
222 | /* Descriptors and available array need to be set before we expose the | 239 | /* Descriptors and available array need to be set before we expose the |
223 | * new available array entries. */ | 240 | * new available array entries. */ |
224 | wmb(); | 241 | virtio_wmb(); |
225 | 242 | ||
226 | vq->vring.avail->idx += vq->num_added; | 243 | vq->vring.avail->idx += vq->num_added; |
227 | vq->num_added = 0; | 244 | vq->num_added = 0; |
228 | 245 | ||
229 | /* Need to update avail index before checking if we should notify */ | 246 | /* Need to update avail index before checking if we should notify */ |
230 | mb(); | 247 | virtio_mb(); |
231 | 248 | ||
232 | if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) | 249 | if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) |
233 | /* Prod other side to tell it about changes. */ | 250 | /* Prod other side to tell it about changes. */ |
@@ -286,7 +303,7 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) | |||
286 | } | 303 | } |
287 | 304 | ||
288 | /* Only get used array entries after they have been exposed by host. */ | 305 | /* Only get used array entries after they have been exposed by host. */ |
289 | rmb(); | 306 | virtio_rmb(); |
290 | 307 | ||
291 | i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; | 308 | i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; |
292 | *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; | 309 | *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; |
@@ -324,7 +341,7 @@ static bool vring_enable_cb(struct virtqueue *_vq) | |||
324 | /* We optimistically turn back on interrupts, then check if there was | 341 | /* We optimistically turn back on interrupts, then check if there was |
325 | * more to do. */ | 342 | * more to do. */ |
326 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; | 343 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; |
327 | mb(); | 344 | virtio_mb(); |
328 | if (unlikely(more_used(vq))) { | 345 | if (unlikely(more_used(vq))) { |
329 | END_USE(vq); | 346 | END_USE(vq); |
330 | return false; | 347 | return false; |
@@ -334,6 +351,30 @@ static bool vring_enable_cb(struct virtqueue *_vq) | |||
334 | return true; | 351 | return true; |
335 | } | 352 | } |
336 | 353 | ||
354 | static void *vring_detach_unused_buf(struct virtqueue *_vq) | ||
355 | { | ||
356 | struct vring_virtqueue *vq = to_vvq(_vq); | ||
357 | unsigned int i; | ||
358 | void *buf; | ||
359 | |||
360 | START_USE(vq); | ||
361 | |||
362 | for (i = 0; i < vq->vring.num; i++) { | ||
363 | if (!vq->data[i]) | ||
364 | continue; | ||
365 | /* detach_buf clears data, so grab it now. */ | ||
366 | buf = vq->data[i]; | ||
367 | detach_buf(vq, i); | ||
368 | END_USE(vq); | ||
369 | return buf; | ||
370 | } | ||
371 | /* That should have freed everything. */ | ||
372 | BUG_ON(vq->num_free != vq->vring.num); | ||
373 | |||
374 | END_USE(vq); | ||
375 | return NULL; | ||
376 | } | ||
377 | |||
337 | irqreturn_t vring_interrupt(int irq, void *_vq) | 378 | irqreturn_t vring_interrupt(int irq, void *_vq) |
338 | { | 379 | { |
339 | struct vring_virtqueue *vq = to_vvq(_vq); | 380 | struct vring_virtqueue *vq = to_vvq(_vq); |
@@ -360,6 +401,7 @@ static struct virtqueue_ops vring_vq_ops = { | |||
360 | .kick = vring_kick, | 401 | .kick = vring_kick, |
361 | .disable_cb = vring_disable_cb, | 402 | .disable_cb = vring_disable_cb, |
362 | .enable_cb = vring_enable_cb, | 403 | .enable_cb = vring_enable_cb, |
404 | .detach_unused_buf = vring_detach_unused_buf, | ||
363 | }; | 405 | }; |
364 | 406 | ||
365 | struct virtqueue *vring_new_virtqueue(unsigned int num, | 407 | struct virtqueue *vring_new_virtqueue(unsigned int num, |
@@ -406,8 +448,11 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, | |||
406 | /* Put everything in free lists. */ | 448 | /* Put everything in free lists. */ |
407 | vq->num_free = num; | 449 | vq->num_free = num; |
408 | vq->free_head = 0; | 450 | vq->free_head = 0; |
409 | for (i = 0; i < num-1; i++) | 451 | for (i = 0; i < num-1; i++) { |
410 | vq->vring.desc[i].next = i+1; | 452 | vq->vring.desc[i].next = i+1; |
453 | vq->data[i] = NULL; | ||
454 | } | ||
455 | vq->data[i] = NULL; | ||
411 | 456 | ||
412 | return &vq->vq; | 457 | return &vq->vq; |
413 | } | 458 | } |
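The new detach_unused_buf operation lets a driver reclaim buffers it queued but the host never consumed, which matters during device teardown. A hedged sketch of how a caller might drain a queue with it, using a hypothetical helper and the vq_ops indirection of this kernel version:

	#include <linux/virtio.h>

	/* Hypothetical teardown helper: pull back every unused buffer and free it. */
	static void drain_unused(struct virtqueue *vq, void (*free_buf)(void *buf))
	{
		void *buf;

		while ((buf = vq->vq_ops->detach_unused_buf(vq)) != NULL)
			free_buf(buf);
	}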
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index c7b3f9df2317..2159e668751c 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c | |||
@@ -1,9 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Blackfin On-Chip Watchdog Driver | 2 | * Blackfin On-Chip Watchdog Driver |
3 | * Supports BF53[123]/BF53[467]/BF54[2489]/BF561 | ||
4 | * | 3 | * |
5 | * Originally based on softdog.c | 4 | * Originally based on softdog.c |
6 | * Copyright 2006-2007 Analog Devices Inc. | 5 | * Copyright 2006-2010 Analog Devices Inc. |
7 | * Copyright 2006-2007 Michele d'Amico | 6 | * Copyright 2006-2007 Michele d'Amico |
8 | * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> | 7 | * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> |
9 | * | 8 | * |
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void) | |||
137 | */ | 136 | */ |
138 | static int bfin_wdt_set_timeout(unsigned long t) | 137 | static int bfin_wdt_set_timeout(unsigned long t) |
139 | { | 138 | { |
140 | u32 cnt; | 139 | u32 cnt, max_t, sclk; |
141 | unsigned long flags; | 140 | unsigned long flags; |
142 | 141 | ||
143 | stampit(); | 142 | sclk = get_sclk(); |
143 | max_t = -1 / sclk; | ||
144 | cnt = t * sclk; | ||
145 | stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt); | ||
144 | 146 | ||
145 | cnt = t * get_sclk(); | 147 | if (t > max_t) { |
146 | if (cnt < get_sclk()) { | ||
147 | printk(KERN_WARNING PFX "timeout value is too large\n"); | 148 | printk(KERN_WARNING PFX "timeout value is too large\n"); |
148 | return -EINVAL; | 149 | return -EINVAL; |
149 | } | 150 | } |
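The reworked bfin_wdt check derives the largest representable timeout from the 32-bit count register: max_t = 0xffffffff / sclk (that is what assigning -1 to the u32 yields), and a request is rejected when t exceeds it, before t * sclk can wrap. As a worked example with an assumed sclk of 100 MHz (not a value taken from the patch): max_t = 4294967295 / 100000000 = 42 s, so a 60 s timeout is refused up front, whereas the old cnt < get_sclk() test only noticed an overflow if the wrapped product happened to fall below one second's worth of counts.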