commit 0bc2ba94a65d2cd2ad01004d2d3f94f31c6064bd
tree fc0354e4da0ff5ab8b8eb65d60d9a8359d02634d /drivers
parent 3b2f64d00c46e1e4e9bd0bb9bb12619adac27a4b
parent 6e4664525b1db28f8c4e1130957f70a94c19213e
author Chris Zankel <chris@zankel.net> 2013-09-05 20:12:08 -0400
committer Chris Zankel <chris@zankel.net> 2013-09-05 20:12:08 -0400
Merge tag 'v3.11' into for_next
Update Xtensa tree to Linux 3.11 (merging)
Diffstat (limited to 'drivers'): 539 files changed, 8924 insertions, 4842 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d21167bfc865..dc34a5b8bcee 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -359,6 +359,9 @@ int braille_register_console(struct console *console, int index,
 	char *console_options, char *braille_options)
 {
 	int ret;
+
+	if (!(console->flags & CON_BRL))
+		return 0;
 	if (!console_options)
 		/* Only support VisioBraille for now */
 		console_options = "57600o8";
@@ -374,15 +377,17 @@ int braille_register_console(struct console *console, int index,
 	braille_co = console;
 	register_keyboard_notifier(&keyboard_notifier_block);
 	register_vt_notifier(&vt_notifier_block);
-	return 0;
+	return 1;
 }

 int braille_unregister_console(struct console *console)
 {
 	if (braille_co != console)
 		return -EINVAL;
+	if (!(console->flags & CON_BRL))
+		return 0;
 	unregister_keyboard_notifier(&keyboard_notifier_block);
 	unregister_vt_notifier(&vt_notifier_block);
 	braille_co = NULL;
-	return 0;
+	return 1;
 }
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index fd6c51cc3acb..5a74a9c1e42c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device)
 	/* Clean up. */
 	per_cpu(processor_device_array, pr->id) = NULL;
 	per_cpu(processors, pr->id) = NULL;
-	try_offline_node(cpu_to_node(pr->id));

 	/* Remove the CPU. */
 	get_online_cpus();
@@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device)
 	acpi_unmap_lsapic(pr->id);
 	put_online_cpus();

+	try_offline_node(cpu_to_node(pr->id));
+
 out:
 	free_cpumask_var(pr->throttling.shared_cpu_map);
 	kfree(pr);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 082b4dd252a8..d405fbad406a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@ struct acpi_battery {
 	struct acpi_device *device;
 	struct notifier_block pm_nb;
 	unsigned long update_time;
+	int revision;
 	int rate_now;
 	int capacity_now;
 	int voltage_now;
@@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
 };

 static struct acpi_offsets extended_info_offsets[] = {
+	{offsetof(struct acpi_battery, revision), 0},
 	{offsetof(struct acpi_battery, power_unit), 0},
 	{offsetof(struct acpi_battery, design_capacity), 0},
 	{offsetof(struct acpi_battery, full_charge_capacity), 0},
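The battery fix above works because battery.c decodes ACPI package objects positionally: an acpi_offsets table is walked in order and the i-th package element is written into the struct field named by the i-th offsetof() entry. ACPI 5.0's _BIX package starts with a Revision field, so the table gains a leading `revision` entry to keep every later field aligned. A minimal user-space sketch of that offsetof-table pattern (names and values are illustrative, not the kernel's):

```c
/* Sketch of positional package decoding via an offsetof table, as used by
 * the extended_info_offsets[] above. A new leading package element (like
 * _BIX's revision) must be prepended to the table so the remaining
 * entries stay aligned with the package layout. */
#include <stddef.h>
#include <stdio.h>

struct battery {
	int revision;
	int power_unit;
	int design_capacity;
};

static const size_t extended_offsets[] = {
	offsetof(struct battery, revision),		/* _BIX element 0 */
	offsetof(struct battery, power_unit),
	offsetof(struct battery, design_capacity),
};

static void extract(struct battery *b, const int *package, size_t count)
{
	for (size_t i = 0; i < count; i++)
		*(int *)((char *)b + extended_offsets[i]) = package[i];
}

int main(void)
{
	int bix_package[] = { 1, 0, 5000 };	/* revision, unit, capacity */
	struct battery b;

	extract(&b, bix_package, 3);
	printf("rev=%d unit=%d design=%d\n",
	       b.revision, b.power_unit, b.design_capacity);
	return 0;
}
```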
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f68095756fb7..408f6b2a5fa8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
 static DECLARE_RWSEM(bus_type_sem);

 #define PHYSICAL_NODE_STRING "physical_node"
+#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)

 int register_acpi_bus_type(struct acpi_bus_type *type)
 {
@@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 	return ret;
 }

-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
-				      void *addr_p, void **ret_p)
+static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
+				    void *not_used, void **ret_p)
 {
-	unsigned long long addr, sta;
-	acpi_status status;
+	struct acpi_device *adev = NULL;

-	status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
-	if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
+	acpi_bus_get_device(handle, &adev);
+	if (adev) {
 		*ret_p = handle;
-		status = acpi_bus_get_status_handle(handle, &sta);
-		if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
-			return AE_CTRL_TERMINATE;
+		return AE_CTRL_TERMINATE;
 	}
 	return AE_OK;
 }

-acpi_handle acpi_get_child(acpi_handle parent, u64 address)
+static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
 {
-	void *ret = NULL;
+	unsigned long long sta;
+	acpi_status status;
+
+	status = acpi_bus_get_status_handle(handle, &sta);
+	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+		return false;
+
+	if (is_bridge) {
+		void *test = NULL;
+
+		/* Check if this object has at least one child device. */
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+				    acpi_dev_present, NULL, NULL, &test);
+		return !!test;
+	}
+	return true;
+}
+
+struct find_child_context {
+	u64 addr;
+	bool is_bridge;
+	acpi_handle ret;
+	bool ret_checked;
+};
+
+static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
+				 void *data, void **not_used)
+{
+	struct find_child_context *context = data;
+	unsigned long long addr;
+	acpi_status status;

-	if (!parent)
-		return NULL;
+	status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+	if (ACPI_FAILURE(status) || addr != context->addr)
+		return AE_OK;

-	acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
-			    do_acpi_find_child, &address, &ret);
-	return (acpi_handle)ret;
+	if (!context->ret) {
+		/* This is the first matching object.  Save its handle. */
+		context->ret = handle;
+		return AE_OK;
+	}
+	/*
+	 * There is more than one matching object with the same _ADR value.
+	 * That really is unexpected, so we are kind of beyond the scope of the
+	 * spec here.  We have to choose which one to return, though.
+	 *
+	 * First, check if the previously found object is good enough and return
+	 * its handle if so.  Second, check the same for the object that we've
+	 * just found.
+	 */
+	if (!context->ret_checked) {
+		if (acpi_extra_checks_passed(context->ret, context->is_bridge))
+			return AE_CTRL_TERMINATE;
+		else
+			context->ret_checked = true;
+	}
+	if (acpi_extra_checks_passed(handle, context->is_bridge)) {
+		context->ret = handle;
+		return AE_CTRL_TERMINATE;
+	}
+	return AE_OK;
 }
-EXPORT_SYMBOL(acpi_get_child);
+
+acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
+{
+	if (parent) {
+		struct find_child_context context = {
+			.addr = addr,
+			.is_bridge = is_bridge,
+		};
+
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
+				    NULL, &context, NULL);
+		return context.ret;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_find_child);

 int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
 	struct acpi_device *acpi_dev;
 	acpi_status status;
 	struct acpi_device_physical_node *physical_node, *pn;
-	char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+	char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
+	struct list_head *physnode_list;
+	unsigned int node_id;
 	int retval = -EINVAL;

 	if (ACPI_HANDLE(dev)) {
@@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)

 	mutex_lock(&acpi_dev->physical_node_lock);

-	/* Sanity check. */
-	list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
+	/*
+	 * Keep the list sorted by node_id so that the IDs of removed nodes can
+	 * be recycled easily.
+	 */
+	physnode_list = &acpi_dev->physical_node_list;
+	node_id = 0;
+	list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
+		/* Sanity check. */
 		if (pn->dev == dev) {
 			dev_warn(dev, "Already associated with ACPI node\n");
 			goto err_free;
 		}
-
-	/* allocate physical node id according to physical_node_id_bitmap */
-	physical_node->node_id =
-		find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
-		ACPI_MAX_PHYSICAL_NODE);
-	if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
-		retval = -ENOSPC;
-		goto err_free;
+		if (pn->node_id == node_id) {
+			physnode_list = &pn->node;
+			node_id++;
+		}
 	}

-	set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
+	physical_node->node_id = node_id;
 	physical_node->dev = dev;
-	list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
+	list_add(&physical_node->node, physnode_list);
 	acpi_dev->physical_node_count++;

 	mutex_unlock(&acpi_dev->physical_node_lock);
@@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev)

 	mutex_lock(&acpi_dev->physical_node_lock);
 	list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
-		char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+		char physical_node_name[PHYSICAL_NODE_NAME_SIZE];

 		entry = list_entry(node, struct acpi_device_physical_node,
 				   node);
@@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev)
 			continue;

 		list_del(node);
-		clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);

 		acpi_dev->physical_node_count--;

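The acpi_bind_one() rework above drops the fixed-size physical_node_id_bitmap, and with it the ACPI_MAX_PHYSICAL_NODE ceiling: because the node list is now kept sorted by node_id, the first index where the sequence 0, 1, 2, ... breaks is the lowest free ID, and inserting after the last consecutive entry keeps the list sorted. A runnable user-space sketch of that allocation scheme on a plain singly-linked list (illustrative names, not kernel code):

```c
/* Sketch of sorted-list ID recycling: walk a list sorted by node_id and
 * the first gap in the sequence 0,1,2,... is the lowest free ID. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned int node_id;
	struct node *next;
};

/* Insert a new node, reusing the lowest free node_id; returns that ID. */
static unsigned int insert_node(struct node **head)
{
	struct node **link = head;	/* where the new node will be linked */
	unsigned int node_id = 0;
	struct node *pn, *new;

	for (pn = *head; pn; pn = pn->next) {
		if (pn->node_id == node_id) {
			link = &pn->next;	/* ID taken: insert later */
			node_id++;
		}
	}
	new = malloc(sizeof(*new));
	if (!new)
		abort();
	new->node_id = node_id;
	new->next = *link;
	*link = new;
	return node_id;
}

int main(void)
{
	struct node *head = NULL;

	printf("%u %u %u\n", insert_node(&head), insert_node(&head),
	       insert_node(&head));		/* allocates 0 1 2 */

	/* Removing an entry frees its ID for the next insert. */
	struct node *second = head->next;	/* node_id 1 */
	head->next = second->next;
	free(second);
	printf("%u\n", insert_node(&head));	/* recycles 1 */
	return 0;
}
```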
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 227aca77ee1e..5da44e81dd4d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -169,10 +169,8 @@ int acpi_create_platform_device(struct acpi_device *adev,
    -------------------------------------------------------------------------- */
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
 bool acpi_video_backlight_quirks(void);
-bool acpi_video_verify_backlight_support(void);
 #else
 static inline bool acpi_video_backlight_quirks(void) { return false; }
-static inline bool acpi_video_verify_backlight_support(void) { return false; }
 #endif

 #endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index aa1227a7e3f2..04a13784dd20 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 			   dev->pnp.bus_id,
 			   (u32) dev->wakeup.sleep_state);

+		mutex_lock(&dev->physical_node_lock);
+
 		if (!dev->physical_node_count) {
 			seq_printf(seq, "%c%-8s\n",
 				   dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 				put_device(ldev);
 			}
 		}
+
+		mutex_unlock(&dev->physical_node_lock);
 	}
 	mutex_unlock(&acpi_device_lock);
 	return 0;
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
 {
 	struct acpi_device_physical_node *entry;

+	mutex_lock(&adev->physical_node_lock);
+
 	list_for_each_entry(entry,
 			    &adev->physical_node_list, node)
 		if (entry->dev && device_can_wakeup(entry->dev)) {
 			bool enable = !device_may_wakeup(entry->dev);
 			device_set_wakeup_enable(entry->dev, enable);
 		}
+
+	mutex_unlock(&adev->physical_node_lock);
 }

 static ssize_t
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 6dd237e79b4f..3270d3c8ba4e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 	 * Some systems always report current brightness level as maximum
 	 * through _BQC, we need to test another value for them.
 	 */
-	test_level = current_level == max_level ? br->levels[2] : max_level;
+	test_level = current_level == max_level ? br->levels[3] : max_level;

 	result = acpi_video_device_lcd_set_level(device, test_level);
 	if (result)
@@ -908,10 +908,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		device->cap._DDC = 1;
 	}

-	if (acpi_video_init_brightness(device))
-		return;
-
-	if (acpi_video_verify_backlight_support()) {
+	if (acpi_video_backlight_support()) {
 		struct backlight_properties props;
 		struct pci_dev *pdev;
 		acpi_handle acpi_parent;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		static int count = 0;
 		char *name;

+		result = acpi_video_init_brightness(device);
+		if (result)
+			return;
 		name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
 		if (!name)
 			return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		if (result)
 			printk(KERN_ERR PREFIX "Create sysfs link\n");

-	} else {
-		/* Remove the brightness object. */
-		kfree(device->brightness->levels);
-		kfree(device->brightness);
-		device->brightness = NULL;
 	}
 }

@@ -1366,8 +1361,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
 	unsigned long long level_current, level_next;
 	int result = -EINVAL;

-	/* no warning message if acpi_backlight=vendor or a quirk is used */
-	if (!acpi_video_verify_backlight_support())
+	/* no warning message if acpi_backlight=vendor is used */
+	if (!acpi_video_backlight_support())
 		return 0;

 	if (!device->brightness)
@@ -1875,46 +1870,6 @@ static int acpi_video_bus_remove(struct acpi_device *device)
 	return 0;
 }

-static acpi_status video_unregister_backlight(acpi_handle handle, u32 lvl,
-					      void *context, void **rv)
-{
-	struct acpi_device *acpi_dev;
-	struct acpi_video_bus *video;
-	struct acpi_video_device *dev, *next;
-
-	if (acpi_bus_get_device(handle, &acpi_dev))
-		return AE_OK;
-
-	if (acpi_match_device_ids(acpi_dev, video_device_ids))
-		return AE_OK;
-
-	video = acpi_driver_data(acpi_dev);
-	if (!video)
-		return AE_OK;
-
-	acpi_video_bus_stop_devices(video);
-	mutex_lock(&video->device_list_lock);
-	list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
-		if (dev->backlight) {
-			backlight_device_unregister(dev->backlight);
-			dev->backlight = NULL;
-			kfree(dev->brightness->levels);
-			kfree(dev->brightness);
-		}
-		if (dev->cooling_dev) {
-			sysfs_remove_link(&dev->dev->dev.kobj,
-					  "thermal_cooling");
-			sysfs_remove_link(&dev->cooling_dev->device.kobj,
-					  "device");
-			thermal_cooling_device_unregister(dev->cooling_dev);
-			dev->cooling_dev = NULL;
-		}
-	}
-	mutex_unlock(&video->device_list_lock);
-	acpi_video_bus_start_devices(video);
-	return AE_OK;
-}
-
 static int __init is_i740(struct pci_dev *dev)
 {
 	if (dev->device == 0x00D1)
@@ -1946,25 +1901,14 @@ static int __init intel_opregion_present(void)
 	return opregion;
 }

-int __acpi_video_register(bool backlight_quirks)
+int acpi_video_register(void)
 {
-	bool no_backlight;
-	int result;
-
-	no_backlight = backlight_quirks ? acpi_video_backlight_quirks() : false;
-
+	int result = 0;
 	if (register_count) {
 		/*
-		 * If acpi_video_register() has been called already, don't try
-		 * to register acpi_video_bus, but unregister backlight devices
-		 * if no backlight support is requested.
+		 * if the function of acpi_video_register is already called,
+		 * don't register the acpi_vide_bus again and return no error.
 		 */
-		if (no_backlight)
-			acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-					    ACPI_UINT32_MAX,
-					    video_unregister_backlight,
-					    NULL, NULL, NULL);
-
 		return 0;
 	}

@@ -1980,7 +1924,7 @@ int __acpi_video_register(bool backlight_quirks)

 	return 0;
 }
-EXPORT_SYMBOL(__acpi_video_register);
+EXPORT_SYMBOL(acpi_video_register);

 void acpi_video_unregister(void)
 {
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 826e52def080..c3397748ba46 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -238,12 +238,7 @@ static void acpi_video_caps_check(void)

 bool acpi_video_backlight_quirks(void)
 {
-	if (acpi_gbl_osi_data >= ACPI_OSI_WIN_8) {
-		acpi_video_caps_check();
-		acpi_video_support |= ACPI_VIDEO_SKIP_BACKLIGHT;
-		return true;
-	}
-	return false;
+	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
 }
 EXPORT_SYMBOL(acpi_video_backlight_quirks);

@@ -291,14 +286,6 @@ int acpi_video_backlight_support(void)
 }
 EXPORT_SYMBOL(acpi_video_backlight_support);

-/* For the ACPI video driver use only. */
-bool acpi_video_verify_backlight_support(void)
-{
-	return (acpi_video_support & ACPI_VIDEO_SKIP_BACKLIGHT) ?
-		false : acpi_video_backlight_support();
-}
-EXPORT_SYMBOL(acpi_video_verify_backlight_support);
-
 /*
  * Use acpi_backlight=vendor/video to force that backlight switching
  * is processed by vendor specific acpi drivers or video.ko driver.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 80dc988f01e4..4e737728aee2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -97,6 +97,15 @@ config SATA_AHCI_PLATFORM

 	  If unsure, say N.

+config AHCI_IMX
+	tristate "Freescale i.MX AHCI SATA support"
+	depends on SATA_AHCI_PLATFORM && MFD_SYSCON
+	help
+	  This option enables support for the Freescale i.MX SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
 config SATA_FSL
 	tristate "Freescale 3.0Gbps SATA support"
 	depends on FSL_SOC
@@ -107,7 +116,7 @@ config SATA_FSL
 	  If unsure, say N.

 config SATA_INIC162X
-	tristate "Initio 162x SATA support"
+	tristate "Initio 162x SATA support (Very Experimental)"
 	depends on PCI
 	help
 	  This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd038a3..46518c622460 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
 obj-$(CONFIG_SATA_SIL24)	+= sata_sil24.o
 obj-$(CONFIG_SATA_DWC)		+= sata_dwc_460ex.o
 obj-$(CONFIG_SATA_HIGHBANK)	+= sata_highbank.o libahci.o
+obj-$(CONFIG_AHCI_IMX)		+= ahci_imx.o

 # SFF w/ custom DMA
 obj-$(CONFIG_PDC_ADMA)		+= pdc_adma.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5064f3ea20f1..db4380d70031 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1146,11 +1146,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
 		return rc;

 	for (i = 0; i < host->n_ports; i++) {
+		const char* desc;
 		struct ahci_port_priv *pp = host->ports[i]->private_data;

+		/* pp is NULL for dummy ports */
+		if (pp)
+			desc = pp->irq_desc;
+		else
+			desc = dev_driver_string(host->dev);
+
 		rc = devm_request_threaded_irq(host->dev,
 			irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
-			pp->irq_desc, host->ports[i]);
+			desc, host->ports[i]);
 		if (rc)
 			goto out_free_irqs;
 	}
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 000000000000..58debb0acc3a
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,236 @@
+/*
+ * Freescale IMX AHCI SATA platform driver
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include "ahci.h"
+
+enum {
+	HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+};
+
+struct imx_ahci_priv {
+	struct platform_device *ahci_pdev;
+	struct clk *sata_ref_clk;
+	struct clk *ahb_clk;
+	struct regmap *gpr;
+};
+
+static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
+{
+	int ret = 0;
+	unsigned int reg_val;
+	struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+	imxpriv->gpr =
+		syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+	if (IS_ERR(imxpriv->gpr)) {
+		dev_err(dev, "failed to find fsl,imx6q-iomux-gpr regmap\n");
+		return PTR_ERR(imxpriv->gpr);
+	}
+
+	ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+	if (ret < 0) {
+		dev_err(dev, "prepare-enable sata_ref clock err:%d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * set PHY Paremeters, two steps to configure the GPR13,
+	 * one write for rest of parameters, mask of first write
+	 * is 0x07fffffd, and the other one write for setting
+	 * the mpll_clk_en.
+	 */
+	regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
+			| IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK
+			| IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK
+			| IMX6Q_GPR13_SATA_SPD_MODE_MASK
+			| IMX6Q_GPR13_SATA_MPLL_SS_EN
+			| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
+			| IMX6Q_GPR13_SATA_TX_BOOST_MASK
+			| IMX6Q_GPR13_SATA_TX_LVL_MASK
+			| IMX6Q_GPR13_SATA_TX_EDGE_RATE
+			, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
+			| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
+			| IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F
+			| IMX6Q_GPR13_SATA_SPD_MODE_3P0G
+			| IMX6Q_GPR13_SATA_MPLL_SS_EN
+			| IMX6Q_GPR13_SATA_TX_ATTEN_9_16
+			| IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB
+			| IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
+	regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+			IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+	usleep_range(100, 200);
+
+	/*
+	 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+	 * and IP vendor specific register HOST_TIMER1MS.
+	 * Configure CAP_SSS (support stagered spin up).
+	 * Implement the port0.
+	 * Get the ahb clock rate, and configure the TIMER1MS register.
+	 */
+	reg_val = readl(mmio + HOST_CAP);
+	if (!(reg_val & HOST_CAP_SSS)) {
+		reg_val |= HOST_CAP_SSS;
+		writel(reg_val, mmio + HOST_CAP);
+	}
+	reg_val = readl(mmio + HOST_PORTS_IMPL);
+	if (!(reg_val & 0x1)) {
+		reg_val |= 0x1;
+		writel(reg_val, mmio + HOST_PORTS_IMPL);
+	}
+
+	reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+	writel(reg_val, mmio + HOST_TIMER1MS);
+
+	return 0;
+}
+
+static void imx6q_sata_exit(struct device *dev)
+{
+	struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+	regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+			!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+	clk_disable_unprepare(imxpriv->sata_ref_clk);
+}
+
+static struct ahci_platform_data imx6q_sata_pdata = {
+	.init = imx6q_sata_init,
+	.exit = imx6q_sata_exit,
+};
+
+static const struct of_device_id imx_ahci_of_match[] = {
+	{ .compatible = "fsl,imx6q-ahci", .data = &imx6q_sata_pdata},
+	{},
+};
+MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
+
+static int imx_ahci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *mem, *irq, res[2];
+	const struct of_device_id *of_id;
+	const struct ahci_platform_data *pdata = NULL;
+	struct imx_ahci_priv *imxpriv;
+	struct device *ahci_dev;
+	struct platform_device *ahci_pdev;
+	int ret;
+
+	imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
+	if (!imxpriv) {
+		dev_err(dev, "can't alloc ahci_host_priv\n");
+		return -ENOMEM;
+	}
+
+	ahci_pdev = platform_device_alloc("ahci", -1);
+	if (!ahci_pdev)
+		return -ENODEV;
+
+	ahci_dev = &ahci_pdev->dev;
+	ahci_dev->parent = dev;
+
+	imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(imxpriv->ahb_clk)) {
+		dev_err(dev, "can't get ahb clock.\n");
+		ret = PTR_ERR(imxpriv->ahb_clk);
+		goto err_out;
+	}
+
+	imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+	if (IS_ERR(imxpriv->sata_ref_clk)) {
+		dev_err(dev, "can't get sata_ref clock.\n");
+		ret = PTR_ERR(imxpriv->sata_ref_clk);
+		goto err_out;
+	}
+
+	imxpriv->ahci_pdev = ahci_pdev;
+	platform_set_drvdata(pdev, imxpriv);
+
+	of_id = of_match_device(imx_ahci_of_match, dev);
+	if (of_id) {
+		pdata = of_id->data;
+	} else {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mem || !irq) {
+		dev_err(dev, "no mmio/irq resource\n");
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	res[0] = *mem;
+	res[1] = *irq;
+
+	ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
+	ahci_dev->of_node = dev->of_node;
+
+	ret = platform_device_add_resources(ahci_pdev, res, 2);
+	if (ret)
+		goto err_out;
+
+	ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
+	if (ret)
+		goto err_out;
+
+	ret = platform_device_add(ahci_pdev);
+	if (ret) {
+err_out:
+		platform_device_put(ahci_pdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int imx_ahci_remove(struct platform_device *pdev)
+{
+	struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
+	struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
+
+	platform_device_unregister(ahci_pdev);
+	return 0;
+}
+
+static struct platform_driver imx_ahci_driver = {
+	.probe = imx_ahci_probe,
+	.remove = imx_ahci_remove,
+	.driver = {
+		.name = "ahci-imx",
+		.owner = THIS_MODULE,
+		.of_match_table = imx_ahci_of_match,
+	},
+};
+module_platform_driver(imx_ahci_driver);
+
+MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
+MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ahci:imx");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b52a10c8eeb9..513ad7ed0c99 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* SATA Controller IDE (Wellsburg) */
 	{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
-	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
 	{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e2..20fd337a5731 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)

 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;

 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;

-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 83c08907e042..b1e880a3c3da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -206,8 +206,10 @@ static ssize_t ata_scsi_park_store(struct device *device,
 	unsigned long flags;
 	int rc;

-	rc = strict_strtol(buf, 10, &input);
-	if (rc || input < -2)
+	rc = kstrtol(buf, 10, &input);
+	if (rc)
+		return rc;
+	if (input < -2)
 		return -EINVAL;
 	if (input > ATA_TMOUT_MAX_PARK) {
 		rc = -EOVERFLOW;
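The park_store fix above is about error fidelity: the old code collapsed the parser's result and the range check into one test, so a malformed string and an out-of-range value both surfaced as -EINVAL, losing kstrtol's more specific codes such as -ERANGE. A user-space sketch of the same two-stage pattern using strtol (the helper name is hypothetical):

```c
/* Sketch of the two-stage parse-then-validate pattern: return the
 * parser's own error for bad input, and reserve -EINVAL for values
 * that parse correctly but fail the range check (here: input < -2). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_park_timeout(const char *buf, long *out)
{
	char *end;
	long input;

	errno = 0;
	input = strtol(buf, &end, 10);
	if (errno)
		return -errno;		/* parser's own error, e.g. -ERANGE */
	if (end == buf || *end != '\0')
		return -EINVAL;		/* not a number at all */
	if (input < -2)
		return -EINVAL;		/* parsed fine, but out of range */
	*out = input;
	return 0;
}

int main(void)
{
	long v;

	printf("%d\n", parse_park_timeout("42", &v));	/* 0 */
	printf("%d\n", parse_park_timeout("-3", &v));	/* -EINVAL */
	printf("%d\n", parse_park_timeout("99999999999999999999", &v));
							/* -ERANGE */
	return 0;
}
```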
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4ec7c04b3f82..26386f0b89a8 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = {
 		/* sentinel */
 	}
 };
+MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);

 static struct platform_driver pata_imx_driver = {
 	.probe		= pata_imx_probe,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a65..851bd3f43ac6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;

 	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
 		count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 	    (count > ICC_MIN_INT_COUNT_THRESHOLD))
 		ticks = ICC_SAFE_INT_TICKS;

-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	iowrite32((count << 24 | ticks), hcr_base + ICC);

 	intr_coalescing_count = count;
 	intr_coalescing_ticks = ticks;
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);

 	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
 		intr_coalescing_count, intr_coalescing_ticks);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456f..e9a4f46d962e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@ struct ecx_plat_data {

 #define SGPIO_SIGNALS		3
 #define ECX_ACTIVITY_BITS	0x300000
-#define ECX_ACTIVITY_SHIFT	2
+#define ECX_ACTIVITY_SHIFT	0
 #define ECX_LOCATE_BITS		0x80000
 #define ECX_LOCATE_SHIFT	1
 #define ECX_FAULT_BITS		0x400000
-#define ECX_FAULT_SHIFT		0
+#define ECX_FAULT_SHIFT		2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
 				  u32 shift)
 {
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e45131748248..5c54d957370a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
  *
  * This file is released under GPL v2.
  *
+ * **** WARNING ****
+ *
+ * This driver never worked properly and unfortunately data corruption is
+ * relatively common.  There isn't anyone working on the driver and there's
+ * no support from the vendor.  Do not use this driver in any production
+ * environment.
+ *
+ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
+ *
+ * *****************
+ *
  * This controller is eccentric and easily locks up if something isn't
  * right.  Documentation is available at initio's website but it only
  * documents registers (not programming model).
@@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

 	ata_print_version_once(&pdev->dev, DRV_VERSION);

+	dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
+
 	/* alloc host */
 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2b7813ec6d02..ec386ee9cb22 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev,
 		container_of(dev, struct memory_block, dev);

 	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
 		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 	}
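The guard added above matters because a memory block may span sections that were never populated; for such holes the section metadata is not initialized, so translating the section number to a PFN and testing removability would consult garbage. Only present sections may contribute to the block's answer. A toy model of the loop, with arrays standing in for section metadata (purely illustrative):

```c
/* Sketch of the hole-skipping removability check: non-present sections
 * carry no valid data and must not influence the result. */
#include <stdbool.h>
#include <stdio.h>

#define SECTIONS_PER_BLOCK 4

static const bool present[SECTIONS_PER_BLOCK]   = { true, false, true, true };
static const bool removable[SECTIONS_PER_BLOCK] = { true, true /* garbage */,
						    true, false };

static bool block_removable(void)
{
	bool ret = true;

	for (int i = 0; i < SECTIONS_PER_BLOCK; i++) {
		if (!present[i])
			continue;	/* hole: nothing valid to consult */
		ret &= removable[i];
	}
	return ret;
}

int main(void)
{
	printf("removable: %d\n", block_removable());	/* 0: section 3 pins it */
	return 0;
}
```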
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5c1435c4e210..0fccc99881fd 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 	}

 	if (!rbnode->blklen) {
-		rbnode->blklen = sizeof(*rbnode);
+		rbnode->blklen = 1;
 		rbnode->base_reg = reg;
 	}

diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e69102696533..3455f833e473 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
 		}
 	}

-	return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+			map->reg_stride);
 }

 int regcache_sync_block(struct regmap *map, void *block,
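The regcache fix reads naturally once you see that (as the `+ map->reg_stride` change implies) the flush helper treats its last argument as an exclusive upper bound: passing `regtmp` stopped one register short of the block's end, while `regtmp + map->reg_stride` includes the final register. A tiny user-space sketch of exclusive-end flushing (values illustrative):

```c
/* Sketch of the off-by-one: with an exclusive end bound, the caller must
 * pass one stride past the last register it wants synced. */
#include <stdio.h>

#define REG_STRIDE 2

/* Flush registers in [base, end): end is exclusive. */
static void flush(unsigned int base, unsigned int end)
{
	for (unsigned int reg = base; reg < end; reg += REG_STRIDE)
		printf("sync reg 0x%x\n", reg);
}

int main(void)
{
	unsigned int base = 0x10, last = 0x14;	/* want 0x10, 0x12, 0x14 */

	flush(base, last);			/* bug: stops before 0x14 */
	printf("---\n");
	flush(base, last + REG_STRIDE);		/* fix: includes 0x14 */
	return 0;
}
```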
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b81ddfea1da0..e07a5fd58ad7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
 	  If unsure, say N.

 config BLK_DEV_RSXX
-	tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
+	tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
 	depends on PCI
 	help
 	  Device driver for IBM's high speed PCIe SSD
-	  storage devices: FlashSystem-70 and FlashSystem-80.
+	  storage device: Flash Adapter 900GB Full Height.

 	  To compile this driver as a module, choose M here: the
 	  module will be called rsxx.
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944a002d..4d45dba7fb8f 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -906,16 +906,10 @@ bio_pageinc(bio)
 	int i;

 	bio_for_each_segment(bv, bio, i) {
-		page = bv->bv_page;
 		/* Non-zero page count for non-head members of
-		 * compound pages is no longer allowed by the kernel,
-		 * but this has never been seen here.
+		 * compound pages is no longer allowed by the kernel.
 		 */
-		if (unlikely(PageCompound(page)))
-			if (compound_trans_head(page) != page) {
-				pr_crit("page tail used for block I/O\n");
-				BUG();
-			}
+		page = compound_trans_head(bv->bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -924,10 +918,13 @@ static void
 bio_pagedec(struct bio *bio)
 {
 	struct bio_vec *bv;
+	struct page *page;
 	int i;

-	bio_for_each_segment(bv, bio, i)
-		atomic_dec(&bv->bv_page->_count);
+	bio_for_each_segment(bv, bio, i) {
+		page = compound_trans_head(bv->bv_page);
+		atomic_dec(&page->_count);
+	}
 }

 static void
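Both aoe hunks above route the refcount operation through compound_trans_head() because a compound page keeps its reference count only on the head page; if the increment resolved the head but the decrement hit a tail page (or vice versa), the counts would diverge. A user-space model of head-page refcounting (simplified and illustrative, not the kernel's page machinery):

```c
/* Model of head-page refcounting: tail pages carry no count of their
 * own, so both inc and dec are redirected to the head page. */
#include <assert.h>
#include <stdio.h>

struct page {
	struct page *head;	/* points to self for non-tail pages */
	int count;
};

static struct page *compound_head(struct page *p)
{
	return p->head;
}

int main(void)
{
	struct page head = { .head = &head, .count = 1 };
	struct page tail = { .head = &head, .count = 0 };

	/* bio_pageinc()/bio_pagedec() equivalents applied to a tail page: */
	compound_head(&tail)->count++;
	compound_head(&tail)->count--;

	assert(head.count == 1);	/* balanced on the head page */
	printf("head count: %d\n", head.count);
	return 0;
}
```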
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 6608076dc39e..28c73ca320a8 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -659,6 +659,27 @@ void drbd_al_shrink(struct drbd_conf *mdev)
 	wake_up(&mdev->al_wait);
 }

+int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
+{
+	struct al_transaction_on_disk *al = buffer;
+	struct drbd_md *md = &mdev->ldev->md;
+	sector_t al_base = md->md_offset + md->al_offset;
+	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
+	int i;
+
+	memset(al, 0, 4096);
+	al->magic = cpu_to_be32(DRBD_AL_MAGIC);
+	al->transaction_type = cpu_to_be16(AL_TR_INITIALIZED);
+	al->crc32c = cpu_to_be32(crc32c(0, al, 4096));
+
+	for (i = 0; i < al_size_4k; i++) {
+		int err = drbd_md_sync_page_io(mdev, mdev->ldev, al_base + i * 8, WRITE);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static int w_update_odbm(struct drbd_work *w, int unused)
 {
 	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
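In drbd_initialize_al() above, the `al_base + i * 8` stride is sector arithmetic: each activity-log transaction block is 4096 bytes (the memset size), and with the kernel's 512-byte sector unit that is 4096/512 = 8 sectors per block. A trivial sketch of the layout computation (values illustrative):

```c
/* Sketch of the AL on-disk stride: one 4KiB block every 8 sectors. */
#include <stdio.h>

#define SECTOR_SIZE	512
#define AL_BLOCK_SIZE	4096

int main(void)
{
	unsigned long long al_base = 2048;	/* illustrative start sector */
	int al_size_4k = 4;			/* illustrative AL size */

	for (int i = 0; i < al_size_4k; i++)
		printf("write 4KiB block %d at sector %llu\n",
		       i, al_base + i * (AL_BLOCK_SIZE / SECTOR_SIZE));
	return 0;
}
```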
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index f943aacfdad8..2d7f608d181c 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
| @@ -832,6 +832,7 @@ struct drbd_tconn { /* is a resource from the config file */ | |||
| 832 | unsigned susp_nod:1; /* IO suspended because no data */ | 832 | unsigned susp_nod:1; /* IO suspended because no data */ |
| 833 | unsigned susp_fen:1; /* IO suspended because fence peer handler runs */ | 833 | unsigned susp_fen:1; /* IO suspended because fence peer handler runs */ |
| 834 | struct mutex cstate_mutex; /* Protects graceful disconnects */ | 834 | struct mutex cstate_mutex; /* Protects graceful disconnects */ |
| 835 | unsigned int connect_cnt; /* Inc each time a connection is established */ | ||
| 835 | 836 | ||
| 836 | unsigned long flags; | 837 | unsigned long flags; |
| 837 | struct net_conf *net_conf; /* content protected by rcu */ | 838 | struct net_conf *net_conf; /* content protected by rcu */ |
| @@ -1132,6 +1133,7 @@ extern void drbd_mdev_cleanup(struct drbd_conf *mdev); | |||
| 1132 | void drbd_print_uuids(struct drbd_conf *mdev, const char *text); | 1133 | void drbd_print_uuids(struct drbd_conf *mdev, const char *text); |
| 1133 | 1134 | ||
| 1134 | extern void conn_md_sync(struct drbd_tconn *tconn); | 1135 | extern void conn_md_sync(struct drbd_tconn *tconn); |
| 1136 | extern void drbd_md_write(struct drbd_conf *mdev, void *buffer); | ||
| 1135 | extern void drbd_md_sync(struct drbd_conf *mdev); | 1137 | extern void drbd_md_sync(struct drbd_conf *mdev); |
| 1136 | extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); | 1138 | extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); |
| 1137 | extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); | 1139 | extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); |
| @@ -1466,8 +1468,16 @@ extern void drbd_suspend_io(struct drbd_conf *mdev); | |||
| 1466 | extern void drbd_resume_io(struct drbd_conf *mdev); | 1468 | extern void drbd_resume_io(struct drbd_conf *mdev); |
| 1467 | extern char *ppsize(char *buf, unsigned long long size); | 1469 | extern char *ppsize(char *buf, unsigned long long size); |
| 1468 | extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int); | 1470 | extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int); |
| 1469 | enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; | 1471 | enum determine_dev_size { |
| 1470 | extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); | 1472 | DS_ERROR_SHRINK = -3, |
| 1473 | DS_ERROR_SPACE_MD = -2, | ||
| 1474 | DS_ERROR = -1, | ||
| 1475 | DS_UNCHANGED = 0, | ||
| 1476 | DS_SHRUNK = 1, | ||
| 1477 | DS_GREW = 2 | ||
| 1478 | }; | ||
| 1479 | extern enum determine_dev_size | ||
| 1480 | drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local); | ||
| 1471 | extern void resync_after_online_grow(struct drbd_conf *); | 1481 | extern void resync_after_online_grow(struct drbd_conf *); |
| 1472 | extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev); | 1482 | extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev); |
| 1473 | extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, | 1483 | extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, |
| @@ -1633,6 +1643,7 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, | |||
| 1633 | #define drbd_set_out_of_sync(mdev, sector, size) \ | 1643 | #define drbd_set_out_of_sync(mdev, sector, size) \ |
| 1634 | __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) | 1644 | __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) |
| 1635 | extern void drbd_al_shrink(struct drbd_conf *mdev); | 1645 | extern void drbd_al_shrink(struct drbd_conf *mdev); |
| 1646 | extern int drbd_initialize_al(struct drbd_conf *, void *); | ||
| 1636 | 1647 | ||
| 1637 | /* drbd_nl.c */ | 1648 | /* drbd_nl.c */ |
| 1638 | /* state info broadcast */ | 1649 | /* state info broadcast */ |
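The reworked enum deliberately keeps all failure codes at or below DS_ERROR, so callers can catch every error with a single range test instead of enumerating the variants; the drbd_adm_attach() hunk below relies on exactly that with `dd <= DS_ERROR`. A standalone sketch of the convention:

    /* Standalone sketch of the new return-code convention. */
    #include <stdio.h>

    enum determine_dev_size {
        DS_ERROR_SHRINK = -3,
        DS_ERROR_SPACE_MD = -2,
        DS_ERROR = -1,
        DS_UNCHANGED = 0,
        DS_SHRUNK = 1,
        DS_GREW = 2
    };

    static const char *classify(enum determine_dev_size dd)
    {
        if (dd <= DS_ERROR)   /* one range test covers all three error codes */
            return "error";
        return dd == DS_GREW ? "grew" :
               dd == DS_SHRUNK ? "shrunk" : "unchanged";
    }

    int main(void)
    {
        printf("%s %s %s\n", classify(DS_ERROR_SHRINK),
               classify(DS_UNCHANGED), classify(DS_GREW));
        return 0;   /* prints: error unchanged grew */
    }
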
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a5dca6affcbb..55635edf563b 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
| @@ -2762,8 +2762,6 @@ int __init drbd_init(void) | |||
| 2762 | /* | 2762 | /* |
| 2763 | * allocate all necessary structs | 2763 | * allocate all necessary structs |
| 2764 | */ | 2764 | */ |
| 2765 | err = -ENOMEM; | ||
| 2766 | |||
| 2767 | init_waitqueue_head(&drbd_pp_wait); | 2765 | init_waitqueue_head(&drbd_pp_wait); |
| 2768 | 2766 | ||
| 2769 | drbd_proc = NULL; /* play safe for drbd_cleanup */ | 2767 | drbd_proc = NULL; /* play safe for drbd_cleanup */ |
| @@ -2773,6 +2771,7 @@ int __init drbd_init(void) | |||
| 2773 | if (err) | 2771 | if (err) |
| 2774 | goto fail; | 2772 | goto fail; |
| 2775 | 2773 | ||
| 2774 | err = -ENOMEM; | ||
| 2776 | drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); | 2775 | drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); |
| 2777 | if (!drbd_proc) { | 2776 | if (!drbd_proc) { |
| 2778 | printk(KERN_ERR "drbd: unable to register proc file\n"); | 2777 | printk(KERN_ERR "drbd: unable to register proc file\n"); |
| @@ -2803,7 +2802,6 @@ int __init drbd_init(void) | |||
| 2803 | fail: | 2802 | fail: |
| 2804 | drbd_cleanup(); | 2803 | drbd_cleanup(); |
| 2805 | if (err == -ENOMEM) | 2804 | if (err == -ENOMEM) |
| 2806 | /* currently always the case */ | ||
| 2807 | printk(KERN_ERR "drbd: ran out of memory\n"); | 2805 | printk(KERN_ERR "drbd: ran out of memory\n"); |
| 2808 | else | 2806 | else |
| 2809 | printk(KERN_ERR "drbd: initialization failure\n"); | 2807 | printk(KERN_ERR "drbd: initialization failure\n"); |
| @@ -2881,34 +2879,14 @@ struct meta_data_on_disk { | |||
| 2881 | u8 reserved_u8[4096 - (7*8 + 10*4)]; | 2879 | u8 reserved_u8[4096 - (7*8 + 10*4)]; |
| 2882 | } __packed; | 2880 | } __packed; |
| 2883 | 2881 | ||
| 2884 | /** | 2882 | |
| 2885 | * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set | 2883 | |
| 2886 | * @mdev: DRBD device. | 2884 | void drbd_md_write(struct drbd_conf *mdev, void *b) |
| 2887 | */ | ||
| 2888 | void drbd_md_sync(struct drbd_conf *mdev) | ||
| 2889 | { | 2885 | { |
| 2890 | struct meta_data_on_disk *buffer; | 2886 | struct meta_data_on_disk *buffer = b; |
| 2891 | sector_t sector; | 2887 | sector_t sector; |
| 2892 | int i; | 2888 | int i; |
| 2893 | 2889 | ||
| 2894 | /* Don't accidentally change the DRBD meta data layout. */ | ||
| 2895 | BUILD_BUG_ON(UI_SIZE != 4); | ||
| 2896 | BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096); | ||
| 2897 | |||
| 2898 | del_timer(&mdev->md_sync_timer); | ||
| 2899 | /* timer may be rearmed by drbd_md_mark_dirty() now. */ | ||
| 2900 | if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) | ||
| 2901 | return; | ||
| 2902 | |||
| 2903 | /* We use here D_FAILED and not D_ATTACHING because we try to write | ||
| 2904 | * metadata even if we detach due to a disk failure! */ | ||
| 2905 | if (!get_ldev_if_state(mdev, D_FAILED)) | ||
| 2906 | return; | ||
| 2907 | |||
| 2908 | buffer = drbd_md_get_buffer(mdev); | ||
| 2909 | if (!buffer) | ||
| 2910 | goto out; | ||
| 2911 | |||
| 2912 | memset(buffer, 0, sizeof(*buffer)); | 2890 | memset(buffer, 0, sizeof(*buffer)); |
| 2913 | 2891 | ||
| 2914 | buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); | 2892 | buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); |
| @@ -2937,6 +2915,35 @@ void drbd_md_sync(struct drbd_conf *mdev) | |||
| 2937 | dev_err(DEV, "meta data update failed!\n"); | 2915 | dev_err(DEV, "meta data update failed!\n"); |
| 2938 | drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); | 2916 | drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); |
| 2939 | } | 2917 | } |
| 2918 | } | ||
| 2919 | |||
| 2920 | /** | ||
| 2921 | * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set | ||
| 2922 | * @mdev: DRBD device. | ||
| 2923 | */ | ||
| 2924 | void drbd_md_sync(struct drbd_conf *mdev) | ||
| 2925 | { | ||
| 2926 | struct meta_data_on_disk *buffer; | ||
| 2927 | |||
| 2928 | /* Don't accidentally change the DRBD meta data layout. */ | ||
| 2929 | BUILD_BUG_ON(UI_SIZE != 4); | ||
| 2930 | BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096); | ||
| 2931 | |||
| 2932 | del_timer(&mdev->md_sync_timer); | ||
| 2933 | /* timer may be rearmed by drbd_md_mark_dirty() now. */ | ||
| 2934 | if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) | ||
| 2935 | return; | ||
| 2936 | |||
| 2937 | /* We use here D_FAILED and not D_ATTACHING because we try to write | ||
| 2938 | * metadata even if we detach due to a disk failure! */ | ||
| 2939 | if (!get_ldev_if_state(mdev, D_FAILED)) | ||
| 2940 | return; | ||
| 2941 | |||
| 2942 | buffer = drbd_md_get_buffer(mdev); | ||
| 2943 | if (!buffer) | ||
| 2944 | goto out; | ||
| 2945 | |||
| 2946 | drbd_md_write(mdev, buffer); | ||
| 2940 | 2947 | ||
| 2941 | /* Update mdev->ldev->md.la_size_sect, | 2948 | /* Update mdev->ldev->md.la_size_sect, |
| 2942 | * since we updated it on metadata. */ | 2949 | * since we updated it on metadata. */ |
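The motivation for this split shows up in the drbd_nl.c hunk below: drbd_determine_dev_size() already holds the metadata buffer and needs to write the superblock twice with temporarily modified flags, so the pure serialize-and-write step was factored out of drbd_md_sync() into drbd_md_write(). A minimal standalone sketch of the shape of that refactoring; every name here is an illustrative stand-in, not the real drbd API:

    #include <stdio.h>
    #include <string.h>

    static char md_buffer[64];
    static int md_dirty = 1;

    static void md_write(void *buf)        /* cf. drbd_md_write(): pure writer */
    {
        strcpy(buf, "superblock");
        printf("serialized: %s\n", (char *)buf);
    }

    static void md_sync(void)              /* cf. drbd_md_sync(): guards only */
    {
        if (!md_dirty)                     /* cf. the MD_DIRTY test */
            return;
        md_dirty = 0;
        md_write(md_buffer);               /* delegate to the factored writer */
    }

    int main(void)
    {
        md_sync();                         /* writes once */
        md_sync();                         /* no-op: nothing dirty */
        return 0;
    }
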
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 9e3f441e7e84..8cc1e640f485 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
| @@ -417,6 +417,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn) | |||
| 417 | 417 | ||
| 418 | bool conn_try_outdate_peer(struct drbd_tconn *tconn) | 418 | bool conn_try_outdate_peer(struct drbd_tconn *tconn) |
| 419 | { | 419 | { |
| 420 | unsigned int connect_cnt; | ||
| 420 | union drbd_state mask = { }; | 421 | union drbd_state mask = { }; |
| 421 | union drbd_state val = { }; | 422 | union drbd_state val = { }; |
| 422 | enum drbd_fencing_p fp; | 423 | enum drbd_fencing_p fp; |
| @@ -428,6 +429,10 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn) | |||
| 428 | return false; | 429 | return false; |
| 429 | } | 430 | } |
| 430 | 431 | ||
| 432 | spin_lock_irq(&tconn->req_lock); | ||
| 433 | connect_cnt = tconn->connect_cnt; | ||
| 434 | spin_unlock_irq(&tconn->req_lock); | ||
| 435 | |||
| 431 | fp = highest_fencing_policy(tconn); | 436 | fp = highest_fencing_policy(tconn); |
| 432 | switch (fp) { | 437 | switch (fp) { |
| 433 | case FP_NOT_AVAIL: | 438 | case FP_NOT_AVAIL: |
| @@ -492,8 +497,14 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn) | |||
| 492 | here, because we might have been able to re-establish the connection in the | 497 | here, because we might have been able to re-establish the connection in the |
| 493 | meantime. */ | 498 | meantime. */ |
| 494 | spin_lock_irq(&tconn->req_lock); | 499 | spin_lock_irq(&tconn->req_lock); |
| 495 | if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) | 500 | if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) { |
| 496 | _conn_request_state(tconn, mask, val, CS_VERBOSE); | 501 | if (tconn->connect_cnt != connect_cnt) |
| 502 | /* In case the connection was established and dropped | ||
| 503 | while the fence-peer handler was running, ignore it */ | ||
| 504 | conn_info(tconn, "Ignoring fence-peer exit code\n"); | ||
| 505 | else | ||
| 506 | _conn_request_state(tconn, mask, val, CS_VERBOSE); | ||
| 507 | } | ||
| 497 | spin_unlock_irq(&tconn->req_lock); | 508 | spin_unlock_irq(&tconn->req_lock); |
| 498 | 509 | ||
| 499 | return conn_highest_pdsk(tconn) <= D_OUTDATED; | 510 | return conn_highest_pdsk(tconn) <= D_OUTDATED; |
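The connect_cnt check closes a window where the connection is re-established and dropped again while the fence-peer handler is still running: the handler's verdict would then apply to a connection epoch that no longer exists. The fix is the classic generation-counter pattern — sample the counter under the lock, do the slow work unlocked, and only act if the counter is unchanged. A standalone sketch; the pthread names stand in for the kernel's req_lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int connect_cnt;    /* bumped on every established connection */

    static void fence_peer(void)
    {
        unsigned int seen;

        pthread_mutex_lock(&req_lock);
        seen = connect_cnt;             /* snapshot before the slow work */
        pthread_mutex_unlock(&req_lock);

        /* ...slow fence-peer handler runs here with the lock dropped... */

        pthread_mutex_lock(&req_lock);
        if (connect_cnt != seen)
            printf("connection came and went meanwhile: ignore the verdict\n");
        else
            printf("same connection epoch: apply the verdict\n");
        pthread_mutex_unlock(&req_lock);
    }

    int main(void)
    {
        fence_peer();
        return 0;
    }
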
| @@ -816,15 +827,20 @@ void drbd_resume_io(struct drbd_conf *mdev) | |||
| 816 | * Returns 0 on success, negative return values indicate errors. | 827 | * Returns 0 on success, negative return values indicate errors. |
| 817 | * You should call drbd_md_sync() after calling this function. | 828 | * You should call drbd_md_sync() after calling this function. |
| 818 | */ | 829 | */ |
| 819 | enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) | 830 | enum determine_dev_size |
| 831 | drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local) | ||
| 820 | { | 832 | { |
| 821 | sector_t prev_first_sect, prev_size; /* previous meta location */ | 833 | sector_t prev_first_sect, prev_size; /* previous meta location */ |
| 822 | sector_t la_size_sect, u_size; | 834 | sector_t la_size_sect, u_size; |
| 835 | struct drbd_md *md = &mdev->ldev->md; | ||
| 836 | u32 prev_al_stripe_size_4k; | ||
| 837 | u32 prev_al_stripes; | ||
| 823 | sector_t size; | 838 | sector_t size; |
| 824 | char ppb[10]; | 839 | char ppb[10]; |
| 840 | void *buffer; | ||
| 825 | 841 | ||
| 826 | int md_moved, la_size_changed; | 842 | int md_moved, la_size_changed; |
| 827 | enum determine_dev_size rv = unchanged; | 843 | enum determine_dev_size rv = DS_UNCHANGED; |
| 828 | 844 | ||
| 829 | /* race: | 845 | /* race: |
| 830 | * application request passes inc_ap_bio, | 846 | * application request passes inc_ap_bio, |
| @@ -836,6 +852,11 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds | |||
| 836 | * still lock the act_log to not trigger ASSERTs there. | 852 | * still lock the act_log to not trigger ASSERTs there. |
| 837 | */ | 853 | */ |
| 838 | drbd_suspend_io(mdev); | 854 | drbd_suspend_io(mdev); |
| 855 | buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */ | ||
| 856 | if (!buffer) { | ||
| 857 | drbd_resume_io(mdev); | ||
| 858 | return DS_ERROR; | ||
| 859 | } | ||
| 839 | 860 | ||
| 840 | /* no wait necessary anymore, actually we could assert that */ | 861 | /* no wait necessary anymore, actually we could assert that */ |
| 841 | wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); | 862 | wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); |
| @@ -844,7 +865,17 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds | |||
| 844 | prev_size = mdev->ldev->md.md_size_sect; | 865 | prev_size = mdev->ldev->md.md_size_sect; |
| 845 | la_size_sect = mdev->ldev->md.la_size_sect; | 866 | la_size_sect = mdev->ldev->md.la_size_sect; |
| 846 | 867 | ||
| 847 | /* TODO: should only be some assert here, not (re)init... */ | 868 | if (rs) { |
| 869 | /* rs is non NULL if we should change the AL layout only */ | ||
| 870 | |||
| 871 | prev_al_stripes = md->al_stripes; | ||
| 872 | prev_al_stripe_size_4k = md->al_stripe_size_4k; | ||
| 873 | |||
| 874 | md->al_stripes = rs->al_stripes; | ||
| 875 | md->al_stripe_size_4k = rs->al_stripe_size / 4; | ||
| 876 | md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4; | ||
| 877 | } | ||
| 878 | |||
| 848 | drbd_md_set_sector_offsets(mdev, mdev->ldev); | 879 | drbd_md_set_sector_offsets(mdev, mdev->ldev); |
| 849 | 880 | ||
| 850 | rcu_read_lock(); | 881 | rcu_read_lock(); |
| @@ -852,6 +883,21 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds | |||
| 852 | rcu_read_unlock(); | 883 | rcu_read_unlock(); |
| 853 | size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED); | 884 | size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED); |
| 854 | 885 | ||
| 886 | if (size < la_size_sect) { | ||
| 887 | if (rs && u_size == 0) { | ||
| 888 | /* Remove "rs &&" later. This check should always be active, but | ||
| 889 | right now the receiver expects the permissive behavior */ | ||
| 890 | dev_warn(DEV, "Implicit shrink not allowed. " | ||
| 891 | "Use --size=%llus for explicit shrink.\n", | ||
| 892 | (unsigned long long)size); | ||
| 893 | rv = DS_ERROR_SHRINK; | ||
| 894 | } | ||
| 895 | if (u_size > size) | ||
| 896 | rv = DS_ERROR_SPACE_MD; | ||
| 897 | if (rv != DS_UNCHANGED) | ||
| 898 | goto err_out; | ||
| 899 | } | ||
| 900 | |||
| 855 | if (drbd_get_capacity(mdev->this_bdev) != size || | 901 | if (drbd_get_capacity(mdev->this_bdev) != size || |
| 856 | drbd_bm_capacity(mdev) != size) { | 902 | drbd_bm_capacity(mdev) != size) { |
| 857 | int err; | 903 | int err; |
| @@ -867,7 +913,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds | |||
| 867 | "Leaving size unchanged at size = %lu KB\n", | 913 | "Leaving size unchanged at size = %lu KB\n", |
| 868 | (unsigned long)size); | 914 | (unsigned long)size); |
| 869 | } | 915 | } |
| 870 | rv = dev_size_error; | 916 | rv = DS_ERROR; |
| 871 | } | 917 | } |
| 872 | /* racy, see comments above. */ | 918 | /* racy, see comments above. */ |
| 873 | drbd_set_my_capacity(mdev, size); | 919 | drbd_set_my_capacity(mdev, size); |
| @@ -875,38 +921,57 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds | |||
| 875 | dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), | 921 | dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), |
| 876 | (unsigned long long)size>>1); | 922 | (unsigned long long)size>>1); |
| 877 | } | 923 | } |
| 878 | if (rv == dev_size_error) | 924 | if (rv <= DS_ERROR) |
| 879 | goto out; | 925 | goto err_out; |
| 880 | 926 | ||
| 881 | la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect); | 927 | la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect); |
| 882 | 928 | ||
| 883 | md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) | 929 | md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) |
| 884 | || prev_size != mdev->ldev->md.md_size_sect; | 930 | || prev_size != mdev->ldev->md.md_size_sect; |
| 885 | 931 | ||
| 886 | if (la_size_changed || md_moved) { | 932 | if (la_size_changed || md_moved || rs) { |
| 887 | int err; | 933 | u32 prev_flags; |
| 888 | 934 | ||
| 889 | drbd_al_shrink(mdev); /* All extents inactive. */ | 935 | drbd_al_shrink(mdev); /* All extents inactive. */ |
| 936 | |||
| 937 | prev_flags = md->flags; | ||
| 938 | md->flags &= ~MDF_PRIMARY_IND; | ||
| 939 | drbd_md_write(mdev, buffer); | ||
| 940 | |||
| 890 | dev_info(DEV, "Writing the whole bitmap, %s\n", | 941 | dev_info(DEV, "Writing the whole bitmap, %s\n", |
| 891 | la_size_changed && md_moved ? "size changed and md moved" : | 942 | la_size_changed && md_moved ? "size changed and md moved" : |
| 892 | la_size_changed ? "size changed" : "md moved"); | 943 | la_size_changed ? "size changed" : "md moved"); |
| 893 | /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ | 944 | /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ |
| 894 | err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write, | 945 | drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write, |
| 895 | "size changed", BM_LOCKED_MASK); | 946 | "size changed", BM_LOCKED_MASK); |
| 896 | if (err) { | 947 | drbd_initialize_al(mdev, buffer); |
| 897 | rv = dev_size_error; | 948 | |
| 898 | goto out; | 949 | md->flags = prev_flags; |
| 899 | } | 950 | drbd_md_write(mdev, buffer); |
| 900 | drbd_md_mark_dirty(mdev); | 951 | |
| 952 | if (rs) | ||
| 953 | dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n", | ||
| 954 | md->al_stripes, md->al_stripe_size_4k * 4); | ||
| 901 | } | 955 | } |
| 902 | 956 | ||
| 903 | if (size > la_size_sect) | 957 | if (size > la_size_sect) |
| 904 | rv = grew; | 958 | rv = DS_GREW; |
| 905 | if (size < la_size_sect) | 959 | if (size < la_size_sect) |
| 906 | rv = shrunk; | 960 | rv = DS_SHRUNK; |
| 907 | out: | 961 | |
| 962 | if (0) { | ||
| 963 | err_out: | ||
| 964 | if (rs) { | ||
| 965 | md->al_stripes = prev_al_stripes; | ||
| 966 | md->al_stripe_size_4k = prev_al_stripe_size_4k; | ||
| 967 | md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k; | ||
| 968 | |||
| 969 | drbd_md_set_sector_offsets(mdev, mdev->ldev); | ||
| 970 | } | ||
| 971 | } | ||
| 908 | lc_unlock(mdev->act_log); | 972 | lc_unlock(mdev->act_log); |
| 909 | wake_up(&mdev->al_wait); | 973 | wake_up(&mdev->al_wait); |
| 974 | drbd_md_put_buffer(mdev); | ||
| 910 | drbd_resume_io(mdev); | 975 | drbd_resume_io(mdev); |
| 911 | 976 | ||
| 912 | return rv; | 977 | return rv; |
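The new error path uses the `if (0) { err_out: ... }` idiom: the rollback block is unreachable on the success path and can only be entered via `goto err_out`, after which control falls through into the cleanup code shared by both paths (lc_unlock, buffer release, resume IO). A standalone sketch of that control flow, built around a hypothetical do_resize() wrapper:

    #include <stdio.h>

    static int do_resize(int fail)
    {
        int rv = 0;

        if (fail) {
            rv = -1;
            goto err_out;
        }
        printf("resize applied\n");

        if (0) {
    err_out:
            printf("rolling back layout changes\n");  /* error path only */
        }
        printf("common cleanup: unlock, resume IO\n"); /* both paths */
        return rv;
    }

    int main(void)
    {
        do_resize(0);
        do_resize(1);
        return 0;
    }
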
| @@ -1607,11 +1672,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) | |||
| 1607 | !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) | 1672 | !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) |
| 1608 | set_bit(USE_DEGR_WFC_T, &mdev->flags); | 1673 | set_bit(USE_DEGR_WFC_T, &mdev->flags); |
| 1609 | 1674 | ||
| 1610 | dd = drbd_determine_dev_size(mdev, 0); | 1675 | dd = drbd_determine_dev_size(mdev, 0, NULL); |
| 1611 | if (dd == dev_size_error) { | 1676 | if (dd <= DS_ERROR) { |
| 1612 | retcode = ERR_NOMEM_BITMAP; | 1677 | retcode = ERR_NOMEM_BITMAP; |
| 1613 | goto force_diskless_dec; | 1678 | goto force_diskless_dec; |
| 1614 | } else if (dd == grew) | 1679 | } else if (dd == DS_GREW) |
| 1615 | set_bit(RESYNC_AFTER_NEG, &mdev->flags); | 1680 | set_bit(RESYNC_AFTER_NEG, &mdev->flags); |
| 1616 | 1681 | ||
| 1617 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) || | 1682 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) || |
| @@ -2305,6 +2370,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) | |||
| 2305 | struct drbd_conf *mdev; | 2370 | struct drbd_conf *mdev; |
| 2306 | enum drbd_ret_code retcode; | 2371 | enum drbd_ret_code retcode; |
| 2307 | enum determine_dev_size dd; | 2372 | enum determine_dev_size dd; |
| 2373 | bool change_al_layout = false; | ||
| 2308 | enum dds_flags ddsf; | 2374 | enum dds_flags ddsf; |
| 2309 | sector_t u_size; | 2375 | sector_t u_size; |
| 2310 | int err; | 2376 | int err; |
| @@ -2315,31 +2381,33 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) | |||
| 2315 | if (retcode != NO_ERROR) | 2381 | if (retcode != NO_ERROR) |
| 2316 | goto fail; | 2382 | goto fail; |
| 2317 | 2383 | ||
| 2384 | mdev = adm_ctx.mdev; | ||
| 2385 | if (!get_ldev(mdev)) { | ||
| 2386 | retcode = ERR_NO_DISK; | ||
| 2387 | goto fail; | ||
| 2388 | } | ||
| 2389 | |||
| 2318 | memset(&rs, 0, sizeof(struct resize_parms)); | 2390 | memset(&rs, 0, sizeof(struct resize_parms)); |
| 2391 | rs.al_stripes = mdev->ldev->md.al_stripes; | ||
| 2392 | rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4; | ||
| 2319 | if (info->attrs[DRBD_NLA_RESIZE_PARMS]) { | 2393 | if (info->attrs[DRBD_NLA_RESIZE_PARMS]) { |
| 2320 | err = resize_parms_from_attrs(&rs, info); | 2394 | err = resize_parms_from_attrs(&rs, info); |
| 2321 | if (err) { | 2395 | if (err) { |
| 2322 | retcode = ERR_MANDATORY_TAG; | 2396 | retcode = ERR_MANDATORY_TAG; |
| 2323 | drbd_msg_put_info(from_attrs_err_to_txt(err)); | 2397 | drbd_msg_put_info(from_attrs_err_to_txt(err)); |
| 2324 | goto fail; | 2398 | goto fail_ldev; |
| 2325 | } | 2399 | } |
| 2326 | } | 2400 | } |
| 2327 | 2401 | ||
| 2328 | mdev = adm_ctx.mdev; | ||
| 2329 | if (mdev->state.conn > C_CONNECTED) { | 2402 | if (mdev->state.conn > C_CONNECTED) { |
| 2330 | retcode = ERR_RESIZE_RESYNC; | 2403 | retcode = ERR_RESIZE_RESYNC; |
| 2331 | goto fail; | 2404 | goto fail_ldev; |
| 2332 | } | 2405 | } |
| 2333 | 2406 | ||
| 2334 | if (mdev->state.role == R_SECONDARY && | 2407 | if (mdev->state.role == R_SECONDARY && |
| 2335 | mdev->state.peer == R_SECONDARY) { | 2408 | mdev->state.peer == R_SECONDARY) { |
| 2336 | retcode = ERR_NO_PRIMARY; | 2409 | retcode = ERR_NO_PRIMARY; |
| 2337 | goto fail; | 2410 | goto fail_ldev; |
| 2338 | } | ||
| 2339 | |||
| 2340 | if (!get_ldev(mdev)) { | ||
| 2341 | retcode = ERR_NO_DISK; | ||
| 2342 | goto fail; | ||
| 2343 | } | 2411 | } |
| 2344 | 2412 | ||
| 2345 | if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) { | 2413 | if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) { |
| @@ -2358,6 +2426,28 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) | |||
| 2358 | } | 2426 | } |
| 2359 | } | 2427 | } |
| 2360 | 2428 | ||
| 2429 | if (mdev->ldev->md.al_stripes != rs.al_stripes || | ||
| 2430 | mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) { | ||
| 2431 | u32 al_size_k = rs.al_stripes * rs.al_stripe_size; | ||
| 2432 | |||
| 2433 | if (al_size_k > (16 * 1024 * 1024)) { | ||
| 2434 | retcode = ERR_MD_LAYOUT_TOO_BIG; | ||
| 2435 | goto fail_ldev; | ||
| 2436 | } | ||
| 2437 | |||
| 2438 | if (al_size_k < MD_32kB_SECT/2) { | ||
| 2439 | retcode = ERR_MD_LAYOUT_TOO_SMALL; | ||
| 2440 | goto fail_ldev; | ||
| 2441 | } | ||
| 2442 | |||
| 2443 | if (mdev->state.conn != C_CONNECTED) { | ||
| 2444 | retcode = ERR_MD_LAYOUT_CONNECTED; | ||
| 2445 | goto fail_ldev; | ||
| 2446 | } | ||
| 2447 | |||
| 2448 | change_al_layout = true; | ||
| 2449 | } | ||
| 2450 | |||
| 2361 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) | 2451 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) |
| 2362 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); | 2452 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); |
| 2363 | 2453 | ||
| @@ -2373,16 +2463,22 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) | |||
| 2373 | } | 2463 | } |
| 2374 | 2464 | ||
| 2375 | ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); | 2465 | ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); |
| 2376 | dd = drbd_determine_dev_size(mdev, ddsf); | 2466 | dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL); |
| 2377 | drbd_md_sync(mdev); | 2467 | drbd_md_sync(mdev); |
| 2378 | put_ldev(mdev); | 2468 | put_ldev(mdev); |
| 2379 | if (dd == dev_size_error) { | 2469 | if (dd == DS_ERROR) { |
| 2380 | retcode = ERR_NOMEM_BITMAP; | 2470 | retcode = ERR_NOMEM_BITMAP; |
| 2381 | goto fail; | 2471 | goto fail; |
| 2472 | } else if (dd == DS_ERROR_SPACE_MD) { | ||
| 2473 | retcode = ERR_MD_LAYOUT_NO_FIT; | ||
| 2474 | goto fail; | ||
| 2475 | } else if (dd == DS_ERROR_SHRINK) { | ||
| 2476 | retcode = ERR_IMPLICIT_SHRINK; | ||
| 2477 | goto fail; | ||
| 2382 | } | 2478 | } |
| 2383 | 2479 | ||
| 2384 | if (mdev->state.conn == C_CONNECTED) { | 2480 | if (mdev->state.conn == C_CONNECTED) { |
| 2385 | if (dd == grew) | 2481 | if (dd == DS_GREW) |
| 2386 | set_bit(RESIZE_PENDING, &mdev->flags); | 2482 | set_bit(RESIZE_PENDING, &mdev->flags); |
| 2387 | 2483 | ||
| 2388 | drbd_send_uuids(mdev); | 2484 | drbd_send_uuids(mdev); |
| @@ -2658,7 +2754,6 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev, | |||
| 2658 | const struct sib_info *sib) | 2754 | const struct sib_info *sib) |
| 2659 | { | 2755 | { |
| 2660 | struct state_info *si = NULL; /* for sizeof(si->member); */ | 2756 | struct state_info *si = NULL; /* for sizeof(si->member); */ |
| 2661 | struct net_conf *nc; | ||
| 2662 | struct nlattr *nla; | 2757 | struct nlattr *nla; |
| 2663 | int got_ldev; | 2758 | int got_ldev; |
| 2664 | int err = 0; | 2759 | int err = 0; |
| @@ -2688,13 +2783,19 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev, | |||
| 2688 | goto nla_put_failure; | 2783 | goto nla_put_failure; |
| 2689 | 2784 | ||
| 2690 | rcu_read_lock(); | 2785 | rcu_read_lock(); |
| 2691 | if (got_ldev) | 2786 | if (got_ldev) { |
| 2692 | if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive)) | 2787 | struct disk_conf *disk_conf; |
| 2693 | goto nla_put_failure; | ||
| 2694 | 2788 | ||
| 2695 | nc = rcu_dereference(mdev->tconn->net_conf); | 2789 | disk_conf = rcu_dereference(mdev->ldev->disk_conf); |
| 2696 | if (nc) | 2790 | err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive); |
| 2697 | err = net_conf_to_skb(skb, nc, exclude_sensitive); | 2791 | } |
| 2792 | if (!err) { | ||
| 2793 | struct net_conf *nc; | ||
| 2794 | |||
| 2795 | nc = rcu_dereference(mdev->tconn->net_conf); | ||
| 2796 | if (nc) | ||
| 2797 | err = net_conf_to_skb(skb, nc, exclude_sensitive); | ||
| 2798 | } | ||
| 2698 | rcu_read_unlock(); | 2799 | rcu_read_unlock(); |
| 2699 | if (err) | 2800 | if (err) |
| 2700 | goto nla_put_failure; | 2801 | goto nla_put_failure; |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 4222affff488..cc29cd3bf78b 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
| @@ -1039,6 +1039,8 @@ randomize: | |||
| 1039 | rcu_read_lock(); | 1039 | rcu_read_lock(); |
| 1040 | idr_for_each_entry(&tconn->volumes, mdev, vnr) { | 1040 | idr_for_each_entry(&tconn->volumes, mdev, vnr) { |
| 1041 | kref_get(&mdev->kref); | 1041 | kref_get(&mdev->kref); |
| 1042 | rcu_read_unlock(); | ||
| 1043 | |||
| 1042 | /* Prevent a race between resync-handshake and | 1044 | /* Prevent a race between resync-handshake and |
| 1043 | * being promoted to Primary. | 1045 | * being promoted to Primary. |
| 1044 | * | 1046 | * |
| @@ -1049,8 +1051,6 @@ randomize: | |||
| 1049 | mutex_lock(mdev->state_mutex); | 1051 | mutex_lock(mdev->state_mutex); |
| 1050 | mutex_unlock(mdev->state_mutex); | 1052 | mutex_unlock(mdev->state_mutex); |
| 1051 | 1053 | ||
| 1052 | rcu_read_unlock(); | ||
| 1053 | |||
| 1054 | if (discard_my_data) | 1054 | if (discard_my_data) |
| 1055 | set_bit(DISCARD_MY_DATA, &mdev->flags); | 1055 | set_bit(DISCARD_MY_DATA, &mdev->flags); |
| 1056 | else | 1056 | else |
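Moving rcu_read_unlock() above the state_mutex handshake matters because mutex_lock() may sleep, and sleeping inside an RCU read-side critical section is illegal; the kref_get() taken just before keeps the device alive once the read lock is dropped. A standalone userspace analog of the pattern — the rwlock and atomic refcount stand in for rcu_read_lock() and kref:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct mdev {
        atomic_int ref;                         /* stands in for the kref */
        pthread_mutex_t state_mutex;
    };

    static pthread_rwlock_t lookup_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void handshake(struct mdev *m)
    {
        pthread_rwlock_rdlock(&lookup_lock);    /* cf. rcu_read_lock() */
        atomic_fetch_add(&m->ref, 1);           /* cf. kref_get() */
        pthread_rwlock_unlock(&lookup_lock);    /* drop before anything blocking */

        pthread_mutex_lock(&m->state_mutex);    /* may sleep: must not happen
                                                 * inside the read-side section */
        pthread_mutex_unlock(&m->state_mutex);

        atomic_fetch_sub(&m->ref, 1);           /* cf. kref_put() */
    }

    int main(void)
    {
        struct mdev m = { 1, PTHREAD_MUTEX_INITIALIZER };
        handshake(&m);
        printf("refcount back to %d\n", atomic_load(&m.ref));
        return 0;
    }
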
| @@ -3545,7 +3545,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi) | |||
| 3545 | { | 3545 | { |
| 3546 | struct drbd_conf *mdev; | 3546 | struct drbd_conf *mdev; |
| 3547 | struct p_sizes *p = pi->data; | 3547 | struct p_sizes *p = pi->data; |
| 3548 | enum determine_dev_size dd = unchanged; | 3548 | enum determine_dev_size dd = DS_UNCHANGED; |
| 3549 | sector_t p_size, p_usize, my_usize; | 3549 | sector_t p_size, p_usize, my_usize; |
| 3550 | int ldsc = 0; /* local disk size changed */ | 3550 | int ldsc = 0; /* local disk size changed */ |
| 3551 | enum dds_flags ddsf; | 3551 | enum dds_flags ddsf; |
| @@ -3617,9 +3617,9 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi) | |||
| 3617 | 3617 | ||
| 3618 | ddsf = be16_to_cpu(p->dds_flags); | 3618 | ddsf = be16_to_cpu(p->dds_flags); |
| 3619 | if (get_ldev(mdev)) { | 3619 | if (get_ldev(mdev)) { |
| 3620 | dd = drbd_determine_dev_size(mdev, ddsf); | 3620 | dd = drbd_determine_dev_size(mdev, ddsf, NULL); |
| 3621 | put_ldev(mdev); | 3621 | put_ldev(mdev); |
| 3622 | if (dd == dev_size_error) | 3622 | if (dd == DS_ERROR) |
| 3623 | return -EIO; | 3623 | return -EIO; |
| 3624 | drbd_md_sync(mdev); | 3624 | drbd_md_sync(mdev); |
| 3625 | } else { | 3625 | } else { |
| @@ -3647,7 +3647,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi) | |||
| 3647 | drbd_send_sizes(mdev, 0, ddsf); | 3647 | drbd_send_sizes(mdev, 0, ddsf); |
| 3648 | } | 3648 | } |
| 3649 | if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || | 3649 | if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || |
| 3650 | (dd == grew && mdev->state.conn == C_CONNECTED)) { | 3650 | (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) { |
| 3651 | if (mdev->state.pdsk >= D_INCONSISTENT && | 3651 | if (mdev->state.pdsk >= D_INCONSISTENT && |
| 3652 | mdev->state.disk >= D_INCONSISTENT) { | 3652 | mdev->state.disk >= D_INCONSISTENT) { |
| 3653 | if (ddsf & DDSF_NO_RESYNC) | 3653 | if (ddsf & DDSF_NO_RESYNC) |
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 90c5be2b1d30..216d47b7e88b 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c | |||
| @@ -1115,8 +1115,10 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, | |||
| 1115 | drbd_thread_restart_nowait(&mdev->tconn->receiver); | 1115 | drbd_thread_restart_nowait(&mdev->tconn->receiver); |
| 1116 | 1116 | ||
| 1117 | /* Resume AL writing if we get a connection */ | 1117 | /* Resume AL writing if we get a connection */ |
| 1118 | if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) | 1118 | if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { |
| 1119 | drbd_resume_al(mdev); | 1119 | drbd_resume_al(mdev); |
| 1120 | mdev->tconn->connect_cnt++; | ||
| 1121 | } | ||
| 1120 | 1122 | ||
| 1121 | /* remember last attach time so request_timer_fn() won't | 1123 | /* remember last attach time so request_timer_fn() won't |
| 1122 | * kill newly established sessions while we are still trying to thaw | 1124 | * kill newly established sessions while we are still trying to thaw |
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 5af21f2db29c..6e85e21445eb 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c | |||
| @@ -31,6 +31,8 @@ | |||
| 31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
| 32 | #include <linux/bitops.h> | 32 | #include <linux/bitops.h> |
| 33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
| 34 | #include <linux/debugfs.h> | ||
| 35 | #include <linux/seq_file.h> | ||
| 34 | 36 | ||
| 35 | #include <linux/genhd.h> | 37 | #include <linux/genhd.h> |
| 36 | #include <linux/idr.h> | 38 | #include <linux/idr.h> |
| @@ -39,8 +41,9 @@ | |||
| 39 | #include "rsxx_cfg.h" | 41 | #include "rsxx_cfg.h" |
| 40 | 42 | ||
| 41 | #define NO_LEGACY 0 | 43 | #define NO_LEGACY 0 |
| 44 | #define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */ | ||
| 42 | 45 | ||
| 43 | MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver"); | 46 | MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver"); |
| 44 | MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM"); | 47 | MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM"); |
| 45 | MODULE_LICENSE("GPL"); | 48 | MODULE_LICENSE("GPL"); |
| 46 | MODULE_VERSION(DRIVER_VERSION); | 49 | MODULE_VERSION(DRIVER_VERSION); |
| @@ -49,9 +52,282 @@ static unsigned int force_legacy = NO_LEGACY; | |||
| 49 | module_param(force_legacy, uint, 0444); | 52 | module_param(force_legacy, uint, 0444); |
| 50 | MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts"); | 53 | MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts"); |
| 51 | 54 | ||
| 55 | static unsigned int sync_start = 1; | ||
| 56 | module_param(sync_start, uint, 0444); | ||
| 57 | MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete " | ||
| 58 | "until the card startup has completed."); | ||
| 59 | |||
| 52 | static DEFINE_IDA(rsxx_disk_ida); | 60 | static DEFINE_IDA(rsxx_disk_ida); |
| 53 | static DEFINE_SPINLOCK(rsxx_ida_lock); | 61 | static DEFINE_SPINLOCK(rsxx_ida_lock); |
| 54 | 62 | ||
| 63 | /* --------------------Debugfs Setup ------------------- */ | ||
| 64 | |||
| 65 | struct rsxx_cram { | ||
| 66 | u32 f_pos; | ||
| 67 | u32 offset; | ||
| 68 | void *i_private; | ||
| 69 | }; | ||
| 70 | |||
| 71 | static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p) | ||
| 72 | { | ||
| 73 | struct rsxx_cardinfo *card = m->private; | ||
| 74 | |||
| 75 | seq_printf(m, "HWID 0x%08x\n", | ||
| 76 | ioread32(card->regmap + HWID)); | ||
| 77 | seq_printf(m, "SCRATCH 0x%08x\n", | ||
| 78 | ioread32(card->regmap + SCRATCH)); | ||
| 79 | seq_printf(m, "IER 0x%08x\n", | ||
| 80 | ioread32(card->regmap + IER)); | ||
| 81 | seq_printf(m, "IPR 0x%08x\n", | ||
| 82 | ioread32(card->regmap + IPR)); | ||
| 83 | seq_printf(m, "CREG_CMD 0x%08x\n", | ||
| 84 | ioread32(card->regmap + CREG_CMD)); | ||
| 85 | seq_printf(m, "CREG_ADD 0x%08x\n", | ||
| 86 | ioread32(card->regmap + CREG_ADD)); | ||
| 87 | seq_printf(m, "CREG_CNT 0x%08x\n", | ||
| 88 | ioread32(card->regmap + CREG_CNT)); | ||
| 89 | seq_printf(m, "CREG_STAT 0x%08x\n", | ||
| 90 | ioread32(card->regmap + CREG_STAT)); | ||
| 91 | seq_printf(m, "CREG_DATA0 0x%08x\n", | ||
| 92 | ioread32(card->regmap + CREG_DATA0)); | ||
| 93 | seq_printf(m, "CREG_DATA1 0x%08x\n", | ||
| 94 | ioread32(card->regmap + CREG_DATA1)); | ||
| 95 | seq_printf(m, "CREG_DATA2 0x%08x\n", | ||
| 96 | ioread32(card->regmap + CREG_DATA2)); | ||
| 97 | seq_printf(m, "CREG_DATA3 0x%08x\n", | ||
| 98 | ioread32(card->regmap + CREG_DATA3)); | ||
| 99 | seq_printf(m, "CREG_DATA4 0x%08x\n", | ||
| 100 | ioread32(card->regmap + CREG_DATA4)); | ||
| 101 | seq_printf(m, "CREG_DATA5 0x%08x\n", | ||
| 102 | ioread32(card->regmap + CREG_DATA5)); | ||
| 103 | seq_printf(m, "CREG_DATA6 0x%08x\n", | ||
| 104 | ioread32(card->regmap + CREG_DATA6)); | ||
| 105 | seq_printf(m, "CREG_DATA7 0x%08x\n", | ||
| 106 | ioread32(card->regmap + CREG_DATA7)); | ||
| 107 | seq_printf(m, "INTR_COAL 0x%08x\n", | ||
| 108 | ioread32(card->regmap + INTR_COAL)); | ||
| 109 | seq_printf(m, "HW_ERROR 0x%08x\n", | ||
| 110 | ioread32(card->regmap + HW_ERROR)); | ||
| 111 | seq_printf(m, "DEBUG0 0x%08x\n", | ||
| 112 | ioread32(card->regmap + PCI_DEBUG0)); | ||
| 113 | seq_printf(m, "DEBUG1 0x%08x\n", | ||
| 114 | ioread32(card->regmap + PCI_DEBUG1)); | ||
| 115 | seq_printf(m, "DEBUG2 0x%08x\n", | ||
| 116 | ioread32(card->regmap + PCI_DEBUG2)); | ||
| 117 | seq_printf(m, "DEBUG3 0x%08x\n", | ||
| 118 | ioread32(card->regmap + PCI_DEBUG3)); | ||
| 119 | seq_printf(m, "DEBUG4 0x%08x\n", | ||
| 120 | ioread32(card->regmap + PCI_DEBUG4)); | ||
| 121 | seq_printf(m, "DEBUG5 0x%08x\n", | ||
| 122 | ioread32(card->regmap + PCI_DEBUG5)); | ||
| 123 | seq_printf(m, "DEBUG6 0x%08x\n", | ||
| 124 | ioread32(card->regmap + PCI_DEBUG6)); | ||
| 125 | seq_printf(m, "DEBUG7 0x%08x\n", | ||
| 126 | ioread32(card->regmap + PCI_DEBUG7)); | ||
| 127 | seq_printf(m, "RECONFIG 0x%08x\n", | ||
| 128 | ioread32(card->regmap + PCI_RECONFIG)); | ||
| 129 | |||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | static int rsxx_attr_stats_show(struct seq_file *m, void *p) | ||
| 134 | { | ||
| 135 | struct rsxx_cardinfo *card = m->private; | ||
| 136 | int i; | ||
| 137 | |||
| 138 | for (i = 0; i < card->n_targets; i++) { | ||
| 139 | seq_printf(m, "Ctrl %d CRC Errors = %d\n", | ||
| 140 | i, card->ctrl[i].stats.crc_errors); | ||
| 141 | seq_printf(m, "Ctrl %d Hard Errors = %d\n", | ||
| 142 | i, card->ctrl[i].stats.hard_errors); | ||
| 143 | seq_printf(m, "Ctrl %d Soft Errors = %d\n", | ||
| 144 | i, card->ctrl[i].stats.soft_errors); | ||
| 145 | seq_printf(m, "Ctrl %d Writes Issued = %d\n", | ||
| 146 | i, card->ctrl[i].stats.writes_issued); | ||
| 147 | seq_printf(m, "Ctrl %d Writes Failed = %d\n", | ||
| 148 | i, card->ctrl[i].stats.writes_failed); | ||
| 149 | seq_printf(m, "Ctrl %d Reads Issued = %d\n", | ||
| 150 | i, card->ctrl[i].stats.reads_issued); | ||
| 151 | seq_printf(m, "Ctrl %d Reads Failed = %d\n", | ||
| 152 | i, card->ctrl[i].stats.reads_failed); | ||
| 153 | seq_printf(m, "Ctrl %d Reads Retried = %d\n", | ||
| 154 | i, card->ctrl[i].stats.reads_retried); | ||
| 155 | seq_printf(m, "Ctrl %d Discards Issued = %d\n", | ||
| 156 | i, card->ctrl[i].stats.discards_issued); | ||
| 157 | seq_printf(m, "Ctrl %d Discards Failed = %d\n", | ||
| 158 | i, card->ctrl[i].stats.discards_failed); | ||
| 159 | seq_printf(m, "Ctrl %d DMA SW Errors = %d\n", | ||
| 160 | i, card->ctrl[i].stats.dma_sw_err); | ||
| 161 | seq_printf(m, "Ctrl %d DMA HW Faults = %d\n", | ||
| 162 | i, card->ctrl[i].stats.dma_hw_fault); | ||
| 163 | seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n", | ||
| 164 | i, card->ctrl[i].stats.dma_cancelled); | ||
| 165 | seq_printf(m, "Ctrl %d SW Queue Depth = %d\n", | ||
| 166 | i, card->ctrl[i].stats.sw_q_depth); | ||
| 167 | seq_printf(m, "Ctrl %d HW Queue Depth = %d\n", | ||
| 168 | i, atomic_read(&card->ctrl[i].stats.hw_q_depth)); | ||
| 169 | } | ||
| 170 | |||
| 171 | return 0; | ||
| 172 | } | ||
| 173 | |||
| 174 | static int rsxx_attr_stats_open(struct inode *inode, struct file *file) | ||
| 175 | { | ||
| 176 | return single_open(file, rsxx_attr_stats_show, inode->i_private); | ||
| 177 | } | ||
| 178 | |||
| 179 | static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file) | ||
| 180 | { | ||
| 181 | return single_open(file, rsxx_attr_pci_regs_show, inode->i_private); | ||
| 182 | } | ||
| 183 | |||
| 184 | static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf, | ||
| 185 | size_t cnt, loff_t *ppos) | ||
| 186 | { | ||
| 187 | struct rsxx_cram *info = fp->private_data; | ||
| 188 | struct rsxx_cardinfo *card = info->i_private; | ||
| 189 | char *buf; | ||
| 190 | int st; | ||
| 191 | |||
| 192 | buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL); | ||
| 193 | if (!buf) | ||
| 194 | return -ENOMEM; | ||
| 195 | |||
| 196 | info->f_pos = (u32)*ppos + info->offset; | ||
| 197 | |||
| 198 | st = rsxx_creg_read(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1); | ||
| 199 | if (st) | ||
| 200 | return st; | ||
| 201 | |||
| 202 | st = copy_to_user(ubuf, buf, cnt); | ||
| 203 | if (st) | ||
| 204 | return st; | ||
| 205 | |||
| 206 | info->offset += cnt; | ||
| 207 | |||
| 208 | kfree(buf); | ||
| 209 | |||
| 210 | return cnt; | ||
| 211 | } | ||
| 212 | |||
| 213 | static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf, | ||
| 214 | size_t cnt, loff_t *ppos) | ||
| 215 | { | ||
| 216 | struct rsxx_cram *info = fp->private_data; | ||
| 217 | struct rsxx_cardinfo *card = info->i_private; | ||
| 218 | char *buf; | ||
| 219 | int st; | ||
| 220 | |||
| 221 | buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL); | ||
| 222 | if (!buf) | ||
| 223 | return -ENOMEM; | ||
| 224 | |||
| 225 | st = copy_from_user(buf, ubuf, cnt); | ||
| 226 | if (st) | ||
| 227 | return st; | ||
| 228 | |||
| 229 | info->f_pos = (u32)*ppos + info->offset; | ||
| 230 | |||
| 231 | st = rsxx_creg_write(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1); | ||
| 232 | if (st) | ||
| 233 | return st; | ||
| 234 | |||
| 235 | info->offset += cnt; | ||
| 236 | |||
| 237 | kfree(buf); | ||
| 238 | |||
| 239 | return cnt; | ||
| 240 | } | ||
| 241 | |||
| 242 | static int rsxx_cram_open(struct inode *inode, struct file *file) | ||
| 243 | { | ||
| 244 | struct rsxx_cram *info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
| 245 | if (!info) | ||
| 246 | return -ENOMEM; | ||
| 247 | |||
| 248 | info->i_private = inode->i_private; | ||
| 249 | info->f_pos = file->f_pos; | ||
| 250 | file->private_data = info; | ||
| 251 | |||
| 252 | return 0; | ||
| 253 | } | ||
| 254 | |||
| 255 | static int rsxx_cram_release(struct inode *inode, struct file *file) | ||
| 256 | { | ||
| 257 | struct rsxx_cram *info = file->private_data; | ||
| 258 | |||
| 259 | if (!info) | ||
| 260 | return 0; | ||
| 261 | |||
| 262 | kfree(info); | ||
| 263 | file->private_data = NULL; | ||
| 264 | |||
| 265 | return 0; | ||
| 266 | } | ||
| 267 | |||
| 268 | static const struct file_operations debugfs_cram_fops = { | ||
| 269 | .owner = THIS_MODULE, | ||
| 270 | .open = rsxx_cram_open, | ||
| 271 | .read = rsxx_cram_read, | ||
| 272 | .write = rsxx_cram_write, | ||
| 273 | .release = rsxx_cram_release, | ||
| 274 | }; | ||
| 275 | |||
| 276 | static const struct file_operations debugfs_stats_fops = { | ||
| 277 | .owner = THIS_MODULE, | ||
| 278 | .open = rsxx_attr_stats_open, | ||
| 279 | .read = seq_read, | ||
| 280 | .llseek = seq_lseek, | ||
| 281 | .release = single_release, | ||
| 282 | }; | ||
| 283 | |||
| 284 | static const struct file_operations debugfs_pci_regs_fops = { | ||
| 285 | .owner = THIS_MODULE, | ||
| 286 | .open = rsxx_attr_pci_regs_open, | ||
| 287 | .read = seq_read, | ||
| 288 | .llseek = seq_lseek, | ||
| 289 | .release = single_release, | ||
| 290 | }; | ||
| 291 | |||
| 292 | static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card) | ||
| 293 | { | ||
| 294 | struct dentry *debugfs_stats; | ||
| 295 | struct dentry *debugfs_pci_regs; | ||
| 296 | struct dentry *debugfs_cram; | ||
| 297 | |||
| 298 | card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL); | ||
| 299 | if (IS_ERR_OR_NULL(card->debugfs_dir)) | ||
| 300 | goto failed_debugfs_dir; | ||
| 301 | |||
| 302 | debugfs_stats = debugfs_create_file("stats", S_IRUGO, | ||
| 303 | card->debugfs_dir, card, | ||
| 304 | &debugfs_stats_fops); | ||
| 305 | if (IS_ERR_OR_NULL(debugfs_stats)) | ||
| 306 | goto failed_debugfs_stats; | ||
| 307 | |||
| 308 | debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO, | ||
| 309 | card->debugfs_dir, card, | ||
| 310 | &debugfs_pci_regs_fops); | ||
| 311 | if (IS_ERR_OR_NULL(debugfs_pci_regs)) | ||
| 312 | goto failed_debugfs_pci_regs; | ||
| 313 | |||
| 314 | debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR, | ||
| 315 | card->debugfs_dir, card, | ||
| 316 | &debugfs_cram_fops); | ||
| 317 | if (IS_ERR_OR_NULL(debugfs_cram)) | ||
| 318 | goto failed_debugfs_cram; | ||
| 319 | |||
| 320 | return; | ||
| 321 | failed_debugfs_cram: | ||
| 322 | debugfs_remove(debugfs_pci_regs); | ||
| 323 | failed_debugfs_pci_regs: | ||
| 324 | debugfs_remove(debugfs_stats); | ||
| 325 | failed_debugfs_stats: | ||
| 326 | debugfs_remove(card->debugfs_dir); | ||
| 327 | failed_debugfs_dir: | ||
| 328 | card->debugfs_dir = NULL; | ||
| 329 | } | ||
| 330 | |||
| 55 | /*----------------- Interrupt Control & Handling -------------------*/ | 331 | /*----------------- Interrupt Control & Handling -------------------*/ |
| 56 | 332 | ||
| 57 | static void rsxx_mask_interrupts(struct rsxx_cardinfo *card) | 333 | static void rsxx_mask_interrupts(struct rsxx_cardinfo *card) |
| @@ -163,12 +439,13 @@ static irqreturn_t rsxx_isr(int irq, void *pdata) | |||
| 163 | } | 439 | } |
| 164 | 440 | ||
| 165 | if (isr & CR_INTR_CREG) { | 441 | if (isr & CR_INTR_CREG) { |
| 166 | schedule_work(&card->creg_ctrl.done_work); | 442 | queue_work(card->creg_ctrl.creg_wq, |
| 443 | &card->creg_ctrl.done_work); | ||
| 167 | handled++; | 444 | handled++; |
| 168 | } | 445 | } |
| 169 | 446 | ||
| 170 | if (isr & CR_INTR_EVENT) { | 447 | if (isr & CR_INTR_EVENT) { |
| 171 | schedule_work(&card->event_work); | 448 | queue_work(card->event_wq, &card->event_work); |
| 172 | rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); | 449 | rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); |
| 173 | handled++; | 450 | handled++; |
| 174 | } | 451 | } |
| @@ -329,7 +606,7 @@ static int rsxx_eeh_frozen(struct pci_dev *dev) | |||
| 329 | int i; | 606 | int i; |
| 330 | int st; | 607 | int st; |
| 331 | 608 | ||
| 332 | dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n"); | 609 | dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n"); |
| 333 | 610 | ||
| 334 | card->eeh_state = 1; | 611 | card->eeh_state = 1; |
| 335 | rsxx_mask_interrupts(card); | 612 | rsxx_mask_interrupts(card); |
| @@ -367,15 +644,26 @@ static void rsxx_eeh_failure(struct pci_dev *dev) | |||
| 367 | { | 644 | { |
| 368 | struct rsxx_cardinfo *card = pci_get_drvdata(dev); | 645 | struct rsxx_cardinfo *card = pci_get_drvdata(dev); |
| 369 | int i; | 646 | int i; |
| 647 | int cnt = 0; | ||
| 370 | 648 | ||
| 371 | dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n"); | 649 | dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n"); |
| 372 | 650 | ||
| 373 | card->eeh_state = 1; | 651 | card->eeh_state = 1; |
| 652 | card->halt = 1; | ||
| 374 | 653 | ||
| 375 | for (i = 0; i < card->n_targets; i++) | 654 | for (i = 0; i < card->n_targets; i++) { |
| 376 | del_timer_sync(&card->ctrl[i].activity_timer); | 655 | spin_lock_bh(&card->ctrl[i].queue_lock); |
| 656 | cnt = rsxx_cleanup_dma_queue(&card->ctrl[i], | ||
| 657 | &card->ctrl[i].queue); | ||
| 658 | spin_unlock_bh(&card->ctrl[i].queue_lock); | ||
| 659 | |||
| 660 | cnt += rsxx_dma_cancel(&card->ctrl[i]); | ||
| 377 | 661 | ||
| 378 | rsxx_eeh_cancel_dmas(card); | 662 | if (cnt) |
| 663 | dev_info(CARD_TO_DEV(card), | ||
| 664 | "Freed %d queued DMAs on channel %d\n", | ||
| 665 | cnt, card->ctrl[i].id); | ||
| 666 | } | ||
| 379 | } | 667 | } |
| 380 | 668 | ||
| 381 | static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) | 669 | static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) |
| @@ -432,7 +720,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) | |||
| 432 | int st; | 720 | int st; |
| 433 | 721 | ||
| 434 | dev_warn(&dev->dev, | 722 | dev_warn(&dev->dev, |
| 435 | "IBM FlashSystem PCI: recovering from slot reset.\n"); | 723 | "IBM Flash Adapter PCI: recovering from slot reset.\n"); |
| 436 | 724 | ||
| 437 | st = pci_enable_device(dev); | 725 | st = pci_enable_device(dev); |
| 438 | if (st) | 726 | if (st) |
| @@ -485,7 +773,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) | |||
| 485 | &card->ctrl[i].issue_dma_work); | 773 | &card->ctrl[i].issue_dma_work); |
| 486 | } | 774 | } |
| 487 | 775 | ||
| 488 | dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n"); | 776 | dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n"); |
| 489 | 777 | ||
| 490 | return PCI_ERS_RESULT_RECOVERED; | 778 | return PCI_ERS_RESULT_RECOVERED; |
| 491 | 779 | ||
| @@ -528,6 +816,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, | |||
| 528 | { | 816 | { |
| 529 | struct rsxx_cardinfo *card; | 817 | struct rsxx_cardinfo *card; |
| 530 | int st; | 818 | int st; |
| 819 | unsigned int sync_timeout; | ||
| 531 | 820 | ||
| 532 | dev_info(&dev->dev, "PCI-Flash SSD discovered\n"); | 821 | dev_info(&dev->dev, "PCI-Flash SSD discovered\n"); |
| 533 | 822 | ||
| @@ -610,7 +899,11 @@ static int rsxx_pci_probe(struct pci_dev *dev, | |||
| 610 | } | 899 | } |
| 611 | 900 | ||
| 612 | /************* Setup Processor Command Interface *************/ | 901 | /************* Setup Processor Command Interface *************/ |
| 613 | rsxx_creg_setup(card); | 902 | st = rsxx_creg_setup(card); |
| 903 | if (st) { | ||
| 904 | dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n"); | ||
| 905 | goto failed_creg_setup; | ||
| 906 | } | ||
| 614 | 907 | ||
| 615 | spin_lock_irq(&card->irq_lock); | 908 | spin_lock_irq(&card->irq_lock); |
| 616 | rsxx_enable_ier_and_isr(card, CR_INTR_CREG); | 909 | rsxx_enable_ier_and_isr(card, CR_INTR_CREG); |
| @@ -650,6 +943,12 @@ static int rsxx_pci_probe(struct pci_dev *dev, | |||
| 650 | } | 943 | } |
| 651 | 944 | ||
| 652 | /************* Setup Card Event Handler *************/ | 945 | /************* Setup Card Event Handler *************/ |
| 946 | card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event"); | ||
| 947 | if (!card->event_wq) { | ||
| 948 | dev_err(CARD_TO_DEV(card), "Failed card event setup.\n"); | ||
| 949 | goto failed_event_handler; | ||
| 950 | } | ||
| 951 | |||
| 653 | INIT_WORK(&card->event_work, card_event_handler); | 952 | INIT_WORK(&card->event_work, card_event_handler); |
| 654 | 953 | ||
| 655 | st = rsxx_setup_dev(card); | 954 | st = rsxx_setup_dev(card); |
| @@ -676,6 +975,33 @@ static int rsxx_pci_probe(struct pci_dev *dev, | |||
| 676 | if (st) | 975 | if (st) |
| 677 | dev_crit(CARD_TO_DEV(card), | 976 | dev_crit(CARD_TO_DEV(card), |
| 678 | "Failed issuing card startup\n"); | 977 | "Failed issuing card startup\n"); |
| 978 | if (sync_start) { | ||
| 979 | sync_timeout = SYNC_START_TIMEOUT; | ||
| 980 | |||
| 981 | dev_info(CARD_TO_DEV(card), | ||
| 982 | "Waiting for card to startup\n"); | ||
| 983 | |||
| 984 | do { | ||
| 985 | ssleep(1); | ||
| 986 | sync_timeout--; | ||
| 987 | |||
| 988 | rsxx_get_card_state(card, &card->state); | ||
| 989 | } while (sync_timeout && | ||
| 990 | (card->state == CARD_STATE_STARTING)); | ||
| 991 | |||
| 992 | if (card->state == CARD_STATE_STARTING) { | ||
| 993 | dev_warn(CARD_TO_DEV(card), | ||
| 994 | "Card startup timed out\n"); | ||
| 995 | card->size8 = 0; | ||
| 996 | } else { | ||
| 997 | dev_info(CARD_TO_DEV(card), | ||
| 998 | "card state: %s\n", | ||
| 999 | rsxx_card_state_to_str(card->state)); | ||
| 1000 | st = rsxx_get_card_size8(card, &card->size8); | ||
| 1001 | if (st) | ||
| 1002 | card->size8 = 0; | ||
| 1003 | } | ||
| 1004 | } | ||
| 679 | } else if (card->state == CARD_STATE_GOOD || | 1005 | } else if (card->state == CARD_STATE_GOOD || |
| 680 | card->state == CARD_STATE_RD_ONLY_FAULT) { | 1006 | card->state == CARD_STATE_RD_ONLY_FAULT) { |
| 681 | st = rsxx_get_card_size8(card, &card->size8); | 1007 | st = rsxx_get_card_size8(card, &card->size8); |
| @@ -685,12 +1011,21 @@ static int rsxx_pci_probe(struct pci_dev *dev, | |||
| 685 | 1011 | ||
| 686 | rsxx_attach_dev(card); | 1012 | rsxx_attach_dev(card); |
| 687 | 1013 | ||
| 1014 | /************* Setup Debugfs *************/ | ||
| 1015 | rsxx_debugfs_dev_new(card); | ||
| 1016 | |||
| 688 | return 0; | 1017 | return 0; |
| 689 | 1018 | ||
| 690 | failed_create_dev: | 1019 | failed_create_dev: |
| 1020 | destroy_workqueue(card->event_wq); | ||
| 1021 | card->event_wq = NULL; | ||
| 1022 | failed_event_handler: | ||
| 691 | rsxx_dma_destroy(card); | 1023 | rsxx_dma_destroy(card); |
| 692 | failed_dma_setup: | 1024 | failed_dma_setup: |
| 693 | failed_compatiblity_check: | 1025 | failed_compatiblity_check: |
| 1026 | destroy_workqueue(card->creg_ctrl.creg_wq); | ||
| 1027 | card->creg_ctrl.creg_wq = NULL; | ||
| 1028 | failed_creg_setup: | ||
| 694 | spin_lock_irq(&card->irq_lock); | 1029 | spin_lock_irq(&card->irq_lock); |
| 695 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); | 1030 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); |
| 696 | spin_unlock_irq(&card->irq_lock); | 1031 | spin_unlock_irq(&card->irq_lock); |
| @@ -756,6 +1091,8 @@ static void rsxx_pci_remove(struct pci_dev *dev) | |||
| 756 | /* Prevent work_structs from re-queuing themselves. */ | 1091 | /* Prevent work_structs from re-queuing themselves. */ |
| 757 | card->halt = 1; | 1092 | card->halt = 1; |
| 758 | 1093 | ||
| 1094 | debugfs_remove_recursive(card->debugfs_dir); | ||
| 1095 | |||
| 759 | free_irq(dev->irq, card); | 1096 | free_irq(dev->irq, card); |
| 760 | 1097 | ||
| 761 | if (!force_legacy) | 1098 | if (!force_legacy) |
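The new rsxx debugfs attributes follow the stock seq_file pattern: a show() callback, single_open() in open(), and seq_read/seq_lseek/single_release in the file_operations, with debugfs_remove_recursive() tearing the directory down on remove. A minimal module-style sketch of that pattern, with placeholder "demo" names rather than the rsxx ones:

    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/seq_file.h>
    #include <linux/fs.h>
    #include <linux/err.h>

    static struct dentry *demo_dir;

    static int demo_show(struct seq_file *m, void *p)
    {
        seq_printf(m, "value 0x%08x\n", 0xdeadbeef); /* one read-only attribute */
        return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
        return single_open(file, demo_show, inode->i_private);
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

    static int __init demo_init(void)
    {
        demo_dir = debugfs_create_dir("demo", NULL);
        if (IS_ERR_OR_NULL(demo_dir))
            return -ENODEV;
        debugfs_create_file("stats", S_IRUGO, demo_dir, NULL, &demo_fops);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
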
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c index 4b5c020a0a65..926dce9c452f 100644 --- a/drivers/block/rsxx/cregs.c +++ b/drivers/block/rsxx/cregs.c | |||
| @@ -431,6 +431,15 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card, | |||
| 431 | *hw_stat = completion.creg_status; | 431 | *hw_stat = completion.creg_status; |
| 432 | 432 | ||
| 433 | if (completion.st) { | 433 | if (completion.st) { |
| 434 | /* | ||
| 435 | * This read is needed to verify that there has not been any | ||
| 436 | * extreme errors that might have occurred, i.e. EEH. The | ||
| 437 | * function iowrite32 will not detect EEH errors, so it is | ||
| 438 | * necessary that we recover if such an error is the reason | ||
| 439 | * for the timeout. This is a dummy read. | ||
| 440 | */ | ||
| 441 | ioread32(card->regmap + SCRATCH); | ||
| 442 | |||
| 434 | dev_warn(CARD_TO_DEV(card), | 443 | dev_warn(CARD_TO_DEV(card), |
| 435 | "creg command failed(%d x%08x)\n", | 444 | "creg command failed(%d x%08x)\n", |
| 436 | completion.st, addr); | 445 | completion.st, addr); |
| @@ -727,6 +736,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card) | |||
| 727 | { | 736 | { |
| 728 | card->creg_ctrl.active_cmd = NULL; | 737 | card->creg_ctrl.active_cmd = NULL; |
| 729 | 738 | ||
| 739 | card->creg_ctrl.creg_wq = | ||
| 740 | create_singlethread_workqueue(DRIVER_NAME"_creg"); | ||
| 741 | if (!card->creg_ctrl.creg_wq) | ||
| 742 | return -ENOMEM; | ||
| 743 | |||
| 730 | INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done); | 744 | INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done); |
| 731 | mutex_init(&card->creg_ctrl.reset_lock); | 745 | mutex_init(&card->creg_ctrl.reset_lock); |
| 732 | INIT_LIST_HEAD(&card->creg_ctrl.queue); | 746 | INIT_LIST_HEAD(&card->creg_ctrl.queue); |
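The comment's reasoning rests on how PCI error containment behaves: writes are posted and simply vanish when a slot is frozen, while a read must complete, which gives the platform's EEH machinery the chance to detect the isolation (an isolated function typically reads back as all ones). A hedged fragment illustrating the usual read-back probe — the helper name and register offset below are hypothetical, not rsxx API:

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_SCRATCH 0x8    /* hypothetical scratch-register offset */

    /* An EEH-isolated function reads back as all ones; issuing the read
     * is also what lets the platform's EEH core notice the frozen slot. */
    static bool slot_looks_frozen(void __iomem *regmap)
    {
        return ioread32(regmap + DEMO_SCRATCH) == 0xFFFFFFFF;
    }
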
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 4346d17d2949..d7af441880be 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c | |||
| @@ -155,7 +155,8 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card, | |||
| 155 | atomic_set(&meta->error, 1); | 155 | atomic_set(&meta->error, 1); |
| 156 | 156 | ||
| 157 | if (atomic_dec_and_test(&meta->pending_dmas)) { | 157 | if (atomic_dec_and_test(&meta->pending_dmas)) { |
| 158 | disk_stats_complete(card, meta->bio, meta->start_time); | 158 | if (!card->eeh_state && card->gendisk) |
| 159 | disk_stats_complete(card, meta->bio, meta->start_time); | ||
| 159 | 160 | ||
| 160 | bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); | 161 | bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); |
| 161 | kmem_cache_free(bio_meta_pool, meta); | 162 | kmem_cache_free(bio_meta_pool, meta); |
| @@ -170,6 +171,12 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) | |||
| 170 | 171 | ||
| 171 | might_sleep(); | 172 | might_sleep(); |
| 172 | 173 | ||
| 174 | if (!card) | ||
| 175 | goto req_err; | ||
| 176 | |||
| 177 | if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk)) | ||
| 178 | goto req_err; | ||
| 179 | |||
| 173 | if (unlikely(card->halt)) { | 180 | if (unlikely(card->halt)) { |
| 174 | st = -EFAULT; | 181 | st = -EFAULT; |
| 175 | goto req_err; | 182 | goto req_err; |
| @@ -196,7 +203,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) | |||
| 196 | atomic_set(&bio_meta->pending_dmas, 0); | 203 | atomic_set(&bio_meta->pending_dmas, 0); |
| 197 | bio_meta->start_time = jiffies; | 204 | bio_meta->start_time = jiffies; |
| 198 | 205 | ||
| 199 | disk_stats_start(card, bio); | 206 | if (!unlikely(card->halt)) |
| 207 | disk_stats_start(card, bio); | ||
| 200 | 208 | ||
| 201 | dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", | 209 | dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", |
| 202 | bio_data_dir(bio) ? 'W' : 'R', bio_meta, | 210 | bio_data_dir(bio) ? 'W' : 'R', bio_meta, |
| @@ -225,24 +233,6 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card) | |||
| 225 | return (pci_rev >= RSXX_DISCARD_SUPPORT); | 233 | return (pci_rev >= RSXX_DISCARD_SUPPORT); |
| 226 | } | 234 | } |
| 227 | 235 | ||
| 228 | static unsigned short rsxx_get_logical_block_size( | ||
| 229 | struct rsxx_cardinfo *card) | ||
| 230 | { | ||
| 231 | u32 capabilities = 0; | ||
| 232 | int st; | ||
| 233 | |||
| 234 | st = rsxx_get_card_capabilities(card, &capabilities); | ||
| 235 | if (st) | ||
| 236 | dev_warn(CARD_TO_DEV(card), | ||
| 237 | "Failed reading card capabilities register\n"); | ||
| 238 | |||
| 239 | /* Earlier firmware did not have support for 512 byte accesses */ | ||
| 240 | if (capabilities & CARD_CAP_SUBPAGE_WRITES) | ||
| 241 | return 512; | ||
| 242 | else | ||
| 243 | return RSXX_HW_BLK_SIZE; | ||
| 244 | } | ||
| 245 | |||
| 246 | int rsxx_attach_dev(struct rsxx_cardinfo *card) | 236 | int rsxx_attach_dev(struct rsxx_cardinfo *card) |
| 247 | { | 237 | { |
| 248 | mutex_lock(&card->dev_lock); | 238 | mutex_lock(&card->dev_lock); |
| @@ -305,7 +295,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) | |||
| 305 | return -ENOMEM; | 295 | return -ENOMEM; |
| 306 | } | 296 | } |
| 307 | 297 | ||
| 308 | blk_size = rsxx_get_logical_block_size(card); | 298 | blk_size = card->config.data.block_size; |
| 309 | 299 | ||
| 310 | blk_queue_make_request(card->queue, rsxx_make_request); | 300 | blk_queue_make_request(card->queue, rsxx_make_request); |
| 311 | blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); | 301 | blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); |
| @@ -347,6 +337,7 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card) | |||
| 347 | card->gendisk = NULL; | 337 | card->gendisk = NULL; |
| 348 | 338 | ||
| 349 | blk_cleanup_queue(card->queue); | 339 | blk_cleanup_queue(card->queue); |
| 340 | card->queue->queuedata = NULL; | ||
| 350 | unregister_blkdev(card->major, DRIVER_NAME); | 341 | unregister_blkdev(card->major, DRIVER_NAME); |
| 351 | } | 342 | } |
| 352 | 343 | ||
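The two new guards in rsxx_make_request() reject a bio when the queue's card pointer has already been torn down and when the request would run past the end of the device. A minimal user-space model of the bounds check, using the same 512-byte-sector arithmetic as the `bi_size >> 9` shift (names and sizes here are illustrative, not the driver's):

```c
#include <stdio.h>
#include <stdint.h>

/* Model of the capacity guard added to rsxx_make_request(): a request
 * starting at 'start_sector' with 'bytes' of payload must end at or
 * before the device capacity, counted in 512-byte sectors. */
static int request_in_bounds(uint64_t start_sector, uint32_t bytes,
			     uint64_t capacity_sectors)
{
	return start_sector + (bytes >> 9) <= capacity_sectors;
}

int main(void)
{
	uint64_t cap = 1 << 20;	/* toy card: 2^20 sectors (512 MiB) */

	printf("%d\n", request_in_bounds(cap - 8, 4096, cap)); /* 1: fits   */
	printf("%d\n", request_in_bounds(cap - 4, 4096, cap)); /* 0: overruns */
	return 0;
}
```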
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index 0607513cfb41..bed32f16b084 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c | |||
| @@ -245,6 +245,22 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, | |||
| 245 | kmem_cache_free(rsxx_dma_pool, dma); | 245 | kmem_cache_free(rsxx_dma_pool, dma); |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, | ||
| 249 | struct list_head *q) | ||
| 250 | { | ||
| 251 | struct rsxx_dma *dma; | ||
| 252 | struct rsxx_dma *tmp; | ||
| 253 | int cnt = 0; | ||
| 254 | |||
| 255 | list_for_each_entry_safe(dma, tmp, q, list) { | ||
| 256 | list_del(&dma->list); | ||
| 257 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); | ||
| 258 | cnt++; | ||
| 259 | } | ||
| 260 | |||
| 261 | return cnt; | ||
| 262 | } | ||
| 263 | |||
| 248 | static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl, | 264 | static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl, |
| 249 | struct rsxx_dma *dma) | 265 | struct rsxx_dma *dma) |
| 250 | { | 266 | { |
| @@ -252,9 +268,10 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl, | |||
| 252 | * Requeued DMAs go to the front of the queue so they are issued | 268 | * Requeued DMAs go to the front of the queue so they are issued |
| 253 | * first. | 269 | * first. |
| 254 | */ | 270 | */ |
| 255 | spin_lock(&ctrl->queue_lock); | 271 | spin_lock_bh(&ctrl->queue_lock); |
| 272 | ctrl->stats.sw_q_depth++; | ||
| 256 | list_add(&dma->list, &ctrl->queue); | 273 | list_add(&dma->list, &ctrl->queue); |
| 257 | spin_unlock(&ctrl->queue_lock); | 274 | spin_unlock_bh(&ctrl->queue_lock); |
| 258 | } | 275 | } |
| 259 | 276 | ||
| 260 | static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, | 277 | static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, |
| @@ -329,6 +346,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, | |||
| 329 | static void dma_engine_stalled(unsigned long data) | 346 | static void dma_engine_stalled(unsigned long data) |
| 330 | { | 347 | { |
| 331 | struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; | 348 | struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; |
| 349 | int cnt; | ||
| 332 | 350 | ||
| 333 | if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || | 351 | if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || |
| 334 | unlikely(ctrl->card->eeh_state)) | 352 | unlikely(ctrl->card->eeh_state)) |
| @@ -349,18 +367,28 @@ static void dma_engine_stalled(unsigned long data) | |||
| 349 | "DMA channel %d has stalled, faulting interface.\n", | 367 | "DMA channel %d has stalled, faulting interface.\n", |
| 350 | ctrl->id); | 368 | ctrl->id); |
| 351 | ctrl->card->dma_fault = 1; | 369 | ctrl->card->dma_fault = 1; |
| 370 | |||
| 371 | /* Clean up the DMA queue */ | ||
| 372 | spin_lock(&ctrl->queue_lock); | ||
| 373 | cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); | ||
| 374 | spin_unlock(&ctrl->queue_lock); | ||
| 375 | |||
| 376 | cnt += rsxx_dma_cancel(ctrl); | ||
| 377 | |||
| 378 | if (cnt) | ||
| 379 | dev_info(CARD_TO_DEV(ctrl->card), | ||
| 380 | "Freed %d queued DMAs on channel %d\n", | ||
| 381 | cnt, ctrl->id); | ||
| 352 | } | 382 | } |
| 353 | } | 383 | } |
| 354 | 384 | ||
| 355 | static void rsxx_issue_dmas(struct work_struct *work) | 385 | static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl) |
| 356 | { | 386 | { |
| 357 | struct rsxx_dma_ctrl *ctrl; | ||
| 358 | struct rsxx_dma *dma; | 387 | struct rsxx_dma *dma; |
| 359 | int tag; | 388 | int tag; |
| 360 | int cmds_pending = 0; | 389 | int cmds_pending = 0; |
| 361 | struct hw_cmd *hw_cmd_buf; | 390 | struct hw_cmd *hw_cmd_buf; |
| 362 | 391 | ||
| 363 | ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); | ||
| 364 | hw_cmd_buf = ctrl->cmd.buf; | 392 | hw_cmd_buf = ctrl->cmd.buf; |
| 365 | 393 | ||
| 366 | if (unlikely(ctrl->card->halt) || | 394 | if (unlikely(ctrl->card->halt) || |
| @@ -368,22 +396,22 @@ static void rsxx_issue_dmas(struct work_struct *work) | |||
| 368 | return; | 396 | return; |
| 369 | 397 | ||
| 370 | while (1) { | 398 | while (1) { |
| 371 | spin_lock(&ctrl->queue_lock); | 399 | spin_lock_bh(&ctrl->queue_lock); |
| 372 | if (list_empty(&ctrl->queue)) { | 400 | if (list_empty(&ctrl->queue)) { |
| 373 | spin_unlock(&ctrl->queue_lock); | 401 | spin_unlock_bh(&ctrl->queue_lock); |
| 374 | break; | 402 | break; |
| 375 | } | 403 | } |
| 376 | spin_unlock(&ctrl->queue_lock); | 404 | spin_unlock_bh(&ctrl->queue_lock); |
| 377 | 405 | ||
| 378 | tag = pop_tracker(ctrl->trackers); | 406 | tag = pop_tracker(ctrl->trackers); |
| 379 | if (tag == -1) | 407 | if (tag == -1) |
| 380 | break; | 408 | break; |
| 381 | 409 | ||
| 382 | spin_lock(&ctrl->queue_lock); | 410 | spin_lock_bh(&ctrl->queue_lock); |
| 383 | dma = list_entry(ctrl->queue.next, struct rsxx_dma, list); | 411 | dma = list_entry(ctrl->queue.next, struct rsxx_dma, list); |
| 384 | list_del(&dma->list); | 412 | list_del(&dma->list); |
| 385 | ctrl->stats.sw_q_depth--; | 413 | ctrl->stats.sw_q_depth--; |
| 386 | spin_unlock(&ctrl->queue_lock); | 414 | spin_unlock_bh(&ctrl->queue_lock); |
| 387 | 415 | ||
| 388 | /* | 416 | /* |
| 389 | * This will catch any DMAs that slipped in right before the | 417 | * This will catch any DMAs that slipped in right before the |
| @@ -440,9 +468,8 @@ static void rsxx_issue_dmas(struct work_struct *work) | |||
| 440 | } | 468 | } |
| 441 | } | 469 | } |
| 442 | 470 | ||
| 443 | static void rsxx_dma_done(struct work_struct *work) | 471 | static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl) |
| 444 | { | 472 | { |
| 445 | struct rsxx_dma_ctrl *ctrl; | ||
| 446 | struct rsxx_dma *dma; | 473 | struct rsxx_dma *dma; |
| 447 | unsigned long flags; | 474 | unsigned long flags; |
| 448 | u16 count; | 475 | u16 count; |
| @@ -450,7 +477,6 @@ static void rsxx_dma_done(struct work_struct *work) | |||
| 450 | u8 tag; | 477 | u8 tag; |
| 451 | struct hw_status *hw_st_buf; | 478 | struct hw_status *hw_st_buf; |
| 452 | 479 | ||
| 453 | ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work); | ||
| 454 | hw_st_buf = ctrl->status.buf; | 480 | hw_st_buf = ctrl->status.buf; |
| 455 | 481 | ||
| 456 | if (unlikely(ctrl->card->halt) || | 482 | if (unlikely(ctrl->card->halt) || |
| @@ -520,33 +546,32 @@ static void rsxx_dma_done(struct work_struct *work) | |||
| 520 | rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id)); | 546 | rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id)); |
| 521 | spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); | 547 | spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); |
| 522 | 548 | ||
| 523 | spin_lock(&ctrl->queue_lock); | 549 | spin_lock_bh(&ctrl->queue_lock); |
| 524 | if (ctrl->stats.sw_q_depth) | 550 | if (ctrl->stats.sw_q_depth) |
| 525 | queue_work(ctrl->issue_wq, &ctrl->issue_dma_work); | 551 | queue_work(ctrl->issue_wq, &ctrl->issue_dma_work); |
| 526 | spin_unlock(&ctrl->queue_lock); | 552 | spin_unlock_bh(&ctrl->queue_lock); |
| 527 | } | 553 | } |
| 528 | 554 | ||
| 529 | static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card, | 555 | static void rsxx_schedule_issue(struct work_struct *work) |
| 530 | struct list_head *q) | ||
| 531 | { | 556 | { |
| 532 | struct rsxx_dma *dma; | 557 | struct rsxx_dma_ctrl *ctrl; |
| 533 | struct rsxx_dma *tmp; | ||
| 534 | int cnt = 0; | ||
| 535 | 558 | ||
| 536 | list_for_each_entry_safe(dma, tmp, q, list) { | 559 | ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); |
| 537 | list_del(&dma->list); | ||
| 538 | 560 | ||
| 539 | if (dma->dma_addr) | 561 | mutex_lock(&ctrl->work_lock); |
| 540 | pci_unmap_page(card->dev, dma->dma_addr, | 562 | rsxx_issue_dmas(ctrl); |
| 541 | get_dma_size(dma), | 563 | mutex_unlock(&ctrl->work_lock); |
| 542 | (dma->cmd == HW_CMD_BLK_WRITE) ? | 564 | } |
| 543 | PCI_DMA_TODEVICE : | ||
| 544 | PCI_DMA_FROMDEVICE); | ||
| 545 | kmem_cache_free(rsxx_dma_pool, dma); | ||
| 546 | cnt++; | ||
| 547 | } | ||
| 548 | 565 | ||
| 549 | return cnt; | 566 | static void rsxx_schedule_done(struct work_struct *work) |
| 567 | { | ||
| 568 | struct rsxx_dma_ctrl *ctrl; | ||
| 569 | |||
| 570 | ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work); | ||
| 571 | |||
| 572 | mutex_lock(&ctrl->work_lock); | ||
| 573 | rsxx_dma_done(ctrl); | ||
| 574 | mutex_unlock(&ctrl->work_lock); | ||
| 550 | } | 575 | } |
| 551 | 576 | ||
| 552 | static int rsxx_queue_discard(struct rsxx_cardinfo *card, | 577 | static int rsxx_queue_discard(struct rsxx_cardinfo *card, |
| @@ -698,10 +723,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | |||
| 698 | 723 | ||
| 699 | for (i = 0; i < card->n_targets; i++) { | 724 | for (i = 0; i < card->n_targets; i++) { |
| 700 | if (!list_empty(&dma_list[i])) { | 725 | if (!list_empty(&dma_list[i])) { |
| 701 | spin_lock(&card->ctrl[i].queue_lock); | 726 | spin_lock_bh(&card->ctrl[i].queue_lock); |
| 702 | card->ctrl[i].stats.sw_q_depth += dma_cnt[i]; | 727 | card->ctrl[i].stats.sw_q_depth += dma_cnt[i]; |
| 703 | list_splice_tail(&dma_list[i], &card->ctrl[i].queue); | 728 | list_splice_tail(&dma_list[i], &card->ctrl[i].queue); |
| 704 | spin_unlock(&card->ctrl[i].queue_lock); | 729 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
| 705 | 730 | ||
| 706 | queue_work(card->ctrl[i].issue_wq, | 731 | queue_work(card->ctrl[i].issue_wq, |
| 707 | &card->ctrl[i].issue_dma_work); | 732 | &card->ctrl[i].issue_dma_work); |
| @@ -711,8 +736,11 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | |||
| 711 | return 0; | 736 | return 0; |
| 712 | 737 | ||
| 713 | bvec_err: | 738 | bvec_err: |
| 714 | for (i = 0; i < card->n_targets; i++) | 739 | for (i = 0; i < card->n_targets; i++) { |
| 715 | rsxx_cleanup_dma_queue(card, &dma_list[i]); | 740 | spin_lock_bh(&card->ctrl[i].queue_lock); |
| 741 | rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]); | ||
| 742 | spin_unlock_bh(&card->ctrl[i].queue_lock); | ||
| 743 | } | ||
| 716 | 744 | ||
| 717 | return st; | 745 | return st; |
| 718 | } | 746 | } |
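Every process-context acquisition of queue_lock in this file moves to the _bh variants because the stalled-engine timer above now takes the same lock from timer (softirq) context; a process-context holder that does not disable bottom halves could be interrupted by that softirq on the same CPU, which would then spin forever. A kernel-style sketch of the rule, with illustrative names (compiles only in-tree, not this driver's code):

```c
#include <linux/spinlock.h>
#include <linux/list.h>

/* A list touched from both process context and timer/softirq context:
 * process context must use the _bh lock variants. */
struct demo_ctrl {
	spinlock_t queue_lock;
	struct list_head queue;
};

/* Process-context path: block bottom halves while holding the lock. */
static void demo_enqueue(struct demo_ctrl *ctrl, struct list_head *item)
{
	spin_lock_bh(&ctrl->queue_lock);
	list_add_tail(item, &ctrl->queue);
	spin_unlock_bh(&ctrl->queue_lock);
}

/* Timer path: already in bottom-half context, plain lock suffices. */
static void demo_timer_drain(struct demo_ctrl *ctrl)
{
	spin_lock(&ctrl->queue_lock);
	while (!list_empty(&ctrl->queue))
		list_del(ctrl->queue.next);
	spin_unlock(&ctrl->queue_lock);
}
```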
| @@ -780,6 +808,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev, | |||
| 780 | spin_lock_init(&ctrl->trackers->lock); | 808 | spin_lock_init(&ctrl->trackers->lock); |
| 781 | 809 | ||
| 782 | spin_lock_init(&ctrl->queue_lock); | 810 | spin_lock_init(&ctrl->queue_lock); |
| 811 | mutex_init(&ctrl->work_lock); | ||
| 783 | INIT_LIST_HEAD(&ctrl->queue); | 812 | INIT_LIST_HEAD(&ctrl->queue); |
| 784 | 813 | ||
| 785 | setup_timer(&ctrl->activity_timer, dma_engine_stalled, | 814 | setup_timer(&ctrl->activity_timer, dma_engine_stalled, |
| @@ -793,8 +822,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev, | |||
| 793 | if (!ctrl->done_wq) | 822 | if (!ctrl->done_wq) |
| 794 | return -ENOMEM; | 823 | return -ENOMEM; |
| 795 | 824 | ||
| 796 | INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); | 825 | INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue); |
| 797 | INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); | 826 | INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done); |
| 798 | 827 | ||
| 799 | st = rsxx_hw_buffers_init(dev, ctrl); | 828 | st = rsxx_hw_buffers_init(dev, ctrl); |
| 800 | if (st) | 829 | if (st) |
| @@ -918,13 +947,30 @@ failed_dma_setup: | |||
| 918 | return st; | 947 | return st; |
| 919 | } | 948 | } |
| 920 | 949 | ||
| 950 | int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl) | ||
| 951 | { | ||
| 952 | struct rsxx_dma *dma; | ||
| 953 | int i; | ||
| 954 | int cnt = 0; | ||
| 955 | |||
| 956 | /* Clean up issued DMAs */ | ||
| 957 | for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) { | ||
| 958 | dma = get_tracker_dma(ctrl->trackers, i); | ||
| 959 | if (dma) { | ||
| 960 | atomic_dec(&ctrl->stats.hw_q_depth); | ||
| 961 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); | ||
| 962 | push_tracker(ctrl->trackers, i); | ||
| 963 | cnt++; | ||
| 964 | } | ||
| 965 | } | ||
| 966 | |||
| 967 | return cnt; | ||
| 968 | } | ||
| 921 | 969 | ||
| 922 | void rsxx_dma_destroy(struct rsxx_cardinfo *card) | 970 | void rsxx_dma_destroy(struct rsxx_cardinfo *card) |
| 923 | { | 971 | { |
| 924 | struct rsxx_dma_ctrl *ctrl; | 972 | struct rsxx_dma_ctrl *ctrl; |
| 925 | struct rsxx_dma *dma; | 973 | int i; |
| 926 | int i, j; | ||
| 927 | int cnt = 0; | ||
| 928 | 974 | ||
| 929 | for (i = 0; i < card->n_targets; i++) { | 975 | for (i = 0; i < card->n_targets; i++) { |
| 930 | ctrl = &card->ctrl[i]; | 976 | ctrl = &card->ctrl[i]; |
| @@ -943,33 +989,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card) | |||
| 943 | del_timer_sync(&ctrl->activity_timer); | 989 | del_timer_sync(&ctrl->activity_timer); |
| 944 | 990 | ||
| 945 | /* Clean up the DMA queue */ | 991 | /* Clean up the DMA queue */ |
| 946 | spin_lock(&ctrl->queue_lock); | 992 | spin_lock_bh(&ctrl->queue_lock); |
| 947 | cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue); | 993 | rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); |
| 948 | spin_unlock(&ctrl->queue_lock); | 994 | spin_unlock_bh(&ctrl->queue_lock); |
| 949 | |||
| 950 | if (cnt) | ||
| 951 | dev_info(CARD_TO_DEV(card), | ||
| 952 | "Freed %d queued DMAs on channel %d\n", | ||
| 953 | cnt, i); | ||
| 954 | |||
| 955 | /* Clean up issued DMAs */ | ||
| 956 | for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) { | ||
| 957 | dma = get_tracker_dma(ctrl->trackers, j); | ||
| 958 | if (dma) { | ||
| 959 | pci_unmap_page(card->dev, dma->dma_addr, | ||
| 960 | get_dma_size(dma), | ||
| 961 | (dma->cmd == HW_CMD_BLK_WRITE) ? | ||
| 962 | PCI_DMA_TODEVICE : | ||
| 963 | PCI_DMA_FROMDEVICE); | ||
| 964 | kmem_cache_free(rsxx_dma_pool, dma); | ||
| 965 | cnt++; | ||
| 966 | } | ||
| 967 | } | ||
| 968 | 995 | ||
| 969 | if (cnt) | 996 | rsxx_dma_cancel(ctrl); |
| 970 | dev_info(CARD_TO_DEV(card), | ||
| 971 | "Freed %d pending DMAs on channel %d\n", | ||
| 972 | cnt, i); | ||
| 973 | 997 | ||
| 974 | vfree(ctrl->trackers); | 998 | vfree(ctrl->trackers); |
| 975 | 999 | ||
| @@ -1013,7 +1037,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | |||
| 1013 | cnt++; | 1037 | cnt++; |
| 1014 | } | 1038 | } |
| 1015 | 1039 | ||
| 1016 | spin_lock(&card->ctrl[i].queue_lock); | 1040 | spin_lock_bh(&card->ctrl[i].queue_lock); |
| 1017 | list_splice(&issued_dmas[i], &card->ctrl[i].queue); | 1041 | list_splice(&issued_dmas[i], &card->ctrl[i].queue); |
| 1018 | 1042 | ||
| 1019 | atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); | 1043 | atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); |
| @@ -1028,7 +1052,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | |||
| 1028 | PCI_DMA_TODEVICE : | 1052 | PCI_DMA_TODEVICE : |
| 1029 | PCI_DMA_FROMDEVICE); | 1053 | PCI_DMA_FROMDEVICE); |
| 1030 | } | 1054 | } |
| 1031 | spin_unlock(&card->ctrl[i].queue_lock); | 1055 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
| 1032 | } | 1056 | } |
| 1033 | 1057 | ||
| 1034 | kfree(issued_dmas); | 1058 | kfree(issued_dmas); |
| @@ -1036,30 +1060,13 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | |||
| 1036 | return 0; | 1060 | return 0; |
| 1037 | } | 1061 | } |
| 1038 | 1062 | ||
| 1039 | void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card) | ||
| 1040 | { | ||
| 1041 | struct rsxx_dma *dma; | ||
| 1042 | struct rsxx_dma *tmp; | ||
| 1043 | int i; | ||
| 1044 | |||
| 1045 | for (i = 0; i < card->n_targets; i++) { | ||
| 1046 | spin_lock(&card->ctrl[i].queue_lock); | ||
| 1047 | list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) { | ||
| 1048 | list_del(&dma->list); | ||
| 1049 | |||
| 1050 | rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED); | ||
| 1051 | } | ||
| 1052 | spin_unlock(&card->ctrl[i].queue_lock); | ||
| 1053 | } | ||
| 1054 | } | ||
| 1055 | |||
| 1056 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) | 1063 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) |
| 1057 | { | 1064 | { |
| 1058 | struct rsxx_dma *dma; | 1065 | struct rsxx_dma *dma; |
| 1059 | int i; | 1066 | int i; |
| 1060 | 1067 | ||
| 1061 | for (i = 0; i < card->n_targets; i++) { | 1068 | for (i = 0; i < card->n_targets; i++) { |
| 1062 | spin_lock(&card->ctrl[i].queue_lock); | 1069 | spin_lock_bh(&card->ctrl[i].queue_lock); |
| 1063 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | 1070 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { |
| 1064 | dma->dma_addr = pci_map_page(card->dev, dma->page, | 1071 | dma->dma_addr = pci_map_page(card->dev, dma->page, |
| 1065 | dma->pg_off, get_dma_size(dma), | 1072 | dma->pg_off, get_dma_size(dma), |
| @@ -1067,12 +1074,12 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) | |||
| 1067 | PCI_DMA_TODEVICE : | 1074 | PCI_DMA_TODEVICE : |
| 1068 | PCI_DMA_FROMDEVICE); | 1075 | PCI_DMA_FROMDEVICE); |
| 1069 | if (!dma->dma_addr) { | 1076 | if (!dma->dma_addr) { |
| 1070 | spin_unlock(&card->ctrl[i].queue_lock); | 1077 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
| 1071 | kmem_cache_free(rsxx_dma_pool, dma); | 1078 | kmem_cache_free(rsxx_dma_pool, dma); |
| 1072 | return -ENOMEM; | 1079 | return -ENOMEM; |
| 1073 | } | 1080 | } |
| 1074 | } | 1081 | } |
| 1075 | spin_unlock(&card->ctrl[i].queue_lock); | 1082 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
| 1076 | } | 1083 | } |
| 1077 | 1084 | ||
| 1078 | return 0; | 1085 | return 0; |
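With the new ctrl->work_lock, rsxx_issue_dmas() and rsxx_dma_done() become plain functions called from thin workqueue wrappers, so the issue and completion paths for one channel can no longer run concurrently. A user-space pthread model of the same serialization (toy names and state, not the driver's):

```c
#include <pthread.h>
#include <stdio.h>

/* Two handlers that used to run concurrently, now serialized
 * per-controller by one mutex, mirroring the shape of
 * rsxx_schedule_issue()/rsxx_schedule_done(). */
struct toy_ctrl {
	pthread_mutex_t work_lock;
	int depth;			/* stands in for sw_q_depth */
};

static void issue_dmas(struct toy_ctrl *c) { c->depth--; }
static void dma_done(struct toy_ctrl *c)   { c->depth++; }

static void *schedule_issue(void *arg)
{
	struct toy_ctrl *c = arg;

	pthread_mutex_lock(&c->work_lock);
	issue_dmas(c);			/* never overlaps dma_done() */
	pthread_mutex_unlock(&c->work_lock);
	return NULL;
}

static void *schedule_done(void *arg)
{
	struct toy_ctrl *c = arg;

	pthread_mutex_lock(&c->work_lock);
	dma_done(c);
	pthread_mutex_unlock(&c->work_lock);
	return NULL;
}

int main(void)
{
	struct toy_ctrl c = { PTHREAD_MUTEX_INITIALIZER, 8 };
	pthread_t a, b;

	pthread_create(&a, NULL, schedule_issue, &c);
	pthread_create(&b, NULL, schedule_done, &c);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("depth %d\n", c.depth);	/* 8: one decrement, one increment */
	return 0;
}
```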
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index 382e8bf5c03b..5ad5055a4104 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
| 40 | #include <linux/timer.h> | 40 | #include <linux/timer.h> |
| 41 | #include <linux/ioctl.h> | 41 | #include <linux/ioctl.h> |
| 42 | #include <linux/delay.h> | ||
| 42 | 43 | ||
| 43 | #include "rsxx.h" | 44 | #include "rsxx.h" |
| 44 | #include "rsxx_cfg.h" | 45 | #include "rsxx_cfg.h" |
| @@ -114,6 +115,7 @@ struct rsxx_dma_ctrl { | |||
| 114 | struct timer_list activity_timer; | 115 | struct timer_list activity_timer; |
| 115 | struct dma_tracker_list *trackers; | 116 | struct dma_tracker_list *trackers; |
| 116 | struct rsxx_dma_stats stats; | 117 | struct rsxx_dma_stats stats; |
| 118 | struct mutex work_lock; | ||
| 117 | }; | 119 | }; |
| 118 | 120 | ||
| 119 | struct rsxx_cardinfo { | 121 | struct rsxx_cardinfo { |
| @@ -134,6 +136,7 @@ struct rsxx_cardinfo { | |||
| 134 | spinlock_t lock; | 136 | spinlock_t lock; |
| 135 | bool active; | 137 | bool active; |
| 136 | struct creg_cmd *active_cmd; | 138 | struct creg_cmd *active_cmd; |
| 139 | struct workqueue_struct *creg_wq; | ||
| 137 | struct work_struct done_work; | 140 | struct work_struct done_work; |
| 138 | struct list_head queue; | 141 | struct list_head queue; |
| 139 | unsigned int q_depth; | 142 | unsigned int q_depth; |
| @@ -154,6 +157,7 @@ struct rsxx_cardinfo { | |||
| 154 | int buf_len; | 157 | int buf_len; |
| 155 | } log; | 158 | } log; |
| 156 | 159 | ||
| 160 | struct workqueue_struct *event_wq; | ||
| 157 | struct work_struct event_work; | 161 | struct work_struct event_work; |
| 158 | unsigned int state; | 162 | unsigned int state; |
| 159 | u64 size8; | 163 | u64 size8; |
| @@ -181,6 +185,8 @@ struct rsxx_cardinfo { | |||
| 181 | 185 | ||
| 182 | int n_targets; | 186 | int n_targets; |
| 183 | struct rsxx_dma_ctrl *ctrl; | 187 | struct rsxx_dma_ctrl *ctrl; |
| 188 | |||
| 189 | struct dentry *debugfs_dir; | ||
| 184 | }; | 190 | }; |
| 185 | 191 | ||
| 186 | enum rsxx_pci_regmap { | 192 | enum rsxx_pci_regmap { |
| @@ -283,6 +289,7 @@ enum rsxx_creg_addr { | |||
| 283 | CREG_ADD_CAPABILITIES = 0x80001050, | 289 | CREG_ADD_CAPABILITIES = 0x80001050, |
| 284 | CREG_ADD_LOG = 0x80002000, | 290 | CREG_ADD_LOG = 0x80002000, |
| 285 | CREG_ADD_NUM_TARGETS = 0x80003000, | 291 | CREG_ADD_NUM_TARGETS = 0x80003000, |
| 292 | CREG_ADD_CRAM = 0xA0000000, | ||
| 286 | CREG_ADD_CONFIG = 0xB0000000, | 293 | CREG_ADD_CONFIG = 0xB0000000, |
| 287 | }; | 294 | }; |
| 288 | 295 | ||
| @@ -372,6 +379,8 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card, | |||
| 372 | int rsxx_dma_setup(struct rsxx_cardinfo *card); | 379 | int rsxx_dma_setup(struct rsxx_cardinfo *card); |
| 373 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); | 380 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); |
| 374 | int rsxx_dma_init(void); | 381 | int rsxx_dma_init(void); |
| 382 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q); | ||
| 383 | int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl); | ||
| 375 | void rsxx_dma_cleanup(void); | 384 | void rsxx_dma_cleanup(void); |
| 376 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); | 385 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); |
| 377 | int rsxx_dma_configure(struct rsxx_cardinfo *card); | 386 | int rsxx_dma_configure(struct rsxx_cardinfo *card); |
| @@ -382,7 +391,6 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | |||
| 382 | void *cb_data); | 391 | void *cb_data); |
| 383 | int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl); | 392 | int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl); |
| 384 | int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card); | 393 | int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card); |
| 385 | void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card); | ||
| 386 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card); | 394 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card); |
| 387 | 395 | ||
| 388 | /***** cregs.c *****/ | 396 | /***** cregs.c *****/ |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index dd5b2fed97e9..bf4b9d282c04 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
| @@ -50,110 +50,118 @@ | |||
| 50 | #include "common.h" | 50 | #include "common.h" |
| 51 | 51 | ||
| 52 | /* | 52 | /* |
| 53 | * These are rather arbitrary. They are fairly large because adjacent requests | 53 | * Maximum number of unused free pages to keep in the internal buffer. |
| 54 | * pulled from a communication ring are quite likely to end up being part of | 54 | * Setting this too low will reduce the memory used by each backend, but |
| 55 | * the same scatter/gather request at the disc. | 55 | * may incur a performance penalty. |
| 56 | * | 56 | * |
| 57 | * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW ** | 57 | * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it |
| 58 | * | 58 | * can be set lower at the cost of performance on some I/O-intensive |
| 59 | * This will increase the chances of being able to write whole tracks. | 59 | * workloads. |
| 60 | * 64 should be enough to keep us competitive with Linux. | ||
| 61 | */ | 60 | */ |
| 62 | static int xen_blkif_reqs = 64; | ||
| 63 | module_param_named(reqs, xen_blkif_reqs, int, 0); | ||
| 64 | MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate"); | ||
| 65 | 61 | ||
| 66 | /* Run-time switchable: /sys/module/blkback/parameters/ */ | 62 | static int xen_blkif_max_buffer_pages = 1024; |
| 67 | static unsigned int log_stats; | 63 | module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644); |
| 68 | module_param(log_stats, int, 0644); | 64 | MODULE_PARM_DESC(max_buffer_pages, |
| 65 | "Maximum number of free pages to keep in each block backend buffer"); | ||
| 69 | 66 | ||
| 70 | /* | 67 | /* |
| 71 | * Each outstanding request that we've passed to the lower device layers has a | 68 | * Maximum number of grants to map persistently in blkback. For maximum |
| 72 | * 'pending_req' allocated to it. Each buffer_head that completes decrements | 69 | * performance this should be the total numbers of grants that can be used |
| 73 | * the pendcnt towards zero. When it hits zero, the specified domain has a | 70 | * to fill the ring, but since this might become too high, specially with |
| 74 | * response queued for it, with the saved 'id' passed back. | 71 | * the use of indirect descriptors, we set it to a value that provides good |
| 72 | * performance without using too much memory. | ||
| 73 | * | ||
| 74 | * When the list of persistent grants is full we clean it up using a LRU | ||
| 75 | * algorithm. | ||
| 75 | */ | 76 | */ |
| 76 | struct pending_req { | ||
| 77 | struct xen_blkif *blkif; | ||
| 78 | u64 id; | ||
| 79 | int nr_pages; | ||
| 80 | atomic_t pendcnt; | ||
| 81 | unsigned short operation; | ||
| 82 | int status; | ||
| 83 | struct list_head free_list; | ||
| 84 | DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
| 85 | }; | ||
| 86 | 77 | ||
| 87 | #define BLKBACK_INVALID_HANDLE (~0) | 78 | static int xen_blkif_max_pgrants = 1056; |
| 79 | module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644); | ||
| 80 | MODULE_PARM_DESC(max_persistent_grants, | ||
| 81 | "Maximum number of grants to map persistently"); | ||
| 88 | 82 | ||
| 89 | struct xen_blkbk { | 83 | /* |
| 90 | struct pending_req *pending_reqs; | 84 | * The LRU mechanism to clean the lists of persistent grants needs to |
| 91 | /* List of all 'pending_req' available */ | 85 | * be executed periodically. The time interval between consecutive executions |
| 92 | struct list_head pending_free; | 86 | * of the purge mechanism is set in ms. |
| 93 | /* And its spinlock. */ | 87 | */ |
| 94 | spinlock_t pending_free_lock; | 88 | #define LRU_INTERVAL 100 |
| 95 | wait_queue_head_t pending_free_wq; | ||
| 96 | /* The list of all pages that are available. */ | ||
| 97 | struct page **pending_pages; | ||
| 98 | /* And the grant handles that are available. */ | ||
| 99 | grant_handle_t *pending_grant_handles; | ||
| 100 | }; | ||
| 101 | |||
| 102 | static struct xen_blkbk *blkbk; | ||
| 103 | 89 | ||
| 104 | /* | 90 | /* |
| 105 | * Maximum number of grant pages that can be mapped in blkback. | 91 | * When the persistent grants list is full we will remove unused grants |
| 106 | * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of | 92 | * from the list. This sets the percentage of grants to be removed at |
| 107 | * pages that blkback will persistently map. | 93 | * each LRU execution. |
| 108 | * Currently, this is: | ||
| 109 | * RING_SIZE = 32 (for all known ring types) | ||
| 110 | * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11 | ||
| 111 | * sizeof(struct persistent_gnt) = 48 | ||
| 112 | * So the maximum memory used to store the grants is: | ||
| 113 | * 32 * 11 * 48 = 16896 bytes | ||
| 114 | */ | 94 | */ |
| 115 | static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol) | 95 | #define LRU_PERCENT_CLEAN 5 |
| 96 | |||
| 97 | /* Run-time switchable: /sys/module/blkback/parameters/ */ | ||
| 98 | static unsigned int log_stats; | ||
| 99 | module_param(log_stats, int, 0644); | ||
| 100 | |||
| 101 | #define BLKBACK_INVALID_HANDLE (~0) | ||
| 102 | |||
| 103 | /* Number of free pages to remove on each call to free_xenballooned_pages */ | ||
| 104 | #define NUM_BATCH_FREE_PAGES 10 | ||
| 105 | |||
| 106 | static inline int get_free_page(struct xen_blkif *blkif, struct page **page) | ||
| 116 | { | 107 | { |
| 117 | switch (protocol) { | 108 | unsigned long flags; |
| 118 | case BLKIF_PROTOCOL_NATIVE: | 109 | |
| 119 | return __CONST_RING_SIZE(blkif, PAGE_SIZE) * | 110 | spin_lock_irqsave(&blkif->free_pages_lock, flags); |
| 120 | BLKIF_MAX_SEGMENTS_PER_REQUEST; | 111 | if (list_empty(&blkif->free_pages)) { |
| 121 | case BLKIF_PROTOCOL_X86_32: | 112 | BUG_ON(blkif->free_pages_num != 0); |
| 122 | return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) * | 113 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); |
| 123 | BLKIF_MAX_SEGMENTS_PER_REQUEST; | 114 | return alloc_xenballooned_pages(1, page, false); |
| 124 | case BLKIF_PROTOCOL_X86_64: | ||
| 125 | return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) * | ||
| 126 | BLKIF_MAX_SEGMENTS_PER_REQUEST; | ||
| 127 | default: | ||
| 128 | BUG(); | ||
| 129 | } | 115 | } |
| 116 | BUG_ON(blkif->free_pages_num == 0); | ||
| 117 | page[0] = list_first_entry(&blkif->free_pages, struct page, lru); | ||
| 118 | list_del(&page[0]->lru); | ||
| 119 | blkif->free_pages_num--; | ||
| 120 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); | ||
| 121 | |||
| 130 | return 0; | 122 | return 0; |
| 131 | } | 123 | } |
| 132 | 124 | ||
| 133 | 125 | static inline void put_free_pages(struct xen_blkif *blkif, struct page **page, | |
| 134 | /* | 126 | int num) |
| 135 | * Little helpful macro to figure out the index and virtual address of the | ||
| 136 | * pending_pages[..]. For each 'pending_req' we have have up to | ||
| 137 | * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through | ||
| 138 | * 10 and would index in the pending_pages[..]. | ||
| 139 | */ | ||
| 140 | static inline int vaddr_pagenr(struct pending_req *req, int seg) | ||
| 141 | { | 127 | { |
| 142 | return (req - blkbk->pending_reqs) * | 128 | unsigned long flags; |
| 143 | BLKIF_MAX_SEGMENTS_PER_REQUEST + seg; | 129 | int i; |
| 144 | } | ||
| 145 | 130 | ||
| 146 | #define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)] | 131 | spin_lock_irqsave(&blkif->free_pages_lock, flags); |
| 132 | for (i = 0; i < num; i++) | ||
| 133 | list_add(&page[i]->lru, &blkif->free_pages); | ||
| 134 | blkif->free_pages_num += num; | ||
| 135 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); | ||
| 136 | } | ||
| 147 | 137 | ||
| 148 | static inline unsigned long vaddr(struct pending_req *req, int seg) | 138 | static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) |
| 149 | { | 139 | { |
| 150 | unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg)); | 140 | /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */ |
| 151 | return (unsigned long)pfn_to_kaddr(pfn); | 141 | struct page *page[NUM_BATCH_FREE_PAGES]; |
| 152 | } | 142 | unsigned int num_pages = 0; |
| 143 | unsigned long flags; | ||
| 153 | 144 | ||
| 154 | #define pending_handle(_req, _seg) \ | 145 | spin_lock_irqsave(&blkif->free_pages_lock, flags); |
| 155 | (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)]) | 146 | while (blkif->free_pages_num > num) { |
| 147 | BUG_ON(list_empty(&blkif->free_pages)); | ||
| 148 | page[num_pages] = list_first_entry(&blkif->free_pages, | ||
| 149 | struct page, lru); | ||
| 150 | list_del(&page[num_pages]->lru); | ||
| 151 | blkif->free_pages_num--; | ||
| 152 | if (++num_pages == NUM_BATCH_FREE_PAGES) { | ||
| 153 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); | ||
| 154 | free_xenballooned_pages(num_pages, page); | ||
| 155 | spin_lock_irqsave(&blkif->free_pages_lock, flags); | ||
| 156 | num_pages = 0; | ||
| 157 | } | ||
| 158 | } | ||
| 159 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); | ||
| 160 | if (num_pages != 0) | ||
| 161 | free_xenballooned_pages(num_pages, page); | ||
| 162 | } | ||
| 156 | 163 | ||
| 164 | #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) | ||
| 157 | 165 | ||
| 158 | static int do_block_io_op(struct xen_blkif *blkif); | 166 | static int do_block_io_op(struct xen_blkif *blkif); |
| 159 | static int dispatch_rw_block_io(struct xen_blkif *blkif, | 167 | static int dispatch_rw_block_io(struct xen_blkif *blkif, |
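get_free_page()/put_free_pages()/shrink_free_pagepool() above implement a bounded free-page cache in front of the Xen balloon allocator: get reuses a pooled page when one exists, put returns pages to the pool, and shrink trims the pool back down to xen_blkif_max_buffer_pages. A user-space model of the pattern (malloc stands in for alloc_xenballooned_pages, a fixed array for the free_pages list):

```c
#include <stdio.h>
#include <stdlib.h>

struct pool { void *pages[4096]; int num; };

/* Reuse a pooled page if one exists, else fall back to the allocator. */
static void *pool_get(struct pool *p)
{
	return p->num ? p->pages[--p->num] : malloc(4096);
}

/* Freed pages go back to the pool instead of the allocator. */
static void pool_put(struct pool *p, void *page)
{
	p->pages[p->num++] = page;
}

/* Trim the pool down to 'keep' entries (0 on shutdown). */
static void pool_shrink(struct pool *p, int keep)
{
	while (p->num > keep)
		free(p->pages[--p->num]);
}

int main(void)
{
	struct pool p = { .num = 0 };
	void *a = pool_get(&p), *b = pool_get(&p);

	pool_put(&p, a);
	pool_put(&p, b);
	pool_shrink(&p, 1);		/* like xen_blkif_max_buffer_pages */
	printf("pooled %d\n", p.num);	/* 1 */
	pool_shrink(&p, 0);
	return 0;
}
```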
| @@ -170,13 +178,29 @@ static void make_response(struct xen_blkif *blkif, u64 id, | |||
| 170 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) | 178 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) |
| 171 | 179 | ||
| 172 | 180 | ||
| 173 | static void add_persistent_gnt(struct rb_root *root, | 181 | /* |
| 182 | * We don't need locking around the persistent grant helpers | ||
| 183 | * because blkback uses a single thread for each backend, so we | ||
| 184 | * can be sure that these functions will never be called recursively. | ||
| 185 | * | ||
| 186 | * The only exception to that is put_persistent_grant, that can be called | ||
| 187 | * from interrupt context (by xen_blkbk_unmap), so we have to use atomic | ||
| 188 | * bit operations to modify the flags of a persistent grant and to count | ||
| 189 | * the number of used grants. | ||
| 190 | */ | ||
| 191 | static int add_persistent_gnt(struct xen_blkif *blkif, | ||
| 174 | struct persistent_gnt *persistent_gnt) | 192 | struct persistent_gnt *persistent_gnt) |
| 175 | { | 193 | { |
| 176 | struct rb_node **new = &(root->rb_node), *parent = NULL; | 194 | struct rb_node **new = NULL, *parent = NULL; |
| 177 | struct persistent_gnt *this; | 195 | struct persistent_gnt *this; |
| 178 | 196 | ||
| 197 | if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) { | ||
| 198 | if (!blkif->vbd.overflow_max_grants) | ||
| 199 | blkif->vbd.overflow_max_grants = 1; | ||
| 200 | return -EBUSY; | ||
| 201 | } | ||
| 179 | /* Figure out where to put new node */ | 202 | /* Figure out where to put new node */ |
| 203 | new = &blkif->persistent_gnts.rb_node; | ||
| 180 | while (*new) { | 204 | while (*new) { |
| 181 | this = container_of(*new, struct persistent_gnt, node); | 205 | this = container_of(*new, struct persistent_gnt, node); |
| 182 | 206 | ||
| @@ -186,22 +210,28 @@ static void add_persistent_gnt(struct rb_root *root, | |||
| 186 | else if (persistent_gnt->gnt > this->gnt) | 210 | else if (persistent_gnt->gnt > this->gnt) |
| 187 | new = &((*new)->rb_right); | 211 | new = &((*new)->rb_right); |
| 188 | else { | 212 | else { |
| 189 | pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n"); | 213 | pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n"); |
| 190 | BUG(); | 214 | return -EINVAL; |
| 191 | } | 215 | } |
| 192 | } | 216 | } |
| 193 | 217 | ||
| 218 | bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); | ||
| 219 | set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | ||
| 194 | /* Add new node and rebalance tree. */ | 220 | /* Add new node and rebalance tree. */ |
| 195 | rb_link_node(&(persistent_gnt->node), parent, new); | 221 | rb_link_node(&(persistent_gnt->node), parent, new); |
| 196 | rb_insert_color(&(persistent_gnt->node), root); | 222 | rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts); |
| 223 | blkif->persistent_gnt_c++; | ||
| 224 | atomic_inc(&blkif->persistent_gnt_in_use); | ||
| 225 | return 0; | ||
| 197 | } | 226 | } |
| 198 | 227 | ||
| 199 | static struct persistent_gnt *get_persistent_gnt(struct rb_root *root, | 228 | static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif, |
| 200 | grant_ref_t gref) | 229 | grant_ref_t gref) |
| 201 | { | 230 | { |
| 202 | struct persistent_gnt *data; | 231 | struct persistent_gnt *data; |
| 203 | struct rb_node *node = root->rb_node; | 232 | struct rb_node *node = NULL; |
| 204 | 233 | ||
| 234 | node = blkif->persistent_gnts.rb_node; | ||
| 205 | while (node) { | 235 | while (node) { |
| 206 | data = container_of(node, struct persistent_gnt, node); | 236 | data = container_of(node, struct persistent_gnt, node); |
| 207 | 237 | ||
| @@ -209,13 +239,31 @@ static struct persistent_gnt *get_persistent_gnt(struct rb_root *root, | |||
| 209 | node = node->rb_left; | 239 | node = node->rb_left; |
| 210 | else if (gref > data->gnt) | 240 | else if (gref > data->gnt) |
| 211 | node = node->rb_right; | 241 | node = node->rb_right; |
| 212 | else | 242 | else { |
| 243 | if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { | ||
| 244 | pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n"); | ||
| 245 | return NULL; | ||
| 246 | } | ||
| 247 | set_bit(PERSISTENT_GNT_ACTIVE, data->flags); | ||
| 248 | atomic_inc(&blkif->persistent_gnt_in_use); | ||
| 213 | return data; | 249 | return data; |
| 250 | } | ||
| 214 | } | 251 | } |
| 215 | return NULL; | 252 | return NULL; |
| 216 | } | 253 | } |
| 217 | 254 | ||
| 218 | static void free_persistent_gnts(struct rb_root *root, unsigned int num) | 255 | static void put_persistent_gnt(struct xen_blkif *blkif, |
| 256 | struct persistent_gnt *persistent_gnt) | ||
| 257 | { | ||
| 258 | if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | ||
| 259 | pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n"); | ||
| 260 | set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | ||
| 261 | clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | ||
| 262 | atomic_dec(&blkif->persistent_gnt_in_use); | ||
| 263 | } | ||
| 264 | |||
| 265 | static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | ||
| 266 | unsigned int num) | ||
| 219 | { | 267 | { |
| 220 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 268 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| 221 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 269 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| @@ -240,7 +288,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) | |||
| 240 | ret = gnttab_unmap_refs(unmap, NULL, pages, | 288 | ret = gnttab_unmap_refs(unmap, NULL, pages, |
| 241 | segs_to_unmap); | 289 | segs_to_unmap); |
| 242 | BUG_ON(ret); | 290 | BUG_ON(ret); |
| 243 | free_xenballooned_pages(segs_to_unmap, pages); | 291 | put_free_pages(blkif, pages, segs_to_unmap); |
| 244 | segs_to_unmap = 0; | 292 | segs_to_unmap = 0; |
| 245 | } | 293 | } |
| 246 | 294 | ||
| @@ -251,21 +299,148 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) | |||
| 251 | BUG_ON(num != 0); | 299 | BUG_ON(num != 0); |
| 252 | } | 300 | } |
| 253 | 301 | ||
| 302 | static void unmap_purged_grants(struct work_struct *work) | ||
| 303 | { | ||
| 304 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
| 305 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
| 306 | struct persistent_gnt *persistent_gnt; | ||
| 307 | int ret, segs_to_unmap = 0; | ||
| 308 | struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); | ||
| 309 | |||
| 310 | while (!list_empty(&blkif->persistent_purge_list)) { | ||
| 311 | persistent_gnt = list_first_entry(&blkif->persistent_purge_list, | ||
| 312 | struct persistent_gnt, | ||
| 313 | remove_node); | ||
| 314 | list_del(&persistent_gnt->remove_node); | ||
| 315 | |||
| 316 | gnttab_set_unmap_op(&unmap[segs_to_unmap], | ||
| 317 | vaddr(persistent_gnt->page), | ||
| 318 | GNTMAP_host_map, | ||
| 319 | persistent_gnt->handle); | ||
| 320 | |||
| 321 | pages[segs_to_unmap] = persistent_gnt->page; | ||
| 322 | |||
| 323 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { | ||
| 324 | ret = gnttab_unmap_refs(unmap, NULL, pages, | ||
| 325 | segs_to_unmap); | ||
| 326 | BUG_ON(ret); | ||
| 327 | put_free_pages(blkif, pages, segs_to_unmap); | ||
| 328 | segs_to_unmap = 0; | ||
| 329 | } | ||
| 330 | kfree(persistent_gnt); | ||
| 331 | } | ||
| 332 | if (segs_to_unmap > 0) { | ||
| 333 | ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); | ||
| 334 | BUG_ON(ret); | ||
| 335 | put_free_pages(blkif, pages, segs_to_unmap); | ||
| 336 | } | ||
| 337 | } | ||
| 338 | |||
| 339 | static void purge_persistent_gnt(struct xen_blkif *blkif) | ||
| 340 | { | ||
| 341 | struct persistent_gnt *persistent_gnt; | ||
| 342 | struct rb_node *n; | ||
| 343 | unsigned int num_clean, total; | ||
| 344 | bool scan_used = false, clean_used = false; | ||
| 345 | struct rb_root *root; | ||
| 346 | |||
| 347 | if (blkif->persistent_gnt_c < xen_blkif_max_pgrants || | ||
| 348 | (blkif->persistent_gnt_c == xen_blkif_max_pgrants && | ||
| 349 | !blkif->vbd.overflow_max_grants)) { | ||
| 350 | return; | ||
| 351 | } | ||
| 352 | |||
| 353 | if (work_pending(&blkif->persistent_purge_work)) { | ||
| 354 | pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n"); | ||
| 355 | return; | ||
| 356 | } | ||
| 357 | |||
| 358 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; | ||
| 359 | num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; | ||
| 360 | num_clean = min(blkif->persistent_gnt_c, num_clean); | ||
| 361 | if ((num_clean == 0) || | ||
| 362 | (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use)))) | ||
| 363 | return; | ||
| 364 | |||
| 365 | /* | ||
| 366 | * At this point we can be sure that there will be no calls | ||
| 367 | * to get_persistent_gnt (because we are executing this code from | ||
| 368 | * xen_blkif_schedule); there can only be calls to put_persistent_gnt, | ||
| 369 | * which means that the number of currently used grants will go down, | ||
| 370 | * but never up, so we will always be able to remove the requested | ||
| 371 | * number of grants. | ||
| 372 | */ | ||
| 373 | |||
| 374 | total = num_clean; | ||
| 375 | |||
| 376 | pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); | ||
| 377 | |||
| 378 | INIT_LIST_HEAD(&blkif->persistent_purge_list); | ||
| 379 | root = &blkif->persistent_gnts; | ||
| 380 | purge_list: | ||
| 381 | foreach_grant_safe(persistent_gnt, n, root, node) { | ||
| 382 | BUG_ON(persistent_gnt->handle == | ||
| 383 | BLKBACK_INVALID_HANDLE); | ||
| 384 | |||
| 385 | if (clean_used) { | ||
| 386 | clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | ||
| 387 | continue; | ||
| 388 | } | ||
| 389 | |||
| 390 | if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | ||
| 391 | continue; | ||
| 392 | if (!scan_used && | ||
| 393 | (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) | ||
| 394 | continue; | ||
| 395 | |||
| 396 | rb_erase(&persistent_gnt->node, root); | ||
| 397 | list_add(&persistent_gnt->remove_node, | ||
| 398 | &blkif->persistent_purge_list); | ||
| 399 | if (--num_clean == 0) | ||
| 400 | goto finished; | ||
| 401 | } | ||
| 402 | /* | ||
| 403 | * If we get here it means we also need to start cleaning | ||
| 404 | * grants that were used since the last purge in order to reach | ||
| 405 | * the requested number. | ||
| 406 | */ | ||
| 407 | if (!scan_used && !clean_used) { | ||
| 408 | pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean); | ||
| 409 | scan_used = true; | ||
| 410 | goto purge_list; | ||
| 411 | } | ||
| 412 | finished: | ||
| 413 | if (!clean_used) { | ||
| 414 | pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n"); | ||
| 415 | clean_used = true; | ||
| 416 | goto purge_list; | ||
| 417 | } | ||
| 418 | |||
| 419 | blkif->persistent_gnt_c -= (total - num_clean); | ||
| 420 | blkif->vbd.overflow_max_grants = 0; | ||
| 421 | |||
| 422 | /* We can defer this work */ | ||
| 423 | INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants); | ||
| 424 | schedule_work(&blkif->persistent_purge_work); | ||
| 425 | pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); | ||
| 426 | return; | ||
| 427 | } | ||
| 428 | |||
| 254 | /* | 429 | /* |
| 255 | * Retrieve from the 'pending_reqs' a free pending_req structure to be used. | 430 | * Retrieve from the 'pending_reqs' a free pending_req structure to be used. |
| 256 | */ | 431 | */ |
| 257 | static struct pending_req *alloc_req(void) | 432 | static struct pending_req *alloc_req(struct xen_blkif *blkif) |
| 258 | { | 433 | { |
| 259 | struct pending_req *req = NULL; | 434 | struct pending_req *req = NULL; |
| 260 | unsigned long flags; | 435 | unsigned long flags; |
| 261 | 436 | ||
| 262 | spin_lock_irqsave(&blkbk->pending_free_lock, flags); | 437 | spin_lock_irqsave(&blkif->pending_free_lock, flags); |
| 263 | if (!list_empty(&blkbk->pending_free)) { | 438 | if (!list_empty(&blkif->pending_free)) { |
| 264 | req = list_entry(blkbk->pending_free.next, struct pending_req, | 439 | req = list_entry(blkif->pending_free.next, struct pending_req, |
| 265 | free_list); | 440 | free_list); |
| 266 | list_del(&req->free_list); | 441 | list_del(&req->free_list); |
| 267 | } | 442 | } |
| 268 | spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); | 443 | spin_unlock_irqrestore(&blkif->pending_free_lock, flags); |
| 269 | return req; | 444 | return req; |
| 270 | } | 445 | } |
| 271 | 446 | ||
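purge_persistent_gnt() above sizes its sweep as the overflow past xen_blkif_max_pgrants plus LRU_PERCENT_CLEAN percent of the maximum, clamps it to the tree size, and gives up unless that many grants are currently idle. A user-space model of the arithmetic (valid only once the tree has reached the maximum, as in the caller):

```c
#include <stdio.h>

/* LRU_PERCENT_CLEAN = 5 in the code above. */
static unsigned int purge_target(unsigned int in_tree, unsigned int max,
				 unsigned int in_use)
{
	unsigned int n = (max / 100) * 5;	/* 5% slack */

	n = in_tree - max + n;			/* plus the overflow */
	if (n > in_tree)
		n = in_tree;			/* clamp to what exists */
	if (n == 0 || n > in_tree - in_use)
		return 0;			/* not enough idle grants */
	return n;
}

int main(void)
{
	/* 1056 max, tree full, 100 grants busy: (1056/100)*5 = 50 */
	printf("%u\n", purge_target(1056, 1056, 100));	/* 50 */
	printf("%u\n", purge_target(1056, 1056, 1056));	/* 0: all in use */
	return 0;
}
```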
| @@ -273,17 +448,17 @@ static struct pending_req *alloc_req(void) | |||
| 273 | * Return the 'pending_req' structure back to the freepool. We also | 448 | * Return the 'pending_req' structure back to the freepool. We also |
| 274 | * wake up the thread if it was waiting for a free page. | 449 | * wake up the thread if it was waiting for a free page. |
| 275 | */ | 450 | */ |
| 276 | static void free_req(struct pending_req *req) | 451 | static void free_req(struct xen_blkif *blkif, struct pending_req *req) |
| 277 | { | 452 | { |
| 278 | unsigned long flags; | 453 | unsigned long flags; |
| 279 | int was_empty; | 454 | int was_empty; |
| 280 | 455 | ||
| 281 | spin_lock_irqsave(&blkbk->pending_free_lock, flags); | 456 | spin_lock_irqsave(&blkif->pending_free_lock, flags); |
| 282 | was_empty = list_empty(&blkbk->pending_free); | 457 | was_empty = list_empty(&blkif->pending_free); |
| 283 | list_add(&req->free_list, &blkbk->pending_free); | 458 | list_add(&req->free_list, &blkif->pending_free); |
| 284 | spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); | 459 | spin_unlock_irqrestore(&blkif->pending_free_lock, flags); |
| 285 | if (was_empty) | 460 | if (was_empty) |
| 286 | wake_up(&blkbk->pending_free_wq); | 461 | wake_up(&blkif->pending_free_wq); |
| 287 | } | 462 | } |
| 288 | 463 | ||
| 289 | /* | 464 | /* |
| @@ -382,10 +557,12 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) | |||
| 382 | static void print_stats(struct xen_blkif *blkif) | 557 | static void print_stats(struct xen_blkif *blkif) |
| 383 | { | 558 | { |
| 384 | pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" | 559 | pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" |
| 385 | " | ds %4llu\n", | 560 | " | ds %4llu | pg: %4u/%4d\n", |
| 386 | current->comm, blkif->st_oo_req, | 561 | current->comm, blkif->st_oo_req, |
| 387 | blkif->st_rd_req, blkif->st_wr_req, | 562 | blkif->st_rd_req, blkif->st_wr_req, |
| 388 | blkif->st_f_req, blkif->st_ds_req); | 563 | blkif->st_f_req, blkif->st_ds_req, |
| 564 | blkif->persistent_gnt_c, | ||
| 565 | xen_blkif_max_pgrants); | ||
| 389 | blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); | 566 | blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); |
| 390 | blkif->st_rd_req = 0; | 567 | blkif->st_rd_req = 0; |
| 391 | blkif->st_wr_req = 0; | 568 | blkif->st_wr_req = 0; |
| @@ -397,6 +574,8 @@ int xen_blkif_schedule(void *arg) | |||
| 397 | { | 574 | { |
| 398 | struct xen_blkif *blkif = arg; | 575 | struct xen_blkif *blkif = arg; |
| 399 | struct xen_vbd *vbd = &blkif->vbd; | 576 | struct xen_vbd *vbd = &blkif->vbd; |
| 577 | unsigned long timeout; | ||
| 578 | int ret; | ||
| 400 | 579 | ||
| 401 | xen_blkif_get(blkif); | 580 | xen_blkif_get(blkif); |
| 402 | 581 | ||
| @@ -406,27 +585,52 @@ int xen_blkif_schedule(void *arg) | |||
| 406 | if (unlikely(vbd->size != vbd_sz(vbd))) | 585 | if (unlikely(vbd->size != vbd_sz(vbd))) |
| 407 | xen_vbd_resize(blkif); | 586 | xen_vbd_resize(blkif); |
| 408 | 587 | ||
| 409 | wait_event_interruptible( | 588 | timeout = msecs_to_jiffies(LRU_INTERVAL); |
| 589 | |||
| 590 | timeout = wait_event_interruptible_timeout( | ||
| 410 | blkif->wq, | 591 | blkif->wq, |
| 411 | blkif->waiting_reqs || kthread_should_stop()); | 592 | blkif->waiting_reqs || kthread_should_stop(), |
| 412 | wait_event_interruptible( | 593 | timeout); |
| 413 | blkbk->pending_free_wq, | 594 | if (timeout == 0) |
| 414 | !list_empty(&blkbk->pending_free) || | 595 | goto purge_gnt_list; |
| 415 | kthread_should_stop()); | 596 | timeout = wait_event_interruptible_timeout( |
| 597 | blkif->pending_free_wq, | ||
| 598 | !list_empty(&blkif->pending_free) || | ||
| 599 | kthread_should_stop(), | ||
| 600 | timeout); | ||
| 601 | if (timeout == 0) | ||
| 602 | goto purge_gnt_list; | ||
| 416 | 603 | ||
| 417 | blkif->waiting_reqs = 0; | 604 | blkif->waiting_reqs = 0; |
| 418 | smp_mb(); /* clear flag *before* checking for work */ | 605 | smp_mb(); /* clear flag *before* checking for work */ |
| 419 | 606 | ||
| 420 | if (do_block_io_op(blkif)) | 607 | ret = do_block_io_op(blkif); |
| 608 | if (ret > 0) | ||
| 421 | blkif->waiting_reqs = 1; | 609 | blkif->waiting_reqs = 1; |
| 610 | if (ret == -EACCES) | ||
| 611 | wait_event_interruptible(blkif->shutdown_wq, | ||
| 612 | kthread_should_stop()); | ||
| 613 | |||
| 614 | purge_gnt_list: | ||
| 615 | if (blkif->vbd.feature_gnt_persistent && | ||
| 616 | time_after(jiffies, blkif->next_lru)) { | ||
| 617 | purge_persistent_gnt(blkif); | ||
| 618 | blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); | ||
| 619 | } | ||
| 620 | |||
| 621 | /* Shrink if we have more than xen_blkif_max_buffer_pages */ | ||
| 622 | shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages); | ||
| 422 | 623 | ||
| 423 | if (log_stats && time_after(jiffies, blkif->st_print)) | 624 | if (log_stats && time_after(jiffies, blkif->st_print)) |
| 424 | print_stats(blkif); | 625 | print_stats(blkif); |
| 425 | } | 626 | } |
| 426 | 627 | ||
| 628 | /* Since we are shutting down, remove all pages from the buffer */ | ||
| 629 | shrink_free_pagepool(blkif, 0 /* All */); | ||
| 630 | |||
| 427 | /* Free all persistent grant pages */ | 631 | /* Free all persistent grant pages */ |
| 428 | if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) | 632 | if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) |
| 429 | free_persistent_gnts(&blkif->persistent_gnts, | 633 | free_persistent_gnts(blkif, &blkif->persistent_gnts, |
| 430 | blkif->persistent_gnt_c); | 634 | blkif->persistent_gnt_c); |
| 431 | 635 | ||
| 432 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); | 636 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); |
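The reworked xen_blkif_schedule() above replaces the two unbounded waits with wait_event_interruptible_timeout(), so the thread wakes at least every LRU_INTERVAL ms; a zero return means the wait timed out, and control jumps straight to the housekeeping label. A kernel-style sketch of that loop shape (illustrative names, compiles only in-tree, not this driver's code):

```c
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

/* The timed wait returns 0 on timeout, which skips request dispatch
 * and falls through to the periodic housekeeping, so the LRU purge
 * and pool shrink run even on an idle ring. */
static int demo_scheduler(void *arg)
{
	wait_queue_head_t *wq = arg;
	long timeout;

	while (!kthread_should_stop()) {
		timeout = wait_event_interruptible_timeout(*wq,
				kthread_should_stop(),
				msecs_to_jiffies(100));	/* LRU_INTERVAL */
		if (timeout == 0)
			goto housekeeping;

		/* ... consume ring requests here ... */
housekeeping:
		/* ... purge persistent grants, shrink the page pool ... */
		;
	}
	return 0;
}
```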
| @@ -441,148 +645,98 @@ int xen_blkif_schedule(void *arg) | |||
| 441 | return 0; | 645 | return 0; |
| 442 | } | 646 | } |
| 443 | 647 | ||
| 444 | struct seg_buf { | ||
| 445 | unsigned int offset; | ||
| 446 | unsigned int nsec; | ||
| 447 | }; | ||
| 448 | /* | 648 | /* |
| 449 | * Unmap the grant references, and also remove the M2P over-rides | 649 | * Unmap the grant references, and also remove the M2P over-rides |
| 450 | * used in the 'pending_req'. | 650 | * used in the 'pending_req'. |
| 451 | */ | 651 | */ |
| 452 | static void xen_blkbk_unmap(struct pending_req *req) | 652 | static void xen_blkbk_unmap(struct xen_blkif *blkif, |
| 653 | struct grant_page *pages[], | ||
| 654 | int num) | ||
| 453 | { | 655 | { |
| 454 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 656 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| 455 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 657 | struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| 456 | unsigned int i, invcount = 0; | 658 | unsigned int i, invcount = 0; |
| 457 | grant_handle_t handle; | ||
| 458 | int ret; | 659 | int ret; |
| 459 | 660 | ||
| 460 | for (i = 0; i < req->nr_pages; i++) { | 661 | for (i = 0; i < num; i++) { |
| 461 | if (!test_bit(i, req->unmap_seg)) | 662 | if (pages[i]->persistent_gnt != NULL) { |
| 663 | put_persistent_gnt(blkif, pages[i]->persistent_gnt); | ||
| 462 | continue; | 664 | continue; |
| 463 | handle = pending_handle(req, i); | 665 | } |
| 464 | if (handle == BLKBACK_INVALID_HANDLE) | 666 | if (pages[i]->handle == BLKBACK_INVALID_HANDLE) |
| 465 | continue; | 667 | continue; |
| 466 | gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), | 668 | unmap_pages[invcount] = pages[i]->page; |
| 467 | GNTMAP_host_map, handle); | 669 | gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page), |
| 468 | pending_handle(req, i) = BLKBACK_INVALID_HANDLE; | 670 | GNTMAP_host_map, pages[i]->handle); |
| 469 | pages[invcount] = virt_to_page(vaddr(req, i)); | 671 | pages[i]->handle = BLKBACK_INVALID_HANDLE; |
| 470 | invcount++; | 672 | if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { |
| 673 | ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, | ||
| 674 | invcount); | ||
| 675 | BUG_ON(ret); | ||
| 676 | put_free_pages(blkif, unmap_pages, invcount); | ||
| 677 | invcount = 0; | ||
| 678 | } | ||
| 679 | } | ||
| 680 | if (invcount) { | ||
| 681 | ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); | ||
| 682 | BUG_ON(ret); | ||
| 683 | put_free_pages(blkif, unmap_pages, invcount); | ||
| 471 | } | 684 | } |
| 472 | |||
| 473 | ret = gnttab_unmap_refs(unmap, NULL, pages, invcount); | ||
| 474 | BUG_ON(ret); | ||
| 475 | } | 685 | } |
| 476 | 686 | ||
| 477 | static int xen_blkbk_map(struct blkif_request *req, | 687 | static int xen_blkbk_map(struct xen_blkif *blkif, |
| 478 | struct pending_req *pending_req, | 688 | struct grant_page *pages[], |
| 479 | struct seg_buf seg[], | 689 | int num, bool ro) |
| 480 | struct page *pages[]) | ||
| 481 | { | 690 | { |
| 482 | struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 691 | struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| 483 | struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
| 484 | struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 692 | struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
| 485 | struct persistent_gnt *persistent_gnt = NULL; | 693 | struct persistent_gnt *persistent_gnt = NULL; |
| 486 | struct xen_blkif *blkif = pending_req->blkif; | ||
| 487 | phys_addr_t addr = 0; | 694 | phys_addr_t addr = 0; |
| 488 | int i, j; | 695 | int i, seg_idx, new_map_idx; |
| 489 | bool new_map; | ||
| 490 | int nseg = req->u.rw.nr_segments; | ||
| 491 | int segs_to_map = 0; | 696 | int segs_to_map = 0; |
| 492 | int ret = 0; | 697 | int ret = 0; |
| 698 | int last_map = 0, map_until = 0; | ||
| 493 | int use_persistent_gnts; | 699 | int use_persistent_gnts; |
| 494 | 700 | ||
| 495 | use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); | 701 | use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); |
| 496 | 702 | ||
| 497 | BUG_ON(blkif->persistent_gnt_c > | ||
| 498 | max_mapped_grant_pages(pending_req->blkif->blk_protocol)); | ||
| 499 | |||
| 500 | /* | 703 | /* |
| 501 | * Fill out preq.nr_sects with proper amount of sectors, and setup | 704 | * Fill out preq.nr_sects with proper amount of sectors, and setup |
| 502 | * assign map[..] with the PFN of the page in our domain with the | 705 | * assign map[..] with the PFN of the page in our domain with the |
| 503 | * corresponding grant reference for each page. | 706 | * corresponding grant reference for each page. |
| 504 | */ | 707 | */ |
| 505 | for (i = 0; i < nseg; i++) { | 708 | again: |
| 709 | for (i = map_until; i < num; i++) { | ||
| 506 | uint32_t flags; | 710 | uint32_t flags; |
| 507 | 711 | ||
| 508 | if (use_persistent_gnts) | 712 | if (use_persistent_gnts) |
| 509 | persistent_gnt = get_persistent_gnt( | 713 | persistent_gnt = get_persistent_gnt( |
| 510 | &blkif->persistent_gnts, | 714 | blkif, |
| 511 | req->u.rw.seg[i].gref); | 715 | pages[i]->gref); |
| 512 | 716 | ||
| 513 | if (persistent_gnt) { | 717 | if (persistent_gnt) { |
| 514 | /* | 718 | /* |
| 515 | * We are using persistent grants and | 719 | * We are using persistent grants and |
| 516 | * the grant is already mapped | 720 | * the grant is already mapped |
| 517 | */ | 721 | */ |
| 518 | new_map = false; | 722 | pages[i]->page = persistent_gnt->page; |
| 519 | } else if (use_persistent_gnts && | 723 | pages[i]->persistent_gnt = persistent_gnt; |
| 520 | blkif->persistent_gnt_c < | ||
| 521 | max_mapped_grant_pages(blkif->blk_protocol)) { | ||
| 522 | /* | ||
| 523 | * We are using persistent grants, the grant is | ||
| 524 | * not mapped but we have room for it | ||
| 525 | */ | ||
| 526 | new_map = true; | ||
| 527 | persistent_gnt = kmalloc( | ||
| 528 | sizeof(struct persistent_gnt), | ||
| 529 | GFP_KERNEL); | ||
| 530 | if (!persistent_gnt) | ||
| 531 | return -ENOMEM; | ||
| 532 | if (alloc_xenballooned_pages(1, &persistent_gnt->page, | ||
| 533 | false)) { | ||
| 534 | kfree(persistent_gnt); | ||
| 535 | return -ENOMEM; | ||
| 536 | } | ||
| 537 | persistent_gnt->gnt = req->u.rw.seg[i].gref; | ||
| 538 | persistent_gnt->handle = BLKBACK_INVALID_HANDLE; | ||
| 539 | |||
| 540 | pages_to_gnt[segs_to_map] = | ||
| 541 | persistent_gnt->page; | ||
| 542 | addr = (unsigned long) pfn_to_kaddr( | ||
| 543 | page_to_pfn(persistent_gnt->page)); | ||
| 544 | |||
| 545 | add_persistent_gnt(&blkif->persistent_gnts, | ||
| 546 | persistent_gnt); | ||
| 547 | blkif->persistent_gnt_c++; | ||
| 548 | pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", | ||
| 549 | persistent_gnt->gnt, blkif->persistent_gnt_c, | ||
| 550 | max_mapped_grant_pages(blkif->blk_protocol)); | ||
| 551 | } else { | 724 | } else { |
| 552 | /* | 725 | if (get_free_page(blkif, &pages[i]->page)) |
| 553 | * We are either using persistent grants and | 726 | goto out_of_memory; |
| 554 | * hit the maximum limit of grants mapped, | 727 | addr = vaddr(pages[i]->page); |
| 555 | * or we are not using persistent grants. | 728 | pages_to_gnt[segs_to_map] = pages[i]->page; |
| 556 | */ | 729 | pages[i]->persistent_gnt = NULL; |
| 557 | if (use_persistent_gnts && | ||
| 558 | !blkif->vbd.overflow_max_grants) { | ||
| 559 | blkif->vbd.overflow_max_grants = 1; | ||
| 560 | pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", | ||
| 561 | blkif->domid, blkif->vbd.handle); | ||
| 562 | } | ||
| 563 | new_map = true; | ||
| 564 | pages[i] = blkbk->pending_page(pending_req, i); | ||
| 565 | addr = vaddr(pending_req, i); | ||
| 566 | pages_to_gnt[segs_to_map] = | ||
| 567 | blkbk->pending_page(pending_req, i); | ||
| 568 | } | ||
| 569 | |||
| 570 | if (persistent_gnt) { | ||
| 571 | pages[i] = persistent_gnt->page; | ||
| 572 | persistent_gnts[i] = persistent_gnt; | ||
| 573 | } else { | ||
| 574 | persistent_gnts[i] = NULL; | ||
| 575 | } | ||
| 576 | |||
| 577 | if (new_map) { | ||
| 578 | flags = GNTMAP_host_map; | 730 | flags = GNTMAP_host_map; |
| 579 | if (!persistent_gnt && | 731 | if (!use_persistent_gnts && ro) |
| 580 | (pending_req->operation != BLKIF_OP_READ)) | ||
| 581 | flags |= GNTMAP_readonly; | 732 | flags |= GNTMAP_readonly; |
| 582 | gnttab_set_map_op(&map[segs_to_map++], addr, | 733 | gnttab_set_map_op(&map[segs_to_map++], addr, |
| 583 | flags, req->u.rw.seg[i].gref, | 734 | flags, pages[i]->gref, |
| 584 | blkif->domid); | 735 | blkif->domid); |
| 585 | } | 736 | } |
| 737 | map_until = i + 1; | ||
| 738 | if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST) | ||
| 739 | break; | ||
| 586 | } | 740 | } |
| 587 | 741 | ||
| 588 | if (segs_to_map) { | 742 | if (segs_to_map) { |
| @@ -595,49 +749,133 @@ static int xen_blkbk_map(struct blkif_request *req, | |||
| 595 | * so that when we access vaddr(pending_req,i) it has the contents of | 749 | * so that when we access vaddr(pending_req,i) it has the contents of |
| 596 | * the page from the other domain. | 750 | * the page from the other domain. |
| 597 | */ | 751 | */ |
| 598 | bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST); | 752 | for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) { |
| 599 | for (i = 0, j = 0; i < nseg; i++) { | 753 | if (!pages[seg_idx]->persistent_gnt) { |
| 600 | if (!persistent_gnts[i] || | ||
| 601 | persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) { | ||
| 602 | /* This is a newly mapped grant */ | 754 | /* This is a newly mapped grant */ |
| 603 | BUG_ON(j >= segs_to_map); | 755 | BUG_ON(new_map_idx >= segs_to_map); |
| 604 | if (unlikely(map[j].status != 0)) { | 756 | if (unlikely(map[new_map_idx].status != 0)) { |
| 605 | pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); | 757 | pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); |
| 606 | map[j].handle = BLKBACK_INVALID_HANDLE; | 758 | pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; |
| 607 | ret |= 1; | 759 | ret |= 1; |
| 608 | if (persistent_gnts[i]) { | 760 | goto next; |
| 609 | rb_erase(&persistent_gnts[i]->node, | ||
| 610 | &blkif->persistent_gnts); | ||
| 611 | blkif->persistent_gnt_c--; | ||
| 612 | kfree(persistent_gnts[i]); | ||
| 613 | persistent_gnts[i] = NULL; | ||
| 614 | } | ||
| 615 | } | 761 | } |
| 762 | pages[seg_idx]->handle = map[new_map_idx].handle; | ||
| 763 | } else { | ||
| 764 | continue; | ||
| 616 | } | 765 | } |
| 617 | if (persistent_gnts[i]) { | 766 | if (use_persistent_gnts && |
| 618 | if (persistent_gnts[i]->handle == | 767 | blkif->persistent_gnt_c < xen_blkif_max_pgrants) { |
| 619 | BLKBACK_INVALID_HANDLE) { | 768 | /* |
| 769 | * We are using persistent grants; the grant is | ||
| 770 | * not mapped but we might have room for it. | ||
| 771 | */ | ||
| 772 | persistent_gnt = kmalloc(sizeof(struct persistent_gnt), | ||
| 773 | GFP_KERNEL); | ||
| 774 | if (!persistent_gnt) { | ||
| 620 | /* | 775 | /* |
| 621 | * If this is a new persistent grant | 776 | * If we don't have enough memory to |
| 622 | * save the handler | 777 | * allocate the persistent_gnt struct |
| 778 | * map this grant non-persistently | ||
| 623 | */ | 779 | */ |
| 624 | persistent_gnts[i]->handle = map[j++].handle; | 780 | goto next; |
| 625 | } | 781 | } |
| 626 | pending_handle(pending_req, i) = | 782 | persistent_gnt->gnt = map[new_map_idx].ref; |
| 627 | persistent_gnts[i]->handle; | 783 | persistent_gnt->handle = map[new_map_idx].handle; |
| 784 | persistent_gnt->page = pages[seg_idx]->page; | ||
| 785 | if (add_persistent_gnt(blkif, | ||
| 786 | persistent_gnt)) { | ||
| 787 | kfree(persistent_gnt); | ||
| 788 | persistent_gnt = NULL; | ||
| 789 | goto next; | ||
| 790 | } | ||
| 791 | pages[seg_idx]->persistent_gnt = persistent_gnt; | ||
| 792 | pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n", | ||
| 793 | persistent_gnt->gnt, blkif->persistent_gnt_c, | ||
| 794 | xen_blkif_max_pgrants); | ||
| 795 | goto next; | ||
| 796 | } | ||
| 797 | if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { | ||
| 798 | blkif->vbd.overflow_max_grants = 1; | ||
| 799 | pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n", | ||
| 800 | blkif->domid, blkif->vbd.handle); | ||
| 801 | } | ||
| 802 | /* | ||
| 803 | * We could not map this grant persistently, so use it as | ||
| 804 | * a non-persistent grant. | ||
| 805 | */ | ||
| 806 | next: | ||
| 807 | new_map_idx++; | ||
| 808 | } | ||
| 809 | segs_to_map = 0; | ||
| 810 | last_map = map_until; | ||
| 811 | if (map_until != num) | ||
| 812 | goto again; | ||
| 628 | 813 | ||
| 629 | if (ret) | 814 | return ret; |
| 630 | continue; | 815 | |
| 631 | } else { | 816 | out_of_memory: |
| 632 | pending_handle(pending_req, i) = map[j++].handle; | 817 | pr_alert(DRV_PFX "%s: out of memory\n", __func__); |
| 633 | bitmap_set(pending_req->unmap_seg, i, 1); | 818 | put_free_pages(blkif, pages_to_gnt, segs_to_map); |
| 819 | return -ENOMEM; | ||
| 820 | } | ||
| 821 | |||
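The reworked xen_blkbk_map() above no longer assumes a request fits in one grant-mapping hypercall: it walks the grant list in chunks of BLKIF_MAX_SEGMENTS_PER_REQUEST, carrying `last_map`/`map_until` across passes of the `again` loop. A minimal sketch of that batching pattern, with the hypercall reduced to a hypothetical `flush_batch()`:

```c
/* Sketch of the batching loop above: stage up to BATCH items, flush,
 * and resume where the previous pass stopped. BATCH stands in for
 * BLKIF_MAX_SEGMENTS_PER_REQUEST; flush_batch() is hypothetical. */
#define BATCH 11

static void map_in_batches(int num)
{
	int last_map = 0, map_until = 0, staged = 0;

again:
	for (int i = map_until; i < num; i++) {
		/* ... stage item i for mapping ... */
		staged++;
		map_until = i + 1;
		if (staged == BATCH)
			break;			/* batch full: flush it */
	}
	/* flush_batch(last_map, map_until); one hypercall per batch */
	staged = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;			/* more grants to map */
}
```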
| 822 | static int xen_blkbk_map_seg(struct pending_req *pending_req) | ||
| 823 | { | ||
| 824 | int rc; | ||
| 825 | |||
| 826 | rc = xen_blkbk_map(pending_req->blkif, pending_req->segments, | ||
| 827 | pending_req->nr_pages, | ||
| 828 | (pending_req->operation != BLKIF_OP_READ)); | ||
| 829 | |||
| 830 | return rc; | ||
| 831 | } | ||
| 634 | 832 | ||
| 635 | if (ret) | 833 | static int xen_blkbk_parse_indirect(struct blkif_request *req, |
| 636 | continue; | 834 | struct pending_req *pending_req, |
| 835 | struct seg_buf seg[], | ||
| 836 | struct phys_req *preq) | ||
| 837 | { | ||
| 838 | struct grant_page **pages = pending_req->indirect_pages; | ||
| 839 | struct xen_blkif *blkif = pending_req->blkif; | ||
| 840 | int indirect_grefs, rc, n, nseg, i; | ||
| 841 | struct blkif_request_segment_aligned *segments = NULL; | ||
| 842 | |||
| 843 | nseg = pending_req->nr_pages; | ||
| 844 | indirect_grefs = INDIRECT_PAGES(nseg); | ||
| 845 | BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); | ||
| 846 | |||
| 847 | for (i = 0; i < indirect_grefs; i++) | ||
| 848 | pages[i]->gref = req->u.indirect.indirect_grefs[i]; | ||
| 849 | |||
| 850 | rc = xen_blkbk_map(blkif, pages, indirect_grefs, true); | ||
| 851 | if (rc) | ||
| 852 | goto unmap; | ||
| 853 | |||
| 854 | for (n = 0, i = 0; n < nseg; n++) { | ||
| 855 | if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { | ||
| 856 | /* Map indirect segments */ | ||
| 857 | if (segments) | ||
| 858 | kunmap_atomic(segments); | ||
| 859 | segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); | ||
| 860 | } | ||
| 861 | i = n % SEGS_PER_INDIRECT_FRAME; | ||
| 862 | pending_req->segments[n]->gref = segments[i].gref; | ||
| 863 | seg[n].nsec = segments[i].last_sect - | ||
| 864 | segments[i].first_sect + 1; | ||
| 865 | seg[n].offset = (segments[i].first_sect << 9); | ||
| 866 | if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) || | ||
| 867 | (segments[i].last_sect < segments[i].first_sect)) { | ||
| 868 | rc = -EINVAL; | ||
| 869 | goto unmap; | ||
| 637 | } | 870 | } |
| 638 | seg[i].offset = (req->u.rw.seg[i].first_sect << 9); | 871 | preq->nr_sects += seg[n].nsec; |
| 639 | } | 872 | } |
| 640 | return ret; | 873 | |
| 874 | unmap: | ||
| 875 | if (segments) | ||
| 876 | kunmap_atomic(segments); | ||
| 877 | xen_blkbk_unmap(blkif, pages, indirect_grefs); | ||
| 878 | return rc; | ||
| 641 | } | 879 | } |
| 642 | 880 | ||
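xen_blkbk_parse_indirect() above locates segment n with a div/mod pair: n / SEGS_PER_INDIRECT_FRAME picks the indirect page to kmap, and n % SEGS_PER_INDIRECT_FRAME the entry inside it. A worked sketch, assuming 4 KiB pages and 8-byte segment entries (so 512 entries per indirect page):

```c
/* Sketch of the indexing above, assuming PAGE_SIZE = 4096 and
 * sizeof(struct blkif_request_segment_aligned) = 8. */
#define SEGS_PER_FRAME (4096 / 8)	/* 512 entries per indirect page */

static void locate_segment(unsigned int n,
			   unsigned int *page_idx, unsigned int *entry_idx)
{
	*page_idx  = n / SEGS_PER_FRAME;	/* which indirect page */
	*entry_idx = n % SEGS_PER_FRAME;	/* which entry within it */
}
/* e.g. segment 700 is entry 188 of indirect page 1. */
```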
| 643 | static int dispatch_discard_io(struct xen_blkif *blkif, | 881 | static int dispatch_discard_io(struct xen_blkif *blkif, |
| @@ -647,7 +885,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif, | |||
| 647 | int status = BLKIF_RSP_OKAY; | 885 | int status = BLKIF_RSP_OKAY; |
| 648 | struct block_device *bdev = blkif->vbd.bdev; | 886 | struct block_device *bdev = blkif->vbd.bdev; |
| 649 | unsigned long secure; | 887 | unsigned long secure; |
| 888 | struct phys_req preq; | ||
| 889 | |||
| 890 | preq.sector_number = req->u.discard.sector_number; | ||
| 891 | preq.nr_sects = req->u.discard.nr_sectors; | ||
| 650 | 892 | ||
| 893 | err = xen_vbd_translate(&preq, blkif, WRITE); | ||
| 894 | if (err) { | ||
| 895 | pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n", | ||
| 896 | preq.sector_number, | ||
| 897 | preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); | ||
| 898 | goto fail_response; | ||
| 899 | } | ||
| 651 | blkif->st_ds_req++; | 900 | blkif->st_ds_req++; |
| 652 | 901 | ||
| 653 | xen_blkif_get(blkif); | 902 | xen_blkif_get(blkif); |
| @@ -658,7 +907,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif, | |||
| 658 | err = blkdev_issue_discard(bdev, req->u.discard.sector_number, | 907 | err = blkdev_issue_discard(bdev, req->u.discard.sector_number, |
| 659 | req->u.discard.nr_sectors, | 908 | req->u.discard.nr_sectors, |
| 660 | GFP_KERNEL, secure); | 909 | GFP_KERNEL, secure); |
| 661 | 910 | fail_response: | |
| 662 | if (err == -EOPNOTSUPP) { | 911 | if (err == -EOPNOTSUPP) { |
| 663 | pr_debug(DRV_PFX "discard op failed, not supported\n"); | 912 | pr_debug(DRV_PFX "discard op failed, not supported\n"); |
| 664 | status = BLKIF_RSP_EOPNOTSUPP; | 913 | status = BLKIF_RSP_EOPNOTSUPP; |
| @@ -674,7 +923,7 @@ static int dispatch_other_io(struct xen_blkif *blkif, | |||
| 674 | struct blkif_request *req, | 923 | struct blkif_request *req, |
| 675 | struct pending_req *pending_req) | 924 | struct pending_req *pending_req) |
| 676 | { | 925 | { |
| 677 | free_req(pending_req); | 926 | free_req(blkif, pending_req); |
| 678 | make_response(blkif, req->u.other.id, req->operation, | 927 | make_response(blkif, req->u.other.id, req->operation, |
| 679 | BLKIF_RSP_EOPNOTSUPP); | 928 | BLKIF_RSP_EOPNOTSUPP); |
| 680 | return -EIO; | 929 | return -EIO; |
| @@ -726,7 +975,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) | |||
| 726 | * the proper response on the ring. | 975 | * the proper response on the ring. |
| 727 | */ | 976 | */ |
| 728 | if (atomic_dec_and_test(&pending_req->pendcnt)) { | 977 | if (atomic_dec_and_test(&pending_req->pendcnt)) { |
| 729 | xen_blkbk_unmap(pending_req); | 978 | xen_blkbk_unmap(pending_req->blkif, |
| 979 | pending_req->segments, | ||
| 980 | pending_req->nr_pages); | ||
| 730 | make_response(pending_req->blkif, pending_req->id, | 981 | make_response(pending_req->blkif, pending_req->id, |
| 731 | pending_req->operation, pending_req->status); | 982 | pending_req->operation, pending_req->status); |
| 732 | xen_blkif_put(pending_req->blkif); | 983 | xen_blkif_put(pending_req->blkif); |
| @@ -734,7 +985,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) | |||
| 734 | if (atomic_read(&pending_req->blkif->drain)) | 985 | if (atomic_read(&pending_req->blkif->drain)) |
| 735 | complete(&pending_req->blkif->drain_complete); | 986 | complete(&pending_req->blkif->drain_complete); |
| 736 | } | 987 | } |
| 737 | free_req(pending_req); | 988 | free_req(pending_req->blkif, pending_req); |
| 738 | } | 989 | } |
| 739 | } | 990 | } |
| 740 | 991 | ||
| @@ -767,6 +1018,12 @@ __do_block_io_op(struct xen_blkif *blkif) | |||
| 767 | rp = blk_rings->common.sring->req_prod; | 1018 | rp = blk_rings->common.sring->req_prod; |
| 768 | rmb(); /* Ensure we see queued requests up to 'rp'. */ | 1019 | rmb(); /* Ensure we see queued requests up to 'rp'. */ |
| 769 | 1020 | ||
| 1021 | if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) { | ||
| 1022 | rc = blk_rings->common.rsp_prod_pvt; | ||
| 1023 | pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n", | ||
| 1024 | rp, rc, rp - rc, blkif->vbd.pdevice); | ||
| 1025 | return -EACCES; | ||
| 1026 | } | ||
| 770 | while (rc != rp) { | 1027 | while (rc != rp) { |
| 771 | 1028 | ||
| 772 | if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) | 1029 | if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) |
| @@ -777,7 +1034,7 @@ __do_block_io_op(struct xen_blkif *blkif) | |||
| 777 | break; | 1034 | break; |
| 778 | } | 1035 | } |
| 779 | 1036 | ||
| 780 | pending_req = alloc_req(); | 1037 | pending_req = alloc_req(blkif); |
| 781 | if (NULL == pending_req) { | 1038 | if (NULL == pending_req) { |
| 782 | blkif->st_oo_req++; | 1039 | blkif->st_oo_req++; |
| 783 | more_to_do = 1; | 1040 | more_to_do = 1; |
| @@ -807,11 +1064,12 @@ __do_block_io_op(struct xen_blkif *blkif) | |||
| 807 | case BLKIF_OP_WRITE: | 1064 | case BLKIF_OP_WRITE: |
| 808 | case BLKIF_OP_WRITE_BARRIER: | 1065 | case BLKIF_OP_WRITE_BARRIER: |
| 809 | case BLKIF_OP_FLUSH_DISKCACHE: | 1066 | case BLKIF_OP_FLUSH_DISKCACHE: |
| 1067 | case BLKIF_OP_INDIRECT: | ||
| 810 | if (dispatch_rw_block_io(blkif, &req, pending_req)) | 1068 | if (dispatch_rw_block_io(blkif, &req, pending_req)) |
| 811 | goto done; | 1069 | goto done; |
| 812 | break; | 1070 | break; |
| 813 | case BLKIF_OP_DISCARD: | 1071 | case BLKIF_OP_DISCARD: |
| 814 | free_req(pending_req); | 1072 | free_req(blkif, pending_req); |
| 815 | if (dispatch_discard_io(blkif, &req)) | 1073 | if (dispatch_discard_io(blkif, &req)) |
| 816 | goto done; | 1074 | goto done; |
| 817 | break; | 1075 | break; |
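The new RING_REQUEST_PROD_OVERFLOW() guard above rejects a frontend whose published req_prod would imply more outstanding requests than the ring can hold, before any of them are consumed. The predicate boils down to an unsigned distance check; a sketch of the idea (the real macro lives in Xen's ring.h and may differ in detail):

```c
/* Sketch: prod is bogus if it runs more than ring_size slots ahead of
 * our private response producer. Unsigned subtraction makes the
 * index-wrap case fall out naturally. */
static inline int prod_overflow(unsigned int prod,
				unsigned int rsp_prod_pvt,
				unsigned int ring_size)
{
	return (prod - rsp_prod_pvt) > ring_size;
}
```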
| @@ -853,17 +1111,28 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
| 853 | struct pending_req *pending_req) | 1111 | struct pending_req *pending_req) |
| 854 | { | 1112 | { |
| 855 | struct phys_req preq; | 1113 | struct phys_req preq; |
| 856 | struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 1114 | struct seg_buf *seg = pending_req->seg; |
| 857 | unsigned int nseg; | 1115 | unsigned int nseg; |
| 858 | struct bio *bio = NULL; | 1116 | struct bio *bio = NULL; |
| 859 | struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 1117 | struct bio **biolist = pending_req->biolist; |
| 860 | int i, nbio = 0; | 1118 | int i, nbio = 0; |
| 861 | int operation; | 1119 | int operation; |
| 862 | struct blk_plug plug; | 1120 | struct blk_plug plug; |
| 863 | bool drain = false; | 1121 | bool drain = false; |
| 864 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 1122 | struct grant_page **pages = pending_req->segments; |
| 1123 | unsigned short req_operation; | ||
| 1124 | |||
| 1125 | req_operation = req->operation == BLKIF_OP_INDIRECT ? | ||
| 1126 | req->u.indirect.indirect_op : req->operation; | ||
| 1127 | if ((req->operation == BLKIF_OP_INDIRECT) && | ||
| 1128 | (req_operation != BLKIF_OP_READ) && | ||
| 1129 | (req_operation != BLKIF_OP_WRITE)) { | ||
| 1130 | pr_debug(DRV_PFX "Invalid indirect operation (%u)\n", | ||
| 1131 | req_operation); | ||
| 1132 | goto fail_response; | ||
| 1133 | } | ||
| 865 | 1134 | ||
| 866 | switch (req->operation) { | 1135 | switch (req_operation) { |
| 867 | case BLKIF_OP_READ: | 1136 | case BLKIF_OP_READ: |
| 868 | blkif->st_rd_req++; | 1137 | blkif->st_rd_req++; |
| 869 | operation = READ; | 1138 | operation = READ; |
| @@ -885,33 +1154,47 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
| 885 | } | 1154 | } |
| 886 | 1155 | ||
| 887 | /* Check that the number of segments is sane. */ | 1156 | /* Check that the number of segments is sane. */ |
| 888 | nseg = req->u.rw.nr_segments; | 1157 | nseg = req->operation == BLKIF_OP_INDIRECT ? |
| 1158 | req->u.indirect.nr_segments : req->u.rw.nr_segments; | ||
| 889 | 1159 | ||
| 890 | if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || | 1160 | if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || |
| 891 | unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { | 1161 | unlikely((req->operation != BLKIF_OP_INDIRECT) && |
| 1162 | (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || | ||
| 1163 | unlikely((req->operation == BLKIF_OP_INDIRECT) && | ||
| 1164 | (nseg > MAX_INDIRECT_SEGMENTS))) { | ||
| 892 | pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", | 1165 | pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", |
| 893 | nseg); | 1166 | nseg); |
| 894 | /* Haven't submitted any bios yet. */ | 1167 | /* Haven't submitted any bios yet. */ |
| 895 | goto fail_response; | 1168 | goto fail_response; |
| 896 | } | 1169 | } |
| 897 | 1170 | ||
| 898 | preq.sector_number = req->u.rw.sector_number; | ||
| 899 | preq.nr_sects = 0; | 1171 | preq.nr_sects = 0; |
| 900 | 1172 | ||
| 901 | pending_req->blkif = blkif; | 1173 | pending_req->blkif = blkif; |
| 902 | pending_req->id = req->u.rw.id; | 1174 | pending_req->id = req->u.rw.id; |
| 903 | pending_req->operation = req->operation; | 1175 | pending_req->operation = req_operation; |
| 904 | pending_req->status = BLKIF_RSP_OKAY; | 1176 | pending_req->status = BLKIF_RSP_OKAY; |
| 905 | pending_req->nr_pages = nseg; | 1177 | pending_req->nr_pages = nseg; |
| 906 | 1178 | ||
| 907 | for (i = 0; i < nseg; i++) { | 1179 | if (req->operation != BLKIF_OP_INDIRECT) { |
| 908 | seg[i].nsec = req->u.rw.seg[i].last_sect - | 1180 | preq.dev = req->u.rw.handle; |
| 909 | req->u.rw.seg[i].first_sect + 1; | 1181 | preq.sector_number = req->u.rw.sector_number; |
| 910 | if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || | 1182 | for (i = 0; i < nseg; i++) { |
| 911 | (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) | 1183 | pages[i]->gref = req->u.rw.seg[i].gref; |
| 1184 | seg[i].nsec = req->u.rw.seg[i].last_sect - | ||
| 1185 | req->u.rw.seg[i].first_sect + 1; | ||
| 1186 | seg[i].offset = (req->u.rw.seg[i].first_sect << 9); | ||
| 1187 | if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || | ||
| 1188 | (req->u.rw.seg[i].last_sect < | ||
| 1189 | req->u.rw.seg[i].first_sect)) | ||
| 1190 | goto fail_response; | ||
| 1191 | preq.nr_sects += seg[i].nsec; | ||
| 1192 | } | ||
| 1193 | } else { | ||
| 1194 | preq.dev = req->u.indirect.handle; | ||
| 1195 | preq.sector_number = req->u.indirect.sector_number; | ||
| 1196 | if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq)) | ||
| 912 | goto fail_response; | 1197 | goto fail_response; |
| 913 | preq.nr_sects += seg[i].nsec; | ||
| 914 | |||
| 915 | } | 1198 | } |
| 916 | 1199 | ||
| 917 | if (xen_vbd_translate(&preq, blkif, operation) != 0) { | 1200 | if (xen_vbd_translate(&preq, blkif, operation) != 0) { |
| @@ -948,7 +1231,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
| 948 | * the hypercall to unmap the grants - that is all done in | 1231 | * the hypercall to unmap the grants - that is all done in |
| 949 | * xen_blkbk_unmap. | 1232 | * xen_blkbk_unmap. |
| 950 | */ | 1233 | */ |
| 951 | if (xen_blkbk_map(req, pending_req, seg, pages)) | 1234 | if (xen_blkbk_map_seg(pending_req)) |
| 952 | goto fail_flush; | 1235 | goto fail_flush; |
| 953 | 1236 | ||
| 954 | /* | 1237 | /* |
| @@ -960,11 +1243,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
| 960 | for (i = 0; i < nseg; i++) { | 1243 | for (i = 0; i < nseg; i++) { |
| 961 | while ((bio == NULL) || | 1244 | while ((bio == NULL) || |
| 962 | (bio_add_page(bio, | 1245 | (bio_add_page(bio, |
| 963 | pages[i], | 1246 | pages[i]->page, |
| 964 | seg[i].nsec << 9, | 1247 | seg[i].nsec << 9, |
| 965 | seg[i].offset) == 0)) { | 1248 | seg[i].offset) == 0)) { |
| 966 | 1249 | ||
| 967 | bio = bio_alloc(GFP_KERNEL, nseg-i); | 1250 | int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES); |
| 1251 | bio = bio_alloc(GFP_KERNEL, nr_iovecs); | ||
| 968 | if (unlikely(bio == NULL)) | 1252 | if (unlikely(bio == NULL)) |
| 969 | goto fail_put_bio; | 1253 | goto fail_put_bio; |
| 970 | 1254 | ||
| @@ -1009,11 +1293,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
| 1009 | return 0; | 1293 | return 0; |
| 1010 | 1294 | ||
| 1011 | fail_flush: | 1295 | fail_flush: |
| 1012 | xen_blkbk_unmap(pending_req); | 1296 | xen_blkbk_unmap(blkif, pending_req->segments, |
| 1297 | pending_req->nr_pages); | ||
| 1013 | fail_response: | 1298 | fail_response: |
| 1014 | /* Haven't submitted any bios yet. */ | 1299 | /* Haven't submitted any bios yet. */ |
| 1015 | make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR); | 1300 | make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR); |
| 1016 | free_req(pending_req); | 1301 | free_req(blkif, pending_req); |
| 1017 | msleep(1); /* back off a bit */ | 1302 | msleep(1); /* back off a bit */ |
| 1018 | return -EIO; | 1303 | return -EIO; |
| 1019 | 1304 | ||
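Because an indirect request can carry more segments than a single struct bio accepts, the bio_alloc() call in dispatch_rw_block_io() above now clamps nr_iovecs to BIO_MAX_PAGES and relies on the outer loop to allocate further bios for the remainder. The effect of the clamp, in isolation:

```c
/* Sketch: how many bios a request needs when each bio holds at most
 * BIO_MAX iovecs (mirrors BIO_MAX_PAGES, typically 256 here). */
#define BIO_MAX 256

static int bios_needed(int nseg)
{
	int i = 0, nbio = 0;

	while (i < nseg) {
		int nr_iovecs = (nseg - i) < BIO_MAX ? (nseg - i) : BIO_MAX;
		/* bio_alloc(GFP_KERNEL, nr_iovecs); add pages i..i+nr_iovecs-1 */
		i += nr_iovecs;
		nbio++;
	}
	return nbio;	/* e.g. 256 segments -> 1 bio, 257 -> 2 */
}
```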
| @@ -1070,73 +1355,20 @@ static void make_response(struct xen_blkif *blkif, u64 id, | |||
| 1070 | 1355 | ||
| 1071 | static int __init xen_blkif_init(void) | 1356 | static int __init xen_blkif_init(void) |
| 1072 | { | 1357 | { |
| 1073 | int i, mmap_pages; | ||
| 1074 | int rc = 0; | 1358 | int rc = 0; |
| 1075 | 1359 | ||
| 1076 | if (!xen_domain()) | 1360 | if (!xen_domain()) |
| 1077 | return -ENODEV; | 1361 | return -ENODEV; |
| 1078 | 1362 | ||
| 1079 | blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL); | ||
| 1080 | if (!blkbk) { | ||
| 1081 | pr_alert(DRV_PFX "%s: out of memory!\n", __func__); | ||
| 1082 | return -ENOMEM; | ||
| 1083 | } | ||
| 1084 | |||
| 1085 | mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; | ||
| 1086 | |||
| 1087 | blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) * | ||
| 1088 | xen_blkif_reqs, GFP_KERNEL); | ||
| 1089 | blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) * | ||
| 1090 | mmap_pages, GFP_KERNEL); | ||
| 1091 | blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) * | ||
| 1092 | mmap_pages, GFP_KERNEL); | ||
| 1093 | |||
| 1094 | if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || | ||
| 1095 | !blkbk->pending_pages) { | ||
| 1096 | rc = -ENOMEM; | ||
| 1097 | goto out_of_memory; | ||
| 1098 | } | ||
| 1099 | |||
| 1100 | for (i = 0; i < mmap_pages; i++) { | ||
| 1101 | blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE; | ||
| 1102 | blkbk->pending_pages[i] = alloc_page(GFP_KERNEL); | ||
| 1103 | if (blkbk->pending_pages[i] == NULL) { | ||
| 1104 | rc = -ENOMEM; | ||
| 1105 | goto out_of_memory; | ||
| 1106 | } | ||
| 1107 | } | ||
| 1108 | rc = xen_blkif_interface_init(); | 1363 | rc = xen_blkif_interface_init(); |
| 1109 | if (rc) | 1364 | if (rc) |
| 1110 | goto failed_init; | 1365 | goto failed_init; |
| 1111 | 1366 | ||
| 1112 | INIT_LIST_HEAD(&blkbk->pending_free); | ||
| 1113 | spin_lock_init(&blkbk->pending_free_lock); | ||
| 1114 | init_waitqueue_head(&blkbk->pending_free_wq); | ||
| 1115 | |||
| 1116 | for (i = 0; i < xen_blkif_reqs; i++) | ||
| 1117 | list_add_tail(&blkbk->pending_reqs[i].free_list, | ||
| 1118 | &blkbk->pending_free); | ||
| 1119 | |||
| 1120 | rc = xen_blkif_xenbus_init(); | 1367 | rc = xen_blkif_xenbus_init(); |
| 1121 | if (rc) | 1368 | if (rc) |
| 1122 | goto failed_init; | 1369 | goto failed_init; |
| 1123 | 1370 | ||
| 1124 | return 0; | ||
| 1125 | |||
| 1126 | out_of_memory: | ||
| 1127 | pr_alert(DRV_PFX "%s: out of memory\n", __func__); | ||
| 1128 | failed_init: | 1371 | failed_init: |
| 1129 | kfree(blkbk->pending_reqs); | ||
| 1130 | kfree(blkbk->pending_grant_handles); | ||
| 1131 | if (blkbk->pending_pages) { | ||
| 1132 | for (i = 0; i < mmap_pages; i++) { | ||
| 1133 | if (blkbk->pending_pages[i]) | ||
| 1134 | __free_page(blkbk->pending_pages[i]); | ||
| 1135 | } | ||
| 1136 | kfree(blkbk->pending_pages); | ||
| 1137 | } | ||
| 1138 | kfree(blkbk); | ||
| 1139 | blkbk = NULL; | ||
| 1140 | return rc; | 1372 | return rc; |
| 1141 | } | 1373 | } |
| 1142 | 1374 | ||
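The deletions above reflect the structural theme of this series: the global `blkbk` pool of pending_reqs is gone, and each xen_blkif now carries its own pending_free list, populated in xen_blkif_alloc() (see the xenbus.c hunk below). A plausible shape for the reworked alloc_req(), inferred from the pending_free fields added to common.h; the real helper may differ:

```c
/* Sketch: pop a pending_req from the per-device free list, or return
 * NULL so __do_block_io_op() can back off and retry later. */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next,
				 struct pending_req, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}
```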
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 60103e2517ba..8d8807563d99 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
| @@ -50,6 +50,19 @@ | |||
| 50 | __func__, __LINE__, ##args) | 50 | __func__, __LINE__, ##args) |
| 51 | 51 | ||
| 52 | 52 | ||
| 53 | /* | ||
| 54 | * This is the maximum number of segments allowed in indirect | ||
| 55 | * requests. This value will also be passed to the frontend. | ||
| 56 | */ | ||
| 57 | #define MAX_INDIRECT_SEGMENTS 256 | ||
| 58 | |||
| 59 | #define SEGS_PER_INDIRECT_FRAME \ | ||
| 60 | (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) | ||
| 61 | #define MAX_INDIRECT_PAGES \ | ||
| 62 | ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) | ||
| 63 | #define INDIRECT_PAGES(_segs) \ | ||
| 64 | ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) | ||
| 65 | |||
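The three macros above fix how many indirect pages a request needs. Assuming 4 KiB pages and an 8-byte blkif_request_segment_aligned entry, the arithmetic works out as follows:

```c
/* Worked example of the macros above (sizes assumed):
 *   SEGS_PER_INDIRECT_FRAME = 4096 / 8         = 512
 *   MAX_INDIRECT_PAGES      = (256 + 511)/512  = 1
 *   INDIRECT_PAGES(32)      = (32  + 511)/512  = 1
 * so the default MAX_INDIRECT_SEGMENTS of 256 fits in a single
 * indirect page per request. */
_Static_assert(4096 / 8 == 512, "segment entries per indirect frame");
_Static_assert((256 + 511) / 512 == 1, "indirect pages for 256 segments");
```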
| 53 | /* Not a real protocol. Used to generate ring structs which contain | 66 | /* Not a real protocol. Used to generate ring structs which contain |
| 54 | * the elements common to all protocols only. This way we get a | 67 | * the elements common to all protocols only. This way we get a |
| 55 | * compiler-checkable way to use common struct elements, so we can | 68 | * compiler-checkable way to use common struct elements, so we can |
| @@ -83,12 +96,31 @@ struct blkif_x86_32_request_other { | |||
| 83 | uint64_t id; /* private guest value, echoed in resp */ | 96 | uint64_t id; /* private guest value, echoed in resp */ |
| 84 | } __attribute__((__packed__)); | 97 | } __attribute__((__packed__)); |
| 85 | 98 | ||
| 99 | struct blkif_x86_32_request_indirect { | ||
| 100 | uint8_t indirect_op; | ||
| 101 | uint16_t nr_segments; | ||
| 102 | uint64_t id; | ||
| 103 | blkif_sector_t sector_number; | ||
| 104 | blkif_vdev_t handle; | ||
| 105 | uint16_t _pad1; | ||
| 106 | grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; | ||
| 107 | /* | ||
| 108 | * The maximum number of indirect segments (and pages) that will | ||
| 109 | * be used is determined by MAX_INDIRECT_SEGMENTS, this value | ||
| 110 | * is also exported to the guest (via xenstore | ||
| 111 | * feature-max-indirect-segments entry), so the frontend knows how | ||
| 112 | * many indirect segments the backend supports. | ||
| 113 | */ | ||
| 114 | uint64_t _pad2; /* make it 64 byte aligned */ | ||
| 115 | } __attribute__((__packed__)); | ||
| 116 | |||
| 86 | struct blkif_x86_32_request { | 117 | struct blkif_x86_32_request { |
| 87 | uint8_t operation; /* BLKIF_OP_??? */ | 118 | uint8_t operation; /* BLKIF_OP_??? */ |
| 88 | union { | 119 | union { |
| 89 | struct blkif_x86_32_request_rw rw; | 120 | struct blkif_x86_32_request_rw rw; |
| 90 | struct blkif_x86_32_request_discard discard; | 121 | struct blkif_x86_32_request_discard discard; |
| 91 | struct blkif_x86_32_request_other other; | 122 | struct blkif_x86_32_request_other other; |
| 123 | struct blkif_x86_32_request_indirect indirect; | ||
| 92 | } u; | 124 | } u; |
| 93 | } __attribute__((__packed__)); | 125 | } __attribute__((__packed__)); |
| 94 | 126 | ||
| @@ -127,12 +159,32 @@ struct blkif_x86_64_request_other { | |||
| 127 | uint64_t id; /* private guest value, echoed in resp */ | 159 | uint64_t id; /* private guest value, echoed in resp */ |
| 128 | } __attribute__((__packed__)); | 160 | } __attribute__((__packed__)); |
| 129 | 161 | ||
| 162 | struct blkif_x86_64_request_indirect { | ||
| 163 | uint8_t indirect_op; | ||
| 164 | uint16_t nr_segments; | ||
| 165 | uint32_t _pad1; /* offsetof(blkif_..,u.indirect.id)==8 */ | ||
| 166 | uint64_t id; | ||
| 167 | blkif_sector_t sector_number; | ||
| 168 | blkif_vdev_t handle; | ||
| 169 | uint16_t _pad2; | ||
| 170 | grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; | ||
| 171 | /* | ||
| 172 | * The maximum number of indirect segments (and pages) that will | ||
| 173 | * be used is determined by MAX_INDIRECT_SEGMENTS, this value | ||
| 174 | * is also exported to the guest (via xenstore | ||
| 175 | * feature-max-indirect-segments entry), so the frontend knows how | ||
| 176 | * many indirect segments the backend supports. | ||
| 177 | */ | ||
| 178 | uint32_t _pad3; /* make it 64 byte aligned */ | ||
| 179 | } __attribute__((__packed__)); | ||
| 180 | |||
| 130 | struct blkif_x86_64_request { | 181 | struct blkif_x86_64_request { |
| 131 | uint8_t operation; /* BLKIF_OP_??? */ | 182 | uint8_t operation; /* BLKIF_OP_??? */ |
| 132 | union { | 183 | union { |
| 133 | struct blkif_x86_64_request_rw rw; | 184 | struct blkif_x86_64_request_rw rw; |
| 134 | struct blkif_x86_64_request_discard discard; | 185 | struct blkif_x86_64_request_discard discard; |
| 135 | struct blkif_x86_64_request_other other; | 186 | struct blkif_x86_64_request_other other; |
| 187 | struct blkif_x86_64_request_indirect indirect; | ||
| 136 | } u; | 188 | } u; |
| 137 | } __attribute__((__packed__)); | 189 | } __attribute__((__packed__)); |
| 138 | 190 | ||
| @@ -182,12 +234,26 @@ struct xen_vbd { | |||
| 182 | 234 | ||
| 183 | struct backend_info; | 235 | struct backend_info; |
| 184 | 236 | ||
| 237 | /* Number of available flags */ | ||
| 238 | #define PERSISTENT_GNT_FLAGS_SIZE 2 | ||
| 239 | /* This persistent grant is currently in use */ | ||
| 240 | #define PERSISTENT_GNT_ACTIVE 0 | ||
| 241 | /* | ||
| 242 | * This persistent grant has been used; the flag is set when we clear | ||
| 243 | * PERSISTENT_GNT_ACTIVE, so we know the grant was used recently. | ||
| 244 | */ | ||
| 245 | #define PERSISTENT_GNT_WAS_ACTIVE 1 | ||
| 246 | |||
| 247 | /* Number of requests that we can fit in a ring */ | ||
| 248 | #define XEN_BLKIF_REQS 32 | ||
| 185 | 249 | ||
| 186 | struct persistent_gnt { | 250 | struct persistent_gnt { |
| 187 | struct page *page; | 251 | struct page *page; |
| 188 | grant_ref_t gnt; | 252 | grant_ref_t gnt; |
| 189 | grant_handle_t handle; | 253 | grant_handle_t handle; |
| 254 | DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); | ||
| 190 | struct rb_node node; | 255 | struct rb_node node; |
| 256 | struct list_head remove_node; | ||
| 191 | }; | 257 | }; |
| 192 | 258 | ||
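The two flag bits defined above drive the new purge logic: PERSISTENT_GNT_ACTIVE marks a grant currently mapped into a request, and PERSISTENT_GNT_WAS_ACTIVE is left behind on release so the purge worker can tell recently used grants from idle ones. A sketch of the eviction test, on the assumption that the purger clears WAS_ACTIVE as it scans:

```c
/* Sketch: a grant may be purged only if it is neither mapped right now
 * nor used since the previous scan. */
static bool purge_candidate(struct persistent_gnt *gnt)
{
	if (test_bit(PERSISTENT_GNT_ACTIVE, gnt->flags))
		return false;		/* in use by a request */
	if (test_and_clear_bit(PERSISTENT_GNT_WAS_ACTIVE, gnt->flags))
		return false;		/* used recently: spare it this pass */
	return true;			/* idle: safe to unmap and free */
}
```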
| 193 | struct xen_blkif { | 259 | struct xen_blkif { |
| @@ -219,6 +285,23 @@ struct xen_blkif { | |||
| 219 | /* tree to store persistent grants */ | 285 | /* tree to store persistent grants */ |
| 220 | struct rb_root persistent_gnts; | 286 | struct rb_root persistent_gnts; |
| 221 | unsigned int persistent_gnt_c; | 287 | unsigned int persistent_gnt_c; |
| 288 | atomic_t persistent_gnt_in_use; | ||
| 289 | unsigned long next_lru; | ||
| 290 | |||
| 291 | /* used by the kworker that offloads work from the persistent purge */ | ||
| 292 | struct list_head persistent_purge_list; | ||
| 293 | struct work_struct persistent_purge_work; | ||
| 294 | |||
| 295 | /* buffer of free pages to map grant refs */ | ||
| 296 | spinlock_t free_pages_lock; | ||
| 297 | int free_pages_num; | ||
| 298 | struct list_head free_pages; | ||
| 299 | |||
| 300 | /* List of all 'pending_req' available */ | ||
| 301 | struct list_head pending_free; | ||
| 302 | /* And its spinlock. */ | ||
| 303 | spinlock_t pending_free_lock; | ||
| 304 | wait_queue_head_t pending_free_wq; | ||
| 222 | 305 | ||
| 223 | /* statistics */ | 306 | /* statistics */ |
| 224 | unsigned long st_print; | 307 | unsigned long st_print; |
| @@ -231,6 +314,41 @@ struct xen_blkif { | |||
| 231 | unsigned long long st_wr_sect; | 314 | unsigned long long st_wr_sect; |
| 232 | 315 | ||
| 233 | wait_queue_head_t waiting_to_free; | 316 | wait_queue_head_t waiting_to_free; |
| 317 | /* Thread shutdown wait queue. */ | ||
| 318 | wait_queue_head_t shutdown_wq; | ||
| 319 | }; | ||
| 320 | |||
| 321 | struct seg_buf { | ||
| 322 | unsigned long offset; | ||
| 323 | unsigned int nsec; | ||
| 324 | }; | ||
| 325 | |||
| 326 | struct grant_page { | ||
| 327 | struct page *page; | ||
| 328 | struct persistent_gnt *persistent_gnt; | ||
| 329 | grant_handle_t handle; | ||
| 330 | grant_ref_t gref; | ||
| 331 | }; | ||
| 332 | |||
| 333 | /* | ||
| 334 | * Each outstanding request that we've passed to the lower device layers has a | ||
| 335 | * 'pending_req' allocated to it. Each buffer_head that completes decrements | ||
| 336 | * the pendcnt towards zero. When it hits zero, the specified domain has a | ||
| 337 | * response queued for it, with the saved 'id' passed back. | ||
| 338 | */ | ||
| 339 | struct pending_req { | ||
| 340 | struct xen_blkif *blkif; | ||
| 341 | u64 id; | ||
| 342 | int nr_pages; | ||
| 343 | atomic_t pendcnt; | ||
| 344 | unsigned short operation; | ||
| 345 | int status; | ||
| 346 | struct list_head free_list; | ||
| 347 | struct grant_page *segments[MAX_INDIRECT_SEGMENTS]; | ||
| 348 | /* Indirect descriptors */ | ||
| 349 | struct grant_page *indirect_pages[MAX_INDIRECT_PAGES]; | ||
| 350 | struct seg_buf seg[MAX_INDIRECT_SEGMENTS]; | ||
| 351 | struct bio *biolist[MAX_INDIRECT_SEGMENTS]; | ||
| 234 | }; | 352 | }; |
| 235 | 353 | ||
| 236 | 354 | ||
| @@ -257,6 +375,7 @@ int xen_blkif_xenbus_init(void); | |||
| 257 | 375 | ||
| 258 | irqreturn_t xen_blkif_be_int(int irq, void *dev_id); | 376 | irqreturn_t xen_blkif_be_int(int irq, void *dev_id); |
| 259 | int xen_blkif_schedule(void *arg); | 377 | int xen_blkif_schedule(void *arg); |
| 378 | int xen_blkif_purge_persistent(void *arg); | ||
| 260 | 379 | ||
| 261 | int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, | 380 | int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, |
| 262 | struct backend_info *be, int state); | 381 | struct backend_info *be, int state); |
| @@ -268,7 +387,7 @@ struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be); | |||
| 268 | static inline void blkif_get_x86_32_req(struct blkif_request *dst, | 387 | static inline void blkif_get_x86_32_req(struct blkif_request *dst, |
| 269 | struct blkif_x86_32_request *src) | 388 | struct blkif_x86_32_request *src) |
| 270 | { | 389 | { |
| 271 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; | 390 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; |
| 272 | dst->operation = src->operation; | 391 | dst->operation = src->operation; |
| 273 | switch (src->operation) { | 392 | switch (src->operation) { |
| 274 | case BLKIF_OP_READ: | 393 | case BLKIF_OP_READ: |
| @@ -291,6 +410,18 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, | |||
| 291 | dst->u.discard.sector_number = src->u.discard.sector_number; | 410 | dst->u.discard.sector_number = src->u.discard.sector_number; |
| 292 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; | 411 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
| 293 | break; | 412 | break; |
| 413 | case BLKIF_OP_INDIRECT: | ||
| 414 | dst->u.indirect.indirect_op = src->u.indirect.indirect_op; | ||
| 415 | dst->u.indirect.nr_segments = src->u.indirect.nr_segments; | ||
| 416 | dst->u.indirect.handle = src->u.indirect.handle; | ||
| 417 | dst->u.indirect.id = src->u.indirect.id; | ||
| 418 | dst->u.indirect.sector_number = src->u.indirect.sector_number; | ||
| 419 | barrier(); | ||
| 420 | j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments)); | ||
| 421 | for (i = 0; i < j; i++) | ||
| 422 | dst->u.indirect.indirect_grefs[i] = | ||
| 423 | src->u.indirect.indirect_grefs[i]; | ||
| 424 | break; | ||
| 294 | default: | 425 | default: |
| 295 | /* | 426 | /* |
| 296 | * Don't know how to translate this op. Only get the | 427 | * Don't know how to translate this op. Only get the |
| @@ -304,7 +435,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, | |||
| 304 | static inline void blkif_get_x86_64_req(struct blkif_request *dst, | 435 | static inline void blkif_get_x86_64_req(struct blkif_request *dst, |
| 305 | struct blkif_x86_64_request *src) | 436 | struct blkif_x86_64_request *src) |
| 306 | { | 437 | { |
| 307 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; | 438 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; |
| 308 | dst->operation = src->operation; | 439 | dst->operation = src->operation; |
| 309 | switch (src->operation) { | 440 | switch (src->operation) { |
| 310 | case BLKIF_OP_READ: | 441 | case BLKIF_OP_READ: |
| @@ -327,6 +458,18 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, | |||
| 327 | dst->u.discard.sector_number = src->u.discard.sector_number; | 458 | dst->u.discard.sector_number = src->u.discard.sector_number; |
| 328 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; | 459 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
| 329 | break; | 460 | break; |
| 461 | case BLKIF_OP_INDIRECT: | ||
| 462 | dst->u.indirect.indirect_op = src->u.indirect.indirect_op; | ||
| 463 | dst->u.indirect.nr_segments = src->u.indirect.nr_segments; | ||
| 464 | dst->u.indirect.handle = src->u.indirect.handle; | ||
| 465 | dst->u.indirect.id = src->u.indirect.id; | ||
| 466 | dst->u.indirect.sector_number = src->u.indirect.sector_number; | ||
| 467 | barrier(); | ||
| 468 | j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments)); | ||
| 469 | for (i = 0; i < j; i++) | ||
| 470 | dst->u.indirect.indirect_grefs[i] = | ||
| 471 | src->u.indirect.indirect_grefs[i]; | ||
| 472 | break; | ||
| 330 | default: | 473 | default: |
| 331 | /* | 474 | /* |
| 332 | * Don't know how to translate this op. Only get the | 475 | * Don't know how to translate this op. Only get the |
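In both copy helpers above, nr_segments is read from memory the frontend still shares, so the barrier() stops the compiler from re-fetching it after validation, and the min() clamp bounds the indirect_grefs copy even if the value is garbage. The same pattern in isolation, with a hypothetical shared counter:

```c
/* Sketch: snapshot an untrusted count once, fence it, clamp it, and
 * only then use it as a loop bound. */
#define MAX_PAGES 1

static void copy_grefs(unsigned int *dst,
		       const volatile unsigned int *shared_grefs,
		       const volatile unsigned int *shared_count)
{
	unsigned int n = *shared_count;		/* read exactly once */
	__asm__ __volatile__("" ::: "memory");	/* like barrier(): no re-read */
	if (n > MAX_PAGES)
		n = MAX_PAGES;			/* clamp before trusting */
	for (unsigned int i = 0; i < n; i++)
		dst[i] = shared_grefs[i];
}
```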
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 04608a6502d7..fe5c3cd10c34 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
| @@ -98,12 +98,17 @@ static void xen_update_blkif_status(struct xen_blkif *blkif) | |||
| 98 | err = PTR_ERR(blkif->xenblkd); | 98 | err = PTR_ERR(blkif->xenblkd); |
| 99 | blkif->xenblkd = NULL; | 99 | blkif->xenblkd = NULL; |
| 100 | xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); | 100 | xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); |
| 101 | return; | ||
| 101 | } | 102 | } |
| 102 | } | 103 | } |
| 103 | 104 | ||
| 104 | static struct xen_blkif *xen_blkif_alloc(domid_t domid) | 105 | static struct xen_blkif *xen_blkif_alloc(domid_t domid) |
| 105 | { | 106 | { |
| 106 | struct xen_blkif *blkif; | 107 | struct xen_blkif *blkif; |
| 108 | struct pending_req *req, *n; | ||
| 109 | int i, j; | ||
| 110 | |||
| 111 | BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); | ||
| 107 | 112 | ||
| 108 | blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); | 113 | blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); |
| 109 | if (!blkif) | 114 | if (!blkif) |
| @@ -118,8 +123,57 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) | |||
| 118 | blkif->st_print = jiffies; | 123 | blkif->st_print = jiffies; |
| 119 | init_waitqueue_head(&blkif->waiting_to_free); | 124 | init_waitqueue_head(&blkif->waiting_to_free); |
| 120 | blkif->persistent_gnts.rb_node = NULL; | 125 | blkif->persistent_gnts.rb_node = NULL; |
| 126 | spin_lock_init(&blkif->free_pages_lock); | ||
| 127 | INIT_LIST_HEAD(&blkif->free_pages); | ||
| 128 | blkif->free_pages_num = 0; | ||
| 129 | atomic_set(&blkif->persistent_gnt_in_use, 0); | ||
| 130 | |||
| 131 | INIT_LIST_HEAD(&blkif->pending_free); | ||
| 132 | |||
| 133 | for (i = 0; i < XEN_BLKIF_REQS; i++) { | ||
| 134 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
| 135 | if (!req) | ||
| 136 | goto fail; | ||
| 137 | list_add_tail(&req->free_list, | ||
| 138 | &blkif->pending_free); | ||
| 139 | for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) { | ||
| 140 | req->segments[j] = kzalloc(sizeof(*req->segments[0]), | ||
| 141 | GFP_KERNEL); | ||
| 142 | if (!req->segments[j]) | ||
| 143 | goto fail; | ||
| 144 | } | ||
| 145 | for (j = 0; j < MAX_INDIRECT_PAGES; j++) { | ||
| 146 | req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]), | ||
| 147 | GFP_KERNEL); | ||
| 148 | if (!req->indirect_pages[j]) | ||
| 149 | goto fail; | ||
| 150 | } | ||
| 151 | } | ||
| 152 | spin_lock_init(&blkif->pending_free_lock); | ||
| 153 | init_waitqueue_head(&blkif->pending_free_wq); | ||
| 154 | init_waitqueue_head(&blkif->shutdown_wq); | ||
| 121 | 155 | ||
| 122 | return blkif; | 156 | return blkif; |
| 157 | |||
| 158 | fail: | ||
| 159 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { | ||
| 160 | list_del(&req->free_list); | ||
| 161 | for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) { | ||
| 162 | if (!req->segments[j]) | ||
| 163 | break; | ||
| 164 | kfree(req->segments[j]); | ||
| 165 | } | ||
| 166 | for (j = 0; j < MAX_INDIRECT_PAGES; j++) { | ||
| 167 | if (!req->indirect_pages[j]) | ||
| 168 | break; | ||
| 169 | kfree(req->indirect_pages[j]); | ||
| 170 | } | ||
| 171 | kfree(req); | ||
| 172 | } | ||
| 173 | |||
| 174 | kmem_cache_free(xen_blkif_cachep, blkif); | ||
| 175 | |||
| 176 | return ERR_PTR(-ENOMEM); | ||
| 123 | } | 177 | } |
| 124 | 178 | ||
| 125 | static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, | 179 | static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, |
| @@ -178,6 +232,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif) | |||
| 178 | { | 232 | { |
| 179 | if (blkif->xenblkd) { | 233 | if (blkif->xenblkd) { |
| 180 | kthread_stop(blkif->xenblkd); | 234 | kthread_stop(blkif->xenblkd); |
| 235 | wake_up(&blkif->shutdown_wq); | ||
| 181 | blkif->xenblkd = NULL; | 236 | blkif->xenblkd = NULL; |
| 182 | } | 237 | } |
| 183 | 238 | ||
| @@ -198,8 +253,28 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif) | |||
| 198 | 253 | ||
| 199 | static void xen_blkif_free(struct xen_blkif *blkif) | 254 | static void xen_blkif_free(struct xen_blkif *blkif) |
| 200 | { | 255 | { |
| 256 | struct pending_req *req, *n; | ||
| 257 | int i = 0, j; | ||
| 258 | |||
| 201 | if (!atomic_dec_and_test(&blkif->refcnt)) | 259 | if (!atomic_dec_and_test(&blkif->refcnt)) |
| 202 | BUG(); | 260 | BUG(); |
| 261 | |||
| 262 | /* Check that there is no request in use */ | ||
| 263 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { | ||
| 264 | list_del(&req->free_list); | ||
| 265 | |||
| 266 | for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) | ||
| 267 | kfree(req->segments[j]); | ||
| 268 | |||
| 269 | for (j = 0; j < MAX_INDIRECT_PAGES; j++) | ||
| 270 | kfree(req->indirect_pages[j]); | ||
| 271 | |||
| 272 | kfree(req); | ||
| 273 | i++; | ||
| 274 | } | ||
| 275 | |||
| 276 | WARN_ON(i != XEN_BLKIF_REQS); | ||
| 277 | |||
| 203 | kmem_cache_free(xen_blkif_cachep, blkif); | 278 | kmem_cache_free(xen_blkif_cachep, blkif); |
| 204 | } | 279 | } |
| 205 | 280 | ||
| @@ -678,6 +753,11 @@ again: | |||
| 678 | dev->nodename); | 753 | dev->nodename); |
| 679 | goto abort; | 754 | goto abort; |
| 680 | } | 755 | } |
| 756 | err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u", | ||
| 757 | MAX_INDIRECT_SEGMENTS); | ||
| 758 | if (err) | ||
| 759 | dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)", | ||
| 760 | dev->nodename, err); | ||
| 681 | 761 | ||
| 682 | err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", | 762 | err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", |
| 683 | (unsigned long long)vbd_sz(&be->blkif->vbd)); | 763 | (unsigned long long)vbd_sz(&be->blkif->vbd)); |
| @@ -704,6 +784,11 @@ again: | |||
| 704 | dev->nodename); | 784 | dev->nodename); |
| 705 | goto abort; | 785 | goto abort; |
| 706 | } | 786 | } |
| 787 | err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u", | ||
| 788 | bdev_physical_block_size(be->blkif->vbd.bdev)); | ||
| 789 | if (err) | ||
| 790 | xenbus_dev_error(dev, err, "writing %s/physical-sector-size", | ||
| 791 | dev->nodename); | ||
| 707 | 792 | ||
| 708 | err = xenbus_transaction_end(xbt, 0); | 793 | err = xenbus_transaction_end(xbt, 0); |
| 709 | if (err == -EAGAIN) | 794 | if (err == -EAGAIN) |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index d89ef86220f4..a4660bbee8a6 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -74,12 +74,30 @@ struct grant { | |||
| 74 | struct blk_shadow { | 74 | struct blk_shadow { |
| 75 | struct blkif_request req; | 75 | struct blkif_request req; |
| 76 | struct request *request; | 76 | struct request *request; |
| 77 | struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 77 | struct grant **grants_used; |
| 78 | struct grant **indirect_grants; | ||
| 79 | struct scatterlist *sg; | ||
| 80 | }; | ||
| 81 | |||
| 82 | struct split_bio { | ||
| 83 | struct bio *bio; | ||
| 84 | atomic_t pending; | ||
| 85 | int err; | ||
| 78 | }; | 86 | }; |
| 79 | 87 | ||
| 80 | static DEFINE_MUTEX(blkfront_mutex); | 88 | static DEFINE_MUTEX(blkfront_mutex); |
| 81 | static const struct block_device_operations xlvbd_block_fops; | 89 | static const struct block_device_operations xlvbd_block_fops; |
| 82 | 90 | ||
| 91 | /* | ||
| 92 | * Maximum number of segments in indirect requests; the actual value used by | ||
| 93 | * the frontend driver is the minimum of this value and the value provided | ||
| 94 | * by the backend driver. | ||
| 95 | */ | ||
| 96 | |||
| 97 | static unsigned int xen_blkif_max_segments = 32; | ||
| 98 | module_param_named(max, xen_blkif_max_segments, int, S_IRUGO); | ||
| 99 | MODULE_PARM_DESC(max, "Maximum number of segments in indirect requests (default is 32)"); | ||
| 100 | |||
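The parameter above only caps what the frontend asks for; the operative limit is negotiated against the backend's feature-max-indirect-segments key (written in the xenbus.c hunk earlier). A sketch of the read-and-clamp step using the standard xenbus_scanf() helper; the actual probe code may gather several keys at once:

```c
/* Sketch: take the minimum of the backend's advertised limit and our
 * module parameter; a missing key means no indirect support. */
static void negotiate_indirect(struct blkfront_info *info)
{
	unsigned int indirect_segments;
	int err;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-max-indirect-segments", "%u",
			   &indirect_segments);
	if (err != 1)
		info->max_indirect_segments = 0;  /* backend lacks the feature */
	else
		info->max_indirect_segments =
			min(indirect_segments, xen_blkif_max_segments);
}
```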
| 83 | #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) | 101 | #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) |
| 84 | 102 | ||
| 85 | /* | 103 | /* |
| @@ -98,7 +116,6 @@ struct blkfront_info | |||
| 98 | enum blkif_state connected; | 116 | enum blkif_state connected; |
| 99 | int ring_ref; | 117 | int ring_ref; |
| 100 | struct blkif_front_ring ring; | 118 | struct blkif_front_ring ring; |
| 101 | struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
| 102 | unsigned int evtchn, irq; | 119 | unsigned int evtchn, irq; |
| 103 | struct request_queue *rq; | 120 | struct request_queue *rq; |
| 104 | struct work_struct work; | 121 | struct work_struct work; |
| @@ -114,6 +131,7 @@ struct blkfront_info | |||
| 114 | unsigned int discard_granularity; | 131 | unsigned int discard_granularity; |
| 115 | unsigned int discard_alignment; | 132 | unsigned int discard_alignment; |
| 116 | unsigned int feature_persistent:1; | 133 | unsigned int feature_persistent:1; |
| 134 | unsigned int max_indirect_segments; | ||
| 117 | int is_ready; | 135 | int is_ready; |
| 118 | }; | 136 | }; |
| 119 | 137 | ||
| @@ -142,6 +160,13 @@ static DEFINE_SPINLOCK(minor_lock); | |||
| 142 | 160 | ||
| 143 | #define DEV_NAME "xvd" /* name in /dev */ | 161 | #define DEV_NAME "xvd" /* name in /dev */ |
| 144 | 162 | ||
| 163 | #define SEGS_PER_INDIRECT_FRAME \ | ||
| 164 | (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) | ||
| 165 | #define INDIRECT_GREFS(_segs) \ | ||
| 166 | ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) | ||
| 167 | |||
| 168 | static int blkfront_setup_indirect(struct blkfront_info *info); | ||
| 169 | |||
| 145 | static int get_id_from_freelist(struct blkfront_info *info) | 170 | static int get_id_from_freelist(struct blkfront_info *info) |
| 146 | { | 171 | { |
| 147 | unsigned long free = info->shadow_free; | 172 | unsigned long free = info->shadow_free; |
| @@ -358,7 +383,8 @@ static int blkif_queue_request(struct request *req) | |||
| 358 | struct blkif_request *ring_req; | 383 | struct blkif_request *ring_req; |
| 359 | unsigned long id; | 384 | unsigned long id; |
| 360 | unsigned int fsect, lsect; | 385 | unsigned int fsect, lsect; |
| 361 | int i, ref; | 386 | int i, ref, n; |
| 387 | struct blkif_request_segment_aligned *segments = NULL; | ||
| 362 | 388 | ||
| 363 | /* | 389 | /* |
| 364 | * Used to store if we are able to queue the request by just using | 390 | * Used to store if we are able to queue the request by just using |
| @@ -369,21 +395,27 @@ static int blkif_queue_request(struct request *req) | |||
| 369 | grant_ref_t gref_head; | 395 | grant_ref_t gref_head; |
| 370 | struct grant *gnt_list_entry = NULL; | 396 | struct grant *gnt_list_entry = NULL; |
| 371 | struct scatterlist *sg; | 397 | struct scatterlist *sg; |
| 398 | int nseg, max_grefs; | ||
| 372 | 399 | ||
| 373 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) | 400 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) |
| 374 | return 1; | 401 | return 1; |
| 375 | 402 | ||
| 376 | /* Check if we have enought grants to allocate a requests */ | 403 | max_grefs = info->max_indirect_segments ? |
| 377 | if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) { | 404 | info->max_indirect_segments + |
| 405 | INDIRECT_GREFS(info->max_indirect_segments) : | ||
| 406 | BLKIF_MAX_SEGMENTS_PER_REQUEST; | ||
| 407 | |||
| 408 | /* Check if we have enough grants to allocate a request */ | ||
| 409 | if (info->persistent_gnts_c < max_grefs) { | ||
| 378 | new_persistent_gnts = 1; | 410 | new_persistent_gnts = 1; |
| 379 | if (gnttab_alloc_grant_references( | 411 | if (gnttab_alloc_grant_references( |
| 380 | BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c, | 412 | max_grefs - info->persistent_gnts_c, |
| 381 | &gref_head) < 0) { | 413 | &gref_head) < 0) { |
| 382 | gnttab_request_free_callback( | 414 | gnttab_request_free_callback( |
| 383 | &info->callback, | 415 | &info->callback, |
| 384 | blkif_restart_queue_callback, | 416 | blkif_restart_queue_callback, |
| 385 | info, | 417 | info, |
| 386 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | 418 | max_grefs); |
| 387 | return 1; | 419 | return 1; |
| 388 | } | 420 | } |
| 389 | } else | 421 | } else |
| @@ -394,42 +426,67 @@ static int blkif_queue_request(struct request *req) | |||
| 394 | id = get_id_from_freelist(info); | 426 | id = get_id_from_freelist(info); |
| 395 | info->shadow[id].request = req; | 427 | info->shadow[id].request = req; |
| 396 | 428 | ||
| 397 | ring_req->u.rw.id = id; | ||
| 398 | ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); | ||
| 399 | ring_req->u.rw.handle = info->handle; | ||
| 400 | |||
| 401 | ring_req->operation = rq_data_dir(req) ? | ||
| 402 | BLKIF_OP_WRITE : BLKIF_OP_READ; | ||
| 403 | |||
| 404 | if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { | ||
| 405 | /* | ||
| 406 | * Ideally we can do an unordered flush-to-disk. In case the | ||
| 407 | * backend onlysupports barriers, use that. A barrier request | ||
| 408 | * a superset of FUA, so we can implement it the same | ||
| 409 | * way. (It's also a FLUSH+FUA, since it is | ||
| 410 | * guaranteed ordered WRT previous writes.) | ||
| 411 | */ | ||
| 412 | ring_req->operation = info->flush_op; | ||
| 413 | } | ||
| 414 | |||
| 415 | if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { | 429 | if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { |
| 416 | /* id, sector_number and handle are set above. */ | ||
| 417 | ring_req->operation = BLKIF_OP_DISCARD; | 430 | ring_req->operation = BLKIF_OP_DISCARD; |
| 418 | ring_req->u.discard.nr_sectors = blk_rq_sectors(req); | 431 | ring_req->u.discard.nr_sectors = blk_rq_sectors(req); |
| 432 | ring_req->u.discard.id = id; | ||
| 433 | ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req); | ||
| 419 | if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) | 434 | if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) |
| 420 | ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; | 435 | ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; |
| 421 | else | 436 | else |
| 422 | ring_req->u.discard.flag = 0; | 437 | ring_req->u.discard.flag = 0; |
| 423 | } else { | 438 | } else { |
| 424 | ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req, | 439 | BUG_ON(info->max_indirect_segments == 0 && |
| 425 | info->sg); | 440 | req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); |
| 426 | BUG_ON(ring_req->u.rw.nr_segments > | 441 | BUG_ON(info->max_indirect_segments && |
| 427 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | 442 | req->nr_phys_segments > info->max_indirect_segments); |
| 428 | 443 | nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); | |
| 429 | for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) { | 444 | ring_req->u.rw.id = id; |
| 445 | if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { | ||
| 446 | /* | ||
| 447 | * The indirect operation can only be a BLKIF_OP_READ or | ||
| 448 | * BLKIF_OP_WRITE | ||
| 449 | */ | ||
| 450 | BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); | ||
| 451 | ring_req->operation = BLKIF_OP_INDIRECT; | ||
| 452 | ring_req->u.indirect.indirect_op = rq_data_dir(req) ? | ||
| 453 | BLKIF_OP_WRITE : BLKIF_OP_READ; | ||
| 454 | ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req); | ||
| 455 | ring_req->u.indirect.handle = info->handle; | ||
| 456 | ring_req->u.indirect.nr_segments = nseg; | ||
| 457 | } else { | ||
| 458 | ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); | ||
| 459 | ring_req->u.rw.handle = info->handle; | ||
| 460 | ring_req->operation = rq_data_dir(req) ? | ||
| 461 | BLKIF_OP_WRITE : BLKIF_OP_READ; | ||
| 462 | if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { | ||
| 463 | /* | ||
| 464 | * Ideally we can do an unordered flush-to-disk. In case the | ||
| 465 | * backend only supports barriers, use that. A barrier request | ||
| 466 | * is a superset of FUA, so we can implement it the same | ||
| 467 | * way. (It's also a FLUSH+FUA, since it is | ||
| 468 | * guaranteed ordered WRT previous writes.) | ||
| 469 | */ | ||
| 470 | ring_req->operation = info->flush_op; | ||
| 471 | } | ||
| 472 | ring_req->u.rw.nr_segments = nseg; | ||
| 473 | } | ||
| 474 | for_each_sg(info->shadow[id].sg, sg, nseg, i) { | ||
| 430 | fsect = sg->offset >> 9; | 475 | fsect = sg->offset >> 9; |
| 431 | lsect = fsect + (sg->length >> 9) - 1; | 476 | lsect = fsect + (sg->length >> 9) - 1; |
| 432 | 477 | ||
| 478 | if ((ring_req->operation == BLKIF_OP_INDIRECT) && | ||
| 479 | (i % SEGS_PER_INDIRECT_FRAME == 0)) { | ||
| 480 | if (segments) | ||
| 481 | kunmap_atomic(segments); | ||
| 482 | |||
| 483 | n = i / SEGS_PER_INDIRECT_FRAME; | ||
| 484 | gnt_list_entry = get_grant(&gref_head, info); | ||
| 485 | info->shadow[id].indirect_grants[n] = gnt_list_entry; | ||
| 486 | segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); | ||
| 487 | ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; | ||
| 488 | } | ||
| 489 | |||
| 433 | gnt_list_entry = get_grant(&gref_head, info); | 490 | gnt_list_entry = get_grant(&gref_head, info); |
| 434 | ref = gnt_list_entry->gref; | 491 | ref = gnt_list_entry->gref; |
| 435 | 492 | ||
| @@ -441,8 +498,7 @@ static int blkif_queue_request(struct request *req) | |||
| 441 | 498 | ||
| 442 | BUG_ON(sg->offset + sg->length > PAGE_SIZE); | 499 | BUG_ON(sg->offset + sg->length > PAGE_SIZE); |
| 443 | 500 | ||
| 444 | shared_data = kmap_atomic( | 501 | shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); |
| 445 | pfn_to_page(gnt_list_entry->pfn)); | ||
| 446 | bvec_data = kmap_atomic(sg_page(sg)); | 502 | bvec_data = kmap_atomic(sg_page(sg)); |
| 447 | 503 | ||
| 448 | /* | 504 | /* |
| @@ -461,13 +517,23 @@ static int blkif_queue_request(struct request *req) | |||
| 461 | kunmap_atomic(bvec_data); | 517 | kunmap_atomic(bvec_data); |
| 462 | kunmap_atomic(shared_data); | 518 | kunmap_atomic(shared_data); |
| 463 | } | 519 | } |
| 464 | 520 | if (ring_req->operation != BLKIF_OP_INDIRECT) { | |
| 465 | ring_req->u.rw.seg[i] = | 521 | ring_req->u.rw.seg[i] = |
| 466 | (struct blkif_request_segment) { | 522 | (struct blkif_request_segment) { |
| 467 | .gref = ref, | 523 | .gref = ref, |
| 468 | .first_sect = fsect, | 524 | .first_sect = fsect, |
| 469 | .last_sect = lsect }; | 525 | .last_sect = lsect }; |
| 526 | } else { | ||
| 527 | n = i % SEGS_PER_INDIRECT_FRAME; | ||
| 528 | segments[n] = | ||
| 529 | (struct blkif_request_segment_aligned) { | ||
| 530 | .gref = ref, | ||
| 531 | .first_sect = fsect, | ||
| 532 | .last_sect = lsect }; | ||
| 533 | } | ||
| 470 | } | 534 | } |
| 535 | if (segments) | ||
| 536 | kunmap_atomic(segments); | ||
| 471 | } | 537 | } |
| 472 | 538 | ||
| 473 | info->ring.req_prod_pvt++; | 539 | info->ring.req_prod_pvt++; |
| @@ -542,7 +608,9 @@ wait: | |||
| 542 | flush_requests(info); | 608 | flush_requests(info); |
| 543 | } | 609 | } |
| 544 | 610 | ||
| 545 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | 611 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, |
| 612 | unsigned int physical_sector_size, | ||
| 613 | unsigned int segments) | ||
| 546 | { | 614 | { |
| 547 | struct request_queue *rq; | 615 | struct request_queue *rq; |
| 548 | struct blkfront_info *info = gd->private_data; | 616 | struct blkfront_info *info = gd->private_data; |
| @@ -564,14 +632,15 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | |||
| 564 | 632 | ||
| 565 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ | 633 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ |
| 566 | blk_queue_logical_block_size(rq, sector_size); | 634 | blk_queue_logical_block_size(rq, sector_size); |
| 567 | blk_queue_max_hw_sectors(rq, 512); | 635 | blk_queue_physical_block_size(rq, physical_sector_size); |
| 636 | blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512); | ||
| 568 | 637 | ||
| 569 | /* Each segment in a request is up to an aligned page in size. */ | 638 | /* Each segment in a request is up to an aligned page in size. */ |
| 570 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); | 639 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); |
| 571 | blk_queue_max_segment_size(rq, PAGE_SIZE); | 640 | blk_queue_max_segment_size(rq, PAGE_SIZE); |
| 572 | 641 | ||
| 573 | /* Ensure a merged request will fit in a single I/O ring slot. */ | 642 | /* Ensure a merged request will fit in a single I/O ring slot. */ |
| 574 | blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); | 643 | blk_queue_max_segments(rq, segments); |
| 575 | 644 | ||
| 576 | /* Make sure buffer addresses are sector-aligned. */ | 645 | /* Make sure buffer addresses are sector-aligned. */ |
| 577 | blk_queue_dma_alignment(rq, 511); | 646 | blk_queue_dma_alignment(rq, 511); |
| @@ -588,13 +657,16 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | |||
| 588 | static void xlvbd_flush(struct blkfront_info *info) | 657 | static void xlvbd_flush(struct blkfront_info *info) |
| 589 | { | 658 | { |
| 590 | blk_queue_flush(info->rq, info->feature_flush); | 659 | blk_queue_flush(info->rq, info->feature_flush); |
| 591 | printk(KERN_INFO "blkfront: %s: %s: %s %s\n", | 660 | printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n", |
| 592 | info->gd->disk_name, | 661 | info->gd->disk_name, |
| 593 | info->flush_op == BLKIF_OP_WRITE_BARRIER ? | 662 | info->flush_op == BLKIF_OP_WRITE_BARRIER ? |
| 594 | "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? | 663 | "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? |
| 595 | "flush diskcache" : "barrier or flush"), | 664 | "flush diskcache" : "barrier or flush"), |
| 596 | info->feature_flush ? "enabled" : "disabled", | 665 | info->feature_flush ? "enabled;" : "disabled;", |
| 597 | info->feature_persistent ? "using persistent grants" : ""); | 666 | "persistent grants:", |
| 667 | info->feature_persistent ? "enabled;" : "disabled;", | ||
| 668 | "indirect descriptors:", | ||
| 669 | info->max_indirect_segments ? "enabled;" : "disabled;"); | ||
| 598 | } | 670 | } |
| 599 | 671 | ||
| 600 | static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) | 672 | static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) |
| @@ -667,7 +739,8 @@ static char *encode_disk_name(char *ptr, unsigned int n) | |||
| 667 | 739 | ||
| 668 | static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | 740 | static int xlvbd_alloc_gendisk(blkif_sector_t capacity, |
| 669 | struct blkfront_info *info, | 741 | struct blkfront_info *info, |
| 670 | u16 vdisk_info, u16 sector_size) | 742 | u16 vdisk_info, u16 sector_size, |
| 743 | unsigned int physical_sector_size) | ||
| 671 | { | 744 | { |
| 672 | struct gendisk *gd; | 745 | struct gendisk *gd; |
| 673 | int nr_minors = 1; | 746 | int nr_minors = 1; |
| @@ -734,7 +807,9 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
| 734 | gd->driverfs_dev = &(info->xbdev->dev); | 807 | gd->driverfs_dev = &(info->xbdev->dev); |
| 735 | set_capacity(gd, capacity); | 808 | set_capacity(gd, capacity); |
| 736 | 809 | ||
| 737 | if (xlvbd_init_blk_queue(gd, sector_size)) { | 810 | if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size, |
| 811 | info->max_indirect_segments ? : | ||
| 812 | BLKIF_MAX_SEGMENTS_PER_REQUEST)) { | ||
| 738 | del_gendisk(gd); | 813 | del_gendisk(gd); |
| 739 | goto release; | 814 | goto release; |
| 740 | } | 815 | } |
| @@ -818,6 +893,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
| 818 | { | 893 | { |
| 819 | struct grant *persistent_gnt; | 894 | struct grant *persistent_gnt; |
| 820 | struct grant *n; | 895 | struct grant *n; |
| 896 | int i, j, segs; | ||
| 821 | 897 | ||
| 822 | /* Prevent new requests being issued until we fix things up. */ | 898 | /* Prevent new requests being issued until we fix things up. */ |
| 823 | spin_lock_irq(&info->io_lock); | 899 | spin_lock_irq(&info->io_lock); |
| @@ -843,6 +919,47 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
| 843 | } | 919 | } |
| 844 | BUG_ON(info->persistent_gnts_c != 0); | 920 | BUG_ON(info->persistent_gnts_c != 0); |
| 845 | 921 | ||
| 922 | for (i = 0; i < BLK_RING_SIZE; i++) { | ||
| 923 | /* | ||
| 924 | * Clear persistent grants present in requests already | ||
| 925 | * on the shared ring | ||
| 926 | */ | ||
| 927 | if (!info->shadow[i].request) | ||
| 928 | goto free_shadow; | ||
| 929 | |||
| 930 | segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? | ||
| 931 | info->shadow[i].req.u.indirect.nr_segments : | ||
| 932 | info->shadow[i].req.u.rw.nr_segments; | ||
| 933 | for (j = 0; j < segs; j++) { | ||
| 934 | persistent_gnt = info->shadow[i].grants_used[j]; | ||
| 935 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); | ||
| 936 | __free_page(pfn_to_page(persistent_gnt->pfn)); | ||
| 937 | kfree(persistent_gnt); | ||
| 938 | } | ||
| 939 | |||
| 940 | if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) | ||
| 941 | /* | ||
| 942 | * If this is not an indirect operation don't try to | ||
| 943 | * free indirect segments | ||
| 944 | */ | ||
| 945 | goto free_shadow; | ||
| 946 | |||
| 947 | for (j = 0; j < INDIRECT_GREFS(segs); j++) { | ||
| 948 | persistent_gnt = info->shadow[i].indirect_grants[j]; | ||
| 949 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); | ||
| 950 | __free_page(pfn_to_page(persistent_gnt->pfn)); | ||
| 951 | kfree(persistent_gnt); | ||
| 952 | } | ||
| 953 | |||
| 954 | free_shadow: | ||
| 955 | kfree(info->shadow[i].grants_used); | ||
| 956 | info->shadow[i].grants_used = NULL; | ||
| 957 | kfree(info->shadow[i].indirect_grants); | ||
| 958 | info->shadow[i].indirect_grants = NULL; | ||
| 959 | kfree(info->shadow[i].sg); | ||
| 960 | info->shadow[i].sg = NULL; | ||
| 961 | } | ||
| 962 | |||
| 846 | /* No more gnttab callback work. */ | 963 | /* No more gnttab callback work. */ |
| 847 | gnttab_cancel_free_callback(&info->callback); | 964 | gnttab_cancel_free_callback(&info->callback); |
| 848 | spin_unlock_irq(&info->io_lock); | 965 | spin_unlock_irq(&info->io_lock); |
| @@ -867,12 +984,13 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
| 867 | struct blkif_response *bret) | 984 | struct blkif_response *bret) |
| 868 | { | 985 | { |
| 869 | int i = 0; | 986 | int i = 0; |
| 870 | struct bio_vec *bvec; | 987 | struct scatterlist *sg; |
| 871 | struct req_iterator iter; | ||
| 872 | unsigned long flags; | ||
| 873 | char *bvec_data; | 988 | char *bvec_data; |
| 874 | void *shared_data; | 989 | void *shared_data; |
| 875 | unsigned int offset = 0; | 990 | int nseg; |
| 991 | |||
| 992 | nseg = s->req.operation == BLKIF_OP_INDIRECT ? | ||
| 993 | s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; | ||
| 876 | 994 | ||
| 877 | if (bret->operation == BLKIF_OP_READ) { | 995 | if (bret->operation == BLKIF_OP_READ) { |
| 878 | /* | 996 | /* |
| @@ -881,26 +999,29 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
| 881 | * than PAGE_SIZE, we have to keep track of the current offset, | 999 | * than PAGE_SIZE, we have to keep track of the current offset, |
| 882 | * to be sure we are copying the data from the right shared page. | 1000 | * to be sure we are copying the data from the right shared page. |
| 883 | */ | 1001 | */ |
| 884 | rq_for_each_segment(bvec, s->request, iter) { | 1002 | for_each_sg(s->sg, sg, nseg, i) { |
| 885 | BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); | 1003 | BUG_ON(sg->offset + sg->length > PAGE_SIZE); |
| 886 | if (bvec->bv_offset < offset) | ||
| 887 | i++; | ||
| 888 | BUG_ON(i >= s->req.u.rw.nr_segments); | ||
| 889 | shared_data = kmap_atomic( | 1004 | shared_data = kmap_atomic( |
| 890 | pfn_to_page(s->grants_used[i]->pfn)); | 1005 | pfn_to_page(s->grants_used[i]->pfn)); |
| 891 | bvec_data = bvec_kmap_irq(bvec, &flags); | 1006 | bvec_data = kmap_atomic(sg_page(sg)); |
| 892 | memcpy(bvec_data, shared_data + bvec->bv_offset, | 1007 | memcpy(bvec_data + sg->offset, |
| 893 | bvec->bv_len); | 1008 | shared_data + sg->offset, |
| 894 | bvec_kunmap_irq(bvec_data, &flags); | 1009 | sg->length); |
| 1010 | kunmap_atomic(bvec_data); | ||
| 895 | kunmap_atomic(shared_data); | 1011 | kunmap_atomic(shared_data); |
| 896 | offset = bvec->bv_offset + bvec->bv_len; | ||
| 897 | } | 1012 | } |
| 898 | } | 1013 | } |
| 899 | /* Add the persistent grant into the list of free grants */ | 1014 | /* Add the persistent grant into the list of free grants */ |
| 900 | for (i = 0; i < s->req.u.rw.nr_segments; i++) { | 1015 | for (i = 0; i < nseg; i++) { |
| 901 | list_add(&s->grants_used[i]->node, &info->persistent_gnts); | 1016 | list_add(&s->grants_used[i]->node, &info->persistent_gnts); |
| 902 | info->persistent_gnts_c++; | 1017 | info->persistent_gnts_c++; |
| 903 | } | 1018 | } |
| 1019 | if (s->req.operation == BLKIF_OP_INDIRECT) { | ||
| 1020 | for (i = 0; i < INDIRECT_GREFS(nseg); i++) { | ||
| 1021 | list_add(&s->indirect_grants[i]->node, &info->persistent_gnts); | ||
| 1022 | info->persistent_gnts_c++; | ||
| 1023 | } | ||
| 1024 | } | ||
| 904 | } | 1025 | } |
| 905 | 1026 | ||
| 906 | static irqreturn_t blkif_interrupt(int irq, void *dev_id) | 1027 | static irqreturn_t blkif_interrupt(int irq, void *dev_id) |
| @@ -1034,14 +1155,6 @@ static int setup_blkring(struct xenbus_device *dev, | |||
| 1034 | SHARED_RING_INIT(sring); | 1155 | SHARED_RING_INIT(sring); |
| 1035 | FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); | 1156 | FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); |
| 1036 | 1157 | ||
| 1037 | sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
| 1038 | |||
| 1039 | /* Allocate memory for grants */ | ||
| 1040 | err = fill_grant_buffer(info, BLK_RING_SIZE * | ||
| 1041 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
| 1042 | if (err) | ||
| 1043 | goto fail; | ||
| 1044 | |||
| 1045 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); | 1158 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); |
| 1046 | if (err < 0) { | 1159 | if (err < 0) { |
| 1047 | free_page((unsigned long)sring); | 1160 | free_page((unsigned long)sring); |
| @@ -1223,13 +1336,84 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
| 1223 | return 0; | 1336 | return 0; |
| 1224 | } | 1337 | } |
| 1225 | 1338 | ||
| 1339 | /* | ||
| 1340 | * This is a clone of md_trim_bio, used to split a bio into smaller ones | ||
| 1341 | */ | ||
| 1342 | static void trim_bio(struct bio *bio, int offset, int size) | ||
| 1343 | { | ||
| 1344 | /* 'bio' is a cloned bio which we need to trim to match | ||
| 1345 | * the given offset and size. | ||
| 1346 | * This requires adjusting bi_sector, bi_size, and bi_io_vec | ||
| 1347 | */ | ||
| 1348 | int i; | ||
| 1349 | struct bio_vec *bvec; | ||
| 1350 | int sofar = 0; | ||
| 1351 | |||
| 1352 | size <<= 9; | ||
| 1353 | if (offset == 0 && size == bio->bi_size) | ||
| 1354 | return; | ||
| 1355 | |||
| 1356 | bio->bi_sector += offset; | ||
| 1357 | bio->bi_size = size; | ||
| 1358 | offset <<= 9; | ||
| 1359 | clear_bit(BIO_SEG_VALID, &bio->bi_flags); | ||
| 1360 | |||
| 1361 | while (bio->bi_idx < bio->bi_vcnt && | ||
| 1362 | bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { | ||
| 1363 | /* remove this whole bio_vec */ | ||
| 1364 | offset -= bio->bi_io_vec[bio->bi_idx].bv_len; | ||
| 1365 | bio->bi_idx++; | ||
| 1366 | } | ||
| 1367 | if (bio->bi_idx < bio->bi_vcnt) { | ||
| 1368 | bio->bi_io_vec[bio->bi_idx].bv_offset += offset; | ||
| 1369 | bio->bi_io_vec[bio->bi_idx].bv_len -= offset; | ||
| 1370 | } | ||
| 1371 | /* avoid any complications with bi_idx being non-zero */ | ||
| 1372 | if (bio->bi_idx) { | ||
| 1373 | memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, | ||
| 1374 | (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); | ||
| 1375 | bio->bi_vcnt -= bio->bi_idx; | ||
| 1376 | bio->bi_idx = 0; | ||
| 1377 | } | ||
| 1378 | /* Make sure vcnt and last bv are not too big */ | ||
| 1379 | bio_for_each_segment(bvec, bio, i) { | ||
| 1380 | if (sofar + bvec->bv_len > size) | ||
| 1381 | bvec->bv_len = size - sofar; | ||
| 1382 | if (bvec->bv_len == 0) { | ||
| 1383 | bio->bi_vcnt = i; | ||
| 1384 | break; | ||
| 1385 | } | ||
| 1386 | sofar += bvec->bv_len; | ||
| 1387 | } | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | static void split_bio_end(struct bio *bio, int error) | ||
| 1391 | { | ||
| 1392 | struct split_bio *split_bio = bio->bi_private; | ||
| 1393 | |||
| 1394 | if (error) | ||
| 1395 | split_bio->err = error; | ||
| 1396 | |||
| 1397 | if (atomic_dec_and_test(&split_bio->pending)) { | ||
| 1398 | split_bio->bio->bi_phys_segments = 0; | ||
| 1399 | bio_endio(split_bio->bio, split_bio->err); | ||
| 1400 | kfree(split_bio); | ||
| 1401 | } | ||
| 1402 | bio_put(bio); | ||
| 1403 | } | ||
| 1226 | 1404 | ||
| 1227 | static int blkif_recover(struct blkfront_info *info) | 1405 | static int blkif_recover(struct blkfront_info *info) |
| 1228 | { | 1406 | { |
| 1229 | int i; | 1407 | int i; |
| 1230 | struct blkif_request *req; | 1408 | struct request *req, *n; |
| 1231 | struct blk_shadow *copy; | 1409 | struct blk_shadow *copy; |
| 1232 | int j; | 1410 | int rc; |
| 1411 | struct bio *bio, *cloned_bio; | ||
| 1412 | struct bio_list bio_list, merge_bio; | ||
| 1413 | unsigned int segs, offset; | ||
| 1414 | int pending, size; | ||
| 1415 | struct split_bio *split_bio; | ||
| 1416 | struct list_head requests; | ||
| 1233 | 1417 | ||
| 1234 | /* Stage 1: Make a safe copy of the shadow state. */ | 1418 | /* Stage 1: Make a safe copy of the shadow state. */ |
| 1235 | copy = kmemdup(info->shadow, sizeof(info->shadow), | 1419 | copy = kmemdup(info->shadow, sizeof(info->shadow), |
| @@ -1244,36 +1428,64 @@ static int blkif_recover(struct blkfront_info *info) | |||
| 1244 | info->shadow_free = info->ring.req_prod_pvt; | 1428 | info->shadow_free = info->ring.req_prod_pvt; |
| 1245 | info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; | 1429 | info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; |
| 1246 | 1430 | ||
| 1247 | /* Stage 3: Find pending requests and requeue them. */ | 1431 | rc = blkfront_setup_indirect(info); |
| 1432 | if (rc) { | ||
| 1433 | kfree(copy); | ||
| 1434 | return rc; | ||
| 1435 | } | ||
| 1436 | |||
| 1437 | segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; | ||
| 1438 | blk_queue_max_segments(info->rq, segs); | ||
| 1439 | bio_list_init(&bio_list); | ||
| 1440 | INIT_LIST_HEAD(&requests); | ||
| 1248 | for (i = 0; i < BLK_RING_SIZE; i++) { | 1441 | for (i = 0; i < BLK_RING_SIZE; i++) { |
| 1249 | /* Not in use? */ | 1442 | /* Not in use? */ |
| 1250 | if (!copy[i].request) | 1443 | if (!copy[i].request) |
| 1251 | continue; | 1444 | continue; |
| 1252 | 1445 | ||
| 1253 | /* Grab a request slot and copy shadow state into it. */ | 1446 | /* |
| 1254 | req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); | 1447 | * Get the bios in the request so we can re-queue them. |
| 1255 | *req = copy[i].req; | 1448 | */ |
| 1256 | 1449 | if (copy[i].request->cmd_flags & | |
| 1257 | /* We get a new request id, and must reset the shadow state. */ | 1450 | (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { |
| 1258 | req->u.rw.id = get_id_from_freelist(info); | 1451 | /* |
| 1259 | memcpy(&info->shadow[req->u.rw.id], ©[i], sizeof(copy[i])); | 1452 | * Flush operations don't contain bios, so |
| 1260 | 1453 | * we need to requeue the whole request | |
| 1261 | if (req->operation != BLKIF_OP_DISCARD) { | 1454 | */ |
| 1262 | /* Rewrite any grant references invalidated by susp/resume. */ | 1455 | list_add(©[i].request->queuelist, &requests); |
| 1263 | for (j = 0; j < req->u.rw.nr_segments; j++) | 1456 | continue; |
| 1264 | gnttab_grant_foreign_access_ref( | ||
| 1265 | req->u.rw.seg[j].gref, | ||
| 1266 | info->xbdev->otherend_id, | ||
| 1267 | pfn_to_mfn(copy[i].grants_used[j]->pfn), | ||
| 1268 | 0); | ||
| 1269 | } | 1457 | } |
| 1270 | info->shadow[req->u.rw.id].req = *req; | 1458 | merge_bio.head = copy[i].request->bio; |
| 1271 | 1459 | merge_bio.tail = copy[i].request->biotail; | |
| 1272 | info->ring.req_prod_pvt++; | 1460 | bio_list_merge(&bio_list, &merge_bio); |
| 1461 | copy[i].request->bio = NULL; | ||
| 1462 | blk_put_request(copy[i].request); | ||
| 1273 | } | 1463 | } |
| 1274 | 1464 | ||
| 1275 | kfree(copy); | 1465 | kfree(copy); |
| 1276 | 1466 | ||
| 1467 | /* | ||
| 1468 | * Empty the queue; this is important because we might have | ||
| 1469 | * requests in the queue with more segments than we | ||
| 1470 | * can handle now. | ||
| 1471 | */ | ||
| 1472 | spin_lock_irq(&info->io_lock); | ||
| 1473 | while ((req = blk_fetch_request(info->rq)) != NULL) { | ||
| 1474 | if (req->cmd_flags & | ||
| 1475 | (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { | ||
| 1476 | list_add(&req->queuelist, &requests); | ||
| 1477 | continue; | ||
| 1478 | } | ||
| 1479 | merge_bio.head = req->bio; | ||
| 1480 | merge_bio.tail = req->biotail; | ||
| 1481 | bio_list_merge(&bio_list, &merge_bio); | ||
| 1482 | req->bio = NULL; | ||
| 1483 | if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) | ||
| 1484 | pr_alert("diskcache flush request found!\n"); | ||
| 1485 | __blk_put_request(info->rq, req); | ||
| 1486 | } | ||
| 1487 | spin_unlock_irq(&info->io_lock); | ||
| 1488 | |||
| 1277 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | 1489 | xenbus_switch_state(info->xbdev, XenbusStateConnected); |
| 1278 | 1490 | ||
| 1279 | spin_lock_irq(&info->io_lock); | 1491 | spin_lock_irq(&info->io_lock); |
| @@ -1281,14 +1493,50 @@ static int blkif_recover(struct blkfront_info *info) | |||
| 1281 | /* Now safe for us to use the shared ring */ | 1493 | /* Now safe for us to use the shared ring */ |
| 1282 | info->connected = BLKIF_STATE_CONNECTED; | 1494 | info->connected = BLKIF_STATE_CONNECTED; |
| 1283 | 1495 | ||
| 1284 | /* Send off requeued requests */ | ||
| 1285 | flush_requests(info); | ||
| 1286 | |||
| 1287 | /* Kick any other new requests queued since we resumed */ | 1496 | /* Kick any other new requests queued since we resumed */ |
| 1288 | kick_pending_request_queues(info); | 1497 | kick_pending_request_queues(info); |
| 1289 | 1498 | ||
| 1499 | list_for_each_entry_safe(req, n, &requests, queuelist) { | ||
| 1500 | /* Requeue pending requests (flush or discard) */ | ||
| 1501 | list_del_init(&req->queuelist); | ||
| 1502 | BUG_ON(req->nr_phys_segments > segs); | ||
| 1503 | blk_requeue_request(info->rq, req); | ||
| 1504 | } | ||
| 1290 | spin_unlock_irq(&info->io_lock); | 1505 | spin_unlock_irq(&info->io_lock); |
| 1291 | 1506 | ||
| 1507 | while ((bio = bio_list_pop(&bio_list)) != NULL) { | ||
| 1508 | /* Traverse the list of pending bios and re-queue them */ | ||
| 1509 | if (bio_segments(bio) > segs) { | ||
| 1510 | /* | ||
| 1511 | * This bio has more segments than we can | ||
| 1512 | * handle, so we have to split it. | ||
| 1513 | */ | ||
| 1514 | pending = (bio_segments(bio) + segs - 1) / segs; | ||
| 1515 | split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); | ||
| 1516 | BUG_ON(split_bio == NULL); | ||
| 1517 | atomic_set(&split_bio->pending, pending); | ||
| 1518 | split_bio->bio = bio; | ||
| 1519 | for (i = 0; i < pending; i++) { | ||
| 1520 | offset = (i * segs * PAGE_SIZE) >> 9; | ||
| 1521 | size = min((unsigned int)(segs * PAGE_SIZE) >> 9, | ||
| 1522 | (unsigned int)(bio->bi_size >> 9) - offset); | ||
| 1523 | cloned_bio = bio_clone(bio, GFP_NOIO); | ||
| 1524 | BUG_ON(cloned_bio == NULL); | ||
| 1525 | trim_bio(cloned_bio, offset, size); | ||
| 1526 | cloned_bio->bi_private = split_bio; | ||
| 1527 | cloned_bio->bi_end_io = split_bio_end; | ||
| 1528 | submit_bio(cloned_bio->bi_rw, cloned_bio); | ||
| 1529 | } | ||
| 1530 | /* | ||
| 1531 | * Now we have to wait for all those smaller bios to | ||
| 1532 | * end, so we can also end the "parent" bio. | ||
| 1533 | */ | ||
| 1534 | continue; | ||
| 1535 | } | ||
| 1536 | /* We don't need to split this bio */ | ||
| 1537 | submit_bio(bio->bi_rw, bio); | ||
| 1538 | } | ||
| 1539 | |||
| 1292 | return 0; | 1540 | return 0; |
| 1293 | } | 1541 | } |
| 1294 | 1542 | ||
| @@ -1308,8 +1556,12 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
| 1308 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | 1556 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
| 1309 | 1557 | ||
| 1310 | err = talk_to_blkback(dev, info); | 1558 | err = talk_to_blkback(dev, info); |
| 1311 | if (info->connected == BLKIF_STATE_SUSPENDED && !err) | 1559 | |
| 1312 | err = blkif_recover(info); | 1560 | /* |
| 1561 | * We have to wait for the backend to switch to | ||
| 1562 | * connected state, since we want to read which | ||
| 1563 | * features it supports. | ||
| 1564 | */ | ||
| 1313 | 1565 | ||
| 1314 | return err; | 1566 | return err; |
| 1315 | } | 1567 | } |
| @@ -1387,6 +1639,60 @@ static void blkfront_setup_discard(struct blkfront_info *info) | |||
| 1387 | kfree(type); | 1639 | kfree(type); |
| 1388 | } | 1640 | } |
| 1389 | 1641 | ||
| 1642 | static int blkfront_setup_indirect(struct blkfront_info *info) | ||
| 1643 | { | ||
| 1644 | unsigned int indirect_segments, segs; | ||
| 1645 | int err, i; | ||
| 1646 | |||
| 1647 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | ||
| 1648 | "feature-max-indirect-segments", "%u", &indirect_segments, | ||
| 1649 | NULL); | ||
| 1650 | if (err) { | ||
| 1651 | info->max_indirect_segments = 0; | ||
| 1652 | segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; | ||
| 1653 | } else { | ||
| 1654 | info->max_indirect_segments = min(indirect_segments, | ||
| 1655 | xen_blkif_max_segments); | ||
| 1656 | segs = info->max_indirect_segments; | ||
| 1657 | } | ||
| 1658 | |||
| 1659 | err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE); | ||
| 1660 | if (err) | ||
| 1661 | goto out_of_memory; | ||
| 1662 | |||
| 1663 | for (i = 0; i < BLK_RING_SIZE; i++) { | ||
| 1664 | info->shadow[i].grants_used = kzalloc( | ||
| 1665 | sizeof(info->shadow[i].grants_used[0]) * segs, | ||
| 1666 | GFP_NOIO); | ||
| 1667 | info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); | ||
| 1668 | if (info->max_indirect_segments) | ||
| 1669 | info->shadow[i].indirect_grants = kzalloc( | ||
| 1670 | sizeof(info->shadow[i].indirect_grants[0]) * | ||
| 1671 | INDIRECT_GREFS(segs), | ||
| 1672 | GFP_NOIO); | ||
| 1673 | if ((info->shadow[i].grants_used == NULL) || | ||
| 1674 | (info->shadow[i].sg == NULL) || | ||
| 1675 | (info->max_indirect_segments && | ||
| 1676 | (info->shadow[i].indirect_grants == NULL))) | ||
| 1677 | goto out_of_memory; | ||
| 1678 | sg_init_table(info->shadow[i].sg, segs); | ||
| 1679 | } | ||
| 1680 | |||
| 1681 | |||
| 1682 | return 0; | ||
| 1683 | |||
| 1684 | out_of_memory: | ||
| 1685 | for (i = 0; i < BLK_RING_SIZE; i++) { | ||
| 1686 | kfree(info->shadow[i].grants_used); | ||
| 1687 | info->shadow[i].grants_used = NULL; | ||
| 1688 | kfree(info->shadow[i].sg); | ||
| 1689 | info->shadow[i].sg = NULL; | ||
| 1690 | kfree(info->shadow[i].indirect_grants); | ||
| 1691 | info->shadow[i].indirect_grants = NULL; | ||
| 1692 | } | ||
| 1693 | return -ENOMEM; | ||
| 1694 | } | ||
| 1695 | |||
| 1390 | /* | 1696 | /* |
| 1391 | * Invoked when the backend is finally 'ready' (and has produced | 1697 |
| 1392 | * the details about the physical device - #sectors, size, etc). | 1698 | * the details about the physical device - #sectors, size, etc). |
| @@ -1395,6 +1701,7 @@ static void blkfront_connect(struct blkfront_info *info) | |||
| 1395 | { | 1701 | { |
| 1396 | unsigned long long sectors; | 1702 | unsigned long long sectors; |
| 1397 | unsigned long sector_size; | 1703 | unsigned long sector_size; |
| 1704 | unsigned int physical_sector_size; | ||
| 1398 | unsigned int binfo; | 1705 | unsigned int binfo; |
| 1399 | int err; | 1706 | int err; |
| 1400 | int barrier, flush, discard, persistent; | 1707 | int barrier, flush, discard, persistent; |
| @@ -1414,8 +1721,15 @@ static void blkfront_connect(struct blkfront_info *info) | |||
| 1414 | set_capacity(info->gd, sectors); | 1721 | set_capacity(info->gd, sectors); |
| 1415 | revalidate_disk(info->gd); | 1722 | revalidate_disk(info->gd); |
| 1416 | 1723 | ||
| 1417 | /* fall through */ | 1724 | return; |
| 1418 | case BLKIF_STATE_SUSPENDED: | 1725 | case BLKIF_STATE_SUSPENDED: |
| 1726 | /* | ||
| 1727 | * If we are recovering from suspension, we need to wait | ||
| 1728 | * for the backend to announce its features before | ||
| 1729 | * reconnecting; at least we need to know if the backend | ||
| 1730 | * supports indirect descriptors, and how many. | ||
| 1731 | */ | ||
| 1732 | blkif_recover(info); | ||
| 1419 | return; | 1733 | return; |
| 1420 | 1734 | ||
| 1421 | default: | 1735 | default: |
| @@ -1437,6 +1751,16 @@ static void blkfront_connect(struct blkfront_info *info) | |||
| 1437 | return; | 1751 | return; |
| 1438 | } | 1752 | } |
| 1439 | 1753 | ||
| 1754 | /* | ||
| 1755 | * physcial-sector-size is a newer field, so old backends may not | ||
| 1756 | * provide this. Assume physical sector size to be the same as | ||
| 1757 | * sector_size in that case. | ||
| 1758 | */ | ||
| 1759 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, | ||
| 1760 | "physical-sector-size", "%u", &physical_sector_size); | ||
| 1761 | if (err != 1) | ||
| 1762 | physical_sector_size = sector_size; | ||
| 1763 | |||
| 1440 | info->feature_flush = 0; | 1764 | info->feature_flush = 0; |
| 1441 | info->flush_op = 0; | 1765 | info->flush_op = 0; |
| 1442 | 1766 | ||
| @@ -1483,7 +1807,15 @@ static void blkfront_connect(struct blkfront_info *info) | |||
| 1483 | else | 1807 | else |
| 1484 | info->feature_persistent = persistent; | 1808 | info->feature_persistent = persistent; |
| 1485 | 1809 | ||
| 1486 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); | 1810 | err = blkfront_setup_indirect(info); |
| 1811 | if (err) { | ||
| 1812 | xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", | ||
| 1813 | info->xbdev->otherend); | ||
| 1814 | return; | ||
| 1815 | } | ||
| 1816 | |||
| 1817 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size, | ||
| 1818 | physical_sector_size); | ||
| 1487 | if (err) { | 1819 | if (err) { |
| 1488 | xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", | 1820 | xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", |
| 1489 | info->xbdev->otherend); | 1821 | info->xbdev->otherend); |
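
Most of the new arithmetic in the xen-blkfront hunks above hangs off two macros defined earlier in the file, outside this excerpt: SEGS_PER_INDIRECT_FRAME and INDIRECT_GREFS(). A minimal standalone sketch of that arithmetic, assuming 4 KiB pages and an 8-byte `struct blkif_request_segment_aligned` entry (both are assumptions here; the driver derives them from the Xen headers):

```c
#include <stdio.h>

#define PAGE_SIZE		4096	/* assumed page size */
#define SEG_ENTRY_SIZE		8	/* assumed sizeof(struct blkif_request_segment_aligned) */
#define SEGS_PER_INDIRECT_FRAME	(PAGE_SIZE / SEG_ENTRY_SIZE)
#define INDIRECT_GREFS(segs)	(((segs) + SEGS_PER_INDIRECT_FRAME - 1) / \
				 SEGS_PER_INDIRECT_FRAME)

int main(void)
{
	unsigned int segs = 32;	/* hypothetical feature-max-indirect-segments value */

	/* One indirect frame stores up to 512 segment entries. */
	printf("segments per indirect frame: %d\n", SEGS_PER_INDIRECT_FRAME);

	/* Grants for the indirect pages themselves; each request therefore
	 * needs segs data grants plus INDIRECT_GREFS(segs) extra ones, which
	 * is the (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE sizing passed
	 * to fill_grant_buffer() above. */
	printf("indirect grant frames for %u segs: %u\n", segs, INDIRECT_GREFS(segs));

	/* Matches blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512):
	 * each segment covers at most one page, and a sector is 512 bytes. */
	printf("max hw sectors: %u\n", segs * PAGE_SIZE / 512);
	return 0;
}
```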
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 11f467c00d0a..a12b923bbaca 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
| @@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = { | |||
| 91 | { USB_DEVICE(0x0489, 0xe04e) }, | 91 | { USB_DEVICE(0x0489, 0xe04e) }, |
| 92 | { USB_DEVICE(0x0489, 0xe056) }, | 92 | { USB_DEVICE(0x0489, 0xe056) }, |
| 93 | { USB_DEVICE(0x0489, 0xe04d) }, | 93 | { USB_DEVICE(0x0489, 0xe04d) }, |
| 94 | { USB_DEVICE(0x04c5, 0x1330) }, | ||
| 95 | { USB_DEVICE(0x13d3, 0x3402) }, | ||
| 96 | { USB_DEVICE(0x0cf3, 0x3121) }, | ||
| 97 | { USB_DEVICE(0x0cf3, 0xe003) }, | ||
| 94 | 98 | ||
| 95 | /* Atheros AR5BBU12 with sflash firmware */ | 99 | /* Atheros AR5BBU12 with sflash firmware */ |
| 96 | { USB_DEVICE(0x0489, 0xE02C) }, | 100 | { USB_DEVICE(0x0489, 0xE02C) }, |
| @@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = { | |||
| 128 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, | 132 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, |
| 129 | { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, | 133 | { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, |
| 130 | { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, | 134 | { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, |
| 135 | { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, | ||
| 136 | { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, | ||
| 137 | { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, | ||
| 138 | { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, | ||
| 131 | 139 | ||
| 132 | /* Atheros AR5BBU22 with sflash firmware */ | 140 | /* Atheros AR5BBU22 with sflash firmware */ |
| 133 | { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, | 141 | { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, |
| @@ -193,24 +201,44 @@ error: | |||
| 193 | 201 | ||
| 194 | static int ath3k_get_state(struct usb_device *udev, unsigned char *state) | 202 | static int ath3k_get_state(struct usb_device *udev, unsigned char *state) |
| 195 | { | 203 | { |
| 196 | int pipe = 0; | 204 | int ret, pipe = 0; |
| 205 | char *buf; | ||
| 206 | |||
| 207 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); | ||
| 208 | if (!buf) | ||
| 209 | return -ENOMEM; | ||
| 197 | 210 | ||
| 198 | pipe = usb_rcvctrlpipe(udev, 0); | 211 | pipe = usb_rcvctrlpipe(udev, 0); |
| 199 | return usb_control_msg(udev, pipe, ATH3K_GETSTATE, | 212 | ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE, |
| 200 | USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, | 213 | USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, |
| 201 | state, 0x01, USB_CTRL_SET_TIMEOUT); | 214 | buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT); |
| 215 | |||
| 216 | *state = *buf; | ||
| 217 | kfree(buf); | ||
| 218 | |||
| 219 | return ret; | ||
| 202 | } | 220 | } |
| 203 | 221 | ||
| 204 | static int ath3k_get_version(struct usb_device *udev, | 222 | static int ath3k_get_version(struct usb_device *udev, |
| 205 | struct ath3k_version *version) | 223 | struct ath3k_version *version) |
| 206 | { | 224 | { |
| 207 | int pipe = 0; | 225 | int ret, pipe = 0; |
| 226 | struct ath3k_version *buf; | ||
| 227 | const int size = sizeof(*buf); | ||
| 228 | |||
| 229 | buf = kmalloc(size, GFP_KERNEL); | ||
| 230 | if (!buf) | ||
| 231 | return -ENOMEM; | ||
| 208 | 232 | ||
| 209 | pipe = usb_rcvctrlpipe(udev, 0); | 233 | pipe = usb_rcvctrlpipe(udev, 0); |
| 210 | return usb_control_msg(udev, pipe, ATH3K_GETVERSION, | 234 | ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION, |
| 211 | USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, | 235 | USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, |
| 212 | sizeof(struct ath3k_version), | 236 | buf, size, USB_CTRL_SET_TIMEOUT); |
| 213 | USB_CTRL_SET_TIMEOUT); | 237 | |
| 238 | memcpy(version, buf, size); | ||
| 239 | kfree(buf); | ||
| 240 | |||
| 241 | return ret; | ||
| 214 | } | 242 | } |
| 215 | 243 | ||
| 216 | static int ath3k_load_fwfile(struct usb_device *udev, | 244 | static int ath3k_load_fwfile(struct usb_device *udev, |
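
The ath3k_get_state()/ath3k_get_version() rework applies the usual USB rule: a buffer handed to usb_control_msg() must come from kmalloc(), because stack or caller-embedded memory is not guaranteed to be DMA-capable. A sketch of the bounce-buffer pattern in isolation; the helper name and request value are hypothetical, and unlike the hunks above it copies the result out only on success:

```c
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/usb.h>

/* Hypothetical helper: read one vendor-specific status byte into caller
 * memory that may live on the stack, bouncing through a kmalloc'd buffer
 * so the USB core always sees DMA-capable memory. */
static int read_vendor_state(struct usb_device *udev, u8 request, u8 *state)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
			      buf, 1, USB_CTRL_SET_TIMEOUT);
	if (ret >= 0)
		*state = *buf;

	kfree(buf);
	return ret;
}
```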
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index de4cf4daa2f4..8e16f0af6358 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
| @@ -154,6 +154,10 @@ static struct usb_device_id blacklist_table[] = { | |||
| 154 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, | 154 | { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, |
| 155 | { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, | 155 | { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, |
| 156 | { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, | 156 | { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, |
| 157 | { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, | ||
| 158 | { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, | ||
| 159 | { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, | ||
| 160 | { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, | ||
| 157 | 161 | ||
| 158 | /* Atheros AR5BBU12 with sflash firmware */ | 162 | /* Atheros AR5BBU12 with sflash firmware */ |
| 159 | { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, | 163 | { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, |
| @@ -1095,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev, | |||
| 1095 | if (IS_ERR(skb)) { | 1099 | if (IS_ERR(skb)) { |
| 1096 | BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", | 1100 | BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", |
| 1097 | hdev->name, cmd->opcode, PTR_ERR(skb)); | 1101 | hdev->name, cmd->opcode, PTR_ERR(skb)); |
| 1098 | return -PTR_ERR(skb); | 1102 | return PTR_ERR(skb); |
| 1099 | } | 1103 | } |
| 1100 | 1104 | ||
| 1101 | /* It ensures that the returned event matches the event data read from | 1105 | /* It ensures that the returned event matches the event data read from |
| @@ -1147,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) | |||
| 1147 | if (IS_ERR(skb)) { | 1151 | if (IS_ERR(skb)) { |
| 1148 | BT_ERR("%s sending initial HCI reset command failed (%ld)", | 1152 | BT_ERR("%s sending initial HCI reset command failed (%ld)", |
| 1149 | hdev->name, PTR_ERR(skb)); | 1153 | hdev->name, PTR_ERR(skb)); |
| 1150 | return -PTR_ERR(skb); | 1154 | return PTR_ERR(skb); |
| 1151 | } | 1155 | } |
| 1152 | kfree_skb(skb); | 1156 | kfree_skb(skb); |
| 1153 | 1157 | ||
| @@ -1161,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) | |||
| 1161 | if (IS_ERR(skb)) { | 1165 | if (IS_ERR(skb)) { |
| 1162 | BT_ERR("%s reading Intel fw version command failed (%ld)", | 1166 | BT_ERR("%s reading Intel fw version command failed (%ld)", |
| 1163 | hdev->name, PTR_ERR(skb)); | 1167 | hdev->name, PTR_ERR(skb)); |
| 1164 | return -PTR_ERR(skb); | 1168 | return PTR_ERR(skb); |
| 1165 | } | 1169 | } |
| 1166 | 1170 | ||
| 1167 | if (skb->len != sizeof(*ver)) { | 1171 | if (skb->len != sizeof(*ver)) { |
| @@ -1219,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) | |||
| 1219 | BT_ERR("%s entering Intel manufacturer mode failed (%ld)", | 1223 | BT_ERR("%s entering Intel manufacturer mode failed (%ld)", |
| 1220 | hdev->name, PTR_ERR(skb)); | 1224 | hdev->name, PTR_ERR(skb)); |
| 1221 | release_firmware(fw); | 1225 | release_firmware(fw); |
| 1222 | return -PTR_ERR(skb); | 1226 | return PTR_ERR(skb); |
| 1223 | } | 1227 | } |
| 1224 | 1228 | ||
| 1225 | if (skb->data[0]) { | 1229 | if (skb->data[0]) { |
| @@ -1276,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) | |||
| 1276 | if (IS_ERR(skb)) { | 1280 | if (IS_ERR(skb)) { |
| 1277 | BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", | 1281 | BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", |
| 1278 | hdev->name, PTR_ERR(skb)); | 1282 | hdev->name, PTR_ERR(skb)); |
| 1279 | return -PTR_ERR(skb); | 1283 | return PTR_ERR(skb); |
| 1280 | } | 1284 | } |
| 1281 | kfree_skb(skb); | 1285 | kfree_skb(skb); |
| 1282 | 1286 | ||
| @@ -1292,7 +1296,7 @@ exit_mfg_disable: | |||
| 1292 | if (IS_ERR(skb)) { | 1296 | if (IS_ERR(skb)) { |
| 1293 | BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", | 1297 | BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", |
| 1294 | hdev->name, PTR_ERR(skb)); | 1298 | hdev->name, PTR_ERR(skb)); |
| 1295 | return -PTR_ERR(skb); | 1299 | return PTR_ERR(skb); |
| 1296 | } | 1300 | } |
| 1297 | kfree_skb(skb); | 1301 | kfree_skb(skb); |
| 1298 | 1302 | ||
| @@ -1310,7 +1314,7 @@ exit_mfg_deactivate: | |||
| 1310 | if (IS_ERR(skb)) { | 1314 | if (IS_ERR(skb)) { |
| 1311 | BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", | 1315 | BT_ERR("%s exiting Intel manufacturer mode failed (%ld)", |
| 1312 | hdev->name, PTR_ERR(skb)); | 1316 | hdev->name, PTR_ERR(skb)); |
| 1313 | return -PTR_ERR(skb); | 1317 | return PTR_ERR(skb); |
| 1314 | } | 1318 | } |
| 1315 | kfree_skb(skb); | 1319 | kfree_skb(skb); |
| 1316 | 1320 | ||
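
Every btusb hunk is the same one-character fix: PTR_ERR() already yields a negative errno, so the old `return -PTR_ERR(skb);` handed callers a positive value they would not recognize as an error. A tiny illustration using only the standard err.h helpers:

```c
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/printk.h>

static void ptr_err_sign_demo(void)
{
	/* An error-carrying pointer, as __hci_cmd_sync() returns on failure. */
	void *skb = ERR_PTR(-ENODEV);

	/* PTR_ERR(skb) is -ENODEV (-19): a proper error return. */
	pr_info("PTR_ERR:  %ld\n", PTR_ERR(skb));

	/* -PTR_ERR(skb) is +ENODEV (+19): most callers treat >= 0 as success,
	 * so the old code silently swallowed the failure. */
	pr_info("-PTR_ERR: %ld\n", -PTR_ERR(skb));
}
```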
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index bf5d2477cb77..15f2e7025b78 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c | |||
| @@ -129,7 +129,8 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
| 129 | off_t j, io_pg_start; | 129 | off_t j, io_pg_start; |
| 130 | int io_pg_count; | 130 | int io_pg_count; |
| 131 | 131 | ||
| 132 | if (type != 0 || mem->type != 0) { | 132 | if (type != mem->type || |
| 133 | agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { | ||
| 133 | return -EINVAL; | 134 | return -EINVAL; |
| 134 | } | 135 | } |
| 135 | 136 | ||
| @@ -175,7 +176,8 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
| 175 | struct _parisc_agp_info *info = &parisc_agp_info; | 176 | struct _parisc_agp_info *info = &parisc_agp_info; |
| 176 | int i, io_pg_start, io_pg_count; | 177 | int i, io_pg_start, io_pg_count; |
| 177 | 178 | ||
| 178 | if (type != 0 || mem->type != 0) { | 179 | if (type != mem->type || |
| 180 | agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) { | ||
| 179 | return -EINVAL; | 181 | return -EINVAL; |
| 180 | } | 182 | } |
| 181 | 183 | ||
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 1b456fe9b87a..fc45567ad3ac 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, | |||
| 272 | unsigned long flags; | 272 | unsigned long flags; |
| 273 | 273 | ||
| 274 | spin_lock_irqsave(&portdev->ports_lock, flags); | 274 | spin_lock_irqsave(&portdev->ports_lock, flags); |
| 275 | list_for_each_entry(port, &portdev->ports, list) | 275 | list_for_each_entry(port, &portdev->ports, list) { |
| 276 | if (port->cdev->dev == dev) | 276 | if (port->cdev->dev == dev) { |
| 277 | kref_get(&port->kref); | ||
| 277 | goto out; | 278 | goto out; |
| 279 | } | ||
| 280 | } | ||
| 278 | port = NULL; | 281 | port = NULL; |
| 279 | out: | 282 | out: |
| 280 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | 283 | spin_unlock_irqrestore(&portdev->ports_lock, flags); |
| @@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | |||
| 746 | 749 | ||
| 747 | port = filp->private_data; | 750 | port = filp->private_data; |
| 748 | 751 | ||
| 752 | /* Port is hot-unplugged. */ | ||
| 753 | if (!port->guest_connected) | ||
| 754 | return -ENODEV; | ||
| 755 | |||
| 749 | if (!port_has_data(port)) { | 756 | if (!port_has_data(port)) { |
| 750 | /* | 757 | /* |
| 751 | * If nothing's connected on the host just return 0 in | 758 | * If nothing's connected on the host just return 0 in |
| @@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | |||
| 762 | if (ret < 0) | 769 | if (ret < 0) |
| 763 | return ret; | 770 | return ret; |
| 764 | } | 771 | } |
| 765 | /* Port got hot-unplugged. */ | 772 | /* Port got hot-unplugged while we were waiting above. */ |
| 766 | if (!port->guest_connected) | 773 | if (!port->guest_connected) |
| 767 | return -ENODEV; | 774 | return -ENODEV; |
| 768 | /* | 775 | /* |
| @@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, | |||
| 932 | if (is_rproc_serial(port->out_vq->vdev)) | 939 | if (is_rproc_serial(port->out_vq->vdev)) |
| 933 | return -EINVAL; | 940 | return -EINVAL; |
| 934 | 941 | ||
| 942 | /* | ||
| 943 | * pipe->nrbufs == 0 means there is no data to transfer, | ||
| 944 | * so just return 0. | ||
| 945 | */ | ||
| 946 | pipe_lock(pipe); | ||
| 947 | if (!pipe->nrbufs) { | ||
| 948 | ret = 0; | ||
| 949 | goto error_out; | ||
| 950 | } | ||
| 951 | |||
| 935 | ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); | 952 | ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); |
| 936 | if (ret < 0) | 953 | if (ret < 0) |
| 937 | return ret; | 954 | goto error_out; |
| 938 | 955 | ||
| 939 | buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); | 956 | buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); |
| 940 | if (!buf) | 957 | if (!buf) { |
| 941 | return -ENOMEM; | 958 | ret = -ENOMEM; |
| 959 | goto error_out; | ||
| 960 | } | ||
| 942 | 961 | ||
| 943 | sgl.n = 0; | 962 | sgl.n = 0; |
| 944 | sgl.len = 0; | 963 | sgl.len = 0; |
| @@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, | |||
| 946 | sgl.sg = buf->sg; | 965 | sgl.sg = buf->sg; |
| 947 | sg_init_table(sgl.sg, sgl.size); | 966 | sg_init_table(sgl.sg, sgl.size); |
| 948 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); | 967 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); |
| 968 | pipe_unlock(pipe); | ||
| 949 | if (likely(ret > 0)) | 969 | if (likely(ret > 0)) |
| 950 | ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); | 970 | ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); |
| 951 | 971 | ||
| 952 | if (unlikely(ret <= 0)) | 972 | if (unlikely(ret <= 0)) |
| 953 | free_buf(buf, true); | 973 | free_buf(buf, true); |
| 954 | return ret; | 974 | return ret; |
| 975 | |||
| 976 | error_out: | ||
| 977 | pipe_unlock(pipe); | ||
| 978 | return ret; | ||
| 955 | } | 979 | } |
| 956 | 980 | ||
| 957 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | 981 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) |
| @@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp) | |||
| 1019 | struct port *port; | 1043 | struct port *port; |
| 1020 | int ret; | 1044 | int ret; |
| 1021 | 1045 | ||
| 1046 | /* We get the port with a kref here */ | ||
| 1022 | port = find_port_by_devt(cdev->dev); | 1047 | port = find_port_by_devt(cdev->dev); |
| 1048 | if (!port) { | ||
| 1049 | /* Port was unplugged before we could proceed */ | ||
| 1050 | return -ENXIO; | ||
| 1051 | } | ||
| 1023 | filp->private_data = port; | 1052 | filp->private_data = port; |
| 1024 | 1053 | ||
| 1025 | /* Prevent against a port getting hot-unplugged at the same time */ | ||
| 1026 | spin_lock_irq(&port->portdev->ports_lock); | ||
| 1027 | kref_get(&port->kref); | ||
| 1028 | spin_unlock_irq(&port->portdev->ports_lock); | ||
| 1029 | |||
| 1030 | /* | 1054 | /* |
| 1031 | * Don't allow opening of console port devices -- that's done | 1055 | * Don't allow opening of console port devices -- that's done |
| 1032 | * via /dev/hvc | 1056 | * via /dev/hvc |
| @@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref) | |||
| 1498 | 1522 | ||
| 1499 | port = container_of(kref, struct port, kref); | 1523 | port = container_of(kref, struct port, kref); |
| 1500 | 1524 | ||
| 1501 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
| 1502 | device_destroy(pdrvdata.class, port->dev->devt); | ||
| 1503 | cdev_del(port->cdev); | ||
| 1504 | |||
| 1505 | kfree(port->name); | ||
| 1506 | |||
| 1507 | debugfs_remove(port->debugfs_file); | ||
| 1508 | |||
| 1509 | kfree(port); | 1525 | kfree(port); |
| 1510 | } | 1526 | } |
| 1511 | 1527 | ||
| @@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port) | |||
| 1539 | spin_unlock_irq(&port->portdev->ports_lock); | 1555 | spin_unlock_irq(&port->portdev->ports_lock); |
| 1540 | 1556 | ||
| 1541 | if (port->guest_connected) { | 1557 | if (port->guest_connected) { |
| 1558 | /* Let the app know the port is going down. */ | ||
| 1559 | send_sigio_to_port(port); | ||
| 1560 | |||
| 1561 | /* Do this after sigio is actually sent */ | ||
| 1542 | port->guest_connected = false; | 1562 | port->guest_connected = false; |
| 1543 | port->host_connected = false; | 1563 | port->host_connected = false; |
| 1544 | wake_up_interruptible(&port->waitqueue); | ||
| 1545 | 1564 | ||
| 1546 | /* Let the app know the port is going down. */ | 1565 | wake_up_interruptible(&port->waitqueue); |
| 1547 | send_sigio_to_port(port); | ||
| 1548 | } | 1566 | } |
| 1549 | 1567 | ||
| 1550 | if (is_console_port(port)) { | 1568 | if (is_console_port(port)) { |
| @@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port) | |||
| 1563 | */ | 1581 | */ |
| 1564 | port->portdev = NULL; | 1582 | port->portdev = NULL; |
| 1565 | 1583 | ||
| 1584 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
| 1585 | device_destroy(pdrvdata.class, port->dev->devt); | ||
| 1586 | cdev_del(port->cdev); | ||
| 1587 | |||
| 1588 | kfree(port->name); | ||
| 1589 | |||
| 1590 | debugfs_remove(port->debugfs_file); | ||
| 1591 | |||
| 1566 | /* | 1592 | /* |
| 1567 | * Locks around here are not necessary - a port can't be | 1593 | * Locks around here are not necessary - a port can't be |
| 1568 | * opened after we removed the port struct from ports_list | 1594 | * opened after we removed the port struct from ports_list |
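
The virtio_console hunks close a hot-unplug race: kref_get() now happens inside find_port_by_devt_in_portdev() while ports_lock is still held, rather than after the lookup, where unplug_port() could already have dropped the last reference. The lookup-pins-reference pattern reduced to essentials (the struct and list names here are illustrative, not the driver's):

```c
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {			/* stand-in for struct port */
	struct list_head list;
	struct kref kref;
	dev_t dev;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

/* Find the item and take a reference in one critical section, so the
 * object cannot be freed between "found it" and "pinned it". */
static struct item *find_and_get(dev_t dev)
{
	struct item *it;
	unsigned long flags;

	spin_lock_irqsave(&items_lock, flags);
	list_for_each_entry(it, &items, list) {
		if (it->dev == dev) {
			kref_get(&it->kref);
			goto out;
		}
	}
	it = NULL;
out:
	spin_unlock_irqrestore(&items_lock, flags);
	return it;
}
```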
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 1bdb882c845b..4e5739773c33 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
| @@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = { | |||
| 581 | DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), | 581 | DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), |
| 582 | DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), | 582 | DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), |
| 583 | DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), | 583 | DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), |
| 584 | DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), | 584 | DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3, |
| 585 | DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), | 585 | CLK_GET_RATE_NOCACHE, 0), |
| 586 | DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3, | ||
| 587 | CLK_GET_RATE_NOCACHE, 0), | ||
| 586 | DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), | 588 | DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), |
| 587 | DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), | 589 | DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, |
| 588 | DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), | 590 | 4, 3, CLK_GET_RATE_NOCACHE, 0), |
| 591 | DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, | ||
| 592 | 8, 3, CLK_GET_RATE_NOCACHE, 0), | ||
| 589 | DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), | 593 | DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), |
| 590 | }; | 594 | }; |
| 591 | 595 | ||
| @@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { | |||
| 863 | GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", | 867 | GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", |
| 864 | E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), | 868 | E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), |
| 865 | GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, | 869 | GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, |
| 866 | CLK_IGNORE_UNUSED, 0), | 870 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 867 | GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, | 871 | GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, |
| 868 | CLK_IGNORE_UNUSED, 0), | 872 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 869 | GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, | 873 | GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, |
| 870 | CLK_IGNORE_UNUSED, 0), | 874 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 871 | GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, | 875 | GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, |
| 872 | CLK_IGNORE_UNUSED, 0), | 876 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 873 | GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, | 877 | GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, |
| 874 | CLK_IGNORE_UNUSED, 0), | 878 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 875 | GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, | 879 | GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, |
| 876 | CLK_IGNORE_UNUSED, 0), | 880 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 877 | GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, | 881 | GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, |
| 878 | CLK_IGNORE_UNUSED, 0), | 882 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 879 | GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, | 883 | GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, |
| 880 | CLK_IGNORE_UNUSED, 0), | 884 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 881 | GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, | 885 | GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, |
| 882 | CLK_IGNORE_UNUSED, 0), | 886 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 883 | GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, | 887 | GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, |
| 884 | CLK_IGNORE_UNUSED, 0), | 888 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 885 | GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, | 889 | GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, |
| 886 | CLK_IGNORE_UNUSED, 0), | 890 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 887 | GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, | 891 | GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, |
| 888 | CLK_IGNORE_UNUSED, 0), | 892 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 889 | GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, | 893 | GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, |
| 890 | CLK_IGNORE_UNUSED, 0), | 894 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 891 | GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, | 895 | GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, |
| 892 | CLK_IGNORE_UNUSED, 0), | 896 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 893 | GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, | 897 | GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, |
| 894 | CLK_IGNORE_UNUSED, 0), | 898 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 895 | GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, | 899 | GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, |
| 896 | CLK_IGNORE_UNUSED, 0), | 900 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 897 | GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, | 901 | GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, |
| 898 | CLK_IGNORE_UNUSED, 0), | 902 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 899 | GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, | 903 | GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, |
| 900 | CLK_IGNORE_UNUSED, 0), | 904 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 901 | GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, | 905 | GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, |
| 902 | CLK_IGNORE_UNUSED, 0), | 906 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 903 | GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, | 907 | GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, |
| 904 | CLK_IGNORE_UNUSED, 0), | 908 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 905 | GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, | 909 | GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, |
| 906 | CLK_IGNORE_UNUSED, 0), | 910 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 907 | GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, | 911 | GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, |
| 908 | CLK_IGNORE_UNUSED, 0), | 912 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 909 | GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, | 913 | GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, |
| 910 | CLK_IGNORE_UNUSED, 0), | 914 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 911 | GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, | 915 | GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, |
| 912 | CLK_IGNORE_UNUSED, 0), | 916 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 913 | GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, | 917 | GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, |
| 914 | CLK_IGNORE_UNUSED, 0), | 918 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 915 | GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, | 919 | GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, |
| 916 | CLK_IGNORE_UNUSED, 0), | 920 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
| 917 | GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), | 921 | GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), |
| 918 | }; | 922 | }; |
| 919 | 923 | ||
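
The exynos4 changes add CLK_GET_RATE_NOCACHE to every ISP clock, which makes the common clock framework re-read the hardware on each clk_get_rate() instead of trusting its cached rate; presumably these dividers and gates sit in the ISP power domain and can change behind the framework's back, an inference from the flag's purpose rather than something stated in the diff. A sketch with the generic divider API instead of the samsung DIV_F macro:

```c
#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(isp_div_lock);

/* Register a 3-bit divider whose rate must always be read back from the
 * register (CLK_GET_RATE_NOCACHE), mirroring the div_isp0 entry above.
 * The register pointer comes from the caller; names match the diff. */
static struct clk *register_isp_div(void __iomem *reg)
{
	return clk_register_divider(NULL, "div_isp0", "aclk200",
				    CLK_GET_RATE_NOCACHE,
				    reg, 0, 3, 0, &isp_div_lock);
}
```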
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 5c205b60a82a..089d3e30e221 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c | |||
| @@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock); | |||
| 71 | static DEFINE_SPINLOCK(ddrpll_lock); | 71 | static DEFINE_SPINLOCK(ddrpll_lock); |
| 72 | static DEFINE_SPINLOCK(iopll_lock); | 72 | static DEFINE_SPINLOCK(iopll_lock); |
| 73 | static DEFINE_SPINLOCK(armclk_lock); | 73 | static DEFINE_SPINLOCK(armclk_lock); |
| 74 | static DEFINE_SPINLOCK(swdtclk_lock); | ||
| 74 | static DEFINE_SPINLOCK(ddrclk_lock); | 75 | static DEFINE_SPINLOCK(ddrclk_lock); |
| 75 | static DEFINE_SPINLOCK(dciclk_lock); | 76 | static DEFINE_SPINLOCK(dciclk_lock); |
| 76 | static DEFINE_SPINLOCK(gem0clk_lock); | 77 | static DEFINE_SPINLOCK(gem0clk_lock); |
| @@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
| 293 | } | 294 | } |
| 294 | clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], | 295 | clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], |
| 295 | swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, | 296 | swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, |
| 296 | SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); | 297 | SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock); |
| 297 | 298 | ||
| 298 | /* DDR clocks */ | 299 | /* DDR clocks */ |
| 299 | clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, | 300 | clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, |
| @@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
| 364 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, | 365 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, |
| 365 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, | 366 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, |
| 366 | &gem0clk_lock); | 367 | &gem0clk_lock); |
| 367 | clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, | 368 | clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, |
| 368 | SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); | 369 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0, |
| 370 | &gem0clk_lock); | ||
| 369 | clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], | 371 | clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], |
| 370 | "gem0_emio_mux", CLK_SET_RATE_PARENT, | 372 | "gem0_emio_mux", CLK_SET_RATE_PARENT, |
| 371 | SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); | 373 | SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); |
| @@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
| 386 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, | 388 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, |
| 387 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, | 389 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, |
| 388 | &gem1clk_lock); | 390 | &gem1clk_lock); |
| 389 | clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0, | 391 | clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, |
| 390 | SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); | 392 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0, |
| 393 | &gem1clk_lock); | ||
| 391 | clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], | 394 | clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], |
| 392 | "gem1_emio_mux", CLK_SET_RATE_PARENT, | 395 | "gem1_emio_mux", CLK_SET_RATE_PARENT, |
| 393 | SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); | 396 | SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a4ad7339588d..f0a5e2b0eb8a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -1177,14 +1177,11 @@ static int __cpufreq_remove_dev(struct device *dev, | |||
| 1177 | __func__, cpu_dev->id, cpu); | 1177 | __func__, cpu_dev->id, cpu); |
| 1178 | } | 1178 | } |
| 1179 | 1179 | ||
| 1180 | if ((cpus == 1) && (cpufreq_driver->target)) | ||
| 1181 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); | ||
| 1182 | |||
| 1183 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); | ||
| 1184 | cpufreq_cpu_put(data); | ||
| 1185 | |||
| 1186 | /* If cpu is last user of policy, free policy */ | 1180 | /* If cpu is last user of policy, free policy */ |
| 1187 | if (cpus == 1) { | 1181 | if (cpus == 1) { |
| 1182 | if (cpufreq_driver->target) | ||
| 1183 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); | ||
| 1184 | |||
| 1188 | lock_policy_rwsem_read(cpu); | 1185 | lock_policy_rwsem_read(cpu); |
| 1189 | kobj = &data->kobj; | 1186 | kobj = &data->kobj; |
| 1190 | cmp = &data->kobj_unregister; | 1187 | cmp = &data->kobj_unregister; |
| @@ -1205,9 +1202,13 @@ static int __cpufreq_remove_dev(struct device *dev, | |||
| 1205 | free_cpumask_var(data->related_cpus); | 1202 | free_cpumask_var(data->related_cpus); |
| 1206 | free_cpumask_var(data->cpus); | 1203 | free_cpumask_var(data->cpus); |
| 1207 | kfree(data); | 1204 | kfree(data); |
| 1208 | } else if (cpufreq_driver->target) { | 1205 | } else { |
| 1209 | __cpufreq_governor(data, CPUFREQ_GOV_START); | 1206 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); |
| 1210 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | 1207 | cpufreq_cpu_put(data); |
| 1208 | if (cpufreq_driver->target) { | ||
| 1209 | __cpufreq_governor(data, CPUFREQ_GOV_START); | ||
| 1210 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | ||
| 1211 | } | ||
| 1211 | } | 1212 | } |
| 1212 | 1213 | ||
| 1213 | per_cpu(cpufreq_policy_cpu, cpu) = -1; | 1214 | per_cpu(cpufreq_policy_cpu, cpu) = -1; |
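The cpufreq hunk folds the governor POLICY_EXIT and the cpufreq_cpu_put() into the branches where they belong: the last user tears the policy down without first dropping the reference it is still using, and only the not-last-user path puts the reference and restarts the governor. One reading of the hazard in the old shape, as a generic drop-before-last-use sketch (kref-style names are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct policy { int refs; int cpu_count; };

static void policy_put(struct policy *p)
{
	if (--p->refs == 0) {
		printf("last reference gone, freeing policy\n");
		free(p);
	}
}

int main(void)
{
	struct policy *data = malloc(sizeof(*data));

	data->refs = 1;
	data->cpu_count = 1;

	/*
	 * Old shape: put the reference, then keep touching data on the
	 * last-CPU path (use-after-free if ours was the final ref).
	 * New shape: finish every use of data inside the branch, and
	 * only drop the reference on the path that keeps it alive.
	 */
	if (data->cpu_count == 1) {
		printf("last user: tearing down policy\n");
		free(data);		/* teardown owns the object now */
	} else {
		policy_put(data);	/* others still hold references */
	}
	return 0;
}
```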
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 0ceb2eff5a7e..f97cb3d8c5a2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
| @@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, | |||
| 221 | return count; | 221 | return count; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | 224 | static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, |
| 225 | size_t count) | 225 | const char *buf, size_t count) |
| 226 | { | 226 | { |
| 227 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | 227 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; |
| 228 | unsigned int input, j; | 228 | unsigned int input, j; |
| @@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
| 235 | if (input > 1) | 235 | if (input > 1) |
| 236 | input = 1; | 236 | input = 1; |
| 237 | 237 | ||
| 238 | if (input == cs_tuners->ignore_nice) /* nothing to do */ | 238 | if (input == cs_tuners->ignore_nice_load) /* nothing to do */ |
| 239 | return count; | 239 | return count; |
| 240 | 240 | ||
| 241 | cs_tuners->ignore_nice = input; | 241 | cs_tuners->ignore_nice_load = input; |
| 242 | 242 | ||
| 243 | /* we need to re-evaluate prev_cpu_idle */ | 243 | /* we need to re-evaluate prev_cpu_idle */ |
| 244 | for_each_online_cpu(j) { | 244 | for_each_online_cpu(j) { |
| @@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
| 246 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); | 246 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); |
| 247 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, | 247 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
| 248 | &dbs_info->cdbs.prev_cpu_wall, 0); | 248 | &dbs_info->cdbs.prev_cpu_wall, 0); |
| 249 | if (cs_tuners->ignore_nice) | 249 | if (cs_tuners->ignore_nice_load) |
| 250 | dbs_info->cdbs.prev_cpu_nice = | 250 | dbs_info->cdbs.prev_cpu_nice = |
| 251 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 251 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
| 252 | } | 252 | } |
| @@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate); | |||
| 279 | show_store_one(cs, sampling_down_factor); | 279 | show_store_one(cs, sampling_down_factor); |
| 280 | show_store_one(cs, up_threshold); | 280 | show_store_one(cs, up_threshold); |
| 281 | show_store_one(cs, down_threshold); | 281 | show_store_one(cs, down_threshold); |
| 282 | show_store_one(cs, ignore_nice); | 282 | show_store_one(cs, ignore_nice_load); |
| 283 | show_store_one(cs, freq_step); | 283 | show_store_one(cs, freq_step); |
| 284 | declare_show_sampling_rate_min(cs); | 284 | declare_show_sampling_rate_min(cs); |
| 285 | 285 | ||
| @@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate); | |||
| 287 | gov_sys_pol_attr_rw(sampling_down_factor); | 287 | gov_sys_pol_attr_rw(sampling_down_factor); |
| 288 | gov_sys_pol_attr_rw(up_threshold); | 288 | gov_sys_pol_attr_rw(up_threshold); |
| 289 | gov_sys_pol_attr_rw(down_threshold); | 289 | gov_sys_pol_attr_rw(down_threshold); |
| 290 | gov_sys_pol_attr_rw(ignore_nice); | 290 | gov_sys_pol_attr_rw(ignore_nice_load); |
| 291 | gov_sys_pol_attr_rw(freq_step); | 291 | gov_sys_pol_attr_rw(freq_step); |
| 292 | gov_sys_pol_attr_ro(sampling_rate_min); | 292 | gov_sys_pol_attr_ro(sampling_rate_min); |
| 293 | 293 | ||
| @@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { | |||
| 297 | &sampling_down_factor_gov_sys.attr, | 297 | &sampling_down_factor_gov_sys.attr, |
| 298 | &up_threshold_gov_sys.attr, | 298 | &up_threshold_gov_sys.attr, |
| 299 | &down_threshold_gov_sys.attr, | 299 | &down_threshold_gov_sys.attr, |
| 300 | &ignore_nice_gov_sys.attr, | 300 | &ignore_nice_load_gov_sys.attr, |
| 301 | &freq_step_gov_sys.attr, | 301 | &freq_step_gov_sys.attr, |
| 302 | NULL | 302 | NULL |
| 303 | }; | 303 | }; |
| @@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { | |||
| 313 | &sampling_down_factor_gov_pol.attr, | 313 | &sampling_down_factor_gov_pol.attr, |
| 314 | &up_threshold_gov_pol.attr, | 314 | &up_threshold_gov_pol.attr, |
| 315 | &down_threshold_gov_pol.attr, | 315 | &down_threshold_gov_pol.attr, |
| 316 | &ignore_nice_gov_pol.attr, | 316 | &ignore_nice_load_gov_pol.attr, |
| 317 | &freq_step_gov_pol.attr, | 317 | &freq_step_gov_pol.attr, |
| 318 | NULL | 318 | NULL |
| 319 | }; | 319 | }; |
| @@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data) | |||
| 338 | tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; | 338 | tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; |
| 339 | tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; | 339 | tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; |
| 340 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; | 340 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; |
| 341 | tuners->ignore_nice = 0; | 341 | tuners->ignore_nice_load = 0; |
| 342 | tuners->freq_step = DEF_FREQUENCY_STEP; | 342 | tuners->freq_step = DEF_FREQUENCY_STEP; |
| 343 | 343 | ||
| 344 | dbs_data->tuners = tuners; | 344 | dbs_data->tuners = tuners; |
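This rename (continued in the governor core, header, and ondemand hunks below) is user-visible: show_store_one() builds the sysfs attribute and accessor names out of the tuner token by token pasting, so renaming ignore_nice to ignore_nice_load renames the file under sysfs as well, apparently restoring the attribute name older kernels exposed. A self-contained model of the pasting (the real macros live in cpufreq_governor.h):

```c
#include <stdio.h>

struct tuners { unsigned int ignore_nice_load; };

/* the ## operator pastes the tuner name into the function identifier */
#define show_one(name)					\
static unsigned int show_##name(struct tuners *t)	\
{							\
	return t->name;					\
}

show_one(ignore_nice_load)	/* defines show_ignore_nice_load() */

int main(void)
{
	struct tuners t = { .ignore_nice_load = 1 };

	printf("ignore_nice_load = %u\n", show_ignore_nice_load(&t));
	return 0;
}
```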
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 7b839a8db2a7..e59afaa9da23 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
| @@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) | |||
| 47 | unsigned int j; | 47 | unsigned int j; |
| 48 | 48 | ||
| 49 | if (dbs_data->cdata->governor == GOV_ONDEMAND) | 49 | if (dbs_data->cdata->governor == GOV_ONDEMAND) |
| 50 | ignore_nice = od_tuners->ignore_nice; | 50 | ignore_nice = od_tuners->ignore_nice_load; |
| 51 | else | 51 | else |
| 52 | ignore_nice = cs_tuners->ignore_nice; | 52 | ignore_nice = cs_tuners->ignore_nice_load; |
| 53 | 53 | ||
| 54 | policy = cdbs->cur_policy; | 54 | policy = cdbs->cur_policy; |
| 55 | 55 | ||
| @@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
| 298 | cs_tuners = dbs_data->tuners; | 298 | cs_tuners = dbs_data->tuners; |
| 299 | cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); | 299 | cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); |
| 300 | sampling_rate = cs_tuners->sampling_rate; | 300 | sampling_rate = cs_tuners->sampling_rate; |
| 301 | ignore_nice = cs_tuners->ignore_nice; | 301 | ignore_nice = cs_tuners->ignore_nice_load; |
| 302 | } else { | 302 | } else { |
| 303 | od_tuners = dbs_data->tuners; | 303 | od_tuners = dbs_data->tuners; |
| 304 | od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); | 304 | od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); |
| 305 | sampling_rate = od_tuners->sampling_rate; | 305 | sampling_rate = od_tuners->sampling_rate; |
| 306 | ignore_nice = od_tuners->ignore_nice; | 306 | ignore_nice = od_tuners->ignore_nice_load; |
| 307 | od_ops = dbs_data->cdata->gov_ops; | 307 | od_ops = dbs_data->cdata->gov_ops; |
| 308 | io_busy = od_tuners->io_is_busy; | 308 | io_busy = od_tuners->io_is_busy; |
| 309 | } | 309 | } |
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 6663ec3b3056..d5f12b4b11b8 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
| @@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s { | |||
| 165 | 165 | ||
| 166 | /* Per policy Governers sysfs tunables */ | 166 | /* Per policy Governers sysfs tunables */ |
| 167 | struct od_dbs_tuners { | 167 | struct od_dbs_tuners { |
| 168 | unsigned int ignore_nice; | 168 | unsigned int ignore_nice_load; |
| 169 | unsigned int sampling_rate; | 169 | unsigned int sampling_rate; |
| 170 | unsigned int sampling_down_factor; | 170 | unsigned int sampling_down_factor; |
| 171 | unsigned int up_threshold; | 171 | unsigned int up_threshold; |
| @@ -175,7 +175,7 @@ struct od_dbs_tuners { | |||
| 175 | }; | 175 | }; |
| 176 | 176 | ||
| 177 | struct cs_dbs_tuners { | 177 | struct cs_dbs_tuners { |
| 178 | unsigned int ignore_nice; | 178 | unsigned int ignore_nice_load; |
| 179 | unsigned int sampling_rate; | 179 | unsigned int sampling_rate; |
| 180 | unsigned int sampling_down_factor; | 180 | unsigned int sampling_down_factor; |
| 181 | unsigned int up_threshold; | 181 | unsigned int up_threshold; |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 93eb5cbcc1f6..c087347d6688 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
| @@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, | |||
| 403 | return count; | 403 | return count; |
| 404 | } | 404 | } |
| 405 | 405 | ||
| 406 | static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | 406 | static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, |
| 407 | size_t count) | 407 | const char *buf, size_t count) |
| 408 | { | 408 | { |
| 409 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | 409 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; |
| 410 | unsigned int input; | 410 | unsigned int input; |
| @@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
| 419 | if (input > 1) | 419 | if (input > 1) |
| 420 | input = 1; | 420 | input = 1; |
| 421 | 421 | ||
| 422 | if (input == od_tuners->ignore_nice) { /* nothing to do */ | 422 | if (input == od_tuners->ignore_nice_load) { /* nothing to do */ |
| 423 | return count; | 423 | return count; |
| 424 | } | 424 | } |
| 425 | od_tuners->ignore_nice = input; | 425 | od_tuners->ignore_nice_load = input; |
| 426 | 426 | ||
| 427 | /* we need to re-evaluate prev_cpu_idle */ | 427 | /* we need to re-evaluate prev_cpu_idle */ |
| 428 | for_each_online_cpu(j) { | 428 | for_each_online_cpu(j) { |
| @@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
| 430 | dbs_info = &per_cpu(od_cpu_dbs_info, j); | 430 | dbs_info = &per_cpu(od_cpu_dbs_info, j); |
| 431 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, | 431 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
| 432 | &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); | 432 | &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); |
| 433 | if (od_tuners->ignore_nice) | 433 | if (od_tuners->ignore_nice_load) |
| 434 | dbs_info->cdbs.prev_cpu_nice = | 434 | dbs_info->cdbs.prev_cpu_nice = |
| 435 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 435 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
| 436 | 436 | ||
| @@ -461,7 +461,7 @@ show_store_one(od, sampling_rate); | |||
| 461 | show_store_one(od, io_is_busy); | 461 | show_store_one(od, io_is_busy); |
| 462 | show_store_one(od, up_threshold); | 462 | show_store_one(od, up_threshold); |
| 463 | show_store_one(od, sampling_down_factor); | 463 | show_store_one(od, sampling_down_factor); |
| 464 | show_store_one(od, ignore_nice); | 464 | show_store_one(od, ignore_nice_load); |
| 465 | show_store_one(od, powersave_bias); | 465 | show_store_one(od, powersave_bias); |
| 466 | declare_show_sampling_rate_min(od); | 466 | declare_show_sampling_rate_min(od); |
| 467 | 467 | ||
| @@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate); | |||
| 469 | gov_sys_pol_attr_rw(io_is_busy); | 469 | gov_sys_pol_attr_rw(io_is_busy); |
| 470 | gov_sys_pol_attr_rw(up_threshold); | 470 | gov_sys_pol_attr_rw(up_threshold); |
| 471 | gov_sys_pol_attr_rw(sampling_down_factor); | 471 | gov_sys_pol_attr_rw(sampling_down_factor); |
| 472 | gov_sys_pol_attr_rw(ignore_nice); | 472 | gov_sys_pol_attr_rw(ignore_nice_load); |
| 473 | gov_sys_pol_attr_rw(powersave_bias); | 473 | gov_sys_pol_attr_rw(powersave_bias); |
| 474 | gov_sys_pol_attr_ro(sampling_rate_min); | 474 | gov_sys_pol_attr_ro(sampling_rate_min); |
| 475 | 475 | ||
| @@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { | |||
| 478 | &sampling_rate_gov_sys.attr, | 478 | &sampling_rate_gov_sys.attr, |
| 479 | &up_threshold_gov_sys.attr, | 479 | &up_threshold_gov_sys.attr, |
| 480 | &sampling_down_factor_gov_sys.attr, | 480 | &sampling_down_factor_gov_sys.attr, |
| 481 | &ignore_nice_gov_sys.attr, | 481 | &ignore_nice_load_gov_sys.attr, |
| 482 | &powersave_bias_gov_sys.attr, | 482 | &powersave_bias_gov_sys.attr, |
| 483 | &io_is_busy_gov_sys.attr, | 483 | &io_is_busy_gov_sys.attr, |
| 484 | NULL | 484 | NULL |
| @@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { | |||
| 494 | &sampling_rate_gov_pol.attr, | 494 | &sampling_rate_gov_pol.attr, |
| 495 | &up_threshold_gov_pol.attr, | 495 | &up_threshold_gov_pol.attr, |
| 496 | &sampling_down_factor_gov_pol.attr, | 496 | &sampling_down_factor_gov_pol.attr, |
| 497 | &ignore_nice_gov_pol.attr, | 497 | &ignore_nice_load_gov_pol.attr, |
| 498 | &powersave_bias_gov_pol.attr, | 498 | &powersave_bias_gov_pol.attr, |
| 499 | &io_is_busy_gov_pol.attr, | 499 | &io_is_busy_gov_pol.attr, |
| 500 | NULL | 500 | NULL |
| @@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data) | |||
| 544 | } | 544 | } |
| 545 | 545 | ||
| 546 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; | 546 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; |
| 547 | tuners->ignore_nice = 0; | 547 | tuners->ignore_nice_load = 0; |
| 548 | tuners->powersave_bias = default_powersave_bias; | 548 | tuners->powersave_bias = default_powersave_bias; |
| 549 | tuners->io_is_busy = should_io_be_busy(); | 549 | tuners->io_is_busy = should_io_be_busy(); |
| 550 | 550 | ||
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b012d7600e1a..7cde885011ed 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -103,10 +103,10 @@ struct pstate_adjust_policy { | |||
| 103 | static struct pstate_adjust_policy default_policy = { | 103 | static struct pstate_adjust_policy default_policy = { |
| 104 | .sample_rate_ms = 10, | 104 | .sample_rate_ms = 10, |
| 105 | .deadband = 0, | 105 | .deadband = 0, |
| 106 | .setpoint = 109, | 106 | .setpoint = 97, |
| 107 | .p_gain_pct = 17, | 107 | .p_gain_pct = 20, |
| 108 | .d_gain_pct = 0, | 108 | .d_gain_pct = 0, |
| 109 | .i_gain_pct = 4, | 109 | .i_gain_pct = 0, |
| 110 | }; | 110 | }; |
| 111 | 111 | ||
| 112 | struct perf_limits { | 112 | struct perf_limits { |
| @@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
| 468 | static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) | 468 | static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) |
| 469 | { | 469 | { |
| 470 | int32_t busy_scaled; | 470 | int32_t busy_scaled; |
| 471 | int32_t core_busy, turbo_pstate, current_pstate; | 471 | int32_t core_busy, max_pstate, current_pstate; |
| 472 | 472 | ||
| 473 | core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); | 473 | core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); |
| 474 | turbo_pstate = int_tofp(cpu->pstate.turbo_pstate); | 474 | max_pstate = int_tofp(cpu->pstate.max_pstate); |
| 475 | current_pstate = int_tofp(cpu->pstate.current_pstate); | 475 | current_pstate = int_tofp(cpu->pstate.current_pstate); |
| 476 | busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate)); | 476 | busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); |
| 477 | 477 | ||
| 478 | return fp_toint(busy_scaled); | 478 | return fp_toint(busy_scaled); |
| 479 | } | 479 | } |
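Two changes land in intel_pstate: the controller becomes effectively pure-proportional (i_gain_pct drops to 0) with a 97% setpoint, and scaled busy is now referenced to the max non-turbo P-state rather than the turbo one. The driver's 8-bit fixed-point helpers, paraphrased into a runnable example with made-up P-state numbers:

```c
#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8	/* intel_pstate's 8-bit fractional fixed point */

static int32_t int_tofp(int32_t x) { return x << FRAC_BITS; }
static int32_t fp_toint(int32_t x) { return x >> FRAC_BITS; }

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	/*
	 * Example: core 60% busy at P-state 16 on a part whose max
	 * non-turbo P-state is 24. Rescaled busy: 60 * 24 / 16 = 90.
	 */
	int32_t core_busy = int_tofp(60);
	int32_t max_pstate = int_tofp(24);
	int32_t current_pstate = int_tofp(16);
	int32_t busy_scaled = mul_fp(core_busy,
				     div_fp(max_pstate, current_pstate));

	printf("busy_scaled = %d\n", fp_toint(busy_scaled));	/* 90 */
	return 0;
}
```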
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index bb838b985077..9536852c504a 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c | |||
| @@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 118 | clk_put(cpuclk); | 118 | clk_put(cpuclk); |
| 119 | return -EINVAL; | 119 | return -EINVAL; |
| 120 | } | 120 | } |
| 121 | ret = clk_set_rate(cpuclk, rate); | ||
| 122 | if (ret) { | ||
| 123 | clk_put(cpuclk); | ||
| 124 | return ret; | ||
| 125 | } | ||
| 126 | 121 | ||
| 127 | /* clock table init */ | 122 | /* clock table init */ |
| 128 | for (i = 2; | 123 | for (i = 2; |
| @@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 130 | i++) | 125 | i++) |
| 131 | loongson2_clockmod_table[i].frequency = (rate * i) / 8; | 126 | loongson2_clockmod_table[i].frequency = (rate * i) / 8; |
| 132 | 127 | ||
| 128 | ret = clk_set_rate(cpuclk, rate); | ||
| 129 | if (ret) { | ||
| 130 | clk_put(cpuclk); | ||
| 131 | return ret; | ||
| 132 | } | ||
| 133 | |||
| 133 | policy->cur = loongson2_cpufreq_get(policy->cpu); | 134 | policy->cur = loongson2_cpufreq_get(policy->cpu); |
| 134 | 135 | ||
| 135 | cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], | 136 | cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], |
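The loongson2 fix is pure ordering: clk_set_rate() moves after the loop that fills loongson2_clockmod_table, which suggests the clock's set_rate path consults that table and was previously seeing it empty. A generic illustration of the ordering bug, with hypothetical names:

```c
#include <stdio.h>

static unsigned int freq_table[8];	/* all zero until initialized */

static int set_rate_from_table(unsigned int target)
{
	for (int i = 2; i < 8; i++)
		if (freq_table[i] == target)
			return 0;	/* found a usable entry */
	return -1;			/* empty table: spurious failure */
}

int main(void)
{
	unsigned int rate = 800000;	/* kHz, from clk_get_rate() */
	unsigned int target = (rate * 6) / 8;

	/* Wrong order: look up before the table exists. */
	printf("before table init: %d\n", set_rate_from_table(target));

	for (int i = 2; i < 8; i++)	/* the loop the driver now runs first */
		freq_table[i] = (rate * i) / 8;

	printf("after table init:  %d\n", set_rate_from_table(target));
	return 0;
}
```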
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index fe343a06b7da..bc580b67a652 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
| @@ -28,13 +28,6 @@ | |||
| 28 | #define MAX_INTERESTING 50000 | 28 | #define MAX_INTERESTING 50000 |
| 29 | #define STDDEV_THRESH 400 | 29 | #define STDDEV_THRESH 400 |
| 30 | 30 | ||
| 31 | /* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */ | ||
| 32 | #define MAX_DEVIATION 60 | ||
| 33 | |||
| 34 | static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer); | ||
| 35 | static DEFINE_PER_CPU(int, hrtimer_status); | ||
| 36 | /* menu hrtimer mode */ | ||
| 37 | enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL}; | ||
| 38 | 31 | ||
| 39 | /* | 32 | /* |
| 40 | * Concepts and ideas behind the menu governor | 33 | * Concepts and ideas behind the menu governor |
| @@ -116,13 +109,6 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL}; | |||
| 116 | * | 109 | * |
| 117 | */ | 110 | */ |
| 118 | 111 | ||
| 119 | /* | ||
| 120 | * The C-state residency is so long that is is worthwhile to exit | ||
| 121 | * from the shallow C-state and re-enter into a deeper C-state. | ||
| 122 | */ | ||
| 123 | static unsigned int perfect_cstate_ms __read_mostly = 30; | ||
| 124 | module_param(perfect_cstate_ms, uint, 0000); | ||
| 125 | |||
| 126 | struct menu_device { | 112 | struct menu_device { |
| 127 | int last_state_idx; | 113 | int last_state_idx; |
| 128 | int needs_update; | 114 | int needs_update; |
| @@ -205,52 +191,17 @@ static u64 div_round64(u64 dividend, u32 divisor) | |||
| 205 | return div_u64(dividend + (divisor / 2), divisor); | 191 | return div_u64(dividend + (divisor / 2), divisor); |
| 206 | } | 192 | } |
| 207 | 193 | ||
| 208 | /* Cancel the hrtimer if it is not triggered yet */ | ||
| 209 | void menu_hrtimer_cancel(void) | ||
| 210 | { | ||
| 211 | int cpu = smp_processor_id(); | ||
| 212 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
| 213 | |||
| 214 | /* The timer is still not time out*/ | ||
| 215 | if (per_cpu(hrtimer_status, cpu)) { | ||
| 216 | hrtimer_cancel(hrtmr); | ||
| 217 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
| 218 | } | ||
| 219 | } | ||
| 220 | EXPORT_SYMBOL_GPL(menu_hrtimer_cancel); | ||
| 221 | |||
| 222 | /* Call back for hrtimer is triggered */ | ||
| 223 | static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer) | ||
| 224 | { | ||
| 225 | int cpu = smp_processor_id(); | ||
| 226 | struct menu_device *data = &per_cpu(menu_devices, cpu); | ||
| 227 | |||
| 228 | /* In general case, the expected residency is much larger than | ||
| 229 | * deepest C-state target residency, but prediction logic still | ||
| 230 | * predicts a small predicted residency, so the prediction | ||
| 231 | * history is totally broken if the timer is triggered. | ||
| 232 | * So reset the correction factor. | ||
| 233 | */ | ||
| 234 | if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL) | ||
| 235 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; | ||
| 236 | |||
| 237 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; | ||
| 238 | |||
| 239 | return HRTIMER_NORESTART; | ||
| 240 | } | ||
| 241 | |||
| 242 | /* | 194 | /* |
| 243 | * Try detecting repeating patterns by keeping track of the last 8 | 195 | * Try detecting repeating patterns by keeping track of the last 8 |
| 244 | * intervals, and checking if the standard deviation of that set | 196 | * intervals, and checking if the standard deviation of that set |
| 245 | * of points is below a threshold. If it is... then use the | 197 | * of points is below a threshold. If it is... then use the |
| 246 | * average of these 8 points as the estimated value. | 198 | * average of these 8 points as the estimated value. |
| 247 | */ | 199 | */ |
| 248 | static u32 get_typical_interval(struct menu_device *data) | 200 | static void get_typical_interval(struct menu_device *data) |
| 249 | { | 201 | { |
| 250 | int i = 0, divisor = 0; | 202 | int i = 0, divisor = 0; |
| 251 | uint64_t max = 0, avg = 0, stddev = 0; | 203 | uint64_t max = 0, avg = 0, stddev = 0; |
| 252 | int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ | 204 | int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ |
| 253 | unsigned int ret = 0; | ||
| 254 | 205 | ||
| 255 | again: | 206 | again: |
| 256 | 207 | ||
| @@ -291,16 +242,13 @@ again: | |||
| 291 | if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) | 242 | if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) |
| 292 | || stddev <= 20) { | 243 | || stddev <= 20) { |
| 293 | data->predicted_us = avg; | 244 | data->predicted_us = avg; |
| 294 | ret = 1; | 245 | return; |
| 295 | return ret; | ||
| 296 | 246 | ||
| 297 | } else if ((divisor * 4) > INTERVALS * 3) { | 247 | } else if ((divisor * 4) > INTERVALS * 3) { |
| 298 | /* Exclude the max interval */ | 248 | /* Exclude the max interval */ |
| 299 | thresh = max - 1; | 249 | thresh = max - 1; |
| 300 | goto again; | 250 | goto again; |
| 301 | } | 251 | } |
| 302 | |||
| 303 | return ret; | ||
| 304 | } | 252 | } |
| 305 | 253 | ||
| 306 | /** | 254 | /** |
| @@ -315,9 +263,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
| 315 | int i; | 263 | int i; |
| 316 | int multiplier; | 264 | int multiplier; |
| 317 | struct timespec t; | 265 | struct timespec t; |
| 318 | int repeat = 0, low_predicted = 0; | ||
| 319 | int cpu = smp_processor_id(); | ||
| 320 | struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); | ||
| 321 | 266 | ||
| 322 | if (data->needs_update) { | 267 | if (data->needs_update) { |
| 323 | menu_update(drv, dev); | 268 | menu_update(drv, dev); |
| @@ -352,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
| 352 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], | 297 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], |
| 353 | RESOLUTION * DECAY); | 298 | RESOLUTION * DECAY); |
| 354 | 299 | ||
| 355 | repeat = get_typical_interval(data); | 300 | get_typical_interval(data); |
| 356 | 301 | ||
| 357 | /* | 302 | /* |
| 358 | * We want to default to C1 (hlt), not to busy polling | 303 | * We want to default to C1 (hlt), not to busy polling |
| @@ -373,10 +318,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
| 373 | 318 | ||
| 374 | if (s->disabled || su->disable) | 319 | if (s->disabled || su->disable) |
| 375 | continue; | 320 | continue; |
| 376 | if (s->target_residency > data->predicted_us) { | 321 | if (s->target_residency > data->predicted_us) |
| 377 | low_predicted = 1; | ||
| 378 | continue; | 322 | continue; |
| 379 | } | ||
| 380 | if (s->exit_latency > latency_req) | 323 | if (s->exit_latency > latency_req) |
| 381 | continue; | 324 | continue; |
| 382 | if (s->exit_latency * multiplier > data->predicted_us) | 325 | if (s->exit_latency * multiplier > data->predicted_us) |
| @@ -386,44 +329,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
| 386 | data->exit_us = s->exit_latency; | 329 | data->exit_us = s->exit_latency; |
| 387 | } | 330 | } |
| 388 | 331 | ||
| 389 | /* not deepest C-state chosen for low predicted residency */ | ||
| 390 | if (low_predicted) { | ||
| 391 | unsigned int timer_us = 0; | ||
| 392 | unsigned int perfect_us = 0; | ||
| 393 | |||
| 394 | /* | ||
| 395 | * Set a timer to detect whether this sleep is much | ||
| 396 | * longer than repeat mode predicted. If the timer | ||
| 397 | * triggers, the code will evaluate whether to put | ||
| 398 | * the CPU into a deeper C-state. | ||
| 399 | * The timer is cancelled on CPU wakeup. | ||
| 400 | */ | ||
| 401 | timer_us = 2 * (data->predicted_us + MAX_DEVIATION); | ||
| 402 | |||
| 403 | perfect_us = perfect_cstate_ms * 1000; | ||
| 404 | |||
| 405 | if (repeat && (4 * timer_us < data->expected_us)) { | ||
| 406 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
| 407 | ns_to_ktime(1000 * timer_us), | ||
| 408 | HRTIMER_MODE_REL_PINNED)); | ||
| 409 | /* In repeat case, menu hrtimer is started */ | ||
| 410 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT; | ||
| 411 | } else if (perfect_us < data->expected_us) { | ||
| 412 | /* | ||
| 413 | * The next timer is long. This could be because | ||
| 414 | * we did not make a useful prediction. | ||
| 415 | * In that case, it makes sense to re-enter | ||
| 416 | * into a deeper C-state after some time. | ||
| 417 | */ | ||
| 418 | RCU_NONIDLE(hrtimer_start(hrtmr, | ||
| 419 | ns_to_ktime(1000 * timer_us), | ||
| 420 | HRTIMER_MODE_REL_PINNED)); | ||
| 421 | /* In general case, menu hrtimer is started */ | ||
| 422 | per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL; | ||
| 423 | } | ||
| 424 | |||
| 425 | } | ||
| 426 | |||
| 427 | return data->last_state_idx; | 332 | return data->last_state_idx; |
| 428 | } | 333 | } |
| 429 | 334 | ||
| @@ -514,9 +419,6 @@ static int menu_enable_device(struct cpuidle_driver *drv, | |||
| 514 | struct cpuidle_device *dev) | 419 | struct cpuidle_device *dev) |
| 515 | { | 420 | { |
| 516 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 421 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
| 517 | struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu); | ||
| 518 | hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
| 519 | t->function = menu_hrtimer_notify; | ||
| 520 | 422 | ||
| 521 | memset(data, 0, sizeof(struct menu_device)); | 423 | memset(data, 0, sizeof(struct menu_device)); |
| 522 | 424 | ||
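The menu-governor hunks delete the repeat-mode hrtimer machinery wholesale; prediction now rests entirely on get_typical_interval(), which averages the last 8 sleep intervals, trusts the mean when the standard deviation is small relative to it, and otherwise discards the largest sample and retries while at least 3/4 of the samples remain. A close, runnable paraphrase (the kernel rounds averages with div_round64, simplified here to truncation):

```c
#include <stdio.h>
#include <stdint.h>

#define INTERVALS 8

static uint64_t isqrt(uint64_t x)	/* integer square root, bit by bit */
{
	uint64_t r = 0, b = 1ULL << 62;

	while (b > x)
		b >>= 2;
	while (b) {
		if (x >= r + b) {
			x -= r + b;
			r = (r >> 1) + b;
		} else {
			r >>= 1;
		}
		b >>= 2;
	}
	return r;
}

static int get_typical_interval(const uint32_t iv[INTERVALS], uint64_t *pred)
{
	uint64_t thresh = UINT64_MAX;	/* discard outliers above this */

again:;
	uint64_t max = 0, avg = 0, stddev = 0;
	int divisor = 0;

	for (int i = 0; i < INTERVALS; i++) {
		if (iv[i] > thresh)
			continue;
		divisor++;
		avg += iv[i];
		if (iv[i] > max)
			max = iv[i];
	}
	avg /= divisor;

	for (int i = 0; i < INTERVALS; i++) {
		if (iv[i] > thresh)
			continue;
		int64_t d = (int64_t)iv[i] - (int64_t)avg;
		stddev += (uint64_t)(d * d);
	}
	stddev = isqrt(stddev / divisor);

	if ((avg > stddev * 6 && divisor * 4 >= INTERVALS * 3) || stddev <= 20) {
		*pred = avg;		/* stable pattern: predict the mean */
		return 0;
	}
	if (divisor * 4 > INTERVALS * 3) {
		thresh = max - 1;	/* drop the largest sample, retry */
		goto again;
	}
	return -1;			/* no detectable pattern */
}

int main(void)
{
	uint32_t iv[INTERVALS] = { 500, 510, 490, 505, 20000, 495, 500, 508 };
	uint64_t us;

	if (!get_typical_interval(iv, &us))
		printf("predicted sleep: %llu us\n", (unsigned long long)us);
	return 0;
}
```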
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 5996521a1caf..84573b4d6f92 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -429,7 +429,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
| 429 | dma_addr_t src_dma, dst_dma; | 429 | dma_addr_t src_dma, dst_dma; |
| 430 | int ret = 0; | 430 | int ret = 0; |
| 431 | 431 | ||
| 432 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | 432 | desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); |
| 433 | if (!desc) { | 433 | if (!desc) { |
| 434 | dev_err(jrdev, "unable to allocate key input memory\n"); | 434 | dev_err(jrdev, "unable to allocate key input memory\n"); |
| 435 | return -ENOMEM; | 435 | return -ENOMEM; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6825957c97fb..643d7c7a0d8e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
| @@ -194,7 +194,7 @@ config SIRF_DMA | |||
| 194 | Enable support for the CSR SiRFprimaII DMA engine. | 194 | Enable support for the CSR SiRFprimaII DMA engine. |
| 195 | 195 | ||
| 196 | config TI_EDMA | 196 | config TI_EDMA |
| 197 | tristate "TI EDMA support" | 197 | bool "TI EDMA support" |
| 198 | depends on ARCH_DAVINCI || ARCH_OMAP | 198 | depends on ARCH_DAVINCI || ARCH_OMAP |
| 199 | select DMA_ENGINE | 199 | select DMA_ENGINE |
| 200 | select DMA_VIRTUAL_CHANNELS | 200 | select DMA_VIRTUAL_CHANNELS |
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ce3dc3e9688c..0bbdea5059f3 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
| @@ -867,6 +867,7 @@ static int pch_dma_probe(struct pci_dev *pdev, | |||
| 867 | 867 | ||
| 868 | if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | 868 | if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { |
| 869 | dev_err(&pdev->dev, "Cannot find proper base address\n"); | 869 | dev_err(&pdev->dev, "Cannot find proper base address\n"); |
| 870 | err = -ENODEV; | ||
| 870 | goto err_disable_pdev; | 871 | goto err_disable_pdev; |
| 871 | } | 872 | } |
| 872 | 873 | ||
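The pch_dma one-liner is the classic stale-error-code bug: err still held 0 from an earlier success when the goto fired, so a probe failure could be reported as success. A minimal illustration:

```c
#include <stdio.h>

static int probe(int resource_ok)
{
	int err = 0;		/* starts as "success" */

	if (!resource_ok) {
		err = -19;	/* -ENODEV: the line the patch adds */
		goto err_disable;
	}
	return 0;

err_disable:
	/* undo earlier setup here ... */
	return err;		/* without the assignment: returns 0 */
}

int main(void)
{
	printf("probe failure returns %d\n", probe(0));
	return 0;
}
```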
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 593827b3fdd4..fa645d825009 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 2505 | /* Assign cookies to all nodes */ | 2505 | /* Assign cookies to all nodes */ |
| 2506 | while (!list_empty(&last->node)) { | 2506 | while (!list_empty(&last->node)) { |
| 2507 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); | 2507 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); |
| 2508 | if (pch->cyclic) { | ||
| 2509 | desc->txd.callback = last->txd.callback; | ||
| 2510 | desc->txd.callback_param = last->txd.callback_param; | ||
| 2511 | } | ||
| 2508 | 2512 | ||
| 2509 | dma_cookie_assign(&desc->txd); | 2513 | dma_cookie_assign(&desc->txd); |
| 2510 | 2514 | ||
| @@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
| 2688 | size_t period_len, enum dma_transfer_direction direction, | 2692 | size_t period_len, enum dma_transfer_direction direction, |
| 2689 | unsigned long flags, void *context) | 2693 | unsigned long flags, void *context) |
| 2690 | { | 2694 | { |
| 2691 | struct dma_pl330_desc *desc; | 2695 | struct dma_pl330_desc *desc = NULL, *first = NULL; |
| 2692 | struct dma_pl330_chan *pch = to_pchan(chan); | 2696 | struct dma_pl330_chan *pch = to_pchan(chan); |
| 2697 | struct dma_pl330_dmac *pdmac = pch->dmac; | ||
| 2698 | unsigned int i; | ||
| 2693 | dma_addr_t dst; | 2699 | dma_addr_t dst; |
| 2694 | dma_addr_t src; | 2700 | dma_addr_t src; |
| 2695 | 2701 | ||
| 2696 | desc = pl330_get_desc(pch); | 2702 | if (len % period_len != 0) |
| 2697 | if (!desc) { | ||
| 2698 | dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", | ||
| 2699 | __func__, __LINE__); | ||
| 2700 | return NULL; | 2703 | return NULL; |
| 2701 | } | ||
| 2702 | 2704 | ||
| 2703 | switch (direction) { | 2705 | if (!is_slave_direction(direction)) { |
| 2704 | case DMA_MEM_TO_DEV: | ||
| 2705 | desc->rqcfg.src_inc = 1; | ||
| 2706 | desc->rqcfg.dst_inc = 0; | ||
| 2707 | desc->req.rqtype = MEMTODEV; | ||
| 2708 | src = dma_addr; | ||
| 2709 | dst = pch->fifo_addr; | ||
| 2710 | break; | ||
| 2711 | case DMA_DEV_TO_MEM: | ||
| 2712 | desc->rqcfg.src_inc = 0; | ||
| 2713 | desc->rqcfg.dst_inc = 1; | ||
| 2714 | desc->req.rqtype = DEVTOMEM; | ||
| 2715 | src = pch->fifo_addr; | ||
| 2716 | dst = dma_addr; | ||
| 2717 | break; | ||
| 2718 | default: | ||
| 2719 | dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", | 2706 | dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", |
| 2720 | __func__, __LINE__); | 2707 | __func__, __LINE__); |
| 2721 | return NULL; | 2708 | return NULL; |
| 2722 | } | 2709 | } |
| 2723 | 2710 | ||
| 2724 | desc->rqcfg.brst_size = pch->burst_sz; | 2711 | for (i = 0; i < len / period_len; i++) { |
| 2725 | desc->rqcfg.brst_len = 1; | 2712 | desc = pl330_get_desc(pch); |
| 2713 | if (!desc) { | ||
| 2714 | dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", | ||
| 2715 | __func__, __LINE__); | ||
| 2726 | 2716 | ||
| 2727 | pch->cyclic = true; | 2717 | if (!first) |
| 2718 | return NULL; | ||
| 2719 | |||
| 2720 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
| 2721 | |||
| 2722 | while (!list_empty(&first->node)) { | ||
| 2723 | desc = list_entry(first->node.next, | ||
| 2724 | struct dma_pl330_desc, node); | ||
| 2725 | list_move_tail(&desc->node, &pdmac->desc_pool); | ||
| 2726 | } | ||
| 2727 | |||
| 2728 | list_move_tail(&first->node, &pdmac->desc_pool); | ||
| 2728 | 2729 | ||
| 2729 | fill_px(&desc->px, dst, src, period_len); | 2730 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); |
| 2731 | |||
| 2732 | return NULL; | ||
| 2733 | } | ||
| 2734 | |||
| 2735 | switch (direction) { | ||
| 2736 | case DMA_MEM_TO_DEV: | ||
| 2737 | desc->rqcfg.src_inc = 1; | ||
| 2738 | desc->rqcfg.dst_inc = 0; | ||
| 2739 | desc->req.rqtype = MEMTODEV; | ||
| 2740 | src = dma_addr; | ||
| 2741 | dst = pch->fifo_addr; | ||
| 2742 | break; | ||
| 2743 | case DMA_DEV_TO_MEM: | ||
| 2744 | desc->rqcfg.src_inc = 0; | ||
| 2745 | desc->rqcfg.dst_inc = 1; | ||
| 2746 | desc->req.rqtype = DEVTOMEM; | ||
| 2747 | src = pch->fifo_addr; | ||
| 2748 | dst = dma_addr; | ||
| 2749 | break; | ||
| 2750 | default: | ||
| 2751 | break; | ||
| 2752 | } | ||
| 2753 | |||
| 2754 | desc->rqcfg.brst_size = pch->burst_sz; | ||
| 2755 | desc->rqcfg.brst_len = 1; | ||
| 2756 | fill_px(&desc->px, dst, src, period_len); | ||
| 2757 | |||
| 2758 | if (!first) | ||
| 2759 | first = desc; | ||
| 2760 | else | ||
| 2761 | list_add_tail(&desc->node, &first->node); | ||
| 2762 | |||
| 2763 | dma_addr += period_len; | ||
| 2764 | } | ||
| 2765 | |||
| 2766 | if (!desc) | ||
| 2767 | return NULL; | ||
| 2768 | |||
| 2769 | pch->cyclic = true; | ||
| 2770 | desc->txd.flags = flags; | ||
| 2730 | 2771 | ||
| 2731 | return &desc->txd; | 2772 | return &desc->txd; |
| 2732 | } | 2773 | } |
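The pl330 rework makes the cyclic prep allocate one descriptor per period, chain the extras off the first, propagate the callback to every node at submit time (the tx_submit hunk above), and return every claimed descriptor to the pool if any allocation fails. The shape of that build-and-unwind loop, with a plain linked list standing in for the driver's list_head chains:

```c
#include <stdio.h>
#include <stdlib.h>

struct desc {
	unsigned int addr, len;
	struct desc *next;
};

static struct desc *prep_cyclic(unsigned int dma_addr, unsigned int len,
				unsigned int period_len)
{
	struct desc *first = NULL, *last = NULL;

	if (len % period_len)		/* whole periods only */
		return NULL;

	for (unsigned int i = 0; i < len / period_len; i++) {
		struct desc *d = malloc(sizeof(*d));

		if (!d) {		/* unwind everything claimed so far */
			while (first) {
				struct desc *n = first->next;
				free(first);	/* driver: move back to pool */
				first = n;
			}
			return NULL;
		}
		d->addr = dma_addr;
		d->len = period_len;
		d->next = NULL;
		if (!first)
			first = d;
		else
			last->next = d;
		last = d;
		dma_addr += period_len;	/* next period's slice */
	}
	return first;			/* caller submits the chain */
}

int main(void)
{
	struct desc *d = prep_cyclic(0x1000, 4096, 1024);

	for (struct desc *p = d; p; ) {
		struct desc *n = p->next;
		printf("period @0x%x len %u\n", p->addr, p->len);
		free(p);
		p = n;
	}
	return 0;
}
```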
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index b67f45f5c271..5039fbc88254 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c | |||
| @@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan, | |||
| 400 | shdma_chan); | 400 | shdma_chan); |
| 401 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | 401 | struct sh_dmae_desc *sh_desc = container_of(sdesc, |
| 402 | struct sh_dmae_desc, shdma_desc); | 402 | struct sh_dmae_desc, shdma_desc); |
| 403 | return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | 403 | return sh_desc->hw.tcr - |
| 404 | sh_chan->xmit_shift; | 404 | (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift); |
| 405 | } | 405 | } |
| 406 | 406 | ||
| 407 | /* Called from error IRQ or NMI */ | 407 | /* Called from error IRQ or NMI */ |
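The shdma fix reads as: the saved hw.tcr is already in bytes, while the TCR register counts outstanding transfer units of 1 << xmit_shift bytes each, so only the register value may be shifted. That is my reading of the before/after; the numbers below are made up to show the arithmetic:

```c
#include <stdio.h>

int main(void)
{
	unsigned int total_bytes = 4096;	/* sh_desc->hw.tcr */
	unsigned int xmit_shift  = 2;		/* 4-byte transfer units */
	unsigned int tcr_reg     = 512;		/* units still outstanding */

	unsigned int wrong = (total_bytes - tcr_reg) << xmit_shift;
	unsigned int right = total_bytes - (tcr_reg << xmit_shift);

	printf("buggy: %u bytes\n", wrong);	/* 14336: exceeds the total */
	printf("fixed: %u bytes\n", right);	/* 2048 bytes transferred */
	return 0;
}
```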
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 27e86d938262..89e109022d78 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
| @@ -48,6 +48,8 @@ static LIST_HEAD(mc_devices); | |||
| 48 | */ | 48 | */ |
| 49 | static void const *edac_mc_owner; | 49 | static void const *edac_mc_owner; |
| 50 | 50 | ||
| 51 | static struct bus_type mc_bus[EDAC_MAX_MCS]; | ||
| 52 | |||
| 51 | unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, | 53 | unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, |
| 52 | unsigned len) | 54 | unsigned len) |
| 53 | { | 55 | { |
| @@ -723,6 +725,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) | |||
| 723 | int ret = -EINVAL; | 725 | int ret = -EINVAL; |
| 724 | edac_dbg(0, "\n"); | 726 | edac_dbg(0, "\n"); |
| 725 | 727 | ||
| 728 | if (mci->mc_idx >= EDAC_MAX_MCS) { | ||
| 729 | pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx); | ||
| 730 | return -ENODEV; | ||
| 731 | } | ||
| 732 | |||
| 726 | #ifdef CONFIG_EDAC_DEBUG | 733 | #ifdef CONFIG_EDAC_DEBUG |
| 727 | if (edac_debug_level >= 3) | 734 | if (edac_debug_level >= 3) |
| 728 | edac_mc_dump_mci(mci); | 735 | edac_mc_dump_mci(mci); |
| @@ -762,6 +769,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) | |||
| 762 | /* set load time so that error rate can be tracked */ | 769 | /* set load time so that error rate can be tracked */ |
| 763 | mci->start_time = jiffies; | 770 | mci->start_time = jiffies; |
| 764 | 771 | ||
| 772 | mci->bus = &mc_bus[mci->mc_idx]; | ||
| 773 | |||
| 765 | if (edac_create_sysfs_mci_device(mci)) { | 774 | if (edac_create_sysfs_mci_device(mci)) { |
| 766 | edac_mc_printk(mci, KERN_WARNING, | 775 | edac_mc_printk(mci, KERN_WARNING, |
| 767 | "failed to create sysfs device\n"); | 776 | "failed to create sysfs device\n"); |
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index ef15a7e613bc..e7c32c4f7837 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
| @@ -370,7 +370,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci, | |||
| 370 | return -ENODEV; | 370 | return -ENODEV; |
| 371 | 371 | ||
| 372 | csrow->dev.type = &csrow_attr_type; | 372 | csrow->dev.type = &csrow_attr_type; |
| 373 | csrow->dev.bus = &mci->bus; | 373 | csrow->dev.bus = mci->bus; |
| 374 | device_initialize(&csrow->dev); | 374 | device_initialize(&csrow->dev); |
| 375 | csrow->dev.parent = &mci->dev; | 375 | csrow->dev.parent = &mci->dev; |
| 376 | csrow->mci = mci; | 376 | csrow->mci = mci; |
| @@ -605,7 +605,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci, | |||
| 605 | dimm->mci = mci; | 605 | dimm->mci = mci; |
| 606 | 606 | ||
| 607 | dimm->dev.type = &dimm_attr_type; | 607 | dimm->dev.type = &dimm_attr_type; |
| 608 | dimm->dev.bus = &mci->bus; | 608 | dimm->dev.bus = mci->bus; |
| 609 | device_initialize(&dimm->dev); | 609 | device_initialize(&dimm->dev); |
| 610 | 610 | ||
| 611 | dimm->dev.parent = &mci->dev; | 611 | dimm->dev.parent = &mci->dev; |
| @@ -975,11 +975,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
| 975 | * The memory controller needs its own bus, in order to avoid | 975 | * The memory controller needs its own bus, in order to avoid |
| 976 | * namespace conflicts at /sys/bus/edac. | 976 | * namespace conflicts at /sys/bus/edac. |
| 977 | */ | 977 | */ |
| 978 | mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); | 978 | mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); |
| 979 | if (!mci->bus.name) | 979 | if (!mci->bus->name) |
| 980 | return -ENOMEM; | 980 | return -ENOMEM; |
| 981 | edac_dbg(0, "creating bus %s\n", mci->bus.name); | 981 | |
| 982 | err = bus_register(&mci->bus); | 982 | edac_dbg(0, "creating bus %s\n", mci->bus->name); |
| 983 | |||
| 984 | err = bus_register(mci->bus); | ||
| 983 | if (err < 0) | 985 | if (err < 0) |
| 984 | return err; | 986 | return err; |
| 985 | 987 | ||
| @@ -988,7 +990,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
| 988 | device_initialize(&mci->dev); | 990 | device_initialize(&mci->dev); |
| 989 | 991 | ||
| 990 | mci->dev.parent = mci_pdev; | 992 | mci->dev.parent = mci_pdev; |
| 991 | mci->dev.bus = &mci->bus; | 993 | mci->dev.bus = mci->bus; |
| 992 | dev_set_name(&mci->dev, "mc%d", mci->mc_idx); | 994 | dev_set_name(&mci->dev, "mc%d", mci->mc_idx); |
| 993 | dev_set_drvdata(&mci->dev, mci); | 995 | dev_set_drvdata(&mci->dev, mci); |
| 994 | pm_runtime_forbid(&mci->dev); | 996 | pm_runtime_forbid(&mci->dev); |
| @@ -997,8 +999,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
| 997 | err = device_add(&mci->dev); | 999 | err = device_add(&mci->dev); |
| 998 | if (err < 0) { | 1000 | if (err < 0) { |
| 999 | edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); | 1001 | edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); |
| 1000 | bus_unregister(&mci->bus); | 1002 | bus_unregister(mci->bus); |
| 1001 | kfree(mci->bus.name); | 1003 | kfree(mci->bus->name); |
| 1002 | return err; | 1004 | return err; |
| 1003 | } | 1005 | } |
| 1004 | 1006 | ||
| @@ -1064,8 +1066,8 @@ fail: | |||
| 1064 | } | 1066 | } |
| 1065 | fail2: | 1067 | fail2: |
| 1066 | device_unregister(&mci->dev); | 1068 | device_unregister(&mci->dev); |
| 1067 | bus_unregister(&mci->bus); | 1069 | bus_unregister(mci->bus); |
| 1068 | kfree(mci->bus.name); | 1070 | kfree(mci->bus->name); |
| 1069 | return err; | 1071 | return err; |
| 1070 | } | 1072 | } |
| 1071 | 1073 | ||
| @@ -1098,8 +1100,8 @@ void edac_unregister_sysfs(struct mem_ctl_info *mci) | |||
| 1098 | { | 1100 | { |
| 1099 | edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); | 1101 | edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); |
| 1100 | device_unregister(&mci->dev); | 1102 | device_unregister(&mci->dev); |
| 1101 | bus_unregister(&mci->bus); | 1103 | bus_unregister(mci->bus); |
| 1102 | kfree(mci->bus.name); | 1104 | kfree(mci->bus->name); |
| 1103 | } | 1105 | } |
| 1104 | 1106 | ||
| 1105 | static void mc_attr_release(struct device *dev) | 1107 | static void mc_attr_release(struct device *dev) |
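Taken together with the edac_mc.c hunk above, the mci bus moves from a struct embedded in mem_ctl_info to a slot in a static mc_bus[] array, with registration bounds-checked against EDAC_MAX_MCS, plausibly so the bus_type outlives the mci object that used to contain it. The pattern, with illustrative types and limit:

```c
#include <stdio.h>

#define EDAC_MAX_MCS 16			/* illustrative limit */

struct bus_type { const char *name; };
static struct bus_type mc_bus[EDAC_MAX_MCS];	/* outlives any mci */

struct mem_ctl_info {
	int mc_idx;
	struct bus_type *bus;		/* was: embedded struct bus_type */
};

static int edac_mc_add(struct mem_ctl_info *mci)
{
	if (mci->mc_idx >= EDAC_MAX_MCS) {
		fprintf(stderr, "Too many memory controllers: %d\n",
			mci->mc_idx);
		return -19;		/* -ENODEV */
	}
	mci->bus = &mc_bus[mci->mc_idx];	/* static slot, not a member */
	return 0;
}

int main(void)
{
	struct mem_ctl_info mci = { .mc_idx = 3 };

	printf("add: %d, bus slot %d\n", edac_mc_add(&mci),
	       (int)(mci.bus - mc_bus));
	return 0;
}
```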
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 1b635178cc44..157b934e8ce3 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c | |||
| @@ -974,7 +974,7 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci) | |||
| 974 | if (!i5100_debugfs) | 974 | if (!i5100_debugfs) |
| 975 | return -ENODEV; | 975 | return -ENODEV; |
| 976 | 976 | ||
| 977 | priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs); | 977 | priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs); |
| 978 | 978 | ||
| 979 | if (!priv->debugfs) | 979 | if (!priv->debugfs) |
| 980 | return -ENOMEM; | 980 | return -ENOMEM; |
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 7ef316fdc4d9..ac1b43a04285 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
| @@ -54,6 +54,7 @@ | |||
| 54 | #define FW_CDEV_KERNEL_VERSION 5 | 54 | #define FW_CDEV_KERNEL_VERSION 5 |
| 55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 | 55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 |
| 56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 | 56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 |
| 57 | #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 | ||
| 57 | 58 | ||
| 58 | struct client { | 59 | struct client { |
| 59 | u32 version; | 60 | u32 version; |
| @@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) | |||
| 1005 | a->channel, a->speed, a->header_size, cb, client); | 1006 | a->channel, a->speed, a->header_size, cb, client); |
| 1006 | if (IS_ERR(context)) | 1007 | if (IS_ERR(context)) |
| 1007 | return PTR_ERR(context); | 1008 | return PTR_ERR(context); |
| 1009 | if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW) | ||
| 1010 | context->drop_overflow_headers = true; | ||
| 1008 | 1011 | ||
| 1009 | /* We only support one context at this time. */ | 1012 | /* We only support one context at this time. */ |
| 1010 | spin_lock_irq(&client->lock); | 1013 | spin_lock_irq(&client->lock); |
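The firewire pair is a compatibility gate: contexts created by clients that negotiated an ABI version older than 5 keep the old drop-on-overflow behavior, while version-5 clients get the automatic flush shown in the ohci.c hunk below. Stand-in structs for the shape of the gate:

```c
#include <stdio.h>
#include <stdbool.h>

#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5

struct client  { unsigned int version; };
struct context { bool drop_overflow_headers; };

static void create_iso_context(const struct client *c, struct context *ctx)
{
	/* old clients never asked for an automatic flush */
	if (c->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
		ctx->drop_overflow_headers = true;
}

int main(void)
{
	struct client v4 = { 4 }, v5 = { 5 };
	struct context a = { false }, b = { false };

	create_iso_context(&v4, &a);
	create_iso_context(&v5, &b);
	printf("v4 client drops overflow headers: %d\n",
	       a.drop_overflow_headers);
	printf("v5 client drops overflow headers: %d\n",
	       b.drop_overflow_headers);
	return 0;
}
```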
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9e1db6490b9a..afb701ec90ca 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
| @@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) | |||
| 2749 | { | 2749 | { |
| 2750 | u32 *ctx_hdr; | 2750 | u32 *ctx_hdr; |
| 2751 | 2751 | ||
| 2752 | if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) | 2752 | if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { |
| 2753 | if (ctx->base.drop_overflow_headers) | ||
| 2754 | return; | ||
| 2753 | flush_iso_completions(ctx); | 2755 | flush_iso_completions(ctx); |
| 2756 | } | ||
| 2754 | 2757 | ||
| 2755 | ctx_hdr = ctx->header + ctx->header_length; | 2758 | ctx_hdr = ctx->header + ctx->header_length; |
| 2756 | ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); | 2759 | ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); |
| @@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context, | |||
| 2910 | 2913 | ||
| 2911 | sync_it_packet_for_cpu(context, d); | 2914 | sync_it_packet_for_cpu(context, d); |
| 2912 | 2915 | ||
| 2913 | if (ctx->header_length + 4 > PAGE_SIZE) | 2916 | if (ctx->header_length + 4 > PAGE_SIZE) { |
| 2917 | if (ctx->base.drop_overflow_headers) | ||
| 2918 | return 1; | ||
| 2914 | flush_iso_completions(ctx); | 2919 | flush_iso_completions(ctx); |
| 2920 | } | ||
| 2915 | 2921 | ||
| 2916 | ctx_hdr = ctx->header + ctx->header_length; | 2922 | ctx_hdr = ctx->header + ctx->header_length; |
| 2917 | ctx->last_timestamp = le16_to_cpu(last->res_count); | 2923 | ctx->last_timestamp = le16_to_cpu(last->res_count); |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index eb760a218da4..232fa8fce26a 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
| @@ -419,6 +419,13 @@ static void __init dmi_format_ids(char *buf, size_t len) | |||
| 419 | dmi_get_system_info(DMI_BIOS_DATE)); | 419 | dmi_get_system_info(DMI_BIOS_DATE)); |
| 420 | } | 420 | } |
| 421 | 421 | ||
| 422 | /* | ||
| 423 | * Check for DMI/SMBIOS headers in the system firmware image. Any | ||
| 424 | * SMBIOS header must start 16 bytes before the DMI header, so take a | ||
| 425 | * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset | ||
| 426 | * 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS | ||
| 427 | * takes precedence) and return 0. Otherwise return 1. | ||
| 428 | */ | ||
| 422 | static int __init dmi_present(const u8 *buf) | 429 | static int __init dmi_present(const u8 *buf) |
| 423 | { | 430 | { |
| 424 | int smbios_ver; | 431 | int smbios_ver; |
| @@ -506,6 +513,13 @@ void __init dmi_scan_machine(void) | |||
| 506 | if (p == NULL) | 513 | if (p == NULL) |
| 507 | goto error; | 514 | goto error; |
| 508 | 515 | ||
| 516 | /* | ||
| 517 | * Iterate over all possible DMI header addresses q. | ||
| 518 | * Maintain the 32 bytes around q in buf. On the | ||
| 519 | * first iteration, substitute zero for the | ||
| 520 | * out-of-range bytes so there is no chance of falsely | ||
| 521 | * detecting an SMBIOS header. | ||
| 522 | */ | ||
| 509 | memset(buf, 0, 16); | 523 | memset(buf, 0, 16); |
| 510 | for (q = p; q < p + 0x10000; q += 16) { | 524 | for (q = p; q < p + 0x10000; q += 16) { |
| 511 | memcpy_fromio(buf + 16, q, 16); | 525 | memcpy_fromio(buf + 16, q, 16); |
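The dmi_scan changes are comment-only, documenting the sliding 32-byte window: an SMBIOS anchor must sit 16 bytes before the DMI anchor, so each 16-byte step checks "_SM_" at window offset 0 and "_DMI_" at offset 16, with the first window's low half zeroed to avoid a false SMBIOS hit. A runnable model over a synthetic buffer instead of ioremap'd firmware memory:

```c
#include <stdio.h>
#include <string.h>

static int dmi_present(const unsigned char *buf)	/* buf: 32 bytes */
{
	/* A real implementation parses the SMBIOS version from an
	 * "_SM_" anchor at offset 0 before checking for DMI. */
	if (!memcmp(buf + 16, "_DMI_", 5))
		return 0;				/* found */
	return 1;
}

int main(void)
{
	unsigned char image[256] = { 0 };
	unsigned char buf[32];

	memcpy(image + 64, "_SM_", 4);	/* pretend firmware anchors */
	memcpy(image + 80, "_DMI_", 5);

	memset(buf, 0, 16);		/* no false SMBIOS hit at q == p */
	for (int q = 0; q < 256 - 16; q += 16) {
		memcpy(buf + 16, image + q, 16);
		if (!dmi_present(buf))
			printf("DMI table header at offset %d\n", q);
		memcpy(buf, buf + 16, 16);	/* slide the window */
	}
	return 0;
}
```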
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c index e3ceaacde45c..73b73969d361 100644 --- a/drivers/gpio/gpio-msm-v1.c +++ b/drivers/gpio/gpio-msm-v1.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
| 23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
| 24 | #include <linux/err.h> | ||
| 24 | 25 | ||
| 25 | #include <mach/msm_gpiomux.h> | 26 | #include <mach/msm_gpiomux.h> |
| 26 | 27 | ||
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index f4491a497cc8..c2fa77086eb5 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c | |||
| @@ -378,7 +378,7 @@ static int msm_gpio_probe(struct platform_device *pdev) | |||
| 378 | int ret, ngpio; | 378 | int ret, ngpio; |
| 379 | struct resource *res; | 379 | struct resource *res; |
| 380 | 380 | ||
| 381 | if (!of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) { | 381 | if (of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) { |
| 382 | dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__); | 382 | dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__); |
| 383 | return -EINVAL; | 383 | return -EINVAL; |
| 384 | } | 384 | } |
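The gpio-msm-v2 fix inverts a success check: of_property_read_u32() returns 0 on success and a negative errno on failure, so the old !of_property_read_u32(...) treated success as failure and vice versa. The convention in miniature, with a stand-in reader:

```c
#include <stdio.h>

static int read_u32(int present, unsigned int *out)
{
	if (!present)
		return -22;	/* -EINVAL: property missing */
	*out = 32;
	return 0;		/* success */
}

int main(void)
{
	unsigned int ngpio;

	if (read_u32(1, &ngpio)) {	/* correct: nonzero == failure */
		fprintf(stderr, "ngpio property missing\n");
		return 1;
	}
	printf("ngpio = %u\n", ngpio);	/* reached on success */
	return 0;
}
```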
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 98d670825a1a..6e8887fe6c1b 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c | |||
| @@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align, | |||
| 323 | 323 | ||
| 324 | astbo->gem.driver_private = NULL; | 324 | astbo->gem.driver_private = NULL; |
| 325 | astbo->bo.bdev = &ast->ttm.bdev; | 325 | astbo->bo.bdev = &ast->ttm.bdev; |
| 326 | astbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
| 326 | 327 | ||
| 327 | ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 328 | ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
| 328 | 329 | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 0047012045c2..69fd8f1ac8df 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c | |||
| @@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, | |||
| 328 | 328 | ||
| 329 | cirrusbo->gem.driver_private = NULL; | 329 | cirrusbo->gem.driver_private = NULL; |
| 330 | cirrusbo->bo.bdev = &cirrus->ttm.bdev; | 330 | cirrusbo->bo.bdev = &cirrus->ttm.bdev; |
| 331 | cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
| 331 | 332 | ||
| 332 | cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 333 | cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
| 333 | 334 | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 738a4294d820..6a647493ca7f 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 677 | /* don't break so fail path works correct */ | 677 | /* don't break so fail path works correct */ |
| 678 | fail = 1; | 678 | fail = 1; |
| 679 | break; | 679 | break; |
| 680 | |||
| 681 | if (connector->dpms != DRM_MODE_DPMS_ON) { | ||
| 682 | DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); | ||
| 683 | mode_changed = true; | ||
| 684 | } | ||
| 680 | } | 685 | } |
| 681 | } | 686 | } |
| 682 | 687 | ||
| @@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 754 | ret = -EINVAL; | 759 | ret = -EINVAL; |
| 755 | goto fail; | 760 | goto fail; |
| 756 | } | 761 | } |
| 762 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
| 763 | for (i = 0; i < set->num_connectors; i++) { | ||
| 764 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
| 765 | drm_get_connector_name(set->connectors[i])); | ||
| 766 | set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); | ||
| 767 | } | ||
| 757 | } | 768 | } |
| 758 | drm_helper_disable_unused_functions(dev); | 769 | drm_helper_disable_unused_functions(dev); |
| 759 | } else if (fb_changed) { | 770 | } else if (fb_changed) { |
| @@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 771 | } | 782 | } |
| 772 | } | 783 | } |
| 773 | 784 | ||
| 774 | /* | ||
| 775 | * crtc set_config helpers implicit set the crtc and all connected | ||
| 776 | * encoders to DPMS on for a full mode set. But for just an fb update it | ||
| 777 | * doesn't do that. To not confuse userspace, do an explicit DPMS_ON | ||
| 778 | * unconditionally. This will also ensure driver internal dpms state is | ||
| 779 | * consistent again. | ||
| 780 | */ | ||
| 781 | if (set->crtc->enabled) { | ||
| 782 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
| 783 | for (i = 0; i < set->num_connectors; i++) { | ||
| 784 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
| 785 | drm_get_connector_name(set->connectors[i])); | ||
| 786 | set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); | ||
| 787 | } | ||
| 788 | } | ||
| 789 | |||
| 790 | kfree(save_connectors); | 785 | kfree(save_connectors); |
| 791 | kfree(save_encoders); | 786 | kfree(save_encoders); |
| 792 | kfree(save_crtcs); | 787 | kfree(save_crtcs); |
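The drm_crtc_helper change relocates the unconditional connector-DPMS-on pass from the tail of set_config into the full-modeset branch, and compensates by upgrading an fb-only update to a full mode switch whenever a connector is found DPMS-off. A sketch of the resulting decision flow, heavily condensed from the helper:

```c
#include <stdio.h>
#include <stdbool.h>

enum dpms { DPMS_ON, DPMS_OFF };

static void set_config(bool fb_changed, bool mode_changed,
		       enum dpms *connectors, int n)
{
	for (int i = 0; i < n; i++)
		if (connectors[i] != DPMS_ON)
			mode_changed = true;	/* force the full path */

	if (mode_changed) {
		printf("full mode switch\n");
		for (int i = 0; i < n; i++) {
			connectors[i] = DPMS_ON;	/* explicit DPMS on */
			printf("connector %d -> on\n", i);
		}
	} else if (fb_changed) {
		printf("fb update only\n");	/* no DPMS churn */
	}
}

int main(void)
{
	enum dpms c[2] = { DPMS_ON, DPMS_OFF };

	set_config(true, false, c, 2);	/* upgraded to a full modeset */
	return 0;
}
```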
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 8bcce7866d36..f92da0a32f0d 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
| @@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
| 708 | /* Subtract time delta from raw timestamp to get final | 708 | /* Subtract time delta from raw timestamp to get final |
| 709 | * vblank_time timestamp for end of vblank. | 709 | * vblank_time timestamp for end of vblank. |
| 710 | */ | 710 | */ |
| 711 | etime = ktime_sub_ns(etime, delta_ns); | 711 | if (delta_ns < 0) |
| 712 | etime = ktime_add_ns(etime, -delta_ns); | ||
| 713 | else | ||
| 714 | etime = ktime_sub_ns(etime, delta_ns); | ||
| 712 | *vblank_time = ktime_to_timeval(etime); | 715 | *vblank_time = ktime_to_timeval(etime); |
| 713 | 716 | ||
| 714 | DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", | 717 | DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", |
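The drm_irq guard matters because ktime_sub_ns() takes an unsigned nanosecond count; when the scanout position is just past the vblank, delta_ns goes negative, and presumably the concern is 32-bit builds, where ktime is a sec/nsec pair and no 64-bit modular wrap-around rescues the result. A model of the failure and the fix using that split representation:

```c
#include <stdio.h>
#include <stdint.h>

/* sec/nsec pair, as on 32-bit (non-scalar) ktime builds */
struct kt { int64_t sec; int32_t nsec; };

static struct kt sub_ns(struct kt t, uint64_t ns)
{
	t.sec  -= (int64_t)(ns / 1000000000ULL);
	t.nsec -= (int32_t)(ns % 1000000000ULL);
	if (t.nsec < 0) { t.nsec += 1000000000; t.sec--; }
	return t;
}

static struct kt add_ns(struct kt t, uint64_t ns)
{
	t.sec  += (int64_t)(ns / 1000000000ULL);
	t.nsec += (int32_t)(ns % 1000000000ULL);
	if (t.nsec >= 1000000000) { t.nsec -= 1000000000; t.sec++; }
	return t;
}

int main(void)
{
	struct kt etime = { 100, 0 };
	int64_t delta_ns = -500;	/* scanout just past the vblank */

	/* Buggy: -500 reinterpreted as a huge u64 nanosecond count. */
	struct kt bad = sub_ns(etime, (uint64_t)delta_ns);
	printf("unchecked: %lld.%09d\n", (long long)bad.sec, bad.nsec);

	/* Fixed: branch on the sign, as the patch does. */
	struct kt good = delta_ns < 0 ? add_ns(etime, (uint64_t)-delta_ns)
				      : sub_ns(etime, (uint64_t)delta_ns);
	printf("checked:   %lld.%09d\n", (long long)good.sec, good.nsec);
	return 0;
}
```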
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c index 95c75edef01a..30ef41bcd7b8 100644 --- a/drivers/gpu/drm/exynos/exynos_ddc.c +++ b/drivers/gpu/drm/exynos/exynos_ddc.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/i2c.h> | 17 | #include <linux/i2c.h> |
| 18 | #include <linux/module.h> | ||
| 19 | 18 | ||
| 20 | 19 | ||
| 21 | #include "exynos_drm_drv.h" | 20 | #include "exynos_drm_drv.h" |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 61b094f689a7..6e047bd53e2f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | * | 12 | * |
| 13 | */ | 13 | */ |
| 14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
| 17 | #include <linux/mfd/syscon.h> | 16 | #include <linux/mfd/syscon.h> |
| 18 | #include <linux/regmap.h> | 17 | #include <linux/regmap.h> |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 3e106beca5b6..1c263dac3c1c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | #include <drm/drmP.h> | 14 | #include <drm/drmP.h> |
| 15 | 15 | ||
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
| 19 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
| 20 | #include <linux/of_device.h> | 19 | #include <linux/of_device.h> |
| @@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = { | |||
| 130 | .data = &exynos5_fimd_driver_data }, | 129 | .data = &exynos5_fimd_driver_data }, |
| 131 | {}, | 130 | {}, |
| 132 | }; | 131 | }; |
| 133 | MODULE_DEVICE_TABLE(of, fimd_driver_dt_match); | ||
| 134 | #endif | 132 | #endif |
| 135 | 133 | ||
| 136 | static inline struct fimd_driver_data *drm_fimd_get_driver_data( | 134 | static inline struct fimd_driver_data *drm_fimd_get_driver_data( |
| @@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = { | |||
| 1082 | }, | 1080 | }, |
| 1083 | {}, | 1081 | {}, |
| 1084 | }; | 1082 | }; |
| 1085 | MODULE_DEVICE_TABLE(platform, fimd_driver_ids); | ||
| 1086 | 1083 | ||
| 1087 | static const struct dev_pm_ops fimd_pm_ops = { | 1084 | static const struct dev_pm_ops fimd_pm_ops = { |
| 1088 | SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) | 1085 | SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 42a5a5466075..eddea4941483 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
| 13 | #include <linux/err.h> | 12 | #include <linux/err.h> |
| 14 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
| @@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d, | |||
| 806 | struct g2d_cmdlist_node *node = | 805 | struct g2d_cmdlist_node *node = |
| 807 | list_first_entry(&runqueue_node->run_cmdlist, | 806 | list_first_entry(&runqueue_node->run_cmdlist, |
| 808 | struct g2d_cmdlist_node, list); | 807 | struct g2d_cmdlist_node, list); |
| 808 | int ret; | ||
| 809 | |||
| 810 | ret = pm_runtime_get_sync(g2d->dev); | ||
| 811 | if (ret < 0) { | ||
| 812 | dev_warn(g2d->dev, "failed pm power on.\n"); | ||
| 813 | return; | ||
| 814 | } | ||
| 809 | 815 | ||
| 810 | pm_runtime_get_sync(g2d->dev); | 816 | ret = clk_prepare_enable(g2d->gate_clk); |
| 811 | clk_enable(g2d->gate_clk); | 817 | if (ret < 0) { |
| 818 | dev_warn(g2d->dev, "failed to enable clock.\n"); | ||
| 819 | pm_runtime_put_sync(g2d->dev); | ||
| 820 | return; | ||
| 821 | } | ||
| 812 | 822 | ||
| 813 | writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); | 823 | writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); |
| 814 | writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); | 824 | writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); |
| @@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work) | |||
| 861 | runqueue_work); | 871 | runqueue_work); |
| 862 | 872 | ||
| 863 | mutex_lock(&g2d->runqueue_mutex); | 873 | mutex_lock(&g2d->runqueue_mutex); |
| 864 | clk_disable(g2d->gate_clk); | 874 | clk_disable_unprepare(g2d->gate_clk); |
| 865 | pm_runtime_put_sync(g2d->dev); | 875 | pm_runtime_put_sync(g2d->dev); |
| 866 | 876 | ||
| 867 | complete(&g2d->runqueue_node->complete); | 877 | complete(&g2d->runqueue_node->complete); |
| @@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = { | |||
| 1521 | { .compatible = "samsung,exynos5250-g2d" }, | 1531 | { .compatible = "samsung,exynos5250-g2d" }, |
| 1522 | {}, | 1532 | {}, |
| 1523 | }; | 1533 | }; |
| 1524 | MODULE_DEVICE_TABLE(of, exynos_g2d_match); | ||
| 1525 | #endif | 1534 | #endif |
| 1526 | 1535 | ||
| 1527 | struct platform_driver g2d_driver = { | 1536 | struct platform_driver g2d_driver = { |
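
The g2d hunks replace unchecked pm_runtime_get_sync()/clk_enable() calls with checked acquisition plus rollback on failure, and the runqueue worker releases in reverse order. A sketch of that acquire-and-roll-back idiom; power_on(), clock_on() and their counterparts are hypothetical stand-ins for the runtime PM and clock APIs:

```c
#include <stdio.h>

/* Hypothetical stand-ins for pm_runtime_get_sync()/clk_prepare_enable()
 * and their release counterparts. */
static int power_on(void)   { return 0; }
static void power_off(void) { }
static int clock_on(void)   { return 0; }
static void clock_off(void) { }

/* Acquire in order; on failure, release only what was already
 * acquired, in reverse order. */
static int start_engine(void)
{
    int ret;

    ret = power_on();
    if (ret < 0)
        return ret;

    ret = clock_on();
    if (ret < 0) {
        power_off();    /* roll back the earlier acquisition */
        return ret;
    }
    return 0;
}

/* The matching teardown (see the runqueue worker hunk) releases in
 * reverse: clock first, then the power reference. */
static void stop_engine(void)
{
    clock_off();
    power_off();
}

int main(void)
{
    if (start_engine() == 0)
        stop_engine();
    return 0;
}
```
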
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 472e3b25e7f2..90b8a1a5344c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | * | 12 | * |
| 13 | */ | 13 | */ |
| 14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
| 17 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
| 18 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index aaa550d622f0..8d3bc01d6834 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/wait.h> | 17 | #include <linux/wait.h> |
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
| 20 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
| 21 | 20 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index b1ef8e7ff9c9..d2b6ab4def93 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | * | 12 | * |
| 13 | */ | 13 | */ |
| 14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
| 17 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 18 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
| @@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data, | |||
| 342 | */ | 341 | */ |
| 343 | ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, | 342 | ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, |
| 344 | prop_list->ipp_id); | 343 | prop_list->ipp_id); |
| 345 | if (!ippdrv) { | 344 | if (IS_ERR(ippdrv)) { |
| 346 | DRM_ERROR("ipp%d driver not found.\n", | 345 | DRM_ERROR("ipp%d driver not found.\n",
| 347 | prop_list->ipp_id); | 346 | prop_list->ipp_id); |
| 348 | return -EINVAL; | 347 | return PTR_ERR(ippdrv); |
| 349 | } | 348 | } |
| 350 | 349 | ||
| 351 | prop_list = ippdrv->prop_list; | 350 | prop_list = ippdrv->prop_list; |
| @@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, | |||
| 970 | /* find command node */ | 969 | /* find command node */ |
| 971 | c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, | 970 | c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, |
| 972 | qbuf->prop_id); | 971 | qbuf->prop_id); |
| 973 | if (!c_node) { | 972 | if (IS_ERR(c_node)) { |
| 974 | DRM_ERROR("failed to get command node.\n"); | 973 | DRM_ERROR("failed to get command node.\n"); |
| 975 | return -EFAULT; | 974 | return PTR_ERR(c_node); |
| 976 | } | 975 | } |
| 977 | 976 | ||
| 978 | /* buffer control */ | 977 | /* buffer control */ |
| @@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data, | |||
| 1106 | 1105 | ||
| 1107 | c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, | 1106 | c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, |
| 1108 | cmd_ctrl->prop_id); | 1107 | cmd_ctrl->prop_id); |
| 1109 | if (!c_node) { | 1108 | if (IS_ERR(c_node)) { |
| 1110 | DRM_ERROR("invalid command node list.\n"); | 1109 | DRM_ERROR("invalid command node list.\n"); |
| 1111 | return -EINVAL; | 1110 | return PTR_ERR(c_node); |
| 1112 | } | 1111 | } |
| 1113 | 1112 | ||
| 1114 | if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, | 1113 | if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, |
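
The ipp hunks switch the ipp_find_obj() callers from NULL checks to IS_ERR()/PTR_ERR(), implying the lookup now returns ERR_PTR-encoded error codes so callers can propagate the actual errno. A self-contained user-space model of that convention; the pointer casts mimic, rather than reproduce, the kernel's implementation:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the kernel's ERR_PTR convention: small negative error
 * codes live in the top 4095 values of the pointer range. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err)     { return (void *)(intptr_t)err; }
static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static int IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static int table[4] = { 10, 20, 30, 40 };

/* A lookup that reports *why* it failed instead of returning NULL,
 * which is presumably what the reworked ipp_find_obj() does. */
static void *find_obj(int id)
{
    if (id < 0 || id >= 4)
        return ERR_PTR(-ENOENT);
    return &table[id];
}

int main(void)
{
    void *obj = find_obj(7);

    if (IS_ERR(obj)) {
        fprintf(stderr, "lookup failed: %ld\n", PTR_ERR(obj));
        return 1;
    }
    printf("found %d\n", *(int *)obj);
    return 0;
}
```
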
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 427640aa5148..49669aa24c45 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/err.h> | 13 | #include <linux/err.h> |
| 15 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 16 | #include <linux/io.h> | 15 | #include <linux/io.h> |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 41cc74d83e4e..c57c56519add 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | #include <drm/drmP.h> | 13 | #include <drm/drmP.h> |
| 14 | 14 | ||
| 15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
| 18 | 17 | ||
| 19 | #include <drm/exynos_drm.h> | 18 | #include <drm/exynos_drm.h> |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 62ef5971ac3c..2f5c6942c968 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
| 25 | #include <linux/wait.h> | 25 | #include <linux/wait.h> |
| 26 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 29 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
| 30 | #include <linux/irq.h> | 29 | #include <linux/irq.h> |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c index ef04255076c7..6e320ae9afed 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c +++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/i2c.h> | 17 | #include <linux/i2c.h> |
| 18 | #include <linux/module.h> | ||
| 19 | 18 | ||
| 20 | #include "exynos_drm_drv.h" | 19 | #include "exynos_drm_drv.h" |
| 21 | #include "exynos_hdmi.h" | 20 | #include "exynos_hdmi.h" |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 42ffb71c63bc..c9a137caea41 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
| 24 | #include <linux/wait.h> | 24 | #include <linux/wait.h> |
| 25 | #include <linux/i2c.h> | 25 | #include <linux/i2c.h> |
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
| 28 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
| 29 | #include <linux/irq.h> | 28 | #include <linux/irq.h> |
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 19e36603b23b..3bc8414533c9 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c | |||
| @@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo, | |||
| 500 | &status)) | 500 | &status)) |
| 501 | goto log_fail; | 501 | goto log_fail; |
| 502 | 502 | ||
| 503 | while (status == SDVO_CMD_STATUS_PENDING && retry--) { | 503 | while ((status == SDVO_CMD_STATUS_PENDING || |
| 504 | status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) { | ||
| 504 | udelay(15); | 505 | udelay(15); |
| 505 | if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, | 506 | if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, |
| 506 | SDVO_I2C_CMD_STATUS, | 507 | SDVO_I2C_CMD_STATUS, |
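
The SDVO hunk widens the retry loop so that TARGET_NOT_SPECIFIED, like PENDING, counts as a transient status worth polling again. A generic sketch of that bounded-polling pattern; read_status() is a hypothetical device stub, and real code delays between polls (udelay(15) above):

```c
#include <stdio.h>

enum status { ST_PENDING, ST_TARGET_NOT_SPECIFIED, ST_SUCCESS };

/* Hypothetical device that answers only after a couple of polls. */
static enum status read_status(void)
{
    static int polls;
    return (++polls < 3) ? ST_PENDING : ST_SUCCESS;
}

int main(void)
{
    int retry = 5;
    enum status s = read_status();

    /* Treat both transient statuses as "keep polling", mirroring
     * the widened loop condition in the hunk. */
    while ((s == ST_PENDING || s == ST_TARGET_NOT_SPECIFIED) && retry--)
        s = read_status();

    puts(s == ST_SUCCESS ? "ok" : "timed out");
    return s == ST_SUCCESS ? 0 : 1;
}
```
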
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index cf188ab7051a..f4669802a0fb 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1495 | dev_priv->dev = dev; | 1495 | dev_priv->dev = dev; |
| 1496 | dev_priv->info = info; | 1496 | dev_priv->info = info; |
| 1497 | 1497 | ||
| 1498 | spin_lock_init(&dev_priv->irq_lock); | ||
| 1499 | spin_lock_init(&dev_priv->gpu_error.lock); | ||
| 1500 | spin_lock_init(&dev_priv->rps.lock); | ||
| 1501 | spin_lock_init(&dev_priv->gt_lock); | ||
| 1502 | spin_lock_init(&dev_priv->backlight.lock); | ||
| 1503 | mutex_init(&dev_priv->dpio_lock); | ||
| 1504 | mutex_init(&dev_priv->rps.hw_lock); | ||
| 1505 | mutex_init(&dev_priv->modeset_restore_lock); | ||
| 1506 | |||
| 1498 | i915_dump_device_info(dev_priv); | 1507 | i915_dump_device_info(dev_priv); |
| 1499 | 1508 | ||
| 1500 | if (i915_get_bridge_dev(dev)) { | 1509 | if (i915_get_bridge_dev(dev)) { |
| @@ -1585,6 +1594,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1585 | intel_detect_pch(dev); | 1594 | intel_detect_pch(dev); |
| 1586 | 1595 | ||
| 1587 | intel_irq_init(dev); | 1596 | intel_irq_init(dev); |
| 1597 | intel_pm_init(dev); | ||
| 1598 | intel_gt_sanitize(dev); | ||
| 1588 | intel_gt_init(dev); | 1599 | intel_gt_init(dev); |
| 1589 | 1600 | ||
| 1590 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1601 | /* Try to make sure MCHBAR is enabled before poking at it */ |
| @@ -1610,15 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1610 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | 1621 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
| 1611 | pci_enable_msi(dev->pdev); | 1622 | pci_enable_msi(dev->pdev); |
| 1612 | 1623 | ||
| 1613 | spin_lock_init(&dev_priv->irq_lock); | ||
| 1614 | spin_lock_init(&dev_priv->gpu_error.lock); | ||
| 1615 | spin_lock_init(&dev_priv->rps.lock); | ||
| 1616 | spin_lock_init(&dev_priv->backlight.lock); | ||
| 1617 | mutex_init(&dev_priv->dpio_lock); | ||
| 1618 | |||
| 1619 | mutex_init(&dev_priv->rps.hw_lock); | ||
| 1620 | mutex_init(&dev_priv->modeset_restore_lock); | ||
| 1621 | |||
| 1622 | dev_priv->num_plane = 1; | 1624 | dev_priv->num_plane = 1; |
| 1623 | if (IS_VALLEYVIEW(dev)) | 1625 | if (IS_VALLEYVIEW(dev)) |
| 1624 | dev_priv->num_plane = 2; | 1626 | dev_priv->num_plane = 2; |
| @@ -1648,7 +1650,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1648 | if (INTEL_INFO(dev)->num_pipes) { | 1650 | if (INTEL_INFO(dev)->num_pipes) { |
| 1649 | /* Must be done after probing outputs */ | 1651 | /* Must be done after probing outputs */ |
| 1650 | intel_opregion_init(dev); | 1652 | intel_opregion_init(dev); |
| 1651 | acpi_video_register_with_quirks(); | 1653 | acpi_video_register(); |
| 1652 | } | 1654 | } |
| 1653 | 1655 | ||
| 1654 | if (IS_GEN5(dev)) | 1656 | if (IS_GEN5(dev)) |
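
The i915_driver_load() hunks hoist every spin_lock_init()/mutex_init() to the top of the function, plausibly because the newly added intel_gt_sanitize() call takes dev_priv->gt_lock well before the old init point. A sketch of the init-before-first-use ordering, with pthreads standing in for kernel locks and a hypothetical gt_sanitize():

```c
#include <pthread.h>

struct dev_priv {
    pthread_mutex_t gt_lock;    /* stands in for dev_priv->gt_lock */
    int forcewake_count;
};

/* Takes the lock, so it must never run before the lock is set up;
 * this is why the inits are hoisted to the very top of driver load. */
static void gt_sanitize(struct dev_priv *priv)
{
    pthread_mutex_lock(&priv->gt_lock);
    priv->forcewake_count = 0;
    pthread_mutex_unlock(&priv->gt_lock);
}

static int driver_load(struct dev_priv *priv)
{
    /* 1. Initialize every lock before anything can take it. */
    pthread_mutex_init(&priv->gt_lock, NULL);

    /* 2. Only then run setup steps that may use those locks. */
    gt_sanitize(priv);
    return 0;
}

int main(void)
{
    struct dev_priv priv;

    return driver_load(&priv);
}
```
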
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f4af1ca0fb62..45b3c030f483 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
| 706 | { | 706 | { |
| 707 | int error = 0; | 707 | int error = 0; |
| 708 | 708 | ||
| 709 | intel_gt_reset(dev); | 709 | intel_gt_sanitize(dev); |
| 710 | 710 | ||
| 711 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 711 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 712 | mutex_lock(&dev->struct_mutex); | 712 | mutex_lock(&dev->struct_mutex); |
| @@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev) | |||
| 732 | 732 | ||
| 733 | pci_set_master(dev->pdev); | 733 | pci_set_master(dev->pdev); |
| 734 | 734 | ||
| 735 | intel_gt_reset(dev); | 735 | intel_gt_sanitize(dev); |
| 736 | 736 | ||
| 737 | /* | 737 | /* |
| 738 | * Platforms with opregion should have sane BIOS, older ones (gen3 and | 738 | * Platforms with opregion should have sane BIOS, older ones (gen3 and |
| @@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) | |||
| 1253 | 1253 | ||
| 1254 | #define __i915_read(x, y) \ | 1254 | #define __i915_read(x, y) \ |
| 1255 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1255 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
| 1256 | unsigned long irqflags; \ | ||
| 1256 | u##x val = 0; \ | 1257 | u##x val = 0; \ |
| 1258 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||
| 1257 | if (IS_GEN5(dev_priv->dev)) \ | 1259 | if (IS_GEN5(dev_priv->dev)) \ |
| 1258 | ilk_dummy_write(dev_priv); \ | 1260 | ilk_dummy_write(dev_priv); \ |
| 1259 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1261 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
| 1260 | unsigned long irqflags; \ | ||
| 1261 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||
| 1262 | if (dev_priv->forcewake_count == 0) \ | 1262 | if (dev_priv->forcewake_count == 0) \ |
| 1263 | dev_priv->gt.force_wake_get(dev_priv); \ | 1263 | dev_priv->gt.force_wake_get(dev_priv); \ |
| 1264 | val = read##y(dev_priv->regs + reg); \ | 1264 | val = read##y(dev_priv->regs + reg); \ |
| 1265 | if (dev_priv->forcewake_count == 0) \ | 1265 | if (dev_priv->forcewake_count == 0) \ |
| 1266 | dev_priv->gt.force_wake_put(dev_priv); \ | 1266 | dev_priv->gt.force_wake_put(dev_priv); \ |
| 1267 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||
| 1268 | } else { \ | 1267 | } else { \ |
| 1269 | val = read##y(dev_priv->regs + reg); \ | 1268 | val = read##y(dev_priv->regs + reg); \ |
| 1270 | } \ | 1269 | } \ |
| 1270 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||
| 1271 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | 1271 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ |
| 1272 | return val; \ | 1272 | return val; \ |
| 1273 | } | 1273 | } |
| @@ -1280,8 +1280,10 @@ __i915_read(64, q) | |||
| 1280 | 1280 | ||
| 1281 | #define __i915_write(x, y) \ | 1281 | #define __i915_write(x, y) \ |
| 1282 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | 1282 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
| 1283 | unsigned long irqflags; \ | ||
| 1283 | u32 __fifo_ret = 0; \ | 1284 | u32 __fifo_ret = 0; \ |
| 1284 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | 1285 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ |
| 1286 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||
| 1285 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1287 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
| 1286 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 1288 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
| 1287 | } \ | 1289 | } \ |
| @@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | |||
| 1293 | gen6_gt_check_fifodbg(dev_priv); \ | 1295 | gen6_gt_check_fifodbg(dev_priv); \ |
| 1294 | } \ | 1296 | } \ |
| 1295 | hsw_unclaimed_reg_check(dev_priv, reg); \ | 1297 | hsw_unclaimed_reg_check(dev_priv, reg); \ |
| 1298 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||
| 1296 | } | 1299 | } |
| 1297 | __i915_write(8, b) | 1300 | __i915_write(8, b) |
| 1298 | __i915_write(16, w) | 1301 | __i915_write(16, w) |
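
The __i915_read/__i915_write changes move the gt_lock acquisition outward so it covers the entire forcewake/access/release sequence instead of only the forcewake bookkeeping, preventing concurrent accessors from interleaving mid-sequence. A sketch of an accessor that holds one lock across the whole sequence; the names and the fake register file are illustrative only:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t gt_lock = PTHREAD_MUTEX_INITIALIZER;
static int forcewake_count;
static uint32_t fake_regs[16];          /* stands in for the MMIO BAR */

static void force_wake_get(void) { /* wake the GT power well */ }
static void force_wake_put(void) { /* allow it to sleep again */ }

/* The whole wake/read/sleep sequence runs under one lock, matching
 * the reworked macro: no other thread can slip in between the
 * forcewake dance and the register access itself. */
static uint32_t read_reg(uint32_t reg, int needs_force_wake)
{
    uint32_t val;

    pthread_mutex_lock(&gt_lock);
    if (needs_force_wake && forcewake_count == 0)
        force_wake_get();
    val = fake_regs[reg];
    if (needs_force_wake && forcewake_count == 0)
        force_wake_put();
    pthread_mutex_unlock(&gt_lock);

    return val;
}

int main(void)
{
    fake_regs[3] = 0xdeadbeef;
    printf("0x%08x\n", read_reg(3, 1));
    return 0;
}
```
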
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a416645bcd23..1929bffc1c77 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -555,6 +555,7 @@ enum intel_sbi_destination { | |||
| 555 | #define QUIRK_PIPEA_FORCE (1<<0) | 555 | #define QUIRK_PIPEA_FORCE (1<<0) |
| 556 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) | 556 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
| 557 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) | 557 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) |
| 558 | #define QUIRK_NO_PCH_PWM_ENABLE (1<<3) | ||
| 558 | 559 | ||
| 559 | struct intel_fbdev; | 560 | struct intel_fbdev; |
| 560 | struct intel_fbc_work; | 561 | struct intel_fbc_work; |
| @@ -1581,9 +1582,10 @@ void i915_hangcheck_elapsed(unsigned long data); | |||
| 1581 | void i915_handle_error(struct drm_device *dev, bool wedged); | 1582 | void i915_handle_error(struct drm_device *dev, bool wedged); |
| 1582 | 1583 | ||
| 1583 | extern void intel_irq_init(struct drm_device *dev); | 1584 | extern void intel_irq_init(struct drm_device *dev); |
| 1585 | extern void intel_pm_init(struct drm_device *dev); | ||
| 1584 | extern void intel_hpd_init(struct drm_device *dev); | 1586 | extern void intel_hpd_init(struct drm_device *dev); |
| 1585 | extern void intel_gt_init(struct drm_device *dev); | 1587 | extern void intel_gt_init(struct drm_device *dev); |
| 1586 | extern void intel_gt_reset(struct drm_device *dev); | 1588 | extern void intel_gt_sanitize(struct drm_device *dev); |
| 1587 | 1589 | ||
| 1588 | void i915_error_state_free(struct kref *error_ref); | 1590 | void i915_error_state_free(struct kref *error_ref); |
| 1589 | 1591 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 97afd2639fb6..d9e2208cfe98 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2258,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev) | |||
| 2258 | 2258 | ||
| 2259 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 2259 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
| 2260 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 2260 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
| 2261 | i915_gem_write_fence(dev, i, reg->obj); | 2261 | |
| 2262 | /* | ||
| 2263 | * Commit delayed tiling changes if we have an object still | ||
| 2264 | * attached to the fence, otherwise just clear the fence. | ||
| 2265 | */ | ||
| 2266 | if (reg->obj) { | ||
| 2267 | i915_gem_object_update_fence(reg->obj, reg, | ||
| 2268 | reg->obj->tiling_mode); | ||
| 2269 | } else { | ||
| 2270 | i915_gem_write_fence(dev, i, NULL); | ||
| 2271 | } | ||
| 2262 | } | 2272 | } |
| 2263 | } | 2273 | } |
| 2264 | 2274 | ||
| @@ -2795,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, | |||
| 2795 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) | 2805 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) |
| 2796 | mb(); | 2806 | mb(); |
| 2797 | 2807 | ||
| 2808 | WARN(obj && (!obj->stride || !obj->tiling_mode), | ||
| 2809 | "bogus fence setup with stride: 0x%x, tiling mode: %i\n", | ||
| 2810 | obj->stride, obj->tiling_mode); | ||
| 2811 | |||
| 2798 | switch (INTEL_INFO(dev)->gen) { | 2812 | switch (INTEL_INFO(dev)->gen) { |
| 2799 | case 7: | 2813 | case 7: |
| 2800 | case 6: | 2814 | case 6: |
| @@ -2836,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | |||
| 2836 | fence->obj = NULL; | 2850 | fence->obj = NULL; |
| 2837 | list_del_init(&fence->lru_list); | 2851 | list_del_init(&fence->lru_list); |
| 2838 | } | 2852 | } |
| 2853 | obj->fence_dirty = false; | ||
| 2839 | } | 2854 | } |
| 2840 | 2855 | ||
| 2841 | static int | 2856 | static int |
| @@ -2965,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) | |||
| 2965 | return 0; | 2980 | return 0; |
| 2966 | 2981 | ||
| 2967 | i915_gem_object_update_fence(obj, reg, enable); | 2982 | i915_gem_object_update_fence(obj, reg, enable); |
| 2968 | obj->fence_dirty = false; | ||
| 2969 | 2983 | ||
| 2970 | return 0; | 2984 | return 0; |
| 2971 | } | 2985 | } |
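
i915_gem_restore_fences() now re-derives each fence from current software state: a slot with an attached object is rewritten via i915_gem_object_update_fence(), committing any delayed tiling change, while an empty slot is explicitly cleared. A stripped-down sketch of that restore shape, with hypothetical obj and fence_reg types:

```c
#include <stdio.h>

struct obj { int tiling_mode; };
struct fence_reg { struct obj *obj; };

/* Models the register write: an attached object programs the fence
 * from its current tiling, an empty slot clears the register. */
static void write_fence(int i, const struct obj *obj)
{
    if (obj)
        printf("fence %d <- tiling %d\n", i, obj->tiling_mode);
    else
        printf("fence %d cleared\n", i);
}

/* Restore by re-deriving hardware state from software state rather
 * than replaying whatever the registers held before suspend. */
static void restore_fences(struct fence_reg *regs, int n)
{
    int i;

    for (i = 0; i < n; i++)
        write_fence(i, regs[i].obj);
}

int main(void)
{
    struct obj a = { .tiling_mode = 1 };
    struct fence_reg regs[2] = { { &a }, { NULL } };

    restore_fences(regs, 2);
    return 0;
}
```
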
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index dc53a527126b..9e6578330801 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
| @@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, | |||
| 85 | struct sg_table *sg, | 85 | struct sg_table *sg, |
| 86 | enum dma_data_direction dir) | 86 | enum dma_data_direction dir) |
| 87 | { | 87 | { |
| 88 | struct drm_i915_gem_object *obj = attachment->dmabuf->priv; | ||
| 89 | |||
| 90 | mutex_lock(&obj->base.dev->struct_mutex); | ||
| 91 | |||
| 88 | dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); | 92 | dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); |
| 89 | sg_free_table(sg); | 93 | sg_free_table(sg); |
| 90 | kfree(sg); | 94 | kfree(sg); |
| 95 | |||
| 96 | i915_gem_object_unpin_pages(obj); | ||
| 97 | |||
| 98 | mutex_unlock(&obj->base.dev->struct_mutex); | ||
| 91 | } | 99 | } |
| 92 | 100 | ||
| 93 | static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) | 101 | static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) |
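
The dma-buf hunk makes unmap drop the page pin that the corresponding map presumably took, and does so under struct_mutex. A sketch of the balanced pin/unpin pattern under one lock; the pthread mutex and counter stand in for struct_mutex and the object's pages_pin_count:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pin_count;

/* Mapping pins the backing pages so they stay resident... */
static void map_buf(void)
{
    pthread_mutex_lock(&struct_mutex);
    pin_count++;
    pthread_mutex_unlock(&struct_mutex);
}

/* ...and unmapping must drop that pin under the same lock, which is
 * the balance the hunk restores. */
static void unmap_buf(void)
{
    pthread_mutex_lock(&struct_mutex);
    pin_count--;
    pthread_mutex_unlock(&struct_mutex);
}

int main(void)
{
    map_buf();
    unmap_buf();
    printf("pin_count = %d\n", pin_count);  /* 0: balanced */
    return 0;
}
```
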
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f2326fc60ac9..342f1f336168 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -752,6 +752,8 @@ | |||
| 752 | will not assert AGPBUSY# and will only | 752 | will not assert AGPBUSY# and will only |
| 753 | be delivered when out of C3. */ | 753 | be delivered when out of C3. */ |
| 754 | #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ | 754 | #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ |
| 755 | #define INSTPM_TLB_INVALIDATE (1<<9) | ||
| 756 | #define INSTPM_SYNC_FLUSH (1<<5) | ||
| 755 | #define ACTHD 0x020c8 | 757 | #define ACTHD 0x020c8 |
| 756 | #define FW_BLC 0x020d8 | 758 | #define FW_BLC 0x020d8 |
| 757 | #define FW_BLC2 0x020dc | 759 | #define FW_BLC2 0x020dc |
| @@ -1856,10 +1858,16 @@ | |||
| 1856 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 1858 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
| 1857 | 1859 | ||
| 1858 | #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) | 1860 | #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) |
| 1859 | /* HDMI/DP bits are gen4+ */ | 1861 | /* |
| 1860 | #define PORTB_HOTPLUG_LIVE_STATUS (1 << 29) | 1862 | * HDMI/DP bits are gen4+ |
| 1863 | * | ||
| 1864 | * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. | ||
| 1865 | * Please check the detailed lore in the commit message for experimental | ||
| 1866 | * evidence. | ||
| 1867 | */ | ||
| 1868 | #define PORTD_HOTPLUG_LIVE_STATUS (1 << 29) | ||
| 1861 | #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) | 1869 | #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) |
| 1862 | #define PORTD_HOTPLUG_LIVE_STATUS (1 << 27) | 1870 | #define PORTB_HOTPLUG_LIVE_STATUS (1 << 27) |
| 1863 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) | 1871 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) |
| 1864 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) | 1872 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) |
| 1865 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) | 1873 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) |
| @@ -4432,7 +4440,7 @@ | |||
| 4432 | #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) | 4440 | #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) |
| 4433 | #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) | 4441 | #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) |
| 4434 | #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) | 4442 | #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) |
| 4435 | #define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22) | 4443 | #define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22) |
| 4436 | 4444 | ||
| 4437 | /* legacy values */ | 4445 | /* legacy values */ |
| 4438 | #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) | 4446 | #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 324211ac9c55..b042ee5c4070 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, | |||
| 301 | struct intel_digital_port *intel_dig_port = | 301 | struct intel_digital_port *intel_dig_port = |
| 302 | enc_to_dig_port(encoder); | 302 | enc_to_dig_port(encoder); |
| 303 | 303 | ||
| 304 | intel_dp->DP = intel_dig_port->port_reversal | | 304 | intel_dp->DP = intel_dig_port->saved_port_bits | |
| 305 | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; | 305 | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; |
| 306 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); | 306 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); |
| 307 | 307 | ||
| @@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) | |||
| 1109 | * enabling the port. | 1109 | * enabling the port. |
| 1110 | */ | 1110 | */ |
| 1111 | I915_WRITE(DDI_BUF_CTL(port), | 1111 | I915_WRITE(DDI_BUF_CTL(port), |
| 1112 | intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); | 1112 | intel_dig_port->saved_port_bits | |
| 1113 | DDI_BUF_CTL_ENABLE); | ||
| 1113 | } else if (type == INTEL_OUTPUT_EDP) { | 1114 | } else if (type == INTEL_OUTPUT_EDP) { |
| 1114 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1115 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
| 1115 | 1116 | ||
| @@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
| 1347 | intel_encoder->get_config = intel_ddi_get_config; | 1348 | intel_encoder->get_config = intel_ddi_get_config; |
| 1348 | 1349 | ||
| 1349 | intel_dig_port->port = port; | 1350 | intel_dig_port->port = port; |
| 1350 | intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & | 1351 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
| 1351 | DDI_BUF_PORT_REVERSAL; | 1352 | (DDI_BUF_PORT_REVERSAL | |
| 1353 | DDI_A_4_LANES); | ||
| 1352 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); | 1354 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); |
| 1353 | 1355 | ||
| 1354 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; | 1356 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 85f3eb74d2b7..be79f477a38f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, | |||
| 4913 | uint32_t tmp; | 4913 | uint32_t tmp; |
| 4914 | 4914 | ||
| 4915 | tmp = I915_READ(PFIT_CONTROL); | 4915 | tmp = I915_READ(PFIT_CONTROL); |
| 4916 | if (!(tmp & PFIT_ENABLE)) | ||
| 4917 | return; | ||
| 4916 | 4918 | ||
| 4919 | /* Check whether the pfit is attached to our pipe. */ | ||
| 4917 | if (INTEL_INFO(dev)->gen < 4) { | 4920 | if (INTEL_INFO(dev)->gen < 4) { |
| 4918 | if (crtc->pipe != PIPE_B) | 4921 | if (crtc->pipe != PIPE_B) |
| 4919 | return; | 4922 | return; |
| 4920 | |||
| 4921 | /* gen2/3 store dither state in pfit control, needs to match */ | ||
| 4922 | pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE; | ||
| 4923 | } else { | 4923 | } else { |
| 4924 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) | 4924 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) |
| 4925 | return; | 4925 | return; |
| 4926 | } | 4926 | } |
| 4927 | 4927 | ||
| 4928 | if (!(tmp & PFIT_ENABLE)) | 4928 | pipe_config->gmch_pfit.control = tmp; |
| 4929 | return; | ||
| 4930 | |||
| 4931 | pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL); | ||
| 4932 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); | 4929 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); |
| 4933 | if (INTEL_INFO(dev)->gen < 5) | 4930 | if (INTEL_INFO(dev)->gen < 5) |
| 4934 | pipe_config->gmch_pfit.lvds_border_bits = | 4931 | pipe_config->gmch_pfit.lvds_border_bits = |
| @@ -8272,9 +8269,11 @@ check_crtc_state(struct drm_device *dev) | |||
| 8272 | 8269 | ||
| 8273 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 8270 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
| 8274 | base.head) { | 8271 | base.head) { |
| 8272 | enum pipe pipe; | ||
| 8275 | if (encoder->base.crtc != &crtc->base) | 8273 | if (encoder->base.crtc != &crtc->base) |
| 8276 | continue; | 8274 | continue; |
| 8277 | if (encoder->get_config) | 8275 | if (encoder->get_config && |
| 8276 | encoder->get_hw_state(encoder, &pipe)) | ||
| 8278 | encoder->get_config(encoder, &pipe_config); | 8277 | encoder->get_config(encoder, &pipe_config); |
| 8279 | } | 8278 | } |
| 8280 | 8279 | ||
| @@ -8317,6 +8316,8 @@ check_shared_dpll_state(struct drm_device *dev) | |||
| 8317 | pll->active, pll->refcount); | 8316 | pll->active, pll->refcount); |
| 8318 | WARN(pll->active && !pll->on, | 8317 | WARN(pll->active && !pll->on, |
| 8319 | "pll in active use but not on in sw tracking\n"); | 8318 | "pll in active use but not on in sw tracking\n"); |
| 8319 | WARN(pll->on && !pll->active, | ||
| 8320 | "pll is on but not in use in sw tracking\n"); | ||
| 8320 | WARN(pll->on != active, | 8321 | WARN(pll->on != active, |
| 8321 | "pll on state mismatch (expected %i, found %i)\n", | 8322 | "pll on state mismatch (expected %i, found %i)\n", |
| 8322 | pll->on, active); | 8323 | pll->on, active); |
| @@ -8541,15 +8542,20 @@ static void intel_set_config_restore_state(struct drm_device *dev, | |||
| 8541 | } | 8542 | } |
| 8542 | 8543 | ||
| 8543 | static bool | 8544 | static bool |
| 8544 | is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors, | 8545 | is_crtc_connector_off(struct drm_mode_set *set) |
| 8545 | int num_connectors) | ||
| 8546 | { | 8546 | { |
| 8547 | int i; | 8547 | int i; |
| 8548 | 8548 | ||
| 8549 | for (i = 0; i < num_connectors; i++) | 8549 | if (set->num_connectors == 0) |
| 8550 | if (connectors[i].encoder && | 8550 | return false; |
| 8551 | connectors[i].encoder->crtc == crtc && | 8551 | |
| 8552 | connectors[i].dpms != DRM_MODE_DPMS_ON) | 8552 | if (WARN_ON(set->connectors == NULL)) |
| 8553 | return false; | ||
| 8554 | |||
| 8555 | for (i = 0; i < set->num_connectors; i++) | ||
| 8556 | if (set->connectors[i]->encoder && | ||
| 8557 | set->connectors[i]->encoder->crtc == set->crtc && | ||
| 8558 | set->connectors[i]->dpms != DRM_MODE_DPMS_ON) | ||
| 8553 | return true; | 8559 | return true; |
| 8554 | 8560 | ||
| 8555 | return false; | 8561 | return false; |
| @@ -8562,10 +8568,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, | |||
| 8562 | 8568 | ||
| 8563 | /* We should be able to check here if the fb has the same properties | 8569 | /* We should be able to check here if the fb has the same properties |
| 8564 | * and then just flip_or_move it */ | 8570 | * and then just flip_or_move it */ |
| 8565 | if (set->connectors != NULL && | 8571 | if (is_crtc_connector_off(set)) { |
| 8566 | is_crtc_connector_off(set->crtc, *set->connectors, | 8572 | config->mode_changed = true; |
| 8567 | set->num_connectors)) { | ||
| 8568 | config->mode_changed = true; | ||
| 8569 | } else if (set->crtc->fb != set->fb) { | 8573 | } else if (set->crtc->fb != set->fb) { |
| 8570 | /* If we have no fb then treat it as a full mode set */ | 8574 | /* If we have no fb then treat it as a full mode set */ |
| 8571 | if (set->crtc->fb == NULL) { | 8575 | if (set->crtc->fb == NULL) { |
| @@ -9398,6 +9402,17 @@ static void quirk_invert_brightness(struct drm_device *dev) | |||
| 9398 | DRM_INFO("applying inverted panel brightness quirk\n"); | 9402 | DRM_INFO("applying inverted panel brightness quirk\n"); |
| 9399 | } | 9403 | } |
| 9400 | 9404 | ||
| 9405 | /* | ||
| 9406 | * Some machines (Dell XPS13) suffer broken backlight controls if | ||
| 9407 | * BLM_PCH_PWM_ENABLE is set. | ||
| 9408 | */ | ||
| 9409 | static void quirk_no_pch_pwm_enable(struct drm_device *dev) | ||
| 9410 | { | ||
| 9411 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 9412 | dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE; | ||
| 9413 | DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n"); | ||
| 9414 | } | ||
| 9415 | |||
| 9401 | struct intel_quirk { | 9416 | struct intel_quirk { |
| 9402 | int device; | 9417 | int device; |
| 9403 | int subsystem_vendor; | 9418 | int subsystem_vendor; |
| @@ -9467,6 +9482,11 @@ static struct intel_quirk intel_quirks[] = { | |||
| 9467 | 9482 | ||
| 9468 | /* Acer Aspire 4736Z */ | 9483 | /* Acer Aspire 4736Z */ |
| 9469 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | 9484 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
| 9485 | |||
| 9486 | /* Dell XPS13 HD Sandy Bridge */ | ||
| 9487 | { 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable }, | ||
| 9488 | /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ | ||
| 9489 | { 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable }, | ||
| 9470 | }; | 9490 | }; |
| 9471 | 9491 | ||
| 9472 | static void intel_init_quirks(struct drm_device *dev) | 9492 | static void intel_init_quirks(struct drm_device *dev) |
| @@ -9817,8 +9837,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
| 9817 | } | 9837 | } |
| 9818 | pll->refcount = pll->active; | 9838 | pll->refcount = pll->active; |
| 9819 | 9839 | ||
| 9820 | DRM_DEBUG_KMS("%s hw state readout: refcount %i\n", | 9840 | DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", |
| 9821 | pll->name, pll->refcount); | 9841 | pll->name, pll->refcount, pll->on); |
| 9822 | } | 9842 | } |
| 9823 | 9843 | ||
| 9824 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9844 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
| @@ -9869,6 +9889,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
| 9869 | struct drm_plane *plane; | 9889 | struct drm_plane *plane; |
| 9870 | struct intel_crtc *crtc; | 9890 | struct intel_crtc *crtc; |
| 9871 | struct intel_encoder *encoder; | 9891 | struct intel_encoder *encoder; |
| 9892 | int i; | ||
| 9872 | 9893 | ||
| 9873 | intel_modeset_readout_hw_state(dev); | 9894 | intel_modeset_readout_hw_state(dev); |
| 9874 | 9895 | ||
| @@ -9884,6 +9905,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
| 9884 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); | 9905 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); |
| 9885 | } | 9906 | } |
| 9886 | 9907 | ||
| 9908 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | ||
| 9909 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | ||
| 9910 | |||
| 9911 | if (!pll->on || pll->active) | ||
| 9912 | continue; | ||
| 9913 | |||
| 9914 | DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); | ||
| 9915 | |||
| 9916 | pll->disable(dev_priv, pll); | ||
| 9917 | pll->on = false; | ||
| 9918 | } | ||
| 9919 | |||
| 9887 | if (force_restore) { | 9920 | if (force_restore) { |
| 9888 | /* | 9921 | /* |
| 9889 | * We need to use raw interfaces for restoring state to avoid | 9922 | * We need to use raw interfaces for restoring state to avoid |
| @@ -10009,6 +10042,8 @@ struct intel_display_error_state { | |||
| 10009 | 10042 | ||
| 10010 | u32 power_well_driver; | 10043 | u32 power_well_driver; |
| 10011 | 10044 | ||
| 10045 | int num_transcoders; | ||
| 10046 | |||
| 10012 | struct intel_cursor_error_state { | 10047 | struct intel_cursor_error_state { |
| 10013 | u32 control; | 10048 | u32 control; |
| 10014 | u32 position; | 10049 | u32 position; |
| @@ -10017,16 +10052,7 @@ struct intel_display_error_state { | |||
| 10017 | } cursor[I915_MAX_PIPES]; | 10052 | } cursor[I915_MAX_PIPES]; |
| 10018 | 10053 | ||
| 10019 | struct intel_pipe_error_state { | 10054 | struct intel_pipe_error_state { |
| 10020 | enum transcoder cpu_transcoder; | ||
| 10021 | u32 conf; | ||
| 10022 | u32 source; | 10055 | u32 source; |
| 10023 | |||
| 10024 | u32 htotal; | ||
| 10025 | u32 hblank; | ||
| 10026 | u32 hsync; | ||
| 10027 | u32 vtotal; | ||
| 10028 | u32 vblank; | ||
| 10029 | u32 vsync; | ||
| 10030 | } pipe[I915_MAX_PIPES]; | 10056 | } pipe[I915_MAX_PIPES]; |
| 10031 | 10057 | ||
| 10032 | struct intel_plane_error_state { | 10058 | struct intel_plane_error_state { |
| @@ -10038,6 +10064,19 @@ struct intel_display_error_state { | |||
| 10038 | u32 surface; | 10064 | u32 surface; |
| 10039 | u32 tile_offset; | 10065 | u32 tile_offset; |
| 10040 | } plane[I915_MAX_PIPES]; | 10066 | } plane[I915_MAX_PIPES]; |
| 10067 | |||
| 10068 | struct intel_transcoder_error_state { | ||
| 10069 | enum transcoder cpu_transcoder; | ||
| 10070 | |||
| 10071 | u32 conf; | ||
| 10072 | |||
| 10073 | u32 htotal; | ||
| 10074 | u32 hblank; | ||
| 10075 | u32 hsync; | ||
| 10076 | u32 vtotal; | ||
| 10077 | u32 vblank; | ||
| 10078 | u32 vsync; | ||
| 10079 | } transcoder[4]; | ||
| 10041 | }; | 10080 | }; |
| 10042 | 10081 | ||
| 10043 | struct intel_display_error_state * | 10082 | struct intel_display_error_state * |
| @@ -10045,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
| 10045 | { | 10084 | { |
| 10046 | drm_i915_private_t *dev_priv = dev->dev_private; | 10085 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 10047 | struct intel_display_error_state *error; | 10086 | struct intel_display_error_state *error; |
| 10048 | enum transcoder cpu_transcoder; | 10087 | int transcoders[] = { |
| 10088 | TRANSCODER_A, | ||
| 10089 | TRANSCODER_B, | ||
| 10090 | TRANSCODER_C, | ||
| 10091 | TRANSCODER_EDP, | ||
| 10092 | }; | ||
| 10049 | int i; | 10093 | int i; |
| 10050 | 10094 | ||
| 10095 | if (INTEL_INFO(dev)->num_pipes == 0) | ||
| 10096 | return NULL; | ||
| 10097 | |||
| 10051 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 10098 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
| 10052 | if (error == NULL) | 10099 | if (error == NULL) |
| 10053 | return NULL; | 10100 | return NULL; |
| @@ -10056,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
| 10056 | error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); | 10103 | error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); |
| 10057 | 10104 | ||
| 10058 | for_each_pipe(i) { | 10105 | for_each_pipe(i) { |
| 10059 | cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); | ||
| 10060 | error->pipe[i].cpu_transcoder = cpu_transcoder; | ||
| 10061 | |||
| 10062 | if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { | 10106 | if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { |
| 10063 | error->cursor[i].control = I915_READ(CURCNTR(i)); | 10107 | error->cursor[i].control = I915_READ(CURCNTR(i)); |
| 10064 | error->cursor[i].position = I915_READ(CURPOS(i)); | 10108 | error->cursor[i].position = I915_READ(CURPOS(i)); |
| @@ -10082,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
| 10082 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | 10126 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); |
| 10083 | } | 10127 | } |
| 10084 | 10128 | ||
| 10085 | error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); | ||
| 10086 | error->pipe[i].source = I915_READ(PIPESRC(i)); | 10129 | error->pipe[i].source = I915_READ(PIPESRC(i)); |
| 10087 | error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); | 10130 | } |
| 10088 | error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); | 10131 | |
| 10089 | error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); | 10132 | error->num_transcoders = INTEL_INFO(dev)->num_pipes; |
| 10090 | error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); | 10133 | if (HAS_DDI(dev_priv->dev)) |
| 10091 | error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); | 10134 | error->num_transcoders++; /* Account for eDP. */ |
| 10092 | error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); | 10135 | |
| 10136 | for (i = 0; i < error->num_transcoders; i++) { | ||
| 10137 | enum transcoder cpu_transcoder = transcoders[i]; | ||
| 10138 | |||
| 10139 | error->transcoder[i].cpu_transcoder = cpu_transcoder; | ||
| 10140 | |||
| 10141 | error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); | ||
| 10142 | error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); | ||
| 10143 | error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); | ||
| 10144 | error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
| 10145 | error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); | ||
| 10146 | error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); | ||
| 10147 | error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
| 10093 | } | 10148 | } |
| 10094 | 10149 | ||
| 10095 | /* In the code above we read the registers without checking if the power | 10150 | /* In the code above we read the registers without checking if the power |
| @@ -10111,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, | |||
| 10111 | { | 10166 | { |
| 10112 | int i; | 10167 | int i; |
| 10113 | 10168 | ||
| 10169 | if (!error) | ||
| 10170 | return; | ||
| 10171 | |||
| 10114 | err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); | 10172 | err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); |
| 10115 | if (HAS_POWER_WELL(dev)) | 10173 | if (HAS_POWER_WELL(dev)) |
| 10116 | err_printf(m, "PWR_WELL_CTL2: %08x\n", | 10174 | err_printf(m, "PWR_WELL_CTL2: %08x\n", |
| 10117 | error->power_well_driver); | 10175 | error->power_well_driver); |
| 10118 | for_each_pipe(i) { | 10176 | for_each_pipe(i) { |
| 10119 | err_printf(m, "Pipe [%d]:\n", i); | 10177 | err_printf(m, "Pipe [%d]:\n", i); |
| 10120 | err_printf(m, " CPU transcoder: %c\n", | ||
| 10121 | transcoder_name(error->pipe[i].cpu_transcoder)); | ||
| 10122 | err_printf(m, " CONF: %08x\n", error->pipe[i].conf); | ||
| 10123 | err_printf(m, " SRC: %08x\n", error->pipe[i].source); | 10178 | err_printf(m, " SRC: %08x\n", error->pipe[i].source); |
| 10124 | err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); | ||
| 10125 | err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); | ||
| 10126 | err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); | ||
| 10127 | err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); | ||
| 10128 | err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); | ||
| 10129 | err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); | ||
| 10130 | 10179 | ||
| 10131 | err_printf(m, "Plane [%d]:\n", i); | 10180 | err_printf(m, "Plane [%d]:\n", i); |
| 10132 | err_printf(m, " CNTR: %08x\n", error->plane[i].control); | 10181 | err_printf(m, " CNTR: %08x\n", error->plane[i].control); |
| @@ -10147,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, | |||
| 10147 | err_printf(m, " POS: %08x\n", error->cursor[i].position); | 10196 | err_printf(m, " POS: %08x\n", error->cursor[i].position); |
| 10148 | err_printf(m, " BASE: %08x\n", error->cursor[i].base); | 10197 | err_printf(m, " BASE: %08x\n", error->cursor[i].base); |
| 10149 | } | 10198 | } |
| 10199 | |||
| 10200 | for (i = 0; i < error->num_transcoders; i++) { | ||
| 10201 | err_printf(m, " CPU transcoder: %c\n", | ||
| 10202 | transcoder_name(error->transcoder[i].cpu_transcoder)); | ||
| 10203 | err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); | ||
| 10204 | err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); | ||
| 10205 | err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); | ||
| 10206 | err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); | ||
| 10207 | err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); | ||
| 10208 | err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); | ||
| 10209 | err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); | ||
| 10210 | } | ||
| 10150 | } | 10211 | } |
| 10151 | #endif | 10212 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c8c9b6f48230..b7d6e09456ce 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -504,7 +504,7 @@ struct intel_dp { | |||
| 504 | struct intel_digital_port { | 504 | struct intel_digital_port { |
| 505 | struct intel_encoder base; | 505 | struct intel_encoder base; |
| 506 | enum port port; | 506 | enum port port; |
| 507 | u32 port_reversal; | 507 | u32 saved_port_bits; |
| 508 | struct intel_dp dp; | 508 | struct intel_dp dp; |
| 509 | struct intel_hdmi hdmi; | 509 | struct intel_hdmi hdmi; |
| 510 | }; | 510 | }; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 98df2a0c85bd..2fd3fd5b943e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) | |||
| 785 | } | 785 | } |
| 786 | } | 786 | } |
| 787 | 787 | ||
| 788 | static int hdmi_portclock_limit(struct intel_hdmi *hdmi) | ||
| 789 | { | ||
| 790 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | ||
| 791 | |||
| 792 | if (IS_G4X(dev)) | ||
| 793 | return 165000; | ||
| 794 | else if (IS_HASWELL(dev)) | ||
| 795 | return 300000; | ||
| 796 | else | ||
| 797 | return 225000; | ||
| 798 | } | ||
| 799 | |||
| 788 | static int intel_hdmi_mode_valid(struct drm_connector *connector, | 800 | static int intel_hdmi_mode_valid(struct drm_connector *connector, |
| 789 | struct drm_display_mode *mode) | 801 | struct drm_display_mode *mode) |
| 790 | { | 802 | { |
| 791 | if (mode->clock > 165000) | 803 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) |
| 792 | return MODE_CLOCK_HIGH; | 804 | return MODE_CLOCK_HIGH; |
| 793 | if (mode->clock < 20000) | 805 | if (mode->clock < 20000) |
| 794 | return MODE_CLOCK_LOW; | 806 | return MODE_CLOCK_LOW; |
| @@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 806 | struct drm_device *dev = encoder->base.dev; | 818 | struct drm_device *dev = encoder->base.dev; |
| 807 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; | 819 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; |
| 808 | int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; | 820 | int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; |
| 821 | int portclock_limit = hdmi_portclock_limit(intel_hdmi); | ||
| 809 | int desired_bpp; | 822 | int desired_bpp; |
| 810 | 823 | ||
| 811 | if (intel_hdmi->color_range_auto) { | 824 | if (intel_hdmi->color_range_auto) { |
| @@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 829 | * outputs. We also need to check that the higher clock still fits | 842 | * outputs. We also need to check that the higher clock still fits |
| 830 | * within limits. | 843 | * within limits. |
| 831 | */ | 844 | */ |
| 832 | if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000 | 845 | if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit |
| 833 | && HAS_PCH_SPLIT(dev)) { | 846 | && HAS_PCH_SPLIT(dev)) { |
| 834 | DRM_DEBUG_KMS("picking 12 bpc for HDMI output\n"); | 847 | DRM_DEBUG_KMS("picking 12 bpc for HDMI output\n");
| 835 | desired_bpp = 12*3; | 848 | desired_bpp = 12*3; |
| @@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 846 | pipe_config->pipe_bpp = desired_bpp; | 859 | pipe_config->pipe_bpp = desired_bpp; |
| 847 | } | 860 | } |
| 848 | 861 | ||
| 849 | if (adjusted_mode->clock > 225000) { | 862 | if (adjusted_mode->clock > portclock_limit) { |
| 850 | DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); | 863 | DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); |
| 851 | return false; | 864 | return false; |
| 852 | } | 865 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 021e8daa022d..61348eae2f04 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, | |||
| 109 | flags |= DRM_MODE_FLAG_PVSYNC; | 109 | flags |= DRM_MODE_FLAG_PVSYNC; |
| 110 | 110 | ||
| 111 | pipe_config->adjusted_mode.flags |= flags; | 111 | pipe_config->adjusted_mode.flags |= flags; |
| 112 | |||
| 113 | /* gen2/3 store dither state in pfit control, needs to match */ | ||
| 114 | if (INTEL_INFO(dev)->gen < 4) { | ||
| 115 | tmp = I915_READ(PFIT_CONTROL); | ||
| 116 | |||
| 117 | pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; | ||
| 118 | } | ||
| 112 | } | 119 | } |
| 113 | 120 | ||
| 114 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 121 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
| @@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, | |||
| 290 | 297 | ||
| 291 | intel_pch_panel_fitting(intel_crtc, pipe_config, | 298 | intel_pch_panel_fitting(intel_crtc, pipe_config, |
| 292 | intel_connector->panel.fitting_mode); | 299 | intel_connector->panel.fitting_mode); |
| 293 | return true; | ||
| 294 | } else { | 300 | } else { |
| 295 | intel_gmch_panel_fitting(intel_crtc, pipe_config, | 301 | intel_gmch_panel_fitting(intel_crtc, pipe_config, |
| 296 | intel_connector->panel.fitting_mode); | 302 | intel_connector->panel.fitting_mode); |
| 297 | } | ||
| 298 | 303 | ||
| 299 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 304 | } |
| 300 | pipe_config->timings_set = true; | ||
| 301 | 305 | ||
| 302 | /* | 306 | /* |
| 303 | * XXX: It would be nice to support lower refresh rates on the | 307 | * XXX: It would be nice to support lower refresh rates on the |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 80bea1d3209f..5950888ae1d0 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
| @@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, | |||
| 194 | adjusted_mode->vdisplay == mode->vdisplay) | 194 | adjusted_mode->vdisplay == mode->vdisplay) |
| 195 | goto out; | 195 | goto out; |
| 196 | 196 | ||
| 197 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 198 | pipe_config->timings_set = true; | ||
| 199 | |||
| 197 | switch (fitting_mode) { | 200 | switch (fitting_mode) { |
| 198 | case DRM_MODE_SCALE_CENTER: | 201 | case DRM_MODE_SCALE_CENTER: |
| 199 | /* | 202 | /* |
| @@ -494,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) | |||
| 494 | goto out; | 497 | goto out; |
| 495 | } | 498 | } |
| 496 | 499 | ||
| 497 | /* scale to hardware */ | 500 | /* scale to hardware, but be careful to not overflow */ |
| 498 | level = level * freq / max; | 501 | if (freq < max) |
| 502 | level = level * freq / max; | ||
| 503 | else | ||
| 504 | level = freq / max * level; | ||
| 499 | 505 | ||
| 500 | dev_priv->backlight.level = level; | 506 | dev_priv->backlight.level = level; |
| 501 | if (dev_priv->backlight.device) | 507 | if (dev_priv->backlight.device) |
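
The backlight hunk reorders level * freq / max when freq >= max so the 32-bit intermediate product cannot wrap, accepting coarser rounding in that case; when freq < max the precise multiply-first form is kept, on the assumption that level <= max and the values stay in typical PWM ranges. A worked example showing both branches:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the hunk: multiply first for precision when the product is
 * assumed to fit in 32 bits, divide first otherwise so the
 * intermediate result cannot overflow. */
static uint32_t scale(uint32_t level, uint32_t freq, uint32_t max)
{
    if (freq < max)
        return level * freq / max;  /* precise */
    return freq / max * level;      /* coarse, but overflow-safe */
}

int main(void)
{
    /* Naive level * freq / max wraps 32 bits here (8e9 > 2^32): */
    printf("%u\n", scale(40000, 200000, 50000)); /* 160000 */
    /* Multiply-first keeps full precision when it is safe: */
    printf("%u\n", scale(128, 1000, 255));       /* 501 */
    return 0;
}
```
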
| @@ -512,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) | |||
| 512 | struct drm_i915_private *dev_priv = dev->dev_private; | 518 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 513 | unsigned long flags; | 519 | unsigned long flags; |
| 514 | 520 | ||
| 521 | /* | ||
| 522 | * Do not disable backlight on the vgaswitcheroo path. When switching | ||
| 523 | * away from i915, the other client may depend on i915 to handle the | ||
| 524 | * backlight. This will leave the backlight on unnecessarily when | ||
| 525 | * another client is not activated. | ||
| 526 | */ | ||
| 527 | if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { | ||
| 528 | DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); | ||
| 529 | return; | ||
| 530 | } | ||
| 531 | |||
| 515 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 532 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); |
| 516 | 533 | ||
| 517 | dev_priv->backlight.enabled = false; | 534 | dev_priv->backlight.enabled = false; |
| @@ -580,7 +597,8 @@ void intel_panel_enable_backlight(struct drm_device *dev, | |||
| 580 | POSTING_READ(reg); | 597 | POSTING_READ(reg); |
| 581 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | 598 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); |
| 582 | 599 | ||
| 583 | if (HAS_PCH_SPLIT(dev)) { | 600 | if (HAS_PCH_SPLIT(dev) && |
| 601 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { | ||
| 584 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | 602 | tmp = I915_READ(BLC_PWM_PCH_CTL1); |
| 585 | tmp |= BLM_PCH_PWM_ENABLE; | 603 | tmp |= BLM_PCH_PWM_ENABLE; |
| 586 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; | 604 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d10e6735771f..b0e4a0bd1313 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5063 | } | 5063 | } |
| 5064 | } else { | 5064 | } else { |
| 5065 | if (enable_requested) { | 5065 | if (enable_requested) { |
| 5066 | unsigned long irqflags; | ||
| 5067 | enum pipe p; | ||
| 5068 | |||
| 5066 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5069 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
| 5070 | POSTING_READ(HSW_PWR_WELL_DRIVER); | ||
| 5067 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); | 5071 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); |
| 5072 | |||
| 5073 | /* | ||
| 5074 | * After this, the registers on the pipes that are part | ||
| 5075 | * of the power well will become zero, so we have to | ||
| 5076 | * adjust our counters according to that. | ||
| 5077 | * | ||
| 5078 | * FIXME: Should we do this in general in | ||
| 5079 | * drm_vblank_post_modeset? | ||
| 5080 | */ | ||
| 5081 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
| 5082 | for_each_pipe(p) | ||
| 5083 | if (p != PIPE_A) | ||
| 5084 | dev->last_vblank[p] = 0; | ||
| 5085 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
| 5068 | } | 5086 | } |
| 5069 | } | 5087 | } |
| 5070 | } | 5088 | } |
| @@ -5476,7 +5494,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | |||
| 5476 | gen6_gt_check_fifodbg(dev_priv); | 5494 | gen6_gt_check_fifodbg(dev_priv); |
| 5477 | } | 5495 | } |
| 5478 | 5496 | ||
| 5479 | void intel_gt_reset(struct drm_device *dev) | 5497 | void intel_gt_sanitize(struct drm_device *dev) |
| 5480 | { | 5498 | { |
| 5481 | struct drm_i915_private *dev_priv = dev->dev_private; | 5499 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5482 | 5500 | ||
| @@ -5487,16 +5505,16 @@ void intel_gt_reset(struct drm_device *dev) | |||
| 5487 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 5505 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
| 5488 | __gen6_gt_force_wake_mt_reset(dev_priv); | 5506 | __gen6_gt_force_wake_mt_reset(dev_priv); |
| 5489 | } | 5507 | } |
| 5508 | |||
| 5509 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ | ||
| 5510 | if (INTEL_INFO(dev)->gen >= 6) | ||
| 5511 | intel_disable_gt_powersave(dev); | ||
| 5490 | } | 5512 | } |
| 5491 | 5513 | ||
| 5492 | void intel_gt_init(struct drm_device *dev) | 5514 | void intel_gt_init(struct drm_device *dev) |
| 5493 | { | 5515 | { |
| 5494 | struct drm_i915_private *dev_priv = dev->dev_private; | 5516 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5495 | 5517 | ||
| 5496 | spin_lock_init(&dev_priv->gt_lock); | ||
| 5497 | |||
| 5498 | intel_gt_reset(dev); | ||
| 5499 | |||
| 5500 | if (IS_VALLEYVIEW(dev)) { | 5518 | if (IS_VALLEYVIEW(dev)) { |
| 5501 | dev_priv->gt.force_wake_get = vlv_force_wake_get; | 5519 | dev_priv->gt.force_wake_get = vlv_force_wake_get; |
| 5502 | dev_priv->gt.force_wake_put = vlv_force_wake_put; | 5520 | dev_priv->gt.force_wake_put = vlv_force_wake_put; |
| @@ -5536,6 +5554,12 @@ void intel_gt_init(struct drm_device *dev) | |||
| 5536 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | 5554 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; |
| 5537 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | 5555 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; |
| 5538 | } | 5556 | } |
| 5557 | } | ||
| 5558 | |||
| 5559 | void intel_pm_init(struct drm_device *dev) | ||
| 5560 | { | ||
| 5561 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5562 | |||
| 5539 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | 5563 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, |
| 5540 | intel_gen6_powersave_work); | 5564 | intel_gen6_powersave_work); |
| 5541 | } | 5565 | } |
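These hunks split what was `intel_gt_reset()` into a re-runnable `intel_gt_sanitize()` (force the hardware to a known state, e.g. drop BIOS-enabled RC6) while moving purely-software setup, like the delayed-work initialisation now in `intel_pm_init()`, out of the repeatable path. A schematic sketch of that separation, with invented names and fields:

```c
/* Illustrative only: one-time software construction vs. hardware
 * sanitizing that must stay safe to repeat on resume or after reset. */
struct gt_state {
	int sw_ready;
	int rc6_enabled;	/* BIOS may have left this on */
};

static void gt_sanitize(struct gt_state *gt)
{
	/* idempotent: callable at probe, resume and reset time */
	gt->rc6_enabled = 0;
}

static void gt_init_once(struct gt_state *gt)
{
	/* locks, work items, vtables: re-initialising these on resume
	 * (e.g. INIT_DELAYED_WORK on a queued item) would corrupt state */
	gt->sw_ready = 1;
}
```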
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 664118d8c1d6..079ef0129e74 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
| 968 | 968 | ||
| 969 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | 969 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); |
| 970 | POSTING_READ(mmio); | 970 | POSTING_READ(mmio); |
| 971 | |||
| 972 | /* Flush the TLB for this page */ | ||
| 973 | if (INTEL_INFO(dev)->gen >= 6) { | ||
| 974 | u32 reg = RING_INSTPM(ring->mmio_base); | ||
| 975 | I915_WRITE(reg, | ||
| 976 | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | | ||
| 977 | INSTPM_SYNC_FLUSH)); | ||
| 978 | if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, | ||
| 979 | 1000)) | ||
| 980 | DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", | ||
| 981 | ring->name); | ||
| 982 | } | ||
| 971 | } | 983 | } |
| 972 | 984 | ||
| 973 | static int | 985 | static int |
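The new TLB flush leans on two i915 idioms: masked registers, where the high 16 bits of the written value select which low bits take effect (`_MASKED_BIT_ENABLE(a)` expands to `((a) << 16) | (a)` in the driver), and `wait_for()`, which polls a condition with a millisecond timeout. A standalone sketch of the masked-write semantics; the bit values below are invented:

```c
#include <stdint.h>
#include <stdio.h>

/* i915-style masked register: bits 31..16 of the written value are a
 * write-enable mask for bits 15..0, so unrelated bits stay untouched
 * without a read-modify-write cycle. */
#define MASKED_BIT_ENABLE(b)	((((uint32_t)(b)) << 16) | (b))

static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t instpm = 0x0001;	/* an unrelated bit already set */

	/* set two flag bits (stand-ins for TLB_INVALIDATE|SYNC_FLUSH) */
	instpm = masked_write(instpm, MASKED_BIT_ENABLE(0x0040 | 0x0020));
	printf("0x%04x\n", instpm);	/* 0x0061: bit 0 survives */
	return 0;
}
```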
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 251784aa2225..503a414cbdad 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) | |||
| 29 | struct mga_crtc *mga_crtc = to_mga_crtc(crtc); | 29 | struct mga_crtc *mga_crtc = to_mga_crtc(crtc); |
| 30 | struct drm_device *dev = crtc->dev; | 30 | struct drm_device *dev = crtc->dev; |
| 31 | struct mga_device *mdev = dev->dev_private; | 31 | struct mga_device *mdev = dev->dev_private; |
| 32 | struct drm_framebuffer *fb = crtc->fb; | ||
| 32 | int i; | 33 | int i; |
| 33 | 34 | ||
| 34 | if (!crtc->enabled) | 35 | if (!crtc->enabled) |
| @@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) | |||
| 36 | 37 | ||
| 37 | WREG8(DAC_INDEX + MGA1064_INDEX, 0); | 38 | WREG8(DAC_INDEX + MGA1064_INDEX, 0); |
| 38 | 39 | ||
| 40 | if (fb && fb->bits_per_pixel == 16) { | ||
| 41 | int inc = (fb->depth == 15) ? 8 : 4; | ||
| 42 | u8 r, b; | ||
| 43 | for (i = 0; i < MGAG200_LUT_SIZE; i += inc) { | ||
| 44 | if (fb->depth == 16) { | ||
| 45 | if (i > (MGAG200_LUT_SIZE >> 1)) { | ||
| 46 | r = b = 0; | ||
| 47 | } else { | ||
| 48 | r = mga_crtc->lut_r[i << 1]; | ||
| 49 | b = mga_crtc->lut_b[i << 1]; | ||
| 50 | } | ||
| 51 | } else { | ||
| 52 | r = mga_crtc->lut_r[i]; | ||
| 53 | b = mga_crtc->lut_b[i]; | ||
| 54 | } | ||
| 55 | /* VGA registers */ | ||
| 56 | WREG8(DAC_INDEX + MGA1064_COL_PAL, r); | ||
| 57 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]); | ||
| 58 | WREG8(DAC_INDEX + MGA1064_COL_PAL, b); | ||
| 59 | } | ||
| 60 | return; | ||
| 61 | } | ||
| 39 | for (i = 0; i < MGAG200_LUT_SIZE; i++) { | 62 | for (i = 0; i < MGAG200_LUT_SIZE; i++) { |
| 40 | /* VGA registers */ | 63 | /* VGA registers */ |
| 41 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); | 64 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); |
| @@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
| 877 | 900 | ||
| 878 | pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); | 901 | pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); |
| 879 | if (crtc->fb->bits_per_pixel == 24) | 902 | if (crtc->fb->bits_per_pixel == 24) |
| 880 | pitch = pitch >> (4 - bppshift); | 903 | pitch = (pitch * 3) >> (4 - bppshift); |
| 881 | else | 904 | else |
| 882 | pitch = pitch >> (4 - bppshift); | 905 | pitch = pitch >> (4 - bppshift); |
| 883 | 906 | ||
| @@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc) | |||
| 1251 | kfree(mga_crtc); | 1274 | kfree(mga_crtc); |
| 1252 | } | 1275 | } |
| 1253 | 1276 | ||
| 1277 | static void mga_crtc_disable(struct drm_crtc *crtc) | ||
| 1278 | { | ||
| 1279 | int ret; | ||
| 1280 | DRM_DEBUG_KMS("\n"); | ||
| 1281 | mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
| 1282 | if (crtc->fb) { | ||
| 1283 | struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb); | ||
| 1284 | struct drm_gem_object *obj = mga_fb->obj; | ||
| 1285 | struct mgag200_bo *bo = gem_to_mga_bo(obj); | ||
| 1286 | ret = mgag200_bo_reserve(bo, false); | ||
| 1287 | if (ret) | ||
| 1288 | return; | ||
| 1289 | mgag200_bo_push_sysram(bo); | ||
| 1290 | mgag200_bo_unreserve(bo); | ||
| 1291 | } | ||
| 1292 | crtc->fb = NULL; | ||
| 1293 | } | ||
| 1294 | |||
| 1254 | /* These provide the minimum set of functions required to handle a CRTC */ | 1295 | /* These provide the minimum set of functions required to handle a CRTC */ |
| 1255 | static const struct drm_crtc_funcs mga_crtc_funcs = { | 1296 | static const struct drm_crtc_funcs mga_crtc_funcs = { |
| 1256 | .cursor_set = mga_crtc_cursor_set, | 1297 | .cursor_set = mga_crtc_cursor_set, |
| @@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = { | |||
| 1261 | }; | 1302 | }; |
| 1262 | 1303 | ||
| 1263 | static const struct drm_crtc_helper_funcs mga_helper_funcs = { | 1304 | static const struct drm_crtc_helper_funcs mga_helper_funcs = { |
| 1305 | .disable = mga_crtc_disable, | ||
| 1264 | .dpms = mga_crtc_dpms, | 1306 | .dpms = mga_crtc_dpms, |
| 1265 | .mode_fixup = mga_crtc_mode_fixup, | 1307 | .mode_fixup = mga_crtc_mode_fixup, |
| 1266 | .mode_set = mga_crtc_mode_set, | 1308 | .mode_set = mga_crtc_mode_set, |
| @@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev) | |||
| 1581 | 1623 | ||
| 1582 | drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); | 1624 | drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); |
| 1583 | 1625 | ||
| 1626 | drm_sysfs_connector_add(connector); | ||
| 1627 | |||
| 1584 | mga_connector->i2c = mgag200_i2c_create(dev); | 1628 | mga_connector->i2c = mgag200_i2c_create(dev); |
| 1585 | if (!mga_connector->i2c) | 1629 | if (!mga_connector->i2c) |
| 1586 | DRM_ERROR("failed to add ddc bus\n"); | 1630 | DRM_ERROR("failed to add ddc bus\n"); |
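The new `mga_crtc_disable()` unpins the scanout buffer and pushes it back to system RAM, so cards with very little VRAM reclaim the space when a CRTC turns off. A rough standalone sketch of the evict-on-disable idea; the types and helpers here are invented stand-ins for `mgag200_bo_{reserve,unreserve}` and `mgag200_bo_push_sysram`:

```c
struct bo { int pinned; int in_vram; };

static int bo_reserve(struct bo *bo)
{
	(void)bo;
	return 0;	/* real code can fail and must bail out */
}

static void bo_unreserve(struct bo *bo) { (void)bo; }

static void crtc_disable(struct bo **scanout)
{
	struct bo *bo = *scanout;

	if (bo && bo_reserve(bo) == 0) {
		bo->pinned = 0;
		bo->in_vram = 0;	/* migrate out: VRAM becomes reusable */
		bo_unreserve(bo);
	}
	*scanout = 0;	/* the CRTC no longer references a framebuffer */
}
```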
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 3acb2b044c7b..d70e4a92773b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
| @@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, | |||
| 323 | 323 | ||
| 324 | mgabo->gem.driver_private = NULL; | 324 | mgabo->gem.driver_private = NULL; |
| 325 | mgabo->bo.bdev = &mdev->ttm.bdev; | 325 | mgabo->bo.bdev = &mdev->ttm.bdev; |
| 326 | mgabo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
| 326 | 327 | ||
| 327 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 328 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
| 328 | 329 | ||
| @@ -353,6 +354,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) | |||
| 353 | bo->pin_count++; | 354 | bo->pin_count++; |
| 354 | if (gpu_addr) | 355 | if (gpu_addr) |
| 355 | *gpu_addr = mgag200_bo_gpu_offset(bo); | 356 | *gpu_addr = mgag200_bo_gpu_offset(bo); |
| 357 | return 0; | ||
| 356 | } | 358 | } |
| 357 | 359 | ||
| 358 | mgag200_ttm_placement(bo, pl_flag); | 360 | mgag200_ttm_placement(bo, pl_flag); |
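That one-line `return 0;` matters: `mgag200_bo_pin()` is refcounted, and without the early return an already-pinned buffer fell through into the placement code below. A minimal sketch of the pin-refcount shape, with invented names:

```c
#include <stdio.h>

struct demo_bo {
	int pin_count;
	unsigned long gpu_addr;
};

static int demo_bo_pin(struct demo_bo *bo, unsigned long *gpu_addr)
{
	if (bo->pin_count) {
		/* already resident: bump the count, report the address,
		 * and crucially return instead of re-placing the buffer */
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = bo->gpu_addr;
		return 0;
	}

	bo->gpu_addr = 0x1000;	/* placeholder for the real placement */
	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = bo->gpu_addr;
	return 0;
}

int main(void)
{
	struct demo_bo bo = { 0, 0 };
	unsigned long addr;

	demo_bo_pin(&bo, &addr);
	demo_bo_pin(&bo, &addr);
	printf("pin_count=%d addr=0x%lx\n", bo.pin_count, addr);	/* 2, 0x1000 */
	return 0;
}
```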
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c index d8291724dbd4..7a4e0891c5f8 100644 --- a/drivers/gpu/drm/nouveau/core/core/mm.c +++ b/drivers/gpu/drm/nouveau/core/core/mm.c | |||
| @@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, | |||
| 98 | u32 splitoff; | 98 | u32 splitoff; |
| 99 | u32 s, e; | 99 | u32 s, e; |
| 100 | 100 | ||
| 101 | BUG_ON(!type); | ||
| 102 | |||
| 101 | list_for_each_entry(this, &mm->free, fl_entry) { | 103 | list_for_each_entry(this, &mm->free, fl_entry) { |
| 102 | e = this->offset + this->length; | 104 | e = this->offset + this->length; |
| 103 | s = this->offset; | 105 | s = this->offset; |
| @@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, | |||
| 162 | struct nouveau_mm_node *prev, *this, *next; | 164 | struct nouveau_mm_node *prev, *this, *next; |
| 163 | u32 mask = align - 1; | 165 | u32 mask = align - 1; |
| 164 | 166 | ||
| 167 | BUG_ON(!type); | ||
| 168 | |||
| 165 | list_for_each_entry_reverse(this, &mm->free, fl_entry) { | 169 | list_for_each_entry_reverse(this, &mm->free, fl_entry) { |
| 166 | u32 e = this->offset + this->length; | 170 | u32 e = this->offset + this->length; |
| 167 | u32 s = this->offset; | 171 | u32 s = this->offset; |
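The `BUG_ON(!type)` encodes an allocator invariant: `nouveau_mm` tags free nodes with type 0, so an allocation request of type 0 would be indistinguishable from free space (the ltcg hunk later in this diff switches its `nouveau_mm_tail()` argument from 0 to 1 for the same reason). A tiny userspace analogue of asserting the invariant at the API boundary:

```c
#include <assert.h>

#define NODE_FREE 0	/* reserved by the allocator for unowned nodes */

struct mm_node { int type; };

static void mm_alloc(struct mm_node *n, int type)
{
	assert(type != NODE_FREE);	/* analogue of BUG_ON(!type) */
	n->type = type;
}
```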
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c index 262c9f5f5f60..ce860de43e61 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | |||
| @@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; |
| 94 | nv_engine(priv)->sclass = nvc0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_bsp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c index c46882c83982..ba6aeca0285e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | |||
| @@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nve0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_bsp_cclass; |
| 94 | nv_engine(priv)->sclass = nve0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nve0_bsp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c index 373dbcc523b2..a19e7d79b847 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | |||
| @@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | |||
| 36 | if (data && data[0]) { | 36 | if (data && data[0]) { |
| 37 | for (i = 0; i < size; i++) | 37 | for (i = 0; i < size; i++) |
| 38 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); | 38 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); |
| 39 | for (; i < 0x60; i++) | ||
| 40 | nv_wr32(priv, 0x61c440 + soff, (i << 8)); | ||
| 39 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); | 41 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); |
| 40 | } else | 42 | } else |
| 41 | if (data) { | 43 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c index dc57e24fc1df..717639386ced 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | |||
| @@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | |||
| 41 | if (data && data[0]) { | 41 | if (data && data[0]) { |
| 42 | for (i = 0; i < size; i++) | 42 | for (i = 0; i < size; i++) |
| 43 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); | 43 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); |
| 44 | for (; i < 0x60; i++) | ||
| 45 | nv_wr32(priv, 0x10ec00 + soff, (i << 8)); | ||
| 44 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); | 46 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); |
| 45 | } else | 47 | } else |
| 46 | if (data) { | 48 | if (data) { |
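Both HDA hunks pad the remainder of the 0x60-byte ELD buffer with zeroes after writing the descriptor, so stale bytes from a previously connected monitor cannot trail the new data. The pattern, sketched standalone with the indexed register writes replaced by a plain array:

```c
#include <stddef.h>
#include <stdint.h>

#define ELD_BUF_SIZE 0x60

static uint8_t eld_buf[ELD_BUF_SIZE];	/* stand-in for the hw buffer */

static void write_eld(const uint8_t *data, size_t size)
{
	size_t i;

	for (i = 0; i < size && i < ELD_BUF_SIZE; i++)
		eld_buf[i] = data[i];
	for (; i < ELD_BUF_SIZE; i++)
		eld_buf[i] = 0;	/* clear whatever the last monitor left */
}
```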
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c index ab1e918469a8..526b75242899 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | |||
| @@ -47,14 +47,8 @@ int | |||
| 47 | nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | 47 | nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) |
| 48 | { | 48 | { |
| 49 | struct nv50_disp_priv *priv = (void *)object->engine; | 49 | struct nv50_disp_priv *priv = (void *)object->engine; |
| 50 | struct nouveau_bios *bios = nouveau_bios(priv); | ||
| 51 | const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; | ||
| 52 | const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; | 50 | const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; |
| 53 | const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; | ||
| 54 | const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); | 51 | const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); |
| 55 | const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); | ||
| 56 | struct dcb_output outp; | ||
| 57 | u8 ver, hdr; | ||
| 58 | u32 data; | 52 | u32 data; |
| 59 | int ret = -EINVAL; | 53 | int ret = -EINVAL; |
| 60 | 54 | ||
| @@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | |||
| 62 | return -EINVAL; | 56 | return -EINVAL; |
| 63 | data = *(u32 *)args; | 57 | data = *(u32 *)args; |
| 64 | 58 | ||
| 65 | if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp)) | ||
| 66 | return -ENODEV; | ||
| 67 | 59 | ||
| 68 | switch (mthd & ~0x3f) { | 60 | switch (mthd & ~0x3f) { |
| 69 | case NV50_DISP_SOR_PWR: | 61 | case NV50_DISP_SOR_PWR: |
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c index 3c7a31f7590e..e03fc8e4dc1d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c | |||
| @@ -23,6 +23,25 @@ | |||
| 23 | #include <engine/falcon.h> | 23 | #include <engine/falcon.h> |
| 24 | #include <subdev/timer.h> | 24 | #include <subdev/timer.h> |
| 25 | 25 | ||
| 26 | void | ||
| 27 | nouveau_falcon_intr(struct nouveau_subdev *subdev) | ||
| 28 | { | ||
| 29 | struct nouveau_falcon *falcon = (void *)subdev; | ||
| 30 | u32 dispatch = nv_ro32(falcon, 0x01c); | ||
| 31 | u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); | ||
| 32 | |||
| 33 | if (intr & 0x00000010) { | ||
| 34 | nv_debug(falcon, "ucode halted\n"); | ||
| 35 | nv_wo32(falcon, 0x004, 0x00000010); | ||
| 36 | intr &= ~0x00000010; | ||
| 37 | } | ||
| 38 | |||
| 39 | if (intr) { | ||
| 40 | nv_error(falcon, "unhandled intr 0x%08x\n", intr); | ||
| 41 | nv_wo32(falcon, 0x004, intr); | ||
| 42 | } | ||
| 43 | } | ||
| 44 | |||
| 26 | u32 | 45 | u32 |
| 27 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) | 46 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) |
| 28 | { | 47 | { |
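The new `nouveau_falcon_intr()` shows a common interrupt-service shape: mask the raw status down to the sources of interest (here via the dispatch register, as in the hunk), acknowledge the bits you recognise, then log, but still ack, anything left so an unexpected source cannot wedge the IRQ line. Sketched outside the kernel, with the register replaced by a variable:

```c
#include <stdint.h>
#include <stdio.h>

#define INTR_HALT 0x00000010u

static uint32_t ack_reg;	/* stand-in for the write-1-to-clear register */

static void falcon_intr(uint32_t status, uint32_t dispatch)
{
	/* same masking expression as the hunk above */
	uint32_t intr = status & dispatch & ~(dispatch >> 16);

	if (intr & INTR_HALT) {
		printf("ucode halted\n");
		ack_reg = INTR_HALT;	/* acknowledge the handled bit */
		intr &= ~INTR_HALT;
	}
	if (intr) {
		printf("unhandled intr 0x%08x\n", intr);
		ack_reg = intr;		/* ack anyway so the line drops */
	}
}
```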
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c index 49ecbb859b25..c19004301309 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | |||
| @@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 265 | int | 265 | int |
| 266 | nv31_mpeg_init(struct nouveau_object *object) | 266 | nv31_mpeg_init(struct nouveau_object *object) |
| 267 | { | 267 | { |
| 268 | struct nouveau_engine *engine = nv_engine(object->engine); | 268 | struct nouveau_engine *engine = nv_engine(object); |
| 269 | struct nv31_mpeg_priv *priv = (void *)engine; | 269 | struct nv31_mpeg_priv *priv = (void *)object; |
| 270 | struct nouveau_fb *pfb = nouveau_fb(object); | 270 | struct nouveau_fb *pfb = nouveau_fb(object); |
| 271 | int ret, i; | 271 | int ret, i; |
| 272 | 272 | ||
| @@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object) | |||
| 284 | /* PMPEG init */ | 284 | /* PMPEG init */ |
| 285 | nv_wr32(priv, 0x00b32c, 0x00000000); | 285 | nv_wr32(priv, 0x00b32c, 0x00000000); |
| 286 | nv_wr32(priv, 0x00b314, 0x00000100); | 286 | nv_wr32(priv, 0x00b314, 0x00000100); |
| 287 | nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031); | 287 | if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv)) |
| 288 | nv_wr32(priv, 0x00b220, 0x00000044); | ||
| 289 | else | ||
| 290 | nv_wr32(priv, 0x00b220, 0x00000031); | ||
| 288 | nv_wr32(priv, 0x00b300, 0x02001ec1); | 291 | nv_wr32(priv, 0x00b300, 0x02001ec1); |
| 289 | nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); | 292 | nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); |
| 290 | 293 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c index f7c581ad1991..dd6196072e9c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | |||
| @@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent, | |||
| 61 | if (ret) | 61 | if (ret) |
| 62 | return ret; | 62 | return ret; |
| 63 | 63 | ||
| 64 | nv_wo32(&chan->base.base, 0x78, 0x02001ec1); | ||
| 64 | return 0; | 65 | return 0; |
| 65 | } | 66 | } |
| 66 | 67 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c index 98072c1ff360..73719aaa62d6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | |||
| @@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00000002; | 92 | nv_subdev(priv)->unit = 0x00000002; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; |
| 94 | nv_engine(priv)->sclass = nvc0_ppp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_ppp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c index 1879229b60eb..ac1f62aace72 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | |||
| @@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nvc0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_vp_cclass; |
| 94 | nv_engine(priv)->sclass = nvc0_vp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_vp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c index d28ecbf7bc49..d4c3108479c9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | |||
| @@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nve0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_vp_cclass; |
| 94 | nv_engine(priv)->sclass = nve0_vp_sclass; | 95 | nv_engine(priv)->sclass = nve0_vp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c index 0639bc59d0a5..5f6ede7c4892 100644 --- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c +++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c | |||
| @@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object) | |||
| 118 | return ret; | 118 | return ret; |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0, | 121 | if (fw->size > 0x40000) { |
| 122 | nv_warn(xtensa, "firmware %s too large\n", name); | ||
| 123 | release_firmware(fw); | ||
| 124 | return -EINVAL; | ||
| 125 | } | ||
| 126 | |||
| 127 | ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0, | ||
| 122 | &xtensa->gpu_fw); | 128 | &xtensa->gpu_fw); |
| 123 | if (ret) { | 129 | if (ret) { |
| 124 | release_firmware(fw); | 130 | release_firmware(fw); |
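The xtensa fix is a classic bounds check: the backing object is now a fixed 0x40000 bytes, and oversized firmware is rejected up front instead of sizing the object from the externally supplied blob. Schematically, in kernel error-code convention:

```c
#include <errno.h>
#include <stddef.h>

#define FW_MAX_SIZE 0x40000	/* capacity of the fixed backing object */

/* validate before allocating or copying; returns 0 or negative errno */
static int fw_load(const void *data, size_t size)
{
	if (size > FW_MAX_SIZE)
		return -EINVAL;	/* refuse; caller releases the blob */

	/* ... allocate FW_MAX_SIZE bytes, copy `size` bytes into it ... */
	(void)data;
	return 0;
}
```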
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h index 1edec386ab36..181aa7da524d 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h | |||
| @@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, | |||
| 72 | struct nouveau_oclass *, u32, bool, const char *, | 72 | struct nouveau_oclass *, u32, bool, const char *, |
| 73 | const char *, int, void **); | 73 | const char *, int, void **); |
| 74 | 74 | ||
| 75 | void nouveau_falcon_intr(struct nouveau_subdev *subdev); | ||
| 76 | |||
| 75 | #define _nouveau_falcon_dtor _nouveau_engine_dtor | 77 | #define _nouveau_falcon_dtor _nouveau_engine_dtor |
| 76 | int _nouveau_falcon_init(struct nouveau_object *); | 78 | int _nouveau_falcon_init(struct nouveau_object *); |
| 77 | int _nouveau_falcon_fini(struct nouveau_object *, bool); | 79 | int _nouveau_falcon_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index d5502267c30f..9d2cd2006250 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h | |||
| @@ -20,8 +20,8 @@ nouveau_mc(void *obj) | |||
| 20 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; | 20 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; |
| 21 | } | 21 | } |
| 22 | 22 | ||
| 23 | #define nouveau_mc_create(p,e,o,d) \ | 23 | #define nouveau_mc_create(p,e,o,m,d) \ |
| 24 | nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) | 24 | nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) |
| 25 | #define nouveau_mc_destroy(p) ({ \ | 25 | #define nouveau_mc_destroy(p) ({ \ |
| 26 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ | 26 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ |
| 27 | }) | 27 | }) |
| @@ -33,7 +33,8 @@ nouveau_mc(void *obj) | |||
| 33 | }) | 33 | }) |
| 34 | 34 | ||
| 35 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, | 35 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, |
| 36 | struct nouveau_oclass *, int, void **); | 36 | struct nouveau_oclass *, const struct nouveau_mc_intr *, |
| 37 | int, void **); | ||
| 37 | void _nouveau_mc_dtor(struct nouveau_object *); | 38 | void _nouveau_mc_dtor(struct nouveau_object *); |
| 38 | int _nouveau_mc_init(struct nouveau_object *); | 39 | int _nouveau_mc_init(struct nouveau_object *); |
| 39 | int _nouveau_mc_fini(struct nouveau_object *, bool); | 40 | int _nouveau_mc_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h index f2e87b105666..fcf57fa309bf 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h | |||
| @@ -55,7 +55,7 @@ struct nouveau_vma { | |||
| 55 | struct nouveau_vm { | 55 | struct nouveau_vm { |
| 56 | struct nouveau_vmmgr *vmm; | 56 | struct nouveau_vmmgr *vmm; |
| 57 | struct nouveau_mm mm; | 57 | struct nouveau_mm mm; |
| 58 | int refcount; | 58 | struct kref refcount; |
| 59 | 59 | ||
| 60 | struct list_head pgd_list; | 60 | struct list_head pgd_list; |
| 61 | atomic_t engref[NVDEV_SUBDEV_NR]; | 61 | atomic_t engref[NVDEV_SUBDEV_NR]; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h index 6c974dd83e8b..db9d6ddde52c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | |||
| @@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); | |||
| 81 | void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, | 81 | void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, |
| 82 | u32 pitch, u32 flags, struct nouveau_fb_tile *); | 82 | u32 pitch, u32 flags, struct nouveau_fb_tile *); |
| 83 | 83 | ||
| 84 | void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **); | 84 | void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *); |
| 85 | extern int nv50_fb_memtype[0x80]; | 85 | extern int nv50_fb_memtype[0x80]; |
| 86 | 86 | ||
| 87 | #endif | 87 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c index 19e3a9a63a02..ab7ef0ac9e34 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c | |||
| @@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 40 | return ret; | 40 | return ret; |
| 41 | 41 | ||
| 42 | switch (pfb914 & 0x00000003) { | 42 | switch (pfb914 & 0x00000003) { |
| 43 | case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break; | 43 | case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break; |
| 44 | case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break; | 44 | case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break; |
| 45 | case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break; | 45 | case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break; |
| 46 | case 0x00000003: break; | 46 | case 0x00000003: break; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; | 49 | ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; |
| 50 | pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; | 50 | ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; |
| 51 | pfb->ram->tags = nv_rd32(pfb, 0x100320); | 51 | ram->tags = nv_rd32(pfb, 0x100320); |
| 52 | return 0; | 52 | return 0; |
| 53 | } | 53 | } |
| 54 | 54 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c index 7192aa6e5577..63a6aab86028 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c | |||
| @@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 38 | if (ret) | 38 | if (ret) |
| 39 | return ret; | 39 | return ret; |
| 40 | 40 | ||
| 41 | pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; | 41 | ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; |
| 42 | pfb->ram->type = NV_MEM_TYPE_STOLEN; | 42 | ram->type = NV_MEM_TYPE_STOLEN; |
| 43 | return 0; | 43 | return 0; |
| 44 | } | 44 | } |
| 45 | 45 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c index af5aa7ee8ad9..903baff77fdd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | |||
| @@ -27,17 +27,10 @@ | |||
| 27 | #include "priv.h" | 27 | #include "priv.h" |
| 28 | 28 | ||
| 29 | void | 29 | void |
| 30 | nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | 30 | __nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem) |
| 31 | { | 31 | { |
| 32 | struct nouveau_mm_node *this; | 32 | struct nouveau_mm_node *this; |
| 33 | struct nouveau_mem *mem; | ||
| 34 | 33 | ||
| 35 | mem = *pmem; | ||
| 36 | *pmem = NULL; | ||
| 37 | if (unlikely(mem == NULL)) | ||
| 38 | return; | ||
| 39 | |||
| 40 | mutex_lock(&pfb->base.mutex); | ||
| 41 | while (!list_empty(&mem->regions)) { | 34 | while (!list_empty(&mem->regions)) { |
| 42 | this = list_first_entry(&mem->regions, typeof(*this), rl_entry); | 35 | this = list_first_entry(&mem->regions, typeof(*this), rl_entry); |
| 43 | 36 | ||
| @@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | |||
| 46 | } | 39 | } |
| 47 | 40 | ||
| 48 | nouveau_mm_free(&pfb->tags, &mem->tag); | 41 | nouveau_mm_free(&pfb->tags, &mem->tag); |
| 42 | } | ||
| 43 | |||
| 44 | void | ||
| 45 | nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | ||
| 46 | { | ||
| 47 | struct nouveau_mem *mem = *pmem; | ||
| 48 | |||
| 49 | *pmem = NULL; | ||
| 50 | if (unlikely(mem == NULL)) | ||
| 51 | return; | ||
| 52 | |||
| 53 | mutex_lock(&pfb->base.mutex); | ||
| 54 | __nv50_ram_put(pfb, mem); | ||
| 49 | mutex_unlock(&pfb->base.mutex); | 55 | mutex_unlock(&pfb->base.mutex); |
| 50 | 56 | ||
| 51 | kfree(mem); | 57 | kfree(mem); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c index 9c3634acbb9d..cf97c4de4a6b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | |||
| @@ -33,11 +33,19 @@ void | |||
| 33 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | 33 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) |
| 34 | { | 34 | { |
| 35 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); | 35 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); |
| 36 | struct nouveau_mem *mem = *pmem; | ||
| 36 | 37 | ||
| 37 | if ((*pmem)->tag) | 38 | *pmem = NULL; |
| 38 | ltcg->tags_free(ltcg, &(*pmem)->tag); | 39 | if (unlikely(mem == NULL)) |
| 40 | return; | ||
| 39 | 41 | ||
| 40 | nv50_ram_put(pfb, pmem); | 42 | mutex_lock(&pfb->base.mutex); |
| 43 | if (mem->tag) | ||
| 44 | ltcg->tags_free(ltcg, &mem->tag); | ||
| 45 | __nv50_ram_put(pfb, mem); | ||
| 46 | mutex_unlock(&pfb->base.mutex); | ||
| 47 | |||
| 48 | kfree(mem); | ||
| 41 | } | 49 | } |
| 42 | 50 | ||
| 43 | int | 51 | int |
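The two `ram_put` hunks adopt the kernel's double-underscore convention: `__nv50_ram_put()` does the work and assumes the caller holds `pfb->base.mutex`, while the plain wrappers take the lock. That lets `nvc0_ram_put()` free its LTC compression tags inside the same critical section rather than taking the mutex twice. A pthread sketch of the shape, with invented types:

```c
#include <pthread.h>
#include <stdlib.h>

struct mem { int tag; };

static pthread_mutex_t fb_mutex = PTHREAD_MUTEX_INITIALIZER;

/* double-underscore variant: the lock is already held by the caller */
static void __mem_put(struct mem *mem)
{
	(void)mem;	/* release regions, compression tags, ... */
}

static void mem_put(struct mem **pmem)
{
	struct mem *mem = *pmem;

	*pmem = NULL;
	if (!mem)
		return;

	pthread_mutex_lock(&fb_mutex);
	/* a chip-specific caller can do extra teardown here, under the
	 * same lock, before invoking the shared helper */
	__mem_put(mem);
	pthread_mutex_unlock(&fb_mutex);

	free(mem);
}
```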
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c index bf489dcf46e2..c4c1d415e7fe 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | |||
| @@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) | |||
| 103 | int i; | 103 | int i; |
| 104 | 104 | ||
| 105 | intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); | 105 | intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); |
| 106 | if (nv_device(priv)->chipset >= 0x90) | 106 | if (nv_device(priv)->chipset > 0x92) |
| 107 | intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); | 107 | intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); |
| 108 | 108 | ||
| 109 | hi = (intr0 & 0x0000ffff) | (intr1 << 16); | 109 | hi = (intr0 & 0x0000ffff) | (intr1 << 16); |
| @@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) | |||
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | nv_wr32(priv, 0xe054, intr0); | 117 | nv_wr32(priv, 0xe054, intr0); |
| 118 | if (nv_device(priv)->chipset >= 0x90) | 118 | if (nv_device(priv)->chipset > 0x92) |
| 119 | nv_wr32(priv, 0xe074, intr1); | 119 | nv_wr32(priv, 0xe074, intr1); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| @@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 146 | int ret; | 146 | int ret; |
| 147 | 147 | ||
| 148 | ret = nouveau_gpio_create(parent, engine, oclass, | 148 | ret = nouveau_gpio_create(parent, engine, oclass, |
| 149 | nv_device(parent)->chipset >= 0x90 ? 32 : 16, | 149 | nv_device(parent)->chipset > 0x92 ? 32 : 16, |
| 150 | &priv); | 150 | &priv); |
| 151 | *pobject = nv_object(priv); | 151 | *pobject = nv_object(priv); |
| 152 | if (ret) | 152 | if (ret) |
| @@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object) | |||
| 182 | /* disable, and ack any pending gpio interrupts */ | 182 | /* disable, and ack any pending gpio interrupts */ |
| 183 | nv_wr32(priv, 0xe050, 0x00000000); | 183 | nv_wr32(priv, 0xe050, 0x00000000); |
| 184 | nv_wr32(priv, 0xe054, 0xffffffff); | 184 | nv_wr32(priv, 0xe054, 0xffffffff); |
| 185 | if (nv_device(priv)->chipset >= 0x90) { | 185 | if (nv_device(priv)->chipset > 0x92) { |
| 186 | nv_wr32(priv, 0xe070, 0x00000000); | 186 | nv_wr32(priv, 0xe070, 0x00000000); |
| 187 | nv_wr32(priv, 0xe074, 0xffffffff); | 187 | nv_wr32(priv, 0xe074, 0xffffffff); |
| 188 | } | 188 | } |
| @@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend) | |||
| 195 | { | 195 | { |
| 196 | struct nv50_gpio_priv *priv = (void *)object; | 196 | struct nv50_gpio_priv *priv = (void *)object; |
| 197 | nv_wr32(priv, 0xe050, 0x00000000); | 197 | nv_wr32(priv, 0xe050, 0x00000000); |
| 198 | if (nv_device(priv)->chipset >= 0x90) | 198 | if (nv_device(priv)->chipset > 0x92) |
| 199 | nv_wr32(priv, 0xe070, 0x00000000); | 199 | nv_wr32(priv, 0xe070, 0x00000000); |
| 200 | return nouveau_gpio_fini(&priv->base, suspend); | 200 | return nouveau_gpio_fini(&priv->base, suspend); |
| 201 | } | 201 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index bcca883018f4..cce65cc56514 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | |||
| @@ -30,8 +30,9 @@ struct nvc0_ltcg_priv { | |||
| 30 | struct nouveau_ltcg base; | 30 | struct nouveau_ltcg base; |
| 31 | u32 part_nr; | 31 | u32 part_nr; |
| 32 | u32 subp_nr; | 32 | u32 subp_nr; |
| 33 | struct nouveau_mm tags; | ||
| 34 | u32 num_tags; | 33 | u32 num_tags; |
| 34 | u32 tag_base; | ||
| 35 | struct nouveau_mm tags; | ||
| 35 | struct nouveau_mm_node *tag_ram; | 36 | struct nouveau_mm_node *tag_ram; |
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| @@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
| 117 | u32 tag_size, tag_margin, tag_align; | 118 | u32 tag_size, tag_margin, tag_align; |
| 118 | int ret; | 119 | int ret; |
| 119 | 120 | ||
| 120 | nv_wr32(priv, 0x17e8d8, priv->part_nr); | ||
| 121 | if (nv_device(pfb)->card_type >= NV_E0) | ||
| 122 | nv_wr32(priv, 0x17e000, priv->part_nr); | ||
| 123 | |||
| 124 | /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ | 121 | /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ |
| 125 | priv->num_tags = (pfb->ram->size >> 17) / 4; | 122 | priv->num_tags = (pfb->ram->size >> 17) / 4; |
| 126 | if (priv->num_tags > (1 << 17)) | 123 | if (priv->num_tags > (1 << 17)) |
| @@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
| 142 | tag_size += tag_align; | 139 | tag_size += tag_align; |
| 143 | tag_size = (tag_size + 0xfff) >> 12; /* round up */ | 140 | tag_size = (tag_size + 0xfff) >> 12; /* round up */ |
| 144 | 141 | ||
| 145 | ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, | 142 | ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1, |
| 146 | &priv->tag_ram); | 143 | &priv->tag_ram); |
| 147 | if (ret) { | 144 | if (ret) { |
| 148 | priv->num_tags = 0; | 145 | priv->num_tags = 0; |
| @@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
| 152 | tag_base += tag_align - 1; | 149 | tag_base += tag_align - 1; |
| 153 | ret = do_div(tag_base, tag_align); | 150 | ret = do_div(tag_base, tag_align); |
| 154 | 151 | ||
| 155 | nv_wr32(priv, 0x17e8d4, tag_base); | 152 | priv->tag_base = tag_base; |
| 156 | } | 153 | } |
| 157 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); | 154 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); |
| 158 | 155 | ||
| @@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 182 | } | 179 | } |
| 183 | priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; | 180 | priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; |
| 184 | 181 | ||
| 185 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ | ||
| 186 | |||
| 187 | ret = nvc0_ltcg_init_tag_ram(pfb, priv); | 182 | ret = nvc0_ltcg_init_tag_ram(pfb, priv); |
| 188 | if (ret) | 183 | if (ret) |
| 189 | return ret; | 184 | return ret; |
| @@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object) | |||
| 209 | nouveau_ltcg_destroy(ltcg); | 204 | nouveau_ltcg_destroy(ltcg); |
| 210 | } | 205 | } |
| 211 | 206 | ||
| 207 | static int | ||
| 208 | nvc0_ltcg_init(struct nouveau_object *object) | ||
| 209 | { | ||
| 210 | struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; | ||
| 211 | struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; | ||
| 212 | int ret; | ||
| 213 | |||
| 214 | ret = nouveau_ltcg_init(ltcg); | ||
| 215 | if (ret) | ||
| 216 | return ret; | ||
| 217 | |||
| 218 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ | ||
| 219 | nv_wr32(priv, 0x17e8d8, priv->part_nr); | ||
| 220 | if (nv_device(ltcg)->card_type >= NV_E0) | ||
| 221 | nv_wr32(priv, 0x17e000, priv->part_nr); | ||
| 222 | nv_wr32(priv, 0x17e8d4, priv->tag_base); | ||
| 223 | return 0; | ||
| 224 | } | ||
| 225 | |||
| 212 | struct nouveau_oclass | 226 | struct nouveau_oclass |
| 213 | nvc0_ltcg_oclass = { | 227 | nvc0_ltcg_oclass = { |
| 214 | .handle = NV_SUBDEV(LTCG, 0xc0), | 228 | .handle = NV_SUBDEV(LTCG, 0xc0), |
| 215 | .ofuncs = &(struct nouveau_ofuncs) { | 229 | .ofuncs = &(struct nouveau_ofuncs) { |
| 216 | .ctor = nvc0_ltcg_ctor, | 230 | .ctor = nvc0_ltcg_ctor, |
| 217 | .dtor = nvc0_ltcg_dtor, | 231 | .dtor = nvc0_ltcg_dtor, |
| 218 | .init = _nouveau_ltcg_init, | 232 | .init = nvc0_ltcg_init, |
| 219 | .fini = _nouveau_ltcg_fini, | 233 | .fini = _nouveau_ltcg_fini, |
| 220 | }, | 234 | }, |
| 221 | }; | 235 | }; |
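Moving the `0x17e8d4`/`0x17e8d8` writes from the constructor into a new `.init` hook is presumably about suspend/resume: constructor code runs once at probe, but `.init` runs again on every resume, so register state the hardware loses across a power cycle gets reprogrammed. The computed values (`part_nr`, `tag_base`) are cached in `priv` at ctor time and replayed. The split, with invented names:

```c
/* compute once, program often */
struct ltcg_state {
	unsigned part_nr;
	unsigned tag_base;
};

static void ltcg_ctor(struct ltcg_state *priv)
{
	/* expensive discovery and allocation, done exactly once */
	priv->part_nr  = 4;		/* read from hardware in reality */
	priv->tag_base = 0x1000;	/* carved out of VRAM in reality */
}

static void ltcg_init(const struct ltcg_state *priv)
{
	/* called at probe *and* after resume: replay volatile hw state,
	 * e.g. nv_wr32(priv, 0x17e8d8, priv->part_nr); */
	(void)priv;
}
```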
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 1c0330b8c9a4..ec9cd6f10f91 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c | |||
| @@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object) | |||
| 80 | 80 | ||
| 81 | int | 81 | int |
| 82 | nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | 82 | nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, |
| 83 | struct nouveau_oclass *oclass, int length, void **pobject) | 83 | struct nouveau_oclass *oclass, |
| 84 | const struct nouveau_mc_intr *intr_map, | ||
| 85 | int length, void **pobject) | ||
| 84 | { | 86 | { |
| 85 | struct nouveau_device *device = nv_device(parent); | 87 | struct nouveau_device *device = nv_device(parent); |
| 86 | struct nouveau_mc *pmc; | 88 | struct nouveau_mc *pmc; |
| @@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 92 | if (ret) | 94 | if (ret) |
| 93 | return ret; | 95 | return ret; |
| 94 | 96 | ||
| 97 | pmc->intr_map = intr_map; | ||
| 98 | |||
| 95 | ret = request_irq(device->pdev->irq, nouveau_mc_intr, | 99 | ret = request_irq(device->pdev->irq, nouveau_mc_intr, |
| 96 | IRQF_SHARED, "nouveau", pmc); | 100 | IRQF_SHARED, "nouveau", pmc); |
| 97 | if (ret < 0) | 101 | if (ret < 0) |
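Passing `intr_map` into `nouveau_mc_create_()` is not just plumbing: `request_irq()` here uses `IRQF_SHARED`, so `nouveau_mc_intr()` can run for another device's interrupt the moment the call succeeds, before the subclass constructor gets a chance to assign `priv->base.intr_map`. The dispatch table must therefore be in place first, which is exactly what the reordered hunk does. A userspace caricature of the ordering hazard:

```c
#include <stdio.h>

struct dev { const int *dispatch; };

/* stand-in for request_irq() on a shared line: the handler may fire
 * immediately, even before registration returns */
static void register_handler(void (*cb)(struct dev *), struct dev *d)
{
	cb(d);	/* simulate an interrupt arriving at once */
}

static void handler(struct dev *d)
{
	/* dereferences the table: crashes if it were assigned only
	 * after registration */
	printf("first entry: %d\n", d->dispatch[0]);
}

int main(void)
{
	static const int table[] = { 42 };
	struct dev d = { table };	/* set state *before* registering */

	register_handler(handler, &d);
	return 0;
}
```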
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c index 8c769715227b..64aa4edb0d9d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | |||
| @@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 50 | struct nv04_mc_priv *priv; | 50 | struct nv04_mc_priv *priv; |
| 51 | int ret; | 51 | int ret; |
| 52 | 52 | ||
| 53 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 53 | ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); |
| 54 | *pobject = nv_object(priv); | 54 | *pobject = nv_object(priv); |
| 55 | if (ret) | 55 | if (ret) |
| 56 | return ret; | 56 | return ret; |
| 57 | 57 | ||
| 58 | priv->base.intr_map = nv04_mc_intr; | ||
| 59 | return 0; | 58 | return 0; |
| 60 | } | 59 | } |
| 61 | 60 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 51919371810f..d9891782bf28 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | |||
| @@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 36 | struct nv44_mc_priv *priv; | 36 | struct nv44_mc_priv *priv; |
| 37 | int ret; | 37 | int ret; |
| 38 | 38 | ||
| 39 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 39 | ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); |
| 40 | *pobject = nv_object(priv); | 40 | *pobject = nv_object(priv); |
| 41 | if (ret) | 41 | if (ret) |
| 42 | return ret; | 42 | return ret; |
| 43 | 43 | ||
| 44 | priv->base.intr_map = nv04_mc_intr; | ||
| 45 | return 0; | 44 | return 0; |
| 46 | } | 45 | } |
| 47 | 46 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index 0cb322a5e72c..2b1afe225db8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | |||
| @@ -41,7 +41,7 @@ nv50_mc_intr[] = { | |||
| 41 | { 0x04000000, NVDEV_ENGINE_DISP }, | 41 | { 0x04000000, NVDEV_ENGINE_DISP }, |
| 42 | { 0x10000000, NVDEV_SUBDEV_BUS }, | 42 | { 0x10000000, NVDEV_SUBDEV_BUS }, |
| 43 | { 0x80000000, NVDEV_ENGINE_SW }, | 43 | { 0x80000000, NVDEV_ENGINE_SW }, |
| 44 | { 0x0000d101, NVDEV_SUBDEV_FB }, | 44 | { 0x0002d101, NVDEV_SUBDEV_FB }, |
| 45 | {}, | 45 | {}, |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| @@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 53 | struct nv50_mc_priv *priv; | 53 | struct nv50_mc_priv *priv; |
| 54 | int ret; | 54 | int ret; |
| 55 | 55 | ||
| 56 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 56 | ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv); |
| 57 | *pobject = nv_object(priv); | 57 | *pobject = nv_object(priv); |
| 58 | if (ret) | 58 | if (ret) |
| 59 | return ret; | 59 | return ret; |
| 60 | 60 | ||
| 61 | priv->base.intr_map = nv50_mc_intr; | ||
| 62 | return 0; | 61 | return 0; |
| 63 | } | 62 | } |
| 64 | 63 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index e82fd21b5041..0d57b4d3e001 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | |||
| @@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 54 | struct nv98_mc_priv *priv; | 54 | struct nv98_mc_priv *priv; |
| 55 | int ret; | 55 | int ret; |
| 56 | 56 | ||
| 57 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 57 | ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv); |
| 58 | *pobject = nv_object(priv); | 58 | *pobject = nv_object(priv); |
| 59 | if (ret) | 59 | if (ret) |
| 60 | return ret; | 60 | return ret; |
| 61 | 61 | ||
| 62 | priv->base.intr_map = nv98_mc_intr; | ||
| 63 | return 0; | 62 | return 0; |
| 64 | } | 63 | } |
| 65 | 64 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index c5da3babbc62..104175c5a2dd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | |||
| @@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 57 | struct nvc0_mc_priv *priv; | 57 | struct nvc0_mc_priv *priv; |
| 58 | int ret; | 58 | int ret; |
| 59 | 59 | ||
| 60 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 60 | ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); |
| 61 | *pobject = nv_object(priv); | 61 | *pobject = nv_object(priv); |
| 62 | if (ret) | 62 | if (ret) |
| 63 | return ret; | 63 | return ret; |
| 64 | 64 | ||
| 65 | priv->base.intr_map = nvc0_mc_intr; | ||
| 66 | return 0; | 65 | return 0; |
| 67 | } | 66 | } |
| 68 | 67 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c index 67fcb6c852ac..ef3133e7575c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c | |||
| @@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, | |||
| 361 | 361 | ||
| 362 | INIT_LIST_HEAD(&vm->pgd_list); | 362 | INIT_LIST_HEAD(&vm->pgd_list); |
| 363 | vm->vmm = vmm; | 363 | vm->vmm = vmm; |
| 364 | vm->refcount = 1; | 364 | kref_init(&vm->refcount); |
| 365 | vm->fpde = offset >> (vmm->pgt_bits + 12); | 365 | vm->fpde = offset >> (vmm->pgt_bits + 12); |
| 366 | vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); | 366 | vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); |
| 367 | 367 | ||
| @@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) | |||
| 441 | } | 441 | } |
| 442 | 442 | ||
| 443 | static void | 443 | static void |
| 444 | nouveau_vm_del(struct nouveau_vm *vm) | 444 | nouveau_vm_del(struct kref *kref) |
| 445 | { | 445 | { |
| 446 | struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount); | ||
| 446 | struct nouveau_vm_pgd *vpgd, *tmp; | 447 | struct nouveau_vm_pgd *vpgd, *tmp; |
| 447 | 448 | ||
| 448 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | 449 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { |
| @@ -458,27 +459,19 @@ int | |||
| 458 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, | 459 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, |
| 459 | struct nouveau_gpuobj *pgd) | 460 | struct nouveau_gpuobj *pgd) |
| 460 | { | 461 | { |
| 461 | struct nouveau_vm *vm; | 462 | if (ref) { |
| 462 | int ret; | 463 | int ret = nouveau_vm_link(ref, pgd); |
| 463 | |||
| 464 | vm = ref; | ||
| 465 | if (vm) { | ||
| 466 | ret = nouveau_vm_link(vm, pgd); | ||
| 467 | if (ret) | 464 | if (ret) |
| 468 | return ret; | 465 | return ret; |
| 469 | 466 | ||
| 470 | vm->refcount++; | 467 | kref_get(&ref->refcount); |
| 471 | } | 468 | } |
| 472 | 469 | ||
| 473 | vm = *ptr; | 470 | if (*ptr) { |
| 474 | *ptr = ref; | 471 | nouveau_vm_unlink(*ptr, pgd); |
| 475 | 472 | kref_put(&(*ptr)->refcount, nouveau_vm_del); | |
| 476 | if (vm) { | ||
| 477 | nouveau_vm_unlink(vm, pgd); | ||
| 478 | |||
| 479 | if (--vm->refcount == 0) | ||
| 480 | nouveau_vm_del(vm); | ||
| 481 | } | 473 | } |
| 482 | 474 | ||
| 475 | *ptr = ref; | ||
| 483 | return 0; | 476 | return 0; |
| 484 | } | 477 | } |
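Replacing the open-coded `int refcount` with `struct kref` buys atomic counting and a single, enforced release path. A minimal kernel-side sketch of the pattern the hunk adopts; the `obj` type is illustrative, not nouveau's:

```c
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref refcount;
	/* ... payload ... */
};

/* runs exactly once, when the last reference is dropped;
 * container_of() recovers the outer object from the embedded kref */
static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcount);

	kfree(o);
}

static struct obj *obj_new(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		kref_init(&o->refcount);	/* count starts at 1 */
	return o;
}

/* share:   kref_get(&o->refcount);
 * release: kref_put(&o->refcount, obj_release); */
```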
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 0782bd2f1e04..6a13ffb53bdb 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c | |||
| @@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
| 606 | regp->ramdac_a34 = 0x1; | 606 | regp->ramdac_a34 = 0x1; |
| 607 | } | 607 | } |
| 608 | 608 | ||
| 609 | static int | ||
| 610 | nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | ||
| 611 | { | ||
| 612 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
| 613 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | ||
| 614 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 615 | int ret; | ||
| 616 | |||
| 617 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | ||
| 618 | if (ret == 0) { | ||
| 619 | if (disp->image[nv_crtc->index]) | ||
| 620 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
| 621 | nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]); | ||
| 622 | } | ||
| 623 | |||
| 624 | return ret; | ||
| 625 | } | ||
| 626 | |||
| 609 | /** | 627 | /** |
| 610 | * Sets up registers for the given mode/adjusted_mode pair. | 628 | * Sets up registers for the given mode/adjusted_mode pair. |
| 611 | * | 629 | * |
| @@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 622 | struct drm_device *dev = crtc->dev; | 640 | struct drm_device *dev = crtc->dev; |
| 623 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 641 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 624 | struct nouveau_drm *drm = nouveau_drm(dev); | 642 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 643 | int ret; | ||
| 625 | 644 | ||
| 626 | NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); | 645 | NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); |
| 627 | drm_mode_debug_printmodeline(adjusted_mode); | 646 | drm_mode_debug_printmodeline(adjusted_mode); |
| 628 | 647 | ||
| 648 | ret = nv_crtc_swap_fbs(crtc, old_fb); | ||
| 649 | if (ret) | ||
| 650 | return ret; | ||
| 651 | |||
| 629 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ | 652 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ |
| 630 | nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); | 653 | nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); |
| 631 | 654 | ||
| @@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) | |||
| 722 | 745 | ||
| 723 | static void nv_crtc_destroy(struct drm_crtc *crtc) | 746 | static void nv_crtc_destroy(struct drm_crtc *crtc) |
| 724 | { | 747 | { |
| 748 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
| 725 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 749 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 726 | 750 | ||
| 727 | if (!nv_crtc) | 751 | if (!nv_crtc) |
| @@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) | |||
| 729 | 753 | ||
| 730 | drm_crtc_cleanup(crtc); | 754 | drm_crtc_cleanup(crtc); |
| 731 | 755 | ||
| 756 | if (disp->image[nv_crtc->index]) | ||
| 757 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
| 758 | nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); | ||
| 759 | |||
| 732 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 760 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
| 733 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 761 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
| 734 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 762 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
| @@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc) | |||
| 754 | } | 782 | } |
| 755 | 783 | ||
| 756 | static void | 784 | static void |
| 785 | nv_crtc_disable(struct drm_crtc *crtc) | ||
| 786 | { | ||
| 787 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
| 788 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 789 | if (disp->image[nv_crtc->index]) | ||
| 790 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
| 791 | nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); | ||
| 792 | } | ||
| 793 | |||
| 794 | static void | ||
| 757 | nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, | 795 | nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, |
| 758 | uint32_t size) | 796 | uint32_t size) |
| 759 | { | 797 | { |
| @@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
| 791 | struct drm_framebuffer *drm_fb; | 829 | struct drm_framebuffer *drm_fb; |
| 792 | struct nouveau_framebuffer *fb; | 830 | struct nouveau_framebuffer *fb; |
| 793 | int arb_burst, arb_lwm; | 831 | int arb_burst, arb_lwm; |
| 794 | int ret; | ||
| 795 | 832 | ||
| 796 | NV_DEBUG(drm, "index %d\n", nv_crtc->index); | 833 | NV_DEBUG(drm, "index %d\n", nv_crtc->index); |
| 797 | 834 | ||
| @@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
| 801 | return 0; | 838 | return 0; |
| 802 | } | 839 | } |
| 803 | 840 | ||
| 804 | |||
| 805 | /* If atomic, we want to switch to the fb we were passed, so | 841 | /* If atomic, we want to switch to the fb we were passed, so |
| 806 | * now we update pointers to do that. (We don't pin; just | 842 | * now we update pointers to do that. |
| 807 | * assume we're already pinned and update the base address.) | ||
| 808 | */ | 843 | */ |
| 809 | if (atomic) { | 844 | if (atomic) { |
| 810 | drm_fb = passed_fb; | 845 | drm_fb = passed_fb; |
| @@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
| 812 | } else { | 847 | } else { |
| 813 | drm_fb = crtc->fb; | 848 | drm_fb = crtc->fb; |
| 814 | fb = nouveau_framebuffer(crtc->fb); | 849 | fb = nouveau_framebuffer(crtc->fb); |
| 815 | /* If not atomic, we can go ahead and pin, and unpin the | ||
| 816 | * old fb we were passed. | ||
| 817 | */ | ||
| 818 | ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); | ||
| 819 | if (ret) | ||
| 820 | return ret; | ||
| 821 | |||
| 822 | if (passed_fb) { | ||
| 823 | struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); | ||
| 824 | nouveau_bo_unpin(ofb->nvbo); | ||
| 825 | } | ||
| 826 | } | 850 | } |
| 827 | 851 | ||
| 828 | nv_crtc->fb.offset = fb->nvbo->bo.offset; | 852 | nv_crtc->fb.offset = fb->nvbo->bo.offset; |
| @@ -877,6 +901,9 @@ static int | |||
| 877 | nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | 901 | nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, |
| 878 | struct drm_framebuffer *old_fb) | 902 | struct drm_framebuffer *old_fb) |
| 879 | { | 903 | { |
| 904 | int ret = nv_crtc_swap_fbs(crtc, old_fb); | ||
| 905 | if (ret) | ||
| 906 | return ret; | ||
| 880 | return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); | 907 | return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); |
| 881 | } | 908 | } |
| 882 | 909 | ||
| @@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { | |||
| 1027 | .mode_set_base = nv04_crtc_mode_set_base, | 1054 | .mode_set_base = nv04_crtc_mode_set_base, |
| 1028 | .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, | 1055 | .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, |
| 1029 | .load_lut = nv_crtc_gamma_load, | 1056 | .load_lut = nv_crtc_gamma_load, |
| 1057 | .disable = nv_crtc_disable, | ||
| 1030 | }; | 1058 | }; |
| 1031 | 1059 | ||
| 1032 | int | 1060 | int |
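
The dispnv04/crtc.c hunks above move framebuffer pinning out of nv04_crtc_do_mode_set_base(): the CRTC now remembers its pinned scanout buffer in disp->image[] and a new .disable hook drops that pin. Below is a minimal standalone sketch of that ownership model, assuming nothing beyond what the hunks show; `buffer`, `pin`, `unpin`, `swap_scanout` and `disable_scanout` are illustrative stand-ins, not the driver's API.

```c
#include <stddef.h>

struct buffer { int pin_refcnt; };

static void pin(struct buffer *b)   { b->pin_refcnt++; }
static void unpin(struct buffer *b) { b->pin_refcnt--; }

struct head { struct buffer *image; };  /* like disp->image[index] */

/* Pin the new framebuffer, then drop the pin held on whatever the head
 * scanned out before: the ownership model of nv_crtc_swap_fbs(). */
static void swap_scanout(struct head *head, struct buffer *new_fb)
{
	pin(new_fb);
	if (head->image)
		unpin(head->image);
	head->image = new_fb;
}

/* On disable, release the last scanout pin, like nv_crtc_disable(). */
static void disable_scanout(struct head *head)
{
	if (head->image)
		unpin(head->image);
	head->image = NULL;
}

int main(void)
{
	struct buffer fb0 = {0}, fb1 = {0};
	struct head head = {0};

	swap_scanout(&head, &fb0);  /* fb0 pinned */
	swap_scanout(&head, &fb1);  /* fb0 unpinned, fb1 pinned */
	disable_scanout(&head);     /* fb1 unpinned */
	return fb0.pin_refcnt + fb1.pin_refcnt;  /* 0: balanced */
}
```
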
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index a0a031dad13f..9928187f0a7d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h | |||
| @@ -81,6 +81,7 @@ struct nv04_display { | |||
| 81 | uint32_t saved_vga_font[4][16384]; | 81 | uint32_t saved_vga_font[4][16384]; |
| 82 | uint32_t dac_users[4]; | 82 | uint32_t dac_users[4]; |
| 83 | struct nouveau_object *core; | 83 | struct nouveau_object *core; |
| 84 | struct nouveau_bo *image[2]; | ||
| 84 | }; | 85 | }; |
| 85 | 86 | ||
| 86 | static inline struct nv04_display * | 87 | static inline struct nv04_display * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4b1afb131380..af20fba3a1a4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
| 148 | 148 | ||
| 149 | if (unlikely(nvbo->gem)) | 149 | if (unlikely(nvbo->gem)) |
| 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
| 151 | WARN_ON(nvbo->pin_refcnt > 0); | ||
| 151 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); | 152 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
| 152 | kfree(nvbo); | 153 | kfree(nvbo); |
| 153 | } | 154 | } |
| @@ -197,6 +198,17 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, | |||
| 197 | size_t acc_size; | 198 | size_t acc_size; |
| 198 | int ret; | 199 | int ret; |
| 199 | int type = ttm_bo_type_device; | 200 | int type = ttm_bo_type_device; |
| 201 | int lpg_shift = 12; | ||
| 202 | int max_size; | ||
| 203 | |||
| 204 | if (drm->client.base.vm) | ||
| 205 | lpg_shift = drm->client.base.vm->vmm->lpg_shift; | ||
| 206 | max_size = INT_MAX & ~((1 << lpg_shift) - 1); | ||
| 207 | |||
| 208 | if (size <= 0 || size > max_size) { | ||
| 209 | nv_warn(drm, "skipped size %x\n", (u32)size); | ||
| 210 | return -EINVAL; | ||
| 211 | } | ||
| 200 | 212 | ||
| 201 | if (sg) | 213 | if (sg) |
| 202 | type = ttm_bo_type_sg; | 214 | type = ttm_bo_type_sg; |
| @@ -340,13 +352,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) | |||
| 340 | { | 352 | { |
| 341 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 353 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 342 | struct ttm_buffer_object *bo = &nvbo->bo; | 354 | struct ttm_buffer_object *bo = &nvbo->bo; |
| 343 | int ret; | 355 | int ret, ref; |
| 344 | 356 | ||
| 345 | ret = ttm_bo_reserve(bo, false, false, false, 0); | 357 | ret = ttm_bo_reserve(bo, false, false, false, 0); |
| 346 | if (ret) | 358 | if (ret) |
| 347 | return ret; | 359 | return ret; |
| 348 | 360 | ||
| 349 | if (--nvbo->pin_refcnt) | 361 | ref = --nvbo->pin_refcnt; |
| 362 | WARN_ON_ONCE(ref < 0); | ||
| 363 | if (ref) | ||
| 350 | goto out; | 364 | goto out; |
| 351 | 365 | ||
| 352 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); | 366 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
| @@ -578,7 +592,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
| 578 | int ret = RING_SPACE(chan, 2); | 592 | int ret = RING_SPACE(chan, 2); |
| 579 | if (ret == 0) { | 593 | if (ret == 0) { |
| 580 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); | 594 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
| 581 | OUT_RING (chan, handle); | 595 | OUT_RING (chan, handle & 0x0000ffff); |
| 582 | FIRE_RING (chan); | 596 | FIRE_RING (chan); |
| 583 | } | 597 | } |
| 584 | return ret; | 598 | return ret; |
| @@ -973,7 +987,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
| 973 | struct ttm_mem_reg *old_mem = &bo->mem; | 987 | struct ttm_mem_reg *old_mem = &bo->mem; |
| 974 | int ret; | 988 | int ret; |
| 975 | 989 | ||
| 976 | mutex_lock(&chan->cli->mutex); | 990 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); |
| 977 | 991 | ||
| 978 | /* create temporary vmas for the transfer and attach them to the | 992 | /* create temporary vmas for the transfer and attach them to the |
| 979 | * old nouveau_mem node, these will get cleaned up after ttm has | 993 | * old nouveau_mem node, these will get cleaned up after ttm has |
| @@ -1014,7 +1028,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
| 1014 | struct ttm_mem_reg *, struct ttm_mem_reg *); | 1028 | struct ttm_mem_reg *, struct ttm_mem_reg *); |
| 1015 | int (*init)(struct nouveau_channel *, u32 handle); | 1029 | int (*init)(struct nouveau_channel *, u32 handle); |
| 1016 | } _methods[] = { | 1030 | } _methods[] = { |
| 1017 | { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, | 1031 | { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, |
| 1018 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, | 1032 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
| 1019 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1033 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, |
| 1020 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1034 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, |
| @@ -1034,7 +1048,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
| 1034 | struct nouveau_channel *chan; | 1048 | struct nouveau_channel *chan; |
| 1035 | u32 handle = (mthd->engine << 16) | mthd->oclass; | 1049 | u32 handle = (mthd->engine << 16) | mthd->oclass; |
| 1036 | 1050 | ||
| 1037 | if (mthd->init == nve0_bo_move_init) | 1051 | if (mthd->engine) |
| 1038 | chan = drm->cechan; | 1052 | chan = drm->cechan; |
| 1039 | else | 1053 | else |
| 1040 | chan = drm->channel; | 1054 | chan = drm->channel; |
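
The nouveau_bo.c changes above add two guards: nouveau_bo_new() now rejects sizes that cannot be represented at the VM's large-page granularity, and nouveau_bo_unpin() warns once if the pin count would underflow (which pairs with the removal of the pin_refcnt workaround in nouveau_gem.c further down). A small userspace sketch of the size clamp follows; validate_bo_size() is a hypothetical name for the check, and the -1 return stands in for the kernel's -EINVAL.

```c
#include <limits.h>
#include <stdio.h>

static int validate_bo_size(long size, int lpg_shift)
{
	/* largest size that still fits in an int at large-page granularity */
	int max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		fprintf(stderr, "skipped size %ld\n", size);
		return -1;  /* the driver returns -EINVAL here */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", validate_bo_size(4096, 12));      /* 0: accepted */
	printf("%d\n", validate_bo_size(0, 12));         /* -1: rejected */
	printf("%d\n", validate_bo_size(LONG_MAX, 17));  /* -1: too large */
	return 0;
}
```
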
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 708b2d1c0037..a03e75deacaf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
| 138 | { | 138 | { |
| 139 | struct nouveau_framebuffer *nouveau_fb; | 139 | struct nouveau_framebuffer *nouveau_fb; |
| 140 | struct drm_gem_object *gem; | 140 | struct drm_gem_object *gem; |
| 141 | int ret; | 141 | int ret = -ENOMEM; |
| 142 | 142 | ||
| 143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | 143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); |
| 144 | if (!gem) | 144 | if (!gem) |
| @@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
| 146 | 146 | ||
| 147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); | 147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); |
| 148 | if (!nouveau_fb) | 148 | if (!nouveau_fb) |
| 149 | return ERR_PTR(-ENOMEM); | 149 | goto err_unref; |
| 150 | 150 | ||
| 151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); | 151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); |
| 152 | if (ret) { | 152 | if (ret) |
| 153 | drm_gem_object_unreference(gem); | 153 | goto err; |
| 154 | return ERR_PTR(ret); | ||
| 155 | } | ||
| 156 | 154 | ||
| 157 | return &nouveau_fb->base; | 155 | return &nouveau_fb->base; |
| 156 | |||
| 157 | err: | ||
| 158 | kfree(nouveau_fb); | ||
| 159 | err_unref: | ||
| 160 | drm_gem_object_unreference(gem); | ||
| 161 | return ERR_PTR(ret); | ||
| 158 | } | 162 | } |
| 159 | 163 | ||
| 160 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | 164 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { |
| @@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 524 | struct nouveau_page_flip_state *s; | 528 | struct nouveau_page_flip_state *s; |
| 525 | struct nouveau_channel *chan = NULL; | 529 | struct nouveau_channel *chan = NULL; |
| 526 | struct nouveau_fence *fence; | 530 | struct nouveau_fence *fence; |
| 527 | struct list_head res; | 531 | struct ttm_validate_buffer resv[2] = { |
| 528 | struct ttm_validate_buffer res_val[2]; | 532 | { .bo = &old_bo->bo }, |
| 533 | { .bo = &new_bo->bo }, | ||
| 534 | }; | ||
| 529 | struct ww_acquire_ctx ticket; | 535 | struct ww_acquire_ctx ticket; |
| 536 | LIST_HEAD(res); | ||
| 530 | int ret; | 537 | int ret; |
| 531 | 538 | ||
| 532 | if (!drm->channel) | 539 | if (!drm->channel) |
| @@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 545 | chan = drm->channel; | 552 | chan = drm->channel; |
| 546 | spin_unlock(&old_bo->bo.bdev->fence_lock); | 553 | spin_unlock(&old_bo->bo.bdev->fence_lock); |
| 547 | 554 | ||
| 548 | mutex_lock(&chan->cli->mutex); | ||
| 549 | |||
| 550 | if (new_bo != old_bo) { | 555 | if (new_bo != old_bo) { |
| 551 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | 556 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); |
| 552 | if (likely(!ret)) { | 557 | if (ret) |
| 553 | res_val[0].bo = &old_bo->bo; | 558 | goto fail_free; |
| 554 | res_val[1].bo = &new_bo->bo; | ||
| 555 | INIT_LIST_HEAD(&res); | ||
| 556 | list_add_tail(&res_val[0].head, &res); | ||
| 557 | list_add_tail(&res_val[1].head, &res); | ||
| 558 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
| 559 | if (ret) | ||
| 560 | nouveau_bo_unpin(new_bo); | ||
| 561 | } | ||
| 562 | } else | ||
| 563 | ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); | ||
| 564 | 559 | ||
| 565 | if (ret) { | 560 | list_add(&resv[1].head, &res); |
| 566 | mutex_unlock(&chan->cli->mutex); | ||
| 567 | goto fail_free; | ||
| 568 | } | 561 | } |
| 562 | list_add(&resv[0].head, &res); | ||
| 563 | |||
| 564 | mutex_lock(&chan->cli->mutex); | ||
| 565 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
| 566 | if (ret) | ||
| 567 | goto fail_unpin; | ||
| 569 | 568 | ||
| 570 | /* Initialize a page flip struct */ | 569 | /* Initialize a page flip struct */ |
| 571 | *s = (struct nouveau_page_flip_state) | 570 | *s = (struct nouveau_page_flip_state) |
| @@ -576,10 +575,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 576 | /* Emit a page flip */ | 575 | /* Emit a page flip */ |
| 577 | if (nv_device(drm->device)->card_type >= NV_50) { | 576 | if (nv_device(drm->device)->card_type >= NV_50) { |
| 578 | ret = nv50_display_flip_next(crtc, fb, chan, 0); | 577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); |
| 579 | if (ret) { | 578 | if (ret) |
| 580 | mutex_unlock(&chan->cli->mutex); | ||
| 581 | goto fail_unreserve; | 579 | goto fail_unreserve; |
| 582 | } | 580 | } else { |
| 581 | struct nv04_display *dispnv04 = nv04_display(dev); | ||
| 582 | nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]); | ||
| 583 | } | 583 | } |
| 584 | 584 | ||
| 585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
| @@ -590,22 +590,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 590 | /* Update the crtc struct and cleanup */ | 590 | /* Update the crtc struct and cleanup */ |
| 591 | crtc->fb = fb; | 591 | crtc->fb = fb; |
| 592 | 592 | ||
| 593 | if (old_bo != new_bo) { | 593 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); |
| 594 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); | 594 | if (old_bo != new_bo) |
| 595 | nouveau_bo_unpin(old_bo); | 595 | nouveau_bo_unpin(old_bo); |
| 596 | } else { | ||
| 597 | nouveau_bo_fence(new_bo, fence); | ||
| 598 | ttm_bo_unreserve(&new_bo->bo); | ||
| 599 | } | ||
| 600 | nouveau_fence_unref(&fence); | 596 | nouveau_fence_unref(&fence); |
| 601 | return 0; | 597 | return 0; |
| 602 | 598 | ||
| 603 | fail_unreserve: | 599 | fail_unreserve: |
| 604 | if (old_bo != new_bo) { | 600 | ttm_eu_backoff_reservation(&ticket, &res); |
| 605 | ttm_eu_backoff_reservation(&ticket, &res); | 601 | fail_unpin: |
| 602 | mutex_unlock(&chan->cli->mutex); | ||
| 603 | if (old_bo != new_bo) | ||
| 606 | nouveau_bo_unpin(new_bo); | 604 | nouveau_bo_unpin(new_bo); |
| 607 | } else | ||
| 608 | ttm_bo_unreserve(&new_bo->bo); | ||
| 609 | fail_free: | 605 | fail_free: |
| 610 | kfree(s); | 606 | kfree(s); |
| 611 | return ret; | 607 | return ret; |
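
The nouveau_display.c page-flip rework above builds the ttm_validate_buffer list up front, pins the new buffer, then reserves everything in a single ttm_eu_reserve_buffers() call and backs off on failure. A rough userspace analogue of that reserve/backoff discipline, using pthread mutexes as stand-ins for buffer reservations (the real code uses a ww_acquire_ctx ticket rather than trylock):

```c
#include <pthread.h>
#include <stdio.h>

#define NBUF 2

static int reserve_all(pthread_mutex_t *locks[], int n)
{
	for (int i = 0; i < n; i++) {
		if (pthread_mutex_trylock(locks[i]) != 0) {
			while (--i >= 0)  /* back off what we hold */
				pthread_mutex_unlock(locks[i]);
			return -1;
		}
	}
	return 0;
}

static void fence_and_release(pthread_mutex_t *locks[], int n)
{
	/* in TTM this attaches the flip fence before unreserving */
	for (int i = 0; i < n; i++)
		pthread_mutex_unlock(locks[i]);
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t *bos[NBUF] = { &a, &b };

	if (reserve_all(bos, NBUF) == 0) {
		puts("both buffers reserved; emit flip, then fence + unreserve");
		fence_and_release(bos, NBUF);
	}
	return 0;
}
```
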
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 218a4b522fe5..61972668fd05 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
| 192 | 192 | ||
| 193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; | 193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; |
| 194 | arg1 = 1; | 194 | arg1 = 1; |
| 195 | } else | ||
| 196 | if (device->chipset >= 0xa3 && | ||
| 197 | device->chipset != 0xaa && | ||
| 198 | device->chipset != 0xac) { | ||
| 199 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, | ||
| 200 | NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, | ||
| 201 | &drm->cechan); | ||
| 202 | if (ret) | ||
| 203 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | ||
| 204 | |||
| 205 | arg0 = NvDmaFB; | ||
| 206 | arg1 = NvDmaTT; | ||
| 195 | } else { | 207 | } else { |
| 196 | arg0 = NvDmaFB; | 208 | arg0 = NvDmaFB; |
| 197 | arg1 = NvDmaTT; | 209 | arg1 = NvDmaTT; |
| @@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
| 284 | return 0; | 296 | return 0; |
| 285 | } | 297 | } |
| 286 | 298 | ||
| 287 | static struct lock_class_key drm_client_lock_class_key; | ||
| 288 | |||
| 289 | static int | 299 | static int |
| 290 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 300 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
| 291 | { | 301 | { |
| @@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 297 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 307 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
| 298 | if (ret) | 308 | if (ret) |
| 299 | return ret; | 309 | return ret; |
| 300 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
| 301 | 310 | ||
| 302 | dev->dev_private = drm; | 311 | dev->dev_private = drm; |
| 303 | drm->dev = dev; | 312 | drm->dev = dev; |
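
The nouveau_drm.c hunk above gives nva3-and-newer chipsets a dedicated copy-engine channel, with the 0xaa and 0xac IGPs excluded. The gate reduces to a one-line predicate; wants_ce_channel() below is a hypothetical name for it, mirroring the new `else if` arm exactly.

```c
#include <stdbool.h>
#include <stdio.h>

static bool wants_ce_channel(unsigned chipset)
{
	return chipset >= 0xa3 && chipset != 0xaa && chipset != 0xac;
}

int main(void)
{
	printf("0xa3: %d\n", wants_ce_channel(0xa3));  /* 1 */
	printf("0xaa: %d\n", wants_ce_channel(0xaa));  /* 0 */
	printf("0x50: %d\n", wants_ce_channel(0x50));  /* 0 */
	return 0;
}
```
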
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 9352010030e9..8f6d63d7edd3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -385,6 +385,7 @@ out_unlock: | |||
| 385 | mutex_unlock(&dev->struct_mutex); | 385 | mutex_unlock(&dev->struct_mutex); |
| 386 | if (chan) | 386 | if (chan) |
| 387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); | 387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); |
| 388 | nouveau_bo_unmap(nvbo); | ||
| 388 | out_unpin: | 389 | out_unpin: |
| 389 | nouveau_bo_unpin(nvbo); | 390 | nouveau_bo_unpin(nvbo); |
| 390 | out_unref: | 391 | out_unref: |
| @@ -397,7 +398,8 @@ void | |||
| 397 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) | 398 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) |
| 398 | { | 399 | { |
| 399 | struct nouveau_drm *drm = nouveau_drm(dev); | 400 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 400 | drm_fb_helper_hotplug_event(&drm->fbcon->helper); | 401 | if (drm->fbcon) |
| 402 | drm_fb_helper_hotplug_event(&drm->fbcon->helper); | ||
| 401 | } | 403 | } |
| 402 | 404 | ||
| 403 | static int | 405 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 1680d9187bab..be3149932c2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
| @@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
| 143 | int ret; | 143 | int ret; |
| 144 | 144 | ||
| 145 | fence->channel = chan; | 145 | fence->channel = chan; |
| 146 | fence->timeout = jiffies + (3 * DRM_HZ); | 146 | fence->timeout = jiffies + (15 * DRM_HZ); |
| 147 | fence->sequence = ++fctx->sequence; | 147 | fence->sequence = ++fctx->sequence; |
| 148 | 148 | ||
| 149 | ret = fctx->emit(fence); | 149 | ret = fctx->emit(fence); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index e72d09c068a8..830cb7bad922 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) | |||
| 50 | return; | 50 | return; |
| 51 | nvbo->gem = NULL; | 51 | nvbo->gem = NULL; |
| 52 | 52 | ||
| 53 | /* Lockdep hates you for doing reserve with gem object lock held */ | ||
| 54 | if (WARN_ON_ONCE(nvbo->pin_refcnt)) { | ||
| 55 | nvbo->pin_refcnt = 1; | ||
| 56 | nouveau_bo_unpin(nvbo); | ||
| 57 | } | ||
| 58 | |||
| 59 | if (gem->import_attach) | 53 | if (gem->import_attach) |
| 60 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | 54 | drm_prime_gem_destroy(gem, nvbo->bo.sg); |
| 61 | 55 | ||
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 8e47a9bae8c3..22aa9963ea6f 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c | |||
| @@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan) | |||
| 76 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 76 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
| 77 | struct nouveau_object *object; | 77 | struct nouveau_object *object; |
| 78 | u32 start = mem->start * PAGE_SIZE; | 78 | u32 start = mem->start * PAGE_SIZE; |
| 79 | u32 limit = mem->start + mem->size - 1; | 79 | u32 limit = start + mem->size - 1; |
| 80 | int ret = 0; | 80 | int ret = 0; |
| 81 | 81 | ||
| 82 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 82 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
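
The one-line nv17_fence.c fix above corrects a unit mix-up: mem->start is a page index, so the old limit added pages to bytes. A worked example assuming a 4 KiB page size:

```c
#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096, start_page = 16, size = 4096;

	unsigned start = start_page * page_size;  /* 0x10000, in bytes */
	unsigned bad   = start_page + size - 1;   /* 0x100f: pages + bytes */
	unsigned good  = start + size - 1;        /* 0x10fff: last valid byte */

	printf("bad=0x%x good=0x%x\n", bad, good);
	return 0;
}
```
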
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index 3af5bcd0b203..625f80d53dc2 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c | |||
| @@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, | |||
| 131 | if (clk < pll->vco1.max_freq) | 131 | if (clk < pll->vco1.max_freq) |
| 132 | pll->vco2.max_freq = 0; | 132 | pll->vco2.max_freq = 0; |
| 133 | 133 | ||
| 134 | pclk->pll_calc(pclk, pll, clk, &coef); | 134 | ret = pclk->pll_calc(pclk, pll, clk, &coef); |
| 135 | if (ret == 0) | 135 | if (ret == 0) |
| 136 | return -ERANGE; | 136 | return -ERANGE; |
| 137 | 137 | ||
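
The nv40_pm.c fix above is a dropped-return bug: the old code called pclk->pll_calc() without assigning its result, then tested `ret`, which still held an unrelated earlier value. A minimal standalone reproduction of the bug class, with pll_calc() as a stand-in for the real method:

```c
#include <stdio.h>

static int pll_calc(int clk) { return clk > 0 ? clk / 2 : 0; }

int main(void)
{
	int ret;

	ret = pll_calc(100);  /* the fix: capture the return value */
	if (ret == 0)
		return -34;   /* the driver returns -ERANGE here */

	printf("computed coefficient stand-in: %d\n", ret);
	return 0;
}
```
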
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 54dc6355b0c2..8b40a36c1b57 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -355,6 +355,7 @@ struct nv50_oimm { | |||
| 355 | 355 | ||
| 356 | struct nv50_head { | 356 | struct nv50_head { |
| 357 | struct nouveau_crtc base; | 357 | struct nouveau_crtc base; |
| 358 | struct nouveau_bo *image; | ||
| 358 | struct nv50_curs curs; | 359 | struct nv50_curs curs; |
| 359 | struct nv50_sync sync; | 360 | struct nv50_sync sync; |
| 360 | struct nv50_ovly ovly; | 361 | struct nv50_ovly ovly; |
| @@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 517 | { | 518 | { |
| 518 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 519 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
| 519 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 520 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 521 | struct nv50_head *head = nv50_head(crtc); | ||
| 520 | struct nv50_sync *sync = nv50_sync(crtc); | 522 | struct nv50_sync *sync = nv50_sync(crtc); |
| 521 | int head = nv_crtc->index, ret; | ||
| 522 | u32 *push; | 523 | u32 *push; |
| 524 | int ret; | ||
| 523 | 525 | ||
| 524 | swap_interval <<= 4; | 526 | swap_interval <<= 4; |
| 525 | if (swap_interval == 0) | 527 | if (swap_interval == 0) |
| @@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 537 | return ret; | 539 | return ret; |
| 538 | 540 | ||
| 539 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); | 541 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); |
| 540 | OUT_RING (chan, NvEvoSema0 + head); | 542 | OUT_RING (chan, NvEvoSema0 + nv_crtc->index); |
| 541 | OUT_RING (chan, sync->addr ^ 0x10); | 543 | OUT_RING (chan, sync->addr ^ 0x10); |
| 542 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); | 544 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); |
| 543 | OUT_RING (chan, sync->data + 1); | 545 | OUT_RING (chan, sync->data + 1); |
| @@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 546 | OUT_RING (chan, sync->data); | 548 | OUT_RING (chan, sync->data); |
| 547 | } else | 549 | } else |
| 548 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { | 550 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { |
| 549 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 551 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
| 550 | ret = RING_SPACE(chan, 12); | 552 | ret = RING_SPACE(chan, 12); |
| 551 | if (ret) | 553 | if (ret) |
| 552 | return ret; | 554 | return ret; |
| @@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 565 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); | 567 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); |
| 566 | } else | 568 | } else |
| 567 | if (chan) { | 569 | if (chan) { |
| 568 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 570 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
| 569 | ret = RING_SPACE(chan, 10); | 571 | ret = RING_SPACE(chan, 10); |
| 570 | if (ret) | 572 | if (ret) |
| 571 | return ret; | 573 | return ret; |
| @@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 630 | evo_mthd(push, 0x0080, 1); | 632 | evo_mthd(push, 0x0080, 1); |
| 631 | evo_data(push, 0x00000000); | 633 | evo_data(push, 0x00000000); |
| 632 | evo_kick(push, sync); | 634 | evo_kick(push, sync); |
| 635 | |||
| 636 | nouveau_bo_ref(nv_fb->nvbo, &head->image); | ||
| 633 | return 0; | 637 | return 0; |
| 634 | } | 638 | } |
| 635 | 639 | ||
| @@ -1038,18 +1042,17 @@ static int | |||
| 1038 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | 1042 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) |
| 1039 | { | 1043 | { |
| 1040 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | 1044 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); |
| 1045 | struct nv50_head *head = nv50_head(crtc); | ||
| 1041 | int ret; | 1046 | int ret; |
| 1042 | 1047 | ||
| 1043 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | 1048 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); |
| 1044 | if (ret) | 1049 | if (ret == 0) { |
| 1045 | return ret; | 1050 | if (head->image) |
| 1046 | 1051 | nouveau_bo_unpin(head->image); | |
| 1047 | if (old_fb) { | 1052 | nouveau_bo_ref(nvfb->nvbo, &head->image); |
| 1048 | nvfb = nouveau_framebuffer(old_fb); | ||
| 1049 | nouveau_bo_unpin(nvfb->nvbo); | ||
| 1050 | } | 1053 | } |
| 1051 | 1054 | ||
| 1052 | return 0; | 1055 | return ret; |
| 1053 | } | 1056 | } |
| 1054 | 1057 | ||
| 1055 | static int | 1058 | static int |
| @@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) | |||
| 1198 | } | 1201 | } |
| 1199 | } | 1202 | } |
| 1200 | 1203 | ||
| 1204 | static void | ||
| 1205 | nv50_crtc_disable(struct drm_crtc *crtc) | ||
| 1206 | { | ||
| 1207 | struct nv50_head *head = nv50_head(crtc); | ||
| 1208 | if (head->image) | ||
| 1209 | nouveau_bo_unpin(head->image); | ||
| 1210 | nouveau_bo_ref(NULL, &head->image); | ||
| 1211 | } | ||
| 1212 | |||
| 1201 | static int | 1213 | static int |
| 1202 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 1214 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, |
| 1203 | uint32_t handle, uint32_t width, uint32_t height) | 1215 | uint32_t handle, uint32_t width, uint32_t height) |
| @@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
| 1271 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 1283 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 1272 | struct nv50_disp *disp = nv50_disp(crtc->dev); | 1284 | struct nv50_disp *disp = nv50_disp(crtc->dev); |
| 1273 | struct nv50_head *head = nv50_head(crtc); | 1285 | struct nv50_head *head = nv50_head(crtc); |
| 1286 | |||
| 1274 | nv50_dmac_destroy(disp->core, &head->ovly.base); | 1287 | nv50_dmac_destroy(disp->core, &head->ovly.base); |
| 1275 | nv50_pioc_destroy(disp->core, &head->oimm.base); | 1288 | nv50_pioc_destroy(disp->core, &head->oimm.base); |
| 1276 | nv50_dmac_destroy(disp->core, &head->sync.base); | 1289 | nv50_dmac_destroy(disp->core, &head->sync.base); |
| 1277 | nv50_pioc_destroy(disp->core, &head->curs.base); | 1290 | nv50_pioc_destroy(disp->core, &head->curs.base); |
| 1291 | |||
| 1292 | /*XXX: this shouldn't be necessary, but the core doesn't call | ||
| 1293 | * disconnect() during the cleanup paths | ||
| 1294 | */ | ||
| 1295 | if (head->image) | ||
| 1296 | nouveau_bo_unpin(head->image); | ||
| 1297 | nouveau_bo_ref(NULL, &head->image); | ||
| 1298 | |||
| 1278 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 1299 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
| 1279 | if (nv_crtc->cursor.nvbo) | 1300 | if (nv_crtc->cursor.nvbo) |
| 1280 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 1301 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
| 1281 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 1302 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
| 1303 | |||
| 1282 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | 1304 | nouveau_bo_unmap(nv_crtc->lut.nvbo); |
| 1283 | if (nv_crtc->lut.nvbo) | 1305 | if (nv_crtc->lut.nvbo) |
| 1284 | nouveau_bo_unpin(nv_crtc->lut.nvbo); | 1306 | nouveau_bo_unpin(nv_crtc->lut.nvbo); |
| 1285 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | 1307 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); |
| 1308 | |||
| 1286 | drm_crtc_cleanup(crtc); | 1309 | drm_crtc_cleanup(crtc); |
| 1287 | kfree(crtc); | 1310 | kfree(crtc); |
| 1288 | } | 1311 | } |
| @@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { | |||
| 1296 | .mode_set_base = nv50_crtc_mode_set_base, | 1319 | .mode_set_base = nv50_crtc_mode_set_base, |
| 1297 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, | 1320 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, |
| 1298 | .load_lut = nv50_crtc_lut_load, | 1321 | .load_lut = nv50_crtc_lut_load, |
| 1322 | .disable = nv50_crtc_disable, | ||
| 1299 | }; | 1323 | }; |
| 1300 | 1324 | ||
| 1301 | static const struct drm_crtc_funcs nv50_crtc_func = { | 1325 | static const struct drm_crtc_funcs nv50_crtc_func = { |
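
Like the nv04 path earlier, nv50 now tracks the pinned scanout buffer per head (head->image) and releases it from the new .disable hook and the destroy path. Order matters: the pin must be dropped before the last reference, or the WARN_ON(pin_refcnt) added in nouveau_bo_del_ttm() fires on free. A standalone sketch under that assumption; bo_ref() and bo_unpin() only approximate nouveau_bo_ref()/nouveau_bo_unpin().

```c
#include <stdio.h>
#include <stdlib.h>

struct bo { int refs, pins; };

static void bo_unpin(struct bo *b) { b->pins--; }

/* retarget *slot, dropping the old reference and freeing at zero */
static void bo_ref(struct bo *b, struct bo **slot)
{
	struct bo *old = *slot;

	*slot = b;
	if (b)
		b->refs++;
	if (old && --old->refs == 0) {
		if (old->pins > 0)  /* where the new WARN_ON would trip */
			fprintf(stderr, "bo freed while still pinned!\n");
		free(old);
	}
}

int main(void)
{
	struct bo *image = NULL;
	struct bo *fb = calloc(1, sizeof(*fb));

	fb->refs = 1;        /* caller's reference */
	fb->pins = 1;        /* pinned for scanout */
	bo_ref(fb, &image);  /* head->image takes a second reference */
	bo_ref(NULL, &fb);   /* caller drops its reference */

	/* disable/destroy: unpin first, then drop the last reference */
	if (image)
		bo_unpin(image);
	bo_ref(NULL, &image);
	return 0;
}
```
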
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index f9701e567db8..0ee363840035 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c | |||
| @@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
| 39 | struct nv10_fence_chan *fctx; | 39 | struct nv10_fence_chan *fctx; |
| 40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
| 41 | struct nouveau_object *object; | 41 | struct nouveau_object *object; |
| 42 | u32 start = mem->start * PAGE_SIZE; | ||
| 43 | u32 limit = start + mem->size - 1; | ||
| 42 | int ret, i; | 44 | int ret, i; |
| 43 | 45 | ||
| 44 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 46 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
| @@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
| 51 | fctx->base.sync = nv17_fence_sync; | 53 | fctx->base.sync = nv17_fence_sync; |
| 52 | 54 | ||
| 53 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 55 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, |
| 54 | NvSema, 0x0002, | 56 | NvSema, 0x003d, |
| 55 | &(struct nv_dma_class) { | 57 | &(struct nv_dma_class) { |
| 56 | .flags = NV_DMA_TARGET_VRAM | | 58 | .flags = NV_DMA_TARGET_VRAM | |
| 57 | NV_DMA_ACCESS_RDWR, | 59 | NV_DMA_ACCESS_RDWR, |
| 58 | .start = mem->start * PAGE_SIZE, | 60 | .start = start, |
| 59 | .limit = mem->size - 1, | 61 | .limit = limit, |
| 60 | }, sizeof(struct nv_dma_class), | 62 | }, sizeof(struct nv_dma_class), |
| 61 | &object); | 63 | &object); |
| 62 | 64 | ||
| 63 | /* dma objects for display sync channel semaphore blocks */ | 65 | /* dma objects for display sync channel semaphore blocks */ |
| 64 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { | 66 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { |
| 65 | struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); | 67 | struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); |
| 68 | u32 start = bo->bo.mem.start * PAGE_SIZE; | ||
| 69 | u32 limit = start + bo->bo.mem.size - 1; | ||
| 66 | 70 | ||
| 67 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 71 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, |
| 68 | NvEvoSema0 + i, 0x003d, | 72 | NvEvoSema0 + i, 0x003d, |
| 69 | &(struct nv_dma_class) { | 73 | &(struct nv_dma_class) { |
| 70 | .flags = NV_DMA_TARGET_VRAM | | 74 | .flags = NV_DMA_TARGET_VRAM | |
| 71 | NV_DMA_ACCESS_RDWR, | 75 | NV_DMA_ACCESS_RDWR, |
| 72 | .start = bo->bo.offset, | 76 | .start = start, |
| 73 | .limit = bo->bo.offset + 0xfff, | 77 | .limit = limit, |
| 74 | }, sizeof(struct nv_dma_class), | 78 | }, sizeof(struct nv_dma_class), |
| 75 | &object); | 79 | &object); |
| 76 | } | 80 | } |
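
The nv50_fence.c hunks above switch the semaphore DMA objects to class 0x003d and, as in nv17_fence.c, compute both window bounds in VRAM byte addresses; the old `.limit = mem->size - 1` described a window ending below its own start. The arithmetic, assuming a 4 KiB page:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page = 4096, start_page = 256, size = 0x1000;

	uint32_t start = start_page * page;  /* 0x100000: first byte in VRAM */
	uint32_t limit = start + size - 1;   /* 0x100fff: inclusive last byte */

	/* old code: limit = size - 1 = 0xfff, which lies below `start`,
	 * i.e. an empty, backwards DMA window */
	printf("start=0x%x limit=0x%x\n", start, limit);
	return 0;
}
```
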
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 93c2f2cceb51..eb89653a7a17 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c | |||
| @@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea | |||
| 179 | uint32_t type, bool interruptible) | 179 | uint32_t type, bool interruptible) |
| 180 | { | 180 | { |
| 181 | struct qxl_command cmd; | 181 | struct qxl_command cmd; |
| 182 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
| 182 | 183 | ||
| 183 | cmd.type = type; | 184 | cmd.type = type; |
| 184 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 185 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
| 185 | 186 | ||
| 186 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); | 187 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); |
| 187 | } | 188 | } |
| @@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas | |||
| 191 | uint32_t type, bool interruptible) | 192 | uint32_t type, bool interruptible) |
| 192 | { | 193 | { |
| 193 | struct qxl_command cmd; | 194 | struct qxl_command cmd; |
| 195 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
| 194 | 196 | ||
| 195 | cmd.type = type; | 197 | cmd.type = type; |
| 196 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 198 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
| 197 | 199 | ||
| 198 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); | 200 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); |
| 199 | } | 201 | } |
| @@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
| 214 | struct qxl_release *release; | 216 | struct qxl_release *release; |
| 215 | uint64_t id, next_id; | 217 | uint64_t id, next_id; |
| 216 | int i = 0; | 218 | int i = 0; |
| 217 | int ret; | ||
| 218 | union qxl_release_info *info; | 219 | union qxl_release_info *info; |
| 219 | 220 | ||
| 220 | while (qxl_ring_pop(qdev->release_ring, &id)) { | 221 | while (qxl_ring_pop(qdev->release_ring, &id)) { |
| @@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
| 224 | if (release == NULL) | 225 | if (release == NULL) |
| 225 | break; | 226 | break; |
| 226 | 227 | ||
| 227 | ret = qxl_release_reserve(qdev, release, false); | ||
| 228 | if (ret) { | ||
| 229 | qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id); | ||
| 230 | DRM_ERROR("failed to reserve release %lld\n", id); | ||
| 231 | } | ||
| 232 | |||
| 233 | info = qxl_release_map(qdev, release); | 228 | info = qxl_release_map(qdev, release); |
| 234 | next_id = info->next; | 229 | next_id = info->next; |
| 235 | qxl_release_unmap(qdev, release, info); | 230 | qxl_release_unmap(qdev, release, info); |
| 236 | 231 | ||
| 237 | qxl_release_unreserve(qdev, release); | ||
| 238 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, | 232 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, |
| 239 | next_id); | 233 | next_id); |
| 240 | 234 | ||
| @@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
| 259 | return i; | 253 | return i; |
| 260 | } | 254 | } |
| 261 | 255 | ||
| 262 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 256 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
| 257 | struct qxl_release *release, | ||
| 258 | unsigned long size, | ||
| 263 | struct qxl_bo **_bo) | 259 | struct qxl_bo **_bo) |
| 264 | { | 260 | { |
| 265 | struct qxl_bo *bo; | 261 | struct qxl_bo *bo; |
| 266 | int ret; | 262 | int ret; |
| 267 | 263 | ||
| 268 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, | 264 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, |
| 269 | QXL_GEM_DOMAIN_VRAM, NULL, &bo); | 265 | false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); |
| 270 | if (ret) { | 266 | if (ret) { |
| 271 | DRM_ERROR("failed to allocate VRAM BO\n"); | 267 | DRM_ERROR("failed to allocate VRAM BO\n"); |
| 272 | return ret; | 268 | return ret; |
| 273 | } | 269 | } |
| 274 | ret = qxl_bo_reserve(bo, false); | 270 | ret = qxl_release_list_add(release, bo); |
| 275 | if (unlikely(ret != 0)) | 271 | if (ret) |
| 276 | goto out_unref; | 272 | goto out_unref; |
| 277 | 273 | ||
| 278 | *_bo = bo; | 274 | *_bo = bo; |
| 279 | return 0; | 275 | return 0; |
| 280 | out_unref: | 276 | out_unref: |
| 281 | qxl_bo_unref(&bo); | 277 | qxl_bo_unref(&bo); |
| 282 | return 0; | 278 | return ret; |
| 283 | } | 279 | } |
| 284 | 280 | ||
| 285 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) | 281 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) |
| @@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
| 503 | if (ret) | 499 | if (ret) |
| 504 | return ret; | 500 | return ret; |
| 505 | 501 | ||
| 502 | ret = qxl_release_reserve_list(release, true); | ||
| 503 | if (ret) | ||
| 504 | return ret; | ||
| 505 | |||
| 506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); | 506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); |
| 507 | cmd->type = QXL_SURFACE_CMD_CREATE; | 507 | cmd->type = QXL_SURFACE_CMD_CREATE; |
| 508 | cmd->u.surface_create.format = surf->surf.format; | 508 | cmd->u.surface_create.format = surf->surf.format; |
| @@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
| 524 | 524 | ||
| 525 | surf->surf_create = release; | 525 | surf->surf_create = release; |
| 526 | 526 | ||
| 527 | /* no need to add a release to the fence for this bo, | 527 | /* no need to add a release to the fence for this surface bo, |
| 528 | since it is only released when we ask to destroy the surface | 528 | since it is only released when we ask to destroy the surface |
| 529 | and it would never signal otherwise */ | 529 | and it would never signal otherwise */ |
| 530 | qxl_fence_releaseable(qdev, release); | ||
| 531 | |||
| 532 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 530 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
| 533 | 531 | qxl_release_fence_buffer_objects(release); | |
| 534 | qxl_release_unreserve(qdev, release); | ||
| 535 | 532 | ||
| 536 | surf->hw_surf_alloc = true; | 533 | surf->hw_surf_alloc = true; |
| 537 | spin_lock(&qdev->surf_id_idr_lock); | 534 | spin_lock(&qdev->surf_id_idr_lock); |
| @@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, | |||
| 573 | cmd->surface_id = id; | 570 | cmd->surface_id = id; |
| 574 | qxl_release_unmap(qdev, release, &cmd->release_info); | 571 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 575 | 572 | ||
| 576 | qxl_fence_releaseable(qdev, release); | ||
| 577 | |||
| 578 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 573 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
| 579 | 574 | ||
| 580 | qxl_release_unreserve(qdev, release); | 575 | qxl_release_fence_buffer_objects(release); |
| 581 | |||
| 582 | 576 | ||
| 583 | return 0; | 577 | return 0; |
| 584 | } | 578 | } |
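
In the qxl hunks above, a release now carries its buffers on a list of ttm_validate_buffer entries instead of a fixed array, so the command bo is recovered with list_first_entry(&release->bos, struct qxl_bo_list, tv.head). A self-contained sketch of that intrusive-list lookup; list_head, container_of and bo_entry here are small local reimplementations for illustration, not the kernel's.

```c
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bo_entry {
	int bo_id;
	struct list_head head;  /* like qxl_bo_list.tv.head */
};

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

int main(void)
{
	struct list_head bos = { &bos, &bos };
	struct bo_entry cmd_bo = { .bo_id = 1 };

	list_add_tail(&cmd_bo.head, &bos);

	/* list_first_entry(&bos, struct bo_entry, head) */
	struct bo_entry *first = container_of(bos.next, struct bo_entry, head);
	printf("command bo = %d\n", first->bo_id);
	return 0;
}
```
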
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index f76f5dd7bfc4..835caba026d3 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
| @@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) | |||
| 179 | kfree(qxl_crtc); | 179 | kfree(qxl_crtc); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static void | 182 | static int |
| 183 | qxl_hide_cursor(struct qxl_device *qdev) | 183 | qxl_hide_cursor(struct qxl_device *qdev) |
| 184 | { | 184 | { |
| 185 | struct qxl_release *release; | 185 | struct qxl_release *release; |
| @@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev) | |||
| 188 | 188 | ||
| 189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
| 190 | &release, NULL); | 190 | &release, NULL); |
| 191 | if (ret) | ||
| 192 | return ret; | ||
| 193 | |||
| 194 | ret = qxl_release_reserve_list(release, true); | ||
| 195 | if (ret) { | ||
| 196 | qxl_release_free(qdev, release); | ||
| 197 | return ret; | ||
| 198 | } | ||
| 191 | 199 | ||
| 192 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 200 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
| 193 | cmd->type = QXL_CURSOR_HIDE; | 201 | cmd->type = QXL_CURSOR_HIDE; |
| 194 | qxl_release_unmap(qdev, release, &cmd->release_info); | 202 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 195 | 203 | ||
| 196 | qxl_fence_releaseable(qdev, release); | ||
| 197 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 204 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
| 198 | qxl_release_unreserve(qdev, release); | 205 | qxl_release_fence_buffer_objects(release); |
| 206 | return 0; | ||
| 199 | } | 207 | } |
| 200 | 208 | ||
| 201 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | 209 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, |
| @@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 216 | 224 | ||
| 217 | int size = 64*64*4; | 225 | int size = 64*64*4; |
| 218 | int ret = 0; | 226 | int ret = 0; |
| 219 | if (!handle) { | 227 | if (!handle) |
| 220 | qxl_hide_cursor(qdev); | 228 | return qxl_hide_cursor(qdev); |
| 221 | return 0; | ||
| 222 | } | ||
| 223 | 229 | ||
| 224 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); | 230 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); |
| 225 | if (!obj) { | 231 | if (!obj) { |
| @@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 234 | goto out_unref; | 240 | goto out_unref; |
| 235 | 241 | ||
| 236 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); | 242 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); |
| 243 | qxl_bo_unreserve(user_bo); | ||
| 237 | if (ret) | 244 | if (ret) |
| 238 | goto out_unreserve; | 245 | goto out_unref; |
| 239 | 246 | ||
| 240 | ret = qxl_bo_kmap(user_bo, &user_ptr); | 247 | ret = qxl_bo_kmap(user_bo, &user_ptr); |
| 241 | if (ret) | 248 | if (ret) |
| @@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 246 | &release, NULL); | 253 | &release, NULL); |
| 247 | if (ret) | 254 | if (ret) |
| 248 | goto out_kunmap; | 255 | goto out_kunmap; |
| 249 | ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, | 256 | |
| 250 | &cursor_bo); | 257 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, |
| 258 | &cursor_bo); | ||
| 251 | if (ret) | 259 | if (ret) |
| 252 | goto out_free_release; | 260 | goto out_free_release; |
| 253 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | 261 | |
| 262 | ret = qxl_release_reserve_list(release, false); | ||
| 254 | if (ret) | 263 | if (ret) |
| 255 | goto out_free_bo; | 264 | goto out_free_bo; |
| 256 | 265 | ||
| 266 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | ||
| 267 | if (ret) | ||
| 268 | goto out_backoff; | ||
| 269 | |||
| 257 | cursor->header.unique = 0; | 270 | cursor->header.unique = 0; |
| 258 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; | 271 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; |
| 259 | cursor->header.width = 64; | 272 | cursor->header.width = 64; |
| @@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 269 | 282 | ||
| 270 | qxl_bo_kunmap(cursor_bo); | 283 | qxl_bo_kunmap(cursor_bo); |
| 271 | 284 | ||
| 272 | /* finish with the userspace bo */ | ||
| 273 | qxl_bo_kunmap(user_bo); | 285 | qxl_bo_kunmap(user_bo); |
| 274 | qxl_bo_unpin(user_bo); | ||
| 275 | qxl_bo_unreserve(user_bo); | ||
| 276 | drm_gem_object_unreference_unlocked(obj); | ||
| 277 | 286 | ||
| 278 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 287 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
| 279 | cmd->type = QXL_CURSOR_SET; | 288 | cmd->type = QXL_CURSOR_SET; |
| @@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 281 | cmd->u.set.position.y = qcrtc->cur_y; | 290 | cmd->u.set.position.y = qcrtc->cur_y; |
| 282 | 291 | ||
| 283 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); | 292 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); |
| 284 | qxl_release_add_res(qdev, release, cursor_bo); | ||
| 285 | 293 | ||
| 286 | cmd->u.set.visible = 1; | 294 | cmd->u.set.visible = 1; |
| 287 | qxl_release_unmap(qdev, release, &cmd->release_info); | 295 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 288 | 296 | ||
| 289 | qxl_fence_releaseable(qdev, release); | ||
| 290 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 297 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
| 291 | qxl_release_unreserve(qdev, release); | 298 | qxl_release_fence_buffer_objects(release); |
| 299 | |||
| 300 | /* finish with the userspace bo */ | ||
| 301 | ret = qxl_bo_reserve(user_bo, false); | ||
| 302 | if (!ret) { | ||
| 303 | qxl_bo_unpin(user_bo); | ||
| 304 | qxl_bo_unreserve(user_bo); | ||
| 305 | } | ||
| 306 | drm_gem_object_unreference_unlocked(obj); | ||
| 292 | 307 | ||
| 293 | qxl_bo_unreserve(cursor_bo); | ||
| 294 | qxl_bo_unref(&cursor_bo); | 308 | qxl_bo_unref(&cursor_bo); |
| 295 | 309 | ||
| 296 | return ret; | 310 | return ret; |
| 311 | |||
| 312 | out_backoff: | ||
| 313 | qxl_release_backoff_reserve_list(release); | ||
| 297 | out_free_bo: | 314 | out_free_bo: |
| 298 | qxl_bo_unref(&cursor_bo); | 315 | qxl_bo_unref(&cursor_bo); |
| 299 | out_free_release: | 316 | out_free_release: |
| 300 | qxl_release_unreserve(qdev, release); | ||
| 301 | qxl_release_free(qdev, release); | 317 | qxl_release_free(qdev, release); |
| 302 | out_kunmap: | 318 | out_kunmap: |
| 303 | qxl_bo_kunmap(user_bo); | 319 | qxl_bo_kunmap(user_bo); |
| 304 | out_unpin: | 320 | out_unpin: |
| 305 | qxl_bo_unpin(user_bo); | 321 | qxl_bo_unpin(user_bo); |
| 306 | out_unreserve: | ||
| 307 | qxl_bo_unreserve(user_bo); | ||
| 308 | out_unref: | 322 | out_unref: |
| 309 | drm_gem_object_unreference_unlocked(obj); | 323 | drm_gem_object_unreference_unlocked(obj); |
| 310 | return ret; | 324 | return ret; |
| @@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 322 | 336 | ||
| 323 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 337 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
| 324 | &release, NULL); | 338 | &release, NULL); |
| 339 | if (ret) | ||
| 340 | return ret; | ||
| 341 | |||
| 342 | ret = qxl_release_reserve_list(release, true); | ||
| 343 | if (ret) { | ||
| 344 | qxl_release_free(qdev, release); | ||
| 345 | return ret; | ||
| 346 | } | ||
| 325 | 347 | ||
| 326 | qcrtc->cur_x = x; | 348 | qcrtc->cur_x = x; |
| 327 | qcrtc->cur_y = y; | 349 | qcrtc->cur_y = y; |
| @@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 332 | cmd->u.position.y = qcrtc->cur_y; | 354 | cmd->u.position.y = qcrtc->cur_y; |
| 333 | qxl_release_unmap(qdev, release, &cmd->release_info); | 355 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 334 | 356 | ||
| 335 | qxl_fence_releaseable(qdev, release); | ||
| 336 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 357 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
| 337 | qxl_release_unreserve(qdev, release); | 358 | qxl_release_fence_buffer_objects(release); |
| 359 | |||
| 338 | return 0; | 360 | return 0; |
| 339 | } | 361 | } |
| 340 | 362 | ||
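
The reworked qxl cursor paths above allocate every object first, reserve the whole list once, and on failure unwind through a ladder of goto labels (out_backoff, out_free_bo, out_free_release, and so on), each undoing exactly one earlier step. The generic shape of that ladder, with hypothetical step_*/undo_* functions standing in for the driver calls:

```c
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }  /* pretend this step fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

int main(void)
{
	int ret;

	if ((ret = step_a()))
		goto out;
	if ((ret = step_b()))
		goto out_undo_a;
	if ((ret = step_c()))
		goto out_undo_b;
	return 0;  /* success: nothing to unwind */

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return ret;
}
```
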
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 3c8c3dbf9378..56e1d633875e 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c | |||
| @@ -23,25 +23,29 @@ | |||
| 23 | #include "qxl_drv.h" | 23 | #include "qxl_drv.h" |
| 24 | #include "qxl_object.h" | 24 | #include "qxl_object.h" |
| 25 | 25 | ||
| 26 | static int alloc_clips(struct qxl_device *qdev, | ||
| 27 | struct qxl_release *release, | ||
| 28 | unsigned num_clips, | ||
| 29 | struct qxl_bo **clips_bo) | ||
| 30 | { | ||
| 31 | int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips; | ||
| 32 | |||
| 33 | return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); | ||
| 34 | } | ||
| 35 | |||
| 26 | /* returns a pointer to the already allocated qxl_rect array inside | 36 | /* returns a pointer to the already allocated qxl_rect array inside |
| 27 | * the qxl_clip_rects. This is *not* the same as the memory allocated | 37 | * the qxl_clip_rects. This is *not* the same as the memory allocated |
| 28 | * on the device, it is offset to qxl_clip_rects.chunk.data */ | 38 | * on the device, it is offset to qxl_clip_rects.chunk.data */ |
| 29 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | 39 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, |
| 30 | struct qxl_drawable *drawable, | 40 | struct qxl_drawable *drawable, |
| 31 | unsigned num_clips, | 41 | unsigned num_clips, |
| 32 | struct qxl_bo **clips_bo, | 42 | struct qxl_bo *clips_bo) |
| 33 | struct qxl_release *release) | ||
| 34 | { | 43 | { |
| 35 | struct qxl_clip_rects *dev_clips; | 44 | struct qxl_clip_rects *dev_clips; |
| 36 | int ret; | 45 | int ret; |
| 37 | int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips; | ||
| 38 | ret = qxl_alloc_bo_reserved(qdev, size, clips_bo); | ||
| 39 | if (ret) | ||
| 40 | return NULL; | ||
| 41 | 46 | ||
| 42 | ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); | 47 | ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); |
| 43 | if (ret) { | 48 | if (ret) { |
| 44 | qxl_bo_unref(clips_bo); | ||
| 45 | return NULL; | 49 | return NULL; |
| 46 | } | 50 | } |
| 47 | dev_clips->num_rects = num_clips; | 51 | dev_clips->num_rects = num_clips; |
| @@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | |||
| 52 | } | 56 | } |
| 53 | 57 | ||
| 54 | static int | 58 | static int |
| 59 | alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) | ||
| 60 | { | ||
| 61 | int ret; | ||
| 62 | ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), | ||
| 63 | QXL_RELEASE_DRAWABLE, release, | ||
| 64 | NULL); | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
| 68 | static void | ||
| 69 | free_drawable(struct qxl_device *qdev, struct qxl_release *release) | ||
| 70 | { | ||
| 71 | qxl_release_free(qdev, release); | ||
| 72 | } | ||
| 73 | |||
| 74 | /* release needs to be reserved at this point */ | ||
| 75 | static int | ||
| 55 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | 76 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, |
| 56 | const struct qxl_rect *rect, | 77 | const struct qxl_rect *rect, |
| 57 | struct qxl_release **release) | 78 | struct qxl_release *release) |
| 58 | { | 79 | { |
| 59 | struct qxl_drawable *drawable; | 80 | struct qxl_drawable *drawable; |
| 60 | int i, ret; | 81 | int i; |
| 61 | 82 | ||
| 62 | ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), | 83 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 63 | QXL_RELEASE_DRAWABLE, release, | 84 | if (!drawable) |
| 64 | NULL); | 85 | return -ENOMEM; |
| 65 | if (ret) | ||
| 66 | return ret; | ||
| 67 | 86 | ||
| 68 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release); | ||
| 69 | drawable->type = type; | 87 | drawable->type = type; |
| 70 | 88 | ||
| 71 | drawable->surface_id = surface; /* Only primary for now */ | 89 | drawable->surface_id = surface; /* Only primary for now */ |
| @@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | |||
| 91 | drawable->bbox = *rect; | 109 | drawable->bbox = *rect; |
| 92 | 110 | ||
| 93 | drawable->mm_time = qdev->rom->mm_clock; | 111 | drawable->mm_time = qdev->rom->mm_clock; |
| 94 | qxl_release_unmap(qdev, *release, &drawable->release_info); | 112 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 95 | return 0; | 113 | return 0; |
| 96 | } | 114 | } |
| 97 | 115 | ||
| 98 | static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | 116 | static int alloc_palette_object(struct qxl_device *qdev, |
| 117 | struct qxl_release *release, | ||
| 118 | struct qxl_bo **palette_bo) | ||
| 119 | { | ||
| 120 | return qxl_alloc_bo_reserved(qdev, release, | ||
| 121 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
| 122 | palette_bo); | ||
| 123 | } | ||
| 124 | |||
| 125 | static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, | ||
| 126 | struct qxl_release *release, | ||
| 99 | const struct qxl_fb_image *qxl_fb_image) | 127 | const struct qxl_fb_image *qxl_fb_image) |
| 100 | { | 128 | { |
| 101 | struct qxl_device *qdev = qxl_fb_image->qdev; | ||
| 102 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; | 129 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; |
| 103 | uint32_t visual = qxl_fb_image->visual; | 130 | uint32_t visual = qxl_fb_image->visual; |
| 104 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; | 131 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; |
| @@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
| 108 | static uint64_t unique; /* we make no attempt to actually set this | 135 | static uint64_t unique; /* we make no attempt to actually set this |
| 109 | * correctly globaly, since that would require | 136 | * correctly globaly, since that would require |
| 110 | * tracking all of our palettes. */ | 137 | * tracking all of our palettes. */ |
| 111 | 138 | ret = qxl_bo_kmap(palette_bo, (void **)&pal); | |
| 112 | ret = qxl_alloc_bo_reserved(qdev, | ||
| 113 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
| 114 | palette_bo); | ||
| 115 | |||
| 116 | ret = qxl_bo_kmap(*palette_bo, (void **)&pal); | ||
| 117 | pal->num_ents = 2; | 139 | pal->num_ents = 2; |
| 118 | pal->unique = unique++; | 140 | pal->unique = unique++; |
| 119 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { | 141 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
| 126 | } | 148 | } |
| 127 | pal->ents[0] = bgcolor; | 149 | pal->ents[0] = bgcolor; |
| 128 | pal->ents[1] = fgcolor; | 150 | pal->ents[1] = fgcolor; |
| 129 | qxl_bo_kunmap(*palette_bo); | 151 | qxl_bo_kunmap(palette_bo); |
| 130 | return 0; | 152 | return 0; |
| 131 | } | 153 | } |
| 132 | 154 | ||
| @@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
| 144 | const char *src = fb_image->data; | 166 | const char *src = fb_image->data; |
| 145 | int depth = fb_image->depth; | 167 | int depth = fb_image->depth; |
| 146 | struct qxl_release *release; | 168 | struct qxl_release *release; |
| 147 | struct qxl_bo *image_bo; | ||
| 148 | struct qxl_image *image; | 169 | struct qxl_image *image; |
| 149 | int ret; | 170 | int ret; |
| 150 | 171 | struct qxl_drm_image *dimage; | |
| 172 | struct qxl_bo *palette_bo = NULL; | ||
| 151 | if (stride == 0) | 173 | if (stride == 0) |
| 152 | stride = depth * width / 8; | 174 | stride = depth * width / 8; |
| 153 | 175 | ||
| 176 | ret = alloc_drawable(qdev, &release); | ||
| 177 | if (ret) | ||
| 178 | return; | ||
| 179 | |||
| 180 | ret = qxl_image_alloc_objects(qdev, release, | ||
| 181 | &dimage, | ||
| 182 | height, stride); | ||
| 183 | if (ret) | ||
| 184 | goto out_free_drawable; | ||
| 185 | |||
| 186 | if (depth == 1) { | ||
| 187 | ret = alloc_palette_object(qdev, release, &palette_bo); | ||
| 188 | if (ret) | ||
| 189 | goto out_free_image; | ||
| 190 | } | ||
| 191 | |||
| 192 | /* do a reservation run over all the objects we just allocated */ | ||
| 193 | ret = qxl_release_reserve_list(release, true); | ||
| 194 | if (ret) | ||
| 195 | goto out_free_palette; | ||
| 196 | |||
| 154 | rect.left = x; | 197 | rect.left = x; |
| 155 | rect.right = x + width; | 198 | rect.right = x + width; |
| 156 | rect.top = y; | 199 | rect.top = y; |
| 157 | rect.bottom = y + height; | 200 | rect.bottom = y + height; |
| 158 | 201 | ||
| 159 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); | 202 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release); |
| 160 | if (ret) | 203 | if (ret) { |
| 161 | return; | 204 | qxl_release_backoff_reserve_list(release); |
| 205 | goto out_free_palette; | ||
| 206 | } | ||
| 162 | 207 | ||
| 163 | ret = qxl_image_create(qdev, release, &image_bo, | 208 | ret = qxl_image_init(qdev, release, dimage, |
| 164 | (const uint8_t *)src, 0, 0, | 209 | (const uint8_t *)src, 0, 0, |
| 165 | width, height, depth, stride); | 210 | width, height, depth, stride); |
| 166 | if (ret) { | 211 | if (ret) { |
| 167 | qxl_release_unreserve(qdev, release); | 212 | qxl_release_backoff_reserve_list(release); |
| 168 | qxl_release_free(qdev, release); | 213 | qxl_release_free(qdev, release); |
| 169 | return; | 214 | return; |
| 170 | } | 215 | } |
| 171 | 216 | ||
| 172 | if (depth == 1) { | 217 | if (depth == 1) { |
| 173 | struct qxl_bo *palette_bo; | ||
| 174 | void *ptr; | 218 | void *ptr; |
| 175 | ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); | 219 | ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image); |
| 176 | qxl_release_add_res(qdev, release, palette_bo); | ||
| 177 | 220 | ||
| 178 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); | 221 | ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0); |
| 179 | image = ptr; | 222 | image = ptr; |
| 180 | image->u.bitmap.palette = | 223 | image->u.bitmap.palette = |
| 181 | qxl_bo_physical_address(qdev, palette_bo, 0); | 224 | qxl_bo_physical_address(qdev, palette_bo, 0); |
| 182 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); | 225 | qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr); |
| 183 | qxl_bo_unreserve(palette_bo); | ||
| 184 | qxl_bo_unref(&palette_bo); | ||
| 185 | } | 226 | } |
| 186 | 227 | ||
| 187 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 228 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| @@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
| 199 | drawable->u.copy.mask.bitmap = 0; | 240 | drawable->u.copy.mask.bitmap = 0; |
| 200 | 241 | ||
| 201 | drawable->u.copy.src_bitmap = | 242 | drawable->u.copy.src_bitmap = |
| 202 | qxl_bo_physical_address(qdev, image_bo, 0); | 243 | qxl_bo_physical_address(qdev, dimage->bo, 0); |
| 203 | qxl_release_unmap(qdev, release, &drawable->release_info); | 244 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 204 | 245 | ||
| 205 | qxl_release_add_res(qdev, release, image_bo); | ||
| 206 | qxl_bo_unreserve(image_bo); | ||
| 207 | qxl_bo_unref(&image_bo); | ||
| 208 | |||
| 209 | qxl_fence_releaseable(qdev, release); | ||
| 210 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 246 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 211 | qxl_release_unreserve(qdev, release); | 247 | qxl_release_fence_buffer_objects(release); |
| 248 | |||
| 249 | out_free_palette: | ||
| 250 | if (palette_bo) | ||
| 251 | qxl_bo_unref(&palette_bo); | ||
| 252 | out_free_image: | ||
| 253 | qxl_image_free_objects(qdev, dimage); | ||
| 254 | out_free_drawable: | ||
| 255 | if (ret) | ||
| 256 | free_drawable(qdev, release); | ||
| 212 | } | 257 | } |
| 213 | 258 | ||
| 214 | /* push a draw command using the given clipping rectangles as | 259 | /* push a draw command using the given clipping rectangles as |
| @@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 243 | int depth = qxl_fb->base.bits_per_pixel; | 288 | int depth = qxl_fb->base.bits_per_pixel; |
| 244 | uint8_t *surface_base; | 289 | uint8_t *surface_base; |
| 245 | struct qxl_release *release; | 290 | struct qxl_release *release; |
| 246 | struct qxl_bo *image_bo; | ||
| 247 | struct qxl_bo *clips_bo; | 291 | struct qxl_bo *clips_bo; |
| 292 | struct qxl_drm_image *dimage; | ||
| 248 | int ret; | 293 | int ret; |
| 249 | 294 | ||
| 295 | ret = alloc_drawable(qdev, &release); | ||
| 296 | if (ret) | ||
| 297 | return; | ||
| 298 | |||
| 250 | left = clips->x1; | 299 | left = clips->x1; |
| 251 | right = clips->x2; | 300 | right = clips->x2; |
| 252 | top = clips->y1; | 301 | top = clips->y1; |
| @@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 263 | 312 | ||
| 264 | width = right - left; | 313 | width = right - left; |
| 265 | height = bottom - top; | 314 | height = bottom - top; |
| 315 | |||
| 316 | ret = alloc_clips(qdev, release, num_clips, &clips_bo); | ||
| 317 | if (ret) | ||
| 318 | goto out_free_drawable; | ||
| 319 | |||
| 320 | ret = qxl_image_alloc_objects(qdev, release, | ||
| 321 | &dimage, | ||
| 322 | height, stride); | ||
| 323 | if (ret) | ||
| 324 | goto out_free_clips; | ||
| 325 | |||
| 326 | /* do a reservation run over all the objects we just allocated */ | ||
| 327 | ret = qxl_release_reserve_list(release, true); | ||
| 328 | if (ret) | ||
| 329 | goto out_free_image; | ||
| 330 | |||
| 266 | drawable_rect.left = left; | 331 | drawable_rect.left = left; |
| 267 | drawable_rect.right = right; | 332 | drawable_rect.right = right; |
| 268 | drawable_rect.top = top; | 333 | drawable_rect.top = top; |
| 269 | drawable_rect.bottom = bottom; | 334 | drawable_rect.bottom = bottom; |
| 335 | |||
| 270 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, | 336 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, |
| 271 | &release); | 337 | release); |
| 272 | if (ret) | 338 | if (ret) |
| 273 | return; | 339 | goto out_release_backoff; |
| 274 | 340 | ||
| 275 | ret = qxl_bo_kmap(bo, (void **)&surface_base); | 341 | ret = qxl_bo_kmap(bo, (void **)&surface_base); |
| 276 | if (ret) | 342 | if (ret) |
| 277 | goto out_unref; | 343 | goto out_release_backoff; |
| 278 | 344 | ||
| 279 | ret = qxl_image_create(qdev, release, &image_bo, surface_base, | 345 | |
| 280 | left, top, width, height, depth, stride); | 346 | ret = qxl_image_init(qdev, release, dimage, surface_base, |
| 347 | left, top, width, height, depth, stride); | ||
| 281 | qxl_bo_kunmap(bo); | 348 | qxl_bo_kunmap(bo); |
| 282 | if (ret) | 349 | if (ret) |
| 283 | goto out_unref; | 350 | goto out_release_backoff; |
| 351 | |||
| 352 | rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); | ||
| 353 | if (!rects) | ||
| 354 | goto out_release_backoff; | ||
| 284 | 355 | ||
| 285 | rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release); | ||
| 286 | if (!rects) { | ||
| 287 | qxl_bo_unref(&image_bo); | ||
| 288 | goto out_unref; | ||
| 289 | } | ||
| 290 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 356 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 291 | 357 | ||
| 292 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; | 358 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; |
| 293 | drawable->clip.data = qxl_bo_physical_address(qdev, | 359 | drawable->clip.data = qxl_bo_physical_address(qdev, |
| 294 | clips_bo, 0); | 360 | clips_bo, 0); |
| 295 | qxl_release_add_res(qdev, release, clips_bo); | ||
| 296 | 361 | ||
| 297 | drawable->u.copy.src_area.top = 0; | 362 | drawable->u.copy.src_area.top = 0; |
| 298 | drawable->u.copy.src_area.bottom = height; | 363 | drawable->u.copy.src_area.bottom = height; |
| @@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 306 | drawable->u.copy.mask.pos.y = 0; | 371 | drawable->u.copy.mask.pos.y = 0; |
| 307 | drawable->u.copy.mask.bitmap = 0; | 372 | drawable->u.copy.mask.bitmap = 0; |
| 308 | 373 | ||
| 309 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); | 374 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); |
| 310 | qxl_release_unmap(qdev, release, &drawable->release_info); | 375 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 311 | qxl_release_add_res(qdev, release, image_bo); | 376 | |
| 312 | qxl_bo_unreserve(image_bo); | ||
| 313 | qxl_bo_unref(&image_bo); | ||
| 314 | clips_ptr = clips; | 377 | clips_ptr = clips; |
| 315 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { | 378 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { |
| 316 | rects[i].left = clips_ptr->x1; | 379 | rects[i].left = clips_ptr->x1; |
| @@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 319 | rects[i].bottom = clips_ptr->y2; | 382 | rects[i].bottom = clips_ptr->y2; |
| 320 | } | 383 | } |
| 321 | qxl_bo_kunmap(clips_bo); | 384 | qxl_bo_kunmap(clips_bo); |
| 322 | qxl_bo_unreserve(clips_bo); | ||
| 323 | qxl_bo_unref(&clips_bo); | ||
| 324 | 385 | ||
| 325 | qxl_fence_releaseable(qdev, release); | ||
| 326 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 386 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 327 | qxl_release_unreserve(qdev, release); | 387 | qxl_release_fence_buffer_objects(release); |
| 328 | return; | 388 | |
| 389 | out_release_backoff: | ||
| 390 | if (ret) | ||
| 391 | qxl_release_backoff_reserve_list(release); | ||
| 392 | out_free_image: | ||
| 393 | qxl_image_free_objects(qdev, dimage); | ||
| 394 | out_free_clips: | ||
| 395 | qxl_bo_unref(&clips_bo); | ||
| 396 | out_free_drawable: | ||
| 397 | /* only free drawable on error */ | ||
| 398 | if (ret) | ||
| 399 | free_drawable(qdev, release); | ||
| 329 | 400 | ||
| 330 | out_unref: | ||
| 331 | qxl_release_unreserve(qdev, release); | ||
| 332 | qxl_release_free(qdev, release); | ||
| 333 | } | 401 | } |
| 334 | 402 | ||
| 335 | void qxl_draw_copyarea(struct qxl_device *qdev, | 403 | void qxl_draw_copyarea(struct qxl_device *qdev, |
| @@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
| 342 | struct qxl_release *release; | 410 | struct qxl_release *release; |
| 343 | int ret; | 411 | int ret; |
| 344 | 412 | ||
| 413 | ret = alloc_drawable(qdev, &release); | ||
| 414 | if (ret) | ||
| 415 | return; | ||
| 416 | |||
| 417 | /* do a reservation run over all the objects we just allocated */ | ||
| 418 | ret = qxl_release_reserve_list(release, true); | ||
| 419 | if (ret) | ||
| 420 | goto out_free_release; | ||
| 421 | |||
| 345 | rect.left = dx; | 422 | rect.left = dx; |
| 346 | rect.top = dy; | 423 | rect.top = dy; |
| 347 | rect.right = dx + width; | 424 | rect.right = dx + width; |
| 348 | rect.bottom = dy + height; | 425 | rect.bottom = dy + height; |
| 349 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); | 426 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release); |
| 350 | if (ret) | 427 | if (ret) { |
| 351 | return; | 428 | qxl_release_backoff_reserve_list(release); |
| 429 | goto out_free_release; | ||
| 430 | } | ||
| 352 | 431 | ||
| 353 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 432 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 354 | drawable->u.copy_bits.src_pos.x = sx; | 433 | drawable->u.copy_bits.src_pos.x = sx; |
| 355 | drawable->u.copy_bits.src_pos.y = sy; | 434 | drawable->u.copy_bits.src_pos.y = sy; |
| 356 | |||
| 357 | qxl_release_unmap(qdev, release, &drawable->release_info); | 435 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 358 | qxl_fence_releaseable(qdev, release); | 436 | |
| 359 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 437 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 360 | qxl_release_unreserve(qdev, release); | 438 | qxl_release_fence_buffer_objects(release); |
| 439 | |||
| 440 | out_free_release: | ||
| 441 | if (ret) | ||
| 442 | free_drawable(qdev, release); | ||
| 361 | } | 443 | } |
| 362 | 444 | ||
| 363 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | 445 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) |
| @@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
| 370 | struct qxl_release *release; | 452 | struct qxl_release *release; |
| 371 | int ret; | 453 | int ret; |
| 372 | 454 | ||
| 373 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); | 455 | ret = alloc_drawable(qdev, &release); |
| 374 | if (ret) | 456 | if (ret) |
| 375 | return; | 457 | return; |
| 376 | 458 | ||
| 459 | /* do a reservation run over all the objects we just allocated */ | ||
| 460 | ret = qxl_release_reserve_list(release, true); | ||
| 461 | if (ret) | ||
| 462 | goto out_free_release; | ||
| 463 | |||
| 464 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release); | ||
| 465 | if (ret) { | ||
| 466 | qxl_release_backoff_reserve_list(release); | ||
| 467 | goto out_free_release; | ||
| 468 | } | ||
| 469 | |||
| 377 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 470 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 378 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; | 471 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; |
| 379 | drawable->u.fill.brush.u.color = color; | 472 | drawable->u.fill.brush.u.color = color; |
| @@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
| 384 | drawable->u.fill.mask.bitmap = 0; | 477 | drawable->u.fill.mask.bitmap = 0; |
| 385 | 478 | ||
| 386 | qxl_release_unmap(qdev, release, &drawable->release_info); | 479 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 387 | qxl_fence_releaseable(qdev, release); | 480 | |
| 388 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 481 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 389 | qxl_release_unreserve(qdev, release); | 482 | qxl_release_fence_buffer_objects(release); |
| 483 | |||
| 484 | out_free_release: | ||
| 485 | if (ret) | ||
| 486 | free_drawable(qdev, release); | ||
| 390 | } | 487 | } |
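The qxl_draw.c hunks above replace the old per-object reserve/unreserve flow with one pattern: allocate the release and all its objects up front, reserve the whole list in a single pass, back off the reservations on any mid-build failure, and fence the buffer objects only after the command is pushed. The following is a minimal userspace sketch of that unwind shape, under the assumption that on success ownership of the release passes to the command ring; every name here is a hypothetical stand-in, not a driver helper.

```c
#include <stdlib.h>

struct release { int reserved; };

static int reserve_list(struct release *r)   { r->reserved = 1; return 0; }
static void backoff_list(struct release *r)  { r->reserved = 0; }
static void fence_objects(struct release *r) { r->reserved = 0; }

static int draw(void)
{
	struct release *rel = calloc(1, sizeof(*rel));
	void *image = NULL;
	int ret;

	if (!rel)
		return -1;

	image = malloc(64);          /* stands in for qxl_image_alloc_objects() */
	if (!image) {
		ret = -1;
		goto out_free_release;
	}

	ret = reserve_list(rel);     /* reserve every object in one pass */
	if (ret)
		goto out_free_image;

	ret = 0;                     /* pretend make_drawable()/init succeeded */
	if (ret) {
		backoff_list(rel);   /* mid-build failure: drop reservations */
		goto out_free_image;
	}
	fence_objects(rel);          /* success: the pushed command owns rel */

out_free_image:
	free(image);                 /* image objects are freed either way */
out_free_release:
	if (ret)                     /* the release is freed on error only */
		free(rel);
	return ret;
}

int main(void) { return draw() ? EXIT_FAILURE : EXIT_SUCCESS; }
```

The key invariant, mirrored in the `out_free_drawable` labels above, is that the release itself is freed only when `ret` is non-zero; the cleanup of intermediate objects runs unconditionally.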
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index aacb791464a3..7e96f4f11738 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
| @@ -42,6 +42,9 @@ | |||
| 42 | #include <ttm/ttm_placement.h> | 42 | #include <ttm/ttm_placement.h> |
| 43 | #include <ttm/ttm_module.h> | 43 | #include <ttm/ttm_module.h> |
| 44 | 44 | ||
| 45 | /* just for ttm_validate_buffer */ | ||
| 46 | #include <ttm/ttm_execbuf_util.h> | ||
| 47 | |||
| 45 | #include <drm/qxl_drm.h> | 48 | #include <drm/qxl_drm.h> |
| 46 | #include "qxl_dev.h" | 49 | #include "qxl_dev.h" |
| 47 | 50 | ||
| @@ -118,9 +121,9 @@ struct qxl_bo { | |||
| 118 | uint32_t surface_id; | 121 | uint32_t surface_id; |
| 119 | struct qxl_fence fence; /* per bo fence - list of releases */ | 122 | struct qxl_fence fence; /* per bo fence - list of releases */ |
| 120 | struct qxl_release *surf_create; | 123 | struct qxl_release *surf_create; |
| 121 | atomic_t reserve_count; | ||
| 122 | }; | 124 | }; |
| 123 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) | 125 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) |
| 126 | #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) | ||
| 124 | 127 | ||
| 125 | struct qxl_gem { | 128 | struct qxl_gem { |
| 126 | struct mutex mutex; | 129 | struct mutex mutex; |
| @@ -128,12 +131,7 @@ struct qxl_gem { | |||
| 128 | }; | 131 | }; |
| 129 | 132 | ||
| 130 | struct qxl_bo_list { | 133 | struct qxl_bo_list { |
| 131 | struct list_head lhead; | 134 | struct ttm_validate_buffer tv; |
| 132 | struct qxl_bo *bo; | ||
| 133 | }; | ||
| 134 | |||
| 135 | struct qxl_reloc_list { | ||
| 136 | struct list_head bos; | ||
| 137 | }; | 135 | }; |
| 138 | 136 | ||
| 139 | struct qxl_crtc { | 137 | struct qxl_crtc { |
| @@ -195,10 +193,20 @@ enum { | |||
| 195 | struct qxl_release { | 193 | struct qxl_release { |
| 196 | int id; | 194 | int id; |
| 197 | int type; | 195 | int type; |
| 198 | int bo_count; | ||
| 199 | uint32_t release_offset; | 196 | uint32_t release_offset; |
| 200 | uint32_t surface_release_id; | 197 | uint32_t surface_release_id; |
| 201 | struct qxl_bo *bos[QXL_MAX_RES]; | 198 | struct ww_acquire_ctx ticket; |
| 199 | struct list_head bos; | ||
| 200 | }; | ||
| 201 | |||
| 202 | struct qxl_drm_chunk { | ||
| 203 | struct list_head head; | ||
| 204 | struct qxl_bo *bo; | ||
| 205 | }; | ||
| 206 | |||
| 207 | struct qxl_drm_image { | ||
| 208 | struct qxl_bo *bo; | ||
| 209 | struct list_head chunk_list; | ||
| 202 | }; | 210 | }; |
| 203 | 211 | ||
| 204 | struct qxl_fb_image { | 212 | struct qxl_fb_image { |
| @@ -314,6 +322,7 @@ struct qxl_device { | |||
| 314 | struct workqueue_struct *gc_queue; | 322 | struct workqueue_struct *gc_queue; |
| 315 | struct work_struct gc_work; | 323 | struct work_struct gc_work; |
| 316 | 324 | ||
| 325 | struct work_struct fb_work; | ||
| 317 | }; | 326 | }; |
| 318 | 327 | ||
| 319 | /* forward declaration for QXL_INFO_IO */ | 328 | /* forward declaration for QXL_INFO_IO */ |
| @@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma); | |||
| 433 | 442 | ||
| 434 | /* qxl image */ | 443 | /* qxl image */ |
| 435 | 444 | ||
| 436 | int qxl_image_create(struct qxl_device *qdev, | 445 | int qxl_image_init(struct qxl_device *qdev, |
| 437 | struct qxl_release *release, | 446 | struct qxl_release *release, |
| 438 | struct qxl_bo **image_bo, | 447 | struct qxl_drm_image *dimage, |
| 439 | const uint8_t *data, | 448 | const uint8_t *data, |
| 440 | int x, int y, int width, int height, | 449 | int x, int y, int width, int height, |
| 441 | int depth, int stride); | 450 | int depth, int stride); |
| 451 | int | ||
| 452 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
| 453 | struct qxl_release *release, | ||
| 454 | struct qxl_drm_image **image_ptr, | ||
| 455 | int height, int stride); | ||
| 456 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); | ||
| 457 | |||
| 442 | void qxl_update_screen(struct qxl_device *qxl); | 458 | void qxl_update_screen(struct qxl_device *qxl); |
| 443 | 459 | ||
| 444 | /* qxl io operations (qxl_cmd.c) */ | 460 | /* qxl io operations (qxl_cmd.c) */ |
| @@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible | |||
| 459 | void qxl_io_flush_release(struct qxl_device *qdev); | 475 | void qxl_io_flush_release(struct qxl_device *qdev); |
| 460 | void qxl_io_flush_surfaces(struct qxl_device *qdev); | 476 | void qxl_io_flush_surfaces(struct qxl_device *qdev); |
| 461 | 477 | ||
| 462 | int qxl_release_reserve(struct qxl_device *qdev, | ||
| 463 | struct qxl_release *release, bool no_wait); | ||
| 464 | void qxl_release_unreserve(struct qxl_device *qdev, | ||
| 465 | struct qxl_release *release); | ||
| 466 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | 478 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, |
| 467 | struct qxl_release *release); | 479 | struct qxl_release *release); |
| 468 | void qxl_release_unmap(struct qxl_device *qdev, | 480 | void qxl_release_unmap(struct qxl_device *qdev, |
| 469 | struct qxl_release *release, | 481 | struct qxl_release *release, |
| 470 | union qxl_release_info *info); | 482 | union qxl_release_info *info); |
| 471 | /* | 483 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo); |
| 472 | * qxl_bo_add_resource. | 484 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr); |
| 473 | * | 485 | void qxl_release_backoff_reserve_list(struct qxl_release *release); |
| 474 | */ | 486 | void qxl_release_fence_buffer_objects(struct qxl_release *release); |
| 475 | void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); | ||
| 476 | 487 | ||
| 477 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 488 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
| 478 | enum qxl_surface_cmd_type surface_cmd_type, | 489 | enum qxl_surface_cmd_type surface_cmd_type, |
| @@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | |||
| 481 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | 492 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, |
| 482 | int type, struct qxl_release **release, | 493 | int type, struct qxl_release **release, |
| 483 | struct qxl_bo **rbo); | 494 | struct qxl_bo **rbo); |
| 484 | int qxl_fence_releaseable(struct qxl_device *qdev, | 495 | |
| 485 | struct qxl_release *release); | ||
| 486 | int | 496 | int |
| 487 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 497 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
| 488 | uint32_t type, bool interruptible); | 498 | uint32_t type, bool interruptible); |
| 489 | int | 499 | int |
| 490 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 500 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
| 491 | uint32_t type, bool interruptible); | 501 | uint32_t type, bool interruptible); |
| 492 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 502 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
| 503 | struct qxl_release *release, | ||
| 504 | unsigned long size, | ||
| 493 | struct qxl_bo **_bo); | 505 | struct qxl_bo **_bo); |
| 494 | /* qxl drawing commands */ | 506 | /* qxl drawing commands */ |
| 495 | 507 | ||
| @@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
| 510 | u32 sx, u32 sy, | 522 | u32 sx, u32 sy, |
| 511 | u32 dx, u32 dy); | 523 | u32 dx, u32 dy); |
| 512 | 524 | ||
| 513 | uint64_t | ||
| 514 | qxl_release_alloc(struct qxl_device *qdev, int type, | ||
| 515 | struct qxl_release **ret); | ||
| 516 | |||
| 517 | void qxl_release_free(struct qxl_device *qdev, | 525 | void qxl_release_free(struct qxl_device *qdev, |
| 518 | struct qxl_release *release); | 526 | struct qxl_release *release); |
| 519 | void qxl_release_add_res(struct qxl_device *qdev, | 527 | |
| 520 | struct qxl_release *release, | ||
| 521 | struct qxl_bo *bo); | ||
| 522 | /* used by qxl_debugfs_release */ | 528 | /* used by qxl_debugfs_release */ |
| 523 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 529 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
| 524 | uint64_t id); | 530 | uint64_t id); |
| @@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein | |||
| 561 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); | 567 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); |
| 562 | 568 | ||
| 563 | /* qxl_fence.c */ | 569 | /* qxl_fence.c */ |
| 564 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); | 570 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id); |
| 565 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); | 571 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); |
| 566 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); | 572 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); |
| 567 | void qxl_fence_fini(struct qxl_fence *qfence); | 573 | void qxl_fence_fini(struct qxl_fence *qfence); |
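In plain C terms, these header changes trade the fixed `bos[QXL_MAX_RES]` array and the ad-hoc reloc list for lists owned by the release and the image. The following is a type-level sketch of the resulting ownership, using plain next pointers where the driver uses `list_head` and `ttm_validate_buffer`; all names are illustrative, not the driver's.

```c
struct bo { int id; };                  /* placeholder for struct qxl_bo */

struct bo_entry {                       /* role of qxl_bo_list's tv entry */
	struct bo *bo;
	struct bo_entry *next;
};

struct release_sketch {                 /* role of struct qxl_release */
	int id, type;
	struct bo_entry *bos;           /* reserved and fenced as one group */
};

struct image_sketch {                   /* role of struct qxl_drm_image */
	struct bo *bo;                  /* the image descriptor bo */
	struct bo_entry *chunks;        /* one bo per data chunk */
};
```

Keeping the validation list inside the release is what lets `qxl_release_reserve_list()` and `qxl_release_fence_buffer_objects()` treat everything a command touches as one atomic group.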
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 76f39d88d684..88722f233430 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
| @@ -37,12 +37,29 @@ | |||
| 37 | 37 | ||
| 38 | #define QXL_DIRTY_DELAY (HZ / 30) | 38 | #define QXL_DIRTY_DELAY (HZ / 30) |
| 39 | 39 | ||
| 40 | #define QXL_FB_OP_FILLRECT 1 | ||
| 41 | #define QXL_FB_OP_COPYAREA 2 | ||
| 42 | #define QXL_FB_OP_IMAGEBLIT 3 | ||
| 43 | |||
| 44 | struct qxl_fb_op { | ||
| 45 | struct list_head head; | ||
| 46 | int op_type; | ||
| 47 | union { | ||
| 48 | struct fb_fillrect fr; | ||
| 49 | struct fb_copyarea ca; | ||
| 50 | struct fb_image ib; | ||
| 51 | } op; | ||
| 52 | void *img_data; | ||
| 53 | }; | ||
| 54 | |||
| 40 | struct qxl_fbdev { | 55 | struct qxl_fbdev { |
| 41 | struct drm_fb_helper helper; | 56 | struct drm_fb_helper helper; |
| 42 | struct qxl_framebuffer qfb; | 57 | struct qxl_framebuffer qfb; |
| 43 | struct list_head fbdev_list; | 58 | struct list_head fbdev_list; |
| 44 | struct qxl_device *qdev; | 59 | struct qxl_device *qdev; |
| 45 | 60 | ||
| 61 | spinlock_t delayed_ops_lock; | ||
| 62 | struct list_head delayed_ops; | ||
| 46 | void *shadow; | 63 | void *shadow; |
| 47 | int size; | 64 | int size; |
| 48 | 65 | ||
| @@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = { | |||
| 164 | .deferred_io = qxl_deferred_io, | 181 | .deferred_io = qxl_deferred_io, |
| 165 | }; | 182 | }; |
| 166 | 183 | ||
| 167 | static void qxl_fb_fillrect(struct fb_info *info, | 184 | static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev, |
| 168 | const struct fb_fillrect *fb_rect) | 185 | const struct fb_fillrect *fb_rect) |
| 186 | { | ||
| 187 | struct qxl_fb_op *op; | ||
| 188 | unsigned long flags; | ||
| 189 | |||
| 190 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
| 191 | if (!op) | ||
| 192 | return; | ||
| 193 | |||
| 194 | op->op.fr = *fb_rect; | ||
| 195 | op->img_data = NULL; | ||
| 196 | op->op_type = QXL_FB_OP_FILLRECT; | ||
| 197 | |||
| 198 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 199 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
| 200 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 201 | } | ||
| 202 | |||
| 203 | static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev, | ||
| 204 | const struct fb_copyarea *fb_copy) | ||
| 205 | { | ||
| 206 | struct qxl_fb_op *op; | ||
| 207 | unsigned long flags; | ||
| 208 | |||
| 209 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
| 210 | if (!op) | ||
| 211 | return; | ||
| 212 | |||
| 213 | op->op.ca = *fb_copy; | ||
| 214 | op->img_data = NULL; | ||
| 215 | op->op_type = QXL_FB_OP_COPYAREA; | ||
| 216 | |||
| 217 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 218 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
| 219 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 220 | } | ||
| 221 | |||
| 222 | static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev, | ||
| 223 | const struct fb_image *fb_image) | ||
| 224 | { | ||
| 225 | struct qxl_fb_op *op; | ||
| 226 | unsigned long flags; | ||
| 227 | uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1); | ||
| 228 | |||
| 229 | op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN); | ||
| 230 | if (!op) | ||
| 231 | return; | ||
| 232 | |||
| 233 | op->op.ib = *fb_image; | ||
| 234 | op->img_data = (void *)(op + 1); | ||
| 235 | op->op_type = QXL_FB_OP_IMAGEBLIT; | ||
| 236 | |||
| 237 | memcpy(op->img_data, fb_image->data, size); | ||
| 238 | |||
| 239 | op->op.ib.data = op->img_data; | ||
| 240 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 241 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
| 242 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 243 | } | ||
| 244 | |||
| 245 | static void qxl_fb_fillrect_internal(struct fb_info *info, | ||
| 246 | const struct fb_fillrect *fb_rect) | ||
| 169 | { | 247 | { |
| 170 | struct qxl_fbdev *qfbdev = info->par; | 248 | struct qxl_fbdev *qfbdev = info->par; |
| 171 | struct qxl_device *qdev = qfbdev->qdev; | 249 | struct qxl_device *qdev = qfbdev->qdev; |
| @@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info, | |||
| 203 | qxl_draw_fill_rec.rect = rect; | 281 | qxl_draw_fill_rec.rect = rect; |
| 204 | qxl_draw_fill_rec.color = color; | 282 | qxl_draw_fill_rec.color = color; |
| 205 | qxl_draw_fill_rec.rop = rop; | 283 | qxl_draw_fill_rec.rop = rop; |
| 284 | |||
| 285 | qxl_draw_fill(&qxl_draw_fill_rec); | ||
| 286 | } | ||
| 287 | |||
| 288 | static void qxl_fb_fillrect(struct fb_info *info, | ||
| 289 | const struct fb_fillrect *fb_rect) | ||
| 290 | { | ||
| 291 | struct qxl_fbdev *qfbdev = info->par; | ||
| 292 | struct qxl_device *qdev = qfbdev->qdev; | ||
| 293 | |||
| 206 | if (!drm_can_sleep()) { | 294 | if (!drm_can_sleep()) { |
| 207 | qxl_io_log(qdev, | 295 | qxl_fb_delayed_fillrect(qfbdev, fb_rect); |
| 208 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | 296 | schedule_work(&qdev->fb_work); |
| 209 | __func__); | ||
| 210 | return; | 297 | return; |
| 211 | } | 298 | } |
| 212 | qxl_draw_fill(&qxl_draw_fill_rec); | 299 | /* make sure any previous work is done */ |
| 300 | flush_work(&qdev->fb_work); | ||
| 301 | qxl_fb_fillrect_internal(info, fb_rect); | ||
| 213 | } | 302 | } |
| 214 | 303 | ||
| 215 | static void qxl_fb_copyarea(struct fb_info *info, | 304 | static void qxl_fb_copyarea_internal(struct fb_info *info, |
| 216 | const struct fb_copyarea *region) | 305 | const struct fb_copyarea *region) |
| 217 | { | 306 | { |
| 218 | struct qxl_fbdev *qfbdev = info->par; | 307 | struct qxl_fbdev *qfbdev = info->par; |
| 219 | 308 | ||
| @@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info, | |||
| 223 | region->dx, region->dy); | 312 | region->dx, region->dy); |
| 224 | } | 313 | } |
| 225 | 314 | ||
| 315 | static void qxl_fb_copyarea(struct fb_info *info, | ||
| 316 | const struct fb_copyarea *region) | ||
| 317 | { | ||
| 318 | struct qxl_fbdev *qfbdev = info->par; | ||
| 319 | struct qxl_device *qdev = qfbdev->qdev; | ||
| 320 | |||
| 321 | if (!drm_can_sleep()) { | ||
| 322 | qxl_fb_delayed_copyarea(qfbdev, region); | ||
| 323 | schedule_work(&qdev->fb_work); | ||
| 324 | return; | ||
| 325 | } | ||
| 326 | /* make sure any previous work is done */ | ||
| 327 | flush_work(&qdev->fb_work); | ||
| 328 | qxl_fb_copyarea_internal(info, region); | ||
| 329 | } | ||
| 330 | |||
| 226 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) | 331 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) |
| 227 | { | 332 | { |
| 228 | qxl_draw_opaque_fb(qxl_fb_image, 0); | 333 | qxl_draw_opaque_fb(qxl_fb_image, 0); |
| 229 | } | 334 | } |
| 230 | 335 | ||
| 336 | static void qxl_fb_imageblit_internal(struct fb_info *info, | ||
| 337 | const struct fb_image *image) | ||
| 338 | { | ||
| 339 | struct qxl_fbdev *qfbdev = info->par; | ||
| 340 | struct qxl_fb_image qxl_fb_image; | ||
| 341 | |||
| 342 | /* ensure proper order of rendering operations - TODO: must do this | ||

| 343 | * for everything. */ | ||
| 344 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | ||
| 345 | qxl_fb_imageblit_safe(&qxl_fb_image); | ||
| 346 | } | ||
| 347 | |||
| 231 | static void qxl_fb_imageblit(struct fb_info *info, | 348 | static void qxl_fb_imageblit(struct fb_info *info, |
| 232 | const struct fb_image *image) | 349 | const struct fb_image *image) |
| 233 | { | 350 | { |
| 234 | struct qxl_fbdev *qfbdev = info->par; | 351 | struct qxl_fbdev *qfbdev = info->par; |
| 235 | struct qxl_device *qdev = qfbdev->qdev; | 352 | struct qxl_device *qdev = qfbdev->qdev; |
| 236 | struct qxl_fb_image qxl_fb_image; | ||
| 237 | 353 | ||
| 238 | if (!drm_can_sleep()) { | 354 | if (!drm_can_sleep()) { |
| 239 | /* we cannot do any ttm_bo allocation since that will fail on | 355 | qxl_fb_delayed_imageblit(qfbdev, image); |
| 240 | * ioremap_wc..__get_vm_area_node, so queue the work item | 356 | schedule_work(&qdev->fb_work); |
| 241 | * instead This can happen from printk inside an interrupt | ||
| 242 | * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */ | ||
| 243 | qxl_io_log(qdev, | ||
| 244 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | ||
| 245 | __func__); | ||
| 246 | return; | 357 | return; |
| 247 | } | 358 | } |
| 359 | /* make sure any previous work is done */ | ||
| 360 | flush_work(&qdev->fb_work); | ||
| 361 | qxl_fb_imageblit_internal(info, image); | ||
| 362 | } | ||
| 248 | 363 | ||
| 249 | /* ensure proper order of rendering operations - TODO: must do this | 364 | static void qxl_fb_work(struct work_struct *work) |
| 250 | * for everything. */ | 365 | { |
| 251 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | 366 | struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); |
| 252 | qxl_fb_imageblit_safe(&qxl_fb_image); | 367 | unsigned long flags; |
| 368 | struct qxl_fb_op *entry, *tmp; | ||
| 369 | struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev; | ||
| 370 | |||
| 371 | /* since the irq context just adds entries to the end of the | ||
| 372 | list, dropping the lock should be fine, as entry isn't modified | ||
| 373 | in the operation code */ | ||
| 374 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 375 | list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) { | ||
| 376 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 377 | switch (entry->op_type) { | ||
| 378 | case QXL_FB_OP_FILLRECT: | ||
| 379 | qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr); | ||
| 380 | break; | ||
| 381 | case QXL_FB_OP_COPYAREA: | ||
| 382 | qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca); | ||
| 383 | break; | ||
| 384 | case QXL_FB_OP_IMAGEBLIT: | ||
| 385 | qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib); | ||
| 386 | break; | ||
| 387 | } | ||
| 388 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 389 | list_del(&entry->head); | ||
| 390 | kfree(entry); | ||
| 391 | } | ||
| 392 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 253 | } | 393 | } |
| 254 | 394 | ||
| 255 | int qxl_fb_init(struct qxl_device *qdev) | 395 | int qxl_fb_init(struct qxl_device *qdev) |
| 256 | { | 396 | { |
| 397 | INIT_WORK(&qdev->fb_work, qxl_fb_work); | ||
| 257 | return 0; | 398 | return 0; |
| 258 | } | 399 | } |
| 259 | 400 | ||
| @@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev) | |||
| 536 | qfbdev->qdev = qdev; | 677 | qfbdev->qdev = qdev; |
| 537 | qdev->mode_info.qfbdev = qfbdev; | 678 | qdev->mode_info.qfbdev = qfbdev; |
| 538 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; | 679 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; |
| 539 | 680 | spin_lock_init(&qfbdev->delayed_ops_lock); | |
| 681 | INIT_LIST_HEAD(&qfbdev->delayed_ops); | ||
| 540 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, | 682 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, |
| 541 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, | 683 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, |
| 542 | QXLFB_CONN_LIMIT); | 684 | QXLFB_CONN_LIMIT); |
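Taken together, the qxl_fb.c hunks split each fbdev entry point into an `_internal` worker and a wrapper: when the caller cannot sleep, the op (and, for imageblit, a copy of its pixel data) is queued under `delayed_ops_lock` and `fb_work` is scheduled; otherwise pending work is flushed and the op runs inline. Below is a compilable userspace sketch of that producer/consumer shape, with a pthread mutex standing in for the driver's spinlock; the names are illustrative only.

```c
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

enum op_type { OP_FILLRECT = 1, OP_COPYAREA, OP_IMAGEBLIT };

struct op {
	struct op *next;
	enum op_type type;
	char payload[32];                /* payload is copied at queue time */
};

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static struct op *head;
static struct op **tail = &head;

/* called from a context that must not sleep: copy, link, schedule work */
static void queue_op(enum op_type type, const void *data, size_t len)
{
	struct op *op = calloc(1, sizeof(*op));

	if (!op)
		return;                  /* dropping the op is the fallback */
	op->type = type;
	memcpy(op->payload, data, len < sizeof(op->payload) ? len : sizeof(op->payload));

	pthread_mutex_lock(&ops_lock);
	*tail = op;
	tail = &op->next;
	pthread_mutex_unlock(&ops_lock);
}

static void run_op(struct op *op) { (void)op; /* would call the _internal helper */ }

/* the worker: drop the lock around each op, since producers only append */
static void drain_ops(void)
{
	pthread_mutex_lock(&ops_lock);
	while (head) {
		struct op *op = head;

		pthread_mutex_unlock(&ops_lock);
		run_op(op);              /* entry is never modified by producers */
		pthread_mutex_lock(&ops_lock);
		head = op->next;
		if (!head)
			tail = &head;
		free(op);
	}
	pthread_mutex_unlock(&ops_lock);
}

int main(void)
{
	int x = 42;

	queue_op(OP_FILLRECT, &x, sizeof(x));
	drain_ops();
	return 0;
}
```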
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c index 63c6715ad385..ae59e91cfb9a 100644 --- a/drivers/gpu/drm/qxl/qxl_fence.c +++ b/drivers/gpu/drm/qxl/qxl_fence.c | |||
| @@ -49,17 +49,11 @@ | |||
| 49 | 49 | ||
| 50 | For some reason every so often qxl hw fails to release, things go wrong. | 50 | For some reason every so often qxl hw fails to release, things go wrong. |
| 51 | */ | 51 | */ |
| 52 | 52 | /* must be called with the fence lock held */ | |
| 53 | 53 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id) | |
| 54 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id) | ||
| 55 | { | 54 | { |
| 56 | struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); | ||
| 57 | |||
| 58 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
| 59 | radix_tree_insert(&qfence->tree, rel_id, qfence); | 55 | radix_tree_insert(&qfence->tree, rel_id, qfence); |
| 60 | qfence->num_active_releases++; | 56 | qfence->num_active_releases++; |
| 61 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
| 62 | return 0; | ||
| 63 | } | 57 | } |
| 64 | 58 | ||
| 65 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) | 59 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) |
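The fence change is purely a move of the locking contract: `qxl_fence_add_release_locked()` no longer takes `bo->tbo.bdev->fence_lock` itself, so a caller fencing a whole reservation list can hold the lock once across many insertions. A tiny sketch of that contract follows, with a pthread mutex standing in for the TTM fence lock and the radix-tree insert elided; the names are hypothetical.

```c
#include <pthread.h>

struct fence { int num_active; };     /* radix tree omitted for brevity */

/* must be called with the shared fence lock held, like the kernel helper */
static void fence_add_release_locked(struct fence *f, int rel_id)
{
	(void)rel_id;                 /* the driver also inserts rel_id into a tree */
	f->num_active++;
}

/* callers batch insertions under a single lock acquisition */
static void fence_all(struct fence **fences, int n, int rel_id,
		      pthread_mutex_t *fence_lock)
{
	pthread_mutex_lock(fence_lock);
	for (int i = 0; i < n; i++)
		fence_add_release_locked(fences[i], rel_id);
	pthread_mutex_unlock(fence_lock);
}
```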
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a235693aabba..25e1777fb0a2 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c | |||
| @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, | |||
| 55 | /* At least align on page size */ | 55 | /* At least align on page size */ |
| 56 | if (alignment < PAGE_SIZE) | 56 | if (alignment < PAGE_SIZE) |
| 57 | alignment = PAGE_SIZE; | 57 | alignment = PAGE_SIZE; |
| 58 | r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); | 58 | r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); |
| 59 | if (r) { | 59 | if (r) { |
| 60 | if (r != -ERESTARTSYS) | 60 | if (r != -ERESTARTSYS) |
| 61 | DRM_ERROR( | 61 | DRM_ERROR( |
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index cf856206996b..7fbcc35e8ad3 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c | |||
| @@ -30,31 +30,100 @@ | |||
| 30 | #include "qxl_object.h" | 30 | #include "qxl_object.h" |
| 31 | 31 | ||
| 32 | static int | 32 | static int |
| 33 | qxl_image_create_helper(struct qxl_device *qdev, | 33 | qxl_allocate_chunk(struct qxl_device *qdev, |
| 34 | struct qxl_release *release, | ||
| 35 | struct qxl_drm_image *image, | ||
| 36 | unsigned int chunk_size) | ||
| 37 | { | ||
| 38 | struct qxl_drm_chunk *chunk; | ||
| 39 | int ret; | ||
| 40 | |||
| 41 | chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); | ||
| 42 | if (!chunk) | ||
| 43 | return -ENOMEM; | ||
| 44 | |||
| 45 | ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); | ||
| 46 | if (ret) { | ||
| 47 | kfree(chunk); | ||
| 48 | return ret; | ||
| 49 | } | ||
| 50 | |||
| 51 | list_add_tail(&chunk->head, &image->chunk_list); | ||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | int | ||
| 56 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
| 34 | struct qxl_release *release, | 57 | struct qxl_release *release, |
| 35 | struct qxl_bo **image_bo, | 58 | struct qxl_drm_image **image_ptr, |
| 36 | const uint8_t *data, | 59 | int height, int stride) |
| 37 | int width, int height, | 60 | { |
| 38 | int depth, unsigned int hash, | 61 | struct qxl_drm_image *image; |
| 39 | int stride) | 62 | int ret; |
| 63 | |||
| 64 | image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL); | ||
| 65 | if (!image) | ||
| 66 | return -ENOMEM; | ||
| 67 | |||
| 68 | INIT_LIST_HEAD(&image->chunk_list); | ||
| 69 | |||
| 70 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); | ||
| 71 | if (ret) { | ||
| 72 | kfree(image); | ||
| 73 | return ret; | ||
| 74 | } | ||
| 75 | |||
| 76 | ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); | ||
| 77 | if (ret) { | ||
| 78 | qxl_bo_unref(&image->bo); | ||
| 79 | kfree(image); | ||
| 80 | return ret; | ||
| 81 | } | ||
| 82 | *image_ptr = image; | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | |||
| 86 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) | ||
| 40 | { | 87 | { |
| 88 | struct qxl_drm_chunk *chunk, *tmp; | ||
| 89 | |||
| 90 | list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { | ||
| 91 | qxl_bo_unref(&chunk->bo); | ||
| 92 | kfree(chunk); | ||
| 93 | } | ||
| 94 | |||
| 95 | qxl_bo_unref(&dimage->bo); | ||
| 96 | kfree(dimage); | ||
| 97 | } | ||
| 98 | |||
| 99 | static int | ||
| 100 | qxl_image_init_helper(struct qxl_device *qdev, | ||
| 101 | struct qxl_release *release, | ||
| 102 | struct qxl_drm_image *dimage, | ||
| 103 | const uint8_t *data, | ||
| 104 | int width, int height, | ||
| 105 | int depth, unsigned int hash, | ||
| 106 | int stride) | ||
| 107 | { | ||
| 108 | struct qxl_drm_chunk *drv_chunk; | ||
| 41 | struct qxl_image *image; | 109 | struct qxl_image *image; |
| 42 | struct qxl_data_chunk *chunk; | 110 | struct qxl_data_chunk *chunk; |
| 43 | int i; | 111 | int i; |
| 44 | int chunk_stride; | 112 | int chunk_stride; |
| 45 | int linesize = width * depth / 8; | 113 | int linesize = width * depth / 8; |
| 46 | struct qxl_bo *chunk_bo; | 114 | struct qxl_bo *chunk_bo, *image_bo; |
| 47 | int ret; | ||
| 48 | void *ptr; | 115 | void *ptr; |
| 49 | /* Chunk */ | 116 | /* Chunk */ |
| 50 | /* FIXME: Check integer overflow */ | 117 | /* FIXME: Check integer overflow */ |
| 51 | /* TODO: variable number of chunks */ | 118 | /* TODO: variable number of chunks */ |
| 119 | |||
| 120 | drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head); | ||
| 121 | |||
| 122 | chunk_bo = drv_chunk->bo; | ||
| 52 | chunk_stride = stride; /* TODO: should use linesize, but it renders | 123 | chunk_stride = stride; /* TODO: should use linesize, but it renders |
| 53 | wrong (check the bitmaps are sent correctly | 124 | wrong (check the bitmaps are sent correctly |
| 54 | first) */ | 125 | first) */ |
| 55 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, | 126 | |
| 56 | &chunk_bo); | ||
| 57 | |||
| 58 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); | 127 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); |
| 59 | chunk = ptr; | 128 | chunk = ptr; |
| 60 | chunk->data_size = height * chunk_stride; | 129 | chunk->data_size = height * chunk_stride; |
| @@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
| 102 | while (remain > 0) { | 171 | while (remain > 0) { |
| 103 | page_base = out_offset & PAGE_MASK; | 172 | page_base = out_offset & PAGE_MASK; |
| 104 | page_offset = offset_in_page(out_offset); | 173 | page_offset = offset_in_page(out_offset); |
| 105 | |||
| 106 | size = min((int)(PAGE_SIZE - page_offset), remain); | 174 | size = min((int)(PAGE_SIZE - page_offset), remain); |
| 107 | 175 | ||
| 108 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); | 176 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); |
| @@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
| 116 | } | 184 | } |
| 117 | } | 185 | } |
| 118 | } | 186 | } |
| 119 | |||
| 120 | |||
| 121 | qxl_bo_kunmap(chunk_bo); | 187 | qxl_bo_kunmap(chunk_bo); |
| 122 | 188 | ||
| 123 | /* Image */ | 189 | image_bo = dimage->bo; |
| 124 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); | 190 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); |
| 125 | |||
| 126 | ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0); | ||
| 127 | image = ptr; | 191 | image = ptr; |
| 128 | 192 | ||
| 129 | image->descriptor.id = 0; | 193 | image->descriptor.id = 0; |
| @@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
| 154 | image->u.bitmap.stride = chunk_stride; | 218 | image->u.bitmap.stride = chunk_stride; |
| 155 | image->u.bitmap.palette = 0; | 219 | image->u.bitmap.palette = 0; |
| 156 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); | 220 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); |
| 157 | qxl_release_add_res(qdev, release, chunk_bo); | ||
| 158 | qxl_bo_unreserve(chunk_bo); | ||
| 159 | qxl_bo_unref(&chunk_bo); | ||
| 160 | 221 | ||
| 161 | qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); | 222 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); |
| 162 | 223 | ||
| 163 | return 0; | 224 | return 0; |
| 164 | } | 225 | } |
| 165 | 226 | ||
| 166 | int qxl_image_create(struct qxl_device *qdev, | 227 | int qxl_image_init(struct qxl_device *qdev, |
| 167 | struct qxl_release *release, | 228 | struct qxl_release *release, |
| 168 | struct qxl_bo **image_bo, | 229 | struct qxl_drm_image *dimage, |
| 169 | const uint8_t *data, | 230 | const uint8_t *data, |
| 170 | int x, int y, int width, int height, | 231 | int x, int y, int width, int height, |
| 171 | int depth, int stride) | 232 | int depth, int stride) |
| 172 | { | 233 | { |
| 173 | data += y * stride + x * (depth / 8); | 234 | data += y * stride + x * (depth / 8); |
| 174 | return qxl_image_create_helper(qdev, release, image_bo, data, | 235 | return qxl_image_init_helper(qdev, release, dimage, data, |
| 175 | width, height, depth, 0, stride); | 236 | width, height, depth, 0, stride); |
| 176 | } | 237 | } |
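qxl_image.c now separates allocation from initialization: `qxl_image_alloc_objects()` builds the descriptor bo plus a chunk list before anything is reserved, and `qxl_image_init()` later only fills memory that already exists, so it cannot fail on allocation. A rough userspace analogue of that split and its matching teardown, with illustrative names only (the real code allocates a single chunk today, per the TODO above):

```c
#include <stdlib.h>

struct chunk { struct chunk *next; void *bo; };
struct image { void *bo; struct chunk *chunks; };

/* allocate the descriptor bo and one data chunk before any init work */
static int image_alloc(struct image **out, size_t data_size)
{
	struct image *img = calloc(1, sizeof(*img));
	struct chunk *c;

	if (!img)
		return -1;
	img->bo = malloc(64);            /* descriptor bo */
	c = calloc(1, sizeof(*c));
	if (!img->bo || !c)
		goto fail;
	c->bo = malloc(data_size);       /* single data chunk for now */
	if (!c->bo)
		goto fail;
	c->next = img->chunks;
	img->chunks = c;
	*out = img;
	return 0;
fail:
	free(c);
	free(img->bo);
	free(img);
	return -1;
}

/* mirrors qxl_image_free_objects(): walk the chunk list, then the bo */
static void image_free(struct image *img)
{
	while (img->chunks) {
		struct chunk *c = img->chunks;

		img->chunks = c->next;
		free(c->bo);
		free(c);
	}
	free(img->bo);
	free(img);
}
```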
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 27f45e49250d..6de33563d6f1 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c | |||
| @@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data, | |||
| 68 | &qxl_map->offset); | 68 | &qxl_map->offset); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | struct qxl_reloc_info { | ||
| 72 | int type; | ||
| 73 | struct qxl_bo *dst_bo; | ||
| 74 | uint32_t dst_offset; | ||
| 75 | struct qxl_bo *src_bo; | ||
| 76 | int src_offset; | ||
| 77 | }; | ||
| 78 | |||
| 71 | /* | 79 | /* |
| 72 | * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's | 80 | * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's |
| 73 | * are on vram). | 81 | * are on vram). |
| 74 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) | 82 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) |
| 75 | */ | 83 | */ |
| 76 | static void | 84 | static void |
| 77 | apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 85 | apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
| 78 | struct qxl_bo *src, uint64_t src_off) | ||
| 79 | { | 86 | { |
| 80 | void *reloc_page; | 87 | void *reloc_page; |
| 81 | 88 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); | |
| 82 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 89 | *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, |
| 83 | *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, | 90 | info->src_bo, |
| 84 | src, src_off); | 91 | info->src_offset); |
| 85 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 92 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
| 86 | } | 93 | } |
| 87 | 94 | ||
| 88 | static void | 95 | static void |
| 89 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 96 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
| 90 | struct qxl_bo *src) | ||
| 91 | { | 97 | { |
| 92 | uint32_t id = 0; | 98 | uint32_t id = 0; |
| 93 | void *reloc_page; | 99 | void *reloc_page; |
| 94 | 100 | ||
| 95 | if (src && !src->is_primary) | 101 | if (info->src_bo && !info->src_bo->is_primary) |
| 96 | id = src->surface_id; | 102 | id = info->src_bo->surface_id; |
| 97 | 103 | ||
| 98 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 104 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); |
| 99 | *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; | 105 | *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; |
| 100 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 106 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
| 101 | } | 107 | } |
| 102 | 108 | ||
| 103 | /* return holding the reference to this object */ | 109 | /* return holding the reference to this object */ |
| 104 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | 110 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, |
| 105 | struct drm_file *file_priv, uint64_t handle, | 111 | struct drm_file *file_priv, uint64_t handle, |
| 106 | struct qxl_reloc_list *reloc_list) | 112 | struct qxl_release *release) |
| 107 | { | 113 | { |
| 108 | struct drm_gem_object *gobj; | 114 | struct drm_gem_object *gobj; |
| 109 | struct qxl_bo *qobj; | 115 | struct qxl_bo *qobj; |
| 110 | int ret; | 116 | int ret; |
| 111 | 117 | ||
| 112 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); | 118 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); |
| 113 | if (!gobj) { | 119 | if (!gobj) |
| 114 | DRM_ERROR("bad bo handle %lld\n", handle); | ||
| 115 | return NULL; | 120 | return NULL; |
| 116 | } | 121 | |
| 117 | qobj = gem_to_qxl_bo(gobj); | 122 | qobj = gem_to_qxl_bo(gobj); |
| 118 | 123 | ||
| 119 | ret = qxl_bo_list_add(reloc_list, qobj); | 124 | ret = qxl_release_list_add(release, qobj); |
| 120 | if (ret) | 125 | if (ret) |
| 121 | return NULL; | 126 | return NULL; |
| 122 | 127 | ||
| @@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | |||
| 129 | * However, the command as passed from user space must *not* contain the initial | 134 | * However, the command as passed from user space must *not* contain the initial |
| 130 | * QXLReleaseInfo struct (first XXX bytes) | 135 | * QXLReleaseInfo struct (first XXX bytes) |
| 131 | */ | 136 | */ |
| 132 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | 137 | static int qxl_process_single_command(struct qxl_device *qdev, |
| 133 | struct drm_file *file_priv) | 138 | struct drm_qxl_command *cmd, |
| 139 | struct drm_file *file_priv) | ||
| 134 | { | 140 | { |
| 135 | struct qxl_device *qdev = dev->dev_private; | 141 | struct qxl_reloc_info *reloc_info; |
| 136 | struct drm_qxl_execbuffer *execbuffer = data; | 142 | int release_type; |
| 137 | struct drm_qxl_command user_cmd; | 143 | struct qxl_release *release; |
| 138 | int cmd_num; | 144 | struct qxl_bo *cmd_bo; |
| 139 | struct qxl_bo *reloc_src_bo; | ||
| 140 | struct qxl_bo *reloc_dst_bo; | ||
| 141 | struct drm_qxl_reloc reloc; | ||
| 142 | void *fb_cmd; | 145 | void *fb_cmd; |
| 143 | int i, ret; | 146 | int i, j, ret, num_relocs; |
| 144 | struct qxl_reloc_list reloc_list; | ||
| 145 | int unwritten; | 147 | int unwritten; |
| 146 | uint32_t reloc_dst_offset; | ||
| 147 | INIT_LIST_HEAD(&reloc_list.bos); | ||
| 148 | 148 | ||
| 149 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | 149 | switch (cmd->type) { |
| 150 | struct qxl_release *release; | 150 | case QXL_CMD_DRAW: |
| 151 | struct qxl_bo *cmd_bo; | 151 | release_type = QXL_RELEASE_DRAWABLE; |
| 152 | int release_type; | 152 | break; |
| 153 | struct drm_qxl_command *commands = | 153 | case QXL_CMD_SURFACE: |
| 154 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | 154 | case QXL_CMD_CURSOR: |
| 155 | default: | ||
| 156 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
| 157 | return -EINVAL; | ||
| 158 | break; | ||
| 159 | } | ||
| 155 | 160 | ||
| 156 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | 161 | if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) |
| 157 | sizeof(user_cmd))) | 162 | return -EINVAL; |
| 158 | return -EFAULT; | ||
| 159 | switch (user_cmd.type) { | ||
| 160 | case QXL_CMD_DRAW: | ||
| 161 | release_type = QXL_RELEASE_DRAWABLE; | ||
| 162 | break; | ||
| 163 | case QXL_CMD_SURFACE: | ||
| 164 | case QXL_CMD_CURSOR: | ||
| 165 | default: | ||
| 166 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
| 167 | return -EINVAL; | ||
| 168 | break; | ||
| 169 | } | ||
| 170 | 163 | ||
| 171 | if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) | 164 | if (!access_ok(VERIFY_READ, |
| 172 | return -EINVAL; | 165 | (void *)(unsigned long)cmd->command, |
| 166 | cmd->command_size)) | ||
| 167 | return -EFAULT; | ||
| 173 | 168 | ||
| 174 | if (!access_ok(VERIFY_READ, | 169 | reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); |
| 175 | (void *)(unsigned long)user_cmd.command, | 170 | if (!reloc_info) |
| 176 | user_cmd.command_size)) | 171 | return -ENOMEM; |
| 177 | return -EFAULT; | ||
| 178 | 172 | ||
| 179 | ret = qxl_alloc_release_reserved(qdev, | 173 | ret = qxl_alloc_release_reserved(qdev, |
| 180 | sizeof(union qxl_release_info) + | 174 | sizeof(union qxl_release_info) + |
| 181 | user_cmd.command_size, | 175 | cmd->command_size, |
| 182 | release_type, | 176 | release_type, |
| 183 | &release, | 177 | &release, |
| 184 | &cmd_bo); | 178 | &cmd_bo); |
| 185 | if (ret) | 179 | if (ret) |
| 186 | return ret; | 180 | goto out_free_reloc; |
| 187 | 181 | ||
| 188 | /* TODO copy slow path code from i915 */ | 182 | /* TODO copy slow path code from i915 */ |
| 189 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); | 183 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); |
| 190 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); | 184 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); |
| 191 | 185 | ||
| 192 | { | 186 | { |
| 193 | struct qxl_drawable *draw = fb_cmd; | 187 | struct qxl_drawable *draw = fb_cmd; |
| 188 | draw->mm_time = qdev->rom->mm_clock; | ||
| 189 | } | ||
| 194 | 190 | ||
| 195 | draw->mm_time = qdev->rom->mm_clock; | 191 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); |
| 196 | } | 192 | if (unwritten) { |
| 197 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); | 193 | DRM_ERROR("got unwritten %d\n", unwritten); |
| 198 | if (unwritten) { | 194 | ret = -EFAULT; |
| 199 | DRM_ERROR("got unwritten %d\n", unwritten); | 195 | goto out_free_release; |
| 200 | qxl_release_unreserve(qdev, release); | 196 | } |
| 201 | qxl_release_free(qdev, release); | 197 | |
| 202 | return -EFAULT; | 198 | /* fill out reloc info structs */ |
| 199 | num_relocs = 0; | ||
| 200 | for (i = 0; i < cmd->relocs_num; ++i) { | ||
| 201 | struct drm_qxl_reloc reloc; | ||
| 202 | |||
| 203 | if (DRM_COPY_FROM_USER(&reloc, | ||
| 204 | &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], | ||
| 205 | sizeof(reloc))) { | ||
| 206 | ret = -EFAULT; | ||
| 207 | goto out_free_bos; | ||
| 203 | } | 208 | } |
| 204 | 209 | ||
| 205 | for (i = 0 ; i < user_cmd.relocs_num; ++i) { | 210 | /* add the bos to the list of bos to validate - |
| 206 | if (DRM_COPY_FROM_USER(&reloc, | 211 | need to validate first then process relocs? */ |
| 207 | &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], | 212 | if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) { |
| 208 | sizeof(reloc))) { | 213 | DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type); |
| 209 | qxl_bo_list_unreserve(&reloc_list, true); | ||
| 210 | qxl_release_unreserve(qdev, release); | ||
| 211 | qxl_release_free(qdev, release); | ||
| 212 | return -EFAULT; | ||
| 213 | } | ||
| 214 | 214 | ||
| 215 | /* add the bos to the list of bos to validate - | 215 | ret = -EINVAL; |
| 216 | need to validate first then process relocs? */ | 216 | goto out_free_bos; |
| 217 | if (reloc.dst_handle) { | 217 | } |
| 218 | reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, | 218 | reloc_info[i].type = reloc.reloc_type; |
| 219 | reloc.dst_handle, &reloc_list); | 219 | |
| 220 | if (!reloc_dst_bo) { | 220 | if (reloc.dst_handle) { |
| 221 | qxl_bo_list_unreserve(&reloc_list, true); | 221 | reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv, |
| 222 | qxl_release_unreserve(qdev, release); | 222 | reloc.dst_handle, release); |
| 223 | qxl_release_free(qdev, release); | 223 | if (!reloc_info[i].dst_bo) { |
| 224 | return -EINVAL; | 224 | ret = -EINVAL; |
| 225 | } | 225 | reloc_info[i].src_bo = NULL; |
| 226 | reloc_dst_offset = 0; | 226 | goto out_free_bos; |
| 227 | } else { | ||
| 228 | reloc_dst_bo = cmd_bo; | ||
| 229 | reloc_dst_offset = release->release_offset; | ||
| 230 | } | 227 | } |
| 231 | 228 | reloc_info[i].dst_offset = reloc.dst_offset; | |
| 232 | /* reserve and validate the reloc dst bo */ | 229 | } else { |
| 233 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { | 230 | reloc_info[i].dst_bo = cmd_bo; |
| 234 | reloc_src_bo = | 231 | reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset; |
| 235 | qxlhw_handle_to_bo(qdev, file_priv, | 232 | } |
| 236 | reloc.src_handle, &reloc_list); | 233 | num_relocs++; |
| 237 | if (!reloc_src_bo) { | 234 | |
| 238 | if (reloc_dst_bo != cmd_bo) | 235 | /* reserve and validate the reloc dst bo */ |
| 239 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 236 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { |
| 240 | qxl_bo_list_unreserve(&reloc_list, true); | 237 | reloc_info[i].src_bo = |
| 241 | qxl_release_unreserve(qdev, release); | 238 | qxlhw_handle_to_bo(qdev, file_priv, |
| 242 | qxl_release_free(qdev, release); | 239 | reloc.src_handle, release); |
| 243 | return -EINVAL; | 240 | if (!reloc_info[i].src_bo) { |
| 244 | } | 241 | if (reloc_info[i].dst_bo != cmd_bo) |
| 245 | } else | 242 | drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base); |
| 246 | reloc_src_bo = NULL; | 243 | ret = -EINVAL; |
| 247 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { | 244 | goto out_free_bos; |
| 248 | apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, | ||
| 249 | reloc_src_bo, reloc.src_offset); | ||
| 250 | } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) { | ||
| 251 | apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo); | ||
| 252 | } else { | ||
| 253 | DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type); | ||
| 254 | return -EINVAL; | ||
| 255 | } | 245 | } |
| 246 | reloc_info[i].src_offset = reloc.src_offset; | ||
| 247 | } else { | ||
| 248 | reloc_info[i].src_bo = NULL; | ||
| 249 | reloc_info[i].src_offset = 0; | ||
| 250 | } | ||
| 251 | } | ||
| 256 | 252 | ||
| 257 | if (reloc_src_bo && reloc_src_bo != cmd_bo) { | 253 | /* validate all buffers */ |
| 258 | qxl_release_add_res(qdev, release, reloc_src_bo); | 254 | ret = qxl_release_reserve_list(release, false); |
| 259 | drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); | 255 | if (ret) |
| 260 | } | 256 | goto out_free_bos; |
| 261 | 257 | ||
| 262 | if (reloc_dst_bo != cmd_bo) | 258 | for (i = 0; i < cmd->relocs_num; ++i) { |
| 263 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 259 | if (reloc_info[i].type == QXL_RELOC_TYPE_BO) |
| 264 | } | 260 | apply_reloc(qdev, &reloc_info[i]); |
| 265 | qxl_fence_releaseable(qdev, release); | 261 | else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF) |
| 262 | apply_surf_reloc(qdev, &reloc_info[i]); | ||
| 263 | } | ||
| 266 | 264 | ||
| 267 | ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); | 265 | ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); |
| 268 | if (ret == -ERESTARTSYS) { | 266 | if (ret) |
| 269 | qxl_release_unreserve(qdev, release); | 267 | qxl_release_backoff_reserve_list(release); |
| 270 | qxl_release_free(qdev, release); | 268 | else |
| 271 | qxl_bo_list_unreserve(&reloc_list, true); | 269 | qxl_release_fence_buffer_objects(release); |
| 270 | |||
| 271 | out_free_bos: | ||
| 272 | for (j = 0; j < num_relocs; j++) { | ||
| 273 | if (reloc_info[j].dst_bo != cmd_bo) | ||
| 274 | drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base); | ||
| 275 | if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo) | ||
| 276 | drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base); | ||
| 277 | } | ||
| 278 | out_free_release: | ||
| 279 | if (ret) | ||
| 280 | qxl_release_free(qdev, release); | ||
| 281 | out_free_reloc: | ||
| 282 | kfree(reloc_info); | ||
| 283 | return ret; | ||
| 284 | } | ||
| 285 | |||
| 286 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | ||
| 287 | struct drm_file *file_priv) | ||
| 288 | { | ||
| 289 | struct qxl_device *qdev = dev->dev_private; | ||
| 290 | struct drm_qxl_execbuffer *execbuffer = data; | ||
| 291 | struct drm_qxl_command user_cmd; | ||
| 292 | int cmd_num; | ||
| 293 | int ret; | ||
| 294 | |||
| 295 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | ||
| 296 | |||
| 297 | struct drm_qxl_command *commands = | ||
| 298 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | ||
| 299 | |||
| 300 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | ||
| 301 | sizeof(user_cmd))) | ||
| 302 | return -EFAULT; | ||
| 303 | |||
| 304 | ret = qxl_process_single_command(qdev, &user_cmd, file_priv); | ||
| 305 | if (ret) | ||
| 272 | return ret; | 306 | return ret; |
| 273 | } | ||
| 274 | qxl_release_unreserve(qdev, release); | ||
| 275 | } | 307 | } |
| 276 | qxl_bo_list_unreserve(&reloc_list, 0); | ||
| 277 | return 0; | 308 | return 0; |
| 278 | } | 309 | } |
| 279 | 310 | ||
| @@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, | |||
| 305 | goto out; | 336 | goto out; |
| 306 | 337 | ||
| 307 | if (!qobj->pin_count) { | 338 | if (!qobj->pin_count) { |
| 308 | qxl_ttm_placement_from_domain(qobj, qobj->type); | 339 | qxl_ttm_placement_from_domain(qobj, qobj->type, false); |
| 309 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, | 340 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, |
| 310 | true, false); | 341 | true, false); |
| 311 | if (unlikely(ret)) | 342 | if (unlikely(ret)) |
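The reworked submission path in qxl_ioctl.c proceeds in stages: every relocation is first resolved into a `reloc_info` array while taking only GEM references, then the whole buffer list is reserved and validated atomically with `qxl_release_reserve_list()`, and only then are relocations applied and the command pushed. A condensed control-flow sketch follows; error paths are trimmed and the `struct qxl_reloc_info` element type is assumed from the array's usage in the hunk, so treat this as an illustration rather than a verbatim copy:

```c
/* Condensed sketch of the staged submission flow above (illustrative). */
static int submit_sketch(struct qxl_device *qdev, struct drm_qxl_command *cmd,
			 struct qxl_release *release,
			 struct qxl_reloc_info *reloc_info)
{
	int i, ret;

	for (i = 0; i < cmd->relocs_num; ++i) {
		/* stage 1: resolve handles to BOs; no reservations yet */
	}

	ret = qxl_release_reserve_list(release, false);	/* stage 2 */
	if (ret)
		return ret;

	for (i = 0; i < cmd->relocs_num; ++i) {		/* stage 3 */
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
	if (ret)
		qxl_release_backoff_reserve_list(release);
	else
		qxl_release_fence_buffer_objects(release);
	return ret;
}
```

On failure the reservations are backed off as a set; on success `qxl_release_fence_buffer_objects()` fences and unreserves every buffer in one pass, which is what lets the old per-buffer `qxl_bo_list_unreserve()` calls disappear.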
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 1191fe7788c9..aa161cddd87e 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c | |||
| @@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) | |||
| 51 | return false; | 51 | return false; |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | 54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) |
| 55 | { | 55 | { |
| 56 | u32 c = 0; | 56 | u32 c = 0; |
| 57 | u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0; | ||
| 57 | 58 | ||
| 58 | qbo->placement.fpfn = 0; | 59 | qbo->placement.fpfn = 0; |
| 59 | qbo->placement.lpfn = 0; | 60 | qbo->placement.lpfn = 0; |
| 60 | qbo->placement.placement = qbo->placements; | 61 | qbo->placement.placement = qbo->placements; |
| 61 | qbo->placement.busy_placement = qbo->placements; | 62 | qbo->placement.busy_placement = qbo->placements; |
| 62 | if (domain == QXL_GEM_DOMAIN_VRAM) | 63 | if (domain == QXL_GEM_DOMAIN_VRAM) |
| 63 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; | 64 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; |
| 64 | if (domain == QXL_GEM_DOMAIN_SURFACE) | 65 | if (domain == QXL_GEM_DOMAIN_SURFACE) |
| 65 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; | 66 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; |
| 66 | if (domain == QXL_GEM_DOMAIN_CPU) | 67 | if (domain == QXL_GEM_DOMAIN_CPU) |
| 67 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 68 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; |
| 68 | if (!c) | 69 | if (!c) |
| 69 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 70 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
| 70 | qbo->placement.num_placement = c; | 71 | qbo->placement.num_placement = c; |
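With the new `pinned` argument, `qxl_ttm_placement_from_domain()` folds `TTM_PL_FLAG_NO_EVICT` into the placement flags itself, so callers no longer patch the flags after the fact. Compare the two call styles; the "before" loop is the one deleted from `qxl_bo_pin()` below:

```c
/* Before: compute placement, then OR in NO_EVICT by hand. */
qxl_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++)
	bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

/* After: one call; the helper applies TTM_PL_FLAG_NO_EVICT itself. */
qxl_ttm_placement_from_domain(bo, domain, true);
```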
| @@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | |||
| 73 | 74 | ||
| 74 | 75 | ||
| 75 | int qxl_bo_create(struct qxl_device *qdev, | 76 | int qxl_bo_create(struct qxl_device *qdev, |
| 76 | unsigned long size, bool kernel, u32 domain, | 77 | unsigned long size, bool kernel, bool pinned, u32 domain, |
| 77 | struct qxl_surface *surf, | 78 | struct qxl_surface *surf, |
| 78 | struct qxl_bo **bo_ptr) | 79 | struct qxl_bo **bo_ptr) |
| 79 | { | 80 | { |
| @@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev, | |||
| 99 | } | 100 | } |
| 100 | bo->gem_base.driver_private = NULL; | 101 | bo->gem_base.driver_private = NULL; |
| 101 | bo->type = domain; | 102 | bo->type = domain; |
| 102 | bo->pin_count = 0; | 103 | bo->pin_count = pinned ? 1 : 0; |
| 103 | bo->surface_id = 0; | 104 | bo->surface_id = 0; |
| 104 | qxl_fence_init(qdev, &bo->fence); | 105 | qxl_fence_init(qdev, &bo->fence); |
| 105 | INIT_LIST_HEAD(&bo->list); | 106 | INIT_LIST_HEAD(&bo->list); |
| 106 | atomic_set(&bo->reserve_count, 0); | 107 | |
| 107 | if (surf) | 108 | if (surf) |
| 108 | bo->surf = *surf; | 109 | bo->surf = *surf; |
| 109 | 110 | ||
| 110 | qxl_ttm_placement_from_domain(bo, domain); | 111 | qxl_ttm_placement_from_domain(bo, domain, pinned); |
| 111 | 112 | ||
| 112 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, | 113 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, |
| 113 | &bo->placement, 0, !kernel, NULL, size, | 114 | &bo->placement, 0, !kernel, NULL, size, |
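`qxl_bo_create()` gains a `pinned` parameter between `kernel` and `domain`, so a BO can be born pinned (`pin_count = 1` plus a `NO_EVICT` placement) without the reserve/pin/unreserve dance deleted from `qxl_alloc_release_reserved()` further down. A call-site example mirroring `qxl_release_bo_alloc()`:

```c
/* Create a page-sized, pinned VRAM BO (as the release code now does). */
struct qxl_bo *bo;
int ret;

ret = qxl_bo_create(qdev, PAGE_SIZE, false /* kernel */, true /* pinned */,
		    QXL_GEM_DOMAIN_VRAM, NULL /* surf */, &bo);
if (ret)
	return ret;
```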
| @@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) | |||
| 228 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | 229 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) |
| 229 | { | 230 | { |
| 230 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; | 231 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; |
| 231 | int r, i; | 232 | int r; |
| 232 | 233 | ||
| 233 | if (bo->pin_count) { | 234 | if (bo->pin_count) { |
| 234 | bo->pin_count++; | 235 | bo->pin_count++; |
| @@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | |||
| 236 | *gpu_addr = qxl_bo_gpu_offset(bo); | 237 | *gpu_addr = qxl_bo_gpu_offset(bo); |
| 237 | return 0; | 238 | return 0; |
| 238 | } | 239 | } |
| 239 | qxl_ttm_placement_from_domain(bo, domain); | 240 | qxl_ttm_placement_from_domain(bo, domain, true); |
| 240 | for (i = 0; i < bo->placement.num_placement; i++) | ||
| 241 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | ||
| 242 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 241 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
| 243 | if (likely(r == 0)) { | 242 | if (likely(r == 0)) { |
| 244 | bo->pin_count = 1; | 243 | bo->pin_count = 1; |
| @@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) | |||
| 317 | return 0; | 316 | return 0; |
| 318 | } | 317 | } |
| 319 | 318 | ||
| 320 | void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed) | ||
| 321 | { | ||
| 322 | struct qxl_bo_list *entry, *sf; | ||
| 323 | |||
| 324 | list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) { | ||
| 325 | qxl_bo_unreserve(entry->bo); | ||
| 326 | list_del(&entry->lhead); | ||
| 327 | kfree(entry); | ||
| 328 | } | ||
| 329 | } | ||
| 330 | |||
| 331 | int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo) | ||
| 332 | { | ||
| 333 | struct qxl_bo_list *entry; | ||
| 334 | int ret; | ||
| 335 | |||
| 336 | list_for_each_entry(entry, &reloc_list->bos, lhead) { | ||
| 337 | if (entry->bo == bo) | ||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 341 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
| 342 | if (!entry) | ||
| 343 | return -ENOMEM; | ||
| 344 | |||
| 345 | entry->bo = bo; | ||
| 346 | list_add(&entry->lhead, &reloc_list->bos); | ||
| 347 | |||
| 348 | ret = qxl_bo_reserve(bo, false); | ||
| 349 | if (ret) | ||
| 350 | return ret; | ||
| 351 | |||
| 352 | if (!bo->pin_count) { | ||
| 353 | qxl_ttm_placement_from_domain(bo, bo->type); | ||
| 354 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
| 355 | true, false); | ||
| 356 | if (ret) | ||
| 357 | return ret; | ||
| 358 | } | ||
| 359 | |||
| 360 | /* allocate a surface for reserved + validated buffers */ | ||
| 361 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
| 362 | if (ret) | ||
| 363 | return ret; | ||
| 364 | return 0; | ||
| 365 | } | ||
| 366 | |||
| 367 | int qxl_surf_evict(struct qxl_device *qdev) | 319 | int qxl_surf_evict(struct qxl_device *qdev) |
| 368 | { | 320 | { |
| 369 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); | 321 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); |
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index ee7ad79ce781..8cb6167038e5 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h | |||
| @@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, | |||
| 88 | 88 | ||
| 89 | extern int qxl_bo_create(struct qxl_device *qdev, | 89 | extern int qxl_bo_create(struct qxl_device *qdev, |
| 90 | unsigned long size, | 90 | unsigned long size, |
| 91 | bool kernel, u32 domain, | 91 | bool kernel, bool pinned, u32 domain, |
| 92 | struct qxl_surface *surf, | 92 | struct qxl_surface *surf, |
| 93 | struct qxl_bo **bo_ptr); | 93 | struct qxl_bo **bo_ptr); |
| 94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); | 94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); |
| @@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); | |||
| 99 | extern void qxl_bo_unref(struct qxl_bo **bo); | 99 | extern void qxl_bo_unref(struct qxl_bo **bo); |
| 100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); | 100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); |
| 101 | extern int qxl_bo_unpin(struct qxl_bo *bo); | 101 | extern int qxl_bo_unpin(struct qxl_bo *bo); |
| 102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); | 102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); |
| 103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); | 103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); |
| 104 | 104 | ||
| 105 | extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo); | ||
| 106 | extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed); | ||
| 107 | #endif | 105 | #endif |
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b443d6751d5f..b61449e52cd5 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
| @@ -38,7 +38,8 @@ | |||
| 38 | 38 | ||
| 39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; | 39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; |
| 40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; | 40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; |
| 41 | uint64_t | 41 | |
| 42 | static uint64_t | ||
| 42 | qxl_release_alloc(struct qxl_device *qdev, int type, | 43 | qxl_release_alloc(struct qxl_device *qdev, int type, |
| 43 | struct qxl_release **ret) | 44 | struct qxl_release **ret) |
| 44 | { | 45 | { |
| @@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type, | |||
| 53 | return 0; | 54 | return 0; |
| 54 | } | 55 | } |
| 55 | release->type = type; | 56 | release->type = type; |
| 56 | release->bo_count = 0; | ||
| 57 | release->release_offset = 0; | 57 | release->release_offset = 0; |
| 58 | release->surface_release_id = 0; | 58 | release->surface_release_id = 0; |
| 59 | INIT_LIST_HEAD(&release->bos); | ||
| 59 | 60 | ||
| 60 | idr_preload(GFP_KERNEL); | 61 | idr_preload(GFP_KERNEL); |
| 61 | spin_lock(&qdev->release_idr_lock); | 62 | spin_lock(&qdev->release_idr_lock); |
| @@ -77,20 +78,20 @@ void | |||
| 77 | qxl_release_free(struct qxl_device *qdev, | 78 | qxl_release_free(struct qxl_device *qdev, |
| 78 | struct qxl_release *release) | 79 | struct qxl_release *release) |
| 79 | { | 80 | { |
| 80 | int i; | 81 | struct qxl_bo_list *entry, *tmp; |
| 81 | 82 | QXL_INFO(qdev, "release %d, type %d\n", release->id, | |
| 82 | QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, | 83 | release->type); |
| 83 | release->type, release->bo_count); | ||
| 84 | 84 | ||
| 85 | if (release->surface_release_id) | 85 | if (release->surface_release_id) |
| 86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); | 86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); |
| 87 | 87 | ||
| 88 | for (i = 0 ; i < release->bo_count; ++i) { | 88 | list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) { |
| 89 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 89 | QXL_INFO(qdev, "release %llx\n", | 90 | QXL_INFO(qdev, "release %llx\n", |
| 90 | release->bos[i]->tbo.addr_space_offset | 91 | entry->tv.bo->addr_space_offset |
| 91 | - DRM_FILE_OFFSET); | 92 | - DRM_FILE_OFFSET); |
| 92 | qxl_fence_remove_release(&release->bos[i]->fence, release->id); | 93 | qxl_fence_remove_release(&bo->fence, release->id); |
| 93 | qxl_bo_unref(&release->bos[i]); | 94 | qxl_bo_unref(&bo); |
| 94 | } | 95 | } |
| 95 | spin_lock(&qdev->release_idr_lock); | 96 | spin_lock(&qdev->release_idr_lock); |
| 96 | idr_remove(&qdev->release_idr, release->id); | 97 | idr_remove(&qdev->release_idr, release->id); |
| @@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev, | |||
| 98 | kfree(release); | 99 | kfree(release); |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 101 | void | ||
| 102 | qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release, | ||
| 103 | struct qxl_bo *bo) | ||
| 104 | { | ||
| 105 | int i; | ||
| 106 | for (i = 0; i < release->bo_count; i++) | ||
| 107 | if (release->bos[i] == bo) | ||
| 108 | return; | ||
| 109 | |||
| 110 | if (release->bo_count >= QXL_MAX_RES) { | ||
| 111 | DRM_ERROR("exceeded max resource on a qxl_release item\n"); | ||
| 112 | return; | ||
| 113 | } | ||
| 114 | release->bos[release->bo_count++] = qxl_bo_ref(bo); | ||
| 115 | } | ||
| 116 | |||
| 117 | static int qxl_release_bo_alloc(struct qxl_device *qdev, | 102 | static int qxl_release_bo_alloc(struct qxl_device *qdev, |
| 118 | struct qxl_bo **bo) | 103 | struct qxl_bo **bo) |
| 119 | { | 104 | { |
| 120 | int ret; | 105 | int ret; |
| 121 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, | 106 | /* pin release bo's; they are too messy to evict */ |
| 107 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, true, | ||
| 108 | QXL_GEM_DOMAIN_VRAM, NULL, | ||
| 122 | bo); | 109 | bo); |
| 123 | return ret; | 110 | return ret; |
| 124 | } | 111 | } |
| 125 | 112 | ||
| 126 | int qxl_release_reserve(struct qxl_device *qdev, | 113 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) |
| 127 | struct qxl_release *release, bool no_wait) | 114 | { |
| 115 | struct qxl_bo_list *entry; | ||
| 116 | |||
| 117 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
| 118 | if (entry->tv.bo == &bo->tbo) | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 122 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
| 123 | if (!entry) | ||
| 124 | return -ENOMEM; | ||
| 125 | |||
| 126 | qxl_bo_ref(bo); | ||
| 127 | entry->tv.bo = &bo->tbo; | ||
| 128 | list_add_tail(&entry->tv.head, &release->bos); | ||
| 129 | return 0; | ||
| 130 | } | ||
| 131 | |||
| 132 | static int qxl_release_validate_bo(struct qxl_bo *bo) | ||
| 128 | { | 133 | { |
| 129 | int ret; | 134 | int ret; |
| 130 | if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { | 135 | |
| 131 | ret = qxl_bo_reserve(release->bos[0], no_wait); | 136 | if (!bo->pin_count) { |
| 137 | qxl_ttm_placement_from_domain(bo, bo->type, false); | ||
| 138 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
| 139 | true, false); | ||
| 132 | if (ret) | 140 | if (ret) |
| 133 | return ret; | 141 | return ret; |
| 134 | } | 142 | } |
| 143 | |||
| 144 | /* allocate a surface for reserved + validated buffers */ | ||
| 145 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
| 146 | if (ret) | ||
| 147 | return ret; | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) | ||
| 152 | { | ||
| 153 | int ret; | ||
| 154 | struct qxl_bo_list *entry; | ||
| 155 | |||
| 156 | /* if only one object is on the release it is the release itself, | ||
| 157 | and since these objects are pinned there is no need to reserve */ | ||
| 158 | if (list_is_singular(&release->bos)) | ||
| 159 | return 0; | ||
| 160 | |||
| 161 | ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); | ||
| 162 | if (ret) | ||
| 163 | return ret; | ||
| 164 | |||
| 165 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
| 166 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 167 | |||
| 168 | ret = qxl_release_validate_bo(bo); | ||
| 169 | if (ret) { | ||
| 170 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
| 171 | return ret; | ||
| 172 | } | ||
| 173 | } | ||
| 135 | return 0; | 174 | return 0; |
| 136 | } | 175 | } |
| 137 | 176 | ||
| 138 | void qxl_release_unreserve(struct qxl_device *qdev, | 177 | void qxl_release_backoff_reserve_list(struct qxl_release *release) |
| 139 | struct qxl_release *release) | ||
| 140 | { | 178 | { |
| 141 | if (atomic_dec_and_test(&release->bos[0]->reserve_count)) | 179 | /* if only one object is on the release it is the release itself, |
| 142 | qxl_bo_unreserve(release->bos[0]); | 180 | and since these objects are pinned there is no need to reserve */ |
| 181 | if (list_is_singular(&release->bos)) | ||
| 182 | return; | ||
| 183 | |||
| 184 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
| 143 | } | 185 | } |
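These helpers move qxl onto the common TTM execbuf utilities: each tracked BO becomes a `ttm_validate_buffer` on `release->bos`, `ttm_eu_reserve_buffers()` reserves the whole set under the ww-acquire ticket stored in the release (avoiding the lock-ordering hazards the old per-BO `qxl_bo_reserve()` calls could hit), and `ttm_eu_backoff_reservation()` unwinds cleanly on failure. The `list_is_singular()` shortcut works because a single-entry list can only contain the release's own BO, which is created pinned and therefore never needs reserving or validating.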
| 144 | 186 | ||
| 187 | |||
| 145 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 188 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
| 146 | enum qxl_surface_cmd_type surface_cmd_type, | 189 | enum qxl_surface_cmd_type surface_cmd_type, |
| 147 | struct qxl_release *create_rel, | 190 | struct qxl_release *create_rel, |
| 148 | struct qxl_release **release) | 191 | struct qxl_release **release) |
| 149 | { | 192 | { |
| 150 | int ret; | ||
| 151 | |||
| 152 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { | 193 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { |
| 153 | int idr_ret; | 194 | int idr_ret; |
| 195 | struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); | ||
| 154 | struct qxl_bo *bo; | 196 | struct qxl_bo *bo; |
| 155 | union qxl_release_info *info; | 197 | union qxl_release_info *info; |
| 156 | 198 | ||
| 157 | /* stash the release after the create command */ | 199 | /* stash the release after the create command */ |
| 158 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); | 200 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); |
| 159 | bo = qxl_bo_ref(create_rel->bos[0]); | 201 | bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); |
| 160 | 202 | ||
| 161 | (*release)->release_offset = create_rel->release_offset + 64; | 203 | (*release)->release_offset = create_rel->release_offset + 64; |
| 162 | 204 | ||
| 163 | qxl_release_add_res(qdev, *release, bo); | 205 | qxl_release_list_add(*release, bo); |
| 164 | 206 | ||
| 165 | ret = qxl_release_reserve(qdev, *release, false); | ||
| 166 | if (ret) { | ||
| 167 | DRM_ERROR("release reserve failed\n"); | ||
| 168 | goto out_unref; | ||
| 169 | } | ||
| 170 | info = qxl_release_map(qdev, *release); | 207 | info = qxl_release_map(qdev, *release); |
| 171 | info->id = idr_ret; | 208 | info->id = idr_ret; |
| 172 | qxl_release_unmap(qdev, *release, info); | 209 | qxl_release_unmap(qdev, *release, info); |
| 173 | 210 | ||
| 174 | |||
| 175 | out_unref: | ||
| 176 | qxl_bo_unref(&bo); | 211 | qxl_bo_unref(&bo); |
| 177 | return ret; | 212 | return 0; |
| 178 | } | 213 | } |
| 179 | 214 | ||
| 180 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), | 215 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), |
| @@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
| 187 | { | 222 | { |
| 188 | struct qxl_bo *bo; | 223 | struct qxl_bo *bo; |
| 189 | int idr_ret; | 224 | int idr_ret; |
| 190 | int ret; | 225 | int ret = 0; |
| 191 | union qxl_release_info *info; | 226 | union qxl_release_info *info; |
| 192 | int cur_idx; | 227 | int cur_idx; |
| 193 | 228 | ||
| @@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
| 216 | mutex_unlock(&qdev->release_mutex); | 251 | mutex_unlock(&qdev->release_mutex); |
| 217 | return ret; | 252 | return ret; |
| 218 | } | 253 | } |
| 219 | |||
| 220 | /* pin releases bo's they are too messy to evict */ | ||
| 221 | ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false); | ||
| 222 | qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL); | ||
| 223 | qxl_bo_unreserve(qdev->current_release_bo[cur_idx]); | ||
| 224 | } | 254 | } |
| 225 | 255 | ||
| 226 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); | 256 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); |
| @@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
| 231 | if (rbo) | 261 | if (rbo) |
| 232 | *rbo = bo; | 262 | *rbo = bo; |
| 233 | 263 | ||
| 234 | qxl_release_add_res(qdev, *release, bo); | ||
| 235 | |||
| 236 | ret = qxl_release_reserve(qdev, *release, false); | ||
| 237 | mutex_unlock(&qdev->release_mutex); | 264 | mutex_unlock(&qdev->release_mutex); |
| 238 | if (ret) | 265 | |
| 239 | goto out_unref; | 266 | qxl_release_list_add(*release, bo); |
| 240 | 267 | ||
| 241 | info = qxl_release_map(qdev, *release); | 268 | info = qxl_release_map(qdev, *release); |
| 242 | info->id = idr_ret; | 269 | info->id = idr_ret; |
| 243 | qxl_release_unmap(qdev, *release, info); | 270 | qxl_release_unmap(qdev, *release, info); |
| 244 | 271 | ||
| 245 | out_unref: | ||
| 246 | qxl_bo_unref(&bo); | 272 | qxl_bo_unref(&bo); |
| 247 | return ret; | 273 | return ret; |
| 248 | } | 274 | } |
| 249 | 275 | ||
| 250 | int qxl_fence_releaseable(struct qxl_device *qdev, | ||
| 251 | struct qxl_release *release) | ||
| 252 | { | ||
| 253 | int i, ret; | ||
| 254 | for (i = 0; i < release->bo_count; i++) { | ||
| 255 | if (!release->bos[i]->tbo.sync_obj) | ||
| 256 | release->bos[i]->tbo.sync_obj = &release->bos[i]->fence; | ||
| 257 | ret = qxl_fence_add_release(&release->bos[i]->fence, release->id); | ||
| 258 | if (ret) | ||
| 259 | return ret; | ||
| 260 | } | ||
| 261 | return 0; | ||
| 262 | } | ||
| 263 | |||
| 264 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 276 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
| 265 | uint64_t id) | 277 | uint64_t id) |
| 266 | { | 278 | { |
| @@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | |||
| 273 | DRM_ERROR("failed to find id in release_idr\n"); | 285 | DRM_ERROR("failed to find id in release_idr\n"); |
| 274 | return NULL; | 286 | return NULL; |
| 275 | } | 287 | } |
| 276 | if (release->bo_count < 1) { | 288 | |
| 277 | DRM_ERROR("read a released resource with 0 bos\n"); | ||
| 278 | return NULL; | ||
| 279 | } | ||
| 280 | return release; | 289 | return release; |
| 281 | } | 290 | } |
| 282 | 291 | ||
| @@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | |||
| 285 | { | 294 | { |
| 286 | void *ptr; | 295 | void *ptr; |
| 287 | union qxl_release_info *info; | 296 | union qxl_release_info *info; |
| 288 | struct qxl_bo *bo = release->bos[0]; | 297 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
| 298 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 289 | 299 | ||
| 290 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); | 300 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); |
| 301 | if (!ptr) | ||
| 302 | return NULL; | ||
| 291 | info = ptr + (release->release_offset & ~PAGE_SIZE); | 303 | info = ptr + (release->release_offset & ~PAGE_SIZE); |
| 292 | return info; | 304 | return info; |
| 293 | } | 305 | } |
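`qxl_release_map()` can now return NULL when the atomic page mapping fails, so callers have to check the result before writing the release info. A hedged usage sketch; the `-ENOMEM` here is illustrative, not taken from the patch:

```c
/* qxl_release_map() may now fail; check before dereferencing. */
union qxl_release_info *info;

info = qxl_release_map(qdev, release);
if (!info)
	return -ENOMEM;	/* illustrative error code */
info->id = idr_ret;
qxl_release_unmap(qdev, release, info);
```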
| @@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev, | |||
| 296 | struct qxl_release *release, | 308 | struct qxl_release *release, |
| 297 | union qxl_release_info *info) | 309 | union qxl_release_info *info) |
| 298 | { | 310 | { |
| 299 | struct qxl_bo *bo = release->bos[0]; | 311 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
| 312 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 300 | void *ptr; | 313 | void *ptr; |
| 301 | 314 | ||
| 302 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); | 315 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); |
| 303 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); | 316 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); |
| 304 | } | 317 | } |
| 318 | |||
| 319 | void qxl_release_fence_buffer_objects(struct qxl_release *release) | ||
| 320 | { | ||
| 321 | struct ttm_validate_buffer *entry; | ||
| 322 | struct ttm_buffer_object *bo; | ||
| 323 | struct ttm_bo_global *glob; | ||
| 324 | struct ttm_bo_device *bdev; | ||
| 325 | struct ttm_bo_driver *driver; | ||
| 326 | struct qxl_bo *qbo; | ||
| 327 | |||
| 328 | /* if only one object is on the release it is the release itself, | ||
| 329 | and since these objects are pinned there is no need to reserve */ | ||
| 330 | if (list_is_singular(&release->bos)) | ||
| 331 | return; | ||
| 332 | |||
| 333 | bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; | ||
| 334 | bdev = bo->bdev; | ||
| 335 | driver = bdev->driver; | ||
| 336 | glob = bo->glob; | ||
| 337 | |||
| 338 | spin_lock(&glob->lru_lock); | ||
| 339 | spin_lock(&bdev->fence_lock); | ||
| 340 | |||
| 341 | list_for_each_entry(entry, &release->bos, head) { | ||
| 342 | bo = entry->bo; | ||
| 343 | qbo = to_qxl_bo(bo); | ||
| 344 | |||
| 345 | if (!entry->bo->sync_obj) | ||
| 346 | entry->bo->sync_obj = &qbo->fence; | ||
| 347 | |||
| 348 | qxl_fence_add_release_locked(&qbo->fence, release->id); | ||
| 349 | |||
| 350 | ttm_bo_add_to_lru(bo); | ||
| 351 | ww_mutex_unlock(&bo->resv->lock); | ||
| 352 | entry->reserved = false; | ||
| 353 | } | ||
| 354 | spin_unlock(&bdev->fence_lock); | ||
| 355 | spin_unlock(&glob->lru_lock); | ||
| 356 | ww_acquire_fini(&release->ticket); | ||
| 357 | } | ||
| 358 | |||
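`qxl_release_fence_buffer_objects()` takes over from the removed `qxl_fence_releaseable()` and also drops the reservations itself: under `glob->lru_lock` then `bdev->fence_lock`, every BO adopts the release's fence as its sync object, rejoins the LRU, and has its ww-mutex released, after which `ww_acquire_fini()` retires the ticket taken in `qxl_release_reserve_list()`. The per-BO body, with the steps annotated:

```c
/* Per-BO fencing step (condensed from the function above). */
if (!bo->sync_obj)
	bo->sync_obj = &qbo->fence;		/* adopt the qxl fence */
qxl_fence_add_release_locked(&qbo->fence, release->id);
ttm_bo_add_to_lru(bo);				/* put it back on the LRU */
ww_mutex_unlock(&bo->resv->lock);		/* drop the reservation */
entry->reserved = false;
```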
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 489cb8cece4d..1dfd84cda2a1 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
| @@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, | |||
| 206 | return; | 206 | return; |
| 207 | } | 207 | } |
| 208 | qbo = container_of(bo, struct qxl_bo, tbo); | 208 | qbo = container_of(bo, struct qxl_bo, tbo); |
| 209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); | 209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); |
| 210 | *placement = qbo->placement; | 210 | *placement = qbo->placement; |
| 211 | } | 211 | } |
| 212 | 212 | ||
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index fb441a790f3d..15da7ef344a4 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | |||
| 1222 | int r; | 1222 | int r; |
| 1223 | 1223 | ||
| 1224 | mutex_lock(&ctx->mutex); | 1224 | mutex_lock(&ctx->mutex); |
| 1225 | /* reset data block */ | ||
| 1226 | ctx->data_block = 0; | ||
| 1225 | /* reset reg block */ | 1227 | /* reset reg block */ |
| 1226 | ctx->reg_block = 0; | 1228 | ctx->reg_block = 0; |
| 1227 | /* reset fb window */ | 1229 | /* reset fb window */ |
| 1228 | ctx->fb_base = 0; | 1230 | ctx->fb_base = 0; |
| 1229 | /* reset io mode */ | 1231 | /* reset io mode */ |
| 1230 | ctx->io_mode = ATOM_IO_MM; | 1232 | ctx->io_mode = ATOM_IO_MM; |
| 1233 | /* reset divmul */ | ||
| 1234 | ctx->divmul[0] = 0; | ||
| 1235 | ctx->divmul[1] = 0; | ||
| 1231 | r = atom_execute_table_locked(ctx, index, params); | 1236 | r = atom_execute_table_locked(ctx, index, params); |
| 1232 | mutex_unlock(&ctx->mutex); | 1237 | mutex_unlock(&ctx->mutex); |
| 1233 | return r; | 1238 | return r; |
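The atom.c hunk widens the per-call interpreter reset: besides the register block, framebuffer window and IO mode, each `atom_execute_table()` call now also clears the data block and both divmul scratch slots, so no state from a previous table execution can leak into the next. A hypothetical helper grouping the same resets (not present in the patch):

```c
/* Hypothetical grouping of the same per-call resets (not in the patch). */
static void atom_reset_exec_state(struct atom_context *ctx)
{
	ctx->data_block = 0;		/* reset data block */
	ctx->reg_block = 0;		/* reset reg block */
	ctx->fb_base = 0;		/* reset fb window */
	ctx->io_mode = ATOM_IO_MM;	/* reset io mode */
	ctx->divmul[0] = 0;		/* reset divmul scratch */
	ctx->divmul[1] = 0;
}
```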
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed480..32501f6ec991 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
| @@ -44,6 +44,41 @@ static char *pre_emph_names[] = { | |||
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | /***** radeon AUX functions *****/ | 46 | /***** radeon AUX functions *****/ |
| 47 | |||
| 48 | /* Atom needs data in little endian format | ||
| 49 | * so swap as appropriate when copying data to | ||
| 50 | * or from atom. Note that atom operates on | ||
| 51 | * dw units. | ||
| 52 | */ | ||
| 53 | static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) | ||
| 54 | { | ||
| 55 | #ifdef __BIG_ENDIAN | ||
| 56 | u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ | ||
| 57 | u32 *dst32, *src32; | ||
| 58 | int i; | ||
| 59 | |||
| 60 | memcpy(src_tmp, src, num_bytes); | ||
| 61 | src32 = (u32 *)src_tmp; | ||
| 62 | dst32 = (u32 *)dst_tmp; | ||
| 63 | if (to_le) { | ||
| 64 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
| 65 | dst32[i] = cpu_to_le32(src32[i]); | ||
| 66 | memcpy(dst, dst_tmp, num_bytes); | ||
| 67 | } else { | ||
| 68 | u8 dws = num_bytes & ~3; | ||
| 69 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
| 70 | dst32[i] = le32_to_cpu(src32[i]); | ||
| 71 | memcpy(dst, dst_tmp, dws); | ||
| 72 | if (num_bytes % 4) { | ||
| 73 | for (i = 0; i < (num_bytes % 4); i++) | ||
| 74 | dst[dws+i] = dst_tmp[dws+i]; | ||
| 75 | } | ||
| 76 | } | ||
| 77 | #else | ||
| 78 | memcpy(dst, src, num_bytes); | ||
| 79 | #endif | ||
| 80 | } | ||
| 81 | |||
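`radeon_copy_swap()` makes AUX transfers work on big-endian hosts: the atom interpreter operates on little-endian dwords, so whole 32-bit words are byte-swapped through a bounce buffer, and when reading back (`to_le == false`) only the 1-3 tail bytes of a partial final dword are copied individually so nothing past `num_bytes` lands in the destination. Worked example: reading 6 bytes swaps two dwords in the bounce buffer, bulk-copies the first 4 bytes (`dws = 6 & ~3 = 4`), then copies bytes 4 and 5 one at a time. The 20-byte scratch arrays match the largest AUX transaction the callers issue (16 data bytes plus a 4-byte header), and on little-endian builds the whole helper collapses to a plain `memcpy()`.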
| 47 | union aux_channel_transaction { | 82 | union aux_channel_transaction { |
| 48 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; | 83 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; |
| 49 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; | 84 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; |
| @@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
| 65 | 100 | ||
| 66 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); | 101 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); |
| 67 | 102 | ||
| 68 | memcpy(base, send, send_bytes); | 103 | radeon_copy_swap(base, send, send_bytes, true); |
| 69 | 104 | ||
| 70 | args.v1.lpAuxRequest = 0 + 4; | 105 | args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); |
| 71 | args.v1.lpDataOut = 16 + 4; | 106 | args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); |
| 72 | args.v1.ucDataOutLen = 0; | 107 | args.v1.ucDataOutLen = 0; |
| 73 | args.v1.ucChannelID = chan->rec.i2c_id; | 108 | args.v1.ucChannelID = chan->rec.i2c_id; |
| 74 | args.v1.ucDelay = delay / 10; | 109 | args.v1.ucDelay = delay / 10; |
| @@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
| 102 | recv_bytes = recv_size; | 137 | recv_bytes = recv_size; |
| 103 | 138 | ||
| 104 | if (recv && recv_size) | 139 | if (recv && recv_size) |
| 105 | memcpy(recv, base + 16, recv_bytes); | 140 | radeon_copy_swap(recv, base + 16, recv_bytes, false); |
| 106 | 141 | ||
| 107 | return recv_bytes; | 142 | return recv_bytes; |
| 108 | } | 143 | } |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0bfd55e08820..9953e1fbc46d 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
| @@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
| 2548 | { | 2548 | { |
| 2549 | struct rv7xx_power_info *pi; | 2549 | struct rv7xx_power_info *pi; |
| 2550 | struct evergreen_power_info *eg_pi; | 2550 | struct evergreen_power_info *eg_pi; |
| 2551 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 2552 | u16 data_offset, size; | ||
| 2553 | u8 frev, crev; | ||
| 2554 | struct atom_clock_dividers dividers; | 2551 | struct atom_clock_dividers dividers; |
| 2555 | int ret; | 2552 | int ret; |
| 2556 | 2553 | ||
| @@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
| 2633 | eg_pi->vddci_control = | 2630 | eg_pi->vddci_control = |
| 2634 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2631 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
| 2635 | 2632 | ||
| 2636 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2633 | rv770_get_engine_memory_ss(rdev); |
| 2637 | &frev, &crev, &data_offset)) { | ||
| 2638 | pi->sclk_ss = true; | ||
| 2639 | pi->mclk_ss = true; | ||
| 2640 | pi->dynamic_ss = true; | ||
| 2641 | } else { | ||
| 2642 | pi->sclk_ss = false; | ||
| 2643 | pi->mclk_ss = false; | ||
| 2644 | pi->dynamic_ss = true; | ||
| 2645 | } | ||
| 2646 | 2634 | ||
| 2647 | pi->asi = RV770_ASI_DFLT; | 2635 | pi->asi = RV770_ASI_DFLT; |
| 2648 | pi->pasi = CYPRESS_HASI_DFLT; | 2636 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
| 2659 | 2647 | ||
| 2660 | pi->dynamic_pcie_gen2 = true; | 2648 | pi->dynamic_pcie_gen2 = true; |
| 2661 | 2649 | ||
| 2662 | if (pi->gfx_clock_gating && | 2650 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 2663 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 2664 | pi->thermal_protection = true; | 2651 | pi->thermal_protection = true; |
| 2665 | else | 2652 | else |
| 2666 | pi->thermal_protection = false; | 2653 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dacec4e2090..8928bd109c16 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -2587,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | |||
| 2587 | if (rdev->wb.enabled) { | 2587 | if (rdev->wb.enabled) { |
| 2588 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | 2588 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); |
| 2589 | } else { | 2589 | } else { |
| 2590 | mutex_lock(&rdev->srbm_mutex); | ||
| 2590 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2591 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
| 2591 | rptr = RREG32(CP_HQD_PQ_RPTR); | 2592 | rptr = RREG32(CP_HQD_PQ_RPTR); |
| 2592 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2593 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 2594 | mutex_unlock(&rdev->srbm_mutex); | ||
| 2593 | } | 2595 | } |
| 2594 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2596 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
| 2595 | 2597 | ||
| @@ -2604,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | |||
| 2604 | if (rdev->wb.enabled) { | 2606 | if (rdev->wb.enabled) { |
| 2605 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); | 2607 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); |
| 2606 | } else { | 2608 | } else { |
| 2609 | mutex_lock(&rdev->srbm_mutex); | ||
| 2607 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2610 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
| 2608 | wptr = RREG32(CP_HQD_PQ_WPTR); | 2611 | wptr = RREG32(CP_HQD_PQ_WPTR); |
| 2609 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2612 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 2613 | mutex_unlock(&rdev->srbm_mutex); | ||
| 2610 | } | 2614 | } |
| 2611 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2615 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
| 2612 | 2616 | ||
| @@ -2897,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 2897 | WREG32(CP_CPF_DEBUG, tmp); | 2901 | WREG32(CP_CPF_DEBUG, tmp); |
| 2898 | 2902 | ||
| 2899 | /* init the pipes */ | 2903 | /* init the pipes */ |
| 2904 | mutex_lock(&rdev->srbm_mutex); | ||
| 2900 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { | 2905 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { |
| 2901 | int me = (i < 4) ? 1 : 2; | 2906 | int me = (i < 4) ? 1 : 2; |
| 2902 | int pipe = (i < 4) ? i : (i - 4); | 2907 | int pipe = (i < 4) ? i : (i - 4); |
| @@ -2919,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 2919 | WREG32(CP_HPD_EOP_CONTROL, tmp); | 2924 | WREG32(CP_HPD_EOP_CONTROL, tmp); |
| 2920 | } | 2925 | } |
| 2921 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2926 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 2927 | mutex_unlock(&rdev->srbm_mutex); | ||
| 2922 | 2928 | ||
| 2923 | /* init the queues. Just two for now. */ | 2929 | /* init the queues. Just two for now. */ |
| 2924 | for (i = 0; i < 2; i++) { | 2930 | for (i = 0; i < 2; i++) { |
| @@ -2972,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 2972 | mqd->static_thread_mgmt23[0] = 0xffffffff; | 2978 | mqd->static_thread_mgmt23[0] = 0xffffffff; |
| 2973 | mqd->static_thread_mgmt23[1] = 0xffffffff; | 2979 | mqd->static_thread_mgmt23[1] = 0xffffffff; |
| 2974 | 2980 | ||
| 2981 | mutex_lock(&rdev->srbm_mutex); | ||
| 2975 | cik_srbm_select(rdev, rdev->ring[idx].me, | 2982 | cik_srbm_select(rdev, rdev->ring[idx].me, |
| 2976 | rdev->ring[idx].pipe, | 2983 | rdev->ring[idx].pipe, |
| 2977 | rdev->ring[idx].queue, 0); | 2984 | rdev->ring[idx].queue, 0); |
| @@ -3099,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 3099 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); | 3106 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); |
| 3100 | 3107 | ||
| 3101 | cik_srbm_select(rdev, 0, 0, 0, 0); | 3108 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 3109 | mutex_unlock(&rdev->srbm_mutex); | ||
| 3102 | 3110 | ||
| 3103 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); | 3111 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); |
| 3104 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); | 3112 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); |
| @@ -4320,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
| 4320 | 4328 | ||
| 4321 | /* XXX SH_MEM regs */ | 4329 | /* XXX SH_MEM regs */ |
| 4322 | /* where to put LDS, scratch, GPUVM in FSA64 space */ | 4330 | /* where to put LDS, scratch, GPUVM in FSA64 space */ |
| 4331 | mutex_lock(&rdev->srbm_mutex); | ||
| 4323 | for (i = 0; i < 16; i++) { | 4332 | for (i = 0; i < 16; i++) { |
| 4324 | cik_srbm_select(rdev, 0, 0, 0, i); | 4333 | cik_srbm_select(rdev, 0, 0, 0, i); |
| 4325 | /* CP and shaders */ | 4334 | /* CP and shaders */ |
| @@ -4335,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
| 4335 | /* XXX SDMA RLC - todo */ | 4344 | /* XXX SDMA RLC - todo */ |
| 4336 | } | 4345 | } |
| 4337 | cik_srbm_select(rdev, 0, 0, 0, 0); | 4346 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 4347 | mutex_unlock(&rdev->srbm_mutex); | ||
| 4338 | 4348 | ||
| 4339 | cik_pcie_gart_tlb_flush(rdev); | 4349 | cik_pcie_gart_tlb_flush(rdev); |
| 4340 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 4350 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
| @@ -5954,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 5954 | struct radeon_ring *ring; | 5964 | struct radeon_ring *ring; |
| 5955 | int r; | 5965 | int r; |
| 5956 | 5966 | ||
| 5967 | cik_mc_program(rdev); | ||
| 5968 | |||
| 5957 | if (rdev->flags & RADEON_IS_IGP) { | 5969 | if (rdev->flags & RADEON_IS_IGP) { |
| 5958 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 5970 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
| 5959 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | 5971 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { |
| @@ -5985,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 5985 | if (r) | 5997 | if (r) |
| 5986 | return r; | 5998 | return r; |
| 5987 | 5999 | ||
| 5988 | cik_mc_program(rdev); | ||
| 5989 | r = cik_pcie_gart_enable(rdev); | 6000 | r = cik_pcie_gart_enable(rdev); |
| 5990 | if (r) | 6001 | if (r) |
| 5991 | return r; | 6002 | return r; |
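`cik_mc_program()` is hoisted to the very top of `cik_startup()` so the memory controller is configured before firmware checks, scratch setup and GART enablement; the same reorder is applied to `evergreen_startup()` and `cayman_startup()` in the hunks below.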
| @@ -6194,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev) | |||
| 6194 | radeon_vm_manager_fini(rdev); | 6205 | radeon_vm_manager_fini(rdev); |
| 6195 | cik_cp_enable(rdev, false); | 6206 | cik_cp_enable(rdev, false); |
| 6196 | cik_sdma_enable(rdev, false); | 6207 | cik_sdma_enable(rdev, false); |
| 6197 | r600_uvd_rbc_stop(rdev); | 6208 | r600_uvd_stop(rdev); |
| 6198 | radeon_uvd_suspend(rdev); | 6209 | radeon_uvd_suspend(rdev); |
| 6199 | cik_irq_suspend(rdev); | 6210 | cik_irq_suspend(rdev); |
| 6200 | radeon_wb_disable(rdev); | 6211 | radeon_wb_disable(rdev); |
| @@ -6358,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev) | |||
| 6358 | radeon_vm_manager_fini(rdev); | 6369 | radeon_vm_manager_fini(rdev); |
| 6359 | radeon_ib_pool_fini(rdev); | 6370 | radeon_ib_pool_fini(rdev); |
| 6360 | radeon_irq_kms_fini(rdev); | 6371 | radeon_irq_kms_fini(rdev); |
| 6372 | r600_uvd_stop(rdev); | ||
| 6361 | radeon_uvd_fini(rdev); | 6373 | radeon_uvd_fini(rdev); |
| 6362 | cik_pcie_gart_fini(rdev); | 6374 | cik_pcie_gart_fini(rdev); |
| 6363 | r600_vram_scratch_fini(rdev); | 6375 | r600_vram_scratch_fini(rdev); |
| @@ -6978,7 +6990,7 @@ int cik_uvd_resume(struct radeon_device *rdev) | |||
| 6978 | 6990 | ||
| 6979 | /* program the VCPU memory controller bits 0-27 */ | 6991 | /* program the VCPU memory controller bits 0-27 */ |
| 6980 | addr = rdev->uvd.gpu_addr >> 3; | 6992 | addr = rdev->uvd.gpu_addr >> 3; |
| 6981 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; | 6993 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; |
| 6982 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); | 6994 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); |
| 6983 | WREG32(UVD_VCPU_CACHE_SIZE0, size); | 6995 | WREG32(UVD_VCPU_CACHE_SIZE0, size); |
| 6984 | 6996 | ||
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 9bcdd174780f..7e5d0b570a30 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c | |||
| @@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
| 2038 | { | 2038 | { |
| 2039 | struct rv7xx_power_info *pi; | 2039 | struct rv7xx_power_info *pi; |
| 2040 | struct evergreen_power_info *eg_pi; | 2040 | struct evergreen_power_info *eg_pi; |
| 2041 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 2042 | uint16_t data_offset, size; | ||
| 2043 | uint8_t frev, crev; | ||
| 2044 | struct atom_clock_dividers dividers; | 2041 | struct atom_clock_dividers dividers; |
| 2045 | int ret; | 2042 | int ret; |
| 2046 | 2043 | ||
| @@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
| 2092 | eg_pi->vddci_control = | 2089 | eg_pi->vddci_control = |
| 2093 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2090 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
| 2094 | 2091 | ||
| 2095 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2092 | rv770_get_engine_memory_ss(rdev); |
| 2096 | &frev, &crev, &data_offset)) { | ||
| 2097 | pi->sclk_ss = true; | ||
| 2098 | pi->mclk_ss = true; | ||
| 2099 | pi->dynamic_ss = true; | ||
| 2100 | } else { | ||
| 2101 | pi->sclk_ss = false; | ||
| 2102 | pi->mclk_ss = false; | ||
| 2103 | pi->dynamic_ss = true; | ||
| 2104 | } | ||
| 2105 | 2093 | ||
| 2106 | pi->asi = RV770_ASI_DFLT; | 2094 | pi->asi = RV770_ASI_DFLT; |
| 2107 | pi->pasi = CYPRESS_HASI_DFLT; | 2095 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
| 2122 | 2110 | ||
| 2123 | pi->dynamic_pcie_gen2 = true; | 2111 | pi->dynamic_pcie_gen2 = true; |
| 2124 | 2112 | ||
| 2125 | if (pi->gfx_clock_gating && | 2113 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 2126 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 2127 | pi->thermal_protection = true; | 2114 | pi->thermal_protection = true; |
| 2128 | else | 2115 | else |
| 2129 | pi->thermal_protection = false; | 2116 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 038dcac7670c..d5b49e33315e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -5106,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
| 5106 | /* enable aspm */ | 5106 | /* enable aspm */ |
| 5107 | evergreen_program_aspm(rdev); | 5107 | evergreen_program_aspm(rdev); |
| 5108 | 5108 | ||
| 5109 | evergreen_mc_program(rdev); | ||
| 5110 | |||
| 5109 | if (ASIC_IS_DCE5(rdev)) { | 5111 | if (ASIC_IS_DCE5(rdev)) { |
| 5110 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | 5112 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { |
| 5111 | r = ni_init_microcode(rdev); | 5113 | r = ni_init_microcode(rdev); |
| @@ -5133,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
| 5133 | if (r) | 5135 | if (r) |
| 5134 | return r; | 5136 | return r; |
| 5135 | 5137 | ||
| 5136 | evergreen_mc_program(rdev); | ||
| 5137 | if (rdev->flags & RADEON_IS_AGP) { | 5138 | if (rdev->flags & RADEON_IS_AGP) { |
| 5138 | evergreen_agp_enable(rdev); | 5139 | evergreen_agp_enable(rdev); |
| 5139 | } else { | 5140 | } else { |
| @@ -5291,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev) | |||
| 5291 | int evergreen_suspend(struct radeon_device *rdev) | 5292 | int evergreen_suspend(struct radeon_device *rdev) |
| 5292 | { | 5293 | { |
| 5293 | r600_audio_fini(rdev); | 5294 | r600_audio_fini(rdev); |
| 5295 | r600_uvd_stop(rdev); | ||
| 5294 | radeon_uvd_suspend(rdev); | 5296 | radeon_uvd_suspend(rdev); |
| 5295 | r700_cp_stop(rdev); | 5297 | r700_cp_stop(rdev); |
| 5296 | r600_dma_stop(rdev); | 5298 | r600_dma_stop(rdev); |
| 5297 | r600_uvd_rbc_stop(rdev); | ||
| 5298 | evergreen_irq_suspend(rdev); | 5299 | evergreen_irq_suspend(rdev); |
| 5299 | radeon_wb_disable(rdev); | 5300 | radeon_wb_disable(rdev); |
| 5300 | evergreen_pcie_gart_disable(rdev); | 5301 | evergreen_pcie_gart_disable(rdev); |
| @@ -5429,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev) | |||
| 5429 | radeon_ib_pool_fini(rdev); | 5430 | radeon_ib_pool_fini(rdev); |
| 5430 | radeon_irq_kms_fini(rdev); | 5431 | radeon_irq_kms_fini(rdev); |
| 5431 | evergreen_pcie_gart_fini(rdev); | 5432 | evergreen_pcie_gart_fini(rdev); |
| 5433 | r600_uvd_stop(rdev); | ||
| 5432 | radeon_uvd_fini(rdev); | 5434 | radeon_uvd_fini(rdev); |
| 5433 | r600_vram_scratch_fini(rdev); | 5435 | r600_vram_scratch_fini(rdev); |
| 5434 | radeon_gem_fini(rdev); | 5436 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index b0d3fb341417..b0e280058b9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
| @@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
| 148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
| 150 | u32 base_rate = 24000; | 150 | u32 base_rate = 24000; |
| 151 | u32 max_ratio = clock / base_rate; | ||
| 152 | u32 dto_phase; | ||
| 153 | u32 dto_modulo = clock; | ||
| 154 | u32 wallclock_ratio; | ||
| 155 | u32 dto_cntl; | ||
| 151 | 156 | ||
| 152 | if (!dig || !dig->afmt) | 157 | if (!dig || !dig->afmt) |
| 153 | return; | 158 | return; |
| 154 | 159 | ||
| 160 | if (max_ratio >= 8) { | ||
| 161 | dto_phase = 192 * 1000; | ||
| 162 | wallclock_ratio = 3; | ||
| 163 | } else if (max_ratio >= 4) { | ||
| 164 | dto_phase = 96 * 1000; | ||
| 165 | wallclock_ratio = 2; | ||
| 166 | } else if (max_ratio >= 2) { | ||
| 167 | dto_phase = 48 * 1000; | ||
| 168 | wallclock_ratio = 1; | ||
| 169 | } else { | ||
| 170 | dto_phase = 24 * 1000; | ||
| 171 | wallclock_ratio = 0; | ||
| 172 | } | ||
| 173 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
| 174 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
| 175 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
| 176 | |||
| 155 | /* XXX two dtos; generally use dto0 for hdmi */ | 177 | /* XXX two dtos; generally use dto0 for hdmi */ |
| 156 | /* Express [24MHz / target pixel clock] as an exact rational | 178 | /* Express [24MHz / target pixel clock] as an exact rational |
| 157 | * number (quotient of two integer numbers). DCCG_AUDIO_DTOx_PHASE | 179 | * number (quotient of two integer numbers). DCCG_AUDIO_DTOx_PHASE |
| 158 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 180 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
| 159 | */ | 181 | */ |
| 160 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | ||
| 161 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | ||
| 162 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); | 182 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); |
| 183 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
| 184 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
| 163 | } | 185 | } |
| 164 | 186 | ||
| 165 | 187 | ||
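The new DTO programming keeps the phase/module pair in the same 24 MHz-to-pixel-clock ratio as before, but scales the phase value and a wallclock divider with the clock instead of writing fixed `* 100` values. Worked example, assuming kHz units as with drm mode clocks: for a 148.5 MHz pixel clock, `max_ratio = 148500 / 24000 = 6`, which selects the `>= 4` branch, so `dto_phase = 96 * 1000`, `wallclock_ratio = 2`, and `dto_modulo` stays at the pixel clock (148500). The ratio field is masked into `DCCG_AUDIO_DTO0_CNTL` before the phase and module registers are written.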
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a7baf67aef6c..0d582ac1dc31 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
| @@ -497,6 +497,9 @@ | |||
| 497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
| 498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 | 498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 |
| 499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc | 499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc |
| 500 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
| 501 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
| 502 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
| 500 | 503 | ||
| 501 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 | 504 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
| 502 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 | 505 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 56bd4f3be4fe..ccb4f8b54852 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -794,9 +794,13 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 794 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { | 794 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { |
| 795 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 795 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
| 796 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 796 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
| 797 | if (err) | 797 | if (err) { |
| 798 | goto out; | 798 | printk(KERN_ERR |
| 799 | if (rdev->smc_fw->size != smc_req_size) { | 799 | "smc: error loading firmware \"%s\"\n", |
| 800 | fw_name); | ||
| 801 | release_firmware(rdev->smc_fw); | ||
| 802 | rdev->smc_fw = NULL; | ||
| 803 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
| 800 | printk(KERN_ERR | 804 | printk(KERN_ERR |
| 801 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", | 805 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", |
| 802 | rdev->mc_fw->size, fw_name); | 806 | rdev->mc_fw->size, fw_name); |
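SMC (power-management) firmware becomes optional here and in the matching r600.c hunk at the end of this section: a load failure no longer aborts all of microcode init via `goto out`; instead it is logged, the reference is released, and `rdev->smc_fw` is left NULL so later DPM setup can detect the absence. A condensed sketch of the pattern; whether `err` is cleared afterwards is outside the visible context:

```c
/* SMC firmware is optional: log, drop it, and continue without DPM. */
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
	printk(KERN_ERR "smc: error loading firmware \"%s\"\n", fw_name);
	release_firmware(rdev->smc_fw);
	rdev->smc_fw = NULL;	/* DPM code must check for this */
}
```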
| @@ -2079,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev) | |||
| 2079 | /* enable aspm */ | 2083 | /* enable aspm */ |
| 2080 | evergreen_program_aspm(rdev); | 2084 | evergreen_program_aspm(rdev); |
| 2081 | 2085 | ||
| 2086 | evergreen_mc_program(rdev); | ||
| 2087 | |||
| 2082 | if (rdev->flags & RADEON_IS_IGP) { | 2088 | if (rdev->flags & RADEON_IS_IGP) { |
| 2083 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 2089 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
| 2084 | r = ni_init_microcode(rdev); | 2090 | r = ni_init_microcode(rdev); |
| @@ -2107,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev) | |||
| 2107 | if (r) | 2113 | if (r) |
| 2108 | return r; | 2114 | return r; |
| 2109 | 2115 | ||
| 2110 | evergreen_mc_program(rdev); | ||
| 2111 | r = cayman_pcie_gart_enable(rdev); | 2116 | r = cayman_pcie_gart_enable(rdev); |
| 2112 | if (r) | 2117 | if (r) |
| 2113 | return r; | 2118 | return r; |
| @@ -2286,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev) | |||
| 2286 | radeon_vm_manager_fini(rdev); | 2291 | radeon_vm_manager_fini(rdev); |
| 2287 | cayman_cp_enable(rdev, false); | 2292 | cayman_cp_enable(rdev, false); |
| 2288 | cayman_dma_stop(rdev); | 2293 | cayman_dma_stop(rdev); |
| 2289 | r600_uvd_rbc_stop(rdev); | 2294 | r600_uvd_stop(rdev); |
| 2290 | radeon_uvd_suspend(rdev); | 2295 | radeon_uvd_suspend(rdev); |
| 2291 | evergreen_irq_suspend(rdev); | 2296 | evergreen_irq_suspend(rdev); |
| 2292 | radeon_wb_disable(rdev); | 2297 | radeon_wb_disable(rdev); |
| @@ -2418,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev) | |||
| 2418 | radeon_vm_manager_fini(rdev); | 2423 | radeon_vm_manager_fini(rdev); |
| 2419 | radeon_ib_pool_fini(rdev); | 2424 | radeon_ib_pool_fini(rdev); |
| 2420 | radeon_irq_kms_fini(rdev); | 2425 | radeon_irq_kms_fini(rdev); |
| 2426 | r600_uvd_stop(rdev); | ||
| 2421 | radeon_uvd_fini(rdev); | 2427 | radeon_uvd_fini(rdev); |
| 2422 | cayman_pcie_gart_fini(rdev); | 2428 | cayman_pcie_gart_fini(rdev); |
| 2423 | r600_vram_scratch_fini(rdev); | 2429 | r600_vram_scratch_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 559cf24d51af..f0f5f748938a 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
| @@ -1054,10 +1054,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd | |||
| 1054 | int ni_dpm_force_performance_level(struct radeon_device *rdev, | 1054 | int ni_dpm_force_performance_level(struct radeon_device *rdev, |
| 1055 | enum radeon_dpm_forced_level level) | 1055 | enum radeon_dpm_forced_level level) |
| 1056 | { | 1056 | { |
| 1057 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | ||
| 1058 | struct ni_ps *ps = ni_get_ps(rps); | ||
| 1059 | u32 levels; | ||
| 1060 | |||
| 1061 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 1057 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
| 1062 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 1058 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) |
| 1063 | return -EINVAL; | 1059 | return -EINVAL; |
| @@ -1068,8 +1064,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 1068 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 1064 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| 1069 | return -EINVAL; | 1065 | return -EINVAL; |
| 1070 | 1066 | ||
| 1071 | levels = ps->performance_level_count - 1; | 1067 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) |
| 1072 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) | ||
| 1073 | return -EINVAL; | 1068 | return -EINVAL; |
| 1074 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { | 1069 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
| 1075 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 1070 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| @@ -4072,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
| 4072 | struct rv7xx_power_info *pi; | 4067 | struct rv7xx_power_info *pi; |
| 4073 | struct evergreen_power_info *eg_pi; | 4068 | struct evergreen_power_info *eg_pi; |
| 4074 | struct ni_power_info *ni_pi; | 4069 | struct ni_power_info *ni_pi; |
| 4075 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 4076 | u16 data_offset, size; | ||
| 4077 | u8 frev, crev; | ||
| 4078 | struct atom_clock_dividers dividers; | 4070 | struct atom_clock_dividers dividers; |
| 4079 | int ret; | 4071 | int ret; |
| 4080 | 4072 | ||
| @@ -4167,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
| 4167 | eg_pi->vddci_control = | 4159 | eg_pi->vddci_control = |
| 4168 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 4160 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
| 4169 | 4161 | ||
| 4170 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 4162 | rv770_get_engine_memory_ss(rdev); |
| 4171 | &frev, &crev, &data_offset)) { | ||
| 4172 | pi->sclk_ss = true; | ||
| 4173 | pi->mclk_ss = true; | ||
| 4174 | pi->dynamic_ss = true; | ||
| 4175 | } else { | ||
| 4176 | pi->sclk_ss = false; | ||
| 4177 | pi->mclk_ss = false; | ||
| 4178 | pi->dynamic_ss = true; | ||
| 4179 | } | ||
| 4180 | 4163 | ||
| 4181 | pi->asi = RV770_ASI_DFLT; | 4164 | pi->asi = RV770_ASI_DFLT; |
| 4182 | pi->pasi = CYPRESS_HASI_DFLT; | 4165 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -4193,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
| 4193 | 4176 | ||
| 4194 | pi->dynamic_pcie_gen2 = true; | 4177 | pi->dynamic_pcie_gen2 = true; |
| 4195 | 4178 | ||
| 4196 | if (pi->gfx_clock_gating && | 4179 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 4197 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 4198 | pi->thermal_protection = true; | 4180 | pi->thermal_protection = true; |
| 4199 | else | 4181 | else |
| 4200 | pi->thermal_protection = false; | 4182 | pi->thermal_protection = false; |
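Note: the ni_dpm.c changes simplify two paths. Forcing the LOW level no longer derives an enabled-level count from the current power state; it passes 1 to PPSMC_MSG_SetEnabledLevels directly. And ni_dpm_init() stops inferring spread spectrum from the mere presence of the ASIC_InternalSS_Info table, delegating to rv770_get_engine_memory_ss(). That helper's body is not part of this diff; the sketch below is an assumption modeled on the rv6xx_dpm.c hunk further down, which queries per-type SS entries:

    /* assumption: condensed sketch of rv770_get_engine_memory_ss() */
    struct radeon_atom_ss ss;
    pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                   ASIC_INTERNAL_ENGINE_SS, 0);
    pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                   ASIC_INTERNAL_MEMORY_SS, 0);
    pi->dynamic_ss = pi->sclk_ss || pi->mclk_ss;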
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 393880a09412..e66e72077350 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -2299,9 +2299,13 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
| 2299 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { | 2299 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { |
| 2300 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); | 2300 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); |
| 2301 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 2301 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
| 2302 | if (err) | 2302 | if (err) { |
| 2303 | goto out; | 2303 | printk(KERN_ERR |
| 2304 | if (rdev->smc_fw->size != smc_req_size) { | 2304 | "smc: error loading firmware \"%s\"\n", |
| 2305 | fw_name); | ||
| 2306 | release_firmware(rdev->smc_fw); | ||
| 2307 | rdev->smc_fw = NULL; | ||
| 2308 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
| 2305 | printk(KERN_ERR | 2309 | printk(KERN_ERR |
| 2306 | "smc: Bogus length %zu in firmware \"%s\"\n", | 2310 | "smc: Bogus length %zu in firmware \"%s\"\n", |
| 2307 | rdev->smc_fw->size, fw_name); | 2311 | rdev->smc_fw->size, fw_name); |
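Note: SMC firmware becomes optional here. A load failure now logs, releases the request, and clears rdev->smc_fw instead of aborting r600_init_microcode(); the radeon_pm.c hunk further down then falls back to the legacy profile method when smc_fw is NULL. The load-and-degrade pattern, as a minimal sketch:

    /* pattern: optional firmware - log, drop, and continue instead of failing */
    err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
    if (err) {
            printk(KERN_ERR "smc: error loading firmware \"%s\"\n", fw_name);
            release_firmware(rdev->smc_fw);  /* release_firmware(NULL) is a no-op */
            rdev->smc_fw = NULL;             /* radeon_pm.c tests this and degrades */
            err = 0;                         /* assumption: cleared outside the
                                                visible hunk so init can proceed */
    }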
| @@ -2697,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) | |||
| 2697 | return 0; | 2701 | return 0; |
| 2698 | } | 2702 | } |
| 2699 | 2703 | ||
| 2700 | void r600_uvd_rbc_stop(struct radeon_device *rdev) | 2704 | void r600_uvd_stop(struct radeon_device *rdev) |
| 2701 | { | 2705 | { |
| 2702 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 2706 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
| 2703 | 2707 | ||
| 2704 | /* force RBC into idle state */ | 2708 | /* force RBC into idle state */ |
| 2705 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); | 2709 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); |
| 2710 | |||
| 2711 | /* Stall UMC and register bus before resetting VCPU */ | ||
| 2712 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 2713 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
| 2714 | mdelay(1); | ||
| 2715 | |||
| 2716 | /* put VCPU into reset */ | ||
| 2717 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | ||
| 2718 | mdelay(5); | ||
| 2719 | |||
| 2720 | /* disable VCPU clock */ | ||
| 2721 | WREG32(UVD_VCPU_CNTL, 0x0); | ||
| 2722 | |||
| 2723 | /* Unstall UMC and register bus */ | ||
| 2724 | WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
| 2725 | WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); | ||
| 2726 | |||
| 2706 | ring->ready = false; | 2727 | ring->ready = false; |
| 2707 | } | 2728 | } |
| 2708 | 2729 | ||
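Note: r600_uvd_stop() grows from a ring-buffer idle into a full engine stop: stall the UMC and register bus, hold the VCPU in soft reset, gate its clock, then release the stall. The 1 << 8 and 1 << 3 writes rely on the driver's masked read-modify-write helper, where the mask selects the bits to preserve (its tail is visible in the radeon.h hunk below; the full body shown here is a sketch from the driver's conventions):

    /* radeon's masked RMW helper: 'mask' selects the bits to keep */
    #define WREG32_P(reg, val, mask)                        \
            do {                                            \
                    uint32_t tmp_ = RREG32(reg);            \
                    tmp_ &= (mask);                         \
                    tmp_ |= ((val) & ~(mask));              \
                    WREG32(reg, tmp_);                      \
            } while (0)

    /* so: set bit 8 (stall the UMC), preserving everything else */
    WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
    /* ...and later clear the same bit to release the stall */
    WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));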
| @@ -2722,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
| 2722 | /* disable interrupt */ | 2743 | /* disable interrupt */ |
| 2723 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); | 2744 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); |
| 2724 | 2745 | ||
| 2746 | /* Stall UMC and register bus before resetting VCPU */ | ||
| 2747 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 2748 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
| 2749 | mdelay(1); | ||
| 2750 | |||
| 2725 | /* put LMI, VCPU, RBC etc... into reset */ | 2751 | /* put LMI, VCPU, RBC etc... into reset */ |
| 2726 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | | 2752 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | |
| 2727 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | | 2753 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | |
| @@ -2751,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
| 2751 | WREG32(UVD_MPC_SET_ALU, 0); | 2777 | WREG32(UVD_MPC_SET_ALU, 0); |
| 2752 | WREG32(UVD_MPC_SET_MUX, 0x88); | 2778 | WREG32(UVD_MPC_SET_MUX, 0x88); |
| 2753 | 2779 | ||
| 2754 | /* Stall UMC */ | ||
| 2755 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 2756 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
| 2757 | |||
| 2758 | /* take all subblocks out of reset, except VCPU */ | 2780 | /* take all subblocks out of reset, except VCPU */ |
| 2759 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | 2781 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); |
| 2760 | mdelay(5); | 2782 | mdelay(5); |
| @@ -3166,7 +3188,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
| 3166 | 3188 | ||
| 3167 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); | 3189 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
| 3168 | num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); | 3190 | num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
| 3169 | r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21); | 3191 | r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); |
| 3170 | if (r) { | 3192 | if (r) { |
| 3171 | DRM_ERROR("radeon: moving bo (%d).\n", r); | 3193 | DRM_ERROR("radeon: moving bo (%d).\n", r); |
| 3172 | radeon_semaphore_free(rdev, &sem, NULL); | 3194 | radeon_semaphore_free(rdev, &sem, NULL); |
| @@ -3181,6 +3203,9 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
| 3181 | radeon_semaphore_free(rdev, &sem, NULL); | 3203 | radeon_semaphore_free(rdev, &sem, NULL); |
| 3182 | } | 3204 | } |
| 3183 | 3205 | ||
| 3206 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
| 3207 | radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | ||
| 3208 | radeon_ring_write(ring, WAIT_3D_IDLE_bit); | ||
| 3184 | for (i = 0; i < num_loops; i++) { | 3209 | for (i = 0; i < num_loops; i++) { |
| 3185 | cur_size_in_bytes = size_in_bytes; | 3210 | cur_size_in_bytes = size_in_bytes; |
| 3186 | if (cur_size_in_bytes > 0x1fffff) | 3211 | if (cur_size_in_bytes > 0x1fffff) |
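Note: r600_copy_cpdma() now fronts the copy with a three-dword SET_CONFIG_REG write that sets WAIT_3D_IDLE, so the ring reservation grows by exactly those three dwords. The accounting:

    /* per-loop CP DMA packet: 6 dwords; fixed overhead was 21 dwords  */
    /* new WAIT_UNTIL setup: PACKET3 header + reg offset + value = 3   */
    r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);  /* 21 + 3 */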
| @@ -3309,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 3309 | /* enable pcie gen2 link */ | 3334 | /* enable pcie gen2 link */ |
| 3310 | r600_pcie_gen2_enable(rdev); | 3335 | r600_pcie_gen2_enable(rdev); |
| 3311 | 3336 | ||
| 3337 | r600_mc_program(rdev); | ||
| 3338 | |||
| 3312 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 3339 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
| 3313 | r = r600_init_microcode(rdev); | 3340 | r = r600_init_microcode(rdev); |
| 3314 | if (r) { | 3341 | if (r) { |
| @@ -3321,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 3321 | if (r) | 3348 | if (r) |
| 3322 | return r; | 3349 | return r; |
| 3323 | 3350 | ||
| 3324 | r600_mc_program(rdev); | ||
| 3325 | if (rdev->flags & RADEON_IS_AGP) { | 3351 | if (rdev->flags & RADEON_IS_AGP) { |
| 3326 | r600_agp_enable(rdev); | 3352 | r600_agp_enable(rdev); |
| 3327 | } else { | 3353 | } else { |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index b88f54b134ab..e5c860f4ccbe 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
| @@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev) | |||
| 278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) | 278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) |
| 279 | { | 279 | { |
| 280 | if (enable) | 280 | if (enable) |
| 281 | WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF); | 281 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); |
| 282 | else | 282 | else |
| 283 | WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); | 283 | WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); |
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) | 286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) |
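Note: the r600_dpm.c change is a wrong-register fix. SCLK_PWRMGT_OFF is a bit in SCLK_PWRMGT_CNTL, so toggling it through GENERAL_PWRMGT gated nothing and disturbed whatever occupies that bit position there. Condensed:

    /* engine clock gating, now aimed at the correct register */
    WREG32_P(SCLK_PWRMGT_CNTL, enable ? 0 : SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);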
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f48240bb8c56..f264df5470f7 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
| @@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
| 226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 228 | u32 base_rate = 24000; | 228 | u32 base_rate = 24000; |
| 229 | u32 max_ratio = clock / base_rate; | ||
| 230 | u32 dto_phase; | ||
| 231 | u32 dto_modulo = clock; | ||
| 232 | u32 wallclock_ratio; | ||
| 233 | u32 dto_cntl; | ||
| 229 | 234 | ||
| 230 | if (!dig || !dig->afmt) | 235 | if (!dig || !dig->afmt) |
| 231 | return; | 236 | return; |
| 232 | 237 | ||
| 238 | if (max_ratio >= 8) { | ||
| 239 | dto_phase = 192 * 1000; | ||
| 240 | wallclock_ratio = 3; | ||
| 241 | } else if (max_ratio >= 4) { | ||
| 242 | dto_phase = 96 * 1000; | ||
| 243 | wallclock_ratio = 2; | ||
| 244 | } else if (max_ratio >= 2) { | ||
| 245 | dto_phase = 48 * 1000; | ||
| 246 | wallclock_ratio = 1; | ||
| 247 | } else { | ||
| 248 | dto_phase = 24 * 1000; | ||
| 249 | wallclock_ratio = 0; | ||
| 250 | } | ||
| 251 | |||
| 233 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. | 252 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. |
| 234 | * doesn't matter which one you use. Just use the first one. | 253 | * doesn't matter which one you use. Just use the first one. |
| 235 | */ | 254 | */ |
| @@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
| 242 | /* according to the reg specs, this should DCE3.2 only, but in | 261 | /* according to the reg specs, this should DCE3.2 only, but in |
| 243 | * practice it seems to cover DCE3.0 as well. | 262 | * practice it seems to cover DCE3.0 as well. |
| 244 | */ | 263 | */ |
| 245 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | 264 | if (dig->dig_encoder == 0) { |
| 246 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | 265 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
| 247 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | 266 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
| 267 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
| 268 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
| 269 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
| 270 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | ||
| 271 | } else { | ||
| 272 | dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
| 273 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
| 274 | WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); | ||
| 275 | WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); | ||
| 276 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); | ||
| 277 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | ||
| 278 | } | ||
| 248 | } else { | 279 | } else { |
| 249 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ | 280 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ |
| 250 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | | 281 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | |
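Note: r600_audio_set_dto() stops programming a fixed base_rate * 100 phase against a clock * 100 module; the DTO phase and a new wallclock ratio are stepped by how far the clock exceeds the 24000 base rate, and the result is programmed into DTO0 or DTO1 depending on which DIG encoder owns the pipe (the ratio field lands in the DCCG_AUDIO_DTO{0,1}_CNTL bits defined in the r600d.h hunk below). A worked example, with an illustrative clock value assumed to be in the same units as base_rate, as the division requires:

    /* e.g. clock = 148500, base_rate = 24000 (illustrative)            */
    u32 max_ratio = 148500 / 24000;   /* = 6                            */
    /* 4 <= 6 < 8, so: dto_phase = 96 * 1000, wallclock_ratio = 2       */
    /* dto_modulo = clock = 148500; the ratio bits are written via      */
    /* DCCG_AUDIO_DTO_WALLCLOCK_RATIO(2) into the selected DTO's CNTL   */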
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 8e3fe815edab..7c780839a7f4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -933,6 +933,9 @@ | |||
| 933 | #define DCCG_AUDIO_DTO0_LOAD 0x051c | 933 | #define DCCG_AUDIO_DTO0_LOAD 0x051c |
| 934 | # define DTO_LOAD (1 << 31) | 934 | # define DTO_LOAD (1 << 31) |
| 935 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 | 935 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 |
| 936 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
| 937 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
| 938 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
| 936 | 939 | ||
| 937 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 | 940 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 |
| 938 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 | 941 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f08219c39b6..9f19259667df 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -1468,7 +1468,6 @@ struct radeon_uvd { | |||
| 1468 | void *cpu_addr; | 1468 | void *cpu_addr; |
| 1469 | uint64_t gpu_addr; | 1469 | uint64_t gpu_addr; |
| 1470 | void *saved_bo; | 1470 | void *saved_bo; |
| 1471 | unsigned fw_size; | ||
| 1472 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; | 1471 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; |
| 1473 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; | 1472 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; |
| 1474 | struct delayed_work idle_work; | 1473 | struct delayed_work idle_work; |
| @@ -2066,6 +2065,7 @@ struct radeon_device { | |||
| 2066 | const struct firmware *mec_fw; /* CIK MEC firmware */ | 2065 | const struct firmware *mec_fw; /* CIK MEC firmware */ |
| 2067 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ | 2066 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ |
| 2068 | const struct firmware *smc_fw; /* SMC firmware */ | 2067 | const struct firmware *smc_fw; /* SMC firmware */ |
| 2068 | const struct firmware *uvd_fw; /* UVD firmware */ | ||
| 2069 | struct r600_blit r600_blit; | 2069 | struct r600_blit r600_blit; |
| 2070 | struct r600_vram_scratch vram_scratch; | 2070 | struct r600_vram_scratch vram_scratch; |
| 2071 | int msi_enabled; /* msi enabled */ | 2071 | int msi_enabled; /* msi enabled */ |
| @@ -2095,6 +2095,8 @@ struct radeon_device { | |||
| 2095 | /* ACPI interface */ | 2095 | /* ACPI interface */ |
| 2096 | struct radeon_atif atif; | 2096 | struct radeon_atif atif; |
| 2097 | struct radeon_atcs atcs; | 2097 | struct radeon_atcs atcs; |
| 2098 | /* srbm instance registers */ | ||
| 2099 | struct mutex srbm_mutex; | ||
| 2098 | }; | 2100 | }; |
| 2099 | 2101 | ||
| 2100 | int radeon_device_init(struct radeon_device *rdev, | 2102 | int radeon_device_init(struct radeon_device *rdev, |
| @@ -2161,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); | |||
| 2161 | WREG32(reg, tmp_); \ | 2163 | WREG32(reg, tmp_); \ |
| 2162 | } while (0) | 2164 | } while (0) |
| 2163 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) | 2165 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) |
| 2164 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) | 2166 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) |
| 2165 | #define WREG32_PLL_P(reg, val, mask) \ | 2167 | #define WREG32_PLL_P(reg, val, mask) \ |
| 2166 | do { \ | 2168 | do { \ |
| 2167 | uint32_t tmp_ = RREG32_PLL(reg); \ | 2169 | uint32_t tmp_ = RREG32_PLL(reg); \ |
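Note: three radeon.h changes here: the UVD firmware pointer moves into struct radeon_device (fw_size leaves struct radeon_uvd, since rdev->uvd_fw->size now serves), a new srbm_mutex guards the SRBM instance registers, and WREG32_OR gains parentheses around its argument. The last one matters whenever the argument is an expression, because ~ binds tighter than |:

    /* without parentheses, precedence corrupts the mask:            */
    /*   WREG32_OR(reg, A | B)  ->  mask = ~A | B      (wrong)       */
    /* with the fix:                                                 */
    /*   WREG32_OR(reg, A | B)  ->  mask = ~(A | B)    (intended)    */
    #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))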
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 78bec1a58ed1..f8f8b3113ddd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = { | |||
| 1161 | .get_mclk = &rv6xx_dpm_get_mclk, | 1161 | .get_mclk = &rv6xx_dpm_get_mclk, |
| 1162 | .print_power_state = &rv6xx_dpm_print_power_state, | 1162 | .print_power_state = &rv6xx_dpm_print_power_state, |
| 1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, | 1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, |
| 1164 | .force_performance_level = &rv6xx_dpm_force_performance_level, | ||
| 1164 | }, | 1165 | }, |
| 1165 | .pflip = { | 1166 | .pflip = { |
| 1166 | .pre_page_flip = &rs600_pre_page_flip, | 1167 | .pre_page_flip = &rs600_pre_page_flip, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index ca1895709908..3d61d5aac18f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -421,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev, | |||
| 421 | struct radeon_ps *ps); | 421 | struct radeon_ps *ps); |
| 422 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 422 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
| 423 | struct seq_file *m); | 423 | struct seq_file *m); |
| 424 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
| 425 | enum radeon_dpm_forced_level level); | ||
| 424 | /* rs780 dpm */ | 426 | /* rs780 dpm */ |
| 425 | int rs780_dpm_init(struct radeon_device *rdev); | 427 | int rs780_dpm_init(struct radeon_device *rdev); |
| 426 | int rs780_dpm_enable(struct radeon_device *rdev); | 428 | int rs780_dpm_enable(struct radeon_device *rdev); |
| @@ -439,7 +441,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde | |||
| 439 | /* uvd */ | 441 | /* uvd */ |
| 440 | int r600_uvd_init(struct radeon_device *rdev); | 442 | int r600_uvd_init(struct radeon_device *rdev); |
| 441 | int r600_uvd_rbc_start(struct radeon_device *rdev); | 443 | int r600_uvd_rbc_start(struct radeon_device *rdev); |
| 442 | void r600_uvd_rbc_stop(struct radeon_device *rdev); | 444 | void r600_uvd_stop(struct radeon_device *rdev); |
| 443 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); | 445 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
| 444 | void r600_uvd_fence_emit(struct radeon_device *rdev, | 446 | void r600_uvd_fence_emit(struct radeon_device *rdev, |
| 445 | struct radeon_fence *fence); | 447 | struct radeon_fence *fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index e3f3e8841789..4ccd61f60eb6 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -2782,7 +2782,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, | |||
| 2782 | ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; | 2782 | ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; |
| 2783 | dividers->enable_dithen = (args.v3.ucCntlFlag & | 2783 | dividers->enable_dithen = (args.v3.ucCntlFlag & |
| 2784 | ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; | 2784 | ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; |
| 2785 | dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); | 2785 | dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); |
| 2786 | dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); | 2786 | dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); |
| 2787 | dividers->ref_div = args.v3.ucRefDiv; | 2787 | dividers->ref_div = args.v3.ucRefDiv; |
| 2788 | dividers->vco_mode = (args.v3.ucCntlFlag & | 2788 | dividers->vco_mode = (args.v3.ucCntlFlag & |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 78edadc9e86b..68ce36056019 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
| 147 | enum radeon_combios_table_offset table) | 147 | enum radeon_combios_table_offset table) |
| 148 | { | 148 | { |
| 149 | struct radeon_device *rdev = dev->dev_private; | 149 | struct radeon_device *rdev = dev->dev_private; |
| 150 | int rev; | 150 | int rev, size; |
| 151 | uint16_t offset = 0, check_offset; | 151 | uint16_t offset = 0, check_offset; |
| 152 | 152 | ||
| 153 | if (!rdev->bios) | 153 | if (!rdev->bios) |
| @@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
| 156 | switch (table) { | 156 | switch (table) { |
| 157 | /* absolute offset tables */ | 157 | /* absolute offset tables */ |
| 158 | case COMBIOS_ASIC_INIT_1_TABLE: | 158 | case COMBIOS_ASIC_INIT_1_TABLE: |
| 159 | check_offset = RBIOS16(rdev->bios_header_start + 0xc); | 159 | check_offset = 0xc; |
| 160 | if (check_offset) | ||
| 161 | offset = check_offset; | ||
| 162 | break; | 160 | break; |
| 163 | case COMBIOS_BIOS_SUPPORT_TABLE: | 161 | case COMBIOS_BIOS_SUPPORT_TABLE: |
| 164 | check_offset = RBIOS16(rdev->bios_header_start + 0x14); | 162 | check_offset = 0x14; |
| 165 | if (check_offset) | ||
| 166 | offset = check_offset; | ||
| 167 | break; | 163 | break; |
| 168 | case COMBIOS_DAC_PROGRAMMING_TABLE: | 164 | case COMBIOS_DAC_PROGRAMMING_TABLE: |
| 169 | check_offset = RBIOS16(rdev->bios_header_start + 0x2a); | 165 | check_offset = 0x2a; |
| 170 | if (check_offset) | ||
| 171 | offset = check_offset; | ||
| 172 | break; | 166 | break; |
| 173 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: | 167 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: |
| 174 | check_offset = RBIOS16(rdev->bios_header_start + 0x2c); | 168 | check_offset = 0x2c; |
| 175 | if (check_offset) | ||
| 176 | offset = check_offset; | ||
| 177 | break; | 169 | break; |
| 178 | case COMBIOS_CRTC_INFO_TABLE: | 170 | case COMBIOS_CRTC_INFO_TABLE: |
| 179 | check_offset = RBIOS16(rdev->bios_header_start + 0x2e); | 171 | check_offset = 0x2e; |
| 180 | if (check_offset) | ||
| 181 | offset = check_offset; | ||
| 182 | break; | 172 | break; |
| 183 | case COMBIOS_PLL_INFO_TABLE: | 173 | case COMBIOS_PLL_INFO_TABLE: |
| 184 | check_offset = RBIOS16(rdev->bios_header_start + 0x30); | 174 | check_offset = 0x30; |
| 185 | if (check_offset) | ||
| 186 | offset = check_offset; | ||
| 187 | break; | 175 | break; |
| 188 | case COMBIOS_TV_INFO_TABLE: | 176 | case COMBIOS_TV_INFO_TABLE: |
| 189 | check_offset = RBIOS16(rdev->bios_header_start + 0x32); | 177 | check_offset = 0x32; |
| 190 | if (check_offset) | ||
| 191 | offset = check_offset; | ||
| 192 | break; | 178 | break; |
| 193 | case COMBIOS_DFP_INFO_TABLE: | 179 | case COMBIOS_DFP_INFO_TABLE: |
| 194 | check_offset = RBIOS16(rdev->bios_header_start + 0x34); | 180 | check_offset = 0x34; |
| 195 | if (check_offset) | ||
| 196 | offset = check_offset; | ||
| 197 | break; | 181 | break; |
| 198 | case COMBIOS_HW_CONFIG_INFO_TABLE: | 182 | case COMBIOS_HW_CONFIG_INFO_TABLE: |
| 199 | check_offset = RBIOS16(rdev->bios_header_start + 0x36); | 183 | check_offset = 0x36; |
| 200 | if (check_offset) | ||
| 201 | offset = check_offset; | ||
| 202 | break; | 184 | break; |
| 203 | case COMBIOS_MULTIMEDIA_INFO_TABLE: | 185 | case COMBIOS_MULTIMEDIA_INFO_TABLE: |
| 204 | check_offset = RBIOS16(rdev->bios_header_start + 0x38); | 186 | check_offset = 0x38; |
| 205 | if (check_offset) | ||
| 206 | offset = check_offset; | ||
| 207 | break; | 187 | break; |
| 208 | case COMBIOS_TV_STD_PATCH_TABLE: | 188 | case COMBIOS_TV_STD_PATCH_TABLE: |
| 209 | check_offset = RBIOS16(rdev->bios_header_start + 0x3e); | 189 | check_offset = 0x3e; |
| 210 | if (check_offset) | ||
| 211 | offset = check_offset; | ||
| 212 | break; | 190 | break; |
| 213 | case COMBIOS_LCD_INFO_TABLE: | 191 | case COMBIOS_LCD_INFO_TABLE: |
| 214 | check_offset = RBIOS16(rdev->bios_header_start + 0x40); | 192 | check_offset = 0x40; |
| 215 | if (check_offset) | ||
| 216 | offset = check_offset; | ||
| 217 | break; | 193 | break; |
| 218 | case COMBIOS_MOBILE_INFO_TABLE: | 194 | case COMBIOS_MOBILE_INFO_TABLE: |
| 219 | check_offset = RBIOS16(rdev->bios_header_start + 0x42); | 195 | check_offset = 0x42; |
| 220 | if (check_offset) | ||
| 221 | offset = check_offset; | ||
| 222 | break; | 196 | break; |
| 223 | case COMBIOS_PLL_INIT_TABLE: | 197 | case COMBIOS_PLL_INIT_TABLE: |
| 224 | check_offset = RBIOS16(rdev->bios_header_start + 0x46); | 198 | check_offset = 0x46; |
| 225 | if (check_offset) | ||
| 226 | offset = check_offset; | ||
| 227 | break; | 199 | break; |
| 228 | case COMBIOS_MEM_CONFIG_TABLE: | 200 | case COMBIOS_MEM_CONFIG_TABLE: |
| 229 | check_offset = RBIOS16(rdev->bios_header_start + 0x48); | 201 | check_offset = 0x48; |
| 230 | if (check_offset) | ||
| 231 | offset = check_offset; | ||
| 232 | break; | 202 | break; |
| 233 | case COMBIOS_SAVE_MASK_TABLE: | 203 | case COMBIOS_SAVE_MASK_TABLE: |
| 234 | check_offset = RBIOS16(rdev->bios_header_start + 0x4a); | 204 | check_offset = 0x4a; |
| 235 | if (check_offset) | ||
| 236 | offset = check_offset; | ||
| 237 | break; | 205 | break; |
| 238 | case COMBIOS_HARDCODED_EDID_TABLE: | 206 | case COMBIOS_HARDCODED_EDID_TABLE: |
| 239 | check_offset = RBIOS16(rdev->bios_header_start + 0x4c); | 207 | check_offset = 0x4c; |
| 240 | if (check_offset) | ||
| 241 | offset = check_offset; | ||
| 242 | break; | 208 | break; |
| 243 | case COMBIOS_ASIC_INIT_2_TABLE: | 209 | case COMBIOS_ASIC_INIT_2_TABLE: |
| 244 | check_offset = RBIOS16(rdev->bios_header_start + 0x4e); | 210 | check_offset = 0x4e; |
| 245 | if (check_offset) | ||
| 246 | offset = check_offset; | ||
| 247 | break; | 211 | break; |
| 248 | case COMBIOS_CONNECTOR_INFO_TABLE: | 212 | case COMBIOS_CONNECTOR_INFO_TABLE: |
| 249 | check_offset = RBIOS16(rdev->bios_header_start + 0x50); | 213 | check_offset = 0x50; |
| 250 | if (check_offset) | ||
| 251 | offset = check_offset; | ||
| 252 | break; | 214 | break; |
| 253 | case COMBIOS_DYN_CLK_1_TABLE: | 215 | case COMBIOS_DYN_CLK_1_TABLE: |
| 254 | check_offset = RBIOS16(rdev->bios_header_start + 0x52); | 216 | check_offset = 0x52; |
| 255 | if (check_offset) | ||
| 256 | offset = check_offset; | ||
| 257 | break; | 217 | break; |
| 258 | case COMBIOS_RESERVED_MEM_TABLE: | 218 | case COMBIOS_RESERVED_MEM_TABLE: |
| 259 | check_offset = RBIOS16(rdev->bios_header_start + 0x54); | 219 | check_offset = 0x54; |
| 260 | if (check_offset) | ||
| 261 | offset = check_offset; | ||
| 262 | break; | 220 | break; |
| 263 | case COMBIOS_EXT_TMDS_INFO_TABLE: | 221 | case COMBIOS_EXT_TMDS_INFO_TABLE: |
| 264 | check_offset = RBIOS16(rdev->bios_header_start + 0x58); | 222 | check_offset = 0x58; |
| 265 | if (check_offset) | ||
| 266 | offset = check_offset; | ||
| 267 | break; | 223 | break; |
| 268 | case COMBIOS_MEM_CLK_INFO_TABLE: | 224 | case COMBIOS_MEM_CLK_INFO_TABLE: |
| 269 | check_offset = RBIOS16(rdev->bios_header_start + 0x5a); | 225 | check_offset = 0x5a; |
| 270 | if (check_offset) | ||
| 271 | offset = check_offset; | ||
| 272 | break; | 226 | break; |
| 273 | case COMBIOS_EXT_DAC_INFO_TABLE: | 227 | case COMBIOS_EXT_DAC_INFO_TABLE: |
| 274 | check_offset = RBIOS16(rdev->bios_header_start + 0x5c); | 228 | check_offset = 0x5c; |
| 275 | if (check_offset) | ||
| 276 | offset = check_offset; | ||
| 277 | break; | 229 | break; |
| 278 | case COMBIOS_MISC_INFO_TABLE: | 230 | case COMBIOS_MISC_INFO_TABLE: |
| 279 | check_offset = RBIOS16(rdev->bios_header_start + 0x5e); | 231 | check_offset = 0x5e; |
| 280 | if (check_offset) | ||
| 281 | offset = check_offset; | ||
| 282 | break; | 232 | break; |
| 283 | case COMBIOS_CRT_INFO_TABLE: | 233 | case COMBIOS_CRT_INFO_TABLE: |
| 284 | check_offset = RBIOS16(rdev->bios_header_start + 0x60); | 234 | check_offset = 0x60; |
| 285 | if (check_offset) | ||
| 286 | offset = check_offset; | ||
| 287 | break; | 235 | break; |
| 288 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: | 236 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: |
| 289 | check_offset = RBIOS16(rdev->bios_header_start + 0x62); | 237 | check_offset = 0x62; |
| 290 | if (check_offset) | ||
| 291 | offset = check_offset; | ||
| 292 | break; | 238 | break; |
| 293 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: | 239 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: |
| 294 | check_offset = RBIOS16(rdev->bios_header_start + 0x64); | 240 | check_offset = 0x64; |
| 295 | if (check_offset) | ||
| 296 | offset = check_offset; | ||
| 297 | break; | 241 | break; |
| 298 | case COMBIOS_FAN_SPEED_INFO_TABLE: | 242 | case COMBIOS_FAN_SPEED_INFO_TABLE: |
| 299 | check_offset = RBIOS16(rdev->bios_header_start + 0x66); | 243 | check_offset = 0x66; |
| 300 | if (check_offset) | ||
| 301 | offset = check_offset; | ||
| 302 | break; | 244 | break; |
| 303 | case COMBIOS_OVERDRIVE_INFO_TABLE: | 245 | case COMBIOS_OVERDRIVE_INFO_TABLE: |
| 304 | check_offset = RBIOS16(rdev->bios_header_start + 0x68); | 246 | check_offset = 0x68; |
| 305 | if (check_offset) | ||
| 306 | offset = check_offset; | ||
| 307 | break; | 247 | break; |
| 308 | case COMBIOS_OEM_INFO_TABLE: | 248 | case COMBIOS_OEM_INFO_TABLE: |
| 309 | check_offset = RBIOS16(rdev->bios_header_start + 0x6a); | 249 | check_offset = 0x6a; |
| 310 | if (check_offset) | ||
| 311 | offset = check_offset; | ||
| 312 | break; | 250 | break; |
| 313 | case COMBIOS_DYN_CLK_2_TABLE: | 251 | case COMBIOS_DYN_CLK_2_TABLE: |
| 314 | check_offset = RBIOS16(rdev->bios_header_start + 0x6c); | 252 | check_offset = 0x6c; |
| 315 | if (check_offset) | ||
| 316 | offset = check_offset; | ||
| 317 | break; | 253 | break; |
| 318 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: | 254 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: |
| 319 | check_offset = RBIOS16(rdev->bios_header_start + 0x6e); | 255 | check_offset = 0x6e; |
| 320 | if (check_offset) | ||
| 321 | offset = check_offset; | ||
| 322 | break; | 256 | break; |
| 323 | case COMBIOS_I2C_INFO_TABLE: | 257 | case COMBIOS_I2C_INFO_TABLE: |
| 324 | check_offset = RBIOS16(rdev->bios_header_start + 0x70); | 258 | check_offset = 0x70; |
| 325 | if (check_offset) | ||
| 326 | offset = check_offset; | ||
| 327 | break; | 259 | break; |
| 328 | /* relative offset tables */ | 260 | /* relative offset tables */ |
| 329 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ | 261 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ |
| @@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
| 439 | } | 371 | } |
| 440 | break; | 372 | break; |
| 441 | default: | 373 | default: |
| 374 | check_offset = 0; | ||
| 442 | break; | 375 | break; |
| 443 | } | 376 | } |
| 444 | 377 | ||
| 445 | return offset; | 378 | size = RBIOS8(rdev->bios_header_start + 0x6); |
| 379 | /* check absolute offset tables */ | ||
| 380 | if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size) | ||
| 381 | offset = RBIOS16(rdev->bios_header_start + check_offset); | ||
| 446 | 382 | ||
| 383 | return offset; | ||
| 447 | } | 384 | } |
| 448 | 385 | ||
| 449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | 386 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) |
| @@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
| 965 | dac = RBIOS8(dac_info + 0x3) & 0xf; | 902 | dac = RBIOS8(dac_info + 0x3) & 0xf; |
| 966 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); | 903 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); |
| 967 | } | 904 | } |
| 968 | /* if the values are all zeros, use the table */ | 905 | /* if the values are zeros, use the table */ |
| 969 | if (p_dac->ps2_pdac_adj) | 906 | if ((dac == 0) || (bg == 0)) |
| 907 | found = 0; | ||
| 908 | else | ||
| 970 | found = 1; | 909 | found = 1; |
| 971 | } | 910 | } |
| 972 | 911 | ||
| 973 | /* quirks */ | 912 | /* quirks */ |
| 913 | /* Radeon 7000 (RV100) */ | ||
| 914 | if (((dev->pdev->device == 0x5159) && | ||
| 915 | (dev->pdev->subsystem_vendor == 0x174B) && | ||
| 916 | (dev->pdev->subsystem_device == 0x7c28)) || | ||
| 974 | /* Radeon 9100 (R200) */ | 917 | /* Radeon 9100 (R200) */ |
| 975 | if ((dev->pdev->device == 0x514D) && | 918 | ((dev->pdev->device == 0x514D) && |
| 976 | (dev->pdev->subsystem_vendor == 0x174B) && | 919 | (dev->pdev->subsystem_vendor == 0x174B) && |
| 977 | (dev->pdev->subsystem_device == 0x7149)) { | 920 | (dev->pdev->subsystem_device == 0x7149))) { |
| 978 | /* vbios value is bad, use the default */ | 921 | /* vbios value is bad, use the default */ |
| 979 | found = 0; | 922 | found = 0; |
| 980 | } | 923 | } |
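Note: combios_get_table_offset() previously repeated the same three lines for every absolute-offset table; the rewrite has each case record only its header slot and funnels them all through one read that is bounds-checked against the COMBIOS header size byte at +0x6, a check the old code never made. Also in this file: the primary DAC quirk list gains the Radeon 7000 (RV100), and a zeroed bg or dac value now falls back to the table. The shape of the refactor, condensed:

    /* before, repeated ~30 times: */
    check_offset = RBIOS16(rdev->bios_header_start + 0x30);
    if (check_offset)
            offset = check_offset;

    /* after: each case records the slot only ... */
    case COMBIOS_PLL_INFO_TABLE:
            check_offset = 0x30;
            break;

    /* ... and one shared tail validates and reads it */
    size = RBIOS8(rdev->bios_header_start + 0x6);
    if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
            offset = RBIOS16(rdev->bios_header_start + check_offset);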
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 82335e38ec4f..63398ae1dbf5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 1163 | mutex_init(&rdev->gem.mutex); | 1163 | mutex_init(&rdev->gem.mutex); |
| 1164 | mutex_init(&rdev->pm.mutex); | 1164 | mutex_init(&rdev->pm.mutex); |
| 1165 | mutex_init(&rdev->gpu_clock_mutex); | 1165 | mutex_init(&rdev->gpu_clock_mutex); |
| 1166 | mutex_init(&rdev->srbm_mutex); | ||
| 1166 | init_rwsem(&rdev->pm.mclk_lock); | 1167 | init_rwsem(&rdev->pm.mclk_lock); |
| 1167 | init_rwsem(&rdev->exclusive_lock); | 1168 | init_rwsem(&rdev->exclusive_lock); |
| 1168 | init_waitqueue_head(&rdev->irq.vblank_queue); | 1169 | init_waitqueue_head(&rdev->irq.vblank_queue); |
| @@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
| 1519 | radeon_save_bios_scratch_regs(rdev); | 1520 | radeon_save_bios_scratch_regs(rdev); |
| 1520 | /* block TTM */ | 1521 | /* block TTM */ |
| 1521 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 1522 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
| 1523 | radeon_pm_suspend(rdev); | ||
| 1522 | radeon_suspend(rdev); | 1524 | radeon_suspend(rdev); |
| 1523 | 1525 | ||
| 1524 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 1526 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
| @@ -1564,6 +1566,7 @@ retry: | |||
| 1564 | } | 1566 | } |
| 1565 | } | 1567 | } |
| 1566 | 1568 | ||
| 1569 | radeon_pm_resume(rdev); | ||
| 1567 | drm_helper_resume_force_mode(rdev->ddev); | 1570 | drm_helper_resume_force_mode(rdev->ddev); |
| 1568 | 1571 | ||
| 1569 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 1572 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
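Note: radeon_gpu_reset() now quiesces power management around the reset, and radeon_device_init() seeds the new srbm_mutex declared in the radeon.h hunk above. The reset bracket, abridged:

    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
    radeon_pm_suspend(rdev);        /* new: stop PM before the ASIC goes down */
    radeon_suspend(rdev);
    /* ... ring state save, ASIC reset, radeon_resume(), ring restore ... */
    radeon_pm_resume(rdev);         /* new: PM back up before forcing modes */
    drm_helper_resume_force_mode(rdev->ddev);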
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ddb0efe2408..ddb8f8e04eb5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) | |||
| 782 | 782 | ||
| 783 | } else { | 783 | } else { |
| 784 | /* put fence directly behind firmware */ | 784 | /* put fence directly behind firmware */ |
| 785 | index = ALIGN(rdev->uvd.fw_size, 8); | 785 | index = ALIGN(rdev->uvd_fw->size, 8); |
| 786 | rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; | 786 | rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; |
| 787 | rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; | 787 | rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; |
| 788 | } | 788 | } |
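Note: with the firmware image now kept resident in rdev->uvd_fw (see the radeon_uvd.c hunks below), the UVD ring's fence slot sits directly behind it inside the VCPU buffer object; ALIGN rounds the firmware size up to the next 8-byte boundary before it is used as an offset. A worked example with an illustrative size:

    /* assumption: 151243 is an illustrative firmware size only */
    index = ALIGN(151243, 8);                        /* -> 151248 */
    rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
    rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;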
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d9d31a383276..b990b1a2bd50 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) | |||
| 207 | if (rdev->gart.robj == NULL) { | 207 | if (rdev->gart.robj == NULL) { |
| 208 | return; | 208 | return; |
| 209 | } | 209 | } |
| 210 | radeon_gart_table_vram_unpin(rdev); | ||
| 211 | radeon_bo_unref(&rdev->gart.robj); | 210 | radeon_bo_unref(&rdev->gart.robj); |
| 212 | } | 211 | } |
| 213 | 212 | ||
| @@ -466,7 +465,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) | |||
| 466 | size += rdev->vm_manager.max_pfn * 8; | 465 | size += rdev->vm_manager.max_pfn * 8; |
| 467 | size *= 2; | 466 | size *= 2; |
| 468 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, | 467 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, |
| 469 | RADEON_VM_PTB_ALIGN(size), | 468 | RADEON_GPU_PAGE_ALIGN(size), |
| 470 | RADEON_VM_PTB_ALIGN_SIZE, | 469 | RADEON_VM_PTB_ALIGN_SIZE, |
| 471 | RADEON_GEM_DOMAIN_VRAM); | 470 | RADEON_GEM_DOMAIN_VRAM); |
| 472 | if (r) { | 471 | if (r) { |
| @@ -621,7 +620,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) | |||
| 621 | } | 620 | } |
| 622 | 621 | ||
| 623 | retry: | 622 | retry: |
| 624 | pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev)); | 623 | pd_size = radeon_vm_directory_size(rdev); |
| 625 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | 624 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
| 626 | &vm->page_directory, pd_size, | 625 | &vm->page_directory, pd_size, |
| 627 | RADEON_VM_PTB_ALIGN_SIZE, false); | 626 | RADEON_VM_PTB_ALIGN_SIZE, false); |
| @@ -953,8 +952,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev, | |||
| 953 | retry: | 952 | retry: |
| 954 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | 953 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
| 955 | &vm->page_tables[pt_idx], | 954 | &vm->page_tables[pt_idx], |
| 956 | RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8), | 955 | RADEON_VM_PTE_COUNT * 8, |
| 957 | RADEON_VM_PTB_ALIGN_SIZE, false); | 956 | RADEON_GPU_PAGE_SIZE, false); |
| 958 | 957 | ||
| 959 | if (r == -ENOMEM) { | 958 | if (r == -ENOMEM) { |
| 960 | r = radeon_vm_evict(rdev, vm); | 959 | r = radeon_vm_evict(rdev, vm); |
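Note: the radeon_gart.c hunks trim VM allocation sizing: the SA manager is sized with plain RADEON_GPU_PAGE_ALIGN, the page directory drops its PTB-align rounding, page tables request exactly their PTE payload at GPU-page alignment, and radeon_gart_table_vram_free() no longer unpins before the unref. Condensed:

    /* one PTE is 8 bytes; request the exact table size at 4 KiB
     * (RADEON_GPU_PAGE_SIZE) alignment instead of RADEON_VM_PTB_ALIGN */
    r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
                         &vm->page_tables[pt_idx],
                         RADEON_VM_PTE_COUNT * 8,
                         RADEON_GPU_PAGE_SIZE, false);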
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f374c467aaca..c557850cd345 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 1176 | case CHIP_VERDE: | 1176 | case CHIP_VERDE: |
| 1177 | case CHIP_OLAND: | 1177 | case CHIP_OLAND: |
| 1178 | case CHIP_HAINAN: | 1178 | case CHIP_HAINAN: |
| 1179 | if (radeon_dpm == 1) | 1179 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
| 1180 | if (!rdev->rlc_fw) | ||
| 1181 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
| 1182 | else if ((rdev->family >= CHIP_RV770) && | ||
| 1183 | (!(rdev->flags & RADEON_IS_IGP)) && | ||
| 1184 | (!rdev->smc_fw)) | ||
| 1185 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
| 1186 | else if (radeon_dpm == 1) | ||
| 1180 | rdev->pm.pm_method = PM_METHOD_DPM; | 1187 | rdev->pm.pm_method = PM_METHOD_DPM; |
| 1181 | else | 1188 | else |
| 1182 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1189 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
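Note: the pm_method selection now degrades instead of enabling DPM with missing firmware: no RLC microcode rules DPM out entirely, and on RV770-and-newer discrete parts a missing SMC blob does too (matching the non-fatal SMC load in r600.c above); only when both are present does the radeon_dpm module parameter decide. Condensed:

    /* sketch of the fallback logic for the listed chip families */
    bool need_smc = (rdev->family >= CHIP_RV770) &&
                    !(rdev->flags & RADEON_IS_IGP);
    if (!rdev->rlc_fw || (need_smc && !rdev->smc_fw))
            rdev->pm.pm_method = PM_METHOD_PROFILE;       /* degrade */
    else if (radeon_dpm == 1)
            rdev->pm.pm_method = PM_METHOD_DPM;
    else
            rdev->pm.pm_method = PM_METHOD_PROFILE;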
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 414fd145d20e..b79f4f5cdd62 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
| @@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work); | |||
| 56 | 56 | ||
| 57 | int radeon_uvd_init(struct radeon_device *rdev) | 57 | int radeon_uvd_init(struct radeon_device *rdev) |
| 58 | { | 58 | { |
| 59 | const struct firmware *fw; | ||
| 60 | unsigned long bo_size; | 59 | unsigned long bo_size; |
| 61 | const char *fw_name; | 60 | const char *fw_name; |
| 62 | int i, r; | 61 | int i, r; |
| @@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
| 105 | return -EINVAL; | 104 | return -EINVAL; |
| 106 | } | 105 | } |
| 107 | 106 | ||
| 108 | r = request_firmware(&fw, fw_name, rdev->dev); | 107 | r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); |
| 109 | if (r) { | 108 | if (r) { |
| 110 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", | 109 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", |
| 111 | fw_name); | 110 | fw_name); |
| 112 | return r; | 111 | return r; |
| 113 | } | 112 | } |
| 114 | 113 | ||
| 115 | bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) + | 114 | bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + |
| 116 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; | 115 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; |
| 117 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, | 116 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, |
| 118 | RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); | 117 | RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); |
| @@ -145,12 +144,6 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
| 145 | 144 | ||
| 146 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | 145 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); |
| 147 | 146 | ||
| 148 | rdev->uvd.fw_size = fw->size; | ||
| 149 | memset(rdev->uvd.cpu_addr, 0, bo_size); | ||
| 150 | memcpy(rdev->uvd.cpu_addr, fw->data, fw->size); | ||
| 151 | |||
| 152 | release_firmware(fw); | ||
| 153 | |||
| 154 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 147 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
| 155 | atomic_set(&rdev->uvd.handles[i], 0); | 148 | atomic_set(&rdev->uvd.handles[i], 0); |
| 156 | rdev->uvd.filp[i] = NULL; | 149 | rdev->uvd.filp[i] = NULL; |
| @@ -174,33 +167,60 @@ void radeon_uvd_fini(struct radeon_device *rdev) | |||
| 174 | } | 167 | } |
| 175 | 168 | ||
| 176 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | 169 | radeon_bo_unref(&rdev->uvd.vcpu_bo); |
| 170 | |||
| 171 | release_firmware(rdev->uvd_fw); | ||
| 177 | } | 172 | } |
| 178 | 173 | ||
| 179 | int radeon_uvd_suspend(struct radeon_device *rdev) | 174 | int radeon_uvd_suspend(struct radeon_device *rdev) |
| 180 | { | 175 | { |
| 181 | unsigned size; | 176 | unsigned size; |
| 177 | void *ptr; | ||
| 178 | int i; | ||
| 182 | 179 | ||
| 183 | if (rdev->uvd.vcpu_bo == NULL) | 180 | if (rdev->uvd.vcpu_bo == NULL) |
| 184 | return 0; | 181 | return 0; |
| 185 | 182 | ||
| 183 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) | ||
| 184 | if (atomic_read(&rdev->uvd.handles[i])) | ||
| 185 | break; | ||
| 186 | |||
| 187 | if (i == RADEON_MAX_UVD_HANDLES) | ||
| 188 | return 0; | ||
| 189 | |||
| 186 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | 190 | size = radeon_bo_size(rdev->uvd.vcpu_bo); |
| 191 | size -= rdev->uvd_fw->size; | ||
| 192 | |||
| 193 | ptr = rdev->uvd.cpu_addr; | ||
| 194 | ptr += rdev->uvd_fw->size; | ||
| 195 | |||
| 187 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); | 196 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); |
| 188 | memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size); | 197 | memcpy(rdev->uvd.saved_bo, ptr, size); |
| 189 | 198 | ||
| 190 | return 0; | 199 | return 0; |
| 191 | } | 200 | } |
| 192 | 201 | ||
| 193 | int radeon_uvd_resume(struct radeon_device *rdev) | 202 | int radeon_uvd_resume(struct radeon_device *rdev) |
| 194 | { | 203 | { |
| 204 | unsigned size; | ||
| 205 | void *ptr; | ||
| 206 | |||
| 195 | if (rdev->uvd.vcpu_bo == NULL) | 207 | if (rdev->uvd.vcpu_bo == NULL) |
| 196 | return -EINVAL; | 208 | return -EINVAL; |
| 197 | 209 | ||
| 210 | memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); | ||
| 211 | |||
| 212 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | ||
| 213 | size -= rdev->uvd_fw->size; | ||
| 214 | |||
| 215 | ptr = rdev->uvd.cpu_addr; | ||
| 216 | ptr += rdev->uvd_fw->size; | ||
| 217 | |||
| 198 | if (rdev->uvd.saved_bo != NULL) { | 218 | if (rdev->uvd.saved_bo != NULL) { |
| 199 | unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo); | 219 | memcpy(ptr, rdev->uvd.saved_bo, size); |
| 200 | memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size); | ||
| 201 | kfree(rdev->uvd.saved_bo); | 220 | kfree(rdev->uvd.saved_bo); |
| 202 | rdev->uvd.saved_bo = NULL; | 221 | rdev->uvd.saved_bo = NULL; |
| 203 | } | 222 | } else |
| 223 | memset(ptr, 0, size); | ||
| 204 | 224 | ||
| 205 | return 0; | 225 | return 0; |
| 206 | } | 226 | } |
| @@ -215,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) | |||
| 215 | { | 235 | { |
| 216 | int i, r; | 236 | int i, r; |
| 217 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 237 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
| 218 | if (rdev->uvd.filp[i] == filp) { | 238 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); |
| 219 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); | 239 | if (handle != 0 && rdev->uvd.filp[i] == filp) { |
| 220 | struct radeon_fence *fence; | 240 | struct radeon_fence *fence; |
| 221 | 241 | ||
| 222 | r = radeon_uvd_get_destroy_msg(rdev, | 242 | r = radeon_uvd_get_destroy_msg(rdev, |
| @@ -336,9 +356,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
| 336 | return -EINVAL; | 356 | return -EINVAL; |
| 337 | } | 357 | } |
| 338 | 358 | ||
| 359 | if (bo->tbo.sync_obj) { | ||
| 360 | r = radeon_fence_wait(bo->tbo.sync_obj, false); | ||
| 361 | if (r) { | ||
| 362 | DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); | ||
| 363 | return r; | ||
| 364 | } | ||
| 365 | } | ||
| 366 | |||
| 339 | r = radeon_bo_kmap(bo, &ptr); | 367 | r = radeon_bo_kmap(bo, &ptr); |
| 340 | if (r) | 368 | if (r) { |
| 369 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); | ||
| 341 | return r; | 370 | return r; |
| 371 | } | ||
| 342 | 372 | ||
| 343 | msg = ptr + offset; | 373 | msg = ptr + offset; |
| 344 | 374 | ||
| @@ -364,8 +394,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
| 364 | radeon_bo_kunmap(bo); | 394 | radeon_bo_kunmap(bo); |
| 365 | return 0; | 395 | return 0; |
| 366 | } else { | 396 | } else { |
| 367 | /* it's a create msg, no special handling needed */ | ||
| 368 | radeon_bo_kunmap(bo); | 397 | radeon_bo_kunmap(bo); |
| 398 | |||
| 399 | if (msg_type != 0) { | ||
| 400 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); | ||
| 401 | return -EINVAL; | ||
| 402 | } | ||
| 403 | |||
| 404 | /* it's a create msg, no special handling needed */ | ||
| 369 | } | 405 | } |
| 370 | 406 | ||
| 371 | /* create or decode, validate the handle */ | 407 | /* create or decode, validate the handle */ |
| @@ -388,7 +424,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
| 388 | 424 | ||
| 389 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | 425 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, |
| 390 | int data0, int data1, | 426 | int data0, int data1, |
| 391 | unsigned buf_sizes[]) | 427 | unsigned buf_sizes[], bool *has_msg_cmd) |
| 392 | { | 428 | { |
| 393 | struct radeon_cs_chunk *relocs_chunk; | 429 | struct radeon_cs_chunk *relocs_chunk; |
| 394 | struct radeon_cs_reloc *reloc; | 430 | struct radeon_cs_reloc *reloc; |
| @@ -417,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 417 | 453 | ||
| 418 | if (cmd < 0x4) { | 454 | if (cmd < 0x4) { |
| 419 | if ((end - start) < buf_sizes[cmd]) { | 455 | if ((end - start) < buf_sizes[cmd]) { |
| 420 | DRM_ERROR("buffer to small (%d / %d)!\n", | 456 | DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd, |
| 421 | (unsigned)(end - start), buf_sizes[cmd]); | 457 | (unsigned)(end - start), buf_sizes[cmd]); |
| 422 | return -EINVAL; | 458 | return -EINVAL; |
| 423 | } | 459 | } |
| @@ -442,9 +478,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 442 | } | 478 | } |
| 443 | 479 | ||
| 444 | if (cmd == 0) { | 480 | if (cmd == 0) { |
| 481 | if (*has_msg_cmd) { | ||
| 482 | DRM_ERROR("More than one message in a UVD-IB!\n"); | ||
| 483 | return -EINVAL; | ||
| 484 | } | ||
| 485 | *has_msg_cmd = true; | ||
| 445 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); | 486 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); |
| 446 | if (r) | 487 | if (r) |
| 447 | return r; | 488 | return r; |
| 489 | } else if (!*has_msg_cmd) { | ||
| 490 | DRM_ERROR("Message needed before other commands are sent!\n"); | ||
| 491 | return -EINVAL; | ||
| 448 | } | 492 | } |
| 449 | 493 | ||
| 450 | return 0; | 494 | return 0; |
| @@ -453,7 +497,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 453 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | 497 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, |
| 454 | struct radeon_cs_packet *pkt, | 498 | struct radeon_cs_packet *pkt, |
| 455 | int *data0, int *data1, | 499 | int *data0, int *data1, |
| 456 | unsigned buf_sizes[]) | 500 | unsigned buf_sizes[], |
| 501 | bool *has_msg_cmd) | ||
| 457 | { | 502 | { |
| 458 | int i, r; | 503 | int i, r; |
| 459 | 504 | ||
| @@ -467,7 +512,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | |||
| 467 | *data1 = p->idx; | 512 | *data1 = p->idx; |
| 468 | break; | 513 | break; |
| 469 | case UVD_GPCOM_VCPU_CMD: | 514 | case UVD_GPCOM_VCPU_CMD: |
| 470 | r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes); | 515 | r = radeon_uvd_cs_reloc(p, *data0, *data1, |
| 516 | buf_sizes, has_msg_cmd); | ||
| 471 | if (r) | 517 | if (r) |
| 472 | return r; | 518 | return r; |
| 473 | break; | 519 | break; |
| @@ -488,6 +534,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
| 488 | struct radeon_cs_packet pkt; | 534 | struct radeon_cs_packet pkt; |
| 489 | int r, data0 = 0, data1 = 0; | 535 | int r, data0 = 0, data1 = 0; |
| 490 | 536 | ||
| 537 | /* does the IB have a msg command */ | ||
| 538 | bool has_msg_cmd = false; | ||
| 539 | |||
| 491 | /* minimum buffer sizes */ | 540 | /* minimum buffer sizes */ |
| 492 | unsigned buf_sizes[] = { | 541 | unsigned buf_sizes[] = { |
| 493 | [0x00000000] = 2048, | 542 | [0x00000000] = 2048, |
| @@ -514,8 +563,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
| 514 | return r; | 563 | return r; |
| 515 | switch (pkt.type) { | 564 | switch (pkt.type) { |
| 516 | case RADEON_PACKET_TYPE0: | 565 | case RADEON_PACKET_TYPE0: |
| 517 | r = radeon_uvd_cs_reg(p, &pkt, &data0, | 566 | r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1, |
| 518 | &data1, buf_sizes); | 567 | buf_sizes, &has_msg_cmd); |
| 519 | if (r) | 568 | if (r) |
| 520 | return r; | 569 | return r; |
| 521 | break; | 570 | break; |
| @@ -527,6 +576,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
| 527 | return -EINVAL; | 576 | return -EINVAL; |
| 528 | } | 577 | } |
| 529 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); | 578 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
| 579 | |||
| 580 | if (!has_msg_cmd) { | ||
| 581 | DRM_ERROR("UVD-IBs need a msg command!\n"); | ||
| 582 | return -EINVAL; | ||
| 583 | } | ||
| 584 | |||
| 530 | return 0; | 585 | return 0; |
| 531 | } | 586 | } |
| 532 | 587 | ||
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 65e33f387341..bdd888b4db2b 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
| @@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev) | |||
| 819 | POWERMODE1(calculate_memory_refresh_rate(rdev, | 819 | POWERMODE1(calculate_memory_refresh_rate(rdev, |
| 820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | |
| 821 | POWERMODE2(calculate_memory_refresh_rate(rdev, | 821 | POWERMODE2(calculate_memory_refresh_rate(rdev, |
| 822 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 822 | pi->hw.sclks[R600_POWER_LEVEL_HIGH])) | |
| 823 | POWERMODE3(calculate_memory_refresh_rate(rdev, | 823 | POWERMODE3(calculate_memory_refresh_rate(rdev, |
| 824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); | 824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); |
| 825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); | 825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); |
| @@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev) | |||
| 1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); | 1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
| 1183 | 1183 | ||
| 1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); | 1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
| 1185 | if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1185 | if (rdev->pm.dpm.new_active_crtcs & 1) { |
| 1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
| 1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
| 1188 | } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1188 | } else if (rdev->pm.dpm.new_active_crtcs & 2) { |
| 1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
| 1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
| 1191 | } else { | 1191 | } else { |
| @@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
| 1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; | 1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; |
| 1671 | int ret; | 1671 | int ret; |
| 1672 | 1672 | ||
| 1673 | pi->restricted_levels = 0; | ||
| 1674 | |||
| 1673 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | 1675 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
| 1674 | 1676 | ||
| 1675 | rv6xx_clear_vc(rdev); | 1677 | rv6xx_clear_vc(rdev); |
| @@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
| 1756 | 1758 | ||
| 1757 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); | 1759 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
| 1758 | 1760 | ||
| 1761 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
| 1762 | |||
| 1759 | return 0; | 1763 | return 0; |
| 1760 | } | 1764 | } |
| 1761 | 1765 | ||
| @@ -1940,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) | |||
| 1940 | 1944 | ||
| 1941 | int rv6xx_dpm_init(struct radeon_device *rdev) | 1945 | int rv6xx_dpm_init(struct radeon_device *rdev) |
| 1942 | { | 1946 | { |
| 1943 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | 1947 | struct radeon_atom_ss ss; |
| 1944 | uint16_t data_offset, size; | ||
| 1945 | uint8_t frev, crev; | ||
| 1946 | struct atom_clock_dividers dividers; | 1948 | struct atom_clock_dividers dividers; |
| 1947 | struct rv6xx_power_info *pi; | 1949 | struct rv6xx_power_info *pi; |
| 1948 | int ret; | 1950 | int ret; |
| @@ -1985,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev) | |||
| 1985 | 1987 | ||
| 1986 | pi->gfx_clock_gating = true; | 1988 | pi->gfx_clock_gating = true; |
| 1987 | 1989 | ||
| 1988 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 1990 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
| 1989 | &frev, &crev, &data_offset)) { | 1991 | ASIC_INTERNAL_ENGINE_SS, 0); |
| 1990 | pi->sclk_ss = true; | 1992 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
| 1991 | pi->mclk_ss = true; | 1993 | ASIC_INTERNAL_MEMORY_SS, 0); |
| 1994 | |||
| 1995 | /* Disable sclk ss, causes hangs on a lot of systems */ | ||
| 1996 | pi->sclk_ss = false; | ||
| 1997 | |||
| 1998 | if (pi->sclk_ss || pi->mclk_ss) | ||
| 1992 | pi->dynamic_ss = true; | 1999 | pi->dynamic_ss = true; |
| 1993 | } else { | 2000 | else |
| 1994 | pi->sclk_ss = false; | ||
| 1995 | pi->mclk_ss = false; | ||
| 1996 | pi->dynamic_ss = false; | 2001 | pi->dynamic_ss = false; |
| 1997 | } | ||
| 1998 | 2002 | ||
| 1999 | pi->dynamic_pcie_gen2 = true; | 2003 | pi->dynamic_pcie_gen2 = true; |
| 2000 | 2004 | ||
| @@ -2085,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low) | |||
| 2085 | else | 2089 | else |
| 2086 | return requested_state->high.mclk; | 2090 | return requested_state->high.mclk; |
| 2087 | } | 2091 | } |
| 2092 | |||
| 2093 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
| 2094 | enum radeon_dpm_forced_level level) | ||
| 2095 | { | ||
| 2096 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); | ||
| 2097 | |||
| 2098 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | ||
| 2099 | pi->restricted_levels = 3; | ||
| 2100 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { | ||
| 2101 | pi->restricted_levels = 2; | ||
| 2102 | } else { | ||
| 2103 | pi->restricted_levels = 0; | ||
| 2104 | } | ||
| 2105 | |||
| 2106 | rv6xx_clear_vc(rdev); | ||
| 2107 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true); | ||
| 2108 | r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF); | ||
| 2109 | r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW); | ||
| 2110 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false); | ||
| 2111 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false); | ||
| 2112 | rv6xx_enable_medium(rdev); | ||
| 2113 | rv6xx_enable_high(rdev); | ||
| 2114 | if (pi->restricted_levels == 3) | ||
| 2115 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false); | ||
| 2116 | rv6xx_program_vc(rdev); | ||
| 2117 | rv6xx_program_at(rdev); | ||
| 2118 | |||
| 2119 | rdev->pm.dpm.forced_level = level; | ||
| 2120 | |||
| 2121 | return 0; | ||
| 2122 | } | ||
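rv6xx_dpm_force_performance_level() above first encodes the requested level as a restriction mask and then re-enables the low/medium/high power levels around it. A compact sketch of just that mapping, with assumed enum names (the mask semantics are a reading of the function, not documented values):

    #include <stdio.h>

    enum forced_level { LEVEL_AUTO, LEVEL_LOW, LEVEL_HIGH };

    /* Sketch only: map a forced level to the driver's restriction
     * mask; the register programming around it is omitted. */
    static unsigned int restricted_levels_for(enum forced_level level)
    {
            switch (level) {
            case LEVEL_HIGH:
                    return 3;   /* the low level gets disabled later */
            case LEVEL_LOW:
                    return 2;   /* restrict toward the low state */
            default:
                    return 0;   /* auto: no restriction */
            }
    }

    int main(void)
    {
            printf("%u\n", restricted_levels_for(LEVEL_HIGH));
            return 0;
    }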
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 30ea14e8854c..f5e92cfcc140 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
| 744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
| 745 | radeon_program_register_sequence(rdev, | 745 | radeon_program_register_sequence(rdev, |
| 746 | rv730_golden_registers, | 746 | rv730_golden_registers, |
| 747 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 747 | (const u32)ARRAY_SIZE(rv730_golden_registers)); |
| 748 | radeon_program_register_sequence(rdev, | 748 | radeon_program_register_sequence(rdev, |
| 749 | rv730_mgcg_init, | 749 | rv730_mgcg_init, |
| 750 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 750 | (const u32)ARRAY_SIZE(rv730_mgcg_init)); |
| 751 | break; | 751 | break; |
| 752 | case CHIP_RV710: | 752 | case CHIP_RV710: |
| 753 | radeon_program_register_sequence(rdev, | 753 | radeon_program_register_sequence(rdev, |
| @@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
| 758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
| 759 | radeon_program_register_sequence(rdev, | 759 | radeon_program_register_sequence(rdev, |
| 760 | rv710_golden_registers, | 760 | rv710_golden_registers, |
| 761 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 761 | (const u32)ARRAY_SIZE(rv710_golden_registers)); |
| 762 | radeon_program_register_sequence(rdev, | 762 | radeon_program_register_sequence(rdev, |
| 763 | rv710_mgcg_init, | 763 | rv710_mgcg_init, |
| 764 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 764 | (const u32)ARRAY_SIZE(rv710_mgcg_init)); |
| 765 | break; | 765 | break; |
| 766 | case CHIP_RV740: | 766 | case CHIP_RV740: |
| 767 | radeon_program_register_sequence(rdev, | 767 | radeon_program_register_sequence(rdev, |
| 768 | rv740_golden_registers, | 768 | rv740_golden_registers, |
| 769 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 769 | (const u32)ARRAY_SIZE(rv740_golden_registers)); |
| 770 | radeon_program_register_sequence(rdev, | 770 | radeon_program_register_sequence(rdev, |
| 771 | rv740_mgcg_init, | 771 | rv740_mgcg_init, |
| 772 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 772 | (const u32)ARRAY_SIZE(rv740_mgcg_init)); |
| 773 | break; | 773 | break; |
| 774 | default: | 774 | default: |
| 775 | break; | 775 | break; |
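The three rv730/rv710/rv740 fixes above are one and the same copy-paste bug: each case passed its own register table together with rv770's ARRAY_SIZE(). A hypothetical wrapper macro (not something the radeon code defines) makes the mismatch impossible by naming the array exactly once:

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static void program_register_sequence(void *dev, const unsigned int *tbl,
                                          size_t n)
    {
            (void)dev; (void)tbl;
            printf("programming %zu registers\n", n);
    }

    /* Hypothetical helper: the table is named once, so the length
     * argument cannot be copied from the wrong table. */
    #define program_regs(dev, tbl) \
            program_register_sequence((dev), (tbl), ARRAY_SIZE(tbl))

    int main(void)
    {
            static const unsigned int rv730_golden[] = { 0x1, 0x2, 0x3 };

            program_regs(NULL, rv730_golden);
            return 0;
    }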
| @@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev) | |||
| 813 | 813 | ||
| 814 | /* program the VCPU memory controller bits 0-27 */ | 814 | /* program the VCPU memory controller bits 0-27 */ |
| 815 | addr = rdev->uvd.gpu_addr >> 3; | 815 | addr = rdev->uvd.gpu_addr >> 3; |
| 816 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; | 816 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; |
| 817 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); | 817 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); |
| 818 | WREG32(UVD_VCPU_CACHE_SIZE0, size); | 818 | WREG32(UVD_VCPU_CACHE_SIZE0, size); |
| 819 | 819 | ||
| @@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 1829 | /* enable pcie gen2 link */ | 1829 | /* enable pcie gen2 link */ |
| 1830 | rv770_pcie_gen2_enable(rdev); | 1830 | rv770_pcie_gen2_enable(rdev); |
| 1831 | 1831 | ||
| 1832 | rv770_mc_program(rdev); | ||
| 1833 | |||
| 1832 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 1834 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
| 1833 | r = r600_init_microcode(rdev); | 1835 | r = r600_init_microcode(rdev); |
| 1834 | if (r) { | 1836 | if (r) { |
| @@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 1841 | if (r) | 1843 | if (r) |
| 1842 | return r; | 1844 | return r; |
| 1843 | 1845 | ||
| 1844 | rv770_mc_program(rdev); | ||
| 1845 | if (rdev->flags & RADEON_IS_AGP) { | 1846 | if (rdev->flags & RADEON_IS_AGP) { |
| 1846 | rv770_agp_enable(rdev); | 1847 | rv770_agp_enable(rdev); |
| 1847 | } else { | 1848 | } else { |
| @@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 1983 | int rv770_suspend(struct radeon_device *rdev) | 1984 | int rv770_suspend(struct radeon_device *rdev) |
| 1984 | { | 1985 | { |
| 1985 | r600_audio_fini(rdev); | 1986 | r600_audio_fini(rdev); |
| 1987 | r600_uvd_stop(rdev); | ||
| 1986 | radeon_uvd_suspend(rdev); | 1988 | radeon_uvd_suspend(rdev); |
| 1987 | r700_cp_stop(rdev); | 1989 | r700_cp_stop(rdev); |
| 1988 | r600_dma_stop(rdev); | 1990 | r600_dma_stop(rdev); |
| @@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 2098 | radeon_ib_pool_fini(rdev); | 2100 | radeon_ib_pool_fini(rdev); |
| 2099 | radeon_irq_kms_fini(rdev); | 2101 | radeon_irq_kms_fini(rdev); |
| 2100 | rv770_pcie_gart_fini(rdev); | 2102 | rv770_pcie_gart_fini(rdev); |
| 2103 | r600_uvd_stop(rdev); | ||
| 2101 | radeon_uvd_fini(rdev); | 2104 | radeon_uvd_fini(rdev); |
| 2102 | r600_vram_scratch_fini(rdev); | 2105 | r600_vram_scratch_fini(rdev); |
| 2103 | radeon_gem_fini(rdev); | 2106 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 2d347925f77d..094c67a29d0d 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
| @@ -2319,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) | |||
| 2319 | return 0; | 2319 | return 0; |
| 2320 | } | 2320 | } |
| 2321 | 2321 | ||
| 2322 | void rv770_get_engine_memory_ss(struct radeon_device *rdev) | ||
| 2323 | { | ||
| 2324 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | ||
| 2325 | struct radeon_atom_ss ss; | ||
| 2326 | |||
| 2327 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
| 2328 | ASIC_INTERNAL_ENGINE_SS, 0); | ||
| 2329 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
| 2330 | ASIC_INTERNAL_MEMORY_SS, 0); | ||
| 2331 | |||
| 2332 | if (pi->sclk_ss || pi->mclk_ss) | ||
| 2333 | pi->dynamic_ss = true; | ||
| 2334 | else | ||
| 2335 | pi->dynamic_ss = false; | ||
| 2336 | } | ||
| 2337 | |||
| 2322 | int rv770_dpm_init(struct radeon_device *rdev) | 2338 | int rv770_dpm_init(struct radeon_device *rdev) |
| 2323 | { | 2339 | { |
| 2324 | struct rv7xx_power_info *pi; | 2340 | struct rv7xx_power_info *pi; |
| 2325 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 2326 | uint16_t data_offset, size; | ||
| 2327 | uint8_t frev, crev; | ||
| 2328 | struct atom_clock_dividers dividers; | 2341 | struct atom_clock_dividers dividers; |
| 2329 | int ret; | 2342 | int ret; |
| 2330 | 2343 | ||
| @@ -2369,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
| 2369 | pi->mvdd_control = | 2382 | pi->mvdd_control = |
| 2370 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); | 2383 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); |
| 2371 | 2384 | ||
| 2372 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2385 | rv770_get_engine_memory_ss(rdev); |
| 2373 | &frev, &crev, &data_offset)) { | ||
| 2374 | pi->sclk_ss = true; | ||
| 2375 | pi->mclk_ss = true; | ||
| 2376 | pi->dynamic_ss = true; | ||
| 2377 | } else { | ||
| 2378 | pi->sclk_ss = false; | ||
| 2379 | pi->mclk_ss = false; | ||
| 2380 | pi->dynamic_ss = false; | ||
| 2381 | } | ||
| 2382 | 2386 | ||
| 2383 | pi->asi = RV770_ASI_DFLT; | 2387 | pi->asi = RV770_ASI_DFLT; |
| 2384 | pi->pasi = RV770_HASI_DFLT; | 2388 | pi->pasi = RV770_HASI_DFLT; |
| @@ -2393,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
| 2393 | 2397 | ||
| 2394 | pi->dynamic_pcie_gen2 = true; | 2398 | pi->dynamic_pcie_gen2 = true; |
| 2395 | 2399 | ||
| 2396 | if (pi->gfx_clock_gating && | 2400 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 2397 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 2398 | pi->thermal_protection = true; | 2401 | pi->thermal_protection = true; |
| 2399 | else | 2402 | else |
| 2400 | pi->thermal_protection = false; | 2403 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 96b1b2a62a8a..9244effc6b59 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h | |||
| @@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, | |||
| 275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, | 275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, |
| 276 | struct radeon_ps *new_ps, | 276 | struct radeon_ps *new_ps, |
| 277 | struct radeon_ps *old_ps); | 277 | struct radeon_ps *old_ps); |
| 278 | void rv770_get_engine_memory_ss(struct radeon_device *rdev); | ||
| 278 | 279 | ||
| 279 | /* smc */ | 280 | /* smc */ |
| 280 | int rv770_read_smc_soft_register(struct radeon_device *rdev, | 281 | int rv770_read_smc_soft_register(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index d325280e2f9f..daa8d2df8ec5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -1663,9 +1663,13 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1663 | 1663 | ||
| 1664 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 1664 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
| 1665 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 1665 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
| 1666 | if (err) | 1666 | if (err) { |
| 1667 | goto out; | 1667 | printk(KERN_ERR |
| 1668 | if (rdev->smc_fw->size != smc_req_size) { | 1668 | "smc: error loading firmware \"%s\"\n", |
| 1669 | fw_name); | ||
| 1670 | release_firmware(rdev->smc_fw); | ||
| 1671 | rdev->smc_fw = NULL; | ||
| 1672 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
| 1669 | printk(KERN_ERR | 1673 | printk(KERN_ERR |
| 1670 | "si_smc: Bogus length %zu in firmware \"%s\"\n", | 1674 | "si_smc: Bogus length %zu in firmware \"%s\"\n", |
| 1671 | rdev->smc_fw->size, fw_name); | 1675 | rdev->smc_fw->size, fw_name); |
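After this change a missing SMC image is no longer fatal: the error path logs, releases the firmware handle, and clears rdev->smc_fw so later code can test the pointer and simply run without SMC-backed features. A user-space shaped sketch of that pattern, with request_fw/release_fw as stand-ins for the firmware API:

    #include <stdio.h>
    #include <stdlib.h>

    struct fw { size_t size; };

    /* Stand-in for request_firmware(): 0 on success, negative on error. */
    static int request_fw(struct fw **out, const char *name)
    {
            (void)out; (void)name;
            return -1;                      /* pretend the file is absent */
    }

    static void release_fw(struct fw *f)
    {
            free(f);                        /* free(NULL) is a no-op */
    }

    int main(void)
    {
            struct fw *smc_fw = NULL;

            if (request_fw(&smc_fw, "radeon/tahiti_smc.bin")) {
                    fprintf(stderr, "smc: firmware missing, disabling DPM\n");
                    release_fw(smc_fw);
                    smc_fw = NULL;          /* feature code tests this */
            }
            if (!smc_fw)
                    printf("running without SMC features\n");
            return 0;
    }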
| @@ -5215,14 +5219,12 @@ static void si_enable_mc_ls(struct radeon_device *rdev, | |||
| 5215 | 5219 | ||
| 5216 | static void si_init_cg(struct radeon_device *rdev) | 5220 | static void si_init_cg(struct radeon_device *rdev) |
| 5217 | { | 5221 | { |
| 5218 | bool has_uvd = true; | ||
| 5219 | |||
| 5220 | si_enable_mgcg(rdev, true); | 5222 | si_enable_mgcg(rdev, true); |
| 5221 | si_enable_cgcg(rdev, true); | 5223 | si_enable_cgcg(rdev, false); |
| 5222 | /* disable MC LS on Tahiti */ | 5224 | /* disable MC LS on Tahiti */ |
| 5223 | if (rdev->family == CHIP_TAHITI) | 5225 | if (rdev->family == CHIP_TAHITI) |
| 5224 | si_enable_mc_ls(rdev, false); | 5226 | si_enable_mc_ls(rdev, false); |
| 5225 | if (has_uvd) { | 5227 | if (rdev->has_uvd) { |
| 5226 | si_enable_uvd_mgcg(rdev, true); | 5228 | si_enable_uvd_mgcg(rdev, true); |
| 5227 | si_init_uvd_internal_cg(rdev); | 5229 | si_init_uvd_internal_cg(rdev); |
| 5228 | } | 5230 | } |
| @@ -5230,9 +5232,7 @@ static void si_init_cg(struct radeon_device *rdev) | |||
| 5230 | 5232 | ||
| 5231 | static void si_fini_cg(struct radeon_device *rdev) | 5233 | static void si_fini_cg(struct radeon_device *rdev) |
| 5232 | { | 5234 | { |
| 5233 | bool has_uvd = true; | 5235 | if (rdev->has_uvd) |
| 5234 | |||
| 5235 | if (has_uvd) | ||
| 5236 | si_enable_uvd_mgcg(rdev, false); | 5236 | si_enable_uvd_mgcg(rdev, false); |
| 5237 | si_enable_cgcg(rdev, false); | 5237 | si_enable_cgcg(rdev, false); |
| 5238 | si_enable_mgcg(rdev, false); | 5238 | si_enable_mgcg(rdev, false); |
| @@ -5241,11 +5241,11 @@ static void si_fini_cg(struct radeon_device *rdev) | |||
| 5241 | static void si_init_pg(struct radeon_device *rdev) | 5241 | static void si_init_pg(struct radeon_device *rdev) |
| 5242 | { | 5242 | { |
| 5243 | bool has_pg = false; | 5243 | bool has_pg = false; |
| 5244 | 5244 | #if 0 | |
| 5245 | /* only cape verde supports PG */ | 5245 | /* only cape verde supports PG */ |
| 5246 | if (rdev->family == CHIP_VERDE) | 5246 | if (rdev->family == CHIP_VERDE) |
| 5247 | has_pg = true; | 5247 | has_pg = true; |
| 5248 | 5248 | #endif | |
| 5249 | if (has_pg) { | 5249 | if (has_pg) { |
| 5250 | si_init_ao_cu_mask(rdev); | 5250 | si_init_ao_cu_mask(rdev); |
| 5251 | si_init_dma_pg(rdev); | 5251 | si_init_dma_pg(rdev); |
| @@ -6422,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev) | |||
| 6422 | /* enable aspm */ | 6422 | /* enable aspm */ |
| 6423 | si_program_aspm(rdev); | 6423 | si_program_aspm(rdev); |
| 6424 | 6424 | ||
| 6425 | si_mc_program(rdev); | ||
| 6426 | |||
| 6425 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 6427 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
| 6426 | !rdev->rlc_fw || !rdev->mc_fw) { | 6428 | !rdev->rlc_fw || !rdev->mc_fw) { |
| 6427 | r = si_init_microcode(rdev); | 6429 | r = si_init_microcode(rdev); |
| @@ -6441,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev) | |||
| 6441 | if (r) | 6443 | if (r) |
| 6442 | return r; | 6444 | return r; |
| 6443 | 6445 | ||
| 6444 | si_mc_program(rdev); | ||
| 6445 | r = si_pcie_gart_enable(rdev); | 6446 | r = si_pcie_gart_enable(rdev); |
| 6446 | if (r) | 6447 | if (r) |
| 6447 | return r; | 6448 | return r; |
| @@ -6625,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev) | |||
| 6625 | si_cp_enable(rdev, false); | 6626 | si_cp_enable(rdev, false); |
| 6626 | cayman_dma_stop(rdev); | 6627 | cayman_dma_stop(rdev); |
| 6627 | if (rdev->has_uvd) { | 6628 | if (rdev->has_uvd) { |
| 6628 | r600_uvd_rbc_stop(rdev); | 6629 | r600_uvd_stop(rdev); |
| 6629 | radeon_uvd_suspend(rdev); | 6630 | radeon_uvd_suspend(rdev); |
| 6630 | } | 6631 | } |
| 6631 | si_irq_suspend(rdev); | 6632 | si_irq_suspend(rdev); |
| @@ -6767,8 +6768,10 @@ void si_fini(struct radeon_device *rdev) | |||
| 6767 | radeon_vm_manager_fini(rdev); | 6768 | radeon_vm_manager_fini(rdev); |
| 6768 | radeon_ib_pool_fini(rdev); | 6769 | radeon_ib_pool_fini(rdev); |
| 6769 | radeon_irq_kms_fini(rdev); | 6770 | radeon_irq_kms_fini(rdev); |
| 6770 | if (rdev->has_uvd) | 6771 | if (rdev->has_uvd) { |
| 6772 | r600_uvd_stop(rdev); | ||
| 6771 | radeon_uvd_fini(rdev); | 6773 | radeon_uvd_fini(rdev); |
| 6774 | } | ||
| 6772 | si_pcie_gart_fini(rdev); | 6775 | si_pcie_gart_fini(rdev); |
| 6773 | r600_vram_scratch_fini(rdev); | 6776 | r600_vram_scratch_fini(rdev); |
| 6774 | radeon_gem_fini(rdev); | 6777 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 73aaa2e4c312..88699e3cd868 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -37,8 +37,6 @@ | |||
| 37 | 37 | ||
| 38 | #define SMC_RAM_END 0x20000 | 38 | #define SMC_RAM_END 0x20000 |
| 39 | 39 | ||
| 40 | #define DDR3_DRAM_ROWS 0x2000 | ||
| 41 | |||
| 42 | #define SCLK_MIN_DEEPSLEEP_FREQ 1350 | 40 | #define SCLK_MIN_DEEPSLEEP_FREQ 1350 |
| 43 | 41 | ||
| 44 | static const struct si_cac_config_reg cac_weights_tahiti[] = | 42 | static const struct si_cac_config_reg cac_weights_tahiti[] = |
| @@ -1767,8 +1765,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe | |||
| 1767 | { | 1765 | { |
| 1768 | s64 kt, kv, leakage_w, i_leakage, vddc; | 1766 | s64 kt, kv, leakage_w, i_leakage, vddc; |
| 1769 | s64 temperature, t_slope, t_intercept, av, bv, t_ref; | 1767 | s64 temperature, t_slope, t_intercept, av, bv, t_ref; |
| 1768 | s64 tmp; | ||
| 1770 | 1769 | ||
| 1771 | i_leakage = drm_int2fixp(ileakage / 100); | 1770 | i_leakage = div64_s64(drm_int2fixp(ileakage), 100); |
| 1772 | vddc = div64_s64(drm_int2fixp(v), 1000); | 1771 | vddc = div64_s64(drm_int2fixp(v), 1000); |
| 1773 | temperature = div64_s64(drm_int2fixp(t), 1000); | 1772 | temperature = div64_s64(drm_int2fixp(t), 1000); |
| 1774 | 1773 | ||
| @@ -1778,8 +1777,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe | |||
| 1778 | bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); | 1777 | bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); |
| 1779 | t_ref = drm_int2fixp(coeff->t_ref); | 1778 | t_ref = drm_int2fixp(coeff->t_ref); |
| 1780 | 1779 | ||
| 1781 | kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)), | 1780 | tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; |
| 1782 | drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref))); | 1781 | kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); |
| 1782 | kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); | ||
| 1783 | kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); | 1783 | kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); |
| 1784 | 1784 | ||
| 1785 | leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); | 1785 | leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); |
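Two things change in the fixed-point math above: ileakage is now divided by 100 after conversion to fixed point, so the fractional part survives, and the shared exponent term is hoisted into tmp. Reading the code back into closed form (a reconstruction, not a documented model), the leakage power is

    P_{\mathrm{leak}} = I_{\mathrm{leak}} \, k_t \, k_v \, V, \qquad
    k_t = \frac{e^{(sV + c)\,T}}{e^{(sV + c)\,T_{\mathrm{ref}}}}
        = e^{(sV + c)\,(T - T_{\mathrm{ref}})}, \qquad
    k_v = a_v \, e^{b_v V}

where s and c are the temperature slope and intercept, a_v and b_v the voltage coefficients, V the core voltage, and T the temperature.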
| @@ -1931,6 +1931,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) | |||
| 1931 | si_pi->cac_override = cac_override_pitcairn; | 1931 | si_pi->cac_override = cac_override_pitcairn; |
| 1932 | si_pi->powertune_data = &powertune_data_pitcairn; | 1932 | si_pi->powertune_data = &powertune_data_pitcairn; |
| 1933 | si_pi->dte_data = dte_data_pitcairn; | 1933 | si_pi->dte_data = dte_data_pitcairn; |
| 1934 | break; | ||
| 1934 | } | 1935 | } |
| 1935 | } else if (rdev->family == CHIP_VERDE) { | 1936 | } else if (rdev->family == CHIP_VERDE) { |
| 1936 | si_pi->lcac_config = lcac_cape_verde; | 1937 | si_pi->lcac_config = lcac_cape_verde; |
| @@ -1941,6 +1942,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) | |||
| 1941 | case 0x683B: | 1942 | case 0x683B: |
| 1942 | case 0x683F: | 1943 | case 0x683F: |
| 1943 | case 0x6829: | 1944 | case 0x6829: |
| 1945 | case 0x6835: | ||
| 1944 | si_pi->cac_weights = cac_weights_cape_verde_pro; | 1946 | si_pi->cac_weights = cac_weights_cape_verde_pro; |
| 1945 | si_pi->dte_data = dte_data_cape_verde; | 1947 | si_pi->dte_data = dte_data_cape_verde; |
| 1946 | break; | 1948 | break; |
| @@ -2901,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2901 | { | 2903 | { |
| 2902 | struct ni_ps *ps = ni_get_ps(rps); | 2904 | struct ni_ps *ps = ni_get_ps(rps); |
| 2903 | struct radeon_clock_and_voltage_limits *max_limits; | 2905 | struct radeon_clock_and_voltage_limits *max_limits; |
| 2904 | bool disable_mclk_switching; | 2906 | bool disable_mclk_switching = false; |
| 2907 | bool disable_sclk_switching = false; | ||
| 2905 | u32 mclk, sclk; | 2908 | u32 mclk, sclk; |
| 2906 | u16 vddc, vddci; | 2909 | u16 vddc, vddci; |
| 2907 | int i; | 2910 | int i; |
| @@ -2909,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2909 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 2912 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
| 2910 | ni_dpm_vblank_too_short(rdev)) | 2913 | ni_dpm_vblank_too_short(rdev)) |
| 2911 | disable_mclk_switching = true; | 2914 | disable_mclk_switching = true; |
| 2912 | else | 2915 | |
| 2913 | disable_mclk_switching = false; | 2916 | if (rps->vclk || rps->dclk) { |
| 2917 | disable_mclk_switching = true; | ||
| 2918 | disable_sclk_switching = true; | ||
| 2919 | } | ||
| 2914 | 2920 | ||
| 2915 | if (rdev->pm.dpm.ac_power) | 2921 | if (rdev->pm.dpm.ac_power) |
| 2916 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | 2922 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
| @@ -2938,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2938 | 2944 | ||
| 2939 | if (disable_mclk_switching) { | 2945 | if (disable_mclk_switching) { |
| 2940 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; | 2946 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; |
| 2941 | sclk = ps->performance_levels[0].sclk; | ||
| 2942 | vddc = ps->performance_levels[0].vddc; | ||
| 2943 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; | 2947 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; |
| 2944 | } else { | 2948 | } else { |
| 2945 | sclk = ps->performance_levels[0].sclk; | ||
| 2946 | mclk = ps->performance_levels[0].mclk; | 2949 | mclk = ps->performance_levels[0].mclk; |
| 2947 | vddc = ps->performance_levels[0].vddc; | ||
| 2948 | vddci = ps->performance_levels[0].vddci; | 2950 | vddci = ps->performance_levels[0].vddci; |
| 2949 | } | 2951 | } |
| 2950 | 2952 | ||
| 2953 | if (disable_sclk_switching) { | ||
| 2954 | sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; | ||
| 2955 | vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; | ||
| 2956 | } else { | ||
| 2957 | sclk = ps->performance_levels[0].sclk; | ||
| 2958 | vddc = ps->performance_levels[0].vddc; | ||
| 2959 | } | ||
| 2960 | |||
| 2951 | /* adjusted low state */ | 2961 | /* adjusted low state */ |
| 2952 | ps->performance_levels[0].sclk = sclk; | 2962 | ps->performance_levels[0].sclk = sclk; |
| 2953 | ps->performance_levels[0].mclk = mclk; | 2963 | ps->performance_levels[0].mclk = mclk; |
| 2954 | ps->performance_levels[0].vddc = vddc; | 2964 | ps->performance_levels[0].vddc = vddc; |
| 2955 | ps->performance_levels[0].vddci = vddci; | 2965 | ps->performance_levels[0].vddci = vddci; |
| 2956 | 2966 | ||
| 2957 | for (i = 1; i < ps->performance_level_count; i++) { | 2967 | if (disable_sclk_switching) { |
| 2958 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | 2968 | sclk = ps->performance_levels[0].sclk; |
| 2959 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | 2969 | for (i = 1; i < ps->performance_level_count; i++) { |
| 2960 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | 2970 | if (sclk < ps->performance_levels[i].sclk) |
| 2961 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | 2971 | sclk = ps->performance_levels[i].sclk; |
| 2972 | } | ||
| 2973 | for (i = 0; i < ps->performance_level_count; i++) { | ||
| 2974 | ps->performance_levels[i].sclk = sclk; | ||
| 2975 | ps->performance_levels[i].vddc = vddc; | ||
| 2976 | } | ||
| 2977 | } else { | ||
| 2978 | for (i = 1; i < ps->performance_level_count; i++) { | ||
| 2979 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | ||
| 2980 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | ||
| 2981 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | ||
| 2982 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | ||
| 2983 | } | ||
| 2962 | } | 2984 | } |
| 2963 | 2985 | ||
| 2964 | if (disable_mclk_switching) { | 2986 | if (disable_mclk_switching) { |
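The reworked state-adjust logic pins both clocks whenever the state carries UVD clocks (rps->vclk || rps->dclk): with sclk switching disabled, every performance level is raised to the fastest sclk in the state so nothing can switch mid-decode. The clamp isolated into a sketch:

    #include <stdio.h>

    /* Sketch of the clamp: find the fastest sclk in the state, then
     * pin every level to it so no downward switch is possible. */
    static void clamp_sclk(unsigned int *sclk, int n)
    {
            unsigned int max = sclk[0];
            int i;

            for (i = 1; i < n; i++)
                    if (sclk[i] > max)
                            max = sclk[i];
            for (i = 0; i < n; i++)
                    sclk[i] = max;
    }

    int main(void)
    {
            unsigned int sclk[3] = { 30000, 45000, 60000 };

            clamp_sclk(sclk, 3);
            printf("%u %u %u\n", sclk[0], sclk[1], sclk[2]);
            return 0;
    }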
| @@ -3237,10 +3259,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 3237 | { | 3259 | { |
| 3238 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 3260 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
| 3239 | struct ni_ps *ps = ni_get_ps(rps); | 3261 | struct ni_ps *ps = ni_get_ps(rps); |
| 3240 | u32 levels; | 3262 | u32 levels = ps->performance_level_count; |
| 3241 | 3263 | ||
| 3242 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 3264 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
| 3243 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 3265 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) |
| 3244 | return -EINVAL; | 3266 | return -EINVAL; |
| 3245 | 3267 | ||
| 3246 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) | 3268 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) |
| @@ -3249,14 +3271,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 3249 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 3271 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| 3250 | return -EINVAL; | 3272 | return -EINVAL; |
| 3251 | 3273 | ||
| 3252 | levels = ps->performance_level_count - 1; | 3274 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) |
| 3253 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) | ||
| 3254 | return -EINVAL; | 3275 | return -EINVAL; |
| 3255 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { | 3276 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
| 3256 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 3277 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| 3257 | return -EINVAL; | 3278 | return -EINVAL; |
| 3258 | 3279 | ||
| 3259 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 3280 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) |
| 3260 | return -EINVAL; | 3281 | return -EINVAL; |
| 3261 | } | 3282 | } |
| 3262 | 3283 | ||
| @@ -3620,8 +3641,12 @@ static void si_enable_display_gap(struct radeon_device *rdev) | |||
| 3620 | { | 3641 | { |
| 3621 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); | 3642 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
| 3622 | 3643 | ||
| 3644 | tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); | ||
| 3645 | tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | | ||
| 3646 | DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); | ||
| 3647 | |||
| 3623 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); | 3648 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
| 3624 | tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) | | 3649 | tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | |
| 3625 | DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); | 3650 | DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); |
| 3626 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); | 3651 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); |
| 3627 | } | 3652 | } |
| @@ -4036,16 +4061,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev) | |||
| 4036 | static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev, | 4061 | static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev, |
| 4037 | u32 engine_clock) | 4062 | u32 engine_clock) |
| 4038 | { | 4063 | { |
| 4039 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | ||
| 4040 | u32 dram_rows; | 4064 | u32 dram_rows; |
| 4041 | u32 dram_refresh_rate; | 4065 | u32 dram_refresh_rate; |
| 4042 | u32 mc_arb_rfsh_rate; | 4066 | u32 mc_arb_rfsh_rate; |
| 4043 | u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; | 4067 | u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; |
| 4044 | 4068 | ||
| 4045 | if (pi->mem_gddr5) | 4069 | if (tmp >= 4) |
| 4046 | dram_rows = 1 << (tmp + 10); | 4070 | dram_rows = 16384; |
| 4047 | else | 4071 | else |
| 4048 | dram_rows = DDR3_DRAM_ROWS; | 4072 | dram_rows = 1 << (tmp + 10); |
| 4049 | 4073 | ||
| 4050 | dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); | 4074 | dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); |
| 4051 | mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; | 4075 | mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; |
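si_calculate_memory_refresh_rate() now reads the row count straight from the NOOFROWS field of MC_ARB_RAMCFG, capping the encoding at 16384 rows, instead of special-casing GDDR5. The arithmetic in a standalone sketch (field layouts follow the patch; the sample values are invented):

    #include <stdio.h>

    static unsigned int refresh_rate(unsigned int engine_clock,
                                     unsigned int noofrows_field,
                                     unsigned int refresh_field)
    {
            /* encoded field: rows = 2^(field + 10), capped at 16384 */
            unsigned int dram_rows = noofrows_field >= 4 ?
                                     16384 : 1u << (noofrows_field + 10);
            unsigned int dram_refresh_rate = 1u << (refresh_field + 3);

            return ((engine_clock * 10) * dram_refresh_rate
                    / dram_rows - 32) / 64;
    }

    int main(void)
    {
            printf("%u\n", refresh_rate(80000, 3, 2));
            return 0;
    }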
| @@ -6013,16 +6037,11 @@ int si_dpm_set_power_state(struct radeon_device *rdev) | |||
| 6013 | return ret; | 6037 | return ret; |
| 6014 | } | 6038 | } |
| 6015 | 6039 | ||
| 6016 | #if 0 | ||
| 6017 | /* XXX */ | ||
| 6018 | ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); | 6040 | ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); |
| 6019 | if (ret) { | 6041 | if (ret) { |
| 6020 | DRM_ERROR("si_dpm_force_performance_level failed\n"); | 6042 | DRM_ERROR("si_dpm_force_performance_level failed\n"); |
| 6021 | return ret; | 6043 | return ret; |
| 6022 | } | 6044 | } |
| 6023 | #else | ||
| 6024 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
| 6025 | #endif | ||
| 6026 | 6045 | ||
| 6027 | return 0; | 6046 | return 0; |
| 6028 | } | 6047 | } |
| @@ -6254,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6254 | struct evergreen_power_info *eg_pi; | 6273 | struct evergreen_power_info *eg_pi; |
| 6255 | struct ni_power_info *ni_pi; | 6274 | struct ni_power_info *ni_pi; |
| 6256 | struct si_power_info *si_pi; | 6275 | struct si_power_info *si_pi; |
| 6257 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 6258 | u16 data_offset, size; | ||
| 6259 | u8 frev, crev; | ||
| 6260 | struct atom_clock_dividers dividers; | 6276 | struct atom_clock_dividers dividers; |
| 6261 | int ret; | 6277 | int ret; |
| 6262 | u32 mask; | 6278 | u32 mask; |
| @@ -6347,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6347 | si_pi->vddc_phase_shed_control = | 6363 | si_pi->vddc_phase_shed_control = |
| 6348 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); | 6364 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); |
| 6349 | 6365 | ||
| 6350 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 6366 | rv770_get_engine_memory_ss(rdev); |
| 6351 | &frev, &crev, &data_offset)) { | ||
| 6352 | pi->sclk_ss = true; | ||
| 6353 | pi->mclk_ss = true; | ||
| 6354 | pi->dynamic_ss = true; | ||
| 6355 | } else { | ||
| 6356 | pi->sclk_ss = false; | ||
| 6357 | pi->mclk_ss = false; | ||
| 6358 | pi->dynamic_ss = true; | ||
| 6359 | } | ||
| 6360 | 6367 | ||
| 6361 | pi->asi = RV770_ASI_DFLT; | 6368 | pi->asi = RV770_ASI_DFLT; |
| 6362 | pi->pasi = CYPRESS_HASI_DFLT; | 6369 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -6367,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6367 | eg_pi->sclk_deep_sleep = true; | 6374 | eg_pi->sclk_deep_sleep = true; |
| 6368 | si_pi->sclk_deep_sleep_above_low = false; | 6375 | si_pi->sclk_deep_sleep_above_low = false; |
| 6369 | 6376 | ||
| 6370 | if (pi->gfx_clock_gating && | 6377 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 6371 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 6372 | pi->thermal_protection = true; | 6378 | pi->thermal_protection = true; |
| 6373 | else | 6379 | else |
| 6374 | pi->thermal_protection = false; | 6380 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 3751730764a5..1a0bf07fe54b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
| @@ -29,7 +29,9 @@ | |||
| 29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
| 30 | #include <drm/ttm/ttm_bo_driver.h> | 30 | #include <drm/ttm/ttm_bo_driver.h> |
| 31 | 31 | ||
| 32 | #define VMW_PPN_SIZE sizeof(unsigned long) | 32 | #define VMW_PPN_SIZE (sizeof(unsigned long)) |
| 33 | /* A future-safe maximum remap size. */ | ||
| 34 | #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) | ||
| 33 | 35 | ||
| 34 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, | 36 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, |
| 35 | struct page *pages[], | 37 | struct page *pages[], |
| @@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, | |||
| 38 | { | 40 | { |
| 39 | SVGAFifoCmdDefineGMR2 define_cmd; | 41 | SVGAFifoCmdDefineGMR2 define_cmd; |
| 40 | SVGAFifoCmdRemapGMR2 remap_cmd; | 42 | SVGAFifoCmdRemapGMR2 remap_cmd; |
| 41 | uint32_t define_size = sizeof(define_cmd) + 4; | ||
| 42 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4; | ||
| 43 | uint32_t *cmd; | 43 | uint32_t *cmd; |
| 44 | uint32_t *cmd_orig; | 44 | uint32_t *cmd_orig; |
| 45 | uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd); | ||
| 46 | uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); | ||
| 47 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; | ||
| 48 | uint32_t remap_pos = 0; | ||
| 49 | uint32_t cmd_size = define_size + remap_size; | ||
| 45 | uint32_t i; | 50 | uint32_t i; |
| 46 | 51 | ||
| 47 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size); | 52 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size); |
| 48 | if (unlikely(cmd == NULL)) | 53 | if (unlikely(cmd == NULL)) |
| 49 | return -ENOMEM; | 54 | return -ENOMEM; |
| 50 | 55 | ||
| 51 | define_cmd.gmrId = gmr_id; | 56 | define_cmd.gmrId = gmr_id; |
| 52 | define_cmd.numPages = num_pages; | 57 | define_cmd.numPages = num_pages; |
| 53 | 58 | ||
| 59 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | ||
| 60 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | ||
| 61 | cmd += sizeof(define_cmd) / sizeof(*cmd); | ||
| 62 | |||
| 63 | /* | ||
| 64 | * Need to split the command if there are too many | ||
| 65 | * pages that goes into the gmr. | ||
| 66 | */ | ||
| 67 | |||
| 54 | remap_cmd.gmrId = gmr_id; | 68 | remap_cmd.gmrId = gmr_id; |
| 55 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? | 69 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? |
| 56 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; | 70 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; |
| 57 | remap_cmd.offsetPages = 0; | ||
| 58 | remap_cmd.numPages = num_pages; | ||
| 59 | 71 | ||
| 60 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | 72 | while (num_pages > 0) { |
| 61 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | 73 | unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP); |
| 62 | cmd += sizeof(define_cmd) / sizeof(uint32); | 74 | |
| 75 | remap_cmd.offsetPages = remap_pos; | ||
| 76 | remap_cmd.numPages = nr; | ||
| 63 | 77 | ||
| 64 | *cmd++ = SVGA_CMD_REMAP_GMR2; | 78 | *cmd++ = SVGA_CMD_REMAP_GMR2; |
| 65 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); | 79 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); |
| 66 | cmd += sizeof(remap_cmd) / sizeof(uint32); | 80 | cmd += sizeof(remap_cmd) / sizeof(*cmd); |
| 67 | 81 | ||
| 68 | for (i = 0; i < num_pages; ++i) { | 82 | for (i = 0; i < nr; ++i) { |
| 69 | if (VMW_PPN_SIZE <= 4) | 83 | if (VMW_PPN_SIZE <= 4) |
| 70 | *cmd = page_to_pfn(*pages++); | 84 | *cmd = page_to_pfn(*pages++); |
| 71 | else | 85 | else |
| 72 | *((uint64_t *)cmd) = page_to_pfn(*pages++); | 86 | *((uint64_t *)cmd) = page_to_pfn(*pages++); |
| 73 | 87 | ||
| 74 | cmd += VMW_PPN_SIZE / sizeof(*cmd); | 88 | cmd += VMW_PPN_SIZE / sizeof(*cmd); |
| 89 | } | ||
| 90 | |||
| 91 | num_pages -= nr; | ||
| 92 | remap_pos += nr; | ||
| 75 | } | 93 | } |
| 76 | 94 | ||
| 77 | vmw_fifo_commit(dev_priv, define_size + remap_size); | 95 | BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd)); |
| 96 | |||
| 97 | vmw_fifo_commit(dev_priv, cmd_size); | ||
| 78 | 98 | ||
| 79 | return 0; | 99 | return 0; |
| 80 | } | 100 | } |
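The rewritten vmw_gmr2_bind() bounds each SVGA_CMD_REMAP_GMR2 at VMW_PPN_PER_REMAP page numbers and loops, and the final BUG_ON cross-checks that exactly cmd_size worth of words was emitted. The chunking arithmetic on its own (PER_CHUNK mirrors (31*1024)/8 on an LP64 build):

    #include <stdio.h>

    #define PER_CHUNK ((31 * 1024) / 8)    /* mirrors VMW_PPN_PER_REMAP */

    int main(void)
    {
            unsigned long num_pages = 10000, pos = 0;
            unsigned long chunks = num_pages / PER_CHUNK +
                                   ((num_pages % PER_CHUNK) > 0);

            printf("%lu remap commands\n", chunks);
            while (num_pages > 0) {
                    unsigned long nr = num_pages < PER_CHUNK ?
                                       num_pages : PER_CHUNK;

                    printf("remap offset=%lu pages=%lu\n", pos, nr);
                    num_pages -= nr;
                    pos += nr;
            }
            return 0;
    }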
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 5207591a598c..cd33084c7860 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c | |||
| @@ -192,6 +192,7 @@ static struct hid_ll_driver logi_dj_ll_driver; | |||
| 192 | static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, | 192 | static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, |
| 193 | size_t count, | 193 | size_t count, |
| 194 | unsigned char report_type); | 194 | unsigned char report_type); |
| 195 | static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev); | ||
| 195 | 196 | ||
| 196 | static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev, | 197 | static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev, |
| 197 | struct dj_report *dj_report) | 198 | struct dj_report *dj_report) |
| @@ -232,6 +233,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, | |||
| 232 | if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] & | 233 | if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] & |
| 233 | SPFUNCTION_DEVICE_LIST_EMPTY) { | 234 | SPFUNCTION_DEVICE_LIST_EMPTY) { |
| 234 | dbg_hid("%s: device list is empty\n", __func__); | 235 | dbg_hid("%s: device list is empty\n", __func__); |
| 236 | djrcv_dev->querying_devices = false; | ||
| 235 | return; | 237 | return; |
| 236 | } | 238 | } |
| 237 | 239 | ||
| @@ -242,6 +244,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, | |||
| 242 | return; | 244 | return; |
| 243 | } | 245 | } |
| 244 | 246 | ||
| 247 | if (djrcv_dev->paired_dj_devices[dj_report->device_index]) { | ||
| 248 | /* The device is already known. No need to reallocate it. */ | ||
| 249 | dbg_hid("%s: device is already known\n", __func__); | ||
| 250 | return; | ||
| 251 | } | ||
| 252 | |||
| 245 | dj_hiddev = hid_allocate_device(); | 253 | dj_hiddev = hid_allocate_device(); |
| 246 | if (IS_ERR(dj_hiddev)) { | 254 | if (IS_ERR(dj_hiddev)) { |
| 247 | dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n", | 255 | dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n", |
| @@ -305,6 +313,7 @@ static void delayedwork_callback(struct work_struct *work) | |||
| 305 | struct dj_report dj_report; | 313 | struct dj_report dj_report; |
| 306 | unsigned long flags; | 314 | unsigned long flags; |
| 307 | int count; | 315 | int count; |
| 316 | int retval; | ||
| 308 | 317 | ||
| 309 | dbg_hid("%s\n", __func__); | 318 | dbg_hid("%s\n", __func__); |
| 310 | 319 | ||
| @@ -337,6 +346,25 @@ static void delayedwork_callback(struct work_struct *work) | |||
| 337 | logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report); | 346 | logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report); |
| 338 | break; | 347 | break; |
| 339 | default: | 348 | default: |
| 349 | /* A normal report (i.e. not belonging to a pair/unpair notification) | ||
| 350 | * arriving here means that the report arrived but we did not have a | ||
| 351 | * paired dj_device associated with the report's device_index; this | ||
| 352 | * means that the original "device paired" notification corresponding | ||
| 353 | * to this dj_device never reached this driver. The reason is that | ||
| 354 | * hid-core discards all packets coming from a device while probe() is | ||
| 355 | * executing. */ | ||
| 356 | if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) { | ||
| 357 | /* ok, we don't know the device, just re-ask the | ||
| 358 | * receiver for the list of connected devices. */ | ||
| 359 | retval = logi_dj_recv_query_paired_devices(djrcv_dev); | ||
| 360 | if (!retval) { | ||
| 361 | /* everything went fine, so just leave */ | ||
| 362 | break; | ||
| 363 | } | ||
| 364 | dev_err(&djrcv_dev->hdev->dev, | ||
| 365 | "%s:logi_dj_recv_query_paired_devices " | ||
| 366 | "error:%d\n", __func__, retval); | ||
| 367 | } | ||
| 340 | dbg_hid("%s: unexpected report type\n", __func__); | 368 | dbg_hid("%s: unexpected report type\n", __func__); |
| 341 | } | 369 | } |
| 342 | } | 370 | } |
| @@ -367,6 +395,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev, | |||
| 367 | if (!djdev) { | 395 | if (!djdev) { |
| 368 | dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" | 396 | dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" |
| 369 | " is NULL, index %d\n", dj_report->device_index); | 397 | " is NULL, index %d\n", dj_report->device_index); |
| 398 | kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); | ||
| 399 | |||
| 400 | if (schedule_work(&djrcv_dev->work) == 0) { | ||
| 401 | dbg_hid("%s: did not schedule the work item, was already " | ||
| 402 | "queued\n", __func__); | ||
| 403 | } | ||
| 370 | return; | 404 | return; |
| 371 | } | 405 | } |
| 372 | 406 | ||
| @@ -397,6 +431,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev, | |||
| 397 | if (dj_device == NULL) { | 431 | if (dj_device == NULL) { |
| 398 | dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" | 432 | dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" |
| 399 | " is NULL, index %d\n", dj_report->device_index); | 433 | " is NULL, index %d\n", dj_report->device_index); |
| 434 | kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); | ||
| 435 | |||
| 436 | if (schedule_work(&djrcv_dev->work) == 0) { | ||
| 437 | dbg_hid("%s: did not schedule the work item, was already " | ||
| 438 | "queued\n", __func__); | ||
| 439 | } | ||
| 400 | return; | 440 | return; |
| 401 | } | 441 | } |
| 402 | 442 | ||
| @@ -444,6 +484,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) | |||
| 444 | struct dj_report *dj_report; | 484 | struct dj_report *dj_report; |
| 445 | int retval; | 485 | int retval; |
| 446 | 486 | ||
| 487 | /* no need to protect djrcv_dev->querying_devices */ | ||
| 488 | if (djrcv_dev->querying_devices) | ||
| 489 | return 0; | ||
| 490 | |||
| 447 | dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); | 491 | dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); |
| 448 | if (!dj_report) | 492 | if (!dj_report) |
| 449 | return -ENOMEM; | 493 | return -ENOMEM; |
| @@ -455,6 +499,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) | |||
| 455 | return retval; | 499 | return retval; |
| 456 | } | 500 | } |
| 457 | 501 | ||
| 502 | |||
| 458 | static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, | 503 | static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, |
| 459 | unsigned timeout) | 504 | unsigned timeout) |
| 460 | { | 505 | { |
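The additions above recover from reports that arrive for an unknown device_index: the report is queued into the notification FIFO and a work item re-asks the receiver for its paired-device list, with querying_devices keeping concurrent callers from stacking up queries. The debounce reduced to a sketch (no locking shown; the driver relies on its own serialization):

    /* Debounce sketch: only the first caller fires a query; the flag
     * is cleared again when the receiver answers, e.g. with an empty
     * device list. */
    struct receiver { int querying_devices; };

    static int query_paired_devices(struct receiver *r)
    {
            if (r->querying_devices)
                    return 0;               /* query already in flight */
            r->querying_devices = 1;
            /* ... send the get-paired-devices command here ... */
            return 0;
    }

    int main(void)
    {
            struct receiver r = { 0 };

            query_paired_devices(&r);       /* fires the query */
            query_paired_devices(&r);       /* no-op: still in flight */
            return 0;
    }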
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h index fd28a5e0ca3b..4a4000340ce1 100644 --- a/drivers/hid/hid-logitech-dj.h +++ b/drivers/hid/hid-logitech-dj.h | |||
| @@ -101,6 +101,7 @@ struct dj_receiver_dev { | |||
| 101 | struct work_struct work; | 101 | struct work_struct work; |
| 102 | struct kfifo notif_fifo; | 102 | struct kfifo notif_fifo; |
| 103 | spinlock_t lock; | 103 | spinlock_t lock; |
| 104 | bool querying_devices; | ||
| 104 | }; | 105 | }; |
| 105 | 106 | ||
| 106 | struct dj_device { | 107 | struct dj_device { |
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index ecbc74923d06..87fbe2924cfa 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
| @@ -369,7 +369,8 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
| 369 | if (sc->quirks & PS3REMOTE) | 369 | if (sc->quirks & PS3REMOTE) |
| 370 | return ps3remote_mapping(hdev, hi, field, usage, bit, max); | 370 | return ps3remote_mapping(hdev, hi, field, usage, bit, max); |
| 371 | 371 | ||
| 372 | return -1; | 372 | /* Let hid-core decide for the others */ |
| 373 | return 0; | ||
| 373 | } | 374 | } |
| 374 | 375 | ||
| 375 | /* | 376 | /* |
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index a7451632ceb4..6f1feb2c2e97 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
| @@ -518,7 +518,6 @@ int hidraw_connect(struct hid_device *hid) | |||
| 518 | goto out; | 518 | goto out; |
| 519 | } | 519 | } |
| 520 | 520 | ||
| 521 | mutex_unlock(&minors_lock); | ||
| 522 | init_waitqueue_head(&dev->wait); | 521 | init_waitqueue_head(&dev->wait); |
| 523 | INIT_LIST_HEAD(&dev->list); | 522 | INIT_LIST_HEAD(&dev->list); |
| 524 | 523 | ||
| @@ -528,6 +527,7 @@ int hidraw_connect(struct hid_device *hid) | |||
| 528 | dev->exist = 1; | 527 | dev->exist = 1; |
| 529 | hid->hidraw = dev; | 528 | hid->hidraw = dev; |
| 530 | 529 | ||
| 530 | mutex_unlock(&minors_lock); | ||
| 531 | out: | 531 | out: |
| 532 | return result; | 532 | return result; |
| 533 | 533 | ||
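The hidraw change is a lock-scope fix: minors_lock used to be dropped before dev->exist and hid->hidraw were set, so an open() racing on the freshly allocated minor could observe a half-initialized device. A pthread sketch of the corrected publish-under-lock ordering:

    #include <pthread.h>

    struct dev_state { int exist; };

    static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dev_state *published;     /* readers take minors_lock */

    static void connect_dev(struct dev_state *dev)
    {
            pthread_mutex_lock(&minors_lock);
            dev->exist = 1;                 /* finish initialization ... */
            published = dev;                /* ... then publish ... */
            pthread_mutex_unlock(&minors_lock); /* ... then unlock */
    }

    int main(void)
    {
            struct dev_state dev = { 0 };

            connect_dev(&dev);
            return 0;
    }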
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 4c605c70ebf9..deb5c25305af 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c | |||
| @@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, | |||
| 562 | struct hv_hotadd_state *has) | 562 | struct hv_hotadd_state *has) |
| 563 | { | 563 | { |
| 564 | int ret = 0; | 564 | int ret = 0; |
| 565 | int i, nid, t; | 565 | int i, nid; |
| 566 | unsigned long start_pfn; | 566 | unsigned long start_pfn; |
| 567 | unsigned long processed_pfn; | 567 | unsigned long processed_pfn; |
| 568 | unsigned long total_pfn = pfn_count; | 568 | unsigned long total_pfn = pfn_count; |
| @@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, | |||
| 607 | 607 | ||
| 608 | /* | 608 | /* |
| 609 | * Wait for the memory block to be onlined. | 609 | * Wait for the memory block to be onlined. |
| 610 | * Since the hot add has succeeded, it is ok to | ||
| 611 | * proceed even if the pages in the hot added region | ||
| 612 | * have not been "onlined" within the allowed time. | ||
| 610 | */ | 613 | */ |
| 611 | t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); | 614 | wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); |
| 612 | if (t == 0) { | ||
| 613 | pr_info("hot_add memory timedout\n"); | ||
| 614 | has->ha_end_pfn -= HA_CHUNK; | ||
| 615 | has->covered_end_pfn -= processed_pfn; | ||
| 616 | break; | ||
| 617 | } | ||
| 618 | 615 | ||
| 619 | } | 616 | } |
| 620 | 617 | ||
| @@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm) | |||
| 978 | dm->num_pages_ballooned + | 975 | dm->num_pages_ballooned + |
| 979 | compute_balloon_floor(); | 976 | compute_balloon_floor(); |
| 980 | 977 | ||
| 978 | /* | ||
| 979 | * If our transaction ID is no longer current, just don't | ||
| 980 | * send the status. This can happen if we were interrupted | ||
| 981 | * after we picked our transaction ID. | ||
| 982 | */ | ||
| 983 | if (status.hdr.trans_id != atomic_read(&trans_id)) | ||
| 984 | return; | ||
| 985 | |||
| 981 | vmbus_sendpacket(dm->dev->channel, &status, | 986 | vmbus_sendpacket(dm->dev->channel, &status, |
| 982 | sizeof(struct dm_status), | 987 | sizeof(struct dm_status), |
| 983 | (unsigned long)NULL, | 988 | (unsigned long)NULL, |
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a2464bf07c49..e8e071fc1d6d 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
| @@ -690,7 +690,7 @@ int vmbus_device_register(struct hv_device *child_device_obj) | |||
| 690 | if (ret) | 690 | if (ret) |
| 691 | pr_err("Unable to register child device\n"); | 691 | pr_err("Unable to register child device\n"); |
| 692 | else | 692 | else |
| 693 | pr_info("child device %s registered\n", | 693 | pr_debug("child device %s registered\n", |
| 694 | dev_name(&child_device_obj->device)); | 694 | dev_name(&child_device_obj->device)); |
| 695 | 695 | ||
| 696 | return ret; | 696 | return ret; |
| @@ -702,14 +702,14 @@ int vmbus_device_register(struct hv_device *child_device_obj) | |||
| 702 | */ | 702 | */ |
| 703 | void vmbus_device_unregister(struct hv_device *device_obj) | 703 | void vmbus_device_unregister(struct hv_device *device_obj) |
| 704 | { | 704 | { |
| 705 | pr_debug("child device %s unregistered\n", | ||
| 706 | dev_name(&device_obj->device)); | ||
| 707 | |||
| 705 | /* | 708 | /* |
| 706 | * Kick off the process of unregistering the device. | 709 | * Kick off the process of unregistering the device. |
| 707 | * This will call vmbus_remove() and eventually vmbus_device_release() | 710 | * This will call vmbus_remove() and eventually vmbus_device_release() |
| 708 | */ | 711 | */ |
| 709 | device_unregister(&device_obj->device); | 712 | device_unregister(&device_obj->device); |
| 710 | |||
| 711 | pr_info("child device %s unregistered\n", | ||
| 712 | dev_name(&device_obj->device)); | ||
| 713 | } | 713 | } |
| 714 | 714 | ||
| 715 | 715 | ||
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 0f34bca9f5e5..6099f50b28aa 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c | |||
| @@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg, | |||
| 215 | u16 value) | 215 | u16 value) |
| 216 | { | 216 | { |
| 217 | return i2c_smbus_write_byte_data(client, reg, value & 0xFF) | 217 | return i2c_smbus_write_byte_data(client, reg, value & 0xFF) |
| 218 | && i2c_smbus_write_byte_data(client, reg + 1, value >> 8); | 218 | || i2c_smbus_write_byte_data(client, reg + 1, value >> 8); |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static void adt7470_init_client(struct i2c_client *client) | 221 | static void adt7470_init_client(struct i2c_client *client) |
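The adt7470 one-liner fixes inverted error logic: i2c_smbus_write_byte_data() returns 0 on success and a negative errno on failure, so joining two writes with && evaluates to 0 whenever either write succeeds, hiding a lone failure. With ||, the first failure short-circuits and the function returns nonzero (the errno value collapses to 1, but the caller only tests for nonzero). Demonstrated:

    #include <stdio.h>

    static int write_lo(void) { return -5; }   /* pretend -EIO */
    static int write_hi(void) { return 0; }    /* succeeds */

    int main(void)
    {
            /* buggy: (-5) && 0 == 0, the error vanishes */
            printf("&&: %d\n", write_lo() && write_hi());
            /* fixed: (-5) || ... == 1, failure reported; the second
             * write is also skipped once the first one fails */
            printf("||: %d\n", write_lo() || write_hi());
            return 0;
    }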
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index 328fb0353c17..a41b5f3fc506 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c | |||
| @@ -605,12 +605,12 @@ static int max6697_init_chip(struct i2c_client *client) | |||
| 605 | if (ret < 0) | 605 | if (ret < 0) |
| 606 | return ret; | 606 | return ret; |
| 607 | ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY, | 607 | ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY, |
| 608 | pdata->ideality_mask >> 1); | 608 | pdata->ideality_value); |
| 609 | if (ret < 0) | 609 | if (ret < 0) |
| 610 | return ret; | 610 | return ret; |
| 611 | ret = i2c_smbus_write_byte_data(client, | 611 | ret = i2c_smbus_write_byte_data(client, |
| 612 | MAX6581_REG_IDEALITY_SELECT, | 612 | MAX6581_REG_IDEALITY_SELECT, |
| 613 | pdata->ideality_value); | 613 | pdata->ideality_mask >> 1); |
| 614 | if (ret < 0) | 614 | if (ret < 0) |
| 615 | return ret; | 615 | return ret; |
| 616 | } | 616 | } |
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c index ccec916bc3eb..af8f65fb1c05 100644 --- a/drivers/i2c/busses/i2c-kempld.c +++ b/drivers/i2c/busses/i2c-kempld.c | |||
| @@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c) | |||
| 246 | bus_frequency = KEMPLD_I2C_FREQ_MAX; | 246 | bus_frequency = KEMPLD_I2C_FREQ_MAX; |
| 247 | 247 | ||
| 248 | if (pld->info.spec_major == 1) | 248 | if (pld->info.spec_major == 1) |
| 249 | prescale = pld->pld_clock / bus_frequency * 5 - 1000; | 249 | prescale = pld->pld_clock / (bus_frequency * 5) - 1000; |
| 250 | else | 250 | else |
| 251 | prescale = pld->pld_clock / bus_frequency * 4 - 3000; | 251 | prescale = pld->pld_clock / (bus_frequency * 4) - 3000; |
| 252 | 252 | ||
| 253 | if (prescale < 0) | 253 | if (prescale < 0) |
| 254 | prescale = 0; | 254 | prescale = 0; |
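The kempld fix is pure operator precedence: / and * associate left to right, so pld_clock / bus_frequency * 5 divides first and then multiplies the quotient by 5 rather than dividing by (bus_frequency * 5). With invented clock numbers:

    #include <stdio.h>

    int main(void)
    {
            long clk = 33000000, freq = 100000;

            printf("wrong: %ld\n", clk / freq * 5 - 1000);   /* 650 */
            printf("right: %ld\n", clk / (freq * 5) - 1000); /* -934 */
            return 0;
    }

The negative result in the second case is fine here because, as the context lines show, the driver clamps a negative prescale to 0.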
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index df8ff5aea5b5..e2e9a0dade96 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c | |||
| @@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 493 | * based on this empirical measurement and a lot of previous frobbing. | 493 | * based on this empirical measurement and a lot of previous frobbing. |
| 494 | */ | 494 | */ |
| 495 | i2c->cmd_err = 0; | 495 | i2c->cmd_err = 0; |
| 496 | if (msg->len < 8) { | 496 | if (0) { /* disable PIO mode until a proper fix is made */ |
| 497 | ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); | 497 | ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); |
| 498 | if (ret) | 498 | if (ret) |
| 499 | mxs_i2c_reset(i2c); | 499 | mxs_i2c_reset(i2c); |
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 0ad208a69c29..3ceac3e91dde 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
| @@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) | |||
| 60 | { | 60 | { |
| 61 | unsigned int stepconfig; | 61 | unsigned int stepconfig; |
| 62 | int i, steps; | 62 | int i, steps; |
| 63 | u32 step_en; | ||
| 64 | 63 | ||
| 65 | /* | 64 | /* |
| 66 | * There are 16 configurable steps and 8 analog input | 65 | * There are 16 configurable steps and 8 analog input |
| @@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) | |||
| 86 | adc_dev->channel_step[i] = steps; | 85 | adc_dev->channel_step[i] = steps; |
| 87 | steps++; | 86 | steps++; |
| 88 | } | 87 | } |
| 89 | step_en = get_adc_step_mask(adc_dev); | 88 | |
| 90 | am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); | ||
| 91 | } | 89 | } |
| 92 | 90 | ||
| 93 | static const char * const chan_name_ain[] = { | 91 | static const char * const chan_name_ain[] = { |
| @@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
| 142 | int *val, int *val2, long mask) | 140 | int *val, int *val2, long mask) |
| 143 | { | 141 | { |
| 144 | struct tiadc_device *adc_dev = iio_priv(indio_dev); | 142 | struct tiadc_device *adc_dev = iio_priv(indio_dev); |
| 145 | int i; | 143 | int i, map_val; |
| 146 | unsigned int fifo1count, read; | 144 | unsigned int fifo1count, read, stepid; |
| 147 | u32 step = UINT_MAX; | 145 | u32 step = UINT_MAX; |
| 148 | bool found = false; | 146 | bool found = false; |
| 147 | u32 step_en; | ||
| 148 | unsigned long timeout = jiffies + usecs_to_jiffies | ||
| 149 | (IDLE_TIMEOUT * adc_dev->channels); | ||
| 150 | step_en = get_adc_step_mask(adc_dev); | ||
| 151 | am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); | ||
| 152 | |||
| 153 | /* Wait for ADC sequencer to complete sampling */ | ||
| 154 | while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) { | ||
| 155 | if (time_after(jiffies, timeout)) | ||
| 156 | return -EAGAIN; | ||
| 157 | } | ||
| 158 | map_val = chan->channel + TOTAL_CHANNELS; | ||
| 149 | 159 | ||
| 150 | /* | 160 | /* |
| 151 | * When the sub-system is first enabled, | 161 | * When the sub-system is first enabled, |
| @@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
| 170 | fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); | 180 | fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); |
| 171 | for (i = 0; i < fifo1count; i++) { | 181 | for (i = 0; i < fifo1count; i++) { |
| 172 | read = tiadc_readl(adc_dev, REG_FIFO1); | 182 | read = tiadc_readl(adc_dev, REG_FIFO1); |
| 173 | if (read >> 16 == step) { | 183 | stepid = read & FIFOREAD_CHNLID_MASK; |
| 174 | *val = read & 0xfff; | 184 | stepid = stepid >> 0x10; |
| 185 | |||
| 186 | if (stepid == map_val) { | ||
| 187 | read = read & FIFOREAD_DATA_MASK; | ||
| 175 | found = true; | 188 | found = true; |
| 189 | *val = read; | ||
| 176 | } | 190 | } |
| 177 | } | 191 | } |
| 178 | am335x_tsc_se_update(adc_dev->mfd_tscadc); | 192 | |
| 179 | if (found == false) | 193 | if (found == false) |
| 180 | return -EBUSY; | 194 | return -EBUSY; |
| 181 | return IIO_VAL_INT; | 195 | return IIO_VAL_INT; |
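tiadc_read_raw() now arms the steps itself and busy-waits on the sequencer status with a jiffies deadline, returning -EAGAIN instead of spinning forever. A user-space shaped sketch of the deadline pattern, with clock_gettime standing in for jiffies and hw_busy() for the REG_ADCFSM read:

    #include <stdio.h>
    #include <time.h>

    static int hw_busy(void)
    {
            return 0;                 /* pretend the sequencer is idle */
    }

    /* Poll until idle or until timeout_ns elapses; -1 plays the role
     * of the driver's -EAGAIN. */
    static int wait_idle(long timeout_ns)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            while (hw_busy()) {
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if ((now.tv_sec - start.tv_sec) * 1000000000L +
                        (now.tv_nsec - start.tv_nsec) > timeout_ns)
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", wait_idle(1000000));
            return 0;
    }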
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index ea8a4146620d..0dd9bb873130 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c | |||
| @@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name, | |||
| 127 | void iio_trigger_poll(struct iio_trigger *trig, s64 time) | 127 | void iio_trigger_poll(struct iio_trigger *trig, s64 time) |
| 128 | { | 128 | { |
| 129 | int i; | 129 | int i; |
| 130 | if (!trig->use_count) | 130 | |
| 131 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) | 131 | if (!atomic_read(&trig->use_count)) { |
| 132 | if (trig->subirqs[i].enabled) { | 132 | atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
| 133 | trig->use_count++; | 133 | |
| 134 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { | ||
| 135 | if (trig->subirqs[i].enabled) | ||
| 134 | generic_handle_irq(trig->subirq_base + i); | 136 | generic_handle_irq(trig->subirq_base + i); |
| 135 | } | 137 | else |
| 138 | iio_trigger_notify_done(trig); | ||
| 139 | } | ||
| 140 | } | ||
| 136 | } | 141 | } |
| 137 | EXPORT_SYMBOL(iio_trigger_poll); | 142 | EXPORT_SYMBOL(iio_trigger_poll); |
| 138 | 143 | ||
| @@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll); | |||
| 146 | void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) | 151 | void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) |
| 147 | { | 152 | { |
| 148 | int i; | 153 | int i; |
| 149 | if (!trig->use_count) | 154 | |
| 150 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) | 155 | if (!atomic_read(&trig->use_count)) { |
| 151 | if (trig->subirqs[i].enabled) { | 156 | atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
| 152 | trig->use_count++; | 157 | |
| 158 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { | ||
| 159 | if (trig->subirqs[i].enabled) | ||
| 153 | handle_nested_irq(trig->subirq_base + i); | 160 | handle_nested_irq(trig->subirq_base + i); |
| 154 | } | 161 | else |
| 162 | iio_trigger_notify_done(trig); | ||
| 163 | } | ||
| 164 | } | ||
| 155 | } | 165 | } |
| 156 | EXPORT_SYMBOL(iio_trigger_poll_chained); | 166 | EXPORT_SYMBOL(iio_trigger_poll_chained); |
| 157 | 167 | ||
| 158 | void iio_trigger_notify_done(struct iio_trigger *trig) | 168 | void iio_trigger_notify_done(struct iio_trigger *trig) |
| 159 | { | 169 | { |
| 160 | trig->use_count--; | 170 | if (atomic_dec_and_test(&trig->use_count) && trig->ops && |
| 161 | if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable) | 171 | trig->ops->try_reenable) |
| 162 | if (trig->ops->try_reenable(trig)) | 172 | if (trig->ops->try_reenable(trig)) |
| 163 | /* Missed an interrupt so launch new poll now */ | 173 | /* Missed an interrupt so launch new poll now */ |
| 164 | iio_trigger_poll(trig, 0); | 174 | iio_trigger_poll(trig, 0); |
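The industrialio-trigger hunks replace a plain use_count integer with an atomic counter that is pre-loaded with the number of consumer slots before any interrupt fires; disabled slots are released immediately through the notify-done path, and the final decrement to zero is what permits re-enabling the trigger. A compact C11 model of that count-to-zero scheme, with all names illustrative:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CONSUMERS 4

static atomic_int use_count;

/* Called once per consumer slot when its work (or its skip) is done. */
static void notify_done(void)
{
	if (atomic_fetch_sub(&use_count, 1) == 1)
		printf("last consumer done - trigger may be re-enabled\n");
}

static void poll_trigger(const bool enabled[CONSUMERS])
{
	if (atomic_load(&use_count))
		return;				/* previous poll still in flight */

	atomic_store(&use_count, CONSUMERS);	/* claim all slots up front */
	for (int i = 0; i < CONSUMERS; i++) {
		if (enabled[i])
			printf("fire consumer %d\n", i); /* handler calls notify_done() */
		else
			notify_done();		/* release the unused slot at once */
	}
}

int main(void)
{
	bool enabled[CONSUMERS] = { true, false, true, false };

	poll_trigger(enabled);
	/* the enabled handlers would call notify_done() later;
	 * emulate that so the count reaches zero */
	notify_done();
	notify_done();
	return 0;
}
```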
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 5f4749e60b04..c1cd5698b8ae 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c | |||
| @@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev, | |||
| 232 | 232 | ||
| 233 | switch (mask) { | 233 | switch (mask) { |
| 234 | case IIO_CHAN_INFO_RAW: | 234 | case IIO_CHAN_INFO_RAW: |
| 235 | ret = adjd_s311_read_data(indio_dev, chan->address, val); | 235 | ret = adjd_s311_read_data(indio_dev, |
| 236 | ADJD_S311_DATA_REG(chan->address), val); | ||
| 236 | if (ret < 0) | 237 | if (ret < 0) |
| 237 | return ret; | 238 | return ret; |
| 238 | return IIO_VAL_INT; | 239 | return IIO_VAL_INT; |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f1c279fabe64..7c0f9535fb7d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -423,7 +423,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) | |||
| 423 | struct sockaddr_ib *addr; | 423 | struct sockaddr_ib *addr; |
| 424 | union ib_gid gid, sgid, *dgid; | 424 | union ib_gid gid, sgid, *dgid; |
| 425 | u16 pkey, index; | 425 | u16 pkey, index; |
| 426 | u8 port, p; | 426 | u8 p; |
| 427 | int i; | 427 | int i; |
| 428 | 428 | ||
| 429 | cma_dev = NULL; | 429 | cma_dev = NULL; |
| @@ -443,7 +443,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) | |||
| 443 | if (!memcmp(&gid, dgid, sizeof(gid))) { | 443 | if (!memcmp(&gid, dgid, sizeof(gid))) { |
| 444 | cma_dev = cur_dev; | 444 | cma_dev = cur_dev; |
| 445 | sgid = gid; | 445 | sgid = gid; |
| 446 | port = p; | 446 | id_priv->id.port_num = p; |
| 447 | goto found; | 447 | goto found; |
| 448 | } | 448 | } |
| 449 | 449 | ||
| @@ -451,7 +451,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) | |||
| 451 | dgid->global.subnet_prefix)) { | 451 | dgid->global.subnet_prefix)) { |
| 452 | cma_dev = cur_dev; | 452 | cma_dev = cur_dev; |
| 453 | sgid = gid; | 453 | sgid = gid; |
| 454 | port = p; | 454 | id_priv->id.port_num = p; |
| 455 | } | 455 | } |
| 456 | } | 456 | } |
| 457 | } | 457 | } |
| @@ -462,7 +462,6 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) | |||
| 462 | 462 | ||
| 463 | found: | 463 | found: |
| 464 | cma_attach_to_dev(id_priv, cma_dev); | 464 | cma_attach_to_dev(id_priv, cma_dev); |
| 465 | id_priv->id.port_num = port; | ||
| 466 | addr = (struct sockaddr_ib *) cma_src_addr(id_priv); | 465 | addr = (struct sockaddr_ib *) cma_src_addr(id_priv); |
| 467 | memcpy(&addr->sib_addr, &sgid, sizeof sgid); | 466 | memcpy(&addr->sib_addr, &sgid, sizeof sgid); |
| 468 | cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); | 467 | cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); |
| @@ -880,7 +879,8 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id | |||
| 880 | { | 879 | { |
| 881 | struct cma_hdr *hdr; | 880 | struct cma_hdr *hdr; |
| 882 | 881 | ||
| 883 | if (listen_id->route.addr.src_addr.ss_family == AF_IB) { | 882 | if ((listen_id->route.addr.src_addr.ss_family == AF_IB) && |
| 883 | (ib_event->event == IB_CM_REQ_RECEIVED)) { | ||
| 884 | cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path); | 884 | cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path); |
| 885 | return 0; | 885 | return 0; |
| 886 | } | 886 | } |
| @@ -2677,29 +2677,32 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, | |||
| 2677 | { | 2677 | { |
| 2678 | struct ib_cm_sidr_req_param req; | 2678 | struct ib_cm_sidr_req_param req; |
| 2679 | struct ib_cm_id *id; | 2679 | struct ib_cm_id *id; |
| 2680 | void *private_data; | ||
| 2680 | int offset, ret; | 2681 | int offset, ret; |
| 2681 | 2682 | ||
| 2683 | memset(&req, 0, sizeof req); | ||
| 2682 | offset = cma_user_data_offset(id_priv); | 2684 | offset = cma_user_data_offset(id_priv); |
| 2683 | req.private_data_len = offset + conn_param->private_data_len; | 2685 | req.private_data_len = offset + conn_param->private_data_len; |
| 2684 | if (req.private_data_len < conn_param->private_data_len) | 2686 | if (req.private_data_len < conn_param->private_data_len) |
| 2685 | return -EINVAL; | 2687 | return -EINVAL; |
| 2686 | 2688 | ||
| 2687 | if (req.private_data_len) { | 2689 | if (req.private_data_len) { |
| 2688 | req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); | 2690 | private_data = kzalloc(req.private_data_len, GFP_ATOMIC); |
| 2689 | if (!req.private_data) | 2691 | if (!private_data) |
| 2690 | return -ENOMEM; | 2692 | return -ENOMEM; |
| 2691 | } else { | 2693 | } else { |
| 2692 | req.private_data = NULL; | 2694 | private_data = NULL; |
| 2693 | } | 2695 | } |
| 2694 | 2696 | ||
| 2695 | if (conn_param->private_data && conn_param->private_data_len) | 2697 | if (conn_param->private_data && conn_param->private_data_len) |
| 2696 | memcpy((void *) req.private_data + offset, | 2698 | memcpy(private_data + offset, conn_param->private_data, |
| 2697 | conn_param->private_data, conn_param->private_data_len); | 2699 | conn_param->private_data_len); |
| 2698 | 2700 | ||
| 2699 | if (req.private_data) { | 2701 | if (private_data) { |
| 2700 | ret = cma_format_hdr((void *) req.private_data, id_priv); | 2702 | ret = cma_format_hdr(private_data, id_priv); |
| 2701 | if (ret) | 2703 | if (ret) |
| 2702 | goto out; | 2704 | goto out; |
| 2705 | req.private_data = private_data; | ||
| 2703 | } | 2706 | } |
| 2704 | 2707 | ||
| 2705 | id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, | 2708 | id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, |
| @@ -2721,7 +2724,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, | |||
| 2721 | id_priv->cm_id.ib = NULL; | 2724 | id_priv->cm_id.ib = NULL; |
| 2722 | } | 2725 | } |
| 2723 | out: | 2726 | out: |
| 2724 | kfree(req.private_data); | 2727 | kfree(private_data); |
| 2725 | return ret; | 2728 | return ret; |
| 2726 | } | 2729 | } |
| 2727 | 2730 | ||
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index dc3fd1e8af07..4c837e66516b 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
| @@ -2663,6 +2663,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | |||
| 2663 | int ret, i; | 2663 | int ret, i; |
| 2664 | struct ib_qp_attr *attr; | 2664 | struct ib_qp_attr *attr; |
| 2665 | struct ib_qp *qp; | 2665 | struct ib_qp *qp; |
| 2666 | u16 pkey_index; | ||
| 2666 | 2667 | ||
| 2667 | attr = kmalloc(sizeof *attr, GFP_KERNEL); | 2668 | attr = kmalloc(sizeof *attr, GFP_KERNEL); |
| 2668 | if (!attr) { | 2669 | if (!attr) { |
| @@ -2670,6 +2671,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | |||
| 2670 | return -ENOMEM; | 2671 | return -ENOMEM; |
| 2671 | } | 2672 | } |
| 2672 | 2673 | ||
| 2674 | ret = ib_find_pkey(port_priv->device, port_priv->port_num, | ||
| 2675 | IB_DEFAULT_PKEY_FULL, &pkey_index); | ||
| 2676 | if (ret) | ||
| 2677 | pkey_index = 0; | ||
| 2678 | |||
| 2673 | for (i = 0; i < IB_MAD_QPS_CORE; i++) { | 2679 | for (i = 0; i < IB_MAD_QPS_CORE; i++) { |
| 2674 | qp = port_priv->qp_info[i].qp; | 2680 | qp = port_priv->qp_info[i].qp; |
| 2675 | if (!qp) | 2681 | if (!qp) |
| @@ -2680,7 +2686,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | |||
| 2680 | * one is needed for the Reset to Init transition | 2686 | * one is needed for the Reset to Init transition |
| 2681 | */ | 2687 | */ |
| 2682 | attr->qp_state = IB_QPS_INIT; | 2688 | attr->qp_state = IB_QPS_INIT; |
| 2683 | attr->pkey_index = 0; | 2689 | attr->pkey_index = pkey_index; |
| 2684 | attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; | 2690 | attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; |
| 2685 | ret = ib_modify_qp(qp, attr, IB_QP_STATE | | 2691 | ret = ib_modify_qp(qp, attr, IB_QP_STATE | |
| 2686 | IB_QP_PKEY_INDEX | IB_QP_QKEY); | 2692 | IB_QP_PKEY_INDEX | IB_QP_QKEY); |
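The mad.c change looks up the pkey-table index of the full-membership default P_Key and falls back to index 0 when the lookup fails, rather than hard-coding index 0. A toy version of that lookup-with-fallback; the table contents here are invented:

```c
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_PKEY_FULL 0xFFFF	/* full-membership default P_Key */

/* Illustrative port P_Key table; real tables come from the HCA. */
static const uint16_t pkey_table[] = { 0x7FFF, 0xFFFF, 0x8001 };

/* Return the index of pkey, or -1 if not present. */
static int find_pkey(uint16_t pkey)
{
	for (unsigned i = 0; i < sizeof(pkey_table) / sizeof(pkey_table[0]); i++)
		if (pkey_table[i] == pkey)
			return (int)i;
	return -1;
}

int main(void)
{
	int idx = find_pkey(DEFAULT_PKEY_FULL);

	if (idx < 0)
		idx = 0;	/* same fallback the patch applies on failure */
	printf("using pkey_index %d\n", idx);
	return 0;
}
```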
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index e87f2201b220..d2283837d451 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
| @@ -226,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve | |||
| 226 | mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * | 226 | mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * |
| 227 | sizeof(struct t3_cqe)); | 227 | sizeof(struct t3_cqe)); |
| 228 | uresp.memsize = mm->len; | 228 | uresp.memsize = mm->len; |
| 229 | uresp.reserved = 0; | ||
| 229 | resplen = sizeof uresp; | 230 | resplen = sizeof uresp; |
| 230 | } | 231 | } |
| 231 | if (ib_copy_to_udata(udata, &uresp, resplen)) { | 232 | if (ib_copy_to_udata(udata, &uresp, resplen)) { |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 232040447e8a..a4975e1654a6 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -1657,6 +1657,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1657 | if (mm5) { | 1657 | if (mm5) { |
| 1658 | uresp.ma_sync_key = ucontext->key; | 1658 | uresp.ma_sync_key = ucontext->key; |
| 1659 | ucontext->key += PAGE_SIZE; | 1659 | ucontext->key += PAGE_SIZE; |
| 1660 | } else { | ||
| 1661 | uresp.ma_sync_key = 0; | ||
| 1660 | } | 1662 | } |
| 1661 | uresp.sq_key = ucontext->key; | 1663 | uresp.sq_key = ucontext->key; |
| 1662 | ucontext->key += PAGE_SIZE; | 1664 | ucontext->key += PAGE_SIZE; |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 4d599cedbb0b..f2a3f48107e7 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
| @@ -1511,8 +1511,14 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, | |||
| 1511 | 1511 | ||
| 1512 | memset(&attr, 0, sizeof attr); | 1512 | memset(&attr, 0, sizeof attr); |
| 1513 | attr.qp_state = IB_QPS_INIT; | 1513 | attr.qp_state = IB_QPS_INIT; |
| 1514 | attr.pkey_index = | 1514 | ret = 0; |
| 1515 | to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; | 1515 | if (create_tun) |
| 1516 | ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, | ||
| 1517 | ctx->port, IB_DEFAULT_PKEY_FULL, | ||
| 1518 | &attr.pkey_index); | ||
| 1519 | if (ret || !create_tun) | ||
| 1520 | attr.pkey_index = | ||
| 1521 | to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; | ||
| 1516 | attr.qkey = IB_QP1_QKEY; | 1522 | attr.qkey = IB_QP1_QKEY; |
| 1517 | attr.port_num = ctx->port; | 1523 | attr.port_num = ctx->port; |
| 1518 | ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); | 1524 | ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8000fff4d444..3f831de9a4d8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -619,7 +619,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 619 | 619 | ||
| 620 | resp.tot_uuars = req.total_num_uuars; | 620 | resp.tot_uuars = req.total_num_uuars; |
| 621 | resp.num_ports = dev->mdev.caps.num_ports; | 621 | resp.num_ports = dev->mdev.caps.num_ports; |
| 622 | err = ib_copy_to_udata(udata, &resp, sizeof(resp)); | 622 | err = ib_copy_to_udata(udata, &resp, |
| 623 | sizeof(resp) - sizeof(resp.reserved)); | ||
| 623 | if (err) | 624 | if (err) |
| 624 | goto out_uars; | 625 | goto out_uars; |
| 625 | 626 | ||
| @@ -1426,7 +1427,8 @@ static int init_one(struct pci_dev *pdev, | |||
| 1426 | if (err) | 1427 | if (err) |
| 1427 | goto err_eqs; | 1428 | goto err_eqs; |
| 1428 | 1429 | ||
| 1429 | if (ib_register_device(&dev->ib_dev, NULL)) | 1430 | err = ib_register_device(&dev->ib_dev, NULL); |
| 1431 | if (err) | ||
| 1430 | goto err_rsrc; | 1432 | goto err_rsrc; |
| 1431 | 1433 | ||
| 1432 | err = create_umr_res(dev); | 1434 | err = create_umr_res(dev); |
| @@ -1434,8 +1436,9 @@ static int init_one(struct pci_dev *pdev, | |||
| 1434 | goto err_dev; | 1436 | goto err_dev; |
| 1435 | 1437 | ||
| 1436 | for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { | 1438 | for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { |
| 1437 | if (device_create_file(&dev->ib_dev.dev, | 1439 | err = device_create_file(&dev->ib_dev.dev, |
| 1438 | mlx5_class_attributes[i])) | 1440 | mlx5_class_attributes[i]); |
| 1441 | if (err) | ||
| 1439 | goto err_umrc; | 1442 | goto err_umrc; |
| 1440 | } | 1443 | } |
| 1441 | 1444 | ||
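The mlx5 main.c hunks convert registration calls from `if (fn())` to `err = fn(); if (err)`, so the caller unwinds with the callee's real error code instead of discarding it. A trivial sketch of the same error-propagation shape, with stand-in helpers:

```c
#include <stdio.h>

/* Toy helpers standing in for ib_register_device() and
 * device_create_file(); both report failure via a negative return. */
static int register_device(void) { return 0; }
static int create_file(int i)    { return i == 2 ? -1 : 0; }

/* Capture every return value so the caller propagates the failing
 * call's code rather than a made-up one - the shape of the hunk. */
static int init_one(void)
{
	int err, i;

	err = register_device();
	if (err)
		return err;

	for (i = 0; i < 4; i++) {
		err = create_file(i);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	printf("unwinding after attribute %d failed\n", i);
	return err;
}

int main(void)
{
	printf("init_one() = %d\n", init_one());
	return 0;
}
```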
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 16ac54c9819f..045f8cdbd303 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -199,7 +199,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 199 | 199 | ||
| 200 | static int sq_overhead(enum ib_qp_type qp_type) | 200 | static int sq_overhead(enum ib_qp_type qp_type) |
| 201 | { | 201 | { |
| 202 | int size; | 202 | int size = 0; |
| 203 | 203 | ||
| 204 | switch (qp_type) { | 204 | switch (qp_type) { |
| 205 | case IB_QPT_XRC_INI: | 205 | case IB_QPT_XRC_INI: |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 418004c93feb..90200245c5eb 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
| @@ -3570,10 +3570,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
| 3570 | tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; | 3570 | tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; |
| 3571 | iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; | 3571 | iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; |
| 3572 | nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p," | 3572 | nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p," |
| 3573 | " Tcp state = %d, iWARP state = %d\n", | 3573 | " Tcp state = %s, iWARP state = %s\n", |
| 3574 | async_event_id, | 3574 | async_event_id, |
| 3575 | le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, | 3575 | le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, |
| 3576 | tcp_state, iwarp_state); | 3576 | nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); |
| 3577 | 3577 | ||
| 3578 | aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); | 3578 | aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); |
| 3579 | if (aeq_info & NES_AEQE_QP) { | 3579 | if (aeq_info & NES_AEQE_QP) { |
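The nes_hw.c hunk prints symbolic TCP/iWARP state names from lookup arrays instead of raw numbers. A sketch of that table-driven mapping follows; the bounds guard is an addition of this sketch, not something the hunk shows, and the table contents are invented:

```c
#include <stdio.h>

/* Illustrative state-name table; the driver keeps similar arrays
 * (nes_tcp_state_str / nes_iwarp_state_str) indexed by state number. */
static const char *const tcp_state_str[] = {
	"NA", "Established", "Closed", "Listen",
};

static const char *tcp_state_name(unsigned state)
{
	if (state >= sizeof(tcp_state_str) / sizeof(tcp_state_str[0]))
		return "Unknown";	/* guard against out-of-range indexes */
	return tcp_state_str[state];
}

int main(void)
{
	printf("Tcp state = %s\n", tcp_state_name(1));
	printf("Tcp state = %s\n", tcp_state_name(9));
	return 0;
}
```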
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 8f67fe2e91e6..5b53ca5a2284 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
| @@ -1384,6 +1384,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | |||
| 1384 | 1384 | ||
| 1385 | if (ibpd->uobject) { | 1385 | if (ibpd->uobject) { |
| 1386 | uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index; | 1386 | uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index; |
| 1387 | uresp.mmap_rq_db_index = 0; | ||
| 1387 | uresp.actual_sq_size = sq_size; | 1388 | uresp.actual_sq_size = sq_size; |
| 1388 | uresp.actual_rq_size = rq_size; | 1389 | uresp.actual_rq_size = rq_size; |
| 1389 | uresp.qp_id = nesqp->hwqp.qp_id; | 1390 | uresp.qp_id = nesqp->hwqp.qp_id; |
| @@ -1767,7 +1768,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
| 1767 | resp.cq_id = nescq->hw_cq.cq_number; | 1768 | resp.cq_id = nescq->hw_cq.cq_number; |
| 1768 | resp.cq_size = nescq->hw_cq.cq_size; | 1769 | resp.cq_size = nescq->hw_cq.cq_size; |
| 1769 | resp.mmap_db_index = 0; | 1770 | resp.mmap_db_index = 0; |
| 1770 | if (ib_copy_to_udata(udata, &resp, sizeof resp)) { | 1771 | if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) { |
| 1771 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | 1772 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); |
| 1772 | kfree(nescq); | 1773 | kfree(nescq); |
| 1773 | return ERR_PTR(-EFAULT); | 1774 | return ERR_PTR(-EFAULT); |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index a877a8ed7907..f4c587c68f64 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
| @@ -29,7 +29,6 @@ | |||
| 29 | #include <net/netevent.h> | 29 | #include <net/netevent.h> |
| 30 | 30 | ||
| 31 | #include <rdma/ib_addr.h> | 31 | #include <rdma/ib_addr.h> |
| 32 | #include <rdma/ib_cache.h> | ||
| 33 | 32 | ||
| 34 | #include "ocrdma.h" | 33 | #include "ocrdma.h" |
| 35 | #include "ocrdma_verbs.h" | 34 | #include "ocrdma_verbs.h" |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index dcfbab177faa..f36630e4b6be 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -242,6 +242,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, | |||
| 242 | memset(ctx->ah_tbl.va, 0, map_len); | 242 | memset(ctx->ah_tbl.va, 0, map_len); |
| 243 | ctx->ah_tbl.len = map_len; | 243 | ctx->ah_tbl.len = map_len; |
| 244 | 244 | ||
| 245 | memset(&resp, 0, sizeof(resp)); | ||
| 245 | resp.ah_tbl_len = ctx->ah_tbl.len; | 246 | resp.ah_tbl_len = ctx->ah_tbl.len; |
| 246 | resp.ah_tbl_page = ctx->ah_tbl.pa; | 247 | resp.ah_tbl_page = ctx->ah_tbl.pa; |
| 247 | 248 | ||
| @@ -253,7 +254,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, | |||
| 253 | resp.wqe_size = dev->attr.wqe_size; | 254 | resp.wqe_size = dev->attr.wqe_size; |
| 254 | resp.rqe_size = dev->attr.rqe_size; | 255 | resp.rqe_size = dev->attr.rqe_size; |
| 255 | resp.dpp_wqe_size = dev->attr.wqe_size; | 256 | resp.dpp_wqe_size = dev->attr.wqe_size; |
| 256 | resp.rsvd = 0; | ||
| 257 | 257 | ||
| 258 | memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); | 258 | memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); |
| 259 | status = ib_copy_to_udata(udata, &resp, sizeof(resp)); | 259 | status = ib_copy_to_udata(udata, &resp, sizeof(resp)); |
| @@ -338,6 +338,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd, | |||
| 338 | struct ocrdma_alloc_pd_uresp rsp; | 338 | struct ocrdma_alloc_pd_uresp rsp; |
| 339 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); | 339 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); |
| 340 | 340 | ||
| 341 | memset(&rsp, 0, sizeof(rsp)); | ||
| 341 | rsp.id = pd->id; | 342 | rsp.id = pd->id; |
| 342 | rsp.dpp_enabled = pd->dpp_enabled; | 343 | rsp.dpp_enabled = pd->dpp_enabled; |
| 343 | db_page_addr = pd->dev->nic_info.unmapped_db + | 344 | db_page_addr = pd->dev->nic_info.unmapped_db + |
| @@ -692,6 +693,7 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata, | |||
| 692 | struct ocrdma_ucontext *uctx; | 693 | struct ocrdma_ucontext *uctx; |
| 693 | struct ocrdma_create_cq_uresp uresp; | 694 | struct ocrdma_create_cq_uresp uresp; |
| 694 | 695 | ||
| 696 | memset(&uresp, 0, sizeof(uresp)); | ||
| 695 | uresp.cq_id = cq->id; | 697 | uresp.cq_id = cq->id; |
| 696 | uresp.page_size = cq->len; | 698 | uresp.page_size = cq->len; |
| 697 | uresp.num_pages = 1; | 699 | uresp.num_pages = 1; |
| @@ -1460,6 +1462,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata) | |||
| 1460 | int status; | 1462 | int status; |
| 1461 | struct ocrdma_create_srq_uresp uresp; | 1463 | struct ocrdma_create_srq_uresp uresp; |
| 1462 | 1464 | ||
| 1465 | memset(&uresp, 0, sizeof(uresp)); | ||
| 1463 | uresp.rq_dbid = srq->rq.dbid; | 1466 | uresp.rq_dbid = srq->rq.dbid; |
| 1464 | uresp.num_rq_pages = 1; | 1467 | uresp.num_rq_pages = 1; |
| 1465 | uresp.rq_page_addr[0] = srq->rq.pa; | 1468 | uresp.rq_page_addr[0] = srq->rq.pa; |
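Several hunks above share one motif (cxgb3's `uresp.reserved = 0`, cxgb4's ma_sync_key else-branch, nes's `mmap_rq_db_index = 0` and shortened copy length, ocrdma's `memset()` calls): a response struct copied to user space must be fully initialized, or leftover kernel stack bytes in reserved fields and padding leak to the caller. Zeroing the whole struct up front is the most robust form. A minimal illustration with an invented struct:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Response struct with reserved space, as in the uresp structures
 * above (field names are illustrative). */
struct create_cq_uresp {
	uint32_t cq_id;
	uint32_t page_size;
	uint32_t num_pages;
	uint32_t reserved;	/* would leak stack bytes if left uninitialized */
};

static void fill_uresp(struct create_cq_uresp *u)
{
	memset(u, 0, sizeof(*u));	/* zero first: reserved fields and padding */
	u->cq_id = 7;
	u->page_size = 4096;
	u->num_pages = 1;
	/* everything not assigned above is now provably zero */
}

int main(void)
{
	struct create_cq_uresp u;

	fill_uresp(&u);
	printf("reserved = %u (always 0)\n", u.reserved);
	return 0;
}
```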
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 21e8b09d4bf8..016e7429adf6 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
| @@ -1596,6 +1596,8 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs) | |||
| 1596 | struct qib_devdata *dd = ppd->dd; | 1596 | struct qib_devdata *dd = ppd->dd; |
| 1597 | 1597 | ||
| 1598 | errs &= QIB_E_P_SDMAERRS; | 1598 | errs &= QIB_E_P_SDMAERRS; |
| 1599 | err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf), | ||
| 1600 | errs, qib_7322p_error_msgs); | ||
| 1599 | 1601 | ||
| 1600 | if (errs & QIB_E_P_SDMAUNEXPDATA) | 1602 | if (errs & QIB_E_P_SDMAUNEXPDATA) |
| 1601 | qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, | 1603 | qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, |
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 32162d355370..9b5322d8cd5a 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c | |||
| @@ -717,7 +717,7 @@ void dump_sdma_state(struct qib_pportdata *ppd) | |||
| 717 | struct qib_sdma_txreq *txp, *txpnext; | 717 | struct qib_sdma_txreq *txp, *txpnext; |
| 718 | __le64 *descqp; | 718 | __le64 *descqp; |
| 719 | u64 desc[2]; | 719 | u64 desc[2]; |
| 720 | dma_addr_t addr; | 720 | u64 addr; |
| 721 | u16 gen, dwlen, dwoffset; | 721 | u16 gen, dwlen, dwoffset; |
| 722 | u16 head, tail, cnt; | 722 | u16 head, tail, cnt; |
| 723 | 723 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 2cfa76f5d99e..196b1d13cbcb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -932,12 +932,47 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
| 932 | return 0; | 932 | return 0; |
| 933 | } | 933 | } |
| 934 | 934 | ||
| 935 | /* | ||
| 936 | * Takes whatever value is in pkey index 0 and updates priv->pkey; | ||
| 937 | * returns 0 if the pkey value was changed. | ||
| 938 | */ | ||
| 939 | static inline int update_parent_pkey(struct ipoib_dev_priv *priv) | ||
| 940 | { | ||
| 941 | int result; | ||
| 942 | u16 prev_pkey; | ||
| 943 | |||
| 944 | prev_pkey = priv->pkey; | ||
| 945 | result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey); | ||
| 946 | if (result) { | ||
| 947 | ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n", | ||
| 948 | priv->port, result); | ||
| 949 | return result; | ||
| 950 | } | ||
| 951 | |||
| 952 | priv->pkey |= 0x8000; | ||
| 953 | |||
| 954 | if (prev_pkey != priv->pkey) { | ||
| 955 | ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n", | ||
| 956 | prev_pkey, priv->pkey); | ||
| 957 | /* | ||
| 958 | * Update the pkey in the broadcast address, while making sure to set | ||
| 959 | * the full membership bit, so that we join the right broadcast group. | ||
| 960 | */ | ||
| 961 | priv->dev->broadcast[8] = priv->pkey >> 8; | ||
| 962 | priv->dev->broadcast[9] = priv->pkey & 0xff; | ||
| 963 | return 0; | ||
| 964 | } | ||
| 965 | |||
| 966 | return 1; | ||
| 967 | } | ||
| 968 | |||
| 935 | static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | 969 | static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, |
| 936 | enum ipoib_flush_level level) | 970 | enum ipoib_flush_level level) |
| 937 | { | 971 | { |
| 938 | struct ipoib_dev_priv *cpriv; | 972 | struct ipoib_dev_priv *cpriv; |
| 939 | struct net_device *dev = priv->dev; | 973 | struct net_device *dev = priv->dev; |
| 940 | u16 new_index; | 974 | u16 new_index; |
| 975 | int result; | ||
| 941 | 976 | ||
| 942 | mutex_lock(&priv->vlan_mutex); | 977 | mutex_lock(&priv->vlan_mutex); |
| 943 | 978 | ||
| @@ -951,6 +986,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 951 | mutex_unlock(&priv->vlan_mutex); | 986 | mutex_unlock(&priv->vlan_mutex); |
| 952 | 987 | ||
| 953 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { | 988 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { |
| 989 | /* for non-child devices we must check/update the pkey value here */ | ||
| 990 | if (level == IPOIB_FLUSH_HEAVY && | ||
| 991 | !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) | ||
| 992 | update_parent_pkey(priv); | ||
| 954 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); | 993 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); |
| 955 | return; | 994 | return; |
| 956 | } | 995 | } |
| @@ -961,21 +1000,32 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 961 | } | 1000 | } |
| 962 | 1001 | ||
| 963 | if (level == IPOIB_FLUSH_HEAVY) { | 1002 | if (level == IPOIB_FLUSH_HEAVY) { |
| 964 | if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { | 1003 | /* child devices chase their origin pkey value, while non-child |
| 965 | clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); | 1004 | * (parent) devices should always takes what present in pkey index 0 |
| 966 | ipoib_ib_dev_down(dev, 0); | 1005 | */ |
| 967 | ipoib_ib_dev_stop(dev, 0); | 1006 | if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
| 968 | if (ipoib_pkey_dev_delay_open(dev)) | 1007 | if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { |
| 1008 | clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); | ||
| 1009 | ipoib_ib_dev_down(dev, 0); | ||
| 1010 | ipoib_ib_dev_stop(dev, 0); | ||
| 1011 | if (ipoib_pkey_dev_delay_open(dev)) | ||
| 1012 | return; | ||
| 1013 | } | ||
| 1014 | /* restart QP only if P_Key index is changed */ | ||
| 1015 | if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && | ||
| 1016 | new_index == priv->pkey_index) { | ||
| 1017 | ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); | ||
| 969 | return; | 1018 | return; |
| 1019 | } | ||
| 1020 | priv->pkey_index = new_index; | ||
| 1021 | } else { | ||
| 1022 | result = update_parent_pkey(priv); | ||
| 1023 | /* restart QP only if P_Key value changed */ | ||
| 1024 | if (result) { | ||
| 1025 | ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n"); | ||
| 1026 | return; | ||
| 1027 | } | ||
| 970 | } | 1028 | } |
| 971 | |||
| 972 | /* restart QP only if P_Key index is changed */ | ||
| 973 | if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && | ||
| 974 | new_index == priv->pkey_index) { | ||
| 975 | ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); | ||
| 976 | return; | ||
| 977 | } | ||
| 978 | priv->pkey_index = new_index; | ||
| 979 | } | 1029 | } |
| 980 | 1030 | ||
| 981 | if (level == IPOIB_FLUSH_LIGHT) { | 1031 | if (level == IPOIB_FLUSH_LIGHT) { |
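update_parent_pkey() in the hunk above re-reads pkey index 0, forces the full-membership bit, and rewrites bytes 8-9 of the broadcast address only when the value actually changed, returning 0 on change and nonzero otherwise. A plain-C mirror of that flow, with invented parameters:

```c
#include <stdint.h>
#include <stdio.h>

/* Re-read the pkey, force the full-membership bit, and report whether
 * it changed. Returns 0 on change, 1 on no change, matching the hunk. */
static int update_pkey(uint16_t *cur, uint16_t fresh, uint8_t bcast[2])
{
	uint16_t prev = *cur;

	*cur = fresh | 0x8000;		/* full-membership bit, as in the patch */
	if (prev != *cur) {
		bcast[0] = *cur >> 8;	/* bytes 8 and 9 of the broadcast addr */
		bcast[1] = *cur & 0xff;
		return 0;
	}
	return 1;
}

int main(void)
{
	uint16_t pkey = 0x8001;
	uint8_t bcast[2] = { 0x80, 0x01 };

	printf("same value -> %d\n", update_pkey(&pkey, 0x0001, bcast));
	printf("new value  -> %d (bcast %02x%02x)\n",
	       update_pkey(&pkey, 0x0002, bcast), bcast[0], bcast[1]);
	return 0;
}
```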
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b6e049a3c7a8..c6f71a88c55c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -1461,7 +1461,7 @@ static ssize_t create_child(struct device *dev, | |||
| 1461 | if (sscanf(buf, "%i", &pkey) != 1) | 1461 | if (sscanf(buf, "%i", &pkey) != 1) |
| 1462 | return -EINVAL; | 1462 | return -EINVAL; |
| 1463 | 1463 | ||
| 1464 | if (pkey < 0 || pkey > 0xffff) | 1464 | if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000) |
| 1465 | return -EINVAL; | 1465 | return -EINVAL; |
| 1466 | 1466 | ||
| 1467 | /* | 1467 | /* |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index 74685936c948..f81abe16cf09 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c | |||
| @@ -119,6 +119,15 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, | |||
| 119 | } else | 119 | } else |
| 120 | child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]); | 120 | child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]); |
| 121 | 121 | ||
| 122 | if (child_pkey == 0 || child_pkey == 0x8000) | ||
| 123 | return -EINVAL; | ||
| 124 | |||
| 125 | /* | ||
| 126 | * Set the full membership bit, so that we join the right | ||
| 127 | * broadcast group, etc. | ||
| 128 | */ | ||
| 129 | child_pkey |= 0x8000; | ||
| 130 | |||
| 122 | err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD); | 131 | err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD); |
| 123 | 132 | ||
| 124 | if (!err && data) | 133 | if (!err && data) |
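Both the ipoib_main.c and ipoib_netlink.c hunks now reject a child P_Key of 0 or a bare 0x8000 before ORing in the full-membership bit (bit 15), since 0x8000 | 0x8000 would leave an empty base key. A sketch of that validate-then-normalize step:

```c
#include <stdint.h>
#include <stdio.h>

/* Validate a child interface P_Key the way the two hunks above do:
 * reject 0 and bare 0x8000, then force the full-membership bit. */
static int normalize_child_pkey(int pkey, uint16_t *out)
{
	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -1;		/* -EINVAL in the kernel */

	*out = (uint16_t)pkey | 0x8000;	/* bit 15 = full membership */
	return 0;
}

int main(void)
{
	uint16_t pkey;
	int bad[] = { 0, 0x8000, 0x10000 };

	for (unsigned i = 0; i < 3; i++)
		printf("0x%x -> %s\n", bad[i],
		       normalize_child_pkey(bad[i], &pkey) ? "rejected" : "ok");

	if (normalize_child_pkey(0x0001, &pkey) == 0)
		printf("0x0001 -> 0x%04x\n", pkey);	/* prints 0x8001 */
	return 0;
}
```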
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index fa061d46527f..75e3b102ce45 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -167,6 +167,7 @@ static const struct xpad_device { | |||
| 167 | { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, | 167 | { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, |
| 168 | { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, | 168 | { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, |
| 169 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 169 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
| 170 | { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | ||
| 170 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, | 171 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, |
| 171 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 172 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
| 172 | { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, | 173 | { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 57b2637e153a..8551dcaf24db 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
| @@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse) | |||
| 672 | */ | 672 | */ |
| 673 | static int elantech_packet_check_v3(struct psmouse *psmouse) | 673 | static int elantech_packet_check_v3(struct psmouse *psmouse) |
| 674 | { | 674 | { |
| 675 | struct elantech_data *etd = psmouse->private; | ||
| 675 | const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; | 676 | const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; |
| 676 | unsigned char *packet = psmouse->packet; | 677 | unsigned char *packet = psmouse->packet; |
| 677 | 678 | ||
| @@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse) | |||
| 682 | if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) | 683 | if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) |
| 683 | return PACKET_DEBOUNCE; | 684 | return PACKET_DEBOUNCE; |
| 684 | 685 | ||
| 685 | if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) | 686 | /* |
| 686 | return PACKET_V3_HEAD; | 687 | * If the hardware flag 'crc_enabled' is set the packets have |
| 688 | * different signatures. | ||
| 689 | */ | ||
| 690 | if (etd->crc_enabled) { | ||
| 691 | if ((packet[3] & 0x09) == 0x08) | ||
| 692 | return PACKET_V3_HEAD; | ||
| 693 | |||
| 694 | if ((packet[3] & 0x09) == 0x09) | ||
| 695 | return PACKET_V3_TAIL; | ||
| 696 | } else { | ||
| 697 | if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) | ||
| 698 | return PACKET_V3_HEAD; | ||
| 687 | 699 | ||
| 688 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) | 700 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) |
| 689 | return PACKET_V3_TAIL; | 701 | return PACKET_V3_TAIL; |
| 702 | } | ||
| 690 | 703 | ||
| 691 | return PACKET_UNKNOWN; | 704 | return PACKET_UNKNOWN; |
| 692 | } | 705 | } |
| 693 | 706 | ||
| 694 | static int elantech_packet_check_v4(struct psmouse *psmouse) | 707 | static int elantech_packet_check_v4(struct psmouse *psmouse) |
| 695 | { | 708 | { |
| 709 | struct elantech_data *etd = psmouse->private; | ||
| 696 | unsigned char *packet = psmouse->packet; | 710 | unsigned char *packet = psmouse->packet; |
| 697 | unsigned char packet_type = packet[3] & 0x03; | 711 | unsigned char packet_type = packet[3] & 0x03; |
| 712 | bool sanity_check; | ||
| 713 | |||
| 714 | /* | ||
| 715 | * Sanity check based on the constant bits of a packet. | ||
| 716 | * The constant bits change depending on the value of | ||
| 717 | * the hardware flag 'crc_enabled' but are the same for | ||
| 718 | * every packet, regardless of the type. | ||
| 719 | */ | ||
| 720 | if (etd->crc_enabled) | ||
| 721 | sanity_check = ((packet[3] & 0x08) == 0x00); | ||
| 722 | else | ||
| 723 | sanity_check = ((packet[0] & 0x0c) == 0x04 && | ||
| 724 | (packet[3] & 0x1c) == 0x10); | ||
| 725 | |||
| 726 | if (!sanity_check) | ||
| 727 | return PACKET_UNKNOWN; | ||
| 698 | 728 | ||
| 699 | switch (packet_type) { | 729 | switch (packet_type) { |
| 700 | case 0: | 730 | case 0: |
| @@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd) | |||
| 1313 | etd->reports_pressure = true; | 1343 | etd->reports_pressure = true; |
| 1314 | } | 1344 | } |
| 1315 | 1345 | ||
| 1346 | /* | ||
| 1347 | * The signatures of v3 and v4 packets change depending on the | ||
| 1348 | * value of this hardware flag. | ||
| 1349 | */ | ||
| 1350 | etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); | ||
| 1351 | |||
| 1316 | return 0; | 1352 | return 0; |
| 1317 | } | 1353 | } |
| 1318 | 1354 | ||
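The elantech hunks key the v3/v4 packet signature checks off a new crc_enabled flag (bit 14 of the firmware version), because CRC-bearing firmware moves the packets' constant bits. A standalone model of the v3 classifier using the same masks as the hunk (the enum values are local to this sketch):

```c
#include <stdbool.h>
#include <stdio.h>

enum packet_type { PACKET_UNKNOWN, PACKET_V3_HEAD, PACKET_V3_TAIL };

/* Classify a v3 packet by its constant signature bits; which bits are
 * constant depends on whether the firmware appends a CRC. */
static enum packet_type check_v3(const unsigned char p[6], bool crc_enabled)
{
	if (crc_enabled) {
		if ((p[3] & 0x09) == 0x08)
			return PACKET_V3_HEAD;
		if ((p[3] & 0x09) == 0x09)
			return PACKET_V3_TAIL;
	} else {
		if ((p[0] & 0x0c) == 0x04 && (p[3] & 0xcf) == 0x02)
			return PACKET_V3_HEAD;
		if ((p[0] & 0x0c) == 0x0c && (p[3] & 0xce) == 0x0c)
			return PACKET_V3_TAIL;
	}
	return PACKET_UNKNOWN;
}

int main(void)
{
	unsigned char head_crc[6] = { 0x00, 0, 0, 0x08, 0, 0 };

	printf("crc head -> %d\n", check_v3(head_crc, true));		/* 1 */
	printf("same bytes, no crc -> %d\n", check_v3(head_crc, false)); /* 0 */
	return 0;
}
```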
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 46db3be45ac9..036a04abaef7 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h | |||
| @@ -129,6 +129,7 @@ struct elantech_data { | |||
| 129 | bool paritycheck; | 129 | bool paritycheck; |
| 130 | bool jumpy_cursor; | 130 | bool jumpy_cursor; |
| 131 | bool reports_pressure; | 131 | bool reports_pressure; |
| 132 | bool crc_enabled; | ||
| 132 | unsigned char hw_version; | 133 | unsigned char hw_version; |
| 133 | unsigned int fw_version; | 134 | unsigned int fw_version; |
| 134 | unsigned int single_finger_reports; | 135 | unsigned int single_finger_reports; |
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index 94c17c28d268..1e691a3a79cb 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig | |||
| @@ -22,7 +22,8 @@ config SERIO_I8042 | |||
| 22 | tristate "i8042 PC Keyboard controller" if EXPERT || !X86 | 22 | tristate "i8042 PC Keyboard controller" if EXPERT || !X86 |
| 23 | default y | 23 | default y |
| 24 | depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ | 24 | depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ |
| 25 | (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 | 25 | (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \ |
| 26 | !ARC | ||
| 26 | help | 27 | help |
| 27 | i8042 is the chip over which the standard AT keyboard and PS/2 | 28 | i8042 is the chip over which the standard AT keyboard and PS/2 |
| 28 | mouse are connected to the computer. If you use these devices, | 29 | mouse are connected to the computer. If you use these devices, |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 384fbcd0cee0..f3e91f0b57ae 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
| @@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA = | |||
| 2112 | { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, | 2112 | { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, |
| 2113 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2113 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
| 2114 | .touch_max = 2 }; | 2114 | .touch_max = 2 }; |
| 2115 | static struct wacom_features wacom_features_0xDB = | 2115 | static const struct wacom_features wacom_features_0xDB = |
| 2116 | { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, | 2116 | { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, |
| 2117 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2117 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
| 2118 | .touch_max = 2 }; | 2118 | .touch_max = 2 }; |
| @@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF = | |||
| 2127 | { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, | 2127 | { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, |
| 2128 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2128 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
| 2129 | .touch_max = 16 }; | 2129 | .touch_max = 16 }; |
| 2130 | static const struct wacom_features wacom_features_0x300 = | ||
| 2131 | { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023, | ||
| 2132 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
| 2133 | static const struct wacom_features wacom_features_0x301 = | ||
| 2134 | { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023, | ||
| 2135 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
| 2130 | static const struct wacom_features wacom_features_0x6004 = | 2136 | static const struct wacom_features wacom_features_0x6004 = |
| 2131 | { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, | 2137 | { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, |
| 2132 | 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | 2138 | 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; |
| @@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = { | |||
| 2253 | { USB_DEVICE_WACOM(0x100) }, | 2259 | { USB_DEVICE_WACOM(0x100) }, |
| 2254 | { USB_DEVICE_WACOM(0x101) }, | 2260 | { USB_DEVICE_WACOM(0x101) }, |
| 2255 | { USB_DEVICE_WACOM(0x10D) }, | 2261 | { USB_DEVICE_WACOM(0x10D) }, |
| 2262 | { USB_DEVICE_WACOM(0x300) }, | ||
| 2263 | { USB_DEVICE_WACOM(0x301) }, | ||
| 2256 | { USB_DEVICE_WACOM(0x304) }, | 2264 | { USB_DEVICE_WACOM(0x304) }, |
| 2257 | { USB_DEVICE_WACOM(0x4001) }, | 2265 | { USB_DEVICE_WACOM(0x4001) }, |
| 2258 | { USB_DEVICE_WACOM(0x47) }, | 2266 | { USB_DEVICE_WACOM(0x47) }, |
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c index 69ea44ebcf61..4851afae38dc 100644 --- a/drivers/irqchip/irq-sirfsoc.c +++ b/drivers/irqchip/irq-sirfsoc.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #define SIRFSOC_INT_RISC_LEVEL1 0x0024 | 23 | #define SIRFSOC_INT_RISC_LEVEL1 0x0024 |
| 24 | #define SIRFSOC_INIT_IRQ_ID 0x0038 | 24 | #define SIRFSOC_INIT_IRQ_ID 0x0038 |
| 25 | 25 | ||
| 26 | #define SIRFSOC_NUM_IRQS 128 | 26 | #define SIRFSOC_NUM_IRQS 64 |
| 27 | 27 | ||
| 28 | static struct irq_domain *sirfsoc_irqdomain; | 28 | static struct irq_domain *sirfsoc_irqdomain; |
| 29 | 29 | ||
| @@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) | |||
| 32 | { | 32 | { |
| 33 | struct irq_chip_generic *gc; | 33 | struct irq_chip_generic *gc; |
| 34 | struct irq_chip_type *ct; | 34 | struct irq_chip_type *ct; |
| 35 | int ret; | ||
| 36 | unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; | ||
| 35 | 37 | ||
| 36 | gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq); | 38 | ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc", |
| 37 | ct = gc->chip_types; | 39 | handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); |
| 38 | 40 | ||
| 41 | gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start); | ||
| 42 | gc->reg_base = base; | ||
| 43 | ct = gc->chip_types; | ||
| 39 | ct->chip.irq_mask = irq_gc_mask_clr_bit; | 44 | ct->chip.irq_mask = irq_gc_mask_clr_bit; |
| 40 | ct->chip.irq_unmask = irq_gc_mask_set_bit; | 45 | ct->chip.irq_unmask = irq_gc_mask_set_bit; |
| 41 | ct->regs.mask = SIRFSOC_INT_RISC_MASK0; | 46 | ct->regs.mask = SIRFSOC_INT_RISC_MASK0; |
| 42 | |||
| 43 | irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0); | ||
| 44 | } | 47 | } |
| 45 | 48 | ||
| 46 | static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) | 49 | static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) |
| @@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p | |||
| 60 | if (!base) | 63 | if (!base) |
| 61 | panic("unable to map intc cpu registers\n"); | 64 | panic("unable to map intc cpu registers\n"); |
| 62 | 65 | ||
| 63 | /* using legacy because irqchip_generic does not work with linear */ | 66 | sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS, |
| 64 | sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0, | 67 | &irq_generic_chip_ops, base); |
| 65 | &irq_domain_simple_ops, base); | ||
| 66 | 68 | ||
| 67 | sirfsoc_alloc_gc(base, 0, 32); | 69 | sirfsoc_alloc_gc(base, 0, 32); |
| 68 | sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); | 70 | sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); |
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 22b720ec80cb..77025f5cb57d 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c | |||
| @@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) | |||
| 288 | u8 *data; | 288 | u8 *data; |
| 289 | int len; | 289 | int len; |
| 290 | 290 | ||
| 291 | if (skb->len < sizeof(int)) | 291 | if (skb->len < sizeof(int)) { |
| 292 | printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); | 292 | printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); |
| 293 | return -EINVAL; | ||
| 294 | } | ||
| 293 | cont = *((int *)skb->data); | 295 | cont = *((int *)skb->data); |
| 294 | len = skb->len - sizeof(int); | 296 | len = skb->len - sizeof(int); |
| 295 | data = skb->data + sizeof(int); | 297 | data = skb->data + sizeof(int); |
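The mISDN hunk turns a log-and-continue into log-and-return: the message length must be validated before the leading int is read out of the buffer. A userspace sketch of that check-before-parse rule:

```c
#include <stdio.h>
#include <string.h>

/* Parse a control message only after checking it is long enough to
 * contain its leading int, as the hunk above now does. */
static int parse_control(const unsigned char *buf, size_t len)
{
	int cont;

	if (len < sizeof(int)) {
		fprintf(stderr, "control message too short\n");
		return -1;		/* -EINVAL in the kernel */
	}
	memcpy(&cont, buf, sizeof(int));	/* safe: length verified above */
	printf("opcode %d, %zu payload bytes\n", cont, len - sizeof(int));
	return 0;
}

int main(void)
{
	unsigned char msg[6] = { 1, 0, 0, 0, 0xaa, 0xbb };

	parse_control(msg, sizeof(msg));
	parse_control(msg, 2);		/* rejected: shorter than the header */
	return 0;
}
```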
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c index 0b9a79b2f48a..82fc86a90c1a 100644 --- a/drivers/macintosh/windfarm_rm31.c +++ b/drivers/macintosh/windfarm_rm31.c | |||
| @@ -439,15 +439,15 @@ static void backside_setup_pid(void) | |||
| 439 | 439 | ||
| 440 | /* Slots fan */ | 440 | /* Slots fan */ |
| 441 | static const struct wf_pid_param slots_param = { | 441 | static const struct wf_pid_param slots_param = { |
| 442 | .interval = 5, | 442 | .interval = 1, |
| 443 | .history_len = 2, | 443 | .history_len = 20, |
| 444 | .gd = 30 << 20, | 444 | .gd = 0, |
| 445 | .gp = 5 << 20, | 445 | .gp = 0, |
| 446 | .gr = 0, | 446 | .gr = 0x00100000, |
| 447 | .itarget = 40 << 16, | 447 | .itarget = 3200000, |
| 448 | .additive = 1, | 448 | .additive = 0, |
| 449 | .min = 300, | 449 | .min = 20, |
| 450 | .max = 4000, | 450 | .max = 100, |
| 451 | }; | 451 | }; |
| 452 | 452 | ||
| 453 | static void slots_fan_tick(void) | 453 | static void slots_fan_tick(void) |
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 048f2947e08b..e45f5575fd4d 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c | |||
| @@ -63,7 +63,10 @@ | |||
| 63 | #include "bcache.h" | 63 | #include "bcache.h" |
| 64 | #include "btree.h" | 64 | #include "btree.h" |
| 65 | 65 | ||
| 66 | #include <linux/freezer.h> | ||
| 67 | #include <linux/kthread.h> | ||
| 66 | #include <linux/random.h> | 68 | #include <linux/random.h> |
| 69 | #include <trace/events/bcache.h> | ||
| 67 | 70 | ||
| 68 | #define MAX_IN_FLIGHT_DISCARDS 8U | 71 | #define MAX_IN_FLIGHT_DISCARDS 8U |
| 69 | 72 | ||
| @@ -151,7 +154,7 @@ static void discard_finish(struct work_struct *w) | |||
| 151 | mutex_unlock(&ca->set->bucket_lock); | 154 | mutex_unlock(&ca->set->bucket_lock); |
| 152 | 155 | ||
| 153 | closure_wake_up(&ca->set->bucket_wait); | 156 | closure_wake_up(&ca->set->bucket_wait); |
| 154 | wake_up(&ca->set->alloc_wait); | 157 | wake_up_process(ca->alloc_thread); |
| 155 | 158 | ||
| 156 | closure_put(&ca->set->cl); | 159 | closure_put(&ca->set->cl); |
| 157 | } | 160 | } |
| @@ -350,38 +353,30 @@ static void invalidate_buckets(struct cache *ca) | |||
| 350 | break; | 353 | break; |
| 351 | } | 354 | } |
| 352 | 355 | ||
| 353 | pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu", | 356 | trace_bcache_alloc_invalidate(ca); |
| 354 | fifo_used(&ca->free), ca->free.size, | ||
| 355 | fifo_used(&ca->free_inc), ca->free_inc.size, | ||
| 356 | fifo_used(&ca->unused), ca->unused.size); | ||
| 357 | } | 357 | } |
| 358 | 358 | ||
| 359 | #define allocator_wait(ca, cond) \ | 359 | #define allocator_wait(ca, cond) \ |
| 360 | do { \ | 360 | do { \ |
| 361 | DEFINE_WAIT(__wait); \ | ||
| 362 | \ | ||
| 363 | while (1) { \ | 361 | while (1) { \ |
| 364 | prepare_to_wait(&ca->set->alloc_wait, \ | 362 | set_current_state(TASK_INTERRUPTIBLE); \ |
| 365 | &__wait, TASK_INTERRUPTIBLE); \ | ||
| 366 | if (cond) \ | 363 | if (cond) \ |
| 367 | break; \ | 364 | break; \ |
| 368 | \ | 365 | \ |
| 369 | mutex_unlock(&(ca)->set->bucket_lock); \ | 366 | mutex_unlock(&(ca)->set->bucket_lock); \ |
| 370 | if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \ | 367 | if (kthread_should_stop()) \ |
| 371 | finish_wait(&ca->set->alloc_wait, &__wait); \ | 368 | return 0; \ |
| 372 | closure_return(cl); \ | ||
| 373 | } \ | ||
| 374 | \ | 369 | \ |
| 370 | try_to_freeze(); \ | ||
| 375 | schedule(); \ | 371 | schedule(); \ |
| 376 | mutex_lock(&(ca)->set->bucket_lock); \ | 372 | mutex_lock(&(ca)->set->bucket_lock); \ |
| 377 | } \ | 373 | } \ |
| 378 | \ | 374 | __set_current_state(TASK_RUNNING); \ |
| 379 | finish_wait(&ca->set->alloc_wait, &__wait); \ | ||
| 380 | } while (0) | 375 | } while (0) |
| 381 | 376 | ||
| 382 | void bch_allocator_thread(struct closure *cl) | 377 | static int bch_allocator_thread(void *arg) |
| 383 | { | 378 | { |
| 384 | struct cache *ca = container_of(cl, struct cache, alloc); | 379 | struct cache *ca = arg; |
| 385 | 380 | ||
| 386 | mutex_lock(&ca->set->bucket_lock); | 381 | mutex_lock(&ca->set->bucket_lock); |
| 387 | 382 | ||
| @@ -442,7 +437,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl) | |||
| 442 | { | 437 | { |
| 443 | long r = -1; | 438 | long r = -1; |
| 444 | again: | 439 | again: |
| 445 | wake_up(&ca->set->alloc_wait); | 440 | wake_up_process(ca->alloc_thread); |
| 446 | 441 | ||
| 447 | if (fifo_used(&ca->free) > ca->watermark[watermark] && | 442 | if (fifo_used(&ca->free) > ca->watermark[watermark] && |
| 448 | fifo_pop(&ca->free, r)) { | 443 | fifo_pop(&ca->free, r)) { |
| @@ -476,9 +471,7 @@ again: | |||
| 476 | return r; | 471 | return r; |
| 477 | } | 472 | } |
| 478 | 473 | ||
| 479 | pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu", | 474 | trace_bcache_alloc_fail(ca); |
| 480 | atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free), | ||
| 481 | fifo_used(&ca->free_inc), fifo_used(&ca->unused)); | ||
| 482 | 475 | ||
| 483 | if (cl) { | 476 | if (cl) { |
| 484 | closure_wait(&ca->set->bucket_wait, cl); | 477 | closure_wait(&ca->set->bucket_wait, cl); |
| @@ -552,6 +545,17 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark, | |||
| 552 | 545 | ||
| 553 | /* Init */ | 546 | /* Init */ |
| 554 | 547 | ||
| 548 | int bch_cache_allocator_start(struct cache *ca) | ||
| 549 | { | ||
| 550 | struct task_struct *k = kthread_run(bch_allocator_thread, | ||
| 551 | ca, "bcache_allocator"); | ||
| 552 | if (IS_ERR(k)) | ||
| 553 | return PTR_ERR(k); | ||
| 554 | |||
| 555 | ca->alloc_thread = k; | ||
| 556 | return 0; | ||
| 557 | } | ||
| 558 | |||
| 555 | void bch_cache_allocator_exit(struct cache *ca) | 559 | void bch_cache_allocator_exit(struct cache *ca) |
| 556 | { | 560 | { |
| 557 | struct discard *d; | 561 | struct discard *d; |
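The bcache hunks retire the closure/wait-queue plumbing around the allocator in favor of a dedicated kernel thread: kthread_run() starts it, wake_up_process() kicks it, and kthread_should_stop() ends it. A rough pthread analog of that lifecycle, where the condition variable stands in for the scheduler wakeup:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
static bool work_pending, should_stop;

/* Dedicated worker: sleeps until kicked, exits when asked to stop. */
static void *allocator_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!should_stop) {
		while (!work_pending && !should_stop)
			pthread_cond_wait(&kick, &lock); /* like TASK_INTERRUPTIBLE */
		if (work_pending) {
			work_pending = false;
			printf("refilling free buckets\n");
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void wake_allocator(void)	/* analog of wake_up_process() */
{
	pthread_mutex_lock(&lock);
	work_pending = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, allocator_thread, NULL);
	wake_allocator();
	usleep(10000);
	pthread_mutex_lock(&lock);
	should_stop = true;		/* analog of kthread_stop() */
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
```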
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index d3e15b42a4ab..b39f6f0b45f2 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
| @@ -178,7 +178,6 @@ | |||
| 178 | #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ | 178 | #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ |
| 179 | 179 | ||
| 180 | #include <linux/bio.h> | 180 | #include <linux/bio.h> |
| 181 | #include <linux/blktrace_api.h> | ||
| 182 | #include <linux/kobject.h> | 181 | #include <linux/kobject.h> |
| 183 | #include <linux/list.h> | 182 | #include <linux/list.h> |
| 184 | #include <linux/mutex.h> | 183 | #include <linux/mutex.h> |
| @@ -388,8 +387,6 @@ struct keybuf_key { | |||
| 388 | typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); | 387 | typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); |
| 389 | 388 | ||
| 390 | struct keybuf { | 389 | struct keybuf { |
| 391 | keybuf_pred_fn *key_predicate; | ||
| 392 | |||
| 393 | struct bkey last_scanned; | 390 | struct bkey last_scanned; |
| 394 | spinlock_t lock; | 391 | spinlock_t lock; |
| 395 | 392 | ||
| @@ -437,9 +434,12 @@ struct bcache_device { | |||
| 437 | 434 | ||
| 438 | /* If nonzero, we're detaching/unregistering from cache set */ | 435 | /* If nonzero, we're detaching/unregistering from cache set */ |
| 439 | atomic_t detaching; | 436 | atomic_t detaching; |
| 437 | int flush_done; | ||
| 438 | |||
| 439 | uint64_t nr_stripes; | ||
| 440 | unsigned stripe_size_bits; | ||
| 441 | atomic_t *stripe_sectors_dirty; | ||
| 440 | 442 | ||
| 441 | atomic_long_t sectors_dirty; | ||
| 442 | unsigned long sectors_dirty_gc; | ||
| 443 | unsigned long sectors_dirty_last; | 443 | unsigned long sectors_dirty_last; |
| 444 | long sectors_dirty_derivative; | 444 | long sectors_dirty_derivative; |
| 445 | 445 | ||
| @@ -531,6 +531,7 @@ struct cached_dev { | |||
| 531 | unsigned sequential_merge:1; | 531 | unsigned sequential_merge:1; |
| 532 | unsigned verify:1; | 532 | unsigned verify:1; |
| 533 | 533 | ||
| 534 | unsigned partial_stripes_expensive:1; | ||
| 534 | unsigned writeback_metadata:1; | 535 | unsigned writeback_metadata:1; |
| 535 | unsigned writeback_running:1; | 536 | unsigned writeback_running:1; |
| 536 | unsigned char writeback_percent; | 537 | unsigned char writeback_percent; |
| @@ -565,8 +566,7 @@ struct cache { | |||
| 565 | 566 | ||
| 566 | unsigned watermark[WATERMARK_MAX]; | 567 | unsigned watermark[WATERMARK_MAX]; |
| 567 | 568 | ||
| 568 | struct closure alloc; | 569 | struct task_struct *alloc_thread; |
| 569 | struct workqueue_struct *alloc_workqueue; | ||
| 570 | 570 | ||
| 571 | struct closure prio; | 571 | struct closure prio; |
| 572 | struct prio_set *disk_buckets; | 572 | struct prio_set *disk_buckets; |
| @@ -664,13 +664,9 @@ struct gc_stat { | |||
| 664 | * CACHE_SET_STOPPING always gets set first when we're closing down a cache set; | 664 | * CACHE_SET_STOPPING always gets set first when we're closing down a cache set; |
| 665 | * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e. | 665 | * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e. |
| 666 | * flushing dirty data). | 666 | * flushing dirty data). |
| 667 | * | ||
| 668 | * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down | ||
| 669 | * the allocation thread. | ||
| 670 | */ | 667 | */ |
| 671 | #define CACHE_SET_UNREGISTERING 0 | 668 | #define CACHE_SET_UNREGISTERING 0 |
| 672 | #define CACHE_SET_STOPPING 1 | 669 | #define CACHE_SET_STOPPING 1 |
| 673 | #define CACHE_SET_STOPPING_2 2 | ||
| 674 | 670 | ||
| 675 | struct cache_set { | 671 | struct cache_set { |
| 676 | struct closure cl; | 672 | struct closure cl; |
| @@ -703,9 +699,6 @@ struct cache_set { | |||
| 703 | /* For the btree cache */ | 699 | /* For the btree cache */ |
| 704 | struct shrinker shrink; | 700 | struct shrinker shrink; |
| 705 | 701 | ||
| 706 | /* For the allocator itself */ | ||
| 707 | wait_queue_head_t alloc_wait; | ||
| 708 | |||
| 709 | /* For the btree cache and anything allocation related */ | 702 | /* For the btree cache and anything allocation related */ |
| 710 | struct mutex bucket_lock; | 703 | struct mutex bucket_lock; |
| 711 | 704 | ||
| @@ -823,10 +816,9 @@ struct cache_set { | |||
| 823 | 816 | ||
| 824 | /* | 817 | /* |
| 825 | * A btree node on disk could have too many bsets for an iterator to fit | 818 | * A btree node on disk could have too many bsets for an iterator to fit |
| 826 | * on the stack - this is a single element mempool for btree_read_work() | 819 | * on the stack - have to dynamically allocate them |
| 827 | */ | 820 | */ |
| 828 | struct mutex fill_lock; | 821 | mempool_t *fill_iter; |
| 829 | struct btree_iter *fill_iter; | ||
| 830 | 822 | ||
| 831 | /* | 823 | /* |
| 832 | * btree_sort() is a merge sort and requires temporary space - single | 824 | * btree_sort() is a merge sort and requires temporary space - single |
| @@ -834,6 +826,7 @@ struct cache_set { | |||
| 834 | */ | 826 | */ |
| 835 | struct mutex sort_lock; | 827 | struct mutex sort_lock; |
| 836 | struct bset *sort; | 828 | struct bset *sort; |
| 829 | unsigned sort_crit_factor; | ||
| 837 | 830 | ||
| 838 | /* List of buckets we're currently writing data to */ | 831 | /* List of buckets we're currently writing data to */ |
| 839 | struct list_head data_buckets; | 832 | struct list_head data_buckets; |
| @@ -906,8 +899,6 @@ static inline unsigned local_clock_us(void) | |||
| 906 | return local_clock() >> 10; | 899 | return local_clock() >> 10; |
| 907 | } | 900 | } |
| 908 | 901 | ||
| 909 | #define MAX_BSETS 4U | ||
| 910 | |||
| 911 | #define BTREE_PRIO USHRT_MAX | 902 | #define BTREE_PRIO USHRT_MAX |
| 912 | #define INITIAL_PRIO 32768 | 903 | #define INITIAL_PRIO 32768 |
| 913 | 904 | ||
| @@ -1112,23 +1103,6 @@ static inline void __bkey_put(struct cache_set *c, struct bkey *k) | |||
| 1112 | atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); | 1103 | atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); |
| 1113 | } | 1104 | } |
| 1114 | 1105 | ||
| 1115 | /* Blktrace macros */ | ||
| 1116 | |||
| 1117 | #define blktrace_msg(c, fmt, ...) \ | ||
| 1118 | do { \ | ||
| 1119 | struct request_queue *q = bdev_get_queue(c->bdev); \ | ||
| 1120 | if (q) \ | ||
| 1121 | blk_add_trace_msg(q, fmt, ##__VA_ARGS__); \ | ||
| 1122 | } while (0) | ||
| 1123 | |||
| 1124 | #define blktrace_msg_all(s, fmt, ...) \ | ||
| 1125 | do { \ | ||
| 1126 | struct cache *_c; \ | ||
| 1127 | unsigned i; \ | ||
| 1128 | for_each_cache(_c, (s), i) \ | ||
| 1129 | blktrace_msg(_c, fmt, ##__VA_ARGS__); \ | ||
| 1130 | } while (0) | ||
| 1131 | |||
| 1132 | static inline void cached_dev_put(struct cached_dev *dc) | 1106 | static inline void cached_dev_put(struct cached_dev *dc) |
| 1133 | { | 1107 | { |
| 1134 | if (atomic_dec_and_test(&dc->count)) | 1108 | if (atomic_dec_and_test(&dc->count)) |
| @@ -1173,10 +1147,16 @@ static inline uint8_t bucket_disk_gen(struct bucket *b) | |||
| 1173 | static struct kobj_attribute ksysfs_##n = \ | 1147 | static struct kobj_attribute ksysfs_##n = \ |
| 1174 | __ATTR(n, S_IWUSR|S_IRUSR, show, store) | 1148 | __ATTR(n, S_IWUSR|S_IRUSR, show, store) |
| 1175 | 1149 | ||
| 1176 | /* Forward declarations */ | 1150 | static inline void wake_up_allocators(struct cache_set *c) |
| 1151 | { | ||
| 1152 | struct cache *ca; | ||
| 1153 | unsigned i; | ||
| 1154 | |||
| 1155 | for_each_cache(ca, c, i) | ||
| 1156 | wake_up_process(ca->alloc_thread); | ||
| 1157 | } | ||
| 1177 | 1158 | ||
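wake_up_allocators() replaces the old shared alloc_wait waitqueue: each cache now runs its own allocator kthread (ca->alloc_thread) that parks itself and is kicked directly with wake_up_process(). The standard shape of such a thread, sketched with hypothetical work_available()/do_allocation_work() helpers:

```c
#include <linux/kthread.h>
#include <linux/sched.h>

static int allocator_thread(void *arg)
{
	while (!kthread_should_stop()) {
		/* Set the sleep state *before* checking for work, so a
		 * wake_up_process() racing between the check and the
		 * schedule() turns the schedule() into a no-op instead
		 * of being lost. */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!work_available(arg)) {	/* hypothetical predicate */
			schedule();
			continue;
		}
		__set_current_state(TASK_RUNNING);

		do_allocation_work(arg);	/* hypothetical */
	}
	return 0;
}

/* At init (thread name assumed):
 *	ca->alloc_thread = kthread_run(allocator_thread, ca, "bcache_alloc");
 */
```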
| 1178 | void bch_writeback_queue(struct cached_dev *); | 1159 | /* Forward declarations */ |
| 1179 | void bch_writeback_add(struct cached_dev *, unsigned); | ||
| 1180 | 1160 | ||
| 1181 | void bch_count_io_errors(struct cache *, int, const char *); | 1161 | void bch_count_io_errors(struct cache *, int, const char *); |
| 1182 | void bch_bbio_count_io_errors(struct cache_set *, struct bio *, | 1162 | void bch_bbio_count_io_errors(struct cache_set *, struct bio *, |
| @@ -1193,7 +1173,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); | |||
| 1193 | uint8_t bch_inc_gen(struct cache *, struct bucket *); | 1173 | uint8_t bch_inc_gen(struct cache *, struct bucket *); |
| 1194 | void bch_rescale_priorities(struct cache_set *, int); | 1174 | void bch_rescale_priorities(struct cache_set *, int); |
| 1195 | bool bch_bucket_add_unused(struct cache *, struct bucket *); | 1175 | bool bch_bucket_add_unused(struct cache *, struct bucket *); |
| 1196 | void bch_allocator_thread(struct closure *); | ||
| 1197 | 1176 | ||
| 1198 | long bch_bucket_alloc(struct cache *, unsigned, struct closure *); | 1177 | long bch_bucket_alloc(struct cache *, unsigned, struct closure *); |
| 1199 | void bch_bucket_free(struct cache_set *, struct bkey *); | 1178 | void bch_bucket_free(struct cache_set *, struct bkey *); |
| @@ -1241,9 +1220,9 @@ void bch_cache_set_stop(struct cache_set *); | |||
| 1241 | struct cache_set *bch_cache_set_alloc(struct cache_sb *); | 1220 | struct cache_set *bch_cache_set_alloc(struct cache_sb *); |
| 1242 | void bch_btree_cache_free(struct cache_set *); | 1221 | void bch_btree_cache_free(struct cache_set *); |
| 1243 | int bch_btree_cache_alloc(struct cache_set *); | 1222 | int bch_btree_cache_alloc(struct cache_set *); |
| 1244 | void bch_cached_dev_writeback_init(struct cached_dev *); | ||
| 1245 | void bch_moving_init_cache_set(struct cache_set *); | 1223 | void bch_moving_init_cache_set(struct cache_set *); |
| 1246 | 1224 | ||
| 1225 | int bch_cache_allocator_start(struct cache *ca); | ||
| 1247 | void bch_cache_allocator_exit(struct cache *ca); | 1226 | void bch_cache_allocator_exit(struct cache *ca); |
| 1248 | int bch_cache_allocator_init(struct cache *ca); | 1227 | int bch_cache_allocator_init(struct cache *ca); |
| 1249 | 1228 | ||
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 1d27d3af3251..8010eed06a51 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c | |||
| @@ -78,6 +78,7 @@ struct bkey *bch_keylist_pop(struct keylist *l) | |||
| 78 | bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) | 78 | bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) |
| 79 | { | 79 | { |
| 80 | unsigned i; | 80 | unsigned i; |
| 81 | char buf[80]; | ||
| 81 | 82 | ||
| 82 | if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))) | 83 | if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))) |
| 83 | goto bad; | 84 | goto bad; |
| @@ -102,7 +103,8 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) | |||
| 102 | 103 | ||
| 103 | return false; | 104 | return false; |
| 104 | bad: | 105 | bad: |
| 105 | cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k)); | 106 | bch_bkey_to_text(buf, sizeof(buf), k); |
| 107 | cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k)); | ||
| 106 | return true; | 108 | return true; |
| 107 | } | 109 | } |
| 108 | 110 | ||
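The pkey() calls removed throughout this commit apparently formatted keys into shared static storage; the replacement bch_bkey_to_text() writes into a caller-owned char buf[80], which is safe under concurrency and on any stack. A hedged sketch of the bounded-formatting idiom (the printed fields are illustrative, not the full bkey encoding):

```c
#include <linux/kernel.h>

/* scnprintf() never overruns and always NUL-terminates; it returns the
 * bytes actually written, so chained calls stay inside the buffer even
 * once it fills up. */
static void example_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
	size_t out = 0;

	out += scnprintf(buf + out, size - out, "%llu:%llu",
			 (unsigned long long) KEY_INODE(k),
			 (unsigned long long) KEY_OFFSET(k));
	out += scnprintf(buf + out, size - out, " len %llu dirty %u",
			 (unsigned long long) KEY_SIZE(k),
			 (unsigned) KEY_DIRTY(k));
}
```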
| @@ -162,10 +164,16 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k) | |||
| 162 | #ifdef CONFIG_BCACHE_EDEBUG | 164 | #ifdef CONFIG_BCACHE_EDEBUG |
| 163 | bug: | 165 | bug: |
| 164 | mutex_unlock(&b->c->bucket_lock); | 166 | mutex_unlock(&b->c->bucket_lock); |
| 165 | btree_bug(b, | 167 | |
| 168 | { | ||
| 169 | char buf[80]; | ||
| 170 | |||
| 171 | bch_bkey_to_text(buf, sizeof(buf), k); | ||
| 172 | btree_bug(b, | ||
| 166 | "inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", | 173 | "inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", |
| 167 | pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), | 174 | buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), |
| 168 | g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); | 175 | g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); |
| 176 | } | ||
| 169 | return true; | 177 | return true; |
| 170 | #endif | 178 | #endif |
| 171 | } | 179 | } |
| @@ -1084,33 +1092,39 @@ void bch_btree_sort_into(struct btree *b, struct btree *new) | |||
| 1084 | new->sets->size = 0; | 1092 | new->sets->size = 0; |
| 1085 | } | 1093 | } |
| 1086 | 1094 | ||
| 1095 | #define SORT_CRIT (4096 / sizeof(uint64_t)) | ||
| 1096 | |||
| 1087 | void bch_btree_sort_lazy(struct btree *b) | 1097 | void bch_btree_sort_lazy(struct btree *b) |
| 1088 | { | 1098 | { |
| 1089 | if (b->nsets) { | 1099 | unsigned crit = SORT_CRIT; |
| 1090 | unsigned i, j, keys = 0, total; | 1100 | int i; |
| 1091 | 1101 | ||
| 1092 | for (i = 0; i <= b->nsets; i++) | 1102 | /* Don't sort if nothing to do */ |
| 1093 | keys += b->sets[i].data->keys; | 1103 | if (!b->nsets) |
| 1094 | 1104 | goto out; | |
| 1095 | total = keys; | ||
| 1096 | 1105 | ||
| 1097 | for (j = 0; j < b->nsets; j++) { | 1106 | /* If not a leaf node, always sort */ |
| 1098 | if (keys * 2 < total || | 1107 | if (b->level) { |
| 1099 | keys < 1000) { | 1108 | bch_btree_sort(b); |
| 1100 | bch_btree_sort_partial(b, j); | 1109 | return; |
| 1101 | return; | 1110 | } |
| 1102 | } | ||
| 1103 | 1111 | ||
| 1104 | keys -= b->sets[j].data->keys; | 1112 | for (i = b->nsets - 1; i >= 0; --i) { |
| 1105 | } | 1113 | crit *= b->c->sort_crit_factor; |
| 1106 | 1114 | ||
| 1107 | /* Must sort if b->nsets == 3 or we'll overflow */ | 1115 | if (b->sets[i].data->keys < crit) { |
| 1108 | if (b->nsets >= (MAX_BSETS - 1) - b->level) { | 1116 | bch_btree_sort_partial(b, i); |
| 1109 | bch_btree_sort(b); | ||
| 1110 | return; | 1117 | return; |
| 1111 | } | 1118 | } |
| 1112 | } | 1119 | } |
| 1113 | 1120 | ||
| 1121 | /* Sort if we'd overflow */ | ||
| 1122 | if (b->nsets + 1 == MAX_BSETS) { | ||
| 1123 | bch_btree_sort(b); | ||
| 1124 | return; | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | out: | ||
| 1114 | bset_build_written_tree(b); | 1128 | bset_build_written_tree(b); |
| 1115 | } | 1129 | } |
| 1116 | 1130 | ||
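The rewritten bch_btree_sort_lazy() scans sets newest-first and does a partial merge sort starting at the first set smaller than a geometrically growing threshold: SORT_CRIT is 4096 / sizeof(uint64_t) = 512 keys, and the bound for set i is 512 * factor^(nsets - i), so older (deeper) sets must be proportionally larger before they are worth merging. A small worked sketch of the same arithmetic in plain C, with an assumed factor of 3:

```c
#include <stdio.h>

#define SORT_CRIT (4096 / sizeof(unsigned long long))	/* 512 keys */

/* Return the set index to merge from, or -1 if no sort is worthwhile. */
static int pick_sort_start(const unsigned *set_keys, int nsets, unsigned factor)
{
	unsigned crit = SORT_CRIT;
	int i;

	for (i = nsets - 1; i >= 0; --i) {
		crit *= factor;		/* older sets get a looser bound */
		if (set_keys[i] < crit)
			return i;	/* merge sets i..nsets into one */
	}
	return -1;
}

int main(void)
{
	/* Index 0 is the oldest set; b->nsets counts sets beyond it. */
	unsigned keys[] = { 100000, 5000, 600, 40 };

	/* crit at i=2 is 1536 and keys[2] = 600 < 1536: sort from set 2 */
	printf("sort from set %d\n", pick_sort_start(keys, 3, 3));
	return 0;
}
```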
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index 57a9cff41546..ae115a253d73 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef _BCACHE_BSET_H | 1 | #ifndef _BCACHE_BSET_H |
| 2 | #define _BCACHE_BSET_H | 2 | #define _BCACHE_BSET_H |
| 3 | 3 | ||
| 4 | #include <linux/slab.h> | ||
| 5 | |||
| 4 | /* | 6 | /* |
| 5 | * BKEYS: | 7 | * BKEYS: |
| 6 | * | 8 | * |
| @@ -142,6 +144,8 @@ | |||
| 142 | 144 | ||
| 143 | /* Btree key comparison/iteration */ | 145 | /* Btree key comparison/iteration */ |
| 144 | 146 | ||
| 147 | #define MAX_BSETS 4U | ||
| 148 | |||
| 145 | struct btree_iter { | 149 | struct btree_iter { |
| 146 | size_t size, used; | 150 | size_t size, used; |
| 147 | struct btree_iter_set { | 151 | struct btree_iter_set { |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 7a5658f04e62..ee372884c405 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include "btree.h" | 24 | #include "btree.h" |
| 25 | #include "debug.h" | 25 | #include "debug.h" |
| 26 | #include "request.h" | 26 | #include "request.h" |
| 27 | #include "writeback.h" | ||
| 27 | 28 | ||
| 28 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| 29 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
| @@ -134,44 +135,17 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i) | |||
| 134 | return crc ^ 0xffffffffffffffffULL; | 135 | return crc ^ 0xffffffffffffffffULL; |
| 135 | } | 136 | } |
| 136 | 137 | ||
| 137 | static void btree_bio_endio(struct bio *bio, int error) | 138 | static void bch_btree_node_read_done(struct btree *b) |
| 138 | { | 139 | { |
| 139 | struct closure *cl = bio->bi_private; | ||
| 140 | struct btree *b = container_of(cl, struct btree, io.cl); | ||
| 141 | |||
| 142 | if (error) | ||
| 143 | set_btree_node_io_error(b); | ||
| 144 | |||
| 145 | bch_bbio_count_io_errors(b->c, bio, error, (bio->bi_rw & WRITE) | ||
| 146 | ? "writing btree" : "reading btree"); | ||
| 147 | closure_put(cl); | ||
| 148 | } | ||
| 149 | |||
| 150 | static void btree_bio_init(struct btree *b) | ||
| 151 | { | ||
| 152 | BUG_ON(b->bio); | ||
| 153 | b->bio = bch_bbio_alloc(b->c); | ||
| 154 | |||
| 155 | b->bio->bi_end_io = btree_bio_endio; | ||
| 156 | b->bio->bi_private = &b->io.cl; | ||
| 157 | } | ||
| 158 | |||
| 159 | void bch_btree_read_done(struct closure *cl) | ||
| 160 | { | ||
| 161 | struct btree *b = container_of(cl, struct btree, io.cl); | ||
| 162 | struct bset *i = b->sets[0].data; | ||
| 163 | struct btree_iter *iter = b->c->fill_iter; | ||
| 164 | const char *err = "bad btree header"; | 140 | const char *err = "bad btree header"; |
| 165 | BUG_ON(b->nsets || b->written); | 141 | struct bset *i = b->sets[0].data; |
| 166 | 142 | struct btree_iter *iter; | |
| 167 | bch_bbio_free(b->bio, b->c); | ||
| 168 | b->bio = NULL; | ||
| 169 | 143 | ||
| 170 | mutex_lock(&b->c->fill_lock); | 144 | iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT); |
| 145 | iter->size = b->c->sb.bucket_size / b->c->sb.block_size; | ||
| 171 | iter->used = 0; | 146 | iter->used = 0; |
| 172 | 147 | ||
| 173 | if (btree_node_io_error(b) || | 148 | if (!i->seq) |
| 174 | !i->seq) | ||
| 175 | goto err; | 149 | goto err; |
| 176 | 150 | ||
| 177 | for (; | 151 | for (; |
| @@ -228,17 +202,8 @@ void bch_btree_read_done(struct closure *cl) | |||
| 228 | if (b->written < btree_blocks(b)) | 202 | if (b->written < btree_blocks(b)) |
| 229 | bch_bset_init_next(b); | 203 | bch_bset_init_next(b); |
| 230 | out: | 204 | out: |
| 231 | 205 | mempool_free(iter, b->c->fill_iter); | |
| 232 | mutex_unlock(&b->c->fill_lock); | 206 | return; |
| 233 | |||
| 234 | spin_lock(&b->c->btree_read_time_lock); | ||
| 235 | bch_time_stats_update(&b->c->btree_read_time, b->io_start_time); | ||
| 236 | spin_unlock(&b->c->btree_read_time_lock); | ||
| 237 | |||
| 238 | smp_wmb(); /* read_done is our write lock */ | ||
| 239 | set_btree_node_read_done(b); | ||
| 240 | |||
| 241 | closure_return(cl); | ||
| 242 | err: | 207 | err: |
| 243 | set_btree_node_io_error(b); | 208 | set_btree_node_io_error(b); |
| 244 | bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys", | 209 | bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys", |
| @@ -247,48 +212,69 @@ err: | |||
| 247 | goto out; | 212 | goto out; |
| 248 | } | 213 | } |
| 249 | 214 | ||
| 250 | void bch_btree_read(struct btree *b) | 215 | static void btree_node_read_endio(struct bio *bio, int error) |
| 216 | { | ||
| 217 | struct closure *cl = bio->bi_private; | ||
| 218 | closure_put(cl); | ||
| 219 | } | ||
| 220 | |||
| 221 | void bch_btree_node_read(struct btree *b) | ||
| 251 | { | 222 | { |
| 252 | BUG_ON(b->nsets || b->written); | 223 | uint64_t start_time = local_clock(); |
| 224 | struct closure cl; | ||
| 225 | struct bio *bio; | ||
| 226 | |||
| 227 | trace_bcache_btree_read(b); | ||
| 228 | |||
| 229 | closure_init_stack(&cl); | ||
| 230 | |||
| 231 | bio = bch_bbio_alloc(b->c); | ||
| 232 | bio->bi_rw = REQ_META|READ_SYNC; | ||
| 233 | bio->bi_size = KEY_SIZE(&b->key) << 9; | ||
| 234 | bio->bi_end_io = btree_node_read_endio; | ||
| 235 | bio->bi_private = &cl; | ||
| 236 | |||
| 237 | bch_bio_map(bio, b->sets[0].data); | ||
| 238 | |||
| 239 | bch_submit_bbio(bio, b->c, &b->key, 0); | ||
| 240 | closure_sync(&cl); | ||
| 253 | 241 | ||
| 254 | if (!closure_trylock(&b->io.cl, &b->c->cl)) | 242 | if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) |
| 255 | BUG(); | 243 | set_btree_node_io_error(b); |
| 256 | 244 | ||
| 257 | b->io_start_time = local_clock(); | 245 | bch_bbio_free(bio, b->c); |
| 258 | 246 | ||
| 259 | btree_bio_init(b); | 247 | if (btree_node_io_error(b)) |
| 260 | b->bio->bi_rw = REQ_META|READ_SYNC; | 248 | goto err; |
| 261 | b->bio->bi_size = KEY_SIZE(&b->key) << 9; | ||
| 262 | 249 | ||
| 263 | bch_bio_map(b->bio, b->sets[0].data); | 250 | bch_btree_node_read_done(b); |
| 264 | 251 | ||
| 265 | pr_debug("%s", pbtree(b)); | 252 | spin_lock(&b->c->btree_read_time_lock); |
| 266 | trace_bcache_btree_read(b->bio); | 253 | bch_time_stats_update(&b->c->btree_read_time, start_time); |
| 267 | bch_submit_bbio(b->bio, b->c, &b->key, 0); | 254 | spin_unlock(&b->c->btree_read_time_lock); |
| 268 | 255 | ||
| 269 | continue_at(&b->io.cl, bch_btree_read_done, system_wq); | 256 | return; |
| 257 | err: | ||
| 258 | bch_cache_set_error(b->c, "io error reading bucket %lu", | ||
| 259 | PTR_BUCKET_NR(b->c, &b->key, 0)); | ||
| 270 | } | 260 | } |
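bch_btree_node_read() is now fully synchronous: the endio handler only does closure_put(), and the submitter blocks in closure_sync() on an on-stack closure until that last reference drops. Outside bcache's closure library the same shape is normally written with a completion; a hedged sketch of that generic analogue, using 3.11-era bio fields:

```c
#include <linux/bio.h>
#include <linux/completion.h>

static void sync_read_endio(struct bio *bio, int error)
{
	complete(bio->bi_private);	/* just wake the submitter */
}

static int read_node_sync(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio->bi_rw = REQ_META | READ_SYNC;
	bio->bi_end_io = sync_read_endio;
	bio->bi_private = &done;

	submit_bio(READ, bio);		/* 3.11 signature: (rw, bio) */
	wait_for_completion(&done);	/* stack frame outlives the I/O */

	return test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
}
```

Reading inline also lets the error path run in the caller's context, which is why the old btree_node_read_done() continuation and its read_done flag disappear in this commit.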
| 271 | 261 | ||
| 272 | static void btree_complete_write(struct btree *b, struct btree_write *w) | 262 | static void btree_complete_write(struct btree *b, struct btree_write *w) |
| 273 | { | 263 | { |
| 274 | if (w->prio_blocked && | 264 | if (w->prio_blocked && |
| 275 | !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) | 265 | !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) |
| 276 | wake_up(&b->c->alloc_wait); | 266 | wake_up_allocators(b->c); |
| 277 | 267 | ||
| 278 | if (w->journal) { | 268 | if (w->journal) { |
| 279 | atomic_dec_bug(w->journal); | 269 | atomic_dec_bug(w->journal); |
| 280 | __closure_wake_up(&b->c->journal.wait); | 270 | __closure_wake_up(&b->c->journal.wait); |
| 281 | } | 271 | } |
| 282 | 272 | ||
| 283 | if (w->owner) | ||
| 284 | closure_put(w->owner); | ||
| 285 | |||
| 286 | w->prio_blocked = 0; | 273 | w->prio_blocked = 0; |
| 287 | w->journal = NULL; | 274 | w->journal = NULL; |
| 288 | w->owner = NULL; | ||
| 289 | } | 275 | } |
| 290 | 276 | ||
| 291 | static void __btree_write_done(struct closure *cl) | 277 | static void __btree_node_write_done(struct closure *cl) |
| 292 | { | 278 | { |
| 293 | struct btree *b = container_of(cl, struct btree, io.cl); | 279 | struct btree *b = container_of(cl, struct btree, io.cl); |
| 294 | struct btree_write *w = btree_prev_write(b); | 280 | struct btree_write *w = btree_prev_write(b); |
| @@ -304,7 +290,7 @@ static void __btree_write_done(struct closure *cl) | |||
| 304 | closure_return(cl); | 290 | closure_return(cl); |
| 305 | } | 291 | } |
| 306 | 292 | ||
| 307 | static void btree_write_done(struct closure *cl) | 293 | static void btree_node_write_done(struct closure *cl) |
| 308 | { | 294 | { |
| 309 | struct btree *b = container_of(cl, struct btree, io.cl); | 295 | struct btree *b = container_of(cl, struct btree, io.cl); |
| 310 | struct bio_vec *bv; | 296 | struct bio_vec *bv; |
| @@ -313,10 +299,22 @@ static void btree_write_done(struct closure *cl) | |||
| 313 | __bio_for_each_segment(bv, b->bio, n, 0) | 299 | __bio_for_each_segment(bv, b->bio, n, 0) |
| 314 | __free_page(bv->bv_page); | 300 | __free_page(bv->bv_page); |
| 315 | 301 | ||
| 316 | __btree_write_done(cl); | 302 | __btree_node_write_done(cl); |
| 317 | } | 303 | } |
| 318 | 304 | ||
| 319 | static void do_btree_write(struct btree *b) | 305 | static void btree_node_write_endio(struct bio *bio, int error) |
| 306 | { | ||
| 307 | struct closure *cl = bio->bi_private; | ||
| 308 | struct btree *b = container_of(cl, struct btree, io.cl); | ||
| 309 | |||
| 310 | if (error) | ||
| 311 | set_btree_node_io_error(b); | ||
| 312 | |||
| 313 | bch_bbio_count_io_errors(b->c, bio, error, "writing btree"); | ||
| 314 | closure_put(cl); | ||
| 315 | } | ||
| 316 | |||
| 317 | static void do_btree_node_write(struct btree *b) | ||
| 320 | { | 318 | { |
| 321 | struct closure *cl = &b->io.cl; | 319 | struct closure *cl = &b->io.cl; |
| 322 | struct bset *i = b->sets[b->nsets].data; | 320 | struct bset *i = b->sets[b->nsets].data; |
| @@ -325,15 +323,34 @@ static void do_btree_write(struct btree *b) | |||
| 325 | i->version = BCACHE_BSET_VERSION; | 323 | i->version = BCACHE_BSET_VERSION; |
| 326 | i->csum = btree_csum_set(b, i); | 324 | i->csum = btree_csum_set(b, i); |
| 327 | 325 | ||
| 328 | btree_bio_init(b); | 326 | BUG_ON(b->bio); |
| 329 | b->bio->bi_rw = REQ_META|WRITE_SYNC; | 327 | b->bio = bch_bbio_alloc(b->c); |
| 330 | b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); | 328 | |
| 329 | b->bio->bi_end_io = btree_node_write_endio; | ||
| 330 | b->bio->bi_private = &b->io.cl; | ||
| 331 | b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; | ||
| 332 | b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); | ||
| 331 | bch_bio_map(b->bio, i); | 333 | bch_bio_map(b->bio, i); |
| 332 | 334 | ||
| 335 | /* | ||
| 336 | * If we're appending to a leaf node, we don't technically need FUA - | ||
| 337 | * this write just needs to be persisted before the next journal write, | ||
| 338 | * which will be marked FLUSH|FUA. | ||
| 339 | * | ||
| 340 | * Similarly if we're writing a new btree root - the pointer is going to | ||
| 341 | * be in the next journal entry. | ||
| 342 | * | ||
| 343 | * But if we're writing a new btree node (that isn't a root) or | ||
| 344 | * appending to a non-leaf btree node, we need either FUA or a flush | ||
| 345 | * when we write the parent with the new pointer. FUA is cheaper than a | ||
| 346 | * flush, and writes appending to leaf nodes aren't blocking anything so | ||
| 347 | * just make all btree node writes FUA to keep things sane. | ||
| 348 | */ | ||
| 349 | |||
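The comment weighs two ways to order a child node's write against the parent write that will reference it; a hedged sketch of both options with 3.11-era flags (submission and waiting elided):

```c
#include <linux/blkdev.h>

static void order_child_write(struct bio *child, struct block_device *bdev)
{
	/* Option 1 (what this patch picks): FUA. The child is on stable
	 * media when its completion fires, so the parent write can be
	 * issued immediately afterwards. */
	child->bi_rw = REQ_META | WRITE_SYNC | REQ_FUA;

	/* Option 2: plain write, then drain the device cache before the
	 * parent write. One flush can cover many children, but it stalls
	 * the whole queue:
	 *
	 *	child->bi_rw = REQ_META | WRITE_SYNC;
	 *	...submit child and wait for completion...
	 *	blkdev_issue_flush(bdev, GFP_NOIO, NULL);
	 */
}
```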
| 333 | bkey_copy(&k.key, &b->key); | 350 | bkey_copy(&k.key, &b->key); |
| 334 | SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i)); | 351 | SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i)); |
| 335 | 352 | ||
| 336 | if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) { | 353 | if (!bio_alloc_pages(b->bio, GFP_NOIO)) { |
| 337 | int j; | 354 | int j; |
| 338 | struct bio_vec *bv; | 355 | struct bio_vec *bv; |
| 339 | void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); | 356 | void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); |
| @@ -342,40 +359,41 @@ static void do_btree_write(struct btree *b) | |||
| 342 | memcpy(page_address(bv->bv_page), | 359 | memcpy(page_address(bv->bv_page), |
| 343 | base + j * PAGE_SIZE, PAGE_SIZE); | 360 | base + j * PAGE_SIZE, PAGE_SIZE); |
| 344 | 361 | ||
| 345 | trace_bcache_btree_write(b->bio); | ||
| 346 | bch_submit_bbio(b->bio, b->c, &k.key, 0); | 362 | bch_submit_bbio(b->bio, b->c, &k.key, 0); |
| 347 | 363 | ||
| 348 | continue_at(cl, btree_write_done, NULL); | 364 | continue_at(cl, btree_node_write_done, NULL); |
| 349 | } else { | 365 | } else { |
| 350 | b->bio->bi_vcnt = 0; | 366 | b->bio->bi_vcnt = 0; |
| 351 | bch_bio_map(b->bio, i); | 367 | bch_bio_map(b->bio, i); |
| 352 | 368 | ||
| 353 | trace_bcache_btree_write(b->bio); | ||
| 354 | bch_submit_bbio(b->bio, b->c, &k.key, 0); | 369 | bch_submit_bbio(b->bio, b->c, &k.key, 0); |
| 355 | 370 | ||
| 356 | closure_sync(cl); | 371 | closure_sync(cl); |
| 357 | __btree_write_done(cl); | 372 | __btree_node_write_done(cl); |
| 358 | } | 373 | } |
| 359 | } | 374 | } |
| 360 | 375 | ||
| 361 | static void __btree_write(struct btree *b) | 376 | void bch_btree_node_write(struct btree *b, struct closure *parent) |
| 362 | { | 377 | { |
| 363 | struct bset *i = b->sets[b->nsets].data; | 378 | struct bset *i = b->sets[b->nsets].data; |
| 364 | 379 | ||
| 380 | trace_bcache_btree_write(b); | ||
| 381 | |||
| 365 | BUG_ON(current->bio_list); | 382 | BUG_ON(current->bio_list); |
| 383 | BUG_ON(b->written >= btree_blocks(b)); | ||
| 384 | BUG_ON(b->written && !i->keys); | ||
| 385 | BUG_ON(b->sets->data->seq != i->seq); | ||
| 386 | bch_check_key_order(b, i); | ||
| 366 | 387 | ||
| 367 | closure_lock(&b->io, &b->c->cl); | ||
| 368 | cancel_delayed_work(&b->work); | 388 | cancel_delayed_work(&b->work); |
| 369 | 389 | ||
| 390 | /* If caller isn't waiting for write, parent refcount is cache set */ | ||
| 391 | closure_lock(&b->io, parent ?: &b->c->cl); | ||
| 392 | |||
| 370 | clear_bit(BTREE_NODE_dirty, &b->flags); | 393 | clear_bit(BTREE_NODE_dirty, &b->flags); |
| 371 | change_bit(BTREE_NODE_write_idx, &b->flags); | 394 | change_bit(BTREE_NODE_write_idx, &b->flags); |
| 372 | 395 | ||
| 373 | bch_check_key_order(b, i); | 396 | do_btree_node_write(b); |
| 374 | BUG_ON(b->written && !i->keys); | ||
| 375 | |||
| 376 | do_btree_write(b); | ||
| 377 | |||
| 378 | pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys); | ||
| 379 | 397 | ||
| 380 | b->written += set_blocks(i, b->c); | 398 | b->written += set_blocks(i, b->c); |
| 381 | atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size, | 399 | atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size, |
| @@ -387,37 +405,31 @@ static void __btree_write(struct btree *b) | |||
| 387 | bch_bset_init_next(b); | 405 | bch_bset_init_next(b); |
| 388 | } | 406 | } |
| 389 | 407 | ||
| 390 | static void btree_write_work(struct work_struct *w) | 408 | static void btree_node_write_work(struct work_struct *w) |
| 391 | { | 409 | { |
| 392 | struct btree *b = container_of(to_delayed_work(w), struct btree, work); | 410 | struct btree *b = container_of(to_delayed_work(w), struct btree, work); |
| 393 | 411 | ||
| 394 | down_write(&b->lock); | 412 | rw_lock(true, b, b->level); |
| 395 | 413 | ||
| 396 | if (btree_node_dirty(b)) | 414 | if (btree_node_dirty(b)) |
| 397 | __btree_write(b); | 415 | bch_btree_node_write(b, NULL); |
| 398 | up_write(&b->lock); | 416 | rw_unlock(true, b); |
| 399 | } | 417 | } |
| 400 | 418 | ||
| 401 | void bch_btree_write(struct btree *b, bool now, struct btree_op *op) | 419 | static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op) |
| 402 | { | 420 | { |
| 403 | struct bset *i = b->sets[b->nsets].data; | 421 | struct bset *i = b->sets[b->nsets].data; |
| 404 | struct btree_write *w = btree_current_write(b); | 422 | struct btree_write *w = btree_current_write(b); |
| 405 | 423 | ||
| 406 | BUG_ON(b->written && | 424 | BUG_ON(!b->written); |
| 407 | (b->written >= btree_blocks(b) || | 425 | BUG_ON(!i->keys); |
| 408 | i->seq != b->sets[0].data->seq || | ||
| 409 | !i->keys)); | ||
| 410 | 426 | ||
| 411 | if (!btree_node_dirty(b)) { | 427 | if (!btree_node_dirty(b)) |
| 412 | set_btree_node_dirty(b); | 428 | queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); |
| 413 | queue_delayed_work(btree_io_wq, &b->work, | ||
| 414 | msecs_to_jiffies(30000)); | ||
| 415 | } | ||
| 416 | 429 | ||
| 417 | w->prio_blocked += b->prio_blocked; | 430 | set_btree_node_dirty(b); |
| 418 | b->prio_blocked = 0; | ||
| 419 | 431 | ||
| 420 | if (op && op->journal && !b->level) { | 432 | if (op && op->journal) { |
| 421 | if (w->journal && | 433 | if (w->journal && |
| 422 | journal_pin_cmp(b->c, w, op)) { | 434 | journal_pin_cmp(b->c, w, op)) { |
| 423 | atomic_dec_bug(w->journal); | 435 | atomic_dec_bug(w->journal); |
| @@ -430,23 +442,10 @@ void bch_btree_write(struct btree *b, bool now, struct btree_op *op) | |||
| 430 | } | 442 | } |
| 431 | } | 443 | } |
| 432 | 444 | ||
| 433 | if (current->bio_list) | ||
| 434 | return; | ||
| 435 | |||
| 436 | /* Force write if set is too big */ | 445 | /* Force write if set is too big */ |
| 437 | if (now || | 446 | if (set_bytes(i) > PAGE_SIZE - 48 && |
| 438 | b->level || | 447 | !current->bio_list) |
| 439 | set_bytes(i) > PAGE_SIZE - 48) { | 448 | bch_btree_node_write(b, NULL); |
| 440 | if (op && now) { | ||
| 441 | /* Must wait on multiple writes */ | ||
| 442 | BUG_ON(w->owner); | ||
| 443 | w->owner = &op->cl; | ||
| 444 | closure_get(&op->cl); | ||
| 445 | } | ||
| 446 | |||
| 447 | __btree_write(b); | ||
| 448 | } | ||
| 449 | BUG_ON(!b->written); | ||
| 450 | } | 449 | } |
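bch_btree_leaf_dirty() batches leaf writeback: only the clean-to-dirty transition arms a 30-second delayed work item, so further inserts inside the window coalesce into the one write that btree_node_write_work() later issues under the node's write lock. A minimal sketch of the plumbing with hypothetical names:

```c
#include <linux/workqueue.h>

struct node {
	struct delayed_work work;	/* INIT_DELAYED_WORK() at alloc time */
	unsigned long flags;
};

#define NODE_DIRTY 0	/* bit number, as with BTREE_NODE_dirty */

static void node_write_work(struct work_struct *w)
{
	struct node *n = container_of(to_delayed_work(w), struct node, work);

	/* take the node's lock here, as btree_node_write_work() does */
	if (test_and_clear_bit(NODE_DIRTY, &n->flags))
		pr_info("flushing node %p\n", n);	/* stand-in for the write */
}

static void node_mark_dirty(struct node *n, struct workqueue_struct *wq)
{
	/* Only the first dirtying within the window arms the timer */
	if (!test_and_set_bit(NODE_DIRTY, &n->flags))
		queue_delayed_work(wq, &n->work, 30 * HZ);
}
```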
| 451 | 450 | ||
| 452 | /* | 451 | /* |
| @@ -559,7 +558,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, | |||
| 559 | init_rwsem(&b->lock); | 558 | init_rwsem(&b->lock); |
| 560 | lockdep_set_novalidate_class(&b->lock); | 559 | lockdep_set_novalidate_class(&b->lock); |
| 561 | INIT_LIST_HEAD(&b->list); | 560 | INIT_LIST_HEAD(&b->list); |
| 562 | INIT_DELAYED_WORK(&b->work, btree_write_work); | 561 | INIT_DELAYED_WORK(&b->work, btree_node_write_work); |
| 563 | b->c = c; | 562 | b->c = c; |
| 564 | closure_init_unlocked(&b->io); | 563 | closure_init_unlocked(&b->io); |
| 565 | 564 | ||
| @@ -582,7 +581,7 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order) | |||
| 582 | BUG_ON(btree_node_dirty(b) && !b->sets[0].data); | 581 | BUG_ON(btree_node_dirty(b) && !b->sets[0].data); |
| 583 | 582 | ||
| 584 | if (cl && btree_node_dirty(b)) | 583 | if (cl && btree_node_dirty(b)) |
| 585 | bch_btree_write(b, true, NULL); | 584 | bch_btree_node_write(b, NULL); |
| 586 | 585 | ||
| 587 | if (cl) | 586 | if (cl) |
| 588 | closure_wait_event_async(&b->io.wait, cl, | 587 | closure_wait_event_async(&b->io.wait, cl, |
| @@ -623,6 +622,13 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc) | |||
| 623 | else if (!mutex_trylock(&c->bucket_lock)) | 622 | else if (!mutex_trylock(&c->bucket_lock)) |
| 624 | return -1; | 623 | return -1; |
| 625 | 624 | ||
| 625 | /* | ||
| 626 | * It's _really_ critical that we don't free too many btree nodes - we | ||
| 627 | * have to always leave ourselves a reserve. The reserve is how we | ||
| 628 | * guarantee that allocating memory for a new btree node can always | ||
| 629 | * succeed, so that inserting keys into the btree can always succeed and | ||
| 630 | * IO can always make forward progress: | ||
| 631 | */ | ||
| 626 | nr /= c->btree_pages; | 632 | nr /= c->btree_pages; |
| 627 | nr = min_t(unsigned long, nr, mca_can_free(c)); | 633 | nr = min_t(unsigned long, nr, mca_can_free(c)); |
| 628 | 634 | ||
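The new comment explains why bch_mca_shrink() must never hand back the btree-node reserve. For reference, this is the 3.11-era shrinker interface, where one callback both counts and reclaims; a hedged registration sketch with hypothetical count/free helpers:

```c
#include <linux/shrinker.h>

/* nr_to_scan == 0 asks "how many freeable objects remain?"; nonzero
 * asks to reclaim up to that many and report what is left. Returning
 * -1 refuses the request entirely, e.g. when the caller cannot sleep. */
static int example_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		if (!(sc->gfp_mask & __GFP_WAIT))
			return -1;
		free_objects(sc->nr_to_scan);	/* hypothetical */
	}
	return count_freeable();		/* hypothetical */
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at init,
 * unregister_shrinker() before teardown. */
```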
| @@ -766,6 +772,8 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k, | |||
| 766 | int ret = -ENOMEM; | 772 | int ret = -ENOMEM; |
| 767 | struct btree *i; | 773 | struct btree *i; |
| 768 | 774 | ||
| 775 | trace_bcache_btree_cache_cannibalize(c); | ||
| 776 | |||
| 769 | if (!cl) | 777 | if (!cl) |
| 770 | return ERR_PTR(-ENOMEM); | 778 | return ERR_PTR(-ENOMEM); |
| 771 | 779 | ||
| @@ -784,7 +792,6 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k, | |||
| 784 | return ERR_PTR(-EAGAIN); | 792 | return ERR_PTR(-EAGAIN); |
| 785 | } | 793 | } |
| 786 | 794 | ||
| 787 | /* XXX: tracepoint */ | ||
| 788 | c->try_harder = cl; | 795 | c->try_harder = cl; |
| 789 | c->try_harder_start = local_clock(); | 796 | c->try_harder_start = local_clock(); |
| 790 | retry: | 797 | retry: |
| @@ -905,6 +912,9 @@ retry: | |||
| 905 | b = mca_find(c, k); | 912 | b = mca_find(c, k); |
| 906 | 913 | ||
| 907 | if (!b) { | 914 | if (!b) { |
| 915 | if (current->bio_list) | ||
| 916 | return ERR_PTR(-EAGAIN); | ||
| 917 | |||
| 908 | mutex_lock(&c->bucket_lock); | 918 | mutex_lock(&c->bucket_lock); |
| 909 | b = mca_alloc(c, k, level, &op->cl); | 919 | b = mca_alloc(c, k, level, &op->cl); |
| 910 | mutex_unlock(&c->bucket_lock); | 920 | mutex_unlock(&c->bucket_lock); |
| @@ -914,7 +924,7 @@ retry: | |||
| 914 | if (IS_ERR(b)) | 924 | if (IS_ERR(b)) |
| 915 | return b; | 925 | return b; |
| 916 | 926 | ||
| 917 | bch_btree_read(b); | 927 | bch_btree_node_read(b); |
| 918 | 928 | ||
| 919 | if (!write) | 929 | if (!write) |
| 920 | downgrade_write(&b->lock); | 930 | downgrade_write(&b->lock); |
| @@ -937,15 +947,12 @@ retry: | |||
| 937 | for (; i <= b->nsets; i++) | 947 | for (; i <= b->nsets; i++) |
| 938 | prefetch(b->sets[i].data); | 948 | prefetch(b->sets[i].data); |
| 939 | 949 | ||
| 940 | if (!closure_wait_event(&b->io.wait, &op->cl, | 950 | if (btree_node_io_error(b)) { |
| 941 | btree_node_read_done(b))) { | ||
| 942 | rw_unlock(write, b); | ||
| 943 | b = ERR_PTR(-EAGAIN); | ||
| 944 | } else if (btree_node_io_error(b)) { | ||
| 945 | rw_unlock(write, b); | 951 | rw_unlock(write, b); |
| 946 | b = ERR_PTR(-EIO); | 952 | return ERR_PTR(-EIO); |
| 947 | } else | 953 | } |
| 948 | BUG_ON(!b->written); | 954 | |
| 955 | BUG_ON(!b->written); | ||
| 949 | 956 | ||
| 950 | return b; | 957 | return b; |
| 951 | } | 958 | } |
| @@ -959,7 +966,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level) | |||
| 959 | mutex_unlock(&c->bucket_lock); | 966 | mutex_unlock(&c->bucket_lock); |
| 960 | 967 | ||
| 961 | if (!IS_ERR_OR_NULL(b)) { | 968 | if (!IS_ERR_OR_NULL(b)) { |
| 962 | bch_btree_read(b); | 969 | bch_btree_node_read(b); |
| 963 | rw_unlock(true, b); | 970 | rw_unlock(true, b); |
| 964 | } | 971 | } |
| 965 | } | 972 | } |
| @@ -970,24 +977,19 @@ static void btree_node_free(struct btree *b, struct btree_op *op) | |||
| 970 | { | 977 | { |
| 971 | unsigned i; | 978 | unsigned i; |
| 972 | 979 | ||
| 980 | trace_bcache_btree_node_free(b); | ||
| 981 | |||
| 973 | /* | 982 | /* |
| 974 | * The BUG_ON() in btree_node_get() implies that we must have a write | 983 | * The BUG_ON() in btree_node_get() implies that we must have a write |
| 975 | * lock on parent to free or even invalidate a node | 984 | * lock on parent to free or even invalidate a node |
| 976 | */ | 985 | */ |
| 977 | BUG_ON(op->lock <= b->level); | 986 | BUG_ON(op->lock <= b->level); |
| 978 | BUG_ON(b == b->c->root); | 987 | BUG_ON(b == b->c->root); |
| 979 | pr_debug("bucket %s", pbtree(b)); | ||
| 980 | 988 | ||
| 981 | if (btree_node_dirty(b)) | 989 | if (btree_node_dirty(b)) |
| 982 | btree_complete_write(b, btree_current_write(b)); | 990 | btree_complete_write(b, btree_current_write(b)); |
| 983 | clear_bit(BTREE_NODE_dirty, &b->flags); | 991 | clear_bit(BTREE_NODE_dirty, &b->flags); |
| 984 | 992 | ||
| 985 | if (b->prio_blocked && | ||
| 986 | !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked)) | ||
| 987 | wake_up(&b->c->alloc_wait); | ||
| 988 | |||
| 989 | b->prio_blocked = 0; | ||
| 990 | |||
| 991 | cancel_delayed_work(&b->work); | 993 | cancel_delayed_work(&b->work); |
| 992 | 994 | ||
| 993 | mutex_lock(&b->c->bucket_lock); | 995 | mutex_lock(&b->c->bucket_lock); |
| @@ -1028,17 +1030,20 @@ retry: | |||
| 1028 | goto retry; | 1030 | goto retry; |
| 1029 | } | 1031 | } |
| 1030 | 1032 | ||
| 1031 | set_btree_node_read_done(b); | ||
| 1032 | b->accessed = 1; | 1033 | b->accessed = 1; |
| 1033 | bch_bset_init_next(b); | 1034 | bch_bset_init_next(b); |
| 1034 | 1035 | ||
| 1035 | mutex_unlock(&c->bucket_lock); | 1036 | mutex_unlock(&c->bucket_lock); |
| 1037 | |||
| 1038 | trace_bcache_btree_node_alloc(b); | ||
| 1036 | return b; | 1039 | return b; |
| 1037 | err_free: | 1040 | err_free: |
| 1038 | bch_bucket_free(c, &k.key); | 1041 | bch_bucket_free(c, &k.key); |
| 1039 | __bkey_put(c, &k.key); | 1042 | __bkey_put(c, &k.key); |
| 1040 | err: | 1043 | err: |
| 1041 | mutex_unlock(&c->bucket_lock); | 1044 | mutex_unlock(&c->bucket_lock); |
| 1045 | |||
| 1046 | trace_bcache_btree_node_alloc_fail(b); | ||
| 1042 | return b; | 1047 | return b; |
| 1043 | } | 1048 | } |
| 1044 | 1049 | ||
| @@ -1137,11 +1142,8 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys, | |||
| 1137 | gc->nkeys++; | 1142 | gc->nkeys++; |
| 1138 | 1143 | ||
| 1139 | gc->data += KEY_SIZE(k); | 1144 | gc->data += KEY_SIZE(k); |
| 1140 | if (KEY_DIRTY(k)) { | 1145 | if (KEY_DIRTY(k)) |
| 1141 | gc->dirty += KEY_SIZE(k); | 1146 | gc->dirty += KEY_SIZE(k); |
| 1142 | if (d) | ||
| 1143 | d->sectors_dirty_gc += KEY_SIZE(k); | ||
| 1144 | } | ||
| 1145 | } | 1147 | } |
| 1146 | 1148 | ||
| 1147 | for (t = b->sets; t <= &b->sets[b->nsets]; t++) | 1149 | for (t = b->sets; t <= &b->sets[b->nsets]; t++) |
| @@ -1166,14 +1168,11 @@ static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k, | |||
| 1166 | 1168 | ||
| 1167 | if (!IS_ERR_OR_NULL(n)) { | 1169 | if (!IS_ERR_OR_NULL(n)) { |
| 1168 | swap(b, n); | 1170 | swap(b, n); |
| 1171 | __bkey_put(b->c, &b->key); | ||
| 1169 | 1172 | ||
| 1170 | memcpy(k->ptr, b->key.ptr, | 1173 | memcpy(k->ptr, b->key.ptr, |
| 1171 | sizeof(uint64_t) * KEY_PTRS(&b->key)); | 1174 | sizeof(uint64_t) * KEY_PTRS(&b->key)); |
| 1172 | 1175 | ||
| 1173 | __bkey_put(b->c, &b->key); | ||
| 1174 | atomic_inc(&b->c->prio_blocked); | ||
| 1175 | b->prio_blocked++; | ||
| 1176 | |||
| 1177 | btree_node_free(n, op); | 1176 | btree_node_free(n, op); |
| 1178 | up_write(&n->lock); | 1177 | up_write(&n->lock); |
| 1179 | } | 1178 | } |
| @@ -1278,7 +1277,7 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op, | |||
| 1278 | btree_node_free(r->b, op); | 1277 | btree_node_free(r->b, op); |
| 1279 | up_write(&r->b->lock); | 1278 | up_write(&r->b->lock); |
| 1280 | 1279 | ||
| 1281 | pr_debug("coalesced %u nodes", nodes); | 1280 | trace_bcache_btree_gc_coalesce(nodes); |
| 1282 | 1281 | ||
| 1283 | gc->nodes--; | 1282 | gc->nodes--; |
| 1284 | nodes--; | 1283 | nodes--; |
| @@ -1293,14 +1292,9 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, | |||
| 1293 | void write(struct btree *r) | 1292 | void write(struct btree *r) |
| 1294 | { | 1293 | { |
| 1295 | if (!r->written) | 1294 | if (!r->written) |
| 1296 | bch_btree_write(r, true, op); | 1295 | bch_btree_node_write(r, &op->cl); |
| 1297 | else if (btree_node_dirty(r)) { | 1296 | else if (btree_node_dirty(r)) |
| 1298 | BUG_ON(btree_current_write(r)->owner); | 1297 | bch_btree_node_write(r, writes); |
| 1299 | btree_current_write(r)->owner = writes; | ||
| 1300 | closure_get(writes); | ||
| 1301 | |||
| 1302 | bch_btree_write(r, true, NULL); | ||
| 1303 | } | ||
| 1304 | 1298 | ||
| 1305 | up_write(&r->lock); | 1299 | up_write(&r->lock); |
| 1306 | } | 1300 | } |
| @@ -1386,9 +1380,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op, | |||
| 1386 | ret = btree_gc_recurse(b, op, writes, gc); | 1380 | ret = btree_gc_recurse(b, op, writes, gc); |
| 1387 | 1381 | ||
| 1388 | if (!b->written || btree_node_dirty(b)) { | 1382 | if (!b->written || btree_node_dirty(b)) { |
| 1389 | atomic_inc(&b->c->prio_blocked); | 1383 | bch_btree_node_write(b, n ? &op->cl : NULL); |
| 1390 | b->prio_blocked++; | ||
| 1391 | bch_btree_write(b, true, n ? op : NULL); | ||
| 1392 | } | 1384 | } |
| 1393 | 1385 | ||
| 1394 | if (!IS_ERR_OR_NULL(n)) { | 1386 | if (!IS_ERR_OR_NULL(n)) { |
| @@ -1405,7 +1397,6 @@ static void btree_gc_start(struct cache_set *c) | |||
| 1405 | { | 1397 | { |
| 1406 | struct cache *ca; | 1398 | struct cache *ca; |
| 1407 | struct bucket *b; | 1399 | struct bucket *b; |
| 1408 | struct bcache_device **d; | ||
| 1409 | unsigned i; | 1400 | unsigned i; |
| 1410 | 1401 | ||
| 1411 | if (!c->gc_mark_valid) | 1402 | if (!c->gc_mark_valid) |
| @@ -1419,16 +1410,12 @@ static void btree_gc_start(struct cache_set *c) | |||
| 1419 | for_each_cache(ca, c, i) | 1410 | for_each_cache(ca, c, i) |
| 1420 | for_each_bucket(b, ca) { | 1411 | for_each_bucket(b, ca) { |
| 1421 | b->gc_gen = b->gen; | 1412 | b->gc_gen = b->gen; |
| 1422 | if (!atomic_read(&b->pin)) | 1413 | if (!atomic_read(&b->pin)) { |
| 1423 | SET_GC_MARK(b, GC_MARK_RECLAIMABLE); | 1414 | SET_GC_MARK(b, GC_MARK_RECLAIMABLE); |
| 1415 | SET_GC_SECTORS_USED(b, 0); | ||
| 1416 | } | ||
| 1424 | } | 1417 | } |
| 1425 | 1418 | ||
| 1426 | for (d = c->devices; | ||
| 1427 | d < c->devices + c->nr_uuids; | ||
| 1428 | d++) | ||
| 1429 | if (*d) | ||
| 1430 | (*d)->sectors_dirty_gc = 0; | ||
| 1431 | |||
| 1432 | mutex_unlock(&c->bucket_lock); | 1419 | mutex_unlock(&c->bucket_lock); |
| 1433 | } | 1420 | } |
| 1434 | 1421 | ||
| @@ -1437,7 +1424,6 @@ size_t bch_btree_gc_finish(struct cache_set *c) | |||
| 1437 | size_t available = 0; | 1424 | size_t available = 0; |
| 1438 | struct bucket *b; | 1425 | struct bucket *b; |
| 1439 | struct cache *ca; | 1426 | struct cache *ca; |
| 1440 | struct bcache_device **d; | ||
| 1441 | unsigned i; | 1427 | unsigned i; |
| 1442 | 1428 | ||
| 1443 | mutex_lock(&c->bucket_lock); | 1429 | mutex_lock(&c->bucket_lock); |
| @@ -1480,22 +1466,6 @@ size_t bch_btree_gc_finish(struct cache_set *c) | |||
| 1480 | } | 1466 | } |
| 1481 | } | 1467 | } |
| 1482 | 1468 | ||
| 1483 | for (d = c->devices; | ||
| 1484 | d < c->devices + c->nr_uuids; | ||
| 1485 | d++) | ||
| 1486 | if (*d) { | ||
| 1487 | unsigned long last = | ||
| 1488 | atomic_long_read(&((*d)->sectors_dirty)); | ||
| 1489 | long difference = (*d)->sectors_dirty_gc - last; | ||
| 1490 | |||
| 1491 | pr_debug("sectors dirty off by %li", difference); | ||
| 1492 | |||
| 1493 | (*d)->sectors_dirty_last += difference; | ||
| 1494 | |||
| 1495 | atomic_long_set(&((*d)->sectors_dirty), | ||
| 1496 | (*d)->sectors_dirty_gc); | ||
| 1497 | } | ||
| 1498 | |||
| 1499 | mutex_unlock(&c->bucket_lock); | 1469 | mutex_unlock(&c->bucket_lock); |
| 1500 | return available; | 1470 | return available; |
| 1501 | } | 1471 | } |
| @@ -1508,10 +1478,9 @@ static void bch_btree_gc(struct closure *cl) | |||
| 1508 | struct gc_stat stats; | 1478 | struct gc_stat stats; |
| 1509 | struct closure writes; | 1479 | struct closure writes; |
| 1510 | struct btree_op op; | 1480 | struct btree_op op; |
| 1511 | |||
| 1512 | uint64_t start_time = local_clock(); | 1481 | uint64_t start_time = local_clock(); |
| 1513 | trace_bcache_gc_start(c->sb.set_uuid); | 1482 | |
| 1514 | blktrace_msg_all(c, "Starting gc"); | 1483 | trace_bcache_gc_start(c); |
| 1515 | 1484 | ||
| 1516 | memset(&stats, 0, sizeof(struct gc_stat)); | 1485 | memset(&stats, 0, sizeof(struct gc_stat)); |
| 1517 | closure_init_stack(&writes); | 1486 | closure_init_stack(&writes); |
| @@ -1520,14 +1489,14 @@ static void bch_btree_gc(struct closure *cl) | |||
| 1520 | 1489 | ||
| 1521 | btree_gc_start(c); | 1490 | btree_gc_start(c); |
| 1522 | 1491 | ||
| 1492 | atomic_inc(&c->prio_blocked); | ||
| 1493 | |||
| 1523 | ret = btree_root(gc_root, c, &op, &writes, &stats); | 1494 | ret = btree_root(gc_root, c, &op, &writes, &stats); |
| 1524 | closure_sync(&op.cl); | 1495 | closure_sync(&op.cl); |
| 1525 | closure_sync(&writes); | 1496 | closure_sync(&writes); |
| 1526 | 1497 | ||
| 1527 | if (ret) { | 1498 | if (ret) { |
| 1528 | blktrace_msg_all(c, "Stopped gc"); | ||
| 1529 | pr_warn("gc failed!"); | 1499 | pr_warn("gc failed!"); |
| 1530 | |||
| 1531 | continue_at(cl, bch_btree_gc, bch_gc_wq); | 1500 | continue_at(cl, bch_btree_gc, bch_gc_wq); |
| 1532 | } | 1501 | } |
| 1533 | 1502 | ||
| @@ -1537,6 +1506,9 @@ static void bch_btree_gc(struct closure *cl) | |||
| 1537 | 1506 | ||
| 1538 | available = bch_btree_gc_finish(c); | 1507 | available = bch_btree_gc_finish(c); |
| 1539 | 1508 | ||
| 1509 | atomic_dec(&c->prio_blocked); | ||
| 1510 | wake_up_allocators(c); | ||
| 1511 | |||
| 1540 | bch_time_stats_update(&c->btree_gc_time, start_time); | 1512 | bch_time_stats_update(&c->btree_gc_time, start_time); |
| 1541 | 1513 | ||
| 1542 | stats.key_bytes *= sizeof(uint64_t); | 1514 | stats.key_bytes *= sizeof(uint64_t); |
| @@ -1544,10 +1516,8 @@ static void bch_btree_gc(struct closure *cl) | |||
| 1544 | stats.data <<= 9; | 1516 | stats.data <<= 9; |
| 1545 | stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; | 1517 | stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets; |
| 1546 | memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); | 1518 | memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); |
| 1547 | blktrace_msg_all(c, "Finished gc"); | ||
| 1548 | 1519 | ||
| 1549 | trace_bcache_gc_end(c->sb.set_uuid); | 1520 | trace_bcache_gc_end(c); |
| 1550 | wake_up(&c->alloc_wait); | ||
| 1551 | 1521 | ||
| 1552 | continue_at(cl, bch_moving_gc, bch_gc_wq); | 1522 | continue_at(cl, bch_moving_gc, bch_gc_wq); |
| 1553 | } | 1523 | } |
| @@ -1654,14 +1624,14 @@ static bool fix_overlapping_extents(struct btree *b, | |||
| 1654 | struct btree_iter *iter, | 1624 | struct btree_iter *iter, |
| 1655 | struct btree_op *op) | 1625 | struct btree_op *op) |
| 1656 | { | 1626 | { |
| 1657 | void subtract_dirty(struct bkey *k, int sectors) | 1627 | void subtract_dirty(struct bkey *k, uint64_t offset, int sectors) |
| 1658 | { | 1628 | { |
| 1659 | struct bcache_device *d = b->c->devices[KEY_INODE(k)]; | 1629 | if (KEY_DIRTY(k)) |
| 1660 | 1630 | bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), | |
| 1661 | if (KEY_DIRTY(k) && d) | 1631 | offset, -sectors); |
| 1662 | atomic_long_sub(sectors, &d->sectors_dirty); | ||
| 1663 | } | 1632 | } |
| 1664 | 1633 | ||
| 1634 | uint64_t old_offset; | ||
| 1665 | unsigned old_size, sectors_found = 0; | 1635 | unsigned old_size, sectors_found = 0; |
| 1666 | 1636 | ||
| 1667 | while (1) { | 1637 | while (1) { |
| @@ -1673,6 +1643,7 @@ static bool fix_overlapping_extents(struct btree *b, | |||
| 1673 | if (bkey_cmp(k, &START_KEY(insert)) <= 0) | 1643 | if (bkey_cmp(k, &START_KEY(insert)) <= 0) |
| 1674 | continue; | 1644 | continue; |
| 1675 | 1645 | ||
| 1646 | old_offset = KEY_START(k); | ||
| 1676 | old_size = KEY_SIZE(k); | 1647 | old_size = KEY_SIZE(k); |
| 1677 | 1648 | ||
| 1678 | /* | 1649 | /* |
| @@ -1728,7 +1699,7 @@ static bool fix_overlapping_extents(struct btree *b, | |||
| 1728 | 1699 | ||
| 1729 | struct bkey *top; | 1700 | struct bkey *top; |
| 1730 | 1701 | ||
| 1731 | subtract_dirty(k, KEY_SIZE(insert)); | 1702 | subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert)); |
| 1732 | 1703 | ||
| 1733 | if (bkey_written(b, k)) { | 1704 | if (bkey_written(b, k)) { |
| 1734 | /* | 1705 | /* |
| @@ -1775,7 +1746,7 @@ static bool fix_overlapping_extents(struct btree *b, | |||
| 1775 | } | 1746 | } |
| 1776 | } | 1747 | } |
| 1777 | 1748 | ||
| 1778 | subtract_dirty(k, old_size - KEY_SIZE(k)); | 1749 | subtract_dirty(k, old_offset, old_size - KEY_SIZE(k)); |
| 1779 | } | 1750 | } |
| 1780 | 1751 | ||
| 1781 | check_failed: | 1752 | check_failed: |
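subtract_dirty() now needs the key's start offset because bcache_dev_sectors_dirty_add() in this series accounts dirty data per fixed-size stripe of the backing device rather than as one per-device total, and negative deltas must land on the right stripes. A hedged sketch of that accounting (the stripe size is an assumption; the real value is stored per device):

```c
#include <linux/atomic.h>
#include <linux/kernel.h>

#define STRIPE_SECTORS 1024	/* assumed; real code stores it per device */

/* nr_sectors may be negative: overwritten extents subtract their size */
static void sectors_dirty_add(atomic_t *stripe_dirty,
			      uint64_t offset, int nr_sectors)
{
	unsigned stripe = offset / STRIPE_SECTORS;

	while (nr_sectors) {
		/* portion of the extent inside the current stripe */
		int s = min_t(int, abs(nr_sectors),
			      STRIPE_SECTORS - offset % STRIPE_SECTORS);
		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, stripe_dirty + stripe);

		nr_sectors -= s;
		offset += abs(s);
		stripe++;
	}
}
```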
| @@ -1798,7 +1769,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op, | |||
| 1798 | { | 1769 | { |
| 1799 | struct bset *i = b->sets[b->nsets].data; | 1770 | struct bset *i = b->sets[b->nsets].data; |
| 1800 | struct bkey *m, *prev; | 1771 | struct bkey *m, *prev; |
| 1801 | const char *status = "insert"; | 1772 | unsigned status = BTREE_INSERT_STATUS_INSERT; |
| 1802 | 1773 | ||
| 1803 | BUG_ON(bkey_cmp(k, &b->key) > 0); | 1774 | BUG_ON(bkey_cmp(k, &b->key) > 0); |
| 1804 | BUG_ON(b->level && !KEY_PTRS(k)); | 1775 | BUG_ON(b->level && !KEY_PTRS(k)); |
| @@ -1831,17 +1802,17 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op, | |||
| 1831 | goto insert; | 1802 | goto insert; |
| 1832 | 1803 | ||
| 1833 | /* prev is in the tree, if we merge we're done */ | 1804 | /* prev is in the tree, if we merge we're done */ |
| 1834 | status = "back merging"; | 1805 | status = BTREE_INSERT_STATUS_BACK_MERGE; |
| 1835 | if (prev && | 1806 | if (prev && |
| 1836 | bch_bkey_try_merge(b, prev, k)) | 1807 | bch_bkey_try_merge(b, prev, k)) |
| 1837 | goto merged; | 1808 | goto merged; |
| 1838 | 1809 | ||
| 1839 | status = "overwrote front"; | 1810 | status = BTREE_INSERT_STATUS_OVERWROTE; |
| 1840 | if (m != end(i) && | 1811 | if (m != end(i) && |
| 1841 | KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) | 1812 | KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) |
| 1842 | goto copy; | 1813 | goto copy; |
| 1843 | 1814 | ||
| 1844 | status = "front merge"; | 1815 | status = BTREE_INSERT_STATUS_FRONT_MERGE; |
| 1845 | if (m != end(i) && | 1816 | if (m != end(i) && |
| 1846 | bch_bkey_try_merge(b, k, m)) | 1817 | bch_bkey_try_merge(b, k, m)) |
| 1847 | goto copy; | 1818 | goto copy; |
| @@ -1851,21 +1822,21 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op, | |||
| 1851 | insert: shift_keys(b, m, k); | 1822 | insert: shift_keys(b, m, k); |
| 1852 | copy: bkey_copy(m, k); | 1823 | copy: bkey_copy(m, k); |
| 1853 | merged: | 1824 | merged: |
| 1854 | bch_check_keys(b, "%s for %s at %s: %s", status, | 1825 | if (KEY_DIRTY(k)) |
| 1855 | op_type(op), pbtree(b), pkey(k)); | 1826 | bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), |
| 1856 | bch_check_key_order_msg(b, i, "%s for %s at %s: %s", status, | 1827 | KEY_START(k), KEY_SIZE(k)); |
| 1857 | op_type(op), pbtree(b), pkey(k)); | 1828 | |
| 1829 | bch_check_keys(b, "%u for %s", status, op_type(op)); | ||
| 1858 | 1830 | ||
| 1859 | if (b->level && !KEY_OFFSET(k)) | 1831 | if (b->level && !KEY_OFFSET(k)) |
| 1860 | b->prio_blocked++; | 1832 | btree_current_write(b)->prio_blocked++; |
| 1861 | 1833 | ||
| 1862 | pr_debug("%s for %s at %s: %s", status, | 1834 | trace_bcache_btree_insert_key(b, k, op->type, status); |
| 1863 | op_type(op), pbtree(b), pkey(k)); | ||
| 1864 | 1835 | ||
| 1865 | return true; | 1836 | return true; |
| 1866 | } | 1837 | } |
| 1867 | 1838 | ||
| 1868 | bool bch_btree_insert_keys(struct btree *b, struct btree_op *op) | 1839 | static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op) |
| 1869 | { | 1840 | { |
| 1870 | bool ret = false; | 1841 | bool ret = false; |
| 1871 | struct bkey *k; | 1842 | struct bkey *k; |
| @@ -1896,7 +1867,7 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op, | |||
| 1896 | should_split(b)) | 1867 | should_split(b)) |
| 1897 | goto out; | 1868 | goto out; |
| 1898 | 1869 | ||
| 1899 | op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio)); | 1870 | op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio)); |
| 1900 | 1871 | ||
| 1901 | SET_KEY_PTRS(&op->replace, 1); | 1872 | SET_KEY_PTRS(&op->replace, 1); |
| 1902 | get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t)); | 1873 | get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t)); |
| @@ -1907,7 +1878,6 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op, | |||
| 1907 | 1878 | ||
| 1908 | BUG_ON(op->type != BTREE_INSERT); | 1879 | BUG_ON(op->type != BTREE_INSERT); |
| 1909 | BUG_ON(!btree_insert_key(b, op, &tmp.k)); | 1880 | BUG_ON(!btree_insert_key(b, op, &tmp.k)); |
| 1910 | bch_btree_write(b, false, NULL); | ||
| 1911 | ret = true; | 1881 | ret = true; |
| 1912 | out: | 1882 | out: |
| 1913 | downgrade_write(&b->lock); | 1883 | downgrade_write(&b->lock); |
| @@ -1929,12 +1899,11 @@ static int btree_split(struct btree *b, struct btree_op *op) | |||
| 1929 | 1899 | ||
| 1930 | split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5; | 1900 | split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5; |
| 1931 | 1901 | ||
| 1932 | pr_debug("%ssplitting at %s keys %i", split ? "" : "not ", | ||
| 1933 | pbtree(b), n1->sets[0].data->keys); | ||
| 1934 | |||
| 1935 | if (split) { | 1902 | if (split) { |
| 1936 | unsigned keys = 0; | 1903 | unsigned keys = 0; |
| 1937 | 1904 | ||
| 1905 | trace_bcache_btree_node_split(b, n1->sets[0].data->keys); | ||
| 1906 | |||
| 1938 | n2 = bch_btree_node_alloc(b->c, b->level, &op->cl); | 1907 | n2 = bch_btree_node_alloc(b->c, b->level, &op->cl); |
| 1939 | if (IS_ERR(n2)) | 1908 | if (IS_ERR(n2)) |
| 1940 | goto err_free1; | 1909 | goto err_free1; |
| @@ -1967,18 +1936,21 @@ static int btree_split(struct btree *b, struct btree_op *op) | |||
| 1967 | bkey_copy_key(&n2->key, &b->key); | 1936 | bkey_copy_key(&n2->key, &b->key); |
| 1968 | 1937 | ||
| 1969 | bch_keylist_add(&op->keys, &n2->key); | 1938 | bch_keylist_add(&op->keys, &n2->key); |
| 1970 | bch_btree_write(n2, true, op); | 1939 | bch_btree_node_write(n2, &op->cl); |
| 1971 | rw_unlock(true, n2); | 1940 | rw_unlock(true, n2); |
| 1972 | } else | 1941 | } else { |
| 1942 | trace_bcache_btree_node_compact(b, n1->sets[0].data->keys); | ||
| 1943 | |||
| 1973 | bch_btree_insert_keys(n1, op); | 1944 | bch_btree_insert_keys(n1, op); |
| 1945 | } | ||
| 1974 | 1946 | ||
| 1975 | bch_keylist_add(&op->keys, &n1->key); | 1947 | bch_keylist_add(&op->keys, &n1->key); |
| 1976 | bch_btree_write(n1, true, op); | 1948 | bch_btree_node_write(n1, &op->cl); |
| 1977 | 1949 | ||
| 1978 | if (n3) { | 1950 | if (n3) { |
| 1979 | bkey_copy_key(&n3->key, &MAX_KEY); | 1951 | bkey_copy_key(&n3->key, &MAX_KEY); |
| 1980 | bch_btree_insert_keys(n3, op); | 1952 | bch_btree_insert_keys(n3, op); |
| 1981 | bch_btree_write(n3, true, op); | 1953 | bch_btree_node_write(n3, &op->cl); |
| 1982 | 1954 | ||
| 1983 | closure_sync(&op->cl); | 1955 | closure_sync(&op->cl); |
| 1984 | bch_btree_set_root(n3); | 1956 | bch_btree_set_root(n3); |
| @@ -2082,8 +2054,12 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op, | |||
| 2082 | 2054 | ||
| 2083 | BUG_ON(write_block(b) != b->sets[b->nsets].data); | 2055 | BUG_ON(write_block(b) != b->sets[b->nsets].data); |
| 2084 | 2056 | ||
| 2085 | if (bch_btree_insert_keys(b, op)) | 2057 | if (bch_btree_insert_keys(b, op)) { |
| 2086 | bch_btree_write(b, false, op); | 2058 | if (!b->level) |
| 2059 | bch_btree_leaf_dirty(b, op); | ||
| 2060 | else | ||
| 2061 | bch_btree_node_write(b, &op->cl); | ||
| 2062 | } | ||
| 2087 | } | 2063 | } |
| 2088 | 2064 | ||
| 2089 | return 0; | 2065 | return 0; |
| @@ -2140,6 +2116,11 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c) | |||
| 2140 | void bch_btree_set_root(struct btree *b) | 2116 | void bch_btree_set_root(struct btree *b) |
| 2141 | { | 2117 | { |
| 2142 | unsigned i; | 2118 | unsigned i; |
| 2119 | struct closure cl; | ||
| 2120 | |||
| 2121 | closure_init_stack(&cl); | ||
| 2122 | |||
| 2123 | trace_bcache_btree_set_root(b); | ||
| 2143 | 2124 | ||
| 2144 | BUG_ON(!b->written); | 2125 | BUG_ON(!b->written); |
| 2145 | 2126 | ||
| @@ -2153,8 +2134,8 @@ void bch_btree_set_root(struct btree *b) | |||
| 2153 | b->c->root = b; | 2134 | b->c->root = b; |
| 2154 | __bkey_put(b->c, &b->key); | 2135 | __bkey_put(b->c, &b->key); |
| 2155 | 2136 | ||
| 2156 | bch_journal_meta(b->c, NULL); | 2137 | bch_journal_meta(b->c, &cl); |
| 2157 | pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0)); | 2138 | closure_sync(&cl); |
| 2158 | } | 2139 | } |
| 2159 | 2140 | ||
| 2160 | /* Cache lookup */ | 2141 | /* Cache lookup */ |
| @@ -2215,9 +2196,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op, | |||
| 2215 | KEY_OFFSET(k) - bio->bi_sector); | 2196 | KEY_OFFSET(k) - bio->bi_sector); |
| 2216 | 2197 | ||
| 2217 | n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); | 2198 | n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); |
| 2218 | if (!n) | ||
| 2219 | return -EAGAIN; | ||
| 2220 | |||
| 2221 | if (n == bio) | 2199 | if (n == bio) |
| 2222 | op->lookup_done = true; | 2200 | op->lookup_done = true; |
| 2223 | 2201 | ||
| @@ -2240,7 +2218,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op, | |||
| 2240 | n->bi_end_io = bch_cache_read_endio; | 2218 | n->bi_end_io = bch_cache_read_endio; |
| 2241 | n->bi_private = &s->cl; | 2219 | n->bi_private = &s->cl; |
| 2242 | 2220 | ||
| 2243 | trace_bcache_cache_hit(n); | ||
| 2244 | __bch_submit_bbio(n, b->c); | 2221 | __bch_submit_bbio(n, b->c); |
| 2245 | } | 2222 | } |
| 2246 | 2223 | ||
| @@ -2257,9 +2234,6 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op) | |||
| 2257 | struct btree_iter iter; | 2234 | struct btree_iter iter; |
| 2258 | bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0)); | 2235 | bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0)); |
| 2259 | 2236 | ||
| 2260 | pr_debug("at %s searching for %u:%llu", pbtree(b), op->inode, | ||
| 2261 | (uint64_t) bio->bi_sector); | ||
| 2262 | |||
| 2263 | do { | 2237 | do { |
| 2264 | k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); | 2238 | k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); |
| 2265 | if (!k) { | 2239 | if (!k) { |
| @@ -2303,7 +2277,8 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, | |||
| 2303 | } | 2277 | } |
| 2304 | 2278 | ||
| 2305 | static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, | 2279 | static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, |
| 2306 | struct keybuf *buf, struct bkey *end) | 2280 | struct keybuf *buf, struct bkey *end, |
| 2281 | keybuf_pred_fn *pred) | ||
| 2307 | { | 2282 | { |
| 2308 | struct btree_iter iter; | 2283 | struct btree_iter iter; |
| 2309 | bch_btree_iter_init(b, &iter, &buf->last_scanned); | 2284 | bch_btree_iter_init(b, &iter, &buf->last_scanned); |
| @@ -2322,11 +2297,9 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, | |||
| 2322 | if (bkey_cmp(&buf->last_scanned, end) >= 0) | 2297 | if (bkey_cmp(&buf->last_scanned, end) >= 0) |
| 2323 | break; | 2298 | break; |
| 2324 | 2299 | ||
| 2325 | if (buf->key_predicate(buf, k)) { | 2300 | if (pred(buf, k)) { |
| 2326 | struct keybuf_key *w; | 2301 | struct keybuf_key *w; |
| 2327 | 2302 | ||
| 2328 | pr_debug("%s", pkey(k)); | ||
| 2329 | |||
| 2330 | spin_lock(&buf->lock); | 2303 | spin_lock(&buf->lock); |
| 2331 | 2304 | ||
| 2332 | w = array_alloc(&buf->freelist); | 2305 | w = array_alloc(&buf->freelist); |
| @@ -2343,7 +2316,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, | |||
| 2343 | if (!k) | 2316 | if (!k) |
| 2344 | break; | 2317 | break; |
| 2345 | 2318 | ||
| 2346 | btree(refill_keybuf, k, b, op, buf, end); | 2319 | btree(refill_keybuf, k, b, op, buf, end, pred); |
| 2347 | /* | 2320 | /* |
| 2348 | * Might get an error here, but can't really do anything | 2321 | * Might get an error here, but can't really do anything |
| 2349 | * and it'll get logged elsewhere. Just read what we | 2322 | * and it'll get logged elsewhere. Just read what we |
| @@ -2361,7 +2334,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, | |||
| 2361 | } | 2334 | } |
| 2362 | 2335 | ||
| 2363 | void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, | 2336 | void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, |
| 2364 | struct bkey *end) | 2337 | struct bkey *end, keybuf_pred_fn *pred) |
| 2365 | { | 2338 | { |
| 2366 | struct bkey start = buf->last_scanned; | 2339 | struct bkey start = buf->last_scanned; |
| 2367 | struct btree_op op; | 2340 | struct btree_op op; |
| @@ -2369,7 +2342,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, | |||
| 2369 | 2342 | ||
| 2370 | cond_resched(); | 2343 | cond_resched(); |
| 2371 | 2344 | ||
| 2372 | btree_root(refill_keybuf, c, &op, buf, end); | 2345 | btree_root(refill_keybuf, c, &op, buf, end, pred); |
| 2373 | closure_sync(&op.cl); | 2346 | closure_sync(&op.cl); |
| 2374 | 2347 | ||
| 2375 | pr_debug("found %s keys from %llu:%llu to %llu:%llu", | 2348 | pr_debug("found %s keys from %llu:%llu to %llu:%llu", |
| @@ -2455,7 +2428,8 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf) | |||
| 2455 | 2428 | ||
| 2456 | struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, | 2429 | struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, |
| 2457 | struct keybuf *buf, | 2430 | struct keybuf *buf, |
| 2458 | struct bkey *end) | 2431 | struct bkey *end, |
| 2432 | keybuf_pred_fn *pred) | ||
| 2459 | { | 2433 | { |
| 2460 | struct keybuf_key *ret; | 2434 | struct keybuf_key *ret; |
| 2461 | 2435 | ||
| @@ -2469,15 +2443,14 @@ struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, | |||
| 2469 | break; | 2443 | break; |
| 2470 | } | 2444 | } |
| 2471 | 2445 | ||
| 2472 | bch_refill_keybuf(c, buf, end); | 2446 | bch_refill_keybuf(c, buf, end, pred); |
| 2473 | } | 2447 | } |
| 2474 | 2448 | ||
| 2475 | return ret; | 2449 | return ret; |
| 2476 | } | 2450 | } |
| 2477 | 2451 | ||
| 2478 | void bch_keybuf_init(struct keybuf *buf, keybuf_pred_fn *fn) | 2452 | void bch_keybuf_init(struct keybuf *buf) |
| 2479 | { | 2453 | { |
| 2480 | buf->key_predicate = fn; | ||
| 2481 | buf->last_scanned = MAX_KEY; | 2454 | buf->last_scanned = MAX_KEY; |
| 2482 | buf->keys = RB_ROOT; | 2455 | buf->keys = RB_ROOT; |
| 2483 | 2456 | ||
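bch_keybuf_init() no longer captures the predicate; callers now pass a keybuf_pred_fn each time the buffer is refilled, so one buffer can be scanned with different filters. A hedged sketch of the callback shape (the dirty-extent predicate mirrors what a writeback caller would plausibly pass):

```c
typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);

/* Example predicate: pick only dirty extents, e.g. for writeback. */
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

/* usage (assumed): bch_refill_keybuf(c, buf, &end, dirty_pred); */
```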
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index af4a7092a28c..3333d3723633 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h | |||
| @@ -102,7 +102,6 @@ | |||
| 102 | #include "debug.h" | 102 | #include "debug.h" |
| 103 | 103 | ||
| 104 | struct btree_write { | 104 | struct btree_write { |
| 105 | struct closure *owner; | ||
| 106 | atomic_t *journal; | 105 | atomic_t *journal; |
| 107 | 106 | ||
| 108 | /* If btree_split() frees a btree node, it writes a new pointer to that | 107 | /* If btree_split() frees a btree node, it writes a new pointer to that |
| @@ -142,16 +141,12 @@ struct btree { | |||
| 142 | */ | 141 | */ |
| 143 | struct bset_tree sets[MAX_BSETS]; | 142 | struct bset_tree sets[MAX_BSETS]; |
| 144 | 143 | ||
| 145 | /* Used to refcount bio splits, also protects b->bio */ | 144 | /* For outstanding btree writes, used as a lock - protects write_idx */ |
| 146 | struct closure_with_waitlist io; | 145 | struct closure_with_waitlist io; |
| 147 | 146 | ||
| 148 | /* Gets transferred to w->prio_blocked - see the comment there */ | ||
| 149 | int prio_blocked; | ||
| 150 | |||
| 151 | struct list_head list; | 147 | struct list_head list; |
| 152 | struct delayed_work work; | 148 | struct delayed_work work; |
| 153 | 149 | ||
| 154 | uint64_t io_start_time; | ||
| 155 | struct btree_write writes[2]; | 150 | struct btree_write writes[2]; |
| 156 | struct bio *bio; | 151 | struct bio *bio; |
| 157 | }; | 152 | }; |
| @@ -164,13 +159,11 @@ static inline void set_btree_node_ ## flag(struct btree *b) \ | |||
| 164 | { set_bit(BTREE_NODE_ ## flag, &b->flags); } \ | 159 | { set_bit(BTREE_NODE_ ## flag, &b->flags); } \ |
| 165 | 160 | ||
| 166 | enum btree_flags { | 161 | enum btree_flags { |
| 167 | BTREE_NODE_read_done, | ||
| 168 | BTREE_NODE_io_error, | 162 | BTREE_NODE_io_error, |
| 169 | BTREE_NODE_dirty, | 163 | BTREE_NODE_dirty, |
| 170 | BTREE_NODE_write_idx, | 164 | BTREE_NODE_write_idx, |
| 171 | }; | 165 | }; |
| 172 | 166 | ||
| 173 | BTREE_FLAG(read_done); | ||
| 174 | BTREE_FLAG(io_error); | 167 | BTREE_FLAG(io_error); |
| 175 | BTREE_FLAG(dirty); | 168 | BTREE_FLAG(dirty); |
| 176 | BTREE_FLAG(write_idx); | 169 | BTREE_FLAG(write_idx); |
| @@ -278,6 +271,13 @@ struct btree_op { | |||
| 278 | BKEY_PADDED(replace); | 271 | BKEY_PADDED(replace); |
| 279 | }; | 272 | }; |
| 280 | 273 | ||
| 274 | enum { | ||
| 275 | BTREE_INSERT_STATUS_INSERT, | ||
| 276 | BTREE_INSERT_STATUS_BACK_MERGE, | ||
| 277 | BTREE_INSERT_STATUS_OVERWROTE, | ||
| 278 | BTREE_INSERT_STATUS_FRONT_MERGE, | ||
| 279 | }; | ||
| 280 | |||
| 281 | void bch_btree_op_init_stack(struct btree_op *); | 281 | void bch_btree_op_init_stack(struct btree_op *); |
| 282 | 282 | ||
| 283 | static inline void rw_lock(bool w, struct btree *b, int level) | 283 | static inline void rw_lock(bool w, struct btree *b, int level) |
| @@ -293,9 +293,7 @@ static inline void rw_unlock(bool w, struct btree *b) | |||
| 293 | #ifdef CONFIG_BCACHE_EDEBUG | 293 | #ifdef CONFIG_BCACHE_EDEBUG |
| 294 | unsigned i; | 294 | unsigned i; |
| 295 | 295 | ||
| 296 | if (w && | 296 | if (w && b->key.ptr[0]) |
| 297 | b->key.ptr[0] && | ||
| 298 | btree_node_read_done(b)) | ||
| 299 | for (i = 0; i <= b->nsets; i++) | 297 | for (i = 0; i <= b->nsets; i++) |
| 300 | bch_check_key_order(b, b->sets[i].data); | 298 | bch_check_key_order(b, b->sets[i].data); |
| 301 | #endif | 299 | #endif |
| @@ -370,9 +368,8 @@ static inline bool should_split(struct btree *b) | |||
| 370 | > btree_blocks(b)); | 368 | > btree_blocks(b)); |
| 371 | } | 369 | } |
| 372 | 370 | ||
| 373 | void bch_btree_read_done(struct closure *); | 371 | void bch_btree_node_read(struct btree *); |
| 374 | void bch_btree_read(struct btree *); | 372 | void bch_btree_node_write(struct btree *, struct closure *); |
| 375 | void bch_btree_write(struct btree *b, bool now, struct btree_op *op); | ||
| 376 | 373 | ||
| 377 | void bch_cannibalize_unlock(struct cache_set *, struct closure *); | 374 | void bch_cannibalize_unlock(struct cache_set *, struct closure *); |
| 378 | void bch_btree_set_root(struct btree *); | 375 | void bch_btree_set_root(struct btree *); |
| @@ -380,7 +377,6 @@ struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *); | |||
| 380 | struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, | 377 | struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, |
| 381 | int, struct btree_op *); | 378 | int, struct btree_op *); |
| 382 | 379 | ||
| 383 | bool bch_btree_insert_keys(struct btree *, struct btree_op *); | ||
| 384 | bool bch_btree_insert_check_key(struct btree *, struct btree_op *, | 380 | bool bch_btree_insert_check_key(struct btree *, struct btree_op *, |
| 385 | struct bio *); | 381 | struct bio *); |
| 386 | int bch_btree_insert(struct btree_op *, struct cache_set *); | 382 | int bch_btree_insert(struct btree_op *, struct cache_set *); |
| @@ -393,13 +389,14 @@ void bch_moving_gc(struct closure *); | |||
| 393 | int bch_btree_check(struct cache_set *, struct btree_op *); | 389 | int bch_btree_check(struct cache_set *, struct btree_op *); |
| 394 | uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *); | 390 | uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *); |
| 395 | 391 | ||
| 396 | void bch_keybuf_init(struct keybuf *, keybuf_pred_fn *); | 392 | void bch_keybuf_init(struct keybuf *); |
| 397 | void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *); | 393 | void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *, |
| 394 | keybuf_pred_fn *); | ||
| 398 | bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *, | 395 | bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *, |
| 399 | struct bkey *); | 396 | struct bkey *); |
| 400 | void bch_keybuf_del(struct keybuf *, struct keybuf_key *); | 397 | void bch_keybuf_del(struct keybuf *, struct keybuf_key *); |
| 401 | struct keybuf_key *bch_keybuf_next(struct keybuf *); | 398 | struct keybuf_key *bch_keybuf_next(struct keybuf *); |
| 402 | struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, | 399 | struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *, |
| 403 | struct keybuf *, struct bkey *); | 400 | struct bkey *, keybuf_pred_fn *); |
| 404 | 401 | ||
| 405 | #endif | 402 | #endif |
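The new `BTREE_INSERT_STATUS_*` values let the insert path report how a key landed (fresh insert, merge into a neighbour, or overwrite) rather than a bare boolean. This hunk only adds the enum, so any consumer is an assumption; one possible sketch:

```c
/* Hypothetical helper mapping the new status codes to strings,
 * e.g. for tracing or stats; not part of this patch. */
static const char *insert_status_str(unsigned status)
{
	switch (status) {
	case BTREE_INSERT_STATUS_INSERT:	return "insert";
	case BTREE_INSERT_STATUS_BACK_MERGE:	return "back merge";
	case BTREE_INSERT_STATUS_OVERWROTE:	return "overwrote";
	case BTREE_INSERT_STATUS_FRONT_MERGE:	return "front merge";
	default:				return "unknown";
	}
}
```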
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index bd05a9a8c7cf..9aba2017f0d1 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c | |||
| @@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) | |||
| 66 | } else { | 66 | } else { |
| 67 | struct closure *parent = cl->parent; | 67 | struct closure *parent = cl->parent; |
| 68 | struct closure_waitlist *wait = closure_waitlist(cl); | 68 | struct closure_waitlist *wait = closure_waitlist(cl); |
| 69 | closure_fn *destructor = cl->fn; | ||
| 69 | 70 | ||
| 70 | closure_debug_destroy(cl); | 71 | closure_debug_destroy(cl); |
| 71 | 72 | ||
| 73 | smp_mb(); | ||
| 72 | atomic_set(&cl->remaining, -1); | 74 | atomic_set(&cl->remaining, -1); |
| 73 | 75 | ||
| 74 | if (wait) | 76 | if (wait) |
| 75 | closure_wake_up(wait); | 77 | closure_wake_up(wait); |
| 76 | 78 | ||
| 77 | if (cl->fn) | 79 | if (destructor) |
| 78 | cl->fn(cl); | 80 | destructor(cl); |
| 79 | 81 | ||
| 80 | if (parent) | 82 | if (parent) |
| 81 | closure_put(parent); | 83 | closure_put(parent); |
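This closure.c hunk closes a use-after-free window: once `remaining` is set to -1, a waiter woken by `closure_wake_up()` may free or reuse the closure, so `cl->fn` has to be read before that point. The same logic, annotated as a sketch:

```c
closure_fn *destructor = cl->fn;  /* snapshot while cl is still ours */

closure_debug_destroy(cl);

smp_mb();                         /* order the reads above...          */
atomic_set(&cl->remaining, -1);   /* ...before -1 becomes observable   */

if (wait)
	closure_wake_up(wait);    /* from here on, cl may be reused    */

if (destructor)
	destructor(cl);           /* uses the snapshot, not cl->fn     */
```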
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 89fd5204924e..88e6411eab4f 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c | |||
| @@ -47,11 +47,10 @@ const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) | |||
| 47 | return ""; | 47 | return ""; |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | struct keyprint_hack bch_pkey(const struct bkey *k) | 50 | int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k) |
| 51 | { | 51 | { |
| 52 | unsigned i = 0; | 52 | unsigned i = 0; |
| 53 | struct keyprint_hack r; | 53 | char *out = buf, *end = buf + size; |
| 54 | char *out = r.s, *end = r.s + KEYHACK_SIZE; | ||
| 55 | 54 | ||
| 56 | #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__)) | 55 | #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__)) |
| 57 | 56 | ||
| @@ -75,16 +74,14 @@ struct keyprint_hack bch_pkey(const struct bkey *k) | |||
| 75 | if (KEY_CSUM(k)) | 74 | if (KEY_CSUM(k)) |
| 76 | p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); | 75 | p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); |
| 77 | #undef p | 76 | #undef p |
| 78 | return r; | 77 | return out - buf; |
| 79 | } | 78 | } |
| 80 | 79 | ||
| 81 | struct keyprint_hack bch_pbtree(const struct btree *b) | 80 | int bch_btree_to_text(char *buf, size_t size, const struct btree *b) |
| 82 | { | 81 | { |
| 83 | struct keyprint_hack r; | 82 | return scnprintf(buf, size, "%zu level %i/%i", |
| 84 | 83 | PTR_BUCKET_NR(b->c, &b->key, 0), | |
| 85 | snprintf(r.s, 40, "%zu level %i/%i", PTR_BUCKET_NR(b->c, &b->key, 0), | 84 | b->level, b->c->root ? b->c->root->level : -1); |
| 86 | b->level, b->c->root ? b->c->root->level : -1); | ||
| 87 | return r; | ||
| 88 | } | 85 | } |
| 89 | 86 | ||
| 90 | #if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) | 87 | #if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) |
| @@ -100,10 +97,12 @@ static void dump_bset(struct btree *b, struct bset *i) | |||
| 100 | { | 97 | { |
| 101 | struct bkey *k; | 98 | struct bkey *k; |
| 102 | unsigned j; | 99 | unsigned j; |
| 100 | char buf[80]; | ||
| 103 | 101 | ||
| 104 | for (k = i->start; k < end(i); k = bkey_next(k)) { | 102 | for (k = i->start; k < end(i); k = bkey_next(k)) { |
| 103 | bch_bkey_to_text(buf, sizeof(buf), k); | ||
| 105 | printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), | 104 | printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), |
| 106 | (uint64_t *) k - i->d, i->keys, pkey(k)); | 105 | (uint64_t *) k - i->d, i->keys, buf); |
| 107 | 106 | ||
| 108 | for (j = 0; j < KEY_PTRS(k); j++) { | 107 | for (j = 0; j < KEY_PTRS(k); j++) { |
| 109 | size_t n = PTR_BUCKET_NR(b->c, k, j); | 108 | size_t n = PTR_BUCKET_NR(b->c, k, j); |
| @@ -144,7 +143,7 @@ void bch_btree_verify(struct btree *b, struct bset *new) | |||
| 144 | v->written = 0; | 143 | v->written = 0; |
| 145 | v->level = b->level; | 144 | v->level = b->level; |
| 146 | 145 | ||
| 147 | bch_btree_read(v); | 146 | bch_btree_node_read(v); |
| 148 | closure_wait_event(&v->io.wait, &cl, | 147 | closure_wait_event(&v->io.wait, &cl, |
| 149 | atomic_read(&b->io.cl.remaining) == -1); | 148 | atomic_read(&b->io.cl.remaining) == -1); |
| 150 | 149 | ||
| @@ -200,7 +199,7 @@ void bch_data_verify(struct search *s) | |||
| 200 | if (!check) | 199 | if (!check) |
| 201 | return; | 200 | return; |
| 202 | 201 | ||
| 203 | if (bch_bio_alloc_pages(check, GFP_NOIO)) | 202 | if (bio_alloc_pages(check, GFP_NOIO)) |
| 204 | goto out_put; | 203 | goto out_put; |
| 205 | 204 | ||
| 206 | check->bi_rw = READ_SYNC; | 205 | check->bi_rw = READ_SYNC; |
| @@ -252,6 +251,7 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt, | |||
| 252 | va_list args) | 251 | va_list args) |
| 253 | { | 252 | { |
| 254 | unsigned i; | 253 | unsigned i; |
| 254 | char buf[80]; | ||
| 255 | 255 | ||
| 256 | console_lock(); | 256 | console_lock(); |
| 257 | 257 | ||
| @@ -262,7 +262,8 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt, | |||
| 262 | 262 | ||
| 263 | console_unlock(); | 263 | console_unlock(); |
| 264 | 264 | ||
| 265 | panic("at %s\n", pbtree(b)); | 265 | bch_btree_to_text(buf, sizeof(buf), b); |
| 266 | panic("at %s\n", buf); | ||
| 266 | } | 267 | } |
| 267 | 268 | ||
| 268 | void bch_check_key_order_msg(struct btree *b, struct bset *i, | 269 | void bch_check_key_order_msg(struct btree *b, struct bset *i, |
| @@ -337,6 +338,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf, | |||
| 337 | { | 338 | { |
| 338 | struct dump_iterator *i = file->private_data; | 339 | struct dump_iterator *i = file->private_data; |
| 339 | ssize_t ret = 0; | 340 | ssize_t ret = 0; |
| 341 | char kbuf[80]; | ||
| 340 | 342 | ||
| 341 | while (size) { | 343 | while (size) { |
| 342 | struct keybuf_key *w; | 344 | struct keybuf_key *w; |
| @@ -355,11 +357,12 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf, | |||
| 355 | if (i->bytes) | 357 | if (i->bytes) |
| 356 | break; | 358 | break; |
| 357 | 359 | ||
| 358 | w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY); | 360 | w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred); |
| 359 | if (!w) | 361 | if (!w) |
| 360 | break; | 362 | break; |
| 361 | 363 | ||
| 362 | i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", pkey(&w->key)); | 364 | bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key); |
| 365 | i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf); | ||
| 363 | bch_keybuf_del(&i->keys, w); | 366 | bch_keybuf_del(&i->keys, w); |
| 364 | } | 367 | } |
| 365 | 368 | ||
| @@ -377,7 +380,7 @@ static int bch_dump_open(struct inode *inode, struct file *file) | |||
| 377 | 380 | ||
| 378 | file->private_data = i; | 381 | file->private_data = i; |
| 379 | i->c = c; | 382 | i->c = c; |
| 380 | bch_keybuf_init(&i->keys, dump_pred); | 383 | bch_keybuf_init(&i->keys); |
| 381 | i->keys.last_scanned = KEY(0, 0, 0); | 384 | i->keys.last_scanned = KEY(0, 0, 0); |
| 382 | 385 | ||
| 383 | return 0; | 386 | return 0; |
| @@ -409,142 +412,6 @@ void bch_debug_init_cache_set(struct cache_set *c) | |||
| 409 | 412 | ||
| 410 | #endif | 413 | #endif |
| 411 | 414 | ||
| 412 | /* Fuzz tester has rotted: */ | ||
| 413 | #if 0 | ||
| 414 | |||
| 415 | static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a, | ||
| 416 | const char *buffer, size_t size) | ||
| 417 | { | ||
| 418 | void dump(struct btree *b) | ||
| 419 | { | ||
| 420 | struct bset *i; | ||
| 421 | |||
| 422 | for (i = b->sets[0].data; | ||
| 423 | index(i, b) < btree_blocks(b) && | ||
| 424 | i->seq == b->sets[0].data->seq; | ||
| 425 | i = ((void *) i) + set_blocks(i, b->c) * block_bytes(b->c)) | ||
| 426 | dump_bset(b, i); | ||
| 427 | } | ||
| 428 | |||
| 429 | struct cache_sb *sb; | ||
| 430 | struct cache_set *c; | ||
| 431 | struct btree *all[3], *b, *fill, *orig; | ||
| 432 | int j; | ||
| 433 | |||
| 434 | struct btree_op op; | ||
| 435 | bch_btree_op_init_stack(&op); | ||
| 436 | |||
| 437 | sb = kzalloc(sizeof(struct cache_sb), GFP_KERNEL); | ||
| 438 | if (!sb) | ||
| 439 | return -ENOMEM; | ||
| 440 | |||
| 441 | sb->bucket_size = 128; | ||
| 442 | sb->block_size = 4; | ||
| 443 | |||
| 444 | c = bch_cache_set_alloc(sb); | ||
| 445 | if (!c) | ||
| 446 | return -ENOMEM; | ||
| 447 | |||
| 448 | for (j = 0; j < 3; j++) { | ||
| 449 | BUG_ON(list_empty(&c->btree_cache)); | ||
| 450 | all[j] = list_first_entry(&c->btree_cache, struct btree, list); | ||
| 451 | list_del_init(&all[j]->list); | ||
| 452 | |||
| 453 | all[j]->key = KEY(0, 0, c->sb.bucket_size); | ||
| 454 | bkey_copy_key(&all[j]->key, &MAX_KEY); | ||
| 455 | } | ||
| 456 | |||
| 457 | b = all[0]; | ||
| 458 | fill = all[1]; | ||
| 459 | orig = all[2]; | ||
| 460 | |||
| 461 | while (1) { | ||
| 462 | for (j = 0; j < 3; j++) | ||
| 463 | all[j]->written = all[j]->nsets = 0; | ||
| 464 | |||
| 465 | bch_bset_init_next(b); | ||
| 466 | |||
| 467 | while (1) { | ||
| 468 | struct bset *i = write_block(b); | ||
| 469 | struct bkey *k = op.keys.top; | ||
| 470 | unsigned rand; | ||
| 471 | |||
| 472 | bkey_init(k); | ||
| 473 | rand = get_random_int(); | ||
| 474 | |||
| 475 | op.type = rand & 1 | ||
| 476 | ? BTREE_INSERT | ||
| 477 | : BTREE_REPLACE; | ||
| 478 | rand >>= 1; | ||
| 479 | |||
| 480 | SET_KEY_SIZE(k, bucket_remainder(c, rand)); | ||
| 481 | rand >>= c->bucket_bits; | ||
| 482 | rand &= 1024 * 512 - 1; | ||
| 483 | rand += c->sb.bucket_size; | ||
| 484 | SET_KEY_OFFSET(k, rand); | ||
| 485 | #if 0 | ||
| 486 | SET_KEY_PTRS(k, 1); | ||
| 487 | #endif | ||
| 488 | bch_keylist_push(&op.keys); | ||
| 489 | bch_btree_insert_keys(b, &op); | ||
| 490 | |||
| 491 | if (should_split(b) || | ||
| 492 | set_blocks(i, b->c) != | ||
| 493 | __set_blocks(i, i->keys + 15, b->c)) { | ||
| 494 | i->csum = csum_set(i); | ||
| 495 | |||
| 496 | memcpy(write_block(fill), | ||
| 497 | i, set_bytes(i)); | ||
| 498 | |||
| 499 | b->written += set_blocks(i, b->c); | ||
| 500 | fill->written = b->written; | ||
| 501 | if (b->written == btree_blocks(b)) | ||
| 502 | break; | ||
| 503 | |||
| 504 | bch_btree_sort_lazy(b); | ||
| 505 | bch_bset_init_next(b); | ||
| 506 | } | ||
| 507 | } | ||
| 508 | |||
| 509 | memcpy(orig->sets[0].data, | ||
| 510 | fill->sets[0].data, | ||
| 511 | btree_bytes(c)); | ||
| 512 | |||
| 513 | bch_btree_sort(b); | ||
| 514 | fill->written = 0; | ||
| 515 | bch_btree_read_done(&fill->io.cl); | ||
| 516 | |||
| 517 | if (b->sets[0].data->keys != fill->sets[0].data->keys || | ||
| 518 | memcmp(b->sets[0].data->start, | ||
| 519 | fill->sets[0].data->start, | ||
| 520 | b->sets[0].data->keys * sizeof(uint64_t))) { | ||
| 521 | struct bset *i = b->sets[0].data; | ||
| 522 | struct bkey *k, *l; | ||
| 523 | |||
| 524 | for (k = i->start, | ||
| 525 | l = fill->sets[0].data->start; | ||
| 526 | k < end(i); | ||
| 527 | k = bkey_next(k), l = bkey_next(l)) | ||
| 528 | if (bkey_cmp(k, l) || | ||
| 529 | KEY_SIZE(k) != KEY_SIZE(l)) | ||
| 530 | pr_err("key %zi differs: %s != %s", | ||
| 531 | (uint64_t *) k - i->d, | ||
| 532 | pkey(k), pkey(l)); | ||
| 533 | |||
| 534 | for (j = 0; j < 3; j++) { | ||
| 535 | pr_err("**** Set %i ****", j); | ||
| 536 | dump(all[j]); | ||
| 537 | } | ||
| 538 | panic("\n"); | ||
| 539 | } | ||
| 540 | |||
| 541 | pr_info("fuzz complete: %i keys", b->sets[0].data->keys); | ||
| 542 | } | ||
| 543 | } | ||
| 544 | |||
| 545 | kobj_attribute_write(fuzz, btree_fuzz); | ||
| 546 | #endif | ||
| 547 | |||
| 548 | void bch_debug_exit(void) | 415 | void bch_debug_exit(void) |
| 549 | { | 416 | { |
| 550 | if (!IS_ERR_OR_NULL(debug)) | 417 | if (!IS_ERR_OR_NULL(debug)) |
| @@ -554,11 +421,6 @@ void bch_debug_exit(void) | |||
| 554 | int __init bch_debug_init(struct kobject *kobj) | 421 | int __init bch_debug_init(struct kobject *kobj) |
| 555 | { | 422 | { |
| 556 | int ret = 0; | 423 | int ret = 0; |
| 557 | #if 0 | ||
| 558 | ret = sysfs_create_file(kobj, &ksysfs_fuzz.attr); | ||
| 559 | if (ret) | ||
| 560 | return ret; | ||
| 561 | #endif | ||
| 562 | 424 | ||
| 563 | debug = debugfs_create_dir("bcache", NULL); | 425 | debug = debugfs_create_dir("bcache", NULL); |
| 564 | return ret; | 426 | return ret; |
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h index f9378a218148..1c39b5a2489b 100644 --- a/drivers/md/bcache/debug.h +++ b/drivers/md/bcache/debug.h | |||
| @@ -3,15 +3,8 @@ | |||
| 3 | 3 | ||
| 4 | /* Btree/bkey debug printing */ | 4 | /* Btree/bkey debug printing */ |
| 5 | 5 | ||
| 6 | #define KEYHACK_SIZE 80 | 6 | int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k); |
| 7 | struct keyprint_hack { | 7 | int bch_btree_to_text(char *buf, size_t size, const struct btree *b); |
| 8 | char s[KEYHACK_SIZE]; | ||
| 9 | }; | ||
| 10 | |||
| 11 | struct keyprint_hack bch_pkey(const struct bkey *k); | ||
| 12 | struct keyprint_hack bch_pbtree(const struct btree *b); | ||
| 13 | #define pkey(k) (&bch_pkey(k).s[0]) | ||
| 14 | #define pbtree(b) (&bch_pbtree(b).s[0]) | ||
| 15 | 8 | ||
| 16 | #ifdef CONFIG_BCACHE_EDEBUG | 9 | #ifdef CONFIG_BCACHE_EDEBUG |
| 17 | 10 | ||
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 48efd4dea645..9056632995b1 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #include "bset.h" | 9 | #include "bset.h" |
| 10 | #include "debug.h" | 10 | #include "debug.h" |
| 11 | 11 | ||
| 12 | #include <linux/blkdev.h> | ||
| 13 | |||
| 12 | static void bch_bi_idx_hack_endio(struct bio *bio, int error) | 14 | static void bch_bi_idx_hack_endio(struct bio *bio, int error) |
| 13 | { | 15 | { |
| 14 | struct bio *p = bio->bi_private; | 16 | struct bio *p = bio->bi_private; |
| @@ -66,13 +68,6 @@ static void bch_generic_make_request_hack(struct bio *bio) | |||
| 66 | * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a | 68 | * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a |
| 67 | * bvec boundary; it is the caller's responsibility to ensure that @bio is not | 69 | * bvec boundary; it is the caller's responsibility to ensure that @bio is not |
| 68 | * freed before the split. | 70 | * freed before the split. |
| 69 | * | ||
| 70 | * If bch_bio_split() is running under generic_make_request(), it's not safe to | ||
| 71 | * allocate more than one bio from the same bio set. Therefore, if it is running | ||
| 72 | * under generic_make_request() it masks out __GFP_WAIT when doing the | ||
| 73 | * allocation. The caller must check for failure if there's any possibility of | ||
| 74 | * it being called from under generic_make_request(); it is then the caller's | ||
| 75 | * responsibility to retry from a safe context (by e.g. punting to workqueue). | ||
| 76 | */ | 71 | */ |
| 77 | struct bio *bch_bio_split(struct bio *bio, int sectors, | 72 | struct bio *bch_bio_split(struct bio *bio, int sectors, |
| 78 | gfp_t gfp, struct bio_set *bs) | 73 | gfp_t gfp, struct bio_set *bs) |
| @@ -83,20 +78,13 @@ struct bio *bch_bio_split(struct bio *bio, int sectors, | |||
| 83 | 78 | ||
| 84 | BUG_ON(sectors <= 0); | 79 | BUG_ON(sectors <= 0); |
| 85 | 80 | ||
| 86 | /* | ||
| 87 | * If we're being called from underneath generic_make_request() and we | ||
| 88 | * already allocated any bios from this bio set, we risk deadlock if we | ||
| 89 | * use the mempool. So instead, we possibly fail and let the caller punt | ||
| 90 | * to workqueue or somesuch and retry in a safe context. | ||
| 91 | */ | ||
| 92 | if (current->bio_list) | ||
| 93 | gfp &= ~__GFP_WAIT; | ||
| 94 | |||
| 95 | if (sectors >= bio_sectors(bio)) | 81 | if (sectors >= bio_sectors(bio)) |
| 96 | return bio; | 82 | return bio; |
| 97 | 83 | ||
| 98 | if (bio->bi_rw & REQ_DISCARD) { | 84 | if (bio->bi_rw & REQ_DISCARD) { |
| 99 | ret = bio_alloc_bioset(gfp, 1, bs); | 85 | ret = bio_alloc_bioset(gfp, 1, bs); |
| 86 | if (!ret) | ||
| 87 | return NULL; | ||
| 100 | idx = 0; | 88 | idx = 0; |
| 101 | goto out; | 89 | goto out; |
| 102 | } | 90 | } |
| @@ -160,17 +148,18 @@ static unsigned bch_bio_max_sectors(struct bio *bio) | |||
| 160 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 148 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
| 161 | unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, | 149 | unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, |
| 162 | queue_max_segments(q)); | 150 | queue_max_segments(q)); |
| 163 | struct bio_vec *bv, *end = bio_iovec(bio) + | ||
| 164 | min_t(int, bio_segments(bio), max_segments); | ||
| 165 | 151 | ||
| 166 | if (bio->bi_rw & REQ_DISCARD) | 152 | if (bio->bi_rw & REQ_DISCARD) |
| 167 | return min(ret, q->limits.max_discard_sectors); | 153 | return min(ret, q->limits.max_discard_sectors); |
| 168 | 154 | ||
| 169 | if (bio_segments(bio) > max_segments || | 155 | if (bio_segments(bio) > max_segments || |
| 170 | q->merge_bvec_fn) { | 156 | q->merge_bvec_fn) { |
| 157 | struct bio_vec *bv; | ||
| 158 | int i, seg = 0; | ||
| 159 | |||
| 171 | ret = 0; | 160 | ret = 0; |
| 172 | 161 | ||
| 173 | for (bv = bio_iovec(bio); bv < end; bv++) { | 162 | bio_for_each_segment(bv, bio, i) { |
| 174 | struct bvec_merge_data bvm = { | 163 | struct bvec_merge_data bvm = { |
| 175 | .bi_bdev = bio->bi_bdev, | 164 | .bi_bdev = bio->bi_bdev, |
| 176 | .bi_sector = bio->bi_sector, | 165 | .bi_sector = bio->bi_sector, |
| @@ -178,10 +167,14 @@ static unsigned bch_bio_max_sectors(struct bio *bio) | |||
| 178 | .bi_rw = bio->bi_rw, | 167 | .bi_rw = bio->bi_rw, |
| 179 | }; | 168 | }; |
| 180 | 169 | ||
| 170 | if (seg == max_segments) | ||
| 171 | break; | ||
| 172 | |||
| 181 | if (q->merge_bvec_fn && | 173 | if (q->merge_bvec_fn && |
| 182 | q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) | 174 | q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) |
| 183 | break; | 175 | break; |
| 184 | 176 | ||
| 177 | seg++; | ||
| 185 | ret += bv->bv_len >> 9; | 178 | ret += bv->bv_len >> 9; |
| 186 | } | 179 | } |
| 187 | } | 180 | } |
| @@ -218,30 +211,10 @@ static void bch_bio_submit_split_endio(struct bio *bio, int error) | |||
| 218 | closure_put(cl); | 211 | closure_put(cl); |
| 219 | } | 212 | } |
| 220 | 213 | ||
| 221 | static void __bch_bio_submit_split(struct closure *cl) | ||
| 222 | { | ||
| 223 | struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl); | ||
| 224 | struct bio *bio = s->bio, *n; | ||
| 225 | |||
| 226 | do { | ||
| 227 | n = bch_bio_split(bio, bch_bio_max_sectors(bio), | ||
| 228 | GFP_NOIO, s->p->bio_split); | ||
| 229 | if (!n) | ||
| 230 | continue_at(cl, __bch_bio_submit_split, system_wq); | ||
| 231 | |||
| 232 | n->bi_end_io = bch_bio_submit_split_endio; | ||
| 233 | n->bi_private = cl; | ||
| 234 | |||
| 235 | closure_get(cl); | ||
| 236 | bch_generic_make_request_hack(n); | ||
| 237 | } while (n != bio); | ||
| 238 | |||
| 239 | continue_at(cl, bch_bio_submit_split_done, NULL); | ||
| 240 | } | ||
| 241 | |||
| 242 | void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) | 214 | void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) |
| 243 | { | 215 | { |
| 244 | struct bio_split_hook *s; | 216 | struct bio_split_hook *s; |
| 217 | struct bio *n; | ||
| 245 | 218 | ||
| 246 | if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD)) | 219 | if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD)) |
| 247 | goto submit; | 220 | goto submit; |
| @@ -250,6 +223,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) | |||
| 250 | goto submit; | 223 | goto submit; |
| 251 | 224 | ||
| 252 | s = mempool_alloc(p->bio_split_hook, GFP_NOIO); | 225 | s = mempool_alloc(p->bio_split_hook, GFP_NOIO); |
| 226 | closure_init(&s->cl, NULL); | ||
| 253 | 227 | ||
| 254 | s->bio = bio; | 228 | s->bio = bio; |
| 255 | s->p = p; | 229 | s->p = p; |
| @@ -257,8 +231,18 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) | |||
| 257 | s->bi_private = bio->bi_private; | 231 | s->bi_private = bio->bi_private; |
| 258 | bio_get(bio); | 232 | bio_get(bio); |
| 259 | 233 | ||
| 260 | closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL); | 234 | do { |
| 261 | return; | 235 | n = bch_bio_split(bio, bch_bio_max_sectors(bio), |
| 236 | GFP_NOIO, s->p->bio_split); | ||
| 237 | |||
| 238 | n->bi_end_io = bch_bio_submit_split_endio; | ||
| 239 | n->bi_private = &s->cl; | ||
| 240 | |||
| 241 | closure_get(&s->cl); | ||
| 242 | bch_generic_make_request_hack(n); | ||
| 243 | } while (n != bio); | ||
| 244 | |||
| 245 | continue_at(&s->cl, bch_bio_submit_split_done, NULL); | ||
| 262 | submit: | 246 | submit: |
| 263 | bch_generic_make_request_hack(bio); | 247 | bch_generic_make_request_hack(bio); |
| 264 | } | 248 | } |
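With the `__GFP_WAIT` masking gone, a `GFP_NOIO` split can block until memory is available instead of failing, so the workqueue re-punt path (`__bch_bio_submit_split`) disappears and the loop runs inline in `bch_generic_make_request()`, with the hook's closure set up by `closure_init()` rather than `closure_call()`. An annotated sketch of the resulting loop:

```c
closure_init(&s->cl, NULL);		/* per-split refcount, no parent */

do {
	/* May sleep under GFP_NOIO, but no longer returns NULL here: */
	n = bch_bio_split(bio, bch_bio_max_sectors(bio),
			  GFP_NOIO, s->p->bio_split);

	n->bi_end_io  = bch_bio_submit_split_endio;
	n->bi_private = &s->cl;

	closure_get(&s->cl);		/* one ref per fragment in flight */
	bch_generic_make_request_hack(n);
} while (n != bio);			/* the last fragment is @bio itself */

continue_at(&s->cl, bch_bio_submit_split_done, NULL);
```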
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 8c8dfdcd9d4c..ba95ab84b2be 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #include "debug.h" | 9 | #include "debug.h" |
| 10 | #include "request.h" | 10 | #include "request.h" |
| 11 | 11 | ||
| 12 | #include <trace/events/bcache.h> | ||
| 13 | |||
| 12 | /* | 14 | /* |
| 13 | * Journal replay/recovery: | 15 | * Journal replay/recovery: |
| 14 | * | 16 | * |
| @@ -182,9 +184,14 @@ bsearch: | |||
| 182 | pr_debug("starting binary search, l %u r %u", l, r); | 184 | pr_debug("starting binary search, l %u r %u", l, r); |
| 183 | 185 | ||
| 184 | while (l + 1 < r) { | 186 | while (l + 1 < r) { |
| 187 | seq = list_entry(list->prev, struct journal_replay, | ||
| 188 | list)->j.seq; | ||
| 189 | |||
| 185 | m = (l + r) >> 1; | 190 | m = (l + r) >> 1; |
| 191 | read_bucket(m); | ||
| 186 | 192 | ||
| 187 | if (read_bucket(m)) | 193 | if (seq != list_entry(list->prev, struct journal_replay, |
| 194 | list)->j.seq) | ||
| 188 | l = m; | 195 | l = m; |
| 189 | else | 196 | else |
| 190 | r = m; | 197 | r = m; |
| @@ -300,7 +307,8 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list, | |||
| 300 | for (k = i->j.start; | 307 | for (k = i->j.start; |
| 301 | k < end(&i->j); | 308 | k < end(&i->j); |
| 302 | k = bkey_next(k)) { | 309 | k = bkey_next(k)) { |
| 303 | pr_debug("%s", pkey(k)); | 310 | trace_bcache_journal_replay_key(k); |
| 311 | |||
| 304 | bkey_copy(op->keys.top, k); | 312 | bkey_copy(op->keys.top, k); |
| 305 | bch_keylist_push(&op->keys); | 313 | bch_keylist_push(&op->keys); |
| 306 | 314 | ||
| @@ -384,7 +392,7 @@ out: | |||
| 384 | return; | 392 | return; |
| 385 | found: | 393 | found: |
| 386 | if (btree_node_dirty(best)) | 394 | if (btree_node_dirty(best)) |
| 387 | bch_btree_write(best, true, NULL); | 395 | bch_btree_node_write(best, NULL); |
| 388 | rw_unlock(true, best); | 396 | rw_unlock(true, best); |
| 389 | } | 397 | } |
| 390 | 398 | ||
| @@ -617,7 +625,7 @@ static void journal_write_unlocked(struct closure *cl) | |||
| 617 | bio_reset(bio); | 625 | bio_reset(bio); |
| 618 | bio->bi_sector = PTR_OFFSET(k, i); | 626 | bio->bi_sector = PTR_OFFSET(k, i); |
| 619 | bio->bi_bdev = ca->bdev; | 627 | bio->bi_bdev = ca->bdev; |
| 620 | bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH; | 628 | bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; |
| 621 | bio->bi_size = sectors << 9; | 629 | bio->bi_size = sectors << 9; |
| 622 | 630 | ||
| 623 | bio->bi_end_io = journal_write_endio; | 631 | bio->bi_end_io = journal_write_endio; |
| @@ -712,7 +720,8 @@ void bch_journal(struct closure *cl) | |||
| 712 | spin_lock(&c->journal.lock); | 720 | spin_lock(&c->journal.lock); |
| 713 | 721 | ||
| 714 | if (journal_full(&c->journal)) { | 722 | if (journal_full(&c->journal)) { |
| 715 | /* XXX: tracepoint */ | 723 | trace_bcache_journal_full(c); |
| 724 | |||
| 716 | closure_wait(&c->journal.wait, cl); | 725 | closure_wait(&c->journal.wait, cl); |
| 717 | 726 | ||
| 718 | journal_reclaim(c); | 727 | journal_reclaim(c); |
| @@ -728,13 +737,15 @@ void bch_journal(struct closure *cl) | |||
| 728 | 737 | ||
| 729 | if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS || | 738 | if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS || |
| 730 | b > c->journal.blocks_free) { | 739 | b > c->journal.blocks_free) { |
| 731 | /* XXX: If we were inserting so many keys that they won't fit in | 740 | trace_bcache_journal_entry_full(c); |
| 741 | |||
| 742 | /* | ||
| 743 | * XXX: If we were inserting so many keys that they won't fit in | ||
| 732 | * an _empty_ journal write, we'll deadlock. For now, handle | 744 | * an _empty_ journal write, we'll deadlock. For now, handle |
| 733 | * this in bch_keylist_realloc() - but something to think about. | 745 | * this in bch_keylist_realloc() - but something to think about. |
| 734 | */ | 746 | */ |
| 735 | BUG_ON(!w->data->keys); | 747 | BUG_ON(!w->data->keys); |
| 736 | 748 | ||
| 737 | /* XXX: tracepoint */ | ||
| 738 | BUG_ON(!closure_wait(&w->wait, cl)); | 749 | BUG_ON(!closure_wait(&w->wait, cl)); |
| 739 | 750 | ||
| 740 | closure_flush(&c->journal.io); | 751 | closure_flush(&c->journal.io); |
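The reworked binary search no longer trusts `read_bucket()`'s return value; it probes whether bucket `m` actually contributed journal entries by watching the newest sequence number on the list. A sketch of the invariant, where `newest_seq()` is a hypothetical shorthand for `list_entry(list->prev, struct journal_replay, list)->j.seq`:

```c
while (l + 1 < r) {
	uint64_t seq = newest_seq(list);   /* tail seq before the probe */

	m = (l + r) >> 1;
	read_bucket(m);                    /* appends entries it finds  */

	if (seq != newest_seq(list))
		l = m;     /* bucket m held newer entries: search right */
	else
		r = m;     /* bucket m was stale or empty: search left  */
}
```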
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 8589512c972e..1a3b4f4786c3 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #include "debug.h" | 9 | #include "debug.h" |
| 10 | #include "request.h" | 10 | #include "request.h" |
| 11 | 11 | ||
| 12 | #include <trace/events/bcache.h> | ||
| 13 | |||
| 12 | struct moving_io { | 14 | struct moving_io { |
| 13 | struct keybuf_key *w; | 15 | struct keybuf_key *w; |
| 14 | struct search s; | 16 | struct search s; |
| @@ -44,14 +46,14 @@ static void write_moving_finish(struct closure *cl) | |||
| 44 | { | 46 | { |
| 45 | struct moving_io *io = container_of(cl, struct moving_io, s.cl); | 47 | struct moving_io *io = container_of(cl, struct moving_io, s.cl); |
| 46 | struct bio *bio = &io->bio.bio; | 48 | struct bio *bio = &io->bio.bio; |
| 47 | struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt); | 49 | struct bio_vec *bv; |
| 50 | int i; | ||
| 48 | 51 | ||
| 49 | while (bv-- != bio->bi_io_vec) | 52 | bio_for_each_segment_all(bv, bio, i) |
| 50 | __free_page(bv->bv_page); | 53 | __free_page(bv->bv_page); |
| 51 | 54 | ||
| 52 | pr_debug("%s %s", io->s.op.insert_collision | 55 | if (io->s.op.insert_collision) |
| 53 | ? "collision moving" : "moved", | 56 | trace_bcache_gc_copy_collision(&io->w->key); |
| 54 | pkey(&io->w->key)); | ||
| 55 | 57 | ||
| 56 | bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w); | 58 | bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w); |
| 57 | 59 | ||
| @@ -94,8 +96,6 @@ static void write_moving(struct closure *cl) | |||
| 94 | struct moving_io *io = container_of(s, struct moving_io, s); | 96 | struct moving_io *io = container_of(s, struct moving_io, s); |
| 95 | 97 | ||
| 96 | if (!s->error) { | 98 | if (!s->error) { |
| 97 | trace_bcache_write_moving(&io->bio.bio); | ||
| 98 | |||
| 99 | moving_init(io); | 99 | moving_init(io); |
| 100 | 100 | ||
| 101 | io->bio.bio.bi_sector = KEY_START(&io->w->key); | 101 | io->bio.bio.bi_sector = KEY_START(&io->w->key); |
| @@ -122,7 +122,6 @@ static void read_moving_submit(struct closure *cl) | |||
| 122 | struct moving_io *io = container_of(s, struct moving_io, s); | 122 | struct moving_io *io = container_of(s, struct moving_io, s); |
| 123 | struct bio *bio = &io->bio.bio; | 123 | struct bio *bio = &io->bio.bio; |
| 124 | 124 | ||
| 125 | trace_bcache_read_moving(bio); | ||
| 126 | bch_submit_bbio(bio, s->op.c, &io->w->key, 0); | 125 | bch_submit_bbio(bio, s->op.c, &io->w->key, 0); |
| 127 | 126 | ||
| 128 | continue_at(cl, write_moving, bch_gc_wq); | 127 | continue_at(cl, write_moving, bch_gc_wq); |
| @@ -138,7 +137,8 @@ static void read_moving(struct closure *cl) | |||
| 138 | /* XXX: if we error, background writeback could stall indefinitely */ | 137 | /* XXX: if we error, background writeback could stall indefinitely */ |
| 139 | 138 | ||
| 140 | while (!test_bit(CACHE_SET_STOPPING, &c->flags)) { | 139 | while (!test_bit(CACHE_SET_STOPPING, &c->flags)) { |
| 141 | w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, &MAX_KEY); | 140 | w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, |
| 141 | &MAX_KEY, moving_pred); | ||
| 142 | if (!w) | 142 | if (!w) |
| 143 | break; | 143 | break; |
| 144 | 144 | ||
| @@ -159,10 +159,10 @@ static void read_moving(struct closure *cl) | |||
| 159 | bio->bi_rw = READ; | 159 | bio->bi_rw = READ; |
| 160 | bio->bi_end_io = read_moving_endio; | 160 | bio->bi_end_io = read_moving_endio; |
| 161 | 161 | ||
| 162 | if (bch_bio_alloc_pages(bio, GFP_KERNEL)) | 162 | if (bio_alloc_pages(bio, GFP_KERNEL)) |
| 163 | goto err; | 163 | goto err; |
| 164 | 164 | ||
| 165 | pr_debug("%s", pkey(&w->key)); | 165 | trace_bcache_gc_copy(&w->key); |
| 166 | 166 | ||
| 167 | closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl); | 167 | closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl); |
| 168 | 168 | ||
| @@ -250,5 +250,5 @@ void bch_moving_gc(struct closure *cl) | |||
| 250 | 250 | ||
| 251 | void bch_moving_init_cache_set(struct cache_set *c) | 251 | void bch_moving_init_cache_set(struct cache_set *c) |
| 252 | { | 252 | { |
| 253 | bch_keybuf_init(&c->moving_gc_keys, moving_pred); | 253 | bch_keybuf_init(&c->moving_gc_keys); |
| 254 | } | 254 | } |
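Besides swapping the `pr_debug()`/`pkey()` pairs for dedicated tracepoints, the cleanup path stops walking the bvec array backwards by hand and uses the standard iterator. The resulting idiom, as a sketch:

```c
/* Free every page attached to a bio, regardless of bi_idx: */
static void free_bio_pages(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);
}
```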
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index e5ff12e52d5b..786a1a4f74d8 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "btree.h" | 10 | #include "btree.h" |
| 11 | #include "debug.h" | 11 | #include "debug.h" |
| 12 | #include "request.h" | 12 | #include "request.h" |
| 13 | #include "writeback.h" | ||
| 13 | 14 | ||
| 14 | #include <linux/cgroup.h> | 15 | #include <linux/cgroup.h> |
| 15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| @@ -21,8 +22,6 @@ | |||
| 21 | 22 | ||
| 22 | #define CUTOFF_CACHE_ADD 95 | 23 | #define CUTOFF_CACHE_ADD 95 |
| 23 | #define CUTOFF_CACHE_READA 90 | 24 | #define CUTOFF_CACHE_READA 90 |
| 24 | #define CUTOFF_WRITEBACK 50 | ||
| 25 | #define CUTOFF_WRITEBACK_SYNC 75 | ||
| 26 | 25 | ||
| 27 | struct kmem_cache *bch_search_cache; | 26 | struct kmem_cache *bch_search_cache; |
| 28 | 27 | ||
| @@ -489,6 +488,12 @@ static void bch_insert_data_loop(struct closure *cl) | |||
| 489 | bch_queue_gc(op->c); | 488 | bch_queue_gc(op->c); |
| 490 | } | 489 | } |
| 491 | 490 | ||
| 491 | /* | ||
| 492 | * Journal writes are marked REQ_FLUSH; if the original write was a | ||
| 493 | * flush, it'll wait on the journal write. | ||
| 494 | */ | ||
| 495 | bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA); | ||
| 496 | |||
| 492 | do { | 497 | do { |
| 493 | unsigned i; | 498 | unsigned i; |
| 494 | struct bkey *k; | 499 | struct bkey *k; |
| @@ -510,10 +515,6 @@ static void bch_insert_data_loop(struct closure *cl) | |||
| 510 | goto err; | 515 | goto err; |
| 511 | 516 | ||
| 512 | n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); | 517 | n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); |
| 513 | if (!n) { | ||
| 514 | __bkey_put(op->c, k); | ||
| 515 | continue_at(cl, bch_insert_data_loop, bcache_wq); | ||
| 516 | } | ||
| 517 | 518 | ||
| 518 | n->bi_end_io = bch_insert_data_endio; | 519 | n->bi_end_io = bch_insert_data_endio; |
| 519 | n->bi_private = cl; | 520 | n->bi_private = cl; |
| @@ -530,10 +531,9 @@ static void bch_insert_data_loop(struct closure *cl) | |||
| 530 | if (KEY_CSUM(k)) | 531 | if (KEY_CSUM(k)) |
| 531 | bio_csum(n, k); | 532 | bio_csum(n, k); |
| 532 | 533 | ||
| 533 | pr_debug("%s", pkey(k)); | 534 | trace_bcache_cache_insert(k); |
| 534 | bch_keylist_push(&op->keys); | 535 | bch_keylist_push(&op->keys); |
| 535 | 536 | ||
| 536 | trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev); | ||
| 537 | n->bi_rw |= REQ_WRITE; | 537 | n->bi_rw |= REQ_WRITE; |
| 538 | bch_submit_bbio(n, op->c, k, 0); | 538 | bch_submit_bbio(n, op->c, k, 0); |
| 539 | } while (n != bio); | 539 | } while (n != bio); |
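The comment added at the top of this hunk is the key invariant: durability for a flush is provided by the journal write (now tagged `REQ_FLUSH|REQ_FUA` in the journal.c hunk above), so the data bios split off here must not carry the flags themselves. Condensed from this hunk and the `search_alloc()` hunk below, as a sketch:

```c
/* Remember the caller's intent on the search... */
s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;

/* ...then strip the flags from the data path; the insert waits on
 * the REQ_FLUSH|REQ_FUA journal write for durability instead. */
bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
```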
| @@ -716,7 +716,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d) | |||
| 716 | s->task = current; | 716 | s->task = current; |
| 717 | s->orig_bio = bio; | 717 | s->orig_bio = bio; |
| 718 | s->write = (bio->bi_rw & REQ_WRITE) != 0; | 718 | s->write = (bio->bi_rw & REQ_WRITE) != 0; |
| 719 | s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0; | 719 | s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; |
| 720 | s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0; | 720 | s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0; |
| 721 | s->recoverable = 1; | 721 | s->recoverable = 1; |
| 722 | s->start_time = jiffies; | 722 | s->start_time = jiffies; |
| @@ -784,11 +784,8 @@ static void request_read_error(struct closure *cl) | |||
| 784 | int i; | 784 | int i; |
| 785 | 785 | ||
| 786 | if (s->recoverable) { | 786 | if (s->recoverable) { |
| 787 | /* The cache read failed, but we can retry from the backing | 787 | /* Retry from the backing device: */ |
| 788 | * device. | 788 | trace_bcache_read_retry(s->orig_bio); |
| 789 | */ | ||
| 790 | pr_debug("recovering at sector %llu", | ||
| 791 | (uint64_t) s->orig_bio->bi_sector); | ||
| 792 | 789 | ||
| 793 | s->error = 0; | 790 | s->error = 0; |
| 794 | bv = s->bio.bio.bi_io_vec; | 791 | bv = s->bio.bio.bi_io_vec; |
| @@ -806,7 +803,6 @@ static void request_read_error(struct closure *cl) | |||
| 806 | 803 | ||
| 807 | /* XXX: invalidate cache */ | 804 | /* XXX: invalidate cache */ |
| 808 | 805 | ||
| 809 | trace_bcache_read_retry(&s->bio.bio); | ||
| 810 | closure_bio_submit(&s->bio.bio, &s->cl, s->d); | 806 | closure_bio_submit(&s->bio.bio, &s->cl, s->d); |
| 811 | } | 807 | } |
| 812 | 808 | ||
| @@ -827,53 +823,13 @@ static void request_read_done(struct closure *cl) | |||
| 827 | */ | 823 | */ |
| 828 | 824 | ||
| 829 | if (s->op.cache_bio) { | 825 | if (s->op.cache_bio) { |
| 830 | struct bio_vec *src, *dst; | ||
| 831 | unsigned src_offset, dst_offset, bytes; | ||
| 832 | void *dst_ptr; | ||
| 833 | |||
| 834 | bio_reset(s->op.cache_bio); | 826 | bio_reset(s->op.cache_bio); |
| 835 | s->op.cache_bio->bi_sector = s->cache_miss->bi_sector; | 827 | s->op.cache_bio->bi_sector = s->cache_miss->bi_sector; |
| 836 | s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev; | 828 | s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev; |
| 837 | s->op.cache_bio->bi_size = s->cache_bio_sectors << 9; | 829 | s->op.cache_bio->bi_size = s->cache_bio_sectors << 9; |
| 838 | bch_bio_map(s->op.cache_bio, NULL); | 830 | bch_bio_map(s->op.cache_bio, NULL); |
| 839 | 831 | ||
| 840 | src = bio_iovec(s->op.cache_bio); | 832 | bio_copy_data(s->cache_miss, s->op.cache_bio); |
| 841 | dst = bio_iovec(s->cache_miss); | ||
| 842 | src_offset = src->bv_offset; | ||
| 843 | dst_offset = dst->bv_offset; | ||
| 844 | dst_ptr = kmap(dst->bv_page); | ||
| 845 | |||
| 846 | while (1) { | ||
| 847 | if (dst_offset == dst->bv_offset + dst->bv_len) { | ||
| 848 | kunmap(dst->bv_page); | ||
| 849 | dst++; | ||
| 850 | if (dst == bio_iovec_idx(s->cache_miss, | ||
| 851 | s->cache_miss->bi_vcnt)) | ||
| 852 | break; | ||
| 853 | |||
| 854 | dst_offset = dst->bv_offset; | ||
| 855 | dst_ptr = kmap(dst->bv_page); | ||
| 856 | } | ||
| 857 | |||
| 858 | if (src_offset == src->bv_offset + src->bv_len) { | ||
| 859 | src++; | ||
| 860 | if (src == bio_iovec_idx(s->op.cache_bio, | ||
| 861 | s->op.cache_bio->bi_vcnt)) | ||
| 862 | BUG(); | ||
| 863 | |||
| 864 | src_offset = src->bv_offset; | ||
| 865 | } | ||
| 866 | |||
| 867 | bytes = min(dst->bv_offset + dst->bv_len - dst_offset, | ||
| 868 | src->bv_offset + src->bv_len - src_offset); | ||
| 869 | |||
| 870 | memcpy(dst_ptr + dst_offset, | ||
| 871 | page_address(src->bv_page) + src_offset, | ||
| 872 | bytes); | ||
| 873 | |||
| 874 | src_offset += bytes; | ||
| 875 | dst_offset += bytes; | ||
| 876 | } | ||
| 877 | 833 | ||
| 878 | bio_put(s->cache_miss); | 834 | bio_put(s->cache_miss); |
| 879 | s->cache_miss = NULL; | 835 | s->cache_miss = NULL; |
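Some forty lines of hand-rolled `kmap()`/`memcpy()` bvec walking collapse into one call to the block layer's `bio_copy_data()`. Worth noting, since the argument order is easy to misread:

```c
/* bio_copy_data(dst, src): destination comes first. The freshly read
 * cache_bio (which may include readahead) is copied back into the
 * original miss bio before that bio is completed. */
bio_copy_data(s->cache_miss, s->op.cache_bio);
```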
| @@ -899,6 +855,7 @@ static void request_read_done_bh(struct closure *cl) | |||
| 899 | struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); | 855 | struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); |
| 900 | 856 | ||
| 901 | bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip); | 857 | bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip); |
| 858 | trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip); | ||
| 902 | 859 | ||
| 903 | if (s->error) | 860 | if (s->error) |
| 904 | continue_at_nobarrier(cl, request_read_error, bcache_wq); | 861 | continue_at_nobarrier(cl, request_read_error, bcache_wq); |
| @@ -917,9 +874,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
| 917 | struct bio *miss; | 874 | struct bio *miss; |
| 918 | 875 | ||
| 919 | miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); | 876 | miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); |
| 920 | if (!miss) | ||
| 921 | return -EAGAIN; | ||
| 922 | |||
| 923 | if (miss == bio) | 877 | if (miss == bio) |
| 924 | s->op.lookup_done = true; | 878 | s->op.lookup_done = true; |
| 925 | 879 | ||
| @@ -938,8 +892,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
| 938 | reada = min(dc->readahead >> 9, | 892 | reada = min(dc->readahead >> 9, |
| 939 | sectors - bio_sectors(miss)); | 893 | sectors - bio_sectors(miss)); |
| 940 | 894 | ||
| 941 | if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev)) | 895 | if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev)) |
| 942 | reada = bdev_sectors(miss->bi_bdev) - bio_end(miss); | 896 | reada = bdev_sectors(miss->bi_bdev) - |
| 897 | bio_end_sector(miss); | ||
| 943 | } | 898 | } |
| 944 | 899 | ||
| 945 | s->cache_bio_sectors = bio_sectors(miss) + reada; | 900 | s->cache_bio_sectors = bio_sectors(miss) + reada; |
| @@ -963,13 +918,12 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
| 963 | goto out_put; | 918 | goto out_put; |
| 964 | 919 | ||
| 965 | bch_bio_map(s->op.cache_bio, NULL); | 920 | bch_bio_map(s->op.cache_bio, NULL); |
| 966 | if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO)) | 921 | if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO)) |
| 967 | goto out_put; | 922 | goto out_put; |
| 968 | 923 | ||
| 969 | s->cache_miss = miss; | 924 | s->cache_miss = miss; |
| 970 | bio_get(s->op.cache_bio); | 925 | bio_get(s->op.cache_bio); |
| 971 | 926 | ||
| 972 | trace_bcache_cache_miss(s->orig_bio); | ||
| 973 | closure_bio_submit(s->op.cache_bio, &s->cl, s->d); | 927 | closure_bio_submit(s->op.cache_bio, &s->cl, s->d); |
| 974 | 928 | ||
| 975 | return ret; | 929 | return ret; |
| @@ -1002,24 +956,13 @@ static void cached_dev_write_complete(struct closure *cl) | |||
| 1002 | cached_dev_bio_complete(cl); | 956 | cached_dev_bio_complete(cl); |
| 1003 | } | 957 | } |
| 1004 | 958 | ||
| 1005 | static bool should_writeback(struct cached_dev *dc, struct bio *bio) | ||
| 1006 | { | ||
| 1007 | unsigned threshold = (bio->bi_rw & REQ_SYNC) | ||
| 1008 | ? CUTOFF_WRITEBACK_SYNC | ||
| 1009 | : CUTOFF_WRITEBACK; | ||
| 1010 | |||
| 1011 | return !atomic_read(&dc->disk.detaching) && | ||
| 1012 | cache_mode(dc, bio) == CACHE_MODE_WRITEBACK && | ||
| 1013 | dc->disk.c->gc_stats.in_use < threshold; | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | static void request_write(struct cached_dev *dc, struct search *s) | 959 | static void request_write(struct cached_dev *dc, struct search *s) |
| 1017 | { | 960 | { |
| 1018 | struct closure *cl = &s->cl; | 961 | struct closure *cl = &s->cl; |
| 1019 | struct bio *bio = &s->bio.bio; | 962 | struct bio *bio = &s->bio.bio; |
| 1020 | struct bkey start, end; | 963 | struct bkey start, end; |
| 1021 | start = KEY(dc->disk.id, bio->bi_sector, 0); | 964 | start = KEY(dc->disk.id, bio->bi_sector, 0); |
| 1022 | end = KEY(dc->disk.id, bio_end(bio), 0); | 965 | end = KEY(dc->disk.id, bio_end_sector(bio), 0); |
| 1023 | 966 | ||
| 1024 | bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end); | 967 | bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end); |
| 1025 | 968 | ||
| @@ -1034,22 +977,37 @@ static void request_write(struct cached_dev *dc, struct search *s) | |||
| 1034 | if (bio->bi_rw & REQ_DISCARD) | 977 | if (bio->bi_rw & REQ_DISCARD) |
| 1035 | goto skip; | 978 | goto skip; |
| 1036 | 979 | ||
| 980 | if (should_writeback(dc, s->orig_bio, | ||
| 981 | cache_mode(dc, bio), | ||
| 982 | s->op.skip)) { | ||
| 983 | s->op.skip = false; | ||
| 984 | s->writeback = true; | ||
| 985 | } | ||
| 986 | |||
| 1037 | if (s->op.skip) | 987 | if (s->op.skip) |
| 1038 | goto skip; | 988 | goto skip; |
| 1039 | 989 | ||
| 1040 | if (should_writeback(dc, s->orig_bio)) | 990 | trace_bcache_write(s->orig_bio, s->writeback, s->op.skip); |
| 1041 | s->writeback = true; | ||
| 1042 | 991 | ||
| 1043 | if (!s->writeback) { | 992 | if (!s->writeback) { |
| 1044 | s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, | 993 | s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, |
| 1045 | dc->disk.bio_split); | 994 | dc->disk.bio_split); |
| 1046 | 995 | ||
| 1047 | trace_bcache_writethrough(s->orig_bio); | ||
| 1048 | closure_bio_submit(bio, cl, s->d); | 996 | closure_bio_submit(bio, cl, s->d); |
| 1049 | } else { | 997 | } else { |
| 1050 | s->op.cache_bio = bio; | 998 | bch_writeback_add(dc); |
| 1051 | trace_bcache_writeback(s->orig_bio); | 999 | |
| 1052 | bch_writeback_add(dc, bio_sectors(bio)); | 1000 | if (s->op.flush_journal) { |
| 1001 | /* Also need to send a flush to the backing device */ | ||
| 1002 | s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, | ||
| 1003 | dc->disk.bio_split); | ||
| 1004 | |||
| 1005 | bio->bi_size = 0; | ||
| 1006 | bio->bi_vcnt = 0; | ||
| 1007 | closure_bio_submit(bio, cl, s->d); | ||
| 1008 | } else { | ||
| 1009 | s->op.cache_bio = bio; | ||
| 1010 | } | ||
| 1053 | } | 1011 | } |
| 1054 | out: | 1012 | out: |
| 1055 | closure_call(&s->op.cl, bch_insert_data, NULL, cl); | 1013 | closure_call(&s->op.cl, bch_insert_data, NULL, cl); |
| @@ -1058,7 +1016,6 @@ skip: | |||
| 1058 | s->op.skip = true; | 1016 | s->op.skip = true; |
| 1059 | s->op.cache_bio = s->orig_bio; | 1017 | s->op.cache_bio = s->orig_bio; |
| 1060 | bio_get(s->op.cache_bio); | 1018 | bio_get(s->op.cache_bio); |
| 1061 | trace_bcache_write_skip(s->orig_bio); | ||
| 1062 | 1019 | ||
| 1063 | if ((bio->bi_rw & REQ_DISCARD) && | 1020 | if ((bio->bi_rw & REQ_DISCARD) && |
| 1064 | !blk_queue_discard(bdev_get_queue(dc->bdev))) | 1021 | !blk_queue_discard(bdev_get_queue(dc->bdev))) |
| @@ -1088,9 +1045,10 @@ static void request_nodata(struct cached_dev *dc, struct search *s) | |||
| 1088 | 1045 | ||
| 1089 | /* Cached devices - read & write stuff */ | 1046 | /* Cached devices - read & write stuff */ |
| 1090 | 1047 | ||
| 1091 | int bch_get_congested(struct cache_set *c) | 1048 | unsigned bch_get_congested(struct cache_set *c) |
| 1092 | { | 1049 | { |
| 1093 | int i; | 1050 | int i; |
| 1051 | long rand; | ||
| 1094 | 1052 | ||
| 1095 | if (!c->congested_read_threshold_us && | 1053 | if (!c->congested_read_threshold_us && |
| 1096 | !c->congested_write_threshold_us) | 1054 | !c->congested_write_threshold_us) |
| @@ -1106,7 +1064,13 @@ int bch_get_congested(struct cache_set *c) | |||
| 1106 | 1064 | ||
| 1107 | i += CONGESTED_MAX; | 1065 | i += CONGESTED_MAX; |
| 1108 | 1066 | ||
| 1109 | return i <= 0 ? 1 : fract_exp_two(i, 6); | 1067 | if (i > 0) |
| 1068 | i = fract_exp_two(i, 6); | ||
| 1069 | |||
| 1070 | rand = get_random_int(); | ||
| 1071 | i -= bitmap_weight(&rand, BITS_PER_LONG); | ||
| 1072 | |||
| 1073 | return i > 0 ? i : 1; | ||
| 1110 | } | 1074 | } |
| 1111 | 1075 | ||
| 1112 | static void add_sequential(struct task_struct *t) | 1076 | static void add_sequential(struct task_struct *t) |
| @@ -1126,10 +1090,8 @@ static void check_should_skip(struct cached_dev *dc, struct search *s) | |||
| 1126 | { | 1090 | { |
| 1127 | struct cache_set *c = s->op.c; | 1091 | struct cache_set *c = s->op.c; |
| 1128 | struct bio *bio = &s->bio.bio; | 1092 | struct bio *bio = &s->bio.bio; |
| 1129 | |||
| 1130 | long rand; | ||
| 1131 | int cutoff = bch_get_congested(c); | ||
| 1132 | unsigned mode = cache_mode(dc, bio); | 1093 | unsigned mode = cache_mode(dc, bio); |
| 1094 | unsigned sectors, congested = bch_get_congested(c); | ||
| 1133 | 1095 | ||
| 1134 | if (atomic_read(&dc->disk.detaching) || | 1096 | if (atomic_read(&dc->disk.detaching) || |
| 1135 | c->gc_stats.in_use > CUTOFF_CACHE_ADD || | 1097 | c->gc_stats.in_use > CUTOFF_CACHE_ADD || |
| @@ -1147,17 +1109,14 @@ static void check_should_skip(struct cached_dev *dc, struct search *s) | |||
| 1147 | goto skip; | 1109 | goto skip; |
| 1148 | } | 1110 | } |
| 1149 | 1111 | ||
| 1150 | if (!cutoff) { | 1112 | if (!congested && !dc->sequential_cutoff) |
| 1151 | cutoff = dc->sequential_cutoff >> 9; | 1113 | goto rescale; |
| 1152 | 1114 | ||
| 1153 | if (!cutoff) | 1115 | if (!congested && |
| 1154 | goto rescale; | 1116 | mode == CACHE_MODE_WRITEBACK && |
| 1155 | 1117 | (bio->bi_rw & REQ_WRITE) && | |
| 1156 | if (mode == CACHE_MODE_WRITEBACK && | 1118 | (bio->bi_rw & REQ_SYNC)) |
| 1157 | (bio->bi_rw & REQ_WRITE) && | 1119 | goto rescale; |
| 1158 | (bio->bi_rw & REQ_SYNC)) | ||
| 1159 | goto rescale; | ||
| 1160 | } | ||
| 1161 | 1120 | ||
| 1162 | if (dc->sequential_merge) { | 1121 | if (dc->sequential_merge) { |
| 1163 | struct io *i; | 1122 | struct io *i; |
| @@ -1177,7 +1136,7 @@ found: | |||
| 1177 | if (i->sequential + bio->bi_size > i->sequential) | 1136 | if (i->sequential + bio->bi_size > i->sequential) |
| 1178 | i->sequential += bio->bi_size; | 1137 | i->sequential += bio->bi_size; |
| 1179 | 1138 | ||
| 1180 | i->last = bio_end(bio); | 1139 | i->last = bio_end_sector(bio); |
| 1181 | i->jiffies = jiffies + msecs_to_jiffies(5000); | 1140 | i->jiffies = jiffies + msecs_to_jiffies(5000); |
| 1182 | s->task->sequential_io = i->sequential; | 1141 | s->task->sequential_io = i->sequential; |
| 1183 | 1142 | ||
| @@ -1192,12 +1151,19 @@ found: | |||
| 1192 | add_sequential(s->task); | 1151 | add_sequential(s->task); |
| 1193 | } | 1152 | } |
| 1194 | 1153 | ||
| 1195 | rand = get_random_int(); | 1154 | sectors = max(s->task->sequential_io, |
| 1196 | cutoff -= bitmap_weight(&rand, BITS_PER_LONG); | 1155 | s->task->sequential_io_avg) >> 9; |
| 1197 | 1156 | ||
| 1198 | if (cutoff <= (int) (max(s->task->sequential_io, | 1157 | if (dc->sequential_cutoff && |
| 1199 | s->task->sequential_io_avg) >> 9)) | 1158 | sectors >= dc->sequential_cutoff >> 9) { |
| 1159 | trace_bcache_bypass_sequential(s->orig_bio); | ||
| 1200 | goto skip; | 1160 | goto skip; |
| 1161 | } | ||
| 1162 | |||
| 1163 | if (congested && sectors >= congested) { | ||
| 1164 | trace_bcache_bypass_congested(s->orig_bio); | ||
| 1165 | goto skip; | ||
| 1166 | } | ||
| 1201 | 1167 | ||
| 1202 | rescale: | 1168 | rescale: |
| 1203 | bch_rescale_priorities(c, bio_sectors(bio)); | 1169 | bch_rescale_priorities(c, bio_sectors(bio)); |
| @@ -1288,30 +1254,25 @@ void bch_cached_dev_request_init(struct cached_dev *dc) | |||
| 1288 | static int flash_dev_cache_miss(struct btree *b, struct search *s, | 1254 | static int flash_dev_cache_miss(struct btree *b, struct search *s, |
| 1289 | struct bio *bio, unsigned sectors) | 1255 | struct bio *bio, unsigned sectors) |
| 1290 | { | 1256 | { |
| 1257 | struct bio_vec *bv; | ||
| 1258 | int i; | ||
| 1259 | |||
| 1291 | /* Zero fill bio */ | 1260 | /* Zero fill bio */ |
| 1292 | 1261 | ||
| 1293 | while (bio->bi_idx != bio->bi_vcnt) { | 1262 | bio_for_each_segment(bv, bio, i) { |
| 1294 | struct bio_vec *bv = bio_iovec(bio); | ||
| 1295 | unsigned j = min(bv->bv_len >> 9, sectors); | 1263 | unsigned j = min(bv->bv_len >> 9, sectors); |
| 1296 | 1264 | ||
| 1297 | void *p = kmap(bv->bv_page); | 1265 | void *p = kmap(bv->bv_page); |
| 1298 | memset(p + bv->bv_offset, 0, j << 9); | 1266 | memset(p + bv->bv_offset, 0, j << 9); |
| 1299 | kunmap(bv->bv_page); | 1267 | kunmap(bv->bv_page); |
| 1300 | 1268 | ||
| 1301 | bv->bv_len -= j << 9; | 1269 | sectors -= j; |
| 1302 | bv->bv_offset += j << 9; | ||
| 1303 | |||
| 1304 | if (bv->bv_len) | ||
| 1305 | return 0; | ||
| 1306 | |||
| 1307 | bio->bi_sector += j; | ||
| 1308 | bio->bi_size -= j << 9; | ||
| 1309 | |||
| 1310 | bio->bi_idx++; | ||
| 1311 | sectors -= j; | ||
| 1312 | } | 1270 | } |
| 1313 | 1271 | ||
| 1314 | s->op.lookup_done = true; | 1272 | bio_advance(bio, min(sectors << 9, bio->bi_size)); |
| 1273 | |||
| 1274 | if (!bio->bi_size) | ||
| 1275 | s->op.lookup_done = true; | ||
| 1315 | 1276 | ||
| 1316 | return 0; | 1277 | return 0; |
| 1317 | } | 1278 | } |
| @@ -1338,8 +1299,8 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) | |||
| 1338 | closure_call(&s->op.cl, btree_read_async, NULL, cl); | 1299 | closure_call(&s->op.cl, btree_read_async, NULL, cl); |
| 1339 | } else if (bio_has_data(bio) || s->op.skip) { | 1300 | } else if (bio_has_data(bio) || s->op.skip) { |
| 1340 | bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, | 1301 | bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, |
| 1341 | &KEY(d->id, bio->bi_sector, 0), | 1302 | &KEY(d->id, bio->bi_sector, 0), |
| 1342 | &KEY(d->id, bio_end(bio), 0)); | 1303 | &KEY(d->id, bio_end_sector(bio), 0)); |
| 1343 | 1304 | ||
| 1344 | s->writeback = true; | 1305 | s->writeback = true; |
| 1345 | s->op.cache_bio = bio; | 1306 | s->op.cache_bio = bio; |
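`bch_get_congested()` now returns a sector count rather than a loosely typed int, and the random popcount subtraction (mean `BITS_PER_LONG / 2`) moves inside it, jittering the threshold so bypass decisions are probabilistic near the boundary. The resulting decision in `check_should_skip()`, condensed into a hypothetical helper:

```c
/* Sketch only: condenses the cutoff checks from the hunk above. */
static bool should_bypass(struct cached_dev *dc, struct cache_set *c,
			  struct task_struct *task)
{
	unsigned sectors = max(task->sequential_io,
			       task->sequential_io_avg) >> 9;
	unsigned congested = bch_get_congested(c);  /* jittered, sectors */

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9)
		return true;    /* long sequential stream: bypass cache */

	return congested && sectors >= congested;   /* congested cache  */
}
```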
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 254d9ab5707c..57dc4784f4f4 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h | |||
| @@ -30,7 +30,7 @@ struct search { | |||
| 30 | }; | 30 | }; |
| 31 | 31 | ||
| 32 | void bch_cache_read_endio(struct bio *, int); | 32 | void bch_cache_read_endio(struct bio *, int); |
| 33 | int bch_get_congested(struct cache_set *); | 33 | unsigned bch_get_congested(struct cache_set *); |
| 34 | void bch_insert_data(struct closure *cl); | 34 | void bch_insert_data(struct closure *cl); |
| 35 | void bch_btree_insert_async(struct closure *); | 35 | void bch_btree_insert_async(struct closure *); |
| 36 | void bch_cache_read_endio(struct bio *, int); | 36 | void bch_cache_read_endio(struct bio *, int); |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f88e2b653a3f..547c4c57b052 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
| @@ -10,10 +10,13 @@ | |||
| 10 | #include "btree.h" | 10 | #include "btree.h" |
| 11 | #include "debug.h" | 11 | #include "debug.h" |
| 12 | #include "request.h" | 12 | #include "request.h" |
| 13 | #include "writeback.h" | ||
| 13 | 14 | ||
| 15 | #include <linux/blkdev.h> | ||
| 14 | #include <linux/buffer_head.h> | 16 | #include <linux/buffer_head.h> |
| 15 | #include <linux/debugfs.h> | 17 | #include <linux/debugfs.h> |
| 16 | #include <linux/genhd.h> | 18 | #include <linux/genhd.h> |
| 19 | #include <linux/kthread.h> | ||
| 17 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 18 | #include <linux/random.h> | 21 | #include <linux/random.h> |
| 19 | #include <linux/reboot.h> | 22 | #include <linux/reboot.h> |
| @@ -342,6 +345,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw, | |||
| 342 | struct closure *cl = &c->uuid_write.cl; | 345 | struct closure *cl = &c->uuid_write.cl; |
| 343 | struct uuid_entry *u; | 346 | struct uuid_entry *u; |
| 344 | unsigned i; | 347 | unsigned i; |
| 348 | char buf[80]; | ||
| 345 | 349 | ||
| 346 | BUG_ON(!parent); | 350 | BUG_ON(!parent); |
| 347 | closure_lock(&c->uuid_write, parent); | 351 | closure_lock(&c->uuid_write, parent); |
| @@ -362,8 +366,8 @@ static void uuid_io(struct cache_set *c, unsigned long rw, | |||
| 362 | break; | 366 | break; |
| 363 | } | 367 | } |
| 364 | 368 | ||
| 365 | pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", | 369 | bch_bkey_to_text(buf, sizeof(buf), k); |
| 366 | pkey(&c->uuid_bucket)); | 370 | pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf); |
| 367 | 371 | ||
| 368 | for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) | 372 | for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) |
| 369 | if (!bch_is_zero(u->uuid, 16)) | 373 | if (!bch_is_zero(u->uuid, 16)) |
| @@ -543,7 +547,6 @@ void bch_prio_write(struct cache *ca) | |||
| 543 | 547 | ||
| 544 | pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), | 548 | pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), |
| 545 | fifo_used(&ca->free_inc), fifo_used(&ca->unused)); | 549 | fifo_used(&ca->free_inc), fifo_used(&ca->unused)); |
| 546 | blktrace_msg(ca, "Starting priorities: " buckets_free(ca)); | ||
| 547 | 550 | ||
| 548 | for (i = prio_buckets(ca) - 1; i >= 0; --i) { | 551 | for (i = prio_buckets(ca) - 1; i >= 0; --i) { |
| 549 | long bucket; | 552 | long bucket; |
| @@ -704,7 +707,8 @@ static void bcache_device_detach(struct bcache_device *d) | |||
| 704 | atomic_set(&d->detaching, 0); | 707 | atomic_set(&d->detaching, 0); |
| 705 | } | 708 | } |
| 706 | 709 | ||
| 707 | bcache_device_unlink(d); | 710 | if (!d->flush_done) |
| 711 | bcache_device_unlink(d); | ||
| 708 | 712 | ||
| 709 | d->c->devices[d->id] = NULL; | 713 | d->c->devices[d->id] = NULL; |
| 710 | closure_put(&d->c->caching); | 714 | closure_put(&d->c->caching); |
| @@ -743,13 +747,35 @@ static void bcache_device_free(struct bcache_device *d) | |||
| 743 | mempool_destroy(d->unaligned_bvec); | 747 | mempool_destroy(d->unaligned_bvec); |
| 744 | if (d->bio_split) | 748 | if (d->bio_split) |
| 745 | bioset_free(d->bio_split); | 749 | bioset_free(d->bio_split); |
| 750 | if (is_vmalloc_addr(d->stripe_sectors_dirty)) | ||
| 751 | vfree(d->stripe_sectors_dirty); | ||
| 752 | else | ||
| 753 | kfree(d->stripe_sectors_dirty); | ||
| 746 | 754 | ||
| 747 | closure_debug_destroy(&d->cl); | 755 | closure_debug_destroy(&d->cl); |
| 748 | } | 756 | } |
| 749 | 757 | ||
| 750 | static int bcache_device_init(struct bcache_device *d, unsigned block_size) | 758 | static int bcache_device_init(struct bcache_device *d, unsigned block_size, |
| 759 | sector_t sectors) | ||
| 751 | { | 760 | { |
| 752 | struct request_queue *q; | 761 | struct request_queue *q; |
| 762 | size_t n; | ||
| 763 | |||
| 764 | if (!d->stripe_size_bits) | ||
| 765 | d->stripe_size_bits = 31; | ||
| 766 | |||
| 767 | d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >> | ||
| 768 | d->stripe_size_bits; | ||
| 769 | |||
| 770 | if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) | ||
| 771 | return -ENOMEM; | ||
| 772 | |||
| 773 | n = d->nr_stripes * sizeof(atomic_t); | ||
| 774 | d->stripe_sectors_dirty = n < PAGE_SIZE << 6 | ||
| 775 | ? kzalloc(n, GFP_KERNEL) | ||
| 776 | : vzalloc(n); | ||
| 777 | if (!d->stripe_sectors_dirty) | ||
| 778 | return -ENOMEM; | ||
| 753 | 779 | ||
| 754 | if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || | 780 | if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || |
| 755 | !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, | 781 | !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, |
| @@ -759,6 +785,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size) | |||
| 759 | !(q = blk_alloc_queue(GFP_KERNEL))) | 785 | !(q = blk_alloc_queue(GFP_KERNEL))) |
| 760 | return -ENOMEM; | 786 | return -ENOMEM; |
| 761 | 787 | ||
| 788 | set_capacity(d->disk, sectors); | ||
| 762 | snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); | 789 | snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); |
| 763 | 790 | ||
| 764 | d->disk->major = bcache_major; | 791 | d->disk->major = bcache_major; |
| @@ -781,6 +808,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size) | |||
| 781 | set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); | 808 | set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); |
| 782 | set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); | 809 | set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); |
| 783 | 810 | ||
| 811 | blk_queue_flush(q, REQ_FLUSH|REQ_FUA); | ||
| 812 | |||
| 784 | return 0; | 813 | return 0; |
| 785 | } | 814 | } |
| 786 | 815 | ||
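bcache_device_init() now sizes the per-stripe dirty counters with an explicit overflow guard and picks the allocator by size: small arrays come from the slab, larger ones from vmalloc space, and bcache_device_free() matches with is_vmalloc_addr(). A standalone sketch of the pattern, keeping the 64-page threshold from the hunk:

    static atomic_t *alloc_stripe_counters(size_t nr_stripes)
    {
        size_t n;

        if (!nr_stripes || nr_stripes > SIZE_MAX / sizeof(atomic_t))
            return NULL;                    /* n would overflow */

        n = nr_stripes * sizeof(atomic_t);
        /* Small arrays from kzalloc(); anything bigger from vzalloc(). */
        return n < PAGE_SIZE << 6 ? kzalloc(n, GFP_KERNEL) : vzalloc(n);
    }

    static void free_stripe_counters(atomic_t *p)
    {
        /* Free with whichever allocator the pointer came from. */
        if (is_vmalloc_addr(p))
            vfree(p);
        else
            kfree(p);
    }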
| @@ -800,6 +829,17 @@ static void calc_cached_dev_sectors(struct cache_set *c) | |||
| 800 | void bch_cached_dev_run(struct cached_dev *dc) | 829 | void bch_cached_dev_run(struct cached_dev *dc) |
| 801 | { | 830 | { |
| 802 | struct bcache_device *d = &dc->disk; | 831 | struct bcache_device *d = &dc->disk; |
| 832 | char buf[SB_LABEL_SIZE + 1]; | ||
| 833 | char *env[] = { | ||
| 834 | "DRIVER=bcache", | ||
| 835 | kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), | ||
| 836 | NULL, | ||
| 837 | NULL, | ||
| 838 | }; | ||
| 839 | |||
| 840 | memcpy(buf, dc->sb.label, SB_LABEL_SIZE); | ||
| 841 | buf[SB_LABEL_SIZE] = '\0'; | ||
| 842 | env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); | ||
| 803 | 843 | ||
| 804 | if (atomic_xchg(&dc->running, 1)) | 844 | if (atomic_xchg(&dc->running, 1)) |
| 805 | return; | 845 | return; |
| @@ -816,10 +856,12 @@ void bch_cached_dev_run(struct cached_dev *dc) | |||
| 816 | 856 | ||
| 817 | add_disk(d->disk); | 857 | add_disk(d->disk); |
| 818 | bd_link_disk_holder(dc->bdev, dc->disk.disk); | 858 | bd_link_disk_holder(dc->bdev, dc->disk.disk); |
| 819 | #if 0 | 859 | /* won't show up in the uevent file, use udevadm monitor -e instead |
| 820 | char *env[] = { "SYMLINK=label" , NULL }; | 860 | * only class / kset properties are persistent */ |
| 821 | kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); | 861 | kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); |
| 822 | #endif | 862 | kfree(env[1]); |
| 863 | kfree(env[2]); | ||
| 864 | |||
| 823 | if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || | 865 | if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || |
| 824 | sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) | 866 | sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) |
| 825 | pr_debug("error creating sysfs link"); | 867 | pr_debug("error creating sysfs link"); |
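bch_cached_dev_run() replaces the long-dead #if 0 block with a real KOBJ_CHANGE uevent carrying the backing device's UUID and label. A sketch of the env-array idiom (the helper name is illustrative); the label is copied and terminated first because sb.label is a fixed-size field with no guaranteed NUL, and a failed kasprintf() leaves a NULL that simply ends the array early:

    static void announce_cached_dev(struct kobject *kobj, const u8 uuid[16],
                                    const char label[SB_LABEL_SIZE])
    {
        char buf[SB_LABEL_SIZE + 1];
        char *env[4] = { "DRIVER=bcache", NULL, NULL, NULL };

        memcpy(buf, label, SB_LABEL_SIZE);
        buf[SB_LABEL_SIZE] = '\0';

        env[1] = kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", uuid);
        env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
        kobject_uevent_env(kobj, KOBJ_CHANGE, env);
        kfree(env[1]);   /* kfree(NULL) is a no-op */
        kfree(env[2]);
    }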
| @@ -960,6 +1002,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) | |||
| 960 | atomic_set(&dc->count, 1); | 1002 | atomic_set(&dc->count, 1); |
| 961 | 1003 | ||
| 962 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { | 1004 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { |
| 1005 | bch_sectors_dirty_init(dc); | ||
| 963 | atomic_set(&dc->has_dirty, 1); | 1006 | atomic_set(&dc->has_dirty, 1); |
| 964 | atomic_inc(&dc->count); | 1007 | atomic_inc(&dc->count); |
| 965 | bch_writeback_queue(dc); | 1008 | bch_writeback_queue(dc); |
| @@ -1014,6 +1057,14 @@ static void cached_dev_flush(struct closure *cl) | |||
| 1014 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); | 1057 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); |
| 1015 | struct bcache_device *d = &dc->disk; | 1058 | struct bcache_device *d = &dc->disk; |
| 1016 | 1059 | ||
| 1060 | mutex_lock(&bch_register_lock); | ||
| 1061 | d->flush_done = 1; | ||
| 1062 | |||
| 1063 | if (d->c) | ||
| 1064 | bcache_device_unlink(d); | ||
| 1065 | |||
| 1066 | mutex_unlock(&bch_register_lock); | ||
| 1067 | |||
| 1017 | bch_cache_accounting_destroy(&dc->accounting); | 1068 | bch_cache_accounting_destroy(&dc->accounting); |
| 1018 | kobject_del(&d->kobj); | 1069 | kobject_del(&d->kobj); |
| 1019 | 1070 | ||
| @@ -1045,7 +1096,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) | |||
| 1045 | hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); | 1096 | hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); |
| 1046 | } | 1097 | } |
| 1047 | 1098 | ||
| 1048 | ret = bcache_device_init(&dc->disk, block_size); | 1099 | ret = bcache_device_init(&dc->disk, block_size, |
| 1100 | dc->bdev->bd_part->nr_sects - dc->sb.data_offset); | ||
| 1049 | if (ret) | 1101 | if (ret) |
| 1050 | return ret; | 1102 | return ret; |
| 1051 | 1103 | ||
| @@ -1144,11 +1196,10 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) | |||
| 1144 | 1196 | ||
| 1145 | kobject_init(&d->kobj, &bch_flash_dev_ktype); | 1197 | kobject_init(&d->kobj, &bch_flash_dev_ktype); |
| 1146 | 1198 | ||
| 1147 | if (bcache_device_init(d, block_bytes(c))) | 1199 | if (bcache_device_init(d, block_bytes(c), u->sectors)) |
| 1148 | goto err; | 1200 | goto err; |
| 1149 | 1201 | ||
| 1150 | bcache_device_attach(d, c, u - c->uuids); | 1202 | bcache_device_attach(d, c, u - c->uuids); |
| 1151 | set_capacity(d->disk, u->sectors); | ||
| 1152 | bch_flash_dev_request_init(d); | 1203 | bch_flash_dev_request_init(d); |
| 1153 | add_disk(d->disk); | 1204 | add_disk(d->disk); |
| 1154 | 1205 | ||
| @@ -1255,9 +1306,10 @@ static void cache_set_free(struct closure *cl) | |||
| 1255 | free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); | 1306 | free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); |
| 1256 | free_pages((unsigned long) c->sort, ilog2(bucket_pages(c))); | 1307 | free_pages((unsigned long) c->sort, ilog2(bucket_pages(c))); |
| 1257 | 1308 | ||
| 1258 | kfree(c->fill_iter); | ||
| 1259 | if (c->bio_split) | 1309 | if (c->bio_split) |
| 1260 | bioset_free(c->bio_split); | 1310 | bioset_free(c->bio_split); |
| 1311 | if (c->fill_iter) | ||
| 1312 | mempool_destroy(c->fill_iter); | ||
| 1261 | if (c->bio_meta) | 1313 | if (c->bio_meta) |
| 1262 | mempool_destroy(c->bio_meta); | 1314 | mempool_destroy(c->bio_meta); |
| 1263 | if (c->search) | 1315 | if (c->search) |
| @@ -1278,11 +1330,9 @@ static void cache_set_free(struct closure *cl) | |||
| 1278 | static void cache_set_flush(struct closure *cl) | 1330 | static void cache_set_flush(struct closure *cl) |
| 1279 | { | 1331 | { |
| 1280 | struct cache_set *c = container_of(cl, struct cache_set, caching); | 1332 | struct cache_set *c = container_of(cl, struct cache_set, caching); |
| 1333 | struct cache *ca; | ||
| 1281 | struct btree *b; | 1334 | struct btree *b; |
| 1282 | 1335 | unsigned i; | |
| 1283 | /* Shut down allocator threads */ | ||
| 1284 | set_bit(CACHE_SET_STOPPING_2, &c->flags); | ||
| 1285 | wake_up(&c->alloc_wait); | ||
| 1286 | 1336 | ||
| 1287 | bch_cache_accounting_destroy(&c->accounting); | 1337 | bch_cache_accounting_destroy(&c->accounting); |
| 1288 | 1338 | ||
| @@ -1295,7 +1345,11 @@ static void cache_set_flush(struct closure *cl) | |||
| 1295 | /* Should skip this if we're unregistering because of an error */ | 1345 | /* Should skip this if we're unregistering because of an error */ |
| 1296 | list_for_each_entry(b, &c->btree_cache, list) | 1346 | list_for_each_entry(b, &c->btree_cache, list) |
| 1297 | if (btree_node_dirty(b)) | 1347 | if (btree_node_dirty(b)) |
| 1298 | bch_btree_write(b, true, NULL); | 1348 | bch_btree_node_write(b, NULL); |
| 1349 | |||
| 1350 | for_each_cache(ca, c, i) | ||
| 1351 | if (ca->alloc_thread) | ||
| 1352 | kthread_stop(ca->alloc_thread); | ||
| 1299 | 1353 | ||
| 1300 | closure_return(cl); | 1354 | closure_return(cl); |
| 1301 | } | 1355 | } |
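cache_set_flush() stopping ca->alloc_thread with kthread_stop() marks the allocator's move from a closure on a private workqueue to a plain kthread (the alloc_workqueue removal further down matches). One plausible shape for the bch_cache_allocator_start() calls seen in run_cache_set() below, assuming bch_allocator_thread() now has the int (*)(void *) kthread signature and polls kthread_should_stop():

    static int cache_allocator_start(struct cache *ca)
    {
        struct task_struct *k = kthread_run(bch_allocator_thread, ca,
                                            "bcache_allocator");
        if (IS_ERR(k))
            return PTR_ERR(k);

        ca->alloc_thread = k;
        return 0;        /* kthread_stop(ca->alloc_thread) tears it down */
    }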
| @@ -1303,18 +1357,22 @@ static void cache_set_flush(struct closure *cl) | |||
| 1303 | static void __cache_set_unregister(struct closure *cl) | 1357 | static void __cache_set_unregister(struct closure *cl) |
| 1304 | { | 1358 | { |
| 1305 | struct cache_set *c = container_of(cl, struct cache_set, caching); | 1359 | struct cache_set *c = container_of(cl, struct cache_set, caching); |
| 1306 | struct cached_dev *dc, *t; | 1360 | struct cached_dev *dc; |
| 1307 | size_t i; | 1361 | size_t i; |
| 1308 | 1362 | ||
| 1309 | mutex_lock(&bch_register_lock); | 1363 | mutex_lock(&bch_register_lock); |
| 1310 | 1364 | ||
| 1311 | if (test_bit(CACHE_SET_UNREGISTERING, &c->flags)) | ||
| 1312 | list_for_each_entry_safe(dc, t, &c->cached_devs, list) | ||
| 1313 | bch_cached_dev_detach(dc); | ||
| 1314 | |||
| 1315 | for (i = 0; i < c->nr_uuids; i++) | 1365 | for (i = 0; i < c->nr_uuids; i++) |
| 1316 | if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i])) | 1366 | if (c->devices[i]) { |
| 1317 | bcache_device_stop(c->devices[i]); | 1367 | if (!UUID_FLASH_ONLY(&c->uuids[i]) && |
| 1368 | test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { | ||
| 1369 | dc = container_of(c->devices[i], | ||
| 1370 | struct cached_dev, disk); | ||
| 1371 | bch_cached_dev_detach(dc); | ||
| 1372 | } else { | ||
| 1373 | bcache_device_stop(c->devices[i]); | ||
| 1374 | } | ||
| 1375 | } | ||
| 1318 | 1376 | ||
| 1319 | mutex_unlock(&bch_register_lock); | 1377 | mutex_unlock(&bch_register_lock); |
| 1320 | 1378 | ||
| @@ -1373,9 +1431,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) | |||
| 1373 | c->btree_pages = max_t(int, c->btree_pages / 4, | 1431 | c->btree_pages = max_t(int, c->btree_pages / 4, |
| 1374 | BTREE_MAX_PAGES); | 1432 | BTREE_MAX_PAGES); |
| 1375 | 1433 | ||
| 1376 | init_waitqueue_head(&c->alloc_wait); | 1434 | c->sort_crit_factor = int_sqrt(c->btree_pages); |
| 1435 | |||
| 1377 | mutex_init(&c->bucket_lock); | 1436 | mutex_init(&c->bucket_lock); |
| 1378 | mutex_init(&c->fill_lock); | ||
| 1379 | mutex_init(&c->sort_lock); | 1437 | mutex_init(&c->sort_lock); |
| 1380 | spin_lock_init(&c->sort_time_lock); | 1438 | spin_lock_init(&c->sort_time_lock); |
| 1381 | closure_init_unlocked(&c->sb_write); | 1439 | closure_init_unlocked(&c->sb_write); |
| @@ -1401,8 +1459,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) | |||
| 1401 | !(c->bio_meta = mempool_create_kmalloc_pool(2, | 1459 | !(c->bio_meta = mempool_create_kmalloc_pool(2, |
| 1402 | sizeof(struct bbio) + sizeof(struct bio_vec) * | 1460 | sizeof(struct bbio) + sizeof(struct bio_vec) * |
| 1403 | bucket_pages(c))) || | 1461 | bucket_pages(c))) || |
| 1462 | !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || | ||
| 1404 | !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || | 1463 | !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || |
| 1405 | !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) || | ||
| 1406 | !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) || | 1464 | !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) || |
| 1407 | !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || | 1465 | !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || |
| 1408 | bch_journal_alloc(c) || | 1466 | bch_journal_alloc(c) || |
| @@ -1410,8 +1468,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) | |||
| 1410 | bch_open_buckets_alloc(c)) | 1468 | bch_open_buckets_alloc(c)) |
| 1411 | goto err; | 1469 | goto err; |
| 1412 | 1470 | ||
| 1413 | c->fill_iter->size = sb->bucket_size / sb->block_size; | ||
| 1414 | |||
| 1415 | c->congested_read_threshold_us = 2000; | 1471 | c->congested_read_threshold_us = 2000; |
| 1416 | c->congested_write_threshold_us = 20000; | 1472 | c->congested_write_threshold_us = 20000; |
| 1417 | c->error_limit = 8 << IO_ERROR_SHIFT; | 1473 | c->error_limit = 8 << IO_ERROR_SHIFT; |
| @@ -1496,9 +1552,10 @@ static void run_cache_set(struct cache_set *c) | |||
| 1496 | */ | 1552 | */ |
| 1497 | bch_journal_next(&c->journal); | 1553 | bch_journal_next(&c->journal); |
| 1498 | 1554 | ||
| 1555 | err = "error starting allocator thread"; | ||
| 1499 | for_each_cache(ca, c, i) | 1556 | for_each_cache(ca, c, i) |
| 1500 | closure_call(&ca->alloc, bch_allocator_thread, | 1557 | if (bch_cache_allocator_start(ca)) |
| 1501 | system_wq, &c->cl); | 1558 | goto err; |
| 1502 | 1559 | ||
| 1503 | /* | 1560 | /* |
| 1504 | * First place it's safe to allocate: btree_check() and | 1561 | * First place it's safe to allocate: btree_check() and |
| @@ -1531,17 +1588,16 @@ static void run_cache_set(struct cache_set *c) | |||
| 1531 | 1588 | ||
| 1532 | bch_btree_gc_finish(c); | 1589 | bch_btree_gc_finish(c); |
| 1533 | 1590 | ||
| 1591 | err = "error starting allocator thread"; | ||
| 1534 | for_each_cache(ca, c, i) | 1592 | for_each_cache(ca, c, i) |
| 1535 | closure_call(&ca->alloc, bch_allocator_thread, | 1593 | if (bch_cache_allocator_start(ca)) |
| 1536 | ca->alloc_workqueue, &c->cl); | 1594 | goto err; |
| 1537 | 1595 | ||
| 1538 | mutex_lock(&c->bucket_lock); | 1596 | mutex_lock(&c->bucket_lock); |
| 1539 | for_each_cache(ca, c, i) | 1597 | for_each_cache(ca, c, i) |
| 1540 | bch_prio_write(ca); | 1598 | bch_prio_write(ca); |
| 1541 | mutex_unlock(&c->bucket_lock); | 1599 | mutex_unlock(&c->bucket_lock); |
| 1542 | 1600 | ||
| 1543 | wake_up(&c->alloc_wait); | ||
| 1544 | |||
| 1545 | err = "cannot allocate new UUID bucket"; | 1601 | err = "cannot allocate new UUID bucket"; |
| 1546 | if (__uuid_write(c)) | 1602 | if (__uuid_write(c)) |
| 1547 | goto err_unlock_gc; | 1603 | goto err_unlock_gc; |
| @@ -1552,7 +1608,7 @@ static void run_cache_set(struct cache_set *c) | |||
| 1552 | goto err_unlock_gc; | 1608 | goto err_unlock_gc; |
| 1553 | 1609 | ||
| 1554 | bkey_copy_key(&c->root->key, &MAX_KEY); | 1610 | bkey_copy_key(&c->root->key, &MAX_KEY); |
| 1555 | bch_btree_write(c->root, true, &op); | 1611 | bch_btree_node_write(c->root, &op.cl); |
| 1556 | 1612 | ||
| 1557 | bch_btree_set_root(c->root); | 1613 | bch_btree_set_root(c->root); |
| 1558 | rw_unlock(true, c->root); | 1614 | rw_unlock(true, c->root); |
| @@ -1673,9 +1729,6 @@ void bch_cache_release(struct kobject *kobj) | |||
| 1673 | 1729 | ||
| 1674 | bio_split_pool_free(&ca->bio_split_hook); | 1730 | bio_split_pool_free(&ca->bio_split_hook); |
| 1675 | 1731 | ||
| 1676 | if (ca->alloc_workqueue) | ||
| 1677 | destroy_workqueue(ca->alloc_workqueue); | ||
| 1678 | |||
| 1679 | free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); | 1732 | free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); |
| 1680 | kfree(ca->prio_buckets); | 1733 | kfree(ca->prio_buckets); |
| 1681 | vfree(ca->buckets); | 1734 | vfree(ca->buckets); |
| @@ -1723,7 +1776,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) | |||
| 1723 | !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * | 1776 | !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * |
| 1724 | 2, GFP_KERNEL)) || | 1777 | 2, GFP_KERNEL)) || |
| 1725 | !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || | 1778 | !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || |
| 1726 | !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || | ||
| 1727 | bio_split_pool_init(&ca->bio_split_hook)) | 1779 | bio_split_pool_init(&ca->bio_split_hook)) |
| 1728 | return -ENOMEM; | 1780 | return -ENOMEM; |
| 1729 | 1781 | ||
| @@ -1786,6 +1838,36 @@ static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, | |||
| 1786 | kobj_attribute_write(register, register_bcache); | 1838 | kobj_attribute_write(register, register_bcache); |
| 1787 | kobj_attribute_write(register_quiet, register_bcache); | 1839 | kobj_attribute_write(register_quiet, register_bcache); |
| 1788 | 1840 | ||
| 1841 | static bool bch_is_open_backing(struct block_device *bdev) { | ||
| 1842 | struct cache_set *c, *tc; | ||
| 1843 | struct cached_dev *dc, *t; | ||
| 1844 | |||
| 1845 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | ||
| 1846 | list_for_each_entry_safe(dc, t, &c->cached_devs, list) | ||
| 1847 | if (dc->bdev == bdev) | ||
| 1848 | return true; | ||
| 1849 | list_for_each_entry_safe(dc, t, &uncached_devices, list) | ||
| 1850 | if (dc->bdev == bdev) | ||
| 1851 | return true; | ||
| 1852 | return false; | ||
| 1853 | } | ||
| 1854 | |||
| 1855 | static bool bch_is_open_cache(struct block_device *bdev) { | ||
| 1856 | struct cache_set *c, *tc; | ||
| 1857 | struct cache *ca; | ||
| 1858 | unsigned i; | ||
| 1859 | |||
| 1860 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | ||
| 1861 | for_each_cache(ca, c, i) | ||
| 1862 | if (ca->bdev == bdev) | ||
| 1863 | return true; | ||
| 1864 | return false; | ||
| 1865 | } | ||
| 1866 | |||
| 1867 | static bool bch_is_open(struct block_device *bdev) { | ||
| 1868 | return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); | ||
| 1869 | } | ||
| 1870 | |||
| 1789 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | 1871 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
| 1790 | const char *buffer, size_t size) | 1872 | const char *buffer, size_t size) |
| 1791 | { | 1873 | { |
| @@ -1810,8 +1892,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | |||
| 1810 | FMODE_READ|FMODE_WRITE|FMODE_EXCL, | 1892 | FMODE_READ|FMODE_WRITE|FMODE_EXCL, |
| 1811 | sb); | 1893 | sb); |
| 1812 | if (IS_ERR(bdev)) { | 1894 | if (IS_ERR(bdev)) { |
| 1813 | if (bdev == ERR_PTR(-EBUSY)) | 1895 | if (bdev == ERR_PTR(-EBUSY)) { |
| 1814 | err = "device busy"; | 1896 | bdev = lookup_bdev(strim(path)); |
| 1897 | if (!IS_ERR(bdev) && bch_is_open(bdev)) | ||
| 1898 | err = "device already registered"; | ||
| 1899 | else | ||
| 1900 | err = "device busy"; | ||
| 1901 | } | ||
| 1815 | goto err; | 1902 | goto err; |
| 1816 | } | 1903 | } |
| 1817 | 1904 | ||
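The new helpers let register_bcache() tell apart two causes of -EBUSY from the exclusive open: another holder, or bcache itself already owning the device. The pattern generalizes to any driver holding block devices with FMODE_EXCL; here driver_owns_bdev() is a hypothetical stand-in for the bch_is_open() scan above:

    static const char *classify_ebusy(const char *path)
    {
        struct block_device *bdev = lookup_bdev(path);

        if (!IS_ERR(bdev) && driver_owns_bdev(bdev))
            return "device already registered";
        return "device busy";
    }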
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 4d9cca47e4c6..12a2c2846f99 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
| @@ -9,7 +9,9 @@ | |||
| 9 | #include "sysfs.h" | 9 | #include "sysfs.h" |
| 10 | #include "btree.h" | 10 | #include "btree.h" |
| 11 | #include "request.h" | 11 | #include "request.h" |
| 12 | #include "writeback.h" | ||
| 12 | 13 | ||
| 14 | #include <linux/blkdev.h> | ||
| 13 | #include <linux/sort.h> | 15 | #include <linux/sort.h> |
| 14 | 16 | ||
| 15 | static const char * const cache_replacement_policies[] = { | 17 | static const char * const cache_replacement_policies[] = { |
| @@ -79,6 +81,9 @@ rw_attribute(writeback_rate_p_term_inverse); | |||
| 79 | rw_attribute(writeback_rate_d_smooth); | 81 | rw_attribute(writeback_rate_d_smooth); |
| 80 | read_attribute(writeback_rate_debug); | 82 | read_attribute(writeback_rate_debug); |
| 81 | 83 | ||
| 84 | read_attribute(stripe_size); | ||
| 85 | read_attribute(partial_stripes_expensive); | ||
| 86 | |||
| 82 | rw_attribute(synchronous); | 87 | rw_attribute(synchronous); |
| 83 | rw_attribute(journal_delay_ms); | 88 | rw_attribute(journal_delay_ms); |
| 84 | rw_attribute(discard); | 89 | rw_attribute(discard); |
| @@ -127,7 +132,7 @@ SHOW(__bch_cached_dev) | |||
| 127 | char derivative[20]; | 132 | char derivative[20]; |
| 128 | char target[20]; | 133 | char target[20]; |
| 129 | bch_hprint(dirty, | 134 | bch_hprint(dirty, |
| 130 | atomic_long_read(&dc->disk.sectors_dirty) << 9); | 135 | bcache_dev_sectors_dirty(&dc->disk) << 9); |
| 131 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); | 136 | bch_hprint(derivative, dc->writeback_rate_derivative << 9); |
| 132 | bch_hprint(target, dc->writeback_rate_target << 9); | 137 | bch_hprint(target, dc->writeback_rate_target << 9); |
| 133 | 138 | ||
| @@ -143,7 +148,10 @@ SHOW(__bch_cached_dev) | |||
| 143 | } | 148 | } |
| 144 | 149 | ||
| 145 | sysfs_hprint(dirty_data, | 150 | sysfs_hprint(dirty_data, |
| 146 | atomic_long_read(&dc->disk.sectors_dirty) << 9); | 151 | bcache_dev_sectors_dirty(&dc->disk) << 9); |
| 152 | |||
| 153 | sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9); | ||
| 154 | var_printf(partial_stripes_expensive, "%u"); | ||
| 147 | 155 | ||
| 148 | var_printf(sequential_merge, "%i"); | 156 | var_printf(sequential_merge, "%i"); |
| 149 | var_hprint(sequential_cutoff); | 157 | var_hprint(sequential_cutoff); |
| @@ -170,6 +178,7 @@ STORE(__cached_dev) | |||
| 170 | disk.kobj); | 178 | disk.kobj); |
| 171 | unsigned v = size; | 179 | unsigned v = size; |
| 172 | struct cache_set *c; | 180 | struct cache_set *c; |
| 181 | struct kobj_uevent_env *env; | ||
| 173 | 182 | ||
| 174 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) | 183 | #define d_strtoul(var) sysfs_strtoul(var, dc->var) |
| 175 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) | 184 | #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) |
| @@ -214,6 +223,7 @@ STORE(__cached_dev) | |||
| 214 | } | 223 | } |
| 215 | 224 | ||
| 216 | if (attr == &sysfs_label) { | 225 | if (attr == &sysfs_label) { |
| 226 | /* note: endlines are preserved */ | ||
| 217 | memcpy(dc->sb.label, buf, SB_LABEL_SIZE); | 227 | memcpy(dc->sb.label, buf, SB_LABEL_SIZE); |
| 218 | bch_write_bdev_super(dc, NULL); | 228 | bch_write_bdev_super(dc, NULL); |
| 219 | if (dc->disk.c) { | 229 | if (dc->disk.c) { |
| @@ -221,6 +231,15 @@ STORE(__cached_dev) | |||
| 221 | buf, SB_LABEL_SIZE); | 231 | buf, SB_LABEL_SIZE); |
| 222 | bch_uuid_write(dc->disk.c); | 232 | bch_uuid_write(dc->disk.c); |
| 223 | } | 233 | } |
| 234 | env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); | ||
| 235 | if (!env) | ||
| 236 | return -ENOMEM; | ||
| 237 | add_uevent_var(env, "DRIVER=bcache"); | ||
| 238 | add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid), | ||
| 239 | add_uevent_var(env, "CACHED_LABEL=%s", buf); | ||
| 240 | kobject_uevent_env( | ||
| 241 | &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp); | ||
| 242 | kfree(env); | ||
| 224 | } | 243 | } |
| 225 | 244 | ||
| 226 | if (attr == &sysfs_attach) { | 245 | if (attr == &sysfs_attach) { |
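The label store emits the same CHANGE uevent as bch_cached_dev_run(), but via a heap-allocated struct kobj_uevent_env, which is roughly 2 KB and too large for the stack; add_uevent_var() handles formatting and bounds checking internally. Note the stray comma after the CACHED_UUID call in the hunk: it compiles as a comma operator and is harmless, but a semicolon was almost certainly intended. The idiom, with uuid and buf standing in for the values in scope:

    struct kobj_uevent_env *env = kzalloc(sizeof(*env), GFP_KERNEL);

    if (!env)
        return -ENOMEM;
    add_uevent_var(env, "DRIVER=bcache");
    add_uevent_var(env, "CACHED_UUID=%pU", uuid);
    add_uevent_var(env, "CACHED_LABEL=%s", buf);
    kobject_uevent_env(kobj, KOBJ_CHANGE, env->envp);
    kfree(env);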
| @@ -284,6 +303,8 @@ static struct attribute *bch_cached_dev_files[] = { | |||
| 284 | &sysfs_writeback_rate_d_smooth, | 303 | &sysfs_writeback_rate_d_smooth, |
| 285 | &sysfs_writeback_rate_debug, | 304 | &sysfs_writeback_rate_debug, |
| 286 | &sysfs_dirty_data, | 305 | &sysfs_dirty_data, |
| 306 | &sysfs_stripe_size, | ||
| 307 | &sysfs_partial_stripes_expensive, | ||
| 287 | &sysfs_sequential_cutoff, | 308 | &sysfs_sequential_cutoff, |
| 288 | &sysfs_sequential_merge, | 309 | &sysfs_sequential_merge, |
| 289 | &sysfs_clear_stats, | 310 | &sysfs_clear_stats, |
| @@ -665,12 +686,10 @@ SHOW(__bch_cache) | |||
| 665 | int cmp(const void *l, const void *r) | 686 | int cmp(const void *l, const void *r) |
| 666 | { return *((uint16_t *) r) - *((uint16_t *) l); } | 687 | { return *((uint16_t *) r) - *((uint16_t *) l); } |
| 667 | 688 | ||
| 668 | /* Number of quantiles we compute */ | ||
| 669 | const unsigned nq = 31; | ||
| 670 | |||
| 671 | size_t n = ca->sb.nbuckets, i, unused, btree; | 689 | size_t n = ca->sb.nbuckets, i, unused, btree; |
| 672 | uint64_t sum = 0; | 690 | uint64_t sum = 0; |
| 673 | uint16_t q[nq], *p, *cached; | 691 | /* Compute 31 quantiles */ |
| 692 | uint16_t q[31], *p, *cached; | ||
| 674 | ssize_t ret; | 693 | ssize_t ret; |
| 675 | 694 | ||
| 676 | cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t)); | 695 | cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t)); |
| @@ -703,26 +722,29 @@ SHOW(__bch_cache) | |||
| 703 | if (n) | 722 | if (n) |
| 704 | do_div(sum, n); | 723 | do_div(sum, n); |
| 705 | 724 | ||
| 706 | for (i = 0; i < nq; i++) | 725 | for (i = 0; i < ARRAY_SIZE(q); i++) |
| 707 | q[i] = INITIAL_PRIO - cached[n * (i + 1) / (nq + 1)]; | 726 | q[i] = INITIAL_PRIO - cached[n * (i + 1) / |
| 727 | (ARRAY_SIZE(q) + 1)]; | ||
| 708 | 728 | ||
| 709 | vfree(p); | 729 | vfree(p); |
| 710 | 730 | ||
| 711 | ret = snprintf(buf, PAGE_SIZE, | 731 | ret = scnprintf(buf, PAGE_SIZE, |
| 712 | "Unused: %zu%%\n" | 732 | "Unused: %zu%%\n" |
| 713 | "Metadata: %zu%%\n" | 733 | "Metadata: %zu%%\n" |
| 714 | "Average: %llu\n" | 734 | "Average: %llu\n" |
| 715 | "Sectors per Q: %zu\n" | 735 | "Sectors per Q: %zu\n" |
| 716 | "Quantiles: [", | 736 | "Quantiles: [", |
| 717 | unused * 100 / (size_t) ca->sb.nbuckets, | 737 | unused * 100 / (size_t) ca->sb.nbuckets, |
| 718 | btree * 100 / (size_t) ca->sb.nbuckets, sum, | 738 | btree * 100 / (size_t) ca->sb.nbuckets, sum, |
| 719 | n * ca->sb.bucket_size / (nq + 1)); | 739 | n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); |
| 720 | 740 | ||
| 721 | for (i = 0; i < nq && ret < (ssize_t) PAGE_SIZE; i++) | 741 | for (i = 0; i < ARRAY_SIZE(q); i++) |
| 722 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 742 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 723 | i < nq - 1 ? "%u " : "%u]\n", q[i]); | 743 | "%u ", q[i]); |
| 724 | 744 | ret--; | |
| 725 | buf[PAGE_SIZE - 1] = '\0'; | 745 | |
| 746 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n"); | ||
| 747 | |||
| 726 | return ret; | 748 | return ret; |
| 727 | } | 749 | } |
| 728 | 750 | ||
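Two quiet fixes in that hunk: replacing the `const unsigned nq` array bound with ARRAY_SIZE(q) removes a variable-length array, and switching snprintf() to scnprintf() makes the accumulation safe. snprintf() returns the length that would have been written, so `ret` could run past PAGE_SIZE and turn `buf + ret` into an out-of-bounds pointer; scnprintf() returns the bytes actually stored. The resulting idiom:

    ssize_t ret = 0;
    size_t i;

    for (i = 0; i < ARRAY_SIZE(q); i++)
        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%u ", q[i]);

    ret--;    /* overwrite the trailing space left by the loop */
    ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");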
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c index 983f9bb411bc..f7b6c197f90f 100644 --- a/drivers/md/bcache/trace.c +++ b/drivers/md/bcache/trace.c | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #include "btree.h" | 2 | #include "btree.h" |
| 3 | #include "request.h" | 3 | #include "request.h" |
| 4 | 4 | ||
| 5 | #include <linux/blktrace_api.h> | ||
| 5 | #include <linux/module.h> | 6 | #include <linux/module.h> |
| 6 | 7 | ||
| 7 | #define CREATE_TRACE_POINTS | 8 | #define CREATE_TRACE_POINTS |
| @@ -9,18 +10,44 @@ | |||
| 9 | 10 | ||
| 10 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start); | 11 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start); |
| 11 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end); | 12 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end); |
| 12 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_passthrough); | 13 | |
| 13 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_hit); | 14 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_sequential); |
| 14 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_miss); | 15 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_congested); |
| 16 | |||
| 17 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read); | ||
| 18 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write); | ||
| 15 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry); | 19 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry); |
| 16 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writethrough); | 20 | |
| 17 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback); | 21 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert); |
| 18 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_skip); | 22 | |
| 23 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_replay_key); | ||
| 24 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write); | ||
| 25 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_full); | ||
| 26 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_entry_full); | ||
| 27 | |||
| 28 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_cache_cannibalize); | ||
| 29 | |||
| 19 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read); | 30 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read); |
| 20 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write); | 31 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write); |
| 21 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_dirty); | 32 | |
| 22 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_dirty); | 33 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc); |
| 23 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write); | 34 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc_fail); |
| 24 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert); | 35 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_free); |
| 36 | |||
| 37 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_gc_coalesce); | ||
| 25 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start); | 38 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start); |
| 26 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end); | 39 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end); |
| 40 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy); | ||
| 41 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy_collision); | ||
| 42 | |||
| 43 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_insert_key); | ||
| 44 | |||
| 45 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split); | ||
| 46 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact); | ||
| 47 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root); | ||
| 48 | |||
| 49 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate); | ||
| 50 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail); | ||
| 51 | |||
| 52 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback); | ||
| 53 | EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback_collision); | ||
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index da3a99e85b1e..98eb81159a22 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c | |||
| @@ -228,23 +228,6 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, | |||
| 228 | } | 228 | } |
| 229 | } | 229 | } |
| 230 | 230 | ||
| 231 | int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp) | ||
| 232 | { | ||
| 233 | int i; | ||
| 234 | struct bio_vec *bv; | ||
| 235 | |||
| 236 | bio_for_each_segment(bv, bio, i) { | ||
| 237 | bv->bv_page = alloc_page(gfp); | ||
| 238 | if (!bv->bv_page) { | ||
| 239 | while (bv-- != bio->bi_io_vec + bio->bi_idx) | ||
| 240 | __free_page(bv->bv_page); | ||
| 241 | return -ENOMEM; | ||
| 242 | } | ||
| 243 | } | ||
| 244 | |||
| 245 | return 0; | ||
| 246 | } | ||
| 247 | |||
| 248 | /* | 231 | /* |
| 249 | * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any | 232 | * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any |
| 250 | * use permitted, subject to terms of PostgreSQL license; see.) | 233 | * use permitted, subject to terms of PostgreSQL license; see.) |
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 577393e38c3a..1ae2a73ad85f 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
| @@ -15,8 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | struct closure; | 16 | struct closure; |
| 17 | 17 | ||
| 18 | #include <trace/events/bcache.h> | ||
| 19 | |||
| 20 | #ifdef CONFIG_BCACHE_EDEBUG | 18 | #ifdef CONFIG_BCACHE_EDEBUG |
| 21 | 19 | ||
| 22 | #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) | 20 | #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) |
| @@ -566,12 +564,8 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) | |||
| 566 | return x; | 564 | return x; |
| 567 | } | 565 | } |
| 568 | 566 | ||
| 569 | #define bio_end(bio) ((bio)->bi_sector + bio_sectors(bio)) | ||
| 570 | |||
| 571 | void bch_bio_map(struct bio *bio, void *base); | 567 | void bch_bio_map(struct bio *bio, void *base); |
| 572 | 568 | ||
| 573 | int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp); | ||
| 574 | |||
| 575 | static inline sector_t bdev_sectors(struct block_device *bdev) | 569 | static inline sector_t bdev_sectors(struct block_device *bdev) |
| 576 | { | 570 | { |
| 577 | return bdev->bd_inode->i_size >> 9; | 571 | return bdev->bd_inode->i_size >> 9; |
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 2714ed3991d1..22cbff551628 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
| @@ -9,6 +9,9 @@ | |||
| 9 | #include "bcache.h" | 9 | #include "bcache.h" |
| 10 | #include "btree.h" | 10 | #include "btree.h" |
| 11 | #include "debug.h" | 11 | #include "debug.h" |
| 12 | #include "writeback.h" | ||
| 13 | |||
| 14 | #include <trace/events/bcache.h> | ||
| 12 | 15 | ||
| 13 | static struct workqueue_struct *dirty_wq; | 16 | static struct workqueue_struct *dirty_wq; |
| 14 | 17 | ||
| @@ -36,7 +39,7 @@ static void __update_writeback_rate(struct cached_dev *dc) | |||
| 36 | 39 | ||
| 37 | int change = 0; | 40 | int change = 0; |
| 38 | int64_t error; | 41 | int64_t error; |
| 39 | int64_t dirty = atomic_long_read(&dc->disk.sectors_dirty); | 42 | int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); |
| 40 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; | 43 | int64_t derivative = dirty - dc->disk.sectors_dirty_last; |
| 41 | 44 | ||
| 42 | dc->disk.sectors_dirty_last = dirty; | 45 | dc->disk.sectors_dirty_last = dirty; |
| @@ -105,6 +108,31 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) | |||
| 105 | return KEY_DIRTY(k); | 108 | return KEY_DIRTY(k); |
| 106 | } | 109 | } |
| 107 | 110 | ||
| 111 | static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k) | ||
| 112 | { | ||
| 113 | uint64_t stripe; | ||
| 114 | unsigned nr_sectors = KEY_SIZE(k); | ||
| 115 | struct cached_dev *dc = container_of(buf, struct cached_dev, | ||
| 116 | writeback_keys); | ||
| 117 | unsigned stripe_size = 1 << dc->disk.stripe_size_bits; | ||
| 118 | |||
| 119 | if (!KEY_DIRTY(k)) | ||
| 120 | return false; | ||
| 121 | |||
| 122 | stripe = KEY_START(k) >> dc->disk.stripe_size_bits; | ||
| 123 | while (1) { | ||
| 124 | if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) != | ||
| 125 | stripe_size) | ||
| 126 | return false; | ||
| 127 | |||
| 128 | if (nr_sectors <= stripe_size) | ||
| 129 | return true; | ||
| 130 | |||
| 131 | nr_sectors -= stripe_size; | ||
| 132 | stripe++; | ||
| 133 | } | ||
| 134 | } | ||
| 135 | |||
| 108 | static void dirty_init(struct keybuf_key *w) | 136 | static void dirty_init(struct keybuf_key *w) |
| 109 | { | 137 | { |
| 110 | struct dirty_io *io = w->private; | 138 | struct dirty_io *io = w->private; |
| @@ -149,7 +177,22 @@ static void refill_dirty(struct closure *cl) | |||
| 149 | searched_from_start = true; | 177 | searched_from_start = true; |
| 150 | } | 178 | } |
| 151 | 179 | ||
| 152 | bch_refill_keybuf(dc->disk.c, buf, &end); | 180 | if (dc->partial_stripes_expensive) { |
| 181 | uint64_t i; | ||
| 182 | |||
| 183 | for (i = 0; i < dc->disk.nr_stripes; i++) | ||
| 184 | if (atomic_read(dc->disk.stripe_sectors_dirty + i) == | ||
| 185 | 1 << dc->disk.stripe_size_bits) | ||
| 186 | goto full_stripes; | ||
| 187 | |||
| 188 | goto normal_refill; | ||
| 189 | full_stripes: | ||
| 190 | bch_refill_keybuf(dc->disk.c, buf, &end, | ||
| 191 | dirty_full_stripe_pred); | ||
| 192 | } else { | ||
| 193 | normal_refill: | ||
| 194 | bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); | ||
| 195 | } | ||
| 153 | 196 | ||
| 154 | if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) { | 197 | if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) { |
| 155 | /* Searched the entire btree - delay awhile */ | 198 | /* Searched the entire btree - delay awhile */ |
| @@ -181,10 +224,8 @@ void bch_writeback_queue(struct cached_dev *dc) | |||
| 181 | } | 224 | } |
| 182 | } | 225 | } |
| 183 | 226 | ||
| 184 | void bch_writeback_add(struct cached_dev *dc, unsigned sectors) | 227 | void bch_writeback_add(struct cached_dev *dc) |
| 185 | { | 228 | { |
| 186 | atomic_long_add(sectors, &dc->disk.sectors_dirty); | ||
| 187 | |||
| 188 | if (!atomic_read(&dc->has_dirty) && | 229 | if (!atomic_read(&dc->has_dirty) && |
| 189 | !atomic_xchg(&dc->has_dirty, 1)) { | 230 | !atomic_xchg(&dc->has_dirty, 1)) { |
| 190 | atomic_inc(&dc->count); | 231 | atomic_inc(&dc->count); |
| @@ -203,6 +244,34 @@ void bch_writeback_add(struct cached_dev *dc, unsigned sectors) | |||
| 203 | } | 244 | } |
| 204 | } | 245 | } |
| 205 | 246 | ||
| 247 | void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, | ||
| 248 | uint64_t offset, int nr_sectors) | ||
| 249 | { | ||
| 250 | struct bcache_device *d = c->devices[inode]; | ||
| 251 | unsigned stripe_size, stripe_offset; | ||
| 252 | uint64_t stripe; | ||
| 253 | |||
| 254 | if (!d) | ||
| 255 | return; | ||
| 256 | |||
| 257 | stripe_size = 1 << d->stripe_size_bits; | ||
| 258 | stripe = offset >> d->stripe_size_bits; | ||
| 259 | stripe_offset = offset & (stripe_size - 1); | ||
| 260 | |||
| 261 | while (nr_sectors) { | ||
| 262 | int s = min_t(unsigned, abs(nr_sectors), | ||
| 263 | stripe_size - stripe_offset); | ||
| 264 | |||
| 265 | if (nr_sectors < 0) | ||
| 266 | s = -s; | ||
| 267 | |||
| 268 | atomic_add(s, d->stripe_sectors_dirty + stripe); | ||
| 269 | nr_sectors -= s; | ||
| 270 | stripe_offset = 0; | ||
| 271 | stripe++; | ||
| 272 | } | ||
| 273 | } | ||
| 274 | |||
| 206 | /* Background writeback - IO loop */ | 275 | /* Background writeback - IO loop */ |
| 207 | 276 | ||
| 208 | static void dirty_io_destructor(struct closure *cl) | 277 | static void dirty_io_destructor(struct closure *cl) |
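bcache_dev_sectors_dirty_add() applies a signed sector delta to the per-stripe counters, clamping each step at the current stripe boundary and carrying the remainder into the next stripe. A standalone sketch with explicit parameters (stripe_bits stands in for d->stripe_size_bits):

    static void stripes_dirty_add(atomic_t *dirty, unsigned stripe_bits,
                                  uint64_t offset, int nr_sectors)
    {
        unsigned stripe_size = 1U << stripe_bits;
        uint64_t stripe = offset >> stripe_bits;
        unsigned stripe_offset = offset & (stripe_size - 1);

        while (nr_sectors) {
            /* Never cross a stripe boundary in one step. */
            int s = min_t(unsigned, abs(nr_sectors),
                          stripe_size - stripe_offset);

            if (nr_sectors < 0)
                s = -s;              /* negative delta: sectors cleaned */

            atomic_add(s, dirty + stripe);
            nr_sectors -= s;
            stripe_offset = 0;       /* later stripes start at offset 0 */
            stripe++;
        }
    }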
| @@ -216,9 +285,10 @@ static void write_dirty_finish(struct closure *cl) | |||
| 216 | struct dirty_io *io = container_of(cl, struct dirty_io, cl); | 285 | struct dirty_io *io = container_of(cl, struct dirty_io, cl); |
| 217 | struct keybuf_key *w = io->bio.bi_private; | 286 | struct keybuf_key *w = io->bio.bi_private; |
| 218 | struct cached_dev *dc = io->dc; | 287 | struct cached_dev *dc = io->dc; |
| 219 | struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt); | 288 | struct bio_vec *bv; |
| 289 | int i; | ||
| 220 | 290 | ||
| 221 | while (bv-- != io->bio.bi_io_vec) | 291 | bio_for_each_segment_all(bv, &io->bio, i) |
| 222 | __free_page(bv->bv_page); | 292 | __free_page(bv->bv_page); |
| 223 | 293 | ||
| 224 | /* This is kind of a dumb way of signalling errors. */ | 294 | /* This is kind of a dumb way of signalling errors. */ |
| @@ -236,10 +306,12 @@ static void write_dirty_finish(struct closure *cl) | |||
| 236 | for (i = 0; i < KEY_PTRS(&w->key); i++) | 306 | for (i = 0; i < KEY_PTRS(&w->key); i++) |
| 237 | atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); | 307 | atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); |
| 238 | 308 | ||
| 239 | pr_debug("clearing %s", pkey(&w->key)); | ||
| 240 | bch_btree_insert(&op, dc->disk.c); | 309 | bch_btree_insert(&op, dc->disk.c); |
| 241 | closure_sync(&op.cl); | 310 | closure_sync(&op.cl); |
| 242 | 311 | ||
| 312 | if (op.insert_collision) | ||
| 313 | trace_bcache_writeback_collision(&w->key); | ||
| 314 | |||
| 243 | atomic_long_inc(op.insert_collision | 315 | atomic_long_inc(op.insert_collision |
| 244 | ? &dc->disk.c->writeback_keys_failed | 316 | ? &dc->disk.c->writeback_keys_failed |
| 245 | : &dc->disk.c->writeback_keys_done); | 317 | : &dc->disk.c->writeback_keys_done); |
| @@ -275,7 +347,6 @@ static void write_dirty(struct closure *cl) | |||
| 275 | io->bio.bi_bdev = io->dc->bdev; | 347 | io->bio.bi_bdev = io->dc->bdev; |
| 276 | io->bio.bi_end_io = dirty_endio; | 348 | io->bio.bi_end_io = dirty_endio; |
| 277 | 349 | ||
| 278 | trace_bcache_write_dirty(&io->bio); | ||
| 279 | closure_bio_submit(&io->bio, cl, &io->dc->disk); | 350 | closure_bio_submit(&io->bio, cl, &io->dc->disk); |
| 280 | 351 | ||
| 281 | continue_at(cl, write_dirty_finish, dirty_wq); | 352 | continue_at(cl, write_dirty_finish, dirty_wq); |
| @@ -296,7 +367,6 @@ static void read_dirty_submit(struct closure *cl) | |||
| 296 | { | 367 | { |
| 297 | struct dirty_io *io = container_of(cl, struct dirty_io, cl); | 368 | struct dirty_io *io = container_of(cl, struct dirty_io, cl); |
| 298 | 369 | ||
| 299 | trace_bcache_read_dirty(&io->bio); | ||
| 300 | closure_bio_submit(&io->bio, cl, &io->dc->disk); | 370 | closure_bio_submit(&io->bio, cl, &io->dc->disk); |
| 301 | 371 | ||
| 302 | continue_at(cl, write_dirty, dirty_wq); | 372 | continue_at(cl, write_dirty, dirty_wq); |
| @@ -349,10 +419,10 @@ static void read_dirty(struct closure *cl) | |||
| 349 | io->bio.bi_rw = READ; | 419 | io->bio.bi_rw = READ; |
| 350 | io->bio.bi_end_io = read_dirty_endio; | 420 | io->bio.bi_end_io = read_dirty_endio; |
| 351 | 421 | ||
| 352 | if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL)) | 422 | if (bio_alloc_pages(&io->bio, GFP_KERNEL)) |
| 353 | goto err_free; | 423 | goto err_free; |
| 354 | 424 | ||
| 355 | pr_debug("%s", pkey(&w->key)); | 425 | trace_bcache_writeback(&w->key); |
| 356 | 426 | ||
| 357 | closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); | 427 | closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); |
| 358 | 428 | ||
| @@ -375,12 +445,49 @@ err: | |||
| 375 | refill_dirty(cl); | 445 | refill_dirty(cl); |
| 376 | } | 446 | } |
| 377 | 447 | ||
| 448 | /* Init */ | ||
| 449 | |||
| 450 | static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op, | ||
| 451 | struct cached_dev *dc) | ||
| 452 | { | ||
| 453 | struct bkey *k; | ||
| 454 | struct btree_iter iter; | ||
| 455 | |||
| 456 | bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0)); | ||
| 457 | while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) | ||
| 458 | if (!b->level) { | ||
| 459 | if (KEY_INODE(k) > dc->disk.id) | ||
| 460 | break; | ||
| 461 | |||
| 462 | if (KEY_DIRTY(k)) | ||
| 463 | bcache_dev_sectors_dirty_add(b->c, dc->disk.id, | ||
| 464 | KEY_START(k), | ||
| 465 | KEY_SIZE(k)); | ||
| 466 | } else { | ||
| 467 | btree(sectors_dirty_init, k, b, op, dc); | ||
| 468 | if (KEY_INODE(k) > dc->disk.id) | ||
| 469 | break; | ||
| 470 | |||
| 471 | cond_resched(); | ||
| 472 | } | ||
| 473 | |||
| 474 | return 0; | ||
| 475 | } | ||
| 476 | |||
| 477 | void bch_sectors_dirty_init(struct cached_dev *dc) | ||
| 478 | { | ||
| 479 | struct btree_op op; | ||
| 480 | |||
| 481 | bch_btree_op_init_stack(&op); | ||
| 482 | btree_root(sectors_dirty_init, dc->disk.c, &op, dc); | ||
| 483 | } | ||
| 484 | |||
| 378 | void bch_cached_dev_writeback_init(struct cached_dev *dc) | 485 | void bch_cached_dev_writeback_init(struct cached_dev *dc) |
| 379 | { | 486 | { |
| 380 | closure_init_unlocked(&dc->writeback); | 487 | closure_init_unlocked(&dc->writeback); |
| 381 | init_rwsem(&dc->writeback_lock); | 488 | init_rwsem(&dc->writeback_lock); |
| 382 | 489 | ||
| 383 | bch_keybuf_init(&dc->writeback_keys, dirty_pred); | 490 | bch_keybuf_init(&dc->writeback_keys); |
| 384 | 491 | ||
| 385 | dc->writeback_metadata = true; | 492 | dc->writeback_metadata = true; |
| 386 | dc->writeback_running = true; | 493 | dc->writeback_running = true; |
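bch_sectors_dirty_init() rebuilds those counters at attach time by walking the btree for the device's inode: leaf keys add KEY_SIZE() sectors when KEY_DIRTY(), interior keys recurse, and the walk stops once KEY_INODE(k) passes the device id. A simplified view treating bcache's btree()/btree_root() recursion macros as a plain call (visit_child() is a stand-in, and the break check is hoisted for clarity):

    static int sectors_dirty_walk(struct btree *b, struct cached_dev *dc)
    {
        struct bkey *k;
        struct btree_iter iter;

        bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
        while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
            if (KEY_INODE(k) > dc->disk.id)
                break;                        /* past this device's keys */

            if (!b->level) {
                if (KEY_DIRTY(k))
                    bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
                                                 KEY_START(k), KEY_SIZE(k));
            } else {
                visit_child(k, dc);           /* recurse into child node */
                cond_resched();
            }
        }
        return 0;
    }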
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h new file mode 100644 index 000000000000..c91f61bb95b6 --- /dev/null +++ b/drivers/md/bcache/writeback.h | |||
| @@ -0,0 +1,64 @@ | |||
| 1 | #ifndef _BCACHE_WRITEBACK_H | ||
| 2 | #define _BCACHE_WRITEBACK_H | ||
| 3 | |||
| 4 | #define CUTOFF_WRITEBACK 40 | ||
| 5 | #define CUTOFF_WRITEBACK_SYNC 70 | ||
| 6 | |||
| 7 | static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) | ||
| 8 | { | ||
| 9 | uint64_t i, ret = 0; | ||
| 10 | |||
| 11 | for (i = 0; i < d->nr_stripes; i++) | ||
| 12 | ret += atomic_read(d->stripe_sectors_dirty + i); | ||
| 13 | |||
| 14 | return ret; | ||
| 15 | } | ||
| 16 | |||
| 17 | static inline bool bcache_dev_stripe_dirty(struct bcache_device *d, | ||
| 18 | uint64_t offset, | ||
| 19 | unsigned nr_sectors) | ||
| 20 | { | ||
| 21 | uint64_t stripe = offset >> d->stripe_size_bits; | ||
| 22 | |||
| 23 | while (1) { | ||
| 24 | if (atomic_read(d->stripe_sectors_dirty + stripe)) | ||
| 25 | return true; | ||
| 26 | |||
| 27 | if (nr_sectors <= 1 << d->stripe_size_bits) | ||
| 28 | return false; | ||
| 29 | |||
| 30 | nr_sectors -= 1 << d->stripe_size_bits; | ||
| 31 | stripe++; | ||
| 32 | } | ||
| 33 | } | ||
| 34 | |||
| 35 | static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, | ||
| 36 | unsigned cache_mode, bool would_skip) | ||
| 37 | { | ||
| 38 | unsigned in_use = dc->disk.c->gc_stats.in_use; | ||
| 39 | |||
| 40 | if (cache_mode != CACHE_MODE_WRITEBACK || | ||
| 41 | atomic_read(&dc->disk.detaching) || | ||
| 42 | in_use > CUTOFF_WRITEBACK_SYNC) | ||
| 43 | return false; | ||
| 44 | |||
| 45 | if (dc->partial_stripes_expensive && | ||
| 46 | bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector, | ||
| 47 | bio_sectors(bio))) | ||
| 48 | return true; | ||
| 49 | |||
| 50 | if (would_skip) | ||
| 51 | return false; | ||
| 52 | |||
| 53 | return bio->bi_rw & REQ_SYNC || | ||
| 54 | in_use <= CUTOFF_WRITEBACK; | ||
| 55 | } | ||
| 56 | |||
| 57 | void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); | ||
| 58 | void bch_writeback_queue(struct cached_dev *); | ||
| 59 | void bch_writeback_add(struct cached_dev *); | ||
| 60 | |||
| 61 | void bch_sectors_dirty_init(struct cached_dev *dc); | ||
| 62 | void bch_cached_dev_writeback_init(struct cached_dev *); | ||
| 63 | |||
| 64 | #endif | ||
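Read together, the gates in should_writeback() say: never write back above 70% cache use or while detaching; below that, bios overlapping an already-dirty stripe always write back when partial stripes are expensive; sequential bypass (would_skip) wins next; otherwise REQ_SYNC bios write back up to 70% and everything else only up to 40%. A userspace mock of the same logic for experimentation, with the stripe and mode checks folded into booleans:

    #include <stdbool.h>

    #define CUTOFF_WRITEBACK       40
    #define CUTOFF_WRITEBACK_SYNC  70

    /* stripe_dirty already includes the partial_stripes_expensive test. */
    static bool should_writeback_mock(unsigned in_use, bool writeback_mode,
                                      bool detaching, bool stripe_dirty,
                                      bool would_skip, bool sync_bio)
    {
        if (!writeback_mode || detaching || in_use > CUTOFF_WRITEBACK_SYNC)
            return false;
        if (stripe_dirty)
            return true;     /* finish partially dirty stripes */
        if (would_skip)
            return false;
        return sync_bio || in_use <= CUTOFF_WRITEBACK;
    }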
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index dc112a7137fe..4296155090b2 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c | |||
| @@ -959,23 +959,21 @@ out: | |||
| 959 | return r; | 959 | return r; |
| 960 | } | 960 | } |
| 961 | 961 | ||
| 962 | static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) | 962 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) |
| 963 | { | 963 | { |
| 964 | struct entry *e = hash_lookup(mq, oblock); | 964 | struct mq_policy *mq = to_mq_policy(p); |
| 965 | struct entry *e; | ||
| 966 | |||
| 967 | mutex_lock(&mq->lock); | ||
| 968 | |||
| 969 | e = hash_lookup(mq, oblock); | ||
| 965 | 970 | ||
| 966 | BUG_ON(!e || !e->in_cache); | 971 | BUG_ON(!e || !e->in_cache); |
| 967 | 972 | ||
| 968 | del(mq, e); | 973 | del(mq, e); |
| 969 | e->in_cache = false; | 974 | e->in_cache = false; |
| 970 | push(mq, e); | 975 | push(mq, e); |
| 971 | } | ||
| 972 | 976 | ||
| 973 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) | ||
| 974 | { | ||
| 975 | struct mq_policy *mq = to_mq_policy(p); | ||
| 976 | |||
| 977 | mutex_lock(&mq->lock); | ||
| 978 | remove_mapping(mq, oblock); | ||
| 979 | mutex_unlock(&mq->lock); | 977 | mutex_unlock(&mq->lock); |
| 980 | } | 978 | } |
| 981 | 979 | ||
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 957a719e8c2f..df7b0a06b0ea 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -2290,12 +2290,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
| 2290 | d = r10_bio->devs[1].devnum; | 2290 | d = r10_bio->devs[1].devnum; |
| 2291 | wbio = r10_bio->devs[1].bio; | 2291 | wbio = r10_bio->devs[1].bio; |
| 2292 | wbio2 = r10_bio->devs[1].repl_bio; | 2292 | wbio2 = r10_bio->devs[1].repl_bio; |
| 2293 | /* Need to test wbio2->bi_end_io before we call | ||
| 2294 | * generic_make_request as if the former is NULL, | ||
| 2295 | * the latter is free to free wbio2. | ||
| 2296 | */ | ||
| 2297 | if (wbio2 && !wbio2->bi_end_io) | ||
| 2298 | wbio2 = NULL; | ||
| 2293 | if (wbio->bi_end_io) { | 2299 | if (wbio->bi_end_io) { |
| 2294 | atomic_inc(&conf->mirrors[d].rdev->nr_pending); | 2300 | atomic_inc(&conf->mirrors[d].rdev->nr_pending); |
| 2295 | md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); | 2301 | md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); |
| 2296 | generic_make_request(wbio); | 2302 | generic_make_request(wbio); |
| 2297 | } | 2303 | } |
| 2298 | if (wbio2 && wbio2->bi_end_io) { | 2304 | if (wbio2) { |
| 2299 | atomic_inc(&conf->mirrors[d].replacement->nr_pending); | 2305 | atomic_inc(&conf->mirrors[d].replacement->nr_pending); |
| 2300 | md_sync_acct(conf->mirrors[d].replacement->bdev, | 2306 | md_sync_acct(conf->mirrors[d].replacement->bdev, |
| 2301 | bio_sectors(wbio2)); | 2307 | bio_sectors(wbio2)); |
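The raid10 fix is an ordering rule: completing wbio can drop the last reference on the r10_bio and free wbio2, so wbio2->bi_end_io must be sampled before wbio is submitted. Condensed, with the nr_pending and md_sync_acct() bookkeeping omitted:

    if (wbio2 && !wbio2->bi_end_io)
        wbio2 = NULL;                   /* decide before submission */
    if (wbio->bi_end_io)
        generic_make_request(wbio);     /* may free an unused wbio2 */
    if (wbio2)
        generic_make_request(wbio2);    /* its own completion is still pending */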
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2bf094a587cb..78ea44336e75 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 3462 | test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { | 3462 | test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { |
| 3463 | set_bit(STRIPE_SYNCING, &sh->state); | 3463 | set_bit(STRIPE_SYNCING, &sh->state); |
| 3464 | clear_bit(STRIPE_INSYNC, &sh->state); | 3464 | clear_bit(STRIPE_INSYNC, &sh->state); |
| 3465 | clear_bit(STRIPE_REPLACED, &sh->state); | ||
| 3465 | } | 3466 | } |
| 3466 | spin_unlock(&sh->stripe_lock); | 3467 | spin_unlock(&sh->stripe_lock); |
| 3467 | } | 3468 | } |
| @@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 3607 | handle_parity_checks5(conf, sh, &s, disks); | 3608 | handle_parity_checks5(conf, sh, &s, disks); |
| 3608 | } | 3609 | } |
| 3609 | 3610 | ||
| 3610 | if (s.replacing && s.locked == 0 | 3611 | if ((s.replacing || s.syncing) && s.locked == 0 |
| 3611 | && !test_bit(STRIPE_INSYNC, &sh->state)) { | 3612 | && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) |
| 3613 | && !test_bit(STRIPE_REPLACED, &sh->state)) { | ||
| 3612 | /* Write out to replacement devices where possible */ | 3614 | /* Write out to replacement devices where possible */ |
| 3613 | for (i = 0; i < conf->raid_disks; i++) | 3615 | for (i = 0; i < conf->raid_disks; i++) |
| 3614 | if (test_bit(R5_UPTODATE, &sh->dev[i].flags) && | 3616 | if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { |
| 3615 | test_bit(R5_NeedReplace, &sh->dev[i].flags)) { | 3617 | WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
| 3616 | set_bit(R5_WantReplace, &sh->dev[i].flags); | 3618 | set_bit(R5_WantReplace, &sh->dev[i].flags); |
| 3617 | set_bit(R5_LOCKED, &sh->dev[i].flags); | 3619 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
| 3618 | s.locked++; | 3620 | s.locked++; |
| 3619 | } | 3621 | } |
| 3620 | set_bit(STRIPE_INSYNC, &sh->state); | 3622 | if (s.replacing) |
| 3623 | set_bit(STRIPE_INSYNC, &sh->state); | ||
| 3624 | set_bit(STRIPE_REPLACED, &sh->state); | ||
| 3621 | } | 3625 | } |
| 3622 | if ((s.syncing || s.replacing) && s.locked == 0 && | 3626 | if ((s.syncing || s.replacing) && s.locked == 0 && |
| 3627 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && | ||
| 3623 | test_bit(STRIPE_INSYNC, &sh->state)) { | 3628 | test_bit(STRIPE_INSYNC, &sh->state)) { |
| 3624 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); | 3629 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
| 3625 | clear_bit(STRIPE_SYNCING, &sh->state); | 3630 | clear_bit(STRIPE_SYNCING, &sh->state); |
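The new STRIPE_REPLACED bit turns the write-to-replacement step into run-once-per-sync logic, and both it and the completion path now also wait for any asynchronous parity compute (STRIPE_COMPUTE_RUN) to finish. A condensed view of the ordering, bookkeeping omitted:

    if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
        set_bit(STRIPE_SYNCING, &sh->state);
        clear_bit(STRIPE_INSYNC, &sh->state);
        clear_bit(STRIPE_REPLACED, &sh->state);    /* re-arm the step */
    }

    if ((s.replacing || s.syncing) && s.locked == 0 &&
        !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
        !test_bit(STRIPE_REPLACED, &sh->state)) {
        /* queue writes to replacement devices exactly once */
        set_bit(STRIPE_REPLACED, &sh->state);
    }

    if ((s.syncing || s.replacing) && s.locked == 0 &&
        !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
        test_bit(STRIPE_INSYNC, &sh->state))
        md_done_sync(conf->mddev, STRIPE_SECTORS, 1);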
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index b0b663b119a8..70c49329ca9a 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
| @@ -306,6 +306,7 @@ enum { | |||
| 306 | STRIPE_SYNC_REQUESTED, | 306 | STRIPE_SYNC_REQUESTED, |
| 307 | STRIPE_SYNCING, | 307 | STRIPE_SYNCING, |
| 308 | STRIPE_INSYNC, | 308 | STRIPE_INSYNC, |
| 309 | STRIPE_REPLACED, | ||
| 309 | STRIPE_PREREAD_ACTIVE, | 310 | STRIPE_PREREAD_ACTIVE, |
| 310 | STRIPE_DELAYED, | 311 | STRIPE_DELAYED, |
| 311 | STRIPE_DEGRADED, | 312 | STRIPE_DEGRADED, |
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c index efdc873e58d1..a9857022f71d 100644 --- a/drivers/media/i2c/ml86v7667.c +++ b/drivers/media/i2c/ml86v7667.c | |||
| @@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) | |||
| 117 | { | 117 | { |
| 118 | struct v4l2_subdev *sd = to_sd(ctrl); | 118 | struct v4l2_subdev *sd = to_sd(ctrl); |
| 119 | struct i2c_client *client = v4l2_get_subdevdata(sd); | 119 | struct i2c_client *client = v4l2_get_subdevdata(sd); |
| 120 | int ret; | 120 | int ret = -EINVAL; |
| 121 | 121 | ||
| 122 | switch (ctrl->id) { | 122 | switch (ctrl->id) { |
| 123 | case V4L2_CID_BRIGHTNESS: | 123 | case V4L2_CID_BRIGHTNESS: |
| @@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) | |||
| 157 | break; | 157 | break; |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | return 0; | 160 | return ret; |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) | 163 | static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) |
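In ml86v7667_s_ctrl() the switch previously fell through to `return 0` for unhandled control IDs; initialising ret to -EINVAL and returning it makes the unsupported case an error, while each handled case overwrites ret with the result of its register write. The shape of the pattern, with reg_write() as a hypothetical stand-in for the driver's I2C helper:

    static int s_ctrl_shape(struct v4l2_ctrl *ctrl)
    {
        int ret = -EINVAL;    /* unhandled IDs propagate an error */

        switch (ctrl->id) {
        case V4L2_CID_BRIGHTNESS:
            ret = reg_write(ctrl);    /* hypothetical helper */
            break;
        /* other supported IDs set ret the same way */
        }

        return ret;
    }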
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index df4ada880e42..bd9405df1bd6 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c | |||
| @@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids); | |||
| 1987 | 1987 | ||
| 1988 | #ifdef CONFIG_OF | 1988 | #ifdef CONFIG_OF |
| 1989 | static const struct of_device_id coda_dt_ids[] = { | 1989 | static const struct of_device_id coda_dt_ids[] = { |
| 1990 | { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] }, | 1990 | { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] }, |
| 1991 | { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, | 1991 | { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, |
| 1992 | { /* sentinel */ } | 1992 | { /* sentinel */ } |
| 1993 | }; | 1993 | }; |
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 553d87e5ceab..fd6289d60cde 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c | |||
| @@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev) | |||
| 784 | } | 784 | } |
| 785 | *vfd = g2d_videodev; | 785 | *vfd = g2d_videodev; |
| 786 | vfd->lock = &dev->mutex; | 786 | vfd->lock = &dev->mutex; |
| 787 | vfd->v4l2_dev = &dev->v4l2_dev; | ||
| 787 | ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); | 788 | ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); |
| 788 | if (ret) { | 789 | if (ret) { |
| 789 | v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); | 790 | v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 5296385153d5..4f6dd42c9adb 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | |||
| @@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 344 | pix_mp->num_planes = 2; | 344 | pix_mp->num_planes = 2; |
| 345 | /* Set pixelformat to the format in which MFC | 345 | /* Set pixelformat to the format in which MFC |
| 346 | outputs the decoded frame */ | 346 | outputs the decoded frame */ |
| 347 | pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT; | 347 | pix_mp->pixelformat = ctx->dst_fmt->fourcc; |
| 348 | pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; | 348 | pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; |
| 349 | pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; | 349 | pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; |
| 350 | pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; | 350 | pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; |
| @@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 382 | mfc_err("Unsupported format for source.\n"); | 382 | mfc_err("Unsupported format for source.\n"); |
| 383 | return -EINVAL; | 383 | return -EINVAL; |
| 384 | } | 384 | } |
| 385 | if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { | 385 | if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) { |
| 386 | mfc_err("Not supported format.\n"); | 386 | mfc_err("Unknown codec\n"); |
| 387 | return -EINVAL; | 387 | return -EINVAL; |
| 388 | } | 388 | } |
| 389 | if (!IS_MFCV6(dev)) { | ||
| 390 | if (fmt->fourcc == V4L2_PIX_FMT_VP8) { | ||
| 391 | mfc_err("Not supported format.\n"); | ||
| 392 | return -EINVAL; | ||
| 393 | } | ||
| 394 | } | ||
| 389 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { | 395 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { |
| 390 | fmt = find_format(f, MFC_FMT_RAW); | 396 | fmt = find_format(f, MFC_FMT_RAW); |
| 391 | if (!fmt) { | 397 | if (!fmt) { |
| @@ -411,7 +417,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 411 | struct s5p_mfc_dev *dev = video_drvdata(file); | 417 | struct s5p_mfc_dev *dev = video_drvdata(file); |
| 412 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); | 418 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); |
| 413 | int ret = 0; | 419 | int ret = 0; |
| 414 | struct s5p_mfc_fmt *fmt; | ||
| 415 | struct v4l2_pix_format_mplane *pix_mp; | 420 | struct v4l2_pix_format_mplane *pix_mp; |
| 416 | 421 | ||
| 417 | mfc_debug_enter(); | 422 | mfc_debug_enter(); |
| @@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 425 | goto out; | 430 | goto out; |
| 426 | } | 431 | } |
| 427 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { | 432 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { |
| 428 | fmt = find_format(f, MFC_FMT_RAW); | 433 | /* dst_fmt is validated by call to vidioc_try_fmt */ |
| 429 | if (!fmt) { | 434 | ctx->dst_fmt = find_format(f, MFC_FMT_RAW); |
| 430 | mfc_err("Unsupported format for source.\n"); | 435 | ret = 0; |
| 431 | return -EINVAL; | ||
| 432 | } | ||
| 433 | if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) { | ||
| 434 | mfc_err("Not supported format.\n"); | ||
| 435 | return -EINVAL; | ||
| 436 | } else if (IS_MFCV6(dev) && | ||
| 437 | (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { | ||
| 438 | mfc_err("Not supported format.\n"); | ||
| 439 | return -EINVAL; | ||
| 440 | } | ||
| 441 | ctx->dst_fmt = fmt; | ||
| 442 | mfc_debug_leave(); | ||
| 443 | return ret; | ||
| 444 | } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { | ||
| 445 | mfc_err("Wrong type error for S_FMT : %d", f->type); | ||
| 446 | return -EINVAL; | ||
| 447 | } | ||
| 448 | fmt = find_format(f, MFC_FMT_DEC); | ||
| 449 | if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) { | ||
| 450 | mfc_err("Unknown codec\n"); | ||
| 451 | ret = -EINVAL; | ||
| 452 | goto out; | 436 | goto out; |
| 453 | } | 437 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { |
| 454 | if (fmt->type != MFC_FMT_DEC) { | 438 | /* src_fmt is validated by call to vidioc_try_fmt */ |
| 455 | mfc_err("Wrong format selected, you should choose " | 439 | ctx->src_fmt = find_format(f, MFC_FMT_DEC); |
| 456 | "format for decoding\n"); | 440 | ctx->codec_mode = ctx->src_fmt->codec_mode; |
| 441 | mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); | ||
| 442 | pix_mp->height = 0; | ||
| 443 | pix_mp->width = 0; | ||
| 444 | if (pix_mp->plane_fmt[0].sizeimage) | ||
| 445 | ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; | ||
| 446 | else | ||
| 447 | pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = | ||
| 448 | DEF_CPB_SIZE; | ||
| 449 | pix_mp->plane_fmt[0].bytesperline = 0; | ||
| 450 | ctx->state = MFCINST_INIT; | ||
| 451 | ret = 0; | ||
| 452 | goto out; | ||
| 453 | } else { | ||
| 454 | mfc_err("Wrong type error for S_FMT : %d", f->type); | ||
| 457 | ret = -EINVAL; | 455 | ret = -EINVAL; |
| 458 | goto out; | 456 | goto out; |
| 459 | } | 457 | } |
| 460 | if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { | 458 | |
| 461 | mfc_err("Not supported format.\n"); | ||
| 462 | return -EINVAL; | ||
| 463 | } | ||
| 464 | ctx->src_fmt = fmt; | ||
| 465 | ctx->codec_mode = fmt->codec_mode; | ||
| 466 | mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); | ||
| 467 | pix_mp->height = 0; | ||
| 468 | pix_mp->width = 0; | ||
| 469 | if (pix_mp->plane_fmt[0].sizeimage) | ||
| 470 | ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; | ||
| 471 | else | ||
| 472 | pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = | ||
| 473 | DEF_CPB_SIZE; | ||
| 474 | pix_mp->plane_fmt[0].bytesperline = 0; | ||
| 475 | ctx->state = MFCINST_INIT; | ||
| 476 | out: | 459 | out: |
| 477 | mfc_debug_leave(); | 460 | mfc_debug_leave(); |
| 478 | return ret; | 461 | return ret; |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 2549967b2f85..59e56f4c8ce3 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | |||
| @@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 906 | 906 | ||
| 907 | static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | 907 | static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) |
| 908 | { | 908 | { |
| 909 | struct s5p_mfc_dev *dev = video_drvdata(file); | ||
| 909 | struct s5p_mfc_fmt *fmt; | 910 | struct s5p_mfc_fmt *fmt; |
| 910 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; | 911 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; |
| 911 | 912 | ||
| @@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 930 | return -EINVAL; | 931 | return -EINVAL; |
| 931 | } | 932 | } |
| 932 | 933 | ||
| 934 | if (!IS_MFCV6(dev)) { | ||
| 935 | if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) { | ||
| 936 | mfc_err("Not supported format.\n"); | ||
| 937 | return -EINVAL; | ||
| 938 | } | ||
| 939 | } else if (IS_MFCV6(dev)) { | ||
| 940 | if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) { | ||
| 941 | mfc_err("Not supported format.\n"); | ||
| 942 | return -EINVAL; | ||
| 943 | } | ||
| 944 | } | ||
| 945 | |||
| 933 | if (fmt->num_planes != pix_fmt_mp->num_planes) { | 946 | if (fmt->num_planes != pix_fmt_mp->num_planes) { |
| 934 | mfc_err("failed to try output format\n"); | 947 | mfc_err("failed to try output format\n"); |
| 935 | return -EINVAL; | 948 | return -EINVAL; |
| @@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 947 | { | 960 | { |
| 948 | struct s5p_mfc_dev *dev = video_drvdata(file); | 961 | struct s5p_mfc_dev *dev = video_drvdata(file); |
| 949 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); | 962 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); |
| 950 | struct s5p_mfc_fmt *fmt; | ||
| 951 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; | 963 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; |
| 952 | int ret = 0; | 964 | int ret = 0; |
| 953 | 965 | ||
| @@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 960 | goto out; | 972 | goto out; |
| 961 | } | 973 | } |
| 962 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { | 974 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { |
| 963 | fmt = find_format(f, MFC_FMT_ENC); | 975 | /* dst_fmt is validated by call to vidioc_try_fmt */ |
| 964 | if (!fmt) { | 976 | ctx->dst_fmt = find_format(f, MFC_FMT_ENC); |
| 965 | mfc_err("failed to set capture format\n"); | ||
| 966 | return -EINVAL; | ||
| 967 | } | ||
| 968 | ctx->state = MFCINST_INIT; | 977 | ctx->state = MFCINST_INIT; |
| 969 | ctx->dst_fmt = fmt; | ||
| 970 | ctx->codec_mode = ctx->dst_fmt->codec_mode; | 978 | ctx->codec_mode = ctx->dst_fmt->codec_mode; |
| 971 | ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; | 979 | ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; |
| 972 | pix_fmt_mp->plane_fmt[0].bytesperline = 0; | 980 | pix_fmt_mp->plane_fmt[0].bytesperline = 0; |
| @@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
| 987 | } | 995 | } |
| 988 | mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); | 996 | mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); |
| 989 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { | 997 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { |
| 990 | fmt = find_format(f, MFC_FMT_RAW); | 998 | /* src_fmt is validated by call to vidioc_try_fmt */ |
| 991 | if (!fmt) { | 999 | ctx->src_fmt = find_format(f, MFC_FMT_RAW); |
| 992 | mfc_err("failed to set output format\n"); | ||
| 993 | return -EINVAL; | ||
| 994 | } | ||
| 995 | |||
| 996 | if (!IS_MFCV6(dev) && | ||
| 997 | (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) { | ||
| 998 | mfc_err("Not supported format.\n"); | ||
| 999 | return -EINVAL; | ||
| 1000 | } else if (IS_MFCV6(dev) && | ||
| 1001 | (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { | ||
| 1002 | mfc_err("Not supported format.\n"); | ||
| 1003 | return -EINVAL; | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | if (fmt->num_planes != pix_fmt_mp->num_planes) { | ||
| 1007 | mfc_err("failed to set output format\n"); | ||
| 1008 | ret = -EINVAL; | ||
| 1009 | goto out; | ||
| 1010 | } | ||
| 1011 | ctx->src_fmt = fmt; | ||
| 1012 | ctx->img_width = pix_fmt_mp->width; | 1000 | ctx->img_width = pix_fmt_mp->width; |
| 1013 | ctx->img_height = pix_fmt_mp->height; | 1001 | ctx->img_height = pix_fmt_mp->height; |
| 1014 | mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); | 1002 | mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); |
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 4851cc2e4a4d..c4ff9739a7ae 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c | |||
| @@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus, | |||
| 726 | 726 | ||
| 727 | *eedata = data; | 727 | *eedata = data; |
| 728 | *eedata_len = len; | 728 | *eedata_len = len; |
| 729 | dev_config = (void *)eedata; | 729 | dev_config = (void *)*eedata; |
| 730 | 730 | ||
| 731 | switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { | 731 | switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { |
| 732 | case 0: | 732 | case 0: |
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index cb694055ba7d..6e5070774dc2 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c | |||
| @@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
| 303 | 303 | ||
| 304 | dev->workqueue = 0; | 304 | dev->workqueue = 0; |
| 305 | 305 | ||
| 306 | /* init video transfer queues first of all */ | ||
| 307 | /* to prevent oops in hdpvr_delete() on error paths */ | ||
| 308 | INIT_LIST_HEAD(&dev->free_buff_list); | ||
| 309 | INIT_LIST_HEAD(&dev->rec_buff_list); | ||
| 310 | |||
| 306 | /* register v4l2_device early so it can be used for printks */ | 311 | /* register v4l2_device early so it can be used for printks */ |
| 307 | if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { | 312 | if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { |
| 308 | dev_err(&interface->dev, "v4l2_device_register failed\n"); | 313 | dev_err(&interface->dev, "v4l2_device_register failed\n"); |
| @@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
| 325 | if (!dev->workqueue) | 330 | if (!dev->workqueue) |
| 326 | goto error; | 331 | goto error; |
| 327 | 332 | ||
| 328 | /* init video transfer queues */ | ||
| 329 | INIT_LIST_HEAD(&dev->free_buff_list); | ||
| 330 | INIT_LIST_HEAD(&dev->rec_buff_list); | ||
| 331 | |||
| 332 | dev->options = hdpvr_default_options; | 333 | dev->options = hdpvr_default_options; |
| 333 | 334 | ||
| 334 | if (default_video_input < HDPVR_VIDEO_INPUTS) | 335 | if (default_video_input < HDPVR_VIDEO_INPUTS) |
| @@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
| 405 | video_nr[atomic_inc_return(&dev_nr)]); | 406 | video_nr[atomic_inc_return(&dev_nr)]); |
| 406 | if (retval < 0) { | 407 | if (retval < 0) { |
| 407 | v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); | 408 | v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); |
| 408 | goto error; | 409 | goto reg_fail; |
| 409 | } | 410 | } |
| 410 | 411 | ||
| 411 | /* let the user know what node this device is now attached to */ | 412 | /* let the user know what node this device is now attached to */ |
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig index 8864436464bf..7c5b86006ee6 100644 --- a/drivers/media/usb/usbtv/Kconfig +++ b/drivers/media/usb/usbtv/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config VIDEO_USBTV | 1 | config VIDEO_USBTV |
| 2 | tristate "USBTV007 video capture support" | 2 | tristate "USBTV007 video capture support" |
| 3 | depends on VIDEO_DEV | 3 | depends on VIDEO_V4L2 |
| 4 | select VIDEOBUF2_VMALLOC | 4 | select VIDEOBUF2_VMALLOC |
| 5 | 5 | ||
| 6 | ---help--- | 6 | ---help--- |
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c index bf43f874685e..91650173941a 100644 --- a/drivers/media/usb/usbtv/usbtv.c +++ b/drivers/media/usb/usbtv/usbtv.c | |||
| @@ -57,7 +57,7 @@ | |||
| 57 | #define USBTV_CHUNK_SIZE 256 | 57 | #define USBTV_CHUNK_SIZE 256 |
| 58 | #define USBTV_CHUNK 240 | 58 | #define USBTV_CHUNK 240 |
| 59 | #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ | 59 | #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ |
| 60 | / 2 / USBTV_CHUNK) | 60 | / 4 / USBTV_CHUNK) |
| 61 | 61 | ||
| 62 | /* Chunk header. */ | 62 | /* Chunk header. */ |
| 63 | #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ | 63 | #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ |
| @@ -89,6 +89,7 @@ struct usbtv { | |||
| 89 | /* Number of currently processed frame, useful to find | 89 | /* Number of currently processed frame, useful to find |
| 90 | * out when a new one begins. */ | 90 | * out when a new one begins. */ |
| 91 | u32 frame_id; | 91 | u32 frame_id; |
| 92 | int chunks_done; | ||
| 92 | 93 | ||
| 93 | int iso_size; | 94 | int iso_size; |
| 94 | unsigned int sequence; | 95 | unsigned int sequence; |
| @@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv) | |||
| 202 | return 0; | 203 | return 0; |
| 203 | } | 204 | } |
| 204 | 205 | ||
| 206 | /* Copy data from chunk into a frame buffer, deinterlacing the data | ||
| 207 | * into every second line. Unfortunately, they don't align nicely into | ||
| 208 | * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels. | ||
| 209 | * Therefore, we break down the chunk into two halves before copying, | ||
| 210 | * so that we can interleave a line if needed. */ | ||
| 211 | static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd) | ||
| 212 | { | ||
| 213 | int half; | ||
| 214 | |||
| 215 | for (half = 0; half < 2; half++) { | ||
| 216 | int part_no = chunk_no * 2 + half; | ||
| 217 | int line = part_no / 3; | ||
| 218 | int part_index = (line * 2 + !odd) * 3 + (part_no % 3); | ||
| 219 | |||
| 220 | u32 *dst = &frame[part_index * USBTV_CHUNK/2]; | ||
| 221 | memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src)); | ||
| 222 | src += USBTV_CHUNK/2; | ||
| 223 | } | ||
| 224 | } | ||
| 225 | |||
| 205 | /* Called for each 256-byte image chunk. | 226 | /* Called for each 256-byte image chunk. |
| 206 | * First word identifies the chunk, followed by 240 words of image | 227 | * First word identifies the chunk, followed by 240 words of image |
| 207 | * data and padding. */ | 228 | * data and padding. */ |
| @@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) | |||
| 218 | frame_id = USBTV_FRAME_ID(chunk); | 239 | frame_id = USBTV_FRAME_ID(chunk); |
| 219 | odd = USBTV_ODD(chunk); | 240 | odd = USBTV_ODD(chunk); |
| 220 | chunk_no = USBTV_CHUNK_NO(chunk); | 241 | chunk_no = USBTV_CHUNK_NO(chunk); |
| 221 | |||
| 222 | /* Deinterlace. TODO: Use interlaced frame format. */ | ||
| 223 | chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3; | ||
| 224 | chunk_no += !odd * 3; | ||
| 225 | |||
| 226 | if (chunk_no >= USBTV_CHUNKS) | 242 | if (chunk_no >= USBTV_CHUNKS) |
| 227 | return; | 243 | return; |
| 228 | 244 | ||
| 229 | /* Beginning of a frame. */ | 245 | /* Beginning of a frame. */ |
| 230 | if (chunk_no == 0) | 246 | if (chunk_no == 0) { |
| 231 | usbtv->frame_id = frame_id; | 247 | usbtv->frame_id = frame_id; |
| 248 | usbtv->chunks_done = 0; | ||
| 249 | } | ||
| 250 | |||
| 251 | if (usbtv->frame_id != frame_id) | ||
| 252 | return; | ||
| 232 | 253 | ||
| 233 | spin_lock_irqsave(&usbtv->buflock, flags); | 254 | spin_lock_irqsave(&usbtv->buflock, flags); |
| 234 | if (list_empty(&usbtv->bufs)) { | 255 | if (list_empty(&usbtv->bufs)) { |
| @@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) | |||
| 241 | buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); | 262 | buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); |
| 242 | frame = vb2_plane_vaddr(&buf->vb, 0); | 263 | frame = vb2_plane_vaddr(&buf->vb, 0); |
| 243 | 264 | ||
| 244 | /* Copy the chunk. */ | 265 | /* Copy the chunk data. */ |
| 245 | memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1], | 266 | usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd); |
| 246 | USBTV_CHUNK * sizeof(chunk[1])); | 267 | usbtv->chunks_done++; |
| 247 | 268 | ||
| 248 | /* Last chunk in a frame, signalling an end */ | 269 | /* Last chunk in a frame, signalling an end */ |
| 249 | if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) { | 270 | if (odd && chunk_no == USBTV_CHUNKS-1) { |
| 250 | int size = vb2_plane_size(&buf->vb, 0); | 271 | int size = vb2_plane_size(&buf->vb, 0); |
| 272 | enum vb2_buffer_state state = usbtv->chunks_done == | ||
| 273 | USBTV_CHUNKS ? | ||
| 274 | VB2_BUF_STATE_DONE : | ||
| 275 | VB2_BUF_STATE_ERROR; | ||
| 251 | 276 | ||
| 252 | buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; | 277 | buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; |
| 253 | buf->vb.v4l2_buf.sequence = usbtv->sequence++; | 278 | buf->vb.v4l2_buf.sequence = usbtv->sequence++; |
| 254 | v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); | 279 | v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); |
| 255 | vb2_set_plane_payload(&buf->vb, 0, size); | 280 | vb2_set_plane_payload(&buf->vb, 0, size); |
| 256 | vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); | 281 | vb2_buffer_done(&buf->vb, state); |
| 257 | list_del(&buf->list); | 282 | list_del(&buf->list); |
| 258 | } | 283 | } |
| 259 | 284 | ||
| @@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq, | |||
| 518 | if (*nbuffers < 2) | 543 | if (*nbuffers < 2) |
| 519 | *nbuffers = 2; | 544 | *nbuffers = 2; |
| 520 | *nplanes = 1; | 545 | *nplanes = 1; |
| 521 | sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32); | 546 | sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); |
| 522 | 547 | ||
| 523 | return 0; | 548 | return 0; |
| 524 | } | 549 | } |
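The usbtv_chunk_to_vbuf() comment above spells out the geometry: a chunk is 240 words (480 pixels), two thirds of a 720-pixel line, so each chunk is split into two 120-word halves and half part_no lands at slot (line * 2 + !odd) * 3 + (part_no % 3), interleaving the odd and even fields. A self-contained check of that mapping, with the constants copied from the driver, verifying that every half-chunk slot of a frame is written exactly once:

    #include <stdio.h>
    #include <string.h>

    #define USBTV_WIDTH  720
    #define USBTV_HEIGHT 480
    #define USBTV_CHUNK  240
    #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT / 4 / USBTV_CHUNK)

    int main(void)
    {
        /* one counter per half-chunk slot in a full interlaced frame */
        unsigned char hit[USBTV_CHUNKS * 2 * 2];

        memset(hit, 0, sizeof(hit));
        for (int odd = 0; odd <= 1; odd++)
            for (int chunk_no = 0; chunk_no < USBTV_CHUNKS; chunk_no++)
                for (int half = 0; half < 2; half++) {
                    int part_no = chunk_no * 2 + half;
                    int line = part_no / 3;
                    int part_index = (line * 2 + !odd) * 3 + (part_no % 3);

                    hit[part_index]++;
                }
        for (size_t i = 0; i < sizeof(hit); i++)
            if (hit[i] != 1) {
                printf("slot %zu written %d times\n", i, hit[i]);
                return 1;
            }
        printf("%d chunks per field, every slot written once\n", USBTV_CHUNKS);
        return 0;
    }

The 1440 slots of 120 words each total 172800 u32s, i.e. USBTV_WIDTH * USBTV_HEIGHT / 2 — the same figure the usbtv_queue_setup() hunk now uses for sizes[0].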
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index f7b90661e321..e068a76a5f6f 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c | |||
| @@ -66,14 +66,19 @@ EXPORT_SYMBOL(ssc_request); | |||
| 66 | 66 | ||
| 67 | void ssc_free(struct ssc_device *ssc) | 67 | void ssc_free(struct ssc_device *ssc) |
| 68 | { | 68 | { |
| 69 | bool disable_clk = true; | ||
| 70 | |||
| 69 | spin_lock(&user_lock); | 71 | spin_lock(&user_lock); |
| 70 | if (ssc->user) { | 72 | if (ssc->user) |
| 71 | ssc->user--; | 73 | ssc->user--; |
| 72 | clk_disable_unprepare(ssc->clk); | 74 | else { |
| 73 | } else { | 75 | disable_clk = false; |
| 74 | dev_dbg(&ssc->pdev->dev, "device already free\n"); | 76 | dev_dbg(&ssc->pdev->dev, "device already free\n"); |
| 75 | } | 77 | } |
| 76 | spin_unlock(&user_lock); | 78 | spin_unlock(&user_lock); |
| 79 | |||
| 80 | if (disable_clk) | ||
| 81 | clk_disable_unprepare(ssc->clk); | ||
| 77 | } | 82 | } |
| 78 | EXPORT_SYMBOL(ssc_free); | 83 | EXPORT_SYMBOL(ssc_free); |
| 79 | 84 | ||
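The ssc_free() rework is the standard cure for calling a sleeping function under a spinlock: make the decision while holding the lock, drop it, then act, since clk_disable_unprepare() may sleep. A small sketch of the pattern with stand-in names (a pthread mutex playing the role of the spinlock):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t user_lock = PTHREAD_MUTEX_INITIALIZER;
    static int user_count = 1;

    static void sleeping_release(void)
    {
        printf("release (may sleep)\n");   /* clk_disable_unprepare() here */
    }

    static void put_user(void)
    {
        bool release = true;

        pthread_mutex_lock(&user_lock);
        if (user_count)
            user_count--;
        else {
            release = false;               /* already free: nothing to do */
            printf("device already free\n");
        }
        pthread_mutex_unlock(&user_lock);

        if (release)                       /* lock dropped: safe to sleep */
            sleeping_release();
    }

    int main(void)
    {
        put_user();
        put_user();
        return 0;
    }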
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index f9296abcf02a..6127ab64bb39 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
| @@ -167,7 +167,7 @@ int mei_hbm_start_req(struct mei_device *dev) | |||
| 167 | 167 | ||
| 168 | dev->hbm_state = MEI_HBM_IDLE; | 168 | dev->hbm_state = MEI_HBM_IDLE; |
| 169 | if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { | 169 | if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { |
| 170 | dev_err(&dev->pdev->dev, "version message writet failed\n"); | 170 | dev_err(&dev->pdev->dev, "version message write failed\n"); |
| 171 | dev->dev_state = MEI_DEV_RESETTING; | 171 | dev->dev_state = MEI_DEV_RESETTING; |
| 172 | mei_reset(dev, 1); | 172 | mei_reset(dev, 1); |
| 173 | return -ENODEV; | 173 | return -ENODEV; |
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index e4f8dec4dc3c..b22c7e247225 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
| @@ -239,14 +239,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev) | |||
| 239 | if (mei_me_hw_is_ready(dev)) | 239 | if (mei_me_hw_is_ready(dev)) |
| 240 | return 0; | 240 | return 0; |
| 241 | 241 | ||
| 242 | dev->recvd_hw_ready = false; | ||
| 242 | mutex_unlock(&dev->device_lock); | 243 | mutex_unlock(&dev->device_lock); |
| 243 | err = wait_event_interruptible_timeout(dev->wait_hw_ready, | 244 | err = wait_event_interruptible_timeout(dev->wait_hw_ready, |
| 244 | dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT); | 245 | dev->recvd_hw_ready, |
| 246 | mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); | ||
| 245 | mutex_lock(&dev->device_lock); | 247 | mutex_lock(&dev->device_lock); |
| 246 | if (!err && !dev->recvd_hw_ready) { | 248 | if (!err && !dev->recvd_hw_ready) { |
| 249 | if (!err) | ||
| 250 | err = -ETIMEDOUT; | ||
| 247 | dev_err(&dev->pdev->dev, | 251 | dev_err(&dev->pdev->dev, |
| 248 | "wait hw ready failed. status = 0x%x\n", err); | 252 | "wait hw ready failed. status = %d\n", err); |
| 249 | return -ETIMEDOUT; | 253 | return err; |
| 250 | } | 254 | } |
| 251 | 255 | ||
| 252 | dev->recvd_hw_ready = false; | 256 | dev->recvd_hw_ready = false; |
| @@ -483,7 +487,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
| 483 | /* check if ME wants a reset */ | 487 | /* check if ME wants a reset */ |
| 484 | if (!mei_hw_is_ready(dev) && | 488 | if (!mei_hw_is_ready(dev) && |
| 485 | dev->dev_state != MEI_DEV_RESETTING && | 489 | dev->dev_state != MEI_DEV_RESETTING && |
| 486 | dev->dev_state != MEI_DEV_INITIALIZING) { | 490 | dev->dev_state != MEI_DEV_INITIALIZING && |
| 491 | dev->dev_state != MEI_DEV_POWER_DOWN && | ||
| 492 | dev->dev_state != MEI_DEV_POWER_UP) { | ||
| 487 | dev_dbg(&dev->pdev->dev, "FW not ready.\n"); | 493 | dev_dbg(&dev->pdev->dev, "FW not ready.\n"); |
| 488 | mei_reset(dev, 1); | 494 | mei_reset(dev, 1); |
| 489 | mutex_unlock(&dev->device_lock); | 495 | mutex_unlock(&dev->device_lock); |
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index ed1d75203af6..e6f16f83ecde 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
| @@ -148,7 +148,8 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled) | |||
| 148 | 148 | ||
| 149 | dev->hbm_state = MEI_HBM_IDLE; | 149 | dev->hbm_state = MEI_HBM_IDLE; |
| 150 | 150 | ||
| 151 | if (dev->dev_state != MEI_DEV_INITIALIZING) { | 151 | if (dev->dev_state != MEI_DEV_INITIALIZING && |
| 152 | dev->dev_state != MEI_DEV_POWER_UP) { | ||
| 152 | if (dev->dev_state != MEI_DEV_DISABLED && | 153 | if (dev->dev_state != MEI_DEV_DISABLED && |
| 153 | dev->dev_state != MEI_DEV_POWER_DOWN) | 154 | dev->dev_state != MEI_DEV_POWER_DOWN) |
| 154 | dev->dev_state = MEI_DEV_RESETTING; | 155 | dev->dev_state = MEI_DEV_RESETTING; |
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 847b1996ce8e..2c5a91bb8ec3 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
| @@ -128,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host, | |||
| 128 | !!on ^ host->pdata->gpio_power_invert); | 128 | !!on ^ host->pdata->gpio_power_invert); |
| 129 | } | 129 | } |
| 130 | if (!host->vcc && host->pdata && host->pdata->setpower) | 130 | if (!host->vcc && host->pdata && host->pdata->setpower) |
| 131 | host->pdata->setpower(mmc_dev(host->mmc), vdd); | 131 | return host->pdata->setpower(mmc_dev(host->mmc), vdd); |
| 132 | 132 | ||
| 133 | return 0; | 133 | return 0; |
| 134 | } | 134 | } |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index a746ba272f04..a956053608f9 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
| @@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum) | |||
| 1007 | 1007 | ||
| 1008 | soft = &pkt.soft.rfc1201; | 1008 | soft = &pkt.soft.rfc1201; |
| 1009 | 1009 | ||
| 1010 | lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE)); | 1010 | lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE); |
| 1011 | if (pkt.hard.offset[0]) { | 1011 | if (pkt.hard.offset[0]) { |
| 1012 | ofs = pkt.hard.offset[0]; | 1012 | ofs = pkt.hard.offset[0]; |
| 1013 | length = 256 - ofs; | 1013 | length = 256 - ofs; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07f257d44a1e..e48cb339c0c6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n) | |||
| 3714 | * The bonding ndo_neigh_setup is called at init time before any | 3714 | * The bonding ndo_neigh_setup is called at init time before any |
| 3715 | * slave exists. So we must declare proxy setup function which will | 3715 | * slave exists. So we must declare proxy setup function which will |
| 3716 | * be used at run time to resolve the actual slave neigh param setup. | 3716 | * be used at run time to resolve the actual slave neigh param setup. |
| 3717 | * | ||
| 3718 | * It's also called by master devices (such as vlans) to setup their | ||
| 3719 | * underlying devices. In that case - do nothing, we're already set up from | ||
| 3720 | * our init. | ||
| 3717 | */ | 3721 | */ |
| 3718 | static int bond_neigh_setup(struct net_device *dev, | 3722 | static int bond_neigh_setup(struct net_device *dev, |
| 3719 | struct neigh_parms *parms) | 3723 | struct neigh_parms *parms) |
| 3720 | { | 3724 | { |
| 3721 | parms->neigh_setup = bond_neigh_init; | 3725 | /* modify only our neigh_parms */ |
| 3726 | if (parms->dev == dev) | ||
| 3727 | parms->neigh_setup = bond_neigh_init; | ||
| 3722 | 3728 | ||
| 3723 | return 0; | 3729 | return 0; |
| 3724 | } | 3730 | } |
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 6aa7b3266c80..ac6177d3befc 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c | |||
| @@ -412,10 +412,20 @@ static void esd_usb2_read_bulk_callback(struct urb *urb) | |||
| 412 | 412 | ||
| 413 | switch (msg->msg.hdr.cmd) { | 413 | switch (msg->msg.hdr.cmd) { |
| 414 | case CMD_CAN_RX: | 414 | case CMD_CAN_RX: |
| 415 | if (msg->msg.rx.net >= dev->net_count) { | ||
| 416 | dev_err(dev->udev->dev.parent, "format error\n"); | ||
| 417 | break; | ||
| 418 | } | ||
| 419 | |||
| 415 | esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg); | 420 | esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg); |
| 416 | break; | 421 | break; |
| 417 | 422 | ||
| 418 | case CMD_CAN_TX: | 423 | case CMD_CAN_TX: |
| 424 | if (msg->msg.txdone.net >= dev->net_count) { | ||
| 425 | dev_err(dev->udev->dev.parent, "format error\n"); | ||
| 426 | break; | ||
| 427 | } | ||
| 428 | |||
| 419 | esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net], | 429 | esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net], |
| 420 | msg); | 430 | msg); |
| 421 | break; | 431 | break; |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25723d8ee201..925ab8ec9329 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
| @@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
| 649 | if ((mc->ptr + rec_len) > mc->end) | 649 | if ((mc->ptr + rec_len) > mc->end) |
| 650 | goto decode_failed; | 650 | goto decode_failed; |
| 651 | 651 | ||
| 652 | memcpy(cf->data, mc->ptr, rec_len); | 652 | memcpy(cf->data, mc->ptr, cf->can_dlc); |
| 653 | mc->ptr += rec_len; | 653 | mc->ptr += rec_len; |
| 654 | } | 654 | } |
| 655 | 655 | ||
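The pcan_usb one-liner bounds memcpy() by the destination's capacity rather than by the record length parsed off the wire: cf->data holds at most 8 bytes, while rec_len is only trusted to advance the parse pointer. A simplified, self-contained sketch of the corrected shape (the struct below is a stand-in for the kernel's struct can_frame):

    #include <stdio.h>
    #include <string.h>

    struct can_frame {
        unsigned char can_dlc;   /* clamped to 8 when decoded */
        unsigned char data[8];
    };

    static void copy_record(struct can_frame *cf, const unsigned char *rec,
                            size_t rec_len)
    {
        /* copy what fits in the frame; rec_len still advances the parser */
        memcpy(cf->data, rec, cf->can_dlc);
        (void)rec_len;
    }

    int main(void)
    {
        unsigned char wire[16] = { 0xde, 0xad, 0xbe, 0xef };
        struct can_frame cf = { .can_dlc = 4 };

        copy_record(&cf, wire, sizeof(wire)); /* 16-byte record, 4 copied */
        printf("%02x %02x\n", cf.data[0], cf.data[3]);
        return 0;
    }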
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index cbd388eea682..8becd3d838b5 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c | |||
| @@ -779,6 +779,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) | |||
| 779 | usb_unanchor_urb(urb); | 779 | usb_unanchor_urb(urb); |
| 780 | usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf, | 780 | usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf, |
| 781 | urb->transfer_dma); | 781 | urb->transfer_dma); |
| 782 | usb_free_urb(urb); | ||
| 782 | break; | 783 | break; |
| 783 | } | 784 | } |
| 784 | 785 | ||
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig index 53ad213e865b..d8d95d4cd45a 100644 --- a/drivers/net/ethernet/allwinner/Kconfig +++ b/drivers/net/ethernet/allwinner/Kconfig | |||
| @@ -3,19 +3,20 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | config NET_VENDOR_ALLWINNER | 5 | config NET_VENDOR_ALLWINNER |
| 6 | bool "Allwinner devices" | 6 | bool "Allwinner devices" |
| 7 | default y | 7 | default y |
| 8 | depends on ARCH_SUNXI | ||
| 9 | ---help--- | ||
| 10 | If you have a network (Ethernet) card belonging to this | ||
| 11 | class, say Y and read the Ethernet-HOWTO, available from | ||
| 12 | <http://www.tldp.org/docs.html#howto>. | ||
| 13 | 8 | ||
| 14 | Note that the answer to this question doesn't directly | 9 | depends on ARCH_SUNXI |
| 15 | affect the kernel: saying N will just cause the configurator | 10 | ---help--- |
| 16 | to skip all the questions about Allwinner cards. If you say Y, | 11 | If you have a network (Ethernet) card belonging to this |
| 17 | you will be asked for your specific card in the following | 12 | class, say Y and read the Ethernet-HOWTO, available from |
| 18 | questions. | 13 | <http://www.tldp.org/docs.html#howto>. |
| 14 | |||
| 15 | Note that the answer to this question doesn't directly | ||
| 16 | affect the kernel: saying N will just cause the configurator | ||
| 17 | to skip all the questions about Allwinner cards. If you say Y, | ||
| 18 | you will be asked for your specific card in the following | ||
| 19 | questions. | ||
| 19 | 20 | ||
| 20 | if NET_VENDOR_ALLWINNER | 21 | if NET_VENDOR_ALLWINNER |
| 21 | 22 | ||
| @@ -26,6 +27,7 @@ config SUN4I_EMAC | |||
| 26 | select CRC32 | 27 | select CRC32 |
| 27 | select MII | 28 | select MII |
| 28 | select PHYLIB | 29 | select PHYLIB |
| 30 | select MDIO_SUN4I | ||
| 29 | ---help--- | 31 | ---help--- |
| 30 | Support for Allwinner A10 EMAC ethernet driver. | 32 | Support for Allwinner A10 EMAC ethernet driver. |
| 31 | 33 | ||
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
| @@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) | |||
| 199 | struct arc_emac_priv *priv = netdev_priv(ndev); | 199 | struct arc_emac_priv *priv = netdev_priv(ndev); |
| 200 | unsigned int work_done; | 200 | unsigned int work_done; |
| 201 | 201 | ||
| 202 | for (work_done = 0; work_done <= budget; work_done++) { | 202 | for (work_done = 0; work_done < budget; work_done++) { |
| 203 | unsigned int *last_rx_bd = &priv->last_rx_bd; | 203 | unsigned int *last_rx_bd = &priv->last_rx_bd; |
| 204 | struct net_device_stats *stats = &priv->stats; | 204 | struct net_device_stats *stats = &priv->stats; |
| 205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; | 205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; |
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index b2bf324631dc..0f0556526ba9 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h | |||
| @@ -520,6 +520,9 @@ struct atl1c_adapter { | |||
| 520 | struct net_device *netdev; | 520 | struct net_device *netdev; |
| 521 | struct pci_dev *pdev; | 521 | struct pci_dev *pdev; |
| 522 | struct napi_struct napi; | 522 | struct napi_struct napi; |
| 523 | struct page *rx_page; | ||
| 524 | unsigned int rx_page_offset; | ||
| 525 | unsigned int rx_frag_size; | ||
| 523 | struct atl1c_hw hw; | 526 | struct atl1c_hw hw; |
| 524 | struct atl1c_hw_stats hw_stats; | 527 | struct atl1c_hw_stats hw_stats; |
| 525 | struct mii_if_info mii; /* MII interface info */ | 528 | struct mii_if_info mii; /* MII interface info */ |
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 786a87483298..a36a760ada28 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c | |||
| @@ -481,10 +481,15 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) | |||
| 481 | static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, | 481 | static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, |
| 482 | struct net_device *dev) | 482 | struct net_device *dev) |
| 483 | { | 483 | { |
| 484 | unsigned int head_size; | ||
| 484 | int mtu = dev->mtu; | 485 | int mtu = dev->mtu; |
| 485 | 486 | ||
| 486 | adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? | 487 | adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? |
| 487 | roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; | 488 | roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; |
| 489 | |||
| 490 | head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + | ||
| 491 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
| 492 | adapter->rx_frag_size = roundup_pow_of_two(head_size); | ||
| 488 | } | 493 | } |
| 489 | 494 | ||
| 490 | static netdev_features_t atl1c_fix_features(struct net_device *netdev, | 495 | static netdev_features_t atl1c_fix_features(struct net_device *netdev, |
| @@ -952,6 +957,10 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) | |||
| 952 | kfree(adapter->tpd_ring[0].buffer_info); | 957 | kfree(adapter->tpd_ring[0].buffer_info); |
| 953 | adapter->tpd_ring[0].buffer_info = NULL; | 958 | adapter->tpd_ring[0].buffer_info = NULL; |
| 954 | } | 959 | } |
| 960 | if (adapter->rx_page) { | ||
| 961 | put_page(adapter->rx_page); | ||
| 962 | adapter->rx_page = NULL; | ||
| 963 | } | ||
| 955 | } | 964 | } |
| 956 | 965 | ||
| 957 | /** | 966 | /** |
| @@ -1639,6 +1648,35 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, | |||
| 1639 | skb_checksum_none_assert(skb); | 1648 | skb_checksum_none_assert(skb); |
| 1640 | } | 1649 | } |
| 1641 | 1650 | ||
| 1651 | static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) | ||
| 1652 | { | ||
| 1653 | struct sk_buff *skb; | ||
| 1654 | struct page *page; | ||
| 1655 | |||
| 1656 | if (adapter->rx_frag_size > PAGE_SIZE) | ||
| 1657 | return netdev_alloc_skb(adapter->netdev, | ||
| 1658 | adapter->rx_buffer_len); | ||
| 1659 | |||
| 1660 | page = adapter->rx_page; | ||
| 1661 | if (!page) { | ||
| 1662 | adapter->rx_page = page = alloc_page(GFP_ATOMIC); | ||
| 1663 | if (unlikely(!page)) | ||
| 1664 | return NULL; | ||
| 1665 | adapter->rx_page_offset = 0; | ||
| 1666 | } | ||
| 1667 | |||
| 1668 | skb = build_skb(page_address(page) + adapter->rx_page_offset, | ||
| 1669 | adapter->rx_frag_size); | ||
| 1670 | if (likely(skb)) { | ||
| 1671 | adapter->rx_page_offset += adapter->rx_frag_size; | ||
| 1672 | if (adapter->rx_page_offset >= PAGE_SIZE) | ||
| 1673 | adapter->rx_page = NULL; | ||
| 1674 | else | ||
| 1675 | get_page(page); | ||
| 1676 | } | ||
| 1677 | return skb; | ||
| 1678 | } | ||
| 1679 | |||
| 1642 | static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) | 1680 | static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) |
| 1643 | { | 1681 | { |
| 1644 | struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; | 1682 | struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; |
| @@ -1660,7 +1698,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) | |||
| 1660 | while (next_info->flags & ATL1C_BUFFER_FREE) { | 1698 | while (next_info->flags & ATL1C_BUFFER_FREE) { |
| 1661 | rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); | 1699 | rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); |
| 1662 | 1700 | ||
| 1663 | skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len); | 1701 | skb = atl1c_alloc_skb(adapter); |
| 1664 | if (unlikely(!skb)) { | 1702 | if (unlikely(!skb)) { |
| 1665 | if (netif_msg_rx_err(adapter)) | 1703 | if (netif_msg_rx_err(adapter)) |
| 1666 | dev_warn(&pdev->dev, "alloc rx buffer failed\n"); | 1704 | dev_warn(&pdev->dev, "alloc rx buffer failed\n"); |
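atl1c_alloc_skb() above carves rx_frag_size fragments out of a shared page and wraps them with build_skb(), relying on page reference counting to keep the page alive until its last fragment is freed; note that the final fragment inherits the pool's original reference instead of calling get_page(). A userspace sketch of the same carve-and-refcount scheme — here the refcount is stored in the page's first word, purely for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SZ  4096u
    #define PAGE_HDR sizeof(unsigned int)   /* per-page refcount */

    struct frag_pool {
        unsigned char *page;     /* current partially-used page, or NULL */
        unsigned int offset;
    };

    static void *frag_alloc(struct frag_pool *pool, unsigned int frag_size)
    {
        unsigned char *frag;

        if (!pool->page) {
            pool->page = aligned_alloc(PAGE_SZ, PAGE_SZ);
            if (!pool->page)
                return NULL;
            *(unsigned int *)pool->page = 1;  /* pool's own reference */
            pool->offset = PAGE_HDR;
        }
        frag = pool->page + pool->offset;
        pool->offset += frag_size;
        if (pool->offset + frag_size > PAGE_SZ)
            pool->page = NULL;   /* full: frag inherits the pool's ref */
        else
            ++*(unsigned int *)pool->page;    /* like get_page() */
        return frag;
    }

    static void frag_free(void *frag)
    {
        unsigned char *page =
            (unsigned char *)((uintptr_t)frag & ~(uintptr_t)(PAGE_SZ - 1));

        if (--*(unsigned int *)page == 0)     /* like put_page() */
            free(page);
    }

    int main(void)
    {
        struct frag_pool pool = { 0 };
        void *a = frag_alloc(&pool, 1024);
        void *b = frag_alloc(&pool, 1024);
        void *c = frag_alloc(&pool, 1024);    /* page now full */

        frag_free(a);
        frag_free(b);
        frag_free(c);                          /* last ref: page freed */
        printf("ok\n");
        return 0;
    }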
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index dedbd76c033e..00b88cbfde25 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
| @@ -486,7 +486,7 @@ struct bnx2x_fastpath { | |||
| 486 | 486 | ||
| 487 | struct napi_struct napi; | 487 | struct napi_struct napi; |
| 488 | 488 | ||
| 489 | #ifdef CONFIG_NET_LL_RX_POLL | 489 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 490 | unsigned int state; | 490 | unsigned int state; |
| 491 | #define BNX2X_FP_STATE_IDLE 0 | 491 | #define BNX2X_FP_STATE_IDLE 0 |
| 492 | #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ | 492 | #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ |
| @@ -498,7 +498,7 @@ struct bnx2x_fastpath { | |||
| 498 | #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) | 498 | #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) |
| 499 | /* protect state */ | 499 | /* protect state */ |
| 500 | spinlock_t lock; | 500 | spinlock_t lock; |
| 501 | #endif /* CONFIG_NET_LL_RX_POLL */ | 501 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 502 | 502 | ||
| 503 | union host_hc_status_block status_blk; | 503 | union host_hc_status_block status_blk; |
| 504 | /* chip independent shortcuts into sb structure */ | 504 | /* chip independent shortcuts into sb structure */ |
| @@ -572,7 +572,7 @@ struct bnx2x_fastpath { | |||
| 572 | #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) | 572 | #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) |
| 573 | #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) | 573 | #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) |
| 574 | 574 | ||
| 575 | #ifdef CONFIG_NET_LL_RX_POLL | 575 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 576 | static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) | 576 | static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) |
| 577 | { | 577 | { |
| 578 | spin_lock_init(&fp->lock); | 578 | spin_lock_init(&fp->lock); |
| @@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) | |||
| 680 | { | 680 | { |
| 681 | return false; | 681 | return false; |
| 682 | } | 682 | } |
| 683 | #endif /* CONFIG_NET_LL_RX_POLL */ | 683 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 684 | 684 | ||
| 685 | /* Use 2500 as a mini-jumbo MTU for FCoE */ | 685 | /* Use 2500 as a mini-jumbo MTU for FCoE */ |
| 686 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 | 686 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 |
| @@ -1333,6 +1333,8 @@ enum { | |||
| 1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, | 1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, |
| 1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, | 1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, |
| 1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, | 1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, |
| 1336 | BNX2X_SP_RTNL_TX_STOP, | ||
| 1337 | BNX2X_SP_RTNL_TX_RESUME, | ||
| 1336 | }; | 1338 | }; |
| 1337 | 1339 | ||
| 1338 | struct bnx2x_prev_path_list { | 1340 | struct bnx2x_prev_path_list { |
| @@ -1502,6 +1504,7 @@ struct bnx2x { | |||
| 1502 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) | 1504 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) |
| 1503 | #define IS_VF_FLAG (1 << 22) | 1505 | #define IS_VF_FLAG (1 << 22) |
| 1504 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) | 1506 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) |
| 1507 | #define BC_SUPPORTS_RMMOD_CMD (1 << 24) | ||
| 1505 | 1508 | ||
| 1506 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) | 1509 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) |
| 1507 | 1510 | ||
| @@ -1830,6 +1833,8 @@ struct bnx2x { | |||
| 1830 | 1833 | ||
| 1831 | int fp_array_size; | 1834 | int fp_array_size; |
| 1832 | u32 dump_preset_idx; | 1835 | u32 dump_preset_idx; |
| 1836 | bool stats_started; | ||
| 1837 | struct semaphore stats_sema; | ||
| 1833 | }; | 1838 | }; |
| 1834 | 1839 | ||
| 1835 | /* Tx queues may be less or equal to Rx queues */ | 1840 | /* Tx queues may be less or equal to Rx queues */ |
| @@ -2451,4 +2456,6 @@ enum bnx2x_pci_bus_speed { | |||
| 2451 | BNX2X_PCI_LINK_SPEED_5000 = 5000, | 2456 | BNX2X_PCI_LINK_SPEED_5000 = 5000, |
| 2452 | BNX2X_PCI_LINK_SPEED_8000 = 8000 | 2457 | BNX2X_PCI_LINK_SPEED_8000 = 8000 |
| 2453 | }; | 2458 | }; |
| 2459 | |||
| 2460 | void bnx2x_set_local_cmng(struct bnx2x *bp); | ||
| 2454 | #endif /* bnx2x.h */ | 2461 | #endif /* bnx2x.h */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ee350bde1818..0cc26110868d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
| 53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; | 53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; |
| 54 | int old_max_eth_txqs, new_max_eth_txqs; | 54 | int old_max_eth_txqs, new_max_eth_txqs; |
| 55 | int old_txdata_index = 0, new_txdata_index = 0; | 55 | int old_txdata_index = 0, new_txdata_index = 0; |
| 56 | struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; | ||
| 56 | 57 | ||
| 57 | /* Copy the NAPI object as it has been already initialized */ | 58 | /* Copy the NAPI object as it has been already initialized */ |
| 58 | from_fp->napi = to_fp->napi; | 59 | from_fp->napi = to_fp->napi; |
| @@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
| 61 | memcpy(to_fp, from_fp, sizeof(*to_fp)); | 62 | memcpy(to_fp, from_fp, sizeof(*to_fp)); |
| 62 | to_fp->index = to; | 63 | to_fp->index = to; |
| 63 | 64 | ||
| 65 | /* Retain the tpa_info of the original `to' version as we don't want | ||
| 66 | * 2 FPs to contain the same tpa_info pointer. | ||
| 67 | */ | ||
| 68 | to_fp->tpa_info = old_tpa_info; | ||
| 69 | |||
| 64 | /* move sp_objs contents as well, as their indices match fp ones */ | 70 | /* move sp_objs contents as well, as their indices match fp ones */ |
| 65 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); | 71 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); |
| 66 | 72 | ||
| @@ -2956,8 +2962,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
| 2956 | if (IS_PF(bp)) { | 2962 | if (IS_PF(bp)) { |
| 2957 | if (CNIC_LOADED(bp)) | 2963 | if (CNIC_LOADED(bp)) |
| 2958 | bnx2x_free_mem_cnic(bp); | 2964 | bnx2x_free_mem_cnic(bp); |
| 2959 | bnx2x_free_mem(bp); | ||
| 2960 | } | 2965 | } |
| 2966 | bnx2x_free_mem(bp); | ||
| 2967 | |||
| 2961 | bp->state = BNX2X_STATE_CLOSED; | 2968 | bp->state = BNX2X_STATE_CLOSED; |
| 2962 | bp->cnic_loaded = false; | 2969 | bp->cnic_loaded = false; |
| 2963 | 2970 | ||
| @@ -3117,7 +3124,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget) | |||
| 3117 | return work_done; | 3124 | return work_done; |
| 3118 | } | 3125 | } |
| 3119 | 3126 | ||
| 3120 | #ifdef CONFIG_NET_LL_RX_POLL | 3127 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 3121 | /* must be called with local_bh_disable()d */ | 3128 | /* must be called with local_bh_disable()d */ |
| 3122 | int bnx2x_low_latency_recv(struct napi_struct *napi) | 3129 | int bnx2x_low_latency_recv(struct napi_struct *napi) |
| 3123 | { | 3130 | { |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..fcf2761d8828 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
| @@ -30,10 +30,8 @@ | |||
| 30 | #include "bnx2x_dcb.h" | 30 | #include "bnx2x_dcb.h" |
| 31 | 31 | ||
| 32 | /* forward declarations of dcbx related functions */ | 32 | /* forward declarations of dcbx related functions */ |
| 33 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
| 34 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); | 33 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); |
| 35 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); | 34 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); |
| 36 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
| 37 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, | 35 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, |
| 38 | u32 *set_configuration_ets_pg, | 36 | u32 *set_configuration_ets_pg, |
| 39 | u32 *pri_pg_tbl); | 37 | u32 *pri_pg_tbl); |
| @@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp) | |||
| 425 | bnx2x_pfc_clear(bp); | 423 | bnx2x_pfc_clear(bp); |
| 426 | } | 424 | } |
| 427 | 425 | ||
| 428 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) | 426 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) |
| 429 | { | 427 | { |
| 430 | struct bnx2x_func_state_params func_params = {NULL}; | 428 | struct bnx2x_func_state_params func_params = {NULL}; |
| 429 | int rc; | ||
| 431 | 430 | ||
| 432 | func_params.f_obj = &bp->func_obj; | 431 | func_params.f_obj = &bp->func_obj; |
| 433 | func_params.cmd = BNX2X_F_CMD_TX_STOP; | 432 | func_params.cmd = BNX2X_F_CMD_TX_STOP; |
| 434 | 433 | ||
| 434 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
| 435 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
| 436 | |||
| 435 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); | 437 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); |
| 436 | return bnx2x_func_state_change(bp, &func_params); | 438 | |
| 439 | rc = bnx2x_func_state_change(bp, &func_params); | ||
| 440 | if (rc) { | ||
| 441 | BNX2X_ERR("Unable to hold traffic for HW configuration\n"); | ||
| 442 | bnx2x_panic(); | ||
| 443 | } | ||
| 444 | |||
| 445 | return rc; | ||
| 437 | } | 446 | } |
| 438 | 447 | ||
| 439 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) | 448 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) |
| 440 | { | 449 | { |
| 441 | struct bnx2x_func_state_params func_params = {NULL}; | 450 | struct bnx2x_func_state_params func_params = {NULL}; |
| 442 | struct bnx2x_func_tx_start_params *tx_params = | 451 | struct bnx2x_func_tx_start_params *tx_params = |
| 443 | &func_params.params.tx_start; | 452 | &func_params.params.tx_start; |
| 453 | int rc; | ||
| 444 | 454 | ||
| 445 | func_params.f_obj = &bp->func_obj; | 455 | func_params.f_obj = &bp->func_obj; |
| 446 | func_params.cmd = BNX2X_F_CMD_TX_START; | 456 | func_params.cmd = BNX2X_F_CMD_TX_START; |
| 447 | 457 | ||
| 458 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
| 459 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
| 460 | |||
| 448 | bnx2x_dcbx_fw_struct(bp, tx_params); | 461 | bnx2x_dcbx_fw_struct(bp, tx_params); |
| 449 | 462 | ||
| 450 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); | 463 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); |
| 451 | return bnx2x_func_state_change(bp, &func_params); | 464 | |
| 465 | rc = bnx2x_func_state_change(bp, &func_params); | ||
| 466 | if (rc) { | ||
| 467 | BNX2X_ERR("Unable to resume traffic after HW configuration\n"); | ||
| 468 | bnx2x_panic(); | ||
| 469 | } | ||
| 470 | |||
| 471 | return rc; | ||
| 452 | } | 472 | } |
| 453 | 473 | ||
| 454 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) | 474 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) |
| @@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
| 744 | if (IS_MF(bp)) | 764 | if (IS_MF(bp)) |
| 745 | bnx2x_link_sync_notify(bp); | 765 | bnx2x_link_sync_notify(bp); |
| 746 | 766 | ||
| 747 | bnx2x_dcbx_stop_hw_tx(bp); | 767 | set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); |
| 768 | |||
| 769 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
| 748 | 770 | ||
| 749 | return; | 771 | return; |
| 750 | } | 772 | } |
| @@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
| 753 | bnx2x_pfc_set_pfc(bp); | 775 | bnx2x_pfc_set_pfc(bp); |
| 754 | 776 | ||
| 755 | bnx2x_dcbx_update_ets_params(bp); | 777 | bnx2x_dcbx_update_ets_params(bp); |
| 756 | bnx2x_dcbx_resume_hw_tx(bp); | 778 | |
| 779 | /* ets may affect cmng configuration: reinit it in hw */ | ||
| 780 | bnx2x_set_local_cmng(bp); | ||
| 781 | |||
| 782 | set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); | ||
| 783 | |||
| 784 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
| 757 | 785 | ||
| 758 | return; | 786 | return; |
| 759 | case BNX2X_DCBX_STATE_TX_RELEASED: | 787 | case BNX2X_DCBX_STATE_TX_RELEASED: |
| @@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, | |||
| 2363 | case DCB_FEATCFG_ATTR_PG: | 2391 | case DCB_FEATCFG_ATTR_PG: |
| 2364 | if (bp->dcbx_local_feat.ets.enabled) | 2392 | if (bp->dcbx_local_feat.ets.enabled) |
| 2365 | *flags |= DCB_FEATCFG_ENABLE; | 2393 | *flags |= DCB_FEATCFG_ENABLE; |
| 2366 | if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) | 2394 | if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | |
| 2395 | DCBX_REMOTE_MIB_ERROR)) | ||
| 2367 | *flags |= DCB_FEATCFG_ERROR; | 2396 | *flags |= DCB_FEATCFG_ERROR; |
| 2368 | break; | 2397 | break; |
| 2369 | case DCB_FEATCFG_ATTR_PFC: | 2398 | case DCB_FEATCFG_ATTR_PFC: |
| 2370 | if (bp->dcbx_local_feat.pfc.enabled) | 2399 | if (bp->dcbx_local_feat.pfc.enabled) |
| 2371 | *flags |= DCB_FEATCFG_ENABLE; | 2400 | *flags |= DCB_FEATCFG_ENABLE; |
| 2372 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | | 2401 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | |
| 2373 | DCBX_LOCAL_PFC_MISMATCH)) | 2402 | DCBX_LOCAL_PFC_MISMATCH | |
| 2403 | DCBX_REMOTE_MIB_ERROR)) | ||
| 2374 | *flags |= DCB_FEATCFG_ERROR; | 2404 | *flags |= DCB_FEATCFG_ERROR; |
| 2375 | break; | 2405 | break; |
| 2376 | case DCB_FEATCFG_ATTR_APP: | 2406 | case DCB_FEATCFG_ATTR_APP: |
| 2377 | if (bp->dcbx_local_feat.app.enabled) | 2407 | if (bp->dcbx_local_feat.app.enabled) |
| 2378 | *flags |= DCB_FEATCFG_ENABLE; | 2408 | *flags |= DCB_FEATCFG_ENABLE; |
| 2379 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | | 2409 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | |
| 2380 | DCBX_LOCAL_APP_MISMATCH)) | 2410 | DCBX_LOCAL_APP_MISMATCH | |
| 2411 | DCBX_REMOTE_MIB_ERROR)) | ||
| 2381 | *flags |= DCB_FEATCFG_ERROR; | 2412 | *flags |= DCB_FEATCFG_ERROR; |
| 2382 | break; | 2413 | break; |
| 2383 | default: | 2414 | default: |
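The bnx2x_dcb changes stop driving the TX stop/resume ramrods straight from the DCBX handler: the handler now only sets BNX2X_SP_RTNL_TX_STOP or BNX2X_SP_RTNL_TX_RESUME in sp_rtnl_state and schedules the sp_rtnl worker, which issues the ramrods from process context with RAMROD_COMP_WAIT and RAMROD_RETRY set. A toy, single-threaded model of that defer-to-worker flag pattern (the real driver uses atomic set_bit()/test_and_clear_bit() and a delayed workqueue):

    #include <stdio.h>

    enum { SP_RTNL_TX_STOP, SP_RTNL_TX_RESUME };

    static unsigned long sp_rtnl_state;

    /* Event context: record the request and (in the driver) schedule
     * the delayed work; do not touch the hardware here. */
    static void dcbx_event(int bit)
    {
        sp_rtnl_state |= 1UL << bit;
    }

    /* Worker context, where sleeping and heavyweight locking are allowed. */
    static void sp_rtnl_task(void)
    {
        if (sp_rtnl_state & (1UL << SP_RTNL_TX_STOP)) {
            sp_rtnl_state &= ~(1UL << SP_RTNL_TX_STOP);
            printf("stop HW tx (ramrod, may sleep)\n");
        }
        if (sp_rtnl_state & (1UL << SP_RTNL_TX_RESUME)) {
            sp_rtnl_state &= ~(1UL << SP_RTNL_TX_RESUME);
            printf("resume HW tx (ramrod, may sleep)\n");
        }
    }

    int main(void)
    {
        dcbx_event(SP_RTNL_TX_STOP);
        sp_rtnl_task();
        dcbx_event(SP_RTNL_TX_RESUME);
        sp_rtnl_task();
        return 0;
    }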
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 125bd1b6586f..804b8f64463e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | |||
| @@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; | |||
| 199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); | 199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); |
| 200 | #endif /* BCM_DCBNL */ | 200 | #endif /* BCM_DCBNL */ |
| 201 | 201 | ||
| 202 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
| 203 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
| 204 | |||
| 202 | #endif /* BNX2X_DCB_H */ | 205 | #endif /* BNX2X_DCB_H */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | |||
| @@ -1300,6 +1300,9 @@ struct drv_func_mb { | |||
| 1300 | 1300 | ||
| 1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 | 1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 |
| 1302 | 1302 | ||
| 1303 | #define DRV_MSG_CODE_RMMOD 0xdb000000 | ||
| 1304 | #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f | ||
| 1305 | |||
| 1303 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | 1306 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 |
| 1304 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | 1307 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 |
| 1305 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | 1308 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 |
| @@ -1372,6 +1375,8 @@ struct drv_func_mb { | |||
| 1372 | 1375 | ||
| 1373 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 | 1376 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 |
| 1374 | 1377 | ||
| 1378 | #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 | ||
| 1379 | |||
| 1375 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 | 1380 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 |
| 1376 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 | 1381 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 |
| 1377 | 1382 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e5da07858a2f..1627a4e09c32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp) | |||
| 2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; | 2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; |
| 2262 | } | 2262 | } |
| 2263 | 2263 | ||
| 2264 | static void bnx2x_init_dropless_fc(struct bnx2x *bp) | ||
| 2265 | { | ||
| 2266 | u32 pause_enabled = 0; | ||
| 2267 | |||
| 2268 | if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { | ||
| 2269 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
| 2270 | pause_enabled = 1; | ||
| 2271 | |||
| 2272 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
| 2273 | USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), | ||
| 2274 | pause_enabled); | ||
| 2275 | } | ||
| 2276 | |||
| 2277 | DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", | ||
| 2278 | pause_enabled ? "enabled" : "disabled"); | ||
| 2279 | } | ||
| 2280 | |||
| 2264 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | 2281 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) |
| 2265 | { | 2282 | { |
| 2266 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); | 2283 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); |
| @@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | |||
| 2294 | 2311 | ||
| 2295 | bnx2x_release_phy_lock(bp); | 2312 | bnx2x_release_phy_lock(bp); |
| 2296 | 2313 | ||
| 2314 | bnx2x_init_dropless_fc(bp); | ||
| 2315 | |||
| 2297 | bnx2x_calc_fc_adv(bp); | 2316 | bnx2x_calc_fc_adv(bp); |
| 2298 | 2317 | ||
| 2299 | if (bp->link_vars.link_up) { | 2318 | if (bp->link_vars.link_up) { |
| @@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp) | |||
| 2315 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 2334 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
| 2316 | bnx2x_release_phy_lock(bp); | 2335 | bnx2x_release_phy_lock(bp); |
| 2317 | 2336 | ||
| 2337 | bnx2x_init_dropless_fc(bp); | ||
| 2338 | |||
| 2318 | bnx2x_calc_fc_adv(bp); | 2339 | bnx2x_calc_fc_adv(bp); |
| 2319 | } else | 2340 | } else |
| 2320 | BNX2X_ERR("Bootcode is missing - can not set link\n"); | 2341 | BNX2X_ERR("Bootcode is missing - can not set link\n"); |
| @@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
| 2476 | 2497 | ||
| 2477 | input.port_rate = bp->link_vars.line_speed; | 2498 | input.port_rate = bp->link_vars.line_speed; |
| 2478 | 2499 | ||
| 2479 | if (cmng_type == CMNG_FNS_MINMAX) { | 2500 | if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { |
| 2480 | int vn; | 2501 | int vn; |
| 2481 | 2502 | ||
| 2482 | /* read mf conf from shmem */ | 2503 | /* read mf conf from shmem */ |
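
The new `input.port_rate` test above is the substantive fix in this hunk: the min-max CMNG setup derives its rate-shaping and fairness parameters from the port rate, so running it while link_vars.line_speed is still zero (for example mid link transition) would feed a zero rate into that arithmetic. A minimal sketch of the guard, with illustrative names rather than the driver's real structures:

/* Sketch: skip bandwidth shaping until a real line speed is reported.
 * port_rate and vn_min_rate are stand-ins for the bnx2x cmng inputs,
 * not the actual fields.
 */
static void cmng_minmax_init_sketch(u32 port_rate, u32 vn_min_rate)
{
	u32 fair_share;

	if (!port_rate)		/* link still down: nothing to shape */
		return;

	fair_share = vn_min_rate * 100 / port_rate;	/* safe: non-zero */
	(void)fair_share;
}
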
| @@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp, | |||
| 2533 | } | 2554 | } |
| 2534 | } | 2555 | } |
| 2535 | 2556 | ||
| 2557 | /* init cmng mode in HW according to local configuration */ | ||
| 2558 | void bnx2x_set_local_cmng(struct bnx2x *bp) | ||
| 2559 | { | ||
| 2560 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | ||
| 2561 | |||
| 2562 | if (cmng_fns != CMNG_FNS_NONE) { | ||
| 2563 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
| 2564 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
| 2565 | } else { | ||
| 2566 | /* rate shaping and fairness are disabled */ | ||
| 2567 | DP(NETIF_MSG_IFUP, | ||
| 2568 | "single function mode without fairness\n"); | ||
| 2569 | } | ||
| 2570 | } | ||
| 2571 | |||
| 2536 | /* This function is called upon link interrupt */ | 2572 | /* This function is called upon link interrupt */ |
| 2537 | static void bnx2x_link_attn(struct bnx2x *bp) | 2573 | static void bnx2x_link_attn(struct bnx2x *bp) |
| 2538 | { | 2574 | { |
| @@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
| 2541 | 2577 | ||
| 2542 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 2578 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
| 2543 | 2579 | ||
| 2544 | if (bp->link_vars.link_up) { | 2580 | bnx2x_init_dropless_fc(bp); |
| 2545 | |||
| 2546 | /* dropless flow control */ | ||
| 2547 | if (!CHIP_IS_E1(bp) && bp->dropless_fc) { | ||
| 2548 | int port = BP_PORT(bp); | ||
| 2549 | u32 pause_enabled = 0; | ||
| 2550 | 2581 | ||
| 2551 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | 2582 | if (bp->link_vars.link_up) { |
| 2552 | pause_enabled = 1; | ||
| 2553 | |||
| 2554 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
| 2555 | USTORM_ETH_PAUSE_ENABLED_OFFSET(port), | ||
| 2556 | pause_enabled); | ||
| 2557 | } | ||
| 2558 | 2583 | ||
| 2559 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { | 2584 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { |
| 2560 | struct host_port_stats *pstats; | 2585 | struct host_port_stats *pstats; |
| @@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
| 2568 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2593 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
| 2569 | } | 2594 | } |
| 2570 | 2595 | ||
| 2571 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { | 2596 | if (bp->link_vars.link_up && bp->link_vars.line_speed) |
| 2572 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | 2597 | bnx2x_set_local_cmng(bp); |
| 2573 | |||
| 2574 | if (cmng_fns != CMNG_FNS_NONE) { | ||
| 2575 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
| 2576 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
| 2577 | } else | ||
| 2578 | /* rate shaping and fairness are disabled */ | ||
| 2579 | DP(NETIF_MSG_IFUP, | ||
| 2580 | "single function mode without fairness\n"); | ||
| 2581 | } | ||
| 2582 | 2598 | ||
| 2583 | __bnx2x_link_report(bp); | 2599 | __bnx2x_link_report(bp); |
| 2584 | 2600 | ||
| @@ -7839,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
| 7839 | { | 7855 | { |
| 7840 | int i; | 7856 | int i; |
| 7841 | 7857 | ||
| 7842 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
| 7843 | sizeof(struct host_sp_status_block)); | ||
| 7844 | |||
| 7845 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | 7858 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, |
| 7846 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | 7859 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
| 7847 | 7860 | ||
| 7861 | if (IS_VF(bp)) | ||
| 7862 | return; | ||
| 7863 | |||
| 7864 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
| 7865 | sizeof(struct host_sp_status_block)); | ||
| 7866 | |||
| 7848 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 7867 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
| 7849 | sizeof(struct bnx2x_slowpath)); | 7868 | sizeof(struct bnx2x_slowpath)); |
| 7850 | 7869 | ||
| @@ -9639,6 +9658,12 @@ sp_rtnl_not_reset: | |||
| 9639 | &bp->sp_rtnl_state)) | 9658 | &bp->sp_rtnl_state)) |
| 9640 | bnx2x_pf_set_vfs_vlan(bp); | 9659 | bnx2x_pf_set_vfs_vlan(bp); |
| 9641 | 9660 | ||
| 9661 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) | ||
| 9662 | bnx2x_dcbx_stop_hw_tx(bp); | ||
| 9663 | |||
| 9664 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) | ||
| 9665 | bnx2x_dcbx_resume_hw_tx(bp); | ||
| 9666 | |||
| 9642 | /* work which needs rtnl lock not-taken (as it takes the lock itself and | 9667 | /* work which needs rtnl lock not-taken (as it takes the lock itself and |
| 9643 | * can be called from other contexts as well) | 9668 | * can be called from other contexts as well) |
| 9644 | */ | 9669 | */ |
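
These two checks extend the sp_rtnl deferral pattern to DCBX: work that must run under rtnl_lock (here, stopping and resuming HW transmission during a DCBX reconfiguration) is requested by setting a bit and kicking the task rather than being called inline. The producer side is not part of this hunk; it presumably follows the usual bnx2x idiom:

/* Sketch (assumed producer, e.g. from the DCBX completion path):
 * publish the request bit, then schedule the rtnl-context worker.
 */
static void bnx2x_dcbx_req_tx_stop(struct bnx2x *bp)
{
	set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
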
| @@ -10362,6 +10387,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
| 10362 | 10387 | ||
| 10363 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? | 10388 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? |
| 10364 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; | 10389 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; |
| 10390 | |||
| 10391 | bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? | ||
| 10392 | BC_SUPPORTS_RMMOD_CMD : 0; | ||
| 10393 | |||
| 10365 | boot_mode = SHMEM_RD(bp, | 10394 | boot_mode = SHMEM_RD(bp, |
| 10366 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & | 10395 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & |
| 10367 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; | 10396 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; |
| @@ -11137,6 +11166,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp) | |||
| 11137 | int tmp; | 11166 | int tmp; |
| 11138 | u32 cfg; | 11167 | u32 cfg; |
| 11139 | 11168 | ||
| 11169 | if (IS_VF(bp)) | ||
| 11170 | return 0; | ||
| 11171 | |||
| 11140 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { | 11172 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { |
| 11141 | /* Take function: tmp = func */ | 11173 | /* Take function: tmp = func */ |
| 11142 | tmp = BP_ABS_FUNC(bp); | 11174 | tmp = BP_ABS_FUNC(bp); |
| @@ -11524,6 +11556,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
| 11524 | mutex_init(&bp->port.phy_mutex); | 11556 | mutex_init(&bp->port.phy_mutex); |
| 11525 | mutex_init(&bp->fw_mb_mutex); | 11557 | mutex_init(&bp->fw_mb_mutex); |
| 11526 | spin_lock_init(&bp->stats_lock); | 11558 | spin_lock_init(&bp->stats_lock); |
| 11559 | sema_init(&bp->stats_sema, 1); | ||
| 11527 | 11560 | ||
| 11528 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 11561 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
| 11529 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); | 11562 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); |
| @@ -12026,7 +12059,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
| 12026 | .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, | 12059 | .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, |
| 12027 | #endif | 12060 | #endif |
| 12028 | 12061 | ||
| 12029 | #ifdef CONFIG_NET_LL_RX_POLL | 12062 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 12030 | .ndo_busy_poll = bnx2x_low_latency_recv, | 12063 | .ndo_busy_poll = bnx2x_low_latency_recv, |
| 12031 | #endif | 12064 | #endif |
| 12032 | }; | 12065 | }; |
| @@ -12817,13 +12850,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, | |||
| 12817 | bnx2x_dcbnl_update_applist(bp, true); | 12850 | bnx2x_dcbnl_update_applist(bp, true); |
| 12818 | #endif | 12851 | #endif |
| 12819 | 12852 | ||
| 12853 | if (IS_PF(bp) && | ||
| 12854 | !BP_NOMCP(bp) && | ||
| 12855 | (bp->flags & BC_SUPPORTS_RMMOD_CMD)) | ||
| 12856 | bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); | ||
| 12857 | |||
| 12820 | /* Close the interface - either directly or implicitly */ | 12858 | /* Close the interface - either directly or implicitly */ |
| 12821 | if (remove_netdev) { | 12859 | if (remove_netdev) { |
| 12822 | unregister_netdev(dev); | 12860 | unregister_netdev(dev); |
| 12823 | } else { | 12861 | } else { |
| 12824 | rtnl_lock(); | 12862 | rtnl_lock(); |
| 12825 | if (netif_running(dev)) | 12863 | dev_close(dev); |
| 12826 | bnx2x_close(dev); | ||
| 12827 | rtnl_unlock(); | 12864 | rtnl_unlock(); |
| 12828 | } | 12865 | } |
| 12829 | 12866 | ||
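
Two separate fixes close out the bnx2x_main.c changes. First, when the bootcode is new enough (REQ_BC_VER_4_RMMOD_CMD, checked at probe time and cached as BC_SUPPORTS_RMMOD_CMD), the PF now tells the management FW that the driver is being unloaded. Second, the open-coded netif_running() plus bnx2x_close() pair becomes a plain dev_close(), which already no-ops on a stopped interface and keeps the core netdev state consistent. The FW notification, condensed into one helper-style sketch:

/* Sketch: notify the MCP on rmmod only when it can understand the
 * command; BC_SUPPORTS_RMMOD_CMD is derived from bc_ver at probe.
 */
static void bnx2x_notify_fw_rmmod(struct bnx2x *bp)
{
	if (IS_PF(bp) && !BP_NOMCP(bp) &&
	    (bp->flags & BC_SUPPORTS_RMMOD_CMD))
		bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
}
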
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861efb5051..e8706e19f96f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
| @@ -522,23 +522,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp, | |||
| 522 | return 0; | 522 | return 0; |
| 523 | } | 523 | } |
| 524 | 524 | ||
| 525 | static int | ||
| 526 | bnx2x_vfop_config_vlan0(struct bnx2x *bp, | ||
| 527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac, | ||
| 528 | bool add) | ||
| 529 | { | ||
| 530 | int rc; | ||
| 531 | |||
| 532 | vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : | ||
| 533 | BNX2X_VLAN_MAC_DEL; | ||
| 534 | vlan_mac->user_req.u.vlan.vlan = 0; | ||
| 535 | |||
| 536 | rc = bnx2x_config_vlan_mac(bp, vlan_mac); | ||
| 537 | if (rc == -EEXIST) | ||
| 538 | rc = 0; | ||
| 539 | return rc; | ||
| 540 | } | ||
| 541 | |||
| 542 | static int bnx2x_vfop_config_list(struct bnx2x *bp, | 525 | static int bnx2x_vfop_config_list(struct bnx2x *bp, |
| 543 | struct bnx2x_vfop_filters *filters, | 526 | struct bnx2x_vfop_filters *filters, |
| 544 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) | 527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) |
| @@ -643,30 +626,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
| 643 | 626 | ||
| 644 | case BNX2X_VFOP_VLAN_CONFIG_LIST: | 627 | case BNX2X_VFOP_VLAN_CONFIG_LIST: |
| 645 | /* next state */ | 628 | /* next state */ |
| 646 | vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; | 629 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; |
| 647 | |||
| 648 | /* remove vlan0 - could be no-op */ | ||
| 649 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); | ||
| 650 | if (vfop->rc) | ||
| 651 | goto op_err; | ||
| 652 | 630 | ||
| 653 | /* Do vlan list config. if this operation fails we try to | 631 | /* do list config */ |
| 654 | * restore vlan0 to keep the queue is working order | ||
| 655 | */ | ||
| 656 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); | 632 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); |
| 657 | if (!vfop->rc) { | 633 | if (!vfop->rc) { |
| 658 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); | 634 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); |
| 659 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); | 635 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); |
| 660 | } | 636 | } |
| 661 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ | ||
| 662 | |||
| 663 | case BNX2X_VFOP_VLAN_CONFIG_LIST_0: | ||
| 664 | /* next state */ | ||
| 665 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
| 666 | |||
| 667 | if (list_empty(&obj->head)) | ||
| 668 | /* add vlan0 */ | ||
| 669 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); | ||
| 670 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 637 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); |
| 671 | 638 | ||
| 672 | default: | 639 | default: |
| @@ -1747,11 +1714,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) | |||
| 1747 | 1714 | ||
| 1748 | void bnx2x_iov_init_dmae(struct bnx2x *bp) | 1715 | void bnx2x_iov_init_dmae(struct bnx2x *bp) |
| 1749 | { | 1716 | { |
| 1750 | DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); | 1717 | if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) |
| 1751 | if (!IS_SRIOV(bp)) | 1718 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); |
| 1752 | return; | ||
| 1753 | |||
| 1754 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); | ||
| 1755 | } | 1719 | } |
| 1756 | 1720 | ||
| 1757 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) | 1721 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) |
| @@ -2822,6 +2786,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) | |||
| 2822 | return 0; | 2786 | return 0; |
| 2823 | } | 2787 | } |
| 2824 | 2788 | ||
| 2789 | struct set_vf_state_cookie { | ||
| 2790 | struct bnx2x_virtf *vf; | ||
| 2791 | u8 state; | ||
| 2792 | }; | ||
| 2793 | |||
| 2794 | void bnx2x_set_vf_state(void *cookie) | ||
| 2795 | { | ||
| 2796 | struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; | ||
| 2797 | |||
| 2798 | p->vf->state = p->state; | ||
| 2799 | } | ||
| 2800 | |||
| 2825 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ | 2801 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ |
| 2826 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | 2802 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) |
| 2827 | { | 2803 | { |
| @@ -2872,7 +2848,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
| 2872 | op_err: | 2848 | op_err: |
| 2873 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); | 2849 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); |
| 2874 | op_done: | 2850 | op_done: |
| 2875 | vf->state = VF_ACQUIRED; | 2851 | |
| 2852 | /* need to make sure there are no outstanding stats ramrods which may | ||
| 2853 | * cause the device to access the VF's stats buffer which it will free | ||
| 2854 | * as soon as we return from the close flow. | ||
| 2855 | */ | ||
| 2856 | { | ||
| 2857 | struct set_vf_state_cookie cookie; | ||
| 2858 | |||
| 2859 | cookie.vf = vf; | ||
| 2860 | cookie.state = VF_ACQUIRED; | ||
| 2861 | bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); | ||
| 2862 | } | ||
| 2863 | |||
| 2876 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); | 2864 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); |
| 2877 | bnx2x_vfop_end(bp, vf, vfop); | 2865 | bnx2x_vfop_end(bp, vf, vfop); |
| 2878 | } | 2866 | } |
| @@ -3084,8 +3072,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp) | |||
| 3084 | pci_disable_sriov(bp->pdev); | 3072 | pci_disable_sriov(bp->pdev); |
| 3085 | } | 3073 | } |
| 3086 | 3074 | ||
| 3087 | static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | 3075 | static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, |
| 3088 | struct bnx2x_virtf *vf) | 3076 | struct bnx2x_virtf **vf, |
| 3077 | struct pf_vf_bulletin_content **bulletin) | ||
| 3089 | { | 3078 | { |
| 3090 | if (bp->state != BNX2X_STATE_OPEN) { | 3079 | if (bp->state != BNX2X_STATE_OPEN) { |
| 3091 | BNX2X_ERR("vf ndo called though PF is down\n"); | 3080 | BNX2X_ERR("vf ndo called though PF is down\n"); |
| @@ -3103,12 +3092,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | |||
| 3103 | return -EINVAL; | 3092 | return -EINVAL; |
| 3104 | } | 3093 | } |
| 3105 | 3094 | ||
| 3106 | if (!vf) { | 3095 | /* init members */ |
| 3096 | *vf = BP_VF(bp, vfidx); | ||
| 3097 | *bulletin = BP_VF_BULLETIN(bp, vfidx); | ||
| 3098 | |||
| 3099 | if (!*vf) { | ||
| 3107 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", | 3100 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", |
| 3108 | vfidx); | 3101 | vfidx); |
| 3109 | return -EINVAL; | 3102 | return -EINVAL; |
| 3110 | } | 3103 | } |
| 3111 | 3104 | ||
| 3105 | if (!*bulletin) { | ||
| 3106 | BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", | ||
| 3107 | vfidx); | ||
| 3108 | return -EINVAL; | ||
| 3109 | } | ||
| 3110 | |||
| 3112 | return 0; | 3111 | return 0; |
| 3113 | } | 3112 | } |
| 3114 | 3113 | ||
| @@ -3116,17 +3115,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
| 3116 | struct ifla_vf_info *ivi) | 3115 | struct ifla_vf_info *ivi) |
| 3117 | { | 3116 | { |
| 3118 | struct bnx2x *bp = netdev_priv(dev); | 3117 | struct bnx2x *bp = netdev_priv(dev); |
| 3119 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3118 | struct bnx2x_virtf *vf = NULL; |
| 3120 | struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); | 3119 | struct pf_vf_bulletin_content *bulletin = NULL; |
| 3121 | struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | 3120 | struct bnx2x_vlan_mac_obj *mac_obj; |
| 3122 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3121 | struct bnx2x_vlan_mac_obj *vlan_obj; |
| 3123 | int rc; | 3122 | int rc; |
| 3124 | 3123 | ||
| 3125 | /* sanity */ | 3124 | /* sanity and init */ |
| 3126 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3125 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
| 3127 | if (rc) | 3126 | if (rc) |
| 3128 | return rc; | 3127 | return rc; |
| 3129 | if (!mac_obj || !vlan_obj || !bulletin) { | 3128 | mac_obj = &bnx2x_vfq(vf, 0, mac_obj); |
| 3129 | vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | ||
| 3130 | if (!mac_obj || !vlan_obj) { | ||
| 3130 | BNX2X_ERR("VF partially initialized\n"); | 3131 | BNX2X_ERR("VF partially initialized\n"); |
| 3131 | return -EINVAL; | 3132 | return -EINVAL; |
| 3132 | } | 3133 | } |
| @@ -3183,11 +3184,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) | |||
| 3183 | { | 3184 | { |
| 3184 | struct bnx2x *bp = netdev_priv(dev); | 3185 | struct bnx2x *bp = netdev_priv(dev); |
| 3185 | int rc, q_logical_state; | 3186 | int rc, q_logical_state; |
| 3186 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3187 | struct bnx2x_virtf *vf = NULL; |
| 3187 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3188 | struct pf_vf_bulletin_content *bulletin = NULL; |
| 3188 | 3189 | ||
| 3189 | /* sanity */ | 3190 | /* sanity and init */ |
| 3190 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3191 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
| 3191 | if (rc) | 3192 | if (rc) |
| 3192 | return rc; | 3193 | return rc; |
| 3193 | if (!is_valid_ether_addr(mac)) { | 3194 | if (!is_valid_ether_addr(mac)) { |
| @@ -3249,11 +3250,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
| 3249 | { | 3250 | { |
| 3250 | struct bnx2x *bp = netdev_priv(dev); | 3251 | struct bnx2x *bp = netdev_priv(dev); |
| 3251 | int rc, q_logical_state; | 3252 | int rc, q_logical_state; |
| 3252 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3253 | struct bnx2x_virtf *vf = NULL; |
| 3253 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3254 | struct pf_vf_bulletin_content *bulletin = NULL; |
| 3254 | 3255 | ||
| 3255 | /* sanity */ | 3256 | /* sanity and init */ |
| 3256 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3257 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
| 3257 | if (rc) | 3258 | if (rc) |
| 3258 | return rc; | 3259 | return rc; |
| 3259 | 3260 | ||
| @@ -3463,7 +3464,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) | |||
| 3463 | alloc_mem_err: | 3464 | alloc_mem_err: |
| 3464 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3465 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, |
| 3465 | sizeof(struct bnx2x_vf_mbx_msg)); | 3466 | sizeof(struct bnx2x_vf_mbx_msg)); |
| 3466 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3467 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, |
| 3467 | sizeof(union pf_vf_bulletin)); | 3468 | sizeof(union pf_vf_bulletin)); |
| 3468 | return -ENOMEM; | 3469 | return -ENOMEM; |
| 3469 | } | 3470 | } |
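
The one-liner above fixes the vf2pf error path: the second BNX2X_PCI_FREE used to pass the mailbox's DMA handle (vf2pf_mbox_mapping) for the bulletin-sized region, leaking the bulletin mapping. The general rule, sketched with fully paired arguments (note this is the ideal form, not a quote of the diff, which corrects only the mapping argument):

/* Sketch: every coherent buffer is freed with the mapping and size it
 * was allocated with -- never a neighbour's.
 */
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
	       sizeof(struct bnx2x_vf_mbx_msg));
BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
	       sizeof(union pf_vf_bulletin));
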
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..86436c77af03 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
| @@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
| 221 | * Statistics service functions | 221 | * Statistics service functions |
| 222 | */ | 222 | */ |
| 223 | 223 | ||
| 224 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | 224 | /* should be called under stats_sema */ |
| 225 | static void __bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
| 225 | { | 226 | { |
| 226 | struct dmae_command *dmae; | 227 | struct dmae_command *dmae; |
| 227 | u32 opcode; | 228 | u32 opcode; |
| @@ -518,29 +519,47 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
| 518 | *stats_comp = 0; | 519 | *stats_comp = 0; |
| 519 | } | 520 | } |
| 520 | 521 | ||
| 521 | static void bnx2x_stats_start(struct bnx2x *bp) | 522 | /* should be called under stats_sema */ |
| 523 | static void __bnx2x_stats_start(struct bnx2x *bp) | ||
| 522 | { | 524 | { |
| 523 | /* vfs travel through here as part of the statistics FSM, but no action | 525 | if (IS_PF(bp)) { |
| 524 | * is required | 526 | if (bp->port.pmf) |
| 525 | */ | 527 | bnx2x_port_stats_init(bp); |
| 526 | if (IS_VF(bp)) | ||
| 527 | return; | ||
| 528 | 528 | ||
| 529 | if (bp->port.pmf) | 529 | else if (bp->func_stx) |
| 530 | bnx2x_port_stats_init(bp); | 530 | bnx2x_func_stats_init(bp); |
| 531 | 531 | ||
| 532 | else if (bp->func_stx) | 532 | bnx2x_hw_stats_post(bp); |
| 533 | bnx2x_func_stats_init(bp); | 533 | bnx2x_storm_stats_post(bp); |
| 534 | } | ||
| 534 | 535 | ||
| 535 | bnx2x_hw_stats_post(bp); | 536 | bp->stats_started = true; |
| 536 | bnx2x_storm_stats_post(bp); | 537 | } |
| 538 | |||
| 539 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
| 540 | { | ||
| 541 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 542 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 543 | __bnx2x_stats_start(bp); | ||
| 544 | up(&bp->stats_sema); | ||
| 537 | } | 545 | } |
| 538 | 546 | ||
| 539 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | 547 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) |
| 540 | { | 548 | { |
| 549 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 550 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 541 | bnx2x_stats_comp(bp); | 551 | bnx2x_stats_comp(bp); |
| 542 | bnx2x_stats_pmf_update(bp); | 552 | __bnx2x_stats_pmf_update(bp); |
| 543 | bnx2x_stats_start(bp); | 553 | __bnx2x_stats_start(bp); |
| 554 | up(&bp->stats_sema); | ||
| 555 | } | ||
| 556 | |||
| 557 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
| 558 | { | ||
| 559 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 560 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 561 | __bnx2x_stats_pmf_update(bp); | ||
| 562 | up(&bp->stats_sema); | ||
| 544 | } | 563 | } |
| 545 | 564 | ||
| 546 | static void bnx2x_stats_restart(struct bnx2x *bp) | 565 | static void bnx2x_stats_restart(struct bnx2x *bp) |
| @@ -550,8 +569,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) | |||
| 550 | */ | 569 | */ |
| 551 | if (IS_VF(bp)) | 570 | if (IS_VF(bp)) |
| 552 | return; | 571 | return; |
| 572 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 573 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 553 | bnx2x_stats_comp(bp); | 574 | bnx2x_stats_comp(bp); |
| 554 | bnx2x_stats_start(bp); | 575 | __bnx2x_stats_start(bp); |
| 576 | up(&bp->stats_sema); | ||
| 555 | } | 577 | } |
| 556 | 578 | ||
| 557 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | 579 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) |
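
All of the statistics state-machine transitions above now funnel through the new stats_sema (initialized with sema_init() in bnx2x_init_bp earlier in this diff), with each public entry split into a bare __-prefixed worker that assumes the semaphore is held. down_timeout() bounds the wait to HZ/10 so a wedged transition degrades to an error message rather than a hang; note the driver still proceeds after a timeout, as the hunks show. The convention in miniature:

/* Sketch of the split introduced here: __helper() requires
 * bp->stats_sema to be held; helper() is the self-locking wrapper.
 */
static void __stats_transition(struct bnx2x *bp)
{
	/* FSM work; caller guarantees stats_sema is held */
}

static void stats_transition(struct bnx2x *bp)
{
	if (down_timeout(&bp->stats_sema, HZ / 10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	__stats_transition(bp);
	up(&bp->stats_sema);
}
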
| @@ -888,9 +910,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) | |||
| 888 | /* Make sure we use the value of the counter | 910 | /* Make sure we use the value of the counter |
| 889 | * used for sending the last stats ramrod. | 911 | * used for sending the last stats ramrod. |
| 890 | */ | 912 | */ |
| 891 | spin_lock_bh(&bp->stats_lock); | ||
| 892 | cur_stats_counter = bp->stats_counter - 1; | 913 | cur_stats_counter = bp->stats_counter - 1; |
| 893 | spin_unlock_bh(&bp->stats_lock); | ||
| 894 | 914 | ||
| 895 | /* are storm stats valid? */ | 915 | /* are storm stats valid? */ |
| 896 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { | 916 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { |
| @@ -1227,12 +1247,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1227 | { | 1247 | { |
| 1228 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1248 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
| 1229 | 1249 | ||
| 1230 | if (bnx2x_edebug_stats_stopped(bp)) | 1250 | /* we run update from timer context, so give up |
| 1251 | * if somebody is in the middle of transition | ||
| 1252 | */ | ||
| 1253 | if (down_trylock(&bp->stats_sema)) | ||
| 1231 | return; | 1254 | return; |
| 1232 | 1255 | ||
| 1256 | if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) | ||
| 1257 | goto out; | ||
| 1258 | |||
| 1233 | if (IS_PF(bp)) { | 1259 | if (IS_PF(bp)) { |
| 1234 | if (*stats_comp != DMAE_COMP_VAL) | 1260 | if (*stats_comp != DMAE_COMP_VAL) |
| 1235 | return; | 1261 | goto out; |
| 1236 | 1262 | ||
| 1237 | if (bp->port.pmf) | 1263 | if (bp->port.pmf) |
| 1238 | bnx2x_hw_stats_update(bp); | 1264 | bnx2x_hw_stats_update(bp); |
| @@ -1242,7 +1268,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1242 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | 1268 | BNX2X_ERR("storm stats were not updated for 3 times\n"); |
| 1243 | bnx2x_panic(); | 1269 | bnx2x_panic(); |
| 1244 | } | 1270 | } |
| 1245 | return; | 1271 | goto out; |
| 1246 | } | 1272 | } |
| 1247 | } else { | 1273 | } else { |
| 1248 | /* vf doesn't collect HW statistics, and doesn't get completions | 1274 | /* vf doesn't collect HW statistics, and doesn't get completions |
| @@ -1256,7 +1282,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1256 | 1282 | ||
| 1257 | /* vf is done */ | 1283 | /* vf is done */ |
| 1258 | if (IS_VF(bp)) | 1284 | if (IS_VF(bp)) |
| 1259 | return; | 1285 | goto out; |
| 1260 | 1286 | ||
| 1261 | if (netif_msg_timer(bp)) { | 1287 | if (netif_msg_timer(bp)) { |
| 1262 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1288 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
| @@ -1267,6 +1293,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
| 1267 | 1293 | ||
| 1268 | bnx2x_hw_stats_post(bp); | 1294 | bnx2x_hw_stats_post(bp); |
| 1269 | bnx2x_storm_stats_post(bp); | 1295 | bnx2x_storm_stats_post(bp); |
| 1296 | |||
| 1297 | out: | ||
| 1298 | up(&bp->stats_sema); | ||
| 1270 | } | 1299 | } |
| 1271 | 1300 | ||
| 1272 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | 1301 | static void bnx2x_port_stats_stop(struct bnx2x *bp) |
| @@ -1332,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
| 1332 | { | 1361 | { |
| 1333 | int update = 0; | 1362 | int update = 0; |
| 1334 | 1363 | ||
| 1364 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 1365 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 1366 | |||
| 1367 | bp->stats_started = false; | ||
| 1368 | |||
| 1335 | bnx2x_stats_comp(bp); | 1369 | bnx2x_stats_comp(bp); |
| 1336 | 1370 | ||
| 1337 | if (bp->port.pmf) | 1371 | if (bp->port.pmf) |
| @@ -1348,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
| 1348 | bnx2x_hw_stats_post(bp); | 1382 | bnx2x_hw_stats_post(bp); |
| 1349 | bnx2x_stats_comp(bp); | 1383 | bnx2x_stats_comp(bp); |
| 1350 | } | 1384 | } |
| 1385 | |||
| 1386 | up(&bp->stats_sema); | ||
| 1351 | } | 1387 | } |
| 1352 | 1388 | ||
| 1353 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | 1389 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) |
| @@ -1376,15 +1412,17 @@ static const struct { | |||
| 1376 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1412 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
| 1377 | { | 1413 | { |
| 1378 | enum bnx2x_stats_state state; | 1414 | enum bnx2x_stats_state state; |
| 1415 | void (*action)(struct bnx2x *bp); | ||
| 1379 | if (unlikely(bp->panic)) | 1416 | if (unlikely(bp->panic)) |
| 1380 | return; | 1417 | return; |
| 1381 | 1418 | ||
| 1382 | spin_lock_bh(&bp->stats_lock); | 1419 | spin_lock_bh(&bp->stats_lock); |
| 1383 | state = bp->stats_state; | 1420 | state = bp->stats_state; |
| 1384 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1421 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
| 1422 | action = bnx2x_stats_stm[state][event].action; | ||
| 1385 | spin_unlock_bh(&bp->stats_lock); | 1423 | spin_unlock_bh(&bp->stats_lock); |
| 1386 | 1424 | ||
| 1387 | bnx2x_stats_stm[state][event].action(bp); | 1425 | action(bp); |
| 1388 | 1426 | ||
| 1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1427 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
| 1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1428 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
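
In bnx2x_stats_handle() the handler pointer is now latched inside the same spin_lock_bh section that advances stats_state, so the recorded transition and the action executed for it form one consistent snapshot; the call itself stays outside the lock because the handlers can now block on stats_sema. Condensed from the hunk above:

void (*action)(struct bnx2x *bp);

spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
action = bnx2x_stats_stm[state][event].action;	/* latched under lock */
spin_unlock_bh(&bp->stats_lock);

action(bp);	/* may sleep on stats_sema -- must not hold the lock */
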
| @@ -1955,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, | |||
| 1955 | estats->mac_discard); | 1993 | estats->mac_discard); |
| 1956 | } | 1994 | } |
| 1957 | } | 1995 | } |
| 1996 | |||
| 1997 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
| 1998 | void (func_to_exec)(void *cookie), | ||
| 1999 | void *cookie){ | ||
| 2000 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
| 2001 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
| 2002 | bnx2x_stats_comp(bp); | ||
| 2003 | func_to_exec(cookie); | ||
| 2004 | __bnx2x_stats_start(bp); | ||
| 2005 | up(&bp->stats_sema); | ||
| 2006 | } | ||
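
bnx2x_stats_safe_exec() is the consumer-facing piece of the new semaphore: it quiesces statistics (takes stats_sema and waits out the pending DMAE completion via bnx2x_stats_comp), runs the caller's callback, restarts collection, and releases. The VF close path in bnx2x_sriov.c above uses it with a small cookie so vf->state only flips to VF_ACQUIRED once no stats ramrod can still touch the VF's soon-to-be-freed buffer:

/* Usage, mirroring the bnx2x_vfop_close() call site above */
struct set_vf_state_cookie cookie = {
	.vf	= vf,
	.state	= VF_ACQUIRED,
};

bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
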
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 853824d258e8..f35845006cdd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
| @@ -539,6 +539,9 @@ struct bnx2x; | |||
| 539 | void bnx2x_memset_stats(struct bnx2x *bp); | 539 | void bnx2x_memset_stats(struct bnx2x *bp); |
| 540 | void bnx2x_stats_init(struct bnx2x *bp); | 540 | void bnx2x_stats_init(struct bnx2x *bp); |
| 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
| 542 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
| 543 | void (func_to_exec)(void *cookie), | ||
| 544 | void *cookie); | ||
| 542 | 545 | ||
| 543 | /** | 546 | /** |
| 544 | * bnx2x_save_statistics - save statistics when unloading. | 547 | * bnx2x_save_statistics - save statistics when unloading. |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d964f302ac94..0da2214ef1b9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -17625,7 +17625,8 @@ err_out_free_res: | |||
| 17625 | pci_release_regions(pdev); | 17625 | pci_release_regions(pdev); |
| 17626 | 17626 | ||
| 17627 | err_out_disable_pdev: | 17627 | err_out_disable_pdev: |
| 17628 | pci_disable_device(pdev); | 17628 | if (pci_is_enabled(pdev)) |
| 17629 | pci_disable_device(pdev); | ||
| 17629 | pci_set_drvdata(pdev, NULL); | 17630 | pci_set_drvdata(pdev, NULL); |
| 17630 | return err; | 17631 | return err; |
| 17631 | } | 17632 | } |
| @@ -17773,7 +17774,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
| 17773 | 17774 | ||
| 17774 | rtnl_lock(); | 17775 | rtnl_lock(); |
| 17775 | 17776 | ||
| 17776 | if (!netif_running(netdev)) | 17777 | /* We probably don't have netdev yet */ |
| 17778 | if (!netdev || !netif_running(netdev)) | ||
| 17777 | goto done; | 17779 | goto done; |
| 17778 | 17780 | ||
| 17779 | tg3_phy_stop(tp); | 17781 | tg3_phy_stop(tp); |
| @@ -17794,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
| 17794 | 17796 | ||
| 17795 | done: | 17797 | done: |
| 17796 | if (state == pci_channel_io_perm_failure) { | 17798 | if (state == pci_channel_io_perm_failure) { |
| 17797 | tg3_napi_enable(tp); | 17799 | if (netdev) { |
| 17798 | dev_close(netdev); | 17800 | tg3_napi_enable(tp); |
| 17801 | dev_close(netdev); | ||
| 17802 | } | ||
| 17799 | err = PCI_ERS_RESULT_DISCONNECT; | 17803 | err = PCI_ERS_RESULT_DISCONNECT; |
| 17800 | } else { | 17804 | } else { |
| 17801 | pci_disable_device(pdev); | 17805 | pci_disable_device(pdev); |
| @@ -17825,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
| 17825 | rtnl_lock(); | 17829 | rtnl_lock(); |
| 17826 | 17830 | ||
| 17827 | if (pci_enable_device(pdev)) { | 17831 | if (pci_enable_device(pdev)) { |
| 17828 | netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); | 17832 | dev_err(&pdev->dev, |
| 17833 | "Cannot re-enable PCI device after reset.\n"); | ||
| 17829 | goto done; | 17834 | goto done; |
| 17830 | } | 17835 | } |
| 17831 | 17836 | ||
| @@ -17833,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
| 17833 | pci_restore_state(pdev); | 17838 | pci_restore_state(pdev); |
| 17834 | pci_save_state(pdev); | 17839 | pci_save_state(pdev); |
| 17835 | 17840 | ||
| 17836 | if (!netif_running(netdev)) { | 17841 | if (!netdev || !netif_running(netdev)) { |
| 17837 | rc = PCI_ERS_RESULT_RECOVERED; | 17842 | rc = PCI_ERS_RESULT_RECOVERED; |
| 17838 | goto done; | 17843 | goto done; |
| 17839 | } | 17844 | } |
| @@ -17845,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
| 17845 | rc = PCI_ERS_RESULT_RECOVERED; | 17850 | rc = PCI_ERS_RESULT_RECOVERED; |
| 17846 | 17851 | ||
| 17847 | done: | 17852 | done: |
| 17848 | if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { | 17853 | if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { |
| 17849 | tg3_napi_enable(tp); | 17854 | tg3_napi_enable(tp); |
| 17850 | dev_close(netdev); | 17855 | dev_close(netdev); |
| 17851 | } | 17856 | } |
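
The tg3 AER changes all guard against the same case: an error reported before tg3_init_one() managed to register a net_device (or after a partial probe unwind), where pci_get_drvdata() yields NULL and every netif_running()/dev_close() call would oops. The same reasoning moves the slot-reset failure message from netdev_err() to dev_err(). The defensive shape, as a sketch rather than the driver's full handler:

/* Sketch: AER callbacks must tolerate a NULL drvdata netdev */
static pci_ers_result_t io_error_detected_sketch(struct pci_dev *pdev,
						 pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!netdev || !netif_running(netdev))
		return PCI_ERS_RESULT_NEED_RESET;	/* illustrative */

	/* ... quiesce NAPI, stop the PHY, halt DMA ... */
	return PCI_ERS_RESULT_NEED_RESET;
}
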
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
| @@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, | |||
| 455 | q->pg_chunk.offset = 0; | 455 | q->pg_chunk.offset = 0; |
| 456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, | 456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, |
| 457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); | 457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); |
| 458 | if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { | ||
| 459 | __free_pages(q->pg_chunk.page, order); | ||
| 460 | q->pg_chunk.page = NULL; | ||
| 461 | return -EIO; | ||
| 462 | } | ||
| 463 | q->pg_chunk.mapping = mapping; | 458 | q->pg_chunk.mapping = mapping; |
| 464 | } | 459 | } |
| 465 | sd->pg_chunk = q->pg_chunk; | 460 | sd->pg_chunk = q->pg_chunk; |
| @@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |||
| 954 | return flits_to_desc(flits); | 949 | return flits_to_desc(flits); |
| 955 | } | 950 | } |
| 956 | 951 | ||
| 957 | |||
| 958 | /* map_skb - map a packet main body and its page fragments | ||
| 959 | * @pdev: the PCI device | ||
| 960 | * @skb: the packet | ||
| 961 | * @addr: placeholder to save the mapped addresses | ||
| 962 | * | ||
| 963 | * map the main body of an sk_buff and its page fragments, if any. | ||
| 964 | */ | ||
| 965 | static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, | ||
| 966 | dma_addr_t *addr) | ||
| 967 | { | ||
| 968 | const skb_frag_t *fp, *end; | ||
| 969 | const struct skb_shared_info *si; | ||
| 970 | |||
| 971 | *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), | ||
| 972 | PCI_DMA_TODEVICE); | ||
| 973 | if (pci_dma_mapping_error(pdev, *addr)) | ||
| 974 | goto out_err; | ||
| 975 | |||
| 976 | si = skb_shinfo(skb); | ||
| 977 | end = &si->frags[si->nr_frags]; | ||
| 978 | |||
| 979 | for (fp = si->frags; fp < end; fp++) { | ||
| 980 | *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), | ||
| 981 | DMA_TO_DEVICE); | ||
| 982 | if (pci_dma_mapping_error(pdev, *addr)) | ||
| 983 | goto unwind; | ||
| 984 | } | ||
| 985 | return 0; | ||
| 986 | |||
| 987 | unwind: | ||
| 988 | while (fp-- > si->frags) | ||
| 989 | dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), | ||
| 990 | DMA_TO_DEVICE); | ||
| 991 | |||
| 992 | pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); | ||
| 993 | out_err: | ||
| 994 | return -ENOMEM; | ||
| 995 | } | ||
| 996 | |||
| 997 | /** | 952 | /** |
| 998 | * write_sgl - populate a scatter/gather list for a packet | 953 | * make_sgl - populate a scatter/gather list for a packet |
| 999 | * @skb: the packet | 954 | * @skb: the packet |
| 1000 | * @sgp: the SGL to populate | 955 | * @sgp: the SGL to populate |
| 1001 | * @start: start address of skb main body data to include in the SGL | 956 | * @start: start address of skb main body data to include in the SGL |
| 1002 | * @len: length of skb main body data to include in the SGL | 957 | * @len: length of skb main body data to include in the SGL |
| 1003 | * @addr: the list of the mapped addresses | 958 | * @pdev: the PCI device |
| 1004 | * | 959 | * |
| 1005 | * Copies the scatter/gather list for the buffers that make up a packet | 960 | * Generates a scatter/gather list for the buffers that make up a packet |
| 1006 | * and returns the SGL size in 8-byte words. The caller must size the SGL | 961 | * and returns the SGL size in 8-byte words. The caller must size the SGL |
| 1007 | * appropriately. | 962 | * appropriately. |
| 1008 | */ | 963 | */ |
| 1009 | static inline unsigned int write_sgl(const struct sk_buff *skb, | 964 | static inline unsigned int make_sgl(const struct sk_buff *skb, |
| 1010 | struct sg_ent *sgp, unsigned char *start, | 965 | struct sg_ent *sgp, unsigned char *start, |
| 1011 | unsigned int len, const dma_addr_t *addr) | 966 | unsigned int len, struct pci_dev *pdev) |
| 1012 | { | 967 | { |
| 1013 | unsigned int i, j = 0, k = 0, nfrags; | 968 | dma_addr_t mapping; |
| 969 | unsigned int i, j = 0, nfrags; | ||
| 1014 | 970 | ||
| 1015 | if (len) { | 971 | if (len) { |
| 972 | mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); | ||
| 1016 | sgp->len[0] = cpu_to_be32(len); | 973 | sgp->len[0] = cpu_to_be32(len); |
| 1017 | sgp->addr[j++] = cpu_to_be64(addr[k++]); | 974 | sgp->addr[0] = cpu_to_be64(mapping); |
| 975 | j = 1; | ||
| 1018 | } | 976 | } |
| 1019 | 977 | ||
| 1020 | nfrags = skb_shinfo(skb)->nr_frags; | 978 | nfrags = skb_shinfo(skb)->nr_frags; |
| 1021 | for (i = 0; i < nfrags; i++) { | 979 | for (i = 0; i < nfrags; i++) { |
| 1022 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 980 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 1023 | 981 | ||
| 982 | mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), | ||
| 983 | DMA_TO_DEVICE); | ||
| 1024 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); | 984 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); |
| 1025 | sgp->addr[j] = cpu_to_be64(addr[k++]); | 985 | sgp->addr[j] = cpu_to_be64(mapping); |
| 1026 | j ^= 1; | 986 | j ^= 1; |
| 1027 | if (j == 0) | 987 | if (j == 0) |
| 1028 | ++sgp; | 988 | ++sgp; |
| @@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
| 1178 | const struct port_info *pi, | 1138 | const struct port_info *pi, |
| 1179 | unsigned int pidx, unsigned int gen, | 1139 | unsigned int pidx, unsigned int gen, |
| 1180 | struct sge_txq *q, unsigned int ndesc, | 1140 | struct sge_txq *q, unsigned int ndesc, |
| 1181 | unsigned int compl, const dma_addr_t *addr) | 1141 | unsigned int compl) |
| 1182 | { | 1142 | { |
| 1183 | unsigned int flits, sgl_flits, cntrl, tso_info; | 1143 | unsigned int flits, sgl_flits, cntrl, tso_info; |
| 1184 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; | 1144 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; |
| @@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
| 1236 | } | 1196 | } |
| 1237 | 1197 | ||
| 1238 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1198 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
| 1239 | sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); | 1199 | sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); |
| 1240 | 1200 | ||
| 1241 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, | 1201 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, |
| 1242 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), | 1202 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), |
| @@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1267 | struct netdev_queue *txq; | 1227 | struct netdev_queue *txq; |
| 1268 | struct sge_qset *qs; | 1228 | struct sge_qset *qs; |
| 1269 | struct sge_txq *q; | 1229 | struct sge_txq *q; |
| 1270 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
| 1271 | 1230 | ||
| 1272 | /* | 1231 | /* |
| 1273 | * The chip min packet length is 9 octets but play safe and reject | 1232 | * The chip min packet length is 9 octets but play safe and reject |
| @@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1296 | return NETDEV_TX_BUSY; | 1255 | return NETDEV_TX_BUSY; |
| 1297 | } | 1256 | } |
| 1298 | 1257 | ||
| 1299 | if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { | ||
| 1300 | dev_kfree_skb(skb); | ||
| 1301 | return NETDEV_TX_OK; | ||
| 1302 | } | ||
| 1303 | |||
| 1304 | q->in_use += ndesc; | 1258 | q->in_use += ndesc; |
| 1305 | if (unlikely(credits - ndesc < q->stop_thres)) { | 1259 | if (unlikely(credits - ndesc < q->stop_thres)) { |
| 1306 | t3_stop_tx_queue(txq, qs, q); | 1260 | t3_stop_tx_queue(txq, qs, q); |
| @@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1358 | if (likely(!skb_shared(skb))) | 1312 | if (likely(!skb_shared(skb))) |
| 1359 | skb_orphan(skb); | 1313 | skb_orphan(skb); |
| 1360 | 1314 | ||
| 1361 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); | 1315 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); |
| 1362 | check_ring_tx_db(adap, q); | 1316 | check_ring_tx_db(adap, q); |
| 1363 | return NETDEV_TX_OK; | 1317 | return NETDEV_TX_OK; |
| 1364 | } | 1318 | } |
| @@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, | |||
| 1623 | */ | 1577 | */ |
| 1624 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | 1578 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, |
| 1625 | struct sge_txq *q, unsigned int pidx, | 1579 | struct sge_txq *q, unsigned int pidx, |
| 1626 | unsigned int gen, unsigned int ndesc, | 1580 | unsigned int gen, unsigned int ndesc) |
| 1627 | const dma_addr_t *addr) | ||
| 1628 | { | 1581 | { |
| 1629 | unsigned int sgl_flits, flits; | 1582 | unsigned int sgl_flits, flits; |
| 1630 | struct work_request_hdr *from; | 1583 | struct work_request_hdr *from; |
| @@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |||
| 1645 | 1598 | ||
| 1646 | flits = skb_transport_offset(skb) / 8; | 1599 | flits = skb_transport_offset(skb) / 8; |
| 1647 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1600 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
| 1648 | sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), | 1601 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
| 1649 | skb_tail_pointer(skb) - | 1602 | skb->tail - skb->transport_header, |
| 1650 | skb_transport_header(skb), addr); | 1603 | adap->pdev); |
| 1651 | if (need_skb_unmap()) { | 1604 | if (need_skb_unmap()) { |
| 1652 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | 1605 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); |
| 1653 | skb->destructor = deferred_unmap_destructor; | 1606 | skb->destructor = deferred_unmap_destructor; |
| @@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
| 1705 | goto again; | 1658 | goto again; |
| 1706 | } | 1659 | } |
| 1707 | 1660 | ||
| 1708 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { | ||
| 1709 | spin_unlock(&q->lock); | ||
| 1710 | return NET_XMIT_SUCCESS; | ||
| 1711 | } | ||
| 1712 | |||
| 1713 | gen = q->gen; | 1661 | gen = q->gen; |
| 1714 | q->in_use += ndesc; | 1662 | q->in_use += ndesc; |
| 1715 | pidx = q->pidx; | 1663 | pidx = q->pidx; |
| @@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
| 1720 | } | 1668 | } |
| 1721 | spin_unlock(&q->lock); | 1669 | spin_unlock(&q->lock); |
| 1722 | 1670 | ||
| 1723 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); | 1671 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
| 1724 | check_ring_tx_db(adap, q); | 1672 | check_ring_tx_db(adap, q); |
| 1725 | return NET_XMIT_SUCCESS; | 1673 | return NET_XMIT_SUCCESS; |
| 1726 | } | 1674 | } |
| @@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) | |||
| 1738 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; | 1686 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; |
| 1739 | const struct port_info *pi = netdev_priv(qs->netdev); | 1687 | const struct port_info *pi = netdev_priv(qs->netdev); |
| 1740 | struct adapter *adap = pi->adapter; | 1688 | struct adapter *adap = pi->adapter; |
| 1741 | unsigned int written = 0; | ||
| 1742 | 1689 | ||
| 1743 | spin_lock(&q->lock); | 1690 | spin_lock(&q->lock); |
| 1744 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | 1691 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); |
| @@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
| 1758 | break; | 1705 | break; |
| 1759 | } | 1706 | } |
| 1760 | 1707 | ||
| 1761 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) | ||
| 1762 | break; | ||
| 1763 | |||
| 1764 | gen = q->gen; | 1708 | gen = q->gen; |
| 1765 | q->in_use += ndesc; | 1709 | q->in_use += ndesc; |
| 1766 | pidx = q->pidx; | 1710 | pidx = q->pidx; |
| 1767 | q->pidx += ndesc; | 1711 | q->pidx += ndesc; |
| 1768 | written += ndesc; | ||
| 1769 | if (q->pidx >= q->size) { | 1712 | if (q->pidx >= q->size) { |
| 1770 | q->pidx -= q->size; | 1713 | q->pidx -= q->size; |
| 1771 | q->gen ^= 1; | 1714 | q->gen ^= 1; |
| @@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
| 1773 | __skb_unlink(skb, &q->sendq); | 1716 | __skb_unlink(skb, &q->sendq); |
| 1774 | spin_unlock(&q->lock); | 1717 | spin_unlock(&q->lock); |
| 1775 | 1718 | ||
| 1776 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, | 1719 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
| 1777 | (dma_addr_t *)skb->head); | ||
| 1778 | spin_lock(&q->lock); | 1720 | spin_lock(&q->lock); |
| 1779 | } | 1721 | } |
| 1780 | spin_unlock(&q->lock); | 1722 | spin_unlock(&q->lock); |
| @@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
| 1784 | set_bit(TXQ_LAST_PKT_DB, &q->flags); | 1726 | set_bit(TXQ_LAST_PKT_DB, &q->flags); |
| 1785 | #endif | 1727 | #endif |
| 1786 | wmb(); | 1728 | wmb(); |
| 1787 | if (likely(written)) | 1729 | t3_write_reg(adap, A_SG_KDOORBELL, |
| 1788 | t3_write_reg(adap, A_SG_KDOORBELL, | 1730 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
| 1789 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | ||
| 1790 | } | 1731 | } |
| 1791 | 1732 | ||
| 1792 | /** | 1733 | /** |
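
The whole cxgb3/sge.c block is a revert of the earlier DMA-mapping-error series: map_skb() and its unwind path disappear, write_sgl() becomes make_sgl() and maps buffers inline again, and restart_offloadq() drops the `written` doorbell bookkeeping. The effect is to restore the old behavior in which pci_map_single()/skb_frag_dma_map() results are trusted unchecked. The removed idiom, condensed from the deleted map_skb() above:

/* The reverted idiom: map, test, and bail before posting the skb */
*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
		       PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, *addr))
	return -ENOMEM;	/* caller frees the skb instead of queueing it */
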
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a117ee2..8ec5d74ad44d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) | |||
| 3048 | 3048 | ||
| 3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); | 3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); |
| 3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); | 3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); |
| 3051 | |||
| 3052 | /* Clear flags that driver is not interested in */ | ||
| 3053 | adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; | ||
| 3051 | } | 3054 | } |
| 3052 | err: | 3055 | err: |
| 3053 | mutex_unlock(&adapter->mbox_lock); | 3056 | mutex_unlock(&adapter->mbox_lock); |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88c5a02..1b3b9e886412 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
| @@ -563,6 +563,12 @@ enum be_if_flags { | |||
| 563 | BE_IF_FLAGS_MULTICAST = 0x1000 | 563 | BE_IF_FLAGS_MULTICAST = 0x1000 |
| 564 | }; | 564 | }; |
| 565 | 565 | ||
| 566 | #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ | ||
| 567 | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ | ||
| 568 | BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ | ||
| 569 | BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ | ||
| 570 | BE_IF_FLAGS_UNTAGGED) | ||
| 571 | |||
| 566 | /* An RX interface is an object with one or more MAC addresses and | 572 | /* An RX interface is an object with one or more MAC addresses and |
| 567 | * filtering capabilities. */ | 573 | * filtering capabilities. */ |
| 568 | struct be_cmd_req_if_create { | 574 | struct be_cmd_req_if_create { |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 181edb522450..3d91a5ec61a4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev) | |||
| 2563 | /* Wait for all pending tx completions to arrive so that | 2563 | /* Wait for all pending tx completions to arrive so that |
| 2564 | * all tx skbs are freed. | 2564 | * all tx skbs are freed. |
| 2565 | */ | 2565 | */ |
| 2566 | be_tx_compl_clean(adapter); | ||
| 2567 | netif_tx_disable(netdev); | 2566 | netif_tx_disable(netdev); |
| 2567 | be_tx_compl_clean(adapter); | ||
| 2568 | 2568 | ||
| 2569 | be_rx_qs_destroy(adapter); | 2569 | be_rx_qs_destroy(adapter); |
| 2570 | 2570 | ||
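
Swapping these two calls closes a small shutdown race: netif_tx_disable() must stop the stack from queueing new skbs before be_tx_compl_clean() waits for the TX queues to drain, otherwise transmits arriving during the clean pass can be left behind. The required ordering:

netif_tx_disable(netdev);	/* 1. stop new transmits             */
be_tx_compl_clean(adapter);	/* 2. drain in-flight completions    */
be_rx_qs_destroy(adapter);	/* 3. only then tear down the queues */
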
| @@ -4373,6 +4373,10 @@ static int be_resume(struct pci_dev *pdev) | |||
| 4373 | pci_set_power_state(pdev, PCI_D0); | 4373 | pci_set_power_state(pdev, PCI_D0); |
| 4374 | pci_restore_state(pdev); | 4374 | pci_restore_state(pdev); |
| 4375 | 4375 | ||
| 4376 | status = be_fw_wait_ready(adapter); | ||
| 4377 | if (status) | ||
| 4378 | return status; | ||
| 4379 | |||
| 4376 | /* tell fw we're ready to fire cmds */ | 4380 | /* tell fw we're ready to fire cmds */ |
| 4377 | status = be_cmd_fw_init(adapter); | 4381 | status = be_cmd_fw_init(adapter); |
| 4378 | if (status) | 4382 | if (status) |
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 2b0a0ea4f8e7..ae236009f1a8 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h | |||
| @@ -259,6 +259,7 @@ struct bufdesc_ex { | |||
| 259 | struct fec_enet_delayed_work { | 259 | struct fec_enet_delayed_work { |
| 260 | struct delayed_work delay_work; | 260 | struct delayed_work delay_work; |
| 261 | bool timeout; | 261 | bool timeout; |
| 262 | bool trig_tx; | ||
| 262 | }; | 263 | }; |
| 263 | 264 | ||
| 264 | /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and | 265 | /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d3ad5ea711d3..c610a2716be4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -93,6 +93,20 @@ static void set_multicast_list(struct net_device *ndev); | |||
| 93 | #define FEC_QUIRK_HAS_CSUM (1 << 5) | 93 | #define FEC_QUIRK_HAS_CSUM (1 << 5) |
| 94 | /* Controller has hardware vlan support */ | 94 | /* Controller has hardware vlan support */ |
| 95 | #define FEC_QUIRK_HAS_VLAN (1 << 6) | 95 | #define FEC_QUIRK_HAS_VLAN (1 << 6) |
| 96 | /* ENET IP errata ERR006358 | ||
| 97 | * | ||
| 98 | * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously | ||
| 99 | * detected as not set during a prior frame transmission, then the | ||
| 100 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs | ||
| 101 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in | ||
| 106 | * frames not being transmitted until there is a 0-to-1 transition on | ||
| 107 | * ENET_TDAR[TDAR]. | ||
| 108 | */ | ||
| 109 | #define FEC_QUIRK_ERR006358 (1 << 7) | ||
| 96 | 110 | ||
| 97 | static struct platform_device_id fec_devtype[] = { | 111 | static struct platform_device_id fec_devtype[] = { |
| 98 | { | 112 | { |
| @@ -112,7 +126,7 @@ static struct platform_device_id fec_devtype[] = { | |||
| 112 | .name = "imx6q-fec", | 126 | .name = "imx6q-fec", |
| 113 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | | 127 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | |
| 114 | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | | 128 | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | |
| 115 | FEC_QUIRK_HAS_VLAN, | 129 | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358, |
| 116 | }, { | 130 | }, { |
| 117 | .name = "mvf600-fec", | 131 | .name = "mvf600-fec", |
| 118 | .driver_data = FEC_QUIRK_ENET_MAC, | 132 | .driver_data = FEC_QUIRK_ENET_MAC, |
| @@ -275,16 +289,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 275 | struct fec_enet_private *fep = netdev_priv(ndev); | 289 | struct fec_enet_private *fep = netdev_priv(ndev); |
| 276 | const struct platform_device_id *id_entry = | 290 | const struct platform_device_id *id_entry = |
| 277 | platform_get_device_id(fep->pdev); | 291 | platform_get_device_id(fep->pdev); |
| 278 | struct bufdesc *bdp; | 292 | struct bufdesc *bdp, *bdp_pre; |
| 279 | void *bufaddr; | 293 | void *bufaddr; |
| 280 | unsigned short status; | 294 | unsigned short status; |
| 281 | unsigned int index; | 295 | unsigned int index; |
| 282 | 296 | ||
| 283 | if (!fep->link) { | ||
| 284 | /* Link is down or auto-negotiation is in progress. */ | ||
| 285 | return NETDEV_TX_BUSY; | ||
| 286 | } | ||
| 287 | |||
| 288 | /* Fill in a Tx ring entry */ | 297 | /* Fill in a Tx ring entry */ |
| 289 | bdp = fep->cur_tx; | 298 | bdp = fep->cur_tx; |
| 290 | 299 | ||
| @@ -370,6 +379,15 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 370 | ebdp->cbd_esc |= BD_ENET_TX_PINS; | 379 | ebdp->cbd_esc |= BD_ENET_TX_PINS; |
| 371 | } | 380 | } |
| 372 | } | 381 | } |
| 382 | |||
| 383 | bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
| 384 | if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && | ||
| 385 | !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { | ||
| 386 | fep->delay_work.trig_tx = true; | ||
| 387 | schedule_delayed_work(&(fep->delay_work.delay_work), | ||
| 388 | msecs_to_jiffies(1)); | ||
| 389 | } | ||
| 390 | |||
| 373 | /* If this was the last BD in the ring, start at the beginning again. */ | 391 | /* If this was the last BD in the ring, start at the beginning again. */ |
| 374 | if (status & BD_ENET_TX_WRAP) | 392 | if (status & BD_ENET_TX_WRAP) |
| 375 | bdp = fep->tx_bd_base; | 393 | bdp = fep->tx_bd_base; |
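The block added to fec_enet_start_xmit() is the detection half of the ERR006358 workaround: after queuing a frame it inspects the previous descriptor, and if that descriptor's ready bit was already clear the TDAR doorbell may be lost, so a 1 ms delayed work is scheduled to re-kick the queue. A hedged model of just that predicate — the struct, helper name, and ready-bit value are stand-ins, not the driver's definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BD_TX_READY (1 << 15)   /* illustrative stand-in for BD_ENET_TX_READY */

    struct bufdesc { uint16_t sc; };

    /* True when the ERR006358 re-kick should be scheduled: the descriptor
     * before the one just queued was already consumed (ready bit clear),
     * so the hardware may miss the TDAR doorbell. */
    static bool needs_tdar_rekick(const struct bufdesc *prev, bool has_quirk)
    {
        return has_quirk && !(prev->sc & BD_TX_READY);
    }

    int main(void)
    {
        struct bufdesc prev = { .sc = 0 };                        /* already sent */
        printf("rekick: %d\n", needs_tdar_rekick(&prev, true));   /* prints 1 */
        prev.sc = BD_TX_READY;                                    /* still owned by HW */
        printf("rekick: %d\n", needs_tdar_rekick(&prev, true));   /* prints 0 */
        return 0;
    }

The scheduled work then forces the 0-to-1 transition on ENET_TDAR[TDAR], as the fec_enet_work() hunk below shows.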
| @@ -689,6 +707,11 @@ static void fec_enet_work(struct work_struct *work) | |||
| 689 | fec_restart(fep->netdev, fep->full_duplex); | 707 | fec_restart(fep->netdev, fep->full_duplex); |
| 690 | netif_wake_queue(fep->netdev); | 708 | netif_wake_queue(fep->netdev); |
| 691 | } | 709 | } |
| 710 | |||
| 711 | if (fep->delay_work.trig_tx) { | ||
| 712 | fep->delay_work.trig_tx = false; | ||
| 713 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); | ||
| 714 | } | ||
| 692 | } | 715 | } |
| 693 | 716 | ||
| 694 | static void | 717 | static void |
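The fec_enet_work() addition is the other half of the workaround: the handler consumes the trig_tx flag and performs a single write to FEC_X_DES_ACTIVE, producing the 0-to-1 TDAR transition the erratum requires. A self-contained model of that consume-and-kick step (all names are stand-ins, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fec_model {
        bool trig_tx;
        uint32_t tdar;      /* stands in for the FEC_X_DES_ACTIVE register */
    };

    static void delayed_work_handler(struct fec_model *fep)
    {
        if (fep->trig_tx) {
            fep->trig_tx = false;
            fep->tdar = 0;  /* in the model, any write re-arms the transmitter */
            puts("TDAR re-kicked");
        }
    }

    int main(void)
    {
        struct fec_model fep = { .trig_tx = true, .tdar = 1 };

        delayed_work_handler(&fep);     /* kicks once */
        delayed_work_handler(&fep);     /* flag consumed: no-op */
        return 0;
    }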
| @@ -948,8 +971,7 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
| 948 | htons(ETH_P_8021Q), | 971 | htons(ETH_P_8021Q), |
| 949 | vlan_tag); | 972 | vlan_tag); |
| 950 | 973 | ||
| 951 | if (!skb_defer_rx_timestamp(skb)) | 974 | napi_gro_receive(&fep->napi, skb); |
| 952 | napi_gro_receive(&fep->napi, skb); | ||
| 953 | } | 975 | } |
| 954 | 976 | ||
| 955 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, | 977 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, |
| @@ -2279,4 +2301,5 @@ static struct platform_driver fec_driver = { | |||
| 2279 | 2301 | ||
| 2280 | module_platform_driver(fec_driver); | 2302 | module_platform_driver(fec_driver); |
| 2281 | 2303 | ||
| 2304 | MODULE_ALIAS("platform:"DRIVER_NAME); | ||
| 2282 | MODULE_LICENSE("GPL"); | 2305 | MODULE_LICENSE("GPL"); |
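The new MODULE_ALIAS line lets userspace autoload the module when a matching platform device is registered: the device's uevent advertises MODALIAS=platform:<name>, and adjacent string literals are concatenated at compile time, so "platform:"DRIVER_NAME collapses to a single alias string. A tiny demonstration of the concatenation, assuming (as appears to be the case in this driver) that DRIVER_NAME expands to "fec":

    #include <stdio.h>

    #define DRIVER_NAME "fec"   /* assumed value for the demonstration */

    int main(void)
    {
        /* Adjacent string literals concatenate at compile time, so the
         * alias the module would export is the single string "platform:fec". */
        const char *alias = "platform:" DRIVER_NAME;

        printf("%s\n", alias);
        return 0;
    }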
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6a0c1b66ce54..c1d72c03cb59 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -3739,9 +3739,8 @@ static void igb_set_rx_mode(struct net_device *netdev) | |||
| 3739 | rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); | 3739 | rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); |
| 3740 | 3740 | ||
| 3741 | if (netdev->flags & IFF_PROMISC) { | 3741 | if (netdev->flags & IFF_PROMISC) { |
| 3742 | u32 mrqc = rd32(E1000_MRQC); | ||
| 3743 | /* retain VLAN HW filtering if in VT mode */ | 3742 | /* retain VLAN HW filtering if in VT mode */ |
| 3744 | if (mrqc & E1000_MRQC_ENABLE_VMDQ) | 3743 | if (adapter->vfs_allocated_count) |
| 3745 | rctl |= E1000_RCTL_VFE; | 3744 | rctl |= E1000_RCTL_VFE; |
| 3746 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | 3745 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
| 3747 | vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); | 3746 | vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7be725cdfea8..a6494e5daffe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h | |||
| @@ -54,7 +54,7 @@ | |||
| 54 | 54 | ||
| 55 | #include <net/busy_poll.h> | 55 | #include <net/busy_poll.h> |
| 56 | 56 | ||
| 57 | #ifdef CONFIG_NET_LL_RX_POLL | 57 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 58 | #define LL_EXTENDED_STATS | 58 | #define LL_EXTENDED_STATS |
| 59 | #endif | 59 | #endif |
| 60 | /* common prefix used by pr_<> macros */ | 60 | /* common prefix used by pr_<> macros */ |
| @@ -366,7 +366,7 @@ struct ixgbe_q_vector { | |||
| 366 | struct rcu_head rcu; /* to avoid race with update stats on free */ | 366 | struct rcu_head rcu; /* to avoid race with update stats on free */ |
| 367 | char name[IFNAMSIZ + 9]; | 367 | char name[IFNAMSIZ + 9]; |
| 368 | 368 | ||
| 369 | #ifdef CONFIG_NET_LL_RX_POLL | 369 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 370 | unsigned int state; | 370 | unsigned int state; |
| 371 | #define IXGBE_QV_STATE_IDLE 0 | 371 | #define IXGBE_QV_STATE_IDLE 0 |
| 372 | #define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ | 372 | #define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ |
| @@ -377,12 +377,12 @@ struct ixgbe_q_vector { | |||
| 377 | #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) | 377 | #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) |
| 378 | #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) | 378 | #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) |
| 379 | spinlock_t lock; | 379 | spinlock_t lock; |
| 380 | #endif /* CONFIG_NET_LL_RX_POLL */ | 380 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 381 | 381 | ||
| 382 | /* for dynamic allocation of rings associated with this q_vector */ | 382 | /* for dynamic allocation of rings associated with this q_vector */ |
| 383 | struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; | 383 | struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; |
| 384 | }; | 384 | }; |
| 385 | #ifdef CONFIG_NET_LL_RX_POLL | 385 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 386 | static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) | 386 | static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) |
| 387 | { | 387 | { |
| 388 | 388 | ||
| @@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) | |||
| 462 | WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); | 462 | WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); |
| 463 | return q_vector->state & IXGBE_QV_USER_PEND; | 463 | return q_vector->state & IXGBE_QV_USER_PEND; |
| 464 | } | 464 | } |
| 465 | #else /* CONFIG_NET_LL_RX_POLL */ | 465 | #else /* CONFIG_NET_RX_BUSY_POLL */ |
| 466 | static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) | 466 | static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) |
| 467 | { | 467 | { |
| 468 | } | 468 | } |
| @@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) | |||
| 491 | { | 491 | { |
| 492 | return false; | 492 | return false; |
| 493 | } | 493 | } |
| 494 | #endif /* CONFIG_NET_LL_RX_POLL */ | 494 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 495 | 495 | ||
| 496 | #ifdef CONFIG_IXGBE_HWMON | 496 | #ifdef CONFIG_IXGBE_HWMON |
| 497 | 497 | ||
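These ixgbe.h hunks track the Kconfig rename from CONFIG_NET_LL_RX_POLL to CONFIG_NET_RX_BUSY_POLL, and in passing show the stub-in-#else idiom: real locking helpers live under the ifdef, empty inline stubs in the #else, so call sites never grow their own conditionals. A compilable sketch of the idiom, with HAS_BUSY_POLL standing in for the Kconfig symbol and the helpers reduced to toys:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toggle to mimic CONFIG_NET_RX_BUSY_POLL being set or not. */
    #define HAS_BUSY_POLL 1

    struct qv { unsigned int state; };

    #if HAS_BUSY_POLL
    static void qv_init_lock(struct qv *q) { q->state = 0; }
    static bool qv_ll_polling(const struct qv *q) { return q->state & 1; }
    #else
    /* Stubs: callers compile unchanged when the feature is configured out. */
    static void qv_init_lock(struct qv *q) { (void)q; }
    static bool qv_ll_polling(const struct qv *q) { (void)q; return false; }
    #endif

    int main(void)
    {
        struct qv q;

        qv_init_lock(&q);
        printf("polling: %d\n", qv_ll_polling(&q));
        return 0;
    }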
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index ac780770863d..7a77f37a7cbc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | |||
| @@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, | |||
| 108 | 108 | ||
| 109 | /* Enable arbiter */ | 109 | /* Enable arbiter */ |
| 110 | reg &= ~IXGBE_DPMCS_ARBDIS; | 110 | reg &= ~IXGBE_DPMCS_ARBDIS; |
| 111 | /* Enable DFP and Recycle mode */ | ||
| 112 | reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); | ||
| 113 | reg |= IXGBE_DPMCS_TSOEF; | 111 | reg |= IXGBE_DPMCS_TSOEF; |
| 112 | |||
| 114 | /* Configure Max TSO packet size 34KB including payload and headers */ | 113 | /* Configure Max TSO packet size 34KB including payload and headers */ |
| 115 | reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); | 114 | reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); |
| 116 | 115 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bad8f14b1941..be4b1fb3d0d2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -1998,7 +1998,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
| 1998 | return total_rx_packets; | 1998 | return total_rx_packets; |
| 1999 | } | 1999 | } |
| 2000 | 2000 | ||
| 2001 | #ifdef CONFIG_NET_LL_RX_POLL | 2001 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 2002 | /* must be called with local_bh_disable()d */ | 2002 | /* must be called with local_bh_disable()d */ |
| 2003 | static int ixgbe_low_latency_recv(struct napi_struct *napi) | 2003 | static int ixgbe_low_latency_recv(struct napi_struct *napi) |
| 2004 | { | 2004 | { |
| @@ -2030,7 +2030,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) | |||
| 2030 | 2030 | ||
| 2031 | return found; | 2031 | return found; |
| 2032 | } | 2032 | } |
| 2033 | #endif /* CONFIG_NET_LL_RX_POLL */ | 2033 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 2034 | 2034 | ||
| 2035 | /** | 2035 | /** |
| 2036 | * ixgbe_configure_msix - Configure MSI-X hardware | 2036 | * ixgbe_configure_msix - Configure MSI-X hardware |
| @@ -7227,7 +7227,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
| 7227 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7227 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 7228 | .ndo_poll_controller = ixgbe_netpoll, | 7228 | .ndo_poll_controller = ixgbe_netpoll, |
| 7229 | #endif | 7229 | #endif |
| 7230 | #ifdef CONFIG_NET_LL_RX_POLL | 7230 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 7231 | .ndo_busy_poll = ixgbe_low_latency_recv, | 7231 | .ndo_busy_poll = ixgbe_low_latency_recv, |
| 7232 | #endif | 7232 | #endif |
| 7233 | #ifdef IXGBE_FCOE | 7233 | #ifdef IXGBE_FCOE |
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7fbe6abf6054..23de82a9da82 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
| @@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev, | |||
| 3069 | jwrite32(jme, JME_APMC, apmc); | 3069 | jwrite32(jme, JME_APMC, apmc); |
| 3070 | } | 3070 | } |
| 3071 | 3071 | ||
| 3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) | 3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) |
| 3073 | 3073 | ||
| 3074 | spin_lock_init(&jme->phy_lock); | 3074 | spin_lock_init(&jme->phy_lock); |
| 3075 | spin_lock_init(&jme->macaddr_lock); | 3075 | spin_lock_init(&jme->macaddr_lock); |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 712779fb12b7..b017818bccae 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
| @@ -88,6 +88,8 @@ | |||
| 88 | #define MVNETA_TX_IN_PRGRS BIT(1) | 88 | #define MVNETA_TX_IN_PRGRS BIT(1) |
| 89 | #define MVNETA_TX_FIFO_EMPTY BIT(8) | 89 | #define MVNETA_TX_FIFO_EMPTY BIT(8) |
| 90 | #define MVNETA_RX_MIN_FRAME_SIZE 0x247c | 90 | #define MVNETA_RX_MIN_FRAME_SIZE 0x247c |
| 91 | #define MVNETA_SGMII_SERDES_CFG 0x24A0 | ||
| 92 | #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 | ||
| 91 | #define MVNETA_TYPE_PRIO 0x24bc | 93 | #define MVNETA_TYPE_PRIO 0x24bc |
| 92 | #define MVNETA_FORCE_UNI BIT(21) | 94 | #define MVNETA_FORCE_UNI BIT(21) |
| 93 | #define MVNETA_TXQ_CMD_1 0x24e4 | 95 | #define MVNETA_TXQ_CMD_1 0x24e4 |
| @@ -655,6 +657,8 @@ static void mvneta_port_sgmii_config(struct mvneta_port *pp) | |||
| 655 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); | 657 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
| 656 | val |= MVNETA_GMAC2_PSC_ENABLE; | 658 | val |= MVNETA_GMAC2_PSC_ENABLE; |
| 657 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); | 659 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); |
| 660 | |||
| 661 | mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); | ||
| 658 | } | 662 | } |
| 659 | 663 | ||
| 660 | /* Start the Ethernet port RX and TX activity */ | 664 | /* Start the Ethernet port RX and TX activity */ |
| @@ -2728,28 +2732,24 @@ static int mvneta_probe(struct platform_device *pdev) | |||
| 2728 | 2732 | ||
| 2729 | pp = netdev_priv(dev); | 2733 | pp = netdev_priv(dev); |
| 2730 | 2734 | ||
| 2731 | pp->tx_done_timer.function = mvneta_tx_done_timer_callback; | ||
| 2732 | init_timer(&pp->tx_done_timer); | ||
| 2733 | clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); | ||
| 2734 | |||
| 2735 | pp->weight = MVNETA_RX_POLL_WEIGHT; | 2735 | pp->weight = MVNETA_RX_POLL_WEIGHT; |
| 2736 | pp->phy_node = phy_node; | 2736 | pp->phy_node = phy_node; |
| 2737 | pp->phy_interface = phy_mode; | 2737 | pp->phy_interface = phy_mode; |
| 2738 | 2738 | ||
| 2739 | pp->base = of_iomap(dn, 0); | ||
| 2740 | if (pp->base == NULL) { | ||
| 2741 | err = -ENOMEM; | ||
| 2742 | goto err_free_irq; | ||
| 2743 | } | ||
| 2744 | |||
| 2745 | pp->clk = devm_clk_get(&pdev->dev, NULL); | 2739 | pp->clk = devm_clk_get(&pdev->dev, NULL); |
| 2746 | if (IS_ERR(pp->clk)) { | 2740 | if (IS_ERR(pp->clk)) { |
| 2747 | err = PTR_ERR(pp->clk); | 2741 | err = PTR_ERR(pp->clk); |
| 2748 | goto err_unmap; | 2742 | goto err_free_irq; |
| 2749 | } | 2743 | } |
| 2750 | 2744 | ||
| 2751 | clk_prepare_enable(pp->clk); | 2745 | clk_prepare_enable(pp->clk); |
| 2752 | 2746 | ||
| 2747 | pp->base = of_iomap(dn, 0); | ||
| 2748 | if (pp->base == NULL) { | ||
| 2749 | err = -ENOMEM; | ||
| 2750 | goto err_clk; | ||
| 2751 | } | ||
| 2752 | |||
| 2753 | dt_mac_addr = of_get_mac_address(dn); | 2753 | dt_mac_addr = of_get_mac_address(dn); |
| 2754 | if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { | 2754 | if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { |
| 2755 | mac_from = "device tree"; | 2755 | mac_from = "device tree"; |
| @@ -2766,6 +2766,9 @@ static int mvneta_probe(struct platform_device *pdev) | |||
| 2766 | } | 2766 | } |
| 2767 | 2767 | ||
| 2768 | pp->tx_done_timer.data = (unsigned long)dev; | 2768 | pp->tx_done_timer.data = (unsigned long)dev; |
| 2769 | pp->tx_done_timer.function = mvneta_tx_done_timer_callback; | ||
| 2770 | init_timer(&pp->tx_done_timer); | ||
| 2771 | clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); | ||
| 2769 | 2772 | ||
| 2770 | pp->tx_ring_size = MVNETA_MAX_TXD; | 2773 | pp->tx_ring_size = MVNETA_MAX_TXD; |
| 2771 | pp->rx_ring_size = MVNETA_MAX_RXD; | 2774 | pp->rx_ring_size = MVNETA_MAX_RXD; |
| @@ -2776,7 +2779,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
| 2776 | err = mvneta_init(pp, phy_addr); | 2779 | err = mvneta_init(pp, phy_addr); |
| 2777 | if (err < 0) { | 2780 | if (err < 0) { |
| 2778 | dev_err(&pdev->dev, "can't init eth hal\n"); | 2781 | dev_err(&pdev->dev, "can't init eth hal\n"); |
| 2779 | goto err_clk; | 2782 | goto err_unmap; |
| 2780 | } | 2783 | } |
| 2781 | mvneta_port_power_up(pp, phy_mode); | 2784 | mvneta_port_power_up(pp, phy_mode); |
| 2782 | 2785 | ||
| @@ -2806,10 +2809,10 @@ static int mvneta_probe(struct platform_device *pdev) | |||
| 2806 | 2809 | ||
| 2807 | err_deinit: | 2810 | err_deinit: |
| 2808 | mvneta_deinit(pp); | 2811 | mvneta_deinit(pp); |
| 2809 | err_clk: | ||
| 2810 | clk_disable_unprepare(pp->clk); | ||
| 2811 | err_unmap: | 2812 | err_unmap: |
| 2812 | iounmap(pp->base); | 2813 | iounmap(pp->base); |
| 2814 | err_clk: | ||
| 2815 | clk_disable_unprepare(pp->clk); | ||
| 2813 | err_free_irq: | 2816 | err_free_irq: |
| 2814 | irq_dispose_mapping(dev->irq); | 2817 | irq_dispose_mapping(dev->irq); |
| 2815 | err_free_netdev: | 2818 | err_free_netdev: |
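The mvneta probe fix is an ordering repair: the clock is now enabled before the registers are mapped, and the err_clk/err_unmap labels are swapped so teardown runs in reverse order of acquisition. A self-contained sketch of the goto-unwind ladder — malloc/free stand in for clk_prepare_enable()/of_iomap() and their teardown, and unlike a real probe this demo releases everything even on success:

    #include <stdio.h>
    #include <stdlib.h>

    static int probe(int fail_at)
    {
        void *clk, *regs;
        int err = 0;

        clk = (fail_at == 1) ? NULL : malloc(1);    /* step 1: enable clock */
        if (!clk) { err = -1; goto out; }

        regs = (fail_at == 2) ? NULL : malloc(1);   /* step 2: map registers */
        if (!regs) { err = -1; goto err_clk; }

        puts("probe ok");
        free(regs);         /* demo-only teardown of the later resource */
    err_clk:
        free(clk);          /* released last: reverse of acquisition */
    out:
        return err;
    }

    int main(void)
    {
        printf("fail at map: %d\n", probe(2));  /* clock still freed on the way out */
        printf("success:     %d\n", probe(0));
        return 0;
    }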
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
| @@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) | |||
| 931 | } | 931 | } |
| 932 | 932 | ||
| 933 | /* Allocate and setup a new buffer for receiving */ | 933 | /* Allocate and setup a new buffer for receiving */ |
| 934 | static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | 934 | static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, |
| 935 | struct sk_buff *skb, unsigned int bufsize) | 935 | struct sk_buff *skb, unsigned int bufsize) |
| 936 | { | 936 | { |
| 937 | struct skge_rx_desc *rd = e->desc; | 937 | struct skge_rx_desc *rd = e->desc; |
| 938 | u64 map; | 938 | dma_addr_t map; |
| 939 | 939 | ||
| 940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, | 940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, |
| 941 | PCI_DMA_FROMDEVICE); | 941 | PCI_DMA_FROMDEVICE); |
| 942 | 942 | ||
| 943 | rd->dma_lo = map; | 943 | if (pci_dma_mapping_error(skge->hw->pdev, map)) |
| 944 | rd->dma_hi = map >> 32; | 944 | return -1; |
| 945 | |||
| 946 | rd->dma_lo = lower_32_bits(map); | ||
| 947 | rd->dma_hi = upper_32_bits(map); | ||
| 945 | e->skb = skb; | 948 | e->skb = skb; |
| 946 | rd->csum1_start = ETH_HLEN; | 949 | rd->csum1_start = ETH_HLEN; |
| 947 | rd->csum2_start = ETH_HLEN; | 950 | rd->csum2_start = ETH_HLEN; |
| @@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | |||
| 953 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; | 956 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; |
| 954 | dma_unmap_addr_set(e, mapaddr, map); | 957 | dma_unmap_addr_set(e, mapaddr, map); |
| 955 | dma_unmap_len_set(e, maplen, bufsize); | 958 | dma_unmap_len_set(e, maplen, bufsize); |
| 959 | return 0; | ||
| 956 | } | 960 | } |
| 957 | 961 | ||
| 958 | /* Resume receiving using existing skb, | 962 | /* Resume receiving using existing skb, |
| @@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) | |||
| 1014 | return -ENOMEM; | 1018 | return -ENOMEM; |
| 1015 | 1019 | ||
| 1016 | skb_reserve(skb, NET_IP_ALIGN); | 1020 | skb_reserve(skb, NET_IP_ALIGN); |
| 1017 | skge_rx_setup(skge, e, skb, skge->rx_buf_size); | 1021 | if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { |
| 1022 | dev_kfree_skb(skb); | ||
| 1023 | return -EIO; | ||
| 1024 | } | ||
| 1018 | } while ((e = e->next) != ring->start); | 1025 | } while ((e = e->next) != ring->start); |
| 1019 | 1026 | ||
| 1020 | ring->to_clean = ring->start; | 1027 | ring->to_clean = ring->start; |
| @@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) | |||
| 2544 | 2551 | ||
| 2545 | BUG_ON(skge->dma & 7); | 2552 | BUG_ON(skge->dma & 7); |
| 2546 | 2553 | ||
| 2547 | if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { | 2554 | if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { |
| 2548 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); | 2555 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); |
| 2549 | err = -EINVAL; | 2556 | err = -EINVAL; |
| 2550 | goto free_pci_mem; | 2557 | goto free_pci_mem; |
| @@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
| 2729 | struct skge_tx_desc *td; | 2736 | struct skge_tx_desc *td; |
| 2730 | int i; | 2737 | int i; |
| 2731 | u32 control, len; | 2738 | u32 control, len; |
| 2732 | u64 map; | 2739 | dma_addr_t map; |
| 2733 | 2740 | ||
| 2734 | if (skb_padto(skb, ETH_ZLEN)) | 2741 | if (skb_padto(skb, ETH_ZLEN)) |
| 2735 | return NETDEV_TX_OK; | 2742 | return NETDEV_TX_OK; |
| @@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
| 2743 | e->skb = skb; | 2750 | e->skb = skb; |
| 2744 | len = skb_headlen(skb); | 2751 | len = skb_headlen(skb); |
| 2745 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2752 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); |
| 2753 | if (pci_dma_mapping_error(hw->pdev, map)) | ||
| 2754 | goto mapping_error; | ||
| 2755 | |||
| 2746 | dma_unmap_addr_set(e, mapaddr, map); | 2756 | dma_unmap_addr_set(e, mapaddr, map); |
| 2747 | dma_unmap_len_set(e, maplen, len); | 2757 | dma_unmap_len_set(e, maplen, len); |
| 2748 | 2758 | ||
| 2749 | td->dma_lo = map; | 2759 | td->dma_lo = lower_32_bits(map); |
| 2750 | td->dma_hi = map >> 32; | 2760 | td->dma_hi = upper_32_bits(map); |
| 2751 | 2761 | ||
| 2752 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2762 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 2753 | const int offset = skb_checksum_start_offset(skb); | 2763 | const int offset = skb_checksum_start_offset(skb); |
| @@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
| 2778 | 2788 | ||
| 2779 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, | 2789 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, |
| 2780 | skb_frag_size(frag), DMA_TO_DEVICE); | 2790 | skb_frag_size(frag), DMA_TO_DEVICE); |
| 2791 | if (dma_mapping_error(&hw->pdev->dev, map)) | ||
| 2792 | goto mapping_unwind; | ||
| 2781 | 2793 | ||
| 2782 | e = e->next; | 2794 | e = e->next; |
| 2783 | e->skb = skb; | 2795 | e->skb = skb; |
| 2784 | tf = e->desc; | 2796 | tf = e->desc; |
| 2785 | BUG_ON(tf->control & BMU_OWN); | 2797 | BUG_ON(tf->control & BMU_OWN); |
| 2786 | 2798 | ||
| 2787 | tf->dma_lo = map; | 2799 | tf->dma_lo = lower_32_bits(map); |
| 2788 | tf->dma_hi = (u64) map >> 32; | 2800 | tf->dma_hi = upper_32_bits(map); |
| 2789 | dma_unmap_addr_set(e, mapaddr, map); | 2801 | dma_unmap_addr_set(e, mapaddr, map); |
| 2790 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); | 2802 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); |
| 2791 | 2803 | ||
| @@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
| 2815 | } | 2827 | } |
| 2816 | 2828 | ||
| 2817 | return NETDEV_TX_OK; | 2829 | return NETDEV_TX_OK; |
| 2830 | |||
| 2831 | mapping_unwind: | ||
| 2832 | e = skge->tx_ring.to_use; | ||
| 2833 | pci_unmap_single(hw->pdev, | ||
| 2834 | dma_unmap_addr(e, mapaddr), | ||
| 2835 | dma_unmap_len(e, maplen), | ||
| 2836 | PCI_DMA_TODEVICE); | ||
| 2837 | while (i-- > 0) { | ||
| 2838 | e = e->next; | ||
| 2839 | pci_unmap_page(hw->pdev, | ||
| 2840 | dma_unmap_addr(e, mapaddr), | ||
| 2841 | dma_unmap_len(e, maplen), | ||
| 2842 | PCI_DMA_TODEVICE); | ||
| 2843 | } | ||
| 2844 | |||
| 2845 | mapping_error: | ||
| 2846 | if (net_ratelimit()) | ||
| 2847 | dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); | ||
| 2848 | dev_kfree_skb(skb); | ||
| 2849 | return NETDEV_TX_OK; | ||
| 2818 | } | 2850 | } |
| 2819 | 2851 | ||
| 2820 | 2852 | ||
| @@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
| 3045 | 3077 | ||
| 3046 | pci_dma_sync_single_for_cpu(skge->hw->pdev, | 3078 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
| 3047 | dma_unmap_addr(e, mapaddr), | 3079 | dma_unmap_addr(e, mapaddr), |
| 3048 | len, PCI_DMA_FROMDEVICE); | 3080 | dma_unmap_len(e, maplen), |
| 3081 | PCI_DMA_FROMDEVICE); | ||
| 3049 | skb_copy_from_linear_data(e->skb, skb->data, len); | 3082 | skb_copy_from_linear_data(e->skb, skb->data, len); |
| 3050 | pci_dma_sync_single_for_device(skge->hw->pdev, | 3083 | pci_dma_sync_single_for_device(skge->hw->pdev, |
| 3051 | dma_unmap_addr(e, mapaddr), | 3084 | dma_unmap_addr(e, mapaddr), |
| 3052 | len, PCI_DMA_FROMDEVICE); | 3085 | dma_unmap_len(e, maplen), |
| 3086 | PCI_DMA_FROMDEVICE); | ||
| 3053 | skge_rx_reuse(e, skge->rx_buf_size); | 3087 | skge_rx_reuse(e, skge->rx_buf_size); |
| 3054 | } else { | 3088 | } else { |
| 3055 | struct sk_buff *nskb; | 3089 | struct sk_buff *nskb; |
| @@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
| 3058 | if (!nskb) | 3092 | if (!nskb) |
| 3059 | goto resubmit; | 3093 | goto resubmit; |
| 3060 | 3094 | ||
| 3095 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { | ||
| 3096 | dev_kfree_skb(nskb); | ||
| 3097 | goto resubmit; | ||
| 3098 | } | ||
| 3099 | |||
| 3061 | pci_unmap_single(skge->hw->pdev, | 3100 | pci_unmap_single(skge->hw->pdev, |
| 3062 | dma_unmap_addr(e, mapaddr), | 3101 | dma_unmap_addr(e, mapaddr), |
| 3063 | dma_unmap_len(e, maplen), | 3102 | dma_unmap_len(e, maplen), |
| 3064 | PCI_DMA_FROMDEVICE); | 3103 | PCI_DMA_FROMDEVICE); |
| 3065 | skb = e->skb; | 3104 | skb = e->skb; |
| 3066 | prefetch(skb->data); | 3105 | prefetch(skb->data); |
| 3067 | skge_rx_setup(skge, e, nskb, skge->rx_buf_size); | ||
| 3068 | } | 3106 | } |
| 3069 | 3107 | ||
| 3070 | skb_put(skb, len); | 3108 | skb_put(skb, len); |
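The skge changes make three related repairs: DMA addresses move from a bare u64 to dma_addr_t, every mapping is checked with pci_dma_mapping_error()/dma_mapping_error() (with an unwind path for partially mapped fragments), and descriptor halves are filled via lower_32_bits()/upper_32_bits(). The helpers matter because the kernel's upper_32_bits() shifts in two 16-bit steps, which stays well defined even when dma_addr_t is only 32 bits wide, whereas a plain >> 32 on a 32-bit value is undefined behaviour. A small demonstration of the split, with the helpers open-coded for a 64-bit input:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Open-coded equivalents of the kernel helpers, for a 64-bit input. */
    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    struct rx_desc { uint32_t dma_lo, dma_hi; };

    int main(void)
    {
        uint64_t map = 0x0000000123456780ULL;   /* pretend DMA address */
        struct rx_desc rd;

        rd.dma_lo = lower_32_bits(map);
        rd.dma_hi = upper_32_bits(map);
        printf("lo=%#" PRIx32 " hi=%#" PRIx32 "\n", rd.dma_lo, rd.dma_hi);
        return 0;
    }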
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 727874f575ce..a28cd801a236 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) | |||
| 223 | case ETH_SS_STATS: | 223 | case ETH_SS_STATS: |
| 224 | return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) + | 224 | return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) + |
| 225 | (priv->tx_ring_num * 2) + | 225 | (priv->tx_ring_num * 2) + |
| 226 | #ifdef CONFIG_NET_LL_RX_POLL | 226 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 227 | (priv->rx_ring_num * 5); | 227 | (priv->rx_ring_num * 5); |
| 228 | #else | 228 | #else |
| 229 | (priv->rx_ring_num * 2); | 229 | (priv->rx_ring_num * 2); |
| @@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, | |||
| 276 | for (i = 0; i < priv->rx_ring_num; i++) { | 276 | for (i = 0; i < priv->rx_ring_num; i++) { |
| 277 | data[index++] = priv->rx_ring[i].packets; | 277 | data[index++] = priv->rx_ring[i].packets; |
| 278 | data[index++] = priv->rx_ring[i].bytes; | 278 | data[index++] = priv->rx_ring[i].bytes; |
| 279 | #ifdef CONFIG_NET_LL_RX_POLL | 279 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 280 | data[index++] = priv->rx_ring[i].yields; | 280 | data[index++] = priv->rx_ring[i].yields; |
| 281 | data[index++] = priv->rx_ring[i].misses; | 281 | data[index++] = priv->rx_ring[i].misses; |
| 282 | data[index++] = priv->rx_ring[i].cleaned; | 282 | data[index++] = priv->rx_ring[i].cleaned; |
| @@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev, | |||
| 344 | "rx%d_packets", i); | 344 | "rx%d_packets", i); |
| 345 | sprintf(data + (index++) * ETH_GSTRING_LEN, | 345 | sprintf(data + (index++) * ETH_GSTRING_LEN, |
| 346 | "rx%d_bytes", i); | 346 | "rx%d_bytes", i); |
| 347 | #ifdef CONFIG_NET_LL_RX_POLL | 347 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 348 | sprintf(data + (index++) * ETH_GSTRING_LEN, | 348 | sprintf(data + (index++) * ETH_GSTRING_LEN, |
| 349 | "rx%d_napi_yield", i); | 349 | "rx%d_napi_yield", i); |
| 350 | sprintf(data + (index++) * ETH_GSTRING_LEN, | 350 | sprintf(data + (index++) * ETH_GSTRING_LEN, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 5eac871399d8..fa37b7a61213 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) | |||
| 68 | return 0; | 68 | return 0; |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | #ifdef CONFIG_NET_LL_RX_POLL | 71 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 72 | /* must be called with local_bh_disable()d */ | 72 | /* must be called with local_bh_disable()d */ |
| 73 | static int mlx4_en_low_latency_recv(struct napi_struct *napi) | 73 | static int mlx4_en_low_latency_recv(struct napi_struct *napi) |
| 74 | { | 74 | { |
| @@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi) | |||
| 94 | 94 | ||
| 95 | return done; | 95 | return done; |
| 96 | } | 96 | } |
| 97 | #endif /* CONFIG_NET_LL_RX_POLL */ | 97 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 98 | 98 | ||
| 99 | #ifdef CONFIG_RFS_ACCEL | 99 | #ifdef CONFIG_RFS_ACCEL |
| 100 | 100 | ||
| @@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
| 2140 | #ifdef CONFIG_RFS_ACCEL | 2140 | #ifdef CONFIG_RFS_ACCEL |
| 2141 | .ndo_rx_flow_steer = mlx4_en_filter_rfs, | 2141 | .ndo_rx_flow_steer = mlx4_en_filter_rfs, |
| 2142 | #endif | 2142 | #endif |
| 2143 | #ifdef CONFIG_NET_LL_RX_POLL | 2143 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 2144 | .ndo_busy_poll = mlx4_en_low_latency_recv, | 2144 | .ndo_busy_poll = mlx4_en_low_latency_recv, |
| 2145 | #endif | 2145 | #endif |
| 2146 | }; | 2146 | }; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 8873d6802c80..6fc6dabc78d5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -845,16 +845,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, | |||
| 845 | MLX4_CMD_NATIVE); | 845 | MLX4_CMD_NATIVE); |
| 846 | 846 | ||
| 847 | if (!err && dev->caps.function != slave) { | 847 | if (!err && dev->caps.function != slave) { |
| 848 | /* if config MAC in DB use it */ | 848 | def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; |
| 849 | if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac) | ||
| 850 | def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; | ||
| 851 | else { | ||
| 852 | /* set slave default_mac address */ | ||
| 853 | MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); | ||
| 854 | def_mac += slave << 8; | ||
| 855 | priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac; | ||
| 856 | } | ||
| 857 | |||
| 858 | MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); | 849 | MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); |
| 859 | 850 | ||
| 860 | /* get port type - currently only eth is enabled */ | 851 | /* get port type - currently only eth is enabled */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e85af922dcdc..36be3208786a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -371,7 +371,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 371 | 371 | ||
| 372 | dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; | 372 | dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; |
| 373 | 373 | ||
| 374 | if (!enable_64b_cqe_eqe) { | 374 | if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { |
| 375 | if (dev_cap->flags & | 375 | if (dev_cap->flags & |
| 376 | (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { | 376 | (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { |
| 377 | mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); | 377 | mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 35fb60e2320c..5e0aa569306a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -292,7 +292,7 @@ struct mlx4_en_rx_ring { | |||
| 292 | void *rx_info; | 292 | void *rx_info; |
| 293 | unsigned long bytes; | 293 | unsigned long bytes; |
| 294 | unsigned long packets; | 294 | unsigned long packets; |
| 295 | #ifdef CONFIG_NET_LL_RX_POLL | 295 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 296 | unsigned long yields; | 296 | unsigned long yields; |
| 297 | unsigned long misses; | 297 | unsigned long misses; |
| 298 | unsigned long cleaned; | 298 | unsigned long cleaned; |
| @@ -318,7 +318,7 @@ struct mlx4_en_cq { | |||
| 318 | struct mlx4_cqe *buf; | 318 | struct mlx4_cqe *buf; |
| 319 | #define MLX4_EN_OPCODE_ERROR 0x1e | 319 | #define MLX4_EN_OPCODE_ERROR 0x1e |
| 320 | 320 | ||
| 321 | #ifdef CONFIG_NET_LL_RX_POLL | 321 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 322 | unsigned int state; | 322 | unsigned int state; |
| 323 | #define MLX4_EN_CQ_STATE_IDLE 0 | 323 | #define MLX4_EN_CQ_STATE_IDLE 0 |
| 324 | #define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ | 324 | #define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ |
| @@ -329,7 +329,7 @@ struct mlx4_en_cq { | |||
| 329 | #define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) | 329 | #define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) |
| 330 | #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) | 330 | #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) |
| 331 | spinlock_t poll_lock; /* protects from LLS/napi conflicts */ | 331 | spinlock_t poll_lock; /* protects from LLS/napi conflicts */ |
| 332 | #endif /* CONFIG_NET_LL_RX_POLL */ | 332 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 333 | }; | 333 | }; |
| 334 | 334 | ||
| 335 | struct mlx4_en_port_profile { | 335 | struct mlx4_en_port_profile { |
| @@ -580,7 +580,7 @@ struct mlx4_mac_entry { | |||
| 580 | struct rcu_head rcu; | 580 | struct rcu_head rcu; |
| 581 | }; | 581 | }; |
| 582 | 582 | ||
| 583 | #ifdef CONFIG_NET_LL_RX_POLL | 583 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 584 | static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) | 584 | static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) |
| 585 | { | 585 | { |
| 586 | spin_lock_init(&cq->poll_lock); | 586 | spin_lock_init(&cq->poll_lock); |
| @@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) | |||
| 687 | { | 687 | { |
| 688 | return false; | 688 | return false; |
| 689 | } | 689 | } |
| 690 | #endif /* CONFIG_NET_LL_RX_POLL */ | 690 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 691 | 691 | ||
| 692 | #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) | 692 | #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) |
| 693 | 693 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 205753a04cfc..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | #include "mlx5_core.h" | 46 | #include "mlx5_core.h" |
| 47 | 47 | ||
| 48 | enum { | 48 | enum { |
| 49 | CMD_IF_REV = 3, | 49 | CMD_IF_REV = 5, |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | enum { | 52 | enum { |
| @@ -282,6 +282,12 @@ const char *mlx5_command_str(int command) | |||
| 282 | case MLX5_CMD_OP_TEARDOWN_HCA: | 282 | case MLX5_CMD_OP_TEARDOWN_HCA: |
| 283 | return "TEARDOWN_HCA"; | 283 | return "TEARDOWN_HCA"; |
| 284 | 284 | ||
| 285 | case MLX5_CMD_OP_ENABLE_HCA: | ||
| 286 | return "MLX5_CMD_OP_ENABLE_HCA"; | ||
| 287 | |||
| 288 | case MLX5_CMD_OP_DISABLE_HCA: | ||
| 289 | return "MLX5_CMD_OP_DISABLE_HCA"; | ||
| 290 | |||
| 285 | case MLX5_CMD_OP_QUERY_PAGES: | 291 | case MLX5_CMD_OP_QUERY_PAGES: |
| 286 | return "QUERY_PAGES"; | 292 | return "QUERY_PAGES"; |
| 287 | 293 | ||
| @@ -1113,7 +1119,13 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) | |||
| 1113 | 1119 | ||
| 1114 | for (i = 0; i < (1 << cmd->log_sz); i++) { | 1120 | for (i = 0; i < (1 << cmd->log_sz); i++) { |
| 1115 | if (test_bit(i, &vector)) { | 1121 | if (test_bit(i, &vector)) { |
| 1122 | struct semaphore *sem; | ||
| 1123 | |||
| 1116 | ent = cmd->ent_arr[i]; | 1124 | ent = cmd->ent_arr[i]; |
| 1125 | if (ent->page_queue) | ||
| 1126 | sem = &cmd->pages_sem; | ||
| 1127 | else | ||
| 1128 | sem = &cmd->sem; | ||
| 1117 | ktime_get_ts(&ent->ts2); | 1129 | ktime_get_ts(&ent->ts2); |
| 1118 | memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); | 1130 | memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); |
| 1119 | dump_command(dev, ent, 0); | 1131 | dump_command(dev, ent, 0); |
| @@ -1136,10 +1148,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) | |||
| 1136 | } else { | 1148 | } else { |
| 1137 | complete(&ent->done); | 1149 | complete(&ent->done); |
| 1138 | } | 1150 | } |
| 1139 | if (ent->page_queue) | 1151 | up(sem); |
| 1140 | up(&cmd->pages_sem); | ||
| 1141 | else | ||
| 1142 | up(&cmd->sem); | ||
| 1143 | } | 1152 | } |
| 1144 | } | 1153 | } |
| 1145 | } | 1154 | } |
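The cmd.c change latches which semaphore to release before the command entry is handed back: once complete() has run, the waiting thread may free the entry, so reading ent->page_queue afterwards would be a use-after-free. A minimal model of the latch-before-handoff rule, where free() stands in for the completion that transfers ownership:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cmd_ent { bool page_queue; };

    /* Decide everything that depends on *ent before ownership is handed
     * back: after the handoff the owner may free the entry, so no field
     * may be touched again. */
    static const char *finish_command(struct cmd_ent *ent)
    {
        const char *sem = ent->page_queue ? "pages_sem" : "cmd_sem";

        free(ent);      /* stands in for complete(&ent->done) + waiter freeing */
        return sem;     /* safe: chosen before the entry went away */
    }

    int main(void)
    {
        struct cmd_ent *ent = malloc(sizeof(*ent));

        if (!ent)
            return 1;
        ent->page_queue = true;
        printf("release %s\n", finish_command(ent));
        return 0;
    }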
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
| @@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
| 268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: | 268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: |
| 269 | { | 269 | { |
| 270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); | 270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); |
| 271 | s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); | 271 | s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); |
| 272 | 272 | ||
| 273 | mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages); | 273 | mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages); |
| 274 | mlx5_core_req_pages_handler(dev, func_id, npages); | 274 | mlx5_core_req_pages_handler(dev, func_id, npages); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
| @@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, | |||
| 113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; | 113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; |
| 114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; | 114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; |
| 115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; | 115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; |
| 116 | caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); | 116 | caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; |
| 117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); | 117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); |
| 118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); | 118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); |
| 119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; | 119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; |
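max_qp_mcg moves from a 16-bit field to the low 24 bits of a 32-bit big-endian word, hence the be32_to_cpu() conversion followed by a 0xffffff mask. A self-contained demonstration, using ntohl() in place of be32_to_cpu() and an invented wire value:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* htonl/ntohl stand in for the be32 helpers */

    int main(void)
    {
        /* Wire bytes: a 32-bit big-endian word whose low 24 bits carry the
         * field of interest; the top byte belongs to a neighbouring field. */
        uint32_t wire = htonl(0xAB123456);

        uint32_t max_qp_mcg = ntohl(wire) & 0xffffff;   /* mask off byte 0 */
        printf("%#x\n", max_qp_mcg);                    /* prints 0x123456 */
        return 0;
    }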
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
| @@ -55,33 +55,9 @@ enum { | |||
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | static DEFINE_SPINLOCK(health_lock); | 57 | static DEFINE_SPINLOCK(health_lock); |
| 58 | |||
| 59 | static LIST_HEAD(health_list); | 58 | static LIST_HEAD(health_list); |
| 60 | static struct work_struct health_work; | 59 | static struct work_struct health_work; |
| 61 | 60 | ||
| 62 | static health_handler_t reg_handler; | ||
| 63 | int mlx5_register_health_report_handler(health_handler_t handler) | ||
| 64 | { | ||
| 65 | spin_lock_irq(&health_lock); | ||
| 66 | if (reg_handler) { | ||
| 67 | spin_unlock_irq(&health_lock); | ||
| 68 | return -EEXIST; | ||
| 69 | } | ||
| 70 | reg_handler = handler; | ||
| 71 | spin_unlock_irq(&health_lock); | ||
| 72 | |||
| 73 | return 0; | ||
| 74 | } | ||
| 75 | EXPORT_SYMBOL(mlx5_register_health_report_handler); | ||
| 76 | |||
| 77 | void mlx5_unregister_health_report_handler(void) | ||
| 78 | { | ||
| 79 | spin_lock_irq(&health_lock); | ||
| 80 | reg_handler = NULL; | ||
| 81 | spin_unlock_irq(&health_lock); | ||
| 82 | } | ||
| 83 | EXPORT_SYMBOL(mlx5_unregister_health_report_handler); | ||
| 84 | |||
| 85 | static void health_care(struct work_struct *work) | 61 | static void health_care(struct work_struct *work) |
| 86 | { | 62 | { |
| 87 | struct mlx5_core_health *health, *n; | 63 | struct mlx5_core_health *health, *n; |
| @@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) | |||
| 98 | priv = container_of(health, struct mlx5_priv, health); | 74 | priv = container_of(health, struct mlx5_priv, health); |
| 99 | dev = container_of(priv, struct mlx5_core_dev, priv); | 75 | dev = container_of(priv, struct mlx5_core_dev, priv); |
| 100 | mlx5_core_warn(dev, "handling bad device here\n"); | 76 | mlx5_core_warn(dev, "handling bad device here\n"); |
| 77 | /* nothing yet */ | ||
| 101 | spin_lock_irq(&health_lock); | 78 | spin_lock_irq(&health_lock); |
| 102 | if (reg_handler) | ||
| 103 | reg_handler(dev->pdev, health->health, | ||
| 104 | sizeof(health->health)); | ||
| 105 | |||
| 106 | list_del_init(&health->list); | 79 | list_del_init(&health->list); |
| 107 | spin_unlock_irq(&health_lock); | 80 | spin_unlock_irq(&health_lock); |
| 108 | } | 81 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 12242de2b0e3..b47739b0b5f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev) | |||
| 249 | return err; | 249 | return err; |
| 250 | } | 250 | } |
| 251 | 251 | ||
| 252 | static int mlx5_core_enable_hca(struct mlx5_core_dev *dev) | ||
| 253 | { | ||
| 254 | int err; | ||
| 255 | struct mlx5_enable_hca_mbox_in in; | ||
| 256 | struct mlx5_enable_hca_mbox_out out; | ||
| 257 | |||
| 258 | memset(&in, 0, sizeof(in)); | ||
| 259 | memset(&out, 0, sizeof(out)); | ||
| 260 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA); | ||
| 261 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
| 262 | if (err) | ||
| 263 | return err; | ||
| 264 | |||
| 265 | if (out.hdr.status) | ||
| 266 | return mlx5_cmd_status_to_err(&out.hdr); | ||
| 267 | |||
| 268 | return 0; | ||
| 269 | } | ||
| 270 | |||
| 271 | static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) | ||
| 272 | { | ||
| 273 | int err; | ||
| 274 | struct mlx5_disable_hca_mbox_in in; | ||
| 275 | struct mlx5_disable_hca_mbox_out out; | ||
| 276 | |||
| 277 | memset(&in, 0, sizeof(in)); | ||
| 278 | memset(&out, 0, sizeof(out)); | ||
| 279 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA); | ||
| 280 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
| 281 | if (err) | ||
| 282 | return err; | ||
| 283 | |||
| 284 | if (out.hdr.status) | ||
| 285 | return mlx5_cmd_status_to_err(&out.hdr); | ||
| 286 | |||
| 287 | return 0; | ||
| 288 | } | ||
| 289 | |||
| 252 | int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) | 290 | int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) |
| 253 | { | 291 | { |
| 254 | struct mlx5_priv *priv = &dev->priv; | 292 | struct mlx5_priv *priv = &dev->priv; |
| @@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) | |||
| 304 | } | 342 | } |
| 305 | 343 | ||
| 306 | mlx5_pagealloc_init(dev); | 344 | mlx5_pagealloc_init(dev); |
| 345 | |||
| 346 | err = mlx5_core_enable_hca(dev); | ||
| 347 | if (err) { | ||
| 348 | dev_err(&pdev->dev, "enable hca failed\n"); | ||
| 349 | goto err_pagealloc_cleanup; | ||
| 350 | } | ||
| 351 | |||
| 352 | err = mlx5_satisfy_startup_pages(dev, 1); | ||
| 353 | if (err) { | ||
| 354 | dev_err(&pdev->dev, "failed to allocate boot pages\n"); | ||
| 355 | goto err_disable_hca; | ||
| 356 | } | ||
| 357 | |||
| 307 | err = set_hca_ctrl(dev); | 358 | err = set_hca_ctrl(dev); |
| 308 | if (err) { | 359 | if (err) { |
| 309 | dev_err(&pdev->dev, "set_hca_ctrl failed\n"); | 360 | dev_err(&pdev->dev, "set_hca_ctrl failed\n"); |
| 310 | goto err_pagealloc_cleanup; | 361 | goto reclaim_boot_pages; |
| 311 | } | 362 | } |
| 312 | 363 | ||
| 313 | err = handle_hca_cap(dev); | 364 | err = handle_hca_cap(dev); |
| 314 | if (err) { | 365 | if (err) { |
| 315 | dev_err(&pdev->dev, "handle_hca_cap failed\n"); | 366 | dev_err(&pdev->dev, "handle_hca_cap failed\n"); |
| 316 | goto err_pagealloc_cleanup; | 367 | goto reclaim_boot_pages; |
| 317 | } | 368 | } |
| 318 | 369 | ||
| 319 | err = mlx5_satisfy_startup_pages(dev); | 370 | err = mlx5_satisfy_startup_pages(dev, 0); |
| 320 | if (err) { | 371 | if (err) { |
| 321 | dev_err(&pdev->dev, "failed to allocate startup pages\n"); | 372 | dev_err(&pdev->dev, "failed to allocate init pages\n"); |
| 322 | goto err_pagealloc_cleanup; | 373 | goto reclaim_boot_pages; |
| 323 | } | 374 | } |
| 324 | 375 | ||
| 325 | err = mlx5_pagealloc_start(dev); | 376 | err = mlx5_pagealloc_start(dev); |
| 326 | if (err) { | 377 | if (err) { |
| 327 | dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); | 378 | dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); |
| 328 | goto err_reclaim_pages; | 379 | goto reclaim_boot_pages; |
| 329 | } | 380 | } |
| 330 | 381 | ||
| 331 | err = mlx5_cmd_init_hca(dev); | 382 | err = mlx5_cmd_init_hca(dev); |
| @@ -396,9 +447,12 @@ err_stop_poll: | |||
| 396 | err_pagealloc_stop: | 447 | err_pagealloc_stop: |
| 397 | mlx5_pagealloc_stop(dev); | 448 | mlx5_pagealloc_stop(dev); |
| 398 | 449 | ||
| 399 | err_reclaim_pages: | 450 | reclaim_boot_pages: |
| 400 | mlx5_reclaim_startup_pages(dev); | 451 | mlx5_reclaim_startup_pages(dev); |
| 401 | 452 | ||
| 453 | err_disable_hca: | ||
| 454 | mlx5_core_disable_hca(dev); | ||
| 455 | |||
| 402 | err_pagealloc_cleanup: | 456 | err_pagealloc_cleanup: |
| 403 | mlx5_pagealloc_cleanup(dev); | 457 | mlx5_pagealloc_cleanup(dev); |
| 404 | mlx5_cmd_cleanup(dev); | 458 | mlx5_cmd_cleanup(dev); |
| @@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev) | |||
| 434 | mlx5_cmd_teardown_hca(dev); | 488 | mlx5_cmd_teardown_hca(dev); |
| 435 | mlx5_pagealloc_stop(dev); | 489 | mlx5_pagealloc_stop(dev); |
| 436 | mlx5_reclaim_startup_pages(dev); | 490 | mlx5_reclaim_startup_pages(dev); |
| 491 | mlx5_core_disable_hca(dev); | ||
| 437 | mlx5_pagealloc_cleanup(dev); | 492 | mlx5_pagealloc_cleanup(dev); |
| 438 | mlx5_cmd_cleanup(dev); | 493 | mlx5_cmd_cleanup(dev); |
| 439 | iounmap(dev->iseg); | 494 | iounmap(dev->iseg); |
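The reordered bring-up in main.c enables the HCA first, hands the firmware its boot pages before set_hca_ctrl()/handle_hca_cap() run, and supplies the init pages only afterwards; mlx5_satisfy_startup_pages() gains a boot flag to select the stage, and the error ladder gains matching reclaim/disable steps. A toy model of the two-stage page hand-off (the stage constants echo the diff; the page counts are invented):

    #include <stdio.h>

    /* Stage selector, echoing the new opmod values in pagealloc.c. */
    enum { BOOT_PAGES = 1, INIT_PAGES = 2 };

    /* Invented figures standing in for the firmware's replies. */
    static int query_pages(int stage)
    {
        return stage == BOOT_PAGES ? 4 : 512;
    }

    static void satisfy_startup_pages(int boot)
    {
        int npages = query_pages(boot ? BOOT_PAGES : INIT_PAGES);

        printf("giving %d %s pages\n", npages, boot ? "boot" : "init");
    }

    int main(void)
    {
        satisfy_startup_pages(1);   /* before set_hca_ctrl()/handle_hca_cap() */
        satisfy_startup_pages(0);   /* after the capabilities are configured */
        return 0;
    }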
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index f0bf46339b28..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | |||
| @@ -43,10 +43,16 @@ enum { | |||
| 43 | MLX5_PAGES_TAKE = 2 | 43 | MLX5_PAGES_TAKE = 2 |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | enum { | ||
| 47 | MLX5_BOOT_PAGES = 1, | ||
| 48 | MLX5_INIT_PAGES = 2, | ||
| 49 | MLX5_POST_INIT_PAGES = 3 | ||
| 50 | }; | ||
| 51 | |||
| 46 | struct mlx5_pages_req { | 52 | struct mlx5_pages_req { |
| 47 | struct mlx5_core_dev *dev; | 53 | struct mlx5_core_dev *dev; |
| 48 | u32 func_id; | 54 | u32 func_id; |
| 49 | s16 npages; | 55 | s32 npages; |
| 50 | struct work_struct work; | 56 | struct work_struct work; |
| 51 | }; | 57 | }; |
| 52 | 58 | ||
| @@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { | |||
| 64 | 70 | ||
| 65 | struct mlx5_query_pages_outbox { | 71 | struct mlx5_query_pages_outbox { |
| 66 | struct mlx5_outbox_hdr hdr; | 72 | struct mlx5_outbox_hdr hdr; |
| 67 | u8 reserved[2]; | 73 | __be16 rsvd; |
| 68 | __be16 func_id; | 74 | __be16 func_id; |
| 69 | __be16 init_pages; | 75 | __be32 num_pages; |
| 70 | __be16 num_pages; | ||
| 71 | }; | 76 | }; |
| 72 | 77 | ||
| 73 | struct mlx5_manage_pages_inbox { | 78 | struct mlx5_manage_pages_inbox { |
| 74 | struct mlx5_inbox_hdr hdr; | 79 | struct mlx5_inbox_hdr hdr; |
| 75 | __be16 rsvd0; | 80 | __be16 rsvd; |
| 76 | __be16 func_id; | 81 | __be16 func_id; |
| 77 | __be16 rsvd1; | 82 | __be32 num_entries; |
| 78 | __be16 num_entries; | ||
| 79 | u8 rsvd2[16]; | ||
| 80 | __be64 pas[0]; | 83 | __be64 pas[0]; |
| 81 | }; | 84 | }; |
| 82 | 85 | ||
| 83 | struct mlx5_manage_pages_outbox { | 86 | struct mlx5_manage_pages_outbox { |
| 84 | struct mlx5_outbox_hdr hdr; | 87 | struct mlx5_outbox_hdr hdr; |
| 85 | u8 rsvd0[2]; | 88 | __be32 num_entries; |
| 86 | __be16 num_entries; | 89 | u8 rsvd[4]; |
| 87 | u8 rsvd1[20]; | ||
| 88 | __be64 pas[0]; | 90 | __be64 pas[0]; |
| 89 | }; | 91 | }; |
| 90 | 92 | ||
| @@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) | |||
| 146 | } | 148 | } |
| 147 | 149 | ||
| 148 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | 150 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, |
| 149 | s16 *pages, s16 *init_pages) | 151 | s32 *npages, int boot) |
| 150 | { | 152 | { |
| 151 | struct mlx5_query_pages_inbox in; | 153 | struct mlx5_query_pages_inbox in; |
| 152 | struct mlx5_query_pages_outbox out; | 154 | struct mlx5_query_pages_outbox out; |
| @@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
| 155 | memset(&in, 0, sizeof(in)); | 157 | memset(&in, 0, sizeof(in)); |
| 156 | memset(&out, 0, sizeof(out)); | 158 | memset(&out, 0, sizeof(out)); |
| 157 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); | 159 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); |
| 160 | in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); | ||
| 161 | |||
| 158 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | 162 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); |
| 159 | if (err) | 163 | if (err) |
| 160 | return err; | 164 | return err; |
| @@ -162,10 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
| 162 | if (out.hdr.status) | 166 | if (out.hdr.status) |
| 163 | return mlx5_cmd_status_to_err(&out.hdr); | 167 | return mlx5_cmd_status_to_err(&out.hdr); |
| 164 | 168 | ||
| 165 | if (pages) | 169 | *npages = be32_to_cpu(out.num_pages); |
| 166 | *pages = be16_to_cpu(out.num_pages); | ||
| 167 | if (init_pages) | ||
| 168 | *init_pages = be16_to_cpu(out.init_pages); | ||
| 169 | *func_id = be16_to_cpu(out.func_id); | 170 | *func_id = be16_to_cpu(out.func_id); |
| 170 | 171 | ||
| 171 | return err; | 172 | return err; |
| @@ -219,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, | |||
| 219 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 220 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
| 220 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); | 221 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); |
| 221 | in->func_id = cpu_to_be16(func_id); | 222 | in->func_id = cpu_to_be16(func_id); |
| 222 | in->num_entries = cpu_to_be16(npages); | 223 | in->num_entries = cpu_to_be32(npages); |
| 223 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | 224 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); |
| 224 | mlx5_core_dbg(dev, "err %d\n", err); | 225 | mlx5_core_dbg(dev, "err %d\n", err); |
| 225 | if (err) { | 226 | if (err) { |
| @@ -287,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
| 287 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 288 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
| 288 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); | 289 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); |
| 289 | in.func_id = cpu_to_be16(func_id); | 290 | in.func_id = cpu_to_be16(func_id); |
| 290 | in.num_entries = cpu_to_be16(npages); | 291 | in.num_entries = cpu_to_be32(npages); |
| 291 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); | 292 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); |
| 292 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | 293 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); |
| 293 | if (err) { | 294 | if (err) { |
| @@ -301,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
| 301 | goto out_free; | 302 | goto out_free; |
| 302 | } | 303 | } |
| 303 | 304 | ||
| 304 | num_claimed = be16_to_cpu(out->num_entries); | 305 | num_claimed = be32_to_cpu(out->num_entries); |
| 305 | if (nclaimed) | 306 | if (nclaimed) |
| 306 | *nclaimed = num_claimed; | 307 | *nclaimed = num_claimed; |
| 307 | 308 | ||
| @@ -340,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) | |||
| 340 | } | 341 | } |
| 341 | 342 | ||
| 342 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 343 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
| 343 | s16 npages) | 344 | s32 npages) |
| 344 | { | 345 | { |
| 345 | struct mlx5_pages_req *req; | 346 | struct mlx5_pages_req *req; |
| 346 | 347 | ||
| @@ -357,19 +358,20 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | |||
| 357 | queue_work(dev->priv.pg_wq, &req->work); | 358 | queue_work(dev->priv.pg_wq, &req->work); |
| 358 | } | 359 | } |
| 359 | 360 | ||
| 360 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev) | 361 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) |
| 361 | { | 362 | { |
| 362 | s16 uninitialized_var(init_pages); | ||
| 363 | u16 uninitialized_var(func_id); | 363 | u16 uninitialized_var(func_id); |
| 364 | s32 uninitialized_var(npages); | ||
| 364 | int err; | 365 | int err; |
| 365 | 366 | ||
| 366 | err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages); | 367 | err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); |
| 367 | if (err) | 368 | if (err) |
| 368 | return err; | 369 | return err; |
| 369 | 370 | ||
| 370 | mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id); | 371 | mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", |
| 372 | npages, boot ? "boot" : "init", func_id); | ||
| 371 | 373 | ||
| 372 | return give_pages(dev, func_id, init_pages, 0); | 374 | return give_pages(dev, func_id, npages, 0); |
| 373 | } | 375 | } |
| 374 | 376 | ||
| 375 | static int optimal_reclaimed_pages(void) | 377 | static int optimal_reclaimed_pages(void) |
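The pagealloc rework widens the page counts from __be16 to __be32 and compacts the mailbox layouts, so every field keeps an explicit width and byte order and each call site converts with the matching be32 helper. A hypothetical userspace mirror of the reworked inbox — the field names echo the diff, htons()/htonl() stand in for cpu_to_be16()/cpu_to_be32(), and the struct is illustrative rather than the exact wire layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* Hypothetical mailbox mirroring mlx5_manage_pages_inbox: every field
     * carries an explicit width and (big-endian) byte order. */
    struct manage_pages_in {
        uint16_t rsvd;          /* __be16 */
        uint16_t func_id;       /* __be16 */
        uint32_t num_entries;   /* __be32 -- was __be16 before this rework */
    };

    int main(void)
    {
        struct manage_pages_in in = {
            .func_id     = htons(7),
            .num_entries = htonl(100000),   /* > 65535 is now representable */
        };

        printf("func %u gets %u pages\n",
               (unsigned)ntohs(in.func_id), (unsigned)ntohl(in.num_entries));
        return 0;
    }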
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 71d4a3937200..68f5d9c77c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c | |||
| @@ -164,6 +164,7 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) | |||
| 164 | uuari->uars[i].map = ioremap(addr, PAGE_SIZE); | 164 | uuari->uars[i].map = ioremap(addr, PAGE_SIZE); |
| 165 | if (!uuari->uars[i].map) { | 165 | if (!uuari->uars[i].map) { |
| 166 | mlx5_cmd_free_uar(dev, uuari->uars[i].index); | 166 | mlx5_cmd_free_uar(dev, uuari->uars[i].index); |
| 167 | err = -ENOMEM; | ||
| 167 | goto out_count; | 168 | goto out_count; |
| 168 | } | 169 | } |
| 169 | mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", | 170 | mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index cb22341a14a8..a588ffde9700 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | config PCH_GBE | 5 | config PCH_GBE |
| 6 | tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" | 6 | tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" |
| 7 | depends on PCI | 7 | depends on PCI && (X86 || COMPILE_TEST) |
| 8 | select MII | 8 | select MII |
| 9 | select PTP_1588_CLOCK_PCH | 9 | select PTP_1588_CLOCK_PCH |
| 10 | ---help--- | 10 | ---help--- |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 3fe09ab2d7c9..32675e16021e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | |||
| @@ -1171,7 +1171,6 @@ typedef struct { | |||
| 1171 | 1171 | ||
| 1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | 1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 |
| 1173 | 1173 | ||
| 1174 | #define NETXEN_NETDEV_WEIGHT 128 | ||
| 1175 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 1174 | #define NETXEN_ADAPTER_UP_MAGIC 777 |
| 1176 | #define NETXEN_NIC_PEG_TUNE 0 | 1175 | #define NETXEN_NIC_PEG_TUNE 0 |
| 1177 | 1176 | ||
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index c401b0b4353d..ec4cf7fd4123 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
| @@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) | |||
| 197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | 197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
| 198 | sds_ring = &recv_ctx->sds_rings[ring]; | 198 | sds_ring = &recv_ctx->sds_rings[ring]; |
| 199 | netif_napi_add(netdev, &sds_ring->napi, | 199 | netif_napi_add(netdev, &sds_ring->napi, |
| 200 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | 200 | netxen_nic_poll, NAPI_POLL_WEIGHT); |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | return 0; | 203 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index b00cf5665eab..221645e9f182 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -1400,8 +1400,8 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64); | |||
| 1400 | #define ADDR_IN_RANGE(addr, low, high) \ | 1400 | #define ADDR_IN_RANGE(addr, low, high) \ |
| 1401 | (((addr) < (high)) && ((addr) >= (low))) | 1401 | (((addr) < (high)) && ((addr) >= (low))) |
| 1402 | 1402 | ||
| 1403 | #define QLCRD32(adapter, off) \ | 1403 | #define QLCRD32(adapter, off, err) \ |
| 1404 | (adapter->ahw->hw_ops->read_reg)(adapter, off) | 1404 | (adapter->ahw->hw_ops->read_reg)(adapter, off, err) |
| 1405 | 1405 | ||
| 1406 | #define QLCWR32(adapter, off, val) \ | 1406 | #define QLCWR32(adapter, off, val) \ |
| 1407 | adapter->ahw->hw_ops->write_reg(adapter, off, val) | 1407 | adapter->ahw->hw_ops->write_reg(adapter, off, val) |
| @@ -1604,7 +1604,7 @@ struct qlcnic_nic_template { | |||
| 1604 | struct qlcnic_hardware_ops { | 1604 | struct qlcnic_hardware_ops { |
| 1605 | void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); | 1605 | void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); |
| 1606 | void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); | 1606 | void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); |
| 1607 | int (*read_reg) (struct qlcnic_adapter *, ulong); | 1607 | int (*read_reg) (struct qlcnic_adapter *, ulong, int *); |
| 1608 | int (*write_reg) (struct qlcnic_adapter *, ulong, u32); | 1608 | int (*write_reg) (struct qlcnic_adapter *, ulong, u32); |
| 1609 | void (*get_ocm_win) (struct qlcnic_hardware_context *); | 1609 | void (*get_ocm_win) (struct qlcnic_hardware_context *); |
| 1610 | int (*get_mac_address) (struct qlcnic_adapter *, u8 *); | 1610 | int (*get_mac_address) (struct qlcnic_adapter *, u8 *); |
| @@ -1662,12 +1662,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf, | |||
| 1662 | adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size); | 1662 | adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size); |
| 1663 | } | 1663 | } |
| 1664 | 1664 | ||
| 1665 | static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, | ||
| 1666 | ulong off) | ||
| 1667 | { | ||
| 1668 | return adapter->ahw->hw_ops->read_reg(adapter, off); | ||
| 1669 | } | ||
| 1670 | |||
| 1671 | static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, | 1665 | static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, |
| 1672 | ulong off, u32 data) | 1666 | ulong off, u32 data) |
| 1673 | { | 1667 | { |
| @@ -1869,7 +1863,8 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) | |||
| 1869 | 1863 | ||
| 1870 | static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) | 1864 | static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) |
| 1871 | { | 1865 | { |
| 1872 | adapter->ahw->hw_ops->set_mac_filter_count(adapter); | 1866 | if (adapter->ahw->hw_ops->set_mac_filter_count) |
| 1867 | adapter->ahw->hw_ops->set_mac_filter_count(adapter); | ||
| 1873 | } | 1868 | } |
| 1874 | 1869 | ||
| 1875 | static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, | 1870 | static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, |
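Threading an `int *err` through `read_reg` (and the QLCRD32 wrapper) separates the status channel from the data channel: with the old plain-`int` return, a register that legitimately read back as `(u32)-EIO` was indistinguishable from a failed access, and a real failure could masquerade as data. A user-space sketch of the convention, with all names local to the example:

```c
/* Sketch of the out-parameter convention the new QLCRD32 adopts: the
 * return value carries data only, *err carries status, so a register
 * that reads as 0xFFFFFFFB (== (u32)-EIO) is no longer "an error". */
#include <stdio.h>
#include <stdint.h>

#define EIO 5

static uint32_t read_reg(unsigned long addr, int *err)
{
	if (addr == 0) {		/* pretend address 0 is unmapped */
		*err = -EIO;
		return 0;
	}
	*err = 0;
	return 0xFFFFFFFBu;		/* a perfectly valid register value */
}

int main(void)
{
	int err = 0;
	uint32_t val = read_reg(0x10, &err);

	if (err == -EIO)
		printf("read failed\n");
	else
		printf("val = 0x%x\n", (unsigned)val);
	return 0;
}
```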
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 0913c623a67e..9d4bb7f83904 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
| @@ -228,17 +228,17 @@ static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr) | |||
| 228 | return 0; | 228 | return 0; |
| 229 | } | 229 | } |
| 230 | 230 | ||
| 231 | int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr) | 231 | int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, |
| 232 | int *err) | ||
| 232 | { | 233 | { |
| 233 | int ret; | ||
| 234 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 234 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 235 | 235 | ||
| 236 | ret = __qlcnic_set_win_base(adapter, (u32) addr); | 236 | *err = __qlcnic_set_win_base(adapter, (u32) addr); |
| 237 | if (!ret) { | 237 | if (!*err) { |
| 238 | return QLCRDX(ahw, QLCNIC_WILDCARD); | 238 | return QLCRDX(ahw, QLCNIC_WILDCARD); |
| 239 | } else { | 239 | } else { |
| 240 | dev_err(&adapter->pdev->dev, | 240 | dev_err(&adapter->pdev->dev, |
| 241 | "%s failed, addr = 0x%x\n", __func__, (int)addr); | 241 | "%s failed, addr = 0x%lx\n", __func__, addr); |
| 242 | return -EIO; | 242 | return -EIO; |
| 243 | } | 243 | } |
| 244 | } | 244 | } |
| @@ -561,7 +561,7 @@ void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter) | |||
| 561 | void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, | 561 | void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, |
| 562 | loff_t offset, size_t size) | 562 | loff_t offset, size_t size) |
| 563 | { | 563 | { |
| 564 | int ret; | 564 | int ret = 0; |
| 565 | u32 data; | 565 | u32 data; |
| 566 | 566 | ||
| 567 | if (qlcnic_api_lock(adapter)) { | 567 | if (qlcnic_api_lock(adapter)) { |
| @@ -571,7 +571,7 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, | |||
| 571 | return; | 571 | return; |
| 572 | } | 572 | } |
| 573 | 573 | ||
| 574 | ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset); | 574 | data = QLCRD32(adapter, (u32) offset, &ret); |
| 575 | qlcnic_api_unlock(adapter); | 575 | qlcnic_api_unlock(adapter); |
| 576 | 576 | ||
| 577 | if (ret == -EIO) { | 577 | if (ret == -EIO) { |
| @@ -580,7 +580,6 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, | |||
| 580 | __func__, (u32)offset); | 580 | __func__, (u32)offset); |
| 581 | return; | 581 | return; |
| 582 | } | 582 | } |
| 583 | data = ret; | ||
| 584 | memcpy(buf, &data, size); | 583 | memcpy(buf, &data, size); |
| 585 | } | 584 | } |
| 586 | 585 | ||
| @@ -2075,18 +2074,25 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter) | |||
| 2075 | static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, | 2074 | static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, |
| 2076 | u32 data[]) | 2075 | u32 data[]) |
| 2077 | { | 2076 | { |
| 2077 | struct qlcnic_hardware_context *ahw = adapter->ahw; | ||
| 2078 | u8 link_status, duplex; | 2078 | u8 link_status, duplex; |
| 2079 | /* link speed */ | 2079 | /* link speed */ |
| 2080 | link_status = LSB(data[3]) & 1; | 2080 | link_status = LSB(data[3]) & 1; |
| 2081 | adapter->ahw->link_speed = MSW(data[2]); | 2081 | if (link_status) { |
| 2082 | adapter->ahw->link_autoneg = MSB(MSW(data[3])); | 2082 | ahw->link_speed = MSW(data[2]); |
| 2083 | adapter->ahw->module_type = MSB(LSW(data[3])); | 2083 | duplex = LSB(MSW(data[3])); |
| 2084 | duplex = LSB(MSW(data[3])); | 2084 | if (duplex) |
| 2085 | if (duplex) | 2085 | ahw->link_duplex = DUPLEX_FULL; |
| 2086 | adapter->ahw->link_duplex = DUPLEX_FULL; | 2086 | else |
| 2087 | else | 2087 | ahw->link_duplex = DUPLEX_HALF; |
| 2088 | adapter->ahw->link_duplex = DUPLEX_HALF; | 2088 | } else { |
| 2089 | adapter->ahw->has_link_events = 1; | 2089 | ahw->link_speed = SPEED_UNKNOWN; |
| 2090 | ahw->link_duplex = DUPLEX_UNKNOWN; | ||
| 2091 | } | ||
| 2092 | |||
| 2093 | ahw->link_autoneg = MSB(MSW(data[3])); | ||
| 2094 | ahw->module_type = MSB(LSW(data[3])); | ||
| 2095 | ahw->has_link_events = 1; | ||
| 2090 | qlcnic_advert_link_change(adapter, link_status); | 2096 | qlcnic_advert_link_change(adapter, link_status); |
| 2091 | } | 2097 | } |
| 2092 | 2098 | ||
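The link-AEN rework caches speed and duplex only while the event reports the link up, and falls back to the ethtool "unknown" sentinels on link down instead of leaving stale values for ethtool to report. A small sketch of that bookkeeping, assuming the uapi ethtool header provides the SPEED_UNKNOWN/DUPLEX_UNKNOWN constants; the struct and field names are local to the example:

```c
/* Sketch: report the ethtool "unknown" sentinels while the link is
 * down rather than the last speed/duplex seen. */
#include <stdio.h>
#include <linux/ethtool.h>	/* SPEED_UNKNOWN, DUPLEX_* */

struct link_state {
	unsigned int speed;
	unsigned char duplex;
};

static void handle_link_event(struct link_state *ls, int up,
			      unsigned int speed, int full_duplex)
{
	if (up) {
		ls->speed = speed;
		ls->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		ls->speed = SPEED_UNKNOWN;
		ls->duplex = DUPLEX_UNKNOWN;
	}
}

int main(void)
{
	struct link_state ls;

	handle_link_event(&ls, 0, 0, 0);
	printf("speed=%u duplex=0x%x\n", ls.speed, ls.duplex);
	return 0;
}
```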
| @@ -2384,9 +2390,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, | |||
| 2384 | u32 flash_addr, u8 *p_data, | 2390 | u32 flash_addr, u8 *p_data, |
| 2385 | int count) | 2391 | int count) |
| 2386 | { | 2392 | { |
| 2387 | int i, ret; | 2393 | u32 word, range, flash_offset, addr = flash_addr, ret; |
| 2388 | u32 word, range, flash_offset, addr = flash_addr; | ||
| 2389 | ulong indirect_add, direct_window; | 2394 | ulong indirect_add, direct_window; |
| 2395 | int i, err = 0; | ||
| 2390 | 2396 | ||
| 2391 | flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1); | 2397 | flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1); |
| 2392 | if (addr & 0x3) { | 2398 | if (addr & 0x3) { |
| @@ -2404,10 +2410,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, | |||
| 2404 | /* Multi sector read */ | 2410 | /* Multi sector read */ |
| 2405 | for (i = 0; i < count; i++) { | 2411 | for (i = 0; i < count; i++) { |
| 2406 | indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); | 2412 | indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); |
| 2407 | ret = qlcnic_83xx_rd_reg_indirect(adapter, | 2413 | ret = QLCRD32(adapter, indirect_add, &err); |
| 2408 | indirect_add); | 2414 | if (err == -EIO) |
| 2409 | if (ret == -EIO) | 2415 | return err; |
| 2410 | return -EIO; | ||
| 2411 | 2416 | ||
| 2412 | word = ret; | 2417 | word = ret; |
| 2413 | *(u32 *)p_data = word; | 2418 | *(u32 *)p_data = word; |
| @@ -2428,10 +2433,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, | |||
| 2428 | /* Single sector read */ | 2433 | /* Single sector read */ |
| 2429 | for (i = 0; i < count; i++) { | 2434 | for (i = 0; i < count; i++) { |
| 2430 | indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); | 2435 | indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); |
| 2431 | ret = qlcnic_83xx_rd_reg_indirect(adapter, | 2436 | ret = QLCRD32(adapter, indirect_add, &err); |
| 2432 | indirect_add); | 2437 | if (err == -EIO) |
| 2433 | if (ret == -EIO) | 2438 | return err; |
| 2434 | return -EIO; | ||
| 2435 | 2439 | ||
| 2436 | word = ret; | 2440 | word = ret; |
| 2437 | *(u32 *)p_data = word; | 2441 | *(u32 *)p_data = word; |
| @@ -2447,10 +2451,13 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter) | |||
| 2447 | { | 2451 | { |
| 2448 | u32 status; | 2452 | u32 status; |
| 2449 | int retries = QLC_83XX_FLASH_READ_RETRY_COUNT; | 2453 | int retries = QLC_83XX_FLASH_READ_RETRY_COUNT; |
| 2454 | int err = 0; | ||
| 2450 | 2455 | ||
| 2451 | do { | 2456 | do { |
| 2452 | status = qlcnic_83xx_rd_reg_indirect(adapter, | 2457 | status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err); |
| 2453 | QLC_83XX_FLASH_STATUS); | 2458 | if (err == -EIO) |
| 2459 | return err; | ||
| 2460 | |||
| 2454 | if ((status & QLC_83XX_FLASH_STATUS_READY) == | 2461 | if ((status & QLC_83XX_FLASH_STATUS_READY) == |
| 2455 | QLC_83XX_FLASH_STATUS_READY) | 2462 | QLC_83XX_FLASH_STATUS_READY) |
| 2456 | break; | 2463 | break; |
| @@ -2502,7 +2509,8 @@ int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter) | |||
| 2502 | 2509 | ||
| 2503 | int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) | 2510 | int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) |
| 2504 | { | 2511 | { |
| 2505 | int ret, mfg_id; | 2512 | int ret, err = 0; |
| 2513 | u32 mfg_id; | ||
| 2506 | 2514 | ||
| 2507 | if (qlcnic_83xx_lock_flash(adapter)) | 2515 | if (qlcnic_83xx_lock_flash(adapter)) |
| 2508 | return -EIO; | 2516 | return -EIO; |
| @@ -2517,9 +2525,11 @@ int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) | |||
| 2517 | return -EIO; | 2525 | return -EIO; |
| 2518 | } | 2526 | } |
| 2519 | 2527 | ||
| 2520 | mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA); | 2528 | mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err); |
| 2521 | if (mfg_id == -EIO) | 2529 | if (err == -EIO) { |
| 2522 | return -EIO; | 2530 | qlcnic_83xx_unlock_flash(adapter); |
| 2531 | return err; | ||
| 2532 | } | ||
| 2523 | 2533 | ||
| 2524 | adapter->flash_mfg_id = (mfg_id & 0xFF); | 2534 | adapter->flash_mfg_id = (mfg_id & 0xFF); |
| 2525 | qlcnic_83xx_unlock_flash(adapter); | 2535 | qlcnic_83xx_unlock_flash(adapter); |
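The new error path in the flash manufacturer-ID read also releases the flash lock before returning; previously a failed indirect read bailed out with the lock still held. A user-space sketch of the unlock-on-every-path rule, with a pthread mutex standing in for the flash semaphore and illustrative names throughout:

```c
/* Sketch: once the lock is taken, every return path must drop it,
 * including the newly added error path. */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t flash_lock = PTHREAD_MUTEX_INITIALIZER;

static int read_mfg_id(int simulate_io_error, unsigned int *mfg_id)
{
	pthread_mutex_lock(&flash_lock);

	if (simulate_io_error) {
		pthread_mutex_unlock(&flash_lock); /* the fix: unlock first */
		return -5;			   /* -EIO */
	}

	*mfg_id = 0xEF;				   /* pretend flash read */
	pthread_mutex_unlock(&flash_lock);
	return 0;
}

int main(void)
{
	unsigned int id = 0;

	printf("ok path:  %d\n", read_mfg_id(0, &id));
	printf("err path: %d\n", read_mfg_id(1, &id));
	return 0;
}
```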
| @@ -2636,7 +2646,7 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr, | |||
| 2636 | u32 *p_data, int count) | 2646 | u32 *p_data, int count) |
| 2637 | { | 2647 | { |
| 2638 | u32 temp; | 2648 | u32 temp; |
| 2639 | int ret = -EIO; | 2649 | int ret = -EIO, err = 0; |
| 2640 | 2650 | ||
| 2641 | if ((count < QLC_83XX_FLASH_WRITE_MIN) || | 2651 | if ((count < QLC_83XX_FLASH_WRITE_MIN) || |
| 2642 | (count > QLC_83XX_FLASH_WRITE_MAX)) { | 2652 | (count > QLC_83XX_FLASH_WRITE_MAX)) { |
| @@ -2645,8 +2655,10 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr, | |||
| 2645 | return -EIO; | 2655 | return -EIO; |
| 2646 | } | 2656 | } |
| 2647 | 2657 | ||
| 2648 | temp = qlcnic_83xx_rd_reg_indirect(adapter, | 2658 | temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err); |
| 2649 | QLC_83XX_FLASH_SPI_CONTROL); | 2659 | if (err == -EIO) |
| 2660 | return err; | ||
| 2661 | |||
| 2650 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, | 2662 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, |
| 2651 | (temp | QLC_83XX_FLASH_SPI_CTRL)); | 2663 | (temp | QLC_83XX_FLASH_SPI_CTRL)); |
| 2652 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, | 2664 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, |
| @@ -2695,13 +2707,18 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr, | |||
| 2695 | return -EIO; | 2707 | return -EIO; |
| 2696 | } | 2708 | } |
| 2697 | 2709 | ||
| 2698 | ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS); | 2710 | ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err); |
| 2711 | if (err == -EIO) | ||
| 2712 | return err; | ||
| 2713 | |||
| 2699 | if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) { | 2714 | if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) { |
| 2700 | dev_err(&adapter->pdev->dev, "%s: failed at %d\n", | 2715 | dev_err(&adapter->pdev->dev, "%s: failed at %d\n", |
| 2701 | __func__, __LINE__); | 2716 | __func__, __LINE__); |
| 2702 | /* Operation failed, clear error bit */ | 2717 | /* Operation failed, clear error bit */ |
| 2703 | temp = qlcnic_83xx_rd_reg_indirect(adapter, | 2718 | temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err); |
| 2704 | QLC_83XX_FLASH_SPI_CONTROL); | 2719 | if (err == -EIO) |
| 2720 | return err; | ||
| 2721 | |||
| 2705 | qlcnic_83xx_wrt_reg_indirect(adapter, | 2722 | qlcnic_83xx_wrt_reg_indirect(adapter, |
| 2706 | QLC_83XX_FLASH_SPI_CONTROL, | 2723 | QLC_83XX_FLASH_SPI_CONTROL, |
| 2707 | (temp | QLC_83XX_FLASH_SPI_CTRL)); | 2724 | (temp | QLC_83XX_FLASH_SPI_CTRL)); |
| @@ -2823,6 +2840,7 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, | |||
| 2823 | { | 2840 | { |
| 2824 | int i, j, ret = 0; | 2841 | int i, j, ret = 0; |
| 2825 | u32 temp; | 2842 | u32 temp; |
| 2843 | int err = 0; | ||
| 2826 | 2844 | ||
| 2827 | /* Check alignment */ | 2845 | /* Check alignment */ |
| 2828 | if (addr & 0xF) | 2846 | if (addr & 0xF) |
| @@ -2855,8 +2873,12 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, | |||
| 2855 | QLCNIC_TA_WRITE_START); | 2873 | QLCNIC_TA_WRITE_START); |
| 2856 | 2874 | ||
| 2857 | for (j = 0; j < MAX_CTL_CHECK; j++) { | 2875 | for (j = 0; j < MAX_CTL_CHECK; j++) { |
| 2858 | temp = qlcnic_83xx_rd_reg_indirect(adapter, | 2876 | temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err); |
| 2859 | QLCNIC_MS_CTRL); | 2877 | if (err == -EIO) { |
| 2878 | mutex_unlock(&adapter->ahw->mem_lock); | ||
| 2879 | return err; | ||
| 2880 | } | ||
| 2881 | |||
| 2860 | if ((temp & TA_CTL_BUSY) == 0) | 2882 | if ((temp & TA_CTL_BUSY) == 0) |
| 2861 | break; | 2883 | break; |
| 2862 | } | 2884 | } |
| @@ -2878,9 +2900,9 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, | |||
| 2878 | int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, | 2900 | int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, |
| 2879 | u8 *p_data, int count) | 2901 | u8 *p_data, int count) |
| 2880 | { | 2902 | { |
| 2881 | int i, ret; | 2903 | u32 word, addr = flash_addr, ret; |
| 2882 | u32 word, addr = flash_addr; | ||
| 2883 | ulong indirect_addr; | 2904 | ulong indirect_addr; |
| 2905 | int i, err = 0; | ||
| 2884 | 2906 | ||
| 2885 | if (qlcnic_83xx_lock_flash(adapter) != 0) | 2907 | if (qlcnic_83xx_lock_flash(adapter) != 0) |
| 2886 | return -EIO; | 2908 | return -EIO; |
| @@ -2900,10 +2922,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, | |||
| 2900 | } | 2922 | } |
| 2901 | 2923 | ||
| 2902 | indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); | 2924 | indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); |
| 2903 | ret = qlcnic_83xx_rd_reg_indirect(adapter, | 2925 | ret = QLCRD32(adapter, indirect_addr, &err); |
| 2904 | indirect_addr); | 2926 | if (err == -EIO) |
| 2905 | if (ret == -EIO) | 2927 | return err; |
| 2906 | return -EIO; | 2928 | |
| 2907 | word = ret; | 2929 | word = ret; |
| 2908 | *(u32 *)p_data = word; | 2930 | *(u32 *)p_data = word; |
| 2909 | p_data = p_data + 4; | 2931 | p_data = p_data + 4; |
| @@ -3014,8 +3036,8 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, | |||
| 3014 | } | 3036 | } |
| 3015 | 3037 | ||
| 3016 | if (ahw->port_type == QLCNIC_XGBE) { | 3038 | if (ahw->port_type == QLCNIC_XGBE) { |
| 3017 | ecmd->supported = SUPPORTED_1000baseT_Full; | 3039 | ecmd->supported = SUPPORTED_10000baseT_Full; |
| 3018 | ecmd->advertising = ADVERTISED_1000baseT_Full; | 3040 | ecmd->advertising = ADVERTISED_10000baseT_Full; |
| 3019 | } else { | 3041 | } else { |
| 3020 | ecmd->supported = (SUPPORTED_10baseT_Half | | 3042 | ecmd->supported = (SUPPORTED_10baseT_Half | |
| 3021 | SUPPORTED_10baseT_Full | | 3043 | SUPPORTED_10baseT_Full | |
| @@ -3244,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) | |||
| 3244 | u8 val; | 3266 | u8 val; |
| 3245 | int ret, max_sds_rings = adapter->max_sds_rings; | 3267 | int ret, max_sds_rings = adapter->max_sds_rings; |
| 3246 | 3268 | ||
| 3269 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { | ||
| 3270 | netdev_info(netdev, "Device is resetting\n"); | ||
| 3271 | return -EBUSY; | ||
| 3272 | } | ||
| 3273 | |||
| 3247 | if (qlcnic_get_diag_lock(adapter)) { | 3274 | if (qlcnic_get_diag_lock(adapter)) { |
| 3248 | netdev_info(netdev, "Device in diagnostics mode\n"); | 3275 | netdev_info(netdev, "Device in diagnostics mode\n"); |
| 3249 | return -EBUSY; | 3276 | return -EBUSY; |
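The added check makes the interrupt test refuse to start while a reset is in flight, returning -EBUSY instead of racing the reset path for the device. A sketch of the guard; the flag word and bit constant are illustrative, not the driver's:

```c
/* Sketch: reject a diagnostic with -EBUSY while a reset is pending. */
#include <stdio.h>
#include <errno.h>

#define STATE_RESETTING (1UL << 0)

static int interrupt_test(unsigned long state)
{
	if (state & STATE_RESETTING)
		return -EBUSY;	/* device is resetting, try again later */

	/* ... run the actual interrupt test here ... */
	return 0;
}

int main(void)
{
	printf("during reset: %d\n", interrupt_test(STATE_RESETTING));
	printf("idle:         %d\n", interrupt_test(0));
	return 0;
}
```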
| @@ -3369,7 +3396,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter, | |||
| 3369 | 3396 | ||
| 3370 | static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter) | 3397 | static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter) |
| 3371 | { | 3398 | { |
| 3372 | int ret; | 3399 | int ret, err = 0; |
| 3400 | u32 temp; | ||
| 3373 | 3401 | ||
| 3374 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, | 3402 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, |
| 3375 | QLC_83XX_FLASH_OEM_READ_SIG); | 3403 | QLC_83XX_FLASH_OEM_READ_SIG); |
| @@ -3379,8 +3407,11 @@ static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter) | |||
| 3379 | if (ret) | 3407 | if (ret) |
| 3380 | return -EIO; | 3408 | return -EIO; |
| 3381 | 3409 | ||
| 3382 | ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA); | 3410 | temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err); |
| 3383 | return ret & 0xFF; | 3411 | if (err == -EIO) |
| 3412 | return err; | ||
| 3413 | |||
| 3414 | return temp & 0xFF; | ||
| 3384 | } | 3415 | } |
| 3385 | 3416 | ||
| 3386 | int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) | 3417 | int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 2548d1403d75..272f56a2e14b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | |||
| @@ -508,7 +508,7 @@ void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *); | |||
| 508 | void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *); | 508 | void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *); |
| 509 | void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); | 509 | void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); |
| 510 | void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); | 510 | void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); |
| 511 | int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong); | 511 | int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *); |
| 512 | int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32); | 512 | int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32); |
| 513 | void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []); | 513 | void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []); |
| 514 | int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); | 514 | int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index f41dfab1e9a3..345d987aede4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
| @@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) | |||
| 629 | return -EIO; | 629 | return -EIO; |
| 630 | } | 630 | } |
| 631 | 631 | ||
| 632 | qlcnic_set_drv_version(adapter); | 632 | if (adapter->portnum == 0) |
| 633 | qlcnic_set_drv_version(adapter); | ||
| 633 | qlcnic_83xx_idc_attach_driver(adapter); | 634 | qlcnic_83xx_idc_attach_driver(adapter); |
| 634 | 635 | ||
| 635 | return 0; | 636 | return 0; |
| @@ -1303,8 +1304,11 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) | |||
| 1303 | { | 1304 | { |
| 1304 | int i, j; | 1305 | int i, j; |
| 1305 | u32 val = 0, val1 = 0, reg = 0; | 1306 | u32 val = 0, val1 = 0, reg = 0; |
| 1307 | int err = 0; | ||
| 1306 | 1308 | ||
| 1307 | val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG); | 1309 | val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err); |
| 1310 | if (err == -EIO) | ||
| 1311 | return; | ||
| 1308 | dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val); | 1312 | dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val); |
| 1309 | 1313 | ||
| 1310 | for (j = 0; j < 2; j++) { | 1314 | for (j = 0; j < 2; j++) { |
| @@ -1318,7 +1322,9 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) | |||
| 1318 | reg = QLC_83XX_PORT1_THRESHOLD; | 1322 | reg = QLC_83XX_PORT1_THRESHOLD; |
| 1319 | } | 1323 | } |
| 1320 | for (i = 0; i < 8; i++) { | 1324 | for (i = 0; i < 8; i++) { |
| 1321 | val = QLCRD32(adapter, reg + (i * 0x4)); | 1325 | val = QLCRD32(adapter, reg + (i * 0x4), &err); |
| 1326 | if (err == -EIO) | ||
| 1327 | return; | ||
| 1322 | dev_info(&adapter->pdev->dev, "0x%x ", val); | 1328 | dev_info(&adapter->pdev->dev, "0x%x ", val); |
| 1323 | } | 1329 | } |
| 1324 | dev_info(&adapter->pdev->dev, "\n"); | 1330 | dev_info(&adapter->pdev->dev, "\n"); |
| @@ -1335,8 +1341,10 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) | |||
| 1335 | reg = QLC_83XX_PORT1_TC_MC_REG; | 1341 | reg = QLC_83XX_PORT1_TC_MC_REG; |
| 1336 | } | 1342 | } |
| 1337 | for (i = 0; i < 4; i++) { | 1343 | for (i = 0; i < 4; i++) { |
| 1338 | val = QLCRD32(adapter, reg + (i * 0x4)); | 1344 | val = QLCRD32(adapter, reg + (i * 0x4), &err); |
| 1339 | dev_info(&adapter->pdev->dev, "0x%x ", val); | 1345 | if (err == -EIO) |
| 1346 | return; | ||
| 1347 | dev_info(&adapter->pdev->dev, "0x%x ", val); | ||
| 1340 | } | 1348 | } |
| 1341 | dev_info(&adapter->pdev->dev, "\n"); | 1349 | dev_info(&adapter->pdev->dev, "\n"); |
| 1342 | } | 1350 | } |
| @@ -1352,17 +1360,25 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) | |||
| 1352 | reg = QLC_83XX_PORT1_TC_STATS; | 1360 | reg = QLC_83XX_PORT1_TC_STATS; |
| 1353 | } | 1361 | } |
| 1354 | for (i = 7; i >= 0; i--) { | 1362 | for (i = 7; i >= 0; i--) { |
| 1355 | val = QLCRD32(adapter, reg); | 1363 | val = QLCRD32(adapter, reg, &err); |
| 1364 | if (err == -EIO) | ||
| 1365 | return; | ||
| 1356 | val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ | 1366 | val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ |
| 1357 | QLCWR32(adapter, reg, (val | (i << 29))); | 1367 | QLCWR32(adapter, reg, (val | (i << 29))); |
| 1358 | val = QLCRD32(adapter, reg); | 1368 | val = QLCRD32(adapter, reg, &err); |
| 1369 | if (err == -EIO) | ||
| 1370 | return; | ||
| 1359 | dev_info(&adapter->pdev->dev, "0x%x ", val); | 1371 | dev_info(&adapter->pdev->dev, "0x%x ", val); |
| 1360 | } | 1372 | } |
| 1361 | dev_info(&adapter->pdev->dev, "\n"); | 1373 | dev_info(&adapter->pdev->dev, "\n"); |
| 1362 | } | 1374 | } |
| 1363 | 1375 | ||
| 1364 | val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD); | 1376 | val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err); |
| 1365 | val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD); | 1377 | if (err == -EIO) |
| 1378 | return; | ||
| 1379 | val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err); | ||
| 1380 | if (err == -EIO) | ||
| 1381 | return; | ||
| 1366 | dev_info(&adapter->pdev->dev, | 1382 | dev_info(&adapter->pdev->dev, |
| 1367 | "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", | 1383 | "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", |
| 1368 | val, val1); | 1384 | val, val1); |
| @@ -1425,7 +1441,7 @@ static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter) | |||
| 1425 | static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) | 1441 | static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) |
| 1426 | { | 1442 | { |
| 1427 | u32 heartbeat, peg_status; | 1443 | u32 heartbeat, peg_status; |
| 1428 | int retries, ret = -EIO; | 1444 | int retries, ret = -EIO, err = 0; |
| 1429 | 1445 | ||
| 1430 | retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; | 1446 | retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; |
| 1431 | p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev, | 1447 | p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev, |
| @@ -1453,11 +1469,11 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) | |||
| 1453 | "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" | 1469 | "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" |
| 1454 | "PEG_NET_4_PC: 0x%x\n", peg_status, | 1470 | "PEG_NET_4_PC: 0x%x\n", peg_status, |
| 1455 | QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), | 1471 | QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), |
| 1456 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0), | 1472 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err), |
| 1457 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1), | 1473 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err), |
| 1458 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2), | 1474 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err), |
| 1459 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3), | 1475 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err), |
| 1460 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4)); | 1476 | QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err)); |
| 1461 | 1477 | ||
| 1462 | if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) | 1478 | if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) |
| 1463 | dev_err(&p_dev->pdev->dev, | 1479 | dev_err(&p_dev->pdev->dev, |
| @@ -1501,18 +1517,22 @@ int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev) | |||
| 1501 | static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr, | 1517 | static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr, |
| 1502 | int duration, u32 mask, u32 status) | 1518 | int duration, u32 mask, u32 status) |
| 1503 | { | 1519 | { |
| 1520 | int timeout_error, err = 0; | ||
| 1504 | u32 value; | 1521 | u32 value; |
| 1505 | int timeout_error; | ||
| 1506 | u8 retries; | 1522 | u8 retries; |
| 1507 | 1523 | ||
| 1508 | value = qlcnic_83xx_rd_reg_indirect(p_dev, addr); | 1524 | value = QLCRD32(p_dev, addr, &err); |
| 1525 | if (err == -EIO) | ||
| 1526 | return err; | ||
| 1509 | retries = duration / 10; | 1527 | retries = duration / 10; |
| 1510 | 1528 | ||
| 1511 | do { | 1529 | do { |
| 1512 | if ((value & mask) != status) { | 1530 | if ((value & mask) != status) { |
| 1513 | timeout_error = 1; | 1531 | timeout_error = 1; |
| 1514 | msleep(duration / 10); | 1532 | msleep(duration / 10); |
| 1515 | value = qlcnic_83xx_rd_reg_indirect(p_dev, addr); | 1533 | value = QLCRD32(p_dev, addr, &err); |
| 1534 | if (err == -EIO) | ||
| 1535 | return err; | ||
| 1516 | } else { | 1536 | } else { |
| 1517 | timeout_error = 0; | 1537 | timeout_error = 0; |
| 1518 | break; | 1538 | break; |
| @@ -1606,9 +1626,12 @@ int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) | |||
| 1606 | static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev, | 1626 | static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev, |
| 1607 | u32 raddr, u32 waddr) | 1627 | u32 raddr, u32 waddr) |
| 1608 | { | 1628 | { |
| 1609 | int value; | 1629 | int err = 0; |
| 1630 | u32 value; | ||
| 1610 | 1631 | ||
| 1611 | value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr); | 1632 | value = QLCRD32(p_dev, raddr, &err); |
| 1633 | if (err == -EIO) | ||
| 1634 | return; | ||
| 1612 | qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); | 1635 | qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); |
| 1613 | } | 1636 | } |
| 1614 | 1637 | ||
| @@ -1617,12 +1640,16 @@ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev, | |||
| 1617 | u32 raddr, u32 waddr, | 1640 | u32 raddr, u32 waddr, |
| 1618 | struct qlc_83xx_rmw *p_rmw_hdr) | 1641 | struct qlc_83xx_rmw *p_rmw_hdr) |
| 1619 | { | 1642 | { |
| 1620 | int value; | 1643 | int err = 0; |
| 1644 | u32 value; | ||
| 1621 | 1645 | ||
| 1622 | if (p_rmw_hdr->index_a) | 1646 | if (p_rmw_hdr->index_a) { |
| 1623 | value = p_dev->ahw->reset.array[p_rmw_hdr->index_a]; | 1647 | value = p_dev->ahw->reset.array[p_rmw_hdr->index_a]; |
| 1624 | else | 1648 | } else { |
| 1625 | value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr); | 1649 | value = QLCRD32(p_dev, raddr, &err); |
| 1650 | if (err == -EIO) | ||
| 1651 | return; | ||
| 1652 | } | ||
| 1626 | 1653 | ||
| 1627 | value &= p_rmw_hdr->mask; | 1654 | value &= p_rmw_hdr->mask; |
| 1628 | value <<= p_rmw_hdr->shl; | 1655 | value <<= p_rmw_hdr->shl; |
| @@ -1675,7 +1702,7 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev, | |||
| 1675 | long delay; | 1702 | long delay; |
| 1676 | struct qlc_83xx_entry *entry; | 1703 | struct qlc_83xx_entry *entry; |
| 1677 | struct qlc_83xx_poll *poll; | 1704 | struct qlc_83xx_poll *poll; |
| 1678 | int i; | 1705 | int i, err = 0; |
| 1679 | unsigned long arg1, arg2; | 1706 | unsigned long arg1, arg2; |
| 1680 | 1707 | ||
| 1681 | poll = (struct qlc_83xx_poll *)((char *)p_hdr + | 1708 | poll = (struct qlc_83xx_poll *)((char *)p_hdr + |
| @@ -1699,10 +1726,12 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev, | |||
| 1699 | arg1, delay, | 1726 | arg1, delay, |
| 1700 | poll->mask, | 1727 | poll->mask, |
| 1701 | poll->status)){ | 1728 | poll->status)){ |
| 1702 | qlcnic_83xx_rd_reg_indirect(p_dev, | 1729 | QLCRD32(p_dev, arg1, &err); |
| 1703 | arg1); | 1730 | if (err == -EIO) |
| 1704 | qlcnic_83xx_rd_reg_indirect(p_dev, | 1731 | return; |
| 1705 | arg2); | 1732 | QLCRD32(p_dev, arg2, &err); |
| 1733 | if (err == -EIO) | ||
| 1734 | return; | ||
| 1706 | } | 1735 | } |
| 1707 | } | 1736 | } |
| 1708 | } | 1737 | } |
| @@ -1768,7 +1797,7 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev, | |||
| 1768 | struct qlc_83xx_entry_hdr *p_hdr) | 1797 | struct qlc_83xx_entry_hdr *p_hdr) |
| 1769 | { | 1798 | { |
| 1770 | long delay; | 1799 | long delay; |
| 1771 | int index, i, j; | 1800 | int index, i, j, err; |
| 1772 | struct qlc_83xx_quad_entry *entry; | 1801 | struct qlc_83xx_quad_entry *entry; |
| 1773 | struct qlc_83xx_poll *poll; | 1802 | struct qlc_83xx_poll *poll; |
| 1774 | unsigned long addr; | 1803 | unsigned long addr; |
| @@ -1788,7 +1817,10 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev, | |||
| 1788 | poll->mask, poll->status)){ | 1817 | poll->mask, poll->status)){ |
| 1789 | index = p_dev->ahw->reset.array_index; | 1818 | index = p_dev->ahw->reset.array_index; |
| 1790 | addr = entry->dr_addr; | 1819 | addr = entry->dr_addr; |
| 1791 | j = qlcnic_83xx_rd_reg_indirect(p_dev, addr); | 1820 | j = QLCRD32(p_dev, addr, &err); |
| 1821 | if (err == -EIO) | ||
| 1822 | return; | ||
| 1823 | |||
| 1792 | p_dev->ahw->reset.array[index++] = j; | 1824 | p_dev->ahw->reset.array[index++] = j; |
| 1793 | 1825 | ||
| 1794 | if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES) | 1826 | if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES) |
| @@ -2123,6 +2155,8 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) | |||
| 2123 | set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); | 2155 | set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); |
| 2124 | qlcnic_83xx_clear_function_resources(adapter); | 2156 | qlcnic_83xx_clear_function_resources(adapter); |
| 2125 | 2157 | ||
| 2158 | INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); | ||
| 2159 | |||
| 2126 | /* register for NIC IDC AEN Events */ | 2160 | /* register for NIC IDC AEN Events */ |
| 2127 | qlcnic_83xx_register_nic_idc_func(adapter, 1); | 2161 | qlcnic_83xx_register_nic_idc_func(adapter, 1); |
| 2128 | 2162 | ||
| @@ -2140,8 +2174,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) | |||
| 2140 | if (adapter->nic_ops->init_driver(adapter)) | 2174 | if (adapter->nic_ops->init_driver(adapter)) |
| 2141 | return -EIO; | 2175 | return -EIO; |
| 2142 | 2176 | ||
| 2143 | INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); | ||
| 2144 | |||
| 2145 | /* Periodically monitor device status */ | 2177 | /* Periodically monitor device status */ |
| 2146 | qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); | 2178 | qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); |
| 2147 | 2179 | ||
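Moving INIT_DELAYED_WORK() ahead of the IDC AEN registration ensures the work item is initialized before any event source that might queue it is live; with the old ordering, an AEN arriving early could schedule an uninitialized work struct. A kernel-context fragment of the ordering rule, with placeholder names (not buildable standalone):

```c
/* Kernel-context sketch: initialize a work item before registering
 * for the events that may schedule it. */
#include <linux/workqueue.h>

struct example_adapter {
	struct delayed_work aen_work;
};

static void example_aen_work(struct work_struct *work)
{
	/* handle the asynchronous event notification */
}

static void example_init(struct example_adapter *ad)
{
	/* initialize first ... */
	INIT_DELAYED_WORK(&ad->aen_work, example_aen_work);
	/* ... then register for events that may queue_delayed_work() it */
}
```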
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 0581a484ceb5..d09389b33474 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | |||
| @@ -104,7 +104,7 @@ static u32 | |||
| 104 | qlcnic_poll_rsp(struct qlcnic_adapter *adapter) | 104 | qlcnic_poll_rsp(struct qlcnic_adapter *adapter) |
| 105 | { | 105 | { |
| 106 | u32 rsp; | 106 | u32 rsp; |
| 107 | int timeout = 0; | 107 | int timeout = 0, err = 0; |
| 108 | 108 | ||
| 109 | do { | 109 | do { |
| 110 | /* give at least 1ms for firmware to respond */ | 110 | /* give at least 1ms for firmware to respond */ |
| @@ -113,7 +113,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter) | |||
| 113 | if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) | 113 | if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) |
| 114 | return QLCNIC_CDRP_RSP_TIMEOUT; | 114 | return QLCNIC_CDRP_RSP_TIMEOUT; |
| 115 | 115 | ||
| 116 | rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET); | 116 | rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err); |
| 117 | } while (!QLCNIC_CDRP_IS_RSP(rsp)); | 117 | } while (!QLCNIC_CDRP_IS_RSP(rsp)); |
| 118 | 118 | ||
| 119 | return rsp; | 119 | return rsp; |
| @@ -122,7 +122,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter) | |||
| 122 | int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, | 122 | int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, |
| 123 | struct qlcnic_cmd_args *cmd) | 123 | struct qlcnic_cmd_args *cmd) |
| 124 | { | 124 | { |
| 125 | int i; | 125 | int i, err = 0; |
| 126 | u32 rsp; | 126 | u32 rsp; |
| 127 | u32 signature; | 127 | u32 signature; |
| 128 | struct pci_dev *pdev = adapter->pdev; | 128 | struct pci_dev *pdev = adapter->pdev; |
| @@ -148,7 +148,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, | |||
| 148 | dev_err(&pdev->dev, "card response timeout.\n"); | 148 | dev_err(&pdev->dev, "card response timeout.\n"); |
| 149 | cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; | 149 | cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; |
| 150 | } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { | 150 | } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { |
| 151 | cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1)); | 151 | cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err); |
| 152 | switch (cmd->rsp.arg[0]) { | 152 | switch (cmd->rsp.arg[0]) { |
| 153 | case QLCNIC_RCODE_INVALID_ARGS: | 153 | case QLCNIC_RCODE_INVALID_ARGS: |
| 154 | fmt = "CDRP invalid args: [%d]\n"; | 154 | fmt = "CDRP invalid args: [%d]\n"; |
| @@ -175,7 +175,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, | |||
| 175 | cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; | 175 | cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; |
| 176 | 176 | ||
| 177 | for (i = 1; i < cmd->rsp.num; i++) | 177 | for (i = 1; i < cmd->rsp.num; i++) |
| 178 | cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i)); | 178 | cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err); |
| 179 | 179 | ||
| 180 | /* Release semaphore */ | 180 | /* Release semaphore */ |
| 181 | qlcnic_api_unlock(adapter); | 181 | qlcnic_api_unlock(adapter); |
| @@ -210,10 +210,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd) | |||
| 210 | if (err) { | 210 | if (err) { |
| 211 | dev_info(&adapter->pdev->dev, | 211 | dev_info(&adapter->pdev->dev, |
| 212 | "Failed to set driver version in firmware\n"); | 212 | "Failed to set driver version in firmware\n"); |
| 213 | return -EIO; | 213 | err = -EIO; |
| 214 | } | 214 | } |
| 215 | 215 | qlcnic_free_mbx_args(&cmd); | |
| 216 | return 0; | 216 | return err; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | int | 219 | int |
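The set-driver-version rewrite above fixes a leak: the function allocated mailbox args but never freed them on either exit. Funneling both outcomes through one return that calls qlcnic_free_mbx_args() releases them on success and failure alike. A user-space sketch of the single-exit cleanup, with malloc standing in for the args allocation:

```c
/* Sketch: allocate once, fall through a single exit that frees on
 * every path instead of returning early and leaking. */
#include <stdio.h>
#include <stdlib.h>

static int set_drv_version(int simulate_failure)
{
	int err = 0;
	char *args = malloc(64);   /* stands in for the mailbox args */

	if (!args)
		return -12;	   /* -ENOMEM */

	if (simulate_failure)
		err = -5;	   /* -EIO: don't return early here */

	free(args);		   /* freed on every path */
	return err;
}

int main(void)
{
	printf("ok: %d, fail: %d\n", set_drv_version(0), set_drv_version(1));
	return 0;
}
```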
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 700a46324d09..7aac23ab31d1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
| @@ -150,6 +150,7 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { | |||
| 150 | "Link_Test_on_offline", | 150 | "Link_Test_on_offline", |
| 151 | "Interrupt_Test_offline", | 151 | "Interrupt_Test_offline", |
| 152 | "Internal_Loopback_offline", | 152 | "Internal_Loopback_offline", |
| 153 | "External_Loopback_offline", | ||
| 153 | "EEPROM_Test_offline" | 154 | "EEPROM_Test_offline" |
| 154 | }; | 155 | }; |
| 155 | 156 | ||
| @@ -266,7 +267,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, | |||
| 266 | { | 267 | { |
| 267 | struct qlcnic_hardware_context *ahw = adapter->ahw; | 268 | struct qlcnic_hardware_context *ahw = adapter->ahw; |
| 268 | u32 speed, reg; | 269 | u32 speed, reg; |
| 269 | int check_sfp_module = 0; | 270 | int check_sfp_module = 0, err = 0; |
| 270 | u16 pcifn = ahw->pci_func; | 271 | u16 pcifn = ahw->pci_func; |
| 271 | 272 | ||
| 272 | /* read which mode */ | 273 | /* read which mode */ |
| @@ -289,7 +290,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, | |||
| 289 | 290 | ||
| 290 | } else if (adapter->ahw->port_type == QLCNIC_XGBE) { | 291 | } else if (adapter->ahw->port_type == QLCNIC_XGBE) { |
| 291 | u32 val = 0; | 292 | u32 val = 0; |
| 292 | val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); | 293 | val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err); |
| 293 | 294 | ||
| 294 | if (val == QLCNIC_PORT_MODE_802_3_AP) { | 295 | if (val == QLCNIC_PORT_MODE_802_3_AP) { |
| 295 | ecmd->supported = SUPPORTED_1000baseT_Full; | 296 | ecmd->supported = SUPPORTED_1000baseT_Full; |
| @@ -300,9 +301,13 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, | |||
| 300 | } | 301 | } |
| 301 | 302 | ||
| 302 | if (netif_running(adapter->netdev) && ahw->has_link_events) { | 303 | if (netif_running(adapter->netdev) && ahw->has_link_events) { |
| 303 | reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); | 304 | if (ahw->linkup) { |
| 304 | speed = P3P_LINK_SPEED_VAL(pcifn, reg); | 305 | reg = QLCRD32(adapter, |
| 305 | ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; | 306 | P3P_LINK_SPEED_REG(pcifn), &err); |
| 307 | speed = P3P_LINK_SPEED_VAL(pcifn, reg); | ||
| 308 | ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; | ||
| 309 | } | ||
| 310 | |||
| 306 | ethtool_cmd_speed_set(ecmd, ahw->link_speed); | 311 | ethtool_cmd_speed_set(ecmd, ahw->link_speed); |
| 307 | ecmd->autoneg = ahw->link_autoneg; | 312 | ecmd->autoneg = ahw->link_autoneg; |
| 308 | ecmd->duplex = ahw->link_duplex; | 313 | ecmd->duplex = ahw->link_duplex; |
| @@ -463,13 +468,14 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
| 463 | static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter, | 468 | static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter, |
| 464 | u32 *regs_buff) | 469 | u32 *regs_buff) |
| 465 | { | 470 | { |
| 466 | int i, j = 0; | 471 | int i, j = 0, err = 0; |
| 467 | 472 | ||
| 468 | for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) | 473 | for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) |
| 469 | regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]); | 474 | regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]); |
| 470 | j = 0; | 475 | j = 0; |
| 471 | while (ext_diag_registers[j] != -1) | 476 | while (ext_diag_registers[j] != -1) |
| 472 | regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]); | 477 | regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++], |
| 478 | &err); | ||
| 473 | return i; | 479 | return i; |
| 474 | } | 480 | } |
| 475 | 481 | ||
| @@ -519,13 +525,16 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) | |||
| 519 | static u32 qlcnic_test_link(struct net_device *dev) | 525 | static u32 qlcnic_test_link(struct net_device *dev) |
| 520 | { | 526 | { |
| 521 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 527 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
| 528 | int err = 0; | ||
| 522 | u32 val; | 529 | u32 val; |
| 523 | 530 | ||
| 524 | if (qlcnic_83xx_check(adapter)) { | 531 | if (qlcnic_83xx_check(adapter)) { |
| 525 | val = qlcnic_83xx_test_link(adapter); | 532 | val = qlcnic_83xx_test_link(adapter); |
| 526 | return (val & 1) ? 0 : 1; | 533 | return (val & 1) ? 0 : 1; |
| 527 | } | 534 | } |
| 528 | val = QLCRD32(adapter, CRB_XG_STATE_P3P); | 535 | val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err); |
| 536 | if (err == -EIO) | ||
| 537 | return err; | ||
| 529 | val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); | 538 | val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); |
| 530 | return (val == XG_LINK_UP_P3P) ? 0 : 1; | 539 | return (val == XG_LINK_UP_P3P) ? 0 : 1; |
| 531 | } | 540 | } |
| @@ -658,6 +667,7 @@ qlcnic_get_pauseparam(struct net_device *netdev, | |||
| 658 | { | 667 | { |
| 659 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 668 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| 660 | int port = adapter->ahw->physical_port; | 669 | int port = adapter->ahw->physical_port; |
| 670 | int err = 0; | ||
| 661 | __u32 val; | 671 | __u32 val; |
| 662 | 672 | ||
| 663 | if (qlcnic_83xx_check(adapter)) { | 673 | if (qlcnic_83xx_check(adapter)) { |
| @@ -668,9 +678,13 @@ qlcnic_get_pauseparam(struct net_device *netdev, | |||
| 668 | if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) | 678 | if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) |
| 669 | return; | 679 | return; |
| 670 | /* get flow control settings */ | 680 | /* get flow control settings */ |
| 671 | val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); | 681 | val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err); |
| 682 | if (err == -EIO) | ||
| 683 | return; | ||
| 672 | pause->rx_pause = qlcnic_gb_get_rx_flowctl(val); | 684 | pause->rx_pause = qlcnic_gb_get_rx_flowctl(val); |
| 673 | val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); | 685 | val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err); |
| 686 | if (err == -EIO) | ||
| 687 | return; | ||
| 674 | switch (port) { | 688 | switch (port) { |
| 675 | case 0: | 689 | case 0: |
| 676 | pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); | 690 | pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); |
| @@ -690,7 +704,9 @@ qlcnic_get_pauseparam(struct net_device *netdev, | |||
| 690 | if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) | 704 | if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) |
| 691 | return; | 705 | return; |
| 692 | pause->rx_pause = 1; | 706 | pause->rx_pause = 1; |
| 693 | val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); | 707 | val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err); |
| 708 | if (err == -EIO) | ||
| 709 | return; | ||
| 694 | if (port == 0) | 710 | if (port == 0) |
| 695 | pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); | 711 | pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); |
| 696 | else | 712 | else |
| @@ -707,6 +723,7 @@ qlcnic_set_pauseparam(struct net_device *netdev, | |||
| 707 | { | 723 | { |
| 708 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 724 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| 709 | int port = adapter->ahw->physical_port; | 725 | int port = adapter->ahw->physical_port; |
| 726 | int err = 0; | ||
| 710 | __u32 val; | 727 | __u32 val; |
| 711 | 728 | ||
| 712 | if (qlcnic_83xx_check(adapter)) | 729 | if (qlcnic_83xx_check(adapter)) |
| @@ -717,7 +734,9 @@ qlcnic_set_pauseparam(struct net_device *netdev, | |||
| 717 | if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) | 734 | if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) |
| 718 | return -EIO; | 735 | return -EIO; |
| 719 | /* set flow control */ | 736 | /* set flow control */ |
| 720 | val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); | 737 | val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err); |
| 738 | if (err == -EIO) | ||
| 739 | return err; | ||
| 721 | 740 | ||
| 722 | if (pause->rx_pause) | 741 | if (pause->rx_pause) |
| 723 | qlcnic_gb_rx_flowctl(val); | 742 | qlcnic_gb_rx_flowctl(val); |
| @@ -728,7 +747,9 @@ qlcnic_set_pauseparam(struct net_device *netdev, | |||
| 728 | val); | 747 | val); |
| 729 | QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); | 748 | QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); |
| 730 | /* set autoneg */ | 749 | /* set autoneg */ |
| 731 | val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); | 750 | val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err); |
| 751 | if (err == -EIO) | ||
| 752 | return err; | ||
| 732 | switch (port) { | 753 | switch (port) { |
| 733 | case 0: | 754 | case 0: |
| 734 | if (pause->tx_pause) | 755 | if (pause->tx_pause) |
| @@ -764,7 +785,9 @@ qlcnic_set_pauseparam(struct net_device *netdev, | |||
| 764 | if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) | 785 | if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) |
| 765 | return -EIO; | 786 | return -EIO; |
| 766 | 787 | ||
| 767 | val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); | 788 | val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err); |
| 789 | if (err == -EIO) | ||
| 790 | return err; | ||
| 768 | if (port == 0) { | 791 | if (port == 0) { |
| 769 | if (pause->tx_pause) | 792 | if (pause->tx_pause) |
| 770 | qlcnic_xg_unset_xg0_mask(val); | 793 | qlcnic_xg_unset_xg0_mask(val); |
| @@ -788,11 +811,14 @@ static int qlcnic_reg_test(struct net_device *dev) | |||
| 788 | { | 811 | { |
| 789 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 812 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
| 790 | u32 data_read; | 813 | u32 data_read; |
| 814 | int err = 0; | ||
| 791 | 815 | ||
| 792 | if (qlcnic_83xx_check(adapter)) | 816 | if (qlcnic_83xx_check(adapter)) |
| 793 | return qlcnic_83xx_reg_test(adapter); | 817 | return qlcnic_83xx_reg_test(adapter); |
| 794 | 818 | ||
| 795 | data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); | 819 | data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err); |
| 820 | if (err == -EIO) | ||
| 821 | return err; | ||
| 796 | if ((data_read & 0xffff) != adapter->pdev->vendor) | 822 | if ((data_read & 0xffff) != adapter->pdev->vendor) |
| 797 | return 1; | 823 | return 1; |
| 798 | 824 | ||
| @@ -1026,8 +1052,15 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, | |||
| 1026 | if (data[3]) | 1052 | if (data[3]) |
| 1027 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1053 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1028 | 1054 | ||
| 1029 | data[4] = qlcnic_eeprom_test(dev); | 1055 | if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { |
| 1030 | if (data[4]) | 1056 | data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE); |
| 1057 | if (data[4]) | ||
| 1058 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
| 1059 | eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | data[5] = qlcnic_eeprom_test(dev); | ||
| 1063 | if (data[5]) | ||
| 1031 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1064 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1032 | } | 1065 | } |
| 1033 | } | 1066 | } |
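The new branch runs the external loopback only when userspace requested it with ETH_TEST_FL_EXTERNAL_LB and acknowledges completion with ETH_TEST_FL_EXTERNAL_LB_DONE; the EEPROM result moves from data[4] to data[5] to match the extra test string added earlier in the hunk. A sketch of the flag handshake, assuming the uapi ethtool header defines these flags; the test function is a stand-in:

```c
/* Sketch: honor the external-loopback request flag and report back
 * with the "done" acknowledgment flag. */
#include <stdio.h>
#include <linux/ethtool.h>

static unsigned long long run_external_loopback(void)
{
	return 0;	/* 0 == pass */
}

static void diag_test(struct ethtool_test *t, unsigned long long *data)
{
	if (t->flags & ETH_TEST_FL_EXTERNAL_LB) {
		data[4] = run_external_loopback();
		if (data[4])
			t->flags |= ETH_TEST_FL_FAILED;
		t->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
	}
}

int main(void)
{
	struct ethtool_test t = { .flags = ETH_TEST_FL_EXTERNAL_LB };
	unsigned long long data[6] = { 0 };

	diag_test(&t, data);
	printf("flags=0x%x\n", (unsigned)t.flags);
	return 0;
}
```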
| @@ -1257,17 +1290,20 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
| 1257 | { | 1290 | { |
| 1258 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 1291 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
| 1259 | u32 wol_cfg; | 1292 | u32 wol_cfg; |
| 1293 | int err = 0; | ||
| 1260 | 1294 | ||
| 1261 | if (qlcnic_83xx_check(adapter)) | 1295 | if (qlcnic_83xx_check(adapter)) |
| 1262 | return; | 1296 | return; |
| 1263 | wol->supported = 0; | 1297 | wol->supported = 0; |
| 1264 | wol->wolopts = 0; | 1298 | wol->wolopts = 0; |
| 1265 | 1299 | ||
| 1266 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); | 1300 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); |
| 1301 | if (err == -EIO) | ||
| 1302 | return; | ||
| 1267 | if (wol_cfg & (1UL << adapter->portnum)) | 1303 | if (wol_cfg & (1UL << adapter->portnum)) |
| 1268 | wol->supported |= WAKE_MAGIC; | 1304 | wol->supported |= WAKE_MAGIC; |
| 1269 | 1305 | ||
| 1270 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); | 1306 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); |
| 1271 | if (wol_cfg & (1UL << adapter->portnum)) | 1307 | if (wol_cfg & (1UL << adapter->portnum)) |
| 1272 | wol->wolopts |= WAKE_MAGIC; | 1308 | wol->wolopts |= WAKE_MAGIC; |
| 1273 | } | 1309 | } |
| @@ -1277,17 +1313,22 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
| 1277 | { | 1313 | { |
| 1278 | struct qlcnic_adapter *adapter = netdev_priv(dev); | 1314 | struct qlcnic_adapter *adapter = netdev_priv(dev); |
| 1279 | u32 wol_cfg; | 1315 | u32 wol_cfg; |
| 1316 | int err = 0; | ||
| 1280 | 1317 | ||
| 1281 | if (qlcnic_83xx_check(adapter)) | 1318 | if (qlcnic_83xx_check(adapter)) |
| 1282 | return -EOPNOTSUPP; | 1319 | return -EOPNOTSUPP; |
| 1283 | if (wol->wolopts & ~WAKE_MAGIC) | 1320 | if (wol->wolopts & ~WAKE_MAGIC) |
| 1284 | return -EINVAL; | 1321 | return -EINVAL; |
| 1285 | 1322 | ||
| 1286 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); | 1323 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); |
| 1324 | if (err == -EIO) | ||
| 1325 | return err; | ||
| 1287 | if (!(wol_cfg & (1 << adapter->portnum))) | 1326 | if (!(wol_cfg & (1 << adapter->portnum))) |
| 1288 | return -EOPNOTSUPP; | 1327 | return -EOPNOTSUPP; |
| 1289 | 1328 | ||
| 1290 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); | 1329 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); |
| 1330 | if (err == -EIO) | ||
| 1331 | return err; | ||
| 1291 | if (wol->wolopts & WAKE_MAGIC) | 1332 | if (wol->wolopts & WAKE_MAGIC) |
| 1292 | wol_cfg |= 1UL << adapter->portnum; | 1333 | wol_cfg |= 1UL << adapter->portnum; |
| 1293 | else | 1334 | else |
| @@ -1540,7 +1581,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) | |||
| 1540 | return 0; | 1581 | return 0; |
| 1541 | case QLCNIC_SET_QUIESCENT: | 1582 | case QLCNIC_SET_QUIESCENT: |
| 1542 | case QLCNIC_RESET_QUIESCENT: | 1583 | case QLCNIC_RESET_QUIESCENT: |
| 1543 | state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); | 1584 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); |
| 1544 | if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) | 1585 | if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) |
| 1545 | netdev_info(netdev, "Device in FAILED state\n"); | 1586 | netdev_info(netdev, "Device in FAILED state\n"); |
| 1546 | return 0; | 1587 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 5b5d2edf125d..4d5f59b2d153 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | |||
| @@ -317,16 +317,20 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data) | |||
| 317 | int | 317 | int |
| 318 | qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) | 318 | qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) |
| 319 | { | 319 | { |
| 320 | int done = 0, timeout = 0; | 320 | int timeout = 0; |
| 321 | int err = 0; | ||
| 322 | u32 done = 0; | ||
| 321 | 323 | ||
| 322 | while (!done) { | 324 | while (!done) { |
| 323 | done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); | 325 | done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)), |
| 326 | &err); | ||
| 324 | if (done == 1) | 327 | if (done == 1) |
| 325 | break; | 328 | break; |
| 326 | if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { | 329 | if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { |
| 327 | dev_err(&adapter->pdev->dev, | 330 | dev_err(&adapter->pdev->dev, |
| 328 | "Failed to acquire sem=%d lock; holdby=%d\n", | 331 | "Failed to acquire sem=%d lock; holdby=%d\n", |
| 329 | sem, id_reg ? QLCRD32(adapter, id_reg) : -1); | 332 | sem, |
| 333 | id_reg ? QLCRD32(adapter, id_reg, &err) : -1); | ||
| 330 | return -EIO; | 334 | return -EIO; |
| 331 | } | 335 | } |
| 332 | msleep(1); | 336 | msleep(1); |
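qlcnic_pcie_sem_lock() polls the lock register with a bounded retry count, sleeping between attempts and giving up with -EIO; the hunk only retypes `done` to u32 now that QLCRD32 reports failure through `&err` instead of the return value. A user-space sketch of the bounded-poll shape, with illustrative timing and limits:

```c
/* Sketch: retry a lock read with a sleep between attempts and give
 * up with -EIO after a fixed number of tries. */
#include <stdio.h>
#include <unistd.h>

#define SEM_TIMEOUT 10000

static int attempts;

static int try_lock(void)
{
	return ++attempts > 3;	/* pretend the holder releases after 3 polls */
}

static int sem_lock(void)
{
	int timeout = 0;

	while (!try_lock()) {
		if (++timeout >= SEM_TIMEOUT)
			return -5;	/* -EIO: holder never released it */
		usleep(1000);		/* msleep(1) equivalent */
	}
	return 0;
}

int main(void)
{
	printf("sem_lock() = %d\n", sem_lock());
	return 0;
}
```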
| @@ -341,19 +345,22 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) | |||
| 341 | void | 345 | void |
| 342 | qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) | 346 | qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) |
| 343 | { | 347 | { |
| 344 | QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); | 348 | int err = 0; |
| 349 | |||
| 350 | QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err); | ||
| 345 | } | 351 | } |
| 346 | 352 | ||
| 347 | int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr) | 353 | int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr) |
| 348 | { | 354 | { |
| 355 | int err = 0; | ||
| 349 | u32 data; | 356 | u32 data; |
| 350 | 357 | ||
| 351 | if (qlcnic_82xx_check(adapter)) | 358 | if (qlcnic_82xx_check(adapter)) |
| 352 | qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data); | 359 | qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data); |
| 353 | else { | 360 | else { |
| 354 | data = qlcnic_83xx_rd_reg_indirect(adapter, addr); | 361 | data = QLCRD32(adapter, addr, &err); |
| 355 | if (data == -EIO) | 362 | if (err == -EIO) |
| 356 | return -EIO; | 363 | return err; |
| 357 | } | 364 | } |
| 358 | return data; | 365 | return data; |
| 359 | } | 366 | } |
| @@ -516,20 +523,18 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) | |||
| 516 | if (netdev->flags & IFF_PROMISC) { | 523 | if (netdev->flags & IFF_PROMISC) { |
| 517 | if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) | 524 | if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) |
| 518 | mode = VPORT_MISS_MODE_ACCEPT_ALL; | 525 | mode = VPORT_MISS_MODE_ACCEPT_ALL; |
| 519 | } else if (netdev->flags & IFF_ALLMULTI) { | 526 | } else if ((netdev->flags & IFF_ALLMULTI) || |
| 520 | if (netdev_mc_count(netdev) > ahw->max_mc_count) { | 527 | (netdev_mc_count(netdev) > ahw->max_mc_count)) { |
| 521 | mode = VPORT_MISS_MODE_ACCEPT_MULTI; | 528 | mode = VPORT_MISS_MODE_ACCEPT_MULTI; |
| 522 | } else if (!netdev_mc_empty(netdev) && | 529 | } else if (!netdev_mc_empty(netdev) && |
| 523 | !qlcnic_sriov_vf_check(adapter)) { | 530 | !qlcnic_sriov_vf_check(adapter)) { |
| 524 | netdev_for_each_mc_addr(ha, netdev) | 531 | netdev_for_each_mc_addr(ha, netdev) |
| 525 | qlcnic_nic_add_mac(adapter, ha->addr, | 532 | qlcnic_nic_add_mac(adapter, ha->addr, vlan); |
| 526 | vlan); | ||
| 527 | } | ||
| 528 | if (mode != VPORT_MISS_MODE_ACCEPT_MULTI && | ||
| 529 | qlcnic_sriov_vf_check(adapter)) | ||
| 530 | qlcnic_vf_add_mc_list(netdev, vlan); | ||
| 531 | } | 533 | } |
| 532 | 534 | ||
| 535 | if (qlcnic_sriov_vf_check(adapter)) | ||
| 536 | qlcnic_vf_add_mc_list(netdev, vlan); | ||
| 537 | |||
| 533 | /* configure unicast MAC address, if there is not sufficient space | 538 | /* configure unicast MAC address, if there is not sufficient space |
| 534 | * to store all the unicast addresses then enable promiscuous mode | 539 | * to store all the unicast addresses then enable promiscuous mode |
| 535 | */ | 540 | */ |
| @@ -1161,7 +1166,8 @@ int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, | |||
| 1161 | return -EIO; | 1166 | return -EIO; |
| 1162 | } | 1167 | } |
| 1163 | 1168 | ||
| 1164 | int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) | 1169 | int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off, |
| 1170 | int *err) | ||
| 1165 | { | 1171 | { |
| 1166 | unsigned long flags; | 1172 | unsigned long flags; |
| 1167 | int rv; | 1173 | int rv; |
| @@ -1417,7 +1423,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) | |||
| 1417 | 1423 | ||
| 1418 | int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter) | 1424 | int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter) |
| 1419 | { | 1425 | { |
| 1420 | int offset, board_type, magic; | 1426 | int offset, board_type, magic, err = 0; |
| 1421 | struct pci_dev *pdev = adapter->pdev; | 1427 | struct pci_dev *pdev = adapter->pdev; |
| 1422 | 1428 | ||
| 1423 | offset = QLCNIC_FW_MAGIC_OFFSET; | 1429 | offset = QLCNIC_FW_MAGIC_OFFSET; |
| @@ -1437,7 +1443,9 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter) | |||
| 1437 | adapter->ahw->board_type = board_type; | 1443 | adapter->ahw->board_type = board_type; |
| 1438 | 1444 | ||
| 1439 | if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { | 1445 | if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { |
| 1440 | u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); | 1446 | u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err); |
| 1447 | if (err == -EIO) | ||
| 1448 | return err; | ||
| 1441 | if ((gpio & 0x8000) == 0) | 1449 | if ((gpio & 0x8000) == 0) |
| 1442 | board_type = QLCNIC_BRDTYPE_P3P_10G_TP; | 1450 | board_type = QLCNIC_BRDTYPE_P3P_10G_TP; |
| 1443 | } | 1451 | } |
| @@ -1477,10 +1485,13 @@ int | |||
| 1477 | qlcnic_wol_supported(struct qlcnic_adapter *adapter) | 1485 | qlcnic_wol_supported(struct qlcnic_adapter *adapter) |
| 1478 | { | 1486 | { |
| 1479 | u32 wol_cfg; | 1487 | u32 wol_cfg; |
| 1488 | int err = 0; | ||
| 1480 | 1489 | ||
| 1481 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); | 1490 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); |
| 1482 | if (wol_cfg & (1UL << adapter->portnum)) { | 1491 | if (wol_cfg & (1UL << adapter->portnum)) { |
| 1483 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); | 1492 | wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); |
| 1493 | if (err == -EIO) | ||
| 1494 | return err; | ||
| 1484 | if (wol_cfg & (1 << adapter->portnum)) | 1495 | if (wol_cfg & (1 << adapter->portnum)) |
| 1485 | return 1; | 1496 | return 1; |
| 1486 | } | 1497 | } |
| @@ -1541,6 +1552,7 @@ void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) | |||
| 1541 | void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf, | 1552 | void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf, |
| 1542 | loff_t offset, size_t size) | 1553 | loff_t offset, size_t size) |
| 1543 | { | 1554 | { |
| 1555 | int err = 0; | ||
| 1544 | u32 data; | 1556 | u32 data; |
| 1545 | u64 qmdata; | 1557 | u64 qmdata; |
| 1546 | 1558 | ||
| @@ -1548,7 +1560,7 @@ void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf, | |||
| 1548 | qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); | 1560 | qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); |
| 1549 | memcpy(buf, &qmdata, size); | 1561 | memcpy(buf, &qmdata, size); |
| 1550 | } else { | 1562 | } else { |
| 1551 | data = QLCRD32(adapter, offset); | 1563 | data = QLCRD32(adapter, offset, &err); |
| 1552 | memcpy(buf, &data, size); | 1564 | memcpy(buf, &data, size); |
| 1553 | } | 1565 | } |
| 1554 | } | 1566 | } |
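[Editor's note] The qlcnic_hw.c hunks above convert QLCRD32() to take an int *err out-parameter, so a register read can report -EIO separately from the 32-bit data word it returns; previously a failed read could only be detected by comparing the returned data against -EIO, as the old qlcnic_ind_rd() did. A minimal user-space sketch of the idiom follows; read_reg() and its address check are hypothetical stand-ins, not driver code.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical analogue of the reworked QLCRD32(): the data word is
     * returned as before, and failure travels through the *err
     * out-parameter instead of being folded into the data. */
    unsigned int read_reg(unsigned long addr, int *err)
    {
        *err = 0;
        if (addr > 0xffffUL) {      /* pretend this range is unmapped */
            *err = -EIO;
            return 0;               /* value is meaningless on error */
        }
        return 0xdeadbeef;          /* stand-in for a real MMIO read */
    }

    int main(void)
    {
        int err = 0;
        unsigned int val = read_reg(0x10000UL, &err);

        if (err == -EIO)            /* callers now test err, not val */
            fprintf(stderr, "register read failed\n");
        else
            printf("val=0x%x\n", val);
        return 0;
    }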
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 2c22504f57aa..4a71b28effcb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | |||
| @@ -154,7 +154,7 @@ struct qlcnic_hardware_context; | |||
| 154 | struct qlcnic_adapter; | 154 | struct qlcnic_adapter; |
| 155 | 155 | ||
| 156 | int qlcnic_82xx_start_firmware(struct qlcnic_adapter *); | 156 | int qlcnic_82xx_start_firmware(struct qlcnic_adapter *); |
| 157 | int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong); | 157 | int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *); |
| 158 | int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); | 158 | int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); |
| 159 | int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int); | 159 | int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int); |
| 160 | int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32); | 160 | int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index d28336fc65ab..974d62607e13 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | |||
| @@ -142,7 +142,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) | |||
| 142 | buffrag->length, PCI_DMA_TODEVICE); | 142 | buffrag->length, PCI_DMA_TODEVICE); |
| 143 | buffrag->dma = 0ULL; | 143 | buffrag->dma = 0ULL; |
| 144 | } | 144 | } |
| 145 | for (j = 0; j < cmd_buf->frag_count; j++) { | 145 | for (j = 1; j < cmd_buf->frag_count; j++) { |
| 146 | buffrag++; | 146 | buffrag++; |
| 147 | if (buffrag->dma) { | 147 | if (buffrag->dma) { |
| 148 | pci_unmap_page(adapter->pdev, buffrag->dma, | 148 | pci_unmap_page(adapter->pdev, buffrag->dma, |
| @@ -286,10 +286,11 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) | |||
| 286 | { | 286 | { |
| 287 | long timeout = 0; | 287 | long timeout = 0; |
| 288 | long done = 0; | 288 | long done = 0; |
| 289 | int err = 0; | ||
| 289 | 290 | ||
| 290 | cond_resched(); | 291 | cond_resched(); |
| 291 | while (done == 0) { | 292 | while (done == 0) { |
| 292 | done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); | 293 | done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err); |
| 293 | done &= 2; | 294 | done &= 2; |
| 294 | if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { | 295 | if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { |
| 295 | dev_err(&adapter->pdev->dev, | 296 | dev_err(&adapter->pdev->dev, |
| @@ -304,6 +305,8 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) | |||
| 304 | static int do_rom_fast_read(struct qlcnic_adapter *adapter, | 305 | static int do_rom_fast_read(struct qlcnic_adapter *adapter, |
| 305 | u32 addr, u32 *valp) | 306 | u32 addr, u32 *valp) |
| 306 | { | 307 | { |
| 308 | int err = 0; | ||
| 309 | |||
| 307 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); | 310 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); |
| 308 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); | 311 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); |
| 309 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); | 312 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); |
| @@ -317,7 +320,9 @@ static int do_rom_fast_read(struct qlcnic_adapter *adapter, | |||
| 317 | udelay(10); | 320 | udelay(10); |
| 318 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); | 321 | QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); |
| 319 | 322 | ||
| 320 | *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA); | 323 | *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err); |
| 324 | if (err == -EIO) | ||
| 325 | return err; | ||
| 321 | return 0; | 326 | return 0; |
| 322 | } | 327 | } |
| 323 | 328 | ||
| @@ -369,11 +374,11 @@ int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) | |||
| 369 | 374 | ||
| 370 | int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) | 375 | int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) |
| 371 | { | 376 | { |
| 372 | int addr, val; | 377 | int addr, err = 0; |
| 373 | int i, n, init_delay; | 378 | int i, n, init_delay; |
| 374 | struct crb_addr_pair *buf; | 379 | struct crb_addr_pair *buf; |
| 375 | unsigned offset; | 380 | unsigned offset; |
| 376 | u32 off; | 381 | u32 off, val; |
| 377 | struct pci_dev *pdev = adapter->pdev; | 382 | struct pci_dev *pdev = adapter->pdev; |
| 378 | 383 | ||
| 379 | QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0); | 384 | QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0); |
| @@ -402,7 +407,9 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) | |||
| 402 | QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00); | 407 | QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00); |
| 403 | 408 | ||
| 404 | /* halt sre */ | 409 | /* halt sre */ |
| 405 | val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000); | 410 | val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err); |
| 411 | if (err == -EIO) | ||
| 412 | return err; | ||
| 406 | QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1))); | 413 | QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1))); |
| 407 | 414 | ||
| 408 | /* halt epg */ | 415 | /* halt epg */ |
| @@ -719,10 +726,12 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) | |||
| 719 | static int | 726 | static int |
| 720 | qlcnic_has_mn(struct qlcnic_adapter *adapter) | 727 | qlcnic_has_mn(struct qlcnic_adapter *adapter) |
| 721 | { | 728 | { |
| 722 | u32 capability; | 729 | u32 capability = 0; |
| 723 | capability = 0; | 730 | int err = 0; |
| 724 | 731 | ||
| 725 | capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); | 732 | capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err); |
| 733 | if (err == -EIO) | ||
| 734 | return err; | ||
| 726 | if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) | 735 | if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) |
| 727 | return 1; | 736 | return 1; |
| 728 | 737 | ||
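[Editor's note] Two things happen in the qlcnic_init.c hunks: the ROM/CRB reads gain the same err out-parameter checks, and the TX-buffer release loop now starts at j = 1 because index 0 is the linear head, already unmapped with pci_unmap_single() just above; iterating from 0 would unmap it a second time as a page. A sketch of that head/fragment split, with hypothetical names:

    struct frag { unsigned long long dma; };

    /* Hypothetical mirror of qlcnic_release_tx_buffers(): the head
     * (index 0) is released as a single mapping, the page fragments in
     * a loop that must therefore begin at 1. */
    void release_tx_bufs(struct frag *frags, int frag_count)
    {
        int j;

        if (frags[0].dma) {
            /* pci_unmap_single() on the linear head */
            frags[0].dma = 0ULL;
        }
        for (j = 1; j < frag_count; j++) {  /* skip the head */
            if (frags[j].dma) {
                /* pci_unmap_page() on each page fragment */
                frags[j].dma = 0ULL;
            }
        }
    }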
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index d3f8797efcc3..6946d354f44f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
| @@ -161,36 +161,68 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data) | |||
| 161 | return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0; | 161 | return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0; |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter, | ||
| 165 | struct qlcnic_filter *fil, | ||
| 166 | void *addr, u16 vlan_id) | ||
| 167 | { | ||
| 168 | int ret; | ||
| 169 | u8 op; | ||
| 170 | |||
| 171 | op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; | ||
| 172 | ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op); | ||
| 173 | if (ret) | ||
| 174 | return; | ||
| 175 | |||
| 176 | op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; | ||
| 177 | ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op); | ||
| 178 | if (!ret) { | ||
| 179 | hlist_del(&fil->fnode); | ||
| 180 | adapter->rx_fhash.fnum--; | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head, | ||
| 185 | void *addr, u16 vlan_id) | ||
| 186 | { | ||
| 187 | struct qlcnic_filter *tmp_fil = NULL; | ||
| 188 | struct hlist_node *n; | ||
| 189 | |||
| 190 | hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { | ||
| 191 | if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) && | ||
| 192 | tmp_fil->vlan_id == vlan_id) | ||
| 193 | return tmp_fil; | ||
| 194 | } | ||
| 195 | |||
| 196 | return NULL; | ||
| 197 | } | ||
| 198 | |||
| 164 | void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, | 199 | void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, |
| 165 | int loopback_pkt, u16 vlan_id) | 200 | int loopback_pkt, u16 vlan_id) |
| 166 | { | 201 | { |
| 167 | struct ethhdr *phdr = (struct ethhdr *)(skb->data); | 202 | struct ethhdr *phdr = (struct ethhdr *)(skb->data); |
| 168 | struct qlcnic_filter *fil, *tmp_fil; | 203 | struct qlcnic_filter *fil, *tmp_fil; |
| 169 | struct hlist_node *n; | ||
| 170 | struct hlist_head *head; | 204 | struct hlist_head *head; |
| 171 | unsigned long time; | 205 | unsigned long time; |
| 172 | u64 src_addr = 0; | 206 | u64 src_addr = 0; |
| 173 | u8 hindex, found = 0, op; | 207 | u8 hindex, op; |
| 174 | int ret; | 208 | int ret; |
| 175 | 209 | ||
| 176 | memcpy(&src_addr, phdr->h_source, ETH_ALEN); | 210 | memcpy(&src_addr, phdr->h_source, ETH_ALEN); |
| 211 | hindex = qlcnic_mac_hash(src_addr) & | ||
| 212 | (adapter->fhash.fbucket_size - 1); | ||
| 177 | 213 | ||
| 178 | if (loopback_pkt) { | 214 | if (loopback_pkt) { |
| 179 | if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax) | 215 | if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax) |
| 180 | return; | 216 | return; |
| 181 | 217 | ||
| 182 | hindex = qlcnic_mac_hash(src_addr) & | ||
| 183 | (adapter->fhash.fbucket_size - 1); | ||
| 184 | head = &(adapter->rx_fhash.fhead[hindex]); | 218 | head = &(adapter->rx_fhash.fhead[hindex]); |
| 185 | 219 | ||
| 186 | hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { | 220 | tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); |
| 187 | if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && | 221 | if (tmp_fil) { |
| 188 | tmp_fil->vlan_id == vlan_id) { | 222 | time = tmp_fil->ftime; |
| 189 | time = tmp_fil->ftime; | 223 | if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time)) |
| 190 | if (jiffies > (QLCNIC_READD_AGE * HZ + time)) | 224 | tmp_fil->ftime = jiffies; |
| 191 | tmp_fil->ftime = jiffies; | 225 | return; |
| 192 | return; | ||
| 193 | } | ||
| 194 | } | 226 | } |
| 195 | 227 | ||
| 196 | fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); | 228 | fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); |
| @@ -205,36 +237,37 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, | |||
| 205 | adapter->rx_fhash.fnum++; | 237 | adapter->rx_fhash.fnum++; |
| 206 | spin_unlock(&adapter->rx_mac_learn_lock); | 238 | spin_unlock(&adapter->rx_mac_learn_lock); |
| 207 | } else { | 239 | } else { |
| 208 | hindex = qlcnic_mac_hash(src_addr) & | 240 | head = &adapter->fhash.fhead[hindex]; |
| 209 | (adapter->fhash.fbucket_size - 1); | ||
| 210 | head = &(adapter->rx_fhash.fhead[hindex]); | ||
| 211 | spin_lock(&adapter->rx_mac_learn_lock); | ||
| 212 | hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { | ||
| 213 | if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && | ||
| 214 | tmp_fil->vlan_id == vlan_id) { | ||
| 215 | found = 1; | ||
| 216 | break; | ||
| 217 | } | ||
| 218 | } | ||
| 219 | 241 | ||
| 220 | if (!found) { | 242 | spin_lock(&adapter->mac_learn_lock); |
| 221 | spin_unlock(&adapter->rx_mac_learn_lock); | ||
| 222 | return; | ||
| 223 | } | ||
| 224 | 243 | ||
| 225 | op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; | 244 | tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); |
| 226 | ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr, | 245 | if (tmp_fil) { |
| 227 | vlan_id, op); | ||
| 228 | if (!ret) { | ||
| 229 | op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; | 246 | op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; |
| 230 | ret = qlcnic_sre_macaddr_change(adapter, | 247 | ret = qlcnic_sre_macaddr_change(adapter, |
| 231 | (u8 *)&src_addr, | 248 | (u8 *)&src_addr, |
| 232 | vlan_id, op); | 249 | vlan_id, op); |
| 233 | if (!ret) { | 250 | if (!ret) { |
| 234 | hlist_del(&(tmp_fil->fnode)); | 251 | hlist_del(&tmp_fil->fnode); |
| 235 | adapter->rx_fhash.fnum--; | 252 | adapter->fhash.fnum--; |
| 236 | } | 253 | } |
| 254 | |||
| 255 | spin_unlock(&adapter->mac_learn_lock); | ||
| 256 | |||
| 257 | return; | ||
| 237 | } | 258 | } |
| 259 | |||
| 260 | spin_unlock(&adapter->mac_learn_lock); | ||
| 261 | |||
| 262 | head = &adapter->rx_fhash.fhead[hindex]; | ||
| 263 | |||
| 264 | spin_lock(&adapter->rx_mac_learn_lock); | ||
| 265 | |||
| 266 | tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); | ||
| 267 | if (tmp_fil) | ||
| 268 | qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr, | ||
| 269 | vlan_id); | ||
| 270 | |||
| 238 | spin_unlock(&adapter->rx_mac_learn_lock); | 271 | spin_unlock(&adapter->rx_mac_learn_lock); |
| 239 | } | 272 | } |
| 240 | } | 273 | } |
| @@ -262,7 +295,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, | |||
| 262 | 295 | ||
| 263 | mac_req = (struct qlcnic_mac_req *)&(req->words[0]); | 296 | mac_req = (struct qlcnic_mac_req *)&(req->words[0]); |
| 264 | mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; | 297 | mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; |
| 265 | memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN); | 298 | memcpy(mac_req->mac_addr, uaddr, ETH_ALEN); |
| 266 | 299 | ||
| 267 | vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; | 300 | vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; |
| 268 | vlan_req->vlan_id = cpu_to_le16(vlan_id); | 301 | vlan_req->vlan_id = cpu_to_le16(vlan_id); |
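[Editor's note] The qlcnic_io.c rewrite pulls the duplicated bucket walk out into qlcnic_find_mac_filter() and a dedicated RX-list delete helper, and takes mac_learn_lock and rx_mac_learn_lock one at a time instead of doing TX-table work under the RX lock. The shape of the lookup helper, reduced to a plain singly linked list (names hypothetical):

    #include <string.h>
    #include <stddef.h>

    struct filter {
        struct filter *next;
        unsigned char addr[6];
        unsigned short vlan_id;
    };

    /* One shared lookup replaces the two open-coded hlist walks. */
    struct filter *find_filter(struct filter *head, const void *addr,
                               unsigned short vlan_id)
    {
        struct filter *f;

        for (f = head; f; f = f->next)
            if (!memcmp(f->addr, addr, sizeof(f->addr)) &&
                f->vlan_id == vlan_id)
                return f;
        return NULL;
    }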
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 4528f8ec333b..bc05d016c859 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
| @@ -977,8 +977,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter) | |||
| 977 | static int | 977 | static int |
| 978 | qlcnic_initialize_nic(struct qlcnic_adapter *adapter) | 978 | qlcnic_initialize_nic(struct qlcnic_adapter *adapter) |
| 979 | { | 979 | { |
| 980 | int err; | ||
| 981 | struct qlcnic_info nic_info; | 980 | struct qlcnic_info nic_info; |
| 981 | int err = 0; | ||
| 982 | 982 | ||
| 983 | memset(&nic_info, 0, sizeof(struct qlcnic_info)); | 983 | memset(&nic_info, 0, sizeof(struct qlcnic_info)); |
| 984 | err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); | 984 | err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); |
| @@ -993,7 +993,9 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) | |||
| 993 | 993 | ||
| 994 | if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { | 994 | if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { |
| 995 | u32 temp; | 995 | u32 temp; |
| 996 | temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); | 996 | temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err); |
| 997 | if (err == -EIO) | ||
| 998 | return err; | ||
| 997 | adapter->ahw->extra_capability[0] = temp; | 999 | adapter->ahw->extra_capability[0] = temp; |
| 998 | } | 1000 | } |
| 999 | adapter->ahw->max_mac_filters = nic_info.max_mac_filters; | 1001 | adapter->ahw->max_mac_filters = nic_info.max_mac_filters; |
| @@ -1383,6 +1385,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) | |||
| 1383 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { | 1385 | if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { |
| 1384 | if (qlcnic_82xx_check(adapter)) | 1386 | if (qlcnic_82xx_check(adapter)) |
| 1385 | handler = qlcnic_tmp_intr; | 1387 | handler = qlcnic_tmp_intr; |
| 1388 | else | ||
| 1389 | handler = qlcnic_83xx_tmp_intr; | ||
| 1386 | if (!QLCNIC_IS_MSI_FAMILY(adapter)) | 1390 | if (!QLCNIC_IS_MSI_FAMILY(adapter)) |
| 1387 | flags |= IRQF_SHARED; | 1391 | flags |= IRQF_SHARED; |
| 1388 | 1392 | ||
| @@ -1531,12 +1535,12 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) | |||
| 1531 | if (netdev->features & NETIF_F_LRO) | 1535 | if (netdev->features & NETIF_F_LRO) |
| 1532 | qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); | 1536 | qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); |
| 1533 | 1537 | ||
| 1538 | set_bit(__QLCNIC_DEV_UP, &adapter->state); | ||
| 1534 | qlcnic_napi_enable(adapter); | 1539 | qlcnic_napi_enable(adapter); |
| 1535 | 1540 | ||
| 1536 | qlcnic_linkevent_request(adapter, 1); | 1541 | qlcnic_linkevent_request(adapter, 1); |
| 1537 | 1542 | ||
| 1538 | adapter->ahw->reset_context = 0; | 1543 | adapter->ahw->reset_context = 0; |
| 1539 | set_bit(__QLCNIC_DEV_UP, &adapter->state); | ||
| 1540 | return 0; | 1544 | return 0; |
| 1541 | } | 1545 | } |
| 1542 | 1546 | ||
| @@ -2139,7 +2143,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2139 | if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x && | 2143 | if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x && |
| 2140 | !!qlcnic_use_msi) | 2144 | !!qlcnic_use_msi) |
| 2141 | dev_warn(&pdev->dev, | 2145 | dev_warn(&pdev->dev, |
| 2142 | "83xx adapter do not support MSI interrupts\n"); | 2146 | "Device does not support MSI interrupts\n"); |
| 2143 | 2147 | ||
| 2144 | err = qlcnic_setup_intr(adapter, 0); | 2148 | err = qlcnic_setup_intr(adapter, 0); |
| 2145 | if (err) { | 2149 | if (err) { |
| @@ -2161,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2161 | if (err) | 2165 | if (err) |
| 2162 | goto err_out_disable_mbx_intr; | 2166 | goto err_out_disable_mbx_intr; |
| 2163 | 2167 | ||
| 2164 | qlcnic_set_drv_version(adapter); | 2168 | if (adapter->portnum == 0) |
| 2169 | qlcnic_set_drv_version(adapter); | ||
| 2165 | 2170 | ||
| 2166 | pci_set_drvdata(pdev, adapter); | 2171 | pci_set_drvdata(pdev, adapter); |
| 2167 | 2172 | ||
| @@ -3081,7 +3086,8 @@ done: | |||
| 3081 | adapter->fw_fail_cnt = 0; | 3086 | adapter->fw_fail_cnt = 0; |
| 3082 | adapter->flags &= ~QLCNIC_FW_HANG; | 3087 | adapter->flags &= ~QLCNIC_FW_HANG; |
| 3083 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 3088 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
| 3084 | qlcnic_set_drv_version(adapter); | 3089 | if (adapter->portnum == 0) |
| 3090 | qlcnic_set_drv_version(adapter); | ||
| 3085 | 3091 | ||
| 3086 | if (!qlcnic_clr_drv_state(adapter)) | 3092 | if (!qlcnic_clr_drv_state(adapter)) |
| 3087 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, | 3093 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, |
| @@ -3093,6 +3099,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) | |||
| 3093 | { | 3099 | { |
| 3094 | u32 state = 0, heartbeat; | 3100 | u32 state = 0, heartbeat; |
| 3095 | u32 peg_status; | 3101 | u32 peg_status; |
| 3102 | int err = 0; | ||
| 3096 | 3103 | ||
| 3097 | if (qlcnic_check_temp(adapter)) | 3104 | if (qlcnic_check_temp(adapter)) |
| 3098 | goto detach; | 3105 | goto detach; |
| @@ -3139,11 +3146,11 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) | |||
| 3139 | "PEG_NET_4_PC: 0x%x\n", | 3146 | "PEG_NET_4_PC: 0x%x\n", |
| 3140 | peg_status, | 3147 | peg_status, |
| 3141 | QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2), | 3148 | QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2), |
| 3142 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), | 3149 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err), |
| 3143 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), | 3150 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err), |
| 3144 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), | 3151 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err), |
| 3145 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), | 3152 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err), |
| 3146 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); | 3153 | QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err)); |
| 3147 | if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) | 3154 | if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) |
| 3148 | dev_err(&adapter->pdev->dev, | 3155 | dev_err(&adapter->pdev->dev, |
| 3149 | "Firmware aborted with error code 0x00006700. " | 3156 | "Firmware aborted with error code 0x00006700. " |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index ab8a6744d402..79e54efe07b9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | |||
| @@ -1084,7 +1084,7 @@ flash_temp: | |||
| 1084 | tmpl_hdr = ahw->fw_dump.tmpl_hdr; | 1084 | tmpl_hdr = ahw->fw_dump.tmpl_hdr; |
| 1085 | tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; | 1085 | tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; |
| 1086 | 1086 | ||
| 1087 | if ((tmpl_hdr->version & 0xffffff) >= 0x20001) | 1087 | if ((tmpl_hdr->version & 0xfffff) >= 0x20001) |
| 1088 | ahw->fw_dump.use_pex_dma = true; | 1088 | ahw->fw_dump.use_pex_dma = true; |
| 1089 | else | 1089 | else |
| 1090 | ahw->fw_dump.use_pex_dma = false; | 1090 | ahw->fw_dump.use_pex_dma = false; |
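[Editor's note] The minidump one-liner narrows the template-version mask from 0xffffff to 0xfffff before the >= 0x20001 comparison, so only the low 20 bits of the header version take part in the PEX-DMA capability test. As a sketch (the field width is inferred from the hunk, not from documentation):

    /* Compare only the low 20 bits of the packed template version,
     * as the corrected mask does. */
    int use_pex_dma(unsigned int version)
    {
        return (version & 0xfffff) >= 0x20001;
    }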
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 62380ce89905..5d40045b3cea 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
| @@ -562,7 +562,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, | |||
| 562 | INIT_LIST_HEAD(&adapter->vf_mc_list); | 562 | INIT_LIST_HEAD(&adapter->vf_mc_list); |
| 563 | if (!qlcnic_use_msi_x && !!qlcnic_use_msi) | 563 | if (!qlcnic_use_msi_x && !!qlcnic_use_msi) |
| 564 | dev_warn(&adapter->pdev->dev, | 564 | dev_warn(&adapter->pdev->dev, |
| 565 | "83xx adapter do not support MSI interrupts\n"); | 565 | "Device does not support MSI interrupts\n"); |
| 566 | 566 | ||
| 567 | err = qlcnic_setup_intr(adapter, 1); | 567 | err = qlcnic_setup_intr(adapter, 1); |
| 568 | if (err) { | 568 | if (err) { |
| @@ -762,6 +762,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type) | |||
| 762 | memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); | 762 | memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); |
| 763 | mbx->req.arg[0] = (type | (mbx->req.num << 16) | | 763 | mbx->req.arg[0] = (type | (mbx->req.num << 16) | |
| 764 | (3 << 29)); | 764 | (3 << 29)); |
| 765 | mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16; | ||
| 765 | return 0; | 766 | return 0; |
| 766 | } | 767 | } |
| 767 | } | 768 | } |
| @@ -813,6 +814,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans, | |||
| 813 | cmd->req.num = trans->req_pay_size / 4; | 814 | cmd->req.num = trans->req_pay_size / 4; |
| 814 | cmd->rsp.num = trans->rsp_pay_size / 4; | 815 | cmd->rsp.num = trans->rsp_pay_size / 4; |
| 815 | hdr = trans->rsp_hdr; | 816 | hdr = trans->rsp_hdr; |
| 817 | cmd->op_type = trans->req_hdr->op_type; | ||
| 816 | } | 818 | } |
| 817 | 819 | ||
| 818 | trans->trans_id = seq; | 820 | trans->trans_id = seq; |
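[Editor's note] In qlcnic_sriov_common.c the response header word rsp.arg[0] is now pre-seeded to (type & 0xffff) | num << 16, matching the request encoding on the line above it, and the bc-header path records op_type for responses. Packing and checking that header word in isolation (field widths are illustrative, taken from the hunk):

    #include <assert.h>

    /* Mailbox header word as encoded above: command type in the low
     * 16 bits, word count starting at bit 16. */
    unsigned int mbx_hdr(unsigned int type, unsigned int num)
    {
        return (type & 0xffff) | (num << 16);
    }

    int main(void)
    {
        unsigned int hdr = mbx_hdr(0x31, 4);

        assert((hdr & 0xffff) == 0x31);
        assert((hdr >> 16) == 4);
        return 0;
    }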
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index ee0c1d307966..eb49cd65378c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
| @@ -635,12 +635,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans, | |||
| 635 | struct qlcnic_cmd_args *cmd) | 635 | struct qlcnic_cmd_args *cmd) |
| 636 | { | 636 | { |
| 637 | struct qlcnic_vf_info *vf = trans->vf; | 637 | struct qlcnic_vf_info *vf = trans->vf; |
| 638 | struct qlcnic_adapter *adapter = vf->adapter; | 638 | struct qlcnic_vport *vp = vf->vp; |
| 639 | int err; | 639 | struct qlcnic_adapter *adapter; |
| 640 | u16 func = vf->pci_func; | 640 | u16 func = vf->pci_func; |
| 641 | int err; | ||
| 641 | 642 | ||
| 642 | cmd->rsp.arg[0] = trans->req_hdr->cmd_op; | 643 | adapter = vf->adapter; |
| 643 | cmd->rsp.arg[0] |= (1 << 16); | ||
| 644 | 644 | ||
| 645 | if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) { | 645 | if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) { |
| 646 | err = qlcnic_sriov_pf_config_vport(adapter, 1, func); | 646 | err = qlcnic_sriov_pf_config_vport(adapter, 1, func); |
| @@ -650,6 +650,8 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans, | |||
| 650 | qlcnic_sriov_pf_config_vport(adapter, 0, func); | 650 | qlcnic_sriov_pf_config_vport(adapter, 0, func); |
| 651 | } | 651 | } |
| 652 | } else { | 652 | } else { |
| 653 | if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) | ||
| 654 | vp->vlan = 0; | ||
| 653 | err = qlcnic_sriov_pf_config_vport(adapter, 0, func); | 655 | err = qlcnic_sriov_pf_config_vport(adapter, 0, func); |
| 654 | } | 656 | } |
| 655 | 657 | ||
| @@ -1183,7 +1185,7 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans, | |||
| 1183 | u8 cmd_op, mode = vp->vlan_mode; | 1185 | u8 cmd_op, mode = vp->vlan_mode; |
| 1184 | 1186 | ||
| 1185 | cmd_op = trans->req_hdr->cmd_op; | 1187 | cmd_op = trans->req_hdr->cmd_op; |
| 1186 | cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25; | 1188 | cmd->rsp.arg[0] |= 1 << 25; |
| 1187 | 1189 | ||
| 1188 | switch (mode) { | 1190 | switch (mode) { |
| 1189 | case QLC_GUEST_VLAN_MODE: | 1191 | case QLC_GUEST_VLAN_MODE: |
| @@ -1561,6 +1563,7 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov, | |||
| 1561 | struct qlcnic_vf_info *vf) | 1563 | struct qlcnic_vf_info *vf) |
| 1562 | { | 1564 | { |
| 1563 | struct net_device *dev = vf->adapter->netdev; | 1565 | struct net_device *dev = vf->adapter->netdev; |
| 1566 | struct qlcnic_vport *vp = vf->vp; | ||
| 1564 | 1567 | ||
| 1565 | if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) { | 1568 | if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) { |
| 1566 | clear_bit(QLC_BC_VF_FLR, &vf->state); | 1569 | clear_bit(QLC_BC_VF_FLR, &vf->state); |
| @@ -1573,6 +1576,9 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov, | |||
| 1573 | return; | 1576 | return; |
| 1574 | } | 1577 | } |
| 1575 | 1578 | ||
| 1579 | if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) | ||
| 1580 | vp->vlan = 0; | ||
| 1581 | |||
| 1576 | qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); | 1582 | qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); |
| 1577 | netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func); | 1583 | netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func); |
| 1578 | } | 1584 | } |
| @@ -1621,13 +1627,15 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) | |||
| 1621 | { | 1627 | { |
| 1622 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1628 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| 1623 | struct qlcnic_sriov *sriov = adapter->ahw->sriov; | 1629 | struct qlcnic_sriov *sriov = adapter->ahw->sriov; |
| 1624 | int i, num_vfs = sriov->num_vfs; | 1630 | int i, num_vfs; |
| 1625 | struct qlcnic_vf_info *vf_info; | 1631 | struct qlcnic_vf_info *vf_info; |
| 1626 | u8 *curr_mac; | 1632 | u8 *curr_mac; |
| 1627 | 1633 | ||
| 1628 | if (!qlcnic_sriov_pf_check(adapter)) | 1634 | if (!qlcnic_sriov_pf_check(adapter)) |
| 1629 | return -EOPNOTSUPP; | 1635 | return -EOPNOTSUPP; |
| 1630 | 1636 | ||
| 1637 | num_vfs = sriov->num_vfs; | ||
| 1638 | |||
| 1631 | if (!is_valid_ether_addr(mac) || vf >= num_vfs) | 1639 | if (!is_valid_ether_addr(mac) || vf >= num_vfs) |
| 1632 | return -EINVAL; | 1640 | return -EINVAL; |
| 1633 | 1641 | ||
| @@ -1741,6 +1749,7 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf, | |||
| 1741 | 1749 | ||
| 1742 | switch (vlan) { | 1750 | switch (vlan) { |
| 1743 | case 4095: | 1751 | case 4095: |
| 1752 | vp->vlan = 0; | ||
| 1744 | vp->vlan_mode = QLC_GUEST_VLAN_MODE; | 1753 | vp->vlan_mode = QLC_GUEST_VLAN_MODE; |
| 1745 | break; | 1754 | break; |
| 1746 | case 0: | 1755 | case 0: |
| @@ -1759,6 +1768,29 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf, | |||
| 1759 | return 0; | 1768 | return 0; |
| 1760 | } | 1769 | } |
| 1761 | 1770 | ||
| 1771 | static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter, | ||
| 1772 | struct qlcnic_vport *vp, int vf) | ||
| 1773 | { | ||
| 1774 | __u32 vlan = 0; | ||
| 1775 | |||
| 1776 | switch (vp->vlan_mode) { | ||
| 1777 | case QLC_PVID_MODE: | ||
| 1778 | vlan = vp->vlan; | ||
| 1779 | break; | ||
| 1780 | case QLC_GUEST_VLAN_MODE: | ||
| 1781 | vlan = MAX_VLAN_ID; | ||
| 1782 | break; | ||
| 1783 | case QLC_NO_VLAN_MODE: | ||
| 1784 | vlan = 0; | ||
| 1785 | break; | ||
| 1786 | default: | ||
| 1787 | netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n", | ||
| 1788 | vp->vlan_mode, vf); | ||
| 1789 | } | ||
| 1790 | |||
| 1791 | return vlan; | ||
| 1792 | } | ||
| 1793 | |||
| 1762 | int qlcnic_sriov_get_vf_config(struct net_device *netdev, | 1794 | int qlcnic_sriov_get_vf_config(struct net_device *netdev, |
| 1763 | int vf, struct ifla_vf_info *ivi) | 1795 | int vf, struct ifla_vf_info *ivi) |
| 1764 | { | 1796 | { |
| @@ -1774,7 +1806,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev, | |||
| 1774 | 1806 | ||
| 1775 | vp = sriov->vf_info[vf].vp; | 1807 | vp = sriov->vf_info[vf].vp; |
| 1776 | memcpy(&ivi->mac, vp->mac, ETH_ALEN); | 1808 | memcpy(&ivi->mac, vp->mac, ETH_ALEN); |
| 1777 | ivi->vlan = vp->vlan; | 1809 | ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf); |
| 1778 | ivi->qos = vp->qos; | 1810 | ivi->qos = vp->qos; |
| 1779 | ivi->spoofchk = vp->spoofchk; | 1811 | ivi->spoofchk = vp->spoofchk; |
| 1780 | if (vp->max_tx_bw == MAX_BW) | 1812 | if (vp->max_tx_bw == MAX_BW) |
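[Editor's note] The PF-side changes clear vp->vlan whenever a VF enters or is reset in guest-VLAN mode, and report the VLAN through the new qlcnic_sriov_get_vf_vlan(), which maps the vport mode to the value user space sees: PVID mode reports the configured VLAN, guest mode the MAX_VLAN_ID sentinel, and no-VLAN mode zero. That mapping in miniature, with hypothetical enum names:

    enum vlan_mode { NO_VLAN, PVID, GUEST_VLAN };

    #define MAX_VLAN_ID 4095

    /* Mirror of the switch in qlcnic_sriov_get_vf_vlan(): report the
     * effective VLAN for a VF based on its vport mode. */
    unsigned int vf_vlan(enum vlan_mode mode, unsigned int pvid)
    {
        switch (mode) {
        case PVID:
            return pvid;
        case GUEST_VLAN:
            return MAX_VLAN_ID;   /* "any VLAN" sentinel */
        case NO_VLAN:
        default:
            return 0;
        }
    }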
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
| @@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, | |||
| 170 | 170 | ||
| 171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { | 171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { |
| 172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); | 172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); |
| 173 | if (!err) { | 173 | if (err) { |
| 174 | dev_info(&adapter->pdev->dev, | 174 | netdev_err(adapter->netdev, |
| 175 | "Failed to get current beacon state\n"); | 175 | "Failed to get current beacon state\n"); |
| 176 | } else { | 176 | } else { |
| 177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) | 177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) |
| 178 | ahw->beacon_state = 0; | 178 | ahw->beacon_state = 0; |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index e6acb9fa5767..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
| @@ -478,7 +478,7 @@ rx_status_loop: | |||
| 478 | 478 | ||
| 479 | while (1) { | 479 | while (1) { |
| 480 | u32 status, len; | 480 | u32 status, len; |
| 481 | dma_addr_t mapping; | 481 | dma_addr_t mapping, new_mapping; |
| 482 | struct sk_buff *skb, *new_skb; | 482 | struct sk_buff *skb, *new_skb; |
| 483 | struct cp_desc *desc; | 483 | struct cp_desc *desc; |
| 484 | const unsigned buflen = cp->rx_buf_sz; | 484 | const unsigned buflen = cp->rx_buf_sz; |
| @@ -520,6 +520,14 @@ rx_status_loop: | |||
| 520 | goto rx_next; | 520 | goto rx_next; |
| 521 | } | 521 | } |
| 522 | 522 | ||
| 523 | new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, | ||
| 524 | PCI_DMA_FROMDEVICE); | ||
| 525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { | ||
| 526 | dev->stats.rx_dropped++; | ||
| 527 | kfree_skb(new_skb); | ||
| 528 | goto rx_next; | ||
| 529 | } | ||
| 530 | |||
| 523 | dma_unmap_single(&cp->pdev->dev, mapping, | 531 | dma_unmap_single(&cp->pdev->dev, mapping, |
| 524 | buflen, PCI_DMA_FROMDEVICE); | 532 | buflen, PCI_DMA_FROMDEVICE); |
| 525 | 533 | ||
| @@ -531,12 +539,11 @@ rx_status_loop: | |||
| 531 | 539 | ||
| 532 | skb_put(skb, len); | 540 | skb_put(skb, len); |
| 533 | 541 | ||
| 534 | mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, | ||
| 535 | PCI_DMA_FROMDEVICE); | ||
| 536 | cp->rx_skb[rx_tail] = new_skb; | 542 | cp->rx_skb[rx_tail] = new_skb; |
| 537 | 543 | ||
| 538 | cp_rx_skb(cp, skb, desc); | 544 | cp_rx_skb(cp, skb, desc); |
| 539 | rx++; | 545 | rx++; |
| 546 | mapping = new_mapping; | ||
| 540 | 547 | ||
| 541 | rx_next: | 548 | rx_next: |
| 542 | cp->rx_ring[rx_tail].opts2 = 0; | 549 | cp->rx_ring[rx_tail].opts2 = 0; |
| @@ -716,6 +723,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb) | |||
| 716 | TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; | 723 | TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; |
| 717 | } | 724 | } |
| 718 | 725 | ||
| 726 | static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, | ||
| 727 | int first, int entry_last) | ||
| 728 | { | ||
| 729 | int frag, index; | ||
| 730 | struct cp_desc *txd; | ||
| 731 | skb_frag_t *this_frag; | ||
| 732 | for (frag = 0; frag+first < entry_last; frag++) { | ||
| 733 | index = first+frag; | ||
| 734 | cp->tx_skb[index] = NULL; | ||
| 735 | txd = &cp->tx_ring[index]; | ||
| 736 | this_frag = &skb_shinfo(skb)->frags[frag]; | ||
| 737 | dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), | ||
| 738 | skb_frag_size(this_frag), PCI_DMA_TODEVICE); | ||
| 739 | } | ||
| 740 | } | ||
| 741 | |||
| 719 | static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | 742 | static netdev_tx_t cp_start_xmit (struct sk_buff *skb, |
| 720 | struct net_device *dev) | 743 | struct net_device *dev) |
| 721 | { | 744 | { |
| @@ -749,6 +772,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
| 749 | 772 | ||
| 750 | len = skb->len; | 773 | len = skb->len; |
| 751 | mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); | 774 | mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); |
| 775 | if (dma_mapping_error(&cp->pdev->dev, mapping)) | ||
| 776 | goto out_dma_error; | ||
| 777 | |||
| 752 | txd->opts2 = opts2; | 778 | txd->opts2 = opts2; |
| 753 | txd->addr = cpu_to_le64(mapping); | 779 | txd->addr = cpu_to_le64(mapping); |
| 754 | wmb(); | 780 | wmb(); |
| @@ -786,6 +812,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
| 786 | first_len = skb_headlen(skb); | 812 | first_len = skb_headlen(skb); |
| 787 | first_mapping = dma_map_single(&cp->pdev->dev, skb->data, | 813 | first_mapping = dma_map_single(&cp->pdev->dev, skb->data, |
| 788 | first_len, PCI_DMA_TODEVICE); | 814 | first_len, PCI_DMA_TODEVICE); |
| 815 | if (dma_mapping_error(&cp->pdev->dev, first_mapping)) | ||
| 816 | goto out_dma_error; | ||
| 817 | |||
| 789 | cp->tx_skb[entry] = skb; | 818 | cp->tx_skb[entry] = skb; |
| 790 | entry = NEXT_TX(entry); | 819 | entry = NEXT_TX(entry); |
| 791 | 820 | ||
| @@ -799,6 +828,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
| 799 | mapping = dma_map_single(&cp->pdev->dev, | 828 | mapping = dma_map_single(&cp->pdev->dev, |
| 800 | skb_frag_address(this_frag), | 829 | skb_frag_address(this_frag), |
| 801 | len, PCI_DMA_TODEVICE); | 830 | len, PCI_DMA_TODEVICE); |
| 831 | if (dma_mapping_error(&cp->pdev->dev, mapping)) { | ||
| 832 | unwind_tx_frag_mapping(cp, skb, first_entry, entry); | ||
| 833 | goto out_dma_error; | ||
| 834 | } | ||
| 835 | |||
| 802 | eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; | 836 | eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; |
| 803 | 837 | ||
| 804 | ctrl = eor | len | DescOwn; | 838 | ctrl = eor | len | DescOwn; |
| @@ -859,11 +893,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, | |||
| 859 | if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) | 893 | if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) |
| 860 | netif_stop_queue(dev); | 894 | netif_stop_queue(dev); |
| 861 | 895 | ||
| 896 | out_unlock: | ||
| 862 | spin_unlock_irqrestore(&cp->lock, intr_flags); | 897 | spin_unlock_irqrestore(&cp->lock, intr_flags); |
| 863 | 898 | ||
| 864 | cpw8(TxPoll, NormalTxPoll); | 899 | cpw8(TxPoll, NormalTxPoll); |
| 865 | 900 | ||
| 866 | return NETDEV_TX_OK; | 901 | return NETDEV_TX_OK; |
| 902 | out_dma_error: | ||
| 903 | kfree_skb(skb); | ||
| 904 | cp->dev->stats.tx_dropped++; | ||
| 905 | goto out_unlock; | ||
| 867 | } | 906 | } |
| 868 | 907 | ||
| 869 | /* Set or clear the multicast filter for this adaptor. | 908 | /* Set or clear the multicast filter for this adaptor. |
| @@ -1054,6 +1093,10 @@ static int cp_refill_rx(struct cp_private *cp) | |||
| 1054 | 1093 | ||
| 1055 | mapping = dma_map_single(&cp->pdev->dev, skb->data, | 1094 | mapping = dma_map_single(&cp->pdev->dev, skb->data, |
| 1056 | cp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1095 | cp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
| 1096 | if (dma_mapping_error(&cp->pdev->dev, mapping)) { | ||
| 1097 | kfree_skb(skb); | ||
| 1098 | goto err_out; | ||
| 1099 | } | ||
| 1057 | cp->rx_skb[i] = skb; | 1100 | cp->rx_skb[i] = skb; |
| 1058 | 1101 | ||
| 1059 | cp->rx_ring[i].opts2 = 0; | 1102 | cp->rx_ring[i].opts2 = 0; |
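[Editor's note] The 8139cp hunks add dma_mapping_error() checks after every dma_map_single() and restructure the RX path so the replacement buffer is mapped before the old one is unmapped: if the new mapping fails, the packet is dropped and the ring keeps its old, still-valid buffer. TX gets a matching unwind helper for partially mapped fragment chains. The RX ordering in miniature (map_buf()/unmap_buf() are hypothetical stand-ins):

    #include <stdbool.h>
    #include <stddef.h>

    struct buf { void *cpu; unsigned long long dma; };

    /* Stand-ins for dma_map_single()/dma_unmap_single(). */
    bool map_buf(struct buf *b)   { b->dma = b->cpu ? 1 : 0; return b->dma != 0; }
    void unmap_buf(struct buf *b) { b->dma = 0; }

    /* Refill one RX slot: swap buffers only once the new mapping is
     * known-good; on failure the slot is left untouched. */
    bool rx_refill(struct buf *slot, struct buf *fresh)
    {
        if (!map_buf(fresh))
            return false;   /* drop packet, old buffer stays mapped */
        unmap_buf(slot);    /* hand the old buffer up the stack */
        *slot = *fresh;
        return true;
    }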
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 4106a743ca74..85e5c97191dd 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -3689,7 +3689,7 @@ static void rtl_phy_work(struct rtl8169_private *tp) | |||
| 3689 | if (tp->link_ok(ioaddr)) | 3689 | if (tp->link_ok(ioaddr)) |
| 3690 | return; | 3690 | return; |
| 3691 | 3691 | ||
| 3692 | netif_warn(tp, link, tp->dev, "PHY reset until link up\n"); | 3692 | netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); |
| 3693 | 3693 | ||
| 3694 | tp->phy_reset_enable(tp); | 3694 | tp->phy_reset_enable(tp); |
| 3695 | 3695 | ||
| @@ -6468,6 +6468,8 @@ static int rtl8169_close(struct net_device *dev) | |||
| 6468 | rtl8169_down(dev); | 6468 | rtl8169_down(dev); |
| 6469 | rtl_unlock_work(tp); | 6469 | rtl_unlock_work(tp); |
| 6470 | 6470 | ||
| 6471 | cancel_work_sync(&tp->wk.work); | ||
| 6472 | |||
| 6471 | free_irq(pdev->irq, dev); | 6473 | free_irq(pdev->irq, dev); |
| 6472 | 6474 | ||
| 6473 | dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, | 6475 | dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, |
| @@ -6793,8 +6795,6 @@ static void rtl_remove_one(struct pci_dev *pdev) | |||
| 6793 | rtl8168_driver_stop(tp); | 6795 | rtl8168_driver_stop(tp); |
| 6794 | } | 6796 | } |
| 6795 | 6797 | ||
| 6796 | cancel_work_sync(&tp->wk.work); | ||
| 6797 | |||
| 6798 | netif_napi_del(&tp->napi); | 6798 | netif_napi_del(&tp->napi); |
| 6799 | 6799 | ||
| 6800 | unregister_netdev(dev); | 6800 | unregister_netdev(dev); |
| @@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7088 | 7088 | ||
| 7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
| 7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); | 7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); |
| 7091 | RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); | 7091 | RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); |
| 7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) | 7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) |
| 7093 | tp->features |= RTL_FEATURE_WOL; | 7093 | tp->features |= RTL_FEATURE_WOL; |
| 7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) | 7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) |
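[Editor's note] Two r8169 fixes: cancel_work_sync() moves from rtl_remove_one() to rtl8169_close(), so the deferred task is guaranteed dead before the IRQ and DMA rings it uses are released on every close, not only on driver removal; and the Config5 write now preserves the wake-frame bits (BWF | MWF | UWF | LanWake) instead of clearing everything but PMEStatus. The close-ordering idiom, with a joined thread standing in for the kernel work item (hypothetical user-space analogue):

    #include <pthread.h>
    #include <stdlib.h>

    struct dev { pthread_t worker; int *ring; };

    /* Stop the deferred worker *before* freeing what it may touch,
     * mirroring cancel_work_sync() ahead of free_irq()/dma_free. */
    void dev_close(struct dev *d)
    {
        pthread_join(d->worker, NULL);  /* cancel_work_sync() analogue */
        free(d->ring);                  /* now provably unused */
        d->ring = NULL;
    }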
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index b74a60ab9ac7..30d744235d27 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c | |||
| @@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | |||
| 675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); | 675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); |
| 676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != | 676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != |
| 677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); | 677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); |
| 678 | rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; | 678 | rep_index = spec->type - EFX_FILTER_UC_DEF; |
| 679 | ins_index = rep_index; | 679 | ins_index = rep_index; |
| 680 | 680 | ||
| 681 | spin_lock_bh(&state->lock); | 681 | spin_lock_bh(&state->lock); |
| @@ -1209,7 +1209,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 1209 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); | 1209 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); |
| 1210 | ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); | 1210 | ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); |
| 1211 | 1211 | ||
| 1212 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index); | 1212 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, |
| 1213 | efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, | ||
| 1214 | rxq_index); | ||
| 1213 | rc = efx_filter_set_ipv4_full(&spec, ip->protocol, | 1215 | rc = efx_filter_set_ipv4_full(&spec, ip->protocol, |
| 1214 | ip->daddr, ports[1], ip->saddr, ports[0]); | 1216 | ip->daddr, ports[1], ip->saddr, ports[0]); |
| 1215 | if (rc) | 1217 | if (rc) |
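[Editor's note] The sfc changes correct the default-filter index arithmetic (rep_index is computed relative to EFX_FILTER_UC_DEF, the value actually stored in spec->type) and make the RFS path request RX scatter whenever the NIC runs in scatter mode. The flag derivation in isolation (macro name hypothetical):

    #define FILTER_FLAG_RX_SCATTER 0x1u

    /* As in the efx_filter_rfs() fix: derive filter flags from the
     * live NIC state instead of passing a hard-coded 0. */
    unsigned int rfs_filter_flags(int rx_scatter)
    {
        return rx_scatter ? FILTER_FLAG_RX_SCATTER : 0u;
    }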
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index eb4aea3fe793..f5d7ad75e479 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c | |||
| @@ -1318,7 +1318,7 @@ static void sis900_timer(unsigned long data) | |||
| 1318 | if (duplex){ | 1318 | if (duplex){ |
| 1319 | sis900_set_mode(sis_priv, speed, duplex); | 1319 | sis900_set_mode(sis_priv, speed, duplex); |
| 1320 | sis630_set_eq(net_dev, sis_priv->chipset_rev); | 1320 | sis630_set_eq(net_dev, sis_priv->chipset_rev); |
| 1321 | netif_start_queue(net_dev); | 1321 | netif_carrier_on(net_dev); |
| 1322 | } | 1322 | } |
| 1323 | 1323 | ||
| 1324 | sis_priv->timer.expires = jiffies + HZ; | 1324 | sis_priv->timer.expires = jiffies + HZ; |
| @@ -1336,10 +1336,8 @@ static void sis900_timer(unsigned long data) | |||
| 1336 | status = sis900_default_phy(net_dev); | 1336 | status = sis900_default_phy(net_dev); |
| 1337 | mii_phy = sis_priv->mii; | 1337 | mii_phy = sis_priv->mii; |
| 1338 | 1338 | ||
| 1339 | if (status & MII_STAT_LINK){ | 1339 | if (status & MII_STAT_LINK) |
| 1340 | sis900_check_mode(net_dev, mii_phy); | 1340 | sis900_check_mode(net_dev, mii_phy); |
| 1341 | netif_carrier_on(net_dev); | ||
| 1342 | } | ||
| 1343 | } else { | 1341 | } else { |
| 1344 | /* Link ON -> OFF */ | 1342 | /* Link ON -> OFF */ |
| 1345 | if (!(status & MII_STAT_LINK)){ | 1343 | if (!(status & MII_STAT_LINK)){ |
| @@ -1612,12 +1610,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
| 1612 | unsigned int index_cur_tx, index_dirty_tx; | 1610 | unsigned int index_cur_tx, index_dirty_tx; |
| 1613 | unsigned int count_dirty_tx; | 1611 | unsigned int count_dirty_tx; |
| 1614 | 1612 | ||
| 1615 | /* Don't transmit data before the complete of auto-negotiation */ | ||
| 1616 | if(!sis_priv->autong_complete){ | ||
| 1617 | netif_stop_queue(net_dev); | ||
| 1618 | return NETDEV_TX_BUSY; | ||
| 1619 | } | ||
| 1620 | |||
| 1621 | spin_lock_irqsave(&sis_priv->lock, flags); | 1613 | spin_lock_irqsave(&sis_priv->lock, flags); |
| 1622 | 1614 | ||
| 1623 | /* Calculate the next Tx descriptor entry. */ | 1615 | /* Calculate the next Tx descriptor entry. */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
| @@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
| 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
| 34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
| 35 | unsigned int entry = priv->cur_tx % txsize; | 35 | unsigned int entry = priv->cur_tx % txsize; |
| 36 | struct dma_desc *desc = priv->dma_tx + entry; | 36 | struct dma_desc *desc; |
| 37 | unsigned int nopaged_len = skb_headlen(skb); | 37 | unsigned int nopaged_len = skb_headlen(skb); |
| 38 | unsigned int bmax, len; | 38 | unsigned int bmax, len; |
| 39 | 39 | ||
| 40 | if (priv->extend_desc) | ||
| 41 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
| 42 | else | ||
| 43 | desc = priv->dma_tx + entry; | ||
| 44 | |||
| 40 | if (priv->plat->enh_desc) | 45 | if (priv->plat->enh_desc) |
| 41 | bmax = BUF_SIZE_8KiB; | 46 | bmax = BUF_SIZE_8KiB; |
| 42 | else | 47 | else |
| @@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
| 54 | STMMAC_RING_MODE); | 59 | STMMAC_RING_MODE); |
| 55 | wmb(); | 60 | wmb(); |
| 56 | entry = (++priv->cur_tx) % txsize; | 61 | entry = (++priv->cur_tx) % txsize; |
| 57 | desc = priv->dma_tx + entry; | 62 | |
| 63 | if (priv->extend_desc) | ||
| 64 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
| 65 | else | ||
| 66 | desc = priv->dma_tx + entry; | ||
| 58 | 67 | ||
| 59 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, | 68 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, |
| 60 | len, DMA_TO_DEVICE); | 69 | len, DMA_TO_DEVICE); |
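[Editor's note] The ring_mode fix stops stmmac_jumbo_frm() from indexing the basic descriptor array when the device actually uses extended descriptors; both the initial lookup and the one after advancing cur_tx now honour extend_desc. The selection logic in isolation (struct layouts hypothetical, but the cast relies on the basic descriptor being the first member, as in the driver):

    struct dma_desc  { unsigned int des2; };
    struct dma_edesc { struct dma_desc basic; unsigned int ext[4]; };

    /* Pick the entry'th descriptor from whichever ring is active. */
    struct dma_desc *tx_desc(struct dma_desc *tx, struct dma_edesc *etx,
                             int extend_desc, unsigned int entry)
    {
        if (extend_desc)
            return (struct dma_desc *)(etx + entry);
        return tx + entry;
    }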
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 939 | 939 | ||
| 940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, | 940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, |
| 941 | GFP_KERNEL); | 941 | GFP_KERNEL); |
| 942 | if (unlikely(skb == NULL)) { | 942 | if (!skb) { |
| 943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); | 943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); |
| 944 | return 1; | 944 | return -ENOMEM; |
| 945 | } | 945 | } |
| 946 | skb_reserve(skb, NET_IP_ALIGN); | 946 | skb_reserve(skb, NET_IP_ALIGN); |
| 947 | priv->rx_skbuff[i] = skb; | 947 | priv->rx_skbuff[i] = skb; |
| 948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, | 948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
| 949 | priv->dma_buf_sz, | 949 | priv->dma_buf_sz, |
| 950 | DMA_FROM_DEVICE); | 950 | DMA_FROM_DEVICE); |
| 951 | if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { | ||
| 952 | pr_err("%s: DMA mapping error\n", __func__); | ||
| 953 | dev_kfree_skb_any(skb); | ||
| 954 | return -EINVAL; | ||
| 955 | } | ||
| 951 | 956 | ||
| 952 | p->des2 = priv->rx_skbuff_dma[i]; | 957 | p->des2 = priv->rx_skbuff_dma[i]; |
| 953 | 958 | ||
| @@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 958 | return 0; | 963 | return 0; |
| 959 | } | 964 | } |
| 960 | 965 | ||
| 966 | static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) | ||
| 967 | { | ||
| 968 | if (priv->rx_skbuff[i]) { | ||
| 969 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
| 970 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
| 971 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
| 972 | } | ||
| 973 | priv->rx_skbuff[i] = NULL; | ||
| 974 | } | ||
| 975 | |||
| 961 | /** | 976 | /** |
| 962 | * init_dma_desc_rings - init the RX/TX descriptor rings | 977 | * init_dma_desc_rings - init the RX/TX descriptor rings |
| 963 | * @dev: net device structure | 978 | * @dev: net device structure |
| @@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 965 | * and allocates the socket buffers. It suppors the chained and ring | 980 | * and allocates the socket buffers. It suppors the chained and ring |
| 966 | * modes. | 981 | * modes. |
| 967 | */ | 982 | */ |
| 968 | static void init_dma_desc_rings(struct net_device *dev) | 983 | static int init_dma_desc_rings(struct net_device *dev) |
| 969 | { | 984 | { |
| 970 | int i; | 985 | int i; |
| 971 | struct stmmac_priv *priv = netdev_priv(dev); | 986 | struct stmmac_priv *priv = netdev_priv(dev); |
| 972 | unsigned int txsize = priv->dma_tx_size; | 987 | unsigned int txsize = priv->dma_tx_size; |
| 973 | unsigned int rxsize = priv->dma_rx_size; | 988 | unsigned int rxsize = priv->dma_rx_size; |
| 974 | unsigned int bfsize = 0; | 989 | unsigned int bfsize = 0; |
| 990 | int ret = -ENOMEM; | ||
| 975 | 991 | ||
| 976 | /* Set the max buffer size according to the DESC mode | 992 | /* Set the max buffer size according to the DESC mode |
| 977 | * and the MTU. Note that RING mode allows 16KiB bsize. | 993 | * and the MTU. Note that RING mode allows 16KiB bsize. |
| @@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
| 992 | dma_extended_desc), | 1008 | dma_extended_desc), |
| 993 | &priv->dma_rx_phy, | 1009 | &priv->dma_rx_phy, |
| 994 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
| 1011 | if (!priv->dma_erx) | ||
| 1012 | goto err_dma; | ||
| 1013 | |||
| 995 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * | 1014 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * |
| 996 | sizeof(struct | 1015 | sizeof(struct |
| 997 | dma_extended_desc), | 1016 | dma_extended_desc), |
| 998 | &priv->dma_tx_phy, | 1017 | &priv->dma_tx_phy, |
| 999 | GFP_KERNEL); | 1018 | GFP_KERNEL); |
| 1000 | if ((!priv->dma_erx) || (!priv->dma_etx)) | 1019 | if (!priv->dma_etx) { |
| 1001 | return; | 1020 | dma_free_coherent(priv->device, priv->dma_rx_size * |
| 1021 | sizeof(struct dma_extended_desc), | ||
| 1022 | priv->dma_erx, priv->dma_rx_phy); | ||
| 1023 | goto err_dma; | ||
| 1024 | } | ||
| 1002 | } else { | 1025 | } else { |
| 1003 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * | 1026 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * |
| 1004 | sizeof(struct dma_desc), | 1027 | sizeof(struct dma_desc), |
| 1005 | &priv->dma_rx_phy, | 1028 | &priv->dma_rx_phy, |
| 1006 | GFP_KERNEL); | 1029 | GFP_KERNEL); |
| 1030 | if (!priv->dma_rx) | ||
| 1031 | goto err_dma; | ||
| 1032 | |||
| 1007 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * | 1033 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * |
| 1008 | sizeof(struct dma_desc), | 1034 | sizeof(struct dma_desc), |
| 1009 | &priv->dma_tx_phy, | 1035 | &priv->dma_tx_phy, |
| 1010 | GFP_KERNEL); | 1036 | GFP_KERNEL); |
| 1011 | if ((!priv->dma_rx) || (!priv->dma_tx)) | 1037 | if (!priv->dma_tx) { |
| 1012 | return; | 1038 | dma_free_coherent(priv->device, priv->dma_rx_size * |
| 1039 | sizeof(struct dma_desc), | ||
| 1040 | priv->dma_rx, priv->dma_rx_phy); | ||
| 1041 | goto err_dma; | ||
| 1042 | } | ||
| 1013 | } | 1043 | } |
| 1014 | 1044 | ||
| 1015 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), | 1045 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), |
| 1016 | GFP_KERNEL); | 1046 | GFP_KERNEL); |
| 1047 | if (!priv->rx_skbuff_dma) | ||
| 1048 | goto err_rx_skbuff_dma; | ||
| 1049 | |||
| 1017 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), | 1050 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), |
| 1018 | GFP_KERNEL); | 1051 | GFP_KERNEL); |
| 1052 | if (!priv->rx_skbuff) | ||
| 1053 | goto err_rx_skbuff; | ||
| 1054 | |||
| 1019 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), | 1055 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), |
| 1020 | GFP_KERNEL); | 1056 | GFP_KERNEL); |
| 1057 | if (!priv->tx_skbuff_dma) | ||
| 1058 | goto err_tx_skbuff_dma; | ||
| 1059 | |||
| 1021 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), | 1060 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), |
| 1022 | GFP_KERNEL); | 1061 | GFP_KERNEL); |
| 1062 | if (!priv->tx_skbuff) | ||
| 1063 | goto err_tx_skbuff; | ||
| 1064 | |||
| 1023 | if (netif_msg_probe(priv)) { | 1065 | if (netif_msg_probe(priv)) { |
| 1024 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, | 1066 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, |
| 1025 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); | 1067 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); |
| @@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
| 1034 | else | 1076 | else |
| 1035 | p = priv->dma_rx + i; | 1077 | p = priv->dma_rx + i; |
| 1036 | 1078 | ||
| 1037 | if (stmmac_init_rx_buffers(priv, p, i)) | 1079 | ret = stmmac_init_rx_buffers(priv, p, i); |
| 1038 | break; | 1080 | if (ret) |
| 1081 | goto err_init_rx_buffers; | ||
| 1039 | 1082 | ||
| 1040 | if (netif_msg_probe(priv)) | 1083 | if (netif_msg_probe(priv)) |
| 1041 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], | 1084 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], |
| @@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
| 1081 | 1124 | ||
| 1082 | if (netif_msg_hw(priv)) | 1125 | if (netif_msg_hw(priv)) |
| 1083 | stmmac_display_rings(priv); | 1126 | stmmac_display_rings(priv); |
| 1127 | |||
| 1128 | return 0; | ||
| 1129 | err_init_rx_buffers: | ||
| 1130 | while (--i >= 0) | ||
| 1131 | stmmac_free_rx_buffers(priv, i); | ||
| 1132 | kfree(priv->tx_skbuff); | ||
| 1133 | err_tx_skbuff: | ||
| 1134 | kfree(priv->tx_skbuff_dma); | ||
| 1135 | err_tx_skbuff_dma: | ||
| 1136 | kfree(priv->rx_skbuff); | ||
| 1137 | err_rx_skbuff: | ||
| 1138 | kfree(priv->rx_skbuff_dma); | ||
| 1139 | err_rx_skbuff_dma: | ||
| 1140 | if (priv->extend_desc) { | ||
| 1141 | dma_free_coherent(priv->device, priv->dma_tx_size * | ||
| 1142 | sizeof(struct dma_extended_desc), | ||
| 1143 | priv->dma_etx, priv->dma_tx_phy); | ||
| 1144 | dma_free_coherent(priv->device, priv->dma_rx_size * | ||
| 1145 | sizeof(struct dma_extended_desc), | ||
| 1146 | priv->dma_erx, priv->dma_rx_phy); | ||
| 1147 | } else { | ||
| 1148 | dma_free_coherent(priv->device, | ||
| 1149 | priv->dma_tx_size * sizeof(struct dma_desc), | ||
| 1150 | priv->dma_tx, priv->dma_tx_phy); | ||
| 1151 | dma_free_coherent(priv->device, | ||
| 1152 | priv->dma_rx_size * sizeof(struct dma_desc), | ||
| 1153 | priv->dma_rx, priv->dma_rx_phy); | ||
| 1154 | } | ||
| 1155 | err_dma: | ||
| 1156 | return ret; | ||
| 1084 | } | 1157 | } |
| 1085 | 1158 | ||
| 1086 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) | 1159 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) |
| 1087 | { | 1160 | { |
| 1088 | int i; | 1161 | int i; |
| 1089 | 1162 | ||
| 1090 | for (i = 0; i < priv->dma_rx_size; i++) { | 1163 | for (i = 0; i < priv->dma_rx_size; i++) |
| 1091 | if (priv->rx_skbuff[i]) { | 1164 | stmmac_free_rx_buffers(priv, i); |
| 1092 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
| 1093 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
| 1094 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
| 1095 | } | ||
| 1096 | priv->rx_skbuff[i] = NULL; | ||
| 1097 | } | ||
| 1098 | } | 1165 | } |
| 1099 | 1166 | ||
| 1100 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) | 1167 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) |
| @@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) | |||
| 1560 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); | 1627 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); |
| 1561 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | 1628 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); |
| 1562 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | 1629 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
| 1563 | init_dma_desc_rings(dev); | 1630 | |
| 1631 | ret = init_dma_desc_rings(dev); | ||
| 1632 | if (ret < 0) { | ||
| 1633 | pr_err("%s: DMA descriptors initialization failed\n", __func__); | ||
| 1634 | goto dma_desc_error; | ||
| 1635 | } | ||
| 1564 | 1636 | ||
| 1565 | /* DMA initialization and SW reset */ | 1637 | /* DMA initialization and SW reset */ |
| 1566 | ret = stmmac_init_dma_engine(priv); | 1638 | ret = stmmac_init_dma_engine(priv); |
| 1567 | if (ret < 0) { | 1639 | if (ret < 0) { |
| 1568 | pr_err("%s: DMA initialization failed\n", __func__); | 1640 | pr_err("%s: DMA engine initialization failed\n", __func__); |
| 1569 | goto init_error; | 1641 | goto init_error; |
| 1570 | } | 1642 | } |
| 1571 | 1643 | ||
| @@ -1672,6 +1744,7 @@ wolirq_error: | |||
| 1672 | 1744 | ||
| 1673 | init_error: | 1745 | init_error: |
| 1674 | free_dma_desc_resources(priv); | 1746 | free_dma_desc_resources(priv); |
| 1747 | dma_desc_error: | ||
| 1675 | if (priv->phydev) | 1748 | if (priv->phydev) |
| 1676 | phy_disconnect(priv->phydev); | 1749 | phy_disconnect(priv->phydev); |
| 1677 | phy_error: | 1750 | phy_error: |
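The stmmac hunks above convert init_dma_desc_rings() from a void function that bailed out silently into one that unwinds every allocation in reverse order and returns an error code, which stmmac_open() now propagates through the new dma_desc_error label. A minimal sketch of the same goto-ladder idiom, using hypothetical buffer names rather than the driver's real ring fields:

#include <linux/errno.h>
#include <linux/slab.h>

static int setup_three_buffers(void **a, void **b, void **c)
{
	int ret = -ENOMEM;

	*a = kmalloc(4096, GFP_KERNEL);
	if (!*a)
		goto err_a;
	*b = kmalloc(4096, GFP_KERNEL);
	if (!*b)
		goto err_b;
	*c = kmalloc(4096, GFP_KERNEL);
	if (!*c)
		goto err_c;
	return 0;

err_c:				/* unwind strictly in reverse order */
	kfree(*b);
err_b:
	kfree(*a);
err_a:
	return ret;
}

Each label frees exactly what was live when its goto fired, so no path leaks and no path double-frees.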
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 03de76c7a177..1c83a44c547b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, | |||
| 71 | plat->force_sf_dma_mode = 1; | 71 | plat->force_sf_dma_mode = 1; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); | 74 | if (of_find_property(np, "snps,pbl", NULL)) { |
| 75 | if (!dma_cfg) | 75 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), |
| 76 | return -ENOMEM; | 76 | GFP_KERNEL); |
| 77 | 77 | if (!dma_cfg) | |
| 78 | plat->dma_cfg = dma_cfg; | 78 | return -ENOMEM; |
| 79 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); | 79 | plat->dma_cfg = dma_cfg; |
| 80 | dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); | 80 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); |
| 81 | dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); | 81 | dma_cfg->fixed_burst = |
| 82 | of_property_read_bool(np, "snps,fixed-burst"); | ||
| 83 | dma_cfg->mixed_burst = | ||
| 84 | of_property_read_bool(np, "snps,mixed-burst"); | ||
| 85 | } | ||
| 82 | 86 | ||
| 83 | return 0; | 87 | return 0; |
| 84 | } | 88 | } |
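This platform hunk allocates dma_cfg only when the device tree actually carries "snps,pbl", leaving plat->dma_cfg NULL otherwise, so consumers must treat NULL as "use hardware defaults". A hedged sketch of such a consumer, assuming the 3.11-era layout of struct plat_stmmacenet_data and taking 8 as an assumed default burst length:

#include <linux/stmmac.h>

static u32 pbl_or_default(const struct plat_stmmacenet_data *plat)
{
	/* a NULL dma_cfg now simply means the DT omitted snps,pbl */
	return plat->dma_cfg ? plat->dma_cfg->pbl : 8;
}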
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 05a1674e204f..22a7a4336211 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1867,7 +1867,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
| 1867 | 1867 | ||
| 1868 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | 1868 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { |
| 1869 | for (i = res->start; i <= res->end; i++) { | 1869 | for (i = res->start; i <= res->end; i++) { |
| 1870 | if (request_irq(i, cpsw_interrupt, IRQF_DISABLED, | 1870 | if (request_irq(i, cpsw_interrupt, 0, |
| 1871 | dev_name(&pdev->dev), priv)) { | 1871 | dev_name(&pdev->dev), priv)) { |
| 1872 | dev_err(priv->dev, "error attaching irq\n"); | 1872 | dev_err(priv->dev, "error attaching irq\n"); |
| 1873 | goto clean_ale_ret; | 1873 | goto clean_ale_ret; |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 07b176bcf929..1a222bce4bd7 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
| @@ -1568,8 +1568,7 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1568 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | 1568 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { |
| 1569 | for (i = res->start; i <= res->end; i++) { | 1569 | for (i = res->start; i <= res->end; i++) { |
| 1570 | if (devm_request_irq(&priv->pdev->dev, i, emac_irq, | 1570 | if (devm_request_irq(&priv->pdev->dev, i, emac_irq, |
| 1571 | IRQF_DISABLED, | 1571 | 0, ndev->name, ndev)) |
| 1572 | ndev->name, ndev)) | ||
| 1573 | goto rollback; | 1572 | goto rollback; |
| 1574 | } | 1573 | } |
| 1575 | k++; | 1574 | k++; |
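Both TI hunks drop IRQF_DISABLED, which had been a no-op for years: genirq runs every handler with local interrupts disabled regardless, so passing 0 is the idiomatic modern spelling. A small sketch:

#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	/* handlers already run with local IRQs off; no flag needed */
	return IRQ_HANDLED;
}

static int demo_attach(int irq, void *priv)
{
	return request_irq(irq, demo_irq, 0, "demo", priv);
}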
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index ad32af67e618..9c805e0c0cae 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c | |||
| @@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, | |||
| 1466 | { | 1466 | { |
| 1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
| 1468 | /* NAPI */ | 1468 | /* NAPI */ |
| 1469 | netif_napi_add(netdev, napi, | 1469 | netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); |
| 1470 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); | ||
| 1471 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; | 1470 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; |
| 1472 | netdev->netdev_ops = &gelic_netdevice_ops; | 1471 | netdev->netdev_ops = &gelic_netdevice_ops; |
| 1473 | } | 1472 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index a93df6ac1909..309abb472aa2 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h | |||
| @@ -37,7 +37,6 @@ | |||
| 37 | #define GELIC_NET_RXBUF_ALIGN 128 | 37 | #define GELIC_NET_RXBUF_ALIGN 128 |
| 38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ | 38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ |
| 39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ | 39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ |
| 40 | #define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) | ||
| 41 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL | 40 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL |
| 42 | 41 | ||
| 43 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ | 42 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ |
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1d6dc41f755d..d01cacf8a7c2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
| @@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
| 2100 | 2100 | ||
| 2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); | 2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
| 2102 | } | 2102 | } |
| 2103 | netif_rx(skb); | 2103 | netif_receive_skb(skb); |
| 2104 | 2104 | ||
| 2105 | stats->rx_bytes += pkt_len; | 2105 | stats->rx_bytes += pkt_len; |
| 2106 | stats->rx_packets++; | 2106 | stats->rx_packets++; |
| @@ -2884,6 +2884,7 @@ out: | |||
| 2884 | return ret; | 2884 | return ret; |
| 2885 | 2885 | ||
| 2886 | err_iounmap: | 2886 | err_iounmap: |
| 2887 | netif_napi_del(&vptr->napi); | ||
| 2887 | iounmap(regs); | 2888 | iounmap(regs); |
| 2888 | err_free_dev: | 2889 | err_free_dev: |
| 2889 | free_netdev(netdev); | 2890 | free_netdev(netdev); |
| @@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev) | |||
| 2904 | struct velocity_info *vptr = netdev_priv(netdev); | 2905 | struct velocity_info *vptr = netdev_priv(netdev); |
| 2905 | 2906 | ||
| 2906 | unregister_netdev(netdev); | 2907 | unregister_netdev(netdev); |
| 2908 | netif_napi_del(&vptr->napi); | ||
| 2907 | iounmap(vptr->mac_regs); | 2909 | iounmap(vptr->mac_regs); |
| 2908 | free_netdev(netdev); | 2910 | free_netdev(netdev); |
| 2909 | velocity_nics--; | 2911 | velocity_nics--; |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index e90e1f46121e..64b4639f43b6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | |||
| @@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) | |||
| 175 | printk(KERN_WARNING "Setting MDIO clock divisor to " | 175 | printk(KERN_WARNING "Setting MDIO clock divisor to " |
| 176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); | 176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); |
| 177 | clk_div = DEFAULT_CLOCK_DIVISOR; | 177 | clk_div = DEFAULT_CLOCK_DIVISOR; |
| 178 | of_node_put(np1); | ||
| 178 | goto issue; | 179 | goto issue; |
| 179 | } | 180 | } |
| 180 | 181 | ||
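The axienet fix plugs a device-node refcount leak: np1 came from an of_ lookup that takes a reference, and the early "goto issue" path skipped the of_node_put() that the other exits perform. A minimal sketch of keeping the put balanced on every path:

#include <linux/of.h>

static int read_parent_freq(struct device_node *np, u32 *freq)
{
	struct device_node *parent = of_get_parent(np); /* takes a ref */
	int ret;

	if (!parent)
		return -ENODEV;

	ret = of_property_read_u32(parent, "clock-frequency", freq);
	of_node_put(parent);	/* dropped on every path, success or not */
	return ret;
}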
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 51f2bc376101..2dcc60fb37f1 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c | |||
| @@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
| 210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); | 210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); |
| 211 | pci_write_config_byte(pcidev,0x5a,0xc0); | 211 | pci_write_config_byte(pcidev,0x5a,0xc0); |
| 212 | WriteLPCReg(0x28, 0x70 ); | 212 | WriteLPCReg(0x28, 0x70 ); |
| 213 | if (via_ircc_open(pcidev, &info, 0x3076) == 0) | 213 | rc = via_ircc_open(pcidev, &info, 0x3076); |
| 214 | rc=0; | ||
| 215 | } else | 214 | } else |
| 216 | rc = -ENODEV; // IR not turned on | 215 | rc = -ENODEV; // IR not turned on |
| 217 | } else { //Not VT1211 | 216 | } else { //Not VT1211 |
| @@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
| 249 | info.irq=FirIRQ; | 248 | info.irq=FirIRQ; |
| 250 | info.dma=FirDRQ1; | 249 | info.dma=FirDRQ1; |
| 251 | info.dma2=FirDRQ0; | 250 | info.dma2=FirDRQ0; |
| 252 | if (via_ircc_open(pcidev, &info, 0x3096) == 0) | 251 | rc = via_ircc_open(pcidev, &info, 0x3096); |
| 253 | rc=0; | ||
| 254 | } else | 252 | } else |
| 255 | rc = -ENODEV; // IR not turned on !!!!! | 253 | rc = -ENODEV; // IR not turned on !!!!! |
| 256 | }//Not VT1211 | 254 | }//Not VT1211 |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 18373b6ae37d..16b43bf544b7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -337,8 +337,11 @@ static int macvlan_open(struct net_device *dev) | |||
| 337 | int err; | 337 | int err; |
| 338 | 338 | ||
| 339 | if (vlan->port->passthru) { | 339 | if (vlan->port->passthru) { |
| 340 | if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) | 340 | if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { |
| 341 | dev_set_promiscuity(lowerdev, 1); | 341 | err = dev_set_promiscuity(lowerdev, 1); |
| 342 | if (err < 0) | ||
| 343 | goto out; | ||
| 344 | } | ||
| 342 | goto hash_add; | 345 | goto hash_add; |
| 343 | } | 346 | } |
| 344 | 347 | ||
| @@ -736,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
| 736 | return -EADDRNOTAVAIL; | 739 | return -EADDRNOTAVAIL; |
| 737 | } | 740 | } |
| 738 | 741 | ||
| 742 | if (data && data[IFLA_MACVLAN_FLAGS] && | ||
| 743 | nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) | ||
| 744 | return -EINVAL; | ||
| 745 | |||
| 739 | if (data && data[IFLA_MACVLAN_MODE]) { | 746 | if (data && data[IFLA_MACVLAN_MODE]) { |
| 740 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { | 747 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { |
| 741 | case MACVLAN_MODE_PRIVATE: | 748 | case MACVLAN_MODE_PRIVATE: |
| @@ -863,6 +870,18 @@ static int macvlan_changelink(struct net_device *dev, | |||
| 863 | struct nlattr *tb[], struct nlattr *data[]) | 870 | struct nlattr *tb[], struct nlattr *data[]) |
| 864 | { | 871 | { |
| 865 | struct macvlan_dev *vlan = netdev_priv(dev); | 872 | struct macvlan_dev *vlan = netdev_priv(dev); |
| 873 | enum macvlan_mode mode; | ||
| 874 | bool set_mode = false; | ||
| 875 | |||
| 876 | /* Validate mode, but don't set yet: setting flags may fail. */ | ||
| 877 | if (data && data[IFLA_MACVLAN_MODE]) { | ||
| 878 | set_mode = true; | ||
| 879 | mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); | ||
| 880 | /* Passthrough mode can't be set or cleared dynamically */ | ||
| 881 | if ((mode == MACVLAN_MODE_PASSTHRU) != | ||
| 882 | (vlan->mode == MACVLAN_MODE_PASSTHRU)) | ||
| 883 | return -EINVAL; | ||
| 884 | } | ||
| 866 | 885 | ||
| 867 | if (data && data[IFLA_MACVLAN_FLAGS]) { | 886 | if (data && data[IFLA_MACVLAN_FLAGS]) { |
| 868 | __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); | 887 | __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); |
| @@ -879,8 +898,8 @@ static int macvlan_changelink(struct net_device *dev, | |||
| 879 | } | 898 | } |
| 880 | vlan->flags = flags; | 899 | vlan->flags = flags; |
| 881 | } | 900 | } |
| 882 | if (data && data[IFLA_MACVLAN_MODE]) | 901 | if (set_mode) |
| 883 | vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); | 902 | vlan->mode = mode; |
| 884 | return 0; | 903 | return 0; |
| 885 | } | 904 | } |
| 886 | 905 | ||
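The macvlan hunks share one theme: validate every netlink attribute (unknown flag bits, illegal passthru transitions) before touching device state, so a rejected request leaves the device exactly as it was. A hedged sketch of the validate-then-commit shape; the struct, flag mask, and mode limit are illustrative, not macvlan's:

#include <net/netlink.h>

#define DEMO_KNOWN_FLAGS	0x1
#define DEMO_MODE_MAX		3

struct demo_dev {
	u16 flags;
	u32 mode;
};

static int demo_changelink(struct demo_dev *d, struct nlattr *flags_attr,
			   struct nlattr *mode_attr)
{
	u16 flags = flags_attr ? nla_get_u16(flags_attr) : d->flags;
	u32 mode = mode_attr ? nla_get_u32(mode_attr) : d->mode;

	if (flags & ~DEMO_KNOWN_FLAGS)	/* validate everything first... */
		return -EINVAL;
	if (mode > DEMO_MODE_MAX)
		return -EINVAL;

	d->flags = flags;		/* ...then commit in one go */
	d->mode = mode;
	return 0;
}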
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a98fb0ed6aef..ea53abb20988 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
| @@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops; | |||
| 68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ | 68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ |
| 69 | NETIF_F_TSO6 | NETIF_F_UFO) | 69 | NETIF_F_TSO6 | NETIF_F_UFO) |
| 70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) | 70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) |
| 71 | #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) | ||
| 72 | |||
| 71 | /* | 73 | /* |
| 72 | * RCU usage: | 74 | * RCU usage: |
| 73 | * The macvtap_queue and the macvlan_dev are loosely coupled, the | 75 | * The macvtap_queue and the macvlan_dev are loosely coupled, the |
| @@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
| 278 | { | 280 | { |
| 279 | struct macvlan_dev *vlan = netdev_priv(dev); | 281 | struct macvlan_dev *vlan = netdev_priv(dev); |
| 280 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); | 282 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); |
| 281 | netdev_features_t features; | 283 | netdev_features_t features = TAP_FEATURES; |
| 284 | |||
| 282 | if (!q) | 285 | if (!q) |
| 283 | goto drop; | 286 | goto drop; |
| 284 | 287 | ||
| @@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
| 287 | 290 | ||
| 288 | skb->dev = dev; | 291 | skb->dev = dev; |
| 289 | /* Apply the forward feature mask so that we perform segmentation | 292 | /* Apply the forward feature mask so that we perform segmentation |
| 290 | * according to users wishes. | 293 | * according to user's wishes. This only works if VNET_HDR is |
| 294 | * enabled. | ||
| 291 | */ | 295 | */ |
| 292 | features = netif_skb_features(skb) & vlan->tap_features; | 296 | if (q->flags & IFF_VNET_HDR) |
| 297 | features |= vlan->tap_features; | ||
| 293 | if (netif_needs_gso(skb, features)) { | 298 | if (netif_needs_gso(skb, features)) { |
| 294 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); | 299 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); |
| 295 | 300 | ||
| @@ -818,10 +823,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
| 818 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; | 823 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
| 819 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; | 824 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; |
| 820 | } | 825 | } |
| 821 | if (vlan) | 826 | if (vlan) { |
| 827 | local_bh_disable(); | ||
| 822 | macvlan_start_xmit(skb, vlan->dev); | 828 | macvlan_start_xmit(skb, vlan->dev); |
| 823 | else | 829 | local_bh_enable(); |
| 830 | } else { | ||
| 824 | kfree_skb(skb); | 831 | kfree_skb(skb); |
| 832 | } | ||
| 825 | rcu_read_unlock(); | 833 | rcu_read_unlock(); |
| 826 | 834 | ||
| 827 | return total_len; | 835 | return total_len; |
| @@ -912,8 +920,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
| 912 | done: | 920 | done: |
| 913 | rcu_read_lock(); | 921 | rcu_read_lock(); |
| 914 | vlan = rcu_dereference(q->vlan); | 922 | vlan = rcu_dereference(q->vlan); |
| 915 | if (vlan) | 923 | if (vlan) { |
| 924 | preempt_disable(); | ||
| 916 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); | 925 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); |
| 926 | preempt_enable(); | ||
| 927 | } | ||
| 917 | rcu_read_unlock(); | 928 | rcu_read_unlock(); |
| 918 | 929 | ||
| 919 | return ret ? ret : copied; | 930 | return ret ? ret : copied; |
| @@ -1058,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) | |||
| 1058 | /* tap_features are the same as features on tun/tap and | 1069 | /* tap_features are the same as features on tun/tap and |
| 1059 | * reflect user expectations. | 1070 | * reflect user expectations. |
| 1060 | */ | 1071 | */ |
| 1061 | vlan->tap_features = vlan->dev->features & | 1072 | vlan->tap_features = feature_mask; |
| 1062 | (feature_mask | ~TUN_OFFLOADS); | ||
| 1063 | vlan->set_features = features; | 1073 | vlan->set_features = features; |
| 1064 | netdev_update_features(vlan->dev); | 1074 | netdev_update_features(vlan->dev); |
| 1065 | 1075 | ||
| @@ -1155,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, | |||
| 1155 | TUN_F_TSO_ECN | TUN_F_UFO)) | 1165 | TUN_F_TSO_ECN | TUN_F_UFO)) |
| 1156 | return -EINVAL; | 1166 | return -EINVAL; |
| 1157 | 1167 | ||
| 1158 | /* TODO: only accept frames with the features that | ||
| 1159 | got enabled for forwarded frames */ | ||
| 1160 | if (!(q->flags & IFF_VNET_HDR)) | ||
| 1161 | return -EINVAL; | ||
| 1162 | rtnl_lock(); | 1168 | rtnl_lock(); |
| 1163 | ret = set_offload(q, arg); | 1169 | ret = set_offload(q, arg); |
| 1164 | rtnl_unlock(); | 1170 | rtnl_unlock(); |
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 61d3f4ebf52e..7f25e49ae37f 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c | |||
| @@ -40,7 +40,7 @@ struct sun4i_mdio_data { | |||
| 40 | static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | 40 | static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
| 41 | { | 41 | { |
| 42 | struct sun4i_mdio_data *data = bus->priv; | 42 | struct sun4i_mdio_data *data = bus->priv; |
| 43 | unsigned long start_jiffies; | 43 | unsigned long timeout_jiffies; |
| 44 | int value; | 44 | int value; |
| 45 | 45 | ||
| 46 | /* issue the phy address and reg */ | 46 | /* issue the phy address and reg */ |
| @@ -49,10 +49,9 @@ static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |||
| 49 | writel(0x1, data->membase + EMAC_MAC_MCMD_REG); | 49 | writel(0x1, data->membase + EMAC_MAC_MCMD_REG); |
| 50 | 50 | ||
| 51 | /* Wait read complete */ | 51 | /* Wait read complete */ |
| 52 | start_jiffies = jiffies; | 52 | timeout_jiffies = jiffies + MDIO_TIMEOUT; |
| 53 | while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { | 53 | while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { |
| 54 | if (time_after(start_jiffies, | 54 | if (time_is_before_jiffies(timeout_jiffies)) |
| 55 | start_jiffies + MDIO_TIMEOUT)) | ||
| 56 | return -ETIMEDOUT; | 55 | return -ETIMEDOUT; |
| 57 | msleep(1); | 56 | msleep(1); |
| 58 | } | 57 | } |
| @@ -69,7 +68,7 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |||
| 69 | u16 value) | 68 | u16 value) |
| 70 | { | 69 | { |
| 71 | struct sun4i_mdio_data *data = bus->priv; | 70 | struct sun4i_mdio_data *data = bus->priv; |
| 72 | unsigned long start_jiffies; | 71 | unsigned long timeout_jiffies; |
| 73 | 72 | ||
| 74 | /* issue the phy address and reg */ | 73 | /* issue the phy address and reg */ |
| 75 | writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG); | 74 | writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG); |
| @@ -77,10 +76,9 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |||
| 77 | writel(0x1, data->membase + EMAC_MAC_MCMD_REG); | 76 | writel(0x1, data->membase + EMAC_MAC_MCMD_REG); |
| 78 | 77 | ||
| 79 | /* Wait read complete */ | 78 | /* Wait read complete */ |
| 80 | start_jiffies = jiffies; | 79 | timeout_jiffies = jiffies + MDIO_TIMEOUT; |
| 81 | while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { | 80 | while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) { |
| 82 | if (time_after(start_jiffies, | 81 | if (time_is_before_jiffies(timeout_jiffies)) |
| 83 | start_jiffies + MDIO_TIMEOUT)) | ||
| 84 | return -ETIMEDOUT; | 82 | return -ETIMEDOUT; |
| 85 | msleep(1); | 83 | msleep(1); |
| 86 | } | 84 | } |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 8e7af8354342..138de837977f 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #define RTL821x_INER_INIT 0x6400 | 23 | #define RTL821x_INER_INIT 0x6400 |
| 24 | #define RTL821x_INSR 0x13 | 24 | #define RTL821x_INSR 0x13 |
| 25 | 25 | ||
| 26 | #define RTL8211E_INER_LINK_STAT 0x10 | 26 | #define RTL8211E_INER_LINK_STATUS 0x400 |
| 27 | 27 | ||
| 28 | MODULE_DESCRIPTION("Realtek PHY driver"); | 28 | MODULE_DESCRIPTION("Realtek PHY driver"); |
| 29 | MODULE_AUTHOR("Johnson Leung"); | 29 | MODULE_AUTHOR("Johnson Leung"); |
| @@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev) | |||
| 57 | 57 | ||
| 58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | 58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) |
| 59 | err = phy_write(phydev, RTL821x_INER, | 59 | err = phy_write(phydev, RTL821x_INER, |
| 60 | RTL8211E_INER_LINK_STAT); | 60 | RTL8211E_INER_LINK_STATUS); |
| 61 | else | 61 | else |
| 62 | err = phy_write(phydev, RTL821x_INER, 0); | 62 | err = phy_write(phydev, RTL821x_INER, 0); |
| 63 | 63 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db690a372260..71af122edf2d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1074 | u32 rxhash; | 1074 | u32 rxhash; |
| 1075 | 1075 | ||
| 1076 | if (!(tun->flags & TUN_NO_PI)) { | 1076 | if (!(tun->flags & TUN_NO_PI)) { |
| 1077 | if ((len -= sizeof(pi)) > total_len) | 1077 | if (len < sizeof(pi)) |
| 1078 | return -EINVAL; | 1078 | return -EINVAL; |
| 1079 | len -= sizeof(pi); | ||
| 1079 | 1080 | ||
| 1080 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) | 1081 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) |
| 1081 | return -EFAULT; | 1082 | return -EFAULT; |
| @@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1083 | } | 1084 | } |
| 1084 | 1085 | ||
| 1085 | if (tun->flags & TUN_VNET_HDR) { | 1086 | if (tun->flags & TUN_VNET_HDR) { |
| 1086 | if ((len -= tun->vnet_hdr_sz) > total_len) | 1087 | if (len < tun->vnet_hdr_sz) |
| 1087 | return -EINVAL; | 1088 | return -EINVAL; |
| 1089 | len -= tun->vnet_hdr_sz; | ||
| 1088 | 1090 | ||
| 1089 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) | 1091 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) |
| 1090 | return -EFAULT; | 1092 | return -EFAULT; |
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 1e3c302d94fe..2bc87e3a8141 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
| @@ -1029,10 +1029,10 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 1029 | dev->mii.supports_gmii = 1; | 1029 | dev->mii.supports_gmii = 1; |
| 1030 | 1030 | ||
| 1031 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 1031 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 1032 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; | 1032 | NETIF_F_RXCSUM; |
| 1033 | 1033 | ||
| 1034 | dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 1034 | dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 1035 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; | 1035 | NETIF_F_RXCSUM; |
| 1036 | 1036 | ||
| 1037 | /* Enable checksum offload */ | 1037 | /* Enable checksum offload */ |
| 1038 | *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | | 1038 | *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | |
| @@ -1173,7 +1173,6 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
| 1173 | if (((skb->len + 8) % frame_size) == 0) | 1173 | if (((skb->len + 8) % frame_size) == 0) |
| 1174 | tx_hdr2 |= 0x80008000; /* Enable padding */ | 1174 | tx_hdr2 |= 0x80008000; /* Enable padding */ |
| 1175 | 1175 | ||
| 1176 | skb_linearize(skb); | ||
| 1177 | headroom = skb_headroom(skb); | 1176 | headroom = skb_headroom(skb); |
| 1178 | tailroom = skb_tailroom(skb); | 1177 | tailroom = skb_tailroom(skb); |
| 1179 | 1178 | ||
| @@ -1317,10 +1316,10 @@ static int ax88179_reset(struct usbnet *dev) | |||
| 1317 | 1, 1, tmp); | 1316 | 1, 1, tmp); |
| 1318 | 1317 | ||
| 1319 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 1318 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 1320 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; | 1319 | NETIF_F_RXCSUM; |
| 1321 | 1320 | ||
| 1322 | dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 1321 | dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 1323 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO; | 1322 | NETIF_F_RXCSUM; |
| 1324 | 1323 | ||
| 1325 | /* Enable checksum offload */ | 1324 | /* Enable checksum offload */ |
| 1326 | *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | | 1325 | *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 872819851aef..25ba7eca9a13 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
| @@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = { | |||
| 400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
| 401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | 401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, |
| 402 | }, | 402 | }, |
| 403 | /* HP hs2434 Mobile Broadband Module needs ZLPs */ | ||
| 404 | { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | ||
| 405 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | ||
| 406 | }, | ||
| 403 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 407 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
| 404 | .driver_info = (unsigned long)&cdc_mbim_info, | 408 | .driver_info = (unsigned long)&cdc_mbim_info, |
| 405 | }, | 409 | }, |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index cba1d46e672e..86292e6aaf49 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
| @@ -2816,13 +2816,16 @@ exit: | |||
| 2816 | static int hso_get_config_data(struct usb_interface *interface) | 2816 | static int hso_get_config_data(struct usb_interface *interface) |
| 2817 | { | 2817 | { |
| 2818 | struct usb_device *usbdev = interface_to_usbdev(interface); | 2818 | struct usb_device *usbdev = interface_to_usbdev(interface); |
| 2819 | u8 config_data[17]; | 2819 | u8 *config_data = kmalloc(17, GFP_KERNEL); |
| 2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; | 2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; |
| 2821 | s32 result; | 2821 | s32 result; |
| 2822 | 2822 | ||
| 2823 | if (!config_data) | ||
| 2824 | return -ENOMEM; | ||
| 2823 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), | 2825 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), |
| 2824 | 0x86, 0xC0, 0, 0, config_data, 17, | 2826 | 0x86, 0xC0, 0, 0, config_data, 17, |
| 2825 | USB_CTRL_SET_TIMEOUT) != 0x11) { | 2827 | USB_CTRL_SET_TIMEOUT) != 0x11) { |
| 2828 | kfree(config_data); | ||
| 2826 | return -EIO; | 2829 | return -EIO; |
| 2827 | } | 2830 | } |
| 2828 | 2831 | ||
| @@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface) | |||
| 2873 | if (config_data[16] & 0x1) | 2876 | if (config_data[16] & 0x1) |
| 2874 | result |= HSO_INFO_CRC_BUG; | 2877 | result |= HSO_INFO_CRC_BUG; |
| 2875 | 2878 | ||
| 2879 | kfree(config_data); | ||
| 2876 | return result; | 2880 | return result; |
| 2877 | } | 2881 | } |
| 2878 | 2882 | ||
| @@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface, | |||
| 2886 | struct hso_shared_int *shared_int; | 2890 | struct hso_shared_int *shared_int; |
| 2887 | struct hso_device *tmp_dev = NULL; | 2891 | struct hso_device *tmp_dev = NULL; |
| 2888 | 2892 | ||
| 2893 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
| 2894 | dev_err(&interface->dev, "Not our interface\n"); | ||
| 2895 | return -ENODEV; | ||
| 2896 | } | ||
| 2897 | |||
| 2889 | if_num = interface->altsetting->desc.bInterfaceNumber; | 2898 | if_num = interface->altsetting->desc.bInterfaceNumber; |
| 2890 | 2899 | ||
| 2891 | /* Get the interface/port specification from either driver_info or from | 2900 | /* Get the interface/port specification from either driver_info or from |
| @@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface, | |||
| 2895 | else | 2904 | else |
| 2896 | port_spec = hso_get_config_data(interface); | 2905 | port_spec = hso_get_config_data(interface); |
| 2897 | 2906 | ||
| 2898 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
| 2899 | dev_err(&interface->dev, "Not our interface\n"); | ||
| 2900 | return -ENODEV; | ||
| 2901 | } | ||
| 2902 | /* Check if we need to switch to alt interfaces prior to port | 2907 | /* Check if we need to switch to alt interfaces prior to port |
| 2903 | * configuration */ | 2908 | * configuration */ |
| 2904 | if (interface->num_altsetting > 1) | 2909 | if (interface->num_altsetting > 1) |
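The hso change moves config_data from the stack to kmalloc() because usb_control_msg() DMA-maps its buffer, and stack memory is not DMA-safe; USB transfer buffers must be heap-allocated. A sketch of the bounce-buffer pattern, reusing the request values from the hunk (0x86/0xC0, 17 bytes):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int read_config_block(struct usb_device *udev, u8 *out)
{
	u8 *buf = kmalloc(17, GFP_KERNEL);	/* DMA-safe, unlike the stack */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x86, 0xC0, 0, 0, buf, 17,
			      USB_CTRL_SET_TIMEOUT);
	if (ret == 17)
		memcpy(out, buf, 17);
	kfree(buf);
	return ret == 17 ? 0 : -EIO;
}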
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ee13f9eb740c..11c51f275366 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -344,17 +344,41 @@ static const int multicast_filter_limit = 32; | |||
| 344 | static | 344 | static |
| 345 | int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) | 345 | int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) |
| 346 | { | 346 | { |
| 347 | return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), | 347 | int ret; |
| 348 | void *tmp; | ||
| 349 | |||
| 350 | tmp = kmalloc(size, GFP_KERNEL); | ||
| 351 | if (!tmp) | ||
| 352 | return -ENOMEM; | ||
| 353 | |||
| 354 | ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), | ||
| 348 | RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, | 355 | RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, |
| 349 | value, index, data, size, 500); | 356 | value, index, tmp, size, 500); |
| 357 | |||
| 358 | memcpy(data, tmp, size); | ||
| 359 | kfree(tmp); | ||
| 360 | |||
| 361 | return ret; | ||
| 350 | } | 362 | } |
| 351 | 363 | ||
| 352 | static | 364 | static |
| 353 | int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) | 365 | int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) |
| 354 | { | 366 | { |
| 355 | return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), | 367 | int ret; |
| 368 | void *tmp; | ||
| 369 | |||
| 370 | tmp = kmalloc(size, GFP_KERNEL); | ||
| 371 | if (!tmp) | ||
| 372 | return -ENOMEM; | ||
| 373 | |||
| 374 | memcpy(tmp, data, size); | ||
| 375 | |||
| 376 | ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), | ||
| 356 | RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, | 377 | RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, |
| 357 | value, index, data, size, 500); | 378 | value, index, tmp, size, 500); |
| 379 | |||
| 380 | kfree(tmp); | ||
| 381 | return ret; | ||
| 358 | } | 382 | } |
| 359 | 383 | ||
| 360 | static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, | 384 | static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, |
| @@ -490,37 +514,31 @@ int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) | |||
| 490 | 514 | ||
| 491 | static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index) | 515 | static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index) |
| 492 | { | 516 | { |
| 493 | u32 data; | 517 | __le32 data; |
| 494 | 518 | ||
| 495 | if (type == MCU_TYPE_PLA) | 519 | generic_ocp_read(tp, index, sizeof(data), &data, type); |
| 496 | pla_ocp_read(tp, index, sizeof(data), &data); | ||
| 497 | else | ||
| 498 | usb_ocp_read(tp, index, sizeof(data), &data); | ||
| 499 | 520 | ||
| 500 | return __le32_to_cpu(data); | 521 | return __le32_to_cpu(data); |
| 501 | } | 522 | } |
| 502 | 523 | ||
| 503 | static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data) | 524 | static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data) |
| 504 | { | 525 | { |
| 505 | if (type == MCU_TYPE_PLA) | 526 | __le32 tmp = __cpu_to_le32(data); |
| 506 | pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data); | 527 | |
| 507 | else | 528 | generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type); |
| 508 | usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data); | ||
| 509 | } | 529 | } |
| 510 | 530 | ||
| 511 | static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) | 531 | static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) |
| 512 | { | 532 | { |
| 513 | u32 data; | 533 | u32 data; |
| 534 | __le32 tmp; | ||
| 514 | u8 shift = index & 2; | 535 | u8 shift = index & 2; |
| 515 | 536 | ||
| 516 | index &= ~3; | 537 | index &= ~3; |
| 517 | 538 | ||
| 518 | if (type == MCU_TYPE_PLA) | 539 | generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); |
| 519 | pla_ocp_read(tp, index, sizeof(data), &data); | ||
| 520 | else | ||
| 521 | usb_ocp_read(tp, index, sizeof(data), &data); | ||
| 522 | 540 | ||
| 523 | data = __le32_to_cpu(data); | 541 | data = __le32_to_cpu(tmp); |
| 524 | data >>= (shift * 8); | 542 | data >>= (shift * 8); |
| 525 | data &= 0xffff; | 543 | data &= 0xffff; |
| 526 | 544 | ||
| @@ -529,7 +547,8 @@ static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) | |||
| 529 | 547 | ||
| 530 | static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) | 548 | static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) |
| 531 | { | 549 | { |
| 532 | u32 tmp, mask = 0xffff; | 550 | u32 mask = 0xffff; |
| 551 | __le32 tmp; | ||
| 533 | u16 byen = BYTE_EN_WORD; | 552 | u16 byen = BYTE_EN_WORD; |
| 534 | u8 shift = index & 2; | 553 | u8 shift = index & 2; |
| 535 | 554 | ||
| @@ -542,34 +561,25 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) | |||
| 542 | index &= ~3; | 561 | index &= ~3; |
| 543 | } | 562 | } |
| 544 | 563 | ||
| 545 | if (type == MCU_TYPE_PLA) | 564 | generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); |
| 546 | pla_ocp_read(tp, index, sizeof(tmp), &tmp); | ||
| 547 | else | ||
| 548 | usb_ocp_read(tp, index, sizeof(tmp), &tmp); | ||
| 549 | 565 | ||
| 550 | tmp = __le32_to_cpu(tmp) & ~mask; | 566 | data |= __le32_to_cpu(tmp) & ~mask; |
| 551 | tmp |= data; | 567 | tmp = __cpu_to_le32(data); |
| 552 | tmp = __cpu_to_le32(tmp); | ||
| 553 | 568 | ||
| 554 | if (type == MCU_TYPE_PLA) | 569 | generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); |
| 555 | pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp); | ||
| 556 | else | ||
| 557 | usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp); | ||
| 558 | } | 570 | } |
| 559 | 571 | ||
| 560 | static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) | 572 | static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) |
| 561 | { | 573 | { |
| 562 | u32 data; | 574 | u32 data; |
| 575 | __le32 tmp; | ||
| 563 | u8 shift = index & 3; | 576 | u8 shift = index & 3; |
| 564 | 577 | ||
| 565 | index &= ~3; | 578 | index &= ~3; |
| 566 | 579 | ||
| 567 | if (type == MCU_TYPE_PLA) | 580 | generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); |
| 568 | pla_ocp_read(tp, index, sizeof(data), &data); | ||
| 569 | else | ||
| 570 | usb_ocp_read(tp, index, sizeof(data), &data); | ||
| 571 | 581 | ||
| 572 | data = __le32_to_cpu(data); | 582 | data = __le32_to_cpu(tmp); |
| 573 | data >>= (shift * 8); | 583 | data >>= (shift * 8); |
| 574 | data &= 0xff; | 584 | data &= 0xff; |
| 575 | 585 | ||
| @@ -578,7 +588,8 @@ static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) | |||
| 578 | 588 | ||
| 579 | static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) | 589 | static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) |
| 580 | { | 590 | { |
| 581 | u32 tmp, mask = 0xff; | 591 | u32 mask = 0xff; |
| 592 | __le32 tmp; | ||
| 582 | u16 byen = BYTE_EN_BYTE; | 593 | u16 byen = BYTE_EN_BYTE; |
| 583 | u8 shift = index & 3; | 594 | u8 shift = index & 3; |
| 584 | 595 | ||
| @@ -591,19 +602,12 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) | |||
| 591 | index &= ~3; | 602 | index &= ~3; |
| 592 | } | 603 | } |
| 593 | 604 | ||
| 594 | if (type == MCU_TYPE_PLA) | 605 | generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); |
| 595 | pla_ocp_read(tp, index, sizeof(tmp), &tmp); | ||
| 596 | else | ||
| 597 | usb_ocp_read(tp, index, sizeof(tmp), &tmp); | ||
| 598 | 606 | ||
| 599 | tmp = __le32_to_cpu(tmp) & ~mask; | 607 | data |= __le32_to_cpu(tmp) & ~mask; |
| 600 | tmp |= data; | 608 | tmp = __cpu_to_le32(data); |
| 601 | tmp = __cpu_to_le32(tmp); | ||
| 602 | 609 | ||
| 603 | if (type == MCU_TYPE_PLA) | 610 | generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); |
| 604 | pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp); | ||
| 605 | else | ||
| 606 | usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp); | ||
| 607 | } | 611 | } |
| 608 | 612 | ||
| 609 | static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value) | 613 | static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value) |
| @@ -685,21 +689,14 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data) | |||
| 685 | static inline void set_ethernet_addr(struct r8152 *tp) | 689 | static inline void set_ethernet_addr(struct r8152 *tp) |
| 686 | { | 690 | { |
| 687 | struct net_device *dev = tp->netdev; | 691 | struct net_device *dev = tp->netdev; |
| 688 | u8 *node_id; | 692 | u8 node_id[8] = {0}; |
| 689 | |||
| 690 | node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL); | ||
| 691 | if (!node_id) { | ||
| 692 | netif_err(tp, probe, dev, "out of memory"); | ||
| 693 | return; | ||
| 694 | } | ||
| 695 | 693 | ||
| 696 | if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0) | 694 | if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0) |
| 697 | netif_notice(tp, probe, dev, "inet addr fail\n"); | 695 | netif_notice(tp, probe, dev, "inet addr fail\n"); |
| 698 | else { | 696 | else { |
| 699 | memcpy(dev->dev_addr, node_id, dev->addr_len); | 697 | memcpy(dev->dev_addr, node_id, dev->addr_len); |
| 700 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 698 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
| 701 | } | 699 | } |
| 702 | kfree(node_id); | ||
| 703 | } | 700 | } |
| 704 | 701 | ||
| 705 | static int rtl8152_set_mac_address(struct net_device *netdev, void *p) | 702 | static int rtl8152_set_mac_address(struct net_device *netdev, void *p) |
| @@ -882,15 +879,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev) | |||
| 882 | static void _rtl8152_set_rx_mode(struct net_device *netdev) | 879 | static void _rtl8152_set_rx_mode(struct net_device *netdev) |
| 883 | { | 880 | { |
| 884 | struct r8152 *tp = netdev_priv(netdev); | 881 | struct r8152 *tp = netdev_priv(netdev); |
| 885 | u32 tmp, *mc_filter; /* Multicast hash filter */ | 882 | u32 mc_filter[2]; /* Multicast hash filter */ |
| 883 | __le32 tmp[2]; | ||
| 886 | u32 ocp_data; | 884 | u32 ocp_data; |
| 887 | 885 | ||
| 888 | mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL); | ||
| 889 | if (!mc_filter) { | ||
| 890 | netif_err(tp, link, netdev, "out of memory"); | ||
| 891 | return; | ||
| 892 | } | ||
| 893 | |||
| 894 | clear_bit(RTL8152_SET_RX_MODE, &tp->flags); | 886 | clear_bit(RTL8152_SET_RX_MODE, &tp->flags); |
| 895 | netif_stop_queue(netdev); | 887 | netif_stop_queue(netdev); |
| 896 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); | 888 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); |
| @@ -918,14 +910,12 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) | |||
| 918 | } | 910 | } |
| 919 | } | 911 | } |
| 920 | 912 | ||
| 921 | tmp = mc_filter[0]; | 913 | tmp[0] = __cpu_to_le32(swab32(mc_filter[1])); |
| 922 | mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1])); | 914 | tmp[1] = __cpu_to_le32(swab32(mc_filter[0])); |
| 923 | mc_filter[1] = __cpu_to_le32(swab32(tmp)); | ||
| 924 | 915 | ||
| 925 | pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter); | 916 | pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp); |
| 926 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); | 917 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); |
| 927 | netif_wake_queue(netdev); | 918 | netif_wake_queue(netdev); |
| 928 | kfree(mc_filter); | ||
| 929 | } | 919 | } |
| 930 | 920 | ||
| 931 | static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, | 921 | static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, |
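The r8152 rework does two things at once: it funnels the PLA and USB register paths through single generic_ocp_read()/generic_ocp_write() helpers, and it types everything that crosses the wire as __le32 so sparse flags any missing byte swap. It also drops the kmalloc'd scratch buffers in set_ethernet_addr() and _rtl8152_set_rx_mode() in favour of small arrays, now that get_registers()/set_registers() bounce the data through a heap buffer themselves. A hedged sketch of the read-modify-write word access; regs_read()/regs_write() are hypothetical stand-ins for the generic OCP helpers:

#include <linux/types.h>
#include <asm/byteorder.h>

int regs_read(u16 index, __le32 *val);	/* assumed 32-bit transport */
int regs_write(u16 index, __le32 val);

static int write_word(u16 index, u16 data)
{
	u8 shift = index & 2;
	u32 mask = 0xffffu << (shift * 8);
	__le32 tmp;
	int ret;

	index &= ~3;
	ret = regs_read(index, &tmp);	/* fetch the containing dword */
	if (ret < 0)
		return ret;

	tmp = cpu_to_le32((le32_to_cpu(tmp) & ~mask) |
			  ((u32)data << (shift * 8)));
	return regs_write(index, tmp);	/* store it back, still __le32 */
}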
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c index 852392269718..2df2f4fb42a7 100644 --- a/drivers/net/usb/r815x.c +++ b/drivers/net/usb/r815x.c | |||
| @@ -24,34 +24,43 @@ | |||
| 24 | 24 | ||
| 25 | static int pla_read_word(struct usb_device *udev, u16 index) | 25 | static int pla_read_word(struct usb_device *udev, u16 index) |
| 26 | { | 26 | { |
| 27 | int data, ret; | 27 | int ret; |
| 28 | u8 shift = index & 2; | 28 | u8 shift = index & 2; |
| 29 | __le32 ocp_data; | 29 | __le32 *tmp; |
| 30 | |||
| 31 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
| 32 | if (!tmp) | ||
| 33 | return -ENOMEM; | ||
| 30 | 34 | ||
| 31 | index &= ~3; | 35 | index &= ~3; |
| 32 | 36 | ||
| 33 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | 37 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), |
| 34 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | 38 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, |
| 35 | index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data), | 39 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); |
| 36 | 500); | ||
| 37 | if (ret < 0) | 40 | if (ret < 0) |
| 38 | return ret; | 41 | goto out2; |
| 39 | 42 | ||
| 40 | data = __le32_to_cpu(ocp_data); | 43 | ret = __le32_to_cpu(*tmp); |
| 41 | data >>= (shift * 8); | 44 | ret >>= (shift * 8); |
| 42 | data &= 0xffff; | 45 | ret &= 0xffff; |
| 43 | 46 | ||
| 44 | return data; | 47 | out2: |
| 48 | kfree(tmp); | ||
| 49 | return ret; | ||
| 45 | } | 50 | } |
| 46 | 51 | ||
| 47 | static int pla_write_word(struct usb_device *udev, u16 index, u32 data) | 52 | static int pla_write_word(struct usb_device *udev, u16 index, u32 data) |
| 48 | { | 53 | { |
| 49 | __le32 ocp_data; | 54 | __le32 *tmp; |
| 50 | u32 mask = 0xffff; | 55 | u32 mask = 0xffff; |
| 51 | u16 byen = BYTE_EN_WORD; | 56 | u16 byen = BYTE_EN_WORD; |
| 52 | u8 shift = index & 2; | 57 | u8 shift = index & 2; |
| 53 | int ret; | 58 | int ret; |
| 54 | 59 | ||
| 60 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
| 61 | if (!tmp) | ||
| 62 | return -ENOMEM; | ||
| 63 | |||
| 55 | data &= mask; | 64 | data &= mask; |
| 56 | 65 | ||
| 57 | if (shift) { | 66 | if (shift) { |
| @@ -63,19 +72,20 @@ static int pla_write_word(struct usb_device *udev, u16 index, u32 data) | |||
| 63 | 72 | ||
| 64 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | 73 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), |
| 65 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | 74 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, |
| 66 | index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data), | 75 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); |
| 67 | 500); | ||
| 68 | if (ret < 0) | 76 | if (ret < 0) |
| 69 | return ret; | 77 | goto out3; |
| 70 | 78 | ||
| 71 | data |= __le32_to_cpu(ocp_data) & ~mask; | 79 | data |= __le32_to_cpu(*tmp) & ~mask; |
| 72 | ocp_data = __cpu_to_le32(data); | 80 | *tmp = __cpu_to_le32(data); |
| 73 | 81 | ||
| 74 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 82 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
| 75 | RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, | 83 | RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, |
| 76 | index, MCU_TYPE_PLA | byen, &ocp_data, | 84 | index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp), |
| 77 | sizeof(ocp_data), 500); | 85 | 500); |
| 78 | 86 | ||
| 87 | out3: | ||
| 88 | kfree(tmp); | ||
| 79 | return ret; | 89 | return ret; |
| 80 | } | 90 | } |
| 81 | 91 | ||
| @@ -116,11 +126,18 @@ out1: | |||
| 116 | static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) | 126 | static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) |
| 117 | { | 127 | { |
| 118 | struct usbnet *dev = netdev_priv(netdev); | 128 | struct usbnet *dev = netdev_priv(netdev); |
| 129 | int ret; | ||
| 119 | 130 | ||
| 120 | if (phy_id != R815x_PHY_ID) | 131 | if (phy_id != R815x_PHY_ID) |
| 121 | return -EINVAL; | 132 | return -EINVAL; |
| 122 | 133 | ||
| 123 | return ocp_reg_read(dev, BASE_MII + reg * 2); | 134 | if (usb_autopm_get_interface(dev->intf) < 0) |
| 135 | return -ENODEV; | ||
| 136 | |||
| 137 | ret = ocp_reg_read(dev, BASE_MII + reg * 2); | ||
| 138 | |||
| 139 | usb_autopm_put_interface(dev->intf); | ||
| 140 | return ret; | ||
| 124 | } | 141 | } |
| 125 | 142 | ||
| 126 | static | 143 | static |
| @@ -131,7 +148,12 @@ void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) | |||
| 131 | if (phy_id != R815x_PHY_ID) | 148 | if (phy_id != R815x_PHY_ID) |
| 132 | return; | 149 | return; |
| 133 | 150 | ||
| 151 | if (usb_autopm_get_interface(dev->intf) < 0) | ||
| 152 | return; | ||
| 153 | |||
| 134 | ocp_reg_write(dev, BASE_MII + reg * 2, val); | 154 | ocp_reg_write(dev, BASE_MII + reg * 2, val); |
| 155 | |||
| 156 | usb_autopm_put_interface(dev->intf); | ||
| 135 | } | 157 | } |
| 136 | 158 | ||
| 137 | static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) | 159 | static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) |
| @@ -150,7 +172,7 @@ static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 150 | dev->mii.phy_id = R815x_PHY_ID; | 172 | dev->mii.phy_id = R815x_PHY_ID; |
| 151 | dev->mii.supports_gmii = 1; | 173 | dev->mii.supports_gmii = 1; |
| 152 | 174 | ||
| 153 | return 0; | 175 | return status; |
| 154 | } | 176 | } |
| 155 | 177 | ||
| 156 | static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) | 178 | static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) |
| @@ -169,7 +191,7 @@ static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 169 | dev->mii.phy_id = R815x_PHY_ID; | 191 | dev->mii.phy_id = R815x_PHY_ID; |
| 170 | dev->mii.supports_gmii = 0; | 192 | dev->mii.supports_gmii = 0; |
| 171 | 193 | ||
| 172 | return 0; | 194 | return status; |
| 173 | } | 195 | } |
| 174 | 196 | ||
| 175 | static const struct driver_info r8152_info = { | 197 | static const struct driver_info r8152_info = { |
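The r815x MDIO hunks add runtime-PM bracketing: usb_autopm_get_interface() resumes an autosuspended device before the register access and usb_autopm_put_interface() lets it suspend again afterwards; the two bind functions now also return the real probe status instead of an unconditional 0. A sketch of the bracketing:

#include <linux/usb.h>

static int mdio_io_powered(struct usb_interface *intf, int (*io)(void *),
			   void *arg)
{
	int ret = usb_autopm_get_interface(intf);	/* resume if needed */

	if (ret < 0)
		return ret;
	ret = io(arg);					/* safe to touch regs */
	usb_autopm_put_interface(intf);			/* allow autosuspend */
	return ret;
}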
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 75409748c774..66ebbacf066f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
| @@ -45,7 +45,6 @@ | |||
| 45 | #define EEPROM_MAC_OFFSET (0x01) | 45 | #define EEPROM_MAC_OFFSET (0x01) |
| 46 | #define DEFAULT_TX_CSUM_ENABLE (true) | 46 | #define DEFAULT_TX_CSUM_ENABLE (true) |
| 47 | #define DEFAULT_RX_CSUM_ENABLE (true) | 47 | #define DEFAULT_RX_CSUM_ENABLE (true) |
| 48 | #define DEFAULT_TSO_ENABLE (true) | ||
| 49 | #define SMSC75XX_INTERNAL_PHY_ID (1) | 48 | #define SMSC75XX_INTERNAL_PHY_ID (1) |
| 50 | #define SMSC75XX_TX_OVERHEAD (8) | 49 | #define SMSC75XX_TX_OVERHEAD (8) |
| 51 | #define MAX_RX_FIFO_SIZE (20 * 1024) | 50 | #define MAX_RX_FIFO_SIZE (20 * 1024) |
| @@ -1410,17 +1409,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 1410 | 1409 | ||
| 1411 | INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); | 1410 | INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); |
| 1412 | 1411 | ||
| 1413 | if (DEFAULT_TX_CSUM_ENABLE) { | 1412 | if (DEFAULT_TX_CSUM_ENABLE) |
| 1414 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 1413 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
| 1415 | if (DEFAULT_TSO_ENABLE) | 1414 | |
| 1416 | dev->net->features |= NETIF_F_SG | | ||
| 1417 | NETIF_F_TSO | NETIF_F_TSO6; | ||
| 1418 | } | ||
| 1419 | if (DEFAULT_RX_CSUM_ENABLE) | 1415 | if (DEFAULT_RX_CSUM_ENABLE) |
| 1420 | dev->net->features |= NETIF_F_RXCSUM; | 1416 | dev->net->features |= NETIF_F_RXCSUM; |
| 1421 | 1417 | ||
| 1422 | dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 1418 | dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 1423 | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; | 1419 | NETIF_F_RXCSUM; |
| 1424 | 1420 | ||
| 1425 | ret = smsc75xx_wait_ready(dev, 0); | 1421 | ret = smsc75xx_wait_ready(dev, 0); |
| 1426 | if (ret < 0) { | 1422 | if (ret < 0) { |
| @@ -2200,8 +2196,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, | |||
| 2200 | { | 2196 | { |
| 2201 | u32 tx_cmd_a, tx_cmd_b; | 2197 | u32 tx_cmd_a, tx_cmd_b; |
| 2202 | 2198 | ||
| 2203 | skb_linearize(skb); | ||
| 2204 | |||
| 2205 | if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { | 2199 | if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { |
| 2206 | struct sk_buff *skb2 = | 2200 | struct sk_buff *skb2 = |
| 2207 | skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); | 2201 | skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index da866523cf20..eee1f19ef1e9 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
| @@ -269,6 +269,7 @@ static void veth_setup(struct net_device *dev) | |||
| 269 | dev->ethtool_ops = &veth_ethtool_ops; | 269 | dev->ethtool_ops = &veth_ethtool_ops; |
| 270 | dev->features |= NETIF_F_LLTX; | 270 | dev->features |= NETIF_F_LLTX; |
| 271 | dev->features |= VETH_FEATURES; | 271 | dev->features |= VETH_FEATURES; |
| 272 | dev->vlan_features = dev->features; | ||
| 272 | dev->destructor = veth_dev_free; | 273 | dev->destructor = veth_dev_free; |
| 273 | 274 | ||
| 274 | dev->hw_features = VETH_FEATURES; | 275 | dev->hw_features = VETH_FEATURES; |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a5ba8dd7e6be..767f7af3bd40 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -136,7 +136,8 @@ struct vxlan_dev { | |||
| 136 | u32 flags; /* VXLAN_F_* below */ | 136 | u32 flags; /* VXLAN_F_* below */ |
| 137 | 137 | ||
| 138 | struct work_struct sock_work; | 138 | struct work_struct sock_work; |
| 139 | struct work_struct igmp_work; | 139 | struct work_struct igmp_join; |
| 140 | struct work_struct igmp_leave; | ||
| 140 | 141 | ||
| 141 | unsigned long age_interval; | 142 | unsigned long age_interval; |
| 142 | struct timer_list age_timer; | 143 | struct timer_list age_timer; |
| @@ -736,7 +737,6 @@ static bool vxlan_snoop(struct net_device *dev, | |||
| 736 | return false; | 737 | return false; |
| 737 | } | 738 | } |
| 738 | 739 | ||
| 739 | |||
| 740 | /* See if multicast group is already in use by other ID */ | 740 | /* See if multicast group is already in use by other ID */ |
| 741 | static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip) | 741 | static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip) |
| 742 | { | 742 | { |
| @@ -770,12 +770,13 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs) | |||
| 770 | queue_work(vxlan_wq, &vs->del_work); | 770 | queue_work(vxlan_wq, &vs->del_work); |
| 771 | } | 771 | } |
| 772 | 772 | ||
| 773 | /* Callback to update multicast group membership. | 773 | /* Callback to update multicast group membership when first VNI on |
| 774 | * Scheduled when vxlan goes up/down. | 774 | * multicast address is brought up |
| 775 | * Done as workqueue because ip_mc_join_group acquires RTNL. | ||
| 775 | */ | 776 | */ |
| 776 | static void vxlan_igmp_work(struct work_struct *work) | 777 | static void vxlan_igmp_join(struct work_struct *work) |
| 777 | { | 778 | { |
| 778 | struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work); | 779 | struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join); |
| 779 | struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id); | 780 | struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id); |
| 780 | struct vxlan_sock *vs = vxlan->vn_sock; | 781 | struct vxlan_sock *vs = vxlan->vn_sock; |
| 781 | struct sock *sk = vs->sock->sk; | 782 | struct sock *sk = vs->sock->sk; |
| @@ -785,10 +786,27 @@ static void vxlan_igmp_work(struct work_struct *work) | |||
| 785 | }; | 786 | }; |
| 786 | 787 | ||
| 787 | lock_sock(sk); | 788 | lock_sock(sk); |
| 788 | if (vxlan_group_used(vn, vxlan->default_dst.remote_ip)) | 789 | ip_mc_join_group(sk, &mreq); |
| 789 | ip_mc_join_group(sk, &mreq); | 790 | release_sock(sk); |
| 790 | else | 791 | |
| 791 | ip_mc_leave_group(sk, &mreq); | 792 | vxlan_sock_release(vn, vs); |
| 793 | dev_put(vxlan->dev); | ||
| 794 | } | ||
| 795 | |||
| 796 | /* Inverse of vxlan_igmp_join when last VNI is brought down */ | ||
| 797 | static void vxlan_igmp_leave(struct work_struct *work) | ||
| 798 | { | ||
| 799 | struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave); | ||
| 800 | struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id); | ||
| 801 | struct vxlan_sock *vs = vxlan->vn_sock; | ||
| 802 | struct sock *sk = vs->sock->sk; | ||
| 803 | struct ip_mreqn mreq = { | ||
| 804 | .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, | ||
| 805 | .imr_ifindex = vxlan->default_dst.remote_ifindex, | ||
| 806 | }; | ||
| 807 | |||
| 808 | lock_sock(sk); | ||
| 809 | ip_mc_leave_group(sk, &mreq); | ||
| 792 | release_sock(sk); | 810 | release_sock(sk); |
| 793 | 811 | ||
| 794 | vxlan_sock_release(vn, vs); | 812 | vxlan_sock_release(vn, vs); |
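The join/leave split above follows a common kernel pattern: per the comment, ip_mc_join_group() acquires RTNL, which is already held in ndo_open/ndo_stop, so the group operation is deferred to a workqueue with references on the device and socket taken at queue time and dropped by the worker. A minimal sketch of the join side, with hypothetical names (my_dev, my_igmp_join, my_queue_join):

```c
#include <linux/igmp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/sock.h>

struct my_dev {
	struct net_device *netdev;
	struct sock *sk;
	struct ip_mreqn mreq;
	struct work_struct igmp_join;
};

static void my_igmp_join(struct work_struct *work)
{
	struct my_dev *p = container_of(work, struct my_dev, igmp_join);

	lock_sock(p->sk);
	ip_mc_join_group(p->sk, &p->mreq);	/* takes RTNL internally */
	release_sock(p->sk);

	dev_put(p->netdev);	/* drop the reference taken at queue time */
}

/* Caller side, e.g. from ndo_open, where RTNL is already held and
 * taking it again would deadlock: */
static void my_queue_join(struct my_dev *p)
{
	dev_hold(p->netdev);	/* keep the device alive across the work */
	queue_work(system_wq, &p->igmp_join);
}
```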
| @@ -1359,6 +1377,7 @@ static void vxlan_uninit(struct net_device *dev) | |||
| 1359 | /* Start ageing timer and join group when device is brought up */ | 1377 | /* Start ageing timer and join group when device is brought up */ |
| 1360 | static int vxlan_open(struct net_device *dev) | 1378 | static int vxlan_open(struct net_device *dev) |
| 1361 | { | 1379 | { |
| 1380 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | ||
| 1362 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1381 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 1363 | struct vxlan_sock *vs = vxlan->vn_sock; | 1382 | struct vxlan_sock *vs = vxlan->vn_sock; |
| 1364 | 1383 | ||
| @@ -1366,10 +1385,11 @@ static int vxlan_open(struct net_device *dev) | |||
| 1366 | if (!vs) | 1385 | if (!vs) |
| 1367 | return -ENOTCONN; | 1386 | return -ENOTCONN; |
| 1368 | 1387 | ||
| 1369 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { | 1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && |
| 1389 | vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { | ||
| 1370 | vxlan_sock_hold(vs); | 1390 | vxlan_sock_hold(vs); |
| 1371 | dev_hold(dev); | 1391 | dev_hold(dev); |
| 1372 | queue_work(vxlan_wq, &vxlan->igmp_work); | 1392 | queue_work(vxlan_wq, &vxlan->igmp_join); |
| 1373 | } | 1393 | } |
| 1374 | 1394 | ||
| 1375 | if (vxlan->age_interval) | 1395 | if (vxlan->age_interval) |
| @@ -1400,13 +1420,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan) | |||
| 1400 | /* Cleanup timer and forwarding table on shutdown */ | 1420 | /* Cleanup timer and forwarding table on shutdown */ |
| 1401 | static int vxlan_stop(struct net_device *dev) | 1421 | static int vxlan_stop(struct net_device *dev) |
| 1402 | { | 1422 | { |
| 1423 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | ||
| 1403 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1424 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 1404 | struct vxlan_sock *vs = vxlan->vn_sock; | 1425 | struct vxlan_sock *vs = vxlan->vn_sock; |
| 1405 | 1426 | ||
| 1406 | if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { | 1427 | if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && |
| 1428 | ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { | ||
| 1407 | vxlan_sock_hold(vs); | 1429 | vxlan_sock_hold(vs); |
| 1408 | dev_hold(dev); | 1430 | dev_hold(dev); |
| 1409 | queue_work(vxlan_wq, &vxlan->igmp_work); | 1431 | queue_work(vxlan_wq, &vxlan->igmp_leave); |
| 1410 | } | 1432 | } |
| 1411 | 1433 | ||
| 1412 | del_timer_sync(&vxlan->age_timer); | 1434 | del_timer_sync(&vxlan->age_timer); |
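Taken together, the open and stop hunks gate the IGMP work on vxlan_group_used(), so membership follows whether any running VNI in the namespace still uses the group rather than being toggled per device; in particular, the leave is skipped while another running VNI shares the group. A simplified, runnable model of that first-user/last-user idea (the counter stands in for the list walk in vxlan_group_used()):

```c
#include <stdio.h>

static int group_users;	/* stand-in for vxlan_group_used()'s list walk */

static void vni_up(void)
{
	if (group_users++ == 0)
		printf("first VNI up: join multicast group\n");
}

static void vni_down(void)
{
	if (--group_users == 0)
		printf("last VNI down: leave multicast group\n");
}

int main(void)
{
	vni_up();	/* joins */
	vni_up();	/* group already joined, nothing to do */
	vni_down();	/* another VNI still uses the group */
	vni_down();	/* leaves */
	return 0;
}
```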
| @@ -1471,7 +1493,8 @@ static void vxlan_setup(struct net_device *dev) | |||
| 1471 | 1493 | ||
| 1472 | INIT_LIST_HEAD(&vxlan->next); | 1494 | INIT_LIST_HEAD(&vxlan->next); |
| 1473 | spin_lock_init(&vxlan->hash_lock); | 1495 | spin_lock_init(&vxlan->hash_lock); |
| 1474 | INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work); | 1496 | INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join); |
| 1497 | INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave); | ||
| 1475 | INIT_WORK(&vxlan->sock_work, vxlan_sock_work); | 1498 | INIT_WORK(&vxlan->sock_work, vxlan_sock_work); |
| 1476 | 1499 | ||
| 1477 | init_timer_deferrable(&vxlan->age_timer); | 1500 | init_timer_deferrable(&vxlan->age_timer); |
| @@ -1770,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) | |||
| 1770 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | 1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); |
| 1771 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1794 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 1772 | 1795 | ||
| 1773 | flush_workqueue(vxlan_wq); | ||
| 1774 | |||
| 1775 | spin_lock(&vn->sock_lock); | 1796 | spin_lock(&vn->sock_lock); |
| 1776 | hlist_del_rcu(&vxlan->hlist); | 1797 | hlist_del_rcu(&vxlan->hlist); |
| 1777 | spin_unlock(&vn->sock_lock); | 1798 | spin_unlock(&vn->sock_lock); |
| @@ -1878,10 +1899,12 @@ static __net_exit void vxlan_exit_net(struct net *net) | |||
| 1878 | { | 1899 | { |
| 1879 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); | 1900 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); |
| 1880 | struct vxlan_dev *vxlan; | 1901 | struct vxlan_dev *vxlan; |
| 1902 | LIST_HEAD(list); | ||
| 1881 | 1903 | ||
| 1882 | rtnl_lock(); | 1904 | rtnl_lock(); |
| 1883 | list_for_each_entry(vxlan, &vn->vxlan_list, next) | 1905 | list_for_each_entry(vxlan, &vn->vxlan_list, next) |
| 1884 | dev_close(vxlan->dev); | 1906 | unregister_netdevice_queue(vxlan->dev, &list); |
| 1907 | unregister_netdevice_many(&list); | ||
| 1885 | rtnl_unlock(); | 1908 | rtnl_unlock(); |
| 1886 | } | 1909 | } |
| 1887 | 1910 | ||
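Replacing per-device dev_close() in vxlan_exit_net() with unregister_netdevice_queue() plus a single unregister_netdevice_many() tears down all devices of the namespace in one batch under one RTNL hold, which also amortizes the RCU grace periods paid per unregistration. A minimal sketch of the pattern (my_port and my_exit_net are hypothetical):

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct my_port {
	struct net_device *dev;
	struct list_head next;	/* linked on the per-namespace list */
};

static void my_exit_net(struct list_head *ports)
{
	struct my_port *p;
	LIST_HEAD(kill);

	rtnl_lock();
	list_for_each_entry(p, ports, next)
		unregister_netdevice_queue(p->dev, &kill);	/* just queues */
	unregister_netdevice_many(&kill);	/* one batched teardown */
	rtnl_unlock();
}
```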
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index cde58fe96254..82e8088ca9b4 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config ATH10K | 1 | config ATH10K |
| 2 | tristate "Atheros 802.11ac wireless cards support" | 2 | tristate "Atheros 802.11ac wireless cards support" |
| 3 | depends on MAC80211 | 3 | depends on MAC80211 && HAS_DMA |
| 4 | select ATH_COMMON | 4 | select ATH_COMMON |
| 5 | ---help--- | 5 | ---help--- |
| 6 | This module adds support for wireless adapters based on | 6 | This module adds support for wireless adapters based on |
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 81b686c6a376..40825d43322e 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c | |||
| @@ -325,7 +325,7 @@ ath5k_prepare_multicast(struct ieee80211_hw *hw, | |||
| 325 | struct netdev_hw_addr *ha; | 325 | struct netdev_hw_addr *ha; |
| 326 | 326 | ||
| 327 | mfilt[0] = 0; | 327 | mfilt[0] = 0; |
| 328 | mfilt[1] = 1; | 328 | mfilt[1] = 0; |
| 329 | 329 | ||
| 330 | netdev_hw_addr_list_for_each(ha, mc_list) { | 330 | netdev_hw_addr_list_for_each(ha, mc_list) { |
| 331 | /* calculate XOR of eight 6-bit values */ | 331 | /* calculate XOR of eight 6-bit values */ |
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index d1acfe98918a..1576d58291d4 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c | |||
| @@ -610,7 +610,15 @@ static void ar5008_hw_override_ini(struct ath_hw *ah, | |||
| 610 | REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); | 610 | REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); |
| 611 | 611 | ||
| 612 | if (AR_SREV_9280_20_OR_LATER(ah)) { | 612 | if (AR_SREV_9280_20_OR_LATER(ah)) { |
| 613 | val = REG_READ(ah, AR_PCU_MISC_MODE2); | 613 | /* |
| 614 | * For AR9280 and above, there is a new feature that allows | ||
| 615 | * Multicast search based on both MAC Address and Key ID. | ||
| 616 | * By default, this feature is enabled. But since the driver | ||
| 617 | * is not using this feature, we switch it off; otherwise | ||
| 618 | * multicast search based on MAC addr only will fail. | ||
| 619 | */ | ||
| 620 | val = REG_READ(ah, AR_PCU_MISC_MODE2) & | ||
| 621 | (~AR_ADHOC_MCAST_KEYID_ENABLE); | ||
| 614 | 622 | ||
| 615 | if (!AR_SREV_9271(ah)) | 623 | if (!AR_SREV_9271(ah)) |
| 616 | val &= ~AR_PCU_MISC_MODE2_HWWAR1; | 624 | val &= ~AR_PCU_MISC_MODE2_HWWAR1; |
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 9e582e14da74..5205a3625e84 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
| @@ -1082,7 +1082,7 @@ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev) | |||
| 1082 | struct device *dev = &hif_dev->udev->dev; | 1082 | struct device *dev = &hif_dev->udev->dev; |
| 1083 | struct device *parent = dev->parent; | 1083 | struct device *parent = dev->parent; |
| 1084 | 1084 | ||
| 1085 | complete(&hif_dev->fw_done); | 1085 | complete_all(&hif_dev->fw_done); |
| 1086 | 1086 | ||
| 1087 | if (parent) | 1087 | if (parent) |
| 1088 | device_lock(parent); | 1088 | device_lock(parent); |
| @@ -1131,7 +1131,7 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context) | |||
| 1131 | 1131 | ||
| 1132 | release_firmware(fw); | 1132 | release_firmware(fw); |
| 1133 | hif_dev->flags |= HIF_USB_READY; | 1133 | hif_dev->flags |= HIF_USB_READY; |
| 1134 | complete(&hif_dev->fw_done); | 1134 | complete_all(&hif_dev->fw_done); |
| 1135 | 1135 | ||
| 1136 | return; | 1136 | return; |
| 1137 | 1137 | ||
| @@ -1295,7 +1295,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) | |||
| 1295 | 1295 | ||
| 1296 | usb_set_intfdata(interface, NULL); | 1296 | usb_set_intfdata(interface, NULL); |
| 1297 | 1297 | ||
| 1298 | if (!unplugged && (hif_dev->flags & HIF_USB_START)) | 1298 | /* If firmware was loaded we should drop it and |
| 1299 | * go back to the first stage bootloader. */ | ||
| 1300 | if (!unplugged && (hif_dev->flags & HIF_USB_READY)) | ||
| 1299 | ath9k_hif_usb_reboot(udev); | 1301 | ath9k_hif_usb_reboot(udev); |
| 1300 | 1302 | ||
| 1301 | kfree(hif_dev); | 1303 | kfree(hif_dev); |
| @@ -1316,7 +1318,10 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface, | |||
| 1316 | if (!(hif_dev->flags & HIF_USB_START)) | 1318 | if (!(hif_dev->flags & HIF_USB_START)) |
| 1317 | ath9k_htc_suspend(hif_dev->htc_handle); | 1319 | ath9k_htc_suspend(hif_dev->htc_handle); |
| 1318 | 1320 | ||
| 1319 | ath9k_hif_usb_dealloc_urbs(hif_dev); | 1321 | wait_for_completion(&hif_dev->fw_done); |
| 1322 | |||
| 1323 | if (hif_dev->flags & HIF_USB_READY) | ||
| 1324 | ath9k_hif_usb_dealloc_urbs(hif_dev); | ||
| 1320 | 1325 | ||
| 1321 | return 0; | 1326 | return 0; |
| 1322 | } | 1327 | } |
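The switch from complete() to complete_all(), combined with the wait_for_completion() added to the suspend path, is the heart of these hif_usb hunks: complete() releases a single waiter, while complete_all() leaves the completion permanently done, so any number of later waiters (disconnect, suspend) return immediately. A minimal sketch with hypothetical function names:

```c
#include <linux/completion.h>

static DECLARE_COMPLETION(fw_done);

/* firmware-load callback: runs once, on success or failure */
static void my_fw_cb(void)
{
	/* complete() would wake only one waiter; complete_all() marks
	 * the completion done forever, so both the disconnect and the
	 * suspend path can safely wait on it in any order. */
	complete_all(&fw_done);
}

/* suspend path: never touch the URBs while the firmware callback
 * may still be running */
static void my_suspend(void)
{
	wait_for_completion(&fw_done);	/* returns at once after my_fw_cb */
	/* ... now safe to deallocate URBs if the firmware came up ... */
}
```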
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 71a183ffc77f..c3676bf1d6c4 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
| @@ -861,6 +861,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, | |||
| 861 | if (error != 0) | 861 | if (error != 0) |
| 862 | goto err_rx; | 862 | goto err_rx; |
| 863 | 863 | ||
| 864 | ath9k_hw_disable(priv->ah); | ||
| 864 | #ifdef CONFIG_MAC80211_LEDS | 865 | #ifdef CONFIG_MAC80211_LEDS |
| 865 | /* must be initialized before ieee80211_register_hw */ | 866 | /* must be initialized before ieee80211_register_hw */ |
| 866 | priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw, | 867 | priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw, |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index e602c9519709..c028df76b564 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
| @@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv, | |||
| 448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; | 448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; |
| 449 | bool txok; | 449 | bool txok; |
| 450 | int slot; | 450 | int slot; |
| 451 | int hdrlen, padsize; | ||
| 451 | 452 | ||
| 452 | slot = strip_drv_header(priv, skb); | 453 | slot = strip_drv_header(priv, skb); |
| 453 | if (slot < 0) { | 454 | if (slot < 0) { |
| @@ -504,6 +505,15 @@ send_mac80211: | |||
| 504 | 505 | ||
| 505 | ath9k_htc_tx_clear_slot(priv, slot); | 506 | ath9k_htc_tx_clear_slot(priv, slot); |
| 506 | 507 | ||
| 508 | /* Remove padding before handing frame back to mac80211 */ | ||
| 509 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
| 510 | |||
| 511 | padsize = hdrlen & 3; | ||
| 512 | if (padsize && skb->len > hdrlen + padsize) { | ||
| 513 | memmove(skb->data + padsize, skb->data, hdrlen); | ||
| 514 | skb_pull(skb, padsize); | ||
| 515 | } | ||
| 516 | |||
| 507 | /* Send status to mac80211 */ | 517 | /* Send status to mac80211 */ |
| 508 | ieee80211_tx_status(priv->hw, skb); | 518 | ieee80211_tx_status(priv->hw, skb); |
| 509 | } | 519 | } |
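The padsize computation above is the usual 802.11 4-byte alignment trick: the hardware wants the header padded to a word boundary, mac80211 wants it unpadded, so the header is slid forward over the pad and the pad dropped from the front. A runnable userspace model of the same pointer arithmetic (buffer contents are illustrative; the kernel uses skb_pull() instead of the final pointer bump):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 26-byte header ('H'), 2 pad bytes ('P'), then the payload */
	char frame[64] = "HHHHHHHHHHHHHHHHHHHHHHHHHHPPpayload";
	size_t hdrlen = 26;
	size_t padsize = hdrlen & 3;	/* 26 & 3 == 2 */
	size_t len = strlen(frame);

	if (padsize && len > hdrlen + padsize) {
		/* slide the header up over the pad bytes ... */
		memmove(frame + padsize, frame, hdrlen);
		/* ... then drop the pad from the front (skb_pull) */
		printf("%s\n", frame + padsize);
	}
	return 0;
}
```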
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 16f8b201642b..026a2a067b46 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
| @@ -802,7 +802,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | |||
| 802 | IEEE80211_HW_PS_NULLFUNC_STACK | | 802 | IEEE80211_HW_PS_NULLFUNC_STACK | |
| 803 | IEEE80211_HW_SPECTRUM_MGMT | | 803 | IEEE80211_HW_SPECTRUM_MGMT | |
| 804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | | 804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
| 805 | IEEE80211_HW_SUPPORTS_RC_TABLE; | 805 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
| 806 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
| 806 | 807 | ||
| 807 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { | 808 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { |
| 808 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; | 809 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1737a3e33685..cb5a65553ac7 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc) | |||
| 173 | { | 173 | { |
| 174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); | 174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); |
| 175 | 175 | ||
| 176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) || | 176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) |
| 177 | AR_SREV_9550(sc->sc_ah)) | ||
| 178 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, | 177 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, |
| 179 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); | 178 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); |
| 180 | 179 | ||
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index c59ae43b9b35..927992732620 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -146,6 +146,28 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, | |||
| 146 | ARRAY_SIZE(bf->rates)); | 146 | ARRAY_SIZE(bf->rates)); |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, | ||
| 150 | struct sk_buff *skb) | ||
| 151 | { | ||
| 152 | int q; | ||
| 153 | |||
| 154 | q = skb_get_queue_mapping(skb); | ||
| 155 | if (txq == sc->tx.uapsdq) | ||
| 156 | txq = sc->tx.txq_map[q]; | ||
| 157 | |||
| 158 | if (txq != sc->tx.txq_map[q]) | ||
| 159 | return; | ||
| 160 | |||
| 161 | if (WARN_ON(--txq->pending_frames < 0)) | ||
| 162 | txq->pending_frames = 0; | ||
| 163 | |||
| 164 | if (txq->stopped && | ||
| 165 | txq->pending_frames < sc->tx.txq_max_pending[q]) { | ||
| 166 | ieee80211_wake_queue(sc->hw, q); | ||
| 167 | txq->stopped = false; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | |||
| 149 | static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | 171 | static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) |
| 150 | { | 172 | { |
| 151 | struct ath_txq *txq = tid->ac->txq; | 173 | struct ath_txq *txq = tid->ac->txq; |
| @@ -167,6 +189,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |||
| 167 | if (!bf) { | 189 | if (!bf) { |
| 168 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); | 190 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); |
| 169 | if (!bf) { | 191 | if (!bf) { |
| 192 | ath_txq_skb_done(sc, txq, skb); | ||
| 170 | ieee80211_free_txskb(sc->hw, skb); | 193 | ieee80211_free_txskb(sc->hw, skb); |
| 171 | continue; | 194 | continue; |
| 172 | } | 195 | } |
| @@ -811,6 +834,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, | |||
| 811 | 834 | ||
| 812 | if (!bf) { | 835 | if (!bf) { |
| 813 | __skb_unlink(skb, &tid->buf_q); | 836 | __skb_unlink(skb, &tid->buf_q); |
| 837 | ath_txq_skb_done(sc, txq, skb); | ||
| 814 | ieee80211_free_txskb(sc->hw, skb); | 838 | ieee80211_free_txskb(sc->hw, skb); |
| 815 | continue; | 839 | continue; |
| 816 | } | 840 | } |
| @@ -1824,6 +1848,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, | |||
| 1824 | 1848 | ||
| 1825 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); | 1849 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); |
| 1826 | if (!bf) { | 1850 | if (!bf) { |
| 1851 | ath_txq_skb_done(sc, txq, skb); | ||
| 1827 | ieee80211_free_txskb(sc->hw, skb); | 1852 | ieee80211_free_txskb(sc->hw, skb); |
| 1828 | return; | 1853 | return; |
| 1829 | } | 1854 | } |
| @@ -2090,6 +2115,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
| 2090 | 2115 | ||
| 2091 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); | 2116 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); |
| 2092 | if (!bf) { | 2117 | if (!bf) { |
| 2118 | ath_txq_skb_done(sc, txq, skb); | ||
| 2093 | if (txctl->paprd) | 2119 | if (txctl->paprd) |
| 2094 | dev_kfree_skb_any(skb); | 2120 | dev_kfree_skb_any(skb); |
| 2095 | else | 2121 | else |
| @@ -2189,7 +2215,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
| 2189 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 2215 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
| 2190 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 2216 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
| 2191 | struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; | 2217 | struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; |
| 2192 | int q, padpos, padsize; | 2218 | int padpos, padsize; |
| 2193 | unsigned long flags; | 2219 | unsigned long flags; |
| 2194 | 2220 | ||
| 2195 | ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); | 2221 | ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); |
| @@ -2225,21 +2251,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
| 2225 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); | 2251 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); |
| 2226 | 2252 | ||
| 2227 | __skb_queue_tail(&txq->complete_q, skb); | 2253 | __skb_queue_tail(&txq->complete_q, skb); |
| 2228 | 2254 | ath_txq_skb_done(sc, txq, skb); | |
| 2229 | q = skb_get_queue_mapping(skb); | ||
| 2230 | if (txq == sc->tx.uapsdq) | ||
| 2231 | txq = sc->tx.txq_map[q]; | ||
| 2232 | |||
| 2233 | if (txq == sc->tx.txq_map[q]) { | ||
| 2234 | if (WARN_ON(--txq->pending_frames < 0)) | ||
| 2235 | txq->pending_frames = 0; | ||
| 2236 | |||
| 2237 | if (txq->stopped && | ||
| 2238 | txq->pending_frames < sc->tx.txq_max_pending[q]) { | ||
| 2239 | ieee80211_wake_queue(sc->hw, q); | ||
| 2240 | txq->stopped = false; | ||
| 2241 | } | ||
| 2242 | } | ||
| 2243 | } | 2255 | } |
| 2244 | 2256 | ||
| 2245 | static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, | 2257 | static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, |
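The xmit.c hunks are one refactor with a bug fix attached: the pending_frames decrement and queue-wake logic, previously open-coded only in ath_tx_complete(), moves into ath_txq_skb_done() and is now also called on every error path that frees an skb, so a failed ath_tx_setup_buffer() can no longer leak the counter and stall the queue. The shape of that discipline, as a runnable sketch with simplified, hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

struct txq {
	int pending_frames;
	bool stopped;
};

/* single accounting helper every skb-free path must call first */
static void txq_skb_done(struct txq *q, int max_pending)
{
	if (--q->pending_frames < 0)	/* defensive, mirrors the WARN_ON */
		q->pending_frames = 0;

	if (q->stopped && q->pending_frames < max_pending) {
		printf("wake queue\n");	/* ieee80211_wake_queue() upstream */
		q->stopped = false;
	}
}

int main(void)
{
	struct txq q = { .pending_frames = 4, .stopped = true };

	/* error path: account first, then free the skb */
	txq_skb_done(&q, 4);	/* wakes the queue once 3 < 4 */
	return 0;
}
```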
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 4a33c6e39ca2..349fa22a921a 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
| @@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size) | |||
| 1860 | IEEE80211_HW_PS_NULLFUNC_STACK | | 1860 | IEEE80211_HW_PS_NULLFUNC_STACK | |
| 1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | | 1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | |
| 1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | | 1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
| 1863 | IEEE80211_HW_SIGNAL_DBM; | 1863 | IEEE80211_HW_SIGNAL_DBM | |
| 1864 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
| 1864 | 1865 | ||
| 1865 | if (!modparam_noht) { | 1866 | if (!modparam_noht) { |
| 1866 | /* | 1867 | /* |
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index e8308ec30970..ab636767fbde 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c | |||
| @@ -145,7 +145,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, | |||
| 145 | le16_to_cpu(hdr.type), hdr.flags); | 145 | le16_to_cpu(hdr.type), hdr.flags); |
| 146 | if (len <= MAX_MBOXITEM_SIZE) { | 146 | if (len <= MAX_MBOXITEM_SIZE) { |
| 147 | int n = 0; | 147 | int n = 0; |
| 148 | unsigned char printbuf[16 * 3 + 2]; | 148 | char printbuf[16 * 3 + 2]; |
| 149 | unsigned char databuf[MAX_MBOXITEM_SIZE]; | 149 | unsigned char databuf[MAX_MBOXITEM_SIZE]; |
| 150 | void __iomem *src = wmi_buffer(wil, d.addr) + | 150 | void __iomem *src = wmi_buffer(wil, d.addr) + |
| 151 | sizeof(struct wil6210_mbox_hdr); | 151 | sizeof(struct wil6210_mbox_hdr); |
| @@ -416,7 +416,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) | |||
| 416 | seq_printf(s, " SKB = %p\n", skb); | 416 | seq_printf(s, " SKB = %p\n", skb); |
| 417 | 417 | ||
| 418 | if (skb) { | 418 | if (skb) { |
| 419 | unsigned char printbuf[16 * 3 + 2]; | 419 | char printbuf[16 * 3 + 2]; |
| 420 | int i = 0; | 420 | int i = 0; |
| 421 | int len = le16_to_cpu(d->dma.length); | 421 | int len = le16_to_cpu(d->dma.length); |
| 422 | void *p = skb->data; | 422 | void *p = skb->data; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 8e8975562ec3..80099016d21f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | |||
| @@ -242,7 +242,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, | |||
| 242 | { | 242 | { |
| 243 | unsigned long flags; | 243 | unsigned long flags; |
| 244 | 244 | ||
| 245 | if (!ifp) | 245 | if (!ifp || !ifp->ndev) |
| 246 | return; | 246 | return; |
| 247 | 247 | ||
| 248 | brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", | 248 | brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index f0d9f7f6c83d..29b1f24c2d0f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | |||
| @@ -1744,13 +1744,14 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) | |||
| 1744 | ulong flags; | 1744 | ulong flags; |
| 1745 | int fifo = BRCMF_FWS_FIFO_BCMC; | 1745 | int fifo = BRCMF_FWS_FIFO_BCMC; |
| 1746 | bool multicast = is_multicast_ether_addr(eh->h_dest); | 1746 | bool multicast = is_multicast_ether_addr(eh->h_dest); |
| 1747 | bool pae = eh->h_proto == htons(ETH_P_PAE); | ||
| 1747 | 1748 | ||
| 1748 | /* determine the priority */ | 1749 | /* determine the priority */ |
| 1749 | if (!skb->priority) | 1750 | if (!skb->priority) |
| 1750 | skb->priority = cfg80211_classify8021d(skb); | 1751 | skb->priority = cfg80211_classify8021d(skb); |
| 1751 | 1752 | ||
| 1752 | drvr->tx_multicast += !!multicast; | 1753 | drvr->tx_multicast += !!multicast; |
| 1753 | if (ntohs(eh->h_proto) == ETH_P_PAE) | 1754 | if (pae) |
| 1754 | atomic_inc(&ifp->pend_8021x_cnt); | 1755 | atomic_inc(&ifp->pend_8021x_cnt); |
| 1755 | 1756 | ||
| 1756 | if (!brcmf_fws_fc_active(fws)) { | 1757 | if (!brcmf_fws_fc_active(fws)) { |
| @@ -1781,6 +1782,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) | |||
| 1781 | brcmf_fws_schedule_deq(fws); | 1782 | brcmf_fws_schedule_deq(fws); |
| 1782 | } else { | 1783 | } else { |
| 1783 | brcmf_err("drop skb: no hanger slot\n"); | 1784 | brcmf_err("drop skb: no hanger slot\n"); |
| 1785 | if (pae) { | ||
| 1786 | atomic_dec(&ifp->pend_8021x_cnt); | ||
| 1787 | if (waitqueue_active(&ifp->pend_8021x_wait)) | ||
| 1788 | wake_up(&ifp->pend_8021x_wait); | ||
| 1789 | } | ||
| 1784 | brcmu_pkt_buf_free_skb(skb); | 1790 | brcmu_pkt_buf_free_skb(skb); |
| 1785 | } | 1791 | } |
| 1786 | brcmf_fws_unlock(drvr, flags); | 1792 | brcmf_fws_unlock(drvr, flags); |
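The fwsignal fix above is error-path bookkeeping: the 802.1x pending counter is bumped before the frame is queued, so the "no hanger slot" drop path must undo the increment and kick any thread sleeping on the pending-EAPOL wait, otherwise a key exchange could hang until timeout. A minimal sketch of the rollback (standalone globals stand in for the per-interface fields):

```c
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

static atomic_t pend_8021x_cnt = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(pend_8021x_wait);

/* drop path for an EAPOL frame whose counter was already bumped */
static void my_drop_pae(void)
{
	atomic_dec(&pend_8021x_cnt);
	if (waitqueue_active(&pend_8021x_wait))
		wake_up(&pend_8021x_wait);	/* unblock the key-exchange waiter */
}
```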
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 277b37ae7126..7fa71f73cfe8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
| @@ -1093,8 +1093,11 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif) | |||
| 1093 | brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); | 1093 | brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); |
| 1094 | err = brcmf_fil_cmd_data_set(vif->ifp, | 1094 | err = brcmf_fil_cmd_data_set(vif->ifp, |
| 1095 | BRCMF_C_DISASSOC, NULL, 0); | 1095 | BRCMF_C_DISASSOC, NULL, 0); |
| 1096 | if (err) | 1096 | if (err) { |
| 1097 | brcmf_err("WLC_DISASSOC failed (%d)\n", err); | 1097 | brcmf_err("WLC_DISASSOC failed (%d)\n", err); |
| 1098 | cfg80211_disconnected(vif->wdev.netdev, 0, | ||
| 1099 | NULL, 0, GFP_KERNEL); | ||
| 1100 | } | ||
| 1098 | clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); | 1101 | clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); |
| 1099 | } | 1102 | } |
| 1100 | clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); | 1103 | clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 7365674366f4..010b252be584 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c | |||
| @@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv) | |||
| 1406 | if (!priv->join_status) | 1406 | if (!priv->join_status) |
| 1407 | goto done; | 1407 | goto done; |
| 1408 | 1408 | ||
| 1409 | if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { | 1409 | if (priv->join_status == CW1200_JOIN_STATUS_AP) |
| 1410 | wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", | 1410 | goto done; |
| 1411 | priv->join_status); | ||
| 1412 | BUG_ON(1); | ||
| 1413 | } | ||
| 1414 | 1411 | ||
| 1415 | cancel_work_sync(&priv->update_filtering_work); | 1412 | cancel_work_sync(&priv->update_filtering_work); |
| 1416 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); | 1413 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); |
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c index 5862c373d714..e824d4d4a18d 100644 --- a/drivers/net/wireless/cw1200/txrx.c +++ b/drivers/net/wireless/cw1200/txrx.c | |||
| @@ -1165,7 +1165,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, | |||
| 1165 | if (cw1200_handle_action_rx(priv, skb)) | 1165 | if (cw1200_handle_action_rx(priv, skb)) |
| 1166 | return; | 1166 | return; |
| 1167 | } else if (ieee80211_is_beacon(frame->frame_control) && | 1167 | } else if (ieee80211_is_beacon(frame->frame_control) && |
| 1168 | !arg->status && | 1168 | !arg->status && priv->vif && |
| 1169 | !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid, | 1169 | !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid, |
| 1170 | ETH_ALEN)) { | 1170 | ETH_ALEN)) { |
| 1171 | const u8 *tim_ie; | 1171 | const u8 *tim_ie; |
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index ac074731335a..e5090309824e 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c | |||
| @@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev, | |||
| 523 | 523 | ||
| 524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); | 524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); |
| 525 | 525 | ||
| 526 | memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); | 526 | memcpy(extra, addr, sizeof(struct sockaddr) * data->length); |
| 527 | data->flags = 1; /* has quality information */ | 527 | data->flags = 1; /* has quality information */ |
| 528 | memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, | 528 | memcpy(extra + sizeof(struct sockaddr) * data->length, qual, |
| 529 | sizeof(struct iw_quality) * data->length); | 529 | sizeof(struct iw_quality) * data->length); |
| 530 | 530 | ||
| 531 | kfree(addr); | 531 | kfree(addr); |
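The hostap fix above is a classic pointer-level bug: addr and qual are already pointers to heap arrays, so memcpy(extra, &addr, n) copied n bytes starting at the pointer variable itself rather than at the array. A runnable userspace demonstration of the difference:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	int *addr = malloc(4 * sizeof(int));
	int out[4];
	int i;

	for (i = 0; i < 4; i++)
		addr[i] = i + 1;

	memcpy(out, addr, sizeof(out));	/* correct: copies 1 2 3 4 */
	/* memcpy(out, &addr, sizeof(out)) would instead copy the bytes
	 * of the pointer variable plus whatever follows it on the
	 * stack: garbage, and an out-of-bounds read. */
	for (i = 0; i < 4; i++)
		printf("%d ", out[i]);
	printf("\n");
	free(addr);
	return 0;
}
```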
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b9b2bb51e605..7acf5ee23582 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
| @@ -4460,13 +4460,13 @@ il4965_irq_tasklet(struct il_priv *il) | |||
| 4460 | * is killed. Hence update the killswitch state here. The | 4460 | * is killed. Hence update the killswitch state here. The |
| 4461 | * rfkill handler will care about restarting if needed. | 4461 | * rfkill handler will care about restarting if needed. |
| 4462 | */ | 4462 | */ |
| 4463 | if (!test_bit(S_ALIVE, &il->status)) { | 4463 | if (hw_rf_kill) { |
| 4464 | if (hw_rf_kill) | 4464 | set_bit(S_RFKILL, &il->status); |
| 4465 | set_bit(S_RFKILL, &il->status); | 4465 | } else { |
| 4466 | else | 4466 | clear_bit(S_RFKILL, &il->status); |
| 4467 | clear_bit(S_RFKILL, &il->status); | 4467 | il_force_reset(il, true); |
| 4468 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
| 4469 | } | 4468 | } |
| 4469 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
| 4470 | 4470 | ||
| 4471 | handled |= CSR_INT_BIT_RF_KILL; | 4471 | handled |= CSR_INT_BIT_RF_KILL; |
| 4472 | } | 4472 | } |
| @@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il) | |||
| 5334 | 5334 | ||
| 5335 | il->active_rate = RATES_MASK; | 5335 | il->active_rate = RATES_MASK; |
| 5336 | 5336 | ||
| 5337 | il_power_update_mode(il, true); | ||
| 5338 | D_INFO("Updated power mode\n"); | ||
| 5339 | |||
| 5337 | if (il_is_associated(il)) { | 5340 | if (il_is_associated(il)) { |
| 5338 | struct il_rxon_cmd *active_rxon = | 5341 | struct il_rxon_cmd *active_rxon = |
| 5339 | (struct il_rxon_cmd *)&il->active; | 5342 | (struct il_rxon_cmd *)&il->active; |
| @@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il) | |||
| 5364 | D_INFO("ALIVE processing complete.\n"); | 5367 | D_INFO("ALIVE processing complete.\n"); |
| 5365 | wake_up(&il->wait_command_queue); | 5368 | wake_up(&il->wait_command_queue); |
| 5366 | 5369 | ||
| 5367 | il_power_update_mode(il, true); | ||
| 5368 | D_INFO("Updated power mode\n"); | ||
| 5369 | |||
| 5370 | return; | 5370 | return; |
| 5371 | 5371 | ||
| 5372 | restart: | 5372 | restart: |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 3195aad440dd..b03e22ef5462 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c | |||
| @@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external) | |||
| 4660 | 4660 | ||
| 4661 | return 0; | 4661 | return 0; |
| 4662 | } | 4662 | } |
| 4663 | EXPORT_SYMBOL(il_force_reset); | ||
| 4663 | 4664 | ||
| 4664 | int | 4665 | int |
| 4665 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 4666 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 822f1a00efbb..319387263e12 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
| @@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) | |||
| 1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
| 1069 | return; | 1069 | return; |
| 1070 | 1070 | ||
| 1071 | if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) | 1071 | if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) |
| 1072 | return; | ||
| 1073 | |||
| 1074 | if (ctx->vif) | ||
| 1072 | ieee80211_chswitch_done(ctx->vif, is_success); | 1075 | ieee80211_chswitch_done(ctx->vif, is_success); |
| 1073 | } | 1076 | } |
| 1074 | 1077 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 3952ddf2ddb2..1531a4fc0960 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c | |||
| @@ -758,7 +758,7 @@ int iwl_alive_start(struct iwl_priv *priv) | |||
| 758 | BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); | 758 | BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); |
| 759 | if (ret) | 759 | if (ret) |
| 760 | return ret; | 760 | return ret; |
| 761 | } else { | 761 | } else if (priv->lib->bt_params) { |
| 762 | /* | 762 | /* |
| 763 | * default is 2-wire BT coexistence support | 763 | * default is 2-wire BT coexistence support |
| 764 | */ | 764 | */ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 7e5e5c2f9f87..83da884cf303 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c | |||
| @@ -134,7 +134,7 @@ struct wowlan_key_data { | |||
| 134 | struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; | 134 | struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; |
| 135 | struct iwl_wowlan_tkip_params_cmd *tkip; | 135 | struct iwl_wowlan_tkip_params_cmd *tkip; |
| 136 | bool error, use_rsc_tsc, use_tkip; | 136 | bool error, use_rsc_tsc, use_tkip; |
| 137 | int gtk_key_idx; | 137 | int wep_key_idx; |
| 138 | }; | 138 | }; |
| 139 | 139 | ||
| 140 | static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, | 140 | static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, |
| @@ -188,8 +188,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, | |||
| 188 | wkc.wep_key.key_offset = 0; | 188 | wkc.wep_key.key_offset = 0; |
| 189 | } else { | 189 | } else { |
| 190 | /* others start at 1 */ | 190 | /* others start at 1 */ |
| 191 | data->gtk_key_idx++; | 191 | data->wep_key_idx++; |
| 192 | wkc.wep_key.key_offset = data->gtk_key_idx; | 192 | wkc.wep_key.key_offset = data->wep_key_idx; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC, | 195 | ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC, |
| @@ -316,8 +316,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, | |||
| 316 | mvm->ptk_ivlen = key->iv_len; | 316 | mvm->ptk_ivlen = key->iv_len; |
| 317 | mvm->ptk_icvlen = key->icv_len; | 317 | mvm->ptk_icvlen = key->icv_len; |
| 318 | } else { | 318 | } else { |
| 319 | data->gtk_key_idx++; | 319 | /* |
| 320 | key->hw_key_idx = data->gtk_key_idx; | 320 | * firmware only supports TSC/RSC for a single key, |
| 321 | * so if there are multiple keep overwriting them | ||
| 322 | * with new ones -- this relies on mac80211 doing | ||
| 323 | * list_add_tail(). | ||
| 324 | */ | ||
| 325 | key->hw_key_idx = 1; | ||
| 321 | mvm->gtk_ivlen = key->iv_len; | 326 | mvm->gtk_ivlen = key->iv_len; |
| 322 | mvm->gtk_icvlen = key->icv_len; | 327 | mvm->gtk_icvlen = key->icv_len; |
| 323 | } | 328 | } |
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index e56ed2a84888..c24a744910ac 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c | |||
| @@ -988,7 +988,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
| 988 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | 988 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
| 989 | char buf[100]; | 989 | char buf[100]; |
| 990 | 990 | ||
| 991 | if (!dbgfs_dir) | 991 | /* |
| 992 | * Check if debugfs directory already exists before creating it. | ||
| 993 | * This may happen when, for example, resetting hw or suspend-resume | ||
| 994 | */ | ||
| 995 | if (!dbgfs_dir || mvmvif->dbgfs_dir) | ||
| 992 | return; | 996 | return; |
| 993 | 997 | ||
| 994 | mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); | 998 | mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index b60d14151721..365095a0c3b3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
| @@ -69,7 +69,6 @@ | |||
| 69 | /* Scan Commands, Responses, Notifications */ | 69 | /* Scan Commands, Responses, Notifications */ |
| 70 | 70 | ||
| 71 | /* Masks for iwl_scan_channel.type flags */ | 71 | /* Masks for iwl_scan_channel.type flags */ |
| 72 | #define SCAN_CHANNEL_TYPE_PASSIVE 0 | ||
| 73 | #define SCAN_CHANNEL_TYPE_ACTIVE BIT(0) | 72 | #define SCAN_CHANNEL_TYPE_ACTIVE BIT(0) |
| 74 | #define SCAN_CHANNEL_NARROW_BAND BIT(22) | 73 | #define SCAN_CHANNEL_NARROW_BAND BIT(22) |
| 75 | 74 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e08683b20531..f19baf0dea6b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
| @@ -257,7 +257,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
| 257 | if (ret) | 257 | if (ret) |
| 258 | return ret; | 258 | return ret; |
| 259 | 259 | ||
| 260 | return ieee80211_register_hw(mvm->hw); | 260 | ret = ieee80211_register_hw(mvm->hw); |
| 261 | if (ret) | ||
| 262 | iwl_mvm_leds_exit(mvm); | ||
| 263 | |||
| 264 | return ret; | ||
| 261 | } | 265 | } |
| 262 | 266 | ||
| 263 | static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, | 267 | static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, |
| @@ -385,6 +389,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) | |||
| 385 | ieee80211_wake_queues(mvm->hw); | 389 | ieee80211_wake_queues(mvm->hw); |
| 386 | 390 | ||
| 387 | mvm->vif_count = 0; | 391 | mvm->vif_count = 0; |
| 392 | mvm->rx_ba_sessions = 0; | ||
| 388 | } | 393 | } |
| 389 | 394 | ||
| 390 | static int iwl_mvm_mac_start(struct ieee80211_hw *hw) | 395 | static int iwl_mvm_mac_start(struct ieee80211_hw *hw) |
| @@ -507,6 +512,27 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, | |||
| 507 | goto out_unlock; | 512 | goto out_unlock; |
| 508 | 513 | ||
| 509 | /* | 514 | /* |
| 515 | * TODO: remove this temporary code. | ||
| 516 | * Currently MVM FW supports power management only on single MAC. | ||
| 517 | * If new interface added, disable PM on existing interface. | ||
| 518 | * P2P device is a special case, since it is handled by FW similarly to | ||
| 519 | * scan. If P2P device is added, PM remains enabled on existing | ||
| 520 | * interface. | ||
| 521 | * Note: the method below does not count the new interface being added | ||
| 522 | * at this moment. | ||
| 523 | */ | ||
| 524 | if (vif->type != NL80211_IFTYPE_P2P_DEVICE) | ||
| 525 | mvm->vif_count++; | ||
| 526 | if (mvm->vif_count > 1) { | ||
| 527 | IWL_DEBUG_MAC80211(mvm, | ||
| 528 | "Disable power on existing interfaces\n"); | ||
| 529 | ieee80211_iterate_active_interfaces_atomic( | ||
| 530 | mvm->hw, | ||
| 531 | IEEE80211_IFACE_ITER_NORMAL, | ||
| 532 | iwl_mvm_pm_disable_iterator, mvm); | ||
| 533 | } | ||
| 534 | |||
| 535 | /* | ||
| 510 | * The AP binding flow can be done only after the beacon | 536 | * The AP binding flow can be done only after the beacon |
| 511 | * template is configured (which happens only in the mac80211 | 537 | * template is configured (which happens only in the mac80211 |
| 512 | * start_ap() flow), and adding the broadcast station can happen | 538 | * start_ap() flow), and adding the broadcast station can happen |
| @@ -529,27 +555,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, | |||
| 529 | goto out_unlock; | 555 | goto out_unlock; |
| 530 | } | 556 | } |
| 531 | 557 | ||
| 532 | /* | ||
| 533 | * TODO: remove this temporary code. | ||
| 534 | * Currently MVM FW supports power management only on single MAC. | ||
| 535 | * If new interface added, disable PM on existing interface. | ||
| 536 | * P2P device is a special case, since it is handled by FW similarly to | ||
| 537 | * scan. If P2P device is added, PM remains enabled on existing | ||
| 538 | * interface. | ||
| 539 | * Note: the method below does not count the new interface being added | ||
| 540 | * at this moment. | ||
| 541 | */ | ||
| 542 | if (vif->type != NL80211_IFTYPE_P2P_DEVICE) | ||
| 543 | mvm->vif_count++; | ||
| 544 | if (mvm->vif_count > 1) { | ||
| 545 | IWL_DEBUG_MAC80211(mvm, | ||
| 546 | "Disable power on existing interfaces\n"); | ||
| 547 | ieee80211_iterate_active_interfaces_atomic( | ||
| 548 | mvm->hw, | ||
| 549 | IEEE80211_IFACE_ITER_NORMAL, | ||
| 550 | iwl_mvm_pm_disable_iterator, mvm); | ||
| 551 | } | ||
| 552 | |||
| 553 | ret = iwl_mvm_mac_ctxt_add(mvm, vif); | 558 | ret = iwl_mvm_mac_ctxt_add(mvm, vif); |
| 554 | if (ret) | 559 | if (ret) |
| 555 | goto out_release; | 560 | goto out_release; |
| @@ -1006,6 +1011,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, | |||
| 1006 | mutex_lock(&mvm->mutex); | 1011 | mutex_lock(&mvm->mutex); |
| 1007 | if (old_state == IEEE80211_STA_NOTEXIST && | 1012 | if (old_state == IEEE80211_STA_NOTEXIST && |
| 1008 | new_state == IEEE80211_STA_NONE) { | 1013 | new_state == IEEE80211_STA_NONE) { |
| 1014 | /* | ||
| 1015 | * Firmware bug - it'll crash if the beacon interval is less | ||
| 1016 | * than 16. We can't avoid connecting at all, so refuse the | ||
| 1017 | * station state change, this will cause mac80211 to abandon | ||
| 1018 | * attempts to connect to this AP, and eventually wpa_s will | ||
| 1019 | * blacklist the AP... | ||
| 1020 | */ | ||
| 1021 | if (vif->type == NL80211_IFTYPE_STATION && | ||
| 1022 | vif->bss_conf.beacon_int < 16) { | ||
| 1023 | IWL_ERR(mvm, | ||
| 1024 | "AP %pM beacon interval is %d, refusing due to firmware bug!\n", | ||
| 1025 | sta->addr, vif->bss_conf.beacon_int); | ||
| 1026 | ret = -EINVAL; | ||
| 1027 | goto out_unlock; | ||
| 1028 | } | ||
| 1009 | ret = iwl_mvm_add_sta(mvm, vif, sta); | 1029 | ret = iwl_mvm_add_sta(mvm, vif, sta); |
| 1010 | } else if (old_state == IEEE80211_STA_NONE && | 1030 | } else if (old_state == IEEE80211_STA_NONE && |
| 1011 | new_state == IEEE80211_STA_AUTH) { | 1031 | new_state == IEEE80211_STA_AUTH) { |
| @@ -1038,6 +1058,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, | |||
| 1038 | } else { | 1058 | } else { |
| 1039 | ret = -EIO; | 1059 | ret = -EIO; |
| 1040 | } | 1060 | } |
| 1061 | out_unlock: | ||
| 1041 | mutex_unlock(&mvm->mutex); | 1062 | mutex_unlock(&mvm->mutex); |
| 1042 | 1063 | ||
| 1043 | return ret; | 1064 | return ret; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d40d7db185d6..420e82d379d9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
| @@ -419,6 +419,7 @@ struct iwl_mvm { | |||
| 419 | struct work_struct sta_drained_wk; | 419 | struct work_struct sta_drained_wk; |
| 420 | unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; | 420 | unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; |
| 421 | atomic_t pending_frames[IWL_MVM_STATION_COUNT]; | 421 | atomic_t pending_frames[IWL_MVM_STATION_COUNT]; |
| 422 | u8 rx_ba_sessions; | ||
| 422 | 423 | ||
| 423 | /* configured by mac80211 */ | 424 | /* configured by mac80211 */ |
| 424 | u32 rts_threshold; | 425 | u32 rts_threshold; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 2157b0f8ced5..acdff6b67e04 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
| @@ -137,8 +137,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd, | |||
| 137 | { | 137 | { |
| 138 | int fw_idx, req_idx; | 138 | int fw_idx, req_idx; |
| 139 | 139 | ||
| 140 | fw_idx = 0; | 140 | for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0; |
| 141 | for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) { | 141 | req_idx--, fw_idx++) { |
| 142 | cmd->direct_scan[fw_idx].id = WLAN_EID_SSID; | 142 | cmd->direct_scan[fw_idx].id = WLAN_EID_SSID; |
| 143 | cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len; | 143 | cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len; |
| 144 | memcpy(cmd->direct_scan[fw_idx].ssid, | 144 | memcpy(cmd->direct_scan[fw_idx].ssid, |
| @@ -153,7 +153,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd, | |||
| 153 | * just to notify that this scan is active and not passive. | 153 | * just to notify that this scan is active and not passive. |
| 154 | * In order to notify the FW of the number of SSIDs we wish to scan (including | 154 | * In order to notify the FW of the number of SSIDs we wish to scan (including |
| 155 | * the zero-length one), we need to set the corresponding bits in chan->type, | 155 | * the zero-length one), we need to set the corresponding bits in chan->type, |
| 156 | * one for each SSID, and set the active bit (first). | 156 | * one for each SSID, and set the active bit (first). The first SSID is already |
| 157 | * included in the probe template, so we need to set only req->n_ssids - 1 bits | ||
| 158 | * in addition to the first bit. | ||
| 157 | */ | 159 | */ |
| 158 | static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) | 160 | static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) |
| 159 | { | 161 | { |
| @@ -176,19 +178,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd, | |||
| 176 | struct iwl_scan_channel *chan = (struct iwl_scan_channel *) | 178 | struct iwl_scan_channel *chan = (struct iwl_scan_channel *) |
| 177 | (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); | 179 | (cmd->data + le16_to_cpu(cmd->tx_cmd.len)); |
| 178 | int i; | 180 | int i; |
| 179 | __le32 chan_type_value; | ||
| 180 | |||
| 181 | if (req->n_ssids > 0) | ||
| 182 | chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1); | ||
| 183 | else | ||
| 184 | chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE; | ||
| 185 | 181 | ||
| 186 | for (i = 0; i < cmd->channel_count; i++) { | 182 | for (i = 0; i < cmd->channel_count; i++) { |
| 187 | chan->channel = cpu_to_le16(req->channels[i]->hw_value); | 183 | chan->channel = cpu_to_le16(req->channels[i]->hw_value); |
| 184 | chan->type = cpu_to_le32(BIT(req->n_ssids) - 1); | ||
| 188 | if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) | 185 | if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) |
| 189 | chan->type = SCAN_CHANNEL_TYPE_PASSIVE; | 186 | chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE); |
| 190 | else | ||
| 191 | chan->type = chan_type_value; | ||
| 192 | chan->active_dwell = cpu_to_le16(active_dwell); | 187 | chan->active_dwell = cpu_to_le16(active_dwell); |
| 193 | chan->passive_dwell = cpu_to_le16(passive_dwell); | 188 | chan->passive_dwell = cpu_to_le16(passive_dwell); |
| 194 | chan->iteration_count = cpu_to_le16(1); | 189 | chan->iteration_count = cpu_to_le16(1); |
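Per the comment in this hunk, the channel type encodes one low bit per SSID to probe, and bit 0 (SCAN_CHANNEL_TYPE_ACTIVE) doubles as the active-scan flag that is masked off on passive channels; BIT(n) - 1 sets the n lowest bits in one expression. A runnable sketch of that encoding (the exact firmware interpretation is as described above, not verified here):

```c
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define CHANNEL_TYPE_ACTIVE	BIT(0)

int main(void)
{
	unsigned n_ssids = 3;
	unsigned type = BIT(n_ssids) - 1;	/* 0b111: three SSID bits */
	int passive = 1;

	if (passive)
		type &= ~CHANNEL_TYPE_ACTIVE;	/* 0b110: same SSID count, passive */
	printf("type = 0x%x\n", type);
	return 0;
}
```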
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 62fe5209093b..563f559b902d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c | |||
| @@ -608,6 +608,8 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta) | |||
| 608 | return ret; | 608 | return ret; |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | #define IWL_MAX_RX_BA_SESSIONS 16 | ||
| 612 | |||
| 611 | int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 613 | int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
| 612 | int tid, u16 ssn, bool start) | 614 | int tid, u16 ssn, bool start) |
| 613 | { | 615 | { |
| @@ -618,11 +620,20 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 618 | 620 | ||
| 619 | lockdep_assert_held(&mvm->mutex); | 621 | lockdep_assert_held(&mvm->mutex); |
| 620 | 622 | ||
| 623 | if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { | ||
| 624 | IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); | ||
| 625 | return -ENOSPC; | ||
| 626 | } | ||
| 627 | |||
| 621 | cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); | 628 | cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); |
| 622 | cmd.sta_id = mvm_sta->sta_id; | 629 | cmd.sta_id = mvm_sta->sta_id; |
| 623 | cmd.add_modify = STA_MODE_MODIFY; | 630 | cmd.add_modify = STA_MODE_MODIFY; |
| 624 | cmd.add_immediate_ba_tid = (u8) tid; | 631 | if (start) { |
| 625 | cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); | 632 | cmd.add_immediate_ba_tid = (u8) tid; |
| 633 | cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); | ||
| 634 | } else { | ||
| 635 | cmd.remove_immediate_ba_tid = (u8) tid; | ||
| 636 | } | ||
| 626 | cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : | 637 | cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : |
| 627 | STA_MODIFY_REMOVE_BA_TID; | 638 | STA_MODIFY_REMOVE_BA_TID; |
| 628 | 639 | ||
| @@ -648,6 +659,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 648 | break; | 659 | break; |
| 649 | } | 660 | } |
| 650 | 661 | ||
| 662 | if (!ret) { | ||
| 663 | if (start) | ||
| 664 | mvm->rx_ba_sessions++; | ||
| 665 | else if (mvm->rx_ba_sessions > 0) | ||
| 666 | /* check that restart flow didn't zero the counter */ | ||
| 667 | mvm->rx_ba_sessions--; | ||
| 668 | } | ||
| 669 | |||
| 651 | return ret; | 670 | return ret; |
| 652 | } | 671 | } |
| 653 | 672 | ||
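The sta.c hunk caps RX block-ack sessions at 16 and only moves the counter when the firmware command actually succeeded; the decrement is additionally clamped at zero because a firmware restart can reset the counter underneath the driver. A runnable model of that accounting (fw_cmd_ok is a stand-in for the synchronous ADD_STA command):

```c
#include <stdio.h>

#define MAX_RX_BA_SESSIONS 16

static int rx_ba_sessions;

static int fw_cmd_ok(void) { return 0; }	/* stand-in: command succeeds */

static int rx_agg(int start)
{
	if (start && rx_ba_sessions >= MAX_RX_BA_SESSIONS)
		return -1;	/* -ENOSPC in the driver */

	if (fw_cmd_ok() == 0) {
		if (start)
			rx_ba_sessions++;
		else if (rx_ba_sessions > 0)
			rx_ba_sessions--;	/* restart may have zeroed it */
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 17; i++)
		if (rx_agg(1))
			printf("session %d refused\n", i);	/* the 17th */
	return 0;
}
```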
| @@ -896,6 +915,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
| 896 | struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; | 915 | struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv; |
| 897 | struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; | 916 | struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; |
| 898 | u16 txq_id; | 917 | u16 txq_id; |
| 918 | enum iwl_mvm_agg_state old_state; | ||
| 899 | 919 | ||
| 900 | /* | 920 | /* |
| 901 | * First set the agg state to OFF to avoid calling | 921 | * First set the agg state to OFF to avoid calling |
| @@ -905,13 +925,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
| 905 | txq_id = tid_data->txq_id; | 925 | txq_id = tid_data->txq_id; |
| 906 | IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", | 926 | IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", |
| 907 | mvmsta->sta_id, tid, txq_id, tid_data->state); | 927 | mvmsta->sta_id, tid, txq_id, tid_data->state); |
| 928 | old_state = tid_data->state; | ||
| 908 | tid_data->state = IWL_AGG_OFF; | 929 | tid_data->state = IWL_AGG_OFF; |
| 909 | spin_unlock_bh(&mvmsta->lock); | 930 | spin_unlock_bh(&mvmsta->lock); |
| 910 | 931 | ||
| 911 | if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true)) | 932 | if (old_state >= IWL_AGG_ON) { |
| 912 | IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); | 933 | if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true)) |
| 934 | IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); | ||
| 935 | |||
| 936 | iwl_trans_txq_disable(mvm->trans, tid_data->txq_id); | ||
| 937 | } | ||
| 913 | 938 | ||
| 914 | iwl_trans_txq_disable(mvm->trans, tid_data->txq_id); | ||
| 915 | mvm->queue_to_mac80211[tid_data->txq_id] = | 939 | mvm->queue_to_mac80211[tid_data->txq_id] = |
| 916 | IWL_INVALID_MAC80211_QUEUE; | 940 | IWL_INVALID_MAC80211_QUEUE; |
| 917 | 941 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index ad9bbca99213..7fd6fbfbc1b3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
| @@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) | |||
| 138 | schedule_work(&mvm->roc_done_wk); | 138 | schedule_work(&mvm->roc_done_wk); |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, | ||
| 142 | struct ieee80211_vif *vif, | ||
| 143 | const char *errmsg) | ||
| 144 | { | ||
| 145 | if (vif->type != NL80211_IFTYPE_STATION) | ||
| 146 | return false; | ||
| 147 | if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) | ||
| 148 | return false; | ||
| 149 | if (errmsg) | ||
| 150 | IWL_ERR(mvm, "%s\n", errmsg); | ||
| 151 | ieee80211_connection_loss(vif); | ||
| 152 | return true; | ||
| 153 | } | ||
| 154 | |||
| 141 | /* | 155 | /* |
| 142 | * Handles a FW notification for an event that is known to the driver. | 156 | * Handles a FW notification for an event that is known to the driver. |
| 143 | * | 157 | * |
| @@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
| 163 | * P2P Device discoverability, while there are other higher priority | 177 | * P2P Device discoverability, while there are other higher priority |
| 164 | * events in the system). | 178 | * events in the system). |
| 165 | */ | 179 | */ |
| 166 | WARN_ONCE(!le32_to_cpu(notif->status), | 180 | if (WARN_ONCE(!le32_to_cpu(notif->status), |
| 167 | "Failed to schedule time event\n"); | 181 | "Failed to schedule time event\n")) { |
| 182 | if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { | ||
| 183 | iwl_mvm_te_clear_data(mvm, te_data); | ||
| 184 | return; | ||
| 185 | } | ||
| 186 | } | ||
| 168 | 187 | ||
| 169 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { | 188 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { |
| 170 | IWL_DEBUG_TE(mvm, | 189 | IWL_DEBUG_TE(mvm, |
| @@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
| 180 | * By now, we should have finished association | 199 | * By now, we should have finished association |
| 181 | * and know the dtim period. | 200 | * and know the dtim period. |
| 182 | */ | 201 | */ |
| 183 | if (te_data->vif->type == NL80211_IFTYPE_STATION && | 202 | iwl_mvm_te_check_disconnect(mvm, te_data->vif, |
| 184 | (!te_data->vif->bss_conf.assoc || | 203 | "No assocation and the time event is over already..."); |
| 185 | !te_data->vif->bss_conf.dtim_period)) { | ||
| 186 | IWL_ERR(mvm, | ||
| 187 | "No assocation and the time event is over already...\n"); | ||
| 188 | ieee80211_connection_loss(te_data->vif); | ||
| 189 | } | ||
| 190 | |||
| 191 | iwl_mvm_te_clear_data(mvm, te_data); | 204 | iwl_mvm_te_clear_data(mvm, te_data); |
| 192 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { | 205 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { |
| 193 | te_data->running = true; | 206 | te_data->running = true; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 81f3ea5b09a4..ff13458efc27 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
| @@ -130,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
| 130 | {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ | 130 | {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ |
| 131 | {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ | 131 | {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ |
| 132 | {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ | 132 | {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ |
| 133 | {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ | ||
| 133 | 134 | ||
| 134 | {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ | 135 | {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ |
| 135 | {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ | 136 | {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 826c15602c46..390e2f058aff 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
| @@ -670,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | |||
| 670 | return err; | 670 | return err; |
| 671 | } | 671 | } |
| 672 | 672 | ||
| 673 | /* Reset the entire device */ | ||
| 674 | iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); | ||
| 675 | |||
| 676 | usleep_range(10, 15); | ||
| 677 | |||
| 673 | iwl_pcie_apm_init(trans); | 678 | iwl_pcie_apm_init(trans); |
| 674 | 679 | ||
| 675 | /* From now on, the op_mode will be kept updated about RF kill state */ | 680 | /* From now on, the op_mode will be kept updated about RF kill state */ |
| @@ -1497,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
| 1497 | spin_lock_init(&trans_pcie->reg_lock); | 1502 | spin_lock_init(&trans_pcie->reg_lock); |
| 1498 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); | 1503 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); |
| 1499 | 1504 | ||
| 1500 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
| 1501 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
| 1502 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
| 1503 | PCIE_LINK_STATE_CLKPM); | ||
| 1504 | |||
| 1505 | if (pci_enable_device(pdev)) { | 1505 | if (pci_enable_device(pdev)) { |
| 1506 | err = -ENODEV; | 1506 | err = -ENODEV; |
| 1507 | goto out_no_pci; | 1507 | goto out_no_pci; |
| 1508 | } | 1508 | } |
| 1509 | 1509 | ||
| 1510 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
| 1511 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
| 1512 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
| 1513 | PCIE_LINK_STATE_CLKPM); | ||
| 1514 | |||
| 1510 | pci_set_master(pdev); | 1515 | pci_set_master(pdev); |
| 1511 | 1516 | ||
| 1512 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | 1517 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); |
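
The trans.c hunk does not change what is configured, only when: the reordering suggests the ASPM adjustment made by `pci_disable_link_state()` is only reliable once the device has been enabled (an assumption read from the hunk itself; the in-code comment already calls it a workaround). A minimal sketch of the corrected ordering, with `probe_sketch` as an illustrative name and error handling trimmed:

```c
/* Sketch: enable the PCI device first, then tune its link states. */
static int probe_sketch(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* W/A from the hunk above: keep the link out of L0s/L1/CLKPM. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	pci_set_master(pdev);
	return 0;
}
```
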
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index ef5fa890a286..89459db4c53b 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
| @@ -1716,9 +1716,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, | |||
| 1716 | struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); | 1716 | struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); |
| 1717 | int ret; | 1717 | int ret; |
| 1718 | 1718 | ||
| 1719 | if (priv->bss_mode != NL80211_IFTYPE_STATION) { | 1719 | if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) { |
| 1720 | wiphy_err(wiphy, | 1720 | wiphy_err(wiphy, |
| 1721 | "%s: reject infra assoc request in non-STA mode\n", | 1721 | "%s: reject infra assoc request in non-STA role\n", |
| 1722 | dev->name); | 1722 | dev->name); |
| 1723 | return -EINVAL; | 1723 | return -EINVAL; |
| 1724 | } | 1724 | } |
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c index 988552dece75..5178c4630d89 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ b/drivers/net/wireless/mwifiex/cfp.c | |||
| @@ -415,7 +415,8 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) | |||
| 415 | u32 k = 0; | 415 | u32 k = 0; |
| 416 | struct mwifiex_adapter *adapter = priv->adapter; | 416 | struct mwifiex_adapter *adapter = priv->adapter; |
| 417 | 417 | ||
| 418 | if (priv->bss_mode == NL80211_IFTYPE_STATION) { | 418 | if (priv->bss_mode == NL80211_IFTYPE_STATION || |
| 419 | priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { | ||
| 419 | switch (adapter->config_bands) { | 420 | switch (adapter->config_bands) { |
| 420 | case BAND_B: | 421 | case BAND_B: |
| 421 | dev_dbg(adapter->dev, "info: infra band=%d " | 422 | dev_dbg(adapter->dev, "info: infra band=%d " |
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index caaf4bd56b30..2cf8b964e966 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c | |||
| @@ -693,7 +693,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, | |||
| 693 | if (!ret) { | 693 | if (!ret) { |
| 694 | dev_notice(adapter->dev, | 694 | dev_notice(adapter->dev, |
| 695 | "WLAN FW already running! Skip FW dnld\n"); | 695 | "WLAN FW already running! Skip FW dnld\n"); |
| 696 | goto done; | 696 | return 0; |
| 697 | } | 697 | } |
| 698 | 698 | ||
| 699 | poll_num = MAX_FIRMWARE_POLL_TRIES; | 699 | poll_num = MAX_FIRMWARE_POLL_TRIES; |
| @@ -719,14 +719,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, | |||
| 719 | poll_fw: | 719 | poll_fw: |
| 720 | /* Check if the firmware is downloaded successfully or not */ | 720 | /* Check if the firmware is downloaded successfully or not */ |
| 721 | ret = adapter->if_ops.check_fw_status(adapter, poll_num); | 721 | ret = adapter->if_ops.check_fw_status(adapter, poll_num); |
| 722 | if (ret) { | 722 | if (ret) |
| 723 | dev_err(adapter->dev, "FW failed to be active in time\n"); | 723 | dev_err(adapter->dev, "FW failed to be active in time\n"); |
| 724 | return -1; | ||
| 725 | } | ||
| 726 | done: | ||
| 727 | /* re-enable host interrupt for mwifiex after fw dnld is successful */ | ||
| 728 | if (adapter->if_ops.enable_int) | ||
| 729 | adapter->if_ops.enable_int(adapter); | ||
| 730 | 724 | ||
| 731 | return ret; | 725 | return ret; |
| 732 | } | 726 | } |
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 1c8a771e8e81..12e778159ec5 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c | |||
| @@ -1291,8 +1291,10 @@ int mwifiex_associate(struct mwifiex_private *priv, | |||
| 1291 | { | 1291 | { |
| 1292 | u8 current_bssid[ETH_ALEN]; | 1292 | u8 current_bssid[ETH_ALEN]; |
| 1293 | 1293 | ||
| 1294 | /* Return error if the adapter or table entry is not marked as infra */ | 1294 | /* Return error if the adapter is not STA role or table entry |
| 1295 | if ((priv->bss_mode != NL80211_IFTYPE_STATION) || | 1295 | * is not marked as infra. |
| 1296 | */ | ||
| 1297 | if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) || | ||
| 1296 | (bss_desc->bss_mode != NL80211_IFTYPE_STATION)) | 1298 | (bss_desc->bss_mode != NL80211_IFTYPE_STATION)) |
| 1297 | return -1; | 1299 | return -1; |
| 1298 | 1300 | ||
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index e15ab72fb03d..1753431de361 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c | |||
| @@ -427,6 +427,10 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) | |||
| 427 | "Cal data request_firmware() failed\n"); | 427 | "Cal data request_firmware() failed\n"); |
| 428 | } | 428 | } |
| 429 | 429 | ||
| 430 | /* enable host interrupt after fw dnld is successful */ | ||
| 431 | if (adapter->if_ops.enable_int) | ||
| 432 | adapter->if_ops.enable_int(adapter); | ||
| 433 | |||
| 430 | adapter->init_wait_q_woken = false; | 434 | adapter->init_wait_q_woken = false; |
| 431 | ret = mwifiex_init_fw(adapter); | 435 | ret = mwifiex_init_fw(adapter); |
| 432 | if (ret == -1) { | 436 | if (ret == -1) { |
| @@ -478,6 +482,8 @@ err_add_intf: | |||
| 478 | mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); | 482 | mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); |
| 479 | rtnl_unlock(); | 483 | rtnl_unlock(); |
| 480 | err_init_fw: | 484 | err_init_fw: |
| 485 | if (adapter->if_ops.disable_int) | ||
| 486 | adapter->if_ops.disable_int(adapter); | ||
| 481 | pr_debug("info: %s: unregister device\n", __func__); | 487 | pr_debug("info: %s: unregister device\n", __func__); |
| 482 | adapter->if_ops.unregister_dev(adapter); | 488 | adapter->if_ops.unregister_dev(adapter); |
| 483 | done: | 489 | done: |
| @@ -855,7 +861,7 @@ mwifiex_add_card(void *card, struct semaphore *sem, | |||
| 855 | INIT_WORK(&adapter->main_work, mwifiex_main_work_queue); | 861 | INIT_WORK(&adapter->main_work, mwifiex_main_work_queue); |
| 856 | 862 | ||
| 857 | /* Register the device. Fill up the private data structure with relevant | 863 | /* Register the device. Fill up the private data structure with relevant |
| 858 | information from the card and request for the required IRQ. */ | 864 | information from the card. */ |
| 859 | if (adapter->if_ops.register_dev(adapter)) { | 865 | if (adapter->if_ops.register_dev(adapter)) { |
| 860 | pr_err("%s: failed to register mwifiex device\n", __func__); | 866 | pr_err("%s: failed to register mwifiex device\n", __func__); |
| 861 | goto err_registerdev; | 867 | goto err_registerdev; |
| @@ -919,6 +925,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) | |||
| 919 | if (!adapter) | 925 | if (!adapter) |
| 920 | goto exit_remove; | 926 | goto exit_remove; |
| 921 | 927 | ||
| 928 | /* We can no longer handle interrupts once we start doing the teardown | ||
| 929 | * below. */ | ||
| 930 | if (adapter->if_ops.disable_int) | ||
| 931 | adapter->if_ops.disable_int(adapter); | ||
| 932 | |||
| 922 | adapter->surprise_removed = true; | 933 | adapter->surprise_removed = true; |
| 923 | 934 | ||
| 924 | /* Stop data */ | 935 | /* Stop data */ |
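
Read together with the init.c hunk, this moves interrupt enabling out of `mwifiex_dnld_fw()` and into the firmware-download callback, paired with the new `disable_int` hook on both the error path and the remove path. Since only some bus back-ends implement these hooks (the SDIO driver below does), every call site guards the function pointer. A condensed sketch of the lifecycle, with `bring_up`/`tear_down` as illustrative names:

```c
/* Sketch: optional bus hooks are NULL-checked, and interrupts are
 * enabled only once firmware download has succeeded. */
static int bring_up(struct mwifiex_adapter *adapter,
		    struct mwifiex_fw_image *fw)
{
	int ret = mwifiex_dnld_fw(adapter, fw);	/* runs with IRQs off */

	if (ret)
		return ret;

	if (adapter->if_ops.enable_int)
		adapter->if_ops.enable_int(adapter);
	return 0;
}

static void tear_down(struct mwifiex_adapter *adapter)
{
	/* quiesce the card before any teardown work */
	if (adapter->if_ops.disable_int)
		adapter->if_ops.disable_int(adapter);

	adapter->if_ops.unregister_dev(adapter);
}
```
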
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 3da73d36acdf..253e0bd38e25 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h | |||
| @@ -601,6 +601,7 @@ struct mwifiex_if_ops { | |||
| 601 | int (*register_dev) (struct mwifiex_adapter *); | 601 | int (*register_dev) (struct mwifiex_adapter *); |
| 602 | void (*unregister_dev) (struct mwifiex_adapter *); | 602 | void (*unregister_dev) (struct mwifiex_adapter *); |
| 603 | int (*enable_int) (struct mwifiex_adapter *); | 603 | int (*enable_int) (struct mwifiex_adapter *); |
| 604 | void (*disable_int) (struct mwifiex_adapter *); | ||
| 604 | int (*process_int_status) (struct mwifiex_adapter *); | 605 | int (*process_int_status) (struct mwifiex_adapter *); |
| 605 | int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *, | 606 | int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *, |
| 606 | struct mwifiex_tx_param *); | 607 | struct mwifiex_tx_param *); |
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 5ee5ed02eccd..09185c963248 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c | |||
| @@ -51,6 +51,7 @@ static struct mwifiex_if_ops sdio_ops; | |||
| 51 | static struct semaphore add_remove_card_sem; | 51 | static struct semaphore add_remove_card_sem; |
| 52 | 52 | ||
| 53 | static int mwifiex_sdio_resume(struct device *dev); | 53 | static int mwifiex_sdio_resume(struct device *dev); |
| 54 | static void mwifiex_sdio_interrupt(struct sdio_func *func); | ||
| 54 | 55 | ||
| 55 | /* | 56 | /* |
| 56 | * SDIO probe. | 57 | * SDIO probe. |
| @@ -296,6 +297,15 @@ static struct sdio_driver mwifiex_sdio = { | |||
| 296 | } | 297 | } |
| 297 | }; | 298 | }; |
| 298 | 299 | ||
| 300 | /* Write data into SDIO card register. Caller claims SDIO device. */ | ||
| 301 | static int | ||
| 302 | mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data) | ||
| 303 | { | ||
| 304 | int ret = -1; | ||
| 305 | sdio_writeb(func, data, reg, &ret); | ||
| 306 | return ret; | ||
| 307 | } | ||
| 308 | |||
| 299 | /* | 309 | /* |
| 300 | * This function writes data into SDIO card register. | 310 | * This function writes data into SDIO card register. |
| 301 | */ | 311 | */ |
| @@ -303,10 +313,10 @@ static int | |||
| 303 | mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) | 313 | mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) |
| 304 | { | 314 | { |
| 305 | struct sdio_mmc_card *card = adapter->card; | 315 | struct sdio_mmc_card *card = adapter->card; |
| 306 | int ret = -1; | 316 | int ret; |
| 307 | 317 | ||
| 308 | sdio_claim_host(card->func); | 318 | sdio_claim_host(card->func); |
| 309 | sdio_writeb(card->func, data, reg, &ret); | 319 | ret = mwifiex_write_reg_locked(card->func, reg, data); |
| 310 | sdio_release_host(card->func); | 320 | sdio_release_host(card->func); |
| 311 | 321 | ||
| 312 | return ret; | 322 | return ret; |
| @@ -685,23 +695,15 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) | |||
| 685 | * The host interrupt mask is read, the disable bit is reset and | 695 | * The host interrupt mask is read, the disable bit is reset and |
| 686 | * written back to the card host interrupt mask register. | 696 | * written back to the card host interrupt mask register. |
| 687 | */ | 697 | */ |
| 688 | static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) | 698 | static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) |
| 689 | { | 699 | { |
| 690 | u8 host_int_mask, host_int_disable = HOST_INT_DISABLE; | 700 | struct sdio_mmc_card *card = adapter->card; |
| 691 | 701 | struct sdio_func *func = card->func; | |
| 692 | /* Read back the host_int_mask register */ | ||
| 693 | if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask)) | ||
| 694 | return -1; | ||
| 695 | |||
| 696 | /* Update with the mask and write back to the register */ | ||
| 697 | host_int_mask &= ~host_int_disable; | ||
| 698 | |||
| 699 | if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) { | ||
| 700 | dev_err(adapter->dev, "disable host interrupt failed\n"); | ||
| 701 | return -1; | ||
| 702 | } | ||
| 703 | 702 | ||
| 704 | return 0; | 703 | sdio_claim_host(func); |
| 704 | mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0); | ||
| 705 | sdio_release_irq(func); | ||
| 706 | sdio_release_host(func); | ||
| 705 | } | 707 | } |
| 706 | 708 | ||
| 707 | /* | 709 | /* |
| @@ -713,14 +715,29 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) | |||
| 713 | static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) | 715 | static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) |
| 714 | { | 716 | { |
| 715 | struct sdio_mmc_card *card = adapter->card; | 717 | struct sdio_mmc_card *card = adapter->card; |
| 718 | struct sdio_func *func = card->func; | ||
| 719 | int ret; | ||
| 720 | |||
| 721 | sdio_claim_host(func); | ||
| 722 | |||
| 723 | /* Request the SDIO IRQ */ | ||
| 724 | ret = sdio_claim_irq(func, mwifiex_sdio_interrupt); | ||
| 725 | if (ret) { | ||
| 726 | dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret); | ||
| 727 | goto out; | ||
| 728 | } | ||
| 716 | 729 | ||
| 717 | /* Simply write the mask to the register */ | 730 | /* Simply write the mask to the register */ |
| 718 | if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, | 731 | ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, |
| 719 | card->reg->host_int_enable)) { | 732 | card->reg->host_int_enable); |
| 733 | if (ret) { | ||
| 720 | dev_err(adapter->dev, "enable host interrupt failed\n"); | 734 | dev_err(adapter->dev, "enable host interrupt failed\n"); |
| 721 | return -1; | 735 | sdio_release_irq(func); |
| 722 | } | 736 | } |
| 723 | return 0; | 737 | |
| 738 | out: | ||
| 739 | sdio_release_host(func); | ||
| 740 | return ret; | ||
| 724 | } | 741 | } |
| 725 | 742 | ||
| 726 | /* | 743 | /* |
| @@ -997,9 +1014,6 @@ mwifiex_sdio_interrupt(struct sdio_func *func) | |||
| 997 | } | 1014 | } |
| 998 | adapter = card->adapter; | 1015 | adapter = card->adapter; |
| 999 | 1016 | ||
| 1000 | if (adapter->surprise_removed) | ||
| 1001 | return; | ||
| 1002 | |||
| 1003 | if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) | 1017 | if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) |
| 1004 | adapter->ps_state = PS_STATE_AWAKE; | 1018 | adapter->ps_state = PS_STATE_AWAKE; |
| 1005 | 1019 | ||
| @@ -1625,8 +1639,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, | |||
| 1625 | /* Allocate buffer and copy payload */ | 1639 | /* Allocate buffer and copy payload */ |
| 1626 | blk_size = MWIFIEX_SDIO_BLOCK_SIZE; | 1640 | blk_size = MWIFIEX_SDIO_BLOCK_SIZE; |
| 1627 | buf_block_len = (pkt_len + blk_size - 1) / blk_size; | 1641 | buf_block_len = (pkt_len + blk_size - 1) / blk_size; |
| 1628 | *(u16 *) &payload[0] = (u16) pkt_len; | 1642 | *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len); |
| 1629 | *(u16 *) &payload[2] = type; | 1643 | *(__le16 *)&payload[2] = cpu_to_le16(type); |
| 1630 | 1644 | ||
| 1631 | /* | 1645 | /* |
| 1632 | * This is SDIO specific header | 1646 | * This is SDIO specific header |
| @@ -1728,9 +1742,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter) | |||
| 1728 | struct sdio_mmc_card *card = adapter->card; | 1742 | struct sdio_mmc_card *card = adapter->card; |
| 1729 | 1743 | ||
| 1730 | if (adapter->card) { | 1744 | if (adapter->card) { |
| 1731 | /* Release the SDIO IRQ */ | ||
| 1732 | sdio_claim_host(card->func); | 1745 | sdio_claim_host(card->func); |
| 1733 | sdio_release_irq(card->func); | ||
| 1734 | sdio_disable_func(card->func); | 1746 | sdio_disable_func(card->func); |
| 1735 | sdio_release_host(card->func); | 1747 | sdio_release_host(card->func); |
| 1736 | sdio_set_drvdata(card->func, NULL); | 1748 | sdio_set_drvdata(card->func, NULL); |
| @@ -1744,7 +1756,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter) | |||
| 1744 | */ | 1756 | */ |
| 1745 | static int mwifiex_register_dev(struct mwifiex_adapter *adapter) | 1757 | static int mwifiex_register_dev(struct mwifiex_adapter *adapter) |
| 1746 | { | 1758 | { |
| 1747 | int ret = 0; | 1759 | int ret; |
| 1748 | struct sdio_mmc_card *card = adapter->card; | 1760 | struct sdio_mmc_card *card = adapter->card; |
| 1749 | struct sdio_func *func = card->func; | 1761 | struct sdio_func *func = card->func; |
| 1750 | 1762 | ||
| @@ -1753,22 +1765,14 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) | |||
| 1753 | 1765 | ||
| 1754 | sdio_claim_host(func); | 1766 | sdio_claim_host(func); |
| 1755 | 1767 | ||
| 1756 | /* Request the SDIO IRQ */ | ||
| 1757 | ret = sdio_claim_irq(func, mwifiex_sdio_interrupt); | ||
| 1758 | if (ret) { | ||
| 1759 | pr_err("claim irq failed: ret=%d\n", ret); | ||
| 1760 | goto disable_func; | ||
| 1761 | } | ||
| 1762 | |||
| 1763 | /* Set block size */ | 1768 | /* Set block size */ |
| 1764 | ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE); | 1769 | ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE); |
| 1770 | sdio_release_host(func); | ||
| 1765 | if (ret) { | 1771 | if (ret) { |
| 1766 | pr_err("cannot set SDIO block size\n"); | 1772 | pr_err("cannot set SDIO block size\n"); |
| 1767 | ret = -1; | 1773 | return ret; |
| 1768 | goto release_irq; | ||
| 1769 | } | 1774 | } |
| 1770 | 1775 | ||
| 1771 | sdio_release_host(func); | ||
| 1772 | sdio_set_drvdata(func, card); | 1776 | sdio_set_drvdata(func, card); |
| 1773 | 1777 | ||
| 1774 | adapter->dev = &func->dev; | 1778 | adapter->dev = &func->dev; |
| @@ -1776,15 +1780,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) | |||
| 1776 | strcpy(adapter->fw_name, card->firmware); | 1780 | strcpy(adapter->fw_name, card->firmware); |
| 1777 | 1781 | ||
| 1778 | return 0; | 1782 | return 0; |
| 1779 | |||
| 1780 | release_irq: | ||
| 1781 | sdio_release_irq(func); | ||
| 1782 | disable_func: | ||
| 1783 | sdio_disable_func(func); | ||
| 1784 | sdio_release_host(func); | ||
| 1785 | adapter->card = NULL; | ||
| 1786 | |||
| 1787 | return -1; | ||
| 1788 | } | 1783 | } |
| 1789 | 1784 | ||
| 1790 | /* | 1785 | /* |
| @@ -1813,9 +1808,6 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) | |||
| 1813 | */ | 1808 | */ |
| 1814 | mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg); | 1809 | mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg); |
| 1815 | 1810 | ||
| 1816 | /* Disable host interrupt mask register for SDIO */ | ||
| 1817 | mwifiex_sdio_disable_host_int(adapter); | ||
| 1818 | |||
| 1819 | /* Get SDIO ioport */ | 1811 | /* Get SDIO ioport */ |
| 1820 | mwifiex_init_sdio_ioport(adapter); | 1812 | mwifiex_init_sdio_ioport(adapter); |
| 1821 | 1813 | ||
| @@ -1957,6 +1949,7 @@ static struct mwifiex_if_ops sdio_ops = { | |||
| 1957 | .register_dev = mwifiex_register_dev, | 1949 | .register_dev = mwifiex_register_dev, |
| 1958 | .unregister_dev = mwifiex_unregister_dev, | 1950 | .unregister_dev = mwifiex_unregister_dev, |
| 1959 | .enable_int = mwifiex_sdio_enable_host_int, | 1951 | .enable_int = mwifiex_sdio_enable_host_int, |
| 1952 | .disable_int = mwifiex_sdio_disable_host_int, | ||
| 1960 | .process_int_status = mwifiex_process_int_status, | 1953 | .process_int_status = mwifiex_process_int_status, |
| 1961 | .host_to_card = mwifiex_sdio_host_to_card, | 1954 | .host_to_card = mwifiex_sdio_host_to_card, |
| 1962 | .wakeup = mwifiex_pm_wakeup_card, | 1955 | .wakeup = mwifiex_pm_wakeup_card, |
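
The sdio.c changes pivot on one refactor: register writes are split into `mwifiex_write_reg_locked()`, which assumes the caller already holds the SDIO host, and `mwifiex_write_reg()`, which claims and releases the host around the locked variant. That lets `enable_int`/`disable_int` bundle the interrupt-mask write together with `sdio_claim_irq()`/`sdio_release_irq()` under a single host claim. The shape of the pattern, as a sketch:

```c
/* Locked/unlocked split for SDIO register writes (sketch). The
 * _locked variant may only run between sdio_claim_host() and
 * sdio_release_host(). */
static int write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
{
	int ret = -1;

	sdio_writeb(func, data, reg, &ret);	/* core stores status in ret */
	return ret;
}

static int write_reg(struct sdio_func *func, u32 reg, u8 data)
{
	int ret;

	sdio_claim_host(func);
	ret = write_reg_locked(func, reg, data);
	sdio_release_host(func);
	return ret;
}
```
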
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 6d51dfdd8251..532ae0ac4dfb 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h | |||
| @@ -92,9 +92,6 @@ | |||
| 92 | /* Host Control Registers : Download host interrupt mask */ | 92 | /* Host Control Registers : Download host interrupt mask */ |
| 93 | #define DN_LD_HOST_INT_MASK (0x2U) | 93 | #define DN_LD_HOST_INT_MASK (0x2U) |
| 94 | 94 | ||
| 95 | /* Disable Host interrupt mask */ | ||
| 96 | #define HOST_INT_DISABLE 0xff | ||
| 97 | |||
| 98 | /* Host Control Registers : Host interrupt status */ | 95 | /* Host Control Registers : Host interrupt status */ |
| 99 | #define HOST_INTSTATUS_REG 0x03 | 96 | #define HOST_INTSTATUS_REG 0x03 |
| 100 | /* Host Control Registers : Upload host interrupt status */ | 97 | /* Host Control Registers : Upload host interrupt status */ |
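
The `host_to_card` hunk in sdio.c above also fixes an endianness bug: the two 16-bit fields of the SDIO header were stored as raw host-order `u16`, which produces the wrong byte order on big-endian CPUs. The `__le16`/`cpu_to_le16()` form keeps the wire format fixed regardless of host. A sketch, assuming the two-field header layout from that hunk:

```c
/* Sketch: build the little-endian SDIO header in place. cpu_to_le16()
 * is a no-op on LE hosts and a byte swap on BE hosts, so the bytes on
 * the bus are identical either way. */
static void put_sdio_header(u8 *payload, u16 pkt_len, u16 type)
{
	*(__le16 *)&payload[0] = cpu_to_le16(pkt_len);
	*(__le16 *)&payload[2] = cpu_to_le16(type);
}
```
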
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 206c3e038072..8af97abf7108 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c | |||
| @@ -257,10 +257,10 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, | |||
| 257 | goto done; | 257 | goto done; |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | if (priv->bss_mode == NL80211_IFTYPE_STATION) { | 260 | if (priv->bss_mode == NL80211_IFTYPE_STATION || |
| 261 | priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { | ||
| 261 | u8 config_bands; | 262 | u8 config_bands; |
| 262 | 263 | ||
| 263 | /* Infra mode */ | ||
| 264 | ret = mwifiex_deauthenticate(priv, NULL); | 264 | ret = mwifiex_deauthenticate(priv, NULL); |
| 265 | if (ret) | 265 | if (ret) |
| 266 | goto done; | 266 | goto done; |
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 9b915d3a44be..3e60a31582f8 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | menuconfig RT2X00 | 1 | menuconfig RT2X00 |
| 2 | tristate "Ralink driver support" | 2 | tristate "Ralink driver support" |
| 3 | depends on MAC80211 | 3 | depends on MAC80211 && HAS_DMA |
| 4 | ---help--- | 4 | ---help--- |
| 5 | This will enable the support for the Ralink drivers, | 5 | This will enable the support for the Ralink drivers, |
| 6 | developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. | 6 | developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1f80ea5e29dd..1b41c8eda12d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
| @@ -6133,7 +6133,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
| 6133 | IEEE80211_HW_SUPPORTS_PS | | 6133 | IEEE80211_HW_SUPPORTS_PS | |
| 6134 | IEEE80211_HW_PS_NULLFUNC_STACK | | 6134 | IEEE80211_HW_PS_NULLFUNC_STACK | |
| 6135 | IEEE80211_HW_AMPDU_AGGREGATION | | 6135 | IEEE80211_HW_AMPDU_AGGREGATION | |
| 6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; | 6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
| 6137 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
| 6137 | 6138 | ||
| 6138 | /* | 6139 | /* |
| 6139 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices | 6140 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices |
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 6c0a91ff963c..aa95c6cf3545 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c | |||
| @@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index) | |||
| 936 | spin_unlock_irqrestore(&queue->index_lock, irqflags); | 936 | spin_unlock_irqrestore(&queue->index_lock, irqflags); |
| 937 | } | 937 | } |
| 938 | 938 | ||
| 939 | void rt2x00queue_pause_queue(struct data_queue *queue) | 939 | void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) |
| 940 | { | 940 | { |
| 941 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || | ||
| 942 | !test_bit(QUEUE_STARTED, &queue->flags) || | ||
| 943 | test_and_set_bit(QUEUE_PAUSED, &queue->flags)) | ||
| 944 | return; | ||
| 945 | |||
| 946 | switch (queue->qid) { | 941 | switch (queue->qid) { |
| 947 | case QID_AC_VO: | 942 | case QID_AC_VO: |
| 948 | case QID_AC_VI: | 943 | case QID_AC_VI: |
| @@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue) | |||
| 958 | break; | 953 | break; |
| 959 | } | 954 | } |
| 960 | } | 955 | } |
| 956 | void rt2x00queue_pause_queue(struct data_queue *queue) | ||
| 957 | { | ||
| 958 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || | ||
| 959 | !test_bit(QUEUE_STARTED, &queue->flags) || | ||
| 960 | test_and_set_bit(QUEUE_PAUSED, &queue->flags)) | ||
| 961 | return; | ||
| 962 | |||
| 963 | rt2x00queue_pause_queue_nocheck(queue); | ||
| 964 | } | ||
| 961 | EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); | 965 | EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); |
| 962 | 966 | ||
| 963 | void rt2x00queue_unpause_queue(struct data_queue *queue) | 967 | void rt2x00queue_unpause_queue(struct data_queue *queue) |
| @@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue) | |||
| 1019 | return; | 1023 | return; |
| 1020 | } | 1024 | } |
| 1021 | 1025 | ||
| 1022 | rt2x00queue_pause_queue(queue); | 1026 | rt2x00queue_pause_queue_nocheck(queue); |
| 1023 | 1027 | ||
| 1024 | queue->rt2x00dev->ops->lib->stop_queue(queue); | 1028 | queue->rt2x00dev->ops->lib->stop_queue(queue); |
| 1025 | 1029 | ||
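
The rt2x00queue.c hunk is another checked/unchecked split: `rt2x00queue_pause_queue()` keeps its early-out tests, while the new `_nocheck` variant pauses unconditionally, and `rt2x00queue_stop_queue()` switches to the latter so a queue that already has `QUEUE_PAUSED` set is still paused at the driver level when stopped. Reduced to its skeleton (bodies elided):

```c
/* Skeleton of the split introduced above. */
void pause_queue_nocheck(struct data_queue *queue)
{
	/* per-qid pause work, run unconditionally */
}

void pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;		/* not running, or already paused */

	pause_queue_nocheck(queue);
}

void stop_queue(struct data_queue *queue)
{
	/* stopping must always pause, even if QUEUE_PAUSED was set */
	pause_queue_nocheck(queue);
	queue->rt2x00dev->ops->lib->stop_queue(queue);
}
```
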
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig index 7253de3d8c66..c2ffce7a907c 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/rtlwifi/Kconfig | |||
| @@ -1,27 +1,20 @@ | |||
| 1 | config RTLWIFI | 1 | menuconfig RTL_CARDS |
| 2 | tristate "Realtek wireless card support" | 2 | tristate "Realtek rtlwifi family of devices" |
| 3 | depends on MAC80211 | 3 | depends on MAC80211 && (PCI || USB) |
| 4 | select FW_LOADER | ||
| 5 | ---help--- | ||
| 6 | This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE | ||
| 7 | drivers. This module does nothing by itself - the various front-end | ||
| 8 | drivers need to be enabled to support any desired devices. | ||
| 9 | |||
| 10 | If you choose to build as a module, it'll be called rtlwifi. | ||
| 11 | |||
| 12 | config RTLWIFI_DEBUG | ||
| 13 | bool "Debugging output for rtlwifi driver family" | ||
| 14 | depends on RTLWIFI | ||
| 15 | default y | 4 | default y |
| 16 | ---help--- | 5 | ---help--- |
| 17 | To use the module option that sets the dynamic-debugging level for | 6 | This option will enable support for the Realtek mac80211-based |
| 18 | the front-end driver, this parameter must be "Y". For memory-limited | 7 | wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de, |
| 19 | systems, choose "N". If in doubt, choose "Y". | 8 | rtl8723ae, and rtl8188ee share some common code. |
| 9 | |||
| 10 | if RTL_CARDS | ||
| 20 | 11 | ||
| 21 | config RTL8192CE | 12 | config RTL8192CE |
| 22 | tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter" | 13 | tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter" |
| 23 | depends on RTLWIFI && PCI | 14 | depends on PCI |
| 24 | select RTL8192C_COMMON | 15 | select RTL8192C_COMMON |
| 16 | select RTLWIFI | ||
| 17 | select RTLWIFI_PCI | ||
| 25 | ---help--- | 18 | ---help--- |
| 26 | This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe | 19 | This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe |
| 27 | wireless network adapters. | 20 | wireless network adapters. |
| @@ -30,7 +23,9 @@ config RTL8192CE | |||
| 30 | 23 | ||
| 31 | config RTL8192SE | 24 | config RTL8192SE |
| 32 | tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter" | 25 | tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter" |
| 33 | depends on RTLWIFI && PCI | 26 | depends on PCI |
| 27 | select RTLWIFI | ||
| 28 | select RTLWIFI_PCI | ||
| 34 | ---help--- | 29 | ---help--- |
| 35 | This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe | 30 | This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe |
| 36 | wireless network adapters. | 31 | wireless network adapters. |
| @@ -39,7 +34,9 @@ config RTL8192SE | |||
| 39 | 34 | ||
| 40 | config RTL8192DE | 35 | config RTL8192DE |
| 41 | tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" | 36 | tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" |
| 42 | depends on RTLWIFI && PCI | 37 | depends on PCI |
| 38 | select RTLWIFI | ||
| 39 | select RTLWIFI_PCI | ||
| 43 | ---help--- | 40 | ---help--- |
| 44 | This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe | 41 | This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe |
| 45 | wireless network adapters. | 42 | wireless network adapters. |
| @@ -48,7 +45,9 @@ config RTL8192DE | |||
| 48 | 45 | ||
| 49 | config RTL8723AE | 46 | config RTL8723AE |
| 50 | tristate "Realtek RTL8723AE PCIe Wireless Network Adapter" | 47 | tristate "Realtek RTL8723AE PCIe Wireless Network Adapter" |
| 51 | depends on RTLWIFI && PCI | 48 | depends on PCI |
| 49 | select RTLWIFI | ||
| 50 | select RTLWIFI_PCI | ||
| 52 | ---help--- | 51 | ---help--- |
| 53 | This is the driver for Realtek RTL8723AE 802.11n PCIe | 52 | This is the driver for Realtek RTL8723AE 802.11n PCIe |
| 54 | wireless network adapters. | 53 | wireless network adapters. |
| @@ -57,7 +56,9 @@ config RTL8723AE | |||
| 57 | 56 | ||
| 58 | config RTL8188EE | 57 | config RTL8188EE |
| 59 | tristate "Realtek RTL8188EE Wireless Network Adapter" | 58 | tristate "Realtek RTL8188EE Wireless Network Adapter" |
| 60 | depends on RTLWIFI && PCI | 59 | depends on PCI |
| 60 | select RTLWIFI | ||
| 61 | select RTLWIFI_PCI | ||
| 61 | ---help--- | 62 | ---help--- |
| 62 | This is the driver for Realtek RTL8188EE 802.11n PCIe | 63 | This is the driver for Realtek RTL8188EE 802.11n PCIe |
| 63 | wireless network adapters. | 64 | wireless network adapters. |
| @@ -66,7 +67,9 @@ config RTL8188EE | |||
| 66 | 67 | ||
| 67 | config RTL8192CU | 68 | config RTL8192CU |
| 68 | tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" | 69 | tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" |
| 69 | depends on RTLWIFI && USB | 70 | depends on USB |
| 71 | select RTLWIFI | ||
| 72 | select RTLWIFI_USB | ||
| 70 | select RTL8192C_COMMON | 73 | select RTL8192C_COMMON |
| 71 | ---help--- | 74 | ---help--- |
| 72 | This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB | 75 | This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB |
| @@ -74,7 +77,28 @@ config RTL8192CU | |||
| 74 | 77 | ||
| 75 | If you choose to build it as a module, it will be called rtl8192cu | 78 | If you choose to build it as a module, it will be called rtl8192cu |
| 76 | 79 | ||
| 80 | config RTLWIFI | ||
| 81 | tristate | ||
| 82 | select FW_LOADER | ||
| 83 | |||
| 84 | config RTLWIFI_PCI | ||
| 85 | tristate | ||
| 86 | |||
| 87 | config RTLWIFI_USB | ||
| 88 | tristate | ||
| 89 | |||
| 90 | config RTLWIFI_DEBUG | ||
| 91 | bool "Debugging output for rtlwifi driver family" | ||
| 92 | depends on RTLWIFI | ||
| 93 | default y | ||
| 94 | ---help--- | ||
| 95 | To use the module option that sets the dynamic-debugging level for | ||
| 96 | the front-end driver, this parameter must be "Y". For memory-limited | ||
| 97 | systems, choose "N". If in doubt, choose "Y". | ||
| 98 | |||
| 77 | config RTL8192C_COMMON | 99 | config RTL8192C_COMMON |
| 78 | tristate | 100 | tristate |
| 79 | depends on RTL8192CE || RTL8192CU | 101 | depends on RTL8192CE || RTL8192CU |
| 80 | default m | 102 | default y |
| 103 | |||
| 104 | endif | ||
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile index ff02b874f8d8..d56f023a4b90 100644 --- a/drivers/net/wireless/rtlwifi/Makefile +++ b/drivers/net/wireless/rtlwifi/Makefile | |||
| @@ -12,13 +12,11 @@ rtlwifi-objs := \ | |||
| 12 | 12 | ||
| 13 | rtl8192c_common-objs += \ | 13 | rtl8192c_common-objs += \ |
| 14 | 14 | ||
| 15 | ifneq ($(CONFIG_PCI),) | 15 | obj-$(CONFIG_RTLWIFI_PCI) += rtl_pci.o |
| 16 | rtlwifi-objs += pci.o | 16 | rtl_pci-objs := pci.o |
| 17 | endif | ||
| 18 | 17 | ||
| 19 | ifneq ($(CONFIG_USB),) | 18 | obj-$(CONFIG_RTLWIFI_USB) += rtl_usb.o |
| 20 | rtlwifi-objs += usb.o | 19 | rtl_usb-objs := usb.o |
| 21 | endif | ||
| 22 | 20 | ||
| 23 | obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/ | 21 | obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/ |
| 24 | obj-$(CONFIG_RTL8192CE) += rtl8192ce/ | 22 | obj-$(CONFIG_RTL8192CE) += rtl8192ce/ |
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 9d558ac77b0c..7651f5acc14b 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c | |||
| @@ -172,6 +172,7 @@ u8 rtl_tid_to_ac(u8 tid) | |||
| 172 | { | 172 | { |
| 173 | return tid_to_ac[tid]; | 173 | return tid_to_ac[tid]; |
| 174 | } | 174 | } |
| 175 | EXPORT_SYMBOL_GPL(rtl_tid_to_ac); | ||
| 175 | 176 | ||
| 176 | static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, | 177 | static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, |
| 177 | struct ieee80211_sta_ht_cap *ht_cap) | 178 | struct ieee80211_sta_ht_cap *ht_cap) |
| @@ -406,6 +407,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw) | |||
| 406 | cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); | 407 | cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); |
| 407 | cancel_delayed_work(&rtlpriv->works.fwevt_wq); | 408 | cancel_delayed_work(&rtlpriv->works.fwevt_wq); |
| 408 | } | 409 | } |
| 410 | EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); | ||
| 409 | 411 | ||
| 410 | void rtl_init_rfkill(struct ieee80211_hw *hw) | 412 | void rtl_init_rfkill(struct ieee80211_hw *hw) |
| 411 | { | 413 | { |
| @@ -439,6 +441,7 @@ void rtl_deinit_rfkill(struct ieee80211_hw *hw) | |||
| 439 | { | 441 | { |
| 440 | wiphy_rfkill_stop_polling(hw->wiphy); | 442 | wiphy_rfkill_stop_polling(hw->wiphy); |
| 441 | } | 443 | } |
| 444 | EXPORT_SYMBOL_GPL(rtl_deinit_rfkill); | ||
| 442 | 445 | ||
| 443 | int rtl_init_core(struct ieee80211_hw *hw) | 446 | int rtl_init_core(struct ieee80211_hw *hw) |
| 444 | { | 447 | { |
| @@ -489,10 +492,12 @@ int rtl_init_core(struct ieee80211_hw *hw) | |||
| 489 | 492 | ||
| 490 | return 0; | 493 | return 0; |
| 491 | } | 494 | } |
| 495 | EXPORT_SYMBOL_GPL(rtl_init_core); | ||
| 492 | 496 | ||
| 493 | void rtl_deinit_core(struct ieee80211_hw *hw) | 497 | void rtl_deinit_core(struct ieee80211_hw *hw) |
| 494 | { | 498 | { |
| 495 | } | 499 | } |
| 500 | EXPORT_SYMBOL_GPL(rtl_deinit_core); | ||
| 496 | 501 | ||
| 497 | void rtl_init_rx_config(struct ieee80211_hw *hw) | 502 | void rtl_init_rx_config(struct ieee80211_hw *hw) |
| 498 | { | 503 | { |
| @@ -501,6 +506,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw) | |||
| 501 | 506 | ||
| 502 | rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf)); | 507 | rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf)); |
| 503 | } | 508 | } |
| 509 | EXPORT_SYMBOL_GPL(rtl_init_rx_config); | ||
| 504 | 510 | ||
| 505 | /********************************************************* | 511 | /********************************************************* |
| 506 | * | 512 | * |
| @@ -879,6 +885,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 879 | 885 | ||
| 880 | return true; | 886 | return true; |
| 881 | } | 887 | } |
| 888 | EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc); | ||
| 882 | 889 | ||
| 883 | void rtl_get_tcb_desc(struct ieee80211_hw *hw, | 890 | void rtl_get_tcb_desc(struct ieee80211_hw *hw, |
| 884 | struct ieee80211_tx_info *info, | 891 | struct ieee80211_tx_info *info, |
| @@ -1052,6 +1059,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
| 1052 | 1059 | ||
| 1053 | return true; | 1060 | return true; |
| 1054 | } | 1061 | } |
| 1062 | EXPORT_SYMBOL_GPL(rtl_action_proc); | ||
| 1055 | 1063 | ||
| 1056 | /*should call before software enc*/ | 1064 | /*should call before software enc*/ |
| 1057 | u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | 1065 | u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) |
| @@ -1125,6 +1133,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
| 1125 | 1133 | ||
| 1126 | return false; | 1134 | return false; |
| 1127 | } | 1135 | } |
| 1136 | EXPORT_SYMBOL_GPL(rtl_is_special_data); | ||
| 1128 | 1137 | ||
| 1129 | /********************************************************* | 1138 | /********************************************************* |
| 1130 | * | 1139 | * |
| @@ -1300,6 +1309,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 1300 | 1309 | ||
| 1301 | rtlpriv->link_info.bcn_rx_inperiod++; | 1310 | rtlpriv->link_info.bcn_rx_inperiod++; |
| 1302 | } | 1311 | } |
| 1312 | EXPORT_SYMBOL_GPL(rtl_beacon_statistic); | ||
| 1303 | 1313 | ||
| 1304 | void rtl_watchdog_wq_callback(void *data) | 1314 | void rtl_watchdog_wq_callback(void *data) |
| 1305 | { | 1315 | { |
| @@ -1793,6 +1803,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len) | |||
| 1793 | 1803 | ||
| 1794 | mac->vendor = vendor; | 1804 | mac->vendor = vendor; |
| 1795 | } | 1805 | } |
| 1806 | EXPORT_SYMBOL_GPL(rtl_recognize_peer); | ||
| 1796 | 1807 | ||
| 1797 | /********************************************************* | 1808 | /********************************************************* |
| 1798 | * | 1809 | * |
| @@ -1849,6 +1860,7 @@ struct attribute_group rtl_attribute_group = { | |||
| 1849 | .name = "rtlsysfs", | 1860 | .name = "rtlsysfs", |
| 1850 | .attrs = rtl_sysfs_entries, | 1861 | .attrs = rtl_sysfs_entries, |
| 1851 | }; | 1862 | }; |
| 1863 | EXPORT_SYMBOL_GPL(rtl_attribute_group); | ||
| 1852 | 1864 | ||
| 1853 | MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); | 1865 | MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); |
| 1854 | MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); | 1866 | MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); |
| @@ -1856,7 +1868,8 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); | |||
| 1856 | MODULE_LICENSE("GPL"); | 1868 | MODULE_LICENSE("GPL"); |
| 1857 | MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); | 1869 | MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); |
| 1858 | 1870 | ||
| 1859 | struct rtl_global_var global_var = {}; | 1871 | struct rtl_global_var rtl_global_var = {}; |
| 1872 | EXPORT_SYMBOL_GPL(rtl_global_var); | ||
| 1860 | 1873 | ||
| 1861 | static int __init rtl_core_module_init(void) | 1874 | static int __init rtl_core_module_init(void) |
| 1862 | { | 1875 | { |
| @@ -1864,8 +1877,8 @@ static int __init rtl_core_module_init(void) | |||
| 1864 | pr_err("Unable to register rtl_rc, use default RC !!\n"); | 1877 | pr_err("Unable to register rtl_rc, use default RC !!\n"); |
| 1865 | 1878 | ||
| 1866 | /* init some global vars */ | 1879 | /* init some global vars */ |
| 1867 | INIT_LIST_HEAD(&global_var.glb_priv_list); | 1880 | INIT_LIST_HEAD(&rtl_global_var.glb_priv_list); |
| 1868 | spin_lock_init(&global_var.glb_list_lock); | 1881 | spin_lock_init(&rtl_global_var.glb_list_lock); |
| 1869 | 1882 | ||
| 1870 | return 0; | 1883 | return 0; |
| 1871 | } | 1884 | } |
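
The long run of `EXPORT_SYMBOL_GPL()` additions in base.c follows directly from the Kconfig/Makefile split above: once pci.o and usb.o become the separate rtl_pci and rtl_usb modules, every base.c function they call crosses a module boundary and must be exported — and the exported global is renamed from the generic `global_var` to the namespaced `rtl_global_var` for the same reason. The mechanics, with `rtl_example_helper` as a hypothetical symbol:

```c
#include <linux/export.h>

/* Without the export, dependent modules (here rtl_pci/rtl_usb) fail
 * to load with "Unknown symbol" errors. The _GPL variant restricts
 * the symbol to GPL-compatible modules. */
int rtl_example_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL_GPL(rtl_example_helper);
```
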
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h index 8576bc34b032..0e5fe0902daf 100644 --- a/drivers/net/wireless/rtlwifi/base.h +++ b/drivers/net/wireless/rtlwifi/base.h | |||
| @@ -147,7 +147,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len); | |||
| 147 | u8 rtl_tid_to_ac(u8 tid); | 147 | u8 rtl_tid_to_ac(u8 tid); |
| 148 | extern struct attribute_group rtl_attribute_group; | 148 | extern struct attribute_group rtl_attribute_group; |
| 149 | void rtl_easy_concurrent_retrytimer_callback(unsigned long data); | 149 | void rtl_easy_concurrent_retrytimer_callback(unsigned long data); |
| 150 | extern struct rtl_global_var global_var; | 150 | extern struct rtl_global_var rtl_global_var; |
| 151 | int rtlwifi_rate_mapping(struct ieee80211_hw *hw, | 151 | int rtlwifi_rate_mapping(struct ieee80211_hw *hw, |
| 152 | bool isht, u8 desc_rate, bool first_ampdu); | 152 | bool isht, u8 desc_rate, bool first_ampdu); |
| 153 | bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); | 153 | bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); |
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index ee84844be008..733b7ce7f0e2 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c | |||
| @@ -1330,3 +1330,4 @@ const struct ieee80211_ops rtl_ops = { | |||
| 1330 | .rfkill_poll = rtl_op_rfkill_poll, | 1330 | .rfkill_poll = rtl_op_rfkill_poll, |
| 1331 | .flush = rtl_op_flush, | 1331 | .flush = rtl_op_flush, |
| 1332 | }; | 1332 | }; |
| 1333 | EXPORT_SYMBOL_GPL(rtl_ops); | ||
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c index 7d52d3d7769f..76e2086e137e 100644 --- a/drivers/net/wireless/rtlwifi/debug.c +++ b/drivers/net/wireless/rtlwifi/debug.c | |||
| @@ -51,3 +51,4 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw) | |||
| 51 | 51 | ||
| 52 | /*Init Debug flag enable condition */ | 52 | /*Init Debug flag enable condition */ |
| 53 | } | 53 | } |
| 54 | EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init); | ||
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index 9e3894178e77..838a1ed3f194 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c | |||
| @@ -229,6 +229,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf) | |||
| 229 | 229 | ||
| 230 | *pbuf = (u8) (value32 & 0xff); | 230 | *pbuf = (u8) (value32 & 0xff); |
| 231 | } | 231 | } |
| 232 | EXPORT_SYMBOL_GPL(read_efuse_byte); | ||
| 232 | 233 | ||
| 233 | void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) | 234 | void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) |
| 234 | { | 235 | { |
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index c97e9d327331..703f839af6ca 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
| @@ -35,6 +35,13 @@ | |||
| 35 | #include "efuse.h" | 35 | #include "efuse.h" |
| 36 | #include <linux/export.h> | 36 | #include <linux/export.h> |
| 37 | #include <linux/kmemleak.h> | 37 | #include <linux/kmemleak.h> |
| 38 | #include <linux/module.h> | ||
| 39 | |||
| 40 | MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); | ||
| 41 | MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); | ||
| 42 | MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); | ||
| 43 | MODULE_LICENSE("GPL"); | ||
| 44 | MODULE_DESCRIPTION("PCI basic driver for rtlwifi"); | ||
| 38 | 45 | ||
| 39 | static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { | 46 | static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { |
| 40 | PCI_VENDOR_ID_INTEL, | 47 | PCI_VENDOR_ID_INTEL, |
| @@ -1008,19 +1015,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) | |||
| 1008 | return; | 1015 | return; |
| 1009 | } | 1016 | } |
| 1010 | 1017 | ||
| 1011 | static void rtl_lps_change_work_callback(struct work_struct *work) | ||
| 1012 | { | ||
| 1013 | struct rtl_works *rtlworks = | ||
| 1014 | container_of(work, struct rtl_works, lps_change_work); | ||
| 1015 | struct ieee80211_hw *hw = rtlworks->hw; | ||
| 1016 | struct rtl_priv *rtlpriv = rtl_priv(hw); | ||
| 1017 | |||
| 1018 | if (rtlpriv->enter_ps) | ||
| 1019 | rtl_lps_enter(hw); | ||
| 1020 | else | ||
| 1021 | rtl_lps_leave(hw); | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) | 1018 | static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) |
| 1025 | { | 1019 | { |
| 1026 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 1020 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| @@ -1899,7 +1893,7 @@ int rtl_pci_probe(struct pci_dev *pdev, | |||
| 1899 | rtlpriv->rtlhal.interface = INTF_PCI; | 1893 | rtlpriv->rtlhal.interface = INTF_PCI; |
| 1900 | rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); | 1894 | rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); |
| 1901 | rtlpriv->intf_ops = &rtl_pci_ops; | 1895 | rtlpriv->intf_ops = &rtl_pci_ops; |
| 1902 | rtlpriv->glb_var = &global_var; | 1896 | rtlpriv->glb_var = &rtl_global_var; |
| 1903 | 1897 | ||
| 1904 | /* | 1898 | /* |
| 1905 | *init dbgp flags before all | 1899 | *init dbgp flags before all |
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index 884bceae38a9..298b615964e8 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c | |||
| @@ -269,6 +269,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) | |||
| 269 | 269 | ||
| 270 | spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags); | 270 | spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags); |
| 271 | } | 271 | } |
| 272 | EXPORT_SYMBOL_GPL(rtl_ips_nic_on); | ||
| 272 | 273 | ||
| 273 | /*for FW LPS*/ | 274 | /*for FW LPS*/ |
| 274 | 275 | ||
| @@ -518,6 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) | |||
| 518 | "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed); | 519 | "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed); |
| 519 | } | 520 | } |
| 520 | } | 521 | } |
| 522 | EXPORT_SYMBOL_GPL(rtl_swlps_beacon); | ||
| 521 | 523 | ||
| 522 | void rtl_swlps_rf_awake(struct ieee80211_hw *hw) | 524 | void rtl_swlps_rf_awake(struct ieee80211_hw *hw) |
| 523 | { | 525 | { |
| @@ -611,6 +613,19 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) | |||
| 611 | MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40)); | 613 | MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40)); |
| 612 | } | 614 | } |
| 613 | 615 | ||
| 616 | void rtl_lps_change_work_callback(struct work_struct *work) | ||
| 617 | { | ||
| 618 | struct rtl_works *rtlworks = | ||
| 619 | container_of(work, struct rtl_works, lps_change_work); | ||
| 620 | struct ieee80211_hw *hw = rtlworks->hw; | ||
| 621 | struct rtl_priv *rtlpriv = rtl_priv(hw); | ||
| 622 | |||
| 623 | if (rtlpriv->enter_ps) | ||
| 624 | rtl_lps_enter(hw); | ||
| 625 | else | ||
| 626 | rtl_lps_leave(hw); | ||
| 627 | } | ||
| 628 | EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback); | ||
| 614 | 629 | ||
| 615 | void rtl_swlps_wq_callback(void *data) | 630 | void rtl_swlps_wq_callback(void *data) |
| 616 | { | 631 | { |
| @@ -922,3 +937,4 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len) | |||
| 922 | else | 937 | else |
| 923 | rtl_p2p_noa_ie(hw, data, len - FCS_LEN); | 938 | rtl_p2p_noa_ie(hw, data, len - FCS_LEN); |
| 924 | } | 939 | } |
| 940 | EXPORT_SYMBOL_GPL(rtl_p2p_info); | ||
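
Moving `rtl_lps_change_work_callback()` into ps.c (with an export) makes it reachable from the USB path as well. The function itself is the standard workqueue idiom: the callback only receives the `work_struct`, and `container_of()` recovers the structure it is embedded in. A generic sketch with hypothetical names:

```c
#include <linux/workqueue.h>

struct my_ctx {
	bool enter_ps;
	struct work_struct lps_work;	/* embedded work item */
};

static void lps_work_cb(struct work_struct *work)
{
	/* Recover the enclosing context from the embedded member. */
	struct my_ctx *ctx = container_of(work, struct my_ctx, lps_work);

	if (ctx->enter_ps)
		pr_debug("entering power save\n");
	else
		pr_debug("leaving power save\n");
}
```

The one precondition is that `INIT_WORK()` ran on the embedded member before the first `schedule_work()` — which is exactly what the usb.c hunk below adds.
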
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h index 4d682b753f50..88bd76ea88f7 100644 --- a/drivers/net/wireless/rtlwifi/ps.h +++ b/drivers/net/wireless/rtlwifi/ps.h | |||
| @@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw); | |||
| 49 | void rtl_swlps_rf_sleep(struct ieee80211_hw *hw); | 49 | void rtl_swlps_rf_sleep(struct ieee80211_hw *hw); |
| 50 | void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); | 50 | void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); |
| 51 | void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len); | 51 | void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len); |
| 52 | void rtl_lps_change_work_callback(struct work_struct *work); | ||
| 52 | 53 | ||
| 53 | #endif | 54 | #endif |
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index a3532e077871..e56778cac9bf 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
| @@ -32,6 +32,13 @@ | |||
| 32 | #include "ps.h" | 32 | #include "ps.h" |
| 33 | #include "rtl8192c/fw_common.h" | 33 | #include "rtl8192c/fw_common.h" |
| 34 | #include <linux/export.h> | 34 | #include <linux/export.h> |
| 35 | #include <linux/module.h> | ||
| 36 | |||
| 37 | MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); | ||
| 38 | MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); | ||
| 39 | MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); | ||
| 40 | MODULE_LICENSE("GPL"); | ||
| 41 | MODULE_DESCRIPTION("USB basic driver for rtlwifi"); | ||
| 35 | 42 | ||
| 36 | #define REALTEK_USB_VENQT_READ 0xC0 | 43 | #define REALTEK_USB_VENQT_READ 0xC0 |
| 37 | #define REALTEK_USB_VENQT_WRITE 0x40 | 44 | #define REALTEK_USB_VENQT_WRITE 0x40 |
| @@ -1070,6 +1077,8 @@ int rtl_usb_probe(struct usb_interface *intf, | |||
| 1070 | spin_lock_init(&rtlpriv->locks.usb_lock); | 1077 | spin_lock_init(&rtlpriv->locks.usb_lock); |
| 1071 | INIT_WORK(&rtlpriv->works.fill_h2c_cmd, | 1078 | INIT_WORK(&rtlpriv->works.fill_h2c_cmd, |
| 1072 | rtl_fill_h2c_cmd_work_callback); | 1079 | rtl_fill_h2c_cmd_work_callback); |
| 1080 | INIT_WORK(&rtlpriv->works.lps_change_work, | ||
| 1081 | rtl_lps_change_work_callback); | ||
| 1073 | 1082 | ||
| 1074 | rtlpriv->usb_data_index = 0; | 1083 | rtlpriv->usb_data_index = 0; |
| 1075 | init_completion(&rtlpriv->firmware_loading_complete); | 1084 | init_completion(&rtlpriv->firmware_loading_complete); |
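
This is the other half of that move: the USB probe path never initialized `lps_change_work`, so scheduling it from the now-shared power-save code would have queued an uninitialized `work_struct` (which can crash). Initialization-before-first-use, sketched:

```c
/* In probe: initialize the work item exactly once ... */
INIT_WORK(&rtlpriv->works.lps_change_work,
	  rtl_lps_change_work_callback);

/* ... so that later, from any path, this is safe: */
schedule_work(&rtlpriv->works.lps_change_work);
```
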
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 4941f201d6c8..b8ba1f925e75 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
| @@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw) | |||
| 98 | goto exit; | 98 | goto exit; |
| 99 | 99 | ||
| 100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, | 100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, |
| 101 | USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); | 101 | USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); |
| 102 | if (err < 0) | 102 | if (err < 0) |
| 103 | goto exit; | 103 | goto exit; |
| 104 | 104 | ||
| 105 | memcpy(&ret, buf, sizeof(ret)); | ||
| 106 | |||
| 105 | if (ret & 0x80) { | 107 | if (ret & 0x80) { |
| 106 | err = -EIO; | 108 | err = -EIO; |
| 107 | goto exit; | 109 | goto exit; |
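
The zd1201 fix addresses a classic USB pitfall: `usb_control_msg()` hands its data buffer to the host controller for DMA, and `&ret` pointed at the caller's stack, which is not DMA-safe. The patch reuses the already-allocated `buf` for the transfer and copies the result out afterwards. The safe shape of such a read, as a sketch (request `0x4` and the vendor-type bit `0x40` come from the hunk; the function name and timeout are illustrative):

```c
/* Sketch: DMA-safe USB control read — transfer buffers must be
 * heap-allocated, never on the stack. */
static int read_fw_status(struct usb_device *dev, u16 *status)
{
	__le16 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
			      USB_DIR_IN | 0x40, 0, 0,
			      buf, sizeof(*buf), 1000 /* ms */);
	if (err >= 0)
		*status = le16_to_cpu(*buf);

	kfree(buf);
	return err < 0 ? err : 0;
}
```
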
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 6bb7cf2de556..b10ba00cc3e6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, | |||
| 392 | mem = (unsigned long) | 392 | mem = (unsigned long) |
| 393 | dt_alloc(size + 4, __alignof__(struct device_node)); | 393 | dt_alloc(size + 4, __alignof__(struct device_node)); |
| 394 | 394 | ||
| 395 | memset((void *)mem, 0, size); | ||
| 396 | |||
| 395 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); | 397 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); |
| 396 | 398 | ||
| 397 | pr_debug(" unflattening %lx...\n", mem); | 399 | pr_debug(" unflattening %lx...\n", mem); |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index a3c1c5aae6a9..1264923ade0f 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -345,6 +345,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) | |||
| 345 | if (r && irq) { | 345 | if (r && irq) { |
| 346 | const char *name = NULL; | 346 | const char *name = NULL; |
| 347 | 347 | ||
| 348 | memset(r, 0, sizeof(*r)); | ||
| 348 | /* | 349 | /* |
| 349 | * Get optional "interrupts-names" property to add a name | 350 | * Get optional "interrupts-names" property to add a name |
| 350 | * to the resource. | 351 | * to the resource. |
| @@ -482,8 +483,9 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
| 482 | } | 483 | } |
| 483 | 484 | ||
| 484 | /* Get the next pending parent that might have children */ | 485 | /* Get the next pending parent that might have children */ |
| 485 | desc = list_first_entry(&intc_parent_list, typeof(*desc), list); | 486 | desc = list_first_entry_or_null(&intc_parent_list, |
| 486 | if (list_empty(&intc_parent_list) || !desc) { | 487 | typeof(*desc), list); |
| 488 | if (!desc) { | ||
| 487 | pr_err("of_irq_init: children remain, but no parents\n"); | 489 | pr_err("of_irq_init: children remain, but no parents\n"); |
| 488 | break; | 490 | break; |
| 489 | } | 491 | } |
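
The `of_irq_init()` change tightens a fragile sequence: `list_first_entry()` on an empty list yields a pointer computed from the list head rather than a real entry, so fetching first and testing `list_empty()` afterwards relied on never touching the bogus pointer, and the `!desc` test could never fire. `list_first_entry_or_null()` folds the fetch and the emptiness check into one well-defined expression:

```c
/* Before: desc may be a bogus head-derived pointer when empty. */
desc = list_first_entry(&intc_parent_list, typeof(*desc), list);
if (list_empty(&intc_parent_list) || !desc)
	break;

/* After: NULL is returned for an empty list, nothing to misuse. */
desc = list_first_entry_or_null(&intc_parent_list, typeof(*desc), list);
if (!desc)
	break;
```
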
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index e79e006eb9ab..9ee04b4b68bf 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c | |||
| @@ -811,18 +811,28 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) | |||
| 811 | return pcidev->irq; | 811 | return pcidev->irq; |
| 812 | } | 812 | } |
| 813 | 813 | ||
| 814 | static struct iosapic_info *first_isi = NULL; | 814 | static struct iosapic_info *iosapic_list; |
| 815 | 815 | ||
| 816 | #ifdef CONFIG_64BIT | 816 | #ifdef CONFIG_64BIT |
| 817 | int iosapic_serial_irq(int num) | 817 | int iosapic_serial_irq(struct parisc_device *dev) |
| 818 | { | 818 | { |
| 819 | struct iosapic_info *isi = first_isi; | 819 | struct iosapic_info *isi; |
| 820 | struct irt_entry *irte = NULL; /* only used if PAT PDC */ | 820 | struct irt_entry *irte; |
| 821 | struct vector_info *vi; | 821 | struct vector_info *vi; |
| 822 | int isi_line; /* line used by device */ | 822 | int cnt; |
| 823 | int intin; | ||
| 824 | |||
| 825 | intin = (dev->mod_info >> 24) & 15; | ||
| 823 | 826 | ||
| 824 | /* lookup IRT entry for isi/slot/pin set */ | 827 | /* lookup IRT entry for isi/slot/pin set */ |
| 825 | irte = &irt_cell[num]; | 828 | for (cnt = 0; cnt < irt_num_entry; cnt++) { |
| 829 | irte = &irt_cell[cnt]; | ||
| 830 | if (COMPARE_IRTE_ADDR(irte, dev->mod0) && | ||
| 831 | irte->dest_iosapic_intin == intin) | ||
| 832 | break; | ||
| 833 | } | ||
| 834 | if (cnt >= irt_num_entry) | ||
| 835 | return 0; /* no irq found, force polling */ | ||
| 826 | 836 | ||
| 827 | DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", | 837 | DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", |
| 828 | irte, | 838 | irte, |
| @@ -834,11 +844,17 @@ int iosapic_serial_irq(int num) | |||
| 834 | irte->src_seg_id, | 844 | irte->src_seg_id, |
| 835 | irte->dest_iosapic_intin, | 845 | irte->dest_iosapic_intin, |
| 836 | (u32) irte->dest_iosapic_addr); | 846 | (u32) irte->dest_iosapic_addr); |
| 837 | isi_line = irte->dest_iosapic_intin; | 847 | |
| 848 | /* search for iosapic */ | ||
| 849 | for (isi = iosapic_list; isi; isi = isi->isi_next) | ||
| 850 | if (isi->isi_hpa == dev->mod0) | ||
| 851 | break; | ||
| 852 | if (!isi) | ||
| 853 | return 0; /* no iosapic found, force polling */ | ||
| 838 | 854 | ||
| 839 | /* get vector info for this input line */ | 855 | /* get vector info for this input line */ |
| 840 | vi = isi->isi_vector + isi_line; | 856 | vi = isi->isi_vector + intin; |
| 841 | DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); | 857 | DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", iosapic_intin, vi); |
| 842 | 858 | ||
| 843 | /* If this IRQ line has already been setup, skip it */ | 859 | /* If this IRQ line has already been setup, skip it */ |
| 844 | if (vi->irte) | 860 | if (vi->irte) |
| @@ -941,8 +957,8 @@ void *iosapic_register(unsigned long hpa) | |||
| 941 | vip->irqline = (unsigned char) cnt; | 957 | vip->irqline = (unsigned char) cnt; |
| 942 | vip->iosapic = isi; | 958 | vip->iosapic = isi; |
| 943 | } | 959 | } |
| 944 | if (!first_isi) | 960 | isi->isi_next = iosapic_list; |
| 945 | first_isi = isi; | 961 | iosapic_list = isi; |
| 946 | return isi; | 962 | return isi; |
| 947 | } | 963 | } |
| 948 | 964 | ||
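
The iosapic rework replaces the single `first_isi` shortcut with a list of every registered IOSAPIC, so the 64-bit serial-IRQ path can match a device to its own controller (comparing `isi_hpa` against `dev->mod0`) and to the matching IRT entry, falling back to polling when either lookup fails. The list itself is the minimal push-front/linear-search kind, sketched here with the field names from the hunk:

```c
/* Sketch: open-coded singly linked registration list. */
static struct iosapic_info *iosapic_list;

static void register_isi(struct iosapic_info *isi)
{
	isi->isi_next = iosapic_list;	/* push-front on registration */
	iosapic_list = isi;
}

static struct iosapic_info *find_isi(unsigned long hpa)
{
	struct iosapic_info *isi;

	for (isi = iosapic_list; isi; isi = isi->isi_next)
		if (isi->isi_hpa == hpa)
			return isi;

	return NULL;	/* caller falls back to polling the port */
}
```
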
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 13a633b1612e..7bf3926aecc0 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
| @@ -86,10 +86,6 @@ struct mvebu_sw_pci_bridge { | |||
| 86 | u16 secondary_status; | 86 | u16 secondary_status; |
| 87 | u16 membase; | 87 | u16 membase; |
| 88 | u16 memlimit; | 88 | u16 memlimit; |
| 89 | u16 prefmembase; | ||
| 90 | u16 prefmemlimit; | ||
| 91 | u32 prefbaseupper; | ||
| 92 | u32 preflimitupper; | ||
| 93 | u16 iobaseupper; | 89 | u16 iobaseupper; |
| 94 | u16 iolimitupper; | 90 | u16 iolimitupper; |
| 95 | u8 cappointer; | 91 | u8 cappointer; |
| @@ -419,15 +415,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, | |||
| 419 | break; | 415 | break; |
| 420 | 416 | ||
| 421 | case PCI_PREF_MEMORY_BASE: | 417 | case PCI_PREF_MEMORY_BASE: |
| 422 | *value = (bridge->prefmemlimit << 16 | bridge->prefmembase); | 418 | *value = 0; |
| 423 | break; | ||
| 424 | |||
| 425 | case PCI_PREF_BASE_UPPER32: | ||
| 426 | *value = bridge->prefbaseupper; | ||
| 427 | break; | ||
| 428 | |||
| 429 | case PCI_PREF_LIMIT_UPPER32: | ||
| 430 | *value = bridge->preflimitupper; | ||
| 431 | break; | 419 | break; |
| 432 | 420 | ||
| 433 | case PCI_IO_BASE_UPPER16: | 421 | case PCI_IO_BASE_UPPER16: |
| @@ -501,19 +489,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, | |||
| 501 | mvebu_pcie_handle_membase_change(port); | 489 | mvebu_pcie_handle_membase_change(port); |
| 502 | break; | 490 | break; |
| 503 | 491 | ||
| 504 | case PCI_PREF_MEMORY_BASE: | ||
| 505 | bridge->prefmembase = value & 0xffff; | ||
| 506 | bridge->prefmemlimit = value >> 16; | ||
| 507 | break; | ||
| 508 | |||
| 509 | case PCI_PREF_BASE_UPPER32: | ||
| 510 | bridge->prefbaseupper = value; | ||
| 511 | break; | ||
| 512 | |||
| 513 | case PCI_PREF_LIMIT_UPPER32: | ||
| 514 | bridge->preflimitupper = value; | ||
| 515 | break; | ||
| 516 | |||
| 517 | case PCI_IO_BASE_UPPER16: | 492 | case PCI_IO_BASE_UPPER16: |
| 518 | bridge->iobaseupper = value & 0xffff; | 493 | bridge->iobaseupper = value & 0xffff; |
| 519 | bridge->iolimitupper = value >> 16; | 494 | bridge->iolimitupper = value >> 16; |
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index bb7ebb22db01..d85009de713d 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig | |||
| @@ -3,16 +3,13 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | menuconfig HOTPLUG_PCI | 5 | menuconfig HOTPLUG_PCI |
| 6 | tristate "Support for PCI Hotplug" | 6 | bool "Support for PCI Hotplug" |
| 7 | depends on PCI && SYSFS | 7 | depends on PCI && SYSFS |
| 8 | ---help--- | 8 | ---help--- |
| 9 | Say Y here if you have a motherboard with a PCI Hotplug controller. | 9 | Say Y here if you have a motherboard with a PCI Hotplug controller. |
| 10 | This allows you to add and remove PCI cards while the machine is | 10 | This allows you to add and remove PCI cards while the machine is |
| 11 | powered up and running. | 11 | powered up and running. |
| 12 | 12 | ||
| 13 | To compile this driver as a module, choose M here: the | ||
| 14 | module will be called pci_hotplug. | ||
| 15 | |||
| 16 | When in doubt, say N. | 13 | When in doubt, say N. |
| 17 | 14 | ||
| 18 | if HOTPLUG_PCI | 15 | if HOTPLUG_PCI |
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index aac7a40e4a4a..0e0d0f7f63fd 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
| @@ -92,7 +92,14 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
| 92 | if (ret) | 92 | if (ret) |
| 93 | presence = 0; | 93 | presence = 0; |
| 94 | 94 | ||
| 95 | list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) { | 95 | /* |
| 96 | * Stopping an SR-IOV PF device removes all the associated VFs, | ||
| 97 | * which will update the bus->devices list and confuse the | ||
| 98 | * iterator. Therefore, iterate in reverse so we remove the VFs | ||
| 99 | * first, then the PF. We do the same in pci_stop_bus_device(). | ||
| 100 | */ | ||
| 101 | list_for_each_entry_safe_reverse(dev, temp, &parent->devices, | ||
| 102 | bus_list) { | ||
| 96 | pci_dev_get(dev); | 103 | pci_dev_get(dev); |
| 97 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { | 104 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { |
| 98 | pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl); | 105 | pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl); |
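Editor's note: the reverse iteration above matters because stopping an SR-IOV PF also deletes its VFs from the same list, which would invalidate a forward iterator's saved "next" pointer. A self-contained userspace analog (all names invented) of why a saved-prev reverse walk survives:

```c
#include <stdio.h>
#include <stdlib.h>

struct dev {
	char name[8];
	int is_pf;
	struct dev *prev, *next;	/* circular list with a sentinel */
};

static struct dev head = { .prev = &head, .next = &head };

static struct dev *add_dev(const char *name, int is_pf)
{
	struct dev *d = calloc(1, sizeof(*d));
	snprintf(d->name, sizeof(d->name), "%s", name);
	d->is_pf = is_pf;
	d->prev = head.prev;
	d->next = &head;
	head.prev->next = d;
	head.prev = d;
	return d;
}

static void unlink_dev(struct dev *d)
{
	d->prev->next = d->next;
	d->next->prev = d->prev;
	printf("removed %s\n", d->name);
	free(d);
}

static void stop_and_remove(struct dev *d)
{
	if (d->is_pf) {
		/* stopping a PF takes all of its VFs off the list too */
		struct dev *v = head.next, *n;
		for (; v != &head; v = n) {
			n = v->next;
			if (!v->is_pf)
				unlink_dev(v);
		}
	}
	unlink_dev(d);
}

int main(void)
{
	add_dev("pf0", 1);
	add_dev("vf0", 0);
	add_dev("vf1", 0);
	/*
	 * Reverse walk with a saved prev pointer: the VFs at the tail go
	 * first, so stop_and_remove(pf0) finds nothing left to yank out
	 * from under the iterator. A forward walk would save a "next"
	 * pointer into a VF that stopping the PF frees.
	 */
	for (struct dev *d = head.prev, *tmp = d->prev; d != &head;
	     d = tmp, tmp = d->prev)
		stop_and_remove(d);
	return 0;
}
```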
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index b29e20b7862f..bb7af78e4eed 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c | |||
| @@ -388,7 +388,6 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) | |||
| 388 | /* Remove the EADS bridge device itself */ | 388 | /* Remove the EADS bridge device itself */ |
| 389 | BUG_ON(!bus->self); | 389 | BUG_ON(!bus->self); |
| 390 | pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); | 390 | pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); |
| 391 | eeh_remove_bus_device(bus->self, true); | ||
| 392 | pci_stop_and_remove_bus_device(bus->self); | 391 | pci_stop_and_remove_bus_device(bus->self); |
| 393 | 392 | ||
| 394 | return 0; | 393 | return 0; |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dbdc5f7e2b29..01e264fb50e0 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus) | |||
| 317 | /* ACPI bus type */ | 317 | /* ACPI bus type */ |
| 318 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) | 318 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) |
| 319 | { | 319 | { |
| 320 | struct pci_dev * pci_dev; | 320 | struct pci_dev *pci_dev = to_pci_dev(dev); |
| 321 | u64 addr; | 321 | bool is_bridge; |
| 322 | u64 addr; | ||
| 322 | 323 | ||
| 323 | pci_dev = to_pci_dev(dev); | 324 | /* |
| 325 | * pci_is_bridge() is not suitable here, because pci_dev->subordinate | ||
| 326 | * is set only after acpi_pci_find_device() has been called for the | ||
| 327 | * given device. | ||
| 328 | */ | ||
| 329 | is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE | ||
| 330 | || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; | ||
| 324 | /* Please ref to ACPI spec for the syntax of _ADR */ | 331 | /* Please ref to ACPI spec for the syntax of _ADR */ |
| 325 | addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); | 332 | addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); |
| 326 | *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); | 333 | *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); |
| 327 | if (!*handle) | 334 | if (!*handle) |
| 328 | return -ENODEV; | 335 | return -ENODEV; |
| 329 | return 0; | 336 | return 0; |
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 569f82fc9e22..3b94cfcfa03b 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig | |||
| @@ -14,15 +14,12 @@ config PCIEPORTBUS | |||
| 14 | # Include service Kconfig here | 14 | # Include service Kconfig here |
| 15 | # | 15 | # |
| 16 | config HOTPLUG_PCI_PCIE | 16 | config HOTPLUG_PCI_PCIE |
| 17 | tristate "PCI Express Hotplug driver" | 17 | bool "PCI Express Hotplug driver" |
| 18 | depends on HOTPLUG_PCI && PCIEPORTBUS | 18 | depends on HOTPLUG_PCI && PCIEPORTBUS |
| 19 | help | 19 | help |
| 20 | Say Y here if you have a motherboard that supports PCI Express Native | 20 | Say Y here if you have a motherboard that supports PCI Express Native |
| 21 | Hotplug | 21 | Hotplug |
| 22 | 22 | ||
| 23 | To compile this driver as a module, choose M here: the | ||
| 24 | module will be called pciehp. | ||
| 25 | |||
| 26 | When in doubt, say N. | 23 | When in doubt, say N. |
| 27 | 24 | ||
| 28 | source "drivers/pci/pcie/aer/Kconfig" | 25 | source "drivers/pci/pcie/aer/Kconfig" |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index d254e2379533..64a7de22d9af 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
| @@ -300,6 +300,47 @@ static void assign_requested_resources_sorted(struct list_head *head, | |||
| 300 | } | 300 | } |
| 301 | } | 301 | } |
| 302 | 302 | ||
| 303 | static unsigned long pci_fail_res_type_mask(struct list_head *fail_head) | ||
| 304 | { | ||
| 305 | struct pci_dev_resource *fail_res; | ||
| 306 | unsigned long mask = 0; | ||
| 307 | |||
| 308 | /* check failed type */ | ||
| 309 | list_for_each_entry(fail_res, fail_head, list) | ||
| 310 | mask |= fail_res->flags; | ||
| 311 | |||
| 312 | /* | ||
| 313 | * a failed prefetchable resource also sets IORESOURCE_MEM, | ||
| 314 | * since a pref BAR may be allocated from a non-pref range. | ||
| 315 | * All assigned non-pref sibling resources are then released | ||
| 316 | * according to that bit. | ||
| 317 | */ | ||
| 318 | return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH); | ||
| 319 | } | ||
| 320 | |||
| 321 | static bool pci_need_to_release(unsigned long mask, struct resource *res) | ||
| 322 | { | ||
| 323 | if (res->flags & IORESOURCE_IO) | ||
| 324 | return !!(mask & IORESOURCE_IO); | ||
| 325 | |||
| 326 | /* check pref at first */ | ||
| 327 | if (res->flags & IORESOURCE_PREFETCH) { | ||
| 328 | if (mask & IORESOURCE_PREFETCH) | ||
| 329 | return true; | ||
| 330 | /* a pref window inside a non-pref parent counts as MEM */ | ||
| 331 | else if ((mask & IORESOURCE_MEM) && | ||
| 332 | !(res->parent->flags & IORESOURCE_PREFETCH)) | ||
| 333 | return true; | ||
| 334 | else | ||
| 335 | return false; | ||
| 336 | } | ||
| 337 | |||
| 338 | if (res->flags & IORESOURCE_MEM) | ||
| 339 | return !!(mask & IORESOURCE_MEM); | ||
| 340 | |||
| 341 | return false; /* should not get here */ | ||
| 342 | } | ||
| 343 | |||
| 303 | static void __assign_resources_sorted(struct list_head *head, | 344 | static void __assign_resources_sorted(struct list_head *head, |
| 304 | struct list_head *realloc_head, | 345 | struct list_head *realloc_head, |
| 305 | struct list_head *fail_head) | 346 | struct list_head *fail_head) |
| @@ -312,11 +353,24 @@ static void __assign_resources_sorted(struct list_head *head, | |||
| 312 | * if could do that, could get out early. | 353 | * if could do that, could get out early. |
| 313 | * if could not do that, we still try to assign requested at first, | 354 | * if could not do that, we still try to assign requested at first, |
| 314 | * then try to reassign add_size for some resources. | 355 | * then try to reassign add_size for some resources. |
| 356 | * | ||
| 357 | * Check the three resource types separately to decide whether | ||
| 358 | * assigned resources must be released after the requested + | ||
| 359 | * add_size attempt: | ||
| 360 | * 1. if any io port assignment failed, release the | ||
| 361 | * assigned io ports. | ||
| 362 | * 2. if any pref mmio assignment failed, release the | ||
| 363 | * assigned pref mmio. | ||
| 364 | * Also release an assigned pref mmio whose parent is | ||
| 365 | * non-pref mmio when a non-pref mmio assignment failed. | ||
| 366 | * 3. if any non-pref mmio or pref mmio assignment failed, | ||
| 367 | * release the assigned non-pref mmio. | ||
| 315 | */ | 368 | */ |
| 316 | LIST_HEAD(save_head); | 369 | LIST_HEAD(save_head); |
| 317 | LIST_HEAD(local_fail_head); | 370 | LIST_HEAD(local_fail_head); |
| 318 | struct pci_dev_resource *save_res; | 371 | struct pci_dev_resource *save_res; |
| 319 | struct pci_dev_resource *dev_res; | 372 | struct pci_dev_resource *dev_res, *tmp_res; |
| 373 | unsigned long fail_type; | ||
| 320 | 374 | ||
| 321 | /* Check if optional add_size is there */ | 375 | /* Check if optional add_size is there */ |
| 322 | if (!realloc_head || list_empty(realloc_head)) | 376 | if (!realloc_head || list_empty(realloc_head)) |
| @@ -348,6 +402,19 @@ static void __assign_resources_sorted(struct list_head *head, | |||
| 348 | return; | 402 | return; |
| 349 | } | 403 | } |
| 350 | 404 | ||
| 405 | /* check failed type */ | ||
| 406 | fail_type = pci_fail_res_type_mask(&local_fail_head); | ||
| 407 | /* drop assigned resources that need not be released from the head list */ | ||
| 408 | list_for_each_entry_safe(dev_res, tmp_res, head, list) | ||
| 409 | if (dev_res->res->parent && | ||
| 410 | !pci_need_to_release(fail_type, dev_res->res)) { | ||
| 411 | /* remove it from realloc_head list */ | ||
| 412 | remove_from_list(realloc_head, dev_res->res); | ||
| 413 | remove_from_list(&save_head, dev_res->res); | ||
| 414 | list_del(&dev_res->list); | ||
| 415 | kfree(dev_res); | ||
| 416 | } | ||
| 417 | |||
| 351 | free_list(&local_fail_head); | 418 | free_list(&local_fail_head); |
| 352 | /* Release assigned resource */ | 419 | /* Release assigned resource */ |
| 353 | list_for_each_entry(dev_res, head, list) | 420 | list_for_each_entry(dev_res, head, list) |
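Editor's note: a standalone sketch of the release decision that `pci_need_to_release()` encodes above, using illustrative flag values rather than the kernel's real IORESOURCE_* bits:

```c
#include <stdbool.h>
#include <stdio.h>

#define RES_IO   0x1
#define RES_MEM  0x2
#define RES_PREF 0x4

struct res { unsigned long flags; unsigned long parent_flags; };

static bool need_to_release(unsigned long fail_mask, const struct res *r)
{
	if (r->flags & RES_IO)
		return fail_mask & RES_IO;
	if (r->flags & RES_PREF) {
		if (fail_mask & RES_PREF)
			return true;
		/* a pref window living inside a non-pref parent counts as MEM */
		return (fail_mask & RES_MEM) && !(r->parent_flags & RES_PREF);
	}
	if (r->flags & RES_MEM)
		return fail_mask & RES_MEM;
	return false;
}

int main(void)
{
	struct res pref_in_mem  = { RES_MEM | RES_PREF, RES_MEM };
	struct res pref_in_pref = { RES_MEM | RES_PREF, RES_MEM | RES_PREF };
	/* a MEM failure forces release only when the parent is non-pref */
	printf("%d\n", need_to_release(RES_MEM, &pref_in_mem));  /* 1 */
	printf("%d\n", need_to_release(RES_MEM, &pref_in_pref)); /* 0 */
	return 0;
}
```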
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 5b272bfd261d..2a00239661b3 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
| @@ -1193,6 +1193,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map) | |||
| 1193 | list_for_each_entry(maps_node, &pinctrl_maps, node) { | 1193 | list_for_each_entry(maps_node, &pinctrl_maps, node) { |
| 1194 | if (maps_node->maps == map) { | 1194 | if (maps_node->maps == map) { |
| 1195 | list_del(&maps_node->node); | 1195 | list_del(&maps_node->node); |
| 1196 | kfree(maps_node); | ||
| 1196 | mutex_unlock(&pinctrl_maps_mutex); | 1197 | mutex_unlock(&pinctrl_maps_mutex); |
| 1197 | return; | 1198 | return; |
| 1198 | } | 1199 | } |
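Editor's note: the one-line `kfree(maps_node)` plugs a leak -- once `list_del()` has unlinked the node it is unreachable, so the unregister path must free it. A tiny userspace analog of that unlink-then-free pattern:

```c
#include <stdlib.h>

struct node { struct node *next; const void *payload; };

static void unregister(struct node **head, const void *payload)
{
	for (struct node **pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->payload == payload) {
			struct node *victim = *pp;
			*pp = victim->next;	/* list_del() analog */
			free(victim);		/* the line the patch adds */
			return;
		}
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));
	struct node *list = n;
	unregister(&list, n->payload);	/* n is unlinked and freed */
	return list != NULL;		/* exits 0: list is now empty */
}
```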
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 6866548fab31..7323cca440b5 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c | |||
| @@ -1483,6 +1483,7 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs) | |||
| 1483 | return ret; | 1483 | return ret; |
| 1484 | } | 1484 | } |
| 1485 | 1485 | ||
| 1486 | #ifdef CONFIG_PM | ||
| 1486 | static int pinctrl_single_suspend(struct platform_device *pdev, | 1487 | static int pinctrl_single_suspend(struct platform_device *pdev, |
| 1487 | pm_message_t state) | 1488 | pm_message_t state) |
| 1488 | { | 1489 | { |
| @@ -1505,6 +1506,7 @@ static int pinctrl_single_resume(struct platform_device *pdev) | |||
| 1505 | 1506 | ||
| 1506 | return pinctrl_force_default(pcs->pctl); | 1507 | return pinctrl_force_default(pcs->pctl); |
| 1507 | } | 1508 | } |
| 1509 | #endif | ||
| 1508 | 1510 | ||
| 1509 | static int pcs_probe(struct platform_device *pdev) | 1511 | static int pcs_probe(struct platform_device *pdev) |
| 1510 | { | 1512 | { |
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index c47fd1e5450b..94716c779800 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c | |||
| @@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, | |||
| 278 | { | 278 | { |
| 279 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 279 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
| 280 | struct sunxi_pinctrl_group *g = &pctl->groups[group]; | 280 | struct sunxi_pinctrl_group *g = &pctl->groups[group]; |
| 281 | unsigned long flags; | ||
| 281 | u32 val, mask; | 282 | u32 val, mask; |
| 282 | u16 strength; | 283 | u16 strength; |
| 283 | u8 dlevel; | 284 | u8 dlevel; |
| @@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, | |||
| 295 | * 3: 40mA | 296 | * 3: 40mA |
| 296 | */ | 297 | */ |
| 297 | dlevel = strength / 10 - 1; | 298 | dlevel = strength / 10 - 1; |
| 299 | |||
| 300 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 301 | |||
| 298 | val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); | 302 | val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); |
| 299 | mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); | 303 | mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); |
| 300 | writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), | 304 | writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), |
| 301 | pctl->membase + sunxi_dlevel_reg(g->pin)); | 305 | pctl->membase + sunxi_dlevel_reg(g->pin)); |
| 306 | |||
| 307 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 302 | break; | 308 | break; |
| 303 | case PIN_CONFIG_BIAS_PULL_UP: | 309 | case PIN_CONFIG_BIAS_PULL_UP: |
| 310 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 311 | |||
| 304 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); | 312 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); |
| 305 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); | 313 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); |
| 306 | writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), | 314 | writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), |
| 307 | pctl->membase + sunxi_pull_reg(g->pin)); | 315 | pctl->membase + sunxi_pull_reg(g->pin)); |
| 316 | |||
| 317 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 308 | break; | 318 | break; |
| 309 | case PIN_CONFIG_BIAS_PULL_DOWN: | 319 | case PIN_CONFIG_BIAS_PULL_DOWN: |
| 320 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 321 | |||
| 310 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); | 322 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); |
| 311 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); | 323 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); |
| 312 | writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), | 324 | writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), |
| 313 | pctl->membase + sunxi_pull_reg(g->pin)); | 325 | pctl->membase + sunxi_pull_reg(g->pin)); |
| 326 | |||
| 327 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 314 | break; | 328 | break; |
| 315 | default: | 329 | default: |
| 316 | break; | 330 | break; |
| @@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev, | |||
| 360 | u8 config) | 374 | u8 config) |
| 361 | { | 375 | { |
| 362 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 376 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
| 377 | unsigned long flags; | ||
| 378 | u32 val, mask; | ||
| 379 | |||
| 380 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 363 | 381 | ||
| 364 | u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); | 382 | val = readl(pctl->membase + sunxi_mux_reg(pin)); |
| 365 | u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); | 383 | mask = MUX_PINS_MASK << sunxi_mux_offset(pin); |
| 366 | writel((val & ~mask) | config << sunxi_mux_offset(pin), | 384 | writel((val & ~mask) | config << sunxi_mux_offset(pin), |
| 367 | pctl->membase + sunxi_mux_reg(pin)); | 385 | pctl->membase + sunxi_mux_reg(pin)); |
| 386 | |||
| 387 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 368 | } | 388 | } |
| 369 | 389 | ||
| 370 | static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, | 390 | static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, |
| @@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip, | |||
| 464 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); | 484 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); |
| 465 | u32 reg = sunxi_data_reg(offset); | 485 | u32 reg = sunxi_data_reg(offset); |
| 466 | u8 index = sunxi_data_offset(offset); | 486 | u8 index = sunxi_data_offset(offset); |
| 487 | unsigned long flags; | ||
| 488 | u32 regval; | ||
| 489 | |||
| 490 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 491 | |||
| 492 | regval = readl(pctl->membase + reg); | ||
| 467 | 493 | ||
| 468 | writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); | 494 | if (value) |
| 495 | regval |= BIT(index); | ||
| 496 | else | ||
| 497 | regval &= ~(BIT(index)); | ||
| 498 | |||
| 499 | writel(regval, pctl->membase + reg); | ||
| 500 | |||
| 501 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 469 | } | 502 | } |
| 470 | 503 | ||
| 471 | static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, | 504 | static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, |
| @@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, | |||
| 526 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); | 559 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); |
| 527 | u32 reg = sunxi_irq_cfg_reg(d->hwirq); | 560 | u32 reg = sunxi_irq_cfg_reg(d->hwirq); |
| 528 | u8 index = sunxi_irq_cfg_offset(d->hwirq); | 561 | u8 index = sunxi_irq_cfg_offset(d->hwirq); |
| 562 | unsigned long flags; | ||
| 563 | u32 regval; | ||
| 529 | u8 mode; | 564 | u8 mode; |
| 530 | 565 | ||
| 531 | switch (type) { | 566 | switch (type) { |
| @@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, | |||
| 548 | return -EINVAL; | 583 | return -EINVAL; |
| 549 | } | 584 | } |
| 550 | 585 | ||
| 551 | writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); | 586 | spin_lock_irqsave(&pctl->lock, flags); |
| 587 | |||
| 588 | regval = readl(pctl->membase + reg); | ||
| 589 | regval &= ~IRQ_CFG_IRQ_MASK; | ||
| 590 | writel(regval | (mode << index), pctl->membase + reg); | ||
| 591 | |||
| 592 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 552 | 593 | ||
| 553 | return 0; | 594 | return 0; |
| 554 | } | 595 | } |
| @@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d) | |||
| 560 | u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); | 601 | u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); |
| 561 | u32 status_reg = sunxi_irq_status_reg(d->hwirq); | 602 | u32 status_reg = sunxi_irq_status_reg(d->hwirq); |
| 562 | u8 status_idx = sunxi_irq_status_offset(d->hwirq); | 603 | u8 status_idx = sunxi_irq_status_offset(d->hwirq); |
| 604 | unsigned long flags; | ||
| 563 | u32 val; | 605 | u32 val; |
| 564 | 606 | ||
| 607 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 608 | |||
| 565 | /* Mask the IRQ */ | 609 | /* Mask the IRQ */ |
| 566 | val = readl(pctl->membase + ctrl_reg); | 610 | val = readl(pctl->membase + ctrl_reg); |
| 567 | writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); | 611 | writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); |
| 568 | 612 | ||
| 569 | /* Clear the IRQ */ | 613 | /* Clear the IRQ */ |
| 570 | writel(1 << status_idx, pctl->membase + status_reg); | 614 | writel(1 << status_idx, pctl->membase + status_reg); |
| 615 | |||
| 616 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 571 | } | 617 | } |
| 572 | 618 | ||
| 573 | static void sunxi_pinctrl_irq_mask(struct irq_data *d) | 619 | static void sunxi_pinctrl_irq_mask(struct irq_data *d) |
| @@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d) | |||
| 575 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); | 621 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); |
| 576 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); | 622 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); |
| 577 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); | 623 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); |
| 624 | unsigned long flags; | ||
| 578 | u32 val; | 625 | u32 val; |
| 579 | 626 | ||
| 627 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 628 | |||
| 580 | /* Mask the IRQ */ | 629 | /* Mask the IRQ */ |
| 581 | val = readl(pctl->membase + reg); | 630 | val = readl(pctl->membase + reg); |
| 582 | writel(val & ~(1 << idx), pctl->membase + reg); | 631 | writel(val & ~(1 << idx), pctl->membase + reg); |
| 632 | |||
| 633 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 583 | } | 634 | } |
| 584 | 635 | ||
| 585 | static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | 636 | static void sunxi_pinctrl_irq_unmask(struct irq_data *d) |
| @@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | |||
| 588 | struct sunxi_desc_function *func; | 639 | struct sunxi_desc_function *func; |
| 589 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); | 640 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); |
| 590 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); | 641 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); |
| 642 | unsigned long flags; | ||
| 591 | u32 val; | 643 | u32 val; |
| 592 | 644 | ||
| 593 | func = sunxi_pinctrl_desc_find_function_by_pin(pctl, | 645 | func = sunxi_pinctrl_desc_find_function_by_pin(pctl, |
| @@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | |||
| 597 | /* Change muxing to INT mode */ | 649 | /* Change muxing to INT mode */ |
| 598 | sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); | 650 | sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); |
| 599 | 651 | ||
| 652 | spin_lock_irqsave(&pctl->lock, flags); | ||
| 653 | |||
| 600 | /* Unmask the IRQ */ | 654 | /* Unmask the IRQ */ |
| 601 | val = readl(pctl->membase + reg); | 655 | val = readl(pctl->membase + reg); |
| 602 | writel(val | (1 << idx), pctl->membase + reg); | 656 | writel(val | (1 << idx), pctl->membase + reg); |
| 657 | |||
| 658 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
| 603 | } | 659 | } |
| 604 | 660 | ||
| 605 | static struct irq_chip sunxi_pinctrl_irq_chip = { | 661 | static struct irq_chip sunxi_pinctrl_irq_chip = { |
| @@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) | |||
| 752 | return -ENOMEM; | 808 | return -ENOMEM; |
| 753 | platform_set_drvdata(pdev, pctl); | 809 | platform_set_drvdata(pdev, pctl); |
| 754 | 810 | ||
| 811 | spin_lock_init(&pctl->lock); | ||
| 812 | |||
| 755 | pctl->membase = of_iomap(node, 0); | 813 | pctl->membase = of_iomap(node, 0); |
| 756 | if (!pctl->membase) | 814 | if (!pctl->membase) |
| 757 | return -ENOMEM; | 815 | return -ENOMEM; |
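Editor's note: every sunxi hunk above wraps a readl/modify/writel sequence in the new `pctl->lock` so concurrent callers cannot interleave and lose register updates. A userspace sketch of that read-modify-write-under-a-lock shape, with a pthread mutex standing in for spin_lock_irqsave() and a plain variable for the MMIO register:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;			/* stands in for an MMIO register */
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_field(unsigned shift, uint32_t mask, uint32_t val)
{
	pthread_mutex_lock(&reg_lock);		/* spin_lock_irqsave() analog */
	uint32_t v = fake_reg;			/* readl() */
	v = (v & ~(mask << shift)) | (val << shift);
	fake_reg = v;				/* writel() */
	pthread_mutex_unlock(&reg_lock);	/* spin_unlock_irqrestore() */
}

int main(void)
{
	set_field(4, 0x7, 2);
	set_field(0, 0x3, 1);
	printf("reg=0x%x\n", (unsigned)fake_reg);	/* 0x21 */
	return 0;
}
```

Without the lock, two threads reading `fake_reg` before either writes back would silently drop one of the field updates -- exactly the race the driver closes.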
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index d68047d8f699..01c494f8a14f 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #define __PINCTRL_SUNXI_H | 14 | #define __PINCTRL_SUNXI_H |
| 15 | 15 | ||
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/spinlock.h> | ||
| 17 | 18 | ||
| 18 | #define PA_BASE 0 | 19 | #define PA_BASE 0 |
| 19 | #define PB_BASE 32 | 20 | #define PB_BASE 32 |
| @@ -407,6 +408,7 @@ struct sunxi_pinctrl { | |||
| 407 | unsigned ngroups; | 408 | unsigned ngroups; |
| 408 | int irq; | 409 | int irq; |
| 409 | int irq_array[SUNXI_IRQ_NUMBER]; | 410 | int irq_array[SUNXI_IRQ_NUMBER]; |
| 411 | spinlock_t lock; | ||
| 410 | struct pinctrl_dev *pctl_dev; | 412 | struct pinctrl_dev *pctl_dev; |
| 411 | }; | 413 | }; |
| 412 | 414 | ||
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c index 7956df58d751..31f7d0e04aaa 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c | |||
| @@ -3785,6 +3785,7 @@ static const struct regulator_desc sh73a0_vccq_mc0_desc = { | |||
| 3785 | 3785 | ||
| 3786 | static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = { | 3786 | static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = { |
| 3787 | REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), | 3787 | REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), |
| 3788 | REGULATOR_SUPPLY("vqmmc", "ee100000.sdhi"), | ||
| 3788 | }; | 3789 | }; |
| 3789 | 3790 | ||
| 3790 | static const struct regulator_init_data sh73a0_vccq_mc0_init_data = { | 3791 | static const struct regulator_init_data sh73a0_vccq_mc0_init_data = { |
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c index 1fa39a444171..867c9681763c 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas6.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c | |||
| @@ -496,7 +496,7 @@ static const unsigned sdmmc5_pins[] = { 24, 25, 26 }; | |||
| 496 | static const struct sirfsoc_muxmask usp0_muxmask[] = { | 496 | static const struct sirfsoc_muxmask usp0_muxmask[] = { |
| 497 | { | 497 | { |
| 498 | .group = 1, | 498 | .group = 1, |
| 499 | .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22), | 499 | .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23), |
| 500 | }, | 500 | }, |
| 501 | }; | 501 | }; |
| 502 | 502 | ||
| @@ -507,8 +507,21 @@ static const struct sirfsoc_padmux usp0_padmux = { | |||
| 507 | .funcval = 0, | 507 | .funcval = 0, |
| 508 | }; | 508 | }; |
| 509 | 509 | ||
| 510 | static const unsigned usp0_pins[] = { 51, 52, 53, 54 }; | 510 | static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 }; |
| 511 | 511 | ||
| 512 | static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = { | ||
| 513 | { | ||
| 514 | .group = 1, | ||
| 515 | .mask = BIT(20) | BIT(21), | ||
| 516 | }, | ||
| 517 | }; | ||
| 518 | |||
| 519 | static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = { | ||
| 520 | .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask), | ||
| 521 | .muxmask = usp0_uart_nostreamctrl_muxmask, | ||
| 522 | }; | ||
| 523 | |||
| 524 | static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 }; | ||
| 512 | static const struct sirfsoc_muxmask usp1_muxmask[] = { | 525 | static const struct sirfsoc_muxmask usp1_muxmask[] = { |
| 513 | { | 526 | { |
| 514 | .group = 0, | 527 | .group = 0, |
| @@ -822,6 +835,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = { | |||
| 822 | SIRFSOC_PIN_GROUP("uart2grp", uart2_pins), | 835 | SIRFSOC_PIN_GROUP("uart2grp", uart2_pins), |
| 823 | SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins), | 836 | SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins), |
| 824 | SIRFSOC_PIN_GROUP("usp0grp", usp0_pins), | 837 | SIRFSOC_PIN_GROUP("usp0grp", usp0_pins), |
| 838 | SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp", | ||
| 839 | usp0_uart_nostreamctrl_pins), | ||
| 825 | SIRFSOC_PIN_GROUP("usp1grp", usp1_pins), | 840 | SIRFSOC_PIN_GROUP("usp1grp", usp1_pins), |
| 826 | SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins), | 841 | SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins), |
| 827 | SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins), | 842 | SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins), |
| @@ -862,6 +877,8 @@ static const char * const uart0grp[] = { "uart0grp" }; | |||
| 862 | static const char * const uart1grp[] = { "uart1grp" }; | 877 | static const char * const uart1grp[] = { "uart1grp" }; |
| 863 | static const char * const uart2grp[] = { "uart2grp" }; | 878 | static const char * const uart2grp[] = { "uart2grp" }; |
| 864 | static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" }; | 879 | static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" }; |
| 880 | static const char * const usp0_uart_nostreamctrl_grp[] = { | ||
| 881 | "usp0_uart_nostreamctrl_grp" }; | ||
| 865 | static const char * const usp0grp[] = { "usp0grp" }; | 882 | static const char * const usp0grp[] = { "usp0grp" }; |
| 866 | static const char * const usp1grp[] = { "usp1grp" }; | 883 | static const char * const usp1grp[] = { "usp1grp" }; |
| 867 | static const char * const i2c0grp[] = { "i2c0grp" }; | 884 | static const char * const i2c0grp[] = { "i2c0grp" }; |
| @@ -904,6 +921,9 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = { | |||
| 904 | SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), | 921 | SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), |
| 905 | SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), | 922 | SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), |
| 906 | SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), | 923 | SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), |
| 924 | SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl", | ||
| 925 | usp0_uart_nostreamctrl_grp, | ||
| 926 | usp0_uart_nostreamctrl_padmux), | ||
| 907 | SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux), | 927 | SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux), |
| 908 | SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux), | 928 | SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux), |
| 909 | SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux), | 929 | SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux), |
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c index 0f9f8596b300..f9119525f557 100644 --- a/drivers/platform/olpc/olpc-ec.c +++ b/drivers/platform/olpc/olpc-ec.c | |||
| @@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void) | |||
| 330 | return platform_driver_register(&olpc_ec_plat_driver); | 330 | return platform_driver_register(&olpc_ec_plat_driver); |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | module_init(olpc_ec_init_module); | 333 | arch_initcall(olpc_ec_init_module); |
| 334 | 334 | ||
| 335 | MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); | 335 | MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); |
| 336 | MODULE_LICENSE("GPL"); | 336 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 97bb05edcb5a..d6970f47ae72 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
| @@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
| 53 | #define HPWMI_ALS_QUERY 0x3 | 53 | #define HPWMI_ALS_QUERY 0x3 |
| 54 | #define HPWMI_HARDWARE_QUERY 0x4 | 54 | #define HPWMI_HARDWARE_QUERY 0x4 |
| 55 | #define HPWMI_WIRELESS_QUERY 0x5 | 55 | #define HPWMI_WIRELESS_QUERY 0x5 |
| 56 | #define HPWMI_BIOS_QUERY 0x9 | ||
| 57 | #define HPWMI_HOTKEY_QUERY 0xc | 56 | #define HPWMI_HOTKEY_QUERY 0xc |
| 58 | #define HPWMI_WIRELESS2_QUERY 0x1b | 57 | #define HPWMI_WIRELESS2_QUERY 0x1b |
| 59 | #define HPWMI_POSTCODEERROR_QUERY 0x2a | 58 | #define HPWMI_POSTCODEERROR_QUERY 0x2a |
| @@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void) | |||
| 293 | return (state & 0x4) ? 1 : 0; | 292 | return (state & 0x4) ? 1 : 0; |
| 294 | } | 293 | } |
| 295 | 294 | ||
| 296 | static int hp_wmi_enable_hotkeys(void) | ||
| 297 | { | ||
| 298 | int ret; | ||
| 299 | int query = 0x6e; | ||
| 300 | |||
| 301 | ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), | ||
| 302 | 0); | ||
| 303 | |||
| 304 | if (ret) | ||
| 305 | return -EINVAL; | ||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 309 | static int hp_wmi_set_block(void *data, bool blocked) | 295 | static int hp_wmi_set_block(void *data, bool blocked) |
| 310 | { | 296 | { |
| 311 | enum hp_wmi_radio r = (enum hp_wmi_radio) data; | 297 | enum hp_wmi_radio r = (enum hp_wmi_radio) data; |
| @@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void) | |||
| 1009 | err = hp_wmi_input_setup(); | 995 | err = hp_wmi_input_setup(); |
| 1010 | if (err) | 996 | if (err) |
| 1011 | return err; | 997 | return err; |
| 1012 | |||
| 1013 | hp_wmi_enable_hotkeys(); | ||
| 1014 | } | 998 | } |
| 1015 | 999 | ||
| 1016 | if (bios_capable) { | 1000 | if (bios_capable) { |
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 2ac045f27f10..3a1b6bf326a8 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
| @@ -2440,7 +2440,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev, | |||
| 2440 | if (pos < 0) | 2440 | if (pos < 0) |
| 2441 | return pos; | 2441 | return pos; |
| 2442 | 2442 | ||
| 2443 | return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina"); | 2443 | return snprintf(buffer, PAGE_SIZE, "%s\n", |
| 2444 | pos == SPEED ? "speed" : | ||
| 2445 | pos == STAMINA ? "stamina" : | ||
| 2446 | pos == AUTO ? "auto" : "unknown"); | ||
| 2444 | } | 2447 | } |
| 2445 | 2448 | ||
| 2446 | static int sony_nc_gfx_switch_setup(struct platform_device *pd, | 2449 | static int sony_nc_gfx_switch_setup(struct platform_device *pd, |
| @@ -4320,7 +4323,8 @@ static int sony_pic_add(struct acpi_device *device) | |||
| 4320 | goto err_free_resources; | 4323 | goto err_free_resources; |
| 4321 | } | 4324 | } |
| 4322 | 4325 | ||
| 4323 | if (sonypi_compat_init()) | 4326 | result = sonypi_compat_init(); |
| 4327 | if (result) | ||
| 4324 | goto err_remove_input; | 4328 | goto err_remove_input; |
| 4325 | 4329 | ||
| 4326 | /* request io port */ | 4330 | /* request io port */ |
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index f4f30af2df68..2e8a20cac588 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
| @@ -1715,11 +1715,13 @@ int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) | |||
| 1715 | (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) | 1715 | (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) |
| 1716 | port->nscan = NULL; | 1716 | port->nscan = NULL; |
| 1717 | 1717 | ||
| 1718 | list_for_each_entry(scan, &rio_scans, node) | 1718 | list_for_each_entry(scan, &rio_scans, node) { |
| 1719 | if (scan->mport_id == mport_id) { | 1719 | if (scan->mport_id == mport_id) { |
| 1720 | list_del(&scan->node); | 1720 | list_del(&scan->node); |
| 1721 | kfree(scan); | 1721 | kfree(scan); |
| 1722 | break; | ||
| 1722 | } | 1723 | } |
| 1724 | } | ||
| 1723 | 1725 | ||
| 1724 | mutex_unlock(&rio_mport_list_lock); | 1726 | mutex_unlock(&rio_mport_list_lock); |
| 1725 | 1727 | ||
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 767fee2ab340..26019531db15 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
| 25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
| 26 | #include <linux/delay.h> | ||
| 26 | #include <linux/rtc.h> | 27 | #include <linux/rtc.h> |
| 27 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 28 | #include <linux/of_device.h> | 29 | #include <linux/of_device.h> |
| @@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev) | |||
| 119 | } | 120 | } |
| 120 | #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ | 121 | #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ |
| 121 | 122 | ||
| 122 | static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) | 123 | static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) |
| 123 | { | 124 | { |
| 125 | int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */ | ||
| 124 | /* | 126 | /* |
| 125 | * The datasheet doesn't say which way round the | 127 | * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010 |
| 126 | * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0, | 128 | * states: |
| 127 | * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS | 129 | * | The order in which registers are updated is |
| 130 | * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds. | ||
| 131 | * | (This list is in bitfield order, from LSB to MSB, as they would | ||
| 132 | * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT | ||
| 133 | * | register. For example, the Seconds register corresponds to | ||
| 134 | * | STALE_REGS or NEW_REGS containing 0x80.) | ||
| 128 | */ | 135 | */ |
| 129 | while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & | 136 | do { |
| 130 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) | 137 | if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & |
| 131 | cpu_relax(); | 138 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))) |
| 139 | return 0; | ||
| 140 | udelay(1); | ||
| 141 | } while (--timeout > 0); | ||
| 142 | return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & | ||
| 143 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0; | ||
| 132 | } | 144 | } |
| 133 | 145 | ||
| 134 | /* Time read/write */ | 146 | /* Time read/write */ |
| 135 | static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) | 147 | static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) |
| 136 | { | 148 | { |
| 149 | int ret; | ||
| 137 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); | 150 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); |
| 138 | 151 | ||
| 139 | stmp3xxx_wait_time(rtc_data); | 152 | ret = stmp3xxx_wait_time(rtc_data); |
| 153 | if (ret) | ||
| 154 | return ret; | ||
| 155 | |||
| 140 | rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); | 156 | rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); |
| 141 | return 0; | 157 | return 0; |
| 142 | } | 158 | } |
| @@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t) | |||
| 146 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); | 162 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); |
| 147 | 163 | ||
| 148 | writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); | 164 | writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); |
| 149 | stmp3xxx_wait_time(rtc_data); | 165 | return stmp3xxx_wait_time(rtc_data); |
| 150 | return 0; | ||
| 151 | } | 166 | } |
| 152 | 167 | ||
| 153 | /* interrupt(s) handler */ | 168 | /* interrupt(s) handler */ |
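Editor's note: the rtc-stmp3xxx change converts an unbounded cpu_relax() spin into a bounded poll that reports -ETIME. A simulated, runnable version of that bounded-poll shape (the register and the hardware's eventual bit-clear are faked; ETIME is the same errno the driver returns):

```c
#include <errno.h>
#include <stdio.h>

static unsigned int stat_reg = 0x80;	/* STALE bit starts set */

static unsigned int read_stat(void)
{
	static int polls;
	if (++polls == 3)		/* "hardware" clears the bit eventually */
		stat_reg &= ~0x80u;
	return stat_reg;
}

static int wait_not_stale(void)
{
	int timeout = 5000;		/* finite budget, unlike the old spin */
	do {
		if (!(read_stat() & 0x80))
			return 0;
		/* udelay(1) would go here in the driver */
	} while (--timeout > 0);
	return (read_stat() & 0x80) ? -ETIME : 0;
}

int main(void)
{
	printf("wait -> %d\n", wait_not_stale());	/* 0 */
	return 0;
}
```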
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 02faf3c4e0d5..c2e80d7ca5e2 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c | |||
| @@ -524,6 +524,8 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
| 524 | if (ret < 0) | 524 | if (ret < 0) |
| 525 | goto out1; | 525 | goto out1; |
| 526 | 526 | ||
| 527 | device_init_wakeup(&pdev->dev, 1); | ||
| 528 | |||
| 527 | rtc = rtc_device_register(pdev->name, | 529 | rtc = rtc_device_register(pdev->name, |
| 528 | &pdev->dev, &twl_rtc_ops, THIS_MODULE); | 530 | &pdev->dev, &twl_rtc_ops, THIS_MODULE); |
| 529 | if (IS_ERR(rtc)) { | 531 | if (IS_ERR(rtc)) { |
| @@ -542,7 +544,6 @@ static int twl_rtc_probe(struct platform_device *pdev) | |||
| 542 | } | 544 | } |
| 543 | 545 | ||
| 544 | platform_set_drvdata(pdev, rtc); | 546 | platform_set_drvdata(pdev, rtc); |
| 545 | device_init_wakeup(&pdev->dev, 1); | ||
| 546 | return 0; | 547 | return 0; |
| 547 | 548 | ||
| 548 | out2: | 549 | out2: |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 17150a778984..451bf99582ff 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
| @@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | |||
| 2392 | rc = cqr->intrc; | 2392 | rc = cqr->intrc; |
| 2393 | else | 2393 | else |
| 2394 | rc = -EIO; | 2394 | rc = -EIO; |
| 2395 | |||
| 2396 | /* kick tasklets */ | ||
| 2397 | dasd_schedule_device_bh(device); | ||
| 2398 | if (device->block) | ||
| 2399 | dasd_schedule_block_bh(device->block); | ||
| 2400 | |||
| 2395 | return rc; | 2401 | return rc; |
| 2396 | } | 2402 | } |
| 2397 | 2403 | ||
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 1d4c8fe72752..c82fe65c4128 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
| @@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) | |||
| 102 | 102 | ||
| 103 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) | 103 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) |
| 104 | zfcp_erp_action_dismiss(&port->erp_action); | 104 | zfcp_erp_action_dismiss(&port->erp_action); |
| 105 | else | 105 | else { |
| 106 | shost_for_each_device(sdev, port->adapter->scsi_host) | 106 | spin_lock(port->adapter->scsi_host->host_lock); |
| 107 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
| 107 | if (sdev_to_zfcp(sdev)->port == port) | 108 | if (sdev_to_zfcp(sdev)->port == port) |
| 108 | zfcp_erp_action_dismiss_lun(sdev); | 109 | zfcp_erp_action_dismiss_lun(sdev); |
| 110 | spin_unlock(port->adapter->scsi_host->host_lock); | ||
| 111 | } | ||
| 109 | } | 112 | } |
| 110 | 113 | ||
| 111 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | 114 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) |
| @@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, | |||
| 592 | { | 595 | { |
| 593 | struct scsi_device *sdev; | 596 | struct scsi_device *sdev; |
| 594 | 597 | ||
| 595 | shost_for_each_device(sdev, port->adapter->scsi_host) | 598 | spin_lock(port->adapter->scsi_host->host_lock); |
| 599 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
| 596 | if (sdev_to_zfcp(sdev)->port == port) | 600 | if (sdev_to_zfcp(sdev)->port == port) |
| 597 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); | 601 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); |
| 602 | spin_unlock(port->adapter->scsi_host->host_lock); | ||
| 598 | } | 603 | } |
| 599 | 604 | ||
| 600 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) | 605 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) |
| @@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) | |||
| 1434 | atomic_set_mask(common_mask, &port->status); | 1439 | atomic_set_mask(common_mask, &port->status); |
| 1435 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 1440 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
| 1436 | 1441 | ||
| 1437 | shost_for_each_device(sdev, adapter->scsi_host) | 1442 | spin_lock_irqsave(adapter->scsi_host->host_lock, flags); |
| 1443 | __shost_for_each_device(sdev, adapter->scsi_host) | ||
| 1438 | atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); | 1444 | atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); |
| 1445 | spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); | ||
| 1439 | } | 1446 | } |
| 1440 | 1447 | ||
| 1441 | /** | 1448 | /** |
| @@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) | |||
| 1469 | } | 1476 | } |
| 1470 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 1477 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
| 1471 | 1478 | ||
| 1472 | shost_for_each_device(sdev, adapter->scsi_host) { | 1479 | spin_lock_irqsave(adapter->scsi_host->host_lock, flags); |
| 1480 | __shost_for_each_device(sdev, adapter->scsi_host) { | ||
| 1473 | atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); | 1481 | atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); |
| 1474 | if (clear_counter) | 1482 | if (clear_counter) |
| 1475 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); | 1483 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); |
| 1476 | } | 1484 | } |
| 1485 | spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); | ||
| 1477 | } | 1486 | } |
| 1478 | 1487 | ||
| 1479 | /** | 1488 | /** |
| @@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) | |||
| 1487 | { | 1496 | { |
| 1488 | struct scsi_device *sdev; | 1497 | struct scsi_device *sdev; |
| 1489 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1498 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
| 1499 | unsigned long flags; | ||
| 1490 | 1500 | ||
| 1491 | atomic_set_mask(mask, &port->status); | 1501 | atomic_set_mask(mask, &port->status); |
| 1492 | 1502 | ||
| 1493 | if (!common_mask) | 1503 | if (!common_mask) |
| 1494 | return; | 1504 | return; |
| 1495 | 1505 | ||
| 1496 | shost_for_each_device(sdev, port->adapter->scsi_host) | 1506 | spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); |
| 1507 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
| 1497 | if (sdev_to_zfcp(sdev)->port == port) | 1508 | if (sdev_to_zfcp(sdev)->port == port) |
| 1498 | atomic_set_mask(common_mask, | 1509 | atomic_set_mask(common_mask, |
| 1499 | &sdev_to_zfcp(sdev)->status); | 1510 | &sdev_to_zfcp(sdev)->status); |
| 1511 | spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); | ||
| 1500 | } | 1512 | } |
| 1501 | 1513 | ||
| 1502 | /** | 1514 | /** |
| @@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) | |||
| 1511 | struct scsi_device *sdev; | 1523 | struct scsi_device *sdev; |
| 1512 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1524 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
| 1513 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; | 1525 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; |
| 1526 | unsigned long flags; | ||
| 1514 | 1527 | ||
| 1515 | atomic_clear_mask(mask, &port->status); | 1528 | atomic_clear_mask(mask, &port->status); |
| 1516 | 1529 | ||
| @@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) | |||
| 1520 | if (clear_counter) | 1533 | if (clear_counter) |
| 1521 | atomic_set(&port->erp_counter, 0); | 1534 | atomic_set(&port->erp_counter, 0); |
| 1522 | 1535 | ||
| 1523 | shost_for_each_device(sdev, port->adapter->scsi_host) | 1536 | spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); |
| 1537 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
| 1524 | if (sdev_to_zfcp(sdev)->port == port) { | 1538 | if (sdev_to_zfcp(sdev)->port == port) { |
| 1525 | atomic_clear_mask(common_mask, | 1539 | atomic_clear_mask(common_mask, |
| 1526 | &sdev_to_zfcp(sdev)->status); | 1540 | &sdev_to_zfcp(sdev)->status); |
| 1527 | if (clear_counter) | 1541 | if (clear_counter) |
| 1528 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); | 1542 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); |
| 1529 | } | 1543 | } |
| 1544 | spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); | ||
| 1530 | } | 1545 | } |
| 1531 | 1546 | ||
| 1532 | /** | 1547 | /** |
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 665e3cfaaf85..de0598eaacd2 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
| @@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | |||
| 224 | 224 | ||
| 225 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) | 225 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) |
| 226 | { | 226 | { |
| 227 | spin_lock_irq(&qdio->req_q_lock); | ||
| 228 | if (atomic_read(&qdio->req_q_free) || | 227 | if (atomic_read(&qdio->req_q_free) || |
| 229 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 228 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
| 230 | return 1; | 229 | return 1; |
| 231 | spin_unlock_irq(&qdio->req_q_lock); | ||
| 232 | return 0; | 230 | return 0; |
| 233 | } | 231 | } |
| 234 | 232 | ||
| @@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
| 246 | { | 244 | { |
| 247 | long ret; | 245 | long ret; |
| 248 | 246 | ||
| 249 | spin_unlock_irq(&qdio->req_q_lock); | 247 | ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, |
| 250 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, | 248 | zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); |
| 251 | zfcp_qdio_sbal_check(qdio), 5 * HZ); | ||
| 252 | 249 | ||
| 253 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 250 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
| 254 | return -EIO; | 251 | return -EIO; |
| @@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
| 262 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); | 259 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); |
| 263 | } | 260 | } |
| 264 | 261 | ||
| 265 | spin_lock_irq(&qdio->req_q_lock); | ||
| 266 | return -EIO; | 262 | return -EIO; |
| 267 | } | 263 | } |
| 268 | 264 | ||
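Editor's note: wait_event_interruptible_lock_irq_timeout() evaluates its condition with the caller's lock held and drops that lock atomically while sleeping, which is why the explicit unlock/lock pair around the wait could be deleted. The closest userspace cousin is pthread_cond_timedwait(); a sketch under that assumption (nothing signals here, so the wait deliberately times out):

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool resource_free;

static int wait_for_resource(int secs)
{
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&lock);
	while (!resource_free) {	/* condition checked under the lock */
		/* the lock is released atomically while sleeping ... */
		if (pthread_cond_timedwait(&cond, &lock, &ts) == ETIMEDOUT) {
			pthread_mutex_unlock(&lock);
			return -1;	/* timed out */
		}
		/* ... and is held again whenever the wait returns */
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return wait_for_resource(1) == -1 ? 0 : 1;	/* expect timeout */
}
```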
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 3f01bbf0609f..890639274bcf 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
| @@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ | |||
| 27 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ | 27 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ |
| 28 | zfcp_sysfs_##_feat##_##_name##_show, NULL); | 28 | zfcp_sysfs_##_feat##_##_name##_show, NULL); |
| 29 | 29 | ||
| 30 | #define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \ | ||
| 31 | static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ | ||
| 32 | struct device_attribute *at,\ | ||
| 33 | char *buf) \ | ||
| 34 | { \ | ||
| 35 | return sprintf(buf, _format, _value); \ | ||
| 36 | } \ | ||
| 37 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ | ||
| 38 | zfcp_sysfs_##_feat##_##_name##_show, NULL); | ||
| 39 | |||
| 30 | #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ | 40 | #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ |
| 31 | static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ | 41 | static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ |
| 32 | struct device_attribute *at,\ | 42 | struct device_attribute *at,\ |
| @@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", | |||
| 75 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", | 85 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", |
| 76 | (zfcp_unit_sdev_status(unit) & | 86 | (zfcp_unit_sdev_status(unit) & |
| 77 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); | 87 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); |
| 88 | ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0); | ||
| 89 | ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0); | ||
| 78 | 90 | ||
| 79 | static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, | 91 | static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, |
| 80 | struct device_attribute *attr, | 92 | struct device_attribute *attr, |
| @@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = { | |||
| 347 | &dev_attr_unit_in_recovery.attr, | 359 | &dev_attr_unit_in_recovery.attr, |
| 348 | &dev_attr_unit_status.attr, | 360 | &dev_attr_unit_status.attr, |
| 349 | &dev_attr_unit_access_denied.attr, | 361 | &dev_attr_unit_access_denied.attr, |
| 362 | &dev_attr_unit_access_shared.attr, | ||
| 363 | &dev_attr_unit_access_readonly.attr, | ||
| 350 | NULL | 364 | NULL |
| 351 | }; | 365 | }; |
| 352 | static struct attribute_group zfcp_unit_attr_group = { | 366 | static struct attribute_group zfcp_unit_attr_group = { |
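Editor's note: ZFCP_DEFINE_ATTR_CONST stamps out one trivial show() routine per constant sysfs attribute instead of hand-writing each. A userspace reduction of the same macro trick:

```c
#include <stdio.h>

/* One macro generates one formatter per attribute name. */
#define DEFINE_CONST_SHOW(name, format, value)			\
static int show_##name(char *buf, size_t len)			\
{								\
	return snprintf(buf, len, format, value);		\
}

DEFINE_CONST_SHOW(access_shared,   "%d\n", 0)
DEFINE_CONST_SHOW(access_readonly, "%d\n", 0)

int main(void)
{
	char buf[16];
	show_access_shared(buf, sizeof(buf));
	fputs(buf, stdout);			/* prints "0\n" */
	show_access_readonly(buf, sizeof(buf));
	fputs(buf, stdout);			/* prints "0\n" */
	return 0;
}
```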
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 48b2918e0d65..92ff027746f2 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -1353,7 +1353,6 @@ config SCSI_LPFC | |||
| 1353 | tristate "Emulex LightPulse Fibre Channel Support" | 1353 | tristate "Emulex LightPulse Fibre Channel Support" |
| 1354 | depends on PCI && SCSI | 1354 | depends on PCI && SCSI |
| 1355 | select SCSI_FC_ATTRS | 1355 | select SCSI_FC_ATTRS |
| 1356 | select GENERIC_CSUM | ||
| 1357 | select CRC_T10DIF | 1356 | select CRC_T10DIF |
| 1358 | help | 1357 | help |
| 1359 | This lpfc driver supports the Emulex LightPulse | 1358 | This lpfc driver supports the Emulex LightPulse |
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index b6d1f92ed33c..c18c68150e9f 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
| @@ -38,7 +38,7 @@ | |||
| 38 | 38 | ||
| 39 | #define DRV_NAME "fnic" | 39 | #define DRV_NAME "fnic" |
| 40 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" | 40 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" |
| 41 | #define DRV_VERSION "1.5.0.22" | 41 | #define DRV_VERSION "1.5.0.23" |
| 42 | #define PFX DRV_NAME ": " | 42 | #define PFX DRV_NAME ": " |
| 43 | #define DFX DRV_NAME "%d: " | 43 | #define DFX DRV_NAME "%d: " |
| 44 | 44 | ||
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 5f09d1814d26..42e15ee6e1bb 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
| @@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 642 | INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); | 642 | INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); |
| 643 | INIT_WORK(&fnic->event_work, fnic_handle_event); | 643 | INIT_WORK(&fnic->event_work, fnic_handle_event); |
| 644 | skb_queue_head_init(&fnic->fip_frame_queue); | 644 | skb_queue_head_init(&fnic->fip_frame_queue); |
| 645 | spin_lock_irqsave(&fnic_list_lock, flags); | ||
| 646 | if (!fnic_fip_queue) { | ||
| 647 | fnic_fip_queue = | ||
| 648 | create_singlethread_workqueue("fnic_fip_q"); | ||
| 649 | if (!fnic_fip_queue) { | ||
| 650 | spin_unlock_irqrestore(&fnic_list_lock, flags); | ||
| 651 | printk(KERN_ERR PFX "fnic FIP work queue " | ||
| 652 | "create failed\n"); | ||
| 653 | err = -ENOMEM; | ||
| 654 | goto err_out_free_max_pool; | ||
| 655 | } | ||
| 656 | } | ||
| 657 | spin_unlock_irqrestore(&fnic_list_lock, flags); | ||
| 658 | INIT_LIST_HEAD(&fnic->evlist); | 645 | INIT_LIST_HEAD(&fnic->evlist); |
| 659 | INIT_LIST_HEAD(&fnic->vlans); | 646 | INIT_LIST_HEAD(&fnic->vlans); |
| 660 | } else { | 647 | } else { |
| @@ -960,6 +947,13 @@ static int __init fnic_init_module(void) | |||
| 960 | spin_lock_init(&fnic_list_lock); | 947 | spin_lock_init(&fnic_list_lock); |
| 961 | INIT_LIST_HEAD(&fnic_list); | 948 | INIT_LIST_HEAD(&fnic_list); |
| 962 | 949 | ||
| 950 | fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); | ||
| 951 | if (!fnic_fip_queue) { | ||
| 952 | printk(KERN_ERR PFX "fnic FIP work queue create failed\n"); | ||
| 953 | err = -ENOMEM; | ||
| 954 | goto err_create_fip_workq; | ||
| 955 | } | ||
| 956 | |||
| 963 | fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); | 957 | fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); |
| 964 | if (!fnic_fc_transport) { | 958 | if (!fnic_fc_transport) { |
| 965 | printk(KERN_ERR PFX "fc_attach_transport error\n"); | 959 | printk(KERN_ERR PFX "fc_attach_transport error\n"); |
| @@ -978,6 +972,8 @@ static int __init fnic_init_module(void) | |||
| 978 | err_pci_register: | 972 | err_pci_register: |
| 979 | fc_release_transport(fnic_fc_transport); | 973 | fc_release_transport(fnic_fc_transport); |
| 980 | err_fc_transport: | 974 | err_fc_transport: |
| 975 | destroy_workqueue(fnic_fip_queue); | ||
| 976 | err_create_fip_workq: | ||
| 981 | destroy_workqueue(fnic_event_queue); | 977 | destroy_workqueue(fnic_event_queue); |
| 982 | err_create_fnic_workq: | 978 | err_create_fnic_workq: |
| 983 | kmem_cache_destroy(fnic_io_req_cache); | 979 | kmem_cache_destroy(fnic_io_req_cache); |
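The fnic hunks above hoist creation of the single shared FIP workqueue out of the per-adapter probe path into module init, and unwind on failure through goto labels in reverse order of setup. A minimal userspace sketch of that unwind pattern, with illustrative resource names rather than the driver's real APIs:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the module-wide resources set up at init time. */
    static void *fip_queue, *transport;

    static void *acquire(const char *name) { printf("acquire %s\n", name); return malloc(1); }
    static void release(void *r, const char *name) { printf("release %s\n", name); free(r); }

    static int init_module_sketch(void)
    {
        int err = -1;

        fip_queue = acquire("fip workqueue");
        if (!fip_queue)
            goto err_create_fip_workq;

        transport = acquire("fc transport");
        if (!transport)
            goto err_fc_transport;

        return 0;    /* success: both resources stay live for the module */

    /* Unwind strictly in reverse order of acquisition. */
    err_fc_transport:
        release(fip_queue, "fip workqueue");
    err_create_fip_workq:
        return err;
    }

    int main(void) { return init_module_sketch() ? 1 : 0; }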
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 7b082157eb79..99d2930b18c8 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
| @@ -185,7 +185,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) | |||
| 185 | cmd_iu->_r_c = 0; | 185 | cmd_iu->_r_c = 0; |
| 186 | 186 | ||
| 187 | sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, | 187 | sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, |
| 188 | task->ssp_task.cmd->cmd_len / sizeof(u32)); | 188 | (task->ssp_task.cmd->cmd_len+3) / sizeof(u32)); |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) | 191 | static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) |
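The isci fix above rounds the CDB length up to whole 32-bit words before the swab copy; plain integer division truncates, so a 6-byte CDB would have copied only one word and dropped the last two bytes. A quick check of both expressions:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned cmd_len = 6;    /* e.g. a 6-byte SCSI CDB */

        /* Truncating: 6 / 4 == 1 word, losing bytes 4-5. */
        printf("truncated:  %u words\n", cmd_len / (unsigned)sizeof(uint32_t));

        /* Rounding up: (6 + 3) / 4 == 2 words, covering all 6 bytes. */
        printf("rounded up: %u words\n", (cmd_len + 3) / (unsigned)sizeof(uint32_t));
        return 0;
    }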
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 9bb020ac089c..0d30ca849e8f 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
| @@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task) | |||
| 491 | struct isci_tmf tmf; | 491 | struct isci_tmf tmf; |
| 492 | int ret = TMF_RESP_FUNC_FAILED; | 492 | int ret = TMF_RESP_FUNC_FAILED; |
| 493 | unsigned long flags; | 493 | unsigned long flags; |
| 494 | int target_done_already = 0; | ||
| 494 | 495 | ||
| 495 | /* Get the isci_request reference from the task. Note that | 496 | /* Get the isci_request reference from the task. Note that |
| 496 | * this check does not depend on the pending request list | 497 | * this check does not depend on the pending request list |
| @@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task) | |||
| 505 | /* If task is already done, the request isn't valid */ | 506 | /* If task is already done, the request isn't valid */ |
| 506 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && | 507 | if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && |
| 507 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && | 508 | (task->task_state_flags & SAS_TASK_AT_INITIATOR) && |
| 508 | old_request) | 509 | old_request) { |
| 509 | idev = isci_get_device(task->dev->lldd_dev); | 510 | idev = isci_get_device(task->dev->lldd_dev); |
| 510 | 511 | target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET, | |
| 512 | &old_request->flags); | ||
| 513 | } | ||
| 511 | spin_unlock(&task->task_state_lock); | 514 | spin_unlock(&task->task_state_lock); |
| 512 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 515 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
| 513 | 516 | ||
| @@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task) | |||
| 561 | 564 | ||
| 562 | if (task->task_proto == SAS_PROTOCOL_SMP || | 565 | if (task->task_proto == SAS_PROTOCOL_SMP || |
| 563 | sas_protocol_ata(task->task_proto) || | 566 | sas_protocol_ata(task->task_proto) || |
| 564 | test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || | 567 | target_done_already || |
| 565 | test_bit(IDEV_GONE, &idev->flags)) { | 568 | test_bit(IDEV_GONE, &idev->flags)) { |
| 566 | 569 | ||
| 567 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 570 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0177295599e0..1f0ca68409d4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
| 3547 | break; | 3547 | break; |
| 3548 | } | 3548 | } |
| 3549 | 3549 | ||
| 3550 | /* | 3550 | if (megasas_transition_to_ready(instance, 0)) { |
| 3551 | * We expect the FW state to be READY | 3551 | atomic_set(&instance->fw_reset_no_pci_access, 1); |
| 3552 | */ | 3552 | instance->instancet->adp_reset |
| 3553 | if (megasas_transition_to_ready(instance, 0)) | 3553 | (instance, instance->reg_set); |
| 3554 | goto fail_ready_state; | 3554 | atomic_set(&instance->fw_reset_no_pci_access, 0); |
| 3555 | dev_info(&instance->pdev->dev, | ||
| 3556 | "megasas: FW restarted successfully from %s!\n", | ||
| 3557 | __func__); | ||
| 3558 | |||
| 3559 | /* waiting for about 30 seconds before retry */ | ||
| 3560 | ssleep(30); | ||
| 3561 | |||
| 3562 | if (megasas_transition_to_ready(instance, 0)) | ||
| 3563 | goto fail_ready_state; | ||
| 3564 | } | ||
| 3555 | 3565 | ||
| 3556 | /* | 3566 | /* |
| 3557 | * MSI-X host index 0 is common for all adapter. | 3567 | * MSI-X host index 0 is common for all adapter. |
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index f14665a6293d..6b1b4e91e53f 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c | |||
| @@ -1857,11 +1857,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) | |||
| 1857 | goto out; | 1857 | goto out; |
| 1858 | } | 1858 | } |
| 1859 | 1859 | ||
| 1860 | /* error info record present */ | 1860 | /* |
| 1861 | if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { | 1861 | * error info record present; slot->response is 32 bit aligned but may |
| 1862 | * not be 64 bit aligned, so check for zero in two 32 bit reads | ||
| 1863 | */ | ||
| 1864 | if (unlikely((rx_desc & RXQ_ERR) | ||
| 1865 | && (*((u32 *)slot->response) | ||
| 1866 | || *(((u32 *)slot->response) + 1)))) { | ||
| 1862 | mv_dprintk("port %d slot %d rx_desc %X has error info" | 1867 | mv_dprintk("port %d slot %d rx_desc %X has error info" |
| 1863 | "%016llX.\n", slot->port->sas_port.id, slot_idx, | 1868 | "%016llX.\n", slot->port->sas_port.id, slot_idx, |
| 1864 | rx_desc, (u64)(*(u64 *)slot->response)); | 1869 | rx_desc, get_unaligned_le64(slot->response)); |
| 1865 | tstat->stat = mvs_slot_err(mvi, task, slot_idx); | 1870 | tstat->stat = mvs_slot_err(mvi, task, slot_idx); |
| 1866 | tstat->resp = SAS_TASK_COMPLETE; | 1871 | tstat->resp = SAS_TASK_COMPLETE; |
| 1867 | goto out; | 1872 | goto out; |
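As the new comment notes, slot->response is only guaranteed 32-bit alignment, so the old *(u64 *) dereference is undefined behaviour on strict-alignment machines; the fix tests the two 32-bit halves and uses get_unaligned_le64() only for printing. A userspace sketch of the same idea, with memcpy standing in for the kernel helper (and assuming a little-endian host for the printed value):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* memcpy has no alignment requirement, unlike a u64 pointer dereference. */
    static uint64_t load_unaligned_u64(const void *p)
    {
        uint64_t v;

        memcpy(&v, p, sizeof(v));
        return v;
    }

    int main(void)
    {
        /* 4-byte-aligned buffer: offset 4 is not 8-byte aligned. */
        uint32_t buf[4] = { 0, 0x11223344u, 0x55667788u, 0 };
        const void *resp = &buf[1];

        /* Zero test via two 32-bit reads, as in the fixed driver code. */
        if (*(const uint32_t *)resp || *((const uint32_t *)resp + 1))
            printf("error info %016llx\n",
                   (unsigned long long)load_unaligned_u64(resp));
        return 0;
    }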
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 60e2fb7f2dca..d6b19dc80bee 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <linux/irq.h> | 39 | #include <linux/irq.h> |
| 40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
| 41 | #include <linux/vmalloc.h> | 41 | #include <linux/vmalloc.h> |
| 42 | #include <asm/unaligned.h> | ||
| 42 | #include <scsi/libsas.h> | 43 | #include <scsi/libsas.h> |
| 43 | #include <scsi/scsi.h> | 44 | #include <scsi/scsi.h> |
| 44 | #include <scsi/scsi_tcq.h> | 45 | #include <scsi/scsi_tcq.h> |
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 5456f5c73593..4a2195752198 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c | |||
| @@ -221,7 +221,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
| 221 | pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; | 221 | pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; |
| 222 | for (i = 0; i < PM8001_MAX_INB_NUM; i++) { | 222 | for (i = 0; i < PM8001_MAX_INB_NUM; i++) { |
| 223 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = | 223 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = |
| 224 | PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); | 224 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); |
| 225 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = | 225 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = |
| 226 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; | 226 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; |
| 227 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = | 227 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = |
| @@ -247,7 +247,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
| 247 | } | 247 | } |
| 248 | for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { | 248 | for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { |
| 249 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = | 249 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = |
| 250 | PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); | 250 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); |
| 251 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = | 251 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = |
| 252 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; | 252 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; |
| 253 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = | 253 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = |
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 7f77210f5cf3..9f91030211e8 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c | |||
| @@ -275,7 +275,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
| 275 | 275 | ||
| 276 | for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { | 276 | for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { |
| 277 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = | 277 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = |
| 278 | PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); | 278 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); |
| 279 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = | 279 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = |
| 280 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; | 280 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; |
| 281 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = | 281 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = |
| @@ -301,7 +301,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
| 301 | } | 301 | } |
| 302 | for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { | 302 | for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { |
| 303 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = | 303 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = |
| 304 | PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); | 304 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); |
| 305 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = | 305 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = |
| 306 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; | 306 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; |
| 307 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = | 307 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = |
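Both pm8001 files stop hard-coding 64 and pack the adapter's actual IOMB size into the queue descriptor word, where the element size occupies bits 16-29 and the direction flag bit 30. A sketch of the packing; the depth constant below is illustrative, not the real PM8001_MPI_QUEUE value:

    #include <stdio.h>
    #include <stdint.h>

    #define MPI_QUEUE_DEPTH 256u    /* illustrative stand-in for PM8001_MPI_QUEUE */

    static uint32_t pack_q_desc(uint32_t iomb_size, uint32_t dir)
    {
        /* depth in bits 0-15, element size in bits 16-29, direction in bit 30 */
        return MPI_QUEUE_DEPTH | (iomb_size << 16) | (dir << 30);
    }

    int main(void)
    {
        /* SPC uses 64-byte IOMBs, SPCv larger -- hence a field, not a constant. */
        printf("inbound:  %08x\n", (unsigned)pack_q_desc(64, 0));
        printf("outbound: %08x\n", (unsigned)pack_q_desc(128, 1));
        return 0;
    }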
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 42ef481db942..ef0a5481b9dd 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
| @@ -419,6 +419,8 @@ qla2x00_start_scsi(srb_t *sp) | |||
| 419 | __constant_cpu_to_le16(CF_SIMPLE_TAG); | 419 | __constant_cpu_to_le16(CF_SIMPLE_TAG); |
| 420 | break; | 420 | break; |
| 421 | } | 421 | } |
| 422 | } else { | ||
| 423 | cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); | ||
| 422 | } | 424 | } |
| 423 | 425 | ||
| 424 | /* Load SCSI command packet. */ | 426 | /* Load SCSI command packet. */ |
| @@ -1307,11 +1309,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
| 1307 | fcp_cmnd->task_attribute = TSK_ORDERED; | 1309 | fcp_cmnd->task_attribute = TSK_ORDERED; |
| 1308 | break; | 1310 | break; |
| 1309 | default: | 1311 | default: |
| 1310 | fcp_cmnd->task_attribute = 0; | 1312 | fcp_cmnd->task_attribute = TSK_SIMPLE; |
| 1311 | break; | 1313 | break; |
| 1312 | } | 1314 | } |
| 1313 | } else { | 1315 | } else { |
| 1314 | fcp_cmnd->task_attribute = 0; | 1316 | fcp_cmnd->task_attribute = TSK_SIMPLE; |
| 1315 | } | 1317 | } |
| 1316 | 1318 | ||
| 1317 | cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ | 1319 | cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ |
| @@ -1525,7 +1527,12 @@ qla24xx_start_scsi(srb_t *sp) | |||
| 1525 | case ORDERED_QUEUE_TAG: | 1527 | case ORDERED_QUEUE_TAG: |
| 1526 | cmd_pkt->task = TSK_ORDERED; | 1528 | cmd_pkt->task = TSK_ORDERED; |
| 1527 | break; | 1529 | break; |
| 1530 | default: | ||
| 1531 | cmd_pkt->task = TSK_SIMPLE; | ||
| 1532 | break; | ||
| 1528 | } | 1533 | } |
| 1534 | } else { | ||
| 1535 | cmd_pkt->task = TSK_SIMPLE; | ||
| 1529 | } | 1536 | } |
| 1530 | 1537 | ||
| 1531 | /* Load SCSI command packet. */ | 1538 | /* Load SCSI command packet. */ |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3b1ea34e1f5a..eaa808e6ba91 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, | |||
| 1031 | { | 1031 | { |
| 1032 | int i, result; | 1032 | int i, result; |
| 1033 | 1033 | ||
| 1034 | if (sdev->skip_vpd_pages) | ||
| 1035 | goto fail; | ||
| 1036 | |||
| 1034 | /* Ask for all the pages supported by this device */ | 1037 | /* Ask for all the pages supported by this device */ |
| 1035 | result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); | 1038 | result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); |
| 1036 | if (result) | 1039 | if (result) |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 80f39b8b0223..86fcf2c313ad 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -838,10 +838,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) | |||
| 838 | 838 | ||
| 839 | static void sd_unprep_fn(struct request_queue *q, struct request *rq) | 839 | static void sd_unprep_fn(struct request_queue *q, struct request *rq) |
| 840 | { | 840 | { |
| 841 | struct scsi_cmnd *SCpnt = rq->special; | ||
| 842 | |||
| 841 | if (rq->cmd_flags & REQ_DISCARD) { | 843 | if (rq->cmd_flags & REQ_DISCARD) { |
| 842 | free_page((unsigned long)rq->buffer); | 844 | free_page((unsigned long)rq->buffer); |
| 843 | rq->buffer = NULL; | 845 | rq->buffer = NULL; |
| 844 | } | 846 | } |
| 847 | if (SCpnt->cmnd != rq->cmd) { | ||
| 848 | mempool_free(SCpnt->cmnd, sd_cdb_pool); | ||
| 849 | SCpnt->cmnd = NULL; | ||
| 850 | SCpnt->cmd_len = 0; | ||
| 851 | } | ||
| 845 | } | 852 | } |
| 846 | 853 | ||
| 847 | /** | 854 | /** |
| @@ -1720,21 +1727,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
| 1720 | if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) | 1727 | if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) |
| 1721 | sd_dif_complete(SCpnt, good_bytes); | 1728 | sd_dif_complete(SCpnt, good_bytes); |
| 1722 | 1729 | ||
| 1723 | if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type) | ||
| 1724 | == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) { | ||
| 1725 | |||
| 1726 | /* We have to print a failed command here as the | ||
| 1727 | * extended CDB gets freed before scsi_io_completion() | ||
| 1728 | * is called. | ||
| 1729 | */ | ||
| 1730 | if (result) | ||
| 1731 | scsi_print_command(SCpnt); | ||
| 1732 | |||
| 1733 | mempool_free(SCpnt->cmnd, sd_cdb_pool); | ||
| 1734 | SCpnt->cmnd = NULL; | ||
| 1735 | SCpnt->cmd_len = 0; | ||
| 1736 | } | ||
| 1737 | |||
| 1738 | return good_bytes; | 1730 | return good_bytes; |
| 1739 | } | 1731 | } |
| 1740 | 1732 | ||
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 2168258fb2c3..74b88efde6ad 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
| @@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) | |||
| 751 | 751 | ||
| 752 | vscsi->affinity_hint_set = true; | 752 | vscsi->affinity_hint_set = true; |
| 753 | } else { | 753 | } else { |
| 754 | for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++) | 754 | for (i = 0; i < vscsi->num_queues; i++) |
| 755 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); | 755 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); |
| 756 | 756 | ||
| 757 | vscsi->affinity_hint_set = false; | 757 | vscsi->affinity_hint_set = false; |
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 222d3e37fc28..707966bd5610 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
| @@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
| 609 | else | 609 | else |
| 610 | buf = (void *)t->tx_buf; | 610 | buf = (void *)t->tx_buf; |
| 611 | t->tx_dma = dma_map_single(&spi->dev, buf, | 611 | t->tx_dma = dma_map_single(&spi->dev, buf, |
| 612 | t->len, DMA_FROM_DEVICE); | 612 | t->len, DMA_TO_DEVICE); |
| 613 | if (!t->tx_dma) { | 613 | if (!t->tx_dma) { |
| 614 | ret = -EFAULT; | 614 | ret = -EFAULT; |
| 615 | goto err_tx_map; | 615 | goto err_tx_map; |
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index 080abf2faf97..a8c344422a77 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c | |||
| @@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 469 | unsigned long nr_segs, loff_t ppos) | 469 | unsigned long nr_segs, loff_t ppos) |
| 470 | { | 470 | { |
| 471 | struct logger_log *log = file_get_log(iocb->ki_filp); | 471 | struct logger_log *log = file_get_log(iocb->ki_filp); |
| 472 | size_t orig = log->w_off; | 472 | size_t orig; |
| 473 | struct logger_entry header; | 473 | struct logger_entry header; |
| 474 | struct timespec now; | 474 | struct timespec now; |
| 475 | ssize_t ret = 0; | 475 | ssize_t ret = 0; |
| @@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 490 | 490 | ||
| 491 | mutex_lock(&log->mutex); | 491 | mutex_lock(&log->mutex); |
| 492 | 492 | ||
| 493 | orig = log->w_off; | ||
| 494 | |||
| 493 | /* | 495 | /* |
| 494 | * Fix up any readers, pulling them forward to the first readable | 496 | * Fix up any readers, pulling them forward to the first readable |
| 495 | * entry after (what will be) the new write offset. We do this now | 497 | * entry after (what will be) the new write offset. We do this now |
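The logger fix reads log->w_off only after log->mutex is held; the old code snapshotted it at entry, where a concurrent writer could advance the offset and leave the reader fix-up working from a stale value. A pthread sketch of the rule that the snapshot and the update must sit under the same lock (names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static size_t w_off;    /* shared write offset */

    static void writer_append(size_t len)
    {
        pthread_mutex_lock(&lock);

        /* Snapshot the offset only while holding the lock... */
        size_t orig = w_off;

        /* ...so everything derived from it stays consistent. */
        w_off = orig + len;
        printf("wrote %zu bytes at offset %zu\n", len, orig);

        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        writer_append(16);
        writer_append(8);
        return 0;
    }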
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO index b10f739b7e3e..fa8da9aada30 100644 --- a/drivers/staging/comedi/TODO +++ b/drivers/staging/comedi/TODO | |||
| @@ -9,4 +9,4 @@ TODO: | |||
| 9 | Please send patches to Greg Kroah-Hartman <greg@kroah.com> and | 9 | Please send patches to Greg Kroah-Hartman <greg@kroah.com> and |
| 10 | copy: | 10 | copy: |
| 11 | Ian Abbott <abbotti@mev.co.uk> | 11 | Ian Abbott <abbotti@mev.co.uk> |
| 12 | Frank Mori Hess <fmhess@users.sourceforge.net> | 12 | H Hartley Sweeten <hsweeten@visionengravers.com> |
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 8647518259f6..f4a197b2d1fd 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c | |||
| @@ -1413,22 +1413,19 @@ static int do_cmd_ioctl(struct comedi_device *dev, | |||
| 1413 | DPRINTK("subdevice busy\n"); | 1413 | DPRINTK("subdevice busy\n"); |
| 1414 | return -EBUSY; | 1414 | return -EBUSY; |
| 1415 | } | 1415 | } |
| 1416 | s->busy = file; | ||
| 1417 | 1416 | ||
| 1418 | /* make sure channel/gain list isn't too long */ | 1417 | /* make sure channel/gain list isn't too long */ |
| 1419 | if (cmd.chanlist_len > s->len_chanlist) { | 1418 | if (cmd.chanlist_len > s->len_chanlist) { |
| 1420 | DPRINTK("channel/gain list too long %u > %d\n", | 1419 | DPRINTK("channel/gain list too long %u > %d\n", |
| 1421 | cmd.chanlist_len, s->len_chanlist); | 1420 | cmd.chanlist_len, s->len_chanlist); |
| 1422 | ret = -EINVAL; | 1421 | return -EINVAL; |
| 1423 | goto cleanup; | ||
| 1424 | } | 1422 | } |
| 1425 | 1423 | ||
| 1426 | /* make sure channel/gain list isn't too short */ | 1424 | /* make sure channel/gain list isn't too short */ |
| 1427 | if (cmd.chanlist_len < 1) { | 1425 | if (cmd.chanlist_len < 1) { |
| 1428 | DPRINTK("channel/gain list too short %u < 1\n", | 1426 | DPRINTK("channel/gain list too short %u < 1\n", |
| 1429 | cmd.chanlist_len); | 1427 | cmd.chanlist_len); |
| 1430 | ret = -EINVAL; | 1428 | return -EINVAL; |
| 1431 | goto cleanup; | ||
| 1432 | } | 1429 | } |
| 1433 | 1430 | ||
| 1434 | async->cmd = cmd; | 1431 | async->cmd = cmd; |
| @@ -1438,8 +1435,7 @@ static int do_cmd_ioctl(struct comedi_device *dev, | |||
| 1438 | kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL); | 1435 | kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL); |
| 1439 | if (!async->cmd.chanlist) { | 1436 | if (!async->cmd.chanlist) { |
| 1440 | DPRINTK("allocation failed\n"); | 1437 | DPRINTK("allocation failed\n"); |
| 1441 | ret = -ENOMEM; | 1438 | return -ENOMEM; |
| 1442 | goto cleanup; | ||
| 1443 | } | 1439 | } |
| 1444 | 1440 | ||
| 1445 | if (copy_from_user(async->cmd.chanlist, user_chanlist, | 1441 | if (copy_from_user(async->cmd.chanlist, user_chanlist, |
| @@ -1491,6 +1487,9 @@ static int do_cmd_ioctl(struct comedi_device *dev, | |||
| 1491 | 1487 | ||
| 1492 | comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING); | 1488 | comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING); |
| 1493 | 1489 | ||
| 1490 | /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with | ||
| 1491 | * comedi_read() or comedi_write() */ | ||
| 1492 | s->busy = file; | ||
| 1494 | ret = s->do_cmd(dev, s); | 1493 | ret = s->do_cmd(dev, s); |
| 1495 | if (ret == 0) | 1494 | if (ret == 0) |
| 1496 | return 0; | 1495 | return 0; |
| @@ -1705,6 +1704,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, | |||
| 1705 | void *file) | 1704 | void *file) |
| 1706 | { | 1705 | { |
| 1707 | struct comedi_subdevice *s; | 1706 | struct comedi_subdevice *s; |
| 1707 | int ret; | ||
| 1708 | 1708 | ||
| 1709 | if (arg >= dev->n_subdevices) | 1709 | if (arg >= dev->n_subdevices) |
| 1710 | return -EINVAL; | 1710 | return -EINVAL; |
| @@ -1721,7 +1721,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, | |||
| 1721 | if (s->busy != file) | 1721 | if (s->busy != file) |
| 1722 | return -EBUSY; | 1722 | return -EBUSY; |
| 1723 | 1723 | ||
| 1724 | return do_cancel(dev, s); | 1724 | ret = do_cancel(dev, s); |
| 1725 | if (comedi_get_subdevice_runflags(s) & SRF_USER) | ||
| 1726 | wake_up_interruptible(&s->async->wait_head); | ||
| 1727 | |||
| 1728 | return ret; | ||
| 1725 | } | 1729 | } |
| 1726 | 1730 | ||
| 1727 | /* | 1731 | /* |
| @@ -2053,11 +2057,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, | |||
| 2053 | 2057 | ||
| 2054 | if (!comedi_is_subdevice_running(s)) { | 2058 | if (!comedi_is_subdevice_running(s)) { |
| 2055 | if (count == 0) { | 2059 | if (count == 0) { |
| 2060 | mutex_lock(&dev->mutex); | ||
| 2056 | if (comedi_is_subdevice_in_error(s)) | 2061 | if (comedi_is_subdevice_in_error(s)) |
| 2057 | retval = -EPIPE; | 2062 | retval = -EPIPE; |
| 2058 | else | 2063 | else |
| 2059 | retval = 0; | 2064 | retval = 0; |
| 2060 | do_become_nonbusy(dev, s); | 2065 | do_become_nonbusy(dev, s); |
| 2066 | mutex_unlock(&dev->mutex); | ||
| 2061 | } | 2067 | } |
| 2062 | break; | 2068 | break; |
| 2063 | } | 2069 | } |
| @@ -2156,11 +2162,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, | |||
| 2156 | 2162 | ||
| 2157 | if (n == 0) { | 2163 | if (n == 0) { |
| 2158 | if (!comedi_is_subdevice_running(s)) { | 2164 | if (!comedi_is_subdevice_running(s)) { |
| 2165 | mutex_lock(&dev->mutex); | ||
| 2159 | do_become_nonbusy(dev, s); | 2166 | do_become_nonbusy(dev, s); |
| 2160 | if (comedi_is_subdevice_in_error(s)) | 2167 | if (comedi_is_subdevice_in_error(s)) |
| 2161 | retval = -EPIPE; | 2168 | retval = -EPIPE; |
| 2162 | else | 2169 | else |
| 2163 | retval = 0; | 2170 | retval = 0; |
| 2171 | mutex_unlock(&dev->mutex); | ||
| 2164 | break; | 2172 | break; |
| 2165 | } | 2173 | } |
| 2166 | if (file->f_flags & O_NONBLOCK) { | 2174 | if (file->f_flags & O_NONBLOCK) { |
| @@ -2198,9 +2206,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, | |||
| 2198 | buf += n; | 2206 | buf += n; |
| 2199 | break; /* makes device work like a pipe */ | 2207 | break; /* makes device work like a pipe */ |
| 2200 | } | 2208 | } |
| 2201 | if (comedi_is_subdevice_idle(s) && | 2209 | if (comedi_is_subdevice_idle(s)) { |
| 2202 | async->buf_read_count - async->buf_write_count == 0) { | 2210 | mutex_lock(&dev->mutex); |
| 2203 | do_become_nonbusy(dev, s); | 2211 | if (async->buf_read_count - async->buf_write_count == 0) |
| 2212 | do_become_nonbusy(dev, s); | ||
| 2213 | mutex_unlock(&dev->mutex); | ||
| 2204 | } | 2214 | } |
| 2205 | set_current_state(TASK_RUNNING); | 2215 | set_current_state(TASK_RUNNING); |
| 2206 | remove_wait_queue(&async->wait_head, &wait); | 2216 | remove_wait_queue(&async->wait_head, &wait); |
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index e25eba5713c1..b3b5125faa72 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
| @@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
| 482 | ret = comedi_device_postconfig(dev); | 482 | ret = comedi_device_postconfig(dev); |
| 483 | if (ret < 0) { | 483 | if (ret < 0) { |
| 484 | comedi_device_detach(dev); | 484 | comedi_device_detach(dev); |
| 485 | module_put(dev->driver->module); | 485 | module_put(driv->module); |
| 486 | } | 486 | } |
| 487 | /* On success, the driver module count has been incremented. */ | 487 | /* On success, the driver module count has been incremented. */ |
| 488 | return ret; | 488 | return ret; |
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c index 5590ebf1da15..817f837b240d 100644 --- a/drivers/staging/frontier/alphatrack.c +++ b/drivers/staging/frontier/alphatrack.c | |||
| @@ -827,11 +827,11 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf) | |||
| 827 | mutex_unlock(&dev->mtx); | 827 | mutex_unlock(&dev->mtx); |
| 828 | usb_alphatrack_delete(dev); | 828 | usb_alphatrack_delete(dev); |
| 829 | } else { | 829 | } else { |
| 830 | atomic_set(&dev->writes_pending, 0); | ||
| 830 | dev->intf = NULL; | 831 | dev->intf = NULL; |
| 831 | mutex_unlock(&dev->mtx); | 832 | mutex_unlock(&dev->mtx); |
| 832 | } | 833 | } |
| 833 | 834 | ||
| 834 | atomic_set(&dev->writes_pending, 0); | ||
| 835 | mutex_unlock(&disconnect_mutex); | 835 | mutex_unlock(&disconnect_mutex); |
| 836 | 836 | ||
| 837 | dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n", | 837 | dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n", |
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c index b795353e8348..cc3692439a5c 100644 --- a/drivers/staging/gdm72xx/gdm_qos.c +++ b/drivers/staging/gdm72xx/gdm_qos.c | |||
| @@ -250,8 +250,8 @@ static void send_qos_list(struct nic *nic, struct list_head *head) | |||
| 250 | 250 | ||
| 251 | list_for_each_entry_safe(entry, n, head, list) { | 251 | list_for_each_entry_safe(entry, n, head, list) { |
| 252 | list_del(&entry->list); | 252 | list_del(&entry->list); |
| 253 | free_qos_entry(entry); | ||
| 254 | gdm_wimax_send_tx(entry->skb, entry->dev); | 253 | gdm_wimax_send_tx(entry->skb, entry->dev); |
| 254 | free_qos_entry(entry); | ||
| 255 | } | 255 | } |
| 256 | } | 256 | } |
| 257 | 257 | ||
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig index 22339059837f..bd0f2fd01db4 100644 --- a/drivers/staging/imx-drm/Kconfig +++ b/drivers/staging/imx-drm/Kconfig | |||
| @@ -33,7 +33,6 @@ config DRM_IMX_TVE | |||
| 33 | config DRM_IMX_LDB | 33 | config DRM_IMX_LDB |
| 34 | tristate "Support for LVDS displays" | 34 | tristate "Support for LVDS displays" |
| 35 | depends on DRM_IMX | 35 | depends on DRM_IMX |
| 36 | select OF_VIDEOMODE | ||
| 37 | help | 36 | help |
| 38 | Choose this to enable the internal LVDS Display Bridge (LDB) | 37 | Choose this to enable the internal LVDS Display Bridge (LDB) |
| 39 | found on i.MX53 and i.MX6 processors. | 38 | found on i.MX53 and i.MX6 processors. |
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c index c191ae203565..41e88abe47af 100644 --- a/drivers/staging/tidspbridge/pmgr/dbll.c +++ b/drivers/staging/tidspbridge/pmgr/dbll.c | |||
| @@ -1120,8 +1120,11 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this, | |||
| 1120 | or DYN_EXTERNAL, then mem granularity information is present | 1120 | or DYN_EXTERNAL, then mem granularity information is present |
| 1121 | within the section name - only process if there are at least three | 1121 | within the section name - only process if there are at least three |
| 1122 | tokens within the section name (just a minor optimization) */ | 1122 | tokens within the section name (just a minor optimization) */ |
| 1123 | if (count >= 3) | 1123 | if (count >= 3) { |
| 1124 | strict_strtol(sz_last_token, 10, (long *)&req); | 1124 | status = kstrtos32(sz_last_token, 10, &req); |
| 1125 | if (status) | ||
| 1126 | goto func_cont; | ||
| 1127 | } | ||
| 1125 | 1128 | ||
| 1126 | if ((req == 0) || (req == 1)) { | 1129 | if ((req == 0) || (req == 1)) { |
| 1127 | if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) { | 1130 | if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) { |
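strict_strtol() silently ignored parse failures (and the (long *) cast mis-sized the store on LP64 targets); kstrtos32() returns an error that the fixed code checks before using the value. The userspace equivalent with strtol and full error handling:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace analogue of kstrtos32(): nonzero on any parse error. */
    static int parse_s32(const char *s, int base, int *out)
    {
        char *end;
        long val;

        errno = 0;
        val = strtol(s, &end, base);
        if (errno || end == s || *end != '\0' || val < INT_MIN || val > INT_MAX)
            return -1;
        *out = (int)val;
        return 0;
    }

    int main(void)
    {
        int req;

        printf("\"1\"   -> %s\n", parse_s32("1", 10, &req) ? "error" : "ok");
        printf("\"DYN\" -> %s\n", parse_s32("DYN", 10, &req) ? "error" : "ok");
        return 0;
    }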
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index dcceed29d31a..81972fa47beb 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c | |||
| @@ -1811,10 +1811,12 @@ static int zcache_comp_init(void) | |||
| 1811 | #else | 1811 | #else |
| 1812 | if (*zcache_comp_name != '\0') { | 1812 | if (*zcache_comp_name != '\0') { |
| 1813 | ret = crypto_has_comp(zcache_comp_name, 0, 0); | 1813 | ret = crypto_has_comp(zcache_comp_name, 0, 0); |
| 1814 | if (!ret) | 1814 | if (!ret) { |
| 1815 | pr_info("zcache: %s not supported\n", | 1815 | pr_info("zcache: %s not supported\n", |
| 1816 | zcache_comp_name); | 1816 | zcache_comp_name); |
| 1817 | goto out; | 1817 | ret = 1; |
| 1818 | goto out; | ||
| 1819 | } | ||
| 1818 | } | 1820 | } |
| 1819 | if (!ret) | 1821 | if (!ret) |
| 1820 | strcpy(zcache_comp_name, "lzo"); | 1822 | strcpy(zcache_comp_name, "lzo"); |
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 82c7202fd5cc..e77fb6ea40c9 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c | |||
| @@ -527,8 +527,11 @@ static void zram_reset_device(struct zram *zram) | |||
| 527 | size_t index; | 527 | size_t index; |
| 528 | struct zram_meta *meta; | 528 | struct zram_meta *meta; |
| 529 | 529 | ||
| 530 | if (!zram->init_done) | 530 | down_write(&zram->init_lock); |
| 531 | if (!zram->init_done) { | ||
| 532 | up_write(&zram->init_lock); | ||
| 531 | return; | 533 | return; |
| 534 | } | ||
| 532 | 535 | ||
| 533 | meta = zram->meta; | 536 | meta = zram->meta; |
| 534 | zram->init_done = 0; | 537 | zram->init_done = 0; |
| @@ -549,6 +552,7 @@ static void zram_reset_device(struct zram *zram) | |||
| 549 | 552 | ||
| 550 | zram->disksize = 0; | 553 | zram->disksize = 0; |
| 551 | set_capacity(zram->disk, 0); | 554 | set_capacity(zram->disk, 0); |
| 555 | up_write(&zram->init_lock); | ||
| 552 | } | 556 | } |
| 553 | 557 | ||
| 554 | static void zram_init_device(struct zram *zram, struct zram_meta *meta) | 558 | static void zram_init_device(struct zram *zram, struct zram_meta *meta) |
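zram_reset_device() used to test init_done before taking any lock, so a racing initialisation could slip in between the check and the teardown; the fix holds init_lock across both. A rwlock sketch of check-then-act kept inside one critical section:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t init_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int init_done;

    static void reset_device(void)
    {
        pthread_rwlock_wrlock(&init_lock);
        if (!init_done) {            /* the check... */
            pthread_rwlock_unlock(&init_lock);
            return;
        }
        init_done = 0;               /* ...and the act, under the same lock */
        printf("device reset\n");
        pthread_rwlock_unlock(&init_lock);
    }

    int main(void)
    {
        init_done = 1;
        reset_device();
        reset_device();    /* second call sees !init_done and bails out */
        return 0;
    }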
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 5de56f671a9d..f36950e4134f 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c | |||
| @@ -54,6 +54,8 @@ MODULE_PARM_DESC(notify_delay_ms, | |||
| 54 | * is some wrong values returned by cpuid for number of thresholds. | 54 | * is some wrong values returned by cpuid for number of thresholds. |
| 55 | */ | 55 | */ |
| 56 | #define MAX_NUMBER_OF_TRIPS 2 | 56 | #define MAX_NUMBER_OF_TRIPS 2 |
| 57 | /* Limit number of package temp zones */ | ||
| 58 | #define MAX_PKG_TEMP_ZONE_IDS 256 | ||
| 57 | 59 | ||
| 58 | struct phy_dev_entry { | 60 | struct phy_dev_entry { |
| 59 | struct list_head list; | 61 | struct list_head list; |
| @@ -394,12 +396,16 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) | |||
| 394 | char buffer[30]; | 396 | char buffer[30]; |
| 395 | int thres_count; | 397 | int thres_count; |
| 396 | u32 eax, ebx, ecx, edx; | 398 | u32 eax, ebx, ecx, edx; |
| 399 | u8 *temp; | ||
| 397 | 400 | ||
| 398 | cpuid(6, &eax, &ebx, &ecx, &edx); | 401 | cpuid(6, &eax, &ebx, &ecx, &edx); |
| 399 | thres_count = ebx & 0x07; | 402 | thres_count = ebx & 0x07; |
| 400 | if (!thres_count) | 403 | if (!thres_count) |
| 401 | return -ENODEV; | 404 | return -ENODEV; |
| 402 | 405 | ||
| 406 | if (topology_physical_package_id(cpu) > MAX_PKG_TEMP_ZONE_IDS) | ||
| 407 | return -ENODEV; | ||
| 408 | |||
| 403 | thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS); | 409 | thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS); |
| 404 | 410 | ||
| 405 | err = get_tj_max(cpu, &tj_max); | 411 | err = get_tj_max(cpu, &tj_max); |
| @@ -417,13 +423,14 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) | |||
| 417 | spin_lock(&pkg_work_lock); | 423 | spin_lock(&pkg_work_lock); |
| 418 | if (topology_physical_package_id(cpu) > max_phy_id) | 424 | if (topology_physical_package_id(cpu) > max_phy_id) |
| 419 | max_phy_id = topology_physical_package_id(cpu); | 425 | max_phy_id = topology_physical_package_id(cpu); |
| 420 | pkg_work_scheduled = krealloc(pkg_work_scheduled, | 426 | temp = krealloc(pkg_work_scheduled, |
| 421 | (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); | 427 | (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); |
| 422 | if (!pkg_work_scheduled) { | 428 | if (!temp) { |
| 423 | spin_unlock(&pkg_work_lock); | 429 | spin_unlock(&pkg_work_lock); |
| 424 | err = -ENOMEM; | 430 | err = -ENOMEM; |
| 425 | goto err_ret_free; | 431 | goto err_ret_free; |
| 426 | } | 432 | } |
| 433 | pkg_work_scheduled = temp; | ||
| 427 | pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; | 434 | pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; |
| 428 | spin_unlock(&pkg_work_lock); | 435 | spin_unlock(&pkg_work_lock); |
| 429 | 436 | ||
| @@ -511,7 +518,7 @@ static int get_core_online(unsigned int cpu) | |||
| 511 | 518 | ||
| 512 | /* Check if there is already an instance for this package */ | 519 | /* Check if there is already an instance for this package */ |
| 513 | if (!phdev) { | 520 | if (!phdev) { |
| 514 | if (!cpu_has(c, X86_FEATURE_DTHERM) && | 521 | if (!cpu_has(c, X86_FEATURE_DTHERM) || |
| 515 | !cpu_has(c, X86_FEATURE_PTS)) | 522 | !cpu_has(c, X86_FEATURE_PTS)) |
| 516 | return -ENODEV; | 523 | return -ENODEV; |
| 517 | if (pkg_temp_thermal_device_add(cpu)) | 524 | if (pkg_temp_thermal_device_add(cpu)) |
| @@ -562,7 +569,7 @@ static struct notifier_block pkg_temp_thermal_notifier __refdata = { | |||
| 562 | }; | 569 | }; |
| 563 | 570 | ||
| 564 | static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = { | 571 | static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = { |
| 565 | { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM }, | 572 | { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS }, |
| 566 | {} | 573 | {} |
| 567 | }; | 574 | }; |
| 568 | MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids); | 575 | MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids); |
| @@ -592,7 +599,6 @@ static int __init pkg_temp_thermal_init(void) | |||
| 592 | return 0; | 599 | return 0; |
| 593 | 600 | ||
| 594 | err_ret: | 601 | err_ret: |
| 595 | get_online_cpus(); | ||
| 596 | for_each_online_cpu(i) | 602 | for_each_online_cpu(i) |
| 597 | put_core_offline(i); | 603 | put_core_offline(i); |
| 598 | put_online_cpus(); | 604 | put_online_cpus(); |
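Assigning krealloc()'s result straight back to pkg_work_scheduled leaked the old array on failure, because krealloc returns NULL without freeing its argument; the fix keeps the old pointer alive until the new one is known good. realloc(3) has exactly the same contract:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t n = 4;
        unsigned char *sched = calloc(n, 1);
        unsigned char *tmp;

        if (!sched)
            return 1;

        /* Grow through a temporary so the old buffer survives a failure. */
        tmp = realloc(sched, n * 2);
        if (!tmp) {
            free(sched);    /* the old pointer is still valid here */
            return 1;
        }
        sched = tmp;
        memset(sched + n, 0, n);
        printf("grew to %zu bytes\n", n * 2);
        free(sched);
        return 0;
    }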
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c index 3396eb9d57a3..ac2767100df5 100644 --- a/drivers/tty/hvc/hvsi_lib.c +++ b/drivers/tty/hvc/hvsi_lib.c | |||
| @@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv) | |||
| 341 | 341 | ||
| 342 | pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); | 342 | pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); |
| 343 | 343 | ||
| 344 | /* Try for up to 200s */ | 344 | /* Try for up to 400ms */ |
| 345 | for (timeout = 0; timeout < 20; timeout++) { | 345 | for (timeout = 0; timeout < 40; timeout++) { |
| 346 | if (pv->established) | 346 | if (pv->established) |
| 347 | goto established; | 347 | goto established; |
| 348 | if (!hvsi_get_packet(pv)) | 348 | if (!hvsi_get_packet(pv)) |
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index 721904f8efa9..946ddd2b3a54 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c | |||
| @@ -193,7 +193,8 @@ static int __init parse_options(struct early_serial8250_device *device, | |||
| 193 | if (options) { | 193 | if (options) { |
| 194 | options++; | 194 | options++; |
| 195 | device->baud = simple_strtoul(options, NULL, 0); | 195 | device->baud = simple_strtoul(options, NULL, 0); |
| 196 | length = min(strcspn(options, " "), sizeof(device->options)); | 196 | length = min(strcspn(options, " ") + 1, |
| 197 | sizeof(device->options)); | ||
| 197 | strlcpy(device->options, options, length); | 198 | strlcpy(device->options, options, length); |
| 198 | } else { | 199 | } else { |
| 199 | device->baud = probe_baud(port); | 200 | device->baud = probe_baud(port); |
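strlcpy() copies at most size - 1 bytes and then NUL-terminates, so passing the bare strcspn() token length chopped the final character of the console options; the fix passes length + 1, still clamped to the destination size. A quick demonstration, with a local strlcpy so it builds on any libc:

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy: copies at most size - 1 bytes, always terminates. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        const char *options = "115200n8 keep_bootcon";    /* token is 8 chars */
        char buf[16];

        my_strlcpy(buf, options, strcspn(options, " "));
        printf("off by one: \"%s\"\n", buf);    /* "115200n" */

        my_strlcpy(buf, options, strcspn(options, " ") + 1);
        printf("fixed:      \"%s\"\n", buf);    /* "115200n8" */
        return 0;
    }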
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index bb91b4713ebd..2e3ea1a70d7b 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c | |||
| @@ -31,9 +31,8 @@ static int __init serial_init_chip(struct parisc_device *dev) | |||
| 31 | int err; | 31 | int err; |
| 32 | 32 | ||
| 33 | #ifdef CONFIG_64BIT | 33 | #ifdef CONFIG_64BIT |
| 34 | extern int iosapic_serial_irq(int cellnum); | ||
| 35 | if (!dev->irq && (dev->id.sversion == 0xad)) | 34 | if (!dev->irq && (dev->id.sversion == 0xad)) |
| 36 | dev->irq = iosapic_serial_irq(dev->mod_index-1); | 35 | dev->irq = iosapic_serial_irq(dev); |
| 37 | #endif | 36 | #endif |
| 38 | 37 | ||
| 39 | if (!dev->irq) { | 38 | if (!dev->irq) { |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 5e3d68917ffe..1456673bcca0 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
| @@ -277,7 +277,7 @@ config SERIAL_TEGRA | |||
| 277 | select SERIAL_CORE | 277 | select SERIAL_CORE |
| 278 | help | 278 | help |
| 279 | Support for the on-chip UARTs on the NVIDIA Tegra series SOCs | 279 | Support for the on-chip UARTs on the NVIDIA Tegra series SOCs |
| 280 | providing /dev/ttyHS0, 1, 2, 3 and 4 (note, some machines may not | 280 | providing /dev/ttyTHS0, 1, 2, 3 and 4 (note, some machines may not |
| 281 | provide all of these ports, depending on how the serial ports | 281 | provide all of these ports, depending on how the serial ports |
| 282 | are enabled). This driver uses the APB DMA to achieve higher baudrate | 282 | are enabled). This driver uses the APB DMA to achieve higher baudrate |
| 283 | and better performance. | 283 | and better performance. |
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index cbf1d155b7b2..22f280aa4f2c 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c | |||
| @@ -773,6 +773,6 @@ module_init(arc_serial_init); | |||
| 773 | module_exit(arc_serial_exit); | 773 | module_exit(arc_serial_exit); |
| 774 | 774 | ||
| 775 | MODULE_LICENSE("GPL"); | 775 | MODULE_LICENSE("GPL"); |
| 776 | MODULE_ALIAS("plat-arcfpga/uart"); | 776 | MODULE_ALIAS("platform:" DRIVER_NAME); |
| 777 | MODULE_AUTHOR("Vineet Gupta"); | 777 | MODULE_AUTHOR("Vineet Gupta"); |
| 778 | MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver"); | 778 | MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver"); |
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 4f5f161896a1..f85b8e6d0346 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c | |||
| @@ -678,11 +678,18 @@ static void mxs_auart_settermios(struct uart_port *u, | |||
| 678 | 678 | ||
| 679 | static irqreturn_t mxs_auart_irq_handle(int irq, void *context) | 679 | static irqreturn_t mxs_auart_irq_handle(int irq, void *context) |
| 680 | { | 680 | { |
| 681 | u32 istatus, istat; | 681 | u32 istat; |
| 682 | struct mxs_auart_port *s = context; | 682 | struct mxs_auart_port *s = context; |
| 683 | u32 stat = readl(s->port.membase + AUART_STAT); | 683 | u32 stat = readl(s->port.membase + AUART_STAT); |
| 684 | 684 | ||
| 685 | istatus = istat = readl(s->port.membase + AUART_INTR); | 685 | istat = readl(s->port.membase + AUART_INTR); |
| 686 | |||
| 687 | /* ack irq */ | ||
| 688 | writel(istat & (AUART_INTR_RTIS | ||
| 689 | | AUART_INTR_TXIS | ||
| 690 | | AUART_INTR_RXIS | ||
| 691 | | AUART_INTR_CTSMIS), | ||
| 692 | s->port.membase + AUART_INTR_CLR); | ||
| 686 | 693 | ||
| 687 | if (istat & AUART_INTR_CTSMIS) { | 694 | if (istat & AUART_INTR_CTSMIS) { |
| 688 | uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS); | 695 | uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS); |
| @@ -702,12 +709,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context) | |||
| 702 | istat &= ~AUART_INTR_TXIS; | 709 | istat &= ~AUART_INTR_TXIS; |
| 703 | } | 710 | } |
| 704 | 711 | ||
| 705 | writel(istatus & (AUART_INTR_RTIS | ||
| 706 | | AUART_INTR_TXIS | ||
| 707 | | AUART_INTR_RXIS | ||
| 708 | | AUART_INTR_CTSMIS), | ||
| 709 | s->port.membase + AUART_INTR_CLR); | ||
| 710 | |||
| 711 | return IRQ_HANDLED; | 712 | return IRQ_HANDLED; |
| 712 | } | 713 | } |
| 713 | 714 | ||
| @@ -850,7 +851,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count) | |||
| 850 | struct mxs_auart_port *s; | 851 | struct mxs_auart_port *s; |
| 851 | struct uart_port *port; | 852 | struct uart_port *port; |
| 852 | unsigned int old_ctrl0, old_ctrl2; | 853 | unsigned int old_ctrl0, old_ctrl2; |
| 853 | unsigned int to = 1000; | 854 | unsigned int to = 20000; |
| 854 | 855 | ||
| 855 | if (co->index >= MXS_AUART_PORTS || co->index < 0) | 856 | if (co->index >= MXS_AUART_PORTS || co->index < 0) |
| 856 | return; | 857 | return; |
| @@ -871,18 +872,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count) | |||
| 871 | 872 | ||
| 872 | uart_console_write(port, str, count, mxs_auart_console_putchar); | 873 | uart_console_write(port, str, count, mxs_auart_console_putchar); |
| 873 | 874 | ||
| 874 | /* | 875 | /* Finally, wait for transmitter to become empty ... */ |
| 875 | * Finally, wait for transmitter to become empty | ||
| 876 | * and restore the TCR | ||
| 877 | */ | ||
| 878 | while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) { | 876 | while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) { |
| 877 | udelay(1); | ||
| 879 | if (!to--) | 878 | if (!to--) |
| 880 | break; | 879 | break; |
| 881 | udelay(1); | ||
| 882 | } | 880 | } |
| 883 | 881 | ||
| 884 | writel(old_ctrl0, port->membase + AUART_CTRL0); | 882 | /* |
| 885 | writel(old_ctrl2, port->membase + AUART_CTRL2); | 883 | * ... and restore the TCR if we waited long enough for the transmitter |
| 884 | * to be idle. This might keep the transmitter enabled although it is | ||
| 885 | * unused, but that is better than to disable it while it is still | ||
| 886 | * transmitting. | ||
| 887 | */ | ||
| 888 | if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) { | ||
| 889 | writel(old_ctrl0, port->membase + AUART_CTRL0); | ||
| 890 | writel(old_ctrl2, port->membase + AUART_CTRL2); | ||
| 891 | } | ||
| 886 | 892 | ||
| 887 | clk_disable(s->clk); | 893 | clk_disable(s->clk); |
| 888 | } | 894 | } |
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index ff171384ea52..dc6e96996ead 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c | |||
| @@ -3478,7 +3478,7 @@ static int alloc_buf_list(SLMP_INFO *info) | |||
| 3478 | for ( i = 0; i < info->rx_buf_count; i++ ) { | 3478 | for ( i = 0; i < info->rx_buf_count; i++ ) { |
| 3479 | /* calculate and store physical address of this buffer entry */ | 3479 | /* calculate and store physical address of this buffer entry */ |
| 3480 | info->rx_buf_list_ex[i].phys_entry = | 3480 | info->rx_buf_list_ex[i].phys_entry = |
| 3481 | info->buffer_list_phys + (i * sizeof(SCABUFSIZE)); | 3481 | info->buffer_list_phys + (i * SCABUFSIZE); |
| 3482 | 3482 | ||
| 3483 | /* calculate and store physical address of */ | 3483 | /* calculate and store physical address of */ |
| 3484 | /* next entry in circular list of entries */ | 3484 | /* next entry in circular list of entries */ |
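The synclinkmp bug multiplied by sizeof(SCABUFSIZE) -- the size of the constant's type, i.e. sizeof(int) -- rather than by the constant itself, so every receive buffer entry after the first got a wrong physical address. Easy to reproduce (the buffer size below is illustrative):

    #include <stdio.h>

    #define SCABUFSIZE 1024    /* illustrative; the driver's value may differ */

    int main(void)
    {
        unsigned long base = 0x10000;
        int i = 3;

        /* sizeof a macro constant is the size of its type, not its value. */
        printf("buggy stride %zu -> entry at 0x%lx\n",
               sizeof(SCABUFSIZE),
               (unsigned long)(base + i * sizeof(SCABUFSIZE)));
        printf("fixed stride %d -> entry at 0x%lx\n",
               SCABUFSIZE, base + (unsigned long)i * SCABUFSIZE);
        return 0;
    }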
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 121aeb9393e1..f597e88a705d 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c | |||
| @@ -256,10 +256,9 @@ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal) | |||
| 256 | { | 256 | { |
| 257 | struct tty_struct *tty = tty_port_tty_get(port); | 257 | struct tty_struct *tty = tty_port_tty_get(port); |
| 258 | 258 | ||
| 259 | if (tty && (!check_clocal || !C_CLOCAL(tty))) { | 259 | if (tty && (!check_clocal || !C_CLOCAL(tty))) |
| 260 | tty_hangup(tty); | 260 | tty_hangup(tty); |
| 261 | tty_kref_put(tty); | 261 | tty_kref_put(tty); |
| 262 | } | ||
| 263 | } | 262 | } |
| 264 | EXPORT_SYMBOL_GPL(tty_port_tty_hangup); | 263 | EXPORT_SYMBOL_GPL(tty_port_tty_hangup); |
| 265 | 264 | ||
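tty_port_tty_get() takes a reference, so the matching tty_kref_put() must run on every path; the old placement inside the if leaked a kref whenever CLOCAL suppressed the hangup. A small refcount sketch of put-on-every-path (types and names illustrative):

    #include <stdio.h>

    struct obj { int refs; };

    static struct obj *obj_get(struct obj *o) { if (o) o->refs++; return o; }

    static void obj_put(struct obj *o)
    {
        if (o && --o->refs == 0)
            printf("freed\n");
    }

    static void maybe_hangup(struct obj *port_tty, int clocal)
    {
        struct obj *tty = obj_get(port_tty);

        if (tty && !clocal)
            printf("hangup\n");
        obj_put(tty);    /* balanced on every path, not only the hangup one */
    }

    int main(void)
    {
        struct obj tty = { .refs = 1 };

        maybe_hangup(&tty, 1);    /* no hangup, but the ref is still dropped */
        maybe_hangup(&tty, 0);
        obj_put(&tty);            /* drop the initial reference: prints "freed" */
        return 0;
    }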
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index eb2aa2e5a842..d1bd8ef1f9c1 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig | |||
| @@ -12,7 +12,7 @@ if USB_CHIPIDEA | |||
| 12 | 12 | ||
| 13 | config USB_CHIPIDEA_UDC | 13 | config USB_CHIPIDEA_UDC |
| 14 | bool "ChipIdea device controller" | 14 | bool "ChipIdea device controller" |
| 15 | depends on USB_GADGET=y || USB_CHIPIDEA=m | 15 | depends on USB_GADGET=y || (USB_CHIPIDEA=m && USB_GADGET=m) |
| 16 | help | 16 | help |
| 17 | Say Y here to enable device controller functionality of the | 17 | Say Y here to enable device controller functionality of the |
| 18 | ChipIdea driver. | 18 | ChipIdea driver. |
| @@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC | |||
| 20 | config USB_CHIPIDEA_HOST | 20 | config USB_CHIPIDEA_HOST |
| 21 | bool "ChipIdea host controller" | 21 | bool "ChipIdea host controller" |
| 22 | depends on USB=y | 22 | depends on USB=y |
| 23 | depends on USB_EHCI_HCD=y || USB_CHIPIDEA=m | 23 | depends on USB_EHCI_HCD=y || (USB_CHIPIDEA=m && USB_EHCI_HCD=m) |
| 24 | select USB_EHCI_ROOT_HUB_TT | 24 | select USB_EHCI_ROOT_HUB_TT |
| 25 | help | 25 | help |
| 26 | Say Y here to enable host controller functionality of the | 26 | Say Y here to enable host controller functionality of the |
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h index aefa0261220c..1b23e354f9fb 100644 --- a/drivers/usb/chipidea/bits.h +++ b/drivers/usb/chipidea/bits.h | |||
| @@ -50,7 +50,7 @@ | |||
| 50 | #define PORTSC_PTC (0x0FUL << 16) | 50 | #define PORTSC_PTC (0x0FUL << 16) |
| 51 | /* PTS and PTW for non lpm version only */ | 51 | /* PTS and PTW for non lpm version only */ |
| 52 | #define PORTSC_PTS(d) \ | 52 | #define PORTSC_PTS(d) \ |
| 53 | ((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0)) | 53 | (u32)((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0)) |
| 54 | #define PORTSC_PTW BIT(28) | 54 | #define PORTSC_PTW BIT(28) |
| 55 | #define PORTSC_STS BIT(29) | 55 | #define PORTSC_STS BIT(29) |
| 56 | 56 | ||
| @@ -59,7 +59,7 @@ | |||
| 59 | #define DEVLC_PSPD_HS (0x02UL << 25) | 59 | #define DEVLC_PSPD_HS (0x02UL << 25) |
| 60 | #define DEVLC_PTW BIT(27) | 60 | #define DEVLC_PTW BIT(27) |
| 61 | #define DEVLC_STS BIT(28) | 61 | #define DEVLC_STS BIT(28) |
| 62 | #define DEVLC_PTS(d) (((d) & 0x7) << 29) | 62 | #define DEVLC_PTS(d) (u32)(((d) & 0x7) << 29) |
| 63 | 63 | ||
| 64 | /* Encoding for DEVLC_PTS and PORTSC_PTS */ | 64 | /* Encoding for DEVLC_PTS and PORTSC_PTS */ |
| 65 | #define PTS_UTMI 0 | 65 | #define PTS_UTMI 0 |
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 609dbc2f7151..83b4ef4dfcf8 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
| @@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
| 1119 | /* Determine if it is a Rigol or not */ | 1119 | /* Determine if it is a Rigol or not */ |
| 1120 | data->rigol_quirk = 0; | 1120 | data->rigol_quirk = 0; |
| 1121 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", | 1121 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", |
| 1122 | data->usb_dev->descriptor.idVendor, | 1122 | le16_to_cpu(data->usb_dev->descriptor.idVendor), |
| 1123 | data->usb_dev->descriptor.idProduct); | 1123 | le16_to_cpu(data->usb_dev->descriptor.idProduct)); |
| 1124 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { | 1124 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { |
| 1125 | if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && | 1125 | if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) && |
| 1126 | (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { | 1126 | (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) { |
| 1127 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); | 1127 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); |
| 1128 | data->rigol_quirk = 1; | 1128 | data->rigol_quirk = 1; |
| 1129 | break; | 1129 | break; |
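Descriptor fields such as idVendor are little-endian on the wire, so comparing them raw only works on little-endian hosts; the usbtmc fix wraps every access in le16_to_cpu(). A portable userspace version of that conversion:

    #include <stdio.h>
    #include <stdint.h>

    /* Byte-wise assembly gives the right value on hosts of either endianness. */
    static uint16_t le16_to_cpu_portable(const uint8_t b[2])
    {
        return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
        /* idVendor 0x1ab1 (Rigol) as the bytes appear in the descriptor. */
        const uint8_t wire[2] = { 0xb1, 0x1a };
        uint16_t vendor = le16_to_cpu_portable(wire);

        printf("idVendor = 0x%04x -> %s\n", vendor,
               vendor == 0x1ab1 ? "RIGOL quirk match" : "no match");
        return 0;
    }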
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 4191db32f12c..558313de4911 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -668,6 +668,15 @@ resubmit: | |||
| 668 | static inline int | 668 | static inline int |
| 669 | hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) | 669 | hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) |
| 670 | { | 670 | { |
| 671 | /* Need to clear both directions for control ep */ | ||
| 672 | if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) == | ||
| 673 | USB_ENDPOINT_XFER_CONTROL) { | ||
| 674 | int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | ||
| 675 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, | ||
| 676 | devinfo ^ 0x8000, tt, NULL, 0, 1000); | ||
| 677 | if (status) | ||
| 678 | return status; | ||
| 679 | } | ||
| 671 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), | 680 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
| 672 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, | 681 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, |
| 673 | tt, NULL, 0, 1000); | 682 | tt, NULL, 0, 1000); |
| @@ -2848,6 +2857,15 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev) | |||
| 2848 | USB_CTRL_SET_TIMEOUT); | 2857 | USB_CTRL_SET_TIMEOUT); |
| 2849 | } | 2858 | } |
| 2850 | 2859 | ||
| 2860 | /* Count of wakeup-enabled devices at or below udev */ | ||
| 2861 | static unsigned wakeup_enabled_descendants(struct usb_device *udev) | ||
| 2862 | { | ||
| 2863 | struct usb_hub *hub = usb_hub_to_struct_hub(udev); | ||
| 2864 | |||
| 2865 | return udev->do_remote_wakeup + | ||
| 2866 | (hub ? hub->wakeup_enabled_descendants : 0); | ||
| 2867 | } | ||
| 2868 | |||
| 2851 | /* | 2869 | /* |
| 2852 | * usb_port_suspend - suspend a usb device's upstream port | 2870 | * usb_port_suspend - suspend a usb device's upstream port |
| 2853 | * @udev: device that's no longer in active use, not a root hub | 2871 | * @udev: device that's no longer in active use, not a root hub |
| @@ -2888,8 +2906,8 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev) | |||
| 2888 | * Linux (2.6) currently has NO mechanisms to initiate that: no khubd | 2906 | * Linux (2.6) currently has NO mechanisms to initiate that: no khubd |
| 2889 | * timer, no SRP, no requests through sysfs. | 2907 | * timer, no SRP, no requests through sysfs. |
| 2890 | * | 2908 | * |
| 2891 | * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get | 2909 | * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get |
| 2892 | * suspended only when their bus goes into global suspend (i.e., the root | 2910 | * suspended until their bus goes into global suspend (i.e., the root |
| 2893 | * hub is suspended). Nevertheless, we change @udev->state to | 2911 | * hub is suspended). Nevertheless, we change @udev->state to |
| 2894 | * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual | 2912 | * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual |
| 2895 | * upstream port setting is stored in @udev->port_is_suspended. | 2913 | * upstream port setting is stored in @udev->port_is_suspended. |
| @@ -2960,15 +2978,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | |||
| 2960 | /* see 7.1.7.6 */ | 2978 | /* see 7.1.7.6 */ |
| 2961 | if (hub_is_superspeed(hub->hdev)) | 2979 | if (hub_is_superspeed(hub->hdev)) |
| 2962 | status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); | 2980 | status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); |
| 2963 | else if (PMSG_IS_AUTO(msg)) | 2981 | |
| 2964 | status = set_port_feature(hub->hdev, port1, | ||
| 2965 | USB_PORT_FEAT_SUSPEND); | ||
| 2966 | /* | 2982 | /* |
| 2967 | * For system suspend, we do not need to enable the suspend feature | 2983 | * For system suspend, we do not need to enable the suspend feature |
| 2968 | * on individual USB-2 ports. The devices will automatically go | 2984 | * on individual USB-2 ports. The devices will automatically go |
| 2969 | * into suspend a few ms after the root hub stops sending packets. | 2985 | * into suspend a few ms after the root hub stops sending packets. |
| 2970 | * The USB 2.0 spec calls this "global suspend". | 2986 | * The USB 2.0 spec calls this "global suspend". |
| 2987 | * | ||
| 2988 | * However, many USB hubs have a bug: They don't relay wakeup requests | ||
| 2989 | * from a downstream port if the port's suspend feature isn't on. | ||
| 2990 | * Therefore we will turn on the suspend feature if udev or any of its | ||
| 2991 | * descendants is enabled for remote wakeup. | ||
| 2971 | */ | 2992 | */ |
| 2993 | else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) | ||
| 2994 | status = set_port_feature(hub->hdev, port1, | ||
| 2995 | USB_PORT_FEAT_SUSPEND); | ||
| 2972 | else { | 2996 | else { |
| 2973 | really_suspend = false; | 2997 | really_suspend = false; |
| 2974 | status = 0; | 2998 | status = 0; |
| @@ -3003,15 +3027,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | |||
| 3003 | if (!PMSG_IS_AUTO(msg)) | 3027 | if (!PMSG_IS_AUTO(msg)) |
| 3004 | status = 0; | 3028 | status = 0; |
| 3005 | } else { | 3029 | } else { |
| 3006 | /* device has up to 10 msec to fully suspend */ | ||
| 3007 | dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", | 3030 | dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", |
| 3008 | (PMSG_IS_AUTO(msg) ? "auto-" : ""), | 3031 | (PMSG_IS_AUTO(msg) ? "auto-" : ""), |
| 3009 | udev->do_remote_wakeup); | 3032 | udev->do_remote_wakeup); |
| 3010 | usb_set_device_state(udev, USB_STATE_SUSPENDED); | ||
| 3011 | if (really_suspend) { | 3033 | if (really_suspend) { |
| 3012 | udev->port_is_suspended = 1; | 3034 | udev->port_is_suspended = 1; |
| 3035 | |||
| 3036 | /* device has up to 10 msec to fully suspend */ | ||
| 3013 | msleep(10); | 3037 | msleep(10); |
| 3014 | } | 3038 | } |
| 3039 | usb_set_device_state(udev, USB_STATE_SUSPENDED); | ||
| 3015 | } | 3040 | } |
| 3016 | 3041 | ||
| 3017 | /* | 3042 | /* |
| @@ -3293,7 +3318,11 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg) | |||
| 3293 | unsigned port1; | 3318 | unsigned port1; |
| 3294 | int status; | 3319 | int status; |
| 3295 | 3320 | ||
| 3296 | /* Warn if children aren't already suspended */ | 3321 | /* |
| 3322 | * Warn if children aren't already suspended. | ||
| 3323 | * Also, add up the number of wakeup-enabled descendants. | ||
| 3324 | */ | ||
| 3325 | hub->wakeup_enabled_descendants = 0; | ||
| 3297 | for (port1 = 1; port1 <= hdev->maxchild; port1++) { | 3326 | for (port1 = 1; port1 <= hdev->maxchild; port1++) { |
| 3298 | struct usb_device *udev; | 3327 | struct usb_device *udev; |
| 3299 | 3328 | ||
| @@ -3303,6 +3332,9 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg) | |||
| 3303 | if (PMSG_IS_AUTO(msg)) | 3332 | if (PMSG_IS_AUTO(msg)) |
| 3304 | return -EBUSY; | 3333 | return -EBUSY; |
| 3305 | } | 3334 | } |
| 3335 | if (udev) | ||
| 3336 | hub->wakeup_enabled_descendants += | ||
| 3337 | wakeup_enabled_descendants(udev); | ||
| 3306 | } | 3338 | } |
| 3307 | 3339 | ||
| 3308 | if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { | 3340 | if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { |
| @@ -4766,7 +4798,8 @@ static void hub_events(void) | |||
| 4766 | hub->ports[i - 1]->child; | 4798 | hub->ports[i - 1]->child; |
| 4767 | 4799 | ||
| 4768 | dev_dbg(hub_dev, "warm reset port %d\n", i); | 4800 | dev_dbg(hub_dev, "warm reset port %d\n", i); |
| 4769 | if (!udev) { | 4801 | if (!udev || !(portstatus & |
| 4802 | USB_PORT_STAT_CONNECTION)) { | ||
| 4770 | status = hub_port_reset(hub, i, | 4803 | status = hub_port_reset(hub, i, |
| 4771 | NULL, HUB_BH_RESET_TIME, | 4804 | NULL, HUB_BH_RESET_TIME, |
| 4772 | true); | 4805 | true); |
| @@ -4776,8 +4809,8 @@ static void hub_events(void) | |||
| 4776 | usb_lock_device(udev); | 4809 | usb_lock_device(udev); |
| 4777 | status = usb_reset_device(udev); | 4810 | status = usb_reset_device(udev); |
| 4778 | usb_unlock_device(udev); | 4811 | usb_unlock_device(udev); |
| 4812 | connect_change = 0; | ||
| 4779 | } | 4813 | } |
| 4780 | connect_change = 0; | ||
| 4781 | } | 4814 | } |
| 4782 | 4815 | ||
| 4783 | if (connect_change) | 4816 | if (connect_change) |
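Two corrections in the hub_events() warm-reset hunk: a port that needs a warm reset but no longer reports a connection now takes the device-less hub_port_reset() path instead of resetting a stale udev, and connect_change is cleared only in the branch that actually reset a bound device, so a genuine connect change on an empty port still gets processed. Condensed from the diff, the resulting control flow is:

    if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)) {
            /* nothing usable behind the port: reset the bare port */
            status = hub_port_reset(hub, i, NULL,
                            HUB_BH_RESET_TIME, true);
    } else {
            usb_lock_device(udev);
            status = usb_reset_device(udev);
            usb_unlock_device(udev);
            connect_change = 0;     /* device handled; skip connect processing */
    }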
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index 6508e02b3dac..4e4790dea343 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h | |||
| @@ -59,6 +59,9 @@ struct usb_hub { | |||
| 59 | struct usb_tt tt; /* Transaction Translator */ | 59 | struct usb_tt tt; /* Transaction Translator */ |
| 60 | 60 | ||
| 61 | unsigned mA_per_port; /* current for each child */ | 61 | unsigned mA_per_port; /* current for each child */ |
| 62 | #ifdef CONFIG_PM | ||
| 63 | unsigned wakeup_enabled_descendants; | ||
| 64 | #endif | ||
| 62 | 65 | ||
| 63 | unsigned limited_power:1; | 66 | unsigned limited_power:1; |
| 64 | unsigned quiescing:1; | 67 | unsigned quiescing:1; |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a63598895077..5b44cd47da5b 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = | 78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = |
| 79 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 79 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
| 80 | 80 | ||
| 81 | /* CarrolTouch 4000U */ | ||
| 82 | { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 83 | |||
| 84 | /* CarrolTouch 4500U */ | ||
| 85 | { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 86 | |||
| 81 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ | 87 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ |
| 82 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = | 88 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = |
| 83 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 89 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 757aa18027d0..2378958ea63e 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config USB_DWC3 | 1 | config USB_DWC3 |
| 2 | tristate "DesignWare USB3 DRD Core Support" | 2 | tristate "DesignWare USB3 DRD Core Support" |
| 3 | depends on (USB || USB_GADGET) && GENERIC_HARDIRQS | 3 | depends on (USB || USB_GADGET) && GENERIC_HARDIRQS && HAS_DMA |
| 4 | select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD | 4 | select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD |
| 5 | help | 5 | help |
| 6 | Say Y or M here if your system has a Dual Role SuperSpeed | 6 | Say Y or M here if your system has a Dual Role SuperSpeed |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index c35d49d39b76..358375e0b291 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
| @@ -450,7 +450,7 @@ static int dwc3_probe(struct platform_device *pdev) | |||
| 450 | } | 450 | } |
| 451 | 451 | ||
| 452 | if (IS_ERR(dwc->usb3_phy)) { | 452 | if (IS_ERR(dwc->usb3_phy)) { |
| 453 | ret = PTR_ERR(dwc->usb2_phy); | 453 | ret = PTR_ERR(dwc->usb3_phy); |
| 454 | 454 | ||
| 455 | /* | 455 | /* |
| 456 | * if -ENXIO is returned, it means PHY layer wasn't | 456 | * if -ENXIO is returned, it means PHY layer wasn't |
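The dwc3 probe change is a classic copy-paste bug: the code tested usb3_phy but pulled the errno out of usb2_phy, so the -ENXIO special-casing below it could act on the wrong (or no) error. IS_ERR() and PTR_ERR() must always operate on the same pointer; a minimal sketch of the corrected idiom, leaving out the driver's full -ENXIO handling:

    if (IS_ERR(dwc->usb3_phy)) {
            ret = PTR_ERR(dwc->usb3_phy);   /* same handle as the IS_ERR() test */
            return ret;                     /* sketch; the real code special-cases -ENXIO */
    }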
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index b69d322e3cab..27dad993b007 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h | |||
| @@ -759,8 +759,8 @@ struct dwc3 { | |||
| 759 | 759 | ||
| 760 | struct dwc3_event_type { | 760 | struct dwc3_event_type { |
| 761 | u32 is_devspec:1; | 761 | u32 is_devspec:1; |
| 762 | u32 type:6; | 762 | u32 type:7; |
| 763 | u32 reserved8_31:25; | 763 | u32 reserved8_31:24; |
| 764 | } __packed; | 764 | } __packed; |
| 765 | 765 | ||
| 766 | #define DWC3_DEPEVT_XFERCOMPLETE 0x01 | 766 | #define DWC3_DEPEVT_XFERCOMPLETE 0x01 |
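The dwc3_event_type fix widens type to the seven bits its neighbour's name already implies: reserved8_31 covers bits 8..31 (24 bits), leaving bits 1..7 for type after the single is_devspec bit. The old 6/25 split also summed to 32, so only the field names betrayed the off-by-one; the decoded type lost its most significant bit. A compile-time size check is still cheap insurance against edits that break the 32-bit total (an illustrative addition, not part of the patch, assuming core.h is in scope):

    #include <linux/bug.h>

    static inline void dwc3_event_layout_check(void)
    {
            /* refuses to build if the bitfields stop covering exactly one u32 */
            BUILD_BUG_ON(sizeof(struct dwc3_event_type) != sizeof(u32));
    }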
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index b5e5b35df49c..f77083fedc68 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -1584,6 +1584,7 @@ err1: | |||
| 1584 | __dwc3_gadget_ep_disable(dwc->eps[0]); | 1584 | __dwc3_gadget_ep_disable(dwc->eps[0]); |
| 1585 | 1585 | ||
| 1586 | err0: | 1586 | err0: |
| 1587 | dwc->gadget_driver = NULL; | ||
| 1587 | spin_unlock_irqrestore(&dwc->lock, flags); | 1588 | spin_unlock_irqrestore(&dwc->lock, flags); |
| 1588 | 1589 | ||
| 1589 | return ret; | 1590 | return ret; |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 62f6802f6e0f..8e9368330b10 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
| @@ -193,6 +193,7 @@ config USB_FUSB300 | |||
| 193 | Faraday usb device controller FUSB300 driver | 193 | Faraday usb device controller FUSB300 driver |
| 194 | 194 | ||
| 195 | config USB_FOTG210_UDC | 195 | config USB_FOTG210_UDC |
| 196 | depends on HAS_DMA | ||
| 196 | tristate "Faraday FOTG210 USB Peripheral Controller" | 197 | tristate "Faraday FOTG210 USB Peripheral Controller" |
| 197 | help | 198 | help |
| 198 | Faraday USB2.0 OTG controller which can be configured as | 199 | Faraday USB2.0 OTG controller which can be configured as |
| @@ -328,13 +329,14 @@ config USB_S3C_HSUDC | |||
| 328 | 329 | ||
| 329 | config USB_MV_UDC | 330 | config USB_MV_UDC |
| 330 | tristate "Marvell USB2.0 Device Controller" | 331 | tristate "Marvell USB2.0 Device Controller" |
| 331 | depends on GENERIC_HARDIRQS | 332 | depends on GENERIC_HARDIRQS && HAS_DMA |
| 332 | help | 333 | help |
| 333 | Marvell Socs (including PXA and MMP series) include a high speed | 334 | Marvell Socs (including PXA and MMP series) include a high speed |
| 334 | USB2.0 OTG controller, which can be configured as high speed or | 335 | USB2.0 OTG controller, which can be configured as high speed or |
| 335 | full speed USB peripheral. | 336 | full speed USB peripheral. |
| 336 | 337 | ||
| 337 | config USB_MV_U3D | 338 | config USB_MV_U3D |
| 339 | depends on HAS_DMA | ||
| 338 | tristate "MARVELL PXA2128 USB 3.0 controller" | 340 | tristate "MARVELL PXA2128 USB 3.0 controller" |
| 339 | help | 341 | help |
| 340 | MARVELL PXA2128 Processor series include a super speed USB3.0 device | 342 | MARVELL PXA2128 Processor series include a super speed USB3.0 device |
| @@ -639,6 +641,7 @@ config USB_CONFIGFS_RNDIS | |||
| 639 | depends on USB_CONFIGFS | 641 | depends on USB_CONFIGFS |
| 640 | depends on NET | 642 | depends on NET |
| 641 | select USB_U_ETHER | 643 | select USB_U_ETHER |
| 644 | select USB_U_RNDIS | ||
| 642 | select USB_F_RNDIS | 645 | select USB_F_RNDIS |
| 643 | help | 646 | help |
| 644 | Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol, | 647 | Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol, |
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 073b938f9135..d9a6add0c852 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c | |||
| @@ -870,8 +870,8 @@ static void clk_on(struct at91_udc *udc) | |||
| 870 | if (udc->clocked) | 870 | if (udc->clocked) |
| 871 | return; | 871 | return; |
| 872 | udc->clocked = 1; | 872 | udc->clocked = 1; |
| 873 | clk_enable(udc->iclk); | 873 | clk_prepare_enable(udc->iclk); |
| 874 | clk_enable(udc->fclk); | 874 | clk_prepare_enable(udc->fclk); |
| 875 | } | 875 | } |
| 876 | 876 | ||
| 877 | static void clk_off(struct at91_udc *udc) | 877 | static void clk_off(struct at91_udc *udc) |
| @@ -880,8 +880,8 @@ static void clk_off(struct at91_udc *udc) | |||
| 880 | return; | 880 | return; |
| 881 | udc->clocked = 0; | 881 | udc->clocked = 0; |
| 882 | udc->gadget.speed = USB_SPEED_UNKNOWN; | 882 | udc->gadget.speed = USB_SPEED_UNKNOWN; |
| 883 | clk_disable(udc->fclk); | 883 | clk_disable_unprepare(udc->fclk); |
| 884 | clk_disable(udc->iclk); | 884 | clk_disable_unprepare(udc->iclk); |
| 885 | } | 885 | } |
| 886 | 886 | ||
| 887 | /* | 887 | /* |
| @@ -1725,7 +1725,7 @@ static int at91udc_probe(struct platform_device *pdev) | |||
| 1725 | /* init software state */ | 1725 | /* init software state */ |
| 1726 | udc = &controller; | 1726 | udc = &controller; |
| 1727 | udc->gadget.dev.parent = dev; | 1727 | udc->gadget.dev.parent = dev; |
| 1728 | if (pdev->dev.of_node) | 1728 | if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) |
| 1729 | at91udc_of_init(udc, pdev->dev.of_node); | 1729 | at91udc_of_init(udc, pdev->dev.of_node); |
| 1730 | else | 1730 | else |
| 1731 | memcpy(&udc->board, dev->platform_data, | 1731 | memcpy(&udc->board, dev->platform_data, |
| @@ -1782,12 +1782,14 @@ static int at91udc_probe(struct platform_device *pdev) | |||
| 1782 | } | 1782 | } |
| 1783 | 1783 | ||
| 1784 | /* don't do anything until we have both gadget driver and VBUS */ | 1784 | /* don't do anything until we have both gadget driver and VBUS */ |
| 1785 | clk_enable(udc->iclk); | 1785 | retval = clk_prepare_enable(udc->iclk); |
| 1786 | if (retval) | ||
| 1787 | goto fail1; | ||
| 1786 | at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); | 1788 | at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); |
| 1787 | at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff); | 1789 | at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff); |
| 1788 | /* Clear all pending interrupts - UDP may be used by bootloader. */ | 1790 | /* Clear all pending interrupts - UDP may be used by bootloader. */ |
| 1789 | at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff); | 1791 | at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff); |
| 1790 | clk_disable(udc->iclk); | 1792 | clk_disable_unprepare(udc->iclk); |
| 1791 | 1793 | ||
| 1792 | /* request UDC and maybe VBUS irqs */ | 1794 | /* request UDC and maybe VBUS irqs */ |
| 1793 | udc->udp_irq = platform_get_irq(pdev, 0); | 1795 | udc->udp_irq = platform_get_irq(pdev, 0); |
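The at91_udc hunks adopt the common clock framework contract: a clock must be prepared before it is enabled, clk_prepare_enable() does both in one call whose return value has to be checked, and teardown mirrors it with clk_disable_unprepare(). The pattern in miniature:

    ret = clk_prepare_enable(udc->iclk);    /* prepare (may sleep) + enable */
    if (ret)
            goto fail1;                     /* enabling can fail under CCF */

    /* ... program the hardware ... */

    clk_disable_unprepare(udc->iclk);       /* exact mirror of the enable above */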
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index f48712ffe261..c1c113ef950c 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c | |||
| @@ -449,14 +449,20 @@ fail: | |||
| 449 | 449 | ||
| 450 | static int __exit eth_unbind(struct usb_composite_dev *cdev) | 450 | static int __exit eth_unbind(struct usb_composite_dev *cdev) |
| 451 | { | 451 | { |
| 452 | if (has_rndis()) | 452 | if (has_rndis()) { |
| 453 | usb_put_function(f_rndis); | ||
| 453 | usb_put_function_instance(fi_rndis); | 454 | usb_put_function_instance(fi_rndis); |
| 454 | if (use_eem) | 455 | } |
| 456 | if (use_eem) { | ||
| 457 | usb_put_function(f_eem); | ||
| 455 | usb_put_function_instance(fi_eem); | 458 | usb_put_function_instance(fi_eem); |
| 456 | else if (can_support_ecm(cdev->gadget)) | 459 | } else if (can_support_ecm(cdev->gadget)) { |
| 460 | usb_put_function(f_ecm); | ||
| 457 | usb_put_function_instance(fi_ecm); | 461 | usb_put_function_instance(fi_ecm); |
| 458 | else | 462 | } else { |
| 463 | usb_put_function(f_geth); | ||
| 459 | usb_put_function_instance(fi_geth); | 464 | usb_put_function_instance(fi_geth); |
| 465 | } | ||
| 460 | return 0; | 466 | return 0; |
| 461 | } | 467 | } |
| 462 | 468 | ||
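The eth_unbind() fix restores the reference pairing of the function-based gadget API: every usb_get_function() must be balanced by usb_put_function() before the owning instance is released with usb_put_function_instance(), in reverse order of acquisition; previously the function references were simply leaked. A generic skeleton, where fi and f stand for whichever function the configuration used:

    struct usb_function *f = usb_get_function(fi);

    if (IS_ERR(f))
            return PTR_ERR(f);

    /* ... usb_add_function(), normal operation ... */

    usb_put_function(f);            /* drop the function first */
    usb_put_function_instance(fi);  /* then release its instance */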
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c index 5d3561ea1c15..edab45da3741 100644 --- a/drivers/usb/gadget/f_ecm.c +++ b/drivers/usb/gadget/f_ecm.c | |||
| @@ -959,8 +959,11 @@ static struct usb_function_instance *ecm_alloc_inst(void) | |||
| 959 | mutex_init(&opts->lock); | 959 | mutex_init(&opts->lock); |
| 960 | opts->func_inst.free_func_inst = ecm_free_inst; | 960 | opts->func_inst.free_func_inst = ecm_free_inst; |
| 961 | opts->net = gether_setup_default(); | 961 | opts->net = gether_setup_default(); |
| 962 | if (IS_ERR(opts->net)) | 962 | if (IS_ERR(opts->net)) { |
| 963 | return ERR_PTR(PTR_ERR(opts->net)); | 963 | struct net_device *net = opts->net; |
| 964 | kfree(opts); | ||
| 965 | return ERR_CAST(net); | ||
| 966 | } | ||
| 964 | 967 | ||
| 965 | config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type); | 968 | config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type); |
| 966 | 969 | ||
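This error-path fix does two things, and the same shape is applied to f_eem, f_ncm, f_phonet, f_rndis and f_subset in the hunks below: it frees the just-allocated opts structure instead of leaking it when gether_setup_default() fails, and it uses ERR_CAST() rather than the equivalent but noisier ERR_PTR(PTR_ERR()) round trip. The common pattern:

    opts->net = gether_setup_default();
    if (IS_ERR(opts->net)) {
            struct net_device *net = opts->net;  /* save before freeing the holder */

            kfree(opts);                         /* was leaked before this fix */
            return ERR_CAST(net);                /* retype the error pointer as-is */
    }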
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 90ee8022e8d8..d00392d879db 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c | |||
| @@ -593,8 +593,11 @@ static struct usb_function_instance *eem_alloc_inst(void) | |||
| 593 | mutex_init(&opts->lock); | 593 | mutex_init(&opts->lock); |
| 594 | opts->func_inst.free_func_inst = eem_free_inst; | 594 | opts->func_inst.free_func_inst = eem_free_inst; |
| 595 | opts->net = gether_setup_default(); | 595 | opts->net = gether_setup_default(); |
| 596 | if (IS_ERR(opts->net)) | 596 | if (IS_ERR(opts->net)) { |
| 597 | return ERR_CAST(opts->net); | 597 | struct net_device *net = opts->net; |
| 598 | kfree(opts); | ||
| 599 | return ERR_CAST(net); | ||
| 600 | } | ||
| 598 | 601 | ||
| 599 | config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type); | 602 | config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type); |
| 600 | 603 | ||
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c index 952177f7eb9b..1c28fe13328a 100644 --- a/drivers/usb/gadget/f_ncm.c +++ b/drivers/usb/gadget/f_ncm.c | |||
| @@ -1350,8 +1350,11 @@ static struct usb_function_instance *ncm_alloc_inst(void) | |||
| 1350 | mutex_init(&opts->lock); | 1350 | mutex_init(&opts->lock); |
| 1351 | opts->func_inst.free_func_inst = ncm_free_inst; | 1351 | opts->func_inst.free_func_inst = ncm_free_inst; |
| 1352 | opts->net = gether_setup_default(); | 1352 | opts->net = gether_setup_default(); |
| 1353 | if (IS_ERR(opts->net)) | 1353 | if (IS_ERR(opts->net)) { |
| 1354 | return ERR_PTR(PTR_ERR(opts->net)); | 1354 | struct net_device *net = opts->net; |
| 1355 | kfree(opts); | ||
| 1356 | return ERR_CAST(net); | ||
| 1357 | } | ||
| 1355 | 1358 | ||
| 1356 | config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type); | 1359 | config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type); |
| 1357 | 1360 | ||
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 7944fb0efe3b..eb3aa817a662 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c | |||
| @@ -488,7 +488,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f) | |||
| 488 | struct usb_ep *ep; | 488 | struct usb_ep *ep; |
| 489 | int status, i; | 489 | int status, i; |
| 490 | 490 | ||
| 491 | #ifndef USBF_PHONET_INCLUDED | ||
| 492 | struct f_phonet_opts *phonet_opts; | 491 | struct f_phonet_opts *phonet_opts; |
| 493 | 492 | ||
| 494 | phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst); | 493 | phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst); |
| @@ -507,7 +506,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f) | |||
| 507 | return status; | 506 | return status; |
| 508 | phonet_opts->bound = true; | 507 | phonet_opts->bound = true; |
| 509 | } | 508 | } |
| 510 | #endif | ||
| 511 | 509 | ||
| 512 | /* Reserve interface IDs */ | 510 | /* Reserve interface IDs */ |
| 513 | status = usb_interface_id(c, f); | 511 | status = usb_interface_id(c, f); |
| @@ -656,8 +654,11 @@ static struct usb_function_instance *phonet_alloc_inst(void) | |||
| 656 | 654 | ||
| 657 | opts->func_inst.free_func_inst = phonet_free_inst; | 655 | opts->func_inst.free_func_inst = phonet_free_inst; |
| 658 | opts->net = gphonet_setup_default(); | 656 | opts->net = gphonet_setup_default(); |
| 659 | if (IS_ERR(opts->net)) | 657 | if (IS_ERR(opts->net)) { |
| 660 | return ERR_PTR(PTR_ERR(opts->net)); | 658 | struct net_device *net = opts->net; |
| 659 | kfree(opts); | ||
| 660 | return ERR_CAST(net); | ||
| 661 | } | ||
| 661 | 662 | ||
| 662 | config_group_init_type_name(&opts->func_inst.group, "", | 663 | config_group_init_type_name(&opts->func_inst.group, "", |
| 663 | &phonet_func_type); | 664 | &phonet_func_type); |
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 191df35ae69d..717ed7f95639 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c | |||
| @@ -963,8 +963,11 @@ static struct usb_function_instance *rndis_alloc_inst(void) | |||
| 963 | mutex_init(&opts->lock); | 963 | mutex_init(&opts->lock); |
| 964 | opts->func_inst.free_func_inst = rndis_free_inst; | 964 | opts->func_inst.free_func_inst = rndis_free_inst; |
| 965 | opts->net = gether_setup_default(); | 965 | opts->net = gether_setup_default(); |
| 966 | if (IS_ERR(opts->net)) | 966 | if (IS_ERR(opts->net)) { |
| 967 | return ERR_CAST(opts->net); | 967 | struct net_device *net = opts->net; |
| 968 | kfree(opts); | ||
| 969 | return ERR_CAST(net); | ||
| 970 | } | ||
| 968 | 971 | ||
| 969 | config_group_init_type_name(&opts->func_inst.group, "", | 972 | config_group_init_type_name(&opts->func_inst.group, "", |
| 970 | &rndis_func_type); | 973 | &rndis_func_type); |
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c index 5601e1d96c4f..7c8674fa7e80 100644 --- a/drivers/usb/gadget/f_subset.c +++ b/drivers/usb/gadget/f_subset.c | |||
| @@ -505,8 +505,11 @@ static struct usb_function_instance *geth_alloc_inst(void) | |||
| 505 | mutex_init(&opts->lock); | 505 | mutex_init(&opts->lock); |
| 506 | opts->func_inst.free_func_inst = geth_free_inst; | 506 | opts->func_inst.free_func_inst = geth_free_inst; |
| 507 | opts->net = gether_setup_default(); | 507 | opts->net = gether_setup_default(); |
| 508 | if (IS_ERR(opts->net)) | 508 | if (IS_ERR(opts->net)) { |
| 509 | return ERR_CAST(opts->net); | 509 | struct net_device *net = opts->net; |
| 510 | kfree(opts); | ||
| 511 | return ERR_CAST(net); | ||
| 512 | } | ||
| 510 | 513 | ||
| 511 | config_group_init_type_name(&opts->func_inst.group, "", | 514 | config_group_init_type_name(&opts->func_inst.group, "", |
| 512 | &gether_func_type); | 515 | &gether_func_type); |
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c index cce5535b1dc6..10cd18ddd0d4 100644 --- a/drivers/usb/gadget/fotg210-udc.c +++ b/drivers/usb/gadget/fotg210-udc.c | |||
| @@ -1074,7 +1074,7 @@ static struct usb_gadget_ops fotg210_gadget_ops = { | |||
| 1074 | .udc_stop = fotg210_udc_stop, | 1074 | .udc_stop = fotg210_udc_stop, |
| 1075 | }; | 1075 | }; |
| 1076 | 1076 | ||
| 1077 | static int __exit fotg210_udc_remove(struct platform_device *pdev) | 1077 | static int fotg210_udc_remove(struct platform_device *pdev) |
| 1078 | { | 1078 | { |
| 1079 | struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev); | 1079 | struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev); |
| 1080 | 1080 | ||
| @@ -1088,7 +1088,7 @@ static int __exit fotg210_udc_remove(struct platform_device *pdev) | |||
| 1088 | return 0; | 1088 | return 0; |
| 1089 | } | 1089 | } |
| 1090 | 1090 | ||
| 1091 | static int __init fotg210_udc_probe(struct platform_device *pdev) | 1091 | static int fotg210_udc_probe(struct platform_device *pdev) |
| 1092 | { | 1092 | { |
| 1093 | struct resource *res, *ires; | 1093 | struct resource *res, *ires; |
| 1094 | struct fotg210_udc *fotg210 = NULL; | 1094 | struct fotg210_udc *fotg210 = NULL; |
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c index 032b96a51ce4..2a1ebefd8f9e 100644 --- a/drivers/usb/gadget/multi.c +++ b/drivers/usb/gadget/multi.c | |||
| @@ -160,10 +160,8 @@ static __init int rndis_do_config(struct usb_configuration *c) | |||
| 160 | return ret; | 160 | return ret; |
| 161 | 161 | ||
| 162 | f_acm_rndis = usb_get_function(fi_acm); | 162 | f_acm_rndis = usb_get_function(fi_acm); |
| 163 | if (IS_ERR(f_acm_rndis)) { | 163 | if (IS_ERR(f_acm_rndis)) |
| 164 | ret = PTR_ERR(f_acm_rndis); | 164 | return PTR_ERR(f_acm_rndis); |
| 165 | goto err_func_acm; | ||
| 166 | } | ||
| 167 | 165 | ||
| 168 | ret = usb_add_function(c, f_acm_rndis); | 166 | ret = usb_add_function(c, f_acm_rndis); |
| 169 | if (ret) | 167 | if (ret) |
| @@ -178,7 +176,6 @@ err_fsg: | |||
| 178 | usb_remove_function(c, f_acm_rndis); | 176 | usb_remove_function(c, f_acm_rndis); |
| 179 | err_conf: | 177 | err_conf: |
| 180 | usb_put_function(f_acm_rndis); | 178 | usb_put_function(f_acm_rndis); |
| 181 | err_func_acm: | ||
| 182 | return ret; | 179 | return ret; |
| 183 | } | 180 | } |
| 184 | 181 | ||
| @@ -226,7 +223,7 @@ static __init int cdc_do_config(struct usb_configuration *c) | |||
| 226 | /* implicit port_num is zero */ | 223 | /* implicit port_num is zero */ |
| 227 | f_acm_multi = usb_get_function(fi_acm); | 224 | f_acm_multi = usb_get_function(fi_acm); |
| 228 | if (IS_ERR(f_acm_multi)) | 225 | if (IS_ERR(f_acm_multi)) |
| 229 | goto err_func_acm; | 226 | return PTR_ERR(f_acm_multi); |
| 230 | 227 | ||
| 231 | ret = usb_add_function(c, f_acm_multi); | 228 | ret = usb_add_function(c, f_acm_multi); |
| 232 | if (ret) | 229 | if (ret) |
| @@ -241,7 +238,6 @@ err_fsg: | |||
| 241 | usb_remove_function(c, f_acm_multi); | 238 | usb_remove_function(c, f_acm_multi); |
| 242 | err_conf: | 239 | err_conf: |
| 243 | usb_put_function(f_acm_multi); | 240 | usb_put_function(f_acm_multi); |
| 244 | err_func_acm: | ||
| 245 | return ret; | 241 | return ret; |
| 246 | } | 242 | } |
| 247 | 243 | ||
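Before usb_add_function() has succeeded there is nothing to unwind, so both multi.c config helpers can return immediately instead of jumping to a cleanup label. In cdc_do_config() the old goto err_func_acm also returned ret without ever assigning it from the failed pointer, so an allocation failure could be reported with a stale value; the rewrite fixes that as a side effect. The early exit now reads:

    f_acm_multi = usb_get_function(fi_acm);
    if (IS_ERR(f_acm_multi))
            return PTR_ERR(f_acm_multi);    /* nothing acquired yet to clean up */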
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c index 07fdb3eaf48a..ec6a2d290398 100644 --- a/drivers/usb/gadget/mv_u3d_core.c +++ b/drivers/usb/gadget/mv_u3d_core.c | |||
| @@ -1776,7 +1776,7 @@ static int mv_u3d_remove(struct platform_device *dev) | |||
| 1776 | kfree(u3d->eps); | 1776 | kfree(u3d->eps); |
| 1777 | 1777 | ||
| 1778 | if (u3d->irq) | 1778 | if (u3d->irq) |
| 1779 | free_irq(u3d->irq, &dev->dev); | 1779 | free_irq(u3d->irq, u3d); |
| 1780 | 1780 | ||
| 1781 | if (u3d->cap_regs) | 1781 | if (u3d->cap_regs) |
| 1782 | iounmap(u3d->cap_regs); | 1782 | iounmap(u3d->cap_regs); |
| @@ -1974,7 +1974,7 @@ static int mv_u3d_probe(struct platform_device *dev) | |||
| 1974 | return 0; | 1974 | return 0; |
| 1975 | 1975 | ||
| 1976 | err_unregister: | 1976 | err_unregister: |
| 1977 | free_irq(u3d->irq, &dev->dev); | 1977 | free_irq(u3d->irq, u3d); |
| 1978 | err_request_irq: | 1978 | err_request_irq: |
| 1979 | err_get_irq: | 1979 | err_get_irq: |
| 1980 | kfree(u3d->status_req); | 1980 | kfree(u3d->status_req); |
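free_irq() only removes the handler whose dev_id cookie matches the one given to request_irq(); since the driver registers the interrupt with the u3d pointer, freeing with &dev->dev silently left the handler installed. The pairing rule in miniature (the handler name and flags are illustrative, on the assumption that the probe path registers with u3d as dev_id):

    ret = request_irq(u3d->irq, mv_u3d_irq, IRQF_SHARED, driver_name, u3d);
    if (ret)
            goto err_request_irq;

    /* ... */

    free_irq(u3d->irq, u3d);        /* must pass the same dev_id as above */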
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index ffd8fa541101..13e25f80fc20 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c | |||
| @@ -50,6 +50,8 @@ static DEFINE_MUTEX(udc_lock); | |||
| 50 | 50 | ||
| 51 | /* ------------------------------------------------------------------------- */ | 51 | /* ------------------------------------------------------------------------- */ |
| 52 | 52 | ||
| 53 | #ifdef CONFIG_HAS_DMA | ||
| 54 | |||
| 53 | int usb_gadget_map_request(struct usb_gadget *gadget, | 55 | int usb_gadget_map_request(struct usb_gadget *gadget, |
| 54 | struct usb_request *req, int is_in) | 56 | struct usb_request *req, int is_in) |
| 55 | { | 57 | { |
| @@ -99,13 +101,15 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget, | |||
| 99 | } | 101 | } |
| 100 | EXPORT_SYMBOL_GPL(usb_gadget_unmap_request); | 102 | EXPORT_SYMBOL_GPL(usb_gadget_unmap_request); |
| 101 | 103 | ||
| 104 | #endif /* CONFIG_HAS_DMA */ | ||
| 105 | |||
| 102 | /* ------------------------------------------------------------------------- */ | 106 | /* ------------------------------------------------------------------------- */ |
| 103 | 107 | ||
| 104 | void usb_gadget_set_state(struct usb_gadget *gadget, | 108 | void usb_gadget_set_state(struct usb_gadget *gadget, |
| 105 | enum usb_device_state state) | 109 | enum usb_device_state state) |
| 106 | { | 110 | { |
| 107 | gadget->state = state; | 111 | gadget->state = state; |
| 108 | sysfs_notify(&gadget->dev.kobj, NULL, "status"); | 112 | sysfs_notify(&gadget->dev.kobj, NULL, "state"); |
| 109 | } | 113 | } |
| 110 | EXPORT_SYMBOL_GPL(usb_gadget_set_state); | 114 | EXPORT_SYMBOL_GPL(usb_gadget_set_state); |
| 111 | 115 | ||
| @@ -194,9 +198,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
| 194 | dev_set_name(&gadget->dev, "gadget"); | 198 | dev_set_name(&gadget->dev, "gadget"); |
| 195 | gadget->dev.parent = parent; | 199 | gadget->dev.parent = parent; |
| 196 | 200 | ||
| 201 | #ifdef CONFIG_HAS_DMA | ||
| 197 | dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask); | 202 | dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask); |
| 198 | gadget->dev.dma_parms = parent->dma_parms; | 203 | gadget->dev.dma_parms = parent->dma_parms; |
| 199 | gadget->dev.dma_mask = parent->dma_mask; | 204 | gadget->dev.dma_mask = parent->dma_mask; |
| 205 | #endif | ||
| 200 | 206 | ||
| 201 | if (release) | 207 | if (release) |
| 202 | gadget->dev.release = release; | 208 | gadget->dev.release = release; |
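Two independent fixes land in udc-core.c. The CONFIG_HAS_DMA guards let the UDC core build on architectures without DMA support by compiling out the request-mapping helpers and the DMA-mask inheritance. Separately, sysfs_notify() identifies the attribute by name, and the gadget device exposes a file named state (backing gadget->state), so notifying "status" never woke up anyone polling it; the corrected call is simply:

    /* the name must match the sysfs attribute userspace actually polls */
    sysfs_notify(&gadget->dev.kobj, NULL, "state");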
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 2b702772d04d..6dce37555c4f 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
| @@ -874,6 +874,7 @@ static int ehci_hub_control ( | |||
| 874 | ehci->reset_done[wIndex] = jiffies | 874 | ehci->reset_done[wIndex] = jiffies |
| 875 | + msecs_to_jiffies(20); | 875 | + msecs_to_jiffies(20); |
| 876 | usb_hcd_start_port_resume(&hcd->self, wIndex); | 876 | usb_hcd_start_port_resume(&hcd->self, wIndex); |
| 877 | set_bit(wIndex, &ehci->resuming_ports); | ||
| 877 | /* check the port again */ | 878 | /* check the port again */ |
| 878 | mod_timer(&ehci_to_hcd(ehci)->rh_timer, | 879 | mod_timer(&ehci_to_hcd(ehci)->rh_timer, |
| 879 | ehci->reset_done[wIndex]); | 880 | ehci->reset_done[wIndex]); |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index f80d0330d548..8e3c878f38cf 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
| @@ -1391,21 +1391,20 @@ iso_stream_schedule ( | |||
| 1391 | 1391 | ||
| 1392 | /* Behind the scheduling threshold? */ | 1392 | /* Behind the scheduling threshold? */ |
| 1393 | if (unlikely(start < next)) { | 1393 | if (unlikely(start < next)) { |
| 1394 | unsigned now2 = (now - base) & (mod - 1); | ||
| 1394 | 1395 | ||
| 1395 | /* USB_ISO_ASAP: Round up to the first available slot */ | 1396 | /* USB_ISO_ASAP: Round up to the first available slot */ |
| 1396 | if (urb->transfer_flags & URB_ISO_ASAP) | 1397 | if (urb->transfer_flags & URB_ISO_ASAP) |
| 1397 | start += (next - start + period - 1) & -period; | 1398 | start += (next - start + period - 1) & -period; |
| 1398 | 1399 | ||
| 1399 | /* | 1400 | /* |
| 1400 | * Not ASAP: Use the next slot in the stream. If | 1401 | * Not ASAP: Use the next slot in the stream, |
| 1401 | * the entire URB falls before the threshold, fail. | 1402 | * no matter what. |
| 1402 | */ | 1403 | */ |
| 1403 | else if (start + span - period < next) { | 1404 | else if (start + span - period < now2) { |
| 1404 | ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", | 1405 | ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", |
| 1405 | urb, start + base, | 1406 | urb, start + base, |
| 1406 | span - period, next + base); | 1407 | span - period, now2 + base); |
| 1407 | status = -EXDEV; | ||
| 1408 | goto fail; | ||
| 1409 | } | 1408 | } |
| 1410 | } | 1409 | } |
| 1411 | 1410 | ||
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 08613e241894..279b04910f00 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
| @@ -304,6 +304,13 @@ static int __init ohci_pci_init(void) | |||
| 304 | pr_info("%s: " DRIVER_DESC "\n", hcd_name); | 304 | pr_info("%s: " DRIVER_DESC "\n", hcd_name); |
| 305 | 305 | ||
| 306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); | 306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); |
| 307 | |||
| 308 | #ifdef CONFIG_PM | ||
| 309 | /* Entries for the PCI suspend/resume callbacks are special */ | ||
| 310 | ohci_pci_hc_driver.pci_suspend = ohci_suspend; | ||
| 311 | ohci_pci_hc_driver.pci_resume = ohci_resume; | ||
| 312 | #endif | ||
| 313 | |||
| 307 | return pci_register_driver(&ohci_pci_driver); | 314 | return pci_register_driver(&ohci_pci_driver); |
| 308 | } | 315 | } |
| 309 | module_init(ohci_pci_init); | 316 | module_init(ohci_pci_init); |
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index 4b8a2092432f..978c849f9c9a 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h | |||
| @@ -13,6 +13,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); | |||
| 13 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); | 13 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); |
| 14 | void sb800_prefetch(struct device *dev, int on); | 14 | void sb800_prefetch(struct device *dev, int on); |
| 15 | #else | 15 | #else |
| 16 | struct pci_dev; | ||
| 16 | static inline void usb_amd_quirk_pll_disable(void) {} | 17 | static inline void usb_amd_quirk_pll_disable(void) {} |
| 17 | static inline void usb_amd_quirk_pll_enable(void) {} | 18 | static inline void usb_amd_quirk_pll_enable(void) {} |
| 18 | static inline void usb_amd_dev_put(void) {} | 19 | static inline void usb_amd_dev_put(void) {} |
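The stub side of pci-quirks.h is used in configurations that never include <linux/pci.h>, so prototypes taking struct pci_dev * drew "'struct pci_dev' declared inside parameter list" warnings. A forward declaration of the incomplete type is all a pointer parameter needs; sketched below with one of the header's stubs (its exact body is assumed, following the pattern of the inline stubs shown above):

    struct pci_dev;         /* incomplete type: enough for pointer parameters */

    static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}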
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index df6978abd7e6..6f8c2fd47675 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/pci.h> | 24 | #include <linux/pci.h> |
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/dmapool.h> | 26 | #include <linux/dmapool.h> |
| 27 | #include <linux/dma-mapping.h> | ||
| 27 | 28 | ||
| 28 | #include "xhci.h" | 29 | #include "xhci.h" |
| 29 | 30 | ||
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index cc24e39b97d5..f00cb203faea 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -93,7 +93,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 93 | } | 93 | } |
| 94 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 94 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
| 95 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { | 95 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { |
| 96 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | ||
| 97 | xhci->quirks |= XHCI_EP_LIMIT_QUIRK; | 96 | xhci->quirks |= XHCI_EP_LIMIT_QUIRK; |
| 98 | xhci->limit_active_eps = 64; | 97 | xhci->limit_active_eps = 64; |
| 99 | xhci->quirks |= XHCI_SW_BW_CHECKING; | 98 | xhci->quirks |= XHCI_SW_BW_CHECKING; |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1e57eafa6910..5b08cd85f8e7 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -434,7 +434,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, | |||
| 434 | 434 | ||
| 435 | /* A ring has pending URBs if its TD list is not empty */ | 435 | /* A ring has pending URBs if its TD list is not empty */ |
| 436 | if (!(ep->ep_state & EP_HAS_STREAMS)) { | 436 | if (!(ep->ep_state & EP_HAS_STREAMS)) { |
| 437 | if (!(list_empty(&ep->ring->td_list))) | 437 | if (ep->ring && !(list_empty(&ep->ring->td_list))) |
| 438 | xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); | 438 | xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); |
| 439 | return; | 439 | return; |
| 440 | } | 440 | } |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2c49f00260ca..9478caa2f71f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/dmi.h> | 29 | #include <linux/dmi.h> |
| 30 | #include <linux/dma-mapping.h> | ||
| 30 | 31 | ||
| 31 | #include "xhci.h" | 32 | #include "xhci.h" |
| 32 | 33 | ||
| @@ -329,7 +330,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci) | |||
| 329 | return; | 330 | return; |
| 330 | } | 331 | } |
| 331 | 332 | ||
| 332 | static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) | 333 | static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) |
| 333 | { | 334 | { |
| 334 | int i; | 335 | int i; |
| 335 | 336 | ||
| @@ -1181,9 +1182,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, | |||
| 1181 | } | 1182 | } |
| 1182 | 1183 | ||
| 1183 | xhci = hcd_to_xhci(hcd); | 1184 | xhci = hcd_to_xhci(hcd); |
| 1184 | if (xhci->xhc_state & XHCI_STATE_HALTED) | ||
| 1185 | return -ENODEV; | ||
| 1186 | |||
| 1187 | if (check_virt_dev) { | 1185 | if (check_virt_dev) { |
| 1188 | if (!udev->slot_id || !xhci->devs[udev->slot_id]) { | 1186 | if (!udev->slot_id || !xhci->devs[udev->slot_id]) { |
| 1189 | printk(KERN_DEBUG "xHCI %s called with unaddressed " | 1187 | printk(KERN_DEBUG "xHCI %s called with unaddressed " |
| @@ -1199,6 +1197,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, | |||
| 1199 | } | 1197 | } |
| 1200 | } | 1198 | } |
| 1201 | 1199 | ||
| 1200 | if (xhci->xhc_state & XHCI_STATE_HALTED) | ||
| 1201 | return -ENODEV; | ||
| 1202 | |||
| 1202 | return 1; | 1203 | return 1; |
| 1203 | } | 1204 | } |
| 1204 | 1205 | ||
| @@ -3898,7 +3899,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) | |||
| 3898 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the | 3899 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the |
| 3899 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. | 3900 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. |
| 3900 | */ | 3901 | */ |
| 3901 | static int xhci_change_max_exit_latency(struct xhci_hcd *xhci, | 3902 | static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, |
| 3902 | struct usb_device *udev, u16 max_exit_latency) | 3903 | struct usb_device *udev, u16 max_exit_latency) |
| 3903 | { | 3904 | { |
| 3904 | struct xhci_virt_device *virt_dev; | 3905 | struct xhci_virt_device *virt_dev; |
| @@ -4892,6 +4893,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
| 4892 | 4893 | ||
| 4893 | get_quirks(dev, xhci); | 4894 | get_quirks(dev, xhci); |
| 4894 | 4895 | ||
| 4896 | /* In xhci controllers which follow xhci 1.0 spec gives a spurious | ||
| 4897 | * success event after a short transfer. This quirk will ignore such | ||
| 4898 | * spurious event. | ||
| 4899 | */ | ||
| 4900 | if (xhci->hci_version > 0x96) | ||
| 4901 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | ||
| 4902 | |||
| 4895 | /* Make sure the HC is halted. */ | 4903 | /* Make sure the HC is halted. */ |
| 4896 | retval = xhci_halt(xhci); | 4904 | retval = xhci_halt(xhci); |
| 4897 | if (retval) | 4905 | if (retval) |
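Together with the xhci-pci.c hunk above, this moves the XHCI_SPURIOUS_SUCCESS quirk from a single PCI ID (Intel Panther Point) to every controller newer than the 0.96 spec, since the spurious success event after a short transfer is generic xHCI 1.0 behaviour rather than one vendor's. hci_version holds the BCD-encoded HCIVERSION register, so the test is a plain numeric comparison:

    /* 0x96 == xHCI 0.96, 0x100 == xHCI 1.0 (BCD-encoded HCIVERSION) */
    if (xhci->hci_version > 0x96)
            xhci->quirks |= XHCI_SPURIOUS_SUCCESS;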
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index eb3c8c142fa9..eeb27208c0d1 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c | |||
| @@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface, | |||
| 830 | 830 | ||
| 831 | /* let the user know what node this device is now attached to */ | 831 | /* let the user know what node this device is now attached to */ |
| 832 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", | 832 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", |
| 833 | udev->descriptor.idProduct, dev->serial_number, | 833 | le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, |
| 834 | (dev->minor - ADU_MINOR_BASE)); | 834 | (dev->minor - ADU_MINOR_BASE)); |
| 835 | exit: | 835 | exit: |
| 836 | dbg(2, " %s : leave, return value %p (dev)", __func__, dev); | 836 | dbg(2, " %s : leave, return value %p (dev)", __func__, dev); |
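USB descriptors are little-endian on the wire, so multi-byte fields such as idProduct are __le16 and must be converted before use: printing the raw value happens to look right on little-endian machines but shows byte-swapped IDs on big-endian ones (and sparse flags the type mismatch). The conversion in isolation:

    __le16 raw = udev->descriptor.idProduct;   /* wire (little-endian) order */
    u16 pid = le16_to_cpu(raw);                /* no-op on LE, byte swap on BE */

    dev_info(&interface->dev, "product id %04x\n", pid);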
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index c21386ec5d35..de98906f786d 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c | |||
| @@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = { | |||
| 3247 | { USB_DEVICE(0x0711, 0x0903) }, | 3247 | { USB_DEVICE(0x0711, 0x0903) }, |
| 3248 | { USB_DEVICE(0x0711, 0x0918) }, | 3248 | { USB_DEVICE(0x0711, 0x0918) }, |
| 3249 | { USB_DEVICE(0x0711, 0x0920) }, | 3249 | { USB_DEVICE(0x0711, 0x0920) }, |
| 3250 | { USB_DEVICE(0x0711, 0x0950) }, | ||
| 3250 | { USB_DEVICE(0x182d, 0x021c) }, | 3251 | { USB_DEVICE(0x182d, 0x021c) }, |
| 3251 | { USB_DEVICE(0x182d, 0x0269) }, | 3252 | { USB_DEVICE(0x182d, 0x0269) }, |
| 3252 | { } | 3253 | { } |
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 6708a3b78ad8..f44e8b5e00c9 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
| @@ -481,7 +481,7 @@ static u64 omap2430_dmamask = DMA_BIT_MASK(32); | |||
| 481 | 481 | ||
| 482 | static int omap2430_probe(struct platform_device *pdev) | 482 | static int omap2430_probe(struct platform_device *pdev) |
| 483 | { | 483 | { |
| 484 | struct resource musb_resources[2]; | 484 | struct resource musb_resources[3]; |
| 485 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | 485 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; |
| 486 | struct omap_musb_board_data *data; | 486 | struct omap_musb_board_data *data; |
| 487 | struct platform_device *musb; | 487 | struct platform_device *musb; |
| @@ -581,6 +581,11 @@ static int omap2430_probe(struct platform_device *pdev) | |||
| 581 | musb_resources[1].end = pdev->resource[1].end; | 581 | musb_resources[1].end = pdev->resource[1].end; |
| 582 | musb_resources[1].flags = pdev->resource[1].flags; | 582 | musb_resources[1].flags = pdev->resource[1].flags; |
| 583 | 583 | ||
| 584 | musb_resources[2].name = pdev->resource[2].name; | ||
| 585 | musb_resources[2].start = pdev->resource[2].start; | ||
| 586 | musb_resources[2].end = pdev->resource[2].end; | ||
| 587 | musb_resources[2].flags = pdev->resource[2].flags; | ||
| 588 | |||
| 584 | ret = platform_device_add_resources(musb, musb_resources, | 589 | ret = platform_device_add_resources(musb, musb_resources, |
| 585 | ARRAY_SIZE(musb_resources)); | 590 | ARRAY_SIZE(musb_resources)); |
| 586 | if (ret) { | 591 | if (ret) { |
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 2c06a8969a9f..6f8a9ca96ae7 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c | |||
| @@ -1156,7 +1156,7 @@ static u64 tusb_dmamask = DMA_BIT_MASK(32); | |||
| 1156 | 1156 | ||
| 1157 | static int tusb_probe(struct platform_device *pdev) | 1157 | static int tusb_probe(struct platform_device *pdev) |
| 1158 | { | 1158 | { |
| 1159 | struct resource musb_resources[2]; | 1159 | struct resource musb_resources[3]; |
| 1160 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; | 1160 | struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; |
| 1161 | struct platform_device *musb; | 1161 | struct platform_device *musb; |
| 1162 | struct tusb6010_glue *glue; | 1162 | struct tusb6010_glue *glue; |
| @@ -1199,6 +1199,11 @@ static int tusb_probe(struct platform_device *pdev) | |||
| 1199 | musb_resources[1].end = pdev->resource[1].end; | 1199 | musb_resources[1].end = pdev->resource[1].end; |
| 1200 | musb_resources[1].flags = pdev->resource[1].flags; | 1200 | musb_resources[1].flags = pdev->resource[1].flags; |
| 1201 | 1201 | ||
| 1202 | musb_resources[2].name = pdev->resource[2].name; | ||
| 1203 | musb_resources[2].start = pdev->resource[2].start; | ||
| 1204 | musb_resources[2].end = pdev->resource[2].end; | ||
| 1205 | musb_resources[2].flags = pdev->resource[2].flags; | ||
| 1206 | |||
| 1202 | ret = platform_device_add_resources(musb, musb_resources, | 1207 | ret = platform_device_add_resources(musb, musb_resources, |
| 1203 | ARRAY_SIZE(musb_resources)); | 1208 | ARRAY_SIZE(musb_resources)); |
| 1204 | if (ret) { | 1209 | if (ret) { |
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h index ca266280895d..e1859b8ef567 100644 --- a/drivers/usb/phy/phy-fsl-usb.h +++ b/drivers/usb/phy/phy-fsl-usb.h | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | * 675 Mass Ave, Cambridge, MA 02139, USA. | 15 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include "otg_fsm.h" | 18 | #include "phy-fsm-usb.h" |
| 19 | #include <linux/usb/otg.h> | 19 | #include <linux/usb/otg.h> |
| 20 | #include <linux/ioctl.h> | 20 | #include <linux/ioctl.h> |
| 21 | 21 | ||
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c index c520b3548e7c..7f4596606e18 100644 --- a/drivers/usb/phy/phy-fsm-usb.c +++ b/drivers/usb/phy/phy-fsm-usb.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include <linux/usb/gadget.h> | 29 | #include <linux/usb/gadget.h> |
| 30 | #include <linux/usb/otg.h> | 30 | #include <linux/usb/otg.h> |
| 31 | 31 | ||
| 32 | #include "phy-otg-fsm.h" | 32 | #include "phy-fsm-usb.h" |
| 33 | 33 | ||
| 34 | /* Change USB protocol when there is a protocol change */ | 34 | /* Change USB protocol when there is a protocol change */ |
| 35 | static int otg_set_protocol(struct otg_fsm *fsm, int protocol) | 35 | static int otg_set_protocol(struct otg_fsm *fsm, int protocol) |
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c index efe6e1464f45..a2fb30bbb971 100644 --- a/drivers/usb/phy/phy-omap-usb3.c +++ b/drivers/usb/phy/phy-omap-usb3.c | |||
| @@ -71,9 +71,9 @@ static struct usb_dpll_params omap_usb3_dpll_params[NUM_SYS_CLKS] = { | |||
| 71 | {1250, 5, 4, 20, 0}, /* 12 MHz */ | 71 | {1250, 5, 4, 20, 0}, /* 12 MHz */ |
| 72 | {3125, 20, 4, 20, 0}, /* 16.8 MHz */ | 72 | {3125, 20, 4, 20, 0}, /* 16.8 MHz */ |
| 73 | {1172, 8, 4, 20, 65537}, /* 19.2 MHz */ | 73 | {1172, 8, 4, 20, 65537}, /* 19.2 MHz */ |
| 74 | {1000, 7, 4, 10, 0}, /* 20 MHz */ | ||
| 74 | {1250, 12, 4, 20, 0}, /* 26 MHz */ | 75 | {1250, 12, 4, 20, 0}, /* 26 MHz */ |
| 75 | {3125, 47, 4, 20, 92843}, /* 38.4 MHz */ | 76 | {3125, 47, 4, 20, 92843}, /* 38.4 MHz */ |
| 76 | {1000, 7, 4, 10, 0}, /* 20 MHz */ | ||
| 77 | 77 | ||
| 78 | }; | 78 | }; |
| 79 | 79 | ||
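omap_usb3_dpll_params[] is sized and indexed by a sys-clk rate enum (NUM_SYS_CLKS entries), so row order has to match the enum's order rather than merely listing every rate: with the 20 MHz row parked after 38.4 MHz, lookups for the 20, 26 and 38.4 MHz reference clocks each returned a neighbour's DPLL settings. A sketch of the kind of enum the table must track (the shape is assumed; the enum itself is outside this hunk):

    enum sys_clk_rate {
            CLK_RATE_12MHZ,         /* pairs with row 0 of the table */
            CLK_RATE_16MHZ8,
            CLK_RATE_19MHZ2,
            CLK_RATE_20MHZ,         /* the row this patch moves into place */
            CLK_RATE_26MHZ,
            CLK_RATE_38MHZ4,
            NUM_SYS_CLKS,
    };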
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c index 1011c16ade7e..758b86d0fcb3 100644 --- a/drivers/usb/phy/phy-samsung-usb2.c +++ b/drivers/usb/phy/phy-samsung-usb2.c | |||
| @@ -388,7 +388,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev) | |||
| 388 | clk = devm_clk_get(dev, "otg"); | 388 | clk = devm_clk_get(dev, "otg"); |
| 389 | 389 | ||
| 390 | if (IS_ERR(clk)) { | 390 | if (IS_ERR(clk)) { |
| 391 | dev_err(dev, "Failed to get otg clock\n"); | 391 | dev_err(dev, "Failed to get usbhost/otg clock\n"); |
| 392 | return PTR_ERR(clk); | 392 | return PTR_ERR(clk); |
| 393 | } | 393 | } |
| 394 | 394 | ||
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index ed4949faa70d..805940c37353 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
| @@ -855,10 +855,6 @@ static int usbhsg_gadget_stop(struct usb_gadget *gadget, | |||
| 855 | struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); | 855 | struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); |
| 856 | struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); | 856 | struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); |
| 857 | 857 | ||
| 858 | if (!driver || | ||
| 859 | !driver->unbind) | ||
| 860 | return -EINVAL; | ||
| 861 | |||
| 862 | usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); | 858 | usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); |
| 863 | gpriv->driver = NULL; | 859 | gpriv->driver = NULL; |
| 864 | 860 | ||
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 8c3a42ea910c..7eef9b33fde6 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig | |||
| @@ -719,6 +719,13 @@ config USB_SERIAL_FLASHLOADER | |||
| 719 | To compile this driver as a module, choose M here: the | 719 | To compile this driver as a module, choose M here: the |
| 720 | module will be called flashloader. | 720 | module will be called flashloader. |
| 721 | 721 | ||
| 722 | config USB_SERIAL_SUUNTO | ||
| 723 | tristate "USB Suunto ANT+ driver" | ||
| 724 | help | ||
| 725 | Say Y here if you want to use the Suunto ANT+ USB device. | ||
| 726 | |||
| 727 | To compile this driver as a module, choose M here: the | ||
| 728 | module will be called suunto. | ||
| 722 | 729 | ||
| 723 | config USB_SERIAL_DEBUG | 730 | config USB_SERIAL_DEBUG |
| 724 | tristate "USB Debugging Device" | 731 | tristate "USB Debugging Device" |
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile index f7130114488f..a14a870d993f 100644 --- a/drivers/usb/serial/Makefile +++ b/drivers/usb/serial/Makefile | |||
| @@ -54,6 +54,7 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o | |||
| 54 | obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o | 54 | obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o |
| 55 | obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o | 55 | obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o |
| 56 | obj-$(CONFIG_USB_SERIAL_SSU100) += ssu100.o | 56 | obj-$(CONFIG_USB_SERIAL_SSU100) += ssu100.o |
| 57 | obj-$(CONFIG_USB_SERIAL_SUUNTO) += suunto.o | ||
| 57 | obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o | 58 | obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o |
| 58 | obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o | 59 | obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o |
| 59 | obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o | 60 | obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index d6ef2f8da37d..0eae4ba3760e 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = { | |||
| 53 | { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ | 53 | { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ |
| 54 | { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ | 54 | { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ |
| 55 | { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ | 55 | { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ |
| 56 | { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ | ||
| 56 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ | 57 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ |
| 57 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ | 58 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ |
| 58 | { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ | 59 | { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ |
| @@ -118,6 +119,8 @@ static const struct usb_device_id id_table[] = { | |||
| 118 | { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ | 119 | { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ |
| 119 | { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ | 120 | { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ |
| 120 | { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ | 121 | { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ |
| 122 | { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ | ||
| 123 | { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ | ||
| 121 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 124 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
| 122 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 125 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
| 123 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ | 126 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ |
| @@ -148,6 +151,7 @@ static const struct usb_device_id id_table[] = { | |||
| 148 | { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ | 151 | { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ |
| 149 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | 152 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
| 150 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | 153 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
| 154 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ | ||
| 151 | { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ | 155 | { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ |
| 152 | { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ | 156 | { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ |
| 153 | { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ | 157 | { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 7260ec660347..b65e657c641d 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -735,9 +735,34 @@ static struct usb_device_id id_table_combined [] = { | |||
| 735 | { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), | 735 | { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), |
| 736 | .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, | 736 | .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, |
| 737 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, | 737 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, |
| 738 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, | 738 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, |
| 739 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, | 739 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, |
| 740 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, | 740 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, |
| 741 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) }, | ||
| 742 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) }, | ||
| 743 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) }, | ||
| 744 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) }, | ||
| 745 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) }, | ||
| 746 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) }, | ||
| 747 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) }, | ||
| 748 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) }, | ||
| 749 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) }, | ||
| 750 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) }, | ||
| 751 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) }, | ||
| 752 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) }, | ||
| 753 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) }, | ||
| 754 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) }, | ||
| 755 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) }, | ||
| 756 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) }, | ||
| 757 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) }, | ||
| 758 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) }, | ||
| 759 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) }, | ||
| 760 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) }, | ||
| 761 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) }, | ||
| 762 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) }, | ||
| 763 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) }, | ||
| 764 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) }, | ||
| 765 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) }, | ||
| 741 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, | 766 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, |
| 742 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, | 767 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, |
| 743 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, | 768 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 6dd79253205d..1b8af461b522 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -815,11 +815,35 @@ | |||
| 815 | /* | 815 | /* |
| 816 | * RT Systems programming cables for various ham radios | 816 | * RT Systems programming cables for various ham radios |
| 817 | */ | 817 | */ |
| 818 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ | 818 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ |
| 819 | #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ | 819 | #define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */ |
| 820 | #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ | 820 | #define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */ |
| 821 | #define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ | 821 | #define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */ |
| 822 | 822 | #define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */ | |
| 823 | #define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */ | ||
| 824 | #define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */ | ||
| 825 | #define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */ | ||
| 826 | #define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/ | ||
| 827 | #define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/ | ||
| 828 | #define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/ | ||
| 829 | #define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/ | ||
| 830 | #define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/ | ||
| 831 | #define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/ | ||
| 832 | #define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/ | ||
| 833 | #define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/ | ||
| 834 | #define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/ | ||
| 835 | #define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/ | ||
| 836 | #define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/ | ||
| 837 | #define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/ | ||
| 838 | #define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/ | ||
| 839 | #define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/ | ||
| 840 | #define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/ | ||
| 841 | #define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/ | ||
| 842 | #define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/ | ||
| 843 | #define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/ | ||
| 844 | #define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/ | ||
| 845 | #define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/ | ||
| 846 | #define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/ | ||
| 823 | 847 | ||
| 824 | /* | 848 | /* |
| 825 | * Physik Instrumente | 849 | * Physik Instrumente |
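The RT Systems update above replaces three ad-hoc defines with the vendor's full cable lineup; each new `*_PID` only takes effect through a matching `USB_DEVICE()` entry in the ftdi_sio id table, which is what the preceding hunk adds. A minimal sketch of that pairing in a usb-serial driver, with placeholder IDs rather than shipped hardware:

```c
/* Sketch: how a VID/PID define pairs with a match-table entry.
 * EXAMPLE_VID/EXAMPLE_PID are placeholders, not real hardware IDs. */
#define EXAMPLE_VID	0x2100
#define EXAMPLE_PID	0x9e70

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);	/* enables module autoload */
```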
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 5a979729f8ec..58c17fdc85eb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c | |||
| @@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial) | |||
| 2303 | if (d_details == NULL) { | 2303 | if (d_details == NULL) { |
| 2304 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", | 2304 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", |
| 2305 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); | 2305 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); |
| 2306 | return 1; | 2306 | return -ENODEV; |
| 2307 | } | 2307 | } |
| 2308 | 2308 | ||
| 2309 | /* Setup private data for serial driver */ | 2309 | /* Setup private data for serial driver */ |
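The keyspan fix matters because the usb-serial core interprets a startup result as an errno: `0` means success and a negative value aborts the bind, but the old `return 1` propagated a positive number as if it were a valid error code. A hedged sketch of the convention the fix restores:

```c
/* Sketch: attach/startup callbacks fail with a negative errno;
 * example_lookup_details() is a hypothetical helper. */
static int example_startup(struct usb_serial *serial)
{
	if (!example_lookup_details(serial))
		return -ENODEV;	/* unknown product: fail the bind cleanly */

	return 0;
}
```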
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 51da424327b0..b01300164fc0 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
| @@ -90,6 +90,7 @@ struct urbtracker { | |||
| 90 | struct list_head urblist_entry; | 90 | struct list_head urblist_entry; |
| 91 | struct kref ref_count; | 91 | struct kref ref_count; |
| 92 | struct urb *urb; | 92 | struct urb *urb; |
| 93 | struct usb_ctrlrequest *setup; | ||
| 93 | }; | 94 | }; |
| 94 | 95 | ||
| 95 | enum mos7715_pp_modes { | 96 | enum mos7715_pp_modes { |
| @@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref) | |||
| 271 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; | 272 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; |
| 272 | 273 | ||
| 273 | usb_free_urb(urbtrack->urb); | 274 | usb_free_urb(urbtrack->urb); |
| 275 | kfree(urbtrack->setup); | ||
| 274 | kfree(urbtrack); | 276 | kfree(urbtrack); |
| 275 | kref_put(&mos_parport->ref_count, destroy_mos_parport); | 277 | kref_put(&mos_parport->ref_count, destroy_mos_parport); |
| 276 | } | 278 | } |
| @@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
| 355 | struct urbtracker *urbtrack; | 357 | struct urbtracker *urbtrack; |
| 356 | int ret_val; | 358 | int ret_val; |
| 357 | unsigned long flags; | 359 | unsigned long flags; |
| 358 | struct usb_ctrlrequest setup; | ||
| 359 | struct usb_serial *serial = mos_parport->serial; | 360 | struct usb_serial *serial = mos_parport->serial; |
| 360 | struct usb_device *usbdev = serial->dev; | 361 | struct usb_device *usbdev = serial->dev; |
| 361 | 362 | ||
| @@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
| 373 | kfree(urbtrack); | 374 | kfree(urbtrack); |
| 374 | return -ENOMEM; | 375 | return -ENOMEM; |
| 375 | } | 376 | } |
| 376 | setup.bRequestType = (__u8)0x40; | 377 | urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); |
| 377 | setup.bRequest = (__u8)0x0e; | 378 | if (!urbtrack->setup) { |
| 378 | setup.wValue = get_reg_value(reg, dummy); | 379 | usb_free_urb(urbtrack->urb); |
| 379 | setup.wIndex = get_reg_index(reg); | 380 | kfree(urbtrack); |
| 380 | setup.wLength = 0; | 381 | return -ENOMEM; |
| 382 | } | ||
| 383 | urbtrack->setup->bRequestType = (__u8)0x40; | ||
| 384 | urbtrack->setup->bRequest = (__u8)0x0e; | ||
| 385 | urbtrack->setup->wValue = get_reg_value(reg, dummy); | ||
| 386 | urbtrack->setup->wIndex = get_reg_index(reg); | ||
| 387 | urbtrack->setup->wLength = 0; | ||
| 381 | usb_fill_control_urb(urbtrack->urb, usbdev, | 388 | usb_fill_control_urb(urbtrack->urb, usbdev, |
| 382 | usb_sndctrlpipe(usbdev, 0), | 389 | usb_sndctrlpipe(usbdev, 0), |
| 383 | (unsigned char *)&setup, | 390 | (unsigned char *)urbtrack->setup, |
| 384 | NULL, 0, async_complete, urbtrack); | 391 | NULL, 0, async_complete, urbtrack); |
| 385 | kref_init(&urbtrack->ref_count); | 392 | kref_init(&urbtrack->ref_count); |
| 386 | INIT_LIST_HEAD(&urbtrack->urblist_entry); | 393 | INIT_LIST_HEAD(&urbtrack->urblist_entry); |
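The mos7720 change fixes a use-after-free: `usb_fill_control_urb()` only records a pointer to the setup packet, and the URB completes asynchronously, so a `struct usb_ctrlrequest` on the submitter's stack can go out of scope while the host controller still references it. Tying the packet to the heap-allocated tracker and freeing it from the kref release closes that window. A minimal sketch of the pattern, assuming a simplified tracker and a hypothetical `ctrl_complete()`:

```c
#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: the setup packet must outlive the async control URB. */
struct tracker {
	struct urb *urb;
	struct usb_ctrlrequest *setup;	/* heap; freed only after completion */
};

static void ctrl_complete(struct urb *urb);	/* hypothetical: releases the tracker */

static int submit_vendor_write(struct usb_device *udev, struct tracker *t,
			       u16 value, u16 index)
{
	t->setup = kmalloc(sizeof(*t->setup), GFP_KERNEL);
	if (!t->setup)
		return -ENOMEM;

	t->setup->bRequestType = 0x40;		/* vendor, host-to-device */
	t->setup->bRequest = 0x0e;
	t->setup->wValue = cpu_to_le16(value);
	t->setup->wIndex = cpu_to_le16(index);
	t->setup->wLength = 0;

	usb_fill_control_urb(t->urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)t->setup, NULL, 0,
			     ctrl_complete, t);
	return usb_submit_urb(t->urb, GFP_KERNEL);
}
```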
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 0a818b238508..3bac4693c038 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
| @@ -183,7 +183,10 @@ | |||
| 183 | #define LED_ON_MS 500 | 183 | #define LED_ON_MS 500 |
| 184 | #define LED_OFF_MS 500 | 184 | #define LED_OFF_MS 500 |
| 185 | 185 | ||
| 186 | static int device_type; | 186 | enum mos7840_flag { |
| 187 | MOS7840_FLAG_CTRL_BUSY, | ||
| 188 | MOS7840_FLAG_LED_BUSY, | ||
| 189 | }; | ||
| 187 | 190 | ||
| 188 | static const struct usb_device_id id_table[] = { | 191 | static const struct usb_device_id id_table[] = { |
| 189 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 192 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
| @@ -238,9 +241,12 @@ struct moschip_port { | |||
| 238 | 241 | ||
| 239 | /* For device(s) with LED indicator */ | 242 | /* For device(s) with LED indicator */ |
| 240 | bool has_led; | 243 | bool has_led; |
| 241 | bool led_flag; | ||
| 242 | struct timer_list led_timer1; /* Timer for LED on */ | 244 | struct timer_list led_timer1; /* Timer for LED on */ |
| 243 | struct timer_list led_timer2; /* Timer for LED off */ | 245 | struct timer_list led_timer2; /* Timer for LED off */ |
| 246 | struct urb *led_urb; | ||
| 247 | struct usb_ctrlrequest *led_dr; | ||
| 248 | |||
| 249 | unsigned long flags; | ||
| 244 | }; | 250 | }; |
| 245 | 251 | ||
| 246 | /* | 252 | /* |
| @@ -460,10 +466,10 @@ static void mos7840_control_callback(struct urb *urb) | |||
| 460 | case -ESHUTDOWN: | 466 | case -ESHUTDOWN: |
| 461 | /* this urb is terminated, clean up */ | 467 | /* this urb is terminated, clean up */ |
| 462 | dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status); | 468 | dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status); |
| 463 | return; | 469 | goto out; |
| 464 | default: | 470 | default: |
| 465 | dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status); | 471 | dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status); |
| 466 | return; | 472 | goto out; |
| 467 | } | 473 | } |
| 468 | 474 | ||
| 469 | dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); | 475 | dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); |
| @@ -476,6 +482,8 @@ static void mos7840_control_callback(struct urb *urb) | |||
| 476 | mos7840_handle_new_msr(mos7840_port, regval); | 482 | mos7840_handle_new_msr(mos7840_port, regval); |
| 477 | else if (mos7840_port->MsrLsr == 1) | 483 | else if (mos7840_port->MsrLsr == 1) |
| 478 | mos7840_handle_new_lsr(mos7840_port, regval); | 484 | mos7840_handle_new_lsr(mos7840_port, regval); |
| 485 | out: | ||
| 486 | clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags); | ||
| 479 | } | 487 | } |
| 480 | 488 | ||
| 481 | static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, | 489 | static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, |
| @@ -486,6 +494,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, | |||
| 486 | unsigned char *buffer = mcs->ctrl_buf; | 494 | unsigned char *buffer = mcs->ctrl_buf; |
| 487 | int ret; | 495 | int ret; |
| 488 | 496 | ||
| 497 | if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags)) | ||
| 498 | return -EBUSY; | ||
| 499 | |||
| 489 | dr->bRequestType = MCS_RD_RTYPE; | 500 | dr->bRequestType = MCS_RD_RTYPE; |
| 490 | dr->bRequest = MCS_RDREQ; | 501 | dr->bRequest = MCS_RDREQ; |
| 491 | dr->wValue = cpu_to_le16(Wval); /* 0 */ | 502 | dr->wValue = cpu_to_le16(Wval); /* 0 */ |
| @@ -497,6 +508,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, | |||
| 497 | mos7840_control_callback, mcs); | 508 | mos7840_control_callback, mcs); |
| 498 | mcs->control_urb->transfer_buffer_length = 2; | 509 | mcs->control_urb->transfer_buffer_length = 2; |
| 499 | ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC); | 510 | ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC); |
| 511 | if (ret) | ||
| 512 | clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags); | ||
| 513 | |||
| 500 | return ret; | 514 | return ret; |
| 501 | } | 515 | } |
| 502 | 516 | ||
| @@ -523,7 +537,7 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval, | |||
| 523 | __u16 reg) | 537 | __u16 reg) |
| 524 | { | 538 | { |
| 525 | struct usb_device *dev = mcs->port->serial->dev; | 539 | struct usb_device *dev = mcs->port->serial->dev; |
| 526 | struct usb_ctrlrequest *dr = mcs->dr; | 540 | struct usb_ctrlrequest *dr = mcs->led_dr; |
| 527 | 541 | ||
| 528 | dr->bRequestType = MCS_WR_RTYPE; | 542 | dr->bRequestType = MCS_WR_RTYPE; |
| 529 | dr->bRequest = MCS_WRREQ; | 543 | dr->bRequest = MCS_WRREQ; |
| @@ -531,10 +545,10 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval, | |||
| 531 | dr->wIndex = cpu_to_le16(reg); | 545 | dr->wIndex = cpu_to_le16(reg); |
| 532 | dr->wLength = cpu_to_le16(0); | 546 | dr->wLength = cpu_to_le16(0); |
| 533 | 547 | ||
| 534 | usb_fill_control_urb(mcs->control_urb, dev, usb_sndctrlpipe(dev, 0), | 548 | usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0), |
| 535 | (unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL); | 549 | (unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL); |
| 536 | 550 | ||
| 537 | usb_submit_urb(mcs->control_urb, GFP_ATOMIC); | 551 | usb_submit_urb(mcs->led_urb, GFP_ATOMIC); |
| 538 | } | 552 | } |
| 539 | 553 | ||
| 540 | static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg, | 554 | static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg, |
| @@ -560,7 +574,19 @@ static void mos7840_led_flag_off(unsigned long arg) | |||
| 560 | { | 574 | { |
| 561 | struct moschip_port *mcs = (struct moschip_port *) arg; | 575 | struct moschip_port *mcs = (struct moschip_port *) arg; |
| 562 | 576 | ||
| 563 | mcs->led_flag = false; | 577 | clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags); |
| 578 | } | ||
| 579 | |||
| 580 | static void mos7840_led_activity(struct usb_serial_port *port) | ||
| 581 | { | ||
| 582 | struct moschip_port *mos7840_port = usb_get_serial_port_data(port); | ||
| 583 | |||
| 584 | if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags)) | ||
| 585 | return; | ||
| 586 | |||
| 587 | mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER); | ||
| 588 | mod_timer(&mos7840_port->led_timer1, | ||
| 589 | jiffies + msecs_to_jiffies(LED_ON_MS)); | ||
| 564 | } | 590 | } |
| 565 | 591 | ||
| 566 | /***************************************************************************** | 592 | /***************************************************************************** |
| @@ -758,14 +784,8 @@ static void mos7840_bulk_in_callback(struct urb *urb) | |||
| 758 | return; | 784 | return; |
| 759 | } | 785 | } |
| 760 | 786 | ||
| 761 | /* Turn on LED */ | 787 | if (mos7840_port->has_led) |
| 762 | if (mos7840_port->has_led && !mos7840_port->led_flag) { | 788 | mos7840_led_activity(port); |
| 763 | mos7840_port->led_flag = true; | ||
| 764 | mos7840_set_led_async(mos7840_port, 0x0301, | ||
| 765 | MODEM_CONTROL_REGISTER); | ||
| 766 | mod_timer(&mos7840_port->led_timer1, | ||
| 767 | jiffies + msecs_to_jiffies(LED_ON_MS)); | ||
| 768 | } | ||
| 769 | 789 | ||
| 770 | mos7840_port->read_urb_busy = true; | 790 | mos7840_port->read_urb_busy = true; |
| 771 | retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); | 791 | retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); |
| @@ -816,18 +836,6 @@ static void mos7840_bulk_out_data_callback(struct urb *urb) | |||
| 816 | /************************************************************************/ | 836 | /************************************************************************/ |
| 817 | /* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */ | 837 | /* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */ |
| 818 | /************************************************************************/ | 838 | /************************************************************************/ |
| 819 | #ifdef MCSSerialProbe | ||
| 820 | static int mos7840_serial_probe(struct usb_serial *serial, | ||
| 821 | const struct usb_device_id *id) | ||
| 822 | { | ||
| 823 | |||
| 824 | /*need to implement the mode_reg reading and updating\ | ||
| 825 | structures usb_serial_ device_type\ | ||
| 826 | (i.e num_ports, num_bulkin,bulkout etc) */ | ||
| 827 | /* Also we can update the changes attach */ | ||
| 828 | return 1; | ||
| 829 | } | ||
| 830 | #endif | ||
| 831 | 839 | ||
| 832 | /***************************************************************************** | 840 | /***************************************************************************** |
| 833 | * mos7840_open | 841 | * mos7840_open |
| @@ -905,20 +913,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 905 | status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); | 913 | status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); |
| 906 | if (status < 0) { | 914 | if (status < 0) { |
| 907 | dev_dbg(&port->dev, "Reading Spreg failed\n"); | 915 | dev_dbg(&port->dev, "Reading Spreg failed\n"); |
| 908 | return -1; | 916 | goto err; |
| 909 | } | 917 | } |
| 910 | Data |= 0x80; | 918 | Data |= 0x80; |
| 911 | status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); | 919 | status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); |
| 912 | if (status < 0) { | 920 | if (status < 0) { |
| 913 | dev_dbg(&port->dev, "writing Spreg failed\n"); | 921 | dev_dbg(&port->dev, "writing Spreg failed\n"); |
| 914 | return -1; | 922 | goto err; |
| 915 | } | 923 | } |
| 916 | 924 | ||
| 917 | Data &= ~0x80; | 925 | Data &= ~0x80; |
| 918 | status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); | 926 | status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); |
| 919 | if (status < 0) { | 927 | if (status < 0) { |
| 920 | dev_dbg(&port->dev, "writing Spreg failed\n"); | 928 | dev_dbg(&port->dev, "writing Spreg failed\n"); |
| 921 | return -1; | 929 | goto err; |
| 922 | } | 930 | } |
| 923 | /* End of block to be checked */ | 931 | /* End of block to be checked */ |
| 924 | 932 | ||
| @@ -927,7 +935,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 927 | &Data); | 935 | &Data); |
| 928 | if (status < 0) { | 936 | if (status < 0) { |
| 929 | dev_dbg(&port->dev, "Reading Controlreg failed\n"); | 937 | dev_dbg(&port->dev, "Reading Controlreg failed\n"); |
| 930 | return -1; | 938 | goto err; |
| 931 | } | 939 | } |
| 932 | Data |= 0x08; /* Driver done bit */ | 940 | Data |= 0x08; /* Driver done bit */ |
| 933 | Data |= 0x20; /* rx_disable */ | 941 | Data |= 0x20; /* rx_disable */ |
| @@ -935,7 +943,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 935 | mos7840_port->ControlRegOffset, Data); | 943 | mos7840_port->ControlRegOffset, Data); |
| 936 | if (status < 0) { | 944 | if (status < 0) { |
| 937 | dev_dbg(&port->dev, "writing Controlreg failed\n"); | 945 | dev_dbg(&port->dev, "writing Controlreg failed\n"); |
| 938 | return -1; | 946 | goto err; |
| 939 | } | 947 | } |
| 940 | /* do register settings here */ | 948 | /* do register settings here */ |
| 941 | /* Set all regs to the device default values. */ | 949 | /* Set all regs to the device default values. */ |
| @@ -946,21 +954,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 946 | status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | 954 | status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); |
| 947 | if (status < 0) { | 955 | if (status < 0) { |
| 948 | dev_dbg(&port->dev, "disabling interrupts failed\n"); | 956 | dev_dbg(&port->dev, "disabling interrupts failed\n"); |
| 949 | return -1; | 957 | goto err; |
| 950 | } | 958 | } |
| 951 | /* Set FIFO_CONTROL_REGISTER to the default value */ | 959 | /* Set FIFO_CONTROL_REGISTER to the default value */ |
| 952 | Data = 0x00; | 960 | Data = 0x00; |
| 953 | status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | 961 | status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); |
| 954 | if (status < 0) { | 962 | if (status < 0) { |
| 955 | dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); | 963 | dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); |
| 956 | return -1; | 964 | goto err; |
| 957 | } | 965 | } |
| 958 | 966 | ||
| 959 | Data = 0xcf; | 967 | Data = 0xcf; |
| 960 | status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | 968 | status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); |
| 961 | if (status < 0) { | 969 | if (status < 0) { |
| 962 | dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); | 970 | dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); |
| 963 | return -1; | 971 | goto err; |
| 964 | } | 972 | } |
| 965 | 973 | ||
| 966 | Data = 0x03; | 974 | Data = 0x03; |
| @@ -1103,6 +1111,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 1103 | /* mos7840_change_port_settings(mos7840_port,old_termios); */ | 1111 | /* mos7840_change_port_settings(mos7840_port,old_termios); */ |
| 1104 | 1112 | ||
| 1105 | return 0; | 1113 | return 0; |
| 1114 | err: | ||
| 1115 | for (j = 0; j < NUM_URBS; ++j) { | ||
| 1116 | urb = mos7840_port->write_urb_pool[j]; | ||
| 1117 | if (!urb) | ||
| 1118 | continue; | ||
| 1119 | kfree(urb->transfer_buffer); | ||
| 1120 | usb_free_urb(urb); | ||
| 1121 | } | ||
| 1122 | return status; | ||
| 1106 | } | 1123 | } |
| 1107 | 1124 | ||
| 1108 | /***************************************************************************** | 1125 | /***************************************************************************** |
| @@ -1445,13 +1462,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, | |||
| 1445 | data1 = urb->transfer_buffer; | 1462 | data1 = urb->transfer_buffer; |
| 1446 | dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress); | 1463 | dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress); |
| 1447 | 1464 | ||
| 1448 | /* Turn on LED */ | 1465 | if (mos7840_port->has_led) |
| 1449 | if (mos7840_port->has_led && !mos7840_port->led_flag) { | 1466 | mos7840_led_activity(port); |
| 1450 | mos7840_port->led_flag = true; | ||
| 1451 | mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0301); | ||
| 1452 | mod_timer(&mos7840_port->led_timer1, | ||
| 1453 | jiffies + msecs_to_jiffies(LED_ON_MS)); | ||
| 1454 | } | ||
| 1455 | 1467 | ||
| 1456 | /* send it down the pipe */ | 1468 | /* send it down the pipe */ |
| 1457 | status = usb_submit_urb(urb, GFP_ATOMIC); | 1469 | status = usb_submit_urb(urb, GFP_ATOMIC); |
| @@ -2178,38 +2190,48 @@ static int mos7810_check(struct usb_serial *serial) | |||
| 2178 | return 0; | 2190 | return 0; |
| 2179 | } | 2191 | } |
| 2180 | 2192 | ||
| 2181 | static int mos7840_calc_num_ports(struct usb_serial *serial) | 2193 | static int mos7840_probe(struct usb_serial *serial, |
| 2194 | const struct usb_device_id *id) | ||
| 2182 | { | 2195 | { |
| 2183 | __u16 data = 0x00; | 2196 | u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); |
| 2184 | u8 *buf; | 2197 | u8 *buf; |
| 2185 | int mos7840_num_ports; | 2198 | int device_type; |
| 2199 | |||
| 2200 | if (product == MOSCHIP_DEVICE_ID_7810 || | ||
| 2201 | product == MOSCHIP_DEVICE_ID_7820) { | ||
| 2202 | device_type = product; | ||
| 2203 | goto out; | ||
| 2204 | } | ||
| 2186 | 2205 | ||
| 2187 | buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); | 2206 | buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); |
| 2188 | if (buf) { | 2207 | if (!buf) |
| 2189 | usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), | 2208 | return -ENOMEM; |
| 2209 | |||
| 2210 | usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), | ||
| 2190 | MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, | 2211 | MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, |
| 2191 | VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); | 2212 | VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); |
| 2192 | data = *buf; | ||
| 2193 | kfree(buf); | ||
| 2194 | } | ||
| 2195 | 2213 | ||
| 2196 | if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 || | 2214 | /* For a MCS7840 device GPIO0 must be set to 1 */ |
| 2197 | serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) { | 2215 | if (buf[0] & 0x01) |
| 2198 | device_type = serial->dev->descriptor.idProduct; | 2216 | device_type = MOSCHIP_DEVICE_ID_7840; |
| 2199 | } else { | 2217 | else if (mos7810_check(serial)) |
| 2200 | /* For a MCS7840 device GPIO0 must be set to 1 */ | 2218 | device_type = MOSCHIP_DEVICE_ID_7810; |
| 2201 | if ((data & 0x01) == 1) | 2219 | else |
| 2202 | device_type = MOSCHIP_DEVICE_ID_7840; | 2220 | device_type = MOSCHIP_DEVICE_ID_7820; |
| 2203 | else if (mos7810_check(serial)) | 2221 | |
| 2204 | device_type = MOSCHIP_DEVICE_ID_7810; | 2222 | kfree(buf); |
| 2205 | else | 2223 | out: |
| 2206 | device_type = MOSCHIP_DEVICE_ID_7820; | 2224 | usb_set_serial_data(serial, (void *)(unsigned long)device_type); |
| 2207 | } | 2225 | |
| 2226 | return 0; | ||
| 2227 | } | ||
| 2228 | |||
| 2229 | static int mos7840_calc_num_ports(struct usb_serial *serial) | ||
| 2230 | { | ||
| 2231 | int device_type = (unsigned long)usb_get_serial_data(serial); | ||
| 2232 | int mos7840_num_ports; | ||
| 2208 | 2233 | ||
| 2209 | mos7840_num_ports = (device_type >> 4) & 0x000F; | 2234 | mos7840_num_ports = (device_type >> 4) & 0x000F; |
| 2210 | serial->num_bulk_in = mos7840_num_ports; | ||
| 2211 | serial->num_bulk_out = mos7840_num_ports; | ||
| 2212 | serial->num_ports = mos7840_num_ports; | ||
| 2213 | 2235 | ||
| 2214 | return mos7840_num_ports; | 2236 | return mos7840_num_ports; |
| 2215 | } | 2237 | } |
| @@ -2217,6 +2239,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial) | |||
| 2217 | static int mos7840_port_probe(struct usb_serial_port *port) | 2239 | static int mos7840_port_probe(struct usb_serial_port *port) |
| 2218 | { | 2240 | { |
| 2219 | struct usb_serial *serial = port->serial; | 2241 | struct usb_serial *serial = port->serial; |
| 2242 | int device_type = (unsigned long)usb_get_serial_data(serial); | ||
| 2220 | struct moschip_port *mos7840_port; | 2243 | struct moschip_port *mos7840_port; |
| 2221 | int status; | 2244 | int status; |
| 2222 | int pnum; | 2245 | int pnum; |
| @@ -2392,6 +2415,14 @@ static int mos7840_port_probe(struct usb_serial_port *port) | |||
| 2392 | if (device_type == MOSCHIP_DEVICE_ID_7810) { | 2415 | if (device_type == MOSCHIP_DEVICE_ID_7810) { |
| 2393 | mos7840_port->has_led = true; | 2416 | mos7840_port->has_led = true; |
| 2394 | 2417 | ||
| 2418 | mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
| 2419 | mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr), | ||
| 2420 | GFP_KERNEL); | ||
| 2421 | if (!mos7840_port->led_urb || !mos7840_port->led_dr) { | ||
| 2422 | status = -ENOMEM; | ||
| 2423 | goto error; | ||
| 2424 | } | ||
| 2425 | |||
| 2395 | init_timer(&mos7840_port->led_timer1); | 2426 | init_timer(&mos7840_port->led_timer1); |
| 2396 | mos7840_port->led_timer1.function = mos7840_led_off; | 2427 | mos7840_port->led_timer1.function = mos7840_led_off; |
| 2397 | mos7840_port->led_timer1.expires = | 2428 | mos7840_port->led_timer1.expires = |
| @@ -2404,8 +2435,6 @@ static int mos7840_port_probe(struct usb_serial_port *port) | |||
| 2404 | jiffies + msecs_to_jiffies(LED_OFF_MS); | 2435 | jiffies + msecs_to_jiffies(LED_OFF_MS); |
| 2405 | mos7840_port->led_timer2.data = (unsigned long)mos7840_port; | 2436 | mos7840_port->led_timer2.data = (unsigned long)mos7840_port; |
| 2406 | 2437 | ||
| 2407 | mos7840_port->led_flag = false; | ||
| 2408 | |||
| 2409 | /* Turn off LED */ | 2438 | /* Turn off LED */ |
| 2410 | mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); | 2439 | mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); |
| 2411 | } | 2440 | } |
| @@ -2427,6 +2456,8 @@ out: | |||
| 2427 | } | 2456 | } |
| 2428 | return 0; | 2457 | return 0; |
| 2429 | error: | 2458 | error: |
| 2459 | kfree(mos7840_port->led_dr); | ||
| 2460 | usb_free_urb(mos7840_port->led_urb); | ||
| 2430 | kfree(mos7840_port->dr); | 2461 | kfree(mos7840_port->dr); |
| 2431 | kfree(mos7840_port->ctrl_buf); | 2462 | kfree(mos7840_port->ctrl_buf); |
| 2432 | usb_free_urb(mos7840_port->control_urb); | 2463 | usb_free_urb(mos7840_port->control_urb); |
| @@ -2447,6 +2478,10 @@ static int mos7840_port_remove(struct usb_serial_port *port) | |||
| 2447 | 2478 | ||
| 2448 | del_timer_sync(&mos7840_port->led_timer1); | 2479 | del_timer_sync(&mos7840_port->led_timer1); |
| 2449 | del_timer_sync(&mos7840_port->led_timer2); | 2480 | del_timer_sync(&mos7840_port->led_timer2); |
| 2481 | |||
| 2482 | usb_kill_urb(mos7840_port->led_urb); | ||
| 2483 | usb_free_urb(mos7840_port->led_urb); | ||
| 2484 | kfree(mos7840_port->led_dr); | ||
| 2450 | } | 2485 | } |
| 2451 | usb_kill_urb(mos7840_port->control_urb); | 2486 | usb_kill_urb(mos7840_port->control_urb); |
| 2452 | usb_free_urb(mos7840_port->control_urb); | 2487 | usb_free_urb(mos7840_port->control_urb); |
| @@ -2473,9 +2508,7 @@ static struct usb_serial_driver moschip7840_4port_device = { | |||
| 2473 | .throttle = mos7840_throttle, | 2508 | .throttle = mos7840_throttle, |
| 2474 | .unthrottle = mos7840_unthrottle, | 2509 | .unthrottle = mos7840_unthrottle, |
| 2475 | .calc_num_ports = mos7840_calc_num_ports, | 2510 | .calc_num_ports = mos7840_calc_num_ports, |
| 2476 | #ifdef MCSSerialProbe | 2511 | .probe = mos7840_probe, |
| 2477 | .probe = mos7840_serial_probe, | ||
| 2478 | #endif | ||
| 2479 | .ioctl = mos7840_ioctl, | 2512 | .ioctl = mos7840_ioctl, |
| 2480 | .set_termios = mos7840_set_termios, | 2513 | .set_termios = mos7840_set_termios, |
| 2481 | .break_ctl = mos7840_break, | 2514 | .break_ctl = mos7840_break, |
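The mos7840 rework bundles several related fixes: the racy global `static int device_type` becomes per-device state, detected in the new `.probe` and stashed with `usb_set_serial_data()`; the LED gets its own URB and setup packet instead of borrowing the shared control URB; `mos7840_open()` grows a real error path that unwinds its write-URB pool instead of returning `-1`; and concurrent submitters are serialized with atomic busy bits. A minimal sketch of the busy-bit pattern, with hypothetical `example_*` names:

```c
/* Sketch: one-shot ownership of a shared URB via an atomic bit. */
enum { EXAMPLE_FLAG_CTRL_BUSY };	/* bit number within flags */

struct example_port {
	struct urb *control_urb;
	unsigned long flags;
};

static int example_submit_ctrl(struct example_port *port)
{
	int ret;

	/* Atomically claim the URB (acquire semantics); a second
	 * caller backs off instead of reusing an in-flight URB. */
	if (test_and_set_bit_lock(EXAMPLE_FLAG_CTRL_BUSY, &port->flags))
		return -EBUSY;

	ret = usb_submit_urb(port->control_urb, GFP_ATOMIC);
	if (ret)	/* completion will never run: release here */
		clear_bit_unlock(EXAMPLE_FLAG_CTRL_BUSY, &port->flags);

	return ret;
}
```

The completion handler must drop the bit on every exit path, which is why the early returns in `mos7840_control_callback()` were converted to `goto out` above.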
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5dd857de05b0..1cf6f125f5f0 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -341,17 +341,12 @@ static void option_instat_callback(struct urb *urb); | |||
| 341 | #define OLIVETTI_VENDOR_ID 0x0b3c | 341 | #define OLIVETTI_VENDOR_ID 0x0b3c |
| 342 | #define OLIVETTI_PRODUCT_OLICARD100 0xc000 | 342 | #define OLIVETTI_PRODUCT_OLICARD100 0xc000 |
| 343 | #define OLIVETTI_PRODUCT_OLICARD145 0xc003 | 343 | #define OLIVETTI_PRODUCT_OLICARD145 0xc003 |
| 344 | #define OLIVETTI_PRODUCT_OLICARD200 0xc005 | ||
| 344 | 345 | ||
| 345 | /* Celot products */ | 346 | /* Celot products */ |
| 346 | #define CELOT_VENDOR_ID 0x211f | 347 | #define CELOT_VENDOR_ID 0x211f |
| 347 | #define CELOT_PRODUCT_CT680M 0x6801 | 348 | #define CELOT_PRODUCT_CT680M 0x6801 |
| 348 | 349 | ||
| 349 | /* ONDA Communication vendor id */ | ||
| 350 | #define ONDA_VENDOR_ID 0x1ee8 | ||
| 351 | |||
| 352 | /* ONDA MT825UP HSDPA 14.2 modem */ | ||
| 353 | #define ONDA_MT825UP 0x000b | ||
| 354 | |||
| 355 | /* Samsung products */ | 350 | /* Samsung products */ |
| 356 | #define SAMSUNG_VENDOR_ID 0x04e8 | 351 | #define SAMSUNG_VENDOR_ID 0x04e8 |
| 357 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 | 352 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 |
| @@ -444,7 +439,8 @@ static void option_instat_callback(struct urb *urb); | |||
| 444 | 439 | ||
| 445 | /* Hyundai Petatel Inc. products */ | 440 | /* Hyundai Petatel Inc. products */ |
| 446 | #define PETATEL_VENDOR_ID 0x1ff4 | 441 | #define PETATEL_VENDOR_ID 0x1ff4 |
| 447 | #define PETATEL_PRODUCT_NP10T 0x600e | 442 | #define PETATEL_PRODUCT_NP10T_600A 0x600a |
| 443 | #define PETATEL_PRODUCT_NP10T_600E 0x600e | ||
| 448 | 444 | ||
| 449 | /* TP-LINK Incorporated products */ | 445 | /* TP-LINK Incorporated products */ |
| 450 | #define TPLINK_VENDOR_ID 0x2357 | 446 | #define TPLINK_VENDOR_ID 0x2357 |
| @@ -782,6 +778,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 782 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, | 778 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, |
| 783 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 779 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
| 784 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 780 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
| 781 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ | ||
| 785 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ | 782 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ |
| 786 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 783 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
| 787 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, | 784 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, |
| @@ -817,7 +814,8 @@ static const struct usb_device_id option_ids[] = { | |||
| 817 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), | 814 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), |
| 818 | .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, | 815 | .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, |
| 819 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, | 816 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, |
| 820 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, | 817 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff), |
| 818 | .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, | ||
| 821 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, | 819 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, |
| 822 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), | 820 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), |
| 823 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 821 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| @@ -1256,8 +1254,8 @@ static const struct usb_device_id option_ids[] = { | |||
| 1256 | 1254 | ||
| 1257 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, | 1255 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, |
| 1258 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, | 1256 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, |
| 1257 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) }, | ||
| 1259 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ | 1258 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ |
| 1260 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ | ||
| 1261 | { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ | 1259 | { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ |
| 1262 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, | 1260 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, |
| 1263 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, | 1261 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, |
| @@ -1329,9 +1327,12 @@ static const struct usb_device_id option_ids[] = { | |||
| 1329 | { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, | 1327 | { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, |
| 1330 | { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, | 1328 | { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, |
| 1331 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, | 1329 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, |
| 1332 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, | 1330 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, |
| 1331 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, | ||
| 1333 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), | 1332 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), |
| 1334 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1333 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| 1334 | { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ | ||
| 1335 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | ||
| 1335 | { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, | 1336 | { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, |
| 1336 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ | 1337 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ |
| 1337 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ | 1338 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ |
| @@ -1339,6 +1340,8 @@ static const struct usb_device_id option_ids[] = { | |||
| 1339 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, | 1340 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, |
| 1340 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, | 1341 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, |
| 1341 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, | 1342 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, |
| 1343 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ | ||
| 1344 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ | ||
| 1342 | { } /* Terminating entry */ | 1345 | { } /* Terminating entry */ |
| 1343 | }; | 1346 | }; |
| 1344 | MODULE_DEVICE_TABLE(usb, option_ids); | 1347 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/suunto.c b/drivers/usb/serial/suunto.c new file mode 100644 index 000000000000..2248e7a7d5ad --- /dev/null +++ b/drivers/usb/serial/suunto.c | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | /* | ||
| 2 | * Suunto ANT+ USB Driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Greg Kroah-Hartman <gregkh@linuxfoundation.org> | ||
| 5 | * Copyright (C) 2013 Linux Foundation | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License version 2 as published by | ||
| 9 | * the Free Software Foundation only. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/init.h> | ||
| 14 | #include <linux/tty.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/usb.h> | ||
| 17 | #include <linux/usb/serial.h> | ||
| 18 | #include <linux/uaccess.h> | ||
| 19 | |||
| 20 | static const struct usb_device_id id_table[] = { | ||
| 21 | { USB_DEVICE(0x0fcf, 0x1008) }, | ||
| 22 | { }, | ||
| 23 | }; | ||
| 24 | MODULE_DEVICE_TABLE(usb, id_table); | ||
| 25 | |||
| 26 | static struct usb_serial_driver suunto_device = { | ||
| 27 | .driver = { | ||
| 28 | .owner = THIS_MODULE, | ||
| 29 | .name = KBUILD_MODNAME, | ||
| 30 | }, | ||
| 31 | .id_table = id_table, | ||
| 32 | .num_ports = 1, | ||
| 33 | }; | ||
| 34 | |||
| 35 | static struct usb_serial_driver * const serial_drivers[] = { | ||
| 36 | &suunto_device, | ||
| 37 | NULL, | ||
| 38 | }; | ||
| 39 | |||
| 40 | module_usb_serial_driver(serial_drivers, id_table); | ||
| 41 | MODULE_LICENSE("GPL"); | ||
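Worth noting how small this new driver is: with only `.num_ports` and an ID table filled in, every open/read/write path falls through to the usb-serial generic implementations, and `module_usb_serial_driver()` expands to the init/exit glue that registers `serial_drivers` against `id_table`. The single supported device, 0fcf:1008, appears to be the ANT+ radio interface used by Suunto watches (0x0fcf being the Dynastream vendor ID common to ANT USB sticks), so the hardware presumably just needs its bulk endpoints exposed as a tty.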
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 7182bb774b79..5c9f9b1d7736 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
| @@ -371,7 +371,7 @@ static int ti_startup(struct usb_serial *serial) | |||
| 371 | usb_set_serial_data(serial, tdev); | 371 | usb_set_serial_data(serial, tdev); |
| 372 | 372 | ||
| 373 | /* determine device type */ | 373 | /* determine device type */ |
| 374 | if (usb_match_id(serial->interface, ti_id_table_3410)) | 374 | if (serial->type == &ti_1port_device) |
| 375 | tdev->td_is_3410 = 1; | 375 | tdev->td_is_3410 = 1; |
| 376 | dev_dbg(&dev->dev, "%s - device type is %s\n", __func__, | 376 | dev_dbg(&dev->dev, "%s - device type is %s\n", __func__, |
| 377 | tdev->td_is_3410 ? "3410" : "5052"); | 377 | tdev->td_is_3410 ? "3410" : "5052"); |
| @@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
| 1536 | char buf[32]; | 1536 | char buf[32]; |
| 1537 | 1537 | ||
| 1538 | /* try ID specific firmware first, then try generic firmware */ | 1538 | /* try ID specific firmware first, then try generic firmware */ |
| 1539 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, | 1539 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", |
| 1540 | dev->descriptor.idProduct); | 1540 | le16_to_cpu(dev->descriptor.idVendor), |
| 1541 | le16_to_cpu(dev->descriptor.idProduct)); | ||
| 1541 | status = request_firmware(&fw_p, buf, &dev->dev); | 1542 | status = request_firmware(&fw_p, buf, &dev->dev); |
| 1542 | 1543 | ||
| 1543 | if (status != 0) { | 1544 | if (status != 0) { |
| 1544 | buf[0] = '\0'; | 1545 | buf[0] = '\0'; |
| 1545 | if (dev->descriptor.idVendor == MTS_VENDOR_ID) { | 1546 | if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { |
| 1546 | switch (dev->descriptor.idProduct) { | 1547 | switch (le16_to_cpu(dev->descriptor.idProduct)) { |
| 1547 | case MTS_CDMA_PRODUCT_ID: | 1548 | case MTS_CDMA_PRODUCT_ID: |
| 1548 | strcpy(buf, "mts_cdma.fw"); | 1549 | strcpy(buf, "mts_cdma.fw"); |
| 1549 | break; | 1550 | break; |
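Two independent ti_usb fixes here. Device-type detection now keys off the bound driver (`serial->type == &ti_1port_device`) rather than re-matching the static ID tables, which misclassified devices bound through the dynamic-ID mechanism. The firmware-name change fixes an endianness bug: descriptor fields such as `idVendor` and `idProduct` are stored little-endian (`__le16`) as received off the wire, so using them unconverted only happens to work on little-endian CPUs. A minimal sketch:

```c
#include <linux/usb.h>

/* Sketch: convert __le16 descriptor fields before using them as
 * native integers, here to build an ID-specific firmware name. */
static void example_fw_name(struct usb_device *dev, char *buf, size_t len)
{
	snprintf(buf, len, "ti_usb-v%04x-p%04x.fw",
		 le16_to_cpu(dev->descriptor.idVendor),
		 le16_to_cpu(dev->descriptor.idProduct));
}
```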
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 8257d30c4072..85365784040b 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
| @@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb) | |||
| 291 | tty_flip_buffer_push(&port->port); | 291 | tty_flip_buffer_push(&port->port); |
| 292 | } else | 292 | } else |
| 293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); | 293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); |
| 294 | 294 | } | |
| 295 | /* Resubmit urb so we continue receiving */ | 295 | /* Resubmit urb so we continue receiving */ |
| 296 | err = usb_submit_urb(urb, GFP_ATOMIC); | 296 | err = usb_submit_urb(urb, GFP_ATOMIC); |
| 297 | if (err) { | 297 | if (err) { |
| 298 | if (err != -EPERM) { | 298 | if (err != -EPERM) { |
| 299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); | 299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", |
| 300 | /* busy also in error unless we are killed */ | 300 | __func__, err); |
| 301 | usb_mark_last_busy(port->serial->dev); | 301 | /* busy also in error unless we are killed */ |
| 302 | } | ||
| 303 | } else { | ||
| 304 | usb_mark_last_busy(port->serial->dev); | 302 | usb_mark_last_busy(port->serial->dev); |
| 305 | } | 303 | } |
| 304 | } else { | ||
| 305 | usb_mark_last_busy(port->serial->dev); | ||
| 306 | } | 306 | } |
| 307 | } | 307 | } |
| 308 | 308 | ||
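The usb_wwan hunk is a brace fix with a behavioral payoff: the read-URB resubmission had ended up inside the success-only branch, so a single completion with a nonzero status stopped reception for good. Moving the closing brace makes the resubmit unconditional, and `usb_mark_last_busy()` (which feeds USB autosuspend) is applied on every path except a killed URB. A condensed sketch of the resulting shape, assuming a hypothetical `push_to_tty()`:

```c
/* Sketch: always resubmit the read URB so reception continues;
 * -EPERM means the URB was killed, so stay quiet in that case. */
static void example_indat_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	int err;

	if (urb->status == 0 && urb->actual_length)
		push_to_tty(port, urb);		/* hypothetical data handoff */

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err && err != -EPERM)
		dev_err(&port->dev, "resubmit read urb failed: %d\n", err);
	if (err != -EPERM)
		usb_mark_last_busy(port->serial->dev);
}
```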
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1799335288bd..c015f2c16729 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -665,6 +665,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999, | |||
| 665 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 665 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 666 | US_FL_FIX_INQUIRY ), | 666 | US_FL_FIX_INQUIRY ), |
| 667 | 667 | ||
| 668 | /* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */ | ||
| 669 | UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100, | ||
| 670 | "Sony Corp.", | ||
| 671 | "MicroVault Flash Drive", | ||
| 672 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 673 | US_FL_NO_READ_CAPACITY_16 ), | ||
| 674 | |||
| 668 | /* floppy reports multiple luns */ | 675 | /* floppy reports multiple luns */ |
| 669 | UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210, | 676 | UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210, |
| 670 | "SAMSUNG", | 677 | "SAMSUNG", |
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 16968c899493..d3493ca0525d 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
| @@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | |||
| 1226 | } | 1226 | } |
| 1227 | spin_lock_irqsave(&xfer->lock, flags); | 1227 | spin_lock_irqsave(&xfer->lock, flags); |
| 1228 | rpipe = xfer->ep->hcpriv; | 1228 | rpipe = xfer->ep->hcpriv; |
| 1229 | if (rpipe == NULL) { | ||
| 1230 | pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", | ||
| 1231 | __func__, wa_xfer_id(xfer), | ||
| 1232 | "Probably already aborted.\n" ); | ||
| 1233 | goto out_unlock; | ||
| 1234 | } | ||
| 1229 | /* Check the delayed list -> if there, release and complete */ | 1235 | /* Check the delayed list -> if there, release and complete */ |
| 1230 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); | 1236 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); |
| 1231 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) | 1237 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) |
| @@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb) | |||
| 1644 | break; | 1650 | break; |
| 1645 | } | 1651 | } |
| 1646 | usb_status = xfer_result->bTransferStatus & 0x3f; | 1652 | usb_status = xfer_result->bTransferStatus & 0x3f; |
| 1647 | if (usb_status == WA_XFER_STATUS_ABORTED | 1653 | if (usb_status == WA_XFER_STATUS_NOT_FOUND) |
| 1648 | || usb_status == WA_XFER_STATUS_NOT_FOUND) | ||
| 1649 | /* taken care of already */ | 1654 | /* taken care of already */ |
| 1650 | break; | 1655 | break; |
| 1651 | xfer_id = xfer_result->dwTransferID; | 1656 | xfer_id = xfer_result->dwTransferID; |
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index c5179e269df6..cef6002acbd4 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
| @@ -137,8 +137,27 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) | |||
| 137 | */ | 137 | */ |
| 138 | pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); | 138 | pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); |
| 139 | 139 | ||
| 140 | if (vdev->reset_works) | 140 | /* |
| 141 | __pci_reset_function(pdev); | 141 | * Careful, device_lock may already be held. This is the case if |
| 142 | * a driver unbind is blocked. Try to get the locks ourselves to | ||
| 143 | * prevent a deadlock. | ||
| 144 | */ | ||
| 145 | if (vdev->reset_works) { | ||
| 146 | bool reset_done = false; | ||
| 147 | |||
| 148 | if (pci_cfg_access_trylock(pdev)) { | ||
| 149 | if (device_trylock(&pdev->dev)) { | ||
| 150 | __pci_reset_function_locked(pdev); | ||
| 151 | reset_done = true; | ||
| 152 | device_unlock(&pdev->dev); | ||
| 153 | } | ||
| 154 | pci_cfg_access_unlock(pdev); | ||
| 155 | } | ||
| 156 | |||
| 157 | if (!reset_done) | ||
| 158 | pr_warn("%s: Unable to acquire locks for reset of %s\n", | ||
| 159 | __func__, dev_name(&pdev->dev)); | ||
| 160 | } | ||
| 142 | 161 | ||
| 143 | pci_restore_state(pdev); | 162 | pci_restore_state(pdev); |
| 144 | } | 163 | } |
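The added comment is the heart of this vfio-pci fix: `vfio_pci_disable()` can run while a driver unbind is blocked, and that unbind path already holds the device lock that `__pci_reset_function()` would take internally, so calling it here could deadlock against ourselves. Using `pci_cfg_access_trylock()` plus `device_trylock()` and then the `_locked` reset variant trades completeness for safety: if either lock is contended, the reset is skipped with a warning instead of wedging the unbind, and `pci_restore_state()` still runs either way.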
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index c488da5db7c7..842f4507883e 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c | |||
| @@ -494,27 +494,6 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) | |||
| 494 | return 0; | 494 | return 0; |
| 495 | } | 495 | } |
| 496 | 496 | ||
| 497 | static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev) | ||
| 498 | { | ||
| 499 | struct vfio_device *device; | ||
| 500 | |||
| 501 | /* | ||
| 502 | * Expect to fall out here. If a device was in use, it would | ||
| 503 | * have been bound to a vfio sub-driver, which would have blocked | ||
| 504 | * in .remove at vfio_del_group_dev. Sanity check that we no | ||
| 505 | * longer track the device, so it's safe to remove. | ||
| 506 | */ | ||
| 507 | device = vfio_group_get_device(group, dev); | ||
| 508 | if (likely(!device)) | ||
| 509 | return 0; | ||
| 510 | |||
| 511 | WARN("Device %s removed from live group %d!\n", dev_name(dev), | ||
| 512 | iommu_group_id(group->iommu_group)); | ||
| 513 | |||
| 514 | vfio_device_put(device); | ||
| 515 | return 0; | ||
| 516 | } | ||
| 517 | |||
| 518 | static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) | 497 | static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) |
| 519 | { | 498 | { |
| 520 | /* We don't care what happens when the group isn't in use */ | 499 | /* We don't care what happens when the group isn't in use */ |
| @@ -531,13 +510,11 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, | |||
| 531 | struct device *dev = data; | 510 | struct device *dev = data; |
| 532 | 511 | ||
| 533 | /* | 512 | /* |
| 534 | * Need to go through a group_lock lookup to get a reference or | 513 | * Need to go through a group_lock lookup to get a reference or we |
| 535 | * we risk racing a group being removed. Leave a WARN_ON for | 514 | * risk racing a group being removed. Ignore spurious notifies. |
| 536 | * debuging, but if the group no longer exists, a spurious notify | ||
| 537 | * is harmless. | ||
| 538 | */ | 515 | */ |
| 539 | group = vfio_group_try_get(group); | 516 | group = vfio_group_try_get(group); |
| 540 | if (WARN_ON(!group)) | 517 | if (!group) |
| 541 | return NOTIFY_OK; | 518 | return NOTIFY_OK; |
| 542 | 519 | ||
| 543 | switch (action) { | 520 | switch (action) { |
| @@ -545,7 +522,13 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, | |||
| 545 | vfio_group_nb_add_dev(group, dev); | 522 | vfio_group_nb_add_dev(group, dev); |
| 546 | break; | 523 | break; |
| 547 | case IOMMU_GROUP_NOTIFY_DEL_DEVICE: | 524 | case IOMMU_GROUP_NOTIFY_DEL_DEVICE: |
| 548 | vfio_group_nb_del_dev(group, dev); | 525 | /* |
| 526 | * Nothing to do here. If the device is in use, then the | ||
| 527 | * vfio sub-driver should block the remove callback until | ||
| 528 | * it is unused. If the device is unused or attached to a | ||
| 529 | * stub driver, then it should be released and we don't | ||
| 530 | * care that it will be going away. | ||
| 531 | */ | ||
| 549 | break; | 532 | break; |
| 550 | case IOMMU_GROUP_NOTIFY_BIND_DRIVER: | 533 | case IOMMU_GROUP_NOTIFY_BIND_DRIVER: |
| 551 | pr_debug("%s: Device %s, group %d binding to driver\n", | 534 | pr_debug("%s: Device %s, group %d binding to driver\n", |
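The vfio notifier cleanup removes two WARNs that could fire in perfectly legitimate races: a group may disappear between the notification and the lookup, and a device may leave a live group once its sub-driver has released it. What survives is the "take your own reference or treat the notify as spurious" discipline. A sketch of that guard with hypothetical names, assuming a kref-counted group (the real `vfio_group_try_get()` is presumably built on something similar):

```c
#include <linux/kref.h>
#include <linux/notifier.h>

struct example_group {
	struct kref kref;
	struct notifier_block nb;
};

static void example_group_release(struct kref *kref);	/* hypothetical */

/* Sketch: a notifier must pin its object or bail out;
 * kref_get_unless_zero() fails once teardown has begun. */
static int example_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct example_group *group =
		container_of(nb, struct example_group, nb);

	if (!kref_get_unless_zero(&group->kref))
		return NOTIFY_OK;	/* racing removal: harmless */

	/* ... handle action ... */

	kref_put(&group->kref, example_group_release);
	return NOTIFY_OK;
}
```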
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 027be91db139..969a85960e9f 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
| 16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
| 17 | #include <linux/workqueue.h> | 17 | #include <linux/workqueue.h> |
| 18 | #include <linux/rcupdate.h> | ||
| 19 | #include <linux/file.h> | 18 | #include <linux/file.h> |
| 20 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 21 | 20 | ||
| @@ -346,12 +345,11 @@ static void handle_tx(struct vhost_net *net) | |||
| 346 | struct vhost_net_ubuf_ref *uninitialized_var(ubufs); | 345 | struct vhost_net_ubuf_ref *uninitialized_var(ubufs); |
| 347 | bool zcopy, zcopy_used; | 346 | bool zcopy, zcopy_used; |
| 348 | 347 | ||
| 349 | /* TODO: check that we are running from vhost_worker? */ | 348 | mutex_lock(&vq->mutex); |
| 350 | sock = rcu_dereference_check(vq->private_data, 1); | 349 | sock = vq->private_data; |
| 351 | if (!sock) | 350 | if (!sock) |
| 352 | return; | 351 | goto out; |
| 353 | 352 | ||
| 354 | mutex_lock(&vq->mutex); | ||
| 355 | vhost_disable_notify(&net->dev, vq); | 353 | vhost_disable_notify(&net->dev, vq); |
| 356 | 354 | ||
| 357 | hdr_size = nvq->vhost_hlen; | 355 | hdr_size = nvq->vhost_hlen; |
| @@ -461,7 +459,7 @@ static void handle_tx(struct vhost_net *net) | |||
| 461 | break; | 459 | break; |
| 462 | } | 460 | } |
| 463 | } | 461 | } |
| 464 | 462 | out: | |
| 465 | mutex_unlock(&vq->mutex); | 463 | mutex_unlock(&vq->mutex); |
| 466 | } | 464 | } |
| 467 | 465 | ||
| @@ -570,14 +568,14 @@ static void handle_rx(struct vhost_net *net) | |||
| 570 | s16 headcount; | 568 | s16 headcount; |
| 571 | size_t vhost_hlen, sock_hlen; | 569 | size_t vhost_hlen, sock_hlen; |
| 572 | size_t vhost_len, sock_len; | 570 | size_t vhost_len, sock_len; |
| 573 | /* TODO: check that we are running from vhost_worker? */ | 571 | struct socket *sock; |
| 574 | struct socket *sock = rcu_dereference_check(vq->private_data, 1); | ||
| 575 | |||
| 576 | if (!sock) | ||
| 577 | return; | ||
| 578 | 572 | ||
| 579 | mutex_lock(&vq->mutex); | 573 | mutex_lock(&vq->mutex); |
| 574 | sock = vq->private_data; | ||
| 575 | if (!sock) | ||
| 576 | goto out; | ||
| 580 | vhost_disable_notify(&net->dev, vq); | 577 | vhost_disable_notify(&net->dev, vq); |
| 578 | |||
| 581 | vhost_hlen = nvq->vhost_hlen; | 579 | vhost_hlen = nvq->vhost_hlen; |
| 582 | sock_hlen = nvq->sock_hlen; | 580 | sock_hlen = nvq->sock_hlen; |
| 583 | 581 | ||
| @@ -652,7 +650,7 @@ static void handle_rx(struct vhost_net *net) | |||
| 652 | break; | 650 | break; |
| 653 | } | 651 | } |
| 654 | } | 652 | } |
| 655 | 653 | out: | |
| 656 | mutex_unlock(&vq->mutex); | 654 | mutex_unlock(&vq->mutex); |
| 657 | } | 655 | } |
| 658 | 656 | ||
| @@ -750,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n, | |||
| 750 | struct vhost_poll *poll = n->poll + (nvq - n->vqs); | 748 | struct vhost_poll *poll = n->poll + (nvq - n->vqs); |
| 751 | struct socket *sock; | 749 | struct socket *sock; |
| 752 | 750 | ||
| 753 | sock = rcu_dereference_protected(vq->private_data, | 751 | sock = vq->private_data; |
| 754 | lockdep_is_held(&vq->mutex)); | ||
| 755 | if (!sock) | 752 | if (!sock) |
| 756 | return 0; | 753 | return 0; |
| 757 | 754 | ||
| @@ -764,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n, | |||
| 764 | struct socket *sock; | 761 | struct socket *sock; |
| 765 | 762 | ||
| 766 | mutex_lock(&vq->mutex); | 763 | mutex_lock(&vq->mutex); |
| 767 | sock = rcu_dereference_protected(vq->private_data, | 764 | sock = vq->private_data; |
| 768 | lockdep_is_held(&vq->mutex)); | ||
| 769 | vhost_net_disable_vq(n, vq); | 765 | vhost_net_disable_vq(n, vq); |
| 770 | rcu_assign_pointer(vq->private_data, NULL); | 766 | vq->private_data = NULL; |
| 771 | mutex_unlock(&vq->mutex); | 767 | mutex_unlock(&vq->mutex); |
| 772 | return sock; | 768 | return sock; |
| 773 | } | 769 | } |
| @@ -923,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
| 923 | } | 919 | } |
| 924 | 920 | ||
| 925 | /* start polling new socket */ | 921 | /* start polling new socket */ |
| 926 | oldsock = rcu_dereference_protected(vq->private_data, | 922 | oldsock = vq->private_data; |
| 927 | lockdep_is_held(&vq->mutex)); | ||
| 928 | if (sock != oldsock) { | 923 | if (sock != oldsock) { |
| 929 | ubufs = vhost_net_ubuf_alloc(vq, | 924 | ubufs = vhost_net_ubuf_alloc(vq, |
| 930 | sock && vhost_sock_zcopy(sock)); | 925 | sock && vhost_sock_zcopy(sock)); |
| @@ -934,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
| 934 | } | 929 | } |
| 935 | 930 | ||
| 936 | vhost_net_disable_vq(n, vq); | 931 | vhost_net_disable_vq(n, vq); |
| 937 | rcu_assign_pointer(vq->private_data, sock); | 932 | vq->private_data = sock; |
| 938 | r = vhost_init_used(vq); | 933 | r = vhost_init_used(vq); |
| 939 | if (r) | 934 | if (r) |
| 940 | goto err_used; | 935 | goto err_used; |
| @@ -968,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
| 968 | return 0; | 963 | return 0; |
| 969 | 964 | ||
| 970 | err_used: | 965 | err_used: |
| 971 | rcu_assign_pointer(vq->private_data, oldsock); | 966 | vq->private_data = oldsock; |
| 972 | vhost_net_enable_vq(n, vq); | 967 | vhost_net_enable_vq(n, vq); |
| 973 | if (ubufs) | 968 | if (ubufs) |
| 974 | vhost_net_ubuf_put_wait_and_free(ubufs); | 969 | vhost_net_ubuf_put_wait_and_free(ubufs); |
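The vhost/net conversion (continued for scsi and test below, and formalized by the vhost.h comment change) retires the driver's "kind of RCU": readers of `vq->private_data` all run from the vhost worker, and making them take `vq->mutex`, which writers already held, lets the `__rcu` annotation and every `rcu_dereference*()`/`rcu_assign_pointer()` call collapse into plain pointer accesses. A minimal sketch of the resulting discipline, with a hypothetical queue type:

```c
/* Sketch: private_data is an ordinary pointer now, valid only
 * while vq->mutex is held; NULL means no backend attached. */
static void example_handle_tx(struct example_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;	/* mutex-protected, no RCU */
	if (!sock)
		goto out;

	/* ... drain the queue into sock ... */
out:
	mutex_unlock(&vq->mutex);
}
```

Note the matching shape change in the handlers: the early `return` before taking the mutex becomes `goto out` after it, since the NULL check now happens under the lock.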
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 06adf31a9248..0c27c7df1b09 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
| @@ -902,19 +902,15 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
| 902 | int head, ret; | 902 | int head, ret; |
| 903 | u8 target; | 903 | u8 target; |
| 904 | 904 | ||
| 905 | mutex_lock(&vq->mutex); | ||
| 905 | /* | 906 | /* |
| 906 | * We can handle the vq only after the endpoint is setup by calling the | 907 | * We can handle the vq only after the endpoint is setup by calling the |
| 907 | * VHOST_SCSI_SET_ENDPOINT ioctl. | 908 | * VHOST_SCSI_SET_ENDPOINT ioctl. |
| 908 | * | ||
| 909 | * TODO: Check that we are running from vhost_worker which acts | ||
| 910 | * as read-side critical section for vhost kind of RCU. | ||
| 911 | * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h | ||
| 912 | */ | 909 | */ |
| 913 | vs_tpg = rcu_dereference_check(vq->private_data, 1); | 910 | vs_tpg = vq->private_data; |
| 914 | if (!vs_tpg) | 911 | if (!vs_tpg) |
| 915 | return; | 912 | goto out; |
| 916 | 913 | ||
| 917 | mutex_lock(&vq->mutex); | ||
| 918 | vhost_disable_notify(&vs->dev, vq); | 914 | vhost_disable_notify(&vs->dev, vq); |
| 919 | 915 | ||
| 920 | for (;;) { | 916 | for (;;) { |
| @@ -1064,6 +1060,7 @@ err_free: | |||
| 1064 | vhost_scsi_free_cmd(cmd); | 1060 | vhost_scsi_free_cmd(cmd); |
| 1065 | err_cmd: | 1061 | err_cmd: |
| 1066 | vhost_scsi_send_bad_target(vs, vq, head, out); | 1062 | vhost_scsi_send_bad_target(vs, vq, head, out); |
| 1063 | out: | ||
| 1067 | mutex_unlock(&vq->mutex); | 1064 | mutex_unlock(&vq->mutex); |
| 1068 | } | 1065 | } |
| 1069 | 1066 | ||
| @@ -1232,9 +1229,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, | |||
| 1232 | sizeof(vs->vs_vhost_wwpn)); | 1229 | sizeof(vs->vs_vhost_wwpn)); |
| 1233 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { | 1230 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { |
| 1234 | vq = &vs->vqs[i].vq; | 1231 | vq = &vs->vqs[i].vq; |
| 1235 | /* Flushing the vhost_work acts as synchronize_rcu */ | ||
| 1236 | mutex_lock(&vq->mutex); | 1232 | mutex_lock(&vq->mutex); |
| 1237 | rcu_assign_pointer(vq->private_data, vs_tpg); | 1233 | vq->private_data = vs_tpg; |
| 1238 | vhost_init_used(vq); | 1234 | vhost_init_used(vq); |
| 1239 | mutex_unlock(&vq->mutex); | 1235 | mutex_unlock(&vq->mutex); |
| 1240 | } | 1236 | } |
| @@ -1313,9 +1309,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, | |||
| 1313 | if (match) { | 1309 | if (match) { |
| 1314 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { | 1310 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { |
| 1315 | vq = &vs->vqs[i].vq; | 1311 | vq = &vs->vqs[i].vq; |
| 1316 | /* Flushing the vhost_work acts as synchronize_rcu */ | ||
| 1317 | mutex_lock(&vq->mutex); | 1312 | mutex_lock(&vq->mutex); |
| 1318 | rcu_assign_pointer(vq->private_data, NULL); | 1313 | vq->private_data = NULL; |
| 1319 | mutex_unlock(&vq->mutex); | 1314 | mutex_unlock(&vq->mutex); |
| 1320 | } | 1315 | } |
| 1321 | } | 1316 | } |
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index a73ea217f24d..339eae85859a 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
| 15 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
| 16 | #include <linux/rcupdate.h> | ||
| 17 | #include <linux/file.h> | 16 | #include <linux/file.h> |
| 18 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
| 19 | 18 | ||
| @@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test) | |||
| 200 | priv = test ? n : NULL; | 199 | priv = test ? n : NULL; |
| 201 | 200 | ||
| 202 | /* start polling new socket */ | 201 | /* start polling new socket */ |
| 203 | oldpriv = rcu_dereference_protected(vq->private_data, | 202 | oldpriv = vq->private_data; |
| 204 | lockdep_is_held(&vq->mutex)); | 203 | vq->private_data = priv; |
| 205 | rcu_assign_pointer(vq->private_data, priv); | ||
| 206 | 204 | ||
| 207 | r = vhost_init_used(&n->vqs[index]); | 205 | r = vhost_init_used(&n->vqs[index]); |
| 208 | 206 | ||
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 42298cd23c73..4465ed5f316d 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
| @@ -103,14 +103,8 @@ struct vhost_virtqueue { | |||
| 103 | struct iovec iov[UIO_MAXIOV]; | 103 | struct iovec iov[UIO_MAXIOV]; |
| 104 | struct iovec *indirect; | 104 | struct iovec *indirect; |
| 105 | struct vring_used_elem *heads; | 105 | struct vring_used_elem *heads; |
| 106 | /* We use a kind of RCU to access private pointer. | 106 | /* Protected by virtqueue mutex. */ |
| 107 | * All readers access it from worker, which makes it possible to | 107 | void *private_data; |
| 108 | * flush the vhost_work instead of synchronize_rcu. Therefore readers do | ||
| 109 | * not need to call rcu_read_lock/rcu_read_unlock: the beginning of | ||
| 110 | * vhost_work execution acts instead of rcu_read_lock() and the end of | ||
| 111 | * vhost_work execution acts instead of rcu_read_unlock(). | ||
| 112 | * Writers use virtqueue mutex. */ | ||
| 113 | void __rcu *private_data; | ||
| 114 | /* Log write descriptors */ | 108 | /* Log write descriptors */ |
| 115 | void __user *log_base; | 109 | void __user *log_base; |
| 116 | struct vhost_log *log; | 110 | struct vhost_log *log; |
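With the __rcu annotation and the long comment gone, the contract in vhost.h is simply that private_data is protected by the virtqueue mutex, for readers and writers alike. Illustrative accessors (hypothetical names; the drivers open-code these operations) make the two halves of the rule explicit:

    #include <linux/mutex.h>
    #include <linux/lockdep.h>
    /* struct vhost_virtqueue as declared above in drivers/vhost/vhost.h */

    static inline void *vq_get_backend(struct vhost_virtqueue *vq)
    {
            lockdep_assert_held(&vq->mutex);        /* reader side */
            return vq->private_data;
    }

    static inline void vq_set_backend(struct vhost_virtqueue *vq, void *priv)
    {
            mutex_lock(&vq->mutex);                 /* writer side */
            vq->private_data = priv;
            mutex_unlock(&vq->mutex);
    }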
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index a89c15de9f45..9b0f12c5c284 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
| @@ -435,8 +435,8 @@ static int correct_chipset(struct atyfb_par *par) | |||
| 435 | const char *name; | 435 | const char *name; |
| 436 | int i; | 436 | int i; |
| 437 | 437 | ||
| 438 | for (i = ARRAY_SIZE(aty_chips); i > 0; i--) | 438 | for (i = (int)ARRAY_SIZE(aty_chips) - 1; i >= 0; i--) |
| 439 | if (par->pci_id == aty_chips[i - 1].pci_id) | 439 | if (par->pci_id == aty_chips[i].pci_id) |
| 440 | break; | 440 | break; |
| 441 | 441 | ||
| 442 | if (i < 0) | 442 | if (i < 0) |
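The atyfb change fixes a dead error check: the old loop counted i from ARRAY_SIZE(aty_chips) down to 1 and left i == 0 on a miss, so the following if (i < 0) could never trigger and an unknown chip fell through as if matched. Scanning from (int)ARRAY_SIZE() - 1 down to 0 makes -1 the unambiguous not-found value; the cast matters because ARRAY_SIZE() is unsigned. A self-contained illustration of the idiom, with made-up IDs:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned short ids[] = { 0x4742, 0x4744, 0x4c42 }; /* fake */

    int main(void)
    {
            unsigned short wanted = 0x9999;         /* not in the table */
            int i;

            for (i = (int)ARRAY_SIZE(ids) - 1; i >= 0; i--)
                    if (ids[i] == wanted)
                            break;

            if (i < 0)                              /* now reachable */
                    printf("unknown chip\n");
            return 0;
    }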
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index 5ca11b066b7e..886e797f75f9 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c | |||
| @@ -101,33 +101,37 @@ static const struct backlight_ops max8925_backlight_ops = { | |||
| 101 | .get_brightness = max8925_backlight_get_brightness, | 101 | .get_brightness = max8925_backlight_get_brightness, |
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | #ifdef CONFIG_OF | 104 | static void max8925_backlight_dt_init(struct platform_device *pdev) |
| 105 | static int max8925_backlight_dt_init(struct platform_device *pdev, | ||
| 106 | struct max8925_backlight_pdata *pdata) | ||
| 107 | { | 105 | { |
| 108 | struct device_node *nproot = pdev->dev.parent->of_node, *np; | 106 | struct device_node *nproot = pdev->dev.parent->of_node, *np; |
| 109 | int dual_string; | 107 | struct max8925_backlight_pdata *pdata; |
| 108 | u32 val; | ||
| 109 | |||
| 110 | if (!nproot || !IS_ENABLED(CONFIG_OF)) | ||
| 111 | return; | ||
| 112 | |||
| 113 | pdata = devm_kzalloc(&pdev->dev, | ||
| 114 | sizeof(struct max8925_backlight_pdata), | ||
| 115 | GFP_KERNEL); | ||
| 116 | if (!pdata) | ||
| 117 | return; | ||
| 110 | 118 | ||
| 111 | if (!nproot) | ||
| 112 | return -ENODEV; | ||
| 113 | np = of_find_node_by_name(nproot, "backlight"); | 119 | np = of_find_node_by_name(nproot, "backlight"); |
| 114 | if (!np) { | 120 | if (!np) { |
| 115 | dev_err(&pdev->dev, "failed to find backlight node\n"); | 121 | dev_err(&pdev->dev, "failed to find backlight node\n"); |
| 116 | return -ENODEV; | 122 | return; |
| 117 | } | 123 | } |
| 118 | 124 | ||
| 119 | of_property_read_u32(np, "maxim,max8925-dual-string", &dual_string); | 125 | if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val)) |
| 120 | pdata->dual_string = dual_string; | 126 | pdata->dual_string = val; |
| 121 | return 0; | 127 | |
| 128 | pdev->dev.platform_data = pdata; | ||
| 122 | } | 129 | } |
| 123 | #else | ||
| 124 | #define max8925_backlight_dt_init(x, y) (-1) | ||
| 125 | #endif | ||
| 126 | 130 | ||
| 127 | static int max8925_backlight_probe(struct platform_device *pdev) | 131 | static int max8925_backlight_probe(struct platform_device *pdev) |
| 128 | { | 132 | { |
| 129 | struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); | 133 | struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); |
| 130 | struct max8925_backlight_pdata *pdata = pdev->dev.platform_data; | 134 | struct max8925_backlight_pdata *pdata; |
| 131 | struct max8925_backlight_data *data; | 135 | struct max8925_backlight_data *data; |
| 132 | struct backlight_device *bl; | 136 | struct backlight_device *bl; |
| 133 | struct backlight_properties props; | 137 | struct backlight_properties props; |
| @@ -170,13 +174,10 @@ static int max8925_backlight_probe(struct platform_device *pdev) | |||
| 170 | platform_set_drvdata(pdev, bl); | 174 | platform_set_drvdata(pdev, bl); |
| 171 | 175 | ||
| 172 | value = 0; | 176 | value = 0; |
| 173 | if (pdev->dev.parent->of_node && !pdata) { | 177 | if (!pdev->dev.platform_data) |
| 174 | pdata = devm_kzalloc(&pdev->dev, | 178 | max8925_backlight_dt_init(pdev); |
| 175 | sizeof(struct max8925_backlight_pdata), | ||
| 176 | GFP_KERNEL); | ||
| 177 | max8925_backlight_dt_init(pdev, pdata); | ||
| 178 | } | ||
| 179 | 179 | ||
| 180 | pdata = pdev->dev.platform_data; | ||
| 180 | if (pdata) { | 181 | if (pdata) { |
| 181 | if (pdata->lxw_scl) | 182 | if (pdata->lxw_scl) |
| 182 | value |= (1 << 7); | 183 | value |= (1 << 7); |
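The max8925 rework folds the CONFIG_OF #ifdef and its stub macro into a single function guarded by IS_ENABLED(CONFIG_OF), moves the devm_kzalloc() into the DT helper, and hands the result back through pdev->dev.platform_data so probe reads one source of truth. It also closes two latent bugs in the old code: dual_string was used uninitialized when the property was absent (the of_property_read_u32() return value was ignored), and the allocation result was never checked. The IS_ENABLED pattern, sketched with a hypothetical property name:

    #include <linux/kernel.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static void foo_dt_init(struct platform_device *pdev)
    {
            struct device_node *np = pdev->dev.parent->of_node;
            u32 val;

            /* Dead-code-eliminated on !CONFIG_OF builds, but still
             * type-checked, unlike an #ifdef'd-out body. */
            if (!np || !IS_ENABLED(CONFIG_OF))
                    return;

            /* Returns 0 only when 'val' was actually written. */
            if (!of_property_read_u32(np, "vendor,example-prop", &val))
                    dev_info(&pdev->dev, "example-prop = %u\n", val);
    }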
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 3ba37713b1f9..dc09ebe4aba5 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c | |||
| @@ -239,24 +239,6 @@ static const struct fb_bitfield def_rgb565[] = { | |||
| 239 | } | 239 | } |
| 240 | }; | 240 | }; |
| 241 | 241 | ||
| 242 | static const struct fb_bitfield def_rgb666[] = { | ||
| 243 | [RED] = { | ||
| 244 | .offset = 16, | ||
| 245 | .length = 6, | ||
| 246 | }, | ||
| 247 | [GREEN] = { | ||
| 248 | .offset = 8, | ||
| 249 | .length = 6, | ||
| 250 | }, | ||
| 251 | [BLUE] = { | ||
| 252 | .offset = 0, | ||
| 253 | .length = 6, | ||
| 254 | }, | ||
| 255 | [TRANSP] = { /* no support for transparency */ | ||
| 256 | .length = 0, | ||
| 257 | } | ||
| 258 | }; | ||
| 259 | |||
| 260 | static const struct fb_bitfield def_rgb888[] = { | 242 | static const struct fb_bitfield def_rgb888[] = { |
| 261 | [RED] = { | 243 | [RED] = { |
| 262 | .offset = 16, | 244 | .offset = 16, |
| @@ -309,9 +291,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var, | |||
| 309 | break; | 291 | break; |
| 310 | case STMLCDIF_16BIT: | 292 | case STMLCDIF_16BIT: |
| 311 | case STMLCDIF_18BIT: | 293 | case STMLCDIF_18BIT: |
| 312 | /* 24 bit to 18 bit mapping */ | ||
| 313 | rgb = def_rgb666; | ||
| 314 | break; | ||
| 315 | case STMLCDIF_24BIT: | 294 | case STMLCDIF_24BIT: |
| 316 | /* real 24 bit */ | 295 | /* real 24 bit */ |
| 317 | rgb = def_rgb888; | 296 | rgb = def_rgb888; |
| @@ -453,11 +432,6 @@ static int mxsfb_set_par(struct fb_info *fb_info) | |||
| 453 | return -EINVAL; | 432 | return -EINVAL; |
| 454 | case STMLCDIF_16BIT: | 433 | case STMLCDIF_16BIT: |
| 455 | case STMLCDIF_18BIT: | 434 | case STMLCDIF_18BIT: |
| 456 | /* 24 bit to 18 bit mapping */ | ||
| 457 | ctrl |= CTRL_DF24; /* ignore the upper 2 bits in | ||
| 458 | * each colour component | ||
| 459 | */ | ||
| 460 | break; | ||
| 461 | case STMLCDIF_24BIT: | 435 | case STMLCDIF_24BIT: |
| 462 | /* real 24 bit */ | 436 | /* real 24 bit */ |
| 463 | break; | 437 | break; |
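Judging by these two hunks, mxsfb's 24-to-18-bit squeeze (the def_rgb666 layout plus the CTRL_DF24 "ignore the upper two bits of each component" flag) is removed outright: STMLCDIF_16BIT and STMLCDIF_18BIT now fall through to the STMLCDIF_24BIT handling in both check_var and set_par, so those bus widths keep the full rgb888 layout. The fb_bitfield triples describe where each component sits in the pixel word; for def_rgb888 that corresponds to this packing (a sketch, not driver code):

    #include <linux/types.h>

    /* rgb888: 8-bit fields at offsets 16 (R), 8 (G), 0 (B) in a 32-bit pixel */
    static inline u32 pack_rgb888(u8 r, u8 g, u8 b)
    {
            return ((u32)r << 16) | ((u32)g << 8) | (u32)b;
    }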
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c index 8c527e5b293c..796e5112ceee 100644 --- a/drivers/video/nuc900fb.c +++ b/drivers/video/nuc900fb.c | |||
| @@ -587,8 +587,7 @@ static int nuc900fb_probe(struct platform_device *pdev) | |||
| 587 | fbinfo->flags = FBINFO_FLAG_DEFAULT; | 587 | fbinfo->flags = FBINFO_FLAG_DEFAULT; |
| 588 | fbinfo->pseudo_palette = &fbi->pseudo_pal; | 588 | fbinfo->pseudo_palette = &fbi->pseudo_pal; |
| 589 | 589 | ||
| 590 | ret = request_irq(irq, nuc900fb_irqhandler, 0, | 590 | ret = request_irq(irq, nuc900fb_irqhandler, 0, pdev->name, fbi); |
| 591 | pdev->name, fbinfo); | ||
| 592 | if (ret) { | 591 | if (ret) { |
| 593 | dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n", | 592 | dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n", |
| 594 | irq, ret); | 593 | irq, ret); |
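The nuc900fb fix is about the dev_id cookie: the last argument of request_irq() is what the handler receives and what free_irq() must later be called with, so it has to be the object the handler actually casts it to (here the private fbi rather than the fb_info). The sh7760fb hunk further down fixes the mirror image of this bug on the free_irq() side. The rule, as a minimal sketch with hypothetical types:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    struct my_priv {
            unsigned int events;
    };

    static irqreturn_t my_irqhandler(int irq, void *dev_id)
    {
            struct my_priv *priv = dev_id; /* same pointer as the cookie below */

            priv->events++;
            return IRQ_HANDLED;
    }

    static int my_request(struct platform_device *pdev,
                          struct my_priv *priv, int irq)
    {
            /* request_irq() and the eventual free_irq() must agree on 'priv' */
            return request_irq(irq, my_irqhandler, 0, pdev->name, priv);
    }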
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c index 5338f362293b..1b60698f141e 100644 --- a/drivers/video/omap2/displays-new/connector-analog-tv.c +++ b/drivers/video/omap2/displays-new/connector-analog-tv.c | |||
| @@ -28,6 +28,20 @@ struct panel_drv_data { | |||
| 28 | bool invert_polarity; | 28 | bool invert_polarity; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | static const struct omap_video_timings tvc_pal_timings = { | ||
| 32 | .x_res = 720, | ||
| 33 | .y_res = 574, | ||
| 34 | .pixel_clock = 13500, | ||
| 35 | .hsw = 64, | ||
| 36 | .hfp = 12, | ||
| 37 | .hbp = 68, | ||
| 38 | .vsw = 5, | ||
| 39 | .vfp = 5, | ||
| 40 | .vbp = 41, | ||
| 41 | |||
| 42 | .interlace = true, | ||
| 43 | }; | ||
| 44 | |||
| 31 | #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) | 45 | #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) |
| 32 | 46 | ||
| 33 | static int tvc_connect(struct omap_dss_device *dssdev) | 47 | static int tvc_connect(struct omap_dss_device *dssdev) |
| @@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev) | |||
| 212 | return -ENODEV; | 226 | return -ENODEV; |
| 213 | } | 227 | } |
| 214 | 228 | ||
| 215 | ddata->timings = omap_dss_pal_timings; | 229 | ddata->timings = tvc_pal_timings; |
| 216 | 230 | ||
| 217 | dssdev = &ddata->dssdev; | 231 | dssdev = &ddata->dssdev; |
| 218 | dssdev->driver = &tvc_driver; | 232 | dssdev->driver = &tvc_driver; |
| 219 | dssdev->dev = &pdev->dev; | 233 | dssdev->dev = &pdev->dev; |
| 220 | dssdev->type = OMAP_DISPLAY_TYPE_VENC; | 234 | dssdev->type = OMAP_DISPLAY_TYPE_VENC; |
| 221 | dssdev->owner = THIS_MODULE; | 235 | dssdev->owner = THIS_MODULE; |
| 222 | dssdev->panel.timings = omap_dss_pal_timings; | 236 | dssdev->panel.timings = tvc_pal_timings; |
| 223 | 237 | ||
| 224 | r = omapdss_register_display(dssdev); | 238 | r = omapdss_register_display(dssdev); |
| 225 | if (r) { | 239 | if (r) { |
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c index b2a8912f6435..a9ac3ce2d0e9 100644 --- a/drivers/video/sgivwfb.c +++ b/drivers/video/sgivwfb.c | |||
| @@ -713,7 +713,7 @@ static int sgivwfb_mmap(struct fb_info *info, | |||
| 713 | r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size); | 713 | r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size); |
| 714 | 714 | ||
| 715 | printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n", | 715 | printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n", |
| 716 | offset, vma->vm_start); | 716 | sgivwfb_mem_phys + (vma->vm_pgoff << PAGE_SHIFT), vma->vm_start); |
| 717 | 717 | ||
| 718 | return r; | 718 | return r; |
| 719 | } | 719 | } |
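The sgivwfb debug message previously printed offset, which apparently no longer reflects the user's requested offset now that vm_iomap_memory() does the range checking and mapping itself; the replacement recomputes the mapped physical address from vma->vm_pgoff. The conversion is the usual pages-to-bytes shift (sketch):

    #include <linux/mm.h>

    static unsigned long mapped_phys(struct vm_area_struct *vma,
                                     unsigned long base)
    {
            /* vm_pgoff counts pages; shift by PAGE_SHIFT for a byte offset */
            return base + (vma->vm_pgoff << PAGE_SHIFT);
    }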
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c index a8c6c43a4658..1265b25f9f99 100644 --- a/drivers/video/sh7760fb.c +++ b/drivers/video/sh7760fb.c | |||
| @@ -567,7 +567,7 @@ static int sh7760fb_remove(struct platform_device *dev) | |||
| 567 | fb_dealloc_cmap(&info->cmap); | 567 | fb_dealloc_cmap(&info->cmap); |
| 568 | sh7760fb_free_mem(info); | 568 | sh7760fb_free_mem(info); |
| 569 | if (par->irq >= 0) | 569 | if (par->irq >= 0) |
| 570 | free_irq(par->irq, par); | 570 | free_irq(par->irq, &par->vsync); |
| 571 | iounmap(par->base); | 571 | iounmap(par->base); |
| 572 | release_mem_region(par->ioarea->start, resource_size(par->ioarea)); | 572 | release_mem_region(par->ioarea->start, resource_size(par->ioarea)); |
| 573 | framebuffer_release(info); | 573 | framebuffer_release(info); |
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c index 830ded45fd47..2827333703d9 100644 --- a/drivers/video/vga16fb.c +++ b/drivers/video/vga16fb.c | |||
| @@ -1265,7 +1265,6 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image | |||
| 1265 | 1265 | ||
| 1266 | static void vga16fb_destroy(struct fb_info *info) | 1266 | static void vga16fb_destroy(struct fb_info *info) |
| 1267 | { | 1267 | { |
| 1268 | struct platform_device *dev = container_of(info->device, struct platform_device, dev); | ||
| 1269 | iounmap(info->screen_base); | 1268 | iounmap(info->screen_base); |
| 1270 | fb_dealloc_cmap(&info->cmap); | 1269 | fb_dealloc_cmap(&info->cmap); |
| 1271 | /* XXX unshare VGA regions */ | 1270 | /* XXX unshare VGA regions */ |
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c index f3d4a69e1e4e..6629b29a8202 100644 --- a/drivers/video/xilinxfb.c +++ b/drivers/video/xilinxfb.c | |||
| @@ -341,8 +341,8 @@ static int xilinxfb_assign(struct platform_device *pdev, | |||
| 341 | 341 | ||
| 342 | if (drvdata->flags & BUS_ACCESS_FLAG) { | 342 | if (drvdata->flags & BUS_ACCESS_FLAG) { |
| 343 | /* Put a banner in the log (for DEBUG) */ | 343 | /* Put a banner in the log (for DEBUG) */ |
| 344 | dev_dbg(dev, "regs: phys=%x, virt=%p\n", drvdata->regs_phys, | 344 | dev_dbg(dev, "regs: phys=%pa, virt=%p\n", |
| 345 | drvdata->regs); | 345 | &drvdata->regs_phys, drvdata->regs); |
| 346 | } | 346 | } |
| 347 | /* Put a banner in the log (for DEBUG) */ | 347 | /* Put a banner in the log (for DEBUG) */ |
| 348 | dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n", | 348 | dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n", |
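xilinxfb's regs_phys is a phys_addr_t, whose width differs between configurations (32-bit, or 64-bit with LPAE/PAE), so a fixed %x can be wrong. The %pa printk extension handles this portably, but it dereferences its argument, hence the added &. Sketch of the usage:

    #include <linux/device.h>
    #include <linux/types.h>

    static void print_region(struct device *dev, phys_addr_t base,
                             void __iomem *virt)
    {
            /* %pa takes a *pointer* to the address: pass &base, not base */
            dev_dbg(dev, "regs: phys=%pa, virt=%p\n", &base, virt);
    }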
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 9e02d60a364b..23eae5cb69c2 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
| @@ -145,7 +145,7 @@ config SWIOTLB_XEN | |||
| 145 | 145 | ||
| 146 | config XEN_TMEM | 146 | config XEN_TMEM |
| 147 | tristate | 147 | tristate |
| 148 | depends on !ARM | 148 | depends on !ARM && !ARM64 |
| 149 | default m if (CLEANCACHE || FRONTSWAP) | 149 | default m if (CLEANCACHE || FRONTSWAP) |
| 150 | help | 150 | help |
| 151 | Shim to interface in-kernel Transcendent Memory hooks | 151 | Shim to interface in-kernel Transcendent Memory hooks |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index eabd0ee1c2bc..14fe79d8634a 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
| @@ -1,9 +1,8 @@ | |||
| 1 | ifneq ($(CONFIG_ARM),y) | 1 | ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),) |
| 2 | obj-y += manage.o | ||
| 3 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 2 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
| 4 | endif | 3 | endif |
| 5 | obj-$(CONFIG_X86) += fallback.o | 4 | obj-$(CONFIG_X86) += fallback.o |
| 6 | obj-y += grant-table.o features.o events.o balloon.o | 5 | obj-y += grant-table.o features.o events.o balloon.o manage.o |
| 7 | obj-y += xenbus/ | 6 | obj-y += xenbus/ |
| 8 | 7 | ||
| 9 | nostackp := $(call cc-option, -fno-stack-protector) | 8 | nostackp := $(call cc-option, -fno-stack-protector) |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index a58ac435a9a4..5e8be462aed5 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void) | |||
| 348 | 348 | ||
| 349 | for_each_possible_cpu(i) | 349 | for_each_possible_cpu(i) |
| 350 | memset(per_cpu(cpu_evtchn_mask, i), | 350 | memset(per_cpu(cpu_evtchn_mask, i), |
| 351 | (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); | 351 | (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); |
| 352 | } | 352 | } |
| 353 | 353 | ||
| 354 | static inline void clear_evtchn(int port) | 354 | static inline void clear_evtchn(int port) |
| @@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
| 1493 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ | 1493 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ |
| 1494 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | 1494 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) |
| 1495 | { | 1495 | { |
| 1496 | struct shared_info *s = HYPERVISOR_shared_info; | ||
| 1496 | struct evtchn_bind_vcpu bind_vcpu; | 1497 | struct evtchn_bind_vcpu bind_vcpu; |
| 1497 | int evtchn = evtchn_from_irq(irq); | 1498 | int evtchn = evtchn_from_irq(irq); |
| 1499 | int masked; | ||
| 1498 | 1500 | ||
| 1499 | if (!VALID_EVTCHN(evtchn)) | 1501 | if (!VALID_EVTCHN(evtchn)) |
| 1500 | return -1; | 1502 | return -1; |
| @@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
| 1511 | bind_vcpu.vcpu = tcpu; | 1513 | bind_vcpu.vcpu = tcpu; |
| 1512 | 1514 | ||
| 1513 | /* | 1515 | /* |
| 1516 | * Mask the event while changing the VCPU binding to prevent | ||
| 1517 | * it being delivered on an unexpected VCPU. | ||
| 1518 | */ | ||
| 1519 | masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); | ||
| 1520 | |||
| 1521 | /* | ||
| 1514 | * If this fails, it usually just indicates that we're dealing with a | 1522 | * If this fails, it usually just indicates that we're dealing with a |
| 1515 | * virq or IPI channel, which don't actually need to be rebound. Ignore | 1523 | * virq or IPI channel, which don't actually need to be rebound. Ignore |
| 1516 | * it, but don't do the xenlinux-level rebind in that case. | 1524 | * it, but don't do the xenlinux-level rebind in that case. |
| @@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
| 1518 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) | 1526 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) |
| 1519 | bind_evtchn_to_cpu(evtchn, tcpu); | 1527 | bind_evtchn_to_cpu(evtchn, tcpu); |
| 1520 | 1528 | ||
| 1529 | if (!masked) | ||
| 1530 | unmask_evtchn(evtchn); | ||
| 1531 | |||
| 1521 | return 0; | 1532 | return 0; |
| 1522 | } | 1533 | } |
| 1523 | 1534 | ||
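The events.c change closes a delivery race: if an event channel fires while EVTCHNOP_bind_vcpu is in flight, it can be delivered on the old, unexpected VCPU. Masking around the hypercall prevents that, and sync_test_and_set_bit() returns the bit's previous value, so a channel that was already masked beforehand stays masked afterwards. The save-and-restore idiom in general form (generic bitops standing in for the Xen-specific mask and unmask_evtchn):

    #include <linux/bitops.h>

    static void update_while_masked(unsigned long *mask, int bit,
                                    void (*update)(void *), void *arg)
    {
            /* test_and_set_bit() reports whether the bit was already set */
            int was_masked = test_and_set_bit(bit, mask);

            update(arg);                    /* e.g. the bind_vcpu hypercall */

            if (!was_masked)
                    clear_bit(bit, mask);   /* unmask only what we masked */
    }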
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 8feecf01d55c..b6165e047f48 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c | |||
| @@ -379,18 +379,12 @@ static long evtchn_ioctl(struct file *file, | |||
| 379 | if (unbind.port >= NR_EVENT_CHANNELS) | 379 | if (unbind.port >= NR_EVENT_CHANNELS) |
| 380 | break; | 380 | break; |
| 381 | 381 | ||
| 382 | spin_lock_irq(&port_user_lock); | ||
| 383 | |||
| 384 | rc = -ENOTCONN; | 382 | rc = -ENOTCONN; |
| 385 | if (get_port_user(unbind.port) != u) { | 383 | if (get_port_user(unbind.port) != u) |
| 386 | spin_unlock_irq(&port_user_lock); | ||
| 387 | break; | 384 | break; |
| 388 | } | ||
| 389 | 385 | ||
| 390 | disable_irq(irq_from_evtchn(unbind.port)); | 386 | disable_irq(irq_from_evtchn(unbind.port)); |
| 391 | 387 | ||
| 392 | spin_unlock_irq(&port_user_lock); | ||
| 393 | |||
| 394 | evtchn_unbind_from_user(u, unbind.port); | 388 | evtchn_unbind_from_user(u, unbind.port); |
| 395 | 389 | ||
| 396 | rc = 0; | 390 | rc = 0; |
| @@ -490,26 +484,15 @@ static int evtchn_release(struct inode *inode, struct file *filp) | |||
| 490 | int i; | 484 | int i; |
| 491 | struct per_user_data *u = filp->private_data; | 485 | struct per_user_data *u = filp->private_data; |
| 492 | 486 | ||
| 493 | spin_lock_irq(&port_user_lock); | ||
| 494 | |||
| 495 | free_page((unsigned long)u->ring); | ||
| 496 | |||
| 497 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | 487 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { |
| 498 | if (get_port_user(i) != u) | 488 | if (get_port_user(i) != u) |
| 499 | continue; | 489 | continue; |
| 500 | 490 | ||
| 501 | disable_irq(irq_from_evtchn(i)); | 491 | disable_irq(irq_from_evtchn(i)); |
| 502 | } | ||
| 503 | |||
| 504 | spin_unlock_irq(&port_user_lock); | ||
| 505 | |||
| 506 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | ||
| 507 | if (get_port_user(i) != u) | ||
| 508 | continue; | ||
| 509 | |||
| 510 | evtchn_unbind_from_user(get_port_user(i), i); | 492 | evtchn_unbind_from_user(get_port_user(i), i); |
| 511 | } | 493 | } |
| 512 | 494 | ||
| 495 | free_page((unsigned long)u->ring); | ||
| 513 | kfree(u->name); | 496 | kfree(u->name); |
| 514 | kfree(u); | 497 | kfree(u); |
| 515 | 498 | ||
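Both evtchn.c hunks exist because disable_irq() can sleep (it waits for any running handler to finish), which is illegal under spin_lock_irq(). The ioctl path stops wrapping it in port_user_lock, and the release path collapses its two passes into one, calling disable_irq() without the lock and freeing the ring page only after every port is unbound. The constraint, spelled out as a sketch:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(port_lock);

    static void shutdown_port(int irq)
    {
            spin_lock_irq(&port_lock);
            /* ... look up / validate state under the lock ... */
            spin_unlock_irq(&port_lock);

            /* disable_irq() waits for running handlers and may sleep, so
             * it must happen only after the spinlock is released. */
            disable_irq(irq);
    }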
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 6ed8a9df4472..34b20bfa4e8c 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c | |||
| @@ -115,7 +115,6 @@ static int xenbus_frontend_dev_resume(struct device *dev) | |||
| 115 | return -EFAULT; | 115 | return -EFAULT; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume); | ||
| 119 | queue_work(xenbus_frontend_wq, &xdev->work); | 118 | queue_work(xenbus_frontend_wq, &xdev->work); |
| 120 | 119 | ||
| 121 | return 0; | 120 | return 0; |
| @@ -124,6 +123,16 @@ static int xenbus_frontend_dev_resume(struct device *dev) | |||
| 124 | return xenbus_dev_resume(dev); | 123 | return xenbus_dev_resume(dev); |
| 125 | } | 124 | } |
| 126 | 125 | ||
| 126 | static int xenbus_frontend_dev_probe(struct device *dev) | ||
| 127 | { | ||
| 128 | if (xen_store_domain_type == XS_LOCAL) { | ||
| 129 | struct xenbus_device *xdev = to_xenbus_device(dev); | ||
| 130 | INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume); | ||
| 131 | } | ||
| 132 | |||
| 133 | return xenbus_dev_probe(dev); | ||
| 134 | } | ||
| 135 | |||
| 127 | static const struct dev_pm_ops xenbus_pm_ops = { | 136 | static const struct dev_pm_ops xenbus_pm_ops = { |
| 128 | .suspend = xenbus_dev_suspend, | 137 | .suspend = xenbus_dev_suspend, |
| 129 | .resume = xenbus_frontend_dev_resume, | 138 | .resume = xenbus_frontend_dev_resume, |
| @@ -142,7 +151,7 @@ static struct xen_bus_type xenbus_frontend = { | |||
| 142 | .name = "xen", | 151 | .name = "xen", |
| 143 | .match = xenbus_match, | 152 | .match = xenbus_match, |
| 144 | .uevent = xenbus_uevent_frontend, | 153 | .uevent = xenbus_uevent_frontend, |
| 145 | .probe = xenbus_dev_probe, | 154 | .probe = xenbus_frontend_dev_probe, |
| 146 | .remove = xenbus_dev_remove, | 155 | .remove = xenbus_dev_remove, |
| 147 | .shutdown = xenbus_dev_shutdown, | 156 | .shutdown = xenbus_dev_shutdown, |
| 148 | .dev_attrs = xenbus_dev_attrs, | 157 | .dev_attrs = xenbus_dev_attrs, |
| @@ -474,7 +483,11 @@ static int __init xenbus_probe_frontend_init(void) | |||
| 474 | 483 | ||
| 475 | register_xenstore_notifier(&xenstore_notifier); | 484 | register_xenstore_notifier(&xenstore_notifier); |
| 476 | 485 | ||
| 477 | xenbus_frontend_wq = create_workqueue("xenbus_frontend"); | 486 | if (xen_store_domain_type == XS_LOCAL) { |
| 487 | xenbus_frontend_wq = create_workqueue("xenbus_frontend"); | ||
| 488 | if (!xenbus_frontend_wq) | ||
| 489 | pr_warn("create xenbus frontend workqueue failed, S3 resume is likely to fail\n"); | ||
| 490 | } | ||
| 478 | 491 | ||
| 479 | return 0; | 492 | return 0; |
| 480 | } | 493 | } |
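Two coordinated fixes in the xenbus frontend: INIT_WORK() moves out of the resume path, where re-initializing a possibly still-queued work item would corrupt it, into a one-shot probe hook; and the dedicated workqueue is created only when the xenstore backend is local (XS_LOCAL, the sole case that takes the delayed-resume path), with a warning instead of silent breakage if creation fails. The init-once, queue-many rule in miniature (hypothetical names):

    #include <linux/workqueue.h>

    struct my_dev {
            struct work_struct work;
    };

    static void my_work_fn(struct work_struct *work)
    {
            /* resume-time heavy lifting happens here */
    }

    static int my_probe(struct my_dev *d)
    {
            /* Initialize exactly once; INIT_WORK() on a pending item would
             * clobber its pending state and list linkage. */
            INIT_WORK(&d->work, my_work_fn);
            return 0;
    }

    static int my_resume(struct my_dev *d)
    {
            queue_work(system_wq, &d->work);        /* safe to repeat */
            return 0;
    }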
