Diffstat (limited to 'drivers')
77 files changed, 6337 insertions, 1521 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 88e42abf5d88..0df8fcb687d6 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,6 +61,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
 static int acpi_ac_add(struct acpi_device *device);
 static int acpi_ac_remove(struct acpi_device *device, int type);
 static int acpi_ac_resume(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
 
 static const struct acpi_device_id ac_device_ids[] = {
 	{"ACPI0003", 0},
@@ -72,10 +73,12 @@ static struct acpi_driver acpi_ac_driver = {
 	.name = "ac",
 	.class = ACPI_AC_CLASS,
 	.ids = ac_device_ids,
+	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
 	.ops = {
 		.add = acpi_ac_add,
 		.remove = acpi_ac_remove,
 		.resume = acpi_ac_resume,
+		.notify = acpi_ac_notify,
 		},
 };
 
@@ -220,16 +223,14 @@ static int acpi_ac_remove_fs(struct acpi_device *device)
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
 {
-	struct acpi_ac *ac = data;
-	struct acpi_device *device = NULL;
+	struct acpi_ac *ac = acpi_driver_data(device);
 
 
 	if (!ac)
 		return;
 
-	device = ac->device;
 	switch (event) {
 	default:
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -253,7 +254,6 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
 static int acpi_ac_add(struct acpi_device *device)
 {
 	int result = 0;
-	acpi_status status = AE_OK;
 	struct acpi_ac *ac = NULL;
 
 
@@ -286,13 +286,6 @@ static int acpi_ac_add(struct acpi_device *device)
 	ac->charger.get_property = get_ac_property;
 	power_supply_register(&ac->device->dev, &ac->charger);
 #endif
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_ALL_NOTIFY, acpi_ac_notify,
-					     ac);
-	if (ACPI_FAILURE(status)) {
-		result = -ENODEV;
-		goto end;
-	}
 
 	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
@@ -328,7 +321,6 @@ static int acpi_ac_resume(struct acpi_device *device)
 
 static int acpi_ac_remove(struct acpi_device *device, int type)
 {
-	acpi_status status = AE_OK;
 	struct acpi_ac *ac = NULL;
 
 
@@ -337,8 +329,6 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
 
 	ac = acpi_driver_data(device);
 
-	status = acpi_remove_notify_handler(device->handle,
-					    ACPI_ALL_NOTIFY, acpi_ac_notify);
#ifdef CONFIG_ACPI_SYSFS_POWER
 	if (ac->charger.dev)
 		power_supply_unregister(&ac->charger);
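The conversion above is the pattern that recurs through this series: instead of each driver installing its own handler with acpi_install_notify_handler() and threading a context pointer through a void *data argument, the ACPI core dispatches events to the bound driver's .notify op, and ACPI_DRIVER_ALL_NOTIFY_EVENTS opts the driver into system notifications as well as device ones. A minimal sketch of the converted shape for a hypothetical "foo" driver (all foo_* names are illustrative, not from this patch):

	static void acpi_foo_notify(struct acpi_device *device, u32 event)
	{
		/* driver data replaces the old void *data handler argument */
		struct acpi_foo *foo = acpi_driver_data(device);

		if (!foo)
			return;
		/* handle the event; the core already resolved the device */
	}

	static struct acpi_driver acpi_foo_driver = {
		.name = "foo",
		.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
		.ops = {
			.add = acpi_foo_add,	/* no install/remove handler calls */
			.remove = acpi_foo_remove,
			.notify = acpi_foo_notify,
		},
	};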
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0de6312919a..58b4517ce712 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -796,13 +796,12 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
                                  Driver Interface
    -------------------------------------------------------------------------- */
 
-static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_battery_notify(struct acpi_device *device, u32 event)
 {
-	struct acpi_battery *battery = data;
-	struct acpi_device *device;
+	struct acpi_battery *battery = acpi_driver_data(device);
+
 	if (!battery)
 		return;
-	device = battery->device;
 	acpi_battery_update(battery);
 	acpi_bus_generate_proc_event(device, event,
 				     acpi_battery_present(battery));
@@ -819,7 +818,6 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
 static int acpi_battery_add(struct acpi_device *device)
 {
 	int result = 0;
-	acpi_status status = 0;
 	struct acpi_battery *battery = NULL;
 	if (!device)
 		return -EINVAL;
@@ -834,22 +832,12 @@ static int acpi_battery_add(struct acpi_device *device)
 	acpi_battery_update(battery);
 #ifdef CONFIG_ACPI_PROCFS_POWER
 	result = acpi_battery_add_fs(device);
-	if (result)
-		goto end;
 #endif
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_ALL_NOTIFY,
-					     acpi_battery_notify, battery);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Installing notify handler"));
-		result = -ENODEV;
-		goto end;
-	}
-	printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
-	       ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
-	       device->status.battery_present ? "present" : "absent");
-      end:
-	if (result) {
+	if (!result) {
+		printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+			ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+			device->status.battery_present ? "present" : "absent");
+	} else {
 #ifdef CONFIG_ACPI_PROCFS_POWER
 		acpi_battery_remove_fs(device);
 #endif
@@ -860,15 +848,11 @@ static int acpi_battery_add(struct acpi_device *device)
 
 static int acpi_battery_remove(struct acpi_device *device, int type)
 {
-	acpi_status status = 0;
 	struct acpi_battery *battery = NULL;
 
 	if (!device || !acpi_driver_data(device))
 		return -EINVAL;
 	battery = acpi_driver_data(device);
-	status = acpi_remove_notify_handler(device->handle,
-					    ACPI_ALL_NOTIFY,
-					    acpi_battery_notify);
 #ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_battery_remove_fs(device);
 #endif
@@ -896,10 +880,12 @@ static struct acpi_driver acpi_battery_driver = {
 	.name = "battery",
 	.class = ACPI_BATTERY_CLASS,
 	.ids = battery_device_ids,
+	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
 	.ops = {
 		.add = acpi_battery_add,
 		.resume = acpi_battery_resume,
 		.remove = acpi_battery_remove,
+		.notify = acpi_battery_notify,
 		},
 };
 
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 09c69806c1fc..f6baa77deefb 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -192,6 +192,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
 		},
 	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Sony VGN-NS10J_S",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Sony VGN-SR290J",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+		},
+	},
 
 	/*
 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
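For context on how these quirk entries fire: dmi_check_system() runs an entry's callback when every DMI_MATCH() substring matches the firmware-provided DMI strings. The dmi_disable_osi_vista callback the new entries reference lives elsewhere in this file and is unchanged by this diff; from the same-era source it looks roughly like:

	static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
	{
		printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
		acpi_osi_setup("!Windows 2006");	/* stop answering _OSI("Windows 2006") */
		return 0;
	}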
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ae862f1798dc..2876fc70c3a9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -450,18 +450,16 @@ int acpi_bus_receive_event(struct acpi_bus_event *event)
                              Notification Handling
    -------------------------------------------------------------------------- */
 
-static int
-acpi_bus_check_device(struct acpi_device *device, int *status_changed)
+static void acpi_bus_check_device(acpi_handle handle)
 {
-	acpi_status status = 0;
+	struct acpi_device *device;
+	acpi_status status;
 	struct acpi_device_status old_status;
 
-
+	if (acpi_bus_get_device(handle, &device))
+		return;
 	if (!device)
-		return -EINVAL;
-
-	if (status_changed)
-		*status_changed = 0;
+		return;
 
 	old_status = device->status;
 
@@ -471,22 +469,15 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed)
 	 */
 	if (device->parent && !device->parent->status.present) {
 		device->status = device->parent->status;
-		if (STRUCT_TO_INT(old_status) != STRUCT_TO_INT(device->status)) {
-			if (status_changed)
-				*status_changed = 1;
-		}
-		return 0;
+		return;
 	}
 
 	status = acpi_bus_get_status(device);
 	if (ACPI_FAILURE(status))
-		return -ENODEV;
+		return;
 
 	if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status))
-		return 0;
-
-	if (status_changed)
-		*status_changed = 1;
+		return;
 
 	/*
 	 * Device Insertion/Removal
@@ -498,33 +489,17 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed)
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n"));
 		/* TBD: Handle device removal */
 	}
-
-	return 0;
 }
 
-static int acpi_bus_check_scope(struct acpi_device *device)
+static void acpi_bus_check_scope(acpi_handle handle)
 {
-	int result = 0;
-	int status_changed = 0;
-
-
-	if (!device)
-		return -EINVAL;
-
 	/* Status Change? */
-	result = acpi_bus_check_device(device, &status_changed);
-	if (result)
-		return result;
-
-	if (!status_changed)
-		return 0;
+	acpi_bus_check_device(handle);
 
 	/*
 	 * TBD: Enumerate child devices within this device's scope and
 	 * run acpi_bus_check_device()'s on them.
 	 */
-
-	return 0;
 }
 
 static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
@@ -547,22 +522,19 @@ EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
  */
 static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 {
-	int result = 0;
 	struct acpi_device *device = NULL;
+	struct acpi_driver *driver;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
+			  type, handle));
 
 	blocking_notifier_call_chain(&acpi_bus_notify_list,
 		type, (void *)handle);
 
-	if (acpi_bus_get_device(handle, &device))
-		return;
-
 	switch (type) {
 
 	case ACPI_NOTIFY_BUS_CHECK:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received BUS CHECK notification for device [%s]\n",
-			device->pnp.bus_id));
-		result = acpi_bus_check_scope(device);
+		acpi_bus_check_scope(handle);
 		/*
 		 * TBD: We'll need to outsource certain events to non-ACPI
 		 * drivers via the device manager (device.c).
@@ -570,10 +542,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 		break;
 
 	case ACPI_NOTIFY_DEVICE_CHECK:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received DEVICE CHECK notification for device [%s]\n",
-			device->pnp.bus_id));
-		result = acpi_bus_check_device(device, NULL);
+		acpi_bus_check_device(handle);
 		/*
 		 * TBD: We'll need to outsource certain events to non-ACPI
 		 * drivers via the device manager (device.c).
@@ -581,44 +550,26 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 		break;
 
 	case ACPI_NOTIFY_DEVICE_WAKE:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received DEVICE WAKE notification for device [%s]\n",
-			device->pnp.bus_id));
 		/* TBD */
 		break;
 
 	case ACPI_NOTIFY_EJECT_REQUEST:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received EJECT REQUEST notification for device [%s]\n",
-			device->pnp.bus_id));
 		/* TBD */
 		break;
 
 	case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received DEVICE CHECK LIGHT notification for device [%s]\n",
-			device->pnp.bus_id));
 		/* TBD: Exactly what does 'light' mean? */
 		break;
 
 	case ACPI_NOTIFY_FREQUENCY_MISMATCH:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received FREQUENCY MISMATCH notification for device [%s]\n",
-			device->pnp.bus_id));
 		/* TBD */
 		break;
 
 	case ACPI_NOTIFY_BUS_MODE_MISMATCH:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received BUS MODE MISMATCH notification for device [%s]\n",
-			device->pnp.bus_id));
 		/* TBD */
 		break;
 
 	case ACPI_NOTIFY_POWER_FAULT:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Received POWER FAULT notification for device [%s]\n",
-			device->pnp.bus_id));
 		/* TBD */
 		break;
 
@@ -629,7 +580,13 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 		break;
 	}
 
-	return;
+	acpi_bus_get_device(handle, &device);
+	if (device) {
+		driver = device->driver;
+		if (driver && driver->ops.notify &&
+		    (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+			driver->ops.notify(device, type);
+	}
 }
 
 /* --------------------------------------------------------------------------
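After this change, acpi_bus_notify() is the single global notify handler: it first runs the blocking notifier chain for every event, then forwards the event to the bound driver's .notify op when the driver opted in via ACPI_DRIVER_ALL_NOTIFY_EVENTS. A sketch of a chain consumer, using the register/unregister helpers exported just above this function (my_* names are illustrative):

	static int my_acpi_notify(struct notifier_block *nb, unsigned long type,
				  void *data)
	{
		acpi_handle handle = data;	/* the handle rides in the payload */

		/* inspect type/handle; defer heavy work, this runs per event */
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = { .notifier_call = my_acpi_notify };

	/* register_acpi_bus_notifier(&my_nb); ... unregister_acpi_bus_notifier(&my_nb); */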
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8bd2c2a6884d..a8a5c29958c8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -140,46 +140,6 @@ struct device *acpi_get_physical_device(acpi_handle handle)
 
 EXPORT_SYMBOL(acpi_get_physical_device);
 
-/* ToDo: When a PCI bridge is found, return the PCI device behind the bridge
- *     This should work in general, but did not on a Lenovo T61 for the
- *     graphics card. But this must be fixed when the PCI device is
- *     bound and the kernel device struct is attached to the acpi device
- * Note: A success call will increase reference count by one
- *       Do call put_device(dev) on the returned device then
- */
-struct device *acpi_get_physical_pci_device(acpi_handle handle)
-{
-	struct device *dev;
-	long long device_id;
-	acpi_status status;
-
-	status =
-	    acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
-
-	if (ACPI_FAILURE(status))
-		return NULL;
-
-	/* We need to attempt to determine whether the _ADR refers to a
-	   PCI device or not. There's no terribly good way to do this,
-	   so the best we can hope for is to assume that there'll never
-	   be a device in the host bridge */
-	if (device_id >= 0x10000) {
-		/* It looks like a PCI device. Does it exist? */
-		dev = acpi_get_physical_device(handle);
-	} else {
-		/* It doesn't look like a PCI device. Does its parent
-		   exist? */
-		acpi_handle phandle;
-		if (acpi_get_parent(handle, &phandle))
-			return NULL;
-		dev = acpi_get_physical_device(phandle);
-	}
-	if (!dev)
-		return NULL;
-	return dev;
-}
-EXPORT_SYMBOL(acpi_get_physical_pci_device);
-
 static int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
 	struct acpi_device *acpi_dev;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index d916bea729f1..71670719d61a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -79,6 +79,7 @@ static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
 static struct workqueue_struct *kacpi_notify_wq;
+static struct workqueue_struct *kacpi_hotplug_wq;
 
 struct acpi_res_list {
 	resource_size_t start;
@@ -192,8 +193,10 @@ acpi_status acpi_os_initialize1(void)
 {
 	kacpid_wq = create_singlethread_workqueue("kacpid");
 	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
+	kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
+	BUG_ON(!kacpi_hotplug_wq);
 	return AE_OK;
 }
 
@@ -206,6 +209,7 @@ acpi_status acpi_os_terminate(void)
 
 	destroy_workqueue(kacpid_wq);
 	destroy_workqueue(kacpi_notify_wq);
+	destroy_workqueue(kacpi_hotplug_wq);
 
 	return AE_OK;
 }
@@ -716,6 +720,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
 	struct workqueue_struct *queue;
+	work_func_t func;
 	int ret;
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 			  "Scheduling function [%p(%p)] for deferred execution.\n",
@@ -740,15 +745,17 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	dpc->function = function;
 	dpc->context = context;
 
-	if (!hp) {
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-		queue = (type == OSL_NOTIFY_HANDLER) ?
-			kacpi_notify_wq : kacpid_wq;
-		ret = queue_work(queue, &dpc->work);
-	} else {
-		INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
-		ret = schedule_work(&dpc->work);
-	}
+	/*
+	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
+	 * because the hotplug code may call driver .remove() functions,
+	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
+	 * to flush these workqueues.
+	 */
+	queue = hp ? kacpi_hotplug_wq :
+		(type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
+	func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
+	INIT_WORK(&dpc->work, func);
+	ret = queue_work(queue, &dpc->work);
 
 	if (!ret) {
 		printk(KERN_ERR PREFIX
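The comment in the new code is the heart of this change: work must never flush the workqueue it is running on, or it waits on itself forever. Restating the routing that __acpi_os_execute() now performs (a sketch inferred from the code above; the table itself is not in the patch):

	/*
	 *   hp     type                 queue              work function
	 *   -----  -------------------  -----------------  ---------------------------
	 *   true   (any)                kacpi_hotplug_wq   acpi_os_execute_hp_deferred
	 *   false  OSL_NOTIFY_HANDLER   kacpi_notify_wq    acpi_os_execute_deferred
	 *   false  (other)              kacpid_wq          acpi_os_execute_deferred
	 */

Because hotplug work now runs on its own queue, it can safely call driver .remove() paths that flush kacpid_wq or kacpi_notify_wq.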
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index bc46de3d967f..a5a77b78a723 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -24,12 +24,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/pm.h>
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <acpi/acpi_bus.h>
@@ -38,310 +33,76 @@
 #define _COMPONENT		ACPI_PCI_COMPONENT
 ACPI_MODULE_NAME("pci_bind");
 
-struct acpi_pci_data {
-	struct acpi_pci_id id;
-	struct pci_bus *bus;
-	struct pci_dev *dev;
-};
-
-static int acpi_pci_unbind(struct acpi_device *device);
-
-static void acpi_pci_data_handler(acpi_handle handle, u32 function,
-				  void *context)
-{
-
-	/* TBD: Anything we need to do here? */
-
-	return;
-}
-
-/**
- * acpi_get_pci_id
- * ------------------
- * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem)
- * to resolve PCI information for ACPI-PCI devices defined in the namespace.
- * This typically occurs when resolving PCI operation region information.
- */
-acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id)
+static int acpi_pci_unbind(struct acpi_device *device)
 {
-	int result = 0;
-	acpi_status status = AE_OK;
-	struct acpi_device *device = NULL;
-	struct acpi_pci_data *data = NULL;
-
-
-	if (!id)
-		return AE_BAD_PARAMETER;
-
-	result = acpi_bus_get_device(handle, &device);
-	if (result) {
-		printk(KERN_ERR PREFIX
-			    "Invalid ACPI Bus context for device %s\n",
-			    acpi_device_bid(device));
-		return AE_NOT_EXIST;
-	}
-
-	status = acpi_get_data(handle, acpi_pci_data_handler, (void **)&data);
-	if (ACPI_FAILURE(status) || !data) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Invalid ACPI-PCI context for device %s",
-				acpi_device_bid(device)));
-		return status;
-	}
+	struct pci_dev *dev;
 
-	*id = data->id;
+	dev = acpi_get_pci_dev(device->handle);
+	if (!dev || !dev->subordinate)
+		goto out;
 
-	/*
-	   id->segment = data->id.segment;
-	   id->bus = data->id.bus;
-	   id->device = data->id.device;
-	   id->function = data->id.function;
-	 */
+	acpi_pci_irq_del_prt(dev->subordinate);
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Device %s has PCI address %04x:%02x:%02x.%d\n",
-			  acpi_device_bid(device), id->segment, id->bus,
-			  id->device, id->function));
+	device->ops.bind = NULL;
+	device->ops.unbind = NULL;
 
-	return AE_OK;
+out:
+	pci_dev_put(dev);
+	return 0;
 }
 
-EXPORT_SYMBOL(acpi_get_pci_id);
-
-int acpi_pci_bind(struct acpi_device *device)
+static int acpi_pci_bind(struct acpi_device *device)
 {
-	int result = 0;
 	acpi_status status;
-	struct acpi_pci_data *data;
-	struct acpi_pci_data *pdata;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	acpi_handle handle;
+	struct pci_bus *bus;
+	struct pci_dev *dev;
 
-	if (!device || !device->parent)
-		return -EINVAL;
-
-	data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
-	if (ACPI_FAILURE(status)) {
-		kfree(data);
-		return -ENODEV;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n",
-			  (char *)buffer.pointer));
-
-	/*
-	 * Segment & Bus
-	 * -------------
-	 * These are obtained via the parent device's ACPI-PCI context.
-	 */
-	status = acpi_get_data(device->parent->handle, acpi_pci_data_handler,
-			       (void **)&pdata);
-	if (ACPI_FAILURE(status) || !pdata || !pdata->bus) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Invalid ACPI-PCI context for parent device %s",
-				acpi_device_bid(device->parent)));
-		result = -ENODEV;
-		goto end;
-	}
-	data->id.segment = pdata->id.segment;
-	data->id.bus = pdata->bus->number;
-
-	/*
-	 * Device & Function
-	 * -----------------
-	 * These are simply obtained from the device's _ADR method.  Note
-	 * that a value of zero is valid.
-	 */
-	data->id.device = device->pnp.bus_address >> 16;
-	data->id.function = device->pnp.bus_address & 0xFFFF;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n",
-			  data->id.segment, data->id.bus, data->id.device,
-			  data->id.function));
-
-	/*
-	 * TBD: Support slot devices (e.g. function=0xFFFF).
-	 */
-
-	/*
-	 * Locate PCI Device
-	 * -----------------
-	 * Locate matching device in PCI namespace.  If it doesn't exist
-	 * this typically means that the device isn't currently inserted
-	 * (e.g. docking station, port replicator, etc.).
-	 */
-	data->dev = pci_get_slot(pdata->bus,
-				PCI_DEVFN(data->id.device, data->id.function));
-	if (!data->dev) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Device %04x:%02x:%02x.%d not present in PCI namespace\n",
-				  data->id.segment, data->id.bus,
-				  data->id.device, data->id.function));
-		result = -ENODEV;
-		goto end;
-	}
-	if (!data->dev->bus) {
-		printk(KERN_ERR PREFIX
-			    "Device %04x:%02x:%02x.%d has invalid 'bus' field\n",
-			    data->id.segment, data->id.bus,
-			    data->id.device, data->id.function);
-		result = -ENODEV;
-		goto end;
-	}
+	dev = acpi_get_pci_dev(device->handle);
+	if (!dev)
+		return 0;
 
 	/*
-	 * PCI Bridge?
-	 * -----------
-	 * If so, set the 'bus' field and install the 'bind' function to
-	 * facilitate callbacks for all of its children.
+	 * Install the 'bind' function to facilitate callbacks for
+	 * children of the P2P bridge.
 	 */
-	if (data->dev->subordinate) {
+	if (dev->subordinate) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "Device %04x:%02x:%02x.%d is a PCI bridge\n",
-				  data->id.segment, data->id.bus,
-				  data->id.device, data->id.function));
-		data->bus = data->dev->subordinate;
+				  pci_domain_nr(dev->bus), dev->bus->number,
+				  PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)));
 		device->ops.bind = acpi_pci_bind;
 		device->ops.unbind = acpi_pci_unbind;
 	}
 
 	/*
-	 * Attach ACPI-PCI Context
-	 * -----------------------
-	 * Thus binding the ACPI and PCI devices.
-	 */
-	status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Unable to attach ACPI-PCI context to device %s",
-				acpi_device_bid(device)));
-		result = -ENODEV;
-		goto end;
-	}
-
-	/*
-	 * PCI Routing Table
-	 * -----------------
-	 * Evaluate and parse _PRT, if exists.  This code is independent of
-	 * PCI bridges (above) to allow parsing of _PRT objects within the
-	 * scope of non-bridge devices.  Note that _PRTs within the scope of
-	 * a PCI bridge assume the bridge's subordinate bus number.
+	 * Evaluate and parse _PRT, if exists.  This code allows parsing of
+	 * _PRT objects within the scope of non-bridge devices.  Note that
+	 * _PRTs within the scope of a PCI bridge assume the bridge's
+	 * subordinate bus number.
 	 *
 	 * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
 	 */
 	status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
-	if (ACPI_SUCCESS(status)) {
-		if (data->bus)	/* PCI-PCI bridge */
-			acpi_pci_irq_add_prt(device->handle, data->id.segment,
-					     data->bus->number);
-		else		/* non-bridge PCI device */
-			acpi_pci_irq_add_prt(device->handle, data->id.segment,
-					     data->id.bus);
-	}
-
-      end:
-	kfree(buffer.pointer);
-	if (result) {
-		pci_dev_put(data->dev);
-		kfree(data);
-	}
-	return result;
-}
-
-static int acpi_pci_unbind(struct acpi_device *device)
-{
-	int result = 0;
-	acpi_status status;
-	struct acpi_pci_data *data;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-
-	if (!device || !device->parent)
-		return -EINVAL;
-
-	status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
 	if (ACPI_FAILURE(status))
-		return -ENODEV;
+		goto out;
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n",
-			  (char *) buffer.pointer));
-	kfree(buffer.pointer);
+	if (dev->subordinate)
+		bus = dev->subordinate;
+	else
+		bus = dev->bus;
 
-	status =
-	    acpi_get_data(device->handle, acpi_pci_data_handler,
-			  (void **)&data);
-	if (ACPI_FAILURE(status)) {
-		result = -ENODEV;
-		goto end;
-	}
+	acpi_pci_irq_add_prt(device->handle, bus);
 
-	status = acpi_detach_data(device->handle, acpi_pci_data_handler);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Unable to detach data from device %s",
-				acpi_device_bid(device)));
-		result = -ENODEV;
-		goto end;
-	}
-	if (data->dev->subordinate) {
-		acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
-	}
-	pci_dev_put(data->dev);
-	kfree(data);
-
-      end:
-	return result;
+out:
+	pci_dev_put(dev);
+	return 0;
 }
 
-int
-acpi_pci_bind_root(struct acpi_device *device,
-		   struct acpi_pci_id *id, struct pci_bus *bus)
+int acpi_pci_bind_root(struct acpi_device *device)
 {
-	int result = 0;
-	acpi_status status;
-	struct acpi_pci_data *data = NULL;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-	if (!device || !id || !bus) {
-		return -EINVAL;
-	}
-
-	data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	data->id = *id;
-	data->bus = bus;
 	device->ops.bind = acpi_pci_bind;
 	device->ops.unbind = acpi_pci_unbind;
 
-	status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
-	if (ACPI_FAILURE(status)) {
-		kfree (data);
-		return -ENODEV;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to "
-			"%04x:%02x\n", (char *)buffer.pointer,
-			id->segment, id->bus));
-
-	status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Unable to attach ACPI-PCI context to device %s",
-				(char *)buffer.pointer));
-		result = -ENODEV;
-		goto end;
-	}
-
-      end:
-	kfree(buffer.pointer);
-	if (result != 0)
-		kfree(data);
-
-	return result;
+	return 0;
 }
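Both rewritten paths lean on the acpi_get_pci_dev() helper introduced in the pci_root.c hunks below; its contract is get/put, like the other pci_get_* interfaces. A sketch of the calling discipline (foo_handle_to_pci is illustrative, not part of this patch):

	static int foo_handle_to_pci(acpi_handle handle)
	{
		struct pci_dev *dev = acpi_get_pci_dev(handle);	/* takes a reference */

		if (!dev)
			return -ENODEV;	/* no PCI device behind this handle */

		/* ... use dev ... */

		pci_dev_put(dev);	/* caller must drop the reference */
		return 0;
	}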
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 2faa9e2ac893..b794eb88ab90 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -182,7 +182,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
 	}
 }
 
-static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
+static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
 				  struct acpi_pci_routing_table *prt)
 {
 	struct acpi_prt_entry *entry;
@@ -196,8 +196,8 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
 	 * 1=INTA, 2=INTB.  We use the PCI encoding throughout, so convert
 	 * it here.
 	 */
-	entry->id.segment = segment;
-	entry->id.bus = bus;
+	entry->id.segment = pci_domain_nr(bus);
+	entry->id.bus = bus->number;
 	entry->id.device = (prt->address >> 16) & 0xFFFF;
 	entry->pin = prt->pin + 1;
 
@@ -242,7 +242,7 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
 	return 0;
 }
 
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
 {
 	acpi_status status;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -271,7 +271,7 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
 
 	entry = buffer.pointer;
 	while (entry && (entry->length > 0)) {
-		acpi_pci_irq_add_entry(handle, segment, bus, entry);
+		acpi_pci_irq_add_entry(handle, bus, entry);
 		entry = (struct acpi_pci_routing_table *)
 		    ((unsigned long)entry + entry->length);
 	}
@@ -280,16 +280,17 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
 	return 0;
 }
 
-void acpi_pci_irq_del_prt(int segment, int bus)
+void acpi_pci_irq_del_prt(struct pci_bus *bus)
 {
 	struct acpi_prt_entry *entry, *tmp;
 
 	printk(KERN_DEBUG
 	       "ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
-	       segment, bus);
+	       pci_domain_nr(bus), bus->number);
 	spin_lock(&acpi_prt_lock);
 	list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
-		if (segment == entry->id.segment && bus == entry->id.bus) {
+		if (pci_domain_nr(bus) == entry->id.segment
+		    && bus->number == entry->id.bus) {
 			list_del(&entry->list);
 			kfree(entry);
 		}
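Passing a struct pci_bus instead of a (segment, bus) pair removes a class of mismatch bugs: both numbers are now derived from the same object at the point of use. The derivation the new code relies on (sketch):

	int segment = pci_domain_nr(bus);	/* PCI segment/domain of this bus */
	int busnr = bus->number;		/* bus number within that domain */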
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 196f97d00956..8a5bf3b356fa 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -63,9 +63,10 @@ static struct acpi_driver acpi_pci_root_driver = {
 
 struct acpi_pci_root {
 	struct list_head node;
-	struct acpi_device * device;
-	struct acpi_pci_id id;
+	struct acpi_device *device;
 	struct pci_bus *bus;
+	u16 segment;
+	u8 bus_nr;
 
 	u32 osc_support_set;	/* _OSC state of support bits */
 	u32 osc_control_set;	/* _OSC state of control bits */
@@ -82,7 +83,7 @@ static DEFINE_MUTEX(osc_lock);
 int acpi_pci_register_driver(struct acpi_pci_driver *driver)
 {
 	int n = 0;
-	struct list_head *entry;
+	struct acpi_pci_root *root;
 
 	struct acpi_pci_driver **pptr = &sub_driver;
 	while (*pptr)
@@ -92,9 +93,7 @@ int acpi_pci_register_driver(struct acpi_pci_driver *driver)
 	if (!driver->add)
 		return 0;
 
-	list_for_each(entry, &acpi_pci_roots) {
-		struct acpi_pci_root *root;
-		root = list_entry(entry, struct acpi_pci_root, node);
+	list_for_each_entry(root, &acpi_pci_roots, node) {
 		driver->add(root->device->handle);
 		n++;
 	}
@@ -106,7 +105,7 @@ EXPORT_SYMBOL(acpi_pci_register_driver);
 
 void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
 {
-	struct list_head *entry;
+	struct acpi_pci_root *root;
 
 	struct acpi_pci_driver **pptr = &sub_driver;
 	while (*pptr) {
@@ -120,28 +119,48 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
 	if (!driver->remove)
 		return;
 
-	list_for_each(entry, &acpi_pci_roots) {
-		struct acpi_pci_root *root;
-		root = list_entry(entry, struct acpi_pci_root, node);
+	list_for_each_entry(root, &acpi_pci_roots, node)
 		driver->remove(root->device->handle);
-	}
 }
 
 EXPORT_SYMBOL(acpi_pci_unregister_driver);
 
 acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
 {
-	struct acpi_pci_root *tmp;
+	struct acpi_pci_root *root;
 
-	list_for_each_entry(tmp, &acpi_pci_roots, node) {
-		if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
-			return tmp->device->handle;
-	}
+	list_for_each_entry(root, &acpi_pci_roots, node)
+		if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+			return root->device->handle;
 	return NULL;
 }
 
 EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
 
+/**
+ * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
+ * @handle - the ACPI CA node in question.
+ *
+ * Note: we could make this API take a struct acpi_device * instead, but
+ * for now, it's more convenient to operate on an acpi_handle.
+ */
+int acpi_is_root_bridge(acpi_handle handle)
+{
+	int ret;
+	struct acpi_device *device;
+
+	ret = acpi_bus_get_device(handle, &device);
+	if (ret)
+		return 0;
+
+	ret = acpi_match_device_ids(device, root_device_ids);
+	if (ret)
+		return 0;
+	else
+		return 1;
+}
+EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
+
 static acpi_status
 get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
 {
@@ -161,19 +180,22 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
 	return AE_OK;
 }
 
-static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
+static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
+					     unsigned long long *bus)
 {
 	acpi_status status;
+	int busnum;
 
-	*busnum = -1;
+	busnum = -1;
 	status =
 	    acpi_walk_resources(handle, METHOD_NAME__CRS,
-				get_root_bridge_busnr_callback, busnum);
+				get_root_bridge_busnr_callback, &busnum);
 	if (ACPI_FAILURE(status))
 		return status;
 	/* Check if we really get a bus number from _CRS */
-	if (*busnum == -1)
+	if (busnum == -1)
 		return AE_ERROR;
+	*bus = busnum;
 	return AE_OK;
 }
 
@@ -298,6 +320,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
 static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
 {
 	struct acpi_pci_root *root;
+
 	list_for_each_entry(root, &acpi_pci_roots, node) {
 		if (root->device->handle == handle)
 			return root;
@@ -305,6 +328,87 @@ static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
 	return NULL;
 }
 
+struct acpi_handle_node {
+	struct list_head node;
+	acpi_handle handle;
+};
+
+/**
+ * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
+ * @handle: the handle in question
+ *
+ * Given an ACPI CA handle, the desired PCI device is located in the
+ * list of PCI devices.
+ *
+ * If the device is found, its reference count is increased and this
+ * function returns a pointer to its data structure.  The caller must
+ * decrement the reference count by calling pci_dev_put().
+ * If no device is found, %NULL is returned.
+ */
+struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+	int dev, fn;
+	unsigned long long adr;
+	acpi_status status;
+	acpi_handle phandle;
+	struct pci_bus *pbus;
+	struct pci_dev *pdev = NULL;
+	struct acpi_handle_node *node, *tmp;
+	struct acpi_pci_root *root;
+	LIST_HEAD(device_list);
+
+	/*
+	 * Walk up the ACPI CA namespace until we reach a PCI root bridge.
+	 */
+	phandle = handle;
+	while (!acpi_is_root_bridge(phandle)) {
+		node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
+		if (!node)
+			goto out;
+
+		INIT_LIST_HEAD(&node->node);
+		node->handle = phandle;
+		list_add(&node->node, &device_list);
+
+		status = acpi_get_parent(phandle, &phandle);
+		if (ACPI_FAILURE(status))
+			goto out;
+	}
+
+	root = acpi_pci_find_root(phandle);
+	if (!root)
+		goto out;
+
+	pbus = root->bus;
+
+	/*
+	 * Now, walk back down the PCI device tree until we return to our
+	 * original handle. Assumes that everything between the PCI root
+	 * bridge and the device we're looking for must be a P2P bridge.
+	 */
+	list_for_each_entry(node, &device_list, node) {
+		acpi_handle hnd = node->handle;
+		status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
+		if (ACPI_FAILURE(status))
+			goto out;
+		dev = (adr >> 16) & 0xffff;
+		fn = adr & 0xffff;
+
+		pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
+		if (hnd == handle)
+			break;
+
+		pbus = pdev->subordinate;
+		pci_dev_put(pdev);
+	}
+out:
+	list_for_each_entry_safe(node, tmp, &device_list, node)
+		kfree(node);
+
+	return pdev;
+}
+EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
+
 /**
  * acpi_pci_osc_control_set - commit requested control to Firmware
  * @handle: acpi_handle for the target ACPI object
@@ -363,31 +467,46 @@ EXPORT_SYMBOL(acpi_pci_osc_control_set);
 
 static int __devinit acpi_pci_root_add(struct acpi_device *device)
 {
-	int result = 0;
-	struct acpi_pci_root *root = NULL;
-	struct acpi_pci_root *tmp;
-	acpi_status status = AE_OK;
-	unsigned long long value = 0;
-	acpi_handle handle = NULL;
+	unsigned long long segment, bus;
+	acpi_status status;
+	int result;
+	struct acpi_pci_root *root;
+	acpi_handle handle;
 	struct acpi_device *child;
 	u32 flags, base_flags;
 
+	segment = 0;
+	status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
+				       &segment);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
+		return -ENODEV;
+	}
 
-	if (!device)
-		return -EINVAL;
+	/* Check _CRS first, then _BBN.  If no _BBN, default to zero. */
+	bus = 0;
+	status = try_get_root_bridge_busnr(device->handle, &bus);
+	if (ACPI_FAILURE(status)) {
+		status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
+		if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+			printk(KERN_ERR PREFIX
+			       "no bus number in _CRS and can't evaluate _BBN\n");
+			return -ENODEV;
+		}
+	}
 
 	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
 	if (!root)
 		return -ENOMEM;
-	INIT_LIST_HEAD(&root->node);
 
+	INIT_LIST_HEAD(&root->node);
 	root->device = device;
+	root->segment = segment & 0xFFFF;
+	root->bus_nr = bus & 0xFF;
 	strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
 	device->driver_data = root;
 
-	device->ops.bind = acpi_pci_bind;
-
 	/*
 	 * All supported architectures that use ACPI have support for
 	 * PCI domains, so we indicate this in _OSC support capabilities.
@@ -395,79 +514,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
 	acpi_pci_osc_support(root, flags);
 
-	/*
-	 * Segment
-	 * -------
-	 * Obtained via _SEG, if exists, otherwise assumed to be zero (0).
-	 */
-	status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
-				       &value);
-	switch (status) {
-	case AE_OK:
-		root->id.segment = (u16) value;
-		break;
-	case AE_NOT_FOUND:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Assuming segment 0 (no _SEG)\n"));
-		root->id.segment = 0;
-		break;
-	default:
-		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SEG"));
-		result = -ENODEV;
-		goto end;
-	}
-
-	/*
-	 * Bus
-	 * ---
-	 * Obtained via _BBN, if exists, otherwise assumed to be zero (0).
-	 */
-	status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL,
-				       &value);
-	switch (status) {
-	case AE_OK:
-		root->id.bus = (u16) value;
-		break;
-	case AE_NOT_FOUND:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming bus 0 (no _BBN)\n"));
-		root->id.bus = 0;
-		break;
-	default:
-		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BBN"));
-		result = -ENODEV;
-		goto end;
-	}
-
-	/* Some systems have wrong _BBN */
-	list_for_each_entry(tmp, &acpi_pci_roots, node) {
-		if ((tmp->id.segment == root->id.segment)
-		    && (tmp->id.bus == root->id.bus)) {
-			int bus = 0;
-			acpi_status status;
-
-			printk(KERN_ERR PREFIX
-			       "Wrong _BBN value, reboot"
-			       " and use option 'pci=noacpi'\n");
-
-			status = try_get_root_bridge_busnr(device->handle, &bus);
-			if (ACPI_FAILURE(status))
-				break;
-			if (bus != root->id.bus) {
-				printk(KERN_INFO PREFIX
-				       "PCI _CRS %d overrides _BBN 0\n", bus);
-				root->id.bus = bus;
-			}
-			break;
-		}
-	}
-	/*
-	 * Device & Function
-	 * -----------------
-	 * Obtained from _ADR (which has already been evaluated for us).
-	 */
-	root->id.device = device->pnp.bus_address >> 16;
-	root->id.function = device->pnp.bus_address & 0xFFFF;
-
 	/*
 	 * TBD: Need PCI interface for enumeration/configuration of roots.
 	 */
@@ -477,7 +523,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 
 	printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
-	       root->id.segment, root->id.bus);
+	       root->segment, root->bus_nr);
 
 	/*
 	 * Scan the Root Bridge
@@ -486,11 +532,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	 * PCI namespace does not get created until this call is made (and
 	 * thus the root bridge's pci_dev does not exist).
 	 */
-	root->bus = pci_acpi_scan_root(device, root->id.segment, root->id.bus);
+	root->bus = pci_acpi_scan_root(device, segment, bus);
 	if (!root->bus) {
 		printk(KERN_ERR PREFIX
 		       "Bus %04x:%02x not present in PCI namespace\n",
-		       root->id.segment, root->id.bus);
+		       root->segment, root->bus_nr);
 		result = -ENODEV;
 		goto end;
 	}
@@ -500,7 +546,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	 * -----------------------
	 * Thus binding the ACPI and PCI devices.
 	 */
-	result = acpi_pci_bind_root(device, &root->id, root->bus);
+	result = acpi_pci_bind_root(device);
 	if (result)
 		goto end;
 
@@ -511,8 +557,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	 */
 	status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
 	if (ACPI_SUCCESS(status))
-		result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
-					      root->id.bus);
+		result = acpi_pci_irq_add_prt(device->handle, root->bus);
 
 	/*
 	 * Scan and bind all _ADR-Based Devices
@@ -531,42 +576,28 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	if (flags != base_flags)
 		acpi_pci_osc_support(root, flags);
 
-      end:
+	return 0;
535 | if (result) { | ||
536 | if (!list_empty(&root->node)) | ||
537 | list_del(&root->node); | ||
538 | kfree(root); | ||
539 | } | ||
540 | 580 | ||
581 | end: | ||
582 | if (!list_empty(&root->node)) | ||
583 | list_del(&root->node); | ||
584 | kfree(root); | ||
541 | return result; | 585 | return result; |
542 | } | 586 | } |
543 | 587 | ||
544 | static int acpi_pci_root_start(struct acpi_device *device) | 588 | static int acpi_pci_root_start(struct acpi_device *device) |
545 | { | 589 | { |
546 | struct acpi_pci_root *root; | 590 | struct acpi_pci_root *root = acpi_driver_data(device); |
547 | 591 | ||
548 | 592 | pci_bus_add_devices(root->bus); | |
549 | list_for_each_entry(root, &acpi_pci_roots, node) { | 593 | return 0; |
550 | if (root->device == device) { | ||
551 | pci_bus_add_devices(root->bus); | ||
552 | return 0; | ||
553 | } | ||
554 | } | ||
555 | return -ENODEV; | ||
556 | } | 594 | } |
557 | 595 | ||
558 | static int acpi_pci_root_remove(struct acpi_device *device, int type) | 596 | static int acpi_pci_root_remove(struct acpi_device *device, int type) |
559 | { | 597 | { |
560 | struct acpi_pci_root *root = NULL; | 598 | struct acpi_pci_root *root = acpi_driver_data(device); |
561 | |||
562 | |||
563 | if (!device || !acpi_driver_data(device)) | ||
564 | return -EINVAL; | ||
565 | |||
566 | root = acpi_driver_data(device); | ||
567 | 599 | ||
568 | kfree(root); | 600 | kfree(root); |
569 | |||
570 | return 0; | 601 | return 0; |
571 | } | 602 | } |
572 | 603 | ||
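The rework caches the root bridge's segment and bus number directly in struct acpi_pci_root (root->segment, root->bus_nr), with _SEG and _BBN evaluated up front and defaulting to zero when absent; the first hunk above shows only the _CRS fallback tail of that logic. A minimal sketch of the evaluate-with-default step, using the real acpi_evaluate_integer() but a hypothetical helper name:

    /* Hypothetical helper: evaluate _SEG or _BBN, treating an absent
     * method as the assumed value 0, per the removed comments above. */
    static acpi_status root_get_number(acpi_handle handle, char *method,
                                       unsigned long long *value)
    {
            acpi_status status;

            *value = 0;                     /* default when method is absent */
            status = acpi_evaluate_integer(handle, method, NULL, value);
            if (status == AE_NOT_FOUND)
                    return AE_OK;
            return status;                  /* AE_OK or a real evaluation error */
    }

Only _BBN gets a second chance: when it is missing, the bus number can still be pulled out of _CRS, which is what the error message in the first hunk guards.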
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 56665a63bf19..d74365d4a6e7 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -194,7 +194,7 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) | |||
194 | 194 | ||
195 | static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) | 195 | static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) |
196 | { | 196 | { |
197 | int result = 0, state; | 197 | int result = 0; |
198 | int found = 0; | 198 | int found = 0; |
199 | acpi_status status = AE_OK; | 199 | acpi_status status = AE_OK; |
200 | struct acpi_power_resource *resource = NULL; | 200 | struct acpi_power_resource *resource = NULL; |
@@ -236,18 +236,6 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) | |||
236 | if (ACPI_FAILURE(status)) | 236 | if (ACPI_FAILURE(status)) |
237 | return -ENODEV; | 237 | return -ENODEV; |
238 | 238 | ||
239 | if (!acpi_power_nocheck) { | ||
240 | /* | ||
241 | * If acpi_power_nocheck is set, it is unnecessary to check | ||
242 | * the power state after power transition. | ||
243 | */ | ||
244 | result = acpi_power_get_state(resource->device->handle, | ||
245 | &state); | ||
246 | if (result) | ||
247 | return result; | ||
248 | if (state != ACPI_POWER_RESOURCE_STATE_ON) | ||
249 | return -ENOEXEC; | ||
250 | } | ||
251 | /* Update the power resource's _device_ power state */ | 239 | /* Update the power resource's _device_ power state */ |
252 | resource->device->power.state = ACPI_STATE_D0; | 240 | resource->device->power.state = ACPI_STATE_D0; |
253 | 241 | ||
@@ -258,7 +246,7 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) | |||
258 | 246 | ||
259 | static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) | 247 | static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) |
260 | { | 248 | { |
261 | int result = 0, state; | 249 | int result = 0; |
262 | acpi_status status = AE_OK; | 250 | acpi_status status = AE_OK; |
263 | struct acpi_power_resource *resource = NULL; | 251 | struct acpi_power_resource *resource = NULL; |
264 | struct list_head *node, *next; | 252 | struct list_head *node, *next; |
@@ -293,18 +281,6 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) | |||
293 | if (ACPI_FAILURE(status)) | 281 | if (ACPI_FAILURE(status)) |
294 | return -ENODEV; | 282 | return -ENODEV; |
295 | 283 | ||
296 | if (!acpi_power_nocheck) { | ||
297 | /* | ||
298 | * If acpi_power_nocheck is set, it is unnecessary to check | ||
299 | * the power state after power transition. | ||
300 | */ | ||
301 | result = acpi_power_get_state(handle, &state); | ||
302 | if (result) | ||
303 | return result; | ||
304 | if (state != ACPI_POWER_RESOURCE_STATE_OFF) | ||
305 | return -ENOEXEC; | ||
306 | } | ||
307 | |||
308 | /* Update the power resource's _device_ power state */ | 284 | /* Update the power resource's _device_ power state */ |
309 | resource->device->power.state = ACPI_STATE_D3; | 285 | resource->device->power.state = ACPI_STATE_D3; |
310 | 286 | ||
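acpi_power_on() and acpi_power_off_device() no longer read the resource state back after the _ON/_OFF transition, so -ENOEXEC can no longer come out of these paths. A caller that still wanted the old verification could layer it on top; a sketch only, reusing the acpi_power_get_state(handle, &state) call shape visible in the removed hunks, with a hypothetical wrapper name:

    static int acpi_power_on_checked(acpi_handle handle, struct acpi_device *dev)
    {
            int result, state;

            result = acpi_power_on(handle, dev);
            if (result)
                    return result;
            /* read back and insist the resource really reached ON */
            result = acpi_power_get_state(handle, &state);
            if (result)
                    return result;
            return state == ACPI_POWER_RESOURCE_STATE_ON ? 0 : -ENOEXEC;
    }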
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 23f0fb84f1c1..84e0f3c07442 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -89,7 +89,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr); | |||
89 | 89 | ||
90 | static const struct acpi_device_id processor_device_ids[] = { | 90 | static const struct acpi_device_id processor_device_ids[] = { |
91 | {ACPI_PROCESSOR_OBJECT_HID, 0}, | 91 | {ACPI_PROCESSOR_OBJECT_HID, 0}, |
92 | {ACPI_PROCESSOR_HID, 0}, | 92 | {"ACPI0007", 0}, |
93 | {"", 0}, | 93 | {"", 0}, |
94 | }; | 94 | }; |
95 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); | 95 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); |
@@ -596,7 +596,21 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
596 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 596 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
597 | "No bus mastering arbitration control\n")); | 597 | "No bus mastering arbitration control\n")); |
598 | 598 | ||
599 | if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) { | 599 | if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { |
600 | /* Declared with "Processor" statement; match ProcessorID */ | ||
601 | status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); | ||
602 | if (ACPI_FAILURE(status)) { | ||
603 | printk(KERN_ERR PREFIX "Evaluating processor object\n"); | ||
604 | return -ENODEV; | ||
605 | } | ||
606 | |||
607 | /* | ||
608 | * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. | ||
609 | * >>> 'acpi_get_processor_id(acpi_id, &id)' in | ||
610 | * arch/xxx/acpi.c | ||
611 | */ | ||
612 | pr->acpi_id = object.processor.proc_id; | ||
613 | } else { | ||
600 | /* | 614 | /* |
601 | * Declared with "Device" statement; match _UID. | 615 | * Declared with "Device" statement; match _UID. |
602 | * Note that we don't handle string _UIDs yet. | 616 | * Note that we don't handle string _UIDs yet. |
@@ -611,20 +625,6 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
611 | } | 625 | } |
612 | device_declaration = 1; | 626 | device_declaration = 1; |
613 | pr->acpi_id = value; | 627 | pr->acpi_id = value; |
614 | } else { | ||
615 | /* Declared with "Processor" statement; match ProcessorID */ | ||
616 | status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); | ||
617 | if (ACPI_FAILURE(status)) { | ||
618 | printk(KERN_ERR PREFIX "Evaluating processor object\n"); | ||
619 | return -ENODEV; | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. | ||
624 | * >>> 'acpi_get_processor_id(acpi_id, &id)' in | ||
625 | * arch/xxx/acpi.c | ||
626 | */ | ||
627 | pr->acpi_id = object.processor.proc_id; | ||
628 | } | 628 | } |
629 | cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id); | 629 | cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id); |
630 | 630 | ||
@@ -649,7 +649,16 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
649 | return -ENODEV; | 649 | return -ENODEV; |
650 | } | 650 | } |
651 | } | 651 | } |
652 | 652 | /* | |
653 | * On some boxes several processors use the same processor bus id. | ||
654 | * But they are located in different scopes. For example: | ||
655 | * \_SB.SCK0.CPU0 | ||
656 | * \_SB.SCK1.CPU0 | ||
657 | * Rename the processor device bus id; the new bus id is | ||
658 | * generated in the following format: | ||
659 | * CPU + CPU ID. | ||
660 | */ | ||
661 | sprintf(acpi_device_bid(device), "CPU%X", pr->id); | ||
653 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, | 662 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, |
654 | pr->acpi_id)); | 663 | pr->acpi_id)); |
655 | 664 | ||
@@ -731,6 +740,8 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device) | |||
731 | /* _PDC call should be done before doing anything else (if reqd.). */ | 740 | /* _PDC call should be done before doing anything else (if reqd.). */ |
732 | arch_acpi_processor_init_pdc(pr); | 741 | arch_acpi_processor_init_pdc(pr); |
733 | acpi_processor_set_pdc(pr); | 742 | acpi_processor_set_pdc(pr); |
743 | arch_acpi_processor_cleanup_pdc(pr); | ||
744 | |||
734 | #ifdef CONFIG_CPU_FREQ | 745 | #ifdef CONFIG_CPU_FREQ |
735 | acpi_processor_ppc_has_changed(pr); | 746 | acpi_processor_ppc_has_changed(pr); |
736 | #endif | 747 | #endif |
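The rename keeps sysfs bus ids unique when several sockets each expose a CPU0 node. Because the id is printed with %X, logical ids above 9 come out in hex; a standalone demonstration of the resulting names (plain userspace C, all names hypothetical):

    #include <stdio.h>

    int main(void)
    {
            char bid[16];

            /* pr->id formatted with %X: CPU0..CPU9, then CPUA, CPUB, ... */
            for (unsigned id = 8; id <= 11; id++) {
                    snprintf(bid, sizeof(bid), "CPU%X", id);
                    printf("%s\n", bid);    /* CPU8, CPU9, CPUA, CPUB */
            }
            return 0;
    }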
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 10a2d913635a..0efa59e7e3af 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -139,7 +139,7 @@ static void acpi_safe_halt(void) | |||
139 | * are affected too. We pick the most conservative approach: we assume | 139 | * are affected too. We pick the most conservative approach: we assume |
140 | * that the local APIC stops in both C2 and C3. | 140 | * that the local APIC stops in both C2 and C3. |
141 | */ | 141 | */ |
142 | static void acpi_timer_check_state(int state, struct acpi_processor *pr, | 142 | static void lapic_timer_check_state(int state, struct acpi_processor *pr, |
143 | struct acpi_processor_cx *cx) | 143 | struct acpi_processor_cx *cx) |
144 | { | 144 | { |
145 | struct acpi_processor_power *pwr = &pr->power; | 145 | struct acpi_processor_power *pwr = &pr->power; |
@@ -162,7 +162,7 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr, | |||
162 | pr->power.timer_broadcast_on_state = state; | 162 | pr->power.timer_broadcast_on_state = state; |
163 | } | 163 | } |
164 | 164 | ||
165 | static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) | 165 | static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) |
166 | { | 166 | { |
167 | unsigned long reason; | 167 | unsigned long reason; |
168 | 168 | ||
@@ -173,7 +173,7 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) | |||
173 | } | 173 | } |
174 | 174 | ||
175 | /* Power(C) State timer broadcast control */ | 175 | /* Power(C) State timer broadcast control */ |
176 | static void acpi_state_timer_broadcast(struct acpi_processor *pr, | 176 | static void lapic_timer_state_broadcast(struct acpi_processor *pr, |
177 | struct acpi_processor_cx *cx, | 177 | struct acpi_processor_cx *cx, |
178 | int broadcast) | 178 | int broadcast) |
179 | { | 179 | { |
@@ -190,10 +190,10 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr, | |||
190 | 190 | ||
191 | #else | 191 | #else |
192 | 192 | ||
193 | static void acpi_timer_check_state(int state, struct acpi_processor *pr, | 193 | static void lapic_timer_check_state(int state, struct acpi_processor *pr, |
194 | struct acpi_processor_cx *cstate) { } | 194 | struct acpi_processor_cx *cstate) { } |
195 | static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } | 195 | static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } |
196 | static void acpi_state_timer_broadcast(struct acpi_processor *pr, | 196 | static void lapic_timer_state_broadcast(struct acpi_processor *pr, |
197 | struct acpi_processor_cx *cx, | 197 | struct acpi_processor_cx *cx, |
198 | int broadcast) | 198 | int broadcast) |
199 | { | 199 | { |
@@ -515,7 +515,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) | |||
515 | static void acpi_processor_power_verify_c3(struct acpi_processor *pr, | 515 | static void acpi_processor_power_verify_c3(struct acpi_processor *pr, |
516 | struct acpi_processor_cx *cx) | 516 | struct acpi_processor_cx *cx) |
517 | { | 517 | { |
518 | static int bm_check_flag; | 518 | static int bm_check_flag = -1; |
519 | static int bm_control_flag = -1; | ||
519 | 520 | ||
520 | 521 | ||
521 | if (!cx->address) | 522 | if (!cx->address) |
@@ -545,12 +546,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, | |||
545 | } | 546 | } |
546 | 547 | ||
547 | /* All the logic here assumes flags.bm_check is same across all CPUs */ | 548 | /* All the logic here assumes flags.bm_check is same across all CPUs */ |
548 | if (!bm_check_flag) { | 549 | if (bm_check_flag == -1) { |
549 | /* Determine whether bm_check is needed based on CPU */ | 550 | /* Determine whether bm_check is needed based on CPU */ |
550 | acpi_processor_power_init_bm_check(&(pr->flags), pr->id); | 551 | acpi_processor_power_init_bm_check(&(pr->flags), pr->id); |
551 | bm_check_flag = pr->flags.bm_check; | 552 | bm_check_flag = pr->flags.bm_check; |
553 | bm_control_flag = pr->flags.bm_control; | ||
552 | } else { | 554 | } else { |
553 | pr->flags.bm_check = bm_check_flag; | 555 | pr->flags.bm_check = bm_check_flag; |
556 | pr->flags.bm_control = bm_control_flag; | ||
554 | } | 557 | } |
555 | 558 | ||
556 | if (pr->flags.bm_check) { | 559 | if (pr->flags.bm_check) { |
@@ -614,29 +617,25 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) | |||
614 | switch (cx->type) { | 617 | switch (cx->type) { |
615 | case ACPI_STATE_C1: | 618 | case ACPI_STATE_C1: |
616 | cx->valid = 1; | 619 | cx->valid = 1; |
617 | acpi_timer_check_state(i, pr, cx); | ||
618 | break; | 620 | break; |
619 | 621 | ||
620 | case ACPI_STATE_C2: | 622 | case ACPI_STATE_C2: |
621 | acpi_processor_power_verify_c2(cx); | 623 | acpi_processor_power_verify_c2(cx); |
622 | if (cx->valid) | ||
623 | acpi_timer_check_state(i, pr, cx); | ||
624 | break; | 624 | break; |
625 | 625 | ||
626 | case ACPI_STATE_C3: | 626 | case ACPI_STATE_C3: |
627 | acpi_processor_power_verify_c3(pr, cx); | 627 | acpi_processor_power_verify_c3(pr, cx); |
628 | if (cx->valid) | ||
629 | acpi_timer_check_state(i, pr, cx); | ||
630 | break; | 628 | break; |
631 | } | 629 | } |
632 | if (cx->valid) | 630 | if (!cx->valid) |
633 | tsc_check_state(cx->type); | 631 | continue; |
634 | 632 | ||
635 | if (cx->valid) | 633 | lapic_timer_check_state(i, pr, cx); |
636 | working++; | 634 | tsc_check_state(cx->type); |
635 | working++; | ||
637 | } | 636 | } |
638 | 637 | ||
639 | acpi_propagate_timer_broadcast(pr); | 638 | lapic_timer_propagate_broadcast(pr); |
640 | 639 | ||
641 | return (working); | 640 | return (working); |
642 | } | 641 | } |
@@ -839,7 +838,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
839 | return 0; | 838 | return 0; |
840 | } | 839 | } |
841 | 840 | ||
842 | acpi_state_timer_broadcast(pr, cx, 1); | 841 | lapic_timer_state_broadcast(pr, cx, 1); |
843 | kt1 = ktime_get_real(); | 842 | kt1 = ktime_get_real(); |
844 | acpi_idle_do_entry(cx); | 843 | acpi_idle_do_entry(cx); |
845 | kt2 = ktime_get_real(); | 844 | kt2 = ktime_get_real(); |
@@ -847,7 +846,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
847 | 846 | ||
848 | local_irq_enable(); | 847 | local_irq_enable(); |
849 | cx->usage++; | 848 | cx->usage++; |
850 | acpi_state_timer_broadcast(pr, cx, 0); | 849 | lapic_timer_state_broadcast(pr, cx, 0); |
851 | 850 | ||
852 | return idle_time; | 851 | return idle_time; |
853 | } | 852 | } |
@@ -892,7 +891,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
892 | * Must be done before busmaster disable as we might need to | 891 | * Must be done before busmaster disable as we might need to |
893 | * access HPET ! | 892 | * access HPET ! |
894 | */ | 893 | */ |
895 | acpi_state_timer_broadcast(pr, cx, 1); | 894 | lapic_timer_state_broadcast(pr, cx, 1); |
896 | 895 | ||
897 | if (cx->type == ACPI_STATE_C3) | 896 | if (cx->type == ACPI_STATE_C3) |
898 | ACPI_FLUSH_CPU_CACHE(); | 897 | ACPI_FLUSH_CPU_CACHE(); |
@@ -914,7 +913,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
914 | 913 | ||
915 | cx->usage++; | 914 | cx->usage++; |
916 | 915 | ||
917 | acpi_state_timer_broadcast(pr, cx, 0); | 916 | lapic_timer_state_broadcast(pr, cx, 0); |
918 | cx->time += sleep_ticks; | 917 | cx->time += sleep_ticks; |
919 | return idle_time; | 918 | return idle_time; |
920 | } | 919 | } |
@@ -981,7 +980,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
981 | * Must be done before busmaster disable as we might need to | 980 | * Must be done before busmaster disable as we might need to |
982 | * access HPET ! | 981 | * access HPET ! |
983 | */ | 982 | */ |
984 | acpi_state_timer_broadcast(pr, cx, 1); | 983 | lapic_timer_state_broadcast(pr, cx, 1); |
985 | 984 | ||
986 | kt1 = ktime_get_real(); | 985 | kt1 = ktime_get_real(); |
987 | /* | 986 | /* |
@@ -1026,7 +1025,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
1026 | 1025 | ||
1027 | cx->usage++; | 1026 | cx->usage++; |
1028 | 1027 | ||
1029 | acpi_state_timer_broadcast(pr, cx, 0); | 1028 | lapic_timer_state_broadcast(pr, cx, 0); |
1030 | cx->time += sleep_ticks; | 1029 | cx->time += sleep_ticks; |
1031 | return idle_time; | 1030 | return idle_time; |
1032 | } | 1031 | } |
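The old code used 0 as bm_check_flag's "not probed yet" marker, which is indistinguishable from a probed result of 0; the patch switches to a -1 sentinel and caches bm_control the same way so all CPUs agree. The pattern in isolation (userspace sketch; the probe function is a stand-in for acpi_processor_power_init_bm_check()):

    #include <stdio.h>

    static int probe_flag(void)
    {
            return 0;       /* 0 is a legitimate answer, so it cannot be the sentinel */
    }

    static int cached_flag = -1;    /* -1 = not probed yet */

    static int get_flag(void)
    {
            if (cached_flag == -1)
                    cached_flag = probe_flag();     /* first caller probes */
            return cached_flag;                     /* later callers reuse it */
    }

    int main(void)
    {
            printf("%d %d\n", get_flag(), get_flag());
            return 0;
    }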
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 8ff510b91d88..781435d7e369 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -95,7 +95,7 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha | |||
95 | } | 95 | } |
96 | static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); | 96 | static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); |
97 | 97 | ||
98 | static int acpi_bus_hot_remove_device(void *context) | 98 | static void acpi_bus_hot_remove_device(void *context) |
99 | { | 99 | { |
100 | struct acpi_device *device; | 100 | struct acpi_device *device; |
101 | acpi_handle handle = context; | 101 | acpi_handle handle = context; |
@@ -104,10 +104,10 @@ static int acpi_bus_hot_remove_device(void *context) | |||
104 | acpi_status status = AE_OK; | 104 | acpi_status status = AE_OK; |
105 | 105 | ||
106 | if (acpi_bus_get_device(handle, &device)) | 106 | if (acpi_bus_get_device(handle, &device)) |
107 | return 0; | 107 | return; |
108 | 108 | ||
109 | if (!device) | 109 | if (!device) |
110 | return 0; | 110 | return; |
111 | 111 | ||
112 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 112 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
113 | "Hot-removing device %s...\n", dev_name(&device->dev))); | 113 | "Hot-removing device %s...\n", dev_name(&device->dev))); |
@@ -115,7 +115,7 @@ static int acpi_bus_hot_remove_device(void *context) | |||
115 | if (acpi_bus_trim(device, 1)) { | 115 | if (acpi_bus_trim(device, 1)) { |
116 | printk(KERN_ERR PREFIX | 116 | printk(KERN_ERR PREFIX |
117 | "Removing device failed\n"); | 117 | "Removing device failed\n"); |
118 | return -1; | 118 | return; |
119 | } | 119 | } |
120 | 120 | ||
121 | /* power off device */ | 121 | /* power off device */ |
@@ -142,9 +142,10 @@ static int acpi_bus_hot_remove_device(void *context) | |||
142 | */ | 142 | */ |
143 | status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); | 143 | status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); |
144 | if (ACPI_FAILURE(status)) | 144 | if (ACPI_FAILURE(status)) |
145 | return -ENODEV; | 145 | printk(KERN_WARNING PREFIX |
146 | "Eject device failed\n"); | ||
146 | 147 | ||
147 | return 0; | 148 | return; |
148 | } | 149 | } |
149 | 150 | ||
150 | static ssize_t | 151 | static ssize_t |
@@ -155,7 +156,6 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, | |||
155 | acpi_status status; | 156 | acpi_status status; |
156 | acpi_object_type type = 0; | 157 | acpi_object_type type = 0; |
157 | struct acpi_device *acpi_device = to_acpi_device(d); | 158 | struct acpi_device *acpi_device = to_acpi_device(d); |
158 | struct task_struct *task; | ||
159 | 159 | ||
160 | if ((!count) || (buf[0] != '1')) { | 160 | if ((!count) || (buf[0] != '1')) { |
161 | return -EINVAL; | 161 | return -EINVAL; |
@@ -172,11 +172,7 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, | |||
172 | goto err; | 172 | goto err; |
173 | } | 173 | } |
174 | 174 | ||
175 | /* remove the device in another thread to fix the deadlock issue */ | 175 | acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle); |
176 | task = kthread_run(acpi_bus_hot_remove_device, | ||
177 | acpi_device->handle, "acpi_hot_remove_device"); | ||
178 | if (IS_ERR(task)) | ||
179 | ret = PTR_ERR(task); | ||
180 | err: | 176 | err: |
181 | return ret; | 177 | return ret; |
182 | } | 178 | } |
@@ -198,12 +194,12 @@ acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *b | |||
198 | int result; | 194 | int result; |
199 | 195 | ||
200 | result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); | 196 | result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); |
201 | if(result) | 197 | if (result) |
202 | goto end; | 198 | goto end; |
203 | 199 | ||
204 | result = sprintf(buf, "%s\n", (char*)path.pointer); | 200 | result = sprintf(buf, "%s\n", (char*)path.pointer); |
205 | kfree(path.pointer); | 201 | kfree(path.pointer); |
206 | end: | 202 | end: |
207 | return result; | 203 | return result; |
208 | } | 204 | } |
209 | static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); | 205 | static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); |
@@ -217,21 +213,21 @@ static int acpi_device_setup_files(struct acpi_device *dev) | |||
217 | /* | 213 | /* |
218 | * Devices gotten from FADT don't have a "path" attribute | 214 | * Devices gotten from FADT don't have a "path" attribute |
219 | */ | 215 | */ |
220 | if(dev->handle) { | 216 | if (dev->handle) { |
221 | result = device_create_file(&dev->dev, &dev_attr_path); | 217 | result = device_create_file(&dev->dev, &dev_attr_path); |
222 | if(result) | 218 | if (result) |
223 | goto end; | 219 | goto end; |
224 | } | 220 | } |
225 | 221 | ||
226 | if(dev->flags.hardware_id) { | 222 | if (dev->flags.hardware_id) { |
227 | result = device_create_file(&dev->dev, &dev_attr_hid); | 223 | result = device_create_file(&dev->dev, &dev_attr_hid); |
228 | if(result) | 224 | if (result) |
229 | goto end; | 225 | goto end; |
230 | } | 226 | } |
231 | 227 | ||
232 | if (dev->flags.hardware_id || dev->flags.compatible_ids){ | 228 | if (dev->flags.hardware_id || dev->flags.compatible_ids) { |
233 | result = device_create_file(&dev->dev, &dev_attr_modalias); | 229 | result = device_create_file(&dev->dev, &dev_attr_modalias); |
234 | if(result) | 230 | if (result) |
235 | goto end; | 231 | goto end; |
236 | } | 232 | } |
237 | 233 | ||
@@ -242,7 +238,7 @@ static int acpi_device_setup_files(struct acpi_device *dev) | |||
242 | status = acpi_get_handle(dev->handle, "_EJ0", &temp); | 238 | status = acpi_get_handle(dev->handle, "_EJ0", &temp); |
243 | if (ACPI_SUCCESS(status)) | 239 | if (ACPI_SUCCESS(status)) |
244 | result = device_create_file(&dev->dev, &dev_attr_eject); | 240 | result = device_create_file(&dev->dev, &dev_attr_eject); |
245 | end: | 241 | end: |
246 | return result; | 242 | return result; |
247 | } | 243 | } |
248 | 244 | ||
@@ -262,9 +258,9 @@ static void acpi_device_remove_files(struct acpi_device *dev) | |||
262 | if (dev->flags.hardware_id || dev->flags.compatible_ids) | 258 | if (dev->flags.hardware_id || dev->flags.compatible_ids) |
263 | device_remove_file(&dev->dev, &dev_attr_modalias); | 259 | device_remove_file(&dev->dev, &dev_attr_modalias); |
264 | 260 | ||
265 | if(dev->flags.hardware_id) | 261 | if (dev->flags.hardware_id) |
266 | device_remove_file(&dev->dev, &dev_attr_hid); | 262 | device_remove_file(&dev->dev, &dev_attr_hid); |
267 | if(dev->handle) | 263 | if (dev->handle) |
268 | device_remove_file(&dev->dev, &dev_attr_path); | 264 | device_remove_file(&dev->dev, &dev_attr_path); |
269 | } | 265 | } |
270 | /* -------------------------------------------------------------------------- | 266 | /* -------------------------------------------------------------------------- |
@@ -512,7 +508,7 @@ static int acpi_device_register(struct acpi_device *device, | |||
512 | break; | 508 | break; |
513 | } | 509 | } |
514 | } | 510 | } |
515 | if(!found) { | 511 | if (!found) { |
516 | acpi_device_bus_id = new_bus_id; | 512 | acpi_device_bus_id = new_bus_id; |
517 | strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device"); | 513 | strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device"); |
518 | acpi_device_bus_id->instance_no = 0; | 514 | acpi_device_bus_id->instance_no = 0; |
@@ -530,22 +526,21 @@ static int acpi_device_register(struct acpi_device *device, | |||
530 | if (device->parent) | 526 | if (device->parent) |
531 | device->dev.parent = &parent->dev; | 527 | device->dev.parent = &parent->dev; |
532 | device->dev.bus = &acpi_bus_type; | 528 | device->dev.bus = &acpi_bus_type; |
533 | device_initialize(&device->dev); | ||
534 | device->dev.release = &acpi_device_release; | 529 | device->dev.release = &acpi_device_release; |
535 | result = device_add(&device->dev); | 530 | result = device_register(&device->dev); |
536 | if(result) { | 531 | if (result) { |
537 | dev_err(&device->dev, "Error adding device\n"); | 532 | dev_err(&device->dev, "Error registering device\n"); |
538 | goto end; | 533 | goto end; |
539 | } | 534 | } |
540 | 535 | ||
541 | result = acpi_device_setup_files(device); | 536 | result = acpi_device_setup_files(device); |
542 | if(result) | 537 | if (result) |
543 | printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", | 538 | printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", |
544 | dev_name(&device->dev)); | 539 | dev_name(&device->dev)); |
545 | 540 | ||
546 | device->removal_type = ACPI_BUS_REMOVAL_NORMAL; | 541 | device->removal_type = ACPI_BUS_REMOVAL_NORMAL; |
547 | return 0; | 542 | return 0; |
548 | end: | 543 | end: |
549 | mutex_lock(&acpi_device_lock); | 544 | mutex_lock(&acpi_device_lock); |
550 | if (device->parent) | 545 | if (device->parent) |
551 | list_del(&device->node); | 546 | list_del(&device->node); |
@@ -577,7 +572,7 @@ static void acpi_device_unregister(struct acpi_device *device, int type) | |||
577 | * @device: the device to add and initialize | 572 | * @device: the device to add and initialize |
578 | * @driver: driver for the device | 573 | * @driver: driver for the device |
579 | * | 574 | * |
580 | * Used to initialize a device via its device driver. Called whenever a | 575 | * Used to initialize a device via its device driver. Called whenever a |
581 | * driver is bound to a device. Invokes the driver's add() ops. | 576 | * driver is bound to a device. Invokes the driver's add() ops. |
582 | */ | 577 | */ |
583 | static int | 578 | static int |
@@ -585,7 +580,6 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver) | |||
585 | { | 580 | { |
586 | int result = 0; | 581 | int result = 0; |
587 | 582 | ||
588 | |||
589 | if (!device || !driver) | 583 | if (!device || !driver) |
590 | return -EINVAL; | 584 | return -EINVAL; |
591 | 585 | ||
@@ -802,7 +796,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
802 | if (!acpi_match_device_ids(device, button_device_ids)) | 796 | if (!acpi_match_device_ids(device, button_device_ids)) |
803 | device->wakeup.flags.run_wake = 1; | 797 | device->wakeup.flags.run_wake = 1; |
804 | 798 | ||
805 | end: | 799 | end: |
806 | if (ACPI_FAILURE(status)) | 800 | if (ACPI_FAILURE(status)) |
807 | device->flags.wake_capable = 0; | 801 | device->flags.wake_capable = 0; |
808 | return 0; | 802 | return 0; |
@@ -1070,7 +1064,7 @@ static void acpi_device_set_id(struct acpi_device *device, | |||
1070 | break; | 1064 | break; |
1071 | } | 1065 | } |
1072 | 1066 | ||
1073 | /* | 1067 | /* |
1074 | * \_SB | 1068 | * \_SB |
1075 | * ---- | 1069 | * ---- |
1076 | * Fix for the system root bus device -- the only root-level device. | 1070 | * Fix for the system root bus device -- the only root-level device. |
@@ -1320,7 +1314,7 @@ acpi_add_single_object(struct acpi_device **child, | |||
1320 | device->parent->ops.bind(device); | 1314 | device->parent->ops.bind(device); |
1321 | } | 1315 | } |
1322 | 1316 | ||
1323 | end: | 1317 | end: |
1324 | if (!result) | 1318 | if (!result) |
1325 | *child = device; | 1319 | *child = device; |
1326 | else { | 1320 | else { |
@@ -1464,7 +1458,6 @@ acpi_bus_add(struct acpi_device **child, | |||
1464 | 1458 | ||
1465 | return result; | 1459 | return result; |
1466 | } | 1460 | } |
1467 | |||
1468 | EXPORT_SYMBOL(acpi_bus_add); | 1461 | EXPORT_SYMBOL(acpi_bus_add); |
1469 | 1462 | ||
1470 | int acpi_bus_start(struct acpi_device *device) | 1463 | int acpi_bus_start(struct acpi_device *device) |
@@ -1484,7 +1477,6 @@ int acpi_bus_start(struct acpi_device *device) | |||
1484 | } | 1477 | } |
1485 | return result; | 1478 | return result; |
1486 | } | 1479 | } |
1487 | |||
1488 | EXPORT_SYMBOL(acpi_bus_start); | 1480 | EXPORT_SYMBOL(acpi_bus_start); |
1489 | 1481 | ||
1490 | int acpi_bus_trim(struct acpi_device *start, int rmdevice) | 1482 | int acpi_bus_trim(struct acpi_device *start, int rmdevice) |
@@ -1542,7 +1534,6 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice) | |||
1542 | } | 1534 | } |
1543 | EXPORT_SYMBOL_GPL(acpi_bus_trim); | 1535 | EXPORT_SYMBOL_GPL(acpi_bus_trim); |
1544 | 1536 | ||
1545 | |||
1546 | static int acpi_bus_scan_fixed(struct acpi_device *root) | 1537 | static int acpi_bus_scan_fixed(struct acpi_device *root) |
1547 | { | 1538 | { |
1548 | int result = 0; | 1539 | int result = 0; |
@@ -1610,6 +1601,6 @@ int __init acpi_scan_init(void) | |||
1610 | if (result) | 1601 | if (result) |
1611 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); | 1602 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); |
1612 | 1603 | ||
1613 | Done: | 1604 | Done: |
1614 | return result; | 1605 | return result; |
1615 | } | 1606 | } |
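Queued through acpi_os_hotplug_execute(), the handler must now match the OSL's void (*)(void *context) callback type, which is why it was reworked to return void and merely log failures. The calling shape, sketched under the assumption that acpi_os_hotplug_execute() returns an acpi_status like the other acpi_os_*execute helpers:

    static void example_hot_remove(void *context)
    {
            acpi_handle handle = context;

            /* tear the device down; errors can only be logged from here */
    }

    static int example_queue_eject(struct acpi_device *device)
    {
            acpi_status status;

            status = acpi_os_hotplug_execute(example_hot_remove, device->handle);
            return ACPI_FAILURE(status) ? -ENODEV : 0;
    }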
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 1bdfb37377e3..8851315ce858 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -76,6 +76,7 @@ MODULE_LICENSE("GPL"); | |||
76 | static int brightness_switch_enabled = 1; | 76 | static int brightness_switch_enabled = 1; |
77 | module_param(brightness_switch_enabled, bool, 0644); | 77 | module_param(brightness_switch_enabled, bool, 0644); |
78 | 78 | ||
79 | static int register_count = 0; | ||
79 | static int acpi_video_bus_add(struct acpi_device *device); | 80 | static int acpi_video_bus_add(struct acpi_device *device); |
80 | static int acpi_video_bus_remove(struct acpi_device *device, int type); | 81 | static int acpi_video_bus_remove(struct acpi_device *device, int type); |
81 | static int acpi_video_resume(struct acpi_device *device); | 82 | static int acpi_video_resume(struct acpi_device *device); |
@@ -586,6 +587,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
586 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"), | 587 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"), |
587 | }, | 588 | }, |
588 | }, | 589 | }, |
590 | { | ||
591 | .callback = video_set_bqc_offset, | ||
592 | .ident = "Acer Aspire 7720", | ||
593 | .matches = { | ||
594 | DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), | ||
595 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), | ||
596 | }, | ||
597 | }, | ||
589 | {} | 598 | {} |
590 | }; | 599 | }; |
591 | 600 | ||
@@ -976,6 +985,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
976 | device->backlight->props.max_brightness = device->brightness->count-3; | 985 | device->backlight->props.max_brightness = device->brightness->count-3; |
977 | kfree(name); | 986 | kfree(name); |
978 | 987 | ||
988 | result = sysfs_create_link(&device->backlight->dev.kobj, | ||
989 | &device->dev->dev.kobj, "device"); | ||
990 | if (result) | ||
991 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
992 | |||
979 | device->cdev = thermal_cooling_device_register("LCD", | 993 | device->cdev = thermal_cooling_device_register("LCD", |
980 | device->dev, &video_cooling_ops); | 994 | device->dev, &video_cooling_ops); |
981 | if (IS_ERR(device->cdev)) | 995 | if (IS_ERR(device->cdev)) |
@@ -1054,15 +1068,15 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video) | |||
1054 | static int acpi_video_bus_check(struct acpi_video_bus *video) | 1068 | static int acpi_video_bus_check(struct acpi_video_bus *video) |
1055 | { | 1069 | { |
1056 | acpi_status status = -ENOENT; | 1070 | acpi_status status = -ENOENT; |
1057 | struct device *dev; | 1071 | struct pci_dev *dev; |
1058 | 1072 | ||
1059 | if (!video) | 1073 | if (!video) |
1060 | return -EINVAL; | 1074 | return -EINVAL; |
1061 | 1075 | ||
1062 | dev = acpi_get_physical_pci_device(video->device->handle); | 1076 | dev = acpi_get_pci_dev(video->device->handle); |
1063 | if (!dev) | 1077 | if (!dev) |
1064 | return -ENODEV; | 1078 | return -ENODEV; |
1065 | put_device(dev); | 1079 | pci_dev_put(dev); |
1066 | 1080 | ||
1067 | /* Since there is no HID, CID and so on for VGA driver, we have | 1081 | /* Since there is no HID, CID and so on for VGA driver, we have |
1068 | * to check well known required nodes. | 1082 | * to check well known required nodes. |
@@ -1990,6 +2004,7 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) | |||
1990 | status = acpi_remove_notify_handler(device->dev->handle, | 2004 | status = acpi_remove_notify_handler(device->dev->handle, |
1991 | ACPI_DEVICE_NOTIFY, | 2005 | ACPI_DEVICE_NOTIFY, |
1992 | acpi_video_device_notify); | 2006 | acpi_video_device_notify); |
2007 | sysfs_remove_link(&device->backlight->dev.kobj, "device"); | ||
1993 | backlight_device_unregister(device->backlight); | 2008 | backlight_device_unregister(device->backlight); |
1994 | if (device->cdev) { | 2009 | if (device->cdev) { |
1995 | sysfs_remove_link(&device->dev->dev.kobj, | 2010 | sysfs_remove_link(&device->dev->dev.kobj, |
@@ -2318,6 +2333,13 @@ static int __init intel_opregion_present(void) | |||
2318 | int acpi_video_register(void) | 2333 | int acpi_video_register(void) |
2319 | { | 2334 | { |
2320 | int result = 0; | 2335 | int result = 0; |
2336 | if (register_count) { | ||
2337 | /* | ||
2338 | * if the function of acpi_video_register is already called, | ||
2339 | * don't register the acpi_vide_bus again and return no error. | ||
2340 | */ | ||
2341 | return 0; | ||
2342 | } | ||
2321 | 2343 | ||
2322 | acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir); | 2344 | acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir); |
2323 | if (!acpi_video_dir) | 2345 | if (!acpi_video_dir) |
@@ -2329,10 +2351,35 @@ int acpi_video_register(void) | |||
2329 | return -ENODEV; | 2351 | return -ENODEV; |
2330 | } | 2352 | } |
2331 | 2353 | ||
2354 | /* | ||
2355 | * When the acpi_video_bus is loaded successfully, increase | ||
2356 | * the reference counter. | ||
2357 | */ | ||
2358 | register_count = 1; | ||
2359 | |||
2332 | return 0; | 2360 | return 0; |
2333 | } | 2361 | } |
2334 | EXPORT_SYMBOL(acpi_video_register); | 2362 | EXPORT_SYMBOL(acpi_video_register); |
2335 | 2363 | ||
2364 | void acpi_video_unregister(void) | ||
2365 | { | ||
2366 | if (!register_count) { | ||
2367 | /* | ||
2368 | * If the acpi video bus is already unloaded, don't | ||
2369 | * unload it again; just return. | ||
2370 | */ | ||
2371 | return; | ||
2372 | } | ||
2373 | acpi_bus_unregister_driver(&acpi_video_bus); | ||
2374 | |||
2375 | remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir); | ||
2376 | |||
2377 | register_count = 0; | ||
2378 | |||
2379 | return; | ||
2380 | } | ||
2381 | EXPORT_SYMBOL(acpi_video_unregister); | ||
2382 | |||
2336 | /* | 2383 | /* |
2337 | * This is kind of nasty. Hardware using Intel chipsets may require | 2384 | * This is kind of nasty. Hardware using Intel chipsets may require |
2338 | * the video opregion code to be run first in order to initialise | 2385 | * the video opregion code to be run first in order to initialise |
@@ -2350,16 +2397,12 @@ static int __init acpi_video_init(void) | |||
2350 | return acpi_video_register(); | 2397 | return acpi_video_register(); |
2351 | } | 2398 | } |
2352 | 2399 | ||
2353 | void acpi_video_exit(void) | 2400 | static void __exit acpi_video_exit(void) |
2354 | { | 2401 | { |
2355 | 2402 | acpi_video_unregister(); | |
2356 | acpi_bus_unregister_driver(&acpi_video_bus); | ||
2357 | |||
2358 | remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir); | ||
2359 | 2403 | ||
2360 | return; | 2404 | return; |
2361 | } | 2405 | } |
2362 | EXPORT_SYMBOL(acpi_video_exit); | ||
2363 | 2406 | ||
2364 | module_init(acpi_video_init); | 2407 | module_init(acpi_video_init); |
2365 | module_exit(acpi_video_exit); | 2408 | module_exit(acpi_video_exit); |
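register_count makes acpi_video_register() and the new acpi_video_unregister() idempotent, which matters because both the module's own init/exit and i915's opregion code (see the i915_opregion.c hunk below) may call them. The guard in isolation (names hypothetical; the plain int suffices here only because the callers are serialized init/exit paths):

    static int register_count;

    static int example_register(void)
    {
            if (register_count)
                    return 0;       /* already registered: succeed, do nothing */
            /* ... real registration work ... */
            register_count = 1;
            return 0;
    }

    static void example_unregister(void)
    {
            if (!register_count)
                    return;         /* never registered or already torn down */
            /* ... real teardown work ... */
            register_count = 0;
    }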
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 09737275e25f..7cd2b63435ea 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * assigned | 10 | * assigned |
11 | * | 11 | * |
12 | * After PCI devices are glued with ACPI devices | 12 | * After PCI devices are glued with ACPI devices |
13 | * acpi_get_physical_pci_device() can be called to identify ACPI graphics | 13 | * acpi_get_pci_dev() can be called to identify ACPI graphics |
14 | * devices for which a real graphics card is plugged in | 14 | * devices for which a real graphics card is plugged in |
15 | * | 15 | * |
16 | * Now acpi_video_get_capabilities() can be called to check which | 16 | * Now acpi_video_get_capabilities() can be called to check which |
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | #include <linux/acpi.h> | 37 | #include <linux/acpi.h> |
38 | #include <linux/dmi.h> | 38 | #include <linux/dmi.h> |
39 | #include <linux/pci.h> | ||
39 | 40 | ||
40 | ACPI_MODULE_NAME("video"); | 41 | ACPI_MODULE_NAME("video"); |
41 | #define _COMPONENT ACPI_VIDEO_COMPONENT | 42 | #define _COMPONENT ACPI_VIDEO_COMPONENT |
@@ -109,7 +110,7 @@ static acpi_status | |||
109 | find_video(acpi_handle handle, u32 lvl, void *context, void **rv) | 110 | find_video(acpi_handle handle, u32 lvl, void *context, void **rv) |
110 | { | 111 | { |
111 | long *cap = context; | 112 | long *cap = context; |
112 | struct device *dev; | 113 | struct pci_dev *dev; |
113 | struct acpi_device *acpi_dev; | 114 | struct acpi_device *acpi_dev; |
114 | 115 | ||
115 | const struct acpi_device_id video_ids[] = { | 116 | const struct acpi_device_id video_ids[] = { |
@@ -120,10 +121,10 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
120 | return AE_OK; | 121 | return AE_OK; |
121 | 122 | ||
122 | if (!acpi_match_device_ids(acpi_dev, video_ids)) { | 123 | if (!acpi_match_device_ids(acpi_dev, video_ids)) { |
123 | dev = acpi_get_physical_pci_device(handle); | 124 | dev = acpi_get_pci_dev(handle); |
124 | if (!dev) | 125 | if (!dev) |
125 | return AE_OK; | 126 | return AE_OK; |
126 | put_device(dev); | 127 | pci_dev_put(dev); |
127 | *cap |= acpi_is_video_device(acpi_dev); | 128 | *cap |= acpi_is_video_device(acpi_dev); |
128 | } | 129 | } |
129 | return AE_OK; | 130 | return AE_OK; |
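acpi_get_pci_dev() hands back a typed struct pci_dev * with a reference held, so each successful lookup is paired with pci_dev_put() instead of the old put_device(). The usage shape both converted call sites now follow, wrapped in a hypothetical helper:

    static int example_has_pci_dev(acpi_handle handle)
    {
            struct pci_dev *pdev = acpi_get_pci_dev(handle);

            if (!pdev)
                    return -ENODEV; /* nothing glued to this ACPI handle */
            /* ... inspect pdev as needed ... */
            pci_dev_put(pdev);      /* drop the reference the lookup took */
            return 0;
    }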
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 9533f43a30bb..52d953eb30c3 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
@@ -1048,8 +1048,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp) | |||
1048 | if (retval) | 1048 | if (retval) |
1049 | return retval; | 1049 | return retval; |
1050 | 1050 | ||
1051 | /* unmark here for very high baud rate (ex. 921600 bps) used */ | ||
1052 | tty->low_latency = 1; | ||
1053 | return 0; | 1051 | return 0; |
1054 | } | 1052 | } |
1055 | 1053 | ||
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index d6102b644b55..574f1c79b6e6 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c | |||
@@ -1591,8 +1591,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file) | |||
1591 | 1591 | ||
1592 | /* Enable interrupt downlink for channel */ | 1592 | /* Enable interrupt downlink for channel */ |
1593 | if (port->port.count == 1) { | 1593 | if (port->port.count == 1) { |
1594 | /* FIXME: is this needed now ? */ | ||
1595 | tty->low_latency = 1; | ||
1596 | tty->driver_data = port; | 1594 | tty->driver_data = port; |
1597 | tty_port_tty_set(&port->port, tty); | 1595 | tty_port_tty_set(&port->port, tty); |
1598 | DBG1("open: %d", port->token_dl); | 1596 | DBG1("open: %d", port->token_dl); |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 1386625fc4ca..a2e67e6df3a1 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -467,7 +467,6 @@ static unsigned int free_tbuf_count(struct slgt_info *info); | |||
467 | static unsigned int tbuf_bytes(struct slgt_info *info); | 467 | static unsigned int tbuf_bytes(struct slgt_info *info); |
468 | static void reset_tbufs(struct slgt_info *info); | 468 | static void reset_tbufs(struct slgt_info *info); |
469 | static void tdma_reset(struct slgt_info *info); | 469 | static void tdma_reset(struct slgt_info *info); |
470 | static void tdma_start(struct slgt_info *info); | ||
471 | static void tx_load(struct slgt_info *info, const char *buf, unsigned int count); | 470 | static void tx_load(struct slgt_info *info, const char *buf, unsigned int count); |
472 | 471 | ||
473 | static void get_signals(struct slgt_info *info); | 472 | static void get_signals(struct slgt_info *info); |
@@ -795,6 +794,18 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
795 | } | 794 | } |
796 | } | 795 | } |
797 | 796 | ||
797 | static void update_tx_timer(struct slgt_info *info) | ||
798 | { | ||
799 | /* | ||
800 | * use worst case speed of 1200bps to calculate transmit timeout | ||
801 | * based on data in buffers (tbuf_bytes) and FIFO (128 bytes) | ||
802 | */ | ||
803 | if (info->params.mode == MGSL_MODE_HDLC) { | ||
804 | int timeout = (tbuf_bytes(info) * 7) + 1000; | ||
805 | mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout)); | ||
806 | } | ||
807 | } | ||
808 | |||
798 | static int write(struct tty_struct *tty, | 809 | static int write(struct tty_struct *tty, |
799 | const unsigned char *buf, int count) | 810 | const unsigned char *buf, int count) |
800 | { | 811 | { |
@@ -838,8 +849,18 @@ start: | |||
838 | spin_lock_irqsave(&info->lock,flags); | 849 | spin_lock_irqsave(&info->lock,flags); |
839 | if (!info->tx_active) | 850 | if (!info->tx_active) |
840 | tx_start(info); | 851 | tx_start(info); |
841 | else | 852 | else if (!(rd_reg32(info, TDCSR) & BIT0)) { |
842 | tdma_start(info); | 853 | /* transmit still active but transmit DMA stopped */ |
854 | unsigned int i = info->tbuf_current; | ||
855 | if (!i) | ||
856 | i = info->tbuf_count; | ||
857 | i--; | ||
858 | /* if DMA buf unsent must try later after tx idle */ | ||
859 | if (desc_count(info->tbufs[i])) | ||
860 | ret = 0; | ||
861 | } | ||
862 | if (ret > 0) | ||
863 | update_tx_timer(info); | ||
843 | spin_unlock_irqrestore(&info->lock,flags); | 864 | spin_unlock_irqrestore(&info->lock,flags); |
844 | } | 865 | } |
845 | 866 | ||
@@ -1502,10 +1523,9 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1502 | /* save start time for transmit timeout detection */ | 1523 | /* save start time for transmit timeout detection */ |
1503 | dev->trans_start = jiffies; | 1524 | dev->trans_start = jiffies; |
1504 | 1525 | ||
1505 | /* start hardware transmitter if necessary */ | ||
1506 | spin_lock_irqsave(&info->lock,flags); | 1526 | spin_lock_irqsave(&info->lock,flags); |
1507 | if (!info->tx_active) | 1527 | tx_start(info); |
1508 | tx_start(info); | 1528 | update_tx_timer(info); |
1509 | spin_unlock_irqrestore(&info->lock,flags); | 1529 | spin_unlock_irqrestore(&info->lock,flags); |
1510 | 1530 | ||
1511 | return 0; | 1531 | return 0; |
@@ -3946,50 +3966,19 @@ static void tx_start(struct slgt_info *info) | |||
3946 | slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE); | 3966 | slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE); |
3947 | /* clear tx idle and underrun status bits */ | 3967 | /* clear tx idle and underrun status bits */ |
3948 | wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER)); | 3968 | wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER)); |
3949 | if (info->params.mode == MGSL_MODE_HDLC) | ||
3950 | mod_timer(&info->tx_timer, jiffies + | ||
3951 | msecs_to_jiffies(5000)); | ||
3952 | } else { | 3969 | } else { |
3953 | slgt_irq_off(info, IRQ_TXDATA); | 3970 | slgt_irq_off(info, IRQ_TXDATA); |
3954 | slgt_irq_on(info, IRQ_TXIDLE); | 3971 | slgt_irq_on(info, IRQ_TXIDLE); |
3955 | /* clear tx idle status bit */ | 3972 | /* clear tx idle status bit */ |
3956 | wr_reg16(info, SSR, IRQ_TXIDLE); | 3973 | wr_reg16(info, SSR, IRQ_TXIDLE); |
3957 | } | 3974 | } |
3958 | tdma_start(info); | 3975 | /* set 1st descriptor address and start DMA */ |
3976 | wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc); | ||
3977 | wr_reg32(info, TDCSR, BIT2 + BIT0); | ||
3959 | info->tx_active = true; | 3978 | info->tx_active = true; |
3960 | } | 3979 | } |
3961 | } | 3980 | } |
3962 | 3981 | ||
3963 | /* | ||
3964 | * start transmit DMA if inactive and there are unsent buffers | ||
3965 | */ | ||
3966 | static void tdma_start(struct slgt_info *info) | ||
3967 | { | ||
3968 | unsigned int i; | ||
3969 | |||
3970 | if (rd_reg32(info, TDCSR) & BIT0) | ||
3971 | return; | ||
3972 | |||
3973 | /* transmit DMA inactive, check for unsent buffers */ | ||
3974 | i = info->tbuf_start; | ||
3975 | while (!desc_count(info->tbufs[i])) { | ||
3976 | if (++i == info->tbuf_count) | ||
3977 | i = 0; | ||
3978 | if (i == info->tbuf_current) | ||
3979 | return; | ||
3980 | } | ||
3981 | info->tbuf_start = i; | ||
3982 | |||
3983 | /* there are unsent buffers, start transmit DMA */ | ||
3984 | |||
3985 | /* reset needed if previous error condition */ | ||
3986 | tdma_reset(info); | ||
3987 | |||
3988 | /* set 1st descriptor address */ | ||
3989 | wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc); | ||
3990 | wr_reg32(info, TDCSR, BIT2 + BIT0); /* IRQ + DMA enable */ | ||
3991 | } | ||
3992 | |||
3993 | static void tx_stop(struct slgt_info *info) | 3982 | static void tx_stop(struct slgt_info *info) |
3994 | { | 3983 | { |
3995 | unsigned short val; | 3984 | unsigned short val; |
@@ -5004,8 +4993,7 @@ static void tx_timeout(unsigned long context) | |||
5004 | info->icount.txtimeout++; | 4993 | info->icount.txtimeout++; |
5005 | } | 4994 | } |
5006 | spin_lock_irqsave(&info->lock,flags); | 4995 | spin_lock_irqsave(&info->lock,flags); |
5007 | info->tx_active = false; | 4996 | tx_stop(info); |
5008 | info->tx_count = 0; | ||
5009 | spin_unlock_irqrestore(&info->lock,flags); | 4997 | spin_unlock_irqrestore(&info->lock,flags); |
5010 | 4998 | ||
5011 | #if SYNCLINK_GENERIC_HDLC | 4999 | #if SYNCLINK_GENERIC_HDLC |
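update_tx_timer() sizes the timeout from the slowest supported link: at 1200 bps an HDLC byte takes 8/1200 s, roughly 6.7 ms, rounded up to 7 ms per buffered byte, plus a fixed 1000 ms of slack. A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int buffered = 256;            /* bytes queued in tbufs */
            unsigned int timeout_ms = buffered * 7 + 1000;

            /* 256 bytes at 7 ms/byte + 1000 ms slack = 2792 ms */
            printf("%u ms\n", timeout_ms);
            return 0;
    }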
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 62dadfc95e34..4e862a75f7ff 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c | |||
@@ -193,7 +193,7 @@ int tty_port_block_til_ready(struct tty_port *port, | |||
193 | { | 193 | { |
194 | int do_clocal = 0, retval; | 194 | int do_clocal = 0, retval; |
195 | unsigned long flags; | 195 | unsigned long flags; |
196 | DECLARE_WAITQUEUE(wait, current); | 196 | DEFINE_WAIT(wait); |
197 | int cd; | 197 | int cd; |
198 | 198 | ||
199 | /* block if port is in the process of being closed */ | 199 | /* block if port is in the process of being closed */ |
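DEFINE_WAIT() builds a wait entry wired to autoremove_wake_function(), meant for the prepare_to_wait()/finish_wait() idiom, whereas DECLARE_WAITQUEUE() serves the older add_wait_queue() style. A sketch of the loop this enables, assuming struct tty_port's open_wait head and a hypothetical ready() predicate:

    DEFINE_WAIT(wait);

    while (1) {
            prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
            if (ready(port) || signal_pending(current))
                    break;
            schedule();
    }
    finish_wait(&port->open_wait, &wait);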
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index dc425e74a268..e4b4e8898e39 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -419,7 +419,7 @@ void intel_opregion_free(struct drm_device *dev, int suspend) | |||
419 | return; | 419 | return; |
420 | 420 | ||
421 | if (!suspend) | 421 | if (!suspend) |
422 | acpi_video_exit(); | 422 | acpi_video_unregister(); |
423 | 423 | ||
424 | opregion->acpi->drdy = 0; | 424 | opregion->acpi->drdy = 0; |
425 | 425 | ||
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 3c259ee7ddda..aa87b6a3bbef 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -326,6 +326,15 @@ config I2C_DAVINCI | |||
326 | devices such as DaVinci NIC. | 326 | devices such as DaVinci NIC. |
327 | For details please see http://www.ti.com/davinci | 327 | For details please see http://www.ti.com/davinci |
328 | 328 | ||
329 | config I2C_DESIGNWARE | ||
330 | tristate "Synopsys DesignWare" | ||
331 | help | ||
332 | If you say yes to this option, support will be included for the | ||
333 | Synopsys DesignWare I2C adapter. Only master mode is supported. | ||
334 | |||
335 | This driver can also be built as a module. If so, the module | ||
336 | will be called i2c-designware. | ||
337 | |||
329 | config I2C_GPIO | 338 | config I2C_GPIO |
330 | tristate "GPIO-based bitbanging I2C" | 339 | tristate "GPIO-based bitbanging I2C" |
331 | depends on GENERIC_GPIO | 340 | depends on GENERIC_GPIO |
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index edeabf003106..e654263bfc01 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile | |||
@@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o | |||
30 | obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o | 30 | obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o |
31 | obj-$(CONFIG_I2C_CPM) += i2c-cpm.o | 31 | obj-$(CONFIG_I2C_CPM) += i2c-cpm.o |
32 | obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o | 32 | obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o |
33 | obj-$(CONFIG_I2C_DESIGNWARE) += i2c-designware.o | ||
33 | obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o | 34 | obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o |
34 | obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o | 35 | obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o |
35 | obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o | 36 | obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o |
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c new file mode 100644 index 000000000000..b444762e9b9f --- /dev/null +++ b/drivers/i2c/busses/i2c-designware.c | |||
@@ -0,0 +1,624 @@ | |||
1 | /* | ||
2 | * Synopsys Designware I2C adapter driver (master only). | ||
3 | * | ||
4 | * Based on the TI DAVINCI I2C adapter driver. | ||
5 | * | ||
6 | * Copyright (C) 2006 Texas Instruments. | ||
7 | * Copyright (C) 2007 MontaVista Software Inc. | ||
8 | * Copyright (C) 2009 Provigent Ltd. | ||
9 | * | ||
10 | * ---------------------------------------------------------------------------- | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | * ---------------------------------------------------------------------------- | ||
26 | * | ||
27 | */ | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/i2c.h> | ||
32 | #include <linux/clk.h> | ||
33 | #include <linux/errno.h> | ||
34 | #include <linux/sched.h> | ||
35 | #include <linux/err.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | #include <linux/io.h> | ||
39 | |||
40 | /* | ||
41 | * Registers offset | ||
42 | */ | ||
43 | #define DW_IC_CON 0x0 | ||
44 | #define DW_IC_TAR 0x4 | ||
45 | #define DW_IC_DATA_CMD 0x10 | ||
46 | #define DW_IC_SS_SCL_HCNT 0x14 | ||
47 | #define DW_IC_SS_SCL_LCNT 0x18 | ||
48 | #define DW_IC_FS_SCL_HCNT 0x1c | ||
49 | #define DW_IC_FS_SCL_LCNT 0x20 | ||
50 | #define DW_IC_INTR_STAT 0x2c | ||
51 | #define DW_IC_INTR_MASK 0x30 | ||
52 | #define DW_IC_CLR_INTR 0x40 | ||
53 | #define DW_IC_ENABLE 0x6c | ||
54 | #define DW_IC_STATUS 0x70 | ||
55 | #define DW_IC_TXFLR 0x74 | ||
56 | #define DW_IC_RXFLR 0x78 | ||
57 | #define DW_IC_COMP_PARAM_1 0xf4 | ||
58 | #define DW_IC_TX_ABRT_SOURCE 0x80 | ||
59 | |||
60 | #define DW_IC_CON_MASTER 0x1 | ||
61 | #define DW_IC_CON_SPEED_STD 0x2 | ||
62 | #define DW_IC_CON_SPEED_FAST 0x4 | ||
63 | #define DW_IC_CON_10BITADDR_MASTER 0x10 | ||
64 | #define DW_IC_CON_RESTART_EN 0x20 | ||
65 | #define DW_IC_CON_SLAVE_DISABLE 0x40 | ||
66 | |||
67 | #define DW_IC_INTR_TX_EMPTY 0x10 | ||
68 | #define DW_IC_INTR_TX_ABRT 0x40 | ||
69 | #define DW_IC_INTR_STOP_DET 0x200 | ||
70 | |||
71 | #define DW_IC_STATUS_ACTIVITY 0x1 | ||
72 | |||
73 | #define DW_IC_ERR_TX_ABRT 0x1 | ||
74 | |||
75 | /* | ||
76 | * status codes | ||
77 | */ | ||
78 | #define STATUS_IDLE 0x0 | ||
79 | #define STATUS_WRITE_IN_PROGRESS 0x1 | ||
80 | #define STATUS_READ_IN_PROGRESS 0x2 | ||
81 | |||
82 | #define TIMEOUT 20 /* ms */ | ||
83 | |||
84 | /* | ||
85 | * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register | ||
86 | * | ||
87 | * only expected abort codes are listed here | ||
88 | * refer to the datasheet for the full list | ||
89 | */ | ||
90 | #define ABRT_7B_ADDR_NOACK 0 | ||
91 | #define ABRT_10ADDR1_NOACK 1 | ||
92 | #define ABRT_10ADDR2_NOACK 2 | ||
93 | #define ABRT_TXDATA_NOACK 3 | ||
94 | #define ABRT_GCALL_NOACK 4 | ||
95 | #define ABRT_GCALL_READ 5 | ||
96 | #define ABRT_SBYTE_ACKDET 7 | ||
97 | #define ABRT_SBYTE_NORSTRT 9 | ||
98 | #define ABRT_10B_RD_NORSTRT 10 | ||
99 | #define ARB_MASTER_DIS 11 | ||
100 | #define ARB_LOST 12 | ||
101 | |||
102 | static char *abort_sources[] = { | ||
103 | [ABRT_7B_ADDR_NOACK] = | ||
104 | "slave address not acknowledged (7bit mode)", | ||
105 | [ABRT_10ADDR1_NOACK] = | ||
106 | "first address byte not acknowledged (10bit mode)", | ||
107 | [ABRT_10ADDR2_NOACK] = | ||
108 | "second address byte not acknowledged (10bit mode)", | ||
109 | [ABRT_TXDATA_NOACK] = | ||
110 | "data not acknowledged", | ||
111 | [ABRT_GCALL_NOACK] = | ||
112 | "no acknowledgement for a general call", | ||
113 | [ABRT_GCALL_READ] = | ||
114 | "read after general call", | ||
115 | [ABRT_SBYTE_ACKDET] = | ||
116 | "start byte acknowledged", | ||
117 | [ABRT_SBYTE_NORSTRT] = | ||
118 | "trying to send start byte when restart is disabled", | ||
119 | [ABRT_10B_RD_NORSTRT] = | ||
120 | "trying to read when restart is disabled (10bit mode)", | ||
121 | [ARB_MASTER_DIS] = | ||
122 | "trying to use disabled adapter", | ||
123 | [ARB_LOST] = | ||
124 | "lost arbitration", | ||
125 | }; | ||
126 | |||
127 | /** | ||
128 | * struct dw_i2c_dev - private i2c-designware data | ||
129 | * @dev: driver model device node | ||
130 | * @base: IO registers pointer | ||
131 | * @cmd_complete: tx completion indicator | ||
132 | * @pump_msg: continue in-progress transfers | ||
133 | * @lock: protect this struct and IO registers | ||
134 | * @clk: input reference clock | ||
135 | * @cmd_err: run time hardware error code | ||
136 | * @msgs: points to an array of messages currently being transferred | ||
137 | * @msgs_num: the number of elements in msgs | ||
138 | * @msg_write_idx: the element index of the current tx message in the msgs | ||
139 | * array | ||
140 | * @tx_buf_len: the length of the current tx buffer | ||
141 | * @tx_buf: the current tx buffer | ||
142 | * @msg_read_idx: the element index of the current rx message in the msgs | ||
143 | * array | ||
144 | * @rx_buf_len: the length of the current rx buffer | ||
145 | * @rx_buf: the current rx buffer | ||
146 | * @msg_err: error status of the current transfer | ||
147 | * @status: i2c master status, one of STATUS_* | ||
148 | * @abort_source: copy of the TX_ABRT_SOURCE register | ||
149 | * @irq: interrupt number for the i2c master | ||
150 | * @adapter: i2c subsystem adapter node | ||
151 | * @tx_fifo_depth: depth of the hardware tx fifo | ||
152 | * @rx_fifo_depth: depth of the hardware rx fifo | ||
153 | */ | ||
154 | struct dw_i2c_dev { | ||
155 | struct device *dev; | ||
156 | void __iomem *base; | ||
157 | struct completion cmd_complete; | ||
158 | struct tasklet_struct pump_msg; | ||
159 | struct mutex lock; | ||
160 | struct clk *clk; | ||
161 | int cmd_err; | ||
162 | struct i2c_msg *msgs; | ||
163 | int msgs_num; | ||
164 | int msg_write_idx; | ||
165 | u16 tx_buf_len; | ||
166 | u8 *tx_buf; | ||
167 | int msg_read_idx; | ||
168 | u16 rx_buf_len; | ||
169 | u8 *rx_buf; | ||
170 | int msg_err; | ||
171 | unsigned int status; | ||
172 | u16 abort_source; | ||
173 | int irq; | ||
174 | struct i2c_adapter adapter; | ||
175 | unsigned int tx_fifo_depth; | ||
176 | unsigned int rx_fifo_depth; | ||
177 | }; | ||
178 | |||
179 | /** | ||
180 | * i2c_dw_init() - initialize the designware i2c master hardware | ||
181 | * @dev: device private data | ||
182 | * | ||
183 | * This function configures and enables the I2C master. | ||
184 | * It is called during I2C init, and again in case of a timeout at | ||
185 | * run time. | ||
186 | */ | ||
187 | static void i2c_dw_init(struct dw_i2c_dev *dev) | ||
188 | { | ||
189 | u32 input_clock_khz = clk_get_rate(dev->clk) / 1000; | ||
190 | u16 ic_con; | ||
191 | |||
192 | /* Disable the adapter */ | ||
193 | writeb(0, dev->base + DW_IC_ENABLE); | ||
194 | |||
195 | /* set standard and fast speed dividers for high/low periods */ | ||
196 | writew((input_clock_khz * 40 / 10000)+1, /* std speed high, 4us */ | ||
197 | dev->base + DW_IC_SS_SCL_HCNT); | ||
198 | writew((input_clock_khz * 47 / 10000)+1, /* std speed low, 4.7us */ | ||
199 | dev->base + DW_IC_SS_SCL_LCNT); | ||
200 | writew((input_clock_khz * 6 / 10000)+1, /* fast speed high, 0.6us */ | ||
201 | dev->base + DW_IC_FS_SCL_HCNT); | ||
202 | writew((input_clock_khz * 13 / 10000)+1, /* fast speed low, 1.3us */ | ||
203 | dev->base + DW_IC_FS_SCL_LCNT); | ||
204 | |||
205 | /* configure the i2c master */ | ||
206 | ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE | | ||
207 | DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST; | ||
208 | writew(ic_con, dev->base + DW_IC_CON); | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * Wait until the bus is no longer busy | ||
213 | */ | ||
214 | static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) | ||
215 | { | ||
216 | int timeout = TIMEOUT; | ||
217 | |||
218 | while (readb(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) { | ||
219 | if (timeout <= 0) { | ||
220 | dev_warn(dev->dev, "timeout waiting for bus ready\n"); | ||
221 | return -ETIMEDOUT; | ||
222 | } | ||
223 | timeout--; | ||
224 | mdelay(1); | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * Initiate low level master read/write transaction. | ||
232 | * This function is called from i2c_dw_xfer when starting a transfer. | ||
233 | * This function is also called from dw_i2c_pump_msg to continue a transfer | ||
234 | * that is longer than the size of the TX FIFO. | ||
235 | */ | ||
236 | static void | ||
237 | i2c_dw_xfer_msg(struct i2c_adapter *adap) | ||
238 | { | ||
239 | struct dw_i2c_dev *dev = i2c_get_adapdata(adap); | ||
240 | struct i2c_msg *msgs = dev->msgs; | ||
241 | int num = dev->msgs_num; | ||
242 | u16 ic_con, intr_mask; | ||
243 | int tx_limit = dev->tx_fifo_depth - readb(dev->base + DW_IC_TXFLR); | ||
244 | int rx_limit = dev->rx_fifo_depth - readb(dev->base + DW_IC_RXFLR); | ||
245 | u16 addr = msgs[dev->msg_write_idx].addr; | ||
246 | u16 buf_len = dev->tx_buf_len; | ||
247 | |||
248 | if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { | ||
249 | /* Disable the adapter */ | ||
250 | writeb(0, dev->base + DW_IC_ENABLE); | ||
251 | |||
252 | /* set the slave (target) address */ | ||
253 | writew(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR); | ||
254 | |||
255 | /* if the slave address is ten bit address, enable 10BITADDR */ | ||
256 | ic_con = readw(dev->base + DW_IC_CON); | ||
257 | if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) | ||
258 | ic_con |= DW_IC_CON_10BITADDR_MASTER; | ||
259 | else | ||
260 | ic_con &= ~DW_IC_CON_10BITADDR_MASTER; | ||
261 | writew(ic_con, dev->base + DW_IC_CON); | ||
262 | |||
263 | /* Enable the adapter */ | ||
264 | writeb(1, dev->base + DW_IC_ENABLE); | ||
265 | } | ||
266 | |||
267 | for (; dev->msg_write_idx < num; dev->msg_write_idx++) { | ||
268 | /* if target address has changed, we need to | ||
269 | * reprogram the target address in the i2c | ||
270 | * adapter when we are done with this transfer | ||
271 | */ | ||
272 | if (msgs[dev->msg_write_idx].addr != addr) | ||
273 | return; | ||
274 | |||
275 | if (msgs[dev->msg_write_idx].len == 0) { | ||
276 | dev_err(dev->dev, | ||
277 | "%s: invalid message length\n", __func__); | ||
278 | dev->msg_err = -EINVAL; | ||
279 | return; | ||
280 | } | ||
281 | |||
282 | if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { | ||
283 | /* new i2c_msg */ | ||
284 | dev->tx_buf = msgs[dev->msg_write_idx].buf; | ||
285 | buf_len = msgs[dev->msg_write_idx].len; | ||
286 | } | ||
287 | |||
288 | while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { | ||
289 | if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { | ||
290 | writew(0x100, dev->base + DW_IC_DATA_CMD); | ||
291 | rx_limit--; | ||
292 | } else | ||
293 | writew(*(dev->tx_buf++), | ||
294 | dev->base + DW_IC_DATA_CMD); | ||
295 | tx_limit--; buf_len--; | ||
296 | } | ||
297 | } | ||
298 | |||
299 | intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT; | ||
300 | if (buf_len > 0) { /* more bytes to be written */ | ||
301 | intr_mask |= DW_IC_INTR_TX_EMPTY; | ||
302 | dev->status |= STATUS_WRITE_IN_PROGRESS; | ||
303 | } else | ||
304 | dev->status &= ~STATUS_WRITE_IN_PROGRESS; | ||
305 | writew(intr_mask, dev->base + DW_IC_INTR_MASK); | ||
306 | |||
307 | dev->tx_buf_len = buf_len; | ||
308 | } | ||
309 | |||
310 | static void | ||
311 | i2c_dw_read(struct i2c_adapter *adap) | ||
312 | { | ||
313 | struct dw_i2c_dev *dev = i2c_get_adapdata(adap); | ||
314 | struct i2c_msg *msgs = dev->msgs; | ||
315 | int num = dev->msgs_num; | ||
316 | u16 addr = msgs[dev->msg_read_idx].addr; | ||
317 | int rx_valid = readw(dev->base + DW_IC_RXFLR); | ||
318 | |||
319 | for (; dev->msg_read_idx < num; dev->msg_read_idx++) { | ||
320 | u16 len; | ||
321 | u8 *buf; | ||
322 | |||
323 | if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) | ||
324 | continue; | ||
325 | |||
326 | /* different i2c client, reprogram the i2c adapter */ | ||
327 | if (msgs[dev->msg_read_idx].addr != addr) | ||
328 | return; | ||
329 | |||
330 | if (!(dev->status & STATUS_READ_IN_PROGRESS)) { | ||
331 | len = msgs[dev->msg_read_idx].len; | ||
332 | buf = msgs[dev->msg_read_idx].buf; | ||
333 | } else { | ||
334 | len = dev->rx_buf_len; | ||
335 | buf = dev->rx_buf; | ||
336 | } | ||
337 | |||
338 | for (; len > 0 && rx_valid > 0; len--, rx_valid--) | ||
339 | *buf++ = readb(dev->base + DW_IC_DATA_CMD); | ||
340 | |||
341 | if (len > 0) { | ||
342 | dev->status |= STATUS_READ_IN_PROGRESS; | ||
343 | dev->rx_buf_len = len; | ||
344 | dev->rx_buf = buf; | ||
345 | return; | ||
346 | } else | ||
347 | dev->status &= ~STATUS_READ_IN_PROGRESS; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Prepare controller for a transaction and call i2c_dw_xfer_msg | ||
353 | */ | ||
354 | static int | ||
355 | i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | ||
356 | { | ||
357 | struct dw_i2c_dev *dev = i2c_get_adapdata(adap); | ||
358 | int ret; | ||
359 | |||
360 | dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); | ||
361 | |||
362 | mutex_lock(&dev->lock); | ||
363 | |||
364 | INIT_COMPLETION(dev->cmd_complete); | ||
365 | dev->msgs = msgs; | ||
366 | dev->msgs_num = num; | ||
367 | dev->cmd_err = 0; | ||
368 | dev->msg_write_idx = 0; | ||
369 | dev->msg_read_idx = 0; | ||
370 | dev->msg_err = 0; | ||
371 | dev->status = STATUS_IDLE; | ||
372 | |||
373 | ret = i2c_dw_wait_bus_not_busy(dev); | ||
374 | if (ret < 0) | ||
375 | goto done; | ||
376 | |||
377 | /* start the transfers */ | ||
378 | i2c_dw_xfer_msg(adap); | ||
379 | |||
380 | /* wait for tx to complete */ | ||
381 | ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ); | ||
382 | if (ret == 0) { | ||
383 | dev_err(dev->dev, "controller timed out\n"); | ||
384 | i2c_dw_init(dev); | ||
385 | ret = -ETIMEDOUT; | ||
386 | goto done; | ||
387 | } else if (ret < 0) | ||
388 | goto done; | ||
389 | |||
390 | if (dev->msg_err) { | ||
391 | ret = dev->msg_err; | ||
392 | goto done; | ||
393 | } | ||
394 | |||
395 | /* no error */ | ||
396 | if (likely(!dev->cmd_err)) { | ||
397 | /* read rx fifo, and disable the adapter */ | ||
398 | do { | ||
399 | i2c_dw_read(adap); | ||
400 | } while (dev->status & STATUS_READ_IN_PROGRESS); | ||
401 | writeb(0, dev->base + DW_IC_ENABLE); | ||
402 | ret = num; | ||
403 | goto done; | ||
404 | } | ||
405 | |||
406 | /* We have an error */ | ||
407 | if (dev->cmd_err == DW_IC_ERR_TX_ABRT) { | ||
408 | unsigned long abort_source = dev->abort_source; | ||
409 | int i; | ||
410 | |||
411 | for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) { | ||
412 | dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); | ||
413 | } | ||
414 | } | ||
415 | ret = -EIO; | ||
416 | |||
417 | done: | ||
418 | mutex_unlock(&dev->lock); | ||
419 | |||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | static u32 i2c_dw_func(struct i2c_adapter *adap) | ||
424 | { | ||
425 | return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR; | ||
426 | } | ||
427 | |||
428 | static void dw_i2c_pump_msg(unsigned long data) | ||
429 | { | ||
430 | struct dw_i2c_dev *dev = (struct dw_i2c_dev *) data; | ||
431 | u16 intr_mask; | ||
432 | |||
433 | i2c_dw_read(&dev->adapter); | ||
434 | i2c_dw_xfer_msg(&dev->adapter); | ||
435 | |||
436 | intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT; | ||
437 | if (dev->status & STATUS_WRITE_IN_PROGRESS) | ||
438 | intr_mask |= DW_IC_INTR_TX_EMPTY; | ||
439 | writew(intr_mask, dev->base + DW_IC_INTR_MASK); | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Interrupt service routine. This gets called whenever an I2C interrupt | ||
444 | * occurs. | ||
445 | */ | ||
446 | static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) | ||
447 | { | ||
448 | struct dw_i2c_dev *dev = dev_id; | ||
449 | u16 stat; | ||
450 | |||
451 | stat = readw(dev->base + DW_IC_INTR_STAT); | ||
452 | dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat); | ||
453 | if (stat & DW_IC_INTR_TX_ABRT) { | ||
454 | dev->abort_source = readw(dev->base + DW_IC_TX_ABRT_SOURCE); | ||
455 | dev->cmd_err |= DW_IC_ERR_TX_ABRT; | ||
456 | dev->status = STATUS_IDLE; | ||
457 | } else if (stat & DW_IC_INTR_TX_EMPTY) | ||
458 | tasklet_schedule(&dev->pump_msg); | ||
459 | |||
460 | readb(dev->base + DW_IC_CLR_INTR); /* clear interrupts */ | ||
461 | writew(0, dev->base + DW_IC_INTR_MASK); /* disable interrupts */ | ||
462 | if (stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) | ||
463 | complete(&dev->cmd_complete); | ||
464 | |||
465 | return IRQ_HANDLED; | ||
466 | } | ||
467 | |||
468 | static struct i2c_algorithm i2c_dw_algo = { | ||
469 | .master_xfer = i2c_dw_xfer, | ||
470 | .functionality = i2c_dw_func, | ||
471 | }; | ||
472 | |||
473 | static int __devinit dw_i2c_probe(struct platform_device *pdev) | ||
474 | { | ||
475 | struct dw_i2c_dev *dev; | ||
476 | struct i2c_adapter *adap; | ||
477 | struct resource *mem, *irq, *ioarea; | ||
478 | int r; | ||
479 | |||
480 | /* NOTE: driver uses the static register mapping */ | ||
481 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
482 | if (!mem) { | ||
483 | dev_err(&pdev->dev, "no mem resource?\n"); | ||
484 | return -EINVAL; | ||
485 | } | ||
486 | |||
487 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
488 | if (!irq) { | ||
489 | dev_err(&pdev->dev, "no irq resource?\n"); | ||
490 | return -EINVAL; | ||
491 | } | ||
492 | |||
493 | ioarea = request_mem_region(mem->start, resource_size(mem), | ||
494 | pdev->name); | ||
495 | if (!ioarea) { | ||
496 | dev_err(&pdev->dev, "I2C region already claimed\n"); | ||
497 | return -EBUSY; | ||
498 | } | ||
499 | |||
500 | dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL); | ||
501 | if (!dev) { | ||
502 | r = -ENOMEM; | ||
503 | goto err_release_region; | ||
504 | } | ||
505 | |||
506 | init_completion(&dev->cmd_complete); | ||
507 | tasklet_init(&dev->pump_msg, dw_i2c_pump_msg, (unsigned long) dev); | ||
508 | mutex_init(&dev->lock); | ||
509 | dev->dev = get_device(&pdev->dev); | ||
510 | dev->irq = irq->start; | ||
511 | platform_set_drvdata(pdev, dev); | ||
512 | |||
513 | dev->clk = clk_get(&pdev->dev, NULL); | ||
514 | if (IS_ERR(dev->clk)) { | ||
515 | r = -ENODEV; | ||
516 | goto err_free_mem; | ||
517 | } | ||
518 | clk_enable(dev->clk); | ||
519 | |||
520 | dev->base = ioremap(mem->start, resource_size(mem)); | ||
521 | if (dev->base == NULL) { | ||
522 | dev_err(&pdev->dev, "failure mapping io resources\n"); | ||
523 | r = -EBUSY; | ||
524 | goto err_unuse_clocks; | ||
525 | } | ||
526 | { | ||
527 | u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1); | ||
528 | |||
529 | dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1; | ||
530 | dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1; | ||
531 | } | ||
532 | i2c_dw_init(dev); | ||
533 | |||
534 | writew(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */ | ||
535 | r = request_irq(dev->irq, i2c_dw_isr, 0, pdev->name, dev); | ||
536 | if (r) { | ||
537 | dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq); | ||
538 | goto err_iounmap; | ||
539 | } | ||
540 | |||
541 | adap = &dev->adapter; | ||
542 | i2c_set_adapdata(adap, dev); | ||
543 | adap->owner = THIS_MODULE; | ||
544 | adap->class = I2C_CLASS_HWMON; | ||
545 | strlcpy(adap->name, "Synopsys DesignWare I2C adapter", | ||
546 | sizeof(adap->name)); | ||
547 | adap->algo = &i2c_dw_algo; | ||
548 | adap->dev.parent = &pdev->dev; | ||
549 | |||
550 | adap->nr = pdev->id; | ||
551 | r = i2c_add_numbered_adapter(adap); | ||
552 | if (r) { | ||
553 | dev_err(&pdev->dev, "failure adding adapter\n"); | ||
554 | goto err_free_irq; | ||
555 | } | ||
556 | |||
557 | return 0; | ||
558 | |||
559 | err_free_irq: | ||
560 | free_irq(dev->irq, dev); | ||
561 | err_iounmap: | ||
562 | iounmap(dev->base); | ||
563 | err_unuse_clocks: | ||
564 | clk_disable(dev->clk); | ||
565 | clk_put(dev->clk); | ||
566 | dev->clk = NULL; | ||
567 | err_free_mem: | ||
568 | platform_set_drvdata(pdev, NULL); | ||
569 | put_device(&pdev->dev); | ||
570 | kfree(dev); | ||
571 | err_release_region: | ||
572 | release_mem_region(mem->start, resource_size(mem)); | ||
573 | |||
574 | return r; | ||
575 | } | ||
576 | |||
577 | static int __devexit dw_i2c_remove(struct platform_device *pdev) | ||
578 | { | ||
579 | struct dw_i2c_dev *dev = platform_get_drvdata(pdev); | ||
580 | struct resource *mem; | ||
581 | |||
582 | platform_set_drvdata(pdev, NULL); | ||
583 | i2c_del_adapter(&dev->adapter); | ||
584 | put_device(&pdev->dev); | ||
585 | |||
586 | clk_disable(dev->clk); | ||
587 | clk_put(dev->clk); | ||
588 | dev->clk = NULL; | ||
589 | |||
590 | writeb(0, dev->base + DW_IC_ENABLE); | ||
591 | free_irq(dev->irq, dev); | ||
592 | kfree(dev); | ||
593 | |||
594 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
595 | release_mem_region(mem->start, resource_size(mem)); | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | /* work with hotplug and coldplug */ | ||
600 | MODULE_ALIAS("platform:i2c_designware"); | ||
601 | |||
602 | static struct platform_driver dw_i2c_driver = { | ||
603 | .remove = __devexit_p(dw_i2c_remove), | ||
604 | .driver = { | ||
605 | .name = "i2c_designware", | ||
606 | .owner = THIS_MODULE, | ||
607 | }, | ||
608 | }; | ||
609 | |||
610 | static int __init dw_i2c_init_driver(void) | ||
611 | { | ||
612 | return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe); | ||
613 | } | ||
614 | module_init(dw_i2c_init_driver); | ||
615 | |||
616 | static void __exit dw_i2c_exit_driver(void) | ||
617 | { | ||
618 | platform_driver_unregister(&dw_i2c_driver); | ||
619 | } | ||
620 | module_exit(dw_i2c_exit_driver); | ||
621 | |||
622 | MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>"); | ||
623 | MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter"); | ||
624 | MODULE_LICENSE("GPL"); | ||
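A note on the SCL counts programmed by i2c_dw_init() above: each writew() value is the input clock rate in kHz scaled by the target phase time, with the x/10000 constants encoding tenths of a microsecond. A minimal worked sketch of that arithmetic, assuming a hypothetical 100 MHz reference clock (the real rate comes from clk_get_rate(dev->clk)):

    #include <stdio.h>

    /* Worked example of the divider math in i2c_dw_init(); the 100 MHz
     * (100000 kHz) input clock here is an assumption for illustration. */
    int main(void)
    {
            unsigned int khz = 100000;

            printf("ss_hcnt=%u\n", khz * 40 / 10000 + 1); /* 401 ~ 4.0 us */
            printf("ss_lcnt=%u\n", khz * 47 / 10000 + 1); /* 471 ~ 4.7 us */
            printf("fs_hcnt=%u\n", khz *  6 / 10000 + 1); /*  61 ~ 0.6 us */
            printf("fs_lcnt=%u\n", khz * 13 / 10000 + 1); /* 131 ~ 1.3 us */
            return 0;
    }

Roughly 4.0 us high plus 4.7 us low per SCL cycle lands near the 100 kHz standard-mode rate; 0.6 us plus 1.3 us lands near 400 kHz fast mode, ignoring rise/fall times.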
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 36e0675be9f7..020f9573fd82 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
@@ -231,6 +231,17 @@ config DM_MIRROR | |||
231 | Allow volume managers to mirror logical volumes, also | 231 | Allow volume managers to mirror logical volumes, also |
232 | needed for live data migration tools such as 'pvmove'. | 232 | needed for live data migration tools such as 'pvmove'. |
233 | 233 | ||
234 | config DM_LOG_USERSPACE | ||
235 | tristate "Mirror userspace logging (EXPERIMENTAL)" | ||
236 | depends on DM_MIRROR && EXPERIMENTAL && NET | ||
237 | select CONNECTOR | ||
238 | ---help--- | ||
239 | The userspace logging module provides a mechanism for | ||
240 | relaying the dm-dirty-log API to userspace. Log designs | ||
241 | which are more suited to userspace implementation (e.g. | ||
242 | shared storage logs) or experimental logs can be implemented | ||
243 | by leveraging this framework. | ||
244 | |||
234 | config DM_ZERO | 245 | config DM_ZERO |
235 | tristate "Zero target" | 246 | tristate "Zero target" |
236 | depends on BLK_DEV_DM | 247 | depends on BLK_DEV_DM |
@@ -249,6 +260,25 @@ config DM_MULTIPATH | |||
249 | ---help--- | 260 | ---help--- |
250 | Allow volume managers to support multipath hardware. | 261 | Allow volume managers to support multipath hardware. |
251 | 262 | ||
263 | config DM_MULTIPATH_QL | ||
264 | tristate "I/O Path Selector based on the number of in-flight I/Os" | ||
265 | depends on DM_MULTIPATH | ||
266 | ---help--- | ||
267 | This path selector is a dynamic load balancer which selects | ||
268 | the path with the least number of in-flight I/Os. | ||
269 | |||
270 | If unsure, say N. | ||
271 | |||
272 | config DM_MULTIPATH_ST | ||
273 | tristate "I/O Path Selector based on the service time" | ||
274 | depends on DM_MULTIPATH | ||
275 | ---help--- | ||
276 | This path selector is a dynamic load balancer which selects | ||
277 | the path expected to complete the incoming I/O in the shortest | ||
278 | time. | ||
279 | |||
280 | If unsure, say N. | ||
281 | |||
252 | config DM_DELAY | 282 | config DM_DELAY |
253 | tristate "I/O delaying target (EXPERIMENTAL)" | 283 | tristate "I/O delaying target (EXPERIMENTAL)" |
254 | depends on BLK_DEV_DM && EXPERIMENTAL | 284 | depends on BLK_DEV_DM && EXPERIMENTAL |
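Both new path selectors are dynamic load balancers that plug into the dm-path-selector API. As a rough illustration of the queue-length policy only (a hypothetical sketch, not the dm-queue-length code, which also handles repeat counts and locking), the core decision is "fewest in-flight I/Os wins":

    /* Hypothetical sketch of least-in-flight path selection. */
    struct demo_path {
            unsigned int in_flight;   /* I/Os outstanding on this path */
    };

    static struct demo_path *demo_select(struct demo_path *paths, int nr)
    {
            struct demo_path *best = &paths[0];
            int i;

            for (i = 1; i < nr; i++)
                    if (paths[i].in_flight < best->in_flight)
                            best = &paths[i];
            return best;
    }

The service-time selector generalizes this idea by weighting outstanding work against each path's relative throughput.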
diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 45cc5951d928..1dc4185bd781 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile | |||
@@ -8,6 +8,8 @@ dm-multipath-y += dm-path-selector.o dm-mpath.o | |||
8 | dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \ | 8 | dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \ |
9 | dm-snap-persistent.o | 9 | dm-snap-persistent.o |
10 | dm-mirror-y += dm-raid1.o | 10 | dm-mirror-y += dm-raid1.o |
11 | dm-log-userspace-y \ | ||
12 | += dm-log-userspace-base.o dm-log-userspace-transfer.o | ||
11 | md-mod-y += md.o bitmap.o | 13 | md-mod-y += md.o bitmap.o |
12 | raid456-y += raid5.o | 14 | raid456-y += raid5.o |
13 | raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \ | 15 | raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \ |
@@ -36,8 +38,11 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o | |||
36 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o | 38 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o |
37 | obj-$(CONFIG_DM_DELAY) += dm-delay.o | 39 | obj-$(CONFIG_DM_DELAY) += dm-delay.o |
38 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o | 40 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o |
41 | obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o | ||
42 | obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o | ||
39 | obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o | 43 | obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o |
40 | obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o | 44 | obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o |
45 | obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o | ||
41 | obj-$(CONFIG_DM_ZERO) += dm-zero.o | 46 | obj-$(CONFIG_DM_ZERO) += dm-zero.o |
42 | 47 | ||
43 | quiet_cmd_unroll = UNROLL $@ | 48 | quiet_cmd_unroll = UNROLL $@ |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 53394e863c74..9933eb861c71 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1132,6 +1132,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1132 | goto bad_crypt_queue; | 1132 | goto bad_crypt_queue; |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | ti->num_flush_requests = 1; | ||
1135 | ti->private = cc; | 1136 | ti->private = cc; |
1136 | return 0; | 1137 | return 0; |
1137 | 1138 | ||
@@ -1189,6 +1190,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, | |||
1189 | union map_info *map_context) | 1190 | union map_info *map_context) |
1190 | { | 1191 | { |
1191 | struct dm_crypt_io *io; | 1192 | struct dm_crypt_io *io; |
1193 | struct crypt_config *cc; | ||
1194 | |||
1195 | if (unlikely(bio_empty_barrier(bio))) { | ||
1196 | cc = ti->private; | ||
1197 | bio->bi_bdev = cc->dev->bdev; | ||
1198 | return DM_MAPIO_REMAPPED; | ||
1199 | } | ||
1192 | 1200 | ||
1193 | io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); | 1201 | io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); |
1194 | 1202 | ||
@@ -1305,9 +1313,17 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | |||
1305 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | 1313 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
1306 | } | 1314 | } |
1307 | 1315 | ||
1316 | static int crypt_iterate_devices(struct dm_target *ti, | ||
1317 | iterate_devices_callout_fn fn, void *data) | ||
1318 | { | ||
1319 | struct crypt_config *cc = ti->private; | ||
1320 | |||
1321 | return fn(ti, cc->dev, cc->start, data); | ||
1322 | } | ||
1323 | |||
1308 | static struct target_type crypt_target = { | 1324 | static struct target_type crypt_target = { |
1309 | .name = "crypt", | 1325 | .name = "crypt", |
1310 | .version= {1, 6, 0}, | 1326 | .version = {1, 7, 0}, |
1311 | .module = THIS_MODULE, | 1327 | .module = THIS_MODULE, |
1312 | .ctr = crypt_ctr, | 1328 | .ctr = crypt_ctr, |
1313 | .dtr = crypt_dtr, | 1329 | .dtr = crypt_dtr, |
@@ -1318,6 +1334,7 @@ static struct target_type crypt_target = { | |||
1318 | .resume = crypt_resume, | 1334 | .resume = crypt_resume, |
1319 | .message = crypt_message, | 1335 | .message = crypt_message, |
1320 | .merge = crypt_merge, | 1336 | .merge = crypt_merge, |
1337 | .iterate_devices = crypt_iterate_devices, | ||
1321 | }; | 1338 | }; |
1322 | 1339 | ||
1323 | static int __init dm_crypt_init(void) | 1340 | static int __init dm_crypt_init(void) |
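The .iterate_devices method added here (and to dm-delay and dm-linear below) lets the device-mapper core visit every underlying device of a target, for example when stacking queue limits across a table. Inferring only from the calls in these hunks, the callout has roughly the following shape; the authoritative typedef is in include/linux/device-mapper.h:

    /* Assumed shape, inferred from fn(ti, cc->dev, cc->start, data) above;
     * consult include/linux/device-mapper.h for the real typedef. */
    typedef int (*iterate_devices_callout_fn)(struct dm_target *ti,
                                              struct dm_dev *dev,
                                              sector_t start, void *data);

A target returns the callout's result directly, so iteration stops at the first device that reports a problem.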
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 559dbb52bc85..4e5b843cd4d7 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -197,6 +197,7 @@ out: | |||
197 | mutex_init(&dc->timer_lock); | 197 | mutex_init(&dc->timer_lock); |
198 | atomic_set(&dc->may_delay, 1); | 198 | atomic_set(&dc->may_delay, 1); |
199 | 199 | ||
200 | ti->num_flush_requests = 1; | ||
200 | ti->private = dc; | 201 | ti->private = dc; |
201 | return 0; | 202 | return 0; |
202 | 203 | ||
@@ -278,8 +279,9 @@ static int delay_map(struct dm_target *ti, struct bio *bio, | |||
278 | 279 | ||
279 | if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { | 280 | if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { |
280 | bio->bi_bdev = dc->dev_write->bdev; | 281 | bio->bi_bdev = dc->dev_write->bdev; |
281 | bio->bi_sector = dc->start_write + | 282 | if (bio_sectors(bio)) |
282 | (bio->bi_sector - ti->begin); | 283 | bio->bi_sector = dc->start_write + |
284 | (bio->bi_sector - ti->begin); | ||
283 | 285 | ||
284 | return delay_bio(dc, dc->write_delay, bio); | 286 | return delay_bio(dc, dc->write_delay, bio); |
285 | } | 287 | } |
@@ -316,9 +318,26 @@ static int delay_status(struct dm_target *ti, status_type_t type, | |||
316 | return 0; | 318 | return 0; |
317 | } | 319 | } |
318 | 320 | ||
321 | static int delay_iterate_devices(struct dm_target *ti, | ||
322 | iterate_devices_callout_fn fn, void *data) | ||
323 | { | ||
324 | struct delay_c *dc = ti->private; | ||
325 | int ret = 0; | ||
326 | |||
327 | ret = fn(ti, dc->dev_read, dc->start_read, data); | ||
328 | if (ret) | ||
329 | goto out; | ||
330 | |||
331 | if (dc->dev_write) | ||
332 | ret = fn(ti, dc->dev_write, dc->start_write, data); | ||
333 | |||
334 | out: | ||
335 | return ret; | ||
336 | } | ||
337 | |||
319 | static struct target_type delay_target = { | 338 | static struct target_type delay_target = { |
320 | .name = "delay", | 339 | .name = "delay", |
321 | .version = {1, 0, 2}, | 340 | .version = {1, 1, 0}, |
322 | .module = THIS_MODULE, | 341 | .module = THIS_MODULE, |
323 | .ctr = delay_ctr, | 342 | .ctr = delay_ctr, |
324 | .dtr = delay_dtr, | 343 | .dtr = delay_dtr, |
@@ -326,6 +345,7 @@ static struct target_type delay_target = { | |||
326 | .presuspend = delay_presuspend, | 345 | .presuspend = delay_presuspend, |
327 | .resume = delay_resume, | 346 | .resume = delay_resume, |
328 | .status = delay_status, | 347 | .status = delay_status, |
348 | .iterate_devices = delay_iterate_devices, | ||
329 | }; | 349 | }; |
330 | 350 | ||
331 | static int __init dm_delay_init(void) | 351 | static int __init dm_delay_init(void) |
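dm-crypt and dm-delay above, and dm-linear below, now share an empty-barrier convention: a flush arrives as a barrier bio carrying zero data sectors, so the target only retargets bi_bdev and skips the sector arithmetic. Distilled into a sketch (bio_sectors() and bio_empty_barrier() are existing helpers in this kernel):

    /* Sketch of the shared remap convention from the hunks above. */
    static void demo_remap(struct bio *bio, struct block_device *bdev,
                           sector_t dev_start, sector_t target_begin)
    {
            bio->bi_bdev = bdev;
            if (bio_sectors(bio))   /* zero for empty (flush) barriers */
                    bio->bi_sector = dev_start +
                                     (bio->bi_sector - target_begin);
    }

Setting ti->num_flush_requests = 1 in the constructors is what opts each target into receiving those empty barriers in the first place.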
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 75d8081a9041..c3ae51584b12 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
@@ -216,7 +216,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | |||
216 | return -EINVAL; | 216 | return -EINVAL; |
217 | } | 217 | } |
218 | 218 | ||
219 | type = get_type(argv[1]); | 219 | type = get_type(&persistent); |
220 | if (!type) { | 220 | if (!type) { |
221 | ti->error = "Exception store type not recognised"; | 221 | ti->error = "Exception store type not recognised"; |
222 | r = -EINVAL; | 222 | r = -EINVAL; |
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index c92701dc5001..2442c8c07898 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h | |||
@@ -156,7 +156,7 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) | |||
156 | */ | 156 | */ |
157 | static inline sector_t get_dev_size(struct block_device *bdev) | 157 | static inline sector_t get_dev_size(struct block_device *bdev) |
158 | { | 158 | { |
159 | return bdev->bd_inode->i_size >> SECTOR_SHIFT; | 159 | return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; |
160 | } | 160 | } |
161 | 161 | ||
162 | static inline chunk_t sector_to_chunk(struct dm_exception_store *store, | 162 | static inline chunk_t sector_to_chunk(struct dm_exception_store *store, |
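The get_dev_size() change swaps a raw load of bd_inode->i_size for i_size_read(). On 32-bit SMP kernels a 64-bit size is two machine words, so a plain read can observe a torn, half-updated value; i_size_read() returns a consistent snapshot. Usage stays a one-liner:

    /* i_size_read() guards against torn 64-bit reads on 32-bit SMP. */
    sector_t nr_sectors = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;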
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index e73aabd61cd7..3a2e6a2f8bdd 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -22,6 +22,7 @@ struct dm_io_client { | |||
22 | /* FIXME: can we shrink this ? */ | 22 | /* FIXME: can we shrink this ? */ |
23 | struct io { | 23 | struct io { |
24 | unsigned long error_bits; | 24 | unsigned long error_bits; |
25 | unsigned long eopnotsupp_bits; | ||
25 | atomic_t count; | 26 | atomic_t count; |
26 | struct task_struct *sleeper; | 27 | struct task_struct *sleeper; |
27 | struct dm_io_client *client; | 28 | struct dm_io_client *client; |
@@ -107,8 +108,11 @@ static inline unsigned bio_get_region(struct bio *bio) | |||
107 | *---------------------------------------------------------------*/ | 108 | *---------------------------------------------------------------*/ |
108 | static void dec_count(struct io *io, unsigned int region, int error) | 109 | static void dec_count(struct io *io, unsigned int region, int error) |
109 | { | 110 | { |
110 | if (error) | 111 | if (error) { |
111 | set_bit(region, &io->error_bits); | 112 | set_bit(region, &io->error_bits); |
113 | if (error == -EOPNOTSUPP) | ||
114 | set_bit(region, &io->eopnotsupp_bits); | ||
115 | } | ||
112 | 116 | ||
113 | if (atomic_dec_and_test(&io->count)) { | 117 | if (atomic_dec_and_test(&io->count)) { |
114 | if (io->sleeper) | 118 | if (io->sleeper) |
@@ -360,7 +364,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, | |||
360 | return -EIO; | 364 | return -EIO; |
361 | } | 365 | } |
362 | 366 | ||
367 | retry: | ||
363 | io.error_bits = 0; | 368 | io.error_bits = 0; |
369 | io.eopnotsupp_bits = 0; | ||
364 | atomic_set(&io.count, 1); /* see dispatch_io() */ | 370 | atomic_set(&io.count, 1); /* see dispatch_io() */ |
365 | io.sleeper = current; | 371 | io.sleeper = current; |
366 | io.client = client; | 372 | io.client = client; |
@@ -377,6 +383,11 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, | |||
377 | } | 383 | } |
378 | set_current_state(TASK_RUNNING); | 384 | set_current_state(TASK_RUNNING); |
379 | 385 | ||
386 | if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { | ||
387 | rw &= ~(1 << BIO_RW_BARRIER); | ||
388 | goto retry; | ||
389 | } | ||
390 | |||
380 | if (error_bits) | 391 | if (error_bits) |
381 | *error_bits = io.error_bits; | 392 | *error_bits = io.error_bits; |
382 | 393 | ||
@@ -397,6 +408,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, | |||
397 | 408 | ||
398 | io = mempool_alloc(client->pool, GFP_NOIO); | 409 | io = mempool_alloc(client->pool, GFP_NOIO); |
399 | io->error_bits = 0; | 410 | io->error_bits = 0; |
411 | io->eopnotsupp_bits = 0; | ||
400 | atomic_set(&io->count, 1); /* see dispatch_io() */ | 412 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
401 | io->sleeper = NULL; | 413 | io->sleeper = NULL; |
402 | io->client = client; | 414 | io->client = client; |
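The dm-io hunks add a barrier fallback: dec_count() now records -EOPNOTSUPP per region, and if a barrier write tripped it, sync_io() reissues the whole I/O once with the barrier bit cleared. Reduced to a standalone sketch (submit_and_wait() is a hypothetical stand-in for dispatch_io() plus the sleep loop):

    /* Hypothetical sketch of the retry-without-barrier pattern. */
    static int rw_with_barrier_fallback(int rw)
    {
            int err;
    retry:
            err = submit_and_wait(rw);              /* assumed helper */
            if (err == -EOPNOTSUPP && (rw & (1 << BIO_RW_BARRIER))) {
                    rw &= ~(1 << BIO_RW_BARRIER);   /* degrade to plain write */
                    goto retry;
            }
            return err;
    }

Since the barrier bit is cleared before retrying, the loop runs at most twice.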
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 1128d3fba797..7f77f18fcafa 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -276,7 +276,7 @@ retry: | |||
276 | up_write(&_hash_lock); | 276 | up_write(&_hash_lock); |
277 | } | 277 | } |
278 | 278 | ||
279 | static int dm_hash_rename(const char *old, const char *new) | 279 | static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) |
280 | { | 280 | { |
281 | char *new_name, *old_name; | 281 | char *new_name, *old_name; |
282 | struct hash_cell *hc; | 282 | struct hash_cell *hc; |
@@ -333,7 +333,7 @@ static int dm_hash_rename(const char *old, const char *new) | |||
333 | dm_table_put(table); | 333 | dm_table_put(table); |
334 | } | 334 | } |
335 | 335 | ||
336 | dm_kobject_uevent(hc->md); | 336 | dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie); |
337 | 337 | ||
338 | dm_put(hc->md); | 338 | dm_put(hc->md); |
339 | up_write(&_hash_lock); | 339 | up_write(&_hash_lock); |
@@ -680,6 +680,9 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size) | |||
680 | 680 | ||
681 | __hash_remove(hc); | 681 | __hash_remove(hc); |
682 | up_write(&_hash_lock); | 682 | up_write(&_hash_lock); |
683 | |||
684 | dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr); | ||
685 | |||
683 | dm_put(md); | 686 | dm_put(md); |
684 | param->data_size = 0; | 687 | param->data_size = 0; |
685 | return 0; | 688 | return 0; |
@@ -715,7 +718,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size) | |||
715 | return r; | 718 | return r; |
716 | 719 | ||
717 | param->data_size = 0; | 720 | param->data_size = 0; |
718 | return dm_hash_rename(param->name, new_name); | 721 | return dm_hash_rename(param->event_nr, param->name, new_name); |
719 | } | 722 | } |
720 | 723 | ||
721 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) | 724 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) |
@@ -842,8 +845,11 @@ static int do_resume(struct dm_ioctl *param) | |||
842 | if (dm_suspended(md)) | 845 | if (dm_suspended(md)) |
843 | r = dm_resume(md); | 846 | r = dm_resume(md); |
844 | 847 | ||
845 | if (!r) | 848 | |
849 | if (!r) { | ||
850 | dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr); | ||
846 | r = __dev_status(md, param); | 851 | r = __dev_status(md, param); |
852 | } | ||
847 | 853 | ||
848 | dm_put(md); | 854 | dm_put(md); |
849 | return r; | 855 | return r; |
@@ -1044,6 +1050,12 @@ static int populate_table(struct dm_table *table, | |||
1044 | next = spec->next; | 1050 | next = spec->next; |
1045 | } | 1051 | } |
1046 | 1052 | ||
1053 | r = dm_table_set_type(table); | ||
1054 | if (r) { | ||
1055 | DMWARN("unable to set table type"); | ||
1056 | return r; | ||
1057 | } | ||
1058 | |||
1047 | return dm_table_complete(table); | 1059 | return dm_table_complete(table); |
1048 | } | 1060 | } |
1049 | 1061 | ||
@@ -1089,6 +1101,13 @@ static int table_load(struct dm_ioctl *param, size_t param_size) | |||
1089 | goto out; | 1101 | goto out; |
1090 | } | 1102 | } |
1091 | 1103 | ||
1104 | r = dm_table_alloc_md_mempools(t); | ||
1105 | if (r) { | ||
1106 | DMWARN("unable to allocate mempools for this table"); | ||
1107 | dm_table_destroy(t); | ||
1108 | goto out; | ||
1109 | } | ||
1110 | |||
1092 | down_write(&_hash_lock); | 1111 | down_write(&_hash_lock); |
1093 | hc = dm_get_mdptr(md); | 1112 | hc = dm_get_mdptr(md); |
1094 | if (!hc || hc->md != md) { | 1113 | if (!hc || hc->md != md) { |
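dm_kobject_uevent() now takes a kobject action and a cookie (the ioctl's event_nr), so rename, remove and resume emit uevents that userspace can correlate with the request that caused them. A plausible sketch of the cookie handling, assuming the DM_COOKIE environment-variable convention (the real helper lives in drivers/md/dm.c, outside this hunk):

    /* Plausible sketch only; see dm_kobject_uevent() in drivers/md/dm.c. */
    static void demo_uevent(struct kobject *kobj,
                            enum kobject_action action, u32 cookie)
    {
            char buf[32];
            char *envp[] = { buf, NULL };

            if (!cookie) {
                    kobject_uevent(kobj, action);   /* plain event */
                    return;
            }
            snprintf(buf, sizeof(buf), "DM_COOKIE=%u", cookie);
            kobject_uevent_env(kobj, action, envp);
    }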
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 79fb53e51c70..9184b6deb868 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c | |||
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
53 | goto bad; | 53 | goto bad; |
54 | } | 54 | } |
55 | 55 | ||
56 | ti->num_flush_requests = 1; | ||
56 | ti->private = lc; | 57 | ti->private = lc; |
57 | return 0; | 58 | return 0; |
58 | 59 | ||
@@ -81,7 +82,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) | |||
81 | struct linear_c *lc = ti->private; | 82 | struct linear_c *lc = ti->private; |
82 | 83 | ||
83 | bio->bi_bdev = lc->dev->bdev; | 84 | bio->bi_bdev = lc->dev->bdev; |
84 | bio->bi_sector = linear_map_sector(ti, bio->bi_sector); | 85 | if (bio_sectors(bio)) |
86 | bio->bi_sector = linear_map_sector(ti, bio->bi_sector); | ||
85 | } | 87 | } |
86 | 88 | ||
87 | static int linear_map(struct dm_target *ti, struct bio *bio, | 89 | static int linear_map(struct dm_target *ti, struct bio *bio, |
@@ -132,9 +134,17 @@ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | |||
132 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | 134 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
133 | } | 135 | } |
134 | 136 | ||
137 | static int linear_iterate_devices(struct dm_target *ti, | ||
138 | iterate_devices_callout_fn fn, void *data) | ||
139 | { | ||
140 | struct linear_c *lc = ti->private; | ||
141 | |||
142 | return fn(ti, lc->dev, lc->start, data); | ||
143 | } | ||
144 | |||
135 | static struct target_type linear_target = { | 145 | static struct target_type linear_target = { |
136 | .name = "linear", | 146 | .name = "linear", |
137 | .version= {1, 0, 3}, | 147 | .version = {1, 1, 0}, |
138 | .module = THIS_MODULE, | 148 | .module = THIS_MODULE, |
139 | .ctr = linear_ctr, | 149 | .ctr = linear_ctr, |
140 | .dtr = linear_dtr, | 150 | .dtr = linear_dtr, |
@@ -142,6 +152,7 @@ static struct target_type linear_target = { | |||
142 | .status = linear_status, | 152 | .status = linear_status, |
143 | .ioctl = linear_ioctl, | 153 | .ioctl = linear_ioctl, |
144 | .merge = linear_merge, | 154 | .merge = linear_merge, |
155 | .iterate_devices = linear_iterate_devices, | ||
145 | }; | 156 | }; |
146 | 157 | ||
147 | int __init dm_linear_init(void) | 158 | int __init dm_linear_init(void) |
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c new file mode 100644 index 000000000000..e69b96560997 --- /dev/null +++ b/drivers/md/dm-log-userspace-base.c | |||
@@ -0,0 +1,696 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2009 Red Hat, Inc. | ||
3 | * | ||
4 | * This file is released under the LGPL. | ||
5 | */ | ||
6 | |||
7 | #include <linux/bio.h> | ||
8 | #include <linux/dm-dirty-log.h> | ||
9 | #include <linux/device-mapper.h> | ||
10 | #include <linux/dm-log-userspace.h> | ||
11 | |||
12 | #include "dm-log-userspace-transfer.h" | ||
13 | |||
14 | struct flush_entry { | ||
15 | int type; | ||
16 | region_t region; | ||
17 | struct list_head list; | ||
18 | }; | ||
19 | |||
20 | struct log_c { | ||
21 | struct dm_target *ti; | ||
22 | uint32_t region_size; | ||
23 | region_t region_count; | ||
24 | char uuid[DM_UUID_LEN]; | ||
25 | |||
26 | char *usr_argv_str; | ||
27 | uint32_t usr_argc; | ||
28 | |||
29 | /* | ||
30 | * in_sync_hint gets set when doing is_remote_recovering. It | ||
31 | * represents the first region that needs recovery. IOW, the | ||
32 | * first zero bit of sync_bits. This can be useful to limit | ||
33 | * traffic for calls like is_remote_recovering and get_resync_work, | ||
34 | * but take care in its use for anything else. | ||
35 | */ | ||
36 | uint64_t in_sync_hint; | ||
37 | |||
38 | spinlock_t flush_lock; | ||
39 | struct list_head flush_list; /* only for clear and mark requests */ | ||
40 | }; | ||
41 | |||
42 | static mempool_t *flush_entry_pool; | ||
43 | |||
44 | static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data) | ||
45 | { | ||
46 | return kmalloc(sizeof(struct flush_entry), gfp_mask); | ||
47 | } | ||
48 | |||
49 | static void flush_entry_free(void *element, void *pool_data) | ||
50 | { | ||
51 | kfree(element); | ||
52 | } | ||
53 | |||
54 | static int userspace_do_request(struct log_c *lc, const char *uuid, | ||
55 | int request_type, char *data, size_t data_size, | ||
56 | char *rdata, size_t *rdata_size) | ||
57 | { | ||
58 | int r; | ||
59 | |||
60 | /* | ||
61 | * If the server isn't there, -ESRCH is returned, | ||
62 | * and we must keep trying until the server is | ||
63 | * restored. | ||
64 | */ | ||
65 | retry: | ||
66 | r = dm_consult_userspace(uuid, request_type, data, | ||
67 | data_size, rdata, rdata_size); | ||
68 | |||
69 | if (r != -ESRCH) | ||
70 | return r; | ||
71 | |||
72 | DMERR(" Userspace log server not found."); | ||
73 | while (1) { | ||
74 | set_current_state(TASK_INTERRUPTIBLE); | ||
75 | schedule_timeout(2*HZ); | ||
76 | DMWARN("Attempting to contact userspace log server..."); | ||
77 | r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str, | ||
78 | strlen(lc->usr_argv_str) + 1, | ||
79 | NULL, NULL); | ||
80 | if (!r) | ||
81 | break; | ||
82 | } | ||
83 | DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete"); | ||
84 | r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL, | ||
85 | 0, NULL, NULL); | ||
86 | if (!r) | ||
87 | goto retry; | ||
88 | |||
89 | DMERR("Error trying to resume userspace log: %d", r); | ||
90 | |||
91 | return -ESRCH; | ||
92 | } | ||
93 | |||
94 | static int build_constructor_string(struct dm_target *ti, | ||
95 | unsigned argc, char **argv, | ||
96 | char **ctr_str) | ||
97 | { | ||
98 | int i, str_size; | ||
99 | char *str = NULL; | ||
100 | |||
101 | *ctr_str = NULL; | ||
102 | |||
103 | for (i = 0, str_size = 0; i < argc; i++) | ||
104 | str_size += strlen(argv[i]) + 1; /* +1 for space between args */ | ||
105 | |||
106 | str_size += 20; /* Max number of chars in a printed u64 number */ | ||
107 | |||
108 | str = kzalloc(str_size, GFP_KERNEL); | ||
109 | if (!str) { | ||
110 | DMWARN("Unable to allocate memory for constructor string"); | ||
111 | return -ENOMEM; | ||
112 | } | ||
113 | |||
114 | for (i = 0, str_size = 0; i < argc; i++) | ||
115 | str_size += sprintf(str + str_size, "%s ", argv[i]); | ||
116 | str_size += sprintf(str + str_size, "%llu", | ||
117 | (unsigned long long)ti->len); | ||
118 | |||
119 | *ctr_str = str; | ||
120 | return str_size; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * userspace_ctr | ||
125 | * | ||
126 | * argv contains: | ||
127 | * <UUID> <other args> | ||
128 | * Where 'other args' is the userspace implementation specific log | ||
129 | * arguments. An example might be: | ||
130 | * <UUID> clustered_disk <arg count> <log dev> <region_size> [[no]sync] | ||
131 | * | ||
132 | * So, this module will strip off the <UUID> for identification purposes | ||
133 | * when communicating with userspace about a log; but will pass on everything | ||
134 | * else. | ||
135 | */ | ||
136 | static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, | ||
137 | unsigned argc, char **argv) | ||
138 | { | ||
139 | int r = 0; | ||
140 | int str_size; | ||
141 | char *ctr_str = NULL; | ||
142 | struct log_c *lc = NULL; | ||
143 | uint64_t rdata; | ||
144 | size_t rdata_size = sizeof(rdata); | ||
145 | |||
146 | if (argc < 3) { | ||
147 | DMWARN("Too few arguments to userspace dirty log"); | ||
148 | return -EINVAL; | ||
149 | } | ||
150 | |||
151 | lc = kmalloc(sizeof(*lc), GFP_KERNEL); | ||
152 | if (!lc) { | ||
153 | DMWARN("Unable to allocate userspace log context."); | ||
154 | return -ENOMEM; | ||
155 | } | ||
156 | |||
157 | lc->ti = ti; | ||
158 | |||
159 | if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { | ||
160 | DMWARN("UUID argument too long."); | ||
161 | kfree(lc); | ||
162 | return -EINVAL; | ||
163 | } | ||
164 | |||
165 | strncpy(lc->uuid, argv[0], DM_UUID_LEN); | ||
166 | spin_lock_init(&lc->flush_lock); | ||
167 | INIT_LIST_HEAD(&lc->flush_list); | ||
168 | |||
169 | str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str); | ||
170 | if (str_size < 0) { | ||
171 | kfree(lc); | ||
172 | return str_size; | ||
173 | } | ||
174 | |||
175 | /* Send table string */ | ||
176 | r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR, | ||
177 | ctr_str, str_size, NULL, NULL); | ||
178 | |||
179 | if (r == -ESRCH) { | ||
180 | DMERR("Userspace log server not found"); | ||
181 | goto out; | ||
182 | } | ||
183 | |||
184 | /* Since the region size does not change, get it now */ | ||
185 | rdata_size = sizeof(rdata); | ||
186 | r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE, | ||
187 | NULL, 0, (char *)&rdata, &rdata_size); | ||
188 | |||
189 | if (r) { | ||
190 | DMERR("Failed to get region size of dirty log"); | ||
191 | goto out; | ||
192 | } | ||
193 | |||
194 | lc->region_size = (uint32_t)rdata; | ||
195 | lc->region_count = dm_sector_div_up(ti->len, lc->region_size); | ||
196 | |||
197 | out: | ||
198 | if (r) { | ||
199 | kfree(lc); | ||
200 | kfree(ctr_str); | ||
201 | } else { | ||
202 | lc->usr_argv_str = ctr_str; | ||
203 | lc->usr_argc = argc; | ||
204 | log->context = lc; | ||
205 | } | ||
206 | |||
207 | return r; | ||
208 | } | ||
209 | |||
210 | static void userspace_dtr(struct dm_dirty_log *log) | ||
211 | { | ||
212 | int r; | ||
213 | struct log_c *lc = log->context; | ||
214 | |||
215 | r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR, | ||
216 | NULL, 0, | ||
217 | NULL, NULL); | ||
218 | |||
219 | kfree(lc->usr_argv_str); | ||
220 | kfree(lc); | ||
221 | |||
222 | return; | ||
223 | } | ||
224 | |||
225 | static int userspace_presuspend(struct dm_dirty_log *log) | ||
226 | { | ||
227 | int r; | ||
228 | struct log_c *lc = log->context; | ||
229 | |||
230 | r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND, | ||
231 | NULL, 0, | ||
232 | NULL, NULL); | ||
233 | |||
234 | return r; | ||
235 | } | ||
236 | |||
237 | static int userspace_postsuspend(struct dm_dirty_log *log) | ||
238 | { | ||
239 | int r; | ||
240 | struct log_c *lc = log->context; | ||
241 | |||
242 | r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND, | ||
243 | NULL, 0, | ||
244 | NULL, NULL); | ||
245 | |||
246 | return r; | ||
247 | } | ||
248 | |||
249 | static int userspace_resume(struct dm_dirty_log *log) | ||
250 | { | ||
251 | int r; | ||
252 | struct log_c *lc = log->context; | ||
253 | |||
254 | lc->in_sync_hint = 0; | ||
255 | r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME, | ||
256 | NULL, 0, | ||
257 | NULL, NULL); | ||
258 | |||
259 | return r; | ||
260 | } | ||
261 | |||
262 | static uint32_t userspace_get_region_size(struct dm_dirty_log *log) | ||
263 | { | ||
264 | struct log_c *lc = log->context; | ||
265 | |||
266 | return lc->region_size; | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * userspace_is_clean | ||
271 | * | ||
272 | * Check whether a region is clean. If there is any sort of | ||
273 | * failure when consulting the server, we return not clean. | ||
274 | * | ||
275 | * Returns: 1 if clean, 0 otherwise | ||
276 | */ | ||
277 | static int userspace_is_clean(struct dm_dirty_log *log, region_t region) | ||
278 | { | ||
279 | int r; | ||
280 | uint64_t region64 = (uint64_t)region; | ||
281 | int64_t is_clean; | ||
282 | size_t rdata_size; | ||
283 | struct log_c *lc = log->context; | ||
284 | |||
285 | rdata_size = sizeof(is_clean); | ||
286 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN, | ||
287 | (char *)®ion64, sizeof(region64), | ||
288 | (char *)&is_clean, &rdata_size); | ||
289 | |||
290 | return (r) ? 0 : (int)is_clean; | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * userspace_in_sync | ||
295 | * | ||
296 | * Check if the region is in-sync. If there is any sort | ||
297 | * of failure when consulting the server, we assume that | ||
298 | * the region is not in sync. | ||
299 | * | ||
301 | * If 'can_block' is not set, return -EWOULDBLOCK immediately | ||
301 | * | ||
302 | * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK | ||
303 | */ | ||
304 | static int userspace_in_sync(struct dm_dirty_log *log, region_t region, | ||
305 | int can_block) | ||
306 | { | ||
307 | int r; | ||
308 | uint64_t region64 = region; | ||
309 | int64_t in_sync; | ||
310 | size_t rdata_size; | ||
311 | struct log_c *lc = log->context; | ||
312 | |||
313 | /* | ||
314 | * We can never respond directly - even if in_sync_hint is | ||
315 | * set. This is because another machine could see a device | ||
316 | * failure and mark the region out-of-sync. If we don't go | ||
317 | * to userspace to ask, we might think the region is in-sync | ||
318 | * and allow a read to pick up data that is stale. (This is | ||
319 | * very unlikely if a device actually fails; but it is very | ||
320 | * likely if a connection to one device from one machine fails.) | ||
321 | * | ||
322 | * There still might be a problem if the mirror caches the region | ||
323 | * state as in-sync... but then this call would not be made. So, | ||
324 | * that is a mirror problem. | ||
325 | */ | ||
326 | if (!can_block) | ||
327 | return -EWOULDBLOCK; | ||
328 | |||
329 | rdata_size = sizeof(in_sync); | ||
330 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC, | ||
331 | (char *)®ion64, sizeof(region64), | ||
332 | (char *)&in_sync, &rdata_size); | ||
333 | return (r) ? 0 : (int)in_sync; | ||
334 | } | ||
335 | |||
336 | /* | ||
337 | * userspace_flush | ||
338 | * | ||
339 | * This function is ok to block. | ||
340 | * The flush happens in two stages. First, it sends all | ||
341 | * clear/mark requests that are on the list. Then it | ||
342 | * tells the server to commit them. This gives the | ||
343 | * server a chance to optimise the commit, instead of | ||
344 | * doing it for every request. | ||
345 | * | ||
346 | * Additionally, we could implement another thread that | ||
347 | * sends the requests up to the server - reducing the | ||
348 | * load on flush. Then the flush would have less in | ||
349 | * the list and be responsible for the finishing commit. | ||
350 | * | ||
351 | * Returns: 0 on success, < 0 on failure | ||
352 | */ | ||
353 | static int userspace_flush(struct dm_dirty_log *log) | ||
354 | { | ||
355 | int r = 0; | ||
356 | unsigned long flags; | ||
357 | struct log_c *lc = log->context; | ||
358 | LIST_HEAD(flush_list); | ||
359 | struct flush_entry *fe, *tmp_fe; | ||
360 | |||
361 | spin_lock_irqsave(&lc->flush_lock, flags); | ||
362 | list_splice_init(&lc->flush_list, &flush_list); | ||
363 | spin_unlock_irqrestore(&lc->flush_lock, flags); | ||
364 | |||
365 | if (list_empty(&flush_list)) | ||
366 | return 0; | ||
367 | |||
368 | /* | ||
369 | * FIXME: Count up requests, group request types, | ||
370 | * allocate memory to stick all requests in and | ||
371 | * send to server in one go. Failing the allocation, | ||
372 | * do it one by one. | ||
373 | */ | ||
374 | |||
375 | list_for_each_entry(fe, &flush_list, list) { | ||
376 | r = userspace_do_request(lc, lc->uuid, fe->type, | ||
377 | (char *)&fe->region, | ||
378 | sizeof(fe->region), | ||
379 | NULL, NULL); | ||
380 | if (r) | ||
381 | goto fail; | ||
382 | } | ||
383 | |||
384 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, | ||
385 | NULL, 0, NULL, NULL); | ||
386 | |||
387 | fail: | ||
388 | /* | ||
389 | * We can safely remove these entries, even if failure. | ||
390 | * Calling code will receive an error and will know that | ||
391 | * the log facility has failed. | ||
392 | */ | ||
393 | list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) { | ||
394 | list_del(&fe->list); | ||
395 | mempool_free(fe, flush_entry_pool); | ||
396 | } | ||
397 | |||
398 | if (r) | ||
399 | dm_table_event(lc->ti->table); | ||
400 | |||
401 | return r; | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * userspace_mark_region | ||
406 | * | ||
407 | * This function should avoid blocking unless absolutely required. | ||
408 | * (Memory allocation is valid for blocking.) | ||
409 | */ | ||
410 | static void userspace_mark_region(struct dm_dirty_log *log, region_t region) | ||
411 | { | ||
412 | unsigned long flags; | ||
413 | struct log_c *lc = log->context; | ||
414 | struct flush_entry *fe; | ||
415 | |||
416 | /* Wait for an allocation, but _never_ fail */ | ||
417 | fe = mempool_alloc(flush_entry_pool, GFP_NOIO); | ||
418 | BUG_ON(!fe); | ||
419 | |||
420 | spin_lock_irqsave(&lc->flush_lock, flags); | ||
421 | fe->type = DM_ULOG_MARK_REGION; | ||
422 | fe->region = region; | ||
423 | list_add(&fe->list, &lc->flush_list); | ||
424 | spin_unlock_irqrestore(&lc->flush_lock, flags); | ||
425 | |||
426 | return; | ||
427 | } | ||
428 | |||
429 | /* | ||
430 | * userspace_clear_region | ||
431 | * | ||
432 | * This function must not block. | ||
433 | * So, the alloc can't block. In the worst case, it is ok to | ||
434 | * fail. It would simply mean we can't clear the region. | ||
435 | * Does nothing to current sync context, but does mean | ||
436 | * the region will be re-sync'ed on a reload of the mirror | ||
437 | * even though it is in-sync. | ||
438 | */ | ||
439 | static void userspace_clear_region(struct dm_dirty_log *log, region_t region) | ||
440 | { | ||
441 | unsigned long flags; | ||
442 | struct log_c *lc = log->context; | ||
443 | struct flush_entry *fe; | ||
444 | |||
445 | /* | ||
446 | * If we fail to allocate, we skip the clearing of | ||
447 | * the region. This doesn't hurt us in any way, except | ||
448 | * to cause the region to be resync'ed when the | ||
449 | * device is activated next time. | ||
450 | */ | ||
451 | fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC); | ||
452 | if (!fe) { | ||
453 | DMERR("Failed to allocate memory to clear region."); | ||
454 | return; | ||
455 | } | ||
456 | |||
457 | spin_lock_irqsave(&lc->flush_lock, flags); | ||
458 | fe->type = DM_ULOG_CLEAR_REGION; | ||
459 | fe->region = region; | ||
460 | list_add(&fe->list, &lc->flush_list); | ||
461 | spin_unlock_irqrestore(&lc->flush_lock, flags); | ||
462 | |||
463 | return; | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * userspace_get_resync_work | ||
468 | * | ||
469 | * Get a region that needs recovery. It is valid to return | ||
470 | * an error for this function. | ||
471 | * | ||
472 | * Returns: 1 if region filled, 0 if no work, <0 on error | ||
473 | */ | ||
474 | static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region) | ||
475 | { | ||
476 | int r; | ||
477 | size_t rdata_size; | ||
478 | struct log_c *lc = log->context; | ||
479 | struct { | ||
480 | int64_t i; /* 64-bit for mixed-arch compatibility */ | ||
481 | region_t r; | ||
482 | } pkg; | ||
483 | |||
484 | if (lc->in_sync_hint >= lc->region_count) | ||
485 | return 0; | ||
486 | |||
487 | rdata_size = sizeof(pkg); | ||
488 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK, | ||
489 | NULL, 0, | ||
490 | (char *)&pkg, &rdata_size); | ||
491 | |||
492 | *region = pkg.r; | ||
493 | return (r) ? r : (int)pkg.i; | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * userspace_set_region_sync | ||
498 | * | ||
499 | * Set the sync status of a given region. This function | ||
500 | * must not fail. | ||
501 | */ | ||
502 | static void userspace_set_region_sync(struct dm_dirty_log *log, | ||
503 | region_t region, int in_sync) | ||
504 | { | ||
505 | int r; | ||
506 | struct log_c *lc = log->context; | ||
507 | struct { | ||
508 | region_t r; | ||
509 | int64_t i; | ||
510 | } pkg; | ||
511 | |||
512 | pkg.r = region; | ||
513 | pkg.i = (int64_t)in_sync; | ||
514 | |||
515 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC, | ||
516 | (char *)&pkg, sizeof(pkg), | ||
517 | NULL, NULL); | ||
518 | |||
519 | /* | ||
520 | * It would be nice to be able to report failures. | ||
521 | * However, it is easy enough to detect and resolve. | ||
522 | */ | ||
523 | return; | ||
524 | } | ||
525 | |||
526 | /* | ||
527 | * userspace_get_sync_count | ||
528 | * | ||
529 | * If there is any sort of failure when consulting the server, | ||
530 | * we assume that the sync count is zero. | ||
531 | * | ||
532 | * Returns: sync count on success, 0 on failure | ||
533 | */ | ||
534 | static region_t userspace_get_sync_count(struct dm_dirty_log *log) | ||
535 | { | ||
536 | int r; | ||
537 | size_t rdata_size; | ||
538 | uint64_t sync_count; | ||
539 | struct log_c *lc = log->context; | ||
540 | |||
541 | rdata_size = sizeof(sync_count); | ||
542 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT, | ||
543 | NULL, 0, | ||
544 | (char *)&sync_count, &rdata_size); | ||
545 | |||
546 | if (r) | ||
547 | return 0; | ||
548 | |||
549 | if (sync_count >= lc->region_count) | ||
550 | lc->in_sync_hint = lc->region_count; | ||
551 | |||
552 | return (region_t)sync_count; | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * userspace_status | ||
557 | * | ||
558 | * Returns: amount of space consumed | ||
559 | */ | ||
560 | static int userspace_status(struct dm_dirty_log *log, status_type_t status_type, | ||
561 | char *result, unsigned maxlen) | ||
562 | { | ||
563 | int r = 0; | ||
564 | size_t sz = (size_t)maxlen; | ||
565 | struct log_c *lc = log->context; | ||
566 | |||
567 | switch (status_type) { | ||
568 | case STATUSTYPE_INFO: | ||
569 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO, | ||
570 | NULL, 0, | ||
571 | result, &sz); | ||
572 | |||
573 | if (r) { | ||
574 | sz = 0; | ||
575 | DMEMIT("%s 1 COM_FAILURE", log->type->name); | ||
576 | } | ||
577 | break; | ||
578 | case STATUSTYPE_TABLE: | ||
579 | sz = 0; | ||
580 | DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1, | ||
581 | lc->uuid, lc->usr_argv_str); | ||
582 | break; | ||
583 | } | ||
584 | return (r) ? 0 : (int)sz; | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * userspace_is_remote_recovering | ||
589 | * | ||
590 | * Returns: 1 if region recovering, 0 otherwise | ||
591 | */ | ||
592 | static int userspace_is_remote_recovering(struct dm_dirty_log *log, | ||
593 | region_t region) | ||
594 | { | ||
595 | int r; | ||
596 | uint64_t region64 = region; | ||
597 | struct log_c *lc = log->context; | ||
598 | static unsigned long limit; | ||
599 | struct { | ||
600 | int64_t is_recovering; | ||
601 | uint64_t in_sync_hint; | ||
602 | } pkg; | ||
603 | size_t rdata_size = sizeof(pkg); | ||
604 | |||
605 | /* | ||
606 | * Once the mirror has been reported to be in-sync, | ||
607 | * it will never again ask for recovery work. So, | ||
608 | * we can safely say there is not a remote machine | ||
609 | * recovering if the device is in-sync. (in_sync_hint | ||
610 | * must be reset at resume time.) | ||
611 | */ | ||
612 | if (region < lc->in_sync_hint) | ||
613 | return 0; | ||
614 | else if (time_before(jiffies, limit)) | ||
615 | return 1; | ||
616 | |||
617 | limit = jiffies + (HZ / 4); | ||
618 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING, | ||
619 | (char *)®ion64, sizeof(region64), | ||
620 | (char *)&pkg, &rdata_size); | ||
621 | if (r) | ||
622 | return 1; | ||
623 | |||
624 | lc->in_sync_hint = pkg.in_sync_hint; | ||
625 | |||
626 | return (int)pkg.is_recovering; | ||
627 | } | ||
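
Two details above are worth noting. First, the answer is refreshed at most four times a second (HZ/4); between refreshes the function answers pessimistically (recovering), which is safe for this caller. Second, time_before() keeps the comparison correct when jiffies wraps. The idiom in isolation, with hypothetical names:

#include <linux/jiffies.h>

/* Sketch of the throttled-query idiom used above: call the expensive
 * query at most once per HZ/4 and return a cached answer in between.
 * time_before() is the wrap-safe way to compare against jiffies. */
static int throttled_query(int (*do_query)(void))
{
	static unsigned long next_query;
	static int cached = 1;		/* pessimistic between queries */

	if (time_before(jiffies, next_query))
		return cached;

	next_query = jiffies + HZ / 4;
	cached = do_query();
	return cached;
}
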
628 | |||
629 | static struct dm_dirty_log_type _userspace_type = { | ||
630 | .name = "userspace", | ||
631 | .module = THIS_MODULE, | ||
632 | .ctr = userspace_ctr, | ||
633 | .dtr = userspace_dtr, | ||
634 | .presuspend = userspace_presuspend, | ||
635 | .postsuspend = userspace_postsuspend, | ||
636 | .resume = userspace_resume, | ||
637 | .get_region_size = userspace_get_region_size, | ||
638 | .is_clean = userspace_is_clean, | ||
639 | .in_sync = userspace_in_sync, | ||
640 | .flush = userspace_flush, | ||
641 | .mark_region = userspace_mark_region, | ||
642 | .clear_region = userspace_clear_region, | ||
643 | .get_resync_work = userspace_get_resync_work, | ||
644 | .set_region_sync = userspace_set_region_sync, | ||
645 | .get_sync_count = userspace_get_sync_count, | ||
646 | .status = userspace_status, | ||
647 | .is_remote_recovering = userspace_is_remote_recovering, | ||
648 | }; | ||
649 | |||
650 | static int __init userspace_dirty_log_init(void) | ||
651 | { | ||
652 | int r = 0; | ||
653 | |||
654 | flush_entry_pool = mempool_create(100, flush_entry_alloc, | ||
655 | flush_entry_free, NULL); | ||
656 | |||
657 | if (!flush_entry_pool) { | ||
658 | DMWARN("Unable to create flush_entry_pool: No memory."); | ||
659 | return -ENOMEM; | ||
660 | } | ||
661 | |||
662 | r = dm_ulog_tfr_init(); | ||
663 | if (r) { | ||
664 | DMWARN("Unable to initialize userspace log communications"); | ||
665 | mempool_destroy(flush_entry_pool); | ||
666 | return r; | ||
667 | } | ||
668 | |||
669 | r = dm_dirty_log_type_register(&_userspace_type); | ||
670 | if (r) { | ||
671 | DMWARN("Couldn't register userspace dirty log type"); | ||
672 | dm_ulog_tfr_exit(); | ||
673 | mempool_destroy(flush_entry_pool); | ||
674 | return r; | ||
675 | } | ||
676 | |||
677 | DMINFO("version 1.0.0 loaded"); | ||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | static void __exit userspace_dirty_log_exit(void) | ||
682 | { | ||
683 | dm_dirty_log_type_unregister(&_userspace_type); | ||
684 | dm_ulog_tfr_exit(); | ||
685 | mempool_destroy(flush_entry_pool); | ||
686 | |||
687 | DMINFO("version 1.0.0 unloaded"); | ||
688 | return; | ||
689 | } | ||
690 | |||
691 | module_init(userspace_dirty_log_init); | ||
692 | module_exit(userspace_dirty_log_exit); | ||
693 | |||
694 | MODULE_DESCRIPTION(DM_NAME " userspace dirty log link"); | ||
695 | MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>"); | ||
696 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c new file mode 100644 index 000000000000..0ca1ee768a1f --- /dev/null +++ b/drivers/md/dm-log-userspace-transfer.c | |||
@@ -0,0 +1,276 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2009 Red Hat, Inc. | ||
3 | * | ||
4 | * This file is released under the LGPL. | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <net/sock.h> | ||
10 | #include <linux/workqueue.h> | ||
11 | #include <linux/connector.h> | ||
12 | #include <linux/device-mapper.h> | ||
13 | #include <linux/dm-log-userspace.h> | ||
14 | |||
15 | #include "dm-log-userspace-transfer.h" | ||
16 | |||
17 | static uint32_t dm_ulog_seq; | ||
18 | |||
19 | /* | ||
20 | * Netlink/Connector is an unreliable protocol. How long should | ||
21 | * we wait for a response before assuming it was lost and retrying? | ||
22 | * (If we do receive a response after this time, it will be discarded | ||
23 | * and we will wait for the response to the resent request.) | ||
24 | */ | ||
25 | #define DM_ULOG_RETRY_TIMEOUT (15 * HZ) | ||
26 | |||
27 | /* | ||
28 | * Pre-allocated space for speed | ||
29 | */ | ||
30 | #define DM_ULOG_PREALLOCED_SIZE 512 | ||
31 | static struct cn_msg *prealloced_cn_msg; | ||
32 | static struct dm_ulog_request *prealloced_ulog_tfr; | ||
33 | |||
34 | static struct cb_id ulog_cn_id = { | ||
35 | .idx = CN_IDX_DM, | ||
36 | .val = CN_VAL_DM_USERSPACE_LOG | ||
37 | }; | ||
38 | |||
39 | static DEFINE_MUTEX(dm_ulog_lock); | ||
40 | |||
41 | struct receiving_pkg { | ||
42 | struct list_head list; | ||
43 | struct completion complete; | ||
44 | |||
45 | uint32_t seq; | ||
46 | |||
47 | int error; | ||
48 | size_t *data_size; | ||
49 | char *data; | ||
50 | }; | ||
51 | |||
52 | static DEFINE_SPINLOCK(receiving_list_lock); | ||
53 | static struct list_head receiving_list; | ||
54 | |||
55 | static int dm_ulog_sendto_server(struct dm_ulog_request *tfr) | ||
56 | { | ||
57 | int r; | ||
58 | struct cn_msg *msg = prealloced_cn_msg; | ||
59 | |||
60 | memset(msg, 0, sizeof(struct cn_msg)); | ||
61 | |||
62 | msg->id.idx = ulog_cn_id.idx; | ||
63 | msg->id.val = ulog_cn_id.val; | ||
64 | msg->ack = 0; | ||
65 | msg->seq = tfr->seq; | ||
66 | msg->len = sizeof(struct dm_ulog_request) + tfr->data_size; | ||
67 | |||
68 | r = cn_netlink_send(msg, 0, gfp_any()); | ||
69 | |||
70 | return r; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Parameters for this function can be either msg or tfr, but not | ||
75 | * both. This function fills in the reply for a waiting request. | ||
76 | * If just msg is given, then the reply is simply an ACK from userspace | ||
77 | * that the request was received. | ||
78 | * | ||
79 | * Returns: 0 on success, -ENOENT on failure | ||
80 | */ | ||
81 | static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr) | ||
82 | { | ||
83 | uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0; | ||
84 | struct receiving_pkg *pkg; | ||
85 | |||
86 | /* | ||
87 | * The 'receiving_pkg' entries in this list are allocated | ||
88 | * on the caller's stack in 'dm_consult_userspace'. | ||
89 | * Each process that is waiting for a reply from the user | ||
90 | * space server will have an entry in this list. | ||
91 | * | ||
92 | * We are safe to do it this way because the stack space | ||
93 | * is unique to each process, but still addressable by | ||
94 | * other processes. | ||
95 | */ | ||
96 | list_for_each_entry(pkg, &receiving_list, list) { | ||
97 | if (rtn_seq != pkg->seq) | ||
98 | continue; | ||
99 | |||
100 | if (msg) { | ||
101 | pkg->error = -msg->ack; | ||
102 | /* | ||
103 | * If we are trying again, we will need to know our | ||
104 | * storage capacity. Otherwise, along with the | ||
105 | * error code, we make explicit that we have no data. | ||
106 | */ | ||
107 | if (pkg->error != -EAGAIN) | ||
108 | *(pkg->data_size) = 0; | ||
109 | } else if (tfr->data_size > *(pkg->data_size)) { | ||
110 | DMERR("Insufficient space to receive package [%u] " | ||
111 | "(%u vs %lu)", tfr->request_type, | ||
112 | tfr->data_size, *(pkg->data_size)); | ||
113 | |||
114 | *(pkg->data_size) = 0; | ||
115 | pkg->error = -ENOSPC; | ||
116 | } else { | ||
117 | pkg->error = tfr->error; | ||
118 | memcpy(pkg->data, tfr->data, tfr->data_size); | ||
119 | *(pkg->data_size) = tfr->data_size; | ||
120 | } | ||
121 | complete(&pkg->complete); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | return -ENOENT; | ||
126 | } | ||
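
fill_pkg() is one half of a rendezvous: each caller of dm_consult_userspace() parks a receiving_pkg on its own stack, links it into receiving_list, and sleeps on the embedded completion; the connector callback looks the sequence number up and completes it. Stripped of the transport details, the pattern looks like this (hypothetical names, a sketch rather than the patch's code):

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/errno.h>

struct waiter {
	struct list_head list;
	struct completion done;
	u32 seq;
	int error;
};

static LIST_HEAD(waiters);
static DEFINE_SPINLOCK(waiters_lock);

/* Caller side: the waiter lives on this function's stack, which is
 * safe because it is unlinked before returning, completed or not.
 * (Both sides run in process context here; use the _irqsave lock
 * variants if the responder can run from IRQ context.) */
static int wait_for_reply(u32 seq, unsigned long timeout)
{
	struct waiter w = { .seq = seq };
	unsigned long left;

	init_completion(&w.done);
	spin_lock(&waiters_lock);
	list_add(&w.list, &waiters);
	spin_unlock(&waiters_lock);

	left = wait_for_completion_timeout(&w.done, timeout);

	spin_lock(&waiters_lock);
	list_del_init(&w.list);
	spin_unlock(&waiters_lock);

	return left ? w.error : -ETIMEDOUT;
}

/* Responder side: match by sequence number and wake the sleeper. */
static int complete_reply(u32 seq, int error)
{
	struct waiter *w;
	int ret = -ENOENT;

	spin_lock(&waiters_lock);
	list_for_each_entry(w, &waiters, list) {
		if (w->seq != seq)
			continue;
		w->error = error;
		complete(&w->done);
		ret = 0;
		break;
	}
	spin_unlock(&waiters_lock);
	return ret;
}
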
127 | |||
128 | /* | ||
129 | * This is the connector callback that delivers data | ||
130 | * that was sent from userspace. | ||
131 | */ | ||
132 | static void cn_ulog_callback(void *data) | ||
133 | { | ||
134 | struct cn_msg *msg = (struct cn_msg *)data; | ||
135 | struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); | ||
136 | |||
137 | spin_lock(&receiving_list_lock); | ||
138 | if (msg->len == 0) | ||
139 | fill_pkg(msg, NULL); | ||
140 | else if (msg->len < sizeof(*tfr)) | ||
141 | DMERR("Incomplete message received (expected %u, got %u): [%u]", | ||
142 | (unsigned)sizeof(*tfr), msg->len, msg->seq); | ||
143 | else | ||
144 | fill_pkg(NULL, tfr); | ||
145 | spin_unlock(&receiving_list_lock); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * dm_consult_userspace | ||
150 | * @uuid: log's uuid (must be DM_UUID_LEN in size) | ||
151 | * @request_type: found in include/linux/dm-log-userspace.h | ||
152 | * @data: data to tx to the server | ||
153 | * @data_size: size of data in bytes | ||
154 | * @rdata: place to put return data from server | ||
155 | * @rdata_size: value-result (amount of space given/amount of space used) | ||
156 | * | ||
157 | * rdata_size is undefined on failure. | ||
158 | * | ||
159 | * Memory used to communicate with userspace is zeroed | ||
160 | * before populating, to ensure that no unwanted bits leak | ||
161 | * from kernel space to user space. All userspace log | ||
162 | * communication goes through this function. | ||
163 | * | ||
164 | * Returns: 0 on success, -EXXX on failure | ||
165 | **/ | ||
166 | int dm_consult_userspace(const char *uuid, int request_type, | ||
167 | char *data, size_t data_size, | ||
168 | char *rdata, size_t *rdata_size) | ||
169 | { | ||
170 | int r = 0; | ||
171 | size_t dummy = 0; | ||
172 | int overhead_size = | ||
173 | sizeof(struct dm_ulog_request) + sizeof(struct cn_msg); | ||
174 | struct dm_ulog_request *tfr = prealloced_ulog_tfr; | ||
175 | struct receiving_pkg pkg; | ||
176 | |||
177 | if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { | ||
178 | DMINFO("Size of tfr exceeds preallocated size"); | ||
179 | return -EINVAL; | ||
180 | } | ||
181 | |||
182 | if (!rdata_size) | ||
183 | rdata_size = &dummy; | ||
184 | resend: | ||
185 | /* | ||
186 | * We serialize the sending of requests so we can | ||
187 | * use the preallocated space. | ||
188 | */ | ||
189 | mutex_lock(&dm_ulog_lock); | ||
190 | |||
191 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); | ||
192 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); | ||
193 | tfr->seq = dm_ulog_seq++; | ||
194 | |||
195 | /* | ||
196 | * Must be valid request type (all other bits set to | ||
197 | * zero). This reserves other bits for possible future | ||
198 | * use. | ||
199 | */ | ||
200 | tfr->request_type = request_type & DM_ULOG_REQUEST_MASK; | ||
201 | |||
202 | tfr->data_size = data_size; | ||
203 | if (data && data_size) | ||
204 | memcpy(tfr->data, data, data_size); | ||
205 | |||
206 | memset(&pkg, 0, sizeof(pkg)); | ||
207 | init_completion(&pkg.complete); | ||
208 | pkg.seq = tfr->seq; | ||
209 | pkg.data_size = rdata_size; | ||
210 | pkg.data = rdata; | ||
211 | spin_lock(&receiving_list_lock); | ||
212 | list_add(&(pkg.list), &receiving_list); | ||
213 | spin_unlock(&receiving_list_lock); | ||
214 | |||
215 | r = dm_ulog_sendto_server(tfr); | ||
216 | |||
217 | mutex_unlock(&dm_ulog_lock); | ||
218 | |||
219 | if (r) { | ||
220 | DMERR("Unable to send log request [%u] to userspace: %d", | ||
221 | request_type, r); | ||
222 | spin_lock(&receiving_list_lock); | ||
223 | list_del_init(&(pkg.list)); | ||
224 | spin_unlock(&receiving_list_lock); | ||
225 | |||
226 | goto out; | ||
227 | } | ||
228 | |||
229 | r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT); | ||
230 | spin_lock(&receiving_list_lock); | ||
231 | list_del_init(&(pkg.list)); | ||
232 | spin_unlock(&receiving_list_lock); | ||
233 | if (!r) { | ||
234 | DMWARN("[%s] Request timed out: [%u/%u] - retrying", | ||
235 | (strlen(uuid) > 8) ? | ||
236 | (uuid + (strlen(uuid) - 8)) : (uuid), | ||
237 | request_type, pkg.seq); | ||
238 | goto resend; | ||
239 | } | ||
240 | |||
241 | r = pkg.error; | ||
242 | if (r == -EAGAIN) | ||
243 | goto resend; | ||
244 | |||
245 | out: | ||
246 | return r; | ||
247 | } | ||
248 | |||
249 | int dm_ulog_tfr_init(void) | ||
250 | { | ||
251 | int r; | ||
252 | void *prealloced; | ||
253 | |||
254 | INIT_LIST_HEAD(&receiving_list); | ||
255 | |||
256 | prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL); | ||
257 | if (!prealloced) | ||
258 | return -ENOMEM; | ||
259 | |||
260 | prealloced_cn_msg = prealloced; | ||
261 | prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg); | ||
262 | |||
263 | r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback); | ||
264 | if (r) { | ||
265 | kfree(prealloced_cn_msg); | ||
266 | return r; | ||
267 | } | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | void dm_ulog_tfr_exit(void) | ||
273 | { | ||
274 | cn_del_callback(&ulog_cn_id); | ||
275 | kfree(prealloced_cn_msg); | ||
276 | } | ||
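
One allocation backs every request: a single 512-byte buffer holds the cn_msg header, the dm_ulog_request header, and the request payload back to back, which is why dm_ulog_lock serializes senders. A sketch of the arithmetic, assuming the headers this series adds:

#include <linux/connector.h>
#include <linux/dm-log-userspace.h>

#define PREALLOCED_SIZE 512	/* mirrors DM_ULOG_PREALLOCED_SIZE */

/*
 * prealloced ──► +---------------+------------------------+---------+
 *                | struct cn_msg | struct dm_ulog_request | payload |
 *                +---------------+------------------------+---------+
 *                |<--------------- 512 bytes total ----------------->|
 *
 * prealloced_ulog_tfr points just past the cn_msg header, so the
 * payload a request may carry is whatever remains after both headers.
 */
static inline size_t max_request_payload(void)
{
	return PREALLOCED_SIZE - sizeof(struct cn_msg)
			       - sizeof(struct dm_ulog_request);
}
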
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h new file mode 100644 index 000000000000..c26d8e4e2710 --- /dev/null +++ b/drivers/md/dm-log-userspace-transfer.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2009 Red Hat, Inc. | ||
3 | * | ||
4 | * This file is released under the LGPL. | ||
5 | */ | ||
6 | |||
7 | #ifndef __DM_LOG_USERSPACE_TRANSFER_H__ | ||
8 | #define __DM_LOG_USERSPACE_TRANSFER_H__ | ||
9 | |||
10 | #define DM_MSG_PREFIX "dm-log-userspace" | ||
11 | |||
12 | int dm_ulog_tfr_init(void); | ||
13 | void dm_ulog_tfr_exit(void); | ||
14 | int dm_consult_userspace(const char *uuid, int request_type, | ||
15 | char *data, size_t data_size, | ||
16 | char *rdata, size_t *rdata_size); | ||
17 | |||
18 | #endif /* __DM_LOG_USERSPACE_TRANSFER_H__ */ | ||
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 6fa8ccf91c70..9443896ede07 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -412,11 +412,12 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, | |||
412 | /* | 412 | /* |
413 | * Buffer holds both header and bitset. | 413 | * Buffer holds both header and bitset. |
414 | */ | 414 | */ |
415 | buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + | 415 | buf_size = |
416 | bitset_size, | 416 | dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size, |
417 | ti->limits.logical_block_size); | 417 | bdev_logical_block_size(lc->header_location. |
418 | bdev)); | ||
418 | 419 | ||
419 | if (buf_size > dev->bdev->bd_inode->i_size) { | 420 | if (buf_size > i_size_read(dev->bdev->bd_inode)) { |
420 | DMWARN("log device %s too small: need %llu bytes", | 421 | DMWARN("log device %s too small: need %llu bytes", |
421 | dev->name, (unsigned long long)buf_size); | 422 | dev->name, (unsigned long long)buf_size); |
422 | kfree(lc); | 423 | kfree(lc); |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 6a386ab4f7eb..c70604a20897 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/device-mapper.h> | 8 | #include <linux/device-mapper.h> |
9 | 9 | ||
10 | #include "dm-path-selector.h" | 10 | #include "dm-path-selector.h" |
11 | #include "dm-bio-record.h" | ||
12 | #include "dm-uevent.h" | 11 | #include "dm-uevent.h" |
13 | 12 | ||
14 | #include <linux/ctype.h> | 13 | #include <linux/ctype.h> |
@@ -35,6 +34,7 @@ struct pgpath { | |||
35 | 34 | ||
36 | struct dm_path path; | 35 | struct dm_path path; |
37 | struct work_struct deactivate_path; | 36 | struct work_struct deactivate_path; |
37 | struct work_struct activate_path; | ||
38 | }; | 38 | }; |
39 | 39 | ||
40 | #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) | 40 | #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) |
@@ -64,8 +64,6 @@ struct multipath { | |||
64 | spinlock_t lock; | 64 | spinlock_t lock; |
65 | 65 | ||
66 | const char *hw_handler_name; | 66 | const char *hw_handler_name; |
67 | struct work_struct activate_path; | ||
68 | struct pgpath *pgpath_to_activate; | ||
69 | unsigned nr_priority_groups; | 67 | unsigned nr_priority_groups; |
70 | struct list_head priority_groups; | 68 | struct list_head priority_groups; |
71 | unsigned pg_init_required; /* pg_init needs calling? */ | 69 | unsigned pg_init_required; /* pg_init needs calling? */ |
@@ -84,7 +82,7 @@ struct multipath { | |||
84 | unsigned pg_init_count; /* Number of times pg_init called */ | 82 | unsigned pg_init_count; /* Number of times pg_init called */ |
85 | 83 | ||
86 | struct work_struct process_queued_ios; | 84 | struct work_struct process_queued_ios; |
87 | struct bio_list queued_ios; | 85 | struct list_head queued_ios; |
88 | unsigned queue_size; | 86 | unsigned queue_size; |
89 | 87 | ||
90 | struct work_struct trigger_event; | 88 | struct work_struct trigger_event; |
@@ -101,7 +99,7 @@ struct multipath { | |||
101 | */ | 99 | */ |
102 | struct dm_mpath_io { | 100 | struct dm_mpath_io { |
103 | struct pgpath *pgpath; | 101 | struct pgpath *pgpath; |
104 | struct dm_bio_details details; | 102 | size_t nr_bytes; |
105 | }; | 103 | }; |
106 | 104 | ||
107 | typedef int (*action_fn) (struct pgpath *pgpath); | 105 | typedef int (*action_fn) (struct pgpath *pgpath); |
@@ -128,6 +126,7 @@ static struct pgpath *alloc_pgpath(void) | |||
128 | if (pgpath) { | 126 | if (pgpath) { |
129 | pgpath->is_active = 1; | 127 | pgpath->is_active = 1; |
130 | INIT_WORK(&pgpath->deactivate_path, deactivate_path); | 128 | INIT_WORK(&pgpath->deactivate_path, deactivate_path); |
129 | INIT_WORK(&pgpath->activate_path, activate_path); | ||
131 | } | 130 | } |
132 | 131 | ||
133 | return pgpath; | 132 | return pgpath; |
@@ -160,7 +159,6 @@ static struct priority_group *alloc_priority_group(void) | |||
160 | 159 | ||
161 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | 160 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) |
162 | { | 161 | { |
163 | unsigned long flags; | ||
164 | struct pgpath *pgpath, *tmp; | 162 | struct pgpath *pgpath, *tmp; |
165 | struct multipath *m = ti->private; | 163 | struct multipath *m = ti->private; |
166 | 164 | ||
@@ -169,10 +167,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | |||
169 | if (m->hw_handler_name) | 167 | if (m->hw_handler_name) |
170 | scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); | 168 | scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); |
171 | dm_put_device(ti, pgpath->path.dev); | 169 | dm_put_device(ti, pgpath->path.dev); |
172 | spin_lock_irqsave(&m->lock, flags); | ||
173 | if (m->pgpath_to_activate == pgpath) | ||
174 | m->pgpath_to_activate = NULL; | ||
175 | spin_unlock_irqrestore(&m->lock, flags); | ||
176 | free_pgpath(pgpath); | 170 | free_pgpath(pgpath); |
177 | } | 171 | } |
178 | } | 172 | } |
@@ -198,11 +192,11 @@ static struct multipath *alloc_multipath(struct dm_target *ti) | |||
198 | m = kzalloc(sizeof(*m), GFP_KERNEL); | 192 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
199 | if (m) { | 193 | if (m) { |
200 | INIT_LIST_HEAD(&m->priority_groups); | 194 | INIT_LIST_HEAD(&m->priority_groups); |
195 | INIT_LIST_HEAD(&m->queued_ios); | ||
201 | spin_lock_init(&m->lock); | 196 | spin_lock_init(&m->lock); |
202 | m->queue_io = 1; | 197 | m->queue_io = 1; |
203 | INIT_WORK(&m->process_queued_ios, process_queued_ios); | 198 | INIT_WORK(&m->process_queued_ios, process_queued_ios); |
204 | INIT_WORK(&m->trigger_event, trigger_event); | 199 | INIT_WORK(&m->trigger_event, trigger_event); |
205 | INIT_WORK(&m->activate_path, activate_path); | ||
206 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); | 200 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); |
207 | if (!m->mpio_pool) { | 201 | if (!m->mpio_pool) { |
208 | kfree(m); | 202 | kfree(m); |
@@ -250,11 +244,12 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath) | |||
250 | m->pg_init_count = 0; | 244 | m->pg_init_count = 0; |
251 | } | 245 | } |
252 | 246 | ||
253 | static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg) | 247 | static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg, |
248 | size_t nr_bytes) | ||
254 | { | 249 | { |
255 | struct dm_path *path; | 250 | struct dm_path *path; |
256 | 251 | ||
257 | path = pg->ps.type->select_path(&pg->ps, &m->repeat_count); | 252 | path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes); |
258 | if (!path) | 253 | if (!path) |
259 | return -ENXIO; | 254 | return -ENXIO; |
260 | 255 | ||
@@ -266,7 +261,7 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg) | |||
266 | return 0; | 261 | return 0; |
267 | } | 262 | } |
268 | 263 | ||
269 | static void __choose_pgpath(struct multipath *m) | 264 | static void __choose_pgpath(struct multipath *m, size_t nr_bytes) |
270 | { | 265 | { |
271 | struct priority_group *pg; | 266 | struct priority_group *pg; |
272 | unsigned bypassed = 1; | 267 | unsigned bypassed = 1; |
@@ -278,12 +273,12 @@ static void __choose_pgpath(struct multipath *m) | |||
278 | if (m->next_pg) { | 273 | if (m->next_pg) { |
279 | pg = m->next_pg; | 274 | pg = m->next_pg; |
280 | m->next_pg = NULL; | 275 | m->next_pg = NULL; |
281 | if (!__choose_path_in_pg(m, pg)) | 276 | if (!__choose_path_in_pg(m, pg, nr_bytes)) |
282 | return; | 277 | return; |
283 | } | 278 | } |
284 | 279 | ||
285 | /* Don't change PG until it has no remaining paths */ | 280 | /* Don't change PG until it has no remaining paths */ |
286 | if (m->current_pg && !__choose_path_in_pg(m, m->current_pg)) | 281 | if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes)) |
287 | return; | 282 | return; |
288 | 283 | ||
289 | /* | 284 | /* |
@@ -295,7 +290,7 @@ static void __choose_pgpath(struct multipath *m) | |||
295 | list_for_each_entry(pg, &m->priority_groups, list) { | 290 | list_for_each_entry(pg, &m->priority_groups, list) { |
296 | if (pg->bypassed == bypassed) | 291 | if (pg->bypassed == bypassed) |
297 | continue; | 292 | continue; |
298 | if (!__choose_path_in_pg(m, pg)) | 293 | if (!__choose_path_in_pg(m, pg, nr_bytes)) |
299 | return; | 294 | return; |
300 | } | 295 | } |
301 | } while (bypassed--); | 296 | } while (bypassed--); |
@@ -322,19 +317,21 @@ static int __must_push_back(struct multipath *m) | |||
322 | dm_noflush_suspending(m->ti)); | 317 | dm_noflush_suspending(m->ti)); |
323 | } | 318 | } |
324 | 319 | ||
325 | static int map_io(struct multipath *m, struct bio *bio, | 320 | static int map_io(struct multipath *m, struct request *clone, |
326 | struct dm_mpath_io *mpio, unsigned was_queued) | 321 | struct dm_mpath_io *mpio, unsigned was_queued) |
327 | { | 322 | { |
328 | int r = DM_MAPIO_REMAPPED; | 323 | int r = DM_MAPIO_REMAPPED; |
324 | size_t nr_bytes = blk_rq_bytes(clone); | ||
329 | unsigned long flags; | 325 | unsigned long flags; |
330 | struct pgpath *pgpath; | 326 | struct pgpath *pgpath; |
327 | struct block_device *bdev; | ||
331 | 328 | ||
332 | spin_lock_irqsave(&m->lock, flags); | 329 | spin_lock_irqsave(&m->lock, flags); |
333 | 330 | ||
334 | /* Do we need to select a new pgpath? */ | 331 | /* Do we need to select a new pgpath? */ |
335 | if (!m->current_pgpath || | 332 | if (!m->current_pgpath || |
336 | (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) | 333 | (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) |
337 | __choose_pgpath(m); | 334 | __choose_pgpath(m, nr_bytes); |
338 | 335 | ||
339 | pgpath = m->current_pgpath; | 336 | pgpath = m->current_pgpath; |
340 | 337 | ||
@@ -344,21 +341,28 @@ static int map_io(struct multipath *m, struct bio *bio, | |||
344 | if ((pgpath && m->queue_io) || | 341 | if ((pgpath && m->queue_io) || |
345 | (!pgpath && m->queue_if_no_path)) { | 342 | (!pgpath && m->queue_if_no_path)) { |
346 | /* Queue for the daemon to resubmit */ | 343 | /* Queue for the daemon to resubmit */ |
347 | bio_list_add(&m->queued_ios, bio); | 344 | list_add_tail(&clone->queuelist, &m->queued_ios); |
348 | m->queue_size++; | 345 | m->queue_size++; |
349 | if ((m->pg_init_required && !m->pg_init_in_progress) || | 346 | if ((m->pg_init_required && !m->pg_init_in_progress) || |
350 | !m->queue_io) | 347 | !m->queue_io) |
351 | queue_work(kmultipathd, &m->process_queued_ios); | 348 | queue_work(kmultipathd, &m->process_queued_ios); |
352 | pgpath = NULL; | 349 | pgpath = NULL; |
353 | r = DM_MAPIO_SUBMITTED; | 350 | r = DM_MAPIO_SUBMITTED; |
354 | } else if (pgpath) | 351 | } else if (pgpath) { |
355 | bio->bi_bdev = pgpath->path.dev->bdev; | 352 | bdev = pgpath->path.dev->bdev; |
356 | else if (__must_push_back(m)) | 353 | clone->q = bdev_get_queue(bdev); |
354 | clone->rq_disk = bdev->bd_disk; | ||
355 | } else if (__must_push_back(m)) | ||
357 | r = DM_MAPIO_REQUEUE; | 356 | r = DM_MAPIO_REQUEUE; |
358 | else | 357 | else |
359 | r = -EIO; /* Failed */ | 358 | r = -EIO; /* Failed */ |
360 | 359 | ||
361 | mpio->pgpath = pgpath; | 360 | mpio->pgpath = pgpath; |
361 | mpio->nr_bytes = nr_bytes; | ||
362 | |||
363 | if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io) | ||
364 | pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, | ||
365 | nr_bytes); | ||
362 | 366 | ||
363 | spin_unlock_irqrestore(&m->lock, flags); | 367 | spin_unlock_irqrestore(&m->lock, flags); |
364 | 368 | ||
@@ -396,30 +400,31 @@ static void dispatch_queued_ios(struct multipath *m) | |||
396 | { | 400 | { |
397 | int r; | 401 | int r; |
398 | unsigned long flags; | 402 | unsigned long flags; |
399 | struct bio *bio = NULL, *next; | ||
400 | struct dm_mpath_io *mpio; | 403 | struct dm_mpath_io *mpio; |
401 | union map_info *info; | 404 | union map_info *info; |
405 | struct request *clone, *n; | ||
406 | LIST_HEAD(cl); | ||
402 | 407 | ||
403 | spin_lock_irqsave(&m->lock, flags); | 408 | spin_lock_irqsave(&m->lock, flags); |
404 | bio = bio_list_get(&m->queued_ios); | 409 | list_splice_init(&m->queued_ios, &cl); |
405 | spin_unlock_irqrestore(&m->lock, flags); | 410 | spin_unlock_irqrestore(&m->lock, flags); |
406 | 411 | ||
407 | while (bio) { | 412 | list_for_each_entry_safe(clone, n, &cl, queuelist) { |
408 | next = bio->bi_next; | 413 | list_del_init(&clone->queuelist); |
409 | bio->bi_next = NULL; | ||
410 | 414 | ||
411 | info = dm_get_mapinfo(bio); | 415 | info = dm_get_rq_mapinfo(clone); |
412 | mpio = info->ptr; | 416 | mpio = info->ptr; |
413 | 417 | ||
414 | r = map_io(m, bio, mpio, 1); | 418 | r = map_io(m, clone, mpio, 1); |
415 | if (r < 0) | 419 | if (r < 0) { |
416 | bio_endio(bio, r); | 420 | mempool_free(mpio, m->mpio_pool); |
417 | else if (r == DM_MAPIO_REMAPPED) | 421 | dm_kill_unmapped_request(clone, r); |
418 | generic_make_request(bio); | 422 | } else if (r == DM_MAPIO_REMAPPED) |
419 | else if (r == DM_MAPIO_REQUEUE) | 423 | dm_dispatch_request(clone); |
420 | bio_endio(bio, -EIO); | 424 | else if (r == DM_MAPIO_REQUEUE) { |
421 | 425 | mempool_free(mpio, m->mpio_pool); | |
422 | bio = next; | 426 | dm_requeue_unmapped_request(clone); |
427 | } | ||
423 | } | 428 | } |
424 | } | 429 | } |
425 | 430 | ||
@@ -427,8 +432,8 @@ static void process_queued_ios(struct work_struct *work) | |||
427 | { | 432 | { |
428 | struct multipath *m = | 433 | struct multipath *m = |
429 | container_of(work, struct multipath, process_queued_ios); | 434 | container_of(work, struct multipath, process_queued_ios); |
430 | struct pgpath *pgpath = NULL; | 435 | struct pgpath *pgpath = NULL, *tmp; |
431 | unsigned init_required = 0, must_queue = 1; | 436 | unsigned must_queue = 1; |
432 | unsigned long flags; | 437 | unsigned long flags; |
433 | 438 | ||
434 | spin_lock_irqsave(&m->lock, flags); | 439 | spin_lock_irqsave(&m->lock, flags); |
@@ -437,7 +442,7 @@ static void process_queued_ios(struct work_struct *work) | |||
437 | goto out; | 442 | goto out; |
438 | 443 | ||
439 | if (!m->current_pgpath) | 444 | if (!m->current_pgpath) |
440 | __choose_pgpath(m); | 445 | __choose_pgpath(m, 0); |
441 | 446 | ||
442 | pgpath = m->current_pgpath; | 447 | pgpath = m->current_pgpath; |
443 | 448 | ||
@@ -446,19 +451,15 @@ static void process_queued_ios(struct work_struct *work) | |||
446 | must_queue = 0; | 451 | must_queue = 0; |
447 | 452 | ||
448 | if (m->pg_init_required && !m->pg_init_in_progress && pgpath) { | 453 | if (m->pg_init_required && !m->pg_init_in_progress && pgpath) { |
449 | m->pgpath_to_activate = pgpath; | ||
450 | m->pg_init_count++; | 454 | m->pg_init_count++; |
451 | m->pg_init_required = 0; | 455 | m->pg_init_required = 0; |
452 | m->pg_init_in_progress = 1; | 456 | list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) { |
453 | init_required = 1; | 457 | if (queue_work(kmpath_handlerd, &tmp->activate_path)) |
458 | m->pg_init_in_progress++; | ||
459 | } | ||
454 | } | 460 | } |
455 | |||
456 | out: | 461 | out: |
457 | spin_unlock_irqrestore(&m->lock, flags); | 462 | spin_unlock_irqrestore(&m->lock, flags); |
458 | |||
459 | if (init_required) | ||
460 | queue_work(kmpath_handlerd, &m->activate_path); | ||
461 | |||
462 | if (!must_queue) | 463 | if (!must_queue) |
463 | dispatch_queued_ios(m); | 464 | dispatch_queued_ios(m); |
464 | } | 465 | } |
@@ -553,6 +554,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg, | |||
553 | return -EINVAL; | 554 | return -EINVAL; |
554 | } | 555 | } |
555 | 556 | ||
557 | if (ps_argc > as->argc) { | ||
558 | dm_put_path_selector(pst); | ||
559 | ti->error = "not enough arguments for path selector"; | ||
560 | return -EINVAL; | ||
561 | } | ||
562 | |||
556 | r = pst->create(&pg->ps, ps_argc, as->argv); | 563 | r = pst->create(&pg->ps, ps_argc, as->argv); |
557 | if (r) { | 564 | if (r) { |
558 | dm_put_path_selector(pst); | 565 | dm_put_path_selector(pst); |
@@ -591,9 +598,20 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | |||
591 | } | 598 | } |
592 | 599 | ||
593 | if (m->hw_handler_name) { | 600 | if (m->hw_handler_name) { |
594 | r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev), | 601 | struct request_queue *q = bdev_get_queue(p->path.dev->bdev); |
595 | m->hw_handler_name); | 602 | |
603 | r = scsi_dh_attach(q, m->hw_handler_name); | ||
604 | if (r == -EBUSY) { | ||
605 | /* | ||
606 | * Already attached to different hw_handler, | ||
607 | * try to reattach with correct one. | ||
608 | */ | ||
609 | scsi_dh_detach(q); | ||
610 | r = scsi_dh_attach(q, m->hw_handler_name); | ||
611 | } | ||
612 | |||
596 | if (r < 0) { | 613 | if (r < 0) { |
614 | ti->error = "error attaching hardware handler"; | ||
597 | dm_put_device(ti, p->path.dev); | 615 | dm_put_device(ti, p->path.dev); |
598 | goto bad; | 616 | goto bad; |
599 | } | 617 | } |
@@ -699,6 +717,11 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) | |||
699 | if (!hw_argc) | 717 | if (!hw_argc) |
700 | return 0; | 718 | return 0; |
701 | 719 | ||
720 | if (hw_argc > as->argc) { | ||
721 | ti->error = "not enough arguments for hardware handler"; | ||
722 | return -EINVAL; | ||
723 | } | ||
724 | |||
702 | m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); | 725 | m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); |
703 | request_module("scsi_dh_%s", m->hw_handler_name); | 726 | request_module("scsi_dh_%s", m->hw_handler_name); |
704 | if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { | 727 | if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { |
@@ -823,6 +846,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, | |||
823 | goto bad; | 846 | goto bad; |
824 | } | 847 | } |
825 | 848 | ||
849 | ti->num_flush_requests = 1; | ||
850 | |||
826 | return 0; | 851 | return 0; |
827 | 852 | ||
828 | bad: | 853 | bad: |
@@ -836,25 +861,29 @@ static void multipath_dtr(struct dm_target *ti) | |||
836 | 861 | ||
837 | flush_workqueue(kmpath_handlerd); | 862 | flush_workqueue(kmpath_handlerd); |
838 | flush_workqueue(kmultipathd); | 863 | flush_workqueue(kmultipathd); |
864 | flush_scheduled_work(); | ||
839 | free_multipath(m); | 865 | free_multipath(m); |
840 | } | 866 | } |
841 | 867 | ||
842 | /* | 868 | /* |
843 | * Map bios, recording original fields for later in case we have to resubmit | 869 | * Map cloned requests |
844 | */ | 870 | */ |
845 | static int multipath_map(struct dm_target *ti, struct bio *bio, | 871 | static int multipath_map(struct dm_target *ti, struct request *clone, |
846 | union map_info *map_context) | 872 | union map_info *map_context) |
847 | { | 873 | { |
848 | int r; | 874 | int r; |
849 | struct dm_mpath_io *mpio; | 875 | struct dm_mpath_io *mpio; |
850 | struct multipath *m = (struct multipath *) ti->private; | 876 | struct multipath *m = (struct multipath *) ti->private; |
851 | 877 | ||
852 | mpio = mempool_alloc(m->mpio_pool, GFP_NOIO); | 878 | mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC); |
853 | dm_bio_record(&mpio->details, bio); | 879 | if (!mpio) |
880 | /* ENOMEM, requeue */ | ||
881 | return DM_MAPIO_REQUEUE; | ||
882 | memset(mpio, 0, sizeof(*mpio)); | ||
854 | 883 | ||
855 | map_context->ptr = mpio; | 884 | map_context->ptr = mpio; |
856 | bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); | 885 | clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; |
857 | r = map_io(m, bio, mpio, 0); | 886 | r = map_io(m, clone, mpio, 0); |
858 | if (r < 0 || r == DM_MAPIO_REQUEUE) | 887 | if (r < 0 || r == DM_MAPIO_REQUEUE) |
859 | mempool_free(mpio, m->mpio_pool); | 888 | mempool_free(mpio, m->mpio_pool); |
860 | 889 | ||
@@ -924,9 +953,13 @@ static int reinstate_path(struct pgpath *pgpath) | |||
924 | 953 | ||
925 | pgpath->is_active = 1; | 954 | pgpath->is_active = 1; |
926 | 955 | ||
927 | m->current_pgpath = NULL; | 956 | if (!m->nr_valid_paths++ && m->queue_size) { |
928 | if (!m->nr_valid_paths++ && m->queue_size) | 957 | m->current_pgpath = NULL; |
929 | queue_work(kmultipathd, &m->process_queued_ios); | 958 | queue_work(kmultipathd, &m->process_queued_ios); |
959 | } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { | ||
960 | if (queue_work(kmpath_handlerd, &pgpath->activate_path)) | ||
961 | m->pg_init_in_progress++; | ||
962 | } | ||
930 | 963 | ||
931 | dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, | 964 | dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, |
932 | pgpath->path.dev->name, m->nr_valid_paths); | 965 | pgpath->path.dev->name, m->nr_valid_paths); |
@@ -1102,87 +1135,70 @@ static void pg_init_done(struct dm_path *path, int errors) | |||
1102 | 1135 | ||
1103 | spin_lock_irqsave(&m->lock, flags); | 1136 | spin_lock_irqsave(&m->lock, flags); |
1104 | if (errors) { | 1137 | if (errors) { |
1105 | DMERR("Could not failover device. Error %d.", errors); | 1138 | if (pgpath == m->current_pgpath) { |
1106 | m->current_pgpath = NULL; | 1139 | DMERR("Could not failover device. Error %d.", errors); |
1107 | m->current_pg = NULL; | 1140 | m->current_pgpath = NULL; |
1141 | m->current_pg = NULL; | ||
1142 | } | ||
1108 | } else if (!m->pg_init_required) { | 1143 | } else if (!m->pg_init_required) { |
1109 | m->queue_io = 0; | 1144 | m->queue_io = 0; |
1110 | pg->bypassed = 0; | 1145 | pg->bypassed = 0; |
1111 | } | 1146 | } |
1112 | 1147 | ||
1113 | m->pg_init_in_progress = 0; | 1148 | m->pg_init_in_progress--; |
1114 | queue_work(kmultipathd, &m->process_queued_ios); | 1149 | if (!m->pg_init_in_progress) |
1150 | queue_work(kmultipathd, &m->process_queued_ios); | ||
1115 | spin_unlock_irqrestore(&m->lock, flags); | 1151 | spin_unlock_irqrestore(&m->lock, flags); |
1116 | } | 1152 | } |
1117 | 1153 | ||
1118 | static void activate_path(struct work_struct *work) | 1154 | static void activate_path(struct work_struct *work) |
1119 | { | 1155 | { |
1120 | int ret; | 1156 | int ret; |
1121 | struct multipath *m = | 1157 | struct pgpath *pgpath = |
1122 | container_of(work, struct multipath, activate_path); | 1158 | container_of(work, struct pgpath, activate_path); |
1123 | struct dm_path *path; | ||
1124 | unsigned long flags; | ||
1125 | 1159 | ||
1126 | spin_lock_irqsave(&m->lock, flags); | 1160 | ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev)); |
1127 | path = &m->pgpath_to_activate->path; | 1161 | pg_init_done(&pgpath->path, ret); |
1128 | m->pgpath_to_activate = NULL; | ||
1129 | spin_unlock_irqrestore(&m->lock, flags); | ||
1130 | if (!path) | ||
1131 | return; | ||
1132 | ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); | ||
1133 | pg_init_done(path, ret); | ||
1134 | } | 1162 | } |
1135 | 1163 | ||
1136 | /* | 1164 | /* |
1137 | * end_io handling | 1165 | * end_io handling |
1138 | */ | 1166 | */ |
1139 | static int do_end_io(struct multipath *m, struct bio *bio, | 1167 | static int do_end_io(struct multipath *m, struct request *clone, |
1140 | int error, struct dm_mpath_io *mpio) | 1168 | int error, struct dm_mpath_io *mpio) |
1141 | { | 1169 | { |
1170 | /* | ||
1171 | * We don't queue any clone request inside the multipath target | ||
1172 | * during end I/O handling, since those clone requests don't have | ||
1173 | * bio clones. If we queue them inside the multipath target, | ||
1174 | * we need to make bio clones, that requires memory allocation. | ||
1175 | * (See drivers/md/dm.c:end_clone_bio() about why the clone requests | ||
1176 | * don't have bio clones.) | ||
1177 | * Instead of queueing the clone request here, we queue the original | ||
1178 | * request into dm core, which will remake a clone request and | ||
1179 | * clone bios for it and resubmit it later. | ||
1180 | */ | ||
1181 | int r = DM_ENDIO_REQUEUE; | ||
1142 | unsigned long flags; | 1182 | unsigned long flags; |
1143 | 1183 | ||
1144 | if (!error) | 1184 | if (!error && !clone->errors) |
1145 | return 0; /* I/O complete */ | 1185 | return 0; /* I/O complete */ |
1146 | 1186 | ||
1147 | if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) | ||
1148 | return error; | ||
1149 | |||
1150 | if (error == -EOPNOTSUPP) | 1187 | if (error == -EOPNOTSUPP) |
1151 | return error; | 1188 | return error; |
1152 | 1189 | ||
1153 | spin_lock_irqsave(&m->lock, flags); | ||
1154 | if (!m->nr_valid_paths) { | ||
1155 | if (__must_push_back(m)) { | ||
1156 | spin_unlock_irqrestore(&m->lock, flags); | ||
1157 | return DM_ENDIO_REQUEUE; | ||
1158 | } else if (!m->queue_if_no_path) { | ||
1159 | spin_unlock_irqrestore(&m->lock, flags); | ||
1160 | return -EIO; | ||
1161 | } else { | ||
1162 | spin_unlock_irqrestore(&m->lock, flags); | ||
1163 | goto requeue; | ||
1164 | } | ||
1165 | } | ||
1166 | spin_unlock_irqrestore(&m->lock, flags); | ||
1167 | |||
1168 | if (mpio->pgpath) | 1190 | if (mpio->pgpath) |
1169 | fail_path(mpio->pgpath); | 1191 | fail_path(mpio->pgpath); |
1170 | 1192 | ||
1171 | requeue: | ||
1172 | dm_bio_restore(&mpio->details, bio); | ||
1173 | |||
1174 | /* queue for the daemon to resubmit or fail */ | ||
1175 | spin_lock_irqsave(&m->lock, flags); | 1193 | spin_lock_irqsave(&m->lock, flags); |
1176 | bio_list_add(&m->queued_ios, bio); | 1194 | if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m)) |
1177 | m->queue_size++; | 1195 | r = -EIO; |
1178 | if (!m->queue_io) | ||
1179 | queue_work(kmultipathd, &m->process_queued_ios); | ||
1180 | spin_unlock_irqrestore(&m->lock, flags); | 1196 | spin_unlock_irqrestore(&m->lock, flags); |
1181 | 1197 | ||
1182 | return DM_ENDIO_INCOMPLETE; /* io not complete */ | 1198 | return r; |
1183 | } | 1199 | } |
1184 | 1200 | ||
1185 | static int multipath_end_io(struct dm_target *ti, struct bio *bio, | 1201 | static int multipath_end_io(struct dm_target *ti, struct request *clone, |
1186 | int error, union map_info *map_context) | 1202 | int error, union map_info *map_context) |
1187 | { | 1203 | { |
1188 | struct multipath *m = ti->private; | 1204 | struct multipath *m = ti->private; |
@@ -1191,14 +1207,13 @@ static int multipath_end_io(struct dm_target *ti, struct bio *bio, | |||
1191 | struct path_selector *ps; | 1207 | struct path_selector *ps; |
1192 | int r; | 1208 | int r; |
1193 | 1209 | ||
1194 | r = do_end_io(m, bio, error, mpio); | 1210 | r = do_end_io(m, clone, error, mpio); |
1195 | if (pgpath) { | 1211 | if (pgpath) { |
1196 | ps = &pgpath->pg->ps; | 1212 | ps = &pgpath->pg->ps; |
1197 | if (ps->type->end_io) | 1213 | if (ps->type->end_io) |
1198 | ps->type->end_io(ps, &pgpath->path); | 1214 | ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); |
1199 | } | 1215 | } |
1200 | if (r != DM_ENDIO_INCOMPLETE) | 1216 | mempool_free(mpio, m->mpio_pool); |
1201 | mempool_free(mpio, m->mpio_pool); | ||
1202 | 1217 | ||
1203 | return r; | 1218 | return r; |
1204 | } | 1219 | } |
@@ -1411,7 +1426,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, | |||
1411 | spin_lock_irqsave(&m->lock, flags); | 1426 | spin_lock_irqsave(&m->lock, flags); |
1412 | 1427 | ||
1413 | if (!m->current_pgpath) | 1428 | if (!m->current_pgpath) |
1414 | __choose_pgpath(m); | 1429 | __choose_pgpath(m, 0); |
1415 | 1430 | ||
1416 | if (m->current_pgpath) { | 1431 | if (m->current_pgpath) { |
1417 | bdev = m->current_pgpath->path.dev->bdev; | 1432 | bdev = m->current_pgpath->path.dev->bdev; |
@@ -1428,22 +1443,113 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, | |||
1428 | return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); | 1443 | return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); |
1429 | } | 1444 | } |
1430 | 1445 | ||
1446 | static int multipath_iterate_devices(struct dm_target *ti, | ||
1447 | iterate_devices_callout_fn fn, void *data) | ||
1448 | { | ||
1449 | struct multipath *m = ti->private; | ||
1450 | struct priority_group *pg; | ||
1451 | struct pgpath *p; | ||
1452 | int ret = 0; | ||
1453 | |||
1454 | list_for_each_entry(pg, &m->priority_groups, list) { | ||
1455 | list_for_each_entry(p, &pg->pgpaths, list) { | ||
1456 | ret = fn(ti, p->path.dev, ti->begin, data); | ||
1457 | if (ret) | ||
1458 | goto out; | ||
1459 | } | ||
1460 | } | ||
1461 | |||
1462 | out: | ||
1463 | return ret; | ||
1464 | } | ||
1465 | |||
1466 | static int __pgpath_busy(struct pgpath *pgpath) | ||
1467 | { | ||
1468 | struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); | ||
1469 | |||
1470 | return dm_underlying_device_busy(q); | ||
1471 | } | ||
1472 | |||
1473 | /* | ||
1474 | * We return "busy", only when we can map I/Os but underlying devices | ||
1475 | * are busy (so even if we map I/Os now, the I/Os will wait on | ||
1476 | * the underlying queue). | ||
1477 | * In other words, if we want to kill I/Os or queue them inside us | ||
1478 | * due to map unavailability, we don't return "busy". Otherwise, | ||
1479 | * dm core won't give us the I/Os and we can't do what we want. | ||
1480 | */ | ||
1481 | static int multipath_busy(struct dm_target *ti) | ||
1482 | { | ||
1483 | int busy = 0, has_active = 0; | ||
1484 | struct multipath *m = ti->private; | ||
1485 | struct priority_group *pg; | ||
1486 | struct pgpath *pgpath; | ||
1487 | unsigned long flags; | ||
1488 | |||
1489 | spin_lock_irqsave(&m->lock, flags); | ||
1490 | |||
1491 | /* Guess which priority_group will be used at next mapping time */ | ||
1492 | if (unlikely(!m->current_pgpath && m->next_pg)) | ||
1493 | pg = m->next_pg; | ||
1494 | else if (likely(m->current_pg)) | ||
1495 | pg = m->current_pg; | ||
1496 | else | ||
1497 | /* | ||
1498 | * We don't know which pg will be used at next mapping time. | ||
1499 | * We don't call __choose_pgpath() here to avoid triggering | ||
1500 | * pg_init just by busy checking. | ||
1501 | * So we don't know whether underlying devices we will be using | ||
1502 | * at next mapping time are busy or not. Just try mapping. | ||
1503 | */ | ||
1504 | goto out; | ||
1505 | |||
1506 | /* | ||
1507 | * If there is one non-busy active path at least, the path selector | ||
1508 | * will be able to select it. So we consider such a pg as not busy. | ||
1509 | */ | ||
1510 | busy = 1; | ||
1511 | list_for_each_entry(pgpath, &pg->pgpaths, list) | ||
1512 | if (pgpath->is_active) { | ||
1513 | has_active = 1; | ||
1514 | |||
1515 | if (!__pgpath_busy(pgpath)) { | ||
1516 | busy = 0; | ||
1517 | break; | ||
1518 | } | ||
1519 | } | ||
1520 | |||
1521 | if (!has_active) | ||
1522 | /* | ||
1523 | * No active path in this pg, so this pg won't be used and | ||
1524 | * the current_pg will be changed at next mapping time. | ||
1525 | * We need to try mapping to determine it. | ||
1526 | */ | ||
1527 | busy = 0; | ||
1528 | |||
1529 | out: | ||
1530 | spin_unlock_irqrestore(&m->lock, flags); | ||
1531 | |||
1532 | return busy; | ||
1533 | } | ||
1534 | |||
1431 | /*----------------------------------------------------------------- | 1535 | /*----------------------------------------------------------------- |
1432 | * Module setup | 1536 | * Module setup |
1433 | *---------------------------------------------------------------*/ | 1537 | *---------------------------------------------------------------*/ |
1434 | static struct target_type multipath_target = { | 1538 | static struct target_type multipath_target = { |
1435 | .name = "multipath", | 1539 | .name = "multipath", |
1436 | .version = {1, 0, 5}, | 1540 | .version = {1, 1, 0}, |
1437 | .module = THIS_MODULE, | 1541 | .module = THIS_MODULE, |
1438 | .ctr = multipath_ctr, | 1542 | .ctr = multipath_ctr, |
1439 | .dtr = multipath_dtr, | 1543 | .dtr = multipath_dtr, |
1440 | .map = multipath_map, | 1544 | .map_rq = multipath_map, |
1441 | .end_io = multipath_end_io, | 1545 | .rq_end_io = multipath_end_io, |
1442 | .presuspend = multipath_presuspend, | 1546 | .presuspend = multipath_presuspend, |
1443 | .resume = multipath_resume, | 1547 | .resume = multipath_resume, |
1444 | .status = multipath_status, | 1548 | .status = multipath_status, |
1445 | .message = multipath_message, | 1549 | .message = multipath_message, |
1446 | .ioctl = multipath_ioctl, | 1550 | .ioctl = multipath_ioctl, |
1551 | .iterate_devices = multipath_iterate_devices, | ||
1552 | .busy = multipath_busy, | ||
1447 | }; | 1553 | }; |
1448 | 1554 | ||
1449 | static int __init dm_multipath_init(void) | 1555 | static int __init dm_multipath_init(void) |
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h index 27357b85d73d..e7d1fa8b0459 100644 --- a/drivers/md/dm-path-selector.h +++ b/drivers/md/dm-path-selector.h | |||
@@ -56,7 +56,8 @@ struct path_selector_type { | |||
56 | * the path fails. | 56 | * the path fails. |
57 | */ | 57 | */ |
58 | struct dm_path *(*select_path) (struct path_selector *ps, | 58 | struct dm_path *(*select_path) (struct path_selector *ps, |
59 | unsigned *repeat_count); | 59 | unsigned *repeat_count, |
60 | size_t nr_bytes); | ||
60 | 61 | ||
61 | /* | 62 | /* |
62 | * Notify the selector that a path has failed. | 63 | * Notify the selector that a path has failed. |
@@ -75,7 +76,10 @@ struct path_selector_type { | |||
75 | int (*status) (struct path_selector *ps, struct dm_path *path, | 76 | int (*status) (struct path_selector *ps, struct dm_path *path, |
76 | status_type_t type, char *result, unsigned int maxlen); | 77 | status_type_t type, char *result, unsigned int maxlen); |
77 | 78 | ||
78 | int (*end_io) (struct path_selector *ps, struct dm_path *path); | 79 | int (*start_io) (struct path_selector *ps, struct dm_path *path, |
80 | size_t nr_bytes); | ||
81 | int (*end_io) (struct path_selector *ps, struct dm_path *path, | ||
82 | size_t nr_bytes); | ||
79 | }; | 83 | }; |
80 | 84 | ||
81 | /* Register a path selector */ | 85 | /* Register a path selector */ |
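
The interface change above threads the I/O size through the selector: select_path() sees nr_bytes when choosing, and the new start_io()/end_io() pair brackets each I/O with the same byte count, so a selector can keep in-flight accounting. The queue-length selector added below counts requests; a byte-counting policy (for example, a service-time selector) could build on the same hooks, roughly like this (hypothetical, shown only to illustrate the contract):

#include <asm/atomic.h>
#include <linux/types.h>

/* Per-path context a byte-based selector might hang off dm_path.
 * atomic64_t needs kernel support on 32-bit; a spinlock-protected
 * u64 would serve there. */
struct byte_count_ctx {
	atomic64_t bytes_in_flight;
};

static int bc_start_io(struct byte_count_ctx *ctx, size_t nr_bytes)
{
	atomic64_add(nr_bytes, &ctx->bytes_in_flight);
	return 0;
}

static int bc_end_io(struct byte_count_ctx *ctx, size_t nr_bytes)
{
	atomic64_sub(nr_bytes, &ctx->bytes_in_flight);
	return 0;
}
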
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c new file mode 100644 index 000000000000..f92b6cea9d9c --- /dev/null +++ b/drivers/md/dm-queue-length.c | |||
@@ -0,0 +1,263 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved. | ||
3 | * Copyright (C) 2006-2009 NEC Corporation. | ||
4 | * | ||
5 | * dm-queue-length.c | ||
6 | * | ||
7 | * Module Author: Stefan Bader, IBM | ||
8 | * Modified by: Kiyoshi Ueda, NEC | ||
9 | * | ||
10 | * This file is released under the GPL. | ||
11 | * | ||
12 | * queue-length path selector - choose a path with the least number of | ||
13 | * in-flight I/Os. | ||
14 | */ | ||
15 | |||
16 | #include "dm.h" | ||
17 | #include "dm-path-selector.h" | ||
18 | |||
19 | #include <linux/slab.h> | ||
20 | #include <linux/ctype.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <asm/atomic.h> | ||
24 | |||
25 | #define DM_MSG_PREFIX "multipath queue-length" | ||
26 | #define QL_MIN_IO 128 | ||
27 | #define QL_VERSION "0.1.0" | ||
28 | |||
29 | struct selector { | ||
30 | struct list_head valid_paths; | ||
31 | struct list_head failed_paths; | ||
32 | }; | ||
33 | |||
34 | struct path_info { | ||
35 | struct list_head list; | ||
36 | struct dm_path *path; | ||
37 | unsigned repeat_count; | ||
38 | atomic_t qlen; /* the number of in-flight I/Os */ | ||
39 | }; | ||
40 | |||
41 | static struct selector *alloc_selector(void) | ||
42 | { | ||
43 | struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
44 | |||
45 | if (s) { | ||
46 | INIT_LIST_HEAD(&s->valid_paths); | ||
47 | INIT_LIST_HEAD(&s->failed_paths); | ||
48 | } | ||
49 | |||
50 | return s; | ||
51 | } | ||
52 | |||
53 | static int ql_create(struct path_selector *ps, unsigned argc, char **argv) | ||
54 | { | ||
55 | struct selector *s = alloc_selector(); | ||
56 | |||
57 | if (!s) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | ps->context = s; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static void ql_free_paths(struct list_head *paths) | ||
65 | { | ||
66 | struct path_info *pi, *next; | ||
67 | |||
68 | list_for_each_entry_safe(pi, next, paths, list) { | ||
69 | list_del(&pi->list); | ||
70 | kfree(pi); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | static void ql_destroy(struct path_selector *ps) | ||
75 | { | ||
76 | struct selector *s = ps->context; | ||
77 | |||
78 | ql_free_paths(&s->valid_paths); | ||
79 | ql_free_paths(&s->failed_paths); | ||
80 | kfree(s); | ||
81 | ps->context = NULL; | ||
82 | } | ||
83 | |||
84 | static int ql_status(struct path_selector *ps, struct dm_path *path, | ||
85 | status_type_t type, char *result, unsigned maxlen) | ||
86 | { | ||
87 | unsigned sz = 0; | ||
88 | struct path_info *pi; | ||
89 | |||
90 | /* When called with NULL path, return selector status/args. */ | ||
91 | if (!path) | ||
92 | DMEMIT("0 "); | ||
93 | else { | ||
94 | pi = path->pscontext; | ||
95 | |||
96 | switch (type) { | ||
97 | case STATUSTYPE_INFO: | ||
98 | DMEMIT("%d ", atomic_read(&pi->qlen)); | ||
99 | break; | ||
100 | case STATUSTYPE_TABLE: | ||
101 | DMEMIT("%u ", pi->repeat_count); | ||
102 | break; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | return sz; | ||
107 | } | ||
108 | |||
109 | static int ql_add_path(struct path_selector *ps, struct dm_path *path, | ||
110 | int argc, char **argv, char **error) | ||
111 | { | ||
112 | struct selector *s = ps->context; | ||
113 | struct path_info *pi; | ||
114 | unsigned repeat_count = QL_MIN_IO; | ||
115 | |||
116 | /* | ||
117 | * Arguments: [<repeat_count>] | ||
118 | * <repeat_count>: The number of I/Os before switching path. | ||
119 | * If not given, default (QL_MIN_IO) is used. | ||
120 | */ | ||
121 | if (argc > 1) { | ||
122 | *error = "queue-length ps: incorrect number of arguments"; | ||
123 | return -EINVAL; | ||
124 | } | ||
125 | |||
126 | if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) { | ||
127 | *error = "queue-length ps: invalid repeat count"; | ||
128 | return -EINVAL; | ||
129 | } | ||
130 | |||
131 | /* Allocate the path information structure */ | ||
132 | pi = kmalloc(sizeof(*pi), GFP_KERNEL); | ||
133 | if (!pi) { | ||
134 | *error = "queue-length ps: Error allocating path information"; | ||
135 | return -ENOMEM; | ||
136 | } | ||
137 | |||
138 | pi->path = path; | ||
139 | pi->repeat_count = repeat_count; | ||
140 | atomic_set(&pi->qlen, 0); | ||
141 | |||
142 | path->pscontext = pi; | ||
143 | |||
144 | list_add_tail(&pi->list, &s->valid_paths); | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static void ql_fail_path(struct path_selector *ps, struct dm_path *path) | ||
150 | { | ||
151 | struct selector *s = ps->context; | ||
152 | struct path_info *pi = path->pscontext; | ||
153 | |||
154 | list_move(&pi->list, &s->failed_paths); | ||
155 | } | ||
156 | |||
157 | static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path) | ||
158 | { | ||
159 | struct selector *s = ps->context; | ||
160 | struct path_info *pi = path->pscontext; | ||
161 | |||
162 | list_move_tail(&pi->list, &s->valid_paths); | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Select a path having the minimum number of in-flight I/Os | ||
169 | */ | ||
170 | static struct dm_path *ql_select_path(struct path_selector *ps, | ||
171 | unsigned *repeat_count, size_t nr_bytes) | ||
172 | { | ||
173 | struct selector *s = ps->context; | ||
174 | struct path_info *pi = NULL, *best = NULL; | ||
175 | |||
176 | if (list_empty(&s->valid_paths)) | ||
177 | return NULL; | ||
178 | |||
179 | /* Change preferred (first in list) path to evenly balance. */ | ||
180 | list_move_tail(s->valid_paths.next, &s->valid_paths); | ||
181 | |||
182 | list_for_each_entry(pi, &s->valid_paths, list) { | ||
183 | if (!best || | ||
184 | (atomic_read(&pi->qlen) < atomic_read(&best->qlen))) | ||
185 | best = pi; | ||
186 | |||
187 | if (!atomic_read(&best->qlen)) | ||
188 | break; | ||
189 | } | ||
190 | |||
191 | if (!best) | ||
192 | return NULL; | ||
193 | |||
194 | *repeat_count = best->repeat_count; | ||
195 | |||
196 | return best->path; | ||
197 | } | ||
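
The selection loop combines two ideas: rotate the list head so ties round-robin over time, then scan for the smallest queue, stopping early at an idle path. The same logic as a runnable userspace demo:

#include <stdio.h>

int main(void)
{
	int qlen[3] = { 2, 0, 1 };	/* in-flight I/Os per path */
	int head = 1;	/* path 0 was just rotated to the back, as
			 * list_move_tail() does above */
	int best = -1;
	int i;

	for (i = 0; i < 3; i++) {
		int p = (head + i) % 3;

		if (best < 0 || qlen[p] < qlen[best])
			best = p;
		if (qlen[best] == 0)
			break;		/* an idle path cannot be beaten */
	}

	printf("selected path %d (qlen %d)\n", best, qlen[best]);
	return 0;
}
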
198 | |||
199 | static int ql_start_io(struct path_selector *ps, struct dm_path *path, | ||
200 | size_t nr_bytes) | ||
201 | { | ||
202 | struct path_info *pi = path->pscontext; | ||
203 | |||
204 | atomic_inc(&pi->qlen); | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int ql_end_io(struct path_selector *ps, struct dm_path *path, | ||
210 | size_t nr_bytes) | ||
211 | { | ||
212 | struct path_info *pi = path->pscontext; | ||
213 | |||
214 | atomic_dec(&pi->qlen); | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static struct path_selector_type ql_ps = { | ||
220 | .name = "queue-length", | ||
221 | .module = THIS_MODULE, | ||
222 | .table_args = 1, | ||
223 | .info_args = 1, | ||
224 | .create = ql_create, | ||
225 | .destroy = ql_destroy, | ||
226 | .status = ql_status, | ||
227 | .add_path = ql_add_path, | ||
228 | .fail_path = ql_fail_path, | ||
229 | .reinstate_path = ql_reinstate_path, | ||
230 | .select_path = ql_select_path, | ||
231 | .start_io = ql_start_io, | ||
232 | .end_io = ql_end_io, | ||
233 | }; | ||
234 | |||
235 | static int __init dm_ql_init(void) | ||
236 | { | ||
237 | int r = dm_register_path_selector(&ql_ps); | ||
238 | |||
239 | if (r < 0) | ||
240 | DMERR("register failed %d", r); | ||
241 | |||
242 | DMINFO("version " QL_VERSION " loaded"); | ||
243 | |||
244 | return r; | ||
245 | } | ||
246 | |||
247 | static void __exit dm_ql_exit(void) | ||
248 | { | ||
249 | int r = dm_unregister_path_selector(&ql_ps); | ||
250 | |||
251 | if (r < 0) | ||
252 | DMERR("unregister failed %d", r); | ||
253 | } | ||
254 | |||
255 | module_init(dm_ql_init); | ||
256 | module_exit(dm_ql_exit); | ||
257 | |||
258 | MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>"); | ||
259 | MODULE_DESCRIPTION( | ||
260 | "(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n" | ||
261 | DM_NAME " path selector to balance the number of in-flight I/Os" | ||
262 | ); | ||
263 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 076fbb4e967a..ce8868c768cc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1283,9 +1283,23 @@ static int mirror_status(struct dm_target *ti, status_type_t type, | |||
1283 | return 0; | 1283 | return 0; |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | static int mirror_iterate_devices(struct dm_target *ti, | ||
1287 | iterate_devices_callout_fn fn, void *data) | ||
1288 | { | ||
1289 | struct mirror_set *ms = ti->private; | ||
1290 | int ret = 0; | ||
1291 | unsigned i; | ||
1292 | |||
1293 | for (i = 0; !ret && i < ms->nr_mirrors; i++) | ||
1294 | ret = fn(ti, ms->mirror[i].dev, | ||
1295 | ms->mirror[i].offset, data); | ||
1296 | |||
1297 | return ret; | ||
1298 | } | ||
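
The new iterate_devices method is a plain callout pattern: the target calls a caller-supplied function once per underlying device and stops at the first nonzero return. A minimal userspace model of the same control flow (types and device names invented):

#include <stdio.h>

typedef int (*callout_fn)(const char *dev, void *data);

static int visit(const char *dev, void *data)
{
	int *visited = data;

	printf("visiting %s\n", dev);
	(*visited)++;
	return 0;	/* a nonzero return would stop the loop */
}

int main(void)
{
	const char *mirror_dev[] = { "sdb", "sdc" };	/* invented */
	int ret = 0, visited = 0;
	unsigned i;

	for (i = 0; !ret && i < 2; i++)
		ret = visit(mirror_dev[i], &visited);

	printf("visited %d device(s), ret %d\n", visited, ret);
	return 0;
}
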
1299 | |||
1286 | static struct target_type mirror_target = { | 1300 | static struct target_type mirror_target = { |
1287 | .name = "mirror", | 1301 | .name = "mirror", |
1288 | .version = {1, 0, 20}, | 1302 | .version = {1, 12, 0}, |
1289 | .module = THIS_MODULE, | 1303 | .module = THIS_MODULE, |
1290 | .ctr = mirror_ctr, | 1304 | .ctr = mirror_ctr, |
1291 | .dtr = mirror_dtr, | 1305 | .dtr = mirror_dtr, |
@@ -1295,6 +1309,7 @@ static struct target_type mirror_target = { | |||
1295 | .postsuspend = mirror_postsuspend, | 1309 | .postsuspend = mirror_postsuspend, |
1296 | .resume = mirror_resume, | 1310 | .resume = mirror_resume, |
1297 | .status = mirror_status, | 1311 | .status = mirror_status, |
1312 | .iterate_devices = mirror_iterate_devices, | ||
1298 | }; | 1313 | }; |
1299 | 1314 | ||
1300 | static int __init dm_mirror_init(void) | 1315 | static int __init dm_mirror_init(void) |
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 7b899be0b087..36dbe29f2fd6 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -283,7 +283,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region) | |||
283 | 283 | ||
284 | nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); | 284 | nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); |
285 | if (unlikely(!nreg)) | 285 | if (unlikely(!nreg)) |
286 | nreg = kmalloc(sizeof(*nreg), GFP_NOIO); | 286 | nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL); |
287 | 287 | ||
288 | nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? | 288 | nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? |
289 | DM_RH_CLEAN : DM_RH_NOSYNC; | 289 | DM_RH_CLEAN : DM_RH_NOSYNC; |
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index cdfbf65b28cb..24752f449bef 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -161,7 +161,7 @@ static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p) | |||
161 | } | 161 | } |
162 | 162 | ||
163 | static struct dm_path *rr_select_path(struct path_selector *ps, | 163 | static struct dm_path *rr_select_path(struct path_selector *ps, |
164 | unsigned *repeat_count) | 164 | unsigned *repeat_count, size_t nr_bytes) |
165 | { | 165 | { |
166 | struct selector *s = (struct selector *) ps->context; | 166 | struct selector *s = (struct selector *) ps->context; |
167 | struct path_info *pi = NULL; | 167 | struct path_info *pi = NULL; |
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
new file mode 100644
index 000000000000..cfa668f46c40
--- /dev/null
+++ b/drivers/md/dm-service-time.c
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * Module Author: Kiyoshi Ueda | ||
5 | * | ||
6 | * This file is released under the GPL. | ||
7 | * | ||
8 | * Throughput oriented path selector. | ||
9 | */ | ||
10 | |||
11 | #include "dm.h" | ||
12 | #include "dm-path-selector.h" | ||
13 | |||
14 | #define DM_MSG_PREFIX "multipath service-time" | ||
15 | #define ST_MIN_IO 1 | ||
16 | #define ST_MAX_RELATIVE_THROUGHPUT 100 | ||
17 | #define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7 | ||
18 | #define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT) | ||
19 | #define ST_VERSION "0.2.0" | ||
20 | |||
21 | struct selector { | ||
22 | struct list_head valid_paths; | ||
23 | struct list_head failed_paths; | ||
24 | }; | ||
25 | |||
26 | struct path_info { | ||
27 | struct list_head list; | ||
28 | struct dm_path *path; | ||
29 | unsigned repeat_count; | ||
30 | unsigned relative_throughput; | ||
31 | atomic_t in_flight_size; /* Total size of in-flight I/Os */ | ||
32 | }; | ||
33 | |||
34 | static struct selector *alloc_selector(void) | ||
35 | { | ||
36 | struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
37 | |||
38 | if (s) { | ||
39 | INIT_LIST_HEAD(&s->valid_paths); | ||
40 | INIT_LIST_HEAD(&s->failed_paths); | ||
41 | } | ||
42 | |||
43 | return s; | ||
44 | } | ||
45 | |||
46 | static int st_create(struct path_selector *ps, unsigned argc, char **argv) | ||
47 | { | ||
48 | struct selector *s = alloc_selector(); | ||
49 | |||
50 | if (!s) | ||
51 | return -ENOMEM; | ||
52 | |||
53 | ps->context = s; | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static void free_paths(struct list_head *paths) | ||
58 | { | ||
59 | struct path_info *pi, *next; | ||
60 | |||
61 | list_for_each_entry_safe(pi, next, paths, list) { | ||
62 | list_del(&pi->list); | ||
63 | kfree(pi); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | static void st_destroy(struct path_selector *ps) | ||
68 | { | ||
69 | struct selector *s = ps->context; | ||
70 | |||
71 | free_paths(&s->valid_paths); | ||
72 | free_paths(&s->failed_paths); | ||
73 | kfree(s); | ||
74 | ps->context = NULL; | ||
75 | } | ||
76 | |||
77 | static int st_status(struct path_selector *ps, struct dm_path *path, | ||
78 | status_type_t type, char *result, unsigned maxlen) | ||
79 | { | ||
80 | unsigned sz = 0; | ||
81 | struct path_info *pi; | ||
82 | |||
83 | if (!path) | ||
84 | DMEMIT("0 "); | ||
85 | else { | ||
86 | pi = path->pscontext; | ||
87 | |||
88 | switch (type) { | ||
89 | case STATUSTYPE_INFO: | ||
90 | DMEMIT("%d %u ", atomic_read(&pi->in_flight_size), | ||
91 | pi->relative_throughput); | ||
92 | break; | ||
93 | case STATUSTYPE_TABLE: | ||
94 | DMEMIT("%u %u ", pi->repeat_count, | ||
95 | pi->relative_throughput); | ||
96 | break; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | return sz; | ||
101 | } | ||
102 | |||
103 | static int st_add_path(struct path_selector *ps, struct dm_path *path, | ||
104 | int argc, char **argv, char **error) | ||
105 | { | ||
106 | struct selector *s = ps->context; | ||
107 | struct path_info *pi; | ||
108 | unsigned repeat_count = ST_MIN_IO; | ||
109 | unsigned relative_throughput = 1; | ||
110 | |||
111 | /* | ||
112 | * Arguments: [<repeat_count> [<relative_throughput>]] | ||
113 | * <repeat_count>: The number of I/Os before switching path. | ||
114 | * If not given, default (ST_MIN_IO) is used. | ||
115 | * <relative_throughput>: The relative throughput value of | ||
116 | * the path among all paths in the path-group. | ||
117 | * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT> | ||
118 | * If not given, minimum value '1' is used. | ||
119 | * If '0' is given, the path isn't selected while | ||
120 | * other paths having a positive value are | ||
121 | * available. | ||
122 | */ | ||
123 | if (argc > 2) { | ||
124 | *error = "service-time ps: incorrect number of arguments"; | ||
125 | return -EINVAL; | ||
126 | } | ||
127 | |||
128 | if (argc && (sscanf(argv[0], "%u", &repeat_count) != 1)) { | ||
129 | *error = "service-time ps: invalid repeat count"; | ||
130 | return -EINVAL; | ||
131 | } | ||
132 | |||
133 | if ((argc == 2) && | ||
134 | (sscanf(argv[1], "%u", &relative_throughput) != 1 || | ||
135 | relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) { | ||
136 | *error = "service-time ps: invalid relative_throughput value"; | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | |||
140 | /* allocate the path */ | ||
141 | pi = kmalloc(sizeof(*pi), GFP_KERNEL); | ||
142 | if (!pi) { | ||
143 | *error = "service-time ps: Error allocating path context"; | ||
144 | return -ENOMEM; | ||
145 | } | ||
146 | |||
147 | pi->path = path; | ||
148 | pi->repeat_count = repeat_count; | ||
149 | pi->relative_throughput = relative_throughput; | ||
150 | atomic_set(&pi->in_flight_size, 0); | ||
151 | |||
152 | path->pscontext = pi; | ||
153 | |||
154 | list_add_tail(&pi->list, &s->valid_paths); | ||
155 | |||
156 | return 0; | ||
157 | } | ||
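
Again for illustration only (device numbers and values invented), a table line giving the second path twice the relative throughput of the first could read:

    0 1024 multipath 0 0 1 1 service-time 0 2 2 8:16 1 50 8:32 1 100

Each path carries two arguments here, matching table_args = 2 below: a repeat_count of 1 and a relative_throughput of 50 or 100.
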
158 | |||
159 | static void st_fail_path(struct path_selector *ps, struct dm_path *path) | ||
160 | { | ||
161 | struct selector *s = ps->context; | ||
162 | struct path_info *pi = path->pscontext; | ||
163 | |||
164 | list_move(&pi->list, &s->failed_paths); | ||
165 | } | ||
166 | |||
167 | static int st_reinstate_path(struct path_selector *ps, struct dm_path *path) | ||
168 | { | ||
169 | struct selector *s = ps->context; | ||
170 | struct path_info *pi = path->pscontext; | ||
171 | |||
172 | list_move_tail(&pi->list, &s->valid_paths); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Compare the estimated service time of 2 paths, pi1 and pi2, | ||
179 | * for the incoming I/O. | ||
180 | * | ||
181 | * Returns: | ||
182 | * < 0 : pi1 is better | ||
183 | * 0 : no difference between pi1 and pi2 | ||
184 | * > 0 : pi2 is better | ||
185 | * | ||
186 | * Description: | ||
187 | * Basically, the service time is estimated by: | ||
188 | * ('pi->in-flight-size' + 'incoming') / 'pi->relative_throughput' | ||
189 | * To reduce the calculation, some optimizations are made. | ||
190 | * (See comments inline) | ||
191 | */ | ||
192 | static int st_compare_load(struct path_info *pi1, struct path_info *pi2, | ||
193 | size_t incoming) | ||
194 | { | ||
195 | size_t sz1, sz2, st1, st2; | ||
196 | |||
197 | sz1 = atomic_read(&pi1->in_flight_size); | ||
198 | sz2 = atomic_read(&pi2->in_flight_size); | ||
199 | |||
200 | /* | ||
201 | * Case 1: Both have same throughput value. Choose less loaded path. | ||
202 | */ | ||
203 | if (pi1->relative_throughput == pi2->relative_throughput) | ||
204 | return sz1 - sz2; | ||
205 | |||
206 | /* | ||
207 | * Case 2a: Both have same load. Choose higher throughput path. | ||
208 | * Case 2b: One path has no throughput value. Choose the other one. | ||
209 | */ | ||
210 | if (sz1 == sz2 || | ||
211 | !pi1->relative_throughput || !pi2->relative_throughput) | ||
212 | return pi2->relative_throughput - pi1->relative_throughput; | ||
213 | |||
214 | /* | ||
215 | * Case 3: Calculate service time. Choose faster path. | ||
216 | * Service time using pi1: | ||
217 | * st1 = (sz1 + incoming) / pi1->relative_throughput | ||
218 | * Service time using pi2: | ||
219 | * st2 = (sz2 + incoming) / pi2->relative_throughput | ||
220 | * | ||
221 | * To avoid the division, transform the expression to use | ||
222 | * multiplication. | ||
223 | * Because ->relative_throughput > 0 here, if st1 < st2, | ||
224 | * the expressions below are the same meaning: | ||
225 | * (sz1 + incoming) / pi1->relative_throughput < | ||
226 | * (sz2 + incoming) / pi2->relative_throughput | ||
227 | * (sz1 + incoming) * pi2->relative_throughput < | ||
228 | * (sz2 + incoming) * pi1->relative_throughput | ||
229 | * So use the later one. | ||
230 | */ | ||
231 | sz1 += incoming; | ||
232 | sz2 += incoming; | ||
233 | if (unlikely(sz1 >= ST_MAX_INFLIGHT_SIZE || | ||
234 | sz2 >= ST_MAX_INFLIGHT_SIZE)) { | ||
235 | /* | ||
236 | * Size may be too big for multiplying pi->relative_throughput | ||
237 | * and overflow. | ||
238 | * To avoid the overflow and mis-selection, shift down both. | ||
239 | */ | ||
240 | sz1 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT; | ||
241 | sz2 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT; | ||
242 | } | ||
243 | st1 = sz1 * pi2->relative_throughput; | ||
244 | st2 = sz2 * pi1->relative_throughput; | ||
245 | if (st1 != st2) | ||
246 | return st1 - st2; | ||
247 | |||
248 | /* | ||
249 | * Case 4: Service time is equal. Choose higher throughput path. | ||
250 | */ | ||
251 | return pi2->relative_throughput - pi1->relative_throughput; | ||
252 | } | ||
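
To see the cross-multiplication and the overflow guard at work, here is a runnable userspace model of the comparison (sizes and throughput values are invented; explicit sign-style results replace the kernel's raw subtractions):

#include <stdio.h>
#include <stddef.h>

#define MODEL_THROUGHPUT_SHIFT 7
#define MODEL_MAX_INFLIGHT ((size_t)-1 >> MODEL_THROUGHPUT_SHIFT)

struct model_path {
	size_t in_flight;	/* bytes currently in flight */
	unsigned throughput;	/* relative throughput, 0..100 */
};

/* < 0: p1 better, 0: equal, > 0: p2 better */
static int compare_load(const struct model_path *p1,
			const struct model_path *p2, size_t incoming)
{
	size_t sz1 = p1->in_flight + incoming;
	size_t sz2 = p2->in_flight + incoming;
	size_t st1, st2;

	if (p1->throughput == p2->throughput)
		return (sz1 > sz2) - (sz1 < sz2);

	if (sz1 == sz2 || !p1->throughput || !p2->throughput)
		return (int)p2->throughput - (int)p1->throughput;

	/* shift both sizes down if the multiplication could overflow */
	if (sz1 >= MODEL_MAX_INFLIGHT || sz2 >= MODEL_MAX_INFLIGHT) {
		sz1 >>= MODEL_THROUGHPUT_SHIFT;
		sz2 >>= MODEL_THROUGHPUT_SHIFT;
	}
	st1 = sz1 * p2->throughput;	/* cross-multiplied service times */
	st2 = sz2 * p1->throughput;
	if (st1 != st2)
		return (st1 > st2) - (st1 < st2);

	return (int)p2->throughput - (int)p1->throughput;
}

int main(void)
{
	struct model_path a = { 64 * 1024, 10 };	/* little queued, slow */
	struct model_path b = { 256 * 1024, 100 };	/* more queued, fast */

	/* (64K+4K)/10 ~ 6963 vs (256K+4K)/100 ~ 2662: b is quicker */
	printf("best path for a 4 KiB I/O: %s\n",
	       compare_load(&a, &b, 4096) < 0 ? "a" : "b");
	return 0;
}
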
253 | |||
254 | static struct dm_path *st_select_path(struct path_selector *ps, | ||
255 | unsigned *repeat_count, size_t nr_bytes) | ||
256 | { | ||
257 | struct selector *s = ps->context; | ||
258 | struct path_info *pi = NULL, *best = NULL; | ||
259 | |||
260 | if (list_empty(&s->valid_paths)) | ||
261 | return NULL; | ||
262 | |||
263 | /* Change preferred (first in list) path to evenly balance. */ | ||
264 | list_move_tail(s->valid_paths.next, &s->valid_paths); | ||
265 | |||
266 | list_for_each_entry(pi, &s->valid_paths, list) | ||
267 | if (!best || (st_compare_load(pi, best, nr_bytes) < 0)) | ||
268 | best = pi; | ||
269 | |||
270 | if (!best) | ||
271 | return NULL; | ||
272 | |||
273 | *repeat_count = best->repeat_count; | ||
274 | |||
275 | return best->path; | ||
276 | } | ||
277 | |||
278 | static int st_start_io(struct path_selector *ps, struct dm_path *path, | ||
279 | size_t nr_bytes) | ||
280 | { | ||
281 | struct path_info *pi = path->pscontext; | ||
282 | |||
283 | atomic_add(nr_bytes, &pi->in_flight_size); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | static int st_end_io(struct path_selector *ps, struct dm_path *path, | ||
289 | size_t nr_bytes) | ||
290 | { | ||
291 | struct path_info *pi = path->pscontext; | ||
292 | |||
293 | atomic_sub(nr_bytes, &pi->in_flight_size); | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static struct path_selector_type st_ps = { | ||
299 | .name = "service-time", | ||
300 | .module = THIS_MODULE, | ||
301 | .table_args = 2, | ||
302 | .info_args = 2, | ||
303 | .create = st_create, | ||
304 | .destroy = st_destroy, | ||
305 | .status = st_status, | ||
306 | .add_path = st_add_path, | ||
307 | .fail_path = st_fail_path, | ||
308 | .reinstate_path = st_reinstate_path, | ||
309 | .select_path = st_select_path, | ||
310 | .start_io = st_start_io, | ||
311 | .end_io = st_end_io, | ||
312 | }; | ||
313 | |||
314 | static int __init dm_st_init(void) | ||
315 | { | ||
316 | int r = dm_register_path_selector(&st_ps); | ||
317 | |||
318 | if (r < 0) | ||
319 | DMERR("register failed %d", r); | ||
320 | |||
321 | DMINFO("version " ST_VERSION " loaded"); | ||
322 | |||
323 | return r; | ||
324 | } | ||
325 | |||
326 | static void __exit dm_st_exit(void) | ||
327 | { | ||
328 | int r = dm_unregister_path_selector(&st_ps); | ||
329 | |||
330 | if (r < 0) | ||
331 | DMERR("unregister failed %d", r); | ||
332 | } | ||
333 | |||
334 | module_init(dm_st_init); | ||
335 | module_exit(dm_st_exit); | ||
336 | |||
337 | MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector"); | ||
338 | MODULE_AUTHOR("Kiyoshi Ueda <k-ueda@ct.jp.nec.com>"); | ||
339 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2662a41337e7..6e3fe4f14934 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -636,7 +636,7 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
636 | /* | 636 | /* |
637 | * Commit exceptions to disk. | 637 | * Commit exceptions to disk. |
638 | */ | 638 | */ |
639 | if (ps->valid && area_io(ps, WRITE)) | 639 | if (ps->valid && area_io(ps, WRITE_BARRIER)) |
640 | ps->valid = 0; | 640 | ps->valid = 0; |
641 | 641 | ||
642 | /* | 642 | /* |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d73f17fc7778..d573165cd2b7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -678,6 +678,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
678 | 678 | ||
679 | ti->private = s; | 679 | ti->private = s; |
680 | ti->split_io = s->store->chunk_size; | 680 | ti->split_io = s->store->chunk_size; |
681 | ti->num_flush_requests = 1; | ||
681 | 682 | ||
682 | return 0; | 683 | return 0; |
683 | 684 | ||
@@ -1030,6 +1031,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, | |||
1030 | chunk_t chunk; | 1031 | chunk_t chunk; |
1031 | struct dm_snap_pending_exception *pe = NULL; | 1032 | struct dm_snap_pending_exception *pe = NULL; |
1032 | 1033 | ||
1034 | if (unlikely(bio_empty_barrier(bio))) { | ||
1035 | bio->bi_bdev = s->store->cow->bdev; | ||
1036 | return DM_MAPIO_REMAPPED; | ||
1037 | } | ||
1038 | |||
1033 | chunk = sector_to_chunk(s->store, bio->bi_sector); | 1039 | chunk = sector_to_chunk(s->store, bio->bi_sector); |
1034 | 1040 | ||
1035 | /* Full snapshots are not usable */ | 1041 | /* Full snapshots are not usable */ |
@@ -1338,6 +1344,8 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1338 | } | 1344 | } |
1339 | 1345 | ||
1340 | ti->private = dev; | 1346 | ti->private = dev; |
1347 | ti->num_flush_requests = 1; | ||
1348 | |||
1341 | return 0; | 1349 | return 0; |
1342 | } | 1350 | } |
1343 | 1351 | ||
@@ -1353,6 +1361,9 @@ static int origin_map(struct dm_target *ti, struct bio *bio, | |||
1353 | struct dm_dev *dev = ti->private; | 1361 | struct dm_dev *dev = ti->private; |
1354 | bio->bi_bdev = dev->bdev; | 1362 | bio->bi_bdev = dev->bdev; |
1355 | 1363 | ||
1364 | if (unlikely(bio_empty_barrier(bio))) | ||
1365 | return DM_MAPIO_REMAPPED; | ||
1366 | |||
1356 | /* Only tell snapshots if this is a write */ | 1367 | /* Only tell snapshots if this is a write */ |
1357 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; | 1368 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; |
1358 | } | 1369 | } |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 41569bc60abc..b240e85ae39a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -167,6 +167,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
167 | sc->stripes = stripes; | 167 | sc->stripes = stripes; |
168 | sc->stripe_width = width; | 168 | sc->stripe_width = width; |
169 | ti->split_io = chunk_size; | 169 | ti->split_io = chunk_size; |
170 | ti->num_flush_requests = stripes; | ||
170 | 171 | ||
171 | sc->chunk_mask = ((sector_t) chunk_size) - 1; | 172 | sc->chunk_mask = ((sector_t) chunk_size) - 1; |
172 | for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) | 173 | for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) |
@@ -211,10 +212,18 @@ static int stripe_map(struct dm_target *ti, struct bio *bio, | |||
211 | union map_info *map_context) | 212 | union map_info *map_context) |
212 | { | 213 | { |
213 | struct stripe_c *sc = (struct stripe_c *) ti->private; | 214 | struct stripe_c *sc = (struct stripe_c *) ti->private; |
215 | sector_t offset, chunk; | ||
216 | uint32_t stripe; | ||
214 | 217 | ||
215 | sector_t offset = bio->bi_sector - ti->begin; | 218 | if (unlikely(bio_empty_barrier(bio))) { |
216 | sector_t chunk = offset >> sc->chunk_shift; | 219 | BUG_ON(map_context->flush_request >= sc->stripes); |
217 | uint32_t stripe = sector_div(chunk, sc->stripes); | 220 | bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev; |
221 | return DM_MAPIO_REMAPPED; | ||
222 | } | ||
223 | |||
224 | offset = bio->bi_sector - ti->begin; | ||
225 | chunk = offset >> sc->chunk_shift; | ||
226 | stripe = sector_div(chunk, sc->stripes); | ||
218 | 227 | ||
219 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; | 228 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; |
220 | bio->bi_sector = sc->stripe[stripe].physical_start + | 229 | bio->bi_sector = sc->stripe[stripe].physical_start + |
@@ -304,15 +313,31 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, | |||
304 | return error; | 313 | return error; |
305 | } | 314 | } |
306 | 315 | ||
316 | static int stripe_iterate_devices(struct dm_target *ti, | ||
317 | iterate_devices_callout_fn fn, void *data) | ||
318 | { | ||
319 | struct stripe_c *sc = ti->private; | ||
320 | int ret = 0; | ||
321 | unsigned i = 0; | ||
322 | |||
323 | do | ||
324 | ret = fn(ti, sc->stripe[i].dev, | ||
325 | sc->stripe[i].physical_start, data); | ||
326 | while (!ret && ++i < sc->stripes); | ||
327 | |||
328 | return ret; | ||
329 | } | ||
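
Combined with ti->num_flush_requests = stripes in the constructor above, an empty barrier is cloned once per stripe and map_context->flush_request picks the backing device for each clone, so the flush reaches every member. A trivial model of that fan-out (device names invented):

#include <stdio.h>

#define NR_STRIPES 3

static const char *stripe_dev[NR_STRIPES] = { "sdb", "sdc", "sdd" };

int main(void)
{
	unsigned flush_request;

	/* one clone per stripe, indexed by flush_request */
	for (flush_request = 0; flush_request < NR_STRIPES; flush_request++)
		printf("flush clone %u -> /dev/%s\n",
		       flush_request, stripe_dev[flush_request]);
	return 0;
}
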
330 | |||
307 | static struct target_type stripe_target = { | 331 | static struct target_type stripe_target = { |
308 | .name = "striped", | 332 | .name = "striped", |
309 | .version = {1, 1, 0}, | 333 | .version = {1, 2, 0}, |
310 | .module = THIS_MODULE, | 334 | .module = THIS_MODULE, |
311 | .ctr = stripe_ctr, | 335 | .ctr = stripe_ctr, |
312 | .dtr = stripe_dtr, | 336 | .dtr = stripe_dtr, |
313 | .map = stripe_map, | 337 | .map = stripe_map, |
314 | .end_io = stripe_end_io, | 338 | .end_io = stripe_end_io, |
315 | .status = stripe_status, | 339 | .status = stripe_status, |
340 | .iterate_devices = stripe_iterate_devices, | ||
316 | }; | 341 | }; |
317 | 342 | ||
318 | int __init dm_stripe_init(void) | 343 | int __init dm_stripe_init(void) |
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index a2a45e6c7c8b..4b045903a4e2 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -57,12 +57,21 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) | |||
57 | return strlen(buf); | 57 | return strlen(buf); |
58 | } | 58 | } |
59 | 59 | ||
60 | static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) | ||
61 | { | ||
62 | sprintf(buf, "%d\n", dm_suspended(md)); | ||
63 | |||
64 | return strlen(buf); | ||
65 | } | ||
66 | |||
60 | static DM_ATTR_RO(name); | 67 | static DM_ATTR_RO(name); |
61 | static DM_ATTR_RO(uuid); | 68 | static DM_ATTR_RO(uuid); |
69 | static DM_ATTR_RO(suspended); | ||
62 | 70 | ||
63 | static struct attribute *dm_attrs[] = { | 71 | static struct attribute *dm_attrs[] = { |
64 | &dm_attr_name.attr, | 72 | &dm_attr_name.attr, |
65 | &dm_attr_uuid.attr, | 73 | &dm_attr_uuid.attr, |
74 | &dm_attr_suspended.attr, | ||
66 | NULL, | 75 | NULL, |
67 | }; | 76 | }; |
68 | 77 | ||
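
The new attribute reports "0" or "1". A minimal userspace sketch that reads it, assuming a device node named dm-0 exists:

#include <stdio.h>

int main(void)
{
	char buf[8];
	FILE *f = fopen("/sys/block/dm-0/dm/suspended", "r");	/* assumed device */

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("suspended: %s", buf);
	fclose(f);
	return 0;
}
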
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e9a73bb242b0..4899ebe767c8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -41,6 +41,7 @@ | |||
41 | struct dm_table { | 41 | struct dm_table { |
42 | struct mapped_device *md; | 42 | struct mapped_device *md; |
43 | atomic_t holders; | 43 | atomic_t holders; |
44 | unsigned type; | ||
44 | 45 | ||
45 | /* btree table */ | 46 | /* btree table */ |
46 | unsigned int depth; | 47 | unsigned int depth; |
@@ -62,15 +63,11 @@ struct dm_table { | |||
62 | /* a list of devices used by this table */ | 63 | /* a list of devices used by this table */ |
63 | struct list_head devices; | 64 | struct list_head devices; |
64 | 65 | ||
65 | /* | ||
66 | * These are optimistic limits taken from all the | ||
67 | * targets, some targets will need smaller limits. | ||
68 | */ | ||
69 | struct io_restrictions limits; | ||
70 | |||
71 | /* events get handed up using this callback */ | 66 | /* events get handed up using this callback */ |
72 | void (*event_fn)(void *); | 67 | void (*event_fn)(void *); |
73 | void *event_context; | 68 | void *event_context; |
69 | |||
70 | struct dm_md_mempools *mempools; | ||
74 | }; | 71 | }; |
75 | 72 | ||
76 | /* | 73 | /* |
@@ -89,43 +86,6 @@ static unsigned int int_log(unsigned int n, unsigned int base) | |||
89 | } | 86 | } |
90 | 87 | ||
91 | /* | 88 | /* |
92 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
93 | */ | ||
94 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) | ||
95 | |||
96 | /* | ||
97 | * Combine two io_restrictions, always taking the lower value. | ||
98 | */ | ||
99 | static void combine_restrictions_low(struct io_restrictions *lhs, | ||
100 | struct io_restrictions *rhs) | ||
101 | { | ||
102 | lhs->max_sectors = | ||
103 | min_not_zero(lhs->max_sectors, rhs->max_sectors); | ||
104 | |||
105 | lhs->max_phys_segments = | ||
106 | min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments); | ||
107 | |||
108 | lhs->max_hw_segments = | ||
109 | min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); | ||
110 | |||
111 | lhs->logical_block_size = max(lhs->logical_block_size, | ||
112 | rhs->logical_block_size); | ||
113 | |||
114 | lhs->max_segment_size = | ||
115 | min_not_zero(lhs->max_segment_size, rhs->max_segment_size); | ||
116 | |||
117 | lhs->max_hw_sectors = | ||
118 | min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); | ||
119 | |||
120 | lhs->seg_boundary_mask = | ||
121 | min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); | ||
122 | |||
123 | lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn); | ||
124 | |||
125 | lhs->no_cluster |= rhs->no_cluster; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Calculate the index of the child node of the n'th node k'th key. | 89 | * Calculate the index of the child node of the n'th node k'th key. |
130 | */ | 90 | */ |
131 | static inline unsigned int get_child(unsigned int n, unsigned int k) | 91 | static inline unsigned int get_child(unsigned int n, unsigned int k) |
@@ -267,6 +227,8 @@ static void free_devices(struct list_head *devices) | |||
267 | list_for_each_safe(tmp, next, devices) { | 227 | list_for_each_safe(tmp, next, devices) { |
268 | struct dm_dev_internal *dd = | 228 | struct dm_dev_internal *dd = |
269 | list_entry(tmp, struct dm_dev_internal, list); | 229 | list_entry(tmp, struct dm_dev_internal, list); |
230 | DMWARN("dm_table_destroy: dm_put_device call missing for %s", | ||
231 | dd->dm_dev.name); | ||
270 | kfree(dd); | 232 | kfree(dd); |
271 | } | 233 | } |
272 | } | 234 | } |
@@ -296,12 +258,10 @@ void dm_table_destroy(struct dm_table *t) | |||
296 | vfree(t->highs); | 258 | vfree(t->highs); |
297 | 259 | ||
298 | /* free the device list */ | 260 | /* free the device list */ |
299 | if (t->devices.next != &t->devices) { | 261 | if (t->devices.next != &t->devices) |
300 | DMWARN("devices still present during destroy: " | ||
301 | "dm_table_remove_device calls missing"); | ||
302 | |||
303 | free_devices(&t->devices); | 262 | free_devices(&t->devices); |
304 | } | 263 | |
264 | dm_free_md_mempools(t->mempools); | ||
305 | 265 | ||
306 | kfree(t); | 266 | kfree(t); |
307 | } | 267 | } |
@@ -385,15 +345,48 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) | |||
385 | /* | 345 | /* |
386 | * If possible, this checks an area of a destination device is valid. | 346 | * If possible, this checks an area of a destination device is valid. |
387 | */ | 347 | */ |
388 | static int check_device_area(struct dm_dev_internal *dd, sector_t start, | 348 | static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, |
389 | sector_t len) | 349 | sector_t start, void *data) |
390 | { | 350 | { |
391 | sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT; | 351 | struct queue_limits *limits = data; |
352 | struct block_device *bdev = dev->bdev; | ||
353 | sector_t dev_size = | ||
354 | i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; | ||
355 | unsigned short logical_block_size_sectors = | ||
356 | limits->logical_block_size >> SECTOR_SHIFT; | ||
357 | char b[BDEVNAME_SIZE]; | ||
392 | 358 | ||
393 | if (!dev_size) | 359 | if (!dev_size) |
394 | return 1; | 360 | return 1; |
395 | 361 | ||
396 | return ((start < dev_size) && (len <= (dev_size - start))); | 362 | if ((start >= dev_size) || (start + ti->len > dev_size)) { |
363 | DMWARN("%s: %s too small for target", | ||
364 | dm_device_name(ti->table->md), bdevname(bdev, b)); | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | if (logical_block_size_sectors <= 1) | ||
369 | return 1; | ||
370 | |||
371 | if (start & (logical_block_size_sectors - 1)) { | ||
372 | DMWARN("%s: start=%llu not aligned to h/w " | ||
373 | "logical block size %hu of %s", | ||
374 | dm_device_name(ti->table->md), | ||
375 | (unsigned long long)start, | ||
376 | limits->logical_block_size, bdevname(bdev, b)); | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | if (ti->len & (logical_block_size_sectors - 1)) { | ||
381 | DMWARN("%s: len=%llu not aligned to h/w " | ||
382 | "logical block size %hu of %s", | ||
383 | dm_device_name(ti->table->md), | ||
384 | (unsigned long long)ti->len, | ||
385 | limits->logical_block_size, bdevname(bdev, b)); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | return 1; | ||
397 | } | 390 | } |
398 | 391 | ||
399 | /* | 392 | /* |
@@ -479,38 +472,32 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, | |||
479 | } | 472 | } |
480 | atomic_inc(&dd->count); | 473 | atomic_inc(&dd->count); |
481 | 474 | ||
482 | if (!check_device_area(dd, start, len)) { | ||
483 | DMWARN("device %s too small for target", path); | ||
484 | dm_put_device(ti, &dd->dm_dev); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | *result = &dd->dm_dev; | 475 | *result = &dd->dm_dev; |
489 | |||
490 | return 0; | 476 | return 0; |
491 | } | 477 | } |
492 | 478 | ||
493 | void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) | 479 | /* |
480 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
481 | */ | ||
482 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) | ||
483 | |||
484 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | ||
485 | sector_t start, void *data) | ||
494 | { | 486 | { |
487 | struct queue_limits *limits = data; | ||
488 | struct block_device *bdev = dev->bdev; | ||
495 | struct request_queue *q = bdev_get_queue(bdev); | 489 | struct request_queue *q = bdev_get_queue(bdev); |
496 | struct io_restrictions *rs = &ti->limits; | ||
497 | char b[BDEVNAME_SIZE]; | 490 | char b[BDEVNAME_SIZE]; |
498 | 491 | ||
499 | if (unlikely(!q)) { | 492 | if (unlikely(!q)) { |
500 | DMWARN("%s: Cannot set limits for nonexistent device %s", | 493 | DMWARN("%s: Cannot set limits for nonexistent device %s", |
501 | dm_device_name(ti->table->md), bdevname(bdev, b)); | 494 | dm_device_name(ti->table->md), bdevname(bdev, b)); |
502 | return; | 495 | return 0; |
503 | } | 496 | } |
504 | 497 | ||
505 | /* | 498 | if (blk_stack_limits(limits, &q->limits, start) < 0) |
506 | * Combine the device limits low. | 499 | DMWARN("%s: target device %s is misaligned", |
507 | * | 500 | dm_device_name(ti->table->md), bdevname(bdev, b)); |
508 | * FIXME: if we move an io_restriction struct | ||
509 | * into q this would just be a call to | ||
510 | * combine_restrictions_low() | ||
511 | */ | ||
512 | rs->max_sectors = | ||
513 | min_not_zero(rs->max_sectors, queue_max_sectors(q)); | ||
514 | 501 | ||
515 | /* | 502 | /* |
516 | * Check if merge fn is supported. | 503 | * Check if merge fn is supported. |
@@ -519,48 +506,21 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) | |||
519 | */ | 506 | */ |
520 | 507 | ||
521 | if (q->merge_bvec_fn && !ti->type->merge) | 508 | if (q->merge_bvec_fn && !ti->type->merge) |
522 | rs->max_sectors = | 509 | limits->max_sectors = |
523 | min_not_zero(rs->max_sectors, | 510 | min_not_zero(limits->max_sectors, |
524 | (unsigned int) (PAGE_SIZE >> 9)); | 511 | (unsigned int) (PAGE_SIZE >> 9)); |
525 | 512 | return 0; | |
526 | rs->max_phys_segments = | ||
527 | min_not_zero(rs->max_phys_segments, | ||
528 | queue_max_phys_segments(q)); | ||
529 | |||
530 | rs->max_hw_segments = | ||
531 | min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q)); | ||
532 | |||
533 | rs->logical_block_size = max(rs->logical_block_size, | ||
534 | queue_logical_block_size(q)); | ||
535 | |||
536 | rs->max_segment_size = | ||
537 | min_not_zero(rs->max_segment_size, queue_max_segment_size(q)); | ||
538 | |||
539 | rs->max_hw_sectors = | ||
540 | min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q)); | ||
541 | |||
542 | rs->seg_boundary_mask = | ||
543 | min_not_zero(rs->seg_boundary_mask, | ||
544 | queue_segment_boundary(q)); | ||
545 | |||
546 | rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q)); | ||
547 | |||
548 | rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); | ||
549 | } | 513 | } |
550 | EXPORT_SYMBOL_GPL(dm_set_device_limits); | 514 | EXPORT_SYMBOL_GPL(dm_set_device_limits); |
551 | 515 | ||
552 | int dm_get_device(struct dm_target *ti, const char *path, sector_t start, | 516 | int dm_get_device(struct dm_target *ti, const char *path, sector_t start, |
553 | sector_t len, fmode_t mode, struct dm_dev **result) | 517 | sector_t len, fmode_t mode, struct dm_dev **result) |
554 | { | 518 | { |
555 | int r = __table_get_device(ti->table, ti, path, | 519 | return __table_get_device(ti->table, ti, path, |
556 | start, len, mode, result); | 520 | start, len, mode, result); |
557 | |||
558 | if (!r) | ||
559 | dm_set_device_limits(ti, (*result)->bdev); | ||
560 | |||
561 | return r; | ||
562 | } | 521 | } |
563 | 522 | ||
523 | |||
564 | /* | 524 | /* |
565 | * Decrement a devices use count and remove it if necessary. | 525 | * Decrement a devices use count and remove it if necessary. |
566 | */ | 526 | */ |
@@ -675,24 +635,78 @@ int dm_split_args(int *argc, char ***argvp, char *input) | |||
675 | return 0; | 635 | return 0; |
676 | } | 636 | } |
677 | 637 | ||
678 | static void check_for_valid_limits(struct io_restrictions *rs) | 638 | /* |
639 | * Impose necessary and sufficient conditions on a devices's table such | ||
640 | * that any incoming bio which respects its logical_block_size can be | ||
641 | * processed successfully. If it falls across the boundary between | ||
642 | * two or more targets, the size of each piece it gets split into must | ||
643 | * be compatible with the logical_block_size of the target processing it. | ||
644 | */ | ||
645 | static int validate_hardware_logical_block_alignment(struct dm_table *table, | ||
646 | struct queue_limits *limits) | ||
679 | { | 647 | { |
680 | if (!rs->max_sectors) | 648 | /* |
681 | rs->max_sectors = SAFE_MAX_SECTORS; | 649 | * This function uses arithmetic modulo the logical_block_size |
682 | if (!rs->max_hw_sectors) | 650 | * (in units of 512-byte sectors). |
683 | rs->max_hw_sectors = SAFE_MAX_SECTORS; | 651 | */ |
684 | if (!rs->max_phys_segments) | 652 | unsigned short device_logical_block_size_sects = |
685 | rs->max_phys_segments = MAX_PHYS_SEGMENTS; | 653 | limits->logical_block_size >> SECTOR_SHIFT; |
686 | if (!rs->max_hw_segments) | 654 | |
687 | rs->max_hw_segments = MAX_HW_SEGMENTS; | 655 | /* |
688 | if (!rs->logical_block_size) | 656 | * Offset of the start of the next table entry, mod logical_block_size. |
689 | rs->logical_block_size = 1 << SECTOR_SHIFT; | 657 | */ |
690 | if (!rs->max_segment_size) | 658 | unsigned short next_target_start = 0; |
691 | rs->max_segment_size = MAX_SEGMENT_SIZE; | 659 | |
692 | if (!rs->seg_boundary_mask) | 660 | /* |
693 | rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; | 661 | * Given an aligned bio that extends beyond the end of a |
694 | if (!rs->bounce_pfn) | 662 | * target, how many sectors must the next target handle? |
695 | rs->bounce_pfn = -1; | 663 | */ |
664 | unsigned short remaining = 0; | ||
665 | |||
666 | struct dm_target *uninitialized_var(ti); | ||
667 | struct queue_limits ti_limits; | ||
668 | unsigned i = 0; | ||
669 | |||
670 | /* | ||
671 | * Check each entry in the table in turn. | ||
672 | */ | ||
673 | while (i < dm_table_get_num_targets(table)) { | ||
674 | ti = dm_table_get_target(table, i++); | ||
675 | |||
676 | blk_set_default_limits(&ti_limits); | ||
677 | |||
678 | /* combine all target devices' limits */ | ||
679 | if (ti->type->iterate_devices) | ||
680 | ti->type->iterate_devices(ti, dm_set_device_limits, | ||
681 | &ti_limits); | ||
682 | |||
683 | /* | ||
684 | * If the remaining sectors fall entirely within this | ||
685 | * table entry are they compatible with its logical_block_size? | ||
686 | */ | ||
687 | if (remaining < ti->len && | ||
688 | remaining & ((ti_limits.logical_block_size >> | ||
689 | SECTOR_SHIFT) - 1)) | ||
690 | break; /* Error */ | ||
691 | |||
692 | next_target_start = | ||
693 | (unsigned short) ((next_target_start + ti->len) & | ||
694 | (device_logical_block_size_sects - 1)); | ||
695 | remaining = next_target_start ? | ||
696 | device_logical_block_size_sects - next_target_start : 0; | ||
697 | } | ||
698 | |||
699 | if (remaining) { | ||
700 | DMWARN("%s: table line %u (start sect %llu len %llu) " | ||
701 | "not aligned to h/w logical block size %hu", | ||
702 | dm_device_name(table->md), i, | ||
703 | (unsigned long long) ti->begin, | ||
704 | (unsigned long long) ti->len, | ||
705 | limits->logical_block_size); | ||
706 | return -EINVAL; | ||
707 | } | ||
708 | |||
709 | return 0; | ||
696 | } | 710 | } |
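
A worked example of the modular arithmetic above, assuming a single 4096-byte logical block size (8 sectors) shared by the device and every target, with invented target lengths:

#include <stdio.h>

int main(void)
{
	unsigned short lbs = 8;	/* logical block size in 512-byte sectors */
	unsigned long long len[] = { 1024, 2048, 1001 };	/* invented */
	unsigned short next_start = 0, remaining = 0;
	unsigned i, n = sizeof(len) / sizeof(len[0]);

	for (i = 0; i < n; i++) {
		/* sectors carried over from the previous boundary must
		 * form whole logical blocks within this target */
		if (remaining < len[i] && (remaining & (lbs - 1)))
			break;
		next_start = (next_start + len[i]) & (lbs - 1);
		remaining = next_start ? lbs - next_start : 0;
	}

	if (remaining)
		printf("misaligned: boundary %u sector(s) short of a block\n",
		       remaining);
	else
		printf("table aligned\n");
	return 0;
}

The third entry (1001 sectors) leaves the final boundary one sector past a block, so the model reports 7 sectors remaining, just as the kernel function would warn and return -EINVAL.
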
697 | 711 | ||
698 | int dm_table_add_target(struct dm_table *t, const char *type, | 712 | int dm_table_add_target(struct dm_table *t, const char *type, |
@@ -747,9 +761,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
747 | 761 | ||
748 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; | 762 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; |
749 | 763 | ||
750 | /* FIXME: the plan is to combine high here and then have | ||
751 | * the merge fn apply the target level restrictions. */ | ||
752 | combine_restrictions_low(&t->limits, &tgt->limits); | ||
753 | return 0; | 764 | return 0; |
754 | 765 | ||
755 | bad: | 766 | bad: |
@@ -758,6 +769,104 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
758 | return r; | 769 | return r; |
759 | } | 770 | } |
760 | 771 | ||
772 | int dm_table_set_type(struct dm_table *t) | ||
773 | { | ||
774 | unsigned i; | ||
775 | unsigned bio_based = 0, request_based = 0; | ||
776 | struct dm_target *tgt; | ||
777 | struct dm_dev_internal *dd; | ||
778 | struct list_head *devices; | ||
779 | |||
780 | for (i = 0; i < t->num_targets; i++) { | ||
781 | tgt = t->targets + i; | ||
782 | if (dm_target_request_based(tgt)) | ||
783 | request_based = 1; | ||
784 | else | ||
785 | bio_based = 1; | ||
786 | |||
787 | if (bio_based && request_based) { | ||
788 | DMWARN("Inconsistent table: different target types" | ||
789 | " can't be mixed up"); | ||
790 | return -EINVAL; | ||
791 | } | ||
792 | } | ||
793 | |||
794 | if (bio_based) { | ||
795 | /* We must use this table as bio-based */ | ||
796 | t->type = DM_TYPE_BIO_BASED; | ||
797 | return 0; | ||
798 | } | ||
799 | |||
800 | BUG_ON(!request_based); /* No targets in this table */ | ||
801 | |||
802 | /* Non-request-stackable devices can't be used for request-based dm */ | ||
803 | devices = dm_table_get_devices(t); | ||
804 | list_for_each_entry(dd, devices, list) { | ||
805 | if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) { | ||
806 | DMWARN("table load rejected: including" | ||
807 | " non-request-stackable devices"); | ||
808 | return -EINVAL; | ||
809 | } | ||
810 | } | ||
811 | |||
812 | /* | ||
813 | * Request-based dm supports only tables that have a single target now. | ||
814 | * To support multiple targets, request splitting support is needed, | ||
815 | * and that needs lots of changes in the block-layer. | ||
816 | * (e.g. request completion process for partial completion.) | ||
817 | */ | ||
818 | if (t->num_targets > 1) { | ||
819 | DMWARN("Request-based dm doesn't support multiple targets yet"); | ||
820 | return -EINVAL; | ||
821 | } | ||
822 | |||
823 | t->type = DM_TYPE_REQUEST_BASED; | ||
824 | |||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | unsigned dm_table_get_type(struct dm_table *t) | ||
829 | { | ||
830 | return t->type; | ||
831 | } | ||
832 | |||
833 | bool dm_table_bio_based(struct dm_table *t) | ||
834 | { | ||
835 | return dm_table_get_type(t) == DM_TYPE_BIO_BASED; | ||
836 | } | ||
837 | |||
838 | bool dm_table_request_based(struct dm_table *t) | ||
839 | { | ||
840 | return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; | ||
841 | } | ||
842 | |||
843 | int dm_table_alloc_md_mempools(struct dm_table *t) | ||
844 | { | ||
845 | unsigned type = dm_table_get_type(t); | ||
846 | |||
847 | if (unlikely(type == DM_TYPE_NONE)) { | ||
848 | DMWARN("no table type is set, can't allocate mempools"); | ||
849 | return -EINVAL; | ||
850 | } | ||
851 | |||
852 | t->mempools = dm_alloc_md_mempools(type); | ||
853 | if (!t->mempools) | ||
854 | return -ENOMEM; | ||
855 | |||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | void dm_table_free_md_mempools(struct dm_table *t) | ||
860 | { | ||
861 | dm_free_md_mempools(t->mempools); | ||
862 | t->mempools = NULL; | ||
863 | } | ||
864 | |||
865 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) | ||
866 | { | ||
867 | return t->mempools; | ||
868 | } | ||
869 | |||
761 | static int setup_indexes(struct dm_table *t) | 870 | static int setup_indexes(struct dm_table *t) |
762 | { | 871 | { |
763 | int i; | 872 | int i; |
@@ -792,8 +901,6 @@ int dm_table_complete(struct dm_table *t) | |||
792 | int r = 0; | 901 | int r = 0; |
793 | unsigned int leaf_nodes; | 902 | unsigned int leaf_nodes; |
794 | 903 | ||
795 | check_for_valid_limits(&t->limits); | ||
796 | |||
797 | /* how many indexes will the btree have ? */ | 904 | /* how many indexes will the btree have ? */ |
798 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); | 905 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); |
799 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); | 906 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); |
@@ -869,6 +976,57 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) | |||
869 | } | 976 | } |
870 | 977 | ||
871 | /* | 978 | /* |
979 | * Establish the new table's queue_limits and validate them. | ||
980 | */ | ||
981 | int dm_calculate_queue_limits(struct dm_table *table, | ||
982 | struct queue_limits *limits) | ||
983 | { | ||
984 | struct dm_target *uninitialized_var(ti); | ||
985 | struct queue_limits ti_limits; | ||
986 | unsigned i = 0; | ||
987 | |||
988 | blk_set_default_limits(limits); | ||
989 | |||
990 | while (i < dm_table_get_num_targets(table)) { | ||
991 | blk_set_default_limits(&ti_limits); | ||
992 | |||
993 | ti = dm_table_get_target(table, i++); | ||
994 | |||
995 | if (!ti->type->iterate_devices) | ||
996 | goto combine_limits; | ||
997 | |||
998 | /* | ||
999 | * Combine queue limits of all the devices this target uses. | ||
1000 | */ | ||
1001 | ti->type->iterate_devices(ti, dm_set_device_limits, | ||
1002 | &ti_limits); | ||
1003 | |||
1004 | /* | ||
1005 | * Check each device area is consistent with the target's | ||
1006 | * overall queue limits. | ||
1007 | */ | ||
1008 | if (!ti->type->iterate_devices(ti, device_area_is_valid, | ||
1009 | &ti_limits)) | ||
1010 | return -EINVAL; | ||
1011 | |||
1012 | combine_limits: | ||
1013 | /* | ||
1014 | * Merge this target's queue limits into the overall limits | ||
1015 | * for the table. | ||
1016 | */ | ||
1017 | if (blk_stack_limits(limits, &ti_limits, 0) < 0) | ||
1018 | DMWARN("%s: target device " | ||
1019 | "(start sect %llu len %llu) " | ||
1020 | "is misaligned", | ||
1021 | dm_device_name(table->md), | ||
1022 | (unsigned long long) ti->begin, | ||
1023 | (unsigned long long) ti->len); | ||
1024 | } | ||
1025 | |||
1026 | return validate_hardware_logical_block_alignment(table, limits); | ||
1027 | } | ||
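
blk_stack_limits() merges two queue_limits structures in essentially the "minimum that is not zero" fashion the removed combine_restrictions_low() used. A reduced userspace model with two illustrative fields:

#include <stdio.h>

struct limits {
	unsigned max_sectors;
	unsigned max_segment_size;	/* 0 means "no restriction yet" */
};

static unsigned min_not_zero(unsigned l, unsigned r)
{
	if (!l)
		return r;
	if (!r)
		return l;
	return l < r ? l : r;
}

static void stack(struct limits *top, const struct limits *bottom)
{
	top->max_sectors = min_not_zero(top->max_sectors,
					bottom->max_sectors);
	top->max_segment_size = min_not_zero(top->max_segment_size,
					     bottom->max_segment_size);
}

int main(void)
{
	struct limits table = { 0, 0 };		/* unrestricted defaults */
	struct limits sda = { 1024, 65536 };	/* invented device limits */
	struct limits sdb = { 240, 0 };

	stack(&table, &sda);
	stack(&table, &sdb);
	printf("max_sectors=%u max_segment_size=%u\n",
	       table.max_sectors, table.max_segment_size);	/* 240 65536 */
	return 0;
}
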
1028 | |||
1029 | /* | ||
872 | * Set the integrity profile for this device if all devices used have | 1030 | * Set the integrity profile for this device if all devices used have |
873 | * matching profiles. | 1031 | * matching profiles. |
874 | */ | 1032 | */ |
@@ -907,27 +1065,42 @@ no_integrity: | |||
907 | return; | 1065 | return; |
908 | } | 1066 | } |
909 | 1067 | ||
910 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) | 1068 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1069 | struct queue_limits *limits) | ||
911 | { | 1070 | { |
912 | /* | 1071 | /* |
913 | * Make sure we obey the optimistic sub devices | 1072 | * Each target device in the table has a data area that should normally |
914 | * restrictions. | 1073 | * be aligned such that the DM device's alignment_offset is 0. |
1074 | * FIXME: Propagate alignment_offsets up the stack and warn of | ||
1075 | * sub-optimal or inconsistent settings. | ||
1076 | */ | ||
1077 | limits->alignment_offset = 0; | ||
1078 | limits->misaligned = 0; | ||
1079 | |||
1080 | /* | ||
1081 | * Copy table's limits to the DM device's request_queue | ||
915 | */ | 1082 | */ |
916 | blk_queue_max_sectors(q, t->limits.max_sectors); | 1083 | q->limits = *limits; |
917 | blk_queue_max_phys_segments(q, t->limits.max_phys_segments); | 1084 | |
918 | blk_queue_max_hw_segments(q, t->limits.max_hw_segments); | 1085 | if (limits->no_cluster) |
919 | blk_queue_logical_block_size(q, t->limits.logical_block_size); | ||
920 | blk_queue_max_segment_size(q, t->limits.max_segment_size); | ||
921 | blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors); | ||
922 | blk_queue_segment_boundary(q, t->limits.seg_boundary_mask); | ||
923 | blk_queue_bounce_limit(q, t->limits.bounce_pfn); | ||
924 | |||
925 | if (t->limits.no_cluster) | ||
926 | queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); | 1086 | queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); |
927 | else | 1087 | else |
928 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); | 1088 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); |
929 | 1089 | ||
930 | dm_table_set_integrity(t); | 1090 | dm_table_set_integrity(t); |
1091 | |||
1092 | /* | ||
1093 | * QUEUE_FLAG_STACKABLE must be set after all queue settings are | ||
1094 | * visible to other CPUs because, once the flag is set, incoming bios | ||
1095 | * are processed by request-based dm, which refers to the queue | ||
1096 | * settings. | ||
1097 | * Until the flag set, bios are passed to bio-based dm and queued to | ||
1098 | * md->deferred where queue settings are not needed yet. | ||
1099 | * Those bios are passed to request-based dm at the resume time. | ||
1100 | */ | ||
1101 | smp_mb(); | ||
1102 | if (dm_table_request_based(t)) | ||
1103 | queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); | ||
931 | } | 1104 | } |
932 | 1105 | ||
933 | unsigned int dm_table_get_num_targets(struct dm_table *t) | 1106 | unsigned int dm_table_get_num_targets(struct dm_table *t) |
@@ -1023,6 +1196,20 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) | |||
1023 | return r; | 1196 | return r; |
1024 | } | 1197 | } |
1025 | 1198 | ||
1199 | int dm_table_any_busy_target(struct dm_table *t) | ||
1200 | { | ||
1201 | unsigned i; | ||
1202 | struct dm_target *ti; | ||
1203 | |||
1204 | for (i = 0; i < t->num_targets; i++) { | ||
1205 | ti = t->targets + i; | ||
1206 | if (ti->type->busy && ti->type->busy(ti)) | ||
1207 | return 1; | ||
1208 | } | ||
1209 | |||
1210 | return 0; | ||
1211 | } | ||
1212 | |||
1026 | void dm_table_unplug_all(struct dm_table *t) | 1213 | void dm_table_unplug_all(struct dm_table *t) |
1027 | { | 1214 | { |
1028 | struct dm_dev_internal *dd; | 1215 | struct dm_dev_internal *dd; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 48db308fae67..3c6d4ee8921d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,13 @@ | |||
24 | 24 | ||
25 | #define DM_MSG_PREFIX "core" | 25 | #define DM_MSG_PREFIX "core" |
26 | 26 | ||
27 | /* | ||
28 | * Cookies are numeric values sent with CHANGE and REMOVE | ||
29 | * uevents while resuming, removing or renaming the device. | ||
30 | */ | ||
31 | #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" | ||
32 | #define DM_COOKIE_LENGTH 24 | ||
33 | |||
27 | static const char *_name = DM_NAME; | 34 | static const char *_name = DM_NAME; |
28 | 35 | ||
29 | static unsigned int major = 0; | 36 | static unsigned int major = 0; |
@@ -71,7 +78,7 @@ struct dm_rq_target_io { | |||
71 | */ | 78 | */ |
72 | struct dm_rq_clone_bio_info { | 79 | struct dm_rq_clone_bio_info { |
73 | struct bio *orig; | 80 | struct bio *orig; |
74 | struct request *rq; | 81 | struct dm_rq_target_io *tio; |
75 | }; | 82 | }; |
76 | 83 | ||
77 | union map_info *dm_get_mapinfo(struct bio *bio) | 84 | union map_info *dm_get_mapinfo(struct bio *bio) |
@@ -81,6 +88,14 @@ union map_info *dm_get_mapinfo(struct bio *bio) | |||
81 | return NULL; | 88 | return NULL; |
82 | } | 89 | } |
83 | 90 | ||
91 | union map_info *dm_get_rq_mapinfo(struct request *rq) | ||
92 | { | ||
93 | if (rq && rq->end_io_data) | ||
94 | return &((struct dm_rq_target_io *)rq->end_io_data)->info; | ||
95 | return NULL; | ||
96 | } | ||
97 | EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); | ||
98 | |||
84 | #define MINOR_ALLOCED ((void *)-1) | 99 | #define MINOR_ALLOCED ((void *)-1) |
85 | 100 | ||
86 | /* | 101 | /* |
@@ -157,13 +172,31 @@ struct mapped_device { | |||
157 | * freeze/thaw support require holding onto a super block | 172 | * freeze/thaw support require holding onto a super block |
158 | */ | 173 | */ |
159 | struct super_block *frozen_sb; | 174 | struct super_block *frozen_sb; |
160 | struct block_device *suspended_bdev; | 175 | struct block_device *bdev; |
161 | 176 | ||
162 | /* forced geometry settings */ | 177 | /* forced geometry settings */ |
163 | struct hd_geometry geometry; | 178 | struct hd_geometry geometry; |
164 | 179 | ||
180 | /* marker of flush suspend for request-based dm */ | ||
181 | struct request suspend_rq; | ||
182 | |||
183 | /* For saving the address of __make_request for request based dm */ | ||
184 | make_request_fn *saved_make_request_fn; | ||
185 | |||
165 | /* sysfs handle */ | 186 | /* sysfs handle */ |
166 | struct kobject kobj; | 187 | struct kobject kobj; |
188 | |||
189 | /* zero-length barrier that will be cloned and submitted to targets */ | ||
190 | struct bio barrier_bio; | ||
191 | }; | ||
192 | |||
193 | /* | ||
194 | * For mempools pre-allocation at the table loading time. | ||
195 | */ | ||
196 | struct dm_md_mempools { | ||
197 | mempool_t *io_pool; | ||
198 | mempool_t *tio_pool; | ||
199 | struct bio_set *bs; | ||
167 | }; | 200 | }; |
168 | 201 | ||
169 | #define MIN_IOS 256 | 202 | #define MIN_IOS 256 |
@@ -391,14 +424,29 @@ static void free_io(struct mapped_device *md, struct dm_io *io) | |||
391 | mempool_free(io, md->io_pool); | 424 | mempool_free(io, md->io_pool); |
392 | } | 425 | } |
393 | 426 | ||
394 | static struct dm_target_io *alloc_tio(struct mapped_device *md) | 427 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) |
395 | { | 428 | { |
396 | return mempool_alloc(md->tio_pool, GFP_NOIO); | 429 | mempool_free(tio, md->tio_pool); |
397 | } | 430 | } |
398 | 431 | ||
399 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) | 432 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md) |
400 | { | 433 | { |
401 | mempool_free(tio, md->tio_pool); | 434 | return mempool_alloc(md->tio_pool, GFP_ATOMIC); |
435 | } | ||
436 | |||
437 | static void free_rq_tio(struct dm_rq_target_io *tio) | ||
438 | { | ||
439 | mempool_free(tio, tio->md->tio_pool); | ||
440 | } | ||
441 | |||
442 | static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md) | ||
443 | { | ||
444 | return mempool_alloc(md->io_pool, GFP_ATOMIC); | ||
445 | } | ||
446 | |||
447 | static void free_bio_info(struct dm_rq_clone_bio_info *info) | ||
448 | { | ||
449 | mempool_free(info, info->tio->md->io_pool); | ||
402 | } | 450 | } |
403 | 451 | ||
404 | static void start_io_acct(struct dm_io *io) | 452 | static void start_io_acct(struct dm_io *io) |
@@ -464,12 +512,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio) | |||
464 | struct dm_table *dm_get_table(struct mapped_device *md) | 512 | struct dm_table *dm_get_table(struct mapped_device *md) |
465 | { | 513 | { |
466 | struct dm_table *t; | 514 | struct dm_table *t; |
515 | unsigned long flags; | ||
467 | 516 | ||
468 | read_lock(&md->map_lock); | 517 | read_lock_irqsave(&md->map_lock, flags); |
469 | t = md->map; | 518 | t = md->map; |
470 | if (t) | 519 | if (t) |
471 | dm_table_get(t); | 520 | dm_table_get(t); |
472 | read_unlock(&md->map_lock); | 521 | read_unlock_irqrestore(&md->map_lock, flags); |
473 | 522 | ||
474 | return t; | 523 | return t; |
475 | } | 524 | } |
@@ -536,9 +585,11 @@ static void dec_pending(struct dm_io *io, int error) | |||
536 | * Target requested pushing back the I/O. | 585 | * Target requested pushing back the I/O. |
537 | */ | 586 | */ |
538 | spin_lock_irqsave(&md->deferred_lock, flags); | 587 | spin_lock_irqsave(&md->deferred_lock, flags); |
539 | if (__noflush_suspending(md)) | 588 | if (__noflush_suspending(md)) { |
540 | bio_list_add_head(&md->deferred, io->bio); | 589 | if (!bio_barrier(io->bio)) |
541 | else | 590 | bio_list_add_head(&md->deferred, |
591 | io->bio); | ||
592 | } else | ||
542 | /* noflush suspend was interrupted. */ | 593 | /* noflush suspend was interrupted. */ |
543 | io->error = -EIO; | 594 | io->error = -EIO; |
544 | spin_unlock_irqrestore(&md->deferred_lock, flags); | 595 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
@@ -553,7 +604,8 @@ static void dec_pending(struct dm_io *io, int error) | |||
553 | * a per-device variable for error reporting. | 604 | * a per-device variable for error reporting. |
554 | * Note that you can't touch the bio after end_io_acct | 605 | * Note that you can't touch the bio after end_io_acct |
555 | */ | 606 | */ |
556 | md->barrier_error = io_error; | 607 | if (!md->barrier_error && io_error != -EOPNOTSUPP) |
608 | md->barrier_error = io_error; | ||
557 | end_io_acct(io); | 609 | end_io_acct(io); |
558 | } else { | 610 | } else { |
559 | end_io_acct(io); | 611 | end_io_acct(io); |
@@ -607,6 +659,262 @@ static void clone_endio(struct bio *bio, int error) | |||
607 | dec_pending(io, error); | 659 | dec_pending(io, error); |
608 | } | 660 | } |
609 | 661 | ||
662 | /* | ||
663 | * Partial completion handling for request-based dm | ||
664 | */ | ||
665 | static void end_clone_bio(struct bio *clone, int error) | ||
666 | { | ||
667 | struct dm_rq_clone_bio_info *info = clone->bi_private; | ||
668 | struct dm_rq_target_io *tio = info->tio; | ||
669 | struct bio *bio = info->orig; | ||
670 | unsigned int nr_bytes = info->orig->bi_size; | ||
671 | |||
672 | bio_put(clone); | ||
673 | |||
674 | if (tio->error) | ||
675 | /* | ||
676 | * An error has already been detected on the request. | ||
677 | * Once error occurred, just let clone->end_io() handle | ||
678 | * the remainder. | ||
679 | */ | ||
680 | return; | ||
681 | else if (error) { | ||
682 | /* | ||
683 | * Don't notice the error to the upper layer yet. | ||
684 | * The error handling decision is made by the target driver, | ||
685 | * when the request is completed. | ||
686 | */ | ||
687 | tio->error = error; | ||
688 | return; | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * I/O for the bio successfully completed. | ||
693 | * Notice the data completion to the upper layer. | ||
694 | */ | ||
695 | |||
696 | /* | ||
697 | * bios are processed from the head of the list. | ||
698 | * So the completing bio should always be rq->bio. | ||
699 | * If it's not, something wrong is happening. | ||
700 | */ | ||
701 | if (tio->orig->bio != bio) | ||
702 | DMERR("bio completion is going in the middle of the request"); | ||
703 | |||
704 | /* | ||
705 | * Update the original request. | ||
706 | * Do not use blk_end_request() here, because it may complete | ||
707 | * the original request before the clone, and break the ordering. | ||
708 | */ | ||
709 | blk_update_request(tio->orig, 0, nr_bytes); | ||
710 | } | ||
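
The deferral rule above fits in a few lines; this userspace model (invented struct, -5 standing in for -EIO) records only the first failure and stops reporting progress afterwards, leaving the final verdict to the request-level completion:

#include <stdio.h>

struct rq_model {
	unsigned completed;	/* bytes acknowledged upward */
	int error;		/* first error seen, decided later */
};

static void end_clone_bio_model(struct rq_model *rq,
				unsigned nr_bytes, int error)
{
	if (rq->error)
		return;			/* already failed: let end_io decide */
	if (error) {
		rq->error = error;	/* remember, don't report yet */
		return;
	}
	rq->completed += nr_bytes;	/* partial completion flows upward */
}

int main(void)
{
	struct rq_model rq = { 0, 0 };

	end_clone_bio_model(&rq, 4096, 0);
	end_clone_bio_model(&rq, 4096, -5);	/* -EIO */
	end_clone_bio_model(&rq, 4096, 0);	/* ignored: request failed */
	printf("completed=%u error=%d\n", rq.completed, rq.error);
	return 0;
}
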
711 | |||
712 | /* | ||
713 | * Don't touch any member of the md after calling this function because | ||
714 | * the md may be freed in dm_put() at the end of this function. | ||
715 | * Or do dm_get() before calling this function and dm_put() later. | ||
716 | */ | ||
717 | static void rq_completed(struct mapped_device *md, int run_queue) | ||
718 | { | ||
719 | int wakeup_waiters = 0; | ||
720 | struct request_queue *q = md->queue; | ||
721 | unsigned long flags; | ||
722 | |||
723 | spin_lock_irqsave(q->queue_lock, flags); | ||
724 | if (!queue_in_flight(q)) | ||
725 | wakeup_waiters = 1; | ||
726 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
727 | |||
728 | /* nudge anyone waiting on suspend queue */ | ||
729 | if (wakeup_waiters) | ||
730 | wake_up(&md->wait); | ||
731 | |||
732 | if (run_queue) | ||
733 | blk_run_queue(q); | ||
734 | |||
735 | /* | ||
736 | * dm_put() must be at the end of this function. See the comment above | ||
737 | */ | ||
738 | dm_put(md); | ||
739 | } | ||
740 | |||
741 | static void dm_unprep_request(struct request *rq) | ||
742 | { | ||
743 | struct request *clone = rq->special; | ||
744 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
745 | |||
746 | rq->special = NULL; | ||
747 | rq->cmd_flags &= ~REQ_DONTPREP; | ||
748 | |||
749 | blk_rq_unprep_clone(clone); | ||
750 | free_rq_tio(tio); | ||
751 | } | ||
752 | |||
753 | /* | ||
754 | * Requeue the original request of a clone. | ||
755 | */ | ||
756 | void dm_requeue_unmapped_request(struct request *clone) | ||
757 | { | ||
758 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
759 | struct mapped_device *md = tio->md; | ||
760 | struct request *rq = tio->orig; | ||
761 | struct request_queue *q = rq->q; | ||
762 | unsigned long flags; | ||
763 | |||
764 | dm_unprep_request(rq); | ||
765 | |||
766 | spin_lock_irqsave(q->queue_lock, flags); | ||
767 | if (elv_queue_empty(q)) | ||
768 | blk_plug_device(q); | ||
769 | blk_requeue_request(q, rq); | ||
770 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
771 | |||
772 | rq_completed(md, 0); | ||
773 | } | ||
774 | EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); | ||
775 | |||
776 | static void __stop_queue(struct request_queue *q) | ||
777 | { | ||
778 | blk_stop_queue(q); | ||
779 | } | ||
780 | |||
781 | static void stop_queue(struct request_queue *q) | ||
782 | { | ||
783 | unsigned long flags; | ||
784 | |||
785 | spin_lock_irqsave(q->queue_lock, flags); | ||
786 | __stop_queue(q); | ||
787 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
788 | } | ||
789 | |||
790 | static void __start_queue(struct request_queue *q) | ||
791 | { | ||
792 | if (blk_queue_stopped(q)) | ||
793 | blk_start_queue(q); | ||
794 | } | ||
795 | |||
796 | static void start_queue(struct request_queue *q) | ||
797 | { | ||
798 | unsigned long flags; | ||
799 | |||
800 | spin_lock_irqsave(q->queue_lock, flags); | ||
801 | __start_queue(q); | ||
802 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
803 | } | ||
804 | |||
805 | /* | ||
806 | * Complete the clone and the original request. | ||
807 | * Must be called without queue lock. | ||
808 | */ | ||
809 | static void dm_end_request(struct request *clone, int error) | ||
810 | { | ||
811 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
812 | struct mapped_device *md = tio->md; | ||
813 | struct request *rq = tio->orig; | ||
814 | |||
815 | if (blk_pc_request(rq)) { | ||
816 | rq->errors = clone->errors; | ||
817 | rq->resid_len = clone->resid_len; | ||
818 | |||
819 | if (rq->sense) | ||
820 | /* | ||
821 | * We are using the sense buffer of the original | ||
822 | * request. | ||
823 | * So setting the length of the sense data is enough. | ||
824 | */ | ||
825 | rq->sense_len = clone->sense_len; | ||
826 | } | ||
827 | |||
828 | BUG_ON(clone->bio); | ||
829 | free_rq_tio(tio); | ||
830 | |||
831 | blk_end_request_all(rq, error); | ||
832 | |||
833 | rq_completed(md, 1); | ||
834 | } | ||
835 | |||
836 | /* | ||
837 | * Request completion handler for request-based dm | ||
838 | */ | ||
839 | static void dm_softirq_done(struct request *rq) | ||
840 | { | ||
841 | struct request *clone = rq->completion_data; | ||
842 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
843 | dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; | ||
844 | int error = tio->error; | ||
845 | |||
846 | if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io) | ||
847 | error = rq_end_io(tio->ti, clone, error, &tio->info); | ||
848 | |||
849 | if (error <= 0) | ||
850 | /* The target wants to complete the I/O */ | ||
851 | dm_end_request(clone, error); | ||
852 | else if (error == DM_ENDIO_INCOMPLETE) | ||
853 | /* The target will handle the I/O */ | ||
854 | return; | ||
855 | else if (error == DM_ENDIO_REQUEUE) | ||
856 | /* The target wants to requeue the I/O */ | ||
857 | dm_requeue_unmapped_request(clone); | ||
858 | else { | ||
859 | DMWARN("unimplemented target endio return value: %d", error); | ||
860 | BUG(); | ||
861 | } | ||
862 | } | ||
863 | |||
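dm_softirq_done() gives the target the final say over each clone through its optional rq_end_io hook. A hedged sketch of such a hook, matching the call made above (the example_* name and the -EBUSY condition are illustrative, not from this patch):

    static int example_rq_end_io(struct dm_target *ti, struct request *clone,
                                 int error, union map_info *map_context)
    {
            if (error == -EBUSY)               /* assumed transient condition */
                    return DM_ENDIO_REQUEUE;   /* dm_softirq_done() requeues */

            return error;                      /* <= 0: complete with this status */
    }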
864 | /* | ||
865 | * Complete the clone and the original request with the error status | ||
866 | * through softirq context. | ||
867 | */ | ||
868 | static void dm_complete_request(struct request *clone, int error) | ||
869 | { | ||
870 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
871 | struct request *rq = tio->orig; | ||
872 | |||
873 | tio->error = error; | ||
874 | rq->completion_data = clone; | ||
875 | blk_complete_request(rq); | ||
876 | } | ||
877 | |||
878 | /* | ||
879 | * Complete the unmapped clone and the original request with the error status | ||
880 | * through softirq context. | ||
881 | * The target's rq_end_io() function isn't called. | ||
882 | * This may be used when the target's map_rq() function fails. | ||
883 | */ | ||
884 | void dm_kill_unmapped_request(struct request *clone, int error) | ||
885 | { | ||
886 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
887 | struct request *rq = tio->orig; | ||
888 | |||
889 | rq->cmd_flags |= REQ_FAILED; | ||
890 | dm_complete_request(clone, error); | ||
891 | } | ||
892 | EXPORT_SYMBOL_GPL(dm_kill_unmapped_request); | ||
893 | |||
894 | /* | ||
895 | * Called with the queue lock held | ||
896 | */ | ||
897 | static void end_clone_request(struct request *clone, int error) | ||
898 | { | ||
899 | /* | ||
900 | * This only cleans up the accounting of the queue in which | ||
901 | * the clone was dispatched. | ||
902 | * The clone is *NOT* actually freed here, because it was allocated from | ||
903 | * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags. | ||
904 | */ | ||
905 | __blk_put_request(clone->q, clone); | ||
906 | |||
907 | /* | ||
908 | * Actual request completion is done in a softirq context which doesn't | ||
909 | * hold the queue lock. Otherwise, a deadlock could occur because: | ||
910 | * - the upper-level driver of the stack may submit another request | ||
911 | * during the completion, and | ||
912 | * - that submission, which requires the queue lock, may be made | ||
913 | * against this very queue. | ||
914 | */ | ||
915 | dm_complete_request(clone, error); | ||
916 | } | ||
917 | |||
610 | static sector_t max_io_len(struct mapped_device *md, | 918 | static sector_t max_io_len(struct mapped_device *md, |
611 | sector_t sector, struct dm_target *ti) | 919 | sector_t sector, struct dm_target *ti) |
612 | { | 920 | { |
@@ -634,11 +942,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, | |||
634 | sector_t sector; | 942 | sector_t sector; |
635 | struct mapped_device *md; | 943 | struct mapped_device *md; |
636 | 944 | ||
637 | /* | ||
638 | * Sanity checks. | ||
639 | */ | ||
640 | BUG_ON(!clone->bi_size); | ||
641 | |||
642 | clone->bi_end_io = clone_endio; | 945 | clone->bi_end_io = clone_endio; |
643 | clone->bi_private = tio; | 946 | clone->bi_private = tio; |
644 | 947 | ||
@@ -752,6 +1055,48 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
752 | return clone; | 1055 | return clone; |
753 | } | 1056 | } |
754 | 1057 | ||
1058 | static struct dm_target_io *alloc_tio(struct clone_info *ci, | ||
1059 | struct dm_target *ti) | ||
1060 | { | ||
1061 | struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO); | ||
1062 | |||
1063 | tio->io = ci->io; | ||
1064 | tio->ti = ti; | ||
1065 | memset(&tio->info, 0, sizeof(tio->info)); | ||
1066 | |||
1067 | return tio; | ||
1068 | } | ||
1069 | |||
1070 | static void __flush_target(struct clone_info *ci, struct dm_target *ti, | ||
1071 | unsigned flush_nr) | ||
1072 | { | ||
1073 | struct dm_target_io *tio = alloc_tio(ci, ti); | ||
1074 | struct bio *clone; | ||
1075 | |||
1076 | tio->info.flush_request = flush_nr; | ||
1077 | |||
1078 | clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); | ||
1079 | __bio_clone(clone, ci->bio); | ||
1080 | clone->bi_destructor = dm_bio_destructor; | ||
1081 | |||
1082 | __map_bio(ti, clone, tio); | ||
1083 | } | ||
1084 | |||
1085 | static int __clone_and_map_empty_barrier(struct clone_info *ci) | ||
1086 | { | ||
1087 | unsigned target_nr = 0, flush_nr; | ||
1088 | struct dm_target *ti; | ||
1089 | |||
1090 | while ((ti = dm_table_get_target(ci->map, target_nr++))) | ||
1091 | for (flush_nr = 0; flush_nr < ti->num_flush_requests; | ||
1092 | flush_nr++) | ||
1093 | __flush_target(ci, ti, flush_nr); | ||
1094 | |||
1095 | ci->sector_count = 0; | ||
1096 | |||
1097 | return 0; | ||
1098 | } | ||
1099 | |||
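Each target receives the empty barrier num_flush_requests times, with tio->info.flush_request numbering the clones from zero. A target opts in from its constructor; a hedged sketch (the example_* name and the value 1 are illustrative):

    static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
    {
            ti->num_flush_requests = 1;     /* receive one flush clone per barrier */
            return 0;
    }

A target that leaves the field at zero never sees empty barriers at all.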
755 | static int __clone_and_map(struct clone_info *ci) | 1100 | static int __clone_and_map(struct clone_info *ci) |
756 | { | 1101 | { |
757 | struct bio *clone, *bio = ci->bio; | 1102 | struct bio *clone, *bio = ci->bio; |
@@ -759,6 +1104,9 @@ static int __clone_and_map(struct clone_info *ci) | |||
759 | sector_t len = 0, max; | 1104 | sector_t len = 0, max; |
760 | struct dm_target_io *tio; | 1105 | struct dm_target_io *tio; |
761 | 1106 | ||
1107 | if (unlikely(bio_empty_barrier(bio))) | ||
1108 | return __clone_and_map_empty_barrier(ci); | ||
1109 | |||
762 | ti = dm_table_find_target(ci->map, ci->sector); | 1110 | ti = dm_table_find_target(ci->map, ci->sector); |
763 | if (!dm_target_is_valid(ti)) | 1111 | if (!dm_target_is_valid(ti)) |
764 | return -EIO; | 1112 | return -EIO; |
@@ -768,10 +1116,7 @@ static int __clone_and_map(struct clone_info *ci) | |||
768 | /* | 1116 | /* |
769 | * Allocate a target io object. | 1117 | * Allocate a target io object. |
770 | */ | 1118 | */ |
771 | tio = alloc_tio(ci->md); | 1119 | tio = alloc_tio(ci, ti); |
772 | tio->io = ci->io; | ||
773 | tio->ti = ti; | ||
774 | memset(&tio->info, 0, sizeof(tio->info)); | ||
775 | 1120 | ||
776 | if (ci->sector_count <= max) { | 1121 | if (ci->sector_count <= max) { |
777 | /* | 1122 | /* |
@@ -827,10 +1172,7 @@ static int __clone_and_map(struct clone_info *ci) | |||
827 | 1172 | ||
828 | max = max_io_len(ci->md, ci->sector, ti); | 1173 | max = max_io_len(ci->md, ci->sector, ti); |
829 | 1174 | ||
830 | tio = alloc_tio(ci->md); | 1175 | tio = alloc_tio(ci, ti); |
831 | tio->io = ci->io; | ||
832 | tio->ti = ti; | ||
833 | memset(&tio->info, 0, sizeof(tio->info)); | ||
834 | } | 1176 | } |
835 | 1177 | ||
836 | len = min(remaining, max); | 1178 | len = min(remaining, max); |
@@ -865,7 +1207,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) | |||
865 | if (!bio_barrier(bio)) | 1207 | if (!bio_barrier(bio)) |
866 | bio_io_error(bio); | 1208 | bio_io_error(bio); |
867 | else | 1209 | else |
868 | md->barrier_error = -EIO; | 1210 | if (!md->barrier_error) |
1211 | md->barrier_error = -EIO; | ||
869 | return; | 1212 | return; |
870 | } | 1213 | } |
871 | 1214 | ||
@@ -878,6 +1221,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) | |||
878 | ci.io->md = md; | 1221 | ci.io->md = md; |
879 | ci.sector = bio->bi_sector; | 1222 | ci.sector = bio->bi_sector; |
880 | ci.sector_count = bio_sectors(bio); | 1223 | ci.sector_count = bio_sectors(bio); |
1224 | if (unlikely(bio_empty_barrier(bio))) | ||
1225 | ci.sector_count = 1; | ||
881 | ci.idx = bio->bi_idx; | 1226 | ci.idx = bio->bi_idx; |
882 | 1227 | ||
883 | start_io_acct(ci.io); | 1228 | start_io_acct(ci.io); |
@@ -925,6 +1270,16 @@ static int dm_merge_bvec(struct request_queue *q, | |||
925 | */ | 1270 | */ |
926 | if (max_size && ti->type->merge) | 1271 | if (max_size && ti->type->merge) |
927 | max_size = ti->type->merge(ti, bvm, biovec, max_size); | 1272 | max_size = ti->type->merge(ti, bvm, biovec, max_size); |
1273 | /* | ||
1274 | * If the target doesn't support a merge method and some of the devices | ||
1275 | * provided their merge_bvec method (we know this by looking at | ||
1276 | * queue_max_hw_sectors), then we can't allow bios with multiple vector | ||
1277 | * entries. So always set max_size to 0, and the code below allows | ||
1278 | * just one page. | ||
1279 | */ | ||
1280 | else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) | ||
1281 | |||
1282 | max_size = 0; | ||
928 | 1283 | ||
929 | out_table: | 1284 | out_table: |
930 | dm_table_put(map); | 1285 | dm_table_put(map); |
@@ -943,7 +1298,7 @@ out: | |||
943 | * The request function that just remaps the bio built up by | 1298 | * The request function that just remaps the bio built up by |
944 | * dm_merge_bvec. | 1299 | * dm_merge_bvec. |
945 | */ | 1300 | */ |
946 | static int dm_request(struct request_queue *q, struct bio *bio) | 1301 | static int _dm_request(struct request_queue *q, struct bio *bio) |
947 | { | 1302 | { |
948 | int rw = bio_data_dir(bio); | 1303 | int rw = bio_data_dir(bio); |
949 | struct mapped_device *md = q->queuedata; | 1304 | struct mapped_device *md = q->queuedata; |
@@ -980,12 +1335,274 @@ static int dm_request(struct request_queue *q, struct bio *bio) | |||
980 | return 0; | 1335 | return 0; |
981 | } | 1336 | } |
982 | 1337 | ||
1338 | static int dm_make_request(struct request_queue *q, struct bio *bio) | ||
1339 | { | ||
1340 | struct mapped_device *md = q->queuedata; | ||
1341 | |||
1342 | if (unlikely(bio_barrier(bio))) { | ||
1343 | bio_endio(bio, -EOPNOTSUPP); | ||
1344 | return 0; | ||
1345 | } | ||
1346 | |||
1347 | return md->saved_make_request_fn(q, bio); /* call __make_request() */ | ||
1348 | } | ||
1349 | |||
1350 | static int dm_request_based(struct mapped_device *md) | ||
1351 | { | ||
1352 | return blk_queue_stackable(md->queue); | ||
1353 | } | ||
1354 | |||
1355 | static int dm_request(struct request_queue *q, struct bio *bio) | ||
1356 | { | ||
1357 | struct mapped_device *md = q->queuedata; | ||
1358 | |||
1359 | if (dm_request_based(md)) | ||
1360 | return dm_make_request(q, bio); | ||
1361 | |||
1362 | return _dm_request(q, bio); | ||
1363 | } | ||
1364 | |||
1365 | void dm_dispatch_request(struct request *rq) | ||
1366 | { | ||
1367 | int r; | ||
1368 | |||
1369 | if (blk_queue_io_stat(rq->q)) | ||
1370 | rq->cmd_flags |= REQ_IO_STAT; | ||
1371 | |||
1372 | rq->start_time = jiffies; | ||
1373 | r = blk_insert_cloned_request(rq->q, rq); | ||
1374 | if (r) | ||
1375 | dm_complete_request(rq, r); | ||
1376 | } | ||
1377 | EXPORT_SYMBOL_GPL(dm_dispatch_request); | ||
1378 | |||
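dm_dispatch_request() is exported so that a target which returned DM_MAPIO_SUBMITTED from its map_rq hook can hold on to the clone and dispatch it later, for example from a workqueue once a path becomes usable. A hedged sketch (the example_* name is illustrative):

    /*
     * Called later, outside map_rq; clone->q must already point at the
     * underlying device's queue before dispatching.
     */
    static void example_deferred_dispatch(struct request *clone)
    {
            dm_dispatch_request(clone);
    }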
1379 | static void dm_rq_bio_destructor(struct bio *bio) | ||
1380 | { | ||
1381 | struct dm_rq_clone_bio_info *info = bio->bi_private; | ||
1382 | struct mapped_device *md = info->tio->md; | ||
1383 | |||
1384 | free_bio_info(info); | ||
1385 | bio_free(bio, md->bs); | ||
1386 | } | ||
1387 | |||
1388 | static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, | ||
1389 | void *data) | ||
1390 | { | ||
1391 | struct dm_rq_target_io *tio = data; | ||
1392 | struct mapped_device *md = tio->md; | ||
1393 | struct dm_rq_clone_bio_info *info = alloc_bio_info(md); | ||
1394 | |||
1395 | if (!info) | ||
1396 | return -ENOMEM; | ||
1397 | |||
1398 | info->orig = bio_orig; | ||
1399 | info->tio = tio; | ||
1400 | bio->bi_end_io = end_clone_bio; | ||
1401 | bio->bi_private = info; | ||
1402 | bio->bi_destructor = dm_rq_bio_destructor; | ||
1403 | |||
1404 | return 0; | ||
1405 | } | ||
1406 | |||
1407 | static int setup_clone(struct request *clone, struct request *rq, | ||
1408 | struct dm_rq_target_io *tio) | ||
1409 | { | ||
1410 | int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, | ||
1411 | dm_rq_bio_constructor, tio); | ||
1412 | |||
1413 | if (r) | ||
1414 | return r; | ||
1415 | |||
1416 | clone->cmd = rq->cmd; | ||
1417 | clone->cmd_len = rq->cmd_len; | ||
1418 | clone->sense = rq->sense; | ||
1419 | clone->buffer = rq->buffer; | ||
1420 | clone->end_io = end_clone_request; | ||
1421 | clone->end_io_data = tio; | ||
1422 | |||
1423 | return 0; | ||
1424 | } | ||
1425 | |||
1426 | static int dm_rq_flush_suspending(struct mapped_device *md) | ||
1427 | { | ||
1428 | return !md->suspend_rq.special; | ||
1429 | } | ||
1430 | |||
1431 | /* | ||
1432 | * Called with the queue lock held. | ||
1433 | */ | ||
1434 | static int dm_prep_fn(struct request_queue *q, struct request *rq) | ||
1435 | { | ||
1436 | struct mapped_device *md = q->queuedata; | ||
1437 | struct dm_rq_target_io *tio; | ||
1438 | struct request *clone; | ||
1439 | |||
1440 | if (unlikely(rq == &md->suspend_rq)) { | ||
1441 | if (dm_rq_flush_suspending(md)) | ||
1442 | return BLKPREP_OK; | ||
1443 | else | ||
1444 | /* The flush suspend was interrupted */ | ||
1445 | return BLKPREP_KILL; | ||
1446 | } | ||
1447 | |||
1448 | if (unlikely(rq->special)) { | ||
1449 | DMWARN("Already has something in rq->special."); | ||
1450 | return BLKPREP_KILL; | ||
1451 | } | ||
1452 | |||
1453 | tio = alloc_rq_tio(md); /* Only one for each original request */ | ||
1454 | if (!tio) | ||
1455 | /* -ENOMEM */ | ||
1456 | return BLKPREP_DEFER; | ||
1457 | |||
1458 | tio->md = md; | ||
1459 | tio->ti = NULL; | ||
1460 | tio->orig = rq; | ||
1461 | tio->error = 0; | ||
1462 | memset(&tio->info, 0, sizeof(tio->info)); | ||
1463 | |||
1464 | clone = &tio->clone; | ||
1465 | if (setup_clone(clone, rq, tio)) { | ||
1466 | /* -ENOMEM */ | ||
1467 | free_rq_tio(tio); | ||
1468 | return BLKPREP_DEFER; | ||
1469 | } | ||
1470 | |||
1471 | rq->special = clone; | ||
1472 | rq->cmd_flags |= REQ_DONTPREP; | ||
1473 | |||
1474 | return BLKPREP_OK; | ||
1475 | } | ||
1476 | |||
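dm_prep_fn() leans on the block layer's prep contract: BLKPREP_OK lets the request be dispatched, BLKPREP_DEFER retries the prep later (used above for allocation failures), BLKPREP_KILL fails the request, and REQ_DONTPREP prevents prep from running again on requeue. As a mnemonic only, a hedged mapping sketch:

    static int example_blkprep_of(int err)
    {
            switch (err) {
            case 0:       return BLKPREP_OK;    /* prepped; dispatch it */
            case -ENOMEM: return BLKPREP_DEFER; /* out of resources; retry prep */
            default:      return BLKPREP_KILL;  /* unrecoverable; fail the request */
            }
    }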
1477 | static void map_request(struct dm_target *ti, struct request *rq, | ||
1478 | struct mapped_device *md) | ||
1479 | { | ||
1480 | int r; | ||
1481 | struct request *clone = rq->special; | ||
1482 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
1483 | |||
1484 | /* | ||
1485 | * Hold the md reference here for the in-flight I/O. | ||
1486 | * We can't rely on the reference count held by the device opener, | ||
1487 | * because the device may be closed during the request completion | ||
1488 | * when all bios are completed. | ||
1489 | * See the comment in rq_completed() too. | ||
1490 | */ | ||
1491 | dm_get(md); | ||
1492 | |||
1493 | tio->ti = ti; | ||
1494 | r = ti->type->map_rq(ti, clone, &tio->info); | ||
1495 | switch (r) { | ||
1496 | case DM_MAPIO_SUBMITTED: | ||
1497 | /* The target has taken the I/O to submit by itself later */ | ||
1498 | break; | ||
1499 | case DM_MAPIO_REMAPPED: | ||
1500 | /* The target has remapped the I/O so dispatch it */ | ||
1501 | dm_dispatch_request(clone); | ||
1502 | break; | ||
1503 | case DM_MAPIO_REQUEUE: | ||
1504 | /* The target wants to requeue the I/O */ | ||
1505 | dm_requeue_unmapped_request(clone); | ||
1506 | break; | ||
1507 | default: | ||
1508 | if (r > 0) { | ||
1509 | DMWARN("unimplemented target map return value: %d", r); | ||
1510 | BUG(); | ||
1511 | } | ||
1512 | |||
1513 | /* The target wants to complete the I/O */ | ||
1514 | dm_kill_unmapped_request(clone, r); | ||
1515 | break; | ||
1516 | } | ||
1517 | } | ||
1518 | |||
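map_request() above expects one of three DM_MAPIO_* codes from the target's map_rq hook. A hedged sketch of a minimal request-based mapping in the style of a multipath-like target (example_get_path() is hypothetical; the DM_MAPIO_* codes and block-layer accessors are the ones used in this patch):

    static int example_map_rq(struct dm_target *ti, struct request *clone,
                              union map_info *map_context)
    {
            struct block_device *bdev = example_get_path(ti); /* hypothetical */

            if (!bdev)
                    return DM_MAPIO_REQUEUE;    /* no usable path right now */

            clone->q = bdev_get_queue(bdev);
            clone->rq_disk = bdev->bd_disk;

            return DM_MAPIO_REMAPPED;           /* dm dispatches the clone */
    }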
1519 | /* | ||
1520 | * q->request_fn for request-based dm. | ||
1521 | * Called with the queue lock held. | ||
1522 | */ | ||
1523 | static void dm_request_fn(struct request_queue *q) | ||
1524 | { | ||
1525 | struct mapped_device *md = q->queuedata; | ||
1526 | struct dm_table *map = dm_get_table(md); | ||
1527 | struct dm_target *ti; | ||
1528 | struct request *rq; | ||
1529 | |||
1530 | /* | ||
1531 | * For noflush suspend, check blk_queue_stopped() to immediately | ||
1532 | * quit I/O dispatching. | ||
1533 | */ | ||
1534 | while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { | ||
1535 | rq = blk_peek_request(q); | ||
1536 | if (!rq) | ||
1537 | goto plug_and_out; | ||
1538 | |||
1539 | if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */ | ||
1540 | if (queue_in_flight(q)) | ||
1541 | /* Not quiet yet. Wait more */ | ||
1542 | goto plug_and_out; | ||
1543 | |||
1544 | /* This device should be quiet now */ | ||
1545 | __stop_queue(q); | ||
1546 | blk_start_request(rq); | ||
1547 | __blk_end_request_all(rq, 0); | ||
1548 | wake_up(&md->wait); | ||
1549 | goto out; | ||
1550 | } | ||
1551 | |||
1552 | ti = dm_table_find_target(map, blk_rq_pos(rq)); | ||
1553 | if (ti->type->busy && ti->type->busy(ti)) | ||
1554 | goto plug_and_out; | ||
1555 | |||
1556 | blk_start_request(rq); | ||
1557 | spin_unlock(q->queue_lock); | ||
1558 | map_request(ti, rq, md); | ||
1559 | spin_lock_irq(q->queue_lock); | ||
1560 | } | ||
1561 | |||
1562 | goto out; | ||
1563 | |||
1564 | plug_and_out: | ||
1565 | if (!elv_queue_empty(q)) | ||
1566 | /* Some requests still remain, retry later */ | ||
1567 | blk_plug_device(q); | ||
1568 | |||
1569 | out: | ||
1570 | dm_table_put(map); | ||
1571 | |||
1572 | return; | ||
1573 | } | ||
1574 | |||
1575 | int dm_underlying_device_busy(struct request_queue *q) | ||
1576 | { | ||
1577 | return blk_lld_busy(q); | ||
1578 | } | ||
1579 | EXPORT_SYMBOL_GPL(dm_underlying_device_busy); | ||
1580 | |||
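The busy hook consulted in dm_request_fn() lets a target leave a request on dm's queue, where it can still be merged with later I/O, instead of mapping it immediately. dm_underlying_device_busy() is exported above for exactly this check; a hedged sketch (example_underlying_queue() is hypothetical):

    static int example_busy(struct dm_target *ti)
    {
            struct request_queue *q = example_underlying_queue(ti); /* hypothetical */

            return dm_underlying_device_busy(q);    /* nonzero: don't map yet */
    }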
1581 | static int dm_lld_busy(struct request_queue *q) | ||
1582 | { | ||
1583 | int r; | ||
1584 | struct mapped_device *md = q->queuedata; | ||
1585 | struct dm_table *map = dm_get_table(md); | ||
1586 | |||
1587 | if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) | ||
1588 | r = 1; | ||
1589 | else | ||
1590 | r = dm_table_any_busy_target(map); | ||
1591 | |||
1592 | dm_table_put(map); | ||
1593 | |||
1594 | return r; | ||
1595 | } | ||
1596 | |||
983 | static void dm_unplug_all(struct request_queue *q) | 1597 | static void dm_unplug_all(struct request_queue *q) |
984 | { | 1598 | { |
985 | struct mapped_device *md = q->queuedata; | 1599 | struct mapped_device *md = q->queuedata; |
986 | struct dm_table *map = dm_get_table(md); | 1600 | struct dm_table *map = dm_get_table(md); |
987 | 1601 | ||
988 | if (map) { | 1602 | if (map) { |
1603 | if (dm_request_based(md)) | ||
1604 | generic_unplug_device(q); | ||
1605 | |||
989 | dm_table_unplug_all(map); | 1606 | dm_table_unplug_all(map); |
990 | dm_table_put(map); | 1607 | dm_table_put(map); |
991 | } | 1608 | } |
@@ -1000,7 +1617,16 @@ static int dm_any_congested(void *congested_data, int bdi_bits) | |||
1000 | if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { | 1617 | if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { |
1001 | map = dm_get_table(md); | 1618 | map = dm_get_table(md); |
1002 | if (map) { | 1619 | if (map) { |
1003 | r = dm_table_any_congested(map, bdi_bits); | 1620 | /* |
1621 | * Request-based dm cares only about its own queue when | ||
1622 | * queried for the congestion status of the request_queue | ||
1623 | */ | ||
1624 | if (dm_request_based(md)) | ||
1625 | r = md->queue->backing_dev_info.state & | ||
1626 | bdi_bits; | ||
1627 | else | ||
1628 | r = dm_table_any_congested(map, bdi_bits); | ||
1629 | |||
1004 | dm_table_put(map); | 1630 | dm_table_put(map); |
1005 | } | 1631 | } |
1006 | } | 1632 | } |
@@ -1123,30 +1749,32 @@ static struct mapped_device *alloc_dev(int minor) | |||
1123 | INIT_LIST_HEAD(&md->uevent_list); | 1749 | INIT_LIST_HEAD(&md->uevent_list); |
1124 | spin_lock_init(&md->uevent_lock); | 1750 | spin_lock_init(&md->uevent_lock); |
1125 | 1751 | ||
1126 | md->queue = blk_alloc_queue(GFP_KERNEL); | 1752 | md->queue = blk_init_queue(dm_request_fn, NULL); |
1127 | if (!md->queue) | 1753 | if (!md->queue) |
1128 | goto bad_queue; | 1754 | goto bad_queue; |
1129 | 1755 | ||
1756 | /* | ||
1757 | * Request-based dm devices cannot be stacked on top of bio-based dm | ||
1758 | * devices. The type of this dm device has not been decided yet, | ||
1759 | * although we initialized the queue using blk_init_queue(). | ||
1760 | * The type is decided at the first table loading time. | ||
1761 | * To prevent problematic device stacking, clear the queue flag | ||
1762 | * for request stacking support until then. | ||
1763 | * | ||
1764 | * This queue is new, so no concurrency on the queue_flags. | ||
1765 | */ | ||
1766 | queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); | ||
1767 | md->saved_make_request_fn = md->queue->make_request_fn; | ||
1130 | md->queue->queuedata = md; | 1768 | md->queue->queuedata = md; |
1131 | md->queue->backing_dev_info.congested_fn = dm_any_congested; | 1769 | md->queue->backing_dev_info.congested_fn = dm_any_congested; |
1132 | md->queue->backing_dev_info.congested_data = md; | 1770 | md->queue->backing_dev_info.congested_data = md; |
1133 | blk_queue_make_request(md->queue, dm_request); | 1771 | blk_queue_make_request(md->queue, dm_request); |
1134 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); | ||
1135 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); | 1772 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); |
1136 | md->queue->unplug_fn = dm_unplug_all; | 1773 | md->queue->unplug_fn = dm_unplug_all; |
1137 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); | 1774 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); |
1138 | 1775 | blk_queue_softirq_done(md->queue, dm_softirq_done); | |
1139 | md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache); | 1776 | blk_queue_prep_rq(md->queue, dm_prep_fn); |
1140 | if (!md->io_pool) | 1777 | blk_queue_lld_busy(md->queue, dm_lld_busy); |
1141 | goto bad_io_pool; | ||
1142 | |||
1143 | md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache); | ||
1144 | if (!md->tio_pool) | ||
1145 | goto bad_tio_pool; | ||
1146 | |||
1147 | md->bs = bioset_create(16, 0); | ||
1148 | if (!md->bs) | ||
1149 | goto bad_no_bioset; | ||
1150 | 1778 | ||
1151 | md->disk = alloc_disk(1); | 1779 | md->disk = alloc_disk(1); |
1152 | if (!md->disk) | 1780 | if (!md->disk) |
@@ -1170,6 +1798,10 @@ static struct mapped_device *alloc_dev(int minor) | |||
1170 | if (!md->wq) | 1798 | if (!md->wq) |
1171 | goto bad_thread; | 1799 | goto bad_thread; |
1172 | 1800 | ||
1801 | md->bdev = bdget_disk(md->disk, 0); | ||
1802 | if (!md->bdev) | ||
1803 | goto bad_bdev; | ||
1804 | |||
1173 | /* Populate the mapping, nobody knows we exist yet */ | 1805 | /* Populate the mapping, nobody knows we exist yet */ |
1174 | spin_lock(&_minor_lock); | 1806 | spin_lock(&_minor_lock); |
1175 | old_md = idr_replace(&_minor_idr, md, minor); | 1807 | old_md = idr_replace(&_minor_idr, md, minor); |
@@ -1179,15 +1811,11 @@ static struct mapped_device *alloc_dev(int minor) | |||
1179 | 1811 | ||
1180 | return md; | 1812 | return md; |
1181 | 1813 | ||
1814 | bad_bdev: | ||
1815 | destroy_workqueue(md->wq); | ||
1182 | bad_thread: | 1816 | bad_thread: |
1183 | put_disk(md->disk); | 1817 | put_disk(md->disk); |
1184 | bad_disk: | 1818 | bad_disk: |
1185 | bioset_free(md->bs); | ||
1186 | bad_no_bioset: | ||
1187 | mempool_destroy(md->tio_pool); | ||
1188 | bad_tio_pool: | ||
1189 | mempool_destroy(md->io_pool); | ||
1190 | bad_io_pool: | ||
1191 | blk_cleanup_queue(md->queue); | 1819 | blk_cleanup_queue(md->queue); |
1192 | bad_queue: | 1820 | bad_queue: |
1193 | free_minor(minor); | 1821 | free_minor(minor); |
@@ -1204,14 +1832,15 @@ static void free_dev(struct mapped_device *md) | |||
1204 | { | 1832 | { |
1205 | int minor = MINOR(disk_devt(md->disk)); | 1833 | int minor = MINOR(disk_devt(md->disk)); |
1206 | 1834 | ||
1207 | if (md->suspended_bdev) { | 1835 | unlock_fs(md); |
1208 | unlock_fs(md); | 1836 | bdput(md->bdev); |
1209 | bdput(md->suspended_bdev); | ||
1210 | } | ||
1211 | destroy_workqueue(md->wq); | 1837 | destroy_workqueue(md->wq); |
1212 | mempool_destroy(md->tio_pool); | 1838 | if (md->tio_pool) |
1213 | mempool_destroy(md->io_pool); | 1839 | mempool_destroy(md->tio_pool); |
1214 | bioset_free(md->bs); | 1840 | if (md->io_pool) |
1841 | mempool_destroy(md->io_pool); | ||
1842 | if (md->bs) | ||
1843 | bioset_free(md->bs); | ||
1215 | blk_integrity_unregister(md->disk); | 1844 | blk_integrity_unregister(md->disk); |
1216 | del_gendisk(md->disk); | 1845 | del_gendisk(md->disk); |
1217 | free_minor(minor); | 1846 | free_minor(minor); |
@@ -1226,6 +1855,29 @@ static void free_dev(struct mapped_device *md) | |||
1226 | kfree(md); | 1855 | kfree(md); |
1227 | } | 1856 | } |
1228 | 1857 | ||
1858 | static void __bind_mempools(struct mapped_device *md, struct dm_table *t) | ||
1859 | { | ||
1860 | struct dm_md_mempools *p; | ||
1861 | |||
1862 | if (md->io_pool && md->tio_pool && md->bs) | ||
1863 | /* the md already has necessary mempools */ | ||
1864 | goto out; | ||
1865 | |||
1866 | p = dm_table_get_md_mempools(t); | ||
1867 | BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); | ||
1868 | |||
1869 | md->io_pool = p->io_pool; | ||
1870 | p->io_pool = NULL; | ||
1871 | md->tio_pool = p->tio_pool; | ||
1872 | p->tio_pool = NULL; | ||
1873 | md->bs = p->bs; | ||
1874 | p->bs = NULL; | ||
1875 | |||
1876 | out: | ||
1877 | /* mempool bind completed; the table no longer needs any mempools */ | ||
1878 | dm_table_free_md_mempools(t); | ||
1879 | } | ||
1880 | |||
1229 | /* | 1881 | /* |
1230 | * Bind a table to the device. | 1882 | * Bind a table to the device. |
1231 | */ | 1883 | */ |
@@ -1249,15 +1901,17 @@ static void __set_size(struct mapped_device *md, sector_t size) | |||
1249 | { | 1901 | { |
1250 | set_capacity(md->disk, size); | 1902 | set_capacity(md->disk, size); |
1251 | 1903 | ||
1252 | mutex_lock(&md->suspended_bdev->bd_inode->i_mutex); | 1904 | mutex_lock(&md->bdev->bd_inode->i_mutex); |
1253 | i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); | 1905 | i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); |
1254 | mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex); | 1906 | mutex_unlock(&md->bdev->bd_inode->i_mutex); |
1255 | } | 1907 | } |
1256 | 1908 | ||
1257 | static int __bind(struct mapped_device *md, struct dm_table *t) | 1909 | static int __bind(struct mapped_device *md, struct dm_table *t, |
1910 | struct queue_limits *limits) | ||
1258 | { | 1911 | { |
1259 | struct request_queue *q = md->queue; | 1912 | struct request_queue *q = md->queue; |
1260 | sector_t size; | 1913 | sector_t size; |
1914 | unsigned long flags; | ||
1261 | 1915 | ||
1262 | size = dm_table_get_size(t); | 1916 | size = dm_table_get_size(t); |
1263 | 1917 | ||
@@ -1267,8 +1921,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t) | |||
1267 | if (size != get_capacity(md->disk)) | 1921 | if (size != get_capacity(md->disk)) |
1268 | memset(&md->geometry, 0, sizeof(md->geometry)); | 1922 | memset(&md->geometry, 0, sizeof(md->geometry)); |
1269 | 1923 | ||
1270 | if (md->suspended_bdev) | 1924 | __set_size(md, size); |
1271 | __set_size(md, size); | ||
1272 | 1925 | ||
1273 | if (!size) { | 1926 | if (!size) { |
1274 | dm_table_destroy(t); | 1927 | dm_table_destroy(t); |
@@ -1277,10 +1930,22 @@ static int __bind(struct mapped_device *md, struct dm_table *t) | |||
1277 | 1930 | ||
1278 | dm_table_event_callback(t, event_callback, md); | 1931 | dm_table_event_callback(t, event_callback, md); |
1279 | 1932 | ||
1280 | write_lock(&md->map_lock); | 1933 | /* |
1934 | * If the old table type wasn't request-based during suspension, | ||
1935 | * the queue hasn't been stopped yet. So stop it to prevent | ||
1936 | * I/O mapping before resume. | ||
1937 | * This must be done before setting the queue restrictions, | ||
1938 | * because request-based dm may start running right after they are set. | ||
1939 | */ | ||
1940 | if (dm_table_request_based(t) && !blk_queue_stopped(q)) | ||
1941 | stop_queue(q); | ||
1942 | |||
1943 | __bind_mempools(md, t); | ||
1944 | |||
1945 | write_lock_irqsave(&md->map_lock, flags); | ||
1281 | md->map = t; | 1946 | md->map = t; |
1282 | dm_table_set_restrictions(t, q); | 1947 | dm_table_set_restrictions(t, q, limits); |
1283 | write_unlock(&md->map_lock); | 1948 | write_unlock_irqrestore(&md->map_lock, flags); |
1284 | 1949 | ||
1285 | return 0; | 1950 | return 0; |
1286 | } | 1951 | } |
@@ -1288,14 +1953,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t) | |||
1288 | static void __unbind(struct mapped_device *md) | 1953 | static void __unbind(struct mapped_device *md) |
1289 | { | 1954 | { |
1290 | struct dm_table *map = md->map; | 1955 | struct dm_table *map = md->map; |
1956 | unsigned long flags; | ||
1291 | 1957 | ||
1292 | if (!map) | 1958 | if (!map) |
1293 | return; | 1959 | return; |
1294 | 1960 | ||
1295 | dm_table_event_callback(map, NULL, NULL); | 1961 | dm_table_event_callback(map, NULL, NULL); |
1296 | write_lock(&md->map_lock); | 1962 | write_lock_irqsave(&md->map_lock, flags); |
1297 | md->map = NULL; | 1963 | md->map = NULL; |
1298 | write_unlock(&md->map_lock); | 1964 | write_unlock_irqrestore(&md->map_lock, flags); |
1299 | dm_table_destroy(map); | 1965 | dm_table_destroy(map); |
1300 | } | 1966 | } |
1301 | 1967 | ||
@@ -1399,6 +2065,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
1399 | { | 2065 | { |
1400 | int r = 0; | 2066 | int r = 0; |
1401 | DECLARE_WAITQUEUE(wait, current); | 2067 | DECLARE_WAITQUEUE(wait, current); |
2068 | struct request_queue *q = md->queue; | ||
2069 | unsigned long flags; | ||
1402 | 2070 | ||
1403 | dm_unplug_all(md->queue); | 2071 | dm_unplug_all(md->queue); |
1404 | 2072 | ||
@@ -1408,7 +2076,14 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
1408 | set_current_state(interruptible); | 2076 | set_current_state(interruptible); |
1409 | 2077 | ||
1410 | smp_mb(); | 2078 | smp_mb(); |
1411 | if (!atomic_read(&md->pending)) | 2079 | if (dm_request_based(md)) { |
2080 | spin_lock_irqsave(q->queue_lock, flags); | ||
2081 | if (!queue_in_flight(q) && blk_queue_stopped(q)) { | ||
2082 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2083 | break; | ||
2084 | } | ||
2085 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2086 | } else if (!atomic_read(&md->pending)) | ||
1412 | break; | 2087 | break; |
1413 | 2088 | ||
1414 | if (interruptible == TASK_INTERRUPTIBLE && | 2089 | if (interruptible == TASK_INTERRUPTIBLE && |
@@ -1426,34 +2101,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
1426 | return r; | 2101 | return r; |
1427 | } | 2102 | } |
1428 | 2103 | ||
1429 | static int dm_flush(struct mapped_device *md) | 2104 | static void dm_flush(struct mapped_device *md) |
1430 | { | 2105 | { |
1431 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | 2106 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); |
1432 | return 0; | 2107 | |
2108 | bio_init(&md->barrier_bio); | ||
2109 | md->barrier_bio.bi_bdev = md->bdev; | ||
2110 | md->barrier_bio.bi_rw = WRITE_BARRIER; | ||
2111 | __split_and_process_bio(md, &md->barrier_bio); | ||
2112 | |||
2113 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | ||
1433 | } | 2114 | } |
1434 | 2115 | ||
1435 | static void process_barrier(struct mapped_device *md, struct bio *bio) | 2116 | static void process_barrier(struct mapped_device *md, struct bio *bio) |
1436 | { | 2117 | { |
1437 | int error = dm_flush(md); | 2118 | md->barrier_error = 0; |
1438 | |||
1439 | if (unlikely(error)) { | ||
1440 | bio_endio(bio, error); | ||
1441 | return; | ||
1442 | } | ||
1443 | if (bio_empty_barrier(bio)) { | ||
1444 | bio_endio(bio, 0); | ||
1445 | return; | ||
1446 | } | ||
1447 | |||
1448 | __split_and_process_bio(md, bio); | ||
1449 | 2119 | ||
1450 | error = dm_flush(md); | 2120 | dm_flush(md); |
1451 | 2121 | ||
1452 | if (!error && md->barrier_error) | 2122 | if (!bio_empty_barrier(bio)) { |
1453 | error = md->barrier_error; | 2123 | __split_and_process_bio(md, bio); |
2124 | dm_flush(md); | ||
2125 | } | ||
1454 | 2126 | ||
1455 | if (md->barrier_error != DM_ENDIO_REQUEUE) | 2127 | if (md->barrier_error != DM_ENDIO_REQUEUE) |
1456 | bio_endio(bio, error); | 2128 | bio_endio(bio, md->barrier_error); |
2129 | else { | ||
2130 | spin_lock_irq(&md->deferred_lock); | ||
2131 | bio_list_add_head(&md->deferred, bio); | ||
2132 | spin_unlock_irq(&md->deferred_lock); | ||
2133 | } | ||
1457 | } | 2134 | } |
1458 | 2135 | ||
1459 | /* | 2136 | /* |
@@ -1479,10 +2156,14 @@ static void dm_wq_work(struct work_struct *work) | |||
1479 | 2156 | ||
1480 | up_write(&md->io_lock); | 2157 | up_write(&md->io_lock); |
1481 | 2158 | ||
1482 | if (bio_barrier(c)) | 2159 | if (dm_request_based(md)) |
1483 | process_barrier(md, c); | 2160 | generic_make_request(c); |
1484 | else | 2161 | else { |
1485 | __split_and_process_bio(md, c); | 2162 | if (bio_barrier(c)) |
2163 | process_barrier(md, c); | ||
2164 | else | ||
2165 | __split_and_process_bio(md, c); | ||
2166 | } | ||
1486 | 2167 | ||
1487 | down_write(&md->io_lock); | 2168 | down_write(&md->io_lock); |
1488 | } | 2169 | } |
@@ -1502,6 +2183,7 @@ static void dm_queue_flush(struct mapped_device *md) | |||
1502 | */ | 2183 | */ |
1503 | int dm_swap_table(struct mapped_device *md, struct dm_table *table) | 2184 | int dm_swap_table(struct mapped_device *md, struct dm_table *table) |
1504 | { | 2185 | { |
2186 | struct queue_limits limits; | ||
1505 | int r = -EINVAL; | 2187 | int r = -EINVAL; |
1506 | 2188 | ||
1507 | mutex_lock(&md->suspend_lock); | 2189 | mutex_lock(&md->suspend_lock); |
@@ -1510,19 +2192,96 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) | |||
1510 | if (!dm_suspended(md)) | 2192 | if (!dm_suspended(md)) |
1511 | goto out; | 2193 | goto out; |
1512 | 2194 | ||
1513 | /* without bdev, the device size cannot be changed */ | 2195 | r = dm_calculate_queue_limits(table, &limits); |
1514 | if (!md->suspended_bdev) | 2196 | if (r) |
1515 | if (get_capacity(md->disk) != dm_table_get_size(table)) | 2197 | goto out; |
1516 | goto out; | 2198 | |
2199 | /* cannot change the device type once a table is bound */ | ||
2200 | if (md->map && | ||
2201 | (dm_table_get_type(md->map) != dm_table_get_type(table))) { | ||
2202 | DMWARN("can't change the device type after a table is bound"); | ||
2203 | goto out; | ||
2204 | } | ||
2205 | |||
2206 | /* | ||
2207 | * It is enough to call blk_queue_ordered() only once, when the | ||
2208 | * first bio-based table is bound. | ||
2209 | * | ||
2210 | * This setting should be moved to alloc_dev() once request-based dm | ||
2211 | * supports barriers. | ||
2212 | */ | ||
2213 | if (!md->map && dm_table_bio_based(table)) | ||
2214 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); | ||
1517 | 2215 | ||
1518 | __unbind(md); | 2216 | __unbind(md); |
1519 | r = __bind(md, table); | 2217 | r = __bind(md, table, &limits); |
1520 | 2218 | ||
1521 | out: | 2219 | out: |
1522 | mutex_unlock(&md->suspend_lock); | 2220 | mutex_unlock(&md->suspend_lock); |
1523 | return r; | 2221 | return r; |
1524 | } | 2222 | } |
1525 | 2223 | ||
2224 | static void dm_rq_invalidate_suspend_marker(struct mapped_device *md) | ||
2225 | { | ||
2226 | md->suspend_rq.special = (void *)0x1; | ||
2227 | } | ||
2228 | |||
2229 | static void dm_rq_abort_suspend(struct mapped_device *md, int noflush) | ||
2230 | { | ||
2231 | struct request_queue *q = md->queue; | ||
2232 | unsigned long flags; | ||
2233 | |||
2234 | spin_lock_irqsave(q->queue_lock, flags); | ||
2235 | if (!noflush) | ||
2236 | dm_rq_invalidate_suspend_marker(md); | ||
2237 | __start_queue(q); | ||
2238 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2239 | } | ||
2240 | |||
2241 | static void dm_rq_start_suspend(struct mapped_device *md, int noflush) | ||
2242 | { | ||
2243 | struct request *rq = &md->suspend_rq; | ||
2244 | struct request_queue *q = md->queue; | ||
2245 | |||
2246 | if (noflush) | ||
2247 | stop_queue(q); | ||
2248 | else { | ||
2249 | blk_rq_init(q, rq); | ||
2250 | blk_insert_request(q, rq, 0, NULL); | ||
2251 | } | ||
2252 | } | ||
2253 | |||
2254 | static int dm_rq_suspend_available(struct mapped_device *md, int noflush) | ||
2255 | { | ||
2256 | int r = 1; | ||
2257 | struct request *rq = &md->suspend_rq; | ||
2258 | struct request_queue *q = md->queue; | ||
2259 | unsigned long flags; | ||
2260 | |||
2261 | if (noflush) | ||
2262 | return r; | ||
2263 | |||
2264 | /* The marker must be protected by queue lock if it is in use */ | ||
2265 | spin_lock_irqsave(q->queue_lock, flags); | ||
2266 | if (unlikely(rq->ref_count)) { | ||
2267 | /* | ||
2268 | * This can happen when the previous flush suspend was | ||
2269 | * interrupted, the marker is still in the queue, and this | ||
2270 | * flush suspend has been invoked before the marker was removed | ||
2271 | * (we don't remove it at the time of suspend interruption). | ||
2272 | * We have only one marker per mapped_device, so we can't | ||
2273 | * start another flush suspend while it is in use. | ||
2274 | */ | ||
2275 | BUG_ON(!rq->special); /* The marker should be invalidated */ | ||
2276 | DMWARN("Invalidation of the previous flush suspend is still" | ||
2277 | " in progress. Please retry later."); | ||
2278 | r = 0; | ||
2279 | } | ||
2280 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2281 | |||
2282 | return r; | ||
2283 | } | ||
2284 | |||
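Taken together, the three helpers above plus dm_prep_fn() and dm_request_fn() implement the whole flush-suspend marker protocol. A condensed summary, restating only what this patch does:

    /*
     * dm_rq_start_suspend()      inserts &md->suspend_rq as the marker
     * dm_request_fn()            sees the marker; once the queue is quiet it
     *                            stops the queue, ends the marker, wakes waiters
     * dm_rq_abort_suspend()      invalidates the marker (special = 0x1)
     *                            and restarts the queue
     * dm_prep_fn()               returns BLKPREP_KILL for an invalidated
     *                            marker, removing it from the queue
     * dm_rq_suspend_available()  refuses a new flush suspend while the
     *                            previous marker is still queued
     */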
1526 | /* | 2285 | /* |
1527 | * Functions to lock and unlock any filesystem running on the | 2286 | * Functions to lock and unlock any filesystem running on the |
1528 | * device. | 2287 | * device. |
@@ -1533,7 +2292,7 @@ static int lock_fs(struct mapped_device *md) | |||
1533 | 2292 | ||
1534 | WARN_ON(md->frozen_sb); | 2293 | WARN_ON(md->frozen_sb); |
1535 | 2294 | ||
1536 | md->frozen_sb = freeze_bdev(md->suspended_bdev); | 2295 | md->frozen_sb = freeze_bdev(md->bdev); |
1537 | if (IS_ERR(md->frozen_sb)) { | 2296 | if (IS_ERR(md->frozen_sb)) { |
1538 | r = PTR_ERR(md->frozen_sb); | 2297 | r = PTR_ERR(md->frozen_sb); |
1539 | md->frozen_sb = NULL; | 2298 | md->frozen_sb = NULL; |
@@ -1542,9 +2301,6 @@ static int lock_fs(struct mapped_device *md) | |||
1542 | 2301 | ||
1543 | set_bit(DMF_FROZEN, &md->flags); | 2302 | set_bit(DMF_FROZEN, &md->flags); |
1544 | 2303 | ||
1545 | /* don't bdput right now, we don't want the bdev | ||
1546 | * to go away while it is locked. | ||
1547 | */ | ||
1548 | return 0; | 2304 | return 0; |
1549 | } | 2305 | } |
1550 | 2306 | ||
@@ -1553,7 +2309,7 @@ static void unlock_fs(struct mapped_device *md) | |||
1553 | if (!test_bit(DMF_FROZEN, &md->flags)) | 2309 | if (!test_bit(DMF_FROZEN, &md->flags)) |
1554 | return; | 2310 | return; |
1555 | 2311 | ||
1556 | thaw_bdev(md->suspended_bdev, md->frozen_sb); | 2312 | thaw_bdev(md->bdev, md->frozen_sb); |
1557 | md->frozen_sb = NULL; | 2313 | md->frozen_sb = NULL; |
1558 | clear_bit(DMF_FROZEN, &md->flags); | 2314 | clear_bit(DMF_FROZEN, &md->flags); |
1559 | } | 2315 | } |
@@ -1565,6 +2321,53 @@ static void unlock_fs(struct mapped_device *md) | |||
1565 | * dm_bind_table, dm_suspend must be called to flush any in | 2321 | * dm_bind_table, dm_suspend must be called to flush any in |
1566 | * flight bios and ensure that any further io gets deferred. | 2322 | * flight bios and ensure that any further io gets deferred. |
1567 | */ | 2323 | */ |
2324 | /* | ||
2325 | * Suspend mechanism in request-based dm. | ||
2326 | * | ||
2327 | * After the suspend starts, further incoming requests are kept in | ||
2328 | * the request_queue and deferred. | ||
2329 | * Requests remaining in the request_queue at the start of suspend are flushed | ||
2330 | * if it is a flush suspend. | ||
2331 | * The suspend completes when the following conditions have been satisfied, | ||
2332 | * so wait for them: | ||
2333 | * 1. q->in_flight is 0 (which means no in_flight request) | ||
2334 | * 2. queue has been stopped (which means no request dispatching) | ||
2335 | * | ||
2336 | * | ||
2337 | * Noflush suspend | ||
2338 | * --------------- | ||
2339 | * Noflush suspend doesn't need to dispatch remaining requests. | ||
2340 | * So stop the queue immediately. Then, wait for all in_flight requests | ||
2341 | * to be completed or requeued. | ||
2342 | * | ||
2343 | * To abort noflush suspend, start the queue. | ||
2344 | * | ||
2345 | * | ||
2346 | * Flush suspend | ||
2347 | * ------------- | ||
2348 | * Flush suspend needs to dispatch remaining requests. So stop the queue | ||
2349 | * after the remaining requests are completed. (Requeued requests must also | ||
2350 | * be re-dispatched and completed. Until then, we can't stop the queue.) | ||
2351 | * | ||
2352 | * While the remaining requests are being flushed, further incoming requests | ||
2353 | * are inserted into the same queue. To distinguish which requests are to be | ||
2354 | * flushed, we insert a marker request into the queue when flush suspend | ||
2355 | * starts, like a barrier. | ||
2356 | * Dispatching is blocked when the marker is found at the top of the queue. | ||
2357 | * And the queue is stopped when all in_flight requests are completed, since | ||
2358 | * that means the remaining requests are completely flushed. | ||
2359 | * Then, the marker is removed from the queue. | ||
2360 | * | ||
2361 | * To abort flush suspend, we also need to take care of the marker, not only | ||
2362 | * starting the queue. | ||
2363 | * We don't forcibly remove the marker from the queue, since that's against | ||
2364 | * block-layer convention. Instead, we put an invalidated mark on the marker. | ||
2365 | * When the invalidated marker is found at the top of the queue, it is | ||
2366 | * immediately removed from the queue, so it doesn't block dispatching. | ||
2367 | * Because we have only one marker per mapped_device, we can't start another | ||
2368 | * flush suspend until the invalidated marker is removed from the queue. | ||
2369 | * So fail and return with -EBUSY in such a case. | ||
2370 | */ | ||
1568 | int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | 2371 | int dm_suspend(struct mapped_device *md, unsigned suspend_flags) |
1569 | { | 2372 | { |
1570 | struct dm_table *map = NULL; | 2373 | struct dm_table *map = NULL; |
@@ -1579,6 +2382,11 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1579 | goto out_unlock; | 2382 | goto out_unlock; |
1580 | } | 2383 | } |
1581 | 2384 | ||
2385 | if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) { | ||
2386 | r = -EBUSY; | ||
2387 | goto out_unlock; | ||
2388 | } | ||
2389 | |||
1582 | map = dm_get_table(md); | 2390 | map = dm_get_table(md); |
1583 | 2391 | ||
1584 | /* | 2392 | /* |
@@ -1591,24 +2399,14 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1591 | /* This does not get reverted if there's an error later. */ | 2399 | /* This does not get reverted if there's an error later. */ |
1592 | dm_table_presuspend_targets(map); | 2400 | dm_table_presuspend_targets(map); |
1593 | 2401 | ||
1594 | /* bdget() can stall if the pending I/Os are not flushed */ | 2402 | /* |
1595 | if (!noflush) { | 2403 | * Flush I/O to the device. noflush supersedes do_lockfs, |
1596 | md->suspended_bdev = bdget_disk(md->disk, 0); | 2404 | * because lock_fs() needs to flush I/Os. |
1597 | if (!md->suspended_bdev) { | 2405 | */ |
1598 | DMWARN("bdget failed in dm_suspend"); | 2406 | if (!noflush && do_lockfs) { |
1599 | r = -ENOMEM; | 2407 | r = lock_fs(md); |
2408 | if (r) | ||
1600 | goto out; | 2409 | goto out; |
1601 | } | ||
1602 | |||
1603 | /* | ||
1604 | * Flush I/O to the device. noflush supersedes do_lockfs, | ||
1605 | * because lock_fs() needs to flush I/Os. | ||
1606 | */ | ||
1607 | if (do_lockfs) { | ||
1608 | r = lock_fs(md); | ||
1609 | if (r) | ||
1610 | goto out; | ||
1611 | } | ||
1612 | } | 2410 | } |
1613 | 2411 | ||
1614 | /* | 2412 | /* |
@@ -1634,6 +2432,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1634 | 2432 | ||
1635 | flush_workqueue(md->wq); | 2433 | flush_workqueue(md->wq); |
1636 | 2434 | ||
2435 | if (dm_request_based(md)) | ||
2436 | dm_rq_start_suspend(md, noflush); | ||
2437 | |||
1637 | /* | 2438 | /* |
1638 | * At this point no more requests are entering target request routines. | 2439 | * At this point no more requests are entering target request routines. |
1639 | * We call dm_wait_for_completion to wait for all existing requests | 2440 | * We call dm_wait_for_completion to wait for all existing requests |
@@ -1650,6 +2451,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1650 | if (r < 0) { | 2451 | if (r < 0) { |
1651 | dm_queue_flush(md); | 2452 | dm_queue_flush(md); |
1652 | 2453 | ||
2454 | if (dm_request_based(md)) | ||
2455 | dm_rq_abort_suspend(md, noflush); | ||
2456 | |||
1653 | unlock_fs(md); | 2457 | unlock_fs(md); |
1654 | goto out; /* pushback list is already flushed, so skip flush */ | 2458 | goto out; /* pushback list is already flushed, so skip flush */ |
1655 | } | 2459 | } |
@@ -1665,11 +2469,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1665 | set_bit(DMF_SUSPENDED, &md->flags); | 2469 | set_bit(DMF_SUSPENDED, &md->flags); |
1666 | 2470 | ||
1667 | out: | 2471 | out: |
1668 | if (r && md->suspended_bdev) { | ||
1669 | bdput(md->suspended_bdev); | ||
1670 | md->suspended_bdev = NULL; | ||
1671 | } | ||
1672 | |||
1673 | dm_table_put(map); | 2472 | dm_table_put(map); |
1674 | 2473 | ||
1675 | out_unlock: | 2474 | out_unlock: |
@@ -1696,21 +2495,20 @@ int dm_resume(struct mapped_device *md) | |||
1696 | 2495 | ||
1697 | dm_queue_flush(md); | 2496 | dm_queue_flush(md); |
1698 | 2497 | ||
1699 | unlock_fs(md); | 2498 | /* |
2499 | * Flushing deferred I/Os must be done after targets are resumed | ||
2500 | * so that target mapping can work correctly. | ||
2501 | * Request-based dm queues the deferred I/Os in its request_queue. | ||
2502 | */ | ||
2503 | if (dm_request_based(md)) | ||
2504 | start_queue(md->queue); | ||
1700 | 2505 | ||
1701 | if (md->suspended_bdev) { | 2506 | unlock_fs(md); |
1702 | bdput(md->suspended_bdev); | ||
1703 | md->suspended_bdev = NULL; | ||
1704 | } | ||
1705 | 2507 | ||
1706 | clear_bit(DMF_SUSPENDED, &md->flags); | 2508 | clear_bit(DMF_SUSPENDED, &md->flags); |
1707 | 2509 | ||
1708 | dm_table_unplug_all(map); | 2510 | dm_table_unplug_all(map); |
1709 | |||
1710 | dm_kobject_uevent(md); | ||
1711 | |||
1712 | r = 0; | 2511 | r = 0; |
1713 | |||
1714 | out: | 2512 | out: |
1715 | dm_table_put(map); | 2513 | dm_table_put(map); |
1716 | mutex_unlock(&md->suspend_lock); | 2514 | mutex_unlock(&md->suspend_lock); |
@@ -1721,9 +2519,19 @@ out: | |||
1721 | /*----------------------------------------------------------------- | 2519 | /*----------------------------------------------------------------- |
1722 | * Event notification. | 2520 | * Event notification. |
1723 | *---------------------------------------------------------------*/ | 2521 | *---------------------------------------------------------------*/ |
1724 | void dm_kobject_uevent(struct mapped_device *md) | 2522 | void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, |
1725 | { | 2523 | unsigned cookie) |
1726 | kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE); | 2524 | { |
2525 | char udev_cookie[DM_COOKIE_LENGTH]; | ||
2526 | char *envp[] = { udev_cookie, NULL }; | ||
2527 | |||
2528 | if (!cookie) | ||
2529 | kobject_uevent(&disk_to_dev(md->disk)->kobj, action); | ||
2530 | else { | ||
2531 | snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", | ||
2532 | DM_COOKIE_ENV_VAR_NAME, cookie); | ||
2533 | kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); | ||
2534 | } | ||
1727 | } | 2535 | } |
1728 | 2536 | ||
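The cookie reaches userspace through the uevent environment, letting libdevmapper-style tools pair an event with the ioctl that triggered it. A hedged sketch of the string being built, assuming DM_COOKIE_ENV_VAR_NAME expands to "DM_COOKIE" (the macro is defined elsewhere in this series):

    char buf[DM_COOKIE_LENGTH];

    snprintf(buf, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, 42u);
    /* buf now holds e.g. "DM_COOKIE=42" for cookie 42 */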
1729 | uint32_t dm_next_uevent_seq(struct mapped_device *md) | 2537 | uint32_t dm_next_uevent_seq(struct mapped_device *md) |
@@ -1777,6 +2585,10 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) | |||
1777 | if (&md->kobj != kobj) | 2585 | if (&md->kobj != kobj) |
1778 | return NULL; | 2586 | return NULL; |
1779 | 2587 | ||
2588 | if (test_bit(DMF_FREEING, &md->flags) || | ||
2589 | test_bit(DMF_DELETING, &md->flags)) | ||
2590 | return NULL; | ||
2591 | |||
1780 | dm_get(md); | 2592 | dm_get(md); |
1781 | return md; | 2593 | return md; |
1782 | } | 2594 | } |
@@ -1797,6 +2609,61 @@ int dm_noflush_suspending(struct dm_target *ti) | |||
1797 | } | 2609 | } |
1798 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); | 2610 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); |
1799 | 2611 | ||
2612 | struct dm_md_mempools *dm_alloc_md_mempools(unsigned type) | ||
2613 | { | ||
2614 | struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); | ||
2615 | |||
2616 | if (!pools) | ||
2617 | return NULL; | ||
2618 | |||
2619 | pools->io_pool = (type == DM_TYPE_BIO_BASED) ? | ||
2620 | mempool_create_slab_pool(MIN_IOS, _io_cache) : | ||
2621 | mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache); | ||
2622 | if (!pools->io_pool) | ||
2623 | goto free_pools_and_out; | ||
2624 | |||
2625 | pools->tio_pool = (type == DM_TYPE_BIO_BASED) ? | ||
2626 | mempool_create_slab_pool(MIN_IOS, _tio_cache) : | ||
2627 | mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); | ||
2628 | if (!pools->tio_pool) | ||
2629 | goto free_io_pool_and_out; | ||
2630 | |||
2631 | pools->bs = (type == DM_TYPE_BIO_BASED) ? | ||
2632 | bioset_create(16, 0) : bioset_create(MIN_IOS, 0); | ||
2633 | if (!pools->bs) | ||
2634 | goto free_tio_pool_and_out; | ||
2635 | |||
2636 | return pools; | ||
2637 | |||
2638 | free_tio_pool_and_out: | ||
2639 | mempool_destroy(pools->tio_pool); | ||
2640 | |||
2641 | free_io_pool_and_out: | ||
2642 | mempool_destroy(pools->io_pool); | ||
2643 | |||
2644 | free_pools_and_out: | ||
2645 | kfree(pools); | ||
2646 | |||
2647 | return NULL; | ||
2648 | } | ||
2649 | |||
2650 | void dm_free_md_mempools(struct dm_md_mempools *pools) | ||
2651 | { | ||
2652 | if (!pools) | ||
2653 | return; | ||
2654 | |||
2655 | if (pools->io_pool) | ||
2656 | mempool_destroy(pools->io_pool); | ||
2657 | |||
2658 | if (pools->tio_pool) | ||
2659 | mempool_destroy(pools->tio_pool); | ||
2660 | |||
2661 | if (pools->bs) | ||
2662 | bioset_free(pools->bs); | ||
2663 | |||
2664 | kfree(pools); | ||
2665 | } | ||
2666 | |||
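These two functions complete the mempool handoff: table loading allocates type-appropriate pools, __bind_mempools() moves them into the md at bind time, and whatever the md didn't take is freed along with the table. A hedged usage sketch (the surrounding table-load context is assumed):

    /* at table-load time: */
    struct dm_md_mempools *p = dm_alloc_md_mempools(DM_TYPE_BIO_BASED);

    if (!p)
            return -ENOMEM;

    /* at bind time, __bind_mempools() steals p's pools for the md, then: */
    dm_free_md_mempools(p);     /* frees only the pools the md didn't take */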
1800 | static struct block_device_operations dm_blk_dops = { | 2667 | static struct block_device_operations dm_blk_dops = { |
1801 | .open = dm_blk_open, | 2668 | .open = dm_blk_open, |
1802 | .release = dm_blk_close, | 2669 | .release = dm_blk_close, |
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a31506d93e91..23278ae80f08 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -23,6 +23,13 @@ | |||
23 | #define DM_SUSPEND_NOFLUSH_FLAG (1 << 1) | 23 | #define DM_SUSPEND_NOFLUSH_FLAG (1 << 1) |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * Type of table and mapped_device's mempool | ||
27 | */ | ||
28 | #define DM_TYPE_NONE 0 | ||
29 | #define DM_TYPE_BIO_BASED 1 | ||
30 | #define DM_TYPE_REQUEST_BASED 2 | ||
31 | |||
32 | /* | ||
26 | * List of devices that a metadevice uses and should open/close. | 33 | * List of devices that a metadevice uses and should open/close. |
27 | */ | 34 | */ |
28 | struct dm_dev_internal { | 35 | struct dm_dev_internal { |
@@ -32,6 +39,7 @@ struct dm_dev_internal { | |||
32 | }; | 39 | }; |
33 | 40 | ||
34 | struct dm_table; | 41 | struct dm_table; |
42 | struct dm_md_mempools; | ||
35 | 43 | ||
36 | /*----------------------------------------------------------------- | 44 | /*----------------------------------------------------------------- |
37 | * Internal table functions. | 45 | * Internal table functions. |
@@ -41,18 +49,34 @@ void dm_table_event_callback(struct dm_table *t, | |||
41 | void (*fn)(void *), void *context); | 49 | void (*fn)(void *), void *context); |
42 | struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); | 50 | struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); |
43 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); | 51 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); |
44 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q); | 52 | int dm_calculate_queue_limits(struct dm_table *table, |
53 | struct queue_limits *limits); | ||
54 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | ||
55 | struct queue_limits *limits); | ||
45 | struct list_head *dm_table_get_devices(struct dm_table *t); | 56 | struct list_head *dm_table_get_devices(struct dm_table *t); |
46 | void dm_table_presuspend_targets(struct dm_table *t); | 57 | void dm_table_presuspend_targets(struct dm_table *t); |
47 | void dm_table_postsuspend_targets(struct dm_table *t); | 58 | void dm_table_postsuspend_targets(struct dm_table *t); |
48 | int dm_table_resume_targets(struct dm_table *t); | 59 | int dm_table_resume_targets(struct dm_table *t); |
49 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); | 60 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); |
61 | int dm_table_any_busy_target(struct dm_table *t); | ||
62 | int dm_table_set_type(struct dm_table *t); | ||
63 | unsigned dm_table_get_type(struct dm_table *t); | ||
64 | bool dm_table_bio_based(struct dm_table *t); | ||
65 | bool dm_table_request_based(struct dm_table *t); | ||
66 | int dm_table_alloc_md_mempools(struct dm_table *t); | ||
67 | void dm_table_free_md_mempools(struct dm_table *t); | ||
68 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); | ||
50 | 69 | ||
51 | /* | 70 | /* |
52 | * To check the return value from dm_table_find_target(). | 71 | * To check the return value from dm_table_find_target(). |
53 | */ | 72 | */ |
54 | #define dm_target_is_valid(t) ((t)->table) | 73 | #define dm_target_is_valid(t) ((t)->table) |
55 | 74 | ||
75 | /* | ||
76 | * To check whether the target type is request-based (as opposed to bio-based). | ||
77 | */ | ||
78 | #define dm_target_request_based(t) ((t)->type->map_rq != NULL) | ||
79 | |||
56 | /*----------------------------------------------------------------- | 80 | /*----------------------------------------------------------------- |
57 | * A registry of target types. | 81 | * A registry of target types. |
58 | *---------------------------------------------------------------*/ | 82 | *---------------------------------------------------------------*/ |
@@ -92,9 +116,16 @@ void dm_stripe_exit(void); | |||
92 | int dm_open_count(struct mapped_device *md); | 116 | int dm_open_count(struct mapped_device *md); |
93 | int dm_lock_for_deletion(struct mapped_device *md); | 117 | int dm_lock_for_deletion(struct mapped_device *md); |
94 | 118 | ||
95 | void dm_kobject_uevent(struct mapped_device *md); | 119 | void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, |
120 | unsigned cookie); | ||
96 | 121 | ||
97 | int dm_kcopyd_init(void); | 122 | int dm_kcopyd_init(void); |
98 | void dm_kcopyd_exit(void); | 123 | void dm_kcopyd_exit(void); |
99 | 124 | ||
125 | /* | ||
126 | * Mempool operations | ||
127 | */ | ||
128 | struct dm_md_mempools *dm_alloc_md_mempools(unsigned type); | ||
129 | void dm_free_md_mempools(struct dm_md_mempools *pools); | ||
130 | |||
100 | #endif | 131 | #endif |
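A minimal usage sketch for the mempool API declared above (assumed caller, not part of the patch; dm_alloc_md_mempools() sizes the pools for the given table type, and dm_free_md_mempools() is NULL-safe, as the dm.c hunk earlier shows):

    struct dm_md_mempools *pools;

    pools = dm_alloc_md_mempools(DM_TYPE_REQUEST_BASED);
    if (!pools)
            return -ENOMEM;
    /* ... hand the pools to the mapped_device for the table's lifetime ... */
    dm_free_md_mempools(pools);  /* frees io_pool, tio_pool and the bioset */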
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 1dc721517e4c..c155bd3ec9f1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1725,6 +1725,7 @@ config TLAN | |||
1725 | 1725 | ||
1726 | config KS8842 | 1726 | config KS8842 |
1727 | tristate "Micrel KSZ8842" | 1727 | tristate "Micrel KSZ8842" |
1728 | depends on HAS_IOMEM | ||
1728 | help | 1729 | help |
1729 | This platform driver is for Micrel KSZ8842 chip. | 1730 | This platform driver is for Micrel KSZ8842 chip. |
1730 | 1731 | ||
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 38f1c3375d7f..b70cc99962fc 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev) | |||
6825 | return 0; | 6825 | return 0; |
6826 | } | 6826 | } |
6827 | 6827 | ||
6828 | static u32 | ||
6829 | bnx2_get_link(struct net_device *dev) | ||
6830 | { | ||
6831 | struct bnx2 *bp = netdev_priv(dev); | ||
6832 | |||
6833 | return bp->link_up; | ||
6834 | } | ||
6835 | |||
6828 | static int | 6836 | static int |
6829 | bnx2_get_eeprom_len(struct net_device *dev) | 6837 | bnx2_get_eeprom_len(struct net_device *dev) |
6830 | { | 6838 | { |
@@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = { | |||
7392 | .get_wol = bnx2_get_wol, | 7400 | .get_wol = bnx2_get_wol, |
7393 | .set_wol = bnx2_set_wol, | 7401 | .set_wol = bnx2_set_wol, |
7394 | .nway_reset = bnx2_nway_reset, | 7402 | .nway_reset = bnx2_nway_reset, |
7395 | .get_link = ethtool_op_get_link, | 7403 | .get_link = bnx2_get_link, |
7396 | .get_eeprom_len = bnx2_get_eeprom_len, | 7404 | .get_eeprom_len = bnx2_get_eeprom_len, |
7397 | .get_eeprom = bnx2_get_eeprom, | 7405 | .get_eeprom = bnx2_get_eeprom, |
7398 | .set_eeprom = bnx2_set_eeprom, | 7406 | .set_eeprom = bnx2_set_eeprom, |
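Context for the .get_link change: the generic ethtool_op_get_link() merely reports netif_carrier_ok(), whereas bnx2_get_link() returns the driver's own bp->link_up, which tracks the chip's link state directly. A rough sketch of how the callback is reached (simplified from the ethtool core; details may differ):

    /* Sketch: ETHTOOL_GLINK dispatch, simplified. */
    static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
    {
            struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };

            if (!dev->ethtool_ops->get_link)
                    return -EOPNOTSUPP;

            edata.data = dev->ethtool_ops->get_link(dev);  /* bnx2_get_link() */

            if (copy_to_user(useraddr, &edata, sizeof(edata)))
                    return -EFAULT;
            return 0;
    }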
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index d5e18812bf49..33821a81cbf8 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING | |||
36 | If unsure, say Y. | 36 | If unsure, say Y. |
37 | 37 | ||
38 | config CAN_SJA1000 | 38 | config CAN_SJA1000 |
39 | depends on CAN_DEV | 39 | depends on CAN_DEV && HAS_IOMEM |
40 | tristate "Philips SJA1000" | 40 | tristate "Philips SJA1000" |
41 | ---help--- | 41 | ---help--- |
42 | Driver for the SJA1000 CAN controllers from Philips or NXP | 42 | Driver for the SJA1000 CAN controllers from Philips or NXP |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index bdb143d2b5c7..055bb61d6e77 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -944,28 +944,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) | |||
944 | u32 val = 0; | 944 | u32 val = 0; |
945 | int retries = 60; | 945 | int retries = 60; |
946 | 946 | ||
947 | if (!pegtune_val) { | 947 | if (pegtune_val) |
948 | do { | 948 | return 0; |
949 | val = NXRD32(adapter, CRB_CMDPEG_STATE); | ||
950 | 949 | ||
951 | if (val == PHAN_INITIALIZE_COMPLETE || | 950 | do { |
952 | val == PHAN_INITIALIZE_ACK) | 951 | val = NXRD32(adapter, CRB_CMDPEG_STATE); |
953 | return 0; | ||
954 | 952 | ||
955 | msleep(500); | 953 | switch (val) { |
954 | case PHAN_INITIALIZE_COMPLETE: | ||
955 | case PHAN_INITIALIZE_ACK: | ||
956 | return 0; | ||
957 | case PHAN_INITIALIZE_FAILED: | ||
958 | goto out_err; | ||
959 | default: | ||
960 | break; | ||
961 | } | ||
956 | 962 | ||
957 | } while (--retries); | 963 | msleep(500); |
958 | 964 | ||
959 | if (!retries) { | 965 | } while (--retries); |
960 | pegtune_val = NXRD32(adapter, | ||
961 | NETXEN_ROMUSB_GLB_PEGTUNE_DONE); | ||
962 | printk(KERN_WARNING "netxen_phantom_init: init failed, " | ||
963 | "pegtune_val=%x\n", pegtune_val); | ||
964 | return -1; | ||
965 | } | ||
966 | } | ||
967 | 966 | ||
968 | return 0; | 967 | NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); |
968 | |||
969 | out_err: | ||
970 | dev_warn(&adapter->pdev->dev, "firmware init failed\n"); | ||
971 | return -EIO; | ||
969 | } | 972 | } |
970 | 973 | ||
971 | static int | 974 | static int |
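The rewritten netxen_phantom_init() is the standard bounded-poll shape; the same pattern in isolation (hypothetical helper for illustration, using the names and constants from the hunk above):

    static int poll_cmdpeg_state(struct netxen_adapter *adapter, int retries)
    {
            u32 val;

            do {
                    val = NXRD32(adapter, CRB_CMDPEG_STATE);
                    if (val == PHAN_INITIALIZE_COMPLETE ||
                        val == PHAN_INITIALIZE_ACK)
                            return 0;        /* firmware is ready */
                    if (val == PHAN_INITIALIZE_FAILED)
                            break;           /* hard failure, stop early */
                    msleep(500);
            } while (--retries);

            return -EIO;                     /* failed or timed out */
    }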
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 71daa3d5f114..2919a2d12bf4 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) | |||
705 | first_driver = (adapter->ahw.pci_func == 0); | 705 | first_driver = (adapter->ahw.pci_func == 0); |
706 | 706 | ||
707 | if (!first_driver) | 707 | if (!first_driver) |
708 | return 0; | 708 | goto wait_init; |
709 | 709 | ||
710 | first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); | 710 | first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); |
711 | 711 | ||
@@ -752,6 +752,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) | |||
752 | | (_NETXEN_NIC_LINUX_SUBVERSION); | 752 | | (_NETXEN_NIC_LINUX_SUBVERSION); |
753 | NXWR32(adapter, CRB_DRIVER_VERSION, val); | 753 | NXWR32(adapter, CRB_DRIVER_VERSION, val); |
754 | 754 | ||
755 | wait_init: | ||
755 | /* Handshake with the card before we register the devices. */ | 756 | /* Handshake with the card before we register the devices. */ |
756 | err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | 757 | err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); |
757 | if (err) { | 758 | if (err) { |
@@ -1178,6 +1179,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
1178 | free_netdev(netdev); | 1179 | free_netdev(netdev); |
1179 | } | 1180 | } |
1180 | 1181 | ||
1182 | #ifdef CONFIG_PM | ||
1181 | static int | 1183 | static int |
1182 | netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) | 1184 | netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) |
1183 | { | 1185 | { |
@@ -1242,6 +1244,7 @@ netxen_nic_resume(struct pci_dev *pdev) | |||
1242 | 1244 | ||
1243 | return 0; | 1245 | return 0; |
1244 | } | 1246 | } |
1247 | #endif | ||
1245 | 1248 | ||
1246 | static int netxen_nic_open(struct net_device *netdev) | 1249 | static int netxen_nic_open(struct net_device *netdev) |
1247 | { | 1250 | { |
@@ -1771,8 +1774,10 @@ static struct pci_driver netxen_driver = { | |||
1771 | .id_table = netxen_pci_tbl, | 1774 | .id_table = netxen_pci_tbl, |
1772 | .probe = netxen_nic_probe, | 1775 | .probe = netxen_nic_probe, |
1773 | .remove = __devexit_p(netxen_nic_remove), | 1776 | .remove = __devexit_p(netxen_nic_remove), |
1777 | #ifdef CONFIG_PM | ||
1774 | .suspend = netxen_nic_suspend, | 1778 | .suspend = netxen_nic_suspend, |
1775 | .resume = netxen_nic_resume | 1779 | .resume = netxen_nic_resume |
1780 | #endif | ||
1776 | }; | 1781 | }; |
1777 | 1782 | ||
1778 | /* Driver Registration on NetXen card */ | 1783 | /* Driver Registration on NetXen card */ |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index bbc6d4d3cc94..3e4b67aaa6ea 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3142 | (void __iomem *)port_regs; | 3142 | (void __iomem *)port_regs; |
3143 | u32 delay = 10; | 3143 | u32 delay = 10; |
3144 | int status = 0; | 3144 | int status = 0; |
3145 | unsigned long hw_flags = 0; | ||
3145 | 3146 | ||
3146 | if(ql_mii_setup(qdev)) | 3147 | if(ql_mii_setup(qdev)) |
3147 | return -1; | 3148 | return -1; |
@@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3150 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 3151 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
3151 | (ISP_SERIAL_PORT_IF_WE | | 3152 | (ISP_SERIAL_PORT_IF_WE | |
3152 | (ISP_SERIAL_PORT_IF_WE << 16))); | 3153 | (ISP_SERIAL_PORT_IF_WE << 16))); |
3153 | 3154 | /* Give the PHY time to come out of reset. */ | |
3155 | mdelay(100); | ||
3154 | qdev->port_link_state = LS_DOWN; | 3156 | qdev->port_link_state = LS_DOWN; |
3155 | netif_carrier_off(qdev->ndev); | 3157 | netif_carrier_off(qdev->ndev); |
3156 | 3158 | ||
@@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3350 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); | 3352 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); |
3351 | if (value & PORT_STATUS_IC) | 3353 | if (value & PORT_STATUS_IC) |
3352 | break; | 3354 | break; |
3355 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3353 | msleep(500); | 3356 | msleep(500); |
3357 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3354 | } while (--delay); | 3358 | } while (--delay); |
3355 | 3359 | ||
3356 | if (delay == 0) { | 3360 | if (delay == 0) { |
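The qla3xxx change fixes sleeping with qdev->hw_lock held: msleep() may schedule, so the lock is dropped across the sleep and retaken before the register is polled again. The shape of the fixed loop, condensed from the hunk above:

    spin_lock_irqsave(&qdev->hw_lock, hw_flags);
    do {
            value = ql_read_page0_reg(qdev, &port_regs->portStatus);
            if (value & PORT_STATUS_IC)
                    break;
            spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
            msleep(500);            /* may sleep: must not hold a spinlock */
            spin_lock_irqsave(&qdev->hw_lock, hw_flags);
    } while (--delay);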
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index fbc63d5e459f..eb159587d0bf 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -354,7 +354,7 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | |||
354 | status = acpi_run_hpp(handle, hpp); | 354 | status = acpi_run_hpp(handle, hpp); |
355 | if (ACPI_SUCCESS(status)) | 355 | if (ACPI_SUCCESS(status)) |
356 | break; | 356 | break; |
357 | if (acpi_root_bridge(handle)) | 357 | if (acpi_is_root_bridge(handle)) |
358 | break; | 358 | break; |
359 | status = acpi_get_parent(handle, &phandle); | 359 | status = acpi_get_parent(handle, &phandle); |
360 | if (ACPI_FAILURE(status)) | 360 | if (ACPI_FAILURE(status)) |
@@ -428,7 +428,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
428 | status = acpi_run_oshp(handle); | 428 | status = acpi_run_oshp(handle); |
429 | if (ACPI_SUCCESS(status)) | 429 | if (ACPI_SUCCESS(status)) |
430 | goto got_one; | 430 | goto got_one; |
431 | if (acpi_root_bridge(handle)) | 431 | if (acpi_is_root_bridge(handle)) |
432 | break; | 432 | break; |
433 | chandle = handle; | 433 | chandle = handle; |
434 | status = acpi_get_parent(chandle, &handle); | 434 | status = acpi_get_parent(chandle, &handle); |
@@ -449,42 +449,6 @@ got_one: | |||
449 | } | 449 | } |
450 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); | 450 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); |
451 | 451 | ||
452 | /* acpi_root_bridge - check to see if this acpi object is a root bridge | ||
453 | * | ||
454 | * @handle - the acpi object in question. | ||
455 | */ | ||
456 | int acpi_root_bridge(acpi_handle handle) | ||
457 | { | ||
458 | acpi_status status; | ||
459 | struct acpi_device_info *info; | ||
460 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
461 | int i; | ||
462 | |||
463 | status = acpi_get_object_info(handle, &buffer); | ||
464 | if (ACPI_SUCCESS(status)) { | ||
465 | info = buffer.pointer; | ||
466 | if ((info->valid & ACPI_VALID_HID) && | ||
467 | !strcmp(PCI_ROOT_HID_STRING, | ||
468 | info->hardware_id.value)) { | ||
469 | kfree(buffer.pointer); | ||
470 | return 1; | ||
471 | } | ||
472 | if (info->valid & ACPI_VALID_CID) { | ||
473 | for (i=0; i < info->compatibility_id.count; i++) { | ||
474 | if (!strcmp(PCI_ROOT_HID_STRING, | ||
475 | info->compatibility_id.id[i].value)) { | ||
476 | kfree(buffer.pointer); | ||
477 | return 1; | ||
478 | } | ||
479 | } | ||
480 | } | ||
481 | kfree(buffer.pointer); | ||
482 | } | ||
483 | return 0; | ||
484 | } | ||
485 | EXPORT_SYMBOL_GPL(acpi_root_bridge); | ||
486 | |||
487 | |||
488 | static int is_ejectable(acpi_handle handle) | 452 | static int is_ejectable(acpi_handle handle) |
489 | { | 453 | { |
490 | acpi_status status; | 454 | acpi_status status; |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 3a6064bce561..0cb0f830a993 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -678,18 +678,9 @@ static void remove_bridge(acpi_handle handle) | |||
678 | 678 | ||
679 | static struct pci_dev * get_apic_pci_info(acpi_handle handle) | 679 | static struct pci_dev * get_apic_pci_info(acpi_handle handle) |
680 | { | 680 | { |
681 | struct acpi_pci_id id; | ||
682 | struct pci_bus *bus; | ||
683 | struct pci_dev *dev; | 681 | struct pci_dev *dev; |
684 | 682 | ||
685 | if (ACPI_FAILURE(acpi_get_pci_id(handle, &id))) | 683 | dev = acpi_get_pci_dev(handle); |
686 | return NULL; | ||
687 | |||
688 | bus = pci_find_bus(id.segment, id.bus); | ||
689 | if (!bus) | ||
690 | return NULL; | ||
691 | |||
692 | dev = pci_get_slot(bus, PCI_DEVFN(id.device, id.function)); | ||
693 | if (!dev) | 684 | if (!dev) |
694 | return NULL; | 685 | return NULL; |
695 | 686 | ||
@@ -1396,19 +1387,16 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus) | |||
1396 | /* Program resources in newly inserted bridge */ | 1387 | /* Program resources in newly inserted bridge */ |
1397 | static int acpiphp_configure_bridge (acpi_handle handle) | 1388 | static int acpiphp_configure_bridge (acpi_handle handle) |
1398 | { | 1389 | { |
1399 | struct acpi_pci_id pci_id; | 1390 | struct pci_dev *dev; |
1400 | struct pci_bus *bus; | 1391 | struct pci_bus *bus; |
1401 | 1392 | ||
1402 | if (ACPI_FAILURE(acpi_get_pci_id(handle, &pci_id))) { | 1393 | dev = acpi_get_pci_dev(handle); |
1394 | if (!dev) { | ||
1403 | err("cannot get PCI domain and bus number for bridge\n"); | 1395 | err("cannot get PCI domain and bus number for bridge\n"); |
1404 | return -EINVAL; | 1396 | return -EINVAL; |
1405 | } | 1397 | } |
1406 | bus = pci_find_bus(pci_id.segment, pci_id.bus); | 1398 | |
1407 | if (!bus) { | 1399 | bus = dev->bus; |
1408 | err("cannot find bus %d:%d\n", | ||
1409 | pci_id.segment, pci_id.bus); | ||
1410 | return -EINVAL; | ||
1411 | } | ||
1412 | 1400 | ||
1413 | pci_bus_size_bridges(bus); | 1401 | pci_bus_size_bridges(bus); |
1414 | pci_bus_assign_resources(bus); | 1402 | pci_bus_assign_resources(bus); |
@@ -1416,6 +1404,7 @@ static int acpiphp_configure_bridge (acpi_handle handle) | |||
1416 | acpiphp_set_hpp_values(handle, bus); | 1404 | acpiphp_set_hpp_values(handle, bus); |
1417 | pci_enable_bridges(bus); | 1405 | pci_enable_bridges(bus); |
1418 | acpiphp_configure_ioapics(handle); | 1406 | acpiphp_configure_ioapics(handle); |
1407 | pci_dev_put(dev); | ||
1419 | return 0; | 1408 | return 0; |
1420 | } | 1409 | } |
1421 | 1410 | ||
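Note on the conversion above: acpi_get_pci_dev() returns a referenced struct pci_dev, so each successful lookup must be balanced with pci_dev_put(), which the added call provides. The minimal pattern (sketch):

    struct pci_dev *dev = acpi_get_pci_dev(handle);

    if (!dev)
            return -ENODEV;
    /* ... dev->bus replaces the old acpi_get_pci_id()/pci_find_bus() lookup ... */
    pci_dev_put(dev);       /* drop the reference taken by the lookup */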
@@ -1631,7 +1620,7 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
1631 | { | 1620 | { |
1632 | int *count = (int *)context; | 1621 | int *count = (int *)context; |
1633 | 1622 | ||
1634 | if (acpi_root_bridge(handle)) { | 1623 | if (acpi_is_root_bridge(handle)) { |
1635 | acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, | 1624 | acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, |
1636 | handle_hotplug_event_bridge, NULL); | 1625 | handle_hotplug_event_bridge, NULL); |
1637 | (*count)++; | 1626 | (*count)++; |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 178853a07440..e53eacd75c8d 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/sysdev.h> | 39 | #include <linux/sysdev.h> |
40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
41 | #include <asm/iommu.h> | 41 | #include <asm/iommu.h> |
42 | #include <asm/e820.h> | ||
42 | #include "pci.h" | 43 | #include "pci.h" |
43 | 44 | ||
44 | #define ROOT_SIZE VTD_PAGE_SIZE | 45 | #define ROOT_SIZE VTD_PAGE_SIZE |
@@ -217,6 +218,14 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
217 | return (pte->val & 3) != 0; | 218 | return (pte->val & 3) != 0; |
218 | } | 219 | } |
219 | 220 | ||
221 | /* | ||
222 | * This domain is a static identity mapping domain. | ||
223 | * 1. This domain creates a static 1:1 mapping to all usable memory. | ||
224 | * 2. It maps to each iommu if successful. | ||
225 | * 3. Each iommu maps to this domain if successful. | ||
226 | */ | ||
227 | struct dmar_domain *si_domain; | ||
228 | |||
220 | /* devices under the same p2p bridge are owned in one domain */ | 229 | /* devices under the same p2p bridge are owned in one domain */ |
221 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) | 230 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) |
222 | 231 | ||
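What the static identity domain means in practice (conceptual sketch, not driver code): every usable RAM range is mapped 1:1, so for a device attached to si_domain the translation step is effectively

    /* identity mapping: iova == paddr for all E820_RAM ranges */
    dma_addr_t dma_addr = paddr;

while the IOMMU still records which devices belong to the domain and can validate their traffic.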
@@ -225,6 +234,9 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
225 | */ | 234 | */ |
226 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) | 235 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) |
227 | 236 | ||
237 | /* si_domain contains multiple devices */ | ||
238 | #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2) | ||
239 | |||
228 | struct dmar_domain { | 240 | struct dmar_domain { |
229 | int id; /* domain id */ | 241 | int id; /* domain id */ |
230 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ | 242 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ |
@@ -435,12 +447,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu) | |||
435 | return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 447 | return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
436 | } | 448 | } |
437 | 449 | ||
438 | /* in native case, each domain is related to only one iommu */ | 450 | /* This function only returns a single iommu in a domain */ |
439 | static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) | 451 | static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) |
440 | { | 452 | { |
441 | int iommu_id; | 453 | int iommu_id; |
442 | 454 | ||
455 | /* si_domain and vm domain should not get here. */ | ||
443 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); | 456 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); |
457 | BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); | ||
444 | 458 | ||
445 | iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | 459 | iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); |
446 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) | 460 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) |
@@ -1189,48 +1203,71 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
1189 | free_context_table(iommu); | 1203 | free_context_table(iommu); |
1190 | } | 1204 | } |
1191 | 1205 | ||
1192 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | 1206 | static struct dmar_domain *alloc_domain(void) |
1193 | { | 1207 | { |
1194 | unsigned long num; | ||
1195 | unsigned long ndomains; | ||
1196 | struct dmar_domain *domain; | 1208 | struct dmar_domain *domain; |
1197 | unsigned long flags; | ||
1198 | 1209 | ||
1199 | domain = alloc_domain_mem(); | 1210 | domain = alloc_domain_mem(); |
1200 | if (!domain) | 1211 | if (!domain) |
1201 | return NULL; | 1212 | return NULL; |
1202 | 1213 | ||
1214 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | ||
1215 | domain->flags = 0; | ||
1216 | |||
1217 | return domain; | ||
1218 | } | ||
1219 | |||
1220 | static int iommu_attach_domain(struct dmar_domain *domain, | ||
1221 | struct intel_iommu *iommu) | ||
1222 | { | ||
1223 | int num; | ||
1224 | unsigned long ndomains; | ||
1225 | unsigned long flags; | ||
1226 | |||
1203 | ndomains = cap_ndoms(iommu->cap); | 1227 | ndomains = cap_ndoms(iommu->cap); |
1204 | 1228 | ||
1205 | spin_lock_irqsave(&iommu->lock, flags); | 1229 | spin_lock_irqsave(&iommu->lock, flags); |
1230 | |||
1206 | num = find_first_zero_bit(iommu->domain_ids, ndomains); | 1231 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
1207 | if (num >= ndomains) { | 1232 | if (num >= ndomains) { |
1208 | spin_unlock_irqrestore(&iommu->lock, flags); | 1233 | spin_unlock_irqrestore(&iommu->lock, flags); |
1209 | free_domain_mem(domain); | ||
1210 | printk(KERN_ERR "IOMMU: no free domain ids\n"); | 1234 | printk(KERN_ERR "IOMMU: no free domain ids\n"); |
1211 | return NULL; | 1235 | return -ENOMEM; |
1212 | } | 1236 | } |
1213 | 1237 | ||
1214 | set_bit(num, iommu->domain_ids); | ||
1215 | domain->id = num; | 1238 | domain->id = num; |
1216 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | 1239 | set_bit(num, iommu->domain_ids); |
1217 | set_bit(iommu->seq_id, &domain->iommu_bmp); | 1240 | set_bit(iommu->seq_id, &domain->iommu_bmp); |
1218 | domain->flags = 0; | ||
1219 | iommu->domains[num] = domain; | 1241 | iommu->domains[num] = domain; |
1220 | spin_unlock_irqrestore(&iommu->lock, flags); | 1242 | spin_unlock_irqrestore(&iommu->lock, flags); |
1221 | 1243 | ||
1222 | return domain; | 1244 | return 0; |
1223 | } | 1245 | } |
1224 | 1246 | ||
1225 | static void iommu_free_domain(struct dmar_domain *domain) | 1247 | static void iommu_detach_domain(struct dmar_domain *domain, |
1248 | struct intel_iommu *iommu) | ||
1226 | { | 1249 | { |
1227 | unsigned long flags; | 1250 | unsigned long flags; |
1228 | struct intel_iommu *iommu; | 1251 | int num, ndomains; |
1229 | 1252 | int found = 0; | |
1230 | iommu = domain_get_iommu(domain); | ||
1231 | 1253 | ||
1232 | spin_lock_irqsave(&iommu->lock, flags); | 1254 | spin_lock_irqsave(&iommu->lock, flags); |
1233 | clear_bit(domain->id, iommu->domain_ids); | 1255 | ndomains = cap_ndoms(iommu->cap); |
1256 | num = find_first_bit(iommu->domain_ids, ndomains); | ||
1257 | for (; num < ndomains; ) { | ||
1258 | if (iommu->domains[num] == domain) { | ||
1259 | found = 1; | ||
1260 | break; | ||
1261 | } | ||
1262 | num = find_next_bit(iommu->domain_ids, | ||
1263 | cap_ndoms(iommu->cap), num+1); | ||
1264 | } | ||
1265 | |||
1266 | if (found) { | ||
1267 | clear_bit(num, iommu->domain_ids); | ||
1268 | clear_bit(iommu->seq_id, &domain->iommu_bmp); | ||
1269 | iommu->domains[num] = NULL; | ||
1270 | } | ||
1234 | spin_unlock_irqrestore(&iommu->lock, flags); | 1271 | spin_unlock_irqrestore(&iommu->lock, flags); |
1235 | } | 1272 | } |
1236 | 1273 | ||
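Splitting the old iommu_alloc_domain() into alloc_domain() plus iommu_attach_domain()/iommu_detach_domain() lets a single domain hold a domain id on several IOMMUs at once, which the si_domain introduced above requires. The assumed lifecycle, sketched:

    struct dmar_domain *domain = alloc_domain();    /* bound to no iommu yet */

    if (!domain)
            return NULL;
    if (iommu_attach_domain(domain, iommu)) {       /* claim a domain id */
            domain_exit(domain);
            return NULL;
    }
    /* ... optionally attach the same domain to further iommus ... */
    iommu_detach_domain(domain, iommu);             /* release the id again */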
@@ -1350,6 +1387,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1350 | 1387 | ||
1351 | static void domain_exit(struct dmar_domain *domain) | 1388 | static void domain_exit(struct dmar_domain *domain) |
1352 | { | 1389 | { |
1390 | struct dmar_drhd_unit *drhd; | ||
1391 | struct intel_iommu *iommu; | ||
1353 | u64 end; | 1392 | u64 end; |
1354 | 1393 | ||
1355 | /* Domain 0 is reserved, so dont process it */ | 1394 | /* Domain 0 is reserved, so dont process it */ |
@@ -1368,7 +1407,10 @@ static void domain_exit(struct dmar_domain *domain) | |||
1368 | /* free page tables */ | 1407 | /* free page tables */ |
1369 | dma_pte_free_pagetable(domain, 0, end); | 1408 | dma_pte_free_pagetable(domain, 0, end); |
1370 | 1409 | ||
1371 | iommu_free_domain(domain); | 1410 | for_each_active_iommu(iommu, drhd) |
1411 | if (test_bit(iommu->seq_id, &domain->iommu_bmp)) | ||
1412 | iommu_detach_domain(domain, iommu); | ||
1413 | |||
1372 | free_domain_mem(domain); | 1414 | free_domain_mem(domain); |
1373 | } | 1415 | } |
1374 | 1416 | ||
@@ -1408,7 +1450,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
1408 | id = domain->id; | 1450 | id = domain->id; |
1409 | pgd = domain->pgd; | 1451 | pgd = domain->pgd; |
1410 | 1452 | ||
1411 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { | 1453 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
1454 | domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { | ||
1412 | int found = 0; | 1455 | int found = 0; |
1413 | 1456 | ||
1414 | /* find an available domain id for this device in iommu */ | 1457 | /* find an available domain id for this device in iommu */ |
@@ -1433,6 +1476,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
1433 | } | 1476 | } |
1434 | 1477 | ||
1435 | set_bit(num, iommu->domain_ids); | 1478 | set_bit(num, iommu->domain_ids); |
1479 | set_bit(iommu->seq_id, &domain->iommu_bmp); | ||
1436 | iommu->domains[num] = domain; | 1480 | iommu->domains[num] = domain; |
1437 | id = num; | 1481 | id = num; |
1438 | } | 1482 | } |
@@ -1675,6 +1719,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1675 | unsigned long flags; | 1719 | unsigned long flags; |
1676 | int bus = 0, devfn = 0; | 1720 | int bus = 0, devfn = 0; |
1677 | int segment; | 1721 | int segment; |
1722 | int ret; | ||
1678 | 1723 | ||
1679 | domain = find_domain(pdev); | 1724 | domain = find_domain(pdev); |
1680 | if (domain) | 1725 | if (domain) |
@@ -1707,6 +1752,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1707 | } | 1752 | } |
1708 | } | 1753 | } |
1709 | 1754 | ||
1755 | domain = alloc_domain(); | ||
1756 | if (!domain) | ||
1757 | goto error; | ||
1758 | |||
1710 | /* Allocate new domain for the device */ | 1759 | /* Allocate new domain for the device */ |
1711 | drhd = dmar_find_matched_drhd_unit(pdev); | 1760 | drhd = dmar_find_matched_drhd_unit(pdev); |
1712 | if (!drhd) { | 1761 | if (!drhd) { |
@@ -1716,9 +1765,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1716 | } | 1765 | } |
1717 | iommu = drhd->iommu; | 1766 | iommu = drhd->iommu; |
1718 | 1767 | ||
1719 | domain = iommu_alloc_domain(iommu); | 1768 | ret = iommu_attach_domain(domain, iommu); |
1720 | if (!domain) | 1769 | if (ret) { |
1770 | domain_exit(domain); | ||
1721 | goto error; | 1771 | goto error; |
1772 | } | ||
1722 | 1773 | ||
1723 | if (domain_init(domain, gaw)) { | 1774 | if (domain_init(domain, gaw)) { |
1724 | domain_exit(domain); | 1775 | domain_exit(domain); |
@@ -1792,6 +1843,8 @@ error: | |||
1792 | return find_domain(pdev); | 1843 | return find_domain(pdev); |
1793 | } | 1844 | } |
1794 | 1845 | ||
1846 | static int iommu_identity_mapping; | ||
1847 | |||
1795 | static int iommu_prepare_identity_map(struct pci_dev *pdev, | 1848 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
1796 | unsigned long long start, | 1849 | unsigned long long start, |
1797 | unsigned long long end) | 1850 | unsigned long long end) |
@@ -1804,8 +1857,11 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
1804 | printk(KERN_INFO | 1857 | printk(KERN_INFO |
1805 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | 1858 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
1806 | pci_name(pdev), start, end); | 1859 | pci_name(pdev), start, end); |
1807 | /* page table init */ | 1860 | if (iommu_identity_mapping) |
1808 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 1861 | domain = si_domain; |
1862 | else | ||
1863 | /* page table init */ | ||
1864 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | ||
1809 | if (!domain) | 1865 | if (!domain) |
1810 | return -ENOMEM; | 1866 | return -ENOMEM; |
1811 | 1867 | ||
@@ -1952,7 +2008,110 @@ static int __init init_context_pass_through(void) | |||
1952 | return 0; | 2008 | return 0; |
1953 | } | 2009 | } |
1954 | 2010 | ||
1955 | static int __init init_dmars(void) | 2011 | static int md_domain_init(struct dmar_domain *domain, int guest_width); |
2012 | static int si_domain_init(void) | ||
2013 | { | ||
2014 | struct dmar_drhd_unit *drhd; | ||
2015 | struct intel_iommu *iommu; | ||
2016 | int ret = 0; | ||
2017 | |||
2018 | si_domain = alloc_domain(); | ||
2019 | if (!si_domain) | ||
2020 | return -EFAULT; | ||
2021 | |||
2022 | |||
2023 | for_each_active_iommu(iommu, drhd) { | ||
2024 | ret = iommu_attach_domain(si_domain, iommu); | ||
2025 | if (ret) { | ||
2026 | domain_exit(si_domain); | ||
2027 | return -EFAULT; | ||
2028 | } | ||
2029 | } | ||
2030 | |||
2031 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | ||
2032 | domain_exit(si_domain); | ||
2033 | return -EFAULT; | ||
2034 | } | ||
2035 | |||
2036 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
2037 | |||
2038 | return 0; | ||
2039 | } | ||
2040 | |||
2041 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | ||
2042 | struct pci_dev *pdev); | ||
2043 | static int identity_mapping(struct pci_dev *pdev) | ||
2044 | { | ||
2045 | struct device_domain_info *info; | ||
2046 | |||
2047 | if (likely(!iommu_identity_mapping)) | ||
2048 | return 0; | ||
2049 | |||
2050 | |||
2051 | list_for_each_entry(info, &si_domain->devices, link) | ||
2052 | if (info->dev == pdev) | ||
2053 | return 1; | ||
2054 | return 0; | ||
2055 | } | ||
2056 | |||
2057 | static int domain_add_dev_info(struct dmar_domain *domain, | ||
2058 | struct pci_dev *pdev) | ||
2059 | { | ||
2060 | struct device_domain_info *info; | ||
2061 | unsigned long flags; | ||
2062 | |||
2063 | info = alloc_devinfo_mem(); | ||
2064 | if (!info) | ||
2065 | return -ENOMEM; | ||
2066 | |||
2067 | info->segment = pci_domain_nr(pdev->bus); | ||
2068 | info->bus = pdev->bus->number; | ||
2069 | info->devfn = pdev->devfn; | ||
2070 | info->dev = pdev; | ||
2071 | info->domain = domain; | ||
2072 | |||
2073 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2074 | list_add(&info->link, &domain->devices); | ||
2075 | list_add(&info->global, &device_domain_list); | ||
2076 | pdev->dev.archdata.iommu = info; | ||
2077 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2078 | |||
2079 | return 0; | ||
2080 | } | ||
2081 | |||
2082 | static int iommu_prepare_static_identity_mapping(void) | ||
2083 | { | ||
2084 | int i; | ||
2085 | struct pci_dev *pdev = NULL; | ||
2086 | int ret; | ||
2087 | |||
2088 | ret = si_domain_init(); | ||
2089 | if (ret) | ||
2090 | return -EFAULT; | ||
2091 | |||
2092 | printk(KERN_INFO "IOMMU: Setting identity map:\n"); | ||
2093 | for_each_pci_dev(pdev) { | ||
2094 | for (i = 0; i < e820.nr_map; i++) { | ||
2095 | struct e820entry *ei = &e820.map[i]; | ||
2096 | |||
2097 | if (ei->type == E820_RAM) { | ||
2098 | ret = iommu_prepare_identity_map(pdev, | ||
2099 | ei->addr, ei->addr + ei->size); | ||
2100 | if (ret) { | ||
2101 | printk(KERN_INFO "1:1 mapping to one domain failed.\n"); | ||
2102 | return -EFAULT; | ||
2103 | } | ||
2104 | } | ||
2105 | } | ||
2106 | ret = domain_add_dev_info(si_domain, pdev); | ||
2107 | if (ret) | ||
2108 | return ret; | ||
2109 | } | ||
2110 | |||
2111 | return 0; | ||
2112 | } | ||
2113 | |||
2114 | int __init init_dmars(void) | ||
1956 | { | 2115 | { |
1957 | struct dmar_drhd_unit *drhd; | 2116 | struct dmar_drhd_unit *drhd; |
1958 | struct dmar_rmrr_unit *rmrr; | 2117 | struct dmar_rmrr_unit *rmrr; |
@@ -1962,6 +2121,13 @@ static int __init init_dmars(void) | |||
1962 | int pass_through = 1; | 2121 | int pass_through = 1; |
1963 | 2122 | ||
1964 | /* | 2123 | /* |
2124 | * If pass through cannot be enabled, the iommu falls back to identity | ||
2125 | * mapping. | ||
2126 | */ | ||
2127 | if (iommu_pass_through) | ||
2128 | iommu_identity_mapping = 1; | ||
2129 | |||
2130 | /* | ||
1965 | * for each drhd | 2131 | * for each drhd |
1966 | * allocate root | 2132 | * allocate root |
1967 | * initialize and program root entry to not present | 2133 | * initialize and program root entry to not present |
@@ -2090,9 +2256,12 @@ static int __init init_dmars(void) | |||
2090 | 2256 | ||
2091 | /* | 2257 | /* |
2092 | * If pass through is not set or not enabled, setup context entries for | 2258 | * If pass through is not set or not enabled, setup context entries for |
2093 | * identity mappings for rmrr, gfx, and isa. | 2259 | * identity mappings for rmrr, gfx, and isa, and may fall back to the static |
2260 | * identity mapping if iommu_identity_mapping is set. | ||
2094 | */ | 2261 | */ |
2095 | if (!iommu_pass_through) { | 2262 | if (!iommu_pass_through) { |
2263 | if (iommu_identity_mapping) | ||
2264 | iommu_prepare_static_identity_mapping(); | ||
2096 | /* | 2265 | /* |
2097 | * For each rmrr | 2266 | * For each rmrr |
2098 | * for each dev attached to rmrr | 2267 | * for each dev attached to rmrr |
@@ -2107,6 +2276,7 @@ static int __init init_dmars(void) | |||
2107 | * endfor | 2276 | * endfor |
2108 | * endfor | 2277 | * endfor |
2109 | */ | 2278 | */ |
2279 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); | ||
2110 | for_each_rmrr_units(rmrr) { | 2280 | for_each_rmrr_units(rmrr) { |
2111 | for (i = 0; i < rmrr->devices_cnt; i++) { | 2281 | for (i = 0; i < rmrr->devices_cnt; i++) { |
2112 | pdev = rmrr->devices[i]; | 2282 | pdev = rmrr->devices[i]; |
@@ -2248,6 +2418,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
2248 | return domain; | 2418 | return domain; |
2249 | } | 2419 | } |
2250 | 2420 | ||
2421 | static int iommu_dummy(struct pci_dev *pdev) | ||
2422 | { | ||
2423 | return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; | ||
2424 | } | ||
2425 | |||
2426 | /* Check if the pdev needs to go through the non-identity map and unmap process. */ | ||
2427 | static int iommu_no_mapping(struct pci_dev *pdev) | ||
2428 | { | ||
2429 | int found; | ||
2430 | |||
2431 | if (!iommu_identity_mapping) | ||
2432 | return iommu_dummy(pdev); | ||
2433 | |||
2434 | found = identity_mapping(pdev); | ||
2435 | if (found) { | ||
2436 | if (pdev->dma_mask > DMA_BIT_MASK(32)) | ||
2437 | return 1; | ||
2438 | else { | ||
2439 | /* | ||
2440 | * A 32 bit DMA device is removed from si_domain and | ||
2441 | * falls back to non-identity mapping. | ||
2442 | */ | ||
2443 | domain_remove_one_dev_info(si_domain, pdev); | ||
2444 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", | ||
2445 | pci_name(pdev)); | ||
2446 | return 0; | ||
2447 | } | ||
2448 | } else { | ||
2449 | /* | ||
2450 | * When a 64 bit DMA device is detached from a vm, the | ||
2451 | * device is put into si_domain for identity mapping. | ||
2452 | */ | ||
2453 | if (pdev->dma_mask > DMA_BIT_MASK(32)) { | ||
2454 | int ret; | ||
2455 | ret = domain_add_dev_info(si_domain, pdev); | ||
2456 | if (!ret) { | ||
2457 | printk(KERN_INFO "64bit %s uses identity mapping\n", | ||
2458 | pci_name(pdev)); | ||
2459 | return 1; | ||
2460 | } | ||
2461 | } | ||
2462 | } | ||
2463 | |||
2464 | return iommu_dummy(pdev); | ||
2465 | } | ||
2466 | |||
2251 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | 2467 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
2252 | size_t size, int dir, u64 dma_mask) | 2468 | size_t size, int dir, u64 dma_mask) |
2253 | { | 2469 | { |
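Summary of the policy iommu_no_mapping() implements above (illustrative; "skip" means the DMA ops bypass IOMMU translation):

    in si_domain?   dma_mask > 32 bit?   outcome
    yes             yes                  skip translation (return 1)
    yes             no                   demoted out of si_domain (return 0)
    no              yes                  added to si_domain, then skip (return 1)
    no              no                   fall through to iommu_dummy()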
@@ -2260,7 +2476,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2260 | struct intel_iommu *iommu; | 2476 | struct intel_iommu *iommu; |
2261 | 2477 | ||
2262 | BUG_ON(dir == DMA_NONE); | 2478 | BUG_ON(dir == DMA_NONE); |
2263 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2479 | |
2480 | if (iommu_no_mapping(pdev)) | ||
2264 | return paddr; | 2481 | return paddr; |
2265 | 2482 | ||
2266 | domain = get_valid_domain_for_dev(pdev); | 2483 | domain = get_valid_domain_for_dev(pdev); |
@@ -2401,8 +2618,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2401 | struct iova *iova; | 2618 | struct iova *iova; |
2402 | struct intel_iommu *iommu; | 2619 | struct intel_iommu *iommu; |
2403 | 2620 | ||
2404 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2621 | if (iommu_no_mapping(pdev)) |
2405 | return; | 2622 | return; |
2623 | |||
2406 | domain = find_domain(pdev); | 2624 | domain = find_domain(pdev); |
2407 | BUG_ON(!domain); | 2625 | BUG_ON(!domain); |
2408 | 2626 | ||
@@ -2492,7 +2710,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2492 | struct scatterlist *sg; | 2710 | struct scatterlist *sg; |
2493 | struct intel_iommu *iommu; | 2711 | struct intel_iommu *iommu; |
2494 | 2712 | ||
2495 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2713 | if (iommu_no_mapping(pdev)) |
2496 | return; | 2714 | return; |
2497 | 2715 | ||
2498 | domain = find_domain(pdev); | 2716 | domain = find_domain(pdev); |
@@ -2553,7 +2771,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
2553 | struct intel_iommu *iommu; | 2771 | struct intel_iommu *iommu; |
2554 | 2772 | ||
2555 | BUG_ON(dir == DMA_NONE); | 2773 | BUG_ON(dir == DMA_NONE); |
2556 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2774 | if (iommu_no_mapping(pdev)) |
2557 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); | 2775 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); |
2558 | 2776 | ||
2559 | domain = get_valid_domain_for_dev(pdev); | 2777 | domain = get_valid_domain_for_dev(pdev); |
@@ -2951,31 +3169,6 @@ int __init intel_iommu_init(void) | |||
2951 | return 0; | 3169 | return 0; |
2952 | } | 3170 | } |
2953 | 3171 | ||
2954 | static int vm_domain_add_dev_info(struct dmar_domain *domain, | ||
2955 | struct pci_dev *pdev) | ||
2956 | { | ||
2957 | struct device_domain_info *info; | ||
2958 | unsigned long flags; | ||
2959 | |||
2960 | info = alloc_devinfo_mem(); | ||
2961 | if (!info) | ||
2962 | return -ENOMEM; | ||
2963 | |||
2964 | info->segment = pci_domain_nr(pdev->bus); | ||
2965 | info->bus = pdev->bus->number; | ||
2966 | info->devfn = pdev->devfn; | ||
2967 | info->dev = pdev; | ||
2968 | info->domain = domain; | ||
2969 | |||
2970 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2971 | list_add(&info->link, &domain->devices); | ||
2972 | list_add(&info->global, &device_domain_list); | ||
2973 | pdev->dev.archdata.iommu = info; | ||
2974 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2975 | |||
2976 | return 0; | ||
2977 | } | ||
2978 | |||
2979 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | 3172 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
2980 | struct pci_dev *pdev) | 3173 | struct pci_dev *pdev) |
2981 | { | 3174 | { |
@@ -3003,7 +3196,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | |||
3003 | } | 3196 | } |
3004 | } | 3197 | } |
3005 | 3198 | ||
3006 | static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, | 3199 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
3007 | struct pci_dev *pdev) | 3200 | struct pci_dev *pdev) |
3008 | { | 3201 | { |
3009 | struct device_domain_info *info; | 3202 | struct device_domain_info *info; |
@@ -3136,7 +3329,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void) | |||
3136 | return domain; | 3329 | return domain; |
3137 | } | 3330 | } |
3138 | 3331 | ||
3139 | static int vm_domain_init(struct dmar_domain *domain, int guest_width) | 3332 | static int md_domain_init(struct dmar_domain *domain, int guest_width) |
3140 | { | 3333 | { |
3141 | int adjust_width; | 3334 | int adjust_width; |
3142 | 3335 | ||
@@ -3227,7 +3420,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
3227 | "intel_iommu_domain_init: dmar_domain == NULL\n"); | 3420 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
3228 | return -ENOMEM; | 3421 | return -ENOMEM; |
3229 | } | 3422 | } |
3230 | if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 3423 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
3231 | printk(KERN_ERR | 3424 | printk(KERN_ERR |
3232 | "intel_iommu_domain_init() failed\n"); | 3425 | "intel_iommu_domain_init() failed\n"); |
3233 | vm_domain_exit(dmar_domain); | 3426 | vm_domain_exit(dmar_domain); |
@@ -3262,8 +3455,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
3262 | 3455 | ||
3263 | old_domain = find_domain(pdev); | 3456 | old_domain = find_domain(pdev); |
3264 | if (old_domain) { | 3457 | if (old_domain) { |
3265 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) | 3458 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
3266 | vm_domain_remove_one_dev_info(old_domain, pdev); | 3459 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) |
3460 | domain_remove_one_dev_info(old_domain, pdev); | ||
3267 | else | 3461 | else |
3268 | domain_remove_dev_info(old_domain); | 3462 | domain_remove_dev_info(old_domain); |
3269 | } | 3463 | } |
@@ -3285,7 +3479,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
3285 | return -EFAULT; | 3479 | return -EFAULT; |
3286 | } | 3480 | } |
3287 | 3481 | ||
3288 | ret = vm_domain_add_dev_info(dmar_domain, pdev); | 3482 | ret = domain_add_dev_info(dmar_domain, pdev); |
3289 | if (ret) | 3483 | if (ret) |
3290 | return ret; | 3484 | return ret; |
3291 | 3485 | ||
@@ -3299,7 +3493,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain, | |||
3299 | struct dmar_domain *dmar_domain = domain->priv; | 3493 | struct dmar_domain *dmar_domain = domain->priv; |
3300 | struct pci_dev *pdev = to_pci_dev(dev); | 3494 | struct pci_dev *pdev = to_pci_dev(dev); |
3301 | 3495 | ||
3302 | vm_domain_remove_one_dev_info(dmar_domain, pdev); | 3496 | domain_remove_one_dev_info(dmar_domain, pdev); |
3303 | } | 3497 | } |
3304 | 3498 | ||
3305 | static int intel_iommu_map_range(struct iommu_domain *domain, | 3499 | static int intel_iommu_map_range(struct iommu_domain *domain, |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 1e83c8c5f985..4f5b8712931f 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/intel-iommu.h> | 10 | #include <linux/intel-iommu.h> |
11 | #include "intr_remapping.h" | 11 | #include "intr_remapping.h" |
12 | #include <acpi/acpi.h> | 12 | #include <acpi/acpi.h> |
13 | #include <asm/pci-direct.h> | ||
14 | #include "pci.h" | ||
13 | 15 | ||
14 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | 16 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; |
15 | static int ir_ioapic_num; | 17 | static int ir_ioapic_num; |
@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
314 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 316 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
315 | irte = &iommu->ir_table->base[index]; | 317 | irte = &iommu->ir_table->base[index]; |
316 | 318 | ||
317 | set_64bit((unsigned long *)irte, irte_modified->low); | 319 | set_64bit((unsigned long *)&irte->low, irte_modified->low); |
320 | set_64bit((unsigned long *)&irte->high, irte_modified->high); | ||
318 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); | 321 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
319 | 322 | ||
320 | rc = qi_flush_iec(iommu, index, 0); | 323 | rc = qi_flush_iec(iommu, index, 0); |
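The modify_irte() change fixes a truncated update: an interrupt remap table entry is 128 bits wide, exposed by struct irte as two u64 words, and the old single set_64bit() never wrote the high word, which carries the source-id validation fields added further down. Abridged sketch of the layout (bitfield detail trimmed and names assumed; see include/linux/dmar.h for the real definition):

    struct irte {
            union {
                    /* present, fpd, dst_mode, vector, dest_id, ... */
                    __u64 low;
            };
            union {
                    /* sid : 16, sq : 2, svt : 2, reserved : 44 */
                    __u64 high;
            };
    };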
@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) | |||
369 | return drhd->iommu; | 372 | return drhd->iommu; |
370 | } | 373 | } |
371 | 374 | ||
375 | static int clear_entries(struct irq_2_iommu *irq_iommu) | ||
376 | { | ||
377 | struct irte *start, *entry, *end; | ||
378 | struct intel_iommu *iommu; | ||
379 | int index; | ||
380 | |||
381 | if (irq_iommu->sub_handle) | ||
382 | return 0; | ||
383 | |||
384 | iommu = irq_iommu->iommu; | ||
385 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
386 | |||
387 | start = iommu->ir_table->base + index; | ||
388 | end = start + (1 << irq_iommu->irte_mask); | ||
389 | |||
390 | for (entry = start; entry < end; entry++) { | ||
391 | set_64bit((unsigned long *)&entry->low, 0); | ||
392 | set_64bit((unsigned long *)&entry->high, 0); | ||
393 | } | ||
394 | |||
395 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
396 | } | ||
397 | |||
372 | int free_irte(int irq) | 398 | int free_irte(int irq) |
373 | { | 399 | { |
374 | int rc = 0; | 400 | int rc = 0; |
375 | int index, i; | ||
376 | struct irte *irte; | ||
377 | struct intel_iommu *iommu; | ||
378 | struct irq_2_iommu *irq_iommu; | 401 | struct irq_2_iommu *irq_iommu; |
379 | unsigned long flags; | 402 | unsigned long flags; |
380 | 403 | ||
@@ -385,16 +408,7 @@ int free_irte(int irq) | |||
385 | return -1; | 408 | return -1; |
386 | } | 409 | } |
387 | 410 | ||
388 | iommu = irq_iommu->iommu; | 411 | rc = clear_entries(irq_iommu); |
389 | |||
390 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
391 | irte = &iommu->ir_table->base[index]; | ||
392 | |||
393 | if (!irq_iommu->sub_handle) { | ||
394 | for (i = 0; i < (1 << irq_iommu->irte_mask); i++) | ||
395 | set_64bit((unsigned long *)(irte + i), 0); | ||
396 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
397 | } | ||
398 | 412 | ||
399 | irq_iommu->iommu = NULL; | 413 | irq_iommu->iommu = NULL; |
400 | irq_iommu->irte_index = 0; | 414 | irq_iommu->irte_index = 0; |
@@ -406,6 +420,91 @@ int free_irte(int irq) | |||
406 | return rc; | 420 | return rc; |
407 | } | 421 | } |
408 | 422 | ||
423 | /* | ||
424 | * source validation type | ||
425 | */ | ||
426 | #define SVT_NO_VERIFY 0x0 /* no verification is required */ | ||
427 | #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ | ||
428 | #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ | ||
429 | |||
430 | /* | ||
431 | * source-id qualifier | ||
432 | */ | ||
433 | #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ | ||
434 | #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore | ||
435 | * the third least significant bit | ||
436 | */ | ||
437 | #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore | ||
438 | * the second and third least significant bits | ||
439 | */ | ||
440 | #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore | ||
441 | * the three least significant bits | ||
442 | */ | ||
443 | |||
444 | /* | ||
445 | * set SVT, SQ and SID fields of irte to verify | ||
446 | * source ids of interrupt requests | ||
447 | */ | ||
448 | static void set_irte_sid(struct irte *irte, unsigned int svt, | ||
449 | unsigned int sq, unsigned int sid) | ||
450 | { | ||
451 | irte->svt = svt; | ||
452 | irte->sq = sq; | ||
453 | irte->sid = sid; | ||
454 | } | ||
455 | |||
456 | int set_ioapic_sid(struct irte *irte, int apic) | ||
457 | { | ||
458 | int i; | ||
459 | u16 sid = 0; | ||
460 | |||
461 | if (!irte) | ||
462 | return -1; | ||
463 | |||
464 | for (i = 0; i < MAX_IO_APICS; i++) { | ||
465 | if (ir_ioapic[i].id == apic) { | ||
466 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | ||
467 | break; | ||
468 | } | ||
469 | } | ||
470 | |||
471 | if (sid == 0) { | ||
472 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); | ||
473 | return -1; | ||
474 | } | ||
475 | |||
476 | set_irte_sid(irte, 1, 0, sid); | ||
477 | |||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | int set_msi_sid(struct irte *irte, struct pci_dev *dev) | ||
482 | { | ||
483 | struct pci_dev *bridge; | ||
484 | |||
485 | if (!irte || !dev) | ||
486 | return -1; | ||
487 | |||
488 | /* PCIe device or Root Complex integrated PCI device */ | ||
489 | if (dev->is_pcie || !dev->bus->parent) { | ||
490 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
491 | (dev->bus->number << 8) | dev->devfn); | ||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | bridge = pci_find_upstream_pcie_bridge(dev); | ||
496 | if (bridge) { | ||
497 | if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */ | ||
498 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, | ||
499 | (bridge->bus->number << 8) | dev->bus->number); | ||
500 | else /* this is a legacy PCI bridge */ | ||
501 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
502 | (bridge->bus->number << 8) | bridge->devfn); | ||
503 | } | ||
504 | |||
505 | return 0; | ||
506 | } | ||
507 | |||
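A worked example of the source-id encoding (hypothetical device, for illustration): a native PCIe function at 0000:03:00.1 gets SVT_VERIFY_SID_SQ with SQ_ALL_16 and

    /* sid = (bus << 8) | devfn = (0x03 << 8) | PCI_DEVFN(0x00, 0x1) = 0x0301 */
    set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, 0x0301);

so the IOMMU accepts interrupt requests from that exact requester-id only. Behind a PCIe-to-PCI bridge, conventional PCI devices share the bridge's requester-id, so only the originating bus can be checked (SVT_VERIFY_BUS).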
409 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | 508 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) |
410 | { | 509 | { |
411 | u64 addr; | 510 | u64 addr; |
@@ -612,6 +711,35 @@ error: | |||
612 | return -1; | 711 | return -1; |
613 | } | 712 | } |
614 | 713 | ||
714 | static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | ||
715 | struct intel_iommu *iommu) | ||
716 | { | ||
717 | struct acpi_dmar_pci_path *path; | ||
718 | u8 bus; | ||
719 | int count; | ||
720 | |||
721 | bus = scope->bus; | ||
722 | path = (struct acpi_dmar_pci_path *)(scope + 1); | ||
723 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) | ||
724 | / sizeof(struct acpi_dmar_pci_path); | ||
725 | |||
726 | while (--count > 0) { | ||
727 | /* | ||
728 | * Access PCI config space directly because the PCI | ||
729 | * subsystem isn't initialized yet. | ||
730 | */ | ||
731 | bus = read_pci_config_byte(bus, path->dev, path->fn, | ||
732 | PCI_SECONDARY_BUS); | ||
733 | path++; | ||
734 | } | ||
735 | |||
736 | ir_ioapic[ir_ioapic_num].bus = bus; | ||
737 | ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn); | ||
738 | ir_ioapic[ir_ioapic_num].iommu = iommu; | ||
739 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | ||
740 | ir_ioapic_num++; | ||
741 | } | ||
742 | |||
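How the path walk resolves an IOAPIC's bus/devfn (assumed topology, for illustration): for a scope with bus 0x00 and two path entries, {dev 0x1e, fn 0} naming a bridge and then the IOAPIC function itself, the loop runs exactly once:

    /* count = 2: the single iteration reads PCI_SECONDARY_BUS of 00:1e.0
     * (say 0x40); path then points at the last entry, so the IOAPIC is
     * recorded as bus 0x40, devfn PCI_DEVFN(path->dev, path->fn). */

A scope with a single path entry (an IOAPIC directly on scope->bus) skips the loop entirely, since --count reaches zero immediately.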
615 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | 743 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, |
616 | struct intel_iommu *iommu) | 744 | struct intel_iommu *iommu) |
617 | { | 745 | { |
@@ -636,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | |||
636 | " 0x%Lx\n", scope->enumeration_id, | 764 | " 0x%Lx\n", scope->enumeration_id, |
637 | drhd->address); | 765 | drhd->address); |
638 | 766 | ||
639 | ir_ioapic[ir_ioapic_num].iommu = iommu; | 767 | ir_parse_one_ioapic_scope(scope, iommu); |
640 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | ||
641 | ir_ioapic_num++; | ||
642 | } | 768 | } |
643 | start += scope->length; | 769 | start += scope->length; |
644 | } | 770 | } |
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h index ca48f0df8ac9..63a263c18415 100644 --- a/drivers/pci/intr_remapping.h +++ b/drivers/pci/intr_remapping.h | |||
@@ -3,6 +3,8 @@ | |||
3 | struct ioapic_scope { | 3 | struct ioapic_scope { |
4 | struct intel_iommu *iommu; | 4 | struct intel_iommu *iommu; |
5 | unsigned int id; | 5 | unsigned int id; |
6 | unsigned int bus; /* PCI bus number */ | ||
7 | unsigned int devfn; /* PCI devfn number */ | ||
6 | }; | 8 | }; |
7 | 9 | ||
8 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) | 10 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index c682ac536415..7232fe7104aa 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -34,10 +34,27 @@ config ACER_WMI | |||
34 | If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M | 34 | If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M |
35 | here. | 35 | here. |
36 | 36 | ||
37 | config ACERHDF | ||
38 | tristate "Acer Aspire One temperature and fan driver" | ||
39 | depends on THERMAL && THERMAL_HWMON && ACPI | ||
40 | ---help--- | ||
41 | This is a driver for Acer Aspire One netbooks. It allows access to | ||
42 | the temperature sensor and control of the fan. | ||
43 | |||
44 | After loading this driver the BIOS is still in control of the fan. | ||
45 | To let the kernel handle the fan, do: | ||
46 | echo -n enabled > /sys/class/thermal/thermal_zone0/mode | ||
47 | |||
48 | For more information about this driver see | ||
49 | <http://piie.net/files/acerhdf_README.txt> | ||
50 | |||
51 | If you have an Acer Aspire One netbook, say Y or M | ||
52 | here. | ||
53 | |||
37 | config ASUS_LAPTOP | 54 | config ASUS_LAPTOP |
38 | tristate "Asus Laptop Extras (EXPERIMENTAL)" | 55 | tristate "Asus Laptop Extras" |
39 | depends on ACPI | 56 | depends on ACPI |
40 | depends on EXPERIMENTAL && !ACPI_ASUS | 57 | depends on !ACPI_ASUS |
41 | select LEDS_CLASS | 58 | select LEDS_CLASS |
42 | select NEW_LEDS | 59 | select NEW_LEDS |
43 | select BACKLIGHT_CLASS_DEVICE | 60 | select BACKLIGHT_CLASS_DEVICE |
@@ -45,12 +62,12 @@ config ASUS_LAPTOP | |||
45 | ---help--- | 62 | ---help--- |
46 | This is the new Linux driver for Asus laptops. It may also support some | 63 | This is the new Linux driver for Asus laptops. It may also support some |
47 | MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate | 64 | MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate |
48 | standard ACPI events that go through /proc/acpi/events. It also adds | 65 | standard ACPI events and input events. It also adds |
49 | support for video output switching, LCD backlight control, Bluetooth and | 66 | support for video output switching, LCD backlight control, Bluetooth and |
50 | Wlan control, and most importantly, allows you to blink those fancy LEDs. | 67 | Wlan control, and most importantly, allows you to blink those fancy LEDs. |
51 | 68 | ||
52 | For more information and a userspace daemon for handling the extra | 69 | For more information and a userspace daemon for handling the extra |
53 | buttons see <http://acpi4asus.sf.net/>. | 70 | buttons see <http://acpi4asus.sf.net>. |
54 | 71 | ||
55 | If you have an ACPI-compatible ASUS laptop, say Y or M here. | 72 | If you have an ACPI-compatible ASUS laptop, say Y or M here. |
56 | 73 | ||
@@ -342,7 +359,10 @@ config EEEPC_LAPTOP | |||
342 | select HWMON | 359 | select HWMON |
343 | ---help--- | 360 | ---help--- |
344 | This driver supports the Fn-Fx keys on Eee PC laptops. | 361 | This driver supports the Fn-Fx keys on Eee PC laptops. |
345 | It also adds the ability to switch camera/wlan on/off. | 362 | |
363 | It also gives access to some extra laptop functionality such as | ||
364 | Bluetooth and backlight control, and allows powering some other | ||
365 | devices on/off. | ||
346 | 366 | ||
347 | If you have an Eee PC laptop, say Y or M here. | 367 | If you have an Eee PC laptop, say Y or M here. |
348 | 368 | ||
@@ -369,7 +389,7 @@ config ACPI_WMI | |||
369 | any ACPI-WMI devices. | 389 | any ACPI-WMI devices. |
370 | 390 | ||
371 | config ACPI_ASUS | 391 | config ACPI_ASUS |
372 | tristate "ASUS/Medion Laptop Extras" | 392 | tristate "ASUS/Medion Laptop Extras (DEPRECATED)" |
373 | depends on ACPI | 393 | depends on ACPI |
374 | select BACKLIGHT_CLASS_DEVICE | 394 | select BACKLIGHT_CLASS_DEVICE |
375 | ---help--- | 395 | ---help--- |
@@ -390,7 +410,7 @@ config ACPI_ASUS | |||
390 | parameters. | 410 | parameters. |
391 | 411 | ||
392 | More information and a userspace daemon for handling the extra buttons | 412 | More information and a userspace daemon for handling the extra buttons |
393 | at <http://sourceforge.net/projects/acpi4asus/>. | 413 | at <http://acpi4asus.sf.net>. |
394 | 414 | ||
395 | If you have an ACPI-compatible ASUS laptop, say Y or M here. This | 415 | If you have an ACPI-compatible ASUS laptop, say Y or M here. This |
396 | driver is still under development, so if your laptop is unsupported or | 416 | driver is still under development, so if your laptop is unsupported or |
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index e40c7bd1b87e..641b8bfa5538 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | |||
9 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | 9 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o |
10 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o | 10 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o |
11 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o | 11 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o |
12 | obj-$(CONFIG_ACERHDF) += acerhdf.o | ||
12 | obj-$(CONFIG_HP_WMI) += hp-wmi.o | 13 | obj-$(CONFIG_HP_WMI) += hp-wmi.o |
13 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o | 14 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o |
14 | obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o | 15 | obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o |
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c new file mode 100644 index 000000000000..bdfee177eefb --- /dev/null +++ b/drivers/platform/x86/acerhdf.c | |||
@@ -0,0 +1,602 @@ | |||
1 | /* | ||
2 | * acerhdf - A driver which monitors the temperature | ||
3 | * of the aspire one netbook, turns on/off the fan | ||
4 | * as soon as the upper/lower threshold is reached. | ||
5 | * | ||
6 | * (C) 2009 - Peter Feuerer peter (a) piie.net | ||
7 | * http://piie.net | ||
8 | * 2009 Borislav Petkov <petkovbb@gmail.com> | ||
9 | * | ||
10 | * Inspired by and many thanks to: | ||
11 | * o acerfand - Rachel Greenham | ||
12 | * o acer_ec.pl - Michael Kurz michi.kurz (at) googlemail.com | ||
13 | * - Petr Tomasek tomasek (#) etf,cuni,cz | ||
14 | * - Carlos Corbacho cathectic (at) gmail.com | ||
15 | * o lkml - Matthew Garrett | ||
16 | * - Borislav Petkov | ||
17 | * - Andreas Mohr | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or modify | ||
20 | * it under the terms of the GNU General Public License as published by | ||
21 | * the Free Software Foundation; either version 2 of the License, or | ||
22 | * (at your option) any later version. | ||
23 | * | ||
24 | * This program is distributed in the hope that it will be useful, | ||
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
27 | * GNU General Public License for more details. | ||
28 | * | ||
29 | * You should have received a copy of the GNU General Public License | ||
30 | * along with this program; if not, write to the Free Software | ||
31 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
32 | */ | ||
33 | |||
34 | #define pr_fmt(fmt) "acerhdf: " fmt | ||
35 | |||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/fs.h> | ||
39 | #include <linux/dmi.h> | ||
40 | #include <acpi/acpi_drivers.h> | ||
41 | #include <linux/sched.h> | ||
42 | #include <linux/thermal.h> | ||
43 | #include <linux/platform_device.h> | ||
44 | |||
45 | /* | ||
46 | * The driver starts in "kernel mode off" by default. That means the BIOS | ||
47 | * is still in control of the fan. In this mode the driver allows reading the | ||
48 | * temperature of the CPU, and a userspace tool may take over control of the fan. | ||
49 | * If the driver is switched to "kernel mode" (e.g. via module parameter), the | ||
50 | * driver takes full control of the fan. If you want the module to start in | ||
51 | * kernel mode by default, define the following: | ||
52 | */ | ||
53 | #undef START_IN_KERNEL_MODE | ||
54 | |||
55 | #define DRV_VER "0.5.13" | ||
56 | |||
57 | /* | ||
58 | * According to the Atom N270 datasheet, | ||
59 | * (http://download.intel.com/design/processor/datashts/320032.pdf) the | ||
60 | * CPU's optimal operating limits denoted in junction temperature as | ||
61 | * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So, | ||
62 | * assume 89°C is the critical temperature. | ||
63 | */ | ||
64 | #define ACERHDF_TEMP_CRIT 89 | ||
65 | #define ACERHDF_FAN_OFF 0 | ||
66 | #define ACERHDF_FAN_AUTO 1 | ||
67 | |||
68 | /* | ||
69 | * No matter what value the user puts into the fanon variable, turn on the fan | ||
70 | * at 80 degrees Celsius to prevent hardware damage. | ||
71 | */ | ||
72 | #define ACERHDF_MAX_FANON 80 | ||
73 | |||
74 | /* | ||
75 | * Maximum interval between two temperature checks is 15 seconds, as the die | ||
76 | * can get hot really fast under heavy load (plus we shouldn't forget about | ||
77 | * possible impact of _external_ aggressive sources such as heaters, sun etc.) | ||
78 | */ | ||
79 | #define ACERHDF_MAX_INTERVAL 15 | ||
80 | |||
81 | #ifdef START_IN_KERNEL_MODE | ||
82 | static int kernelmode = 1; | ||
83 | #else | ||
84 | static int kernelmode; | ||
85 | #endif | ||
86 | |||
87 | static unsigned int interval = 10; | ||
88 | static unsigned int fanon = 63; | ||
89 | static unsigned int fanoff = 58; | ||
90 | static unsigned int verbose; | ||
91 | static unsigned int fanstate = ACERHDF_FAN_AUTO; | ||
92 | static char force_bios[16]; | ||
93 | static unsigned int prev_interval; | ||
94 | struct thermal_zone_device *thz_dev; | ||
95 | struct thermal_cooling_device *cl_dev; | ||
96 | struct platform_device *acerhdf_dev; | ||
97 | |||
98 | module_param(kernelmode, uint, 0); | ||
99 | MODULE_PARM_DESC(kernelmode, "Kernel mode fan control on / off"); | ||
100 | module_param(interval, uint, 0600); | ||
101 | MODULE_PARM_DESC(interval, "Polling interval of temperature check"); | ||
102 | module_param(fanon, uint, 0600); | ||
103 | MODULE_PARM_DESC(fanon, "Turn the fan on above this temperature"); | ||
104 | module_param(fanoff, uint, 0600); | ||
105 | MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature"); | ||
106 | module_param(verbose, uint, 0600); | ||
107 | MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); | ||
108 | module_param_string(force_bios, force_bios, 16, 0); | ||
109 | MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check"); | ||
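A usage example for these parameters (illustrative values, not recommendations): loading the module with "modprobe acerhdf kernelmode=1 interval=5 fanon=70 fanoff=60" enables kernel-mode fan control with a 5-second poll and a 70/60 degree on/off hysteresis; acerhdf_check_param() below still clamps fanon to ACERHDF_MAX_FANON and interval to ACERHDF_MAX_INTERVAL.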
110 | |||
111 | /* BIOS settings */ | ||
112 | struct bios_settings_t { | ||
113 | const char *vendor; | ||
114 | const char *version; | ||
115 | unsigned char fanreg; | ||
116 | unsigned char tempreg; | ||
117 | unsigned char fancmd[2]; /* fan off and auto commands */ | ||
118 | }; | ||
119 | |||
120 | /* Register addresses and values for different BIOS versions */ | ||
121 | static const struct bios_settings_t bios_tbl[] = { | ||
122 | {"Acer", "v0.3109", 0x55, 0x58, {0x1f, 0x00} }, | ||
123 | {"Acer", "v0.3114", 0x55, 0x58, {0x1f, 0x00} }, | ||
124 | {"Acer", "v0.3301", 0x55, 0x58, {0xaf, 0x00} }, | ||
125 | {"Acer", "v0.3304", 0x55, 0x58, {0xaf, 0x00} }, | ||
126 | {"Acer", "v0.3305", 0x55, 0x58, {0xaf, 0x00} }, | ||
127 | {"Acer", "v0.3308", 0x55, 0x58, {0x21, 0x00} }, | ||
128 | {"Acer", "v0.3309", 0x55, 0x58, {0x21, 0x00} }, | ||
129 | {"Acer", "v0.3310", 0x55, 0x58, {0x21, 0x00} }, | ||
130 | {"Gateway", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, | ||
131 | {"Packard Bell", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, | ||
132 | {"", "", 0, 0, {0, 0} } | ||
133 | }; | ||
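For orientation, each row names the EC register that reports the temperature, the EC register that controls the fan, and the two command bytes for "off" and "auto". A minimal sketch (not part of the patch) of how a matched row is consumed, using the values from the "Acer"/"v0.3109" entry:

```c
/* Sketch only: how a matched bios_tbl row drives the embedded controller.
 * Register and command values are taken from the "Acer"/"v0.3109" row. */
u8 temp;

ec_read(0x58, &temp);	/* bios_cfg->tempreg: CPU temperature in degrees C */
ec_write(0x55, 0x1f);	/* bios_cfg->fanreg <- fancmd[ACERHDF_FAN_OFF]     */
ec_write(0x55, 0x00);	/* bios_cfg->fanreg <- fancmd[ACERHDF_FAN_AUTO]    */
```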
134 | |||
135 | static const struct bios_settings_t *bios_cfg __read_mostly; | ||
136 | |||
137 | |||
138 | static int acerhdf_get_temp(int *temp) | ||
139 | { | ||
140 | u8 read_temp; | ||
141 | |||
142 | if (ec_read(bios_cfg->tempreg, &read_temp)) | ||
143 | return -EINVAL; | ||
144 | |||
145 | *temp = read_temp; | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int acerhdf_get_fanstate(int *state) | ||
151 | { | ||
152 | u8 fan; | ||
153 | bool tmp; | ||
154 | |||
155 | if (ec_read(bios_cfg->fanreg, &fan)) | ||
156 | return -EINVAL; | ||
157 | |||
158 | tmp = (fan == bios_cfg->fancmd[ACERHDF_FAN_OFF]); | ||
159 | *state = tmp ? ACERHDF_FAN_OFF : ACERHDF_FAN_AUTO; | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static void acerhdf_change_fanstate(int state) | ||
165 | { | ||
166 | unsigned char cmd; | ||
167 | |||
168 | if (verbose) | ||
169 | pr_notice("fan %s\n", (state == ACERHDF_FAN_OFF) ? | ||
170 | "OFF" : "ON"); | ||
171 | |||
172 | if ((state != ACERHDF_FAN_OFF) && (state != ACERHDF_FAN_AUTO)) { | ||
173 | pr_err("invalid fan state %d requested, setting to auto!\n", | ||
174 | state); | ||
175 | state = ACERHDF_FAN_AUTO; | ||
176 | } | ||
177 | |||
178 | cmd = bios_cfg->fancmd[state]; | ||
179 | fanstate = state; | ||
180 | |||
181 | ec_write(bios_cfg->fanreg, cmd); | ||
182 | } | ||
183 | |||
184 | static void acerhdf_check_param(struct thermal_zone_device *thermal) | ||
185 | { | ||
186 | if (fanon > ACERHDF_MAX_FANON) { | ||
187 | pr_err("fanon temperature too high, set to %d\n", | ||
188 | ACERHDF_MAX_FANON); | ||
189 | fanon = ACERHDF_MAX_FANON; | ||
190 | } | ||
191 | |||
192 | if (kernelmode && prev_interval != interval) { | ||
193 | if (interval > ACERHDF_MAX_INTERVAL) { | ||
194 | pr_err("interval too high, set to %d\n", | ||
195 | ACERHDF_MAX_INTERVAL); | ||
196 | interval = ACERHDF_MAX_INTERVAL; | ||
197 | } | ||
198 | if (verbose) | ||
199 | pr_notice("interval changed to: %d\n", | ||
200 | interval); | ||
201 | thermal->polling_delay = interval*1000; | ||
202 | prev_interval = interval; | ||
203 | } | ||
204 | } | ||
205 | |||
206 | /* | ||
207 | * This is the thermal zone callback which does the delayed polling of the fan | ||
208 | * state. We check sysfs-originating settings here, in acerhdf_check_param(), | ||
209 | * only as often as the polling interval fires, since we can't do that in the | ||
210 | * respective accessors of the module parameters. | ||
211 | */ | ||
212 | static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, | ||
213 | unsigned long *t) | ||
214 | { | ||
215 | int temp, err = 0; | ||
216 | |||
217 | acerhdf_check_param(thermal); | ||
218 | |||
219 | err = acerhdf_get_temp(&temp); | ||
220 | if (err) | ||
221 | return err; | ||
222 | |||
223 | if (verbose) | ||
224 | pr_notice("temp %d\n", temp); | ||
225 | |||
226 | *t = temp; | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static int acerhdf_bind(struct thermal_zone_device *thermal, | ||
231 | struct thermal_cooling_device *cdev) | ||
232 | { | ||
233 | /* if the cooling device is the one from acerhdf bind it */ | ||
234 | if (cdev != cl_dev) | ||
235 | return 0; | ||
236 | |||
237 | if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) { | ||
238 | pr_err("error binding cooling dev\n"); | ||
239 | return -EINVAL; | ||
240 | } | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static int acerhdf_unbind(struct thermal_zone_device *thermal, | ||
245 | struct thermal_cooling_device *cdev) | ||
246 | { | ||
247 | if (cdev != cl_dev) | ||
248 | return 0; | ||
249 | |||
250 | if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) { | ||
251 | pr_err("error unbinding cooling dev\n"); | ||
252 | return -EINVAL; | ||
253 | } | ||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | static inline void acerhdf_revert_to_bios_mode(void) | ||
258 | { | ||
259 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
260 | kernelmode = 0; | ||
261 | if (thz_dev) | ||
262 | thz_dev->polling_delay = 0; | ||
263 | pr_notice("kernel mode fan control OFF\n"); | ||
264 | } | ||
265 | static inline void acerhdf_enable_kernelmode(void) | ||
266 | { | ||
267 | kernelmode = 1; | ||
268 | |||
269 | thz_dev->polling_delay = interval*1000; | ||
270 | thermal_zone_device_update(thz_dev); | ||
271 | pr_notice("kernel mode fan control ON\n"); | ||
272 | } | ||
273 | |||
274 | static int acerhdf_get_mode(struct thermal_zone_device *thermal, | ||
275 | enum thermal_device_mode *mode) | ||
276 | { | ||
277 | if (verbose) | ||
278 | pr_notice("kernel mode fan control %d\n", kernelmode); | ||
279 | |||
280 | *mode = (kernelmode) ? THERMAL_DEVICE_ENABLED | ||
281 | : THERMAL_DEVICE_DISABLED; | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * set operation mode; | ||
288 | * enabled: the thermal layer of the kernel takes care of | ||
289 | * the temperature and the fan. | ||
290 | * disabled: the BIOS takes control of the fan. | ||
291 | */ | ||
292 | static int acerhdf_set_mode(struct thermal_zone_device *thermal, | ||
293 | enum thermal_device_mode mode) | ||
294 | { | ||
295 | if (mode == THERMAL_DEVICE_DISABLED && kernelmode) | ||
296 | acerhdf_revert_to_bios_mode(); | ||
297 | else if (mode == THERMAL_DEVICE_ENABLED && !kernelmode) | ||
298 | acerhdf_enable_kernelmode(); | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip, | ||
304 | enum thermal_trip_type *type) | ||
305 | { | ||
306 | if (trip == 0) | ||
307 | *type = THERMAL_TRIP_ACTIVE; | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip, | ||
313 | unsigned long *temp) | ||
314 | { | ||
315 | if (trip == 0) | ||
316 | *temp = fanon; | ||
317 | |||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal, | ||
322 | unsigned long *temperature) | ||
323 | { | ||
324 | *temperature = ACERHDF_TEMP_CRIT; | ||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | /* bind callback functions to the thermal zone */ | ||
329 | struct thermal_zone_device_ops acerhdf_dev_ops = { | ||
330 | .bind = acerhdf_bind, | ||
331 | .unbind = acerhdf_unbind, | ||
332 | .get_temp = acerhdf_get_ec_temp, | ||
333 | .get_mode = acerhdf_get_mode, | ||
334 | .set_mode = acerhdf_set_mode, | ||
335 | .get_trip_type = acerhdf_get_trip_type, | ||
336 | .get_trip_temp = acerhdf_get_trip_temp, | ||
337 | .get_crit_temp = acerhdf_get_crit_temp, | ||
338 | }; | ||
339 | |||
340 | |||
341 | /* | ||
342 | * cooling device callback functions | ||
343 | * get maximal fan cooling state | ||
344 | */ | ||
345 | static int acerhdf_get_max_state(struct thermal_cooling_device *cdev, | ||
346 | unsigned long *state) | ||
347 | { | ||
348 | *state = 1; | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | static int acerhdf_get_cur_state(struct thermal_cooling_device *cdev, | ||
354 | unsigned long *state) | ||
355 | { | ||
356 | int err = 0, tmp; | ||
357 | |||
358 | err = acerhdf_get_fanstate(&tmp); | ||
359 | if (err) | ||
360 | return err; | ||
361 | |||
362 | *state = (tmp == ACERHDF_FAN_AUTO) ? 1 : 0; | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | /* change current fan state - is overwritten when running in kernel mode */ | ||
367 | static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev, | ||
368 | unsigned long state) | ||
369 | { | ||
370 | int cur_temp, cur_state, err = 0; | ||
371 | |||
372 | if (!kernelmode) | ||
373 | return 0; | ||
374 | |||
375 | err = acerhdf_get_temp(&cur_temp); | ||
376 | if (err) { | ||
377 | pr_err("error reading temperature, hand off control to BIOS\n"); | ||
378 | goto err_out; | ||
379 | } | ||
380 | |||
381 | err = acerhdf_get_fanstate(&cur_state); | ||
382 | if (err) { | ||
383 | pr_err("error reading fan state, hand off control to BIOS\n"); | ||
384 | goto err_out; | ||
385 | } | ||
386 | |||
387 | if (state == 0) { | ||
388 | /* turn fan off only if below fanoff temperature */ | ||
389 | if ((cur_state == ACERHDF_FAN_AUTO) && | ||
390 | (cur_temp < fanoff)) | ||
391 | acerhdf_change_fanstate(ACERHDF_FAN_OFF); | ||
392 | } else { | ||
393 | if (cur_state == ACERHDF_FAN_OFF) | ||
394 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
395 | } | ||
396 | return 0; | ||
397 | |||
398 | err_out: | ||
399 | acerhdf_revert_to_bios_mode(); | ||
400 | return -EINVAL; | ||
401 | } | ||
402 | |||
403 | /* bind fan callbacks to fan device */ | ||
404 | struct thermal_cooling_device_ops acerhdf_cooling_ops = { | ||
405 | .get_max_state = acerhdf_get_max_state, | ||
406 | .get_cur_state = acerhdf_get_cur_state, | ||
407 | .set_cur_state = acerhdf_set_cur_state, | ||
408 | }; | ||
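The cooling device therefore exposes exactly two states: 0 means the fan is forced off and 1 means BIOS-automatic operation (hence get_max_state() reporting 1). A worked example of the hysteresis in acerhdf_set_cur_state(), with the default thresholds fanon=63 and fanoff=58: at 64 degrees the active trip fires and state 1 switches the fan to auto; if the zone later requests state 0 at 60 degrees, the fan stays on because 60 is not below fanoff; only once the temperature drops under 58 degrees is the fan actually turned off.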
409 | |||
410 | /* suspend / resume functionality */ | ||
411 | static int acerhdf_suspend(struct platform_device *dev, pm_message_t state) | ||
412 | { | ||
413 | if (kernelmode) | ||
414 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
415 | |||
416 | if (verbose) | ||
417 | pr_notice("going suspend\n"); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | static int acerhdf_resume(struct platform_device *device) | ||
423 | { | ||
424 | if (verbose) | ||
425 | pr_notice("resuming\n"); | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static int __devinit acerhdf_probe(struct platform_device *device) | ||
431 | { | ||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | static int acerhdf_remove(struct platform_device *device) | ||
436 | { | ||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | struct platform_driver acerhdf_drv = { | ||
441 | .driver = { | ||
442 | .name = "acerhdf", | ||
443 | .owner = THIS_MODULE, | ||
444 | }, | ||
445 | .probe = acerhdf_probe, | ||
446 | .remove = acerhdf_remove, | ||
447 | .suspend = acerhdf_suspend, | ||
448 | .resume = acerhdf_resume, | ||
449 | }; | ||
450 | |||
451 | |||
452 | /* check hardware */ | ||
453 | static int acerhdf_check_hardware(void) | ||
454 | { | ||
455 | char const *vendor, *version, *product; | ||
456 | int i; | ||
457 | |||
458 | /* get BIOS data */ | ||
459 | vendor = dmi_get_system_info(DMI_SYS_VENDOR); | ||
460 | version = dmi_get_system_info(DMI_BIOS_VERSION); | ||
461 | product = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
462 | |||
463 | pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER); | ||
464 | |||
465 | if (!force_bios[0]) { | ||
466 | if (strncmp(product, "AO", 2)) { | ||
467 | pr_err("no Aspire One hardware found\n"); | ||
468 | return -EINVAL; | ||
469 | } | ||
470 | } else { | ||
471 | pr_info("forcing BIOS version: %s\n", version); | ||
472 | version = force_bios; | ||
473 | kernelmode = 0; | ||
474 | } | ||
475 | |||
476 | if (verbose) | ||
477 | pr_info("BIOS info: %s %s, product: %s\n", | ||
478 | vendor, version, product); | ||
479 | |||
480 | /* search BIOS version and vendor in BIOS settings table */ | ||
481 | for (i = 0; bios_tbl[i].version[0]; i++) { | ||
482 | if (!strcmp(bios_tbl[i].vendor, vendor) && | ||
483 | !strcmp(bios_tbl[i].version, version)) { | ||
484 | bios_cfg = &bios_tbl[i]; | ||
485 | break; | ||
486 | } | ||
487 | } | ||
488 | |||
489 | if (!bios_cfg) { | ||
490 | pr_err("unknown (unsupported) BIOS version %s/%s, " | ||
491 | "please report, aborting!\n", vendor, version); | ||
492 | return -EINVAL; | ||
493 | } | ||
494 | |||
495 | /* | ||
496 | * if started with kernel mode off, prevent the kernel from switching | ||
497 | * off the fan | ||
498 | */ | ||
499 | if (!kernelmode) { | ||
500 | pr_notice("Fan control off, to enable do:\n"); | ||
501 | pr_notice("echo -n \"enabled\" > " | ||
502 | "/sys/class/thermal/thermal_zone0/mode\n"); | ||
503 | } | ||
504 | |||
505 | return 0; | ||
506 | } | ||
507 | |||
508 | static int acerhdf_register_platform(void) | ||
509 | { | ||
510 | int err = 0; | ||
511 | |||
512 | err = platform_driver_register(&acerhdf_drv); | ||
513 | if (err) | ||
514 | return err; | ||
515 | |||
516 | acerhdf_dev = platform_device_alloc("acerhdf", -1); | ||
517 | platform_device_add(acerhdf_dev); | ||
518 | |||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static void acerhdf_unregister_platform(void) | ||
523 | { | ||
524 | if (!acerhdf_dev) | ||
525 | return; | ||
526 | |||
527 | platform_device_del(acerhdf_dev); | ||
528 | platform_driver_unregister(&acerhdf_drv); | ||
529 | } | ||
530 | |||
531 | static int acerhdf_register_thermal(void) | ||
532 | { | ||
533 | cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL, | ||
534 | &acerhdf_cooling_ops); | ||
535 | |||
536 | if (IS_ERR(cl_dev)) | ||
537 | return -EINVAL; | ||
538 | |||
539 | thz_dev = thermal_zone_device_register("acerhdf", 1, NULL, | ||
540 | &acerhdf_dev_ops, 0, 0, 0, | ||
541 | (kernelmode) ? interval*1000 : 0); | ||
542 | if (IS_ERR(thz_dev)) | ||
543 | return -EINVAL; | ||
544 | |||
545 | return 0; | ||
546 | } | ||
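For readers unfamiliar with this generation of the thermal API, an annotated form of the zone registration above (the positional meaning of the arguments is assumed from the era's signature, not spelled out in the patch):

```c
/* Sketch: thermal_zone_device_register(type, trips, devdata, ops,
 *                                      tc1, tc2, passive_delay, polling_delay) */
thz_dev = thermal_zone_device_register("acerhdf",
		1,			/* one trip point: the fanon active trip */
		NULL,			/* no private devdata */
		&acerhdf_dev_ops,
		0, 0,			/* tc1/tc2: unused, no passive cooling */
		0,			/* passive_delay */
		kernelmode ? interval * 1000 : 0);	/* polling in ms, 0 = none */
```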
547 | |||
548 | static void acerhdf_unregister_thermal(void) | ||
549 | { | ||
550 | if (cl_dev) { | ||
551 | thermal_cooling_device_unregister(cl_dev); | ||
552 | cl_dev = NULL; | ||
553 | } | ||
554 | |||
555 | if (thz_dev) { | ||
556 | thermal_zone_device_unregister(thz_dev); | ||
557 | thz_dev = NULL; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | static int __init acerhdf_init(void) | ||
562 | { | ||
563 | int err = 0; | ||
564 | |||
565 | err = acerhdf_check_hardware(); | ||
566 | if (err) | ||
567 | goto out_err; | ||
568 | |||
569 | err = acerhdf_register_platform(); | ||
570 | if (err) | ||
571 | goto err_unreg; | ||
572 | |||
573 | err = acerhdf_register_thermal(); | ||
574 | if (err) | ||
575 | goto err_unreg; | ||
576 | |||
577 | return 0; | ||
578 | |||
579 | err_unreg: | ||
580 | acerhdf_unregister_thermal(); | ||
581 | acerhdf_unregister_platform(); | ||
582 | |||
583 | out_err: | ||
584 | return -ENODEV; | ||
585 | } | ||
586 | |||
587 | static void __exit acerhdf_exit(void) | ||
588 | { | ||
589 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
590 | acerhdf_unregister_thermal(); | ||
591 | acerhdf_unregister_platform(); | ||
592 | } | ||
593 | |||
594 | MODULE_LICENSE("GPL"); | ||
595 | MODULE_AUTHOR("Peter Feuerer"); | ||
596 | MODULE_DESCRIPTION("Aspire One temperature and fan driver"); | ||
597 | MODULE_ALIAS("dmi:*:*Acer*:*:"); | ||
598 | MODULE_ALIAS("dmi:*:*Gateway*:*:"); | ||
599 | MODULE_ALIAS("dmi:*:*Packard Bell*:*:"); | ||
600 | |||
601 | module_init(acerhdf_init); | ||
602 | module_exit(acerhdf_exit); | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index bfc1a8892a32..db657bbeec90 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -33,6 +33,8 @@ | |||
33 | * Sam Lin - GPS support | 33 | * Sam Lin - GPS support |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
37 | |||
36 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
37 | #include <linux/module.h> | 39 | #include <linux/module.h> |
38 | #include <linux/init.h> | 40 | #include <linux/init.h> |
@@ -53,9 +55,10 @@ | |||
53 | #define ASUS_HOTK_NAME "Asus Laptop Support" | 55 | #define ASUS_HOTK_NAME "Asus Laptop Support" |
54 | #define ASUS_HOTK_CLASS "hotkey" | 56 | #define ASUS_HOTK_CLASS "hotkey" |
55 | #define ASUS_HOTK_DEVICE_NAME "Hotkey" | 57 | #define ASUS_HOTK_DEVICE_NAME "Hotkey" |
56 | #define ASUS_HOTK_FILE "asus-laptop" | 58 | #define ASUS_HOTK_FILE KBUILD_MODNAME |
57 | #define ASUS_HOTK_PREFIX "\\_SB.ATKD." | 59 | #define ASUS_HOTK_PREFIX "\\_SB.ATKD." |
58 | 60 | ||
61 | |||
59 | /* | 62 | /* |
60 | * Some events we use, same for all Asus | 63 | * Some events we use, same for all Asus |
61 | */ | 64 | */ |
@@ -207,13 +210,17 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids); | |||
207 | 210 | ||
208 | static int asus_hotk_add(struct acpi_device *device); | 211 | static int asus_hotk_add(struct acpi_device *device); |
209 | static int asus_hotk_remove(struct acpi_device *device, int type); | 212 | static int asus_hotk_remove(struct acpi_device *device, int type); |
213 | static void asus_hotk_notify(struct acpi_device *device, u32 event); | ||
214 | |||
210 | static struct acpi_driver asus_hotk_driver = { | 215 | static struct acpi_driver asus_hotk_driver = { |
211 | .name = ASUS_HOTK_NAME, | 216 | .name = ASUS_HOTK_NAME, |
212 | .class = ASUS_HOTK_CLASS, | 217 | .class = ASUS_HOTK_CLASS, |
213 | .ids = asus_device_ids, | 218 | .ids = asus_device_ids, |
219 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
214 | .ops = { | 220 | .ops = { |
215 | .add = asus_hotk_add, | 221 | .add = asus_hotk_add, |
216 | .remove = asus_hotk_remove, | 222 | .remove = asus_hotk_remove, |
223 | .notify = asus_hotk_notify, | ||
217 | }, | 224 | }, |
218 | }; | 225 | }; |
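This hunk is the recurring pattern of the whole series: instead of calling acpi_install_notify_handler() from .add() and smuggling private data through a void pointer, the driver declares a .notify op and lets the ACPI core dispatch events; ACPI_DRIVER_ALL_NOTIFY_EVENTS additionally requests device-specific notifications, not just system ones. A minimal sketch of the converted shape, with hypothetical foo_* names:

```c
/* Hypothetical skeleton illustrating the .notify conversion. */
static void foo_notify(struct acpi_device *device, u32 event)
{
	struct foo_priv *priv = acpi_driver_data(device); /* replaces void *data */

	/* handle event ... */
}

static struct acpi_driver foo_driver = {
	.name	= "foo",
	.class	= "foo",
	.ids	= foo_device_ids,
	.flags	= ACPI_DRIVER_ALL_NOTIFY_EVENTS, /* deliver device events too */
	.ops	= {
		.add	= foo_add,
		.remove	= foo_remove,	/* no acpi_remove_notify_handler() needed */
		.notify	= foo_notify,
	},
};
```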
219 | 226 | ||
@@ -323,7 +330,7 @@ static int read_wireless_status(int mask) | |||
323 | 330 | ||
324 | rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status); | 331 | rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status); |
325 | if (ACPI_FAILURE(rv)) | 332 | if (ACPI_FAILURE(rv)) |
326 | printk(ASUS_WARNING "Error reading Wireless status\n"); | 333 | pr_warning("Error reading Wireless status\n"); |
327 | else | 334 | else |
328 | return (status & mask) ? 1 : 0; | 335 | return (status & mask) ? 1 : 0; |
329 | 336 | ||
@@ -337,7 +344,7 @@ static int read_gps_status(void) | |||
337 | 344 | ||
338 | rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); | 345 | rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); |
339 | if (ACPI_FAILURE(rv)) | 346 | if (ACPI_FAILURE(rv)) |
340 | printk(ASUS_WARNING "Error reading GPS status\n"); | 347 | pr_warning("Error reading GPS status\n"); |
341 | else | 348 | else |
342 | return status ? 1 : 0; | 349 | return status ? 1 : 0; |
343 | 350 | ||
@@ -377,7 +384,7 @@ static void write_status(acpi_handle handle, int out, int mask) | |||
377 | } | 384 | } |
378 | 385 | ||
379 | if (write_acpi_int(handle, NULL, out, NULL)) | 386 | if (write_acpi_int(handle, NULL, out, NULL)) |
380 | printk(ASUS_WARNING " write failed %x\n", mask); | 387 | pr_warning(" write failed %x\n", mask); |
381 | } | 388 | } |
382 | 389 | ||
383 | /* /sys/class/led handlers */ | 390 | /* /sys/class/led handlers */ |
@@ -420,7 +427,7 @@ static int set_lcd_state(int value) | |||
420 | NULL, NULL, NULL); | 427 | NULL, NULL, NULL); |
421 | 428 | ||
422 | if (ACPI_FAILURE(status)) | 429 | if (ACPI_FAILURE(status)) |
423 | printk(ASUS_WARNING "Error switching LCD\n"); | 430 | pr_warning("Error switching LCD\n"); |
424 | } | 431 | } |
425 | 432 | ||
426 | write_status(NULL, lcd, LCD_ON); | 433 | write_status(NULL, lcd, LCD_ON); |
@@ -444,7 +451,7 @@ static int read_brightness(struct backlight_device *bd) | |||
444 | 451 | ||
445 | rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); | 452 | rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); |
446 | if (ACPI_FAILURE(rv)) | 453 | if (ACPI_FAILURE(rv)) |
447 | printk(ASUS_WARNING "Error reading brightness\n"); | 454 | pr_warning("Error reading brightness\n"); |
448 | 455 | ||
449 | return value; | 456 | return value; |
450 | } | 457 | } |
@@ -457,7 +464,7 @@ static int set_brightness(struct backlight_device *bd, int value) | |||
457 | /* 0 <= value <= 15 */ | 464 | /* 0 <= value <= 15 */ |
458 | 465 | ||
459 | if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { | 466 | if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { |
460 | printk(ASUS_WARNING "Error changing brightness\n"); | 467 | pr_warning("Error changing brightness\n"); |
461 | ret = -EIO; | 468 | ret = -EIO; |
462 | } | 469 | } |
463 | 470 | ||
@@ -587,7 +594,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, | |||
587 | rv = parse_arg(buf, count, &value); | 594 | rv = parse_arg(buf, count, &value); |
588 | if (rv > 0) { | 595 | if (rv > 0) { |
589 | if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) | 596 | if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) |
590 | printk(ASUS_WARNING "LED display write failed\n"); | 597 | pr_warning("LED display write failed\n"); |
591 | else | 598 | else |
592 | hotk->ledd_status = (u32) value; | 599 | hotk->ledd_status = (u32) value; |
593 | } | 600 | } |
@@ -632,7 +639,7 @@ static void set_display(int value) | |||
632 | { | 639 | { |
633 | /* no sanity check needed for now */ | 640 | /* no sanity check needed for now */ |
634 | if (write_acpi_int(display_set_handle, NULL, value, NULL)) | 641 | if (write_acpi_int(display_set_handle, NULL, value, NULL)) |
635 | printk(ASUS_WARNING "Error setting display\n"); | 642 | pr_warning("Error setting display\n"); |
636 | return; | 643 | return; |
637 | } | 644 | } |
638 | 645 | ||
@@ -647,7 +654,7 @@ static int read_display(void) | |||
647 | rv = acpi_evaluate_integer(display_get_handle, NULL, | 654 | rv = acpi_evaluate_integer(display_get_handle, NULL, |
648 | NULL, &value); | 655 | NULL, &value); |
649 | if (ACPI_FAILURE(rv)) | 656 | if (ACPI_FAILURE(rv)) |
650 | printk(ASUS_WARNING "Error reading display status\n"); | 657 | pr_warning("Error reading display status\n"); |
651 | } | 658 | } |
652 | 659 | ||
653 | value &= 0x0F; /* needed for some models, shouldn't hurt others */ | 660 | value &= 0x0F; /* needed for some models, shouldn't hurt others */ |
@@ -689,7 +696,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr, | |||
689 | static void set_light_sens_switch(int value) | 696 | static void set_light_sens_switch(int value) |
690 | { | 697 | { |
691 | if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) | 698 | if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) |
692 | printk(ASUS_WARNING "Error setting light sensor switch\n"); | 699 | pr_warning("Error setting light sensor switch\n"); |
693 | hotk->light_switch = value; | 700 | hotk->light_switch = value; |
694 | } | 701 | } |
695 | 702 | ||
@@ -714,7 +721,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, | |||
714 | static void set_light_sens_level(int value) | 721 | static void set_light_sens_level(int value) |
715 | { | 722 | { |
716 | if (write_acpi_int(ls_level_handle, NULL, value, NULL)) | 723 | if (write_acpi_int(ls_level_handle, NULL, value, NULL)) |
717 | printk(ASUS_WARNING "Error setting light sensor level\n"); | 724 | pr_warning("Error setting light sensor level\n"); |
718 | hotk->light_level = value; | 725 | hotk->light_level = value; |
719 | } | 726 | } |
720 | 727 | ||
@@ -812,7 +819,7 @@ static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode) | |||
812 | return -EINVAL; | 819 | return -EINVAL; |
813 | } | 820 | } |
814 | 821 | ||
815 | static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) | 822 | static void asus_hotk_notify(struct acpi_device *device, u32 event) |
816 | { | 823 | { |
817 | static struct key_entry *key; | 824 | static struct key_entry *key; |
818 | u16 count; | 825 | u16 count; |
@@ -975,11 +982,11 @@ static int asus_hotk_get_info(void) | |||
975 | */ | 982 | */ |
976 | status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); | 983 | status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); |
977 | if (ACPI_FAILURE(status)) | 984 | if (ACPI_FAILURE(status)) |
978 | printk(ASUS_WARNING "Couldn't get the DSDT table header\n"); | 985 | pr_warning("Couldn't get the DSDT table header\n"); |
979 | 986 | ||
980 | /* We have to write 0 on init this far for all ASUS models */ | 987 | /* We have to write 0 on init this far for all ASUS models */ |
981 | if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { | 988 | if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { |
982 | printk(ASUS_ERR "Hotkey initialization failed\n"); | 989 | pr_err("Hotkey initialization failed\n"); |
983 | return -ENODEV; | 990 | return -ENODEV; |
984 | } | 991 | } |
985 | 992 | ||
@@ -987,9 +994,9 @@ static int asus_hotk_get_info(void) | |||
987 | status = | 994 | status = |
988 | acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result); | 995 | acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result); |
989 | if (ACPI_FAILURE(status)) | 996 | if (ACPI_FAILURE(status)) |
990 | printk(ASUS_WARNING "Error calling BSTS\n"); | 997 | pr_warning("Error calling BSTS\n"); |
991 | else if (bsts_result) | 998 | else if (bsts_result) |
992 | printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n", | 999 | pr_notice("BSTS called, 0x%02x returned\n", |
993 | (uint) bsts_result); | 1000 | (uint) bsts_result); |
994 | 1001 | ||
995 | /* This too ... */ | 1002 | /* This too ... */ |
@@ -1020,7 +1027,7 @@ static int asus_hotk_get_info(void) | |||
1020 | return -ENOMEM; | 1027 | return -ENOMEM; |
1021 | 1028 | ||
1022 | if (*string) | 1029 | if (*string) |
1023 | printk(ASUS_NOTICE " %s model detected\n", string); | 1030 | pr_notice(" %s model detected\n", string); |
1024 | 1031 | ||
1025 | ASUS_HANDLE_INIT(mled_set); | 1032 | ASUS_HANDLE_INIT(mled_set); |
1026 | ASUS_HANDLE_INIT(tled_set); | 1033 | ASUS_HANDLE_INIT(tled_set); |
@@ -1077,7 +1084,7 @@ static int asus_input_init(void) | |||
1077 | 1084 | ||
1078 | hotk->inputdev = input_allocate_device(); | 1085 | hotk->inputdev = input_allocate_device(); |
1079 | if (!hotk->inputdev) { | 1086 | if (!hotk->inputdev) { |
1080 | printk(ASUS_INFO "Unable to allocate input device\n"); | 1087 | pr_info("Unable to allocate input device\n"); |
1081 | return 0; | 1088 | return 0; |
1082 | } | 1089 | } |
1083 | hotk->inputdev->name = "Asus Laptop extra buttons"; | 1090 | hotk->inputdev->name = "Asus Laptop extra buttons"; |
@@ -1096,7 +1103,7 @@ static int asus_input_init(void) | |||
1096 | } | 1103 | } |
1097 | result = input_register_device(hotk->inputdev); | 1104 | result = input_register_device(hotk->inputdev); |
1098 | if (result) { | 1105 | if (result) { |
1099 | printk(ASUS_INFO "Unable to register input device\n"); | 1106 | pr_info("Unable to register input device\n"); |
1100 | input_free_device(hotk->inputdev); | 1107 | input_free_device(hotk->inputdev); |
1101 | } | 1108 | } |
1102 | return result; | 1109 | return result; |
@@ -1113,7 +1120,7 @@ static int asus_hotk_check(void) | |||
1113 | if (hotk->device->status.present) { | 1120 | if (hotk->device->status.present) { |
1114 | result = asus_hotk_get_info(); | 1121 | result = asus_hotk_get_info(); |
1115 | } else { | 1122 | } else { |
1116 | printk(ASUS_ERR "Hotkey device not present, aborting\n"); | 1123 | pr_err("Hotkey device not present, aborting\n"); |
1117 | return -EINVAL; | 1124 | return -EINVAL; |
1118 | } | 1125 | } |
1119 | 1126 | ||
@@ -1124,13 +1131,12 @@ static int asus_hotk_found; | |||
1124 | 1131 | ||
1125 | static int asus_hotk_add(struct acpi_device *device) | 1132 | static int asus_hotk_add(struct acpi_device *device) |
1126 | { | 1133 | { |
1127 | acpi_status status = AE_OK; | ||
1128 | int result; | 1134 | int result; |
1129 | 1135 | ||
1130 | if (!device) | 1136 | if (!device) |
1131 | return -EINVAL; | 1137 | return -EINVAL; |
1132 | 1138 | ||
1133 | printk(ASUS_NOTICE "Asus Laptop Support version %s\n", | 1139 | pr_notice("Asus Laptop Support version %s\n", |
1134 | ASUS_LAPTOP_VERSION); | 1140 | ASUS_LAPTOP_VERSION); |
1135 | 1141 | ||
1136 | hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL); | 1142 | hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL); |
@@ -1149,15 +1155,6 @@ static int asus_hotk_add(struct acpi_device *device) | |||
1149 | 1155 | ||
1150 | asus_hotk_add_fs(); | 1156 | asus_hotk_add_fs(); |
1151 | 1157 | ||
1152 | /* | ||
1153 | * We install the handler, it will receive the hotk in parameter, so, we | ||
1154 | * could add other data to the hotk struct | ||
1155 | */ | ||
1156 | status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY, | ||
1157 | asus_hotk_notify, hotk); | ||
1158 | if (ACPI_FAILURE(status)) | ||
1159 | printk(ASUS_ERR "Error installing notify handler\n"); | ||
1160 | |||
1161 | asus_hotk_found = 1; | 1158 | asus_hotk_found = 1; |
1162 | 1159 | ||
1163 | /* WLED and BLED are on by default */ | 1160 | /* WLED and BLED are on by default */ |
@@ -1198,16 +1195,9 @@ end: | |||
1198 | 1195 | ||
1199 | static int asus_hotk_remove(struct acpi_device *device, int type) | 1196 | static int asus_hotk_remove(struct acpi_device *device, int type) |
1200 | { | 1197 | { |
1201 | acpi_status status = 0; | ||
1202 | |||
1203 | if (!device || !acpi_driver_data(device)) | 1198 | if (!device || !acpi_driver_data(device)) |
1204 | return -EINVAL; | 1199 | return -EINVAL; |
1205 | 1200 | ||
1206 | status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY, | ||
1207 | asus_hotk_notify); | ||
1208 | if (ACPI_FAILURE(status)) | ||
1209 | printk(ASUS_ERR "Error removing notify handler\n"); | ||
1210 | |||
1211 | kfree(hotk->name); | 1201 | kfree(hotk->name); |
1212 | kfree(hotk); | 1202 | kfree(hotk); |
1213 | 1203 | ||
@@ -1260,8 +1250,7 @@ static int asus_backlight_init(struct device *dev) | |||
1260 | bd = backlight_device_register(ASUS_HOTK_FILE, dev, | 1250 | bd = backlight_device_register(ASUS_HOTK_FILE, dev, |
1261 | NULL, &asusbl_ops); | 1251 | NULL, &asusbl_ops); |
1262 | if (IS_ERR(bd)) { | 1252 | if (IS_ERR(bd)) { |
1263 | printk(ASUS_ERR | 1253 | pr_err("Could not register asus backlight device\n"); |
1264 | "Could not register asus backlight device\n"); | ||
1265 | asus_backlight_device = NULL; | 1254 | asus_backlight_device = NULL; |
1266 | return PTR_ERR(bd); | 1255 | return PTR_ERR(bd); |
1267 | } | 1256 | } |
@@ -1334,7 +1323,6 @@ out: | |||
1334 | 1323 | ||
1335 | static int __init asus_laptop_init(void) | 1324 | static int __init asus_laptop_init(void) |
1336 | { | 1325 | { |
1337 | struct device *dev; | ||
1338 | int result; | 1326 | int result; |
1339 | 1327 | ||
1340 | if (acpi_disabled) | 1328 | if (acpi_disabled) |
@@ -1356,24 +1344,10 @@ static int __init asus_laptop_init(void) | |||
1356 | return -ENODEV; | 1344 | return -ENODEV; |
1357 | } | 1345 | } |
1358 | 1346 | ||
1359 | dev = acpi_get_physical_device(hotk->device->handle); | ||
1360 | |||
1361 | if (!acpi_video_backlight_support()) { | ||
1362 | result = asus_backlight_init(dev); | ||
1363 | if (result) | ||
1364 | goto fail_backlight; | ||
1365 | } else | ||
1366 | printk(ASUS_INFO "Brightness ignored, must be controlled by " | ||
1367 | "ACPI video driver\n"); | ||
1368 | |||
1369 | result = asus_input_init(); | 1347 | result = asus_input_init(); |
1370 | if (result) | 1348 | if (result) |
1371 | goto fail_input; | 1349 | goto fail_input; |
1372 | 1350 | ||
1373 | result = asus_led_init(dev); | ||
1374 | if (result) | ||
1375 | goto fail_led; | ||
1376 | |||
1377 | /* Register platform stuff */ | 1351 | /* Register platform stuff */ |
1378 | result = platform_driver_register(&asuspf_driver); | 1352 | result = platform_driver_register(&asuspf_driver); |
1379 | if (result) | 1353 | if (result) |
@@ -1394,8 +1368,27 @@ static int __init asus_laptop_init(void) | |||
1394 | if (result) | 1368 | if (result) |
1395 | goto fail_sysfs; | 1369 | goto fail_sysfs; |
1396 | 1370 | ||
1371 | result = asus_led_init(&asuspf_device->dev); | ||
1372 | if (result) | ||
1373 | goto fail_led; | ||
1374 | |||
1375 | if (!acpi_video_backlight_support()) { | ||
1376 | result = asus_backlight_init(&asuspf_device->dev); | ||
1377 | if (result) | ||
1378 | goto fail_backlight; | ||
1379 | } else | ||
1380 | pr_info("Brightness ignored, must be controlled by " | ||
1381 | "ACPI video driver\n"); | ||
1382 | |||
1397 | return 0; | 1383 | return 0; |
1398 | 1384 | ||
1385 | fail_backlight: | ||
1386 | asus_led_exit(); | ||
1387 | |||
1388 | fail_led: | ||
1389 | sysfs_remove_group(&asuspf_device->dev.kobj, | ||
1390 | &asuspf_attribute_group); | ||
1391 | |||
1399 | fail_sysfs: | 1392 | fail_sysfs: |
1400 | platform_device_del(asuspf_device); | 1393 | platform_device_del(asuspf_device); |
1401 | 1394 | ||
@@ -1406,15 +1399,9 @@ fail_platform_device1: | |||
1406 | platform_driver_unregister(&asuspf_driver); | 1399 | platform_driver_unregister(&asuspf_driver); |
1407 | 1400 | ||
1408 | fail_platform_driver: | 1401 | fail_platform_driver: |
1409 | asus_led_exit(); | ||
1410 | |||
1411 | fail_led: | ||
1412 | asus_input_exit(); | 1402 | asus_input_exit(); |
1413 | 1403 | ||
1414 | fail_input: | 1404 | fail_input: |
1415 | asus_backlight_exit(); | ||
1416 | |||
1417 | fail_backlight: | ||
1418 | 1405 | ||
1419 | return result; | 1406 | return result; |
1420 | } | 1407 | } |
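The reshuffled error paths keep the usual goto-unwind invariant: teardown happens in exactly the reverse order of the new initialization order, which is why moving asus_led_init()/asus_backlight_init() after the platform device setup also moves their failure labels to the top of the unwind chain. The generic template the function follows (step names are hypothetical):

```c
/* Generic init/unwind shape (hypothetical step_/undo_ names). */
static int __init example_init(void)
{
	int err;

	err = step_a();		/* e.g. input setup */
	if (err)
		goto fail_a;
	err = step_b();		/* e.g. platform device */
	if (err)
		goto fail_b;
	err = step_c();		/* e.g. LEDs, backlight */
	if (err)
		goto fail_c;
	return 0;

fail_c:
	undo_b();
fail_b:
	undo_a();
fail_a:
	return err;
}
```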
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index ba1f7497e4b9..ddf5240ade8c 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c | |||
@@ -455,6 +455,8 @@ static struct asus_hotk *hotk; | |||
455 | */ | 455 | */ |
456 | static int asus_hotk_add(struct acpi_device *device); | 456 | static int asus_hotk_add(struct acpi_device *device); |
457 | static int asus_hotk_remove(struct acpi_device *device, int type); | 457 | static int asus_hotk_remove(struct acpi_device *device, int type); |
458 | static void asus_hotk_notify(struct acpi_device *device, u32 event); | ||
459 | |||
458 | static const struct acpi_device_id asus_device_ids[] = { | 460 | static const struct acpi_device_id asus_device_ids[] = { |
459 | {"ATK0100", 0}, | 461 | {"ATK0100", 0}, |
460 | {"", 0}, | 462 | {"", 0}, |
@@ -465,9 +467,11 @@ static struct acpi_driver asus_hotk_driver = { | |||
465 | .name = "asus_acpi", | 467 | .name = "asus_acpi", |
466 | .class = ACPI_HOTK_CLASS, | 468 | .class = ACPI_HOTK_CLASS, |
467 | .ids = asus_device_ids, | 469 | .ids = asus_device_ids, |
470 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
468 | .ops = { | 471 | .ops = { |
469 | .add = asus_hotk_add, | 472 | .add = asus_hotk_add, |
470 | .remove = asus_hotk_remove, | 473 | .remove = asus_hotk_remove, |
474 | .notify = asus_hotk_notify, | ||
471 | }, | 475 | }, |
472 | }; | 476 | }; |
473 | 477 | ||
@@ -1101,12 +1105,20 @@ static int asus_hotk_remove_fs(struct acpi_device *device) | |||
1101 | return 0; | 1105 | return 0; |
1102 | } | 1106 | } |
1103 | 1107 | ||
1104 | static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) | 1108 | static void asus_hotk_notify(struct acpi_device *device, u32 event) |
1105 | { | 1109 | { |
1106 | /* TODO Find a better way to handle events count. */ | 1110 | /* TODO Find a better way to handle events count. */ |
1107 | if (!hotk) | 1111 | if (!hotk) |
1108 | return; | 1112 | return; |
1109 | 1113 | ||
1114 | /* | ||
1115 | * The BIOS *should* be sending us device events, but apparently | ||
1116 | * Asus uses system events instead, so just ignore any device | ||
1117 | * events we get. | ||
1118 | */ | ||
1119 | if (event > ACPI_MAX_SYS_NOTIFY) | ||
1120 | return; | ||
1121 | |||
1110 | if ((event & ~((u32) BR_UP)) < 16) | 1122 | if ((event & ~((u32) BR_UP)) < 16) |
1111 | hotk->brightness = (event & ~((u32) BR_UP)); | 1123 | hotk->brightness = (event & ~((u32) BR_UP)); |
1112 | else if ((event & ~((u32) BR_DOWN)) < 16) | 1124 | else if ((event & ~((u32) BR_DOWN)) < 16) |
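Background for the new check: ACPI notify values 0x00-0x7f are the standard, system-defined notifications, while 0x80 and up are device-specific; ACPI_MAX_SYS_NOTIFY is the top of the system range (0x7f in ACPICA's actypes.h). The removed code registered the handler with ACPI_SYSTEM_NOTIFY only, so after switching to ACPI_DRIVER_ALL_NOTIFY_EVENTS this early return restores the old event filtering.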
@@ -1346,15 +1358,6 @@ static int asus_hotk_add(struct acpi_device *device) | |||
1346 | if (result) | 1358 | if (result) |
1347 | goto end; | 1359 | goto end; |
1348 | 1360 | ||
1349 | /* | ||
1350 | * We install the handler, it will receive the hotk in parameter, so, we | ||
1351 | * could add other data to the hotk struct | ||
1352 | */ | ||
1353 | status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, | ||
1354 | asus_hotk_notify, hotk); | ||
1355 | if (ACPI_FAILURE(status)) | ||
1356 | printk(KERN_ERR " Error installing notify handler\n"); | ||
1357 | |||
1358 | /* For laptops without GPLV: init the hotk->brightness value */ | 1361 | /* For laptops without GPLV: init the hotk->brightness value */ |
1359 | if ((!hotk->methods->brightness_get) | 1362 | if ((!hotk->methods->brightness_get) |
1360 | && (!hotk->methods->brightness_status) | 1363 | && (!hotk->methods->brightness_status) |
@@ -1389,16 +1392,9 @@ end: | |||
1389 | 1392 | ||
1390 | static int asus_hotk_remove(struct acpi_device *device, int type) | 1393 | static int asus_hotk_remove(struct acpi_device *device, int type) |
1391 | { | 1394 | { |
1392 | acpi_status status = 0; | ||
1393 | |||
1394 | if (!device || !acpi_driver_data(device)) | 1395 | if (!device || !acpi_driver_data(device)) |
1395 | return -EINVAL; | 1396 | return -EINVAL; |
1396 | 1397 | ||
1397 | status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, | ||
1398 | asus_hotk_notify); | ||
1399 | if (ACPI_FAILURE(status)) | ||
1400 | printk(KERN_ERR "Asus ACPI: Error removing notify handler\n"); | ||
1401 | |||
1402 | asus_hotk_remove_fs(device); | 1398 | asus_hotk_remove_fs(device); |
1403 | 1399 | ||
1404 | kfree(hotk); | 1400 | kfree(hotk); |
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 2fab94162147..0f900cc9fa7a 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
@@ -46,10 +46,53 @@ struct key_entry { | |||
46 | u16 keycode; | 46 | u16 keycode; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | enum { KE_KEY, KE_SW, KE_END }; | 49 | enum { KE_KEY, KE_SW, KE_IGNORE, KE_END }; |
50 | |||
51 | /* | ||
52 | * Certain keys are flagged as KE_IGNORE. All of these are either | ||
53 | * notifications (rather than requests for change) or are also sent | ||
54 | * via the keyboard controller so should not be sent again. | ||
55 | */ | ||
50 | 56 | ||
51 | static struct key_entry dell_wmi_keymap[] = { | 57 | static struct key_entry dell_wmi_keymap[] = { |
52 | {KE_KEY, 0xe045, KEY_PROG1}, | 58 | {KE_KEY, 0xe045, KEY_PROG1}, |
59 | {KE_KEY, 0xe009, KEY_EJECTCD}, | ||
60 | |||
61 | /* These also contain the brightness level at offset 6 */ | ||
62 | {KE_KEY, 0xe006, KEY_BRIGHTNESSUP}, | ||
63 | {KE_KEY, 0xe005, KEY_BRIGHTNESSDOWN}, | ||
64 | |||
65 | /* Battery health status button */ | ||
66 | {KE_KEY, 0xe007, KEY_BATTERY}, | ||
67 | |||
68 | /* This is actually for all radios. Although physically a | ||
69 | * switch, the notification does not provide an indication of | ||
70 | * state and so it should be reported as a key */ | ||
71 | {KE_KEY, 0xe008, KEY_WLAN}, | ||
72 | |||
73 | /* The next device is at offset 6, the active devices are at | ||
74 | * offset 8 and the attached devices at offset 10 */ | ||
75 | {KE_KEY, 0xe00b, KEY_DISPLAYTOGGLE}, | ||
76 | |||
77 | {KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE}, | ||
78 | |||
79 | /* BIOS error detected */ | ||
80 | {KE_IGNORE, 0xe00d, KEY_RESERVED}, | ||
81 | |||
82 | /* Wifi Catcher */ | ||
83 | {KE_KEY, 0xe011, KEY_PROG2}, | ||
84 | |||
85 | /* Ambient light sensor toggle */ | ||
86 | {KE_IGNORE, 0xe013, KEY_RESERVED}, | ||
87 | |||
88 | {KE_IGNORE, 0xe020, KEY_MUTE}, | ||
89 | {KE_IGNORE, 0xe02e, KEY_VOLUMEDOWN}, | ||
90 | {KE_IGNORE, 0xe030, KEY_VOLUMEUP}, | ||
91 | {KE_IGNORE, 0xe033, KEY_KBDILLUMUP}, | ||
92 | {KE_IGNORE, 0xe034, KEY_KBDILLUMDOWN}, | ||
93 | {KE_IGNORE, 0xe03a, KEY_CAPSLOCK}, | ||
94 | {KE_IGNORE, 0xe045, KEY_NUMLOCK}, | ||
95 | {KE_IGNORE, 0xe046, KEY_SCROLLLOCK}, | ||
53 | {KE_END, 0} | 96 | {KE_END, 0} |
54 | }; | 97 | }; |
55 | 98 | ||
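The hunks shown here don't include the point where KE_IGNORE entries are filtered, so the following is only a sketch of the intended behaviour: the scancode still resolves in the keymap (avoiding the "Unknown key" message) but no input event is reported.

```c
/* Hypothetical sketch of KE_IGNORE handling in dell_wmi_notify(). */
key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF);
if (key && key->type == KE_KEY) {
	input_report_key(dell_wmi_input_dev, key->keycode, 1);
	input_sync(dell_wmi_input_dev);
	input_report_key(dell_wmi_input_dev, key->keycode, 0);
	input_sync(dell_wmi_input_dev);
}
/* KE_IGNORE entries fall through silently: recognized, but not reported. */
```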
@@ -122,15 +165,20 @@ static void dell_wmi_notify(u32 value, void *context) | |||
122 | 165 | ||
123 | if (obj && obj->type == ACPI_TYPE_BUFFER) { | 166 | if (obj && obj->type == ACPI_TYPE_BUFFER) { |
124 | int *buffer = (int *)obj->buffer.pointer; | 167 | int *buffer = (int *)obj->buffer.pointer; |
125 | key = dell_wmi_get_entry_by_scancode(buffer[1]); | 168 | /* |
169 | * The upper bytes of the event may contain | ||
170 | * additional information, so mask them off for the | ||
171 | * scancode lookup | ||
172 | */ | ||
173 | key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF); | ||
126 | if (key) { | 174 | if (key) { |
127 | input_report_key(dell_wmi_input_dev, key->keycode, 1); | 175 | input_report_key(dell_wmi_input_dev, key->keycode, 1); |
128 | input_sync(dell_wmi_input_dev); | 176 | input_sync(dell_wmi_input_dev); |
129 | input_report_key(dell_wmi_input_dev, key->keycode, 0); | 177 | input_report_key(dell_wmi_input_dev, key->keycode, 0); |
130 | input_sync(dell_wmi_input_dev); | 178 | input_sync(dell_wmi_input_dev); |
131 | } else | 179 | } else if (buffer[1] & 0xFFFF) |
132 | printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n", | 180 | printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n", |
133 | buffer[1]); | 181 | buffer[1] & 0xFFFF); |
134 | } | 182 | } |
135 | } | 183 | } |
136 | 184 | ||
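As a concrete illustration of the masking (the value is hypothetical): if the firmware delivered buffer[1] == 0x0001e006 for a brightness-up press, the lookup would use 0x0001e006 & 0xFFFF == 0xe006 and match KEY_BRIGHTNESSUP, while the upper bytes (0x0001 here) carry the kind of auxiliary data the keymap comments mention.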
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 8153b3e59189..4207b26ff990 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -62,7 +62,10 @@ enum { | |||
62 | DISABLE_ASL_GPS = 0x0020, | 62 | DISABLE_ASL_GPS = 0x0020, |
63 | DISABLE_ASL_DISPLAYSWITCH = 0x0040, | 63 | DISABLE_ASL_DISPLAYSWITCH = 0x0040, |
64 | DISABLE_ASL_MODEM = 0x0080, | 64 | DISABLE_ASL_MODEM = 0x0080, |
65 | DISABLE_ASL_CARDREADER = 0x0100 | 65 | DISABLE_ASL_CARDREADER = 0x0100, |
66 | DISABLE_ASL_3G = 0x0200, | ||
67 | DISABLE_ASL_WIMAX = 0x0400, | ||
68 | DISABLE_ASL_HWCF = 0x0800 | ||
66 | }; | 69 | }; |
67 | 70 | ||
68 | enum { | 71 | enum { |
@@ -87,7 +90,13 @@ enum { | |||
87 | CM_ASL_USBPORT3, | 90 | CM_ASL_USBPORT3, |
88 | CM_ASL_MODEM, | 91 | CM_ASL_MODEM, |
89 | CM_ASL_CARDREADER, | 92 | CM_ASL_CARDREADER, |
90 | CM_ASL_LID | 93 | CM_ASL_3G, |
94 | CM_ASL_WIMAX, | ||
95 | CM_ASL_HWCF, | ||
96 | CM_ASL_LID, | ||
97 | CM_ASL_TYPE, | ||
98 | CM_ASL_PANELPOWER, /*P901*/ | ||
99 | CM_ASL_TPD | ||
91 | }; | 100 | }; |
92 | 101 | ||
93 | static const char *cm_getv[] = { | 102 | static const char *cm_getv[] = { |
@@ -96,7 +105,8 @@ static const char *cm_getv[] = { | |||
96 | NULL, "PBLG", NULL, NULL, | 105 | NULL, "PBLG", NULL, NULL, |
97 | "CFVG", NULL, NULL, NULL, | 106 | "CFVG", NULL, NULL, NULL, |
98 | "USBG", NULL, NULL, "MODG", | 107 | "USBG", NULL, NULL, "MODG", |
99 | "CRDG", "LIDG" | 108 | "CRDG", "M3GG", "WIMG", "HWCF", |
109 | "LIDG", "TYPE", "PBPG", "TPDG" | ||
100 | }; | 110 | }; |
101 | 111 | ||
102 | static const char *cm_setv[] = { | 112 | static const char *cm_setv[] = { |
@@ -105,7 +115,8 @@ static const char *cm_setv[] = { | |||
105 | "SDSP", "PBLS", "HDPS", NULL, | 115 | "SDSP", "PBLS", "HDPS", NULL, |
106 | "CFVS", NULL, NULL, NULL, | 116 | "CFVS", NULL, NULL, NULL, |
107 | "USBG", NULL, NULL, "MODS", | 117 | "USBG", NULL, NULL, "MODS", |
108 | "CRDS", NULL | 118 | "CRDS", "M3GS", "WIMS", NULL, |
119 | NULL, NULL, "PBPS", "TPDS" | ||
109 | }; | 120 | }; |
110 | 121 | ||
111 | #define EEEPC_EC "\\_SB.PCI0.SBRG.EC0." | 122 | #define EEEPC_EC "\\_SB.PCI0.SBRG.EC0." |
@@ -181,6 +192,7 @@ static struct key_entry eeepc_keymap[] = { | |||
181 | static int eeepc_hotk_add(struct acpi_device *device); | 192 | static int eeepc_hotk_add(struct acpi_device *device); |
182 | static int eeepc_hotk_remove(struct acpi_device *device, int type); | 193 | static int eeepc_hotk_remove(struct acpi_device *device, int type); |
183 | static int eeepc_hotk_resume(struct acpi_device *device); | 194 | static int eeepc_hotk_resume(struct acpi_device *device); |
195 | static void eeepc_hotk_notify(struct acpi_device *device, u32 event); | ||
184 | 196 | ||
185 | static const struct acpi_device_id eeepc_device_ids[] = { | 197 | static const struct acpi_device_id eeepc_device_ids[] = { |
186 | {EEEPC_HOTK_HID, 0}, | 198 | {EEEPC_HOTK_HID, 0}, |
@@ -192,10 +204,12 @@ static struct acpi_driver eeepc_hotk_driver = { | |||
192 | .name = EEEPC_HOTK_NAME, | 204 | .name = EEEPC_HOTK_NAME, |
193 | .class = EEEPC_HOTK_CLASS, | 205 | .class = EEEPC_HOTK_CLASS, |
194 | .ids = eeepc_device_ids, | 206 | .ids = eeepc_device_ids, |
207 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
195 | .ops = { | 208 | .ops = { |
196 | .add = eeepc_hotk_add, | 209 | .add = eeepc_hotk_add, |
197 | .remove = eeepc_hotk_remove, | 210 | .remove = eeepc_hotk_remove, |
198 | .resume = eeepc_hotk_resume, | 211 | .resume = eeepc_hotk_resume, |
212 | .notify = eeepc_hotk_notify, | ||
199 | }, | 213 | }, |
200 | }; | 214 | }; |
201 | 215 | ||
@@ -318,6 +332,15 @@ static const struct rfkill_ops eeepc_rfkill_ops = { | |||
318 | .set_block = eeepc_rfkill_set, | 332 | .set_block = eeepc_rfkill_set, |
319 | }; | 333 | }; |
320 | 334 | ||
335 | static void __init eeepc_enable_camera(void) | ||
336 | { | ||
337 | /* | ||
338 | * If the following call to set_acpi() fails, it's because there's no | ||
339 | * camera, so we can ignore the error. | ||
340 | */ | ||
341 | set_acpi(CM_ASL_CAMERA, 1); | ||
342 | } | ||
343 | |||
321 | /* | 344 | /* |
322 | * Sys helpers | 345 | * Sys helpers |
323 | */ | 346 | */ |
@@ -369,13 +392,88 @@ static ssize_t show_sys_acpi(int cm, char *buf) | |||
369 | EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA); | 392 | EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA); |
370 | EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER); | 393 | EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER); |
371 | EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH); | 394 | EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH); |
372 | EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV); | 395 | |
396 | struct eeepc_cpufv { | ||
397 | int num; | ||
398 | int cur; | ||
399 | }; | ||
400 | |||
401 | static int get_cpufv(struct eeepc_cpufv *c) | ||
402 | { | ||
403 | c->cur = get_acpi(CM_ASL_CPUFV); | ||
404 | c->num = (c->cur >> 8) & 0xff; | ||
405 | c->cur &= 0xff; | ||
406 | if (c->cur < 0 || c->num <= 0 || c->num > 12) | ||
407 | return -ENODEV; | ||
408 | return 0; | ||
409 | } | ||
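A worked decode with an illustrative value: if the CPUFV getter returns 0x301, the machine offers three frequency/voltage levels (indices 0-2) and level 1 is currently selected; show_cpufv() prints the same packed form back, and store_cpufv() accepts only indices below num.

```c
/* Illustrative decode of a packed CPUFV value (0x301 is a made-up sample). */
int raw = 0x301;
int num = (raw >> 8) & 0xff;	/* 3: three levels available, indices 0..2 */
int cur = raw & 0xff;		/* 1: level 1 currently selected */
```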
410 | |||
411 | static ssize_t show_available_cpufv(struct device *dev, | ||
412 | struct device_attribute *attr, | ||
413 | char *buf) | ||
414 | { | ||
415 | struct eeepc_cpufv c; | ||
416 | int i; | ||
417 | ssize_t len = 0; | ||
418 | |||
419 | if (get_cpufv(&c)) | ||
420 | return -ENODEV; | ||
421 | for (i = 0; i < c.num; i++) | ||
422 | len += sprintf(buf + len, "%d ", i); | ||
423 | len += sprintf(buf + len, "\n"); | ||
424 | return len; | ||
425 | } | ||
426 | |||
427 | static ssize_t show_cpufv(struct device *dev, | ||
428 | struct device_attribute *attr, | ||
429 | char *buf) | ||
430 | { | ||
431 | struct eeepc_cpufv c; | ||
432 | |||
433 | if (get_cpufv(&c)) | ||
434 | return -ENODEV; | ||
435 | return sprintf(buf, "%#x\n", (c.num << 8) | c.cur); | ||
436 | } | ||
437 | |||
438 | static ssize_t store_cpufv(struct device *dev, | ||
439 | struct device_attribute *attr, | ||
440 | const char *buf, size_t count) | ||
441 | { | ||
442 | struct eeepc_cpufv c; | ||
443 | int rv, value; | ||
444 | |||
445 | if (get_cpufv(&c)) | ||
446 | return -ENODEV; | ||
447 | rv = parse_arg(buf, count, &value); | ||
448 | if (rv < 0) | ||
449 | return rv; | ||
450 | if (!rv || value < 0 || value >= c.num) | ||
451 | return -EINVAL; | ||
452 | set_acpi(CM_ASL_CPUFV, value); | ||
453 | return rv; | ||
454 | } | ||
455 | |||
456 | static struct device_attribute dev_attr_cpufv = { | ||
457 | .attr = { | ||
458 | .name = "cpufv", | ||
459 | .mode = 0644 }, | ||
460 | .show = show_cpufv, | ||
461 | .store = store_cpufv | ||
462 | }; | ||
463 | |||
464 | static struct device_attribute dev_attr_available_cpufv = { | ||
465 | .attr = { | ||
466 | .name = "available_cpufv", | ||
467 | .mode = 0444 }, | ||
468 | .show = show_available_cpufv | ||
469 | }; | ||
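These open-coded attribute structs are equivalent to the usual helper macro; assuming nothing here requires the explicit initializers, the same declarations could read:

```c
/* Equivalent declarations via the standard macro (sketch). */
static DEVICE_ATTR(cpufv, 0644, show_cpufv, store_cpufv);
static DEVICE_ATTR(available_cpufv, 0444, show_available_cpufv, NULL);
```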
373 | 470 | ||
374 | static struct attribute *platform_attributes[] = { | 471 | static struct attribute *platform_attributes[] = { |
375 | &dev_attr_camera.attr, | 472 | &dev_attr_camera.attr, |
376 | &dev_attr_cardr.attr, | 473 | &dev_attr_cardr.attr, |
377 | &dev_attr_disp.attr, | 474 | &dev_attr_disp.attr, |
378 | &dev_attr_cpufv.attr, | 475 | &dev_attr_cpufv.attr, |
476 | &dev_attr_available_cpufv.attr, | ||
379 | NULL | 477 | NULL |
380 | }; | 478 | }; |
381 | 479 | ||
@@ -558,7 +656,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) | |||
558 | eeepc_rfkill_hotplug(); | 656 | eeepc_rfkill_hotplug(); |
559 | } | 657 | } |
560 | 658 | ||
561 | static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) | 659 | static void eeepc_hotk_notify(struct acpi_device *device, u32 event) |
562 | { | 660 | { |
563 | static struct key_entry *key; | 661 | static struct key_entry *key; |
564 | u16 count; | 662 | u16 count; |
@@ -566,6 +664,8 @@ static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) | |||
566 | 664 | ||
567 | if (!ehotk) | 665 | if (!ehotk) |
568 | return; | 666 | return; |
667 | if (event > ACPI_MAX_SYS_NOTIFY) | ||
668 | return; | ||
569 | if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) | 669 | if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) |
570 | brn = notify_brn(); | 670 | brn = notify_brn(); |
571 | count = ehotk->event_count[event % 128]++; | 671 | count = ehotk->event_count[event % 128]++; |
@@ -646,7 +746,6 @@ static void eeepc_unregister_rfkill_notifier(char *node) | |||
646 | 746 | ||
647 | static int eeepc_hotk_add(struct acpi_device *device) | 747 | static int eeepc_hotk_add(struct acpi_device *device) |
648 | { | 748 | { |
649 | acpi_status status = AE_OK; | ||
650 | int result; | 749 | int result; |
651 | 750 | ||
652 | if (!device) | 751 | if (!device) |
@@ -664,10 +763,6 @@ static int eeepc_hotk_add(struct acpi_device *device) | |||
664 | result = eeepc_hotk_check(); | 763 | result = eeepc_hotk_check(); |
665 | if (result) | 764 | if (result) |
666 | goto ehotk_fail; | 765 | goto ehotk_fail; |
667 | status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY, | ||
668 | eeepc_hotk_notify, ehotk); | ||
669 | if (ACPI_FAILURE(status)) | ||
670 | printk(EEEPC_ERR "Error installing notify handler\n"); | ||
671 | 766 | ||
672 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); | 767 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); |
673 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); | 768 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); |
@@ -725,14 +820,8 @@ static int eeepc_hotk_add(struct acpi_device *device) | |||
725 | 820 | ||
726 | static int eeepc_hotk_remove(struct acpi_device *device, int type) | 821 | static int eeepc_hotk_remove(struct acpi_device *device, int type) |
727 | { | 822 | { |
728 | acpi_status status = 0; | ||
729 | |||
730 | if (!device || !acpi_driver_data(device)) | 823 | if (!device || !acpi_driver_data(device)) |
731 | return -EINVAL; | 824 | return -EINVAL; |
732 | status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY, | ||
733 | eeepc_hotk_notify); | ||
734 | if (ACPI_FAILURE(status)) | ||
735 | printk(EEEPC_ERR "Error removing notify handler\n"); | ||
736 | 825 | ||
737 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); | 826 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); |
738 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); | 827 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); |
@@ -989,6 +1078,9 @@ static int __init eeepc_laptop_init(void) | |||
989 | result = eeepc_hwmon_init(dev); | 1078 | result = eeepc_hwmon_init(dev); |
990 | if (result) | 1079 | if (result) |
991 | goto fail_hwmon; | 1080 | goto fail_hwmon; |
1081 | |||
1082 | eeepc_enable_camera(); | ||
1083 | |||
992 | /* Register platform stuff */ | 1084 | /* Register platform stuff */ |
993 | result = platform_driver_register(&platform_driver); | 1085 | result = platform_driver_register(&platform_driver); |
994 | if (result) | 1086 | if (result) |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 16fffe44e333..4ac2311c00af 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -47,7 +47,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
47 | #define HPWMI_DISPLAY_QUERY 0x1 | 47 | #define HPWMI_DISPLAY_QUERY 0x1 |
48 | #define HPWMI_HDDTEMP_QUERY 0x2 | 48 | #define HPWMI_HDDTEMP_QUERY 0x2 |
49 | #define HPWMI_ALS_QUERY 0x3 | 49 | #define HPWMI_ALS_QUERY 0x3 |
50 | #define HPWMI_DOCK_QUERY 0x4 | 50 | #define HPWMI_HARDWARE_QUERY 0x4 |
51 | #define HPWMI_WIRELESS_QUERY 0x5 | 51 | #define HPWMI_WIRELESS_QUERY 0x5 |
52 | #define HPWMI_HOTKEY_QUERY 0xc | 52 | #define HPWMI_HOTKEY_QUERY 0xc |
53 | 53 | ||
@@ -75,10 +75,9 @@ struct key_entry { | |||
75 | u16 keycode; | 75 | u16 keycode; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | enum { KE_KEY, KE_SW, KE_END }; | 78 | enum { KE_KEY, KE_END }; |
79 | 79 | ||
80 | static struct key_entry hp_wmi_keymap[] = { | 80 | static struct key_entry hp_wmi_keymap[] = { |
81 | {KE_SW, 0x01, SW_DOCK}, | ||
82 | {KE_KEY, 0x02, KEY_BRIGHTNESSUP}, | 81 | {KE_KEY, 0x02, KEY_BRIGHTNESSUP}, |
83 | {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN}, | 82 | {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN}, |
84 | {KE_KEY, 0x20e6, KEY_PROG1}, | 83 | {KE_KEY, 0x20e6, KEY_PROG1}, |
@@ -151,7 +150,22 @@ static int hp_wmi_als_state(void) | |||
151 | 150 | ||
152 | static int hp_wmi_dock_state(void) | 151 | static int hp_wmi_dock_state(void) |
153 | { | 152 | { |
154 | return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0); | 153 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0); |
154 | |||
155 | if (ret < 0) | ||
156 | return ret; | ||
157 | |||
158 | return ret & 0x1; | ||
159 | } | ||
160 | |||
161 | static int hp_wmi_tablet_state(void) | ||
162 | { | ||
163 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0); | ||
164 | |||
165 | if (ret < 0) | ||
166 | return ret; | ||
167 | |||
168 | return (ret & 0x4) ? 1 : 0; | ||
155 | } | 169 | } |
156 | 170 | ||
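
The renamed query now multiplexes two bits of hardware state: dock in bit 0 and tablet mode in bit 2 of the returned word, as the two accessors above read off. A tiny standalone decoder (the *_BIT names are mine, not the driver's):

    #include <stdio.h>

    #define HW_DOCK_BIT   0x1 /* bit 0 of the HPWMI_HARDWARE_QUERY result */
    #define HW_TABLET_BIT 0x4 /* bit 2 */

    int main(void)
    {
            int raw = 0x5; /* example value: docked, and in tablet mode */

            printf("dock=%d tablet=%d\n",
                   !!(raw & HW_DOCK_BIT), !!(raw & HW_TABLET_BIT));
            return 0;
    }
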
157 | static int hp_wmi_set_block(void *data, bool blocked) | 171 | static int hp_wmi_set_block(void *data, bool blocked) |
@@ -232,6 +246,15 @@ static ssize_t show_dock(struct device *dev, struct device_attribute *attr, | |||
232 | return sprintf(buf, "%d\n", value); | 246 | return sprintf(buf, "%d\n", value); |
233 | } | 247 | } |
234 | 248 | ||
249 | static ssize_t show_tablet(struct device *dev, struct device_attribute *attr, | ||
250 | char *buf) | ||
251 | { | ||
252 | int value = hp_wmi_tablet_state(); | ||
253 | if (value < 0) | ||
254 | return -EINVAL; | ||
255 | return sprintf(buf, "%d\n", value); | ||
256 | } | ||
257 | |||
235 | static ssize_t set_als(struct device *dev, struct device_attribute *attr, | 258 | static ssize_t set_als(struct device *dev, struct device_attribute *attr, |
236 | const char *buf, size_t count) | 259 | const char *buf, size_t count) |
237 | { | 260 | { |
@@ -244,6 +267,7 @@ static DEVICE_ATTR(display, S_IRUGO, show_display, NULL); | |||
244 | static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL); | 267 | static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL); |
245 | static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als); | 268 | static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als); |
246 | static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL); | 269 | static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL); |
270 | static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL); | ||
247 | 271 | ||
248 | static struct key_entry *hp_wmi_get_entry_by_scancode(int code) | 272 | static struct key_entry *hp_wmi_get_entry_by_scancode(int code) |
249 | { | 273 | { |
@@ -326,13 +350,13 @@ static void hp_wmi_notify(u32 value, void *context) | |||
326 | key->keycode, 0); | 350 | key->keycode, 0); |
327 | input_sync(hp_wmi_input_dev); | 351 | input_sync(hp_wmi_input_dev); |
328 | break; | 352 | break; |
329 | case KE_SW: | ||
330 | input_report_switch(hp_wmi_input_dev, | ||
331 | key->keycode, | ||
332 | hp_wmi_dock_state()); | ||
333 | input_sync(hp_wmi_input_dev); | ||
334 | break; | ||
335 | } | 353 | } |
354 | } else if (eventcode == 0x1) { | ||
355 | input_report_switch(hp_wmi_input_dev, SW_DOCK, | ||
356 | hp_wmi_dock_state()); | ||
357 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, | ||
358 | hp_wmi_tablet_state()); | ||
359 | input_sync(hp_wmi_input_dev); | ||
336 | } else if (eventcode == 0x5) { | 360 | } else if (eventcode == 0x5) { |
337 | if (wifi_rfkill) | 361 | if (wifi_rfkill) |
338 | rfkill_set_sw_state(wifi_rfkill, | 362 | rfkill_set_sw_state(wifi_rfkill, |
@@ -369,18 +393,19 @@ static int __init hp_wmi_input_setup(void) | |||
369 | set_bit(EV_KEY, hp_wmi_input_dev->evbit); | 393 | set_bit(EV_KEY, hp_wmi_input_dev->evbit); |
370 | set_bit(key->keycode, hp_wmi_input_dev->keybit); | 394 | set_bit(key->keycode, hp_wmi_input_dev->keybit); |
371 | break; | 395 | break; |
372 | case KE_SW: | ||
373 | set_bit(EV_SW, hp_wmi_input_dev->evbit); | ||
374 | set_bit(key->keycode, hp_wmi_input_dev->swbit); | ||
375 | |||
376 | /* Set initial dock state */ | ||
377 | input_report_switch(hp_wmi_input_dev, key->keycode, | ||
378 | hp_wmi_dock_state()); | ||
379 | input_sync(hp_wmi_input_dev); | ||
380 | break; | ||
381 | } | 396 | } |
382 | } | 397 | } |
383 | 398 | ||
399 | set_bit(EV_SW, hp_wmi_input_dev->evbit); | ||
400 | set_bit(SW_DOCK, hp_wmi_input_dev->swbit); | ||
401 | set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); | ||
402 | |||
403 | /* Set initial hardware state */ | ||
404 | input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); | ||
405 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, | ||
406 | hp_wmi_tablet_state()); | ||
407 | input_sync(hp_wmi_input_dev); | ||
408 | |||
384 | err = input_register_device(hp_wmi_input_dev); | 409 | err = input_register_device(hp_wmi_input_dev); |
385 | 410 | ||
386 | if (err) { | 411 | if (err) { |
@@ -397,6 +422,7 @@ static void cleanup_sysfs(struct platform_device *device) | |||
397 | device_remove_file(&device->dev, &dev_attr_hddtemp); | 422 | device_remove_file(&device->dev, &dev_attr_hddtemp); |
398 | device_remove_file(&device->dev, &dev_attr_als); | 423 | device_remove_file(&device->dev, &dev_attr_als); |
399 | device_remove_file(&device->dev, &dev_attr_dock); | 424 | device_remove_file(&device->dev, &dev_attr_dock); |
425 | device_remove_file(&device->dev, &dev_attr_tablet); | ||
400 | } | 426 | } |
401 | 427 | ||
402 | static int __init hp_wmi_bios_setup(struct platform_device *device) | 428 | static int __init hp_wmi_bios_setup(struct platform_device *device) |
@@ -416,6 +442,9 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) | |||
416 | err = device_create_file(&device->dev, &dev_attr_dock); | 442 | err = device_create_file(&device->dev, &dev_attr_dock); |
417 | if (err) | 443 | if (err) |
418 | goto add_sysfs_error; | 444 | goto add_sysfs_error; |
445 | err = device_create_file(&device->dev, &dev_attr_tablet); | ||
446 | if (err) | ||
447 | goto add_sysfs_error; | ||
419 | 448 | ||
420 | if (wireless & 0x1) { | 449 | if (wireless & 0x1) { |
421 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, | 450 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, |
@@ -485,23 +514,17 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device) | |||
485 | 514 | ||
486 | static int hp_wmi_resume_handler(struct platform_device *device) | 515 | static int hp_wmi_resume_handler(struct platform_device *device) |
487 | { | 516 | { |
488 | struct key_entry *key; | ||
489 | |||
490 | /* | 517 | /* |
491 | * Docking state may have changed while suspended, so trigger | 518 | * Hardware state may have changed while suspended, so trigger |
492 | * an input event for the current state. As this is a switch, | 519 | * input events for the current state. As these are switches, |
493 | * the input layer will only actually pass it on if the state | 520 | * the input layer will only actually pass them on if the state |
494 | * changed. | 521 | * changed. |
495 | */ | 522 | */ |
496 | for (key = hp_wmi_keymap; key->type != KE_END; key++) { | 523 | |
497 | switch (key->type) { | 524 | input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); |
498 | case KE_SW: | 525 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, |
499 | input_report_switch(hp_wmi_input_dev, key->keycode, | 526 | hp_wmi_tablet_state()); |
500 | hp_wmi_dock_state()); | 527 | input_sync(hp_wmi_input_dev); |
501 | input_sync(hp_wmi_input_dev); | ||
502 | break; | ||
503 | } | ||
504 | } | ||
505 | 528 | ||
506 | return 0; | 529 | return 0; |
507 | } | 530 | } |
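
As the comment above notes, switch events are stateful: the input core caches the last value and only propagates a report when it changes, so unconditionally re-reporting on resume is cheap and correct. Userspace can read the same switches back at any time through the evdev EVIOCGSW ioctl; a rough sketch (the event node path is a guess and varies per machine):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/input.h>

    #define LONG_BITS (8 * sizeof(long))
    #define TEST_SW(b, sw) (((b)[(sw) / LONG_BITS] >> ((sw) % LONG_BITS)) & 1)

    int main(void)
    {
            unsigned long bits[(SW_MAX + LONG_BITS) / LONG_BITS] = { 0 };
            int fd = open("/dev/input/event5", O_RDONLY); /* hypothetical node */

            if (fd < 0 || ioctl(fd, EVIOCGSW(sizeof(bits)), bits) < 0)
                    return 1;
            printf("dock=%lu tablet=%lu\n",
                   TEST_SW(bits, SW_DOCK), TEST_SW(bits, SW_TABLET_MODE));
            close(fd);
            return 0;
    }
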
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 40d64c03278c..a463fd72c495 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define TPACPI_VERSION "0.23" | 24 | #define TPACPI_VERSION "0.23" |
25 | #define TPACPI_SYSFS_VERSION 0x020300 | 25 | #define TPACPI_SYSFS_VERSION 0x020400 |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Changelog: | 28 | * Changelog: |
@@ -257,6 +257,8 @@ static struct { | |||
257 | u32 wan:1; | 257 | u32 wan:1; |
258 | u32 uwb:1; | 258 | u32 uwb:1; |
259 | u32 fan_ctrl_status_undef:1; | 259 | u32 fan_ctrl_status_undef:1; |
260 | u32 second_fan:1; | ||
261 | u32 beep_needs_two_args:1; | ||
260 | u32 input_device_registered:1; | 262 | u32 input_device_registered:1; |
261 | u32 platform_drv_registered:1; | 263 | u32 platform_drv_registered:1; |
262 | u32 platform_drv_attrs_registered:1; | 264 | u32 platform_drv_attrs_registered:1; |
@@ -277,8 +279,10 @@ struct thinkpad_id_data { | |||
277 | char *bios_version_str; /* Something like 1ZET51WW (1.03z) */ | 279 | char *bios_version_str; /* Something like 1ZET51WW (1.03z) */ |
278 | char *ec_version_str; /* Something like 1ZHT51WW-1.04a */ | 280 | char *ec_version_str; /* Something like 1ZHT51WW-1.04a */ |
279 | 281 | ||
280 | u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */ | 282 | u16 bios_model; /* 1Y = 0x5931, 0 = unknown */ |
281 | u16 ec_model; | 283 | u16 ec_model; |
284 | u16 bios_release; /* 1ZETK1WW = 0x4b31, 0 = unknown */ | ||
285 | u16 ec_release; | ||
282 | 286 | ||
283 | char *model_str; /* ThinkPad T43 */ | 287 | char *model_str; /* ThinkPad T43 */ |
284 | char *nummodel_str; /* 9384A9C for a 9384-A9C model */ | 288 | char *nummodel_str; /* 9384A9C for a 9384-A9C model */ |
@@ -355,6 +359,73 @@ static void tpacpi_log_usertask(const char * const what) | |||
355 | } \ | 359 | } \ |
356 | } while (0) | 360 | } while (0) |
357 | 361 | ||
362 | /* | ||
363 | * Quirk handling helpers | ||
364 | * | ||
365 | * ThinkPad IDs and versions seen in the field so far | ||
366 | * are two characters from the set [0-9A-Z], i.e. base 36. | ||
367 | * | ||
368 | * We use values well outside that range as specials. | ||
369 | */ | ||
370 | |||
371 | #define TPACPI_MATCH_ANY 0xffffU | ||
372 | #define TPACPI_MATCH_UNKNOWN 0U | ||
373 | |||
374 | /* TPID('1', 'Y') == 0x5931 */ | ||
375 | #define TPID(__c1, __c2) (((__c2) << 8) | (__c1)) | ||
376 | |||
377 | #define TPACPI_Q_IBM(__id1, __id2, __quirk) \ | ||
378 | { .vendor = PCI_VENDOR_ID_IBM, \ | ||
379 | .bios = TPID(__id1, __id2), \ | ||
380 | .ec = TPACPI_MATCH_ANY, \ | ||
381 | .quirks = (__quirk) } | ||
382 | |||
383 | #define TPACPI_Q_LNV(__id1, __id2, __quirk) \ | ||
384 | { .vendor = PCI_VENDOR_ID_LENOVO, \ | ||
385 | .bios = TPID(__id1, __id2), \ | ||
386 | .ec = TPACPI_MATCH_ANY, \ | ||
387 | .quirks = (__quirk) } | ||
388 | |||
389 | struct tpacpi_quirk { | ||
390 | unsigned int vendor; | ||
391 | u16 bios; | ||
392 | u16 ec; | ||
393 | unsigned long quirks; | ||
394 | }; | ||
395 | |||
396 | /** | ||
397 | * tpacpi_check_quirks() - search BIOS/EC version on a list | ||
398 | * @qlist: array of &struct tpacpi_quirk | ||
399 | * @qlist_size: number of elements in @qlist | ||
400 | * | ||
401 | * Iterates over a quirks list until one is found that matches the | ||
402 | * ThinkPad's vendor, BIOS and EC model. | ||
403 | * | ||
404 | * Returns 0 if nothing matches, otherwise returns the quirks field of | ||
405 | * the matching &struct tpacpi_quirk entry. | ||
406 | * | ||
407 | * The match criteria are: vendor, ec and bios must match. | ||
408 | */ | ||
409 | static unsigned long __init tpacpi_check_quirks( | ||
410 | const struct tpacpi_quirk *qlist, | ||
411 | unsigned int qlist_size) | ||
412 | { | ||
413 | while (qlist_size) { | ||
414 | if ((qlist->vendor == thinkpad_id.vendor || | ||
415 | qlist->vendor == TPACPI_MATCH_ANY) && | ||
416 | (qlist->bios == thinkpad_id.bios_model || | ||
417 | qlist->bios == TPACPI_MATCH_ANY) && | ||
418 | (qlist->ec == thinkpad_id.ec_model || | ||
419 | qlist->ec == TPACPI_MATCH_ANY)) | ||
420 | return qlist->quirks; | ||
421 | |||
422 | qlist_size--; | ||
423 | qlist++; | ||
424 | } | ||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | |||
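
TPID() packs the two-character model ID with the second character in the high byte, so TPID('1', 'Y') is 0x5931, which is also how bios_model is assembled from the version string later in this patch. A compilable miniature of the lookup (0x1014 is PCI_VENDOR_ID_IBM; everything else is simplified):

    #include <stdio.h>

    #define MATCH_ANY 0xffffU
    #define TPID(c1, c2) (((c2) << 8) | (c1))

    struct quirk { unsigned vendor; unsigned bios; unsigned long quirks; };

    static unsigned long check(const struct quirk *q, unsigned n,
                               unsigned vendor, unsigned bios)
    {
            for (; n; n--, q++)
                    if ((q->vendor == vendor || q->vendor == MATCH_ANY) &&
                        (q->bios == bios || q->bios == MATCH_ANY))
                            return q->quirks;
            return 0;
    }

    int main(void)
    {
            static const struct quirk list[] = {
                    { 0x1014, TPID('1', 'Y'), 0x0097 }, /* an IBM T43 entry */
            };

            /* BIOS "1YET51WW" yields bios_model TPID('1','Y') == 0x5931 */
            printf("quirks=0x%04lx\n", check(list, 1, 0x1014, TPID('1', 'Y')));
            return 0;
    }
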
358 | /**************************************************************************** | 429 | /**************************************************************************** |
359 | **************************************************************************** | 430 | **************************************************************************** |
360 | * | 431 | * |
@@ -2880,7 +2951,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
2880 | /* update bright_acpimode... */ | 2951 | /* update bright_acpimode... */ |
2881 | tpacpi_check_std_acpi_brightness_support(); | 2952 | tpacpi_check_std_acpi_brightness_support(); |
2882 | 2953 | ||
2883 | if (tp_features.bright_acpimode) { | 2954 | if (tp_features.bright_acpimode && acpi_video_backlight_support()) { |
2884 | printk(TPACPI_INFO | 2955 | printk(TPACPI_INFO |
2885 | "This ThinkPad has standard ACPI backlight " | 2956 | "This ThinkPad has standard ACPI backlight " |
2886 | "brightness control, supported by the ACPI " | 2957 | "brightness control, supported by the ACPI " |
@@ -4773,7 +4844,7 @@ TPACPI_HANDLE(led, ec, "SLED", /* 570 */ | |||
4773 | "LED", /* all others */ | 4844 | "LED", /* all others */ |
4774 | ); /* R30, R31 */ | 4845 | ); /* R30, R31 */ |
4775 | 4846 | ||
4776 | #define TPACPI_LED_NUMLEDS 8 | 4847 | #define TPACPI_LED_NUMLEDS 16 |
4777 | static struct tpacpi_led_classdev *tpacpi_leds; | 4848 | static struct tpacpi_led_classdev *tpacpi_leds; |
4778 | static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS]; | 4849 | static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS]; |
4779 | static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { | 4850 | static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { |
@@ -4786,15 +4857,20 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { | |||
4786 | "tpacpi::dock_batt", | 4857 | "tpacpi::dock_batt", |
4787 | "tpacpi::unknown_led", | 4858 | "tpacpi::unknown_led", |
4788 | "tpacpi::standby", | 4859 | "tpacpi::standby", |
4860 | "tpacpi::dock_status1", | ||
4861 | "tpacpi::dock_status2", | ||
4862 | "tpacpi::unknown_led2", | ||
4863 | "tpacpi::unknown_led3", | ||
4864 | "tpacpi::thinkvantage", | ||
4789 | }; | 4865 | }; |
4790 | #define TPACPI_SAFE_LEDS 0x0081U | 4866 | #define TPACPI_SAFE_LEDS 0x1081U |
4791 | 4867 | ||
4792 | static inline bool tpacpi_is_led_restricted(const unsigned int led) | 4868 | static inline bool tpacpi_is_led_restricted(const unsigned int led) |
4793 | { | 4869 | { |
4794 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS | 4870 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS |
4795 | return false; | 4871 | return false; |
4796 | #else | 4872 | #else |
4797 | return (TPACPI_SAFE_LEDS & (1 << led)) == 0; | 4873 | return (1U & (TPACPI_SAFE_LEDS >> led)) == 0; |
4798 | #endif | 4874 | #endif |
4799 | } | 4875 | } |
4800 | 4876 | ||
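
The safe mask grows from 0x0081 (bits 0 and 7; bit 7 is "tpacpi::standby", the last of the original eight names) to 0x1081, which additionally whitelists bit 12, the newly listed "tpacpi::thinkvantage"; the index-to-name mapping here is inferred from the array order. The rewritten test shifts the mask instead of building a per-LED bit, and the two forms agree for every valid index; a quick check:

    #include <stdio.h>

    #define SAFE_LEDS 0x1081U

    int main(void)
    {
            unsigned led;

            for (led = 0; led < 16; led++)
                    if ((1U & (SAFE_LEDS >> led)) != !!(SAFE_LEDS & (1U << led)))
                            return 1; /* never reached: the forms agree */
            printf("user-controllable by default: LEDs 0, 7 and 12\n");
            return 0;
    }
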
@@ -4956,6 +5032,10 @@ static int __init tpacpi_init_led(unsigned int led) | |||
4956 | 5032 | ||
4957 | tpacpi_leds[led].led = led; | 5033 | tpacpi_leds[led].led = led; |
4958 | 5034 | ||
5035 | /* LEDs with no name don't get registered */ | ||
5036 | if (!tpacpi_led_names[led]) | ||
5037 | return 0; | ||
5038 | |||
4959 | tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set; | 5039 | tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set; |
4960 | tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set; | 5040 | tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set; |
4961 | if (led_supported == TPACPI_LED_570) | 5041 | if (led_supported == TPACPI_LED_570) |
@@ -4974,10 +5054,59 @@ static int __init tpacpi_init_led(unsigned int led) | |||
4974 | return rc; | 5054 | return rc; |
4975 | } | 5055 | } |
4976 | 5056 | ||
5057 | static const struct tpacpi_quirk led_useful_qtable[] __initconst = { | ||
5058 | TPACPI_Q_IBM('1', 'E', 0x009f), /* A30 */ | ||
5059 | TPACPI_Q_IBM('1', 'N', 0x009f), /* A31 */ | ||
5060 | TPACPI_Q_IBM('1', 'G', 0x009f), /* A31 */ | ||
5061 | |||
5062 | TPACPI_Q_IBM('1', 'I', 0x0097), /* T30 */ | ||
5063 | TPACPI_Q_IBM('1', 'R', 0x0097), /* T40, T41, T42, R50, R51 */ | ||
5064 | TPACPI_Q_IBM('7', '0', 0x0097), /* T43, R52 */ | ||
5065 | TPACPI_Q_IBM('1', 'Y', 0x0097), /* T43 */ | ||
5066 | TPACPI_Q_IBM('1', 'W', 0x0097), /* R50e */ | ||
5067 | TPACPI_Q_IBM('1', 'V', 0x0097), /* R51 */ | ||
5068 | TPACPI_Q_IBM('7', '8', 0x0097), /* R51e */ | ||
5069 | TPACPI_Q_IBM('7', '6', 0x0097), /* R52 */ | ||
5070 | |||
5071 | TPACPI_Q_IBM('1', 'K', 0x00bf), /* X30 */ | ||
5072 | TPACPI_Q_IBM('1', 'Q', 0x00bf), /* X31, X32 */ | ||
5073 | TPACPI_Q_IBM('1', 'U', 0x00bf), /* X40 */ | ||
5074 | TPACPI_Q_IBM('7', '4', 0x00bf), /* X41 */ | ||
5075 | TPACPI_Q_IBM('7', '5', 0x00bf), /* X41t */ | ||
5076 | |||
5077 | TPACPI_Q_IBM('7', '9', 0x1f97), /* T60 (1) */ | ||
5078 | TPACPI_Q_IBM('7', '7', 0x1f97), /* Z60* (1) */ | ||
5079 | TPACPI_Q_IBM('7', 'F', 0x1f97), /* Z61* (1) */ | ||
5080 | TPACPI_Q_IBM('7', 'B', 0x1fb7), /* X60 (1) */ | ||
5081 | |||
5082 | /* (1) - may have excess LEDs enabled on MSB */ | ||
5083 | |||
5084 | /* Defaults (order matters, keep last, don't reorder!) */ | ||
5085 | { /* Lenovo */ | ||
5086 | .vendor = PCI_VENDOR_ID_LENOVO, | ||
5087 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, | ||
5088 | .quirks = 0x1fffU, | ||
5089 | }, | ||
5090 | { /* IBM ThinkPads with no EC version string */ | ||
5091 | .vendor = PCI_VENDOR_ID_IBM, | ||
5092 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_UNKNOWN, | ||
5093 | .quirks = 0x00ffU, | ||
5094 | }, | ||
5095 | { /* IBM ThinkPads with EC version string */ | ||
5096 | .vendor = PCI_VENDOR_ID_IBM, | ||
5097 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, | ||
5098 | .quirks = 0x00bfU, | ||
5099 | }, | ||
5100 | }; | ||
5101 | |||
5102 | #undef TPACPI_LEDQ_IBM | ||
5103 | #undef TPACPI_LEDQ_LNV | ||
5104 | |||
4977 | static int __init led_init(struct ibm_init_struct *iibm) | 5105 | static int __init led_init(struct ibm_init_struct *iibm) |
4978 | { | 5106 | { |
4979 | unsigned int i; | 5107 | unsigned int i; |
4980 | int rc; | 5108 | int rc; |
5109 | unsigned long useful_leds; | ||
4981 | 5110 | ||
4982 | vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n"); | 5111 | vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n"); |
4983 | 5112 | ||
@@ -4999,6 +5128,9 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
4999 | vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", | 5128 | vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", |
5000 | str_supported(led_supported), led_supported); | 5129 | str_supported(led_supported), led_supported); |
5001 | 5130 | ||
5131 | if (led_supported == TPACPI_LED_NONE) | ||
5132 | return 1; | ||
5133 | |||
5002 | tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS, | 5134 | tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS, |
5003 | GFP_KERNEL); | 5135 | GFP_KERNEL); |
5004 | if (!tpacpi_leds) { | 5136 | if (!tpacpi_leds) { |
@@ -5006,8 +5138,12 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
5006 | return -ENOMEM; | 5138 | return -ENOMEM; |
5007 | } | 5139 | } |
5008 | 5140 | ||
5141 | useful_leds = tpacpi_check_quirks(led_useful_qtable, | ||
5142 | ARRAY_SIZE(led_useful_qtable)); | ||
5143 | |||
5009 | for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { | 5144 | for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { |
5010 | if (!tpacpi_is_led_restricted(i)) { | 5145 | if (!tpacpi_is_led_restricted(i) && |
5146 | test_bit(i, &useful_leds)) { | ||
5011 | rc = tpacpi_init_led(i); | 5147 | rc = tpacpi_init_led(i); |
5012 | if (rc < 0) { | 5148 | if (rc < 0) { |
5013 | led_exit(); | 5149 | led_exit(); |
@@ -5017,12 +5153,11 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
5017 | } | 5153 | } |
5018 | 5154 | ||
5019 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS | 5155 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS |
5020 | if (led_supported != TPACPI_LED_NONE) | 5156 | printk(TPACPI_NOTICE |
5021 | printk(TPACPI_NOTICE | 5157 | "warning: userspace override of important " |
5022 | "warning: userspace override of important " | 5158 | "firmware LEDs is enabled\n"); |
5023 | "firmware LEDs is enabled\n"); | ||
5024 | #endif | 5159 | #endif |
5025 | return (led_supported != TPACPI_LED_NONE)? 0 : 1; | 5160 | return 0; |
5026 | } | 5161 | } |
5027 | 5162 | ||
5028 | #define str_led_status(s) \ | 5163 | #define str_led_status(s) \ |
@@ -5052,7 +5187,7 @@ static int led_read(char *p) | |||
5052 | } | 5187 | } |
5053 | 5188 | ||
5054 | len += sprintf(p + len, "commands:\t" | 5189 | len += sprintf(p + len, "commands:\t" |
5055 | "<led> on, <led> off, <led> blink (<led> is 0-7)\n"); | 5190 | "<led> on, <led> off, <led> blink (<led> is 0-15)\n"); |
5056 | 5191 | ||
5057 | return len; | 5192 | return len; |
5058 | } | 5193 | } |
@@ -5067,7 +5202,7 @@ static int led_write(char *buf) | |||
5067 | return -ENODEV; | 5202 | return -ENODEV; |
5068 | 5203 | ||
5069 | while ((cmd = next_cmd(&buf))) { | 5204 | while ((cmd = next_cmd(&buf))) { |
5070 | if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7) | 5205 | if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15) |
5071 | return -EINVAL; | 5206 | return -EINVAL; |
5072 | 5207 | ||
5073 | if (strstr(cmd, "off")) { | 5208 | if (strstr(cmd, "off")) { |
@@ -5101,8 +5236,17 @@ static struct ibm_struct led_driver_data = { | |||
5101 | 5236 | ||
5102 | TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */ | 5237 | TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */ |
5103 | 5238 | ||
5239 | #define TPACPI_BEEP_Q1 0x0001 | ||
5240 | |||
5241 | static const struct tpacpi_quirk beep_quirk_table[] __initconst = { | ||
5242 | TPACPI_Q_IBM('I', 'M', TPACPI_BEEP_Q1), /* 570 */ | ||
5243 | TPACPI_Q_IBM('I', 'U', TPACPI_BEEP_Q1), /* 570E - unverified */ | ||
5244 | }; | ||
5245 | |||
5104 | static int __init beep_init(struct ibm_init_struct *iibm) | 5246 | static int __init beep_init(struct ibm_init_struct *iibm) |
5105 | { | 5247 | { |
5248 | unsigned long quirks; | ||
5249 | |||
5106 | vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n"); | 5250 | vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n"); |
5107 | 5251 | ||
5108 | TPACPI_ACPIHANDLE_INIT(beep); | 5252 | TPACPI_ACPIHANDLE_INIT(beep); |
@@ -5110,6 +5254,11 @@ static int __init beep_init(struct ibm_init_struct *iibm) | |||
5110 | vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n", | 5254 | vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n", |
5111 | str_supported(beep_handle != NULL)); | 5255 | str_supported(beep_handle != NULL)); |
5112 | 5256 | ||
5257 | quirks = tpacpi_check_quirks(beep_quirk_table, | ||
5258 | ARRAY_SIZE(beep_quirk_table)); | ||
5259 | |||
5260 | tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1); | ||
5261 | |||
5113 | return (beep_handle)? 0 : 1; | 5262 | return (beep_handle)? 0 : 1; |
5114 | } | 5263 | } |
5115 | 5264 | ||
@@ -5141,8 +5290,15 @@ static int beep_write(char *buf) | |||
5141 | /* beep_cmd set */ | 5290 | /* beep_cmd set */ |
5142 | } else | 5291 | } else |
5143 | return -EINVAL; | 5292 | return -EINVAL; |
5144 | if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0)) | 5293 | if (tp_features.beep_needs_two_args) { |
5145 | return -EIO; | 5294 | if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", |
5295 | beep_cmd, 0)) | ||
5296 | return -EIO; | ||
5297 | } else { | ||
5298 | if (!acpi_evalf(beep_handle, NULL, NULL, "vd", | ||
5299 | beep_cmd)) | ||
5300 | return -EIO; | ||
5301 | } | ||
5146 | } | 5302 | } |
5147 | 5303 | ||
5148 | return 0; | 5304 | return 0; |
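
Reading the two call sites, the acpi_evalf() format string seems to spell a 'v'oid result followed by one 'd'word parameter per variadic argument, so the quirky 570/570E firmware wants BEEP invoked with a second, dummy argument. A toy model of that convention (toy_evalf is mine, not a kernel function):

    #include <stdarg.h>
    #include <stdio.h>

    /* Toy stand-in for acpi_evalf(): 'v' = void result, 'd' = dword arg. */
    static void toy_evalf(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            for (; *fmt; fmt++)
                    if (*fmt == 'd')
                            printf("arg=%d ", va_arg(ap, int));
            va_end(ap);
            printf("\n");
    }

    int main(void)
    {
            toy_evalf("vdd", 17, 0); /* 570/570E: BEEP takes two arguments */
            toy_evalf("vd", 17);     /* everyone else: just the beep code */
            return 0;
    }
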
@@ -5569,6 +5725,10 @@ static struct ibm_struct ecdump_driver_data = { | |||
5569 | * Bit 3-0: backlight brightness level | 5725 | * Bit 3-0: backlight brightness level |
5570 | * | 5726 | * |
5571 | * brightness_get_raw returns status data in the HBRV layout | 5727 | * brightness_get_raw returns status data in the HBRV layout |
5728 | * | ||
5729 | * WARNING: The X61 has been verified to use HBRV for something else, so | ||
5730 | * this should be used _only_ on IBM ThinkPads, and maybe with some careful | ||
5731 | * testing on the very early *60 Lenovo models... | ||
5572 | */ | 5732 | */ |
5573 | 5733 | ||
5574 | enum { | 5734 | enum { |
@@ -5869,6 +6029,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm) | |||
5869 | brightness_mode); | 6029 | brightness_mode); |
5870 | } | 6030 | } |
5871 | 6031 | ||
6032 | /* Safety */ | ||
6033 | if (thinkpad_id.vendor != PCI_VENDOR_ID_IBM && | ||
6034 | (brightness_mode == TPACPI_BRGHT_MODE_ECNVRAM || | ||
6035 | brightness_mode == TPACPI_BRGHT_MODE_EC)) | ||
6036 | return -EINVAL; | ||
6037 | |||
5872 | if (tpacpi_brightness_get_raw(&b) < 0) | 6038 | if (tpacpi_brightness_get_raw(&b) < 0) |
5873 | return 1; | 6039 | return 1; |
5874 | 6040 | ||
@@ -6161,6 +6327,21 @@ static struct ibm_struct volume_driver_data = { | |||
6161 | * For firmware bugs, refer to: | 6327 | * For firmware bugs, refer to: |
6162 | * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues | 6328 | * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues |
6163 | * | 6329 | * |
6330 | * ---- | ||
6331 | * | ||
6332 | * ThinkPad EC register 0x31 bit 0 (only on select models) | ||
6333 | * | ||
6334 | * When bit 0 of EC register 0x31 is zero, the tachometer registers | ||
6335 | * show the speed of the main fan. When bit 0 of EC register 0x31 | ||
6336 | * is one, the tachometer registers show the speed of the auxiliary | ||
6337 | * fan. | ||
6338 | * | ||
6339 | * Fan control seems to affect both fans, regardless of the state | ||
6340 | * of this bit. | ||
6341 | * | ||
6342 | * So far, only the firmware for the X60/X61 non-tablet versions | ||
6343 | * seems to support this (firmware TP-7M). | ||
6344 | * | ||
6164 | * TPACPI_FAN_WR_ACPI_FANS: | 6345 | * TPACPI_FAN_WR_ACPI_FANS: |
6165 | * ThinkPad X31, X40, X41. Not available in the X60. | 6346 | * ThinkPad X31, X40, X41. Not available in the X60. |
6166 | * | 6347 | * |
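
The register-0x31 protocol described above is simple to model: clear bit 0 to route the tachometer at 0x84/0x85 to the main fan, set it for the auxiliary one, and always read the LSB first. A toy EC illustrating the arithmetic (the register contents are invented for the example):

    #include <stdio.h>

    static unsigned char ec[256] = {
            [0x84] = 0x40, [0x85] = 0x0d, /* tach latch: 0x0d40 = 3392 RPM */
    };

    static unsigned fan_rpm(void)
    {
            unsigned lo = ec[0x84]; /* 0x84 must be read before 0x85 */
            unsigned hi = ec[0x85];

            return hi << 8 | lo;
    }

    int main(void)
    {
            ec[0x31] &= 0xfe; /* bit 0 clear: main fan */
            printf("fan1: %u RPM\n", fan_rpm());
            ec[0x31] |= 0x01; /* bit 0 set: auxiliary fan; a real EC
                               * would now latch the other rotor */
            printf("fan2: %u RPM\n", fan_rpm());
            return 0;
    }
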
@@ -6187,6 +6368,8 @@ enum { /* Fan control constants */ | |||
6187 | fan_status_offset = 0x2f, /* EC register 0x2f */ | 6368 | fan_status_offset = 0x2f, /* EC register 0x2f */ |
6188 | fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM) | 6369 | fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM) |
6189 | * 0x84 must be read before 0x85 */ | 6370 | * 0x84 must be read before 0x85 */ |
6371 | fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M) | ||
6372 | bit 0 selects which fan is active */ | ||
6190 | 6373 | ||
6191 | TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */ | 6374 | TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */ |
6192 | TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */ | 6375 | TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */ |
@@ -6249,30 +6432,18 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */ | |||
6249 | * We assume 0x07 really means auto mode while this quirk is active, | 6432 | * We assume 0x07 really means auto mode while this quirk is active, |
6250 | * as this is far more likely than the ThinkPad being in level 7, | 6433 | * as this is far more likely than the ThinkPad being in level 7, |
6251 | * which is only used by the firmware during thermal emergencies. | 6434 | * which is only used by the firmware during thermal emergencies. |
6435 | * | ||
6436 | * Enable for TP-1Y (T43), TP-78 (R51e), TP-76 (R52), | ||
6437 | * TP-70 (T43, R52), which are known to be buggy. | ||
6252 | */ | 6438 | */ |
6253 | 6439 | ||
6254 | static void fan_quirk1_detect(void) | 6440 | static void fan_quirk1_setup(void) |
6255 | { | 6441 | { |
6256 | /* In some ThinkPads, neither the EC nor the ACPI | ||
6257 | * DSDT initialize the HFSP register, and it ends up | ||
6258 | * being initially set to 0x07 when it *could* be | ||
6259 | * either 0x07 or 0x80. | ||
6260 | * | ||
6261 | * Enable for TP-1Y (T43), TP-78 (R51e), | ||
6262 | * TP-76 (R52), TP-70 (T43, R52), which are known | ||
6263 | * to be buggy. */ | ||
6264 | if (fan_control_initial_status == 0x07) { | 6442 | if (fan_control_initial_status == 0x07) { |
6265 | switch (thinkpad_id.ec_model) { | 6443 | printk(TPACPI_NOTICE |
6266 | case 0x5931: /* TP-1Y */ | 6444 | "fan_init: initial fan status is unknown, " |
6267 | case 0x3837: /* TP-78 */ | 6445 | "assuming it is in auto mode\n"); |
6268 | case 0x3637: /* TP-76 */ | 6446 | tp_features.fan_ctrl_status_undef = 1; |
6269 | case 0x3037: /* TP-70 */ | ||
6270 | printk(TPACPI_NOTICE | ||
6271 | "fan_init: initial fan status is unknown, " | ||
6272 | "assuming it is in auto mode\n"); | ||
6273 | tp_features.fan_ctrl_status_undef = 1; | ||
6274 | ;; | ||
6275 | } | ||
6276 | } | 6447 | } |
6277 | } | 6448 | } |
6278 | 6449 | ||
@@ -6292,6 +6463,38 @@ static void fan_quirk1_handle(u8 *fan_status) | |||
6292 | } | 6463 | } |
6293 | } | 6464 | } |
6294 | 6465 | ||
6466 | /* Select main fan on X60/X61, NOOP on others */ | ||
6467 | static bool fan_select_fan1(void) | ||
6468 | { | ||
6469 | if (tp_features.second_fan) { | ||
6470 | u8 val; | ||
6471 | |||
6472 | if (ec_read(fan_select_offset, &val) < 0) | ||
6473 | return false; | ||
6474 | val &= 0xFEU; | ||
6475 | if (ec_write(fan_select_offset, val) < 0) | ||
6476 | return false; | ||
6477 | } | ||
6478 | return true; | ||
6479 | } | ||
6480 | |||
6481 | /* Select secondary fan on X60/X61 */ | ||
6482 | static bool fan_select_fan2(void) | ||
6483 | { | ||
6484 | u8 val; | ||
6485 | |||
6486 | if (!tp_features.second_fan) | ||
6487 | return false; | ||
6488 | |||
6489 | if (ec_read(fan_select_offset, &val) < 0) | ||
6490 | return false; | ||
6491 | val |= 0x01U; | ||
6492 | if (ec_write(fan_select_offset, val) < 0) | ||
6493 | return false; | ||
6494 | |||
6495 | return true; | ||
6496 | } | ||
6497 | |||
6295 | /* | 6498 | /* |
6296 | * Call with fan_mutex held | 6499 | * Call with fan_mutex held |
6297 | */ | 6500 | */ |
@@ -6369,6 +6572,8 @@ static int fan_get_speed(unsigned int *speed) | |||
6369 | switch (fan_status_access_mode) { | 6572 | switch (fan_status_access_mode) { |
6370 | case TPACPI_FAN_RD_TPEC: | 6573 | case TPACPI_FAN_RD_TPEC: |
6371 | /* all except 570, 600e/x, 770e, 770x */ | 6574 | /* all except 570, 600e/x, 770e, 770x */ |
6575 | if (unlikely(!fan_select_fan1())) | ||
6576 | return -EIO; | ||
6372 | if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) || | 6577 | if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) || |
6373 | !acpi_ec_read(fan_rpm_offset + 1, &hi))) | 6578 | !acpi_ec_read(fan_rpm_offset + 1, &hi))) |
6374 | return -EIO; | 6579 | return -EIO; |
@@ -6385,6 +6590,34 @@ static int fan_get_speed(unsigned int *speed) | |||
6385 | return 0; | 6590 | return 0; |
6386 | } | 6591 | } |
6387 | 6592 | ||
6593 | static int fan2_get_speed(unsigned int *speed) | ||
6594 | { | ||
6595 | u8 hi, lo; | ||
6596 | bool rc; | ||
6597 | |||
6598 | switch (fan_status_access_mode) { | ||
6599 | case TPACPI_FAN_RD_TPEC: | ||
6600 | /* all except 570, 600e/x, 770e, 770x */ | ||
6601 | if (unlikely(!fan_select_fan2())) | ||
6602 | return -EIO; | ||
6603 | rc = !acpi_ec_read(fan_rpm_offset, &lo) || | ||
6604 | !acpi_ec_read(fan_rpm_offset + 1, &hi); | ||
6605 | fan_select_fan1(); /* play it safe */ | ||
6606 | if (rc) | ||
6607 | return -EIO; | ||
6608 | |||
6609 | if (likely(speed)) | ||
6610 | *speed = (hi << 8) | lo; | ||
6611 | |||
6612 | break; | ||
6613 | |||
6614 | default: | ||
6615 | return -ENXIO; | ||
6616 | } | ||
6617 | |||
6618 | return 0; | ||
6619 | } | ||
6620 | |||
6388 | static int fan_set_level(int level) | 6621 | static int fan_set_level(int level) |
6389 | { | 6622 | { |
6390 | if (!fan_control_allowed) | 6623 | if (!fan_control_allowed) |
@@ -6790,6 +7023,25 @@ static struct device_attribute dev_attr_fan_fan1_input = | |||
6790 | __ATTR(fan1_input, S_IRUGO, | 7023 | __ATTR(fan1_input, S_IRUGO, |
6791 | fan_fan1_input_show, NULL); | 7024 | fan_fan1_input_show, NULL); |
6792 | 7025 | ||
7026 | /* sysfs fan fan2_input ------------------------------------------------ */ | ||
7027 | static ssize_t fan_fan2_input_show(struct device *dev, | ||
7028 | struct device_attribute *attr, | ||
7029 | char *buf) | ||
7030 | { | ||
7031 | int res; | ||
7032 | unsigned int speed; | ||
7033 | |||
7034 | res = fan2_get_speed(&speed); | ||
7035 | if (res < 0) | ||
7036 | return res; | ||
7037 | |||
7038 | return snprintf(buf, PAGE_SIZE, "%u\n", speed); | ||
7039 | } | ||
7040 | |||
7041 | static struct device_attribute dev_attr_fan_fan2_input = | ||
7042 | __ATTR(fan2_input, S_IRUGO, | ||
7043 | fan_fan2_input_show, NULL); | ||
7044 | |||
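
Once registered, the second tachometer surfaces as an ordinary hwmon attribute next to fan1_input. Reading it from userspace is a one-liner; a sketch, where the sysfs path is an assumption about where the thinkpad_hwmon platform device ends up:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/platform/thinkpad_hwmon/fan2_input",
                            "r");
            unsigned rpm;

            if (!f || fscanf(f, "%u", &rpm) != 1)
                    return 1;
            printf("auxiliary fan: %u RPM\n", rpm);
            fclose(f);
            return 0;
    }
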
6793 | /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ | 7045 | /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ |
6794 | static ssize_t fan_fan_watchdog_show(struct device_driver *drv, | 7046 | static ssize_t fan_fan_watchdog_show(struct device_driver *drv, |
6795 | char *buf) | 7047 | char *buf) |
@@ -6823,6 +7075,7 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO, | |||
6823 | static struct attribute *fan_attributes[] = { | 7075 | static struct attribute *fan_attributes[] = { |
6824 | &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, | 7076 | &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, |
6825 | &dev_attr_fan_fan1_input.attr, | 7077 | &dev_attr_fan_fan1_input.attr, |
7078 | NULL, /* for fan2_input */ | ||
6826 | NULL | 7079 | NULL |
6827 | }; | 7080 | }; |
6828 | 7081 | ||
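
The extra NULL before the terminator is a small pattern worth naming: a pre-reserved slot in a NULL-terminated array that init code can later fill in (here via fan_attributes[ARRAY_SIZE(fan_attributes)-2] in the fan_init() hunk below) without reallocating. In miniature:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *attrs[] = {
            "pwm1_enable", "pwm1", "fan1_input",
            NULL, /* reserved slot */
            NULL  /* terminator */
    };

    int main(void)
    {
            const char **p;

            attrs[ARRAY_SIZE(attrs) - 2] = "fan2_input"; /* optional entry */
            for (p = attrs; *p; p++)
                    printf("%s\n", *p);
            return 0;
    }

If the optional entry is never filled in, iteration simply stops one slot early, which is exactly how the unused attribute stays invisible.
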
@@ -6830,9 +7083,36 @@ static const struct attribute_group fan_attr_group = { | |||
6830 | .attrs = fan_attributes, | 7083 | .attrs = fan_attributes, |
6831 | }; | 7084 | }; |
6832 | 7085 | ||
7086 | #define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */ | ||
7087 | #define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ | ||
7088 | |||
7089 | #define TPACPI_FAN_QI(__id1, __id2, __quirks) \ | ||
7090 | { .vendor = PCI_VENDOR_ID_IBM, \ | ||
7091 | .bios = TPACPI_MATCH_ANY, \ | ||
7092 | .ec = TPID(__id1, __id2), \ | ||
7093 | .quirks = __quirks } | ||
7094 | |||
7095 | #define TPACPI_FAN_QL(__id1, __id2, __quirks) \ | ||
7096 | { .vendor = PCI_VENDOR_ID_LENOVO, \ | ||
7097 | .bios = TPACPI_MATCH_ANY, \ | ||
7098 | .ec = TPID(__id1, __id2), \ | ||
7099 | .quirks = __quirks } | ||
7100 | |||
7101 | static const struct tpacpi_quirk fan_quirk_table[] __initconst = { | ||
7102 | TPACPI_FAN_QI('1', 'Y', TPACPI_FAN_Q1), | ||
7103 | TPACPI_FAN_QI('7', '8', TPACPI_FAN_Q1), | ||
7104 | TPACPI_FAN_QI('7', '6', TPACPI_FAN_Q1), | ||
7105 | TPACPI_FAN_QI('7', '0', TPACPI_FAN_Q1), | ||
7106 | TPACPI_FAN_QL('7', 'M', TPACPI_FAN_2FAN), | ||
7107 | }; | ||
7108 | |||
7109 | #undef TPACPI_FAN_QL | ||
7110 | #undef TPACPI_FAN_QI | ||
7111 | |||
6833 | static int __init fan_init(struct ibm_init_struct *iibm) | 7112 | static int __init fan_init(struct ibm_init_struct *iibm) |
6834 | { | 7113 | { |
6835 | int rc; | 7114 | int rc; |
7115 | unsigned long quirks; | ||
6836 | 7116 | ||
6837 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN, | 7117 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN, |
6838 | "initializing fan subdriver\n"); | 7118 | "initializing fan subdriver\n"); |
@@ -6843,12 +7123,16 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
6843 | fan_control_commands = 0; | 7123 | fan_control_commands = 0; |
6844 | fan_watchdog_maxinterval = 0; | 7124 | fan_watchdog_maxinterval = 0; |
6845 | tp_features.fan_ctrl_status_undef = 0; | 7125 | tp_features.fan_ctrl_status_undef = 0; |
7126 | tp_features.second_fan = 0; | ||
6846 | fan_control_desired_level = 7; | 7127 | fan_control_desired_level = 7; |
6847 | 7128 | ||
6848 | TPACPI_ACPIHANDLE_INIT(fans); | 7129 | TPACPI_ACPIHANDLE_INIT(fans); |
6849 | TPACPI_ACPIHANDLE_INIT(gfan); | 7130 | TPACPI_ACPIHANDLE_INIT(gfan); |
6850 | TPACPI_ACPIHANDLE_INIT(sfan); | 7131 | TPACPI_ACPIHANDLE_INIT(sfan); |
6851 | 7132 | ||
7133 | quirks = tpacpi_check_quirks(fan_quirk_table, | ||
7134 | ARRAY_SIZE(fan_quirk_table)); | ||
7135 | |||
6852 | if (gfan_handle) { | 7136 | if (gfan_handle) { |
6853 | /* 570, 600e/x, 770e, 770x */ | 7137 | /* 570, 600e/x, 770e, 770x */ |
6854 | fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; | 7138 | fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; |
@@ -6858,7 +7142,13 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
6858 | if (likely(acpi_ec_read(fan_status_offset, | 7142 | if (likely(acpi_ec_read(fan_status_offset, |
6859 | &fan_control_initial_status))) { | 7143 | &fan_control_initial_status))) { |
6860 | fan_status_access_mode = TPACPI_FAN_RD_TPEC; | 7144 | fan_status_access_mode = TPACPI_FAN_RD_TPEC; |
6861 | fan_quirk1_detect(); | 7145 | if (quirks & TPACPI_FAN_Q1) |
7146 | fan_quirk1_setup(); | ||
7147 | if (quirks & TPACPI_FAN_2FAN) { | ||
7148 | tp_features.second_fan = 1; | ||
7149 | dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN, | ||
7150 | "secondary fan support enabled\n"); | ||
7151 | } | ||
6862 | } else { | 7152 | } else { |
6863 | printk(TPACPI_ERR | 7153 | printk(TPACPI_ERR |
6864 | "ThinkPad ACPI EC access misbehaving, " | 7154 | "ThinkPad ACPI EC access misbehaving, " |
@@ -6914,6 +7204,11 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
6914 | 7204 | ||
6915 | if (fan_status_access_mode != TPACPI_FAN_NONE || | 7205 | if (fan_status_access_mode != TPACPI_FAN_NONE || |
6916 | fan_control_access_mode != TPACPI_FAN_WR_NONE) { | 7206 | fan_control_access_mode != TPACPI_FAN_WR_NONE) { |
7207 | if (tp_features.second_fan) { | ||
7208 | /* attach second fan tachometer */ | ||
7209 | fan_attributes[ARRAY_SIZE(fan_attributes)-2] = | ||
7210 | &dev_attr_fan_fan2_input.attr; | ||
7211 | } | ||
6917 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, | 7212 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, |
6918 | &fan_attr_group); | 7213 | &fan_attr_group); |
6919 | if (rc < 0) | 7214 | if (rc < 0) |
@@ -7385,6 +7680,24 @@ err_out: | |||
7385 | 7680 | ||
7386 | /* Probing */ | 7681 | /* Probing */ |
7387 | 7682 | ||
7683 | static bool __pure __init tpacpi_is_fw_digit(const char c) | ||
7684 | { | ||
7685 | return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z'); | ||
7686 | } | ||
7687 | |||
7688 | /* Most models: xxyTkkWW (#.##c); ancient 570/600 and -SL lack (#.##c) */ | ||
7689 | static bool __pure __init tpacpi_is_valid_fw_id(const char* const s, | ||
7690 | const char t) | ||
7691 | { | ||
7692 | return s && strlen(s) >= 8 && | ||
7693 | tpacpi_is_fw_digit(s[0]) && | ||
7694 | tpacpi_is_fw_digit(s[1]) && | ||
7695 | s[2] == t && s[3] == 'T' && | ||
7696 | tpacpi_is_fw_digit(s[4]) && | ||
7697 | tpacpi_is_fw_digit(s[5]) && | ||
7698 | s[6] == 'W' && s[7] == 'W'; | ||
7699 | } | ||
7700 | |||
7388 | /* returns 0 - probe ok, or < 0 - probe error. | 7701 | /* returns 0 - probe ok, or < 0 - probe error. |
7389 | * Probe ok doesn't mean thinkpad found. | 7702 | * Probe ok doesn't mean thinkpad found. |
7390 | * On error, kfree() cleanup on tp->* is not performed, caller must do it */ | 7703 | * On error, kfree() cleanup on tp->* is not performed, caller must do it */ |
@@ -7411,10 +7724,15 @@ static int __must_check __init get_thinkpad_model_data( | |||
7411 | tp->bios_version_str = kstrdup(s, GFP_KERNEL); | 7724 | tp->bios_version_str = kstrdup(s, GFP_KERNEL); |
7412 | if (s && !tp->bios_version_str) | 7725 | if (s && !tp->bios_version_str) |
7413 | return -ENOMEM; | 7726 | return -ENOMEM; |
7414 | if (!tp->bios_version_str) | 7727 | |
7728 | /* Really ancient ThinkPad 240X will fail this, which is fine */ | ||
7729 | if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E')) | ||
7415 | return 0; | 7730 | return 0; |
7731 | |||
7416 | tp->bios_model = tp->bios_version_str[0] | 7732 | tp->bios_model = tp->bios_version_str[0] |
7417 | | (tp->bios_version_str[1] << 8); | 7733 | | (tp->bios_version_str[1] << 8); |
7734 | tp->bios_release = (tp->bios_version_str[4] << 8) | ||
7735 | | tp->bios_version_str[5]; | ||
7418 | 7736 | ||
7419 | /* | 7737 | /* |
7420 | * ThinkPad T23 or newer, A31 or newer, R50e or newer, | 7738 | * ThinkPad T23 or newer, A31 or newer, R50e or newer, |
@@ -7433,8 +7751,21 @@ static int __must_check __init get_thinkpad_model_data( | |||
7433 | tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); | 7751 | tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); |
7434 | if (!tp->ec_version_str) | 7752 | if (!tp->ec_version_str) |
7435 | return -ENOMEM; | 7753 | return -ENOMEM; |
7436 | tp->ec_model = ec_fw_string[0] | 7754 | |
7437 | | (ec_fw_string[1] << 8); | 7755 | if (tpacpi_is_valid_fw_id(ec_fw_string, 'H')) { |
7756 | tp->ec_model = ec_fw_string[0] | ||
7757 | | (ec_fw_string[1] << 8); | ||
7758 | tp->ec_release = (ec_fw_string[4] << 8) | ||
7759 | | ec_fw_string[5]; | ||
7760 | } else { | ||
7761 | printk(TPACPI_NOTICE | ||
7762 | "ThinkPad firmware release %s " | ||
7763 | "doesn't match the known patterns\n", | ||
7764 | ec_fw_string); | ||
7765 | printk(TPACPI_NOTICE | ||
7766 | "please report this to %s\n", | ||
7767 | TPACPI_MAIL); | ||
7768 | } | ||
7438 | break; | 7769 | break; |
7439 | } | 7770 | } |
7440 | } | 7771 | } |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 7f207f335bec..ef3a2cd3a7a0 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -287,6 +287,25 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | |||
287 | ACPI_DECODE_16); | 287 | ACPI_DECODE_16); |
288 | } | 288 | } |
289 | 289 | ||
290 | static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, | ||
291 | struct acpi_resource *res) | ||
292 | { | ||
293 | struct acpi_resource_extended_address64 *p = &res->data.ext_address64; | ||
294 | |||
295 | if (p->producer_consumer == ACPI_PRODUCER) | ||
296 | return; | ||
297 | |||
298 | if (p->resource_type == ACPI_MEMORY_RANGE) | ||
299 | pnpacpi_parse_allocated_memresource(dev, | ||
300 | p->minimum, p->address_length, | ||
301 | p->info.mem.write_protect); | ||
302 | else if (p->resource_type == ACPI_IO_RANGE) | ||
303 | pnpacpi_parse_allocated_ioresource(dev, | ||
304 | p->minimum, p->address_length, | ||
305 | p->granularity == 0xfff ? ACPI_DECODE_10 : | ||
306 | ACPI_DECODE_16); | ||
307 | } | ||
308 | |||
290 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | 309 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, |
291 | void *data) | 310 | void *data) |
292 | { | 311 | { |
@@ -400,8 +419,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
400 | break; | 419 | break; |
401 | 420 | ||
402 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | 421 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: |
403 | if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER) | 422 | pnpacpi_parse_allocated_ext_address_space(dev, res); |
404 | return AE_OK; | ||
405 | break; | 423 | break; |
406 | 424 | ||
407 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 425 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
@@ -630,6 +648,28 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, | |||
630 | IORESOURCE_IO_FIXED); | 648 | IORESOURCE_IO_FIXED); |
631 | } | 649 | } |
632 | 650 | ||
651 | static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, | ||
652 | unsigned int option_flags, | ||
653 | struct acpi_resource *r) | ||
654 | { | ||
655 | struct acpi_resource_extended_address64 *p = &r->data.ext_address64; | ||
656 | unsigned char flags = 0; | ||
657 | |||
658 | if (p->address_length == 0) | ||
659 | return; | ||
660 | |||
661 | if (p->resource_type == ACPI_MEMORY_RANGE) { | ||
662 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | ||
663 | flags = IORESOURCE_MEM_WRITEABLE; | ||
664 | pnp_register_mem_resource(dev, option_flags, p->minimum, | ||
665 | p->minimum, 0, p->address_length, | ||
666 | flags); | ||
667 | } else if (p->resource_type == ACPI_IO_RANGE) | ||
668 | pnp_register_port_resource(dev, option_flags, p->minimum, | ||
669 | p->minimum, 0, p->address_length, | ||
670 | IORESOURCE_IO_FIXED); | ||
671 | } | ||
672 | |||
633 | struct acpipnp_parse_option_s { | 673 | struct acpipnp_parse_option_s { |
634 | struct pnp_dev *dev; | 674 | struct pnp_dev *dev; |
635 | unsigned int option_flags; | 675 | unsigned int option_flags; |
@@ -711,6 +751,7 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, | |||
711 | break; | 751 | break; |
712 | 752 | ||
713 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | 753 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: |
754 | pnpacpi_parse_ext_address_option(dev, option_flags, res); | ||
714 | break; | 755 | break; |
715 | 756 | ||
716 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 757 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
@@ -765,6 +806,7 @@ static int pnpacpi_supported_resource(struct acpi_resource *res) | |||
765 | case ACPI_RESOURCE_TYPE_ADDRESS16: | 806 | case ACPI_RESOURCE_TYPE_ADDRESS16: |
766 | case ACPI_RESOURCE_TYPE_ADDRESS32: | 807 | case ACPI_RESOURCE_TYPE_ADDRESS32: |
767 | case ACPI_RESOURCE_TYPE_ADDRESS64: | 808 | case ACPI_RESOURCE_TYPE_ADDRESS64: |
809 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | ||
768 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 810 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
769 | return 1; | 811 | return 1; |
770 | } | 812 | } |
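
With these hunks the extended 64-bit address descriptors stop being silently skipped: allocated resources are parsed like their non-extended cousins, and the option parser registers them too. The allocated-resource path reuses the existing decode heuristic, where an I/O range advertising a granularity of 0xfff is taken to be a 10-bit ISA-style decoder and anything else a full 16-bit one; the heuristic in isolation:

    #include <stdio.h>

    enum decode { DECODE_10, DECODE_16 };

    static enum decode io_decode(unsigned long long granularity)
    {
            return granularity == 0xfff ? DECODE_10 : DECODE_16;
    }

    int main(void)
    {
            printf("granularity 0xfff -> %d-bit decode\n",
                   io_decode(0xfff) == DECODE_10 ? 10 : 16);
            printf("granularity 0     -> %d-bit decode\n",
                   io_decode(0) == DECODE_10 ? 10 : 16);
            return 0;
    }
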
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index e371a9c15341..a07015d646dd 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -398,8 +398,7 @@ static int sbs_init(struct pci_dev *dev) | |||
398 | { | 398 | { |
399 | u8 __iomem *p; | 399 | u8 __iomem *p; |
400 | 400 | ||
401 | p = ioremap_nocache(pci_resource_start(dev, 0), | 401 | p = pci_ioremap_bar(dev, 0); |
402 | pci_resource_len(dev, 0)); | ||
403 | 402 | ||
404 | if (p == NULL) | 403 | if (p == NULL) |
405 | return -ENOMEM; | 404 | return -ENOMEM; |
@@ -423,8 +422,7 @@ static void __devexit sbs_exit(struct pci_dev *dev) | |||
423 | { | 422 | { |
424 | u8 __iomem *p; | 423 | u8 __iomem *p; |
425 | 424 | ||
426 | p = ioremap_nocache(pci_resource_start(dev, 0), | 425 | p = pci_ioremap_bar(dev, 0); |
427 | pci_resource_len(dev, 0)); | ||
428 | /* FIXME: What if resource_len < OCT_REG_CR_OFF */ | 426 | /* FIXME: What if resource_len < OCT_REG_CR_OFF */ |
429 | if (p != NULL) | 427 | if (p != NULL) |
430 | writeb(0, p + OCT_REG_CR_OFF); | 428 | writeb(0, p + OCT_REG_CR_OFF); |
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 9f2891c2c4a2..cd1b6a45bb82 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c | |||
@@ -1548,8 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev, | |||
1548 | goto probe_exit1; | 1548 | goto probe_exit1; |
1549 | } | 1549 | } |
1550 | 1550 | ||
1551 | icom_adapter->base_addr = ioremap(icom_adapter->base_addr_pci, | 1551 | icom_adapter->base_addr = pci_ioremap_bar(dev, 0); |
1552 | pci_resource_len(dev, 0)); | ||
1553 | 1552 | ||
1554 | if (!icom_adapter->base_addr) | 1553 | if (!icom_adapter->base_addr) |
1555 | goto probe_exit1; | 1554 | goto probe_exit1; |
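
Both serial drivers swap an open-coded remap of BAR 0 for the pci_ioremap_bar() helper. If memory serves, the helper wraps the same ioremap_nocache() call behind a sanity check that the BAR really is a memory resource; roughly, as a paraphrase rather than the exact kernel source:

    /* Rough shape of the helper, for reference only: */
    void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
    {
            if (WARN_ON(!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)))
                    return NULL;
            return ioremap_nocache(pci_resource_start(pdev, bar),
                                   pci_resource_len(pdev, bar));
    }

Note that icom previously used plain ioremap(); for device registers the helper's uncached mapping is the safer default, so the conversion there is arguably a small behavioural tightening as well as a cleanup.
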
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index 107ce2e187b8..00f4577d2f7f 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c | |||
@@ -467,7 +467,7 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd) | |||
467 | printk(KERN_INFO "jsm: linemap is full, added device failed\n"); | 467 | printk(KERN_INFO "jsm: linemap is full, added device failed\n"); |
468 | continue; | 468 | continue; |
469 | } else | 469 | } else |
470 | set_bit((int)line, linemap); | 470 | set_bit(line, linemap); |
471 | brd->channels[i]->uart_port.line = line; | 471 | brd->channels[i]->uart_port.line = line; |
472 | if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) | 472 | if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) |
473 | printk(KERN_INFO "jsm: add device failed\n"); | 473 | printk(KERN_INFO "jsm: add device failed\n"); |
@@ -503,7 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd) | |||
503 | 503 | ||
504 | ch = brd->channels[i]; | 504 | ch = brd->channels[i]; |
505 | 505 | ||
506 | clear_bit((int)(ch->uart_port.line), linemap); | 506 | clear_bit(ch->uart_port.line, linemap); |
507 | uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); | 507 | uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); |
508 | } | 508 | } |
509 | 509 | ||
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c index 7313c2edcb83..54dd16d66a4b 100644 --- a/drivers/serial/serial_txx9.c +++ b/drivers/serial/serial_txx9.c | |||
@@ -461,6 +461,94 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state) | |||
461 | spin_unlock_irqrestore(&up->port.lock, flags); | 461 | spin_unlock_irqrestore(&up->port.lock, flags); |
462 | } | 462 | } |
463 | 463 | ||
464 | #if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL) | ||
465 | /* | ||
466 | * Wait for transmitter & holding register to empty | ||
467 | */ | ||
468 | static void wait_for_xmitr(struct uart_txx9_port *up) | ||
469 | { | ||
470 | unsigned int tmout = 10000; | ||
471 | |||
472 | /* Wait up to 10ms for the character(s) to be sent. */ | ||
473 | while (--tmout && | ||
474 | !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS)) | ||
475 | udelay(1); | ||
476 | |||
477 | /* Wait up to 1s for flow control if necessary */ | ||
478 | if (up->port.flags & UPF_CONS_FLOW) { | ||
479 | tmout = 1000000; | ||
480 | while (--tmout && | ||
481 | (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS)) | ||
482 | udelay(1); | ||
483 | } | ||
484 | } | ||
485 | #endif | ||
486 | |||
487 | #ifdef CONFIG_CONSOLE_POLL | ||
488 | /* | ||
489 | * Console polling routines for writing and reading from the uart while | ||
490 | * in an interrupt or debug context. | ||
491 | */ | ||
492 | |||
493 | static int serial_txx9_get_poll_char(struct uart_port *port) | ||
494 | { | ||
495 | unsigned int ier; | ||
496 | unsigned char c; | ||
497 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | ||
498 | |||
499 | /* | ||
500 | * First save the IER then disable the interrupts | ||
501 | */ | ||
502 | ier = sio_in(up, TXX9_SIDICR); | ||
503 | sio_out(up, TXX9_SIDICR, 0); | ||
504 | |||
505 | while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID) | ||
506 | ; | ||
507 | |||
508 | c = sio_in(up, TXX9_SIRFIFO); | ||
509 | |||
510 | /* | ||
511 | * Finally, clear RX interrupt status | ||
512 | * and restore the IER | ||
513 | */ | ||
514 | sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS); | ||
515 | sio_out(up, TXX9_SIDICR, ier); | ||
516 | return c; | ||
517 | } | ||
518 | |||
519 | |||
520 | static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) | ||
521 | { | ||
522 | unsigned int ier; | ||
523 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | ||
524 | |||
525 | /* | ||
526 | * First save the IER then disable the interrupts | ||
527 | */ | ||
528 | ier = sio_in(up, TXX9_SIDICR); | ||
529 | sio_out(up, TXX9_SIDICR, 0); | ||
530 | |||
531 | wait_for_xmitr(up); | ||
532 | /* | ||
533 | * Send the character out. | ||
534 | * If a LF, also do CR... | ||
535 | */ | ||
536 | sio_out(up, TXX9_SITFIFO, c); | ||
537 | if (c == 10) { | ||
538 | wait_for_xmitr(up); | ||
539 | sio_out(up, TXX9_SITFIFO, 13); | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * Finally, wait for transmitter to become empty | ||
544 | * and restore the IER | ||
545 | */ | ||
546 | wait_for_xmitr(up); | ||
547 | sio_out(up, TXX9_SIDICR, ier); | ||
548 | } | ||
549 | |||
550 | #endif /* CONFIG_CONSOLE_POLL */ | ||
551 | |||
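
The poll pair follows the usual kgdb console contract: save and mask the UART's interrupt enables, busy-wait on hardware status with a bounded spin (these hooks run in contexts that must not sleep), and restore the mask afterwards. The bounded-spin idiom from wait_for_xmitr(), reduced to a skeleton:

    #include <stdio.h>

    /* Poll ready() up to 'tries' times; give up instead of hanging. */
    static int spin_until(int (*ready)(void), unsigned tries)
    {
            while (--tries && !ready())
                    ; /* the driver udelay(1)s here */
            return tries != 0;
    }

    static int count;
    static int tx_empty(void) { return ++count > 5000; }

    int main(void)
    {
            printf("tx %s\n",
                   spin_until(tx_empty, 10000) ? "drained" : "timed out");
            return 0;
    }
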
464 | static int serial_txx9_startup(struct uart_port *port) | 552 | static int serial_txx9_startup(struct uart_port *port) |
465 | { | 553 | { |
466 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | 554 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; |
@@ -781,6 +869,10 @@ static struct uart_ops serial_txx9_pops = { | |||
781 | .release_port = serial_txx9_release_port, | 869 | .release_port = serial_txx9_release_port, |
782 | .request_port = serial_txx9_request_port, | 870 | .request_port = serial_txx9_request_port, |
783 | .config_port = serial_txx9_config_port, | 871 | .config_port = serial_txx9_config_port, |
872 | #ifdef CONFIG_CONSOLE_POLL | ||
873 | .poll_get_char = serial_txx9_get_poll_char, | ||
874 | .poll_put_char = serial_txx9_put_poll_char, | ||
875 | #endif | ||
784 | }; | 876 | }; |
785 | 877 | ||
786 | static struct uart_txx9_port serial_txx9_ports[UART_NR]; | 878 | static struct uart_txx9_port serial_txx9_ports[UART_NR]; |
@@ -803,27 +895,6 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv, | |||
803 | 895 | ||
804 | #ifdef CONFIG_SERIAL_TXX9_CONSOLE | 896 | #ifdef CONFIG_SERIAL_TXX9_CONSOLE |
805 | 897 | ||
806 | /* | ||
807 | * Wait for transmitter & holding register to empty | ||
808 | */ | ||
809 | static inline void wait_for_xmitr(struct uart_txx9_port *up) | ||
810 | { | ||
811 | unsigned int tmout = 10000; | ||
812 | |||
813 | /* Wait up to 10ms for the character(s) to be sent. */ | ||
814 | while (--tmout && | ||
815 | !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS)) | ||
816 | udelay(1); | ||
817 | |||
818 | /* Wait up to 1s for flow control if necessary */ | ||
819 | if (up->port.flags & UPF_CONS_FLOW) { | ||
820 | tmout = 1000000; | ||
821 | while (--tmout && | ||
822 | (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS)) | ||
823 | udelay(1); | ||
824 | } | ||
825 | } | ||
826 | |||
827 | static void serial_txx9_console_putchar(struct uart_port *port, int ch) | 898 | static void serial_txx9_console_putchar(struct uart_port *port, int ch) |
828 | { | 899 | { |
829 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | 900 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; |
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 559f8784acf3..9052bcb4f528 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c | |||
@@ -501,7 +501,7 @@ int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header, | |||
501 | int result = -ENOANO; | 501 | int result = -ENOANO; |
502 | struct uwb_rceb *rceb = *header; | 502 | struct uwb_rceb *rceb = *header; |
503 | int event = le16_to_cpu(rceb->wEvent); | 503 | int event = le16_to_cpu(rceb->wEvent); |
504 | size_t event_size; | 504 | ssize_t event_size; |
505 | size_t core_size, offset; | 505 | size_t core_size, offset; |
506 | 506 | ||
507 | if (rceb->bEventType != UWB_RC_CET_GENERAL) | 507 | if (rceb->bEventType != UWB_RC_CET_GENERAL) |
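
The one-character type change matters because event_size presumably carries negative error codes further down: parked in a size_t, a value such as -EINVAL becomes a huge positive length and any later "< 0" test is dead code. A two-line demonstration:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            size_t  u = (size_t)-22; /* -EINVAL stored in an unsigned type */
            ssize_t s = -22;

            printf("as size_t : %zu\n", u); /* enormous; the error is lost */
            printf("as ssize_t: %zd (s < 0: %d)\n", s, s < 0);
            return 0;
    }
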
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c index cd2035768b47..86a853b84119 100644 --- a/drivers/uwb/wlp/txrx.c +++ b/drivers/uwb/wlp/txrx.c | |||
@@ -326,7 +326,7 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
326 | int result = -EINVAL; | 326 | int result = -EINVAL; |
327 | struct ethhdr *eth_hdr = (void *) skb->data; | 327 | struct ethhdr *eth_hdr = (void *) skb->data; |
328 | 328 | ||
329 | if (is_broadcast_ether_addr(eth_hdr->h_dest)) { | 329 | if (is_multicast_ether_addr(eth_hdr->h_dest)) { |
330 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); | 330 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); |
331 | if (result < 0) { | 331 | if (result < 0) { |
332 | if (printk_ratelimit()) | 332 | if (printk_ratelimit()) |