Diffstat (limited to 'drivers'): 268 files changed, 16200 insertions(+), 3723 deletions(-)
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 88e42abf5d88..0df8fcb687d6 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,6 +61,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
 static int acpi_ac_add(struct acpi_device *device);
 static int acpi_ac_remove(struct acpi_device *device, int type);
 static int acpi_ac_resume(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
 
 static const struct acpi_device_id ac_device_ids[] = {
 	{"ACPI0003", 0},
@@ -72,10 +73,12 @@ static struct acpi_driver acpi_ac_driver = {
 	.name = "ac",
 	.class = ACPI_AC_CLASS,
 	.ids = ac_device_ids,
+	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
 	.ops = {
 		.add = acpi_ac_add,
 		.remove = acpi_ac_remove,
 		.resume = acpi_ac_resume,
+		.notify = acpi_ac_notify,
 		},
 };
 
@@ -220,16 +223,14 @@ static int acpi_ac_remove_fs(struct acpi_device *device)
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
 {
-	struct acpi_ac *ac = data;
-	struct acpi_device *device = NULL;
+	struct acpi_ac *ac = acpi_driver_data(device);
 
 
 	if (!ac)
 		return;
 
-	device = ac->device;
 	switch (event) {
 	default:
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -253,7 +254,6 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
 static int acpi_ac_add(struct acpi_device *device)
 {
 	int result = 0;
-	acpi_status status = AE_OK;
 	struct acpi_ac *ac = NULL;
 
 
@@ -286,13 +286,6 @@ static int acpi_ac_add(struct acpi_device *device)
 	ac->charger.get_property = get_ac_property;
 	power_supply_register(&ac->device->dev, &ac->charger);
 #endif
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_ALL_NOTIFY, acpi_ac_notify,
-					     ac);
-	if (ACPI_FAILURE(status)) {
-		result = -ENODEV;
-		goto end;
-	}
 
 	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
@@ -328,7 +321,6 @@ static int acpi_ac_resume(struct acpi_device *device)
 
 static int acpi_ac_remove(struct acpi_device *device, int type)
 {
-	acpi_status status = AE_OK;
 	struct acpi_ac *ac = NULL;
 
 
@@ -337,8 +329,6 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
 
 	ac = acpi_driver_data(device);
 
-	status = acpi_remove_notify_handler(device->handle,
-					    ACPI_ALL_NOTIFY, acpi_ac_notify);
 #ifdef CONFIG_ACPI_SYSFS_POWER
 	if (ac->charger.dev)
 		power_supply_unregister(&ac->charger);
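
The ac.c conversion above, and the battery.c conversion below, follow the same recipe: drop the per-driver acpi_install_notify_handler()/acpi_remove_notify_handler() calls, set ACPI_DRIVER_ALL_NOTIFY_EVENTS in the driver flags, and let the ACPI core invoke the new .notify(struct acpi_device *, u32 event) callback. A minimal sketch of a driver written against the new interface is shown below; the "XYZ0001" ID, the foo_* names and the printk message are illustrative placeholders, not part of this patch.

/* Sketch only: a hypothetical ACPI driver on the new .notify interface.
 * All foo_* names and the "XYZ0001" ID are illustrative.  The driver is
 * registered with acpi_bus_register_driver(&foo_driver) at module init. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

static const struct acpi_device_id foo_device_ids[] = {
	{"XYZ0001", 0},
	{"", 0},
};

static int foo_add(struct acpi_device *device)
{
	return 0;	/* allocate per-device state here if needed */
}

static int foo_remove(struct acpi_device *device, int type)
{
	return 0;
}

/* Called by acpi_bus_notify() for every event on the device; no
 * acpi_install_notify_handler()/acpi_remove_notify_handler() pair
 * is needed in the driver any more. */
static void foo_notify(struct acpi_device *device, u32 event)
{
	printk(KERN_INFO "foo: event 0x%x on [%s]\n", event,
	       acpi_device_bid(device));
}

static struct acpi_driver foo_driver = {
	.name = "foo",
	.class = "foo",
	.ids = foo_device_ids,
	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,	/* system notifies too */
	.ops = {
		.add = foo_add,
		.remove = foo_remove,
		.notify = foo_notify,
	},
};
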
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0de6312919a..58b4517ce712 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -796,13 +796,12 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
                                  Driver Interface
    -------------------------------------------------------------------------- */
 
-static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_battery_notify(struct acpi_device *device, u32 event)
 {
-	struct acpi_battery *battery = data;
-	struct acpi_device *device;
+	struct acpi_battery *battery = acpi_driver_data(device);
+
 	if (!battery)
 		return;
-	device = battery->device;
 	acpi_battery_update(battery);
 	acpi_bus_generate_proc_event(device, event,
 				     acpi_battery_present(battery));
@@ -819,7 +818,6 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
 static int acpi_battery_add(struct acpi_device *device)
 {
 	int result = 0;
-	acpi_status status = 0;
 	struct acpi_battery *battery = NULL;
 	if (!device)
 		return -EINVAL;
@@ -834,22 +832,12 @@ static int acpi_battery_add(struct acpi_device *device)
 	acpi_battery_update(battery);
 #ifdef CONFIG_ACPI_PROCFS_POWER
 	result = acpi_battery_add_fs(device);
-	if (result)
-		goto end;
 #endif
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_ALL_NOTIFY,
-					     acpi_battery_notify, battery);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Installing notify handler"));
-		result = -ENODEV;
-		goto end;
-	}
-	printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
-	       ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
-	       device->status.battery_present ? "present" : "absent");
-      end:
-	if (result) {
+	if (!result) {
+		printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+			ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+			device->status.battery_present ? "present" : "absent");
+	} else {
 #ifdef CONFIG_ACPI_PROCFS_POWER
 		acpi_battery_remove_fs(device);
 #endif
@@ -860,15 +848,11 @@ static int acpi_battery_add(struct acpi_device *device)
 
 static int acpi_battery_remove(struct acpi_device *device, int type)
 {
-	acpi_status status = 0;
 	struct acpi_battery *battery = NULL;
 
 	if (!device || !acpi_driver_data(device))
 		return -EINVAL;
 	battery = acpi_driver_data(device);
-	status = acpi_remove_notify_handler(device->handle,
-					    ACPI_ALL_NOTIFY,
-					    acpi_battery_notify);
 #ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_battery_remove_fs(device);
 #endif
@@ -896,10 +880,12 @@ static struct acpi_driver acpi_battery_driver = {
 	.name = "battery",
 	.class = ACPI_BATTERY_CLASS,
 	.ids = battery_device_ids,
+	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
 	.ops = {
 		.add = acpi_battery_add,
 		.resume = acpi_battery_resume,
 		.remove = acpi_battery_remove,
+		.notify = acpi_battery_notify,
 		},
 };
 
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 09c69806c1fc..f6baa77deefb 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -192,6 +192,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
 		},
 	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Sony VGN-NS10J_S",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Sony VGN-SR290J",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+		},
+	},
 
 	/*
 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ae862f1798dc..2876fc70c3a9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -450,18 +450,16 @@ int acpi_bus_receive_event(struct acpi_bus_event *event) | |||
450 | Notification Handling | 450 | Notification Handling |
451 | -------------------------------------------------------------------------- */ | 451 | -------------------------------------------------------------------------- */ |
452 | 452 | ||
453 | static int | 453 | static void acpi_bus_check_device(acpi_handle handle) |
454 | acpi_bus_check_device(struct acpi_device *device, int *status_changed) | ||
455 | { | 454 | { |
456 | acpi_status status = 0; | 455 | struct acpi_device *device; |
456 | acpi_status status; | ||
457 | struct acpi_device_status old_status; | 457 | struct acpi_device_status old_status; |
458 | 458 | ||
459 | 459 | if (acpi_bus_get_device(handle, &device)) | |
460 | return; | ||
460 | if (!device) | 461 | if (!device) |
461 | return -EINVAL; | 462 | return; |
462 | |||
463 | if (status_changed) | ||
464 | *status_changed = 0; | ||
465 | 463 | ||
466 | old_status = device->status; | 464 | old_status = device->status; |
467 | 465 | ||
@@ -471,22 +469,15 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed) | |||
471 | */ | 469 | */ |
472 | if (device->parent && !device->parent->status.present) { | 470 | if (device->parent && !device->parent->status.present) { |
473 | device->status = device->parent->status; | 471 | device->status = device->parent->status; |
474 | if (STRUCT_TO_INT(old_status) != STRUCT_TO_INT(device->status)) { | 472 | return; |
475 | if (status_changed) | ||
476 | *status_changed = 1; | ||
477 | } | ||
478 | return 0; | ||
479 | } | 473 | } |
480 | 474 | ||
481 | status = acpi_bus_get_status(device); | 475 | status = acpi_bus_get_status(device); |
482 | if (ACPI_FAILURE(status)) | 476 | if (ACPI_FAILURE(status)) |
483 | return -ENODEV; | 477 | return; |
484 | 478 | ||
485 | if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status)) | 479 | if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status)) |
486 | return 0; | 480 | return; |
487 | |||
488 | if (status_changed) | ||
489 | *status_changed = 1; | ||
490 | 481 | ||
491 | /* | 482 | /* |
492 | * Device Insertion/Removal | 483 | * Device Insertion/Removal |
@@ -498,33 +489,17 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed) | |||
498 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n")); | 489 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n")); |
499 | /* TBD: Handle device removal */ | 490 | /* TBD: Handle device removal */ |
500 | } | 491 | } |
501 | |||
502 | return 0; | ||
503 | } | 492 | } |
504 | 493 | ||
505 | static int acpi_bus_check_scope(struct acpi_device *device) | 494 | static void acpi_bus_check_scope(acpi_handle handle) |
506 | { | 495 | { |
507 | int result = 0; | ||
508 | int status_changed = 0; | ||
509 | |||
510 | |||
511 | if (!device) | ||
512 | return -EINVAL; | ||
513 | |||
514 | /* Status Change? */ | 496 | /* Status Change? */ |
515 | result = acpi_bus_check_device(device, &status_changed); | 497 | acpi_bus_check_device(handle); |
516 | if (result) | ||
517 | return result; | ||
518 | |||
519 | if (!status_changed) | ||
520 | return 0; | ||
521 | 498 | ||
522 | /* | 499 | /* |
523 | * TBD: Enumerate child devices within this device's scope and | 500 | * TBD: Enumerate child devices within this device's scope and |
524 | * run acpi_bus_check_device()'s on them. | 501 | * run acpi_bus_check_device()'s on them. |
525 | */ | 502 | */ |
526 | |||
527 | return 0; | ||
528 | } | 503 | } |
529 | 504 | ||
530 | static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list); | 505 | static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list); |
@@ -547,22 +522,19 @@ EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier); | |||
547 | */ | 522 | */ |
548 | static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) | 523 | static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) |
549 | { | 524 | { |
550 | int result = 0; | ||
551 | struct acpi_device *device = NULL; | 525 | struct acpi_device *device = NULL; |
526 | struct acpi_driver *driver; | ||
527 | |||
528 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n", | ||
529 | type, handle)); | ||
552 | 530 | ||
553 | blocking_notifier_call_chain(&acpi_bus_notify_list, | 531 | blocking_notifier_call_chain(&acpi_bus_notify_list, |
554 | type, (void *)handle); | 532 | type, (void *)handle); |
555 | 533 | ||
556 | if (acpi_bus_get_device(handle, &device)) | ||
557 | return; | ||
558 | |||
559 | switch (type) { | 534 | switch (type) { |
560 | 535 | ||
561 | case ACPI_NOTIFY_BUS_CHECK: | 536 | case ACPI_NOTIFY_BUS_CHECK: |
562 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 537 | acpi_bus_check_scope(handle); |
563 | "Received BUS CHECK notification for device [%s]\n", | ||
564 | device->pnp.bus_id)); | ||
565 | result = acpi_bus_check_scope(device); | ||
566 | /* | 538 | /* |
567 | * TBD: We'll need to outsource certain events to non-ACPI | 539 | * TBD: We'll need to outsource certain events to non-ACPI |
568 | * drivers via the device manager (device.c). | 540 | * drivers via the device manager (device.c). |
@@ -570,10 +542,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) | |||
570 | break; | 542 | break; |
571 | 543 | ||
572 | case ACPI_NOTIFY_DEVICE_CHECK: | 544 | case ACPI_NOTIFY_DEVICE_CHECK: |
573 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 545 | acpi_bus_check_device(handle); |
574 | "Received DEVICE CHECK notification for device [%s]\n", | ||
575 | device->pnp.bus_id)); | ||
576 | result = acpi_bus_check_device(device, NULL); | ||
577 | /* | 546 | /* |
578 | * TBD: We'll need to outsource certain events to non-ACPI | 547 | * TBD: We'll need to outsource certain events to non-ACPI |
579 | * drivers via the device manager (device.c). | 548 | * drivers via the device manager (device.c). |
@@ -581,44 +550,26 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) | |||
581 | break; | 550 | break; |
582 | 551 | ||
583 | case ACPI_NOTIFY_DEVICE_WAKE: | 552 | case ACPI_NOTIFY_DEVICE_WAKE: |
584 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
585 | "Received DEVICE WAKE notification for device [%s]\n", | ||
586 | device->pnp.bus_id)); | ||
587 | /* TBD */ | 553 | /* TBD */ |
588 | break; | 554 | break; |
589 | 555 | ||
590 | case ACPI_NOTIFY_EJECT_REQUEST: | 556 | case ACPI_NOTIFY_EJECT_REQUEST: |
591 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
592 | "Received EJECT REQUEST notification for device [%s]\n", | ||
593 | device->pnp.bus_id)); | ||
594 | /* TBD */ | 557 | /* TBD */ |
595 | break; | 558 | break; |
596 | 559 | ||
597 | case ACPI_NOTIFY_DEVICE_CHECK_LIGHT: | 560 | case ACPI_NOTIFY_DEVICE_CHECK_LIGHT: |
598 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
599 | "Received DEVICE CHECK LIGHT notification for device [%s]\n", | ||
600 | device->pnp.bus_id)); | ||
601 | /* TBD: Exactly what does 'light' mean? */ | 561 | /* TBD: Exactly what does 'light' mean? */ |
602 | break; | 562 | break; |
603 | 563 | ||
604 | case ACPI_NOTIFY_FREQUENCY_MISMATCH: | 564 | case ACPI_NOTIFY_FREQUENCY_MISMATCH: |
605 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
606 | "Received FREQUENCY MISMATCH notification for device [%s]\n", | ||
607 | device->pnp.bus_id)); | ||
608 | /* TBD */ | 565 | /* TBD */ |
609 | break; | 566 | break; |
610 | 567 | ||
611 | case ACPI_NOTIFY_BUS_MODE_MISMATCH: | 568 | case ACPI_NOTIFY_BUS_MODE_MISMATCH: |
612 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
613 | "Received BUS MODE MISMATCH notification for device [%s]\n", | ||
614 | device->pnp.bus_id)); | ||
615 | /* TBD */ | 569 | /* TBD */ |
616 | break; | 570 | break; |
617 | 571 | ||
618 | case ACPI_NOTIFY_POWER_FAULT: | 572 | case ACPI_NOTIFY_POWER_FAULT: |
619 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
620 | "Received POWER FAULT notification for device [%s]\n", | ||
621 | device->pnp.bus_id)); | ||
622 | /* TBD */ | 573 | /* TBD */ |
623 | break; | 574 | break; |
624 | 575 | ||
@@ -629,7 +580,13 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) | |||
629 | break; | 580 | break; |
630 | } | 581 | } |
631 | 582 | ||
632 | return; | 583 | acpi_bus_get_device(handle, &device); |
584 | if (device) { | ||
585 | driver = device->driver; | ||
586 | if (driver && driver->ops.notify && | ||
587 | (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS)) | ||
588 | driver->ops.notify(device, type); | ||
589 | } | ||
633 | } | 590 | } |
634 | 591 | ||
635 | /* -------------------------------------------------------------------------- | 592 | /* -------------------------------------------------------------------------- |
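
With the bus.c hunks above, acpi_bus_notify() first runs the global blocking notifier chain for every event and handle, and only then forwards the event to the bound driver's .notify callback when that driver advertises ACPI_DRIVER_ALL_NOTIFY_EVENTS. A sketch of a subsystem hooking the global chain is below; it assumes the register_acpi_bus_notifier()/unregister_acpi_bus_notifier() pair exported here takes an ordinary struct notifier_block, matching the blocking_notifier_call_chain() call in acpi_bus_notify(), and the foo_* names are placeholders.

/* Sketch only: listening on the acpi_bus_notify_list chain shown above.
 * Assumes register_acpi_bus_notifier(struct notifier_block *), as implied
 * by the blocking_notifier_call_chain() usage; foo_* names are made up. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/acpi_bus.h>

static int foo_bus_event(struct notifier_block *nb, unsigned long type,
			 void *data)
{
	acpi_handle handle = data;	/* the chain passes the handle as data */

	if (type == ACPI_NOTIFY_EJECT_REQUEST)
		printk(KERN_INFO "foo: eject request on handle %p\n", handle);

	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_bus_event,
};

static int __init foo_init(void)
{
	register_acpi_bus_notifier(&foo_nb);
	return 0;
}

static void __exit foo_exit(void)
{
	unregister_acpi_bus_notifier(&foo_nb);
}
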
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8bd2c2a6884d..a8a5c29958c8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -140,46 +140,6 @@ struct device *acpi_get_physical_device(acpi_handle handle)
 
 EXPORT_SYMBOL(acpi_get_physical_device);
 
-/* ToDo: When a PCI bridge is found, return the PCI device behind the bridge
- *     This should work in general, but did not on a Lenovo T61 for the
- *     graphics card. But this must be fixed when the PCI device is
- *     bound and the kernel device struct is attached to the acpi device
- * Note: A success call will increase reference count by one
- *       Do call put_device(dev) on the returned device then
- */
-struct device *acpi_get_physical_pci_device(acpi_handle handle)
-{
-	struct device *dev;
-	long long device_id;
-	acpi_status status;
-
-	status =
-	    acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
-
-	if (ACPI_FAILURE(status))
-		return NULL;
-
-	/* We need to attempt to determine whether the _ADR refers to a
-	   PCI device or not. There's no terribly good way to do this,
-	   so the best we can hope for is to assume that there'll never
-	   be a device in the host bridge */
-	if (device_id >= 0x10000) {
-		/* It looks like a PCI device. Does it exist? */
-		dev = acpi_get_physical_device(handle);
-	} else {
-		/* It doesn't look like a PCI device. Does its parent
-		   exist? */
-		acpi_handle phandle;
-		if (acpi_get_parent(handle, &phandle))
-			return NULL;
-		dev = acpi_get_physical_device(phandle);
-	}
-	if (!dev)
-		return NULL;
-	return dev;
-}
-EXPORT_SYMBOL(acpi_get_physical_pci_device);
-
 static int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
 	struct acpi_device *acpi_dev;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index d916bea729f1..71670719d61a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -79,6 +79,7 @@ static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
 static struct workqueue_struct *kacpi_notify_wq;
+static struct workqueue_struct *kacpi_hotplug_wq;
 
 struct acpi_res_list {
 	resource_size_t start;
@@ -192,8 +193,10 @@ acpi_status acpi_os_initialize1(void)
 {
 	kacpid_wq = create_singlethread_workqueue("kacpid");
 	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
+	kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
+	BUG_ON(!kacpi_hotplug_wq);
 	return AE_OK;
 }
 
@@ -206,6 +209,7 @@ acpi_status acpi_os_terminate(void)
 
 	destroy_workqueue(kacpid_wq);
 	destroy_workqueue(kacpi_notify_wq);
+	destroy_workqueue(kacpi_hotplug_wq);
 
 	return AE_OK;
 }
@@ -716,6 +720,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
 	struct workqueue_struct *queue;
+	work_func_t func;
 	int ret;
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 			  "Scheduling function [%p(%p)] for deferred execution.\n",
@@ -740,15 +745,17 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	dpc->function = function;
 	dpc->context = context;
 
-	if (!hp) {
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-		queue = (type == OSL_NOTIFY_HANDLER) ?
-			kacpi_notify_wq : kacpid_wq;
-		ret = queue_work(queue, &dpc->work);
-	} else {
-		INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
-		ret = schedule_work(&dpc->work);
-	}
+	/*
+	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
+	 * because the hotplug code may call driver .remove() functions,
+	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
+	 * to flush these workqueues.
+	 */
+	queue = hp ? kacpi_hotplug_wq :
+		(type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
+	func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
+	INIT_WORK(&dpc->work, func);
+	ret = queue_work(queue, &dpc->work);
 
 	if (!ret) {
 		printk(KERN_ERR PREFIX
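
The osl.c change above gives hotplug work its own kacpi_hotplug_wq so that hotplug paths, which may end up flushing kacpid_wq/kacpi_notify_wq from driver .remove() code, never run on the queues they flush. A sketch of how callers reach these queues through the OSL helpers follows; it assumes acpi_os_hotplug_execute() is the hp=1 wrapper around __acpi_os_execute() that acpi_os_execute_hp_deferred serves, and the foo_* names are placeholders.

/* Sketch only: deferring work onto the queues created above.
 * acpi_os_hotplug_execute() is assumed to be the hp=1 entry point;
 * foo_* names are illustrative. */
#include <acpi/acpi_bus.h>

static void foo_deferred(void *context)
{
	/* Runs from kacpi_notify_wq (or kacpi_hotplug_wq for the hotplug
	 * variant), so it may sleep, but it must not flush that same
	 * workqueue. */
}

static void foo_kick(acpi_handle handle)
{
	/* Ordinary notify-time work lands on kacpi_notify_wq ... */
	acpi_os_execute(OSL_NOTIFY_HANDLER, foo_deferred, handle);

	/* ... while hotplug work, which can reach driver .remove() and
	 * flush the other queues, goes to kacpi_hotplug_wq instead. */
	acpi_os_hotplug_execute(foo_deferred, handle);
}
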
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index bc46de3d967f..a5a77b78a723 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -24,12 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/types.h> | 27 | #include <linux/types.h> |
30 | #include <linux/proc_fs.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/pm.h> | ||
33 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
34 | #include <linux/acpi.h> | 29 | #include <linux/acpi.h> |
35 | #include <acpi/acpi_bus.h> | 30 | #include <acpi/acpi_bus.h> |
@@ -38,310 +33,76 @@ | |||
38 | #define _COMPONENT ACPI_PCI_COMPONENT | 33 | #define _COMPONENT ACPI_PCI_COMPONENT |
39 | ACPI_MODULE_NAME("pci_bind"); | 34 | ACPI_MODULE_NAME("pci_bind"); |
40 | 35 | ||
41 | struct acpi_pci_data { | 36 | static int acpi_pci_unbind(struct acpi_device *device) |
42 | struct acpi_pci_id id; | ||
43 | struct pci_bus *bus; | ||
44 | struct pci_dev *dev; | ||
45 | }; | ||
46 | |||
47 | static int acpi_pci_unbind(struct acpi_device *device); | ||
48 | |||
49 | static void acpi_pci_data_handler(acpi_handle handle, u32 function, | ||
50 | void *context) | ||
51 | { | ||
52 | |||
53 | /* TBD: Anything we need to do here? */ | ||
54 | |||
55 | return; | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * acpi_get_pci_id | ||
60 | * ------------------ | ||
61 | * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem) | ||
62 | * to resolve PCI information for ACPI-PCI devices defined in the namespace. | ||
63 | * This typically occurs when resolving PCI operation region information. | ||
64 | */ | ||
65 | acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id) | ||
66 | { | 37 | { |
67 | int result = 0; | 38 | struct pci_dev *dev; |
68 | acpi_status status = AE_OK; | ||
69 | struct acpi_device *device = NULL; | ||
70 | struct acpi_pci_data *data = NULL; | ||
71 | |||
72 | |||
73 | if (!id) | ||
74 | return AE_BAD_PARAMETER; | ||
75 | |||
76 | result = acpi_bus_get_device(handle, &device); | ||
77 | if (result) { | ||
78 | printk(KERN_ERR PREFIX | ||
79 | "Invalid ACPI Bus context for device %s\n", | ||
80 | acpi_device_bid(device)); | ||
81 | return AE_NOT_EXIST; | ||
82 | } | ||
83 | |||
84 | status = acpi_get_data(handle, acpi_pci_data_handler, (void **)&data); | ||
85 | if (ACPI_FAILURE(status) || !data) { | ||
86 | ACPI_EXCEPTION((AE_INFO, status, | ||
87 | "Invalid ACPI-PCI context for device %s", | ||
88 | acpi_device_bid(device))); | ||
89 | return status; | ||
90 | } | ||
91 | 39 | ||
92 | *id = data->id; | 40 | dev = acpi_get_pci_dev(device->handle); |
41 | if (!dev || !dev->subordinate) | ||
42 | goto out; | ||
93 | 43 | ||
94 | /* | 44 | acpi_pci_irq_del_prt(dev->subordinate); |
95 | id->segment = data->id.segment; | ||
96 | id->bus = data->id.bus; | ||
97 | id->device = data->id.device; | ||
98 | id->function = data->id.function; | ||
99 | */ | ||
100 | 45 | ||
101 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 46 | device->ops.bind = NULL; |
102 | "Device %s has PCI address %04x:%02x:%02x.%d\n", | 47 | device->ops.unbind = NULL; |
103 | acpi_device_bid(device), id->segment, id->bus, | ||
104 | id->device, id->function)); | ||
105 | 48 | ||
106 | return AE_OK; | 49 | out: |
50 | pci_dev_put(dev); | ||
51 | return 0; | ||
107 | } | 52 | } |
108 | 53 | ||
109 | EXPORT_SYMBOL(acpi_get_pci_id); | 54 | static int acpi_pci_bind(struct acpi_device *device) |
110 | |||
111 | int acpi_pci_bind(struct acpi_device *device) | ||
112 | { | 55 | { |
113 | int result = 0; | ||
114 | acpi_status status; | 56 | acpi_status status; |
115 | struct acpi_pci_data *data; | ||
116 | struct acpi_pci_data *pdata; | ||
117 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
118 | acpi_handle handle; | 57 | acpi_handle handle; |
58 | struct pci_bus *bus; | ||
59 | struct pci_dev *dev; | ||
119 | 60 | ||
120 | if (!device || !device->parent) | 61 | dev = acpi_get_pci_dev(device->handle); |
121 | return -EINVAL; | 62 | if (!dev) |
122 | 63 | return 0; | |
123 | data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); | ||
124 | if (!data) | ||
125 | return -ENOMEM; | ||
126 | |||
127 | status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); | ||
128 | if (ACPI_FAILURE(status)) { | ||
129 | kfree(data); | ||
130 | return -ENODEV; | ||
131 | } | ||
132 | |||
133 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n", | ||
134 | (char *)buffer.pointer)); | ||
135 | |||
136 | /* | ||
137 | * Segment & Bus | ||
138 | * ------------- | ||
139 | * These are obtained via the parent device's ACPI-PCI context. | ||
140 | */ | ||
141 | status = acpi_get_data(device->parent->handle, acpi_pci_data_handler, | ||
142 | (void **)&pdata); | ||
143 | if (ACPI_FAILURE(status) || !pdata || !pdata->bus) { | ||
144 | ACPI_EXCEPTION((AE_INFO, status, | ||
145 | "Invalid ACPI-PCI context for parent device %s", | ||
146 | acpi_device_bid(device->parent))); | ||
147 | result = -ENODEV; | ||
148 | goto end; | ||
149 | } | ||
150 | data->id.segment = pdata->id.segment; | ||
151 | data->id.bus = pdata->bus->number; | ||
152 | |||
153 | /* | ||
154 | * Device & Function | ||
155 | * ----------------- | ||
156 | * These are simply obtained from the device's _ADR method. Note | ||
157 | * that a value of zero is valid. | ||
158 | */ | ||
159 | data->id.device = device->pnp.bus_address >> 16; | ||
160 | data->id.function = device->pnp.bus_address & 0xFFFF; | ||
161 | |||
162 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n", | ||
163 | data->id.segment, data->id.bus, data->id.device, | ||
164 | data->id.function)); | ||
165 | |||
166 | /* | ||
167 | * TBD: Support slot devices (e.g. function=0xFFFF). | ||
168 | */ | ||
169 | |||
170 | /* | ||
171 | * Locate PCI Device | ||
172 | * ----------------- | ||
173 | * Locate matching device in PCI namespace. If it doesn't exist | ||
174 | * this typically means that the device isn't currently inserted | ||
175 | * (e.g. docking station, port replicator, etc.). | ||
176 | */ | ||
177 | data->dev = pci_get_slot(pdata->bus, | ||
178 | PCI_DEVFN(data->id.device, data->id.function)); | ||
179 | if (!data->dev) { | ||
180 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
181 | "Device %04x:%02x:%02x.%d not present in PCI namespace\n", | ||
182 | data->id.segment, data->id.bus, | ||
183 | data->id.device, data->id.function)); | ||
184 | result = -ENODEV; | ||
185 | goto end; | ||
186 | } | ||
187 | if (!data->dev->bus) { | ||
188 | printk(KERN_ERR PREFIX | ||
189 | "Device %04x:%02x:%02x.%d has invalid 'bus' field\n", | ||
190 | data->id.segment, data->id.bus, | ||
191 | data->id.device, data->id.function); | ||
192 | result = -ENODEV; | ||
193 | goto end; | ||
194 | } | ||
195 | 64 | ||
196 | /* | 65 | /* |
197 | * PCI Bridge? | 66 | * Install the 'bind' function to facilitate callbacks for |
198 | * ----------- | 67 | * children of the P2P bridge. |
199 | * If so, set the 'bus' field and install the 'bind' function to | ||
200 | * facilitate callbacks for all of its children. | ||
201 | */ | 68 | */ |
202 | if (data->dev->subordinate) { | 69 | if (dev->subordinate) { |
203 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 70 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
204 | "Device %04x:%02x:%02x.%d is a PCI bridge\n", | 71 | "Device %04x:%02x:%02x.%d is a PCI bridge\n", |
205 | data->id.segment, data->id.bus, | 72 | pci_domain_nr(dev->bus), dev->bus->number, |
206 | data->id.device, data->id.function)); | 73 | PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn))); |
207 | data->bus = data->dev->subordinate; | ||
208 | device->ops.bind = acpi_pci_bind; | 74 | device->ops.bind = acpi_pci_bind; |
209 | device->ops.unbind = acpi_pci_unbind; | 75 | device->ops.unbind = acpi_pci_unbind; |
210 | } | 76 | } |
211 | 77 | ||
212 | /* | 78 | /* |
213 | * Attach ACPI-PCI Context | 79 | * Evaluate and parse _PRT, if exists. This code allows parsing of |
214 | * ----------------------- | 80 | * _PRT objects within the scope of non-bridge devices. Note that |
215 | * Thus binding the ACPI and PCI devices. | 81 | * _PRTs within the scope of a PCI bridge assume the bridge's |
216 | */ | 82 | * subordinate bus number. |
217 | status = acpi_attach_data(device->handle, acpi_pci_data_handler, data); | ||
218 | if (ACPI_FAILURE(status)) { | ||
219 | ACPI_EXCEPTION((AE_INFO, status, | ||
220 | "Unable to attach ACPI-PCI context to device %s", | ||
221 | acpi_device_bid(device))); | ||
222 | result = -ENODEV; | ||
223 | goto end; | ||
224 | } | ||
225 | |||
226 | /* | ||
227 | * PCI Routing Table | ||
228 | * ----------------- | ||
229 | * Evaluate and parse _PRT, if exists. This code is independent of | ||
230 | * PCI bridges (above) to allow parsing of _PRT objects within the | ||
231 | * scope of non-bridge devices. Note that _PRTs within the scope of | ||
232 | * a PCI bridge assume the bridge's subordinate bus number. | ||
233 | * | 83 | * |
234 | * TBD: Can _PRTs exist within the scope of non-bridge PCI devices? | 84 | * TBD: Can _PRTs exist within the scope of non-bridge PCI devices? |
235 | */ | 85 | */ |
236 | status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); | 86 | status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); |
237 | if (ACPI_SUCCESS(status)) { | ||
238 | if (data->bus) /* PCI-PCI bridge */ | ||
239 | acpi_pci_irq_add_prt(device->handle, data->id.segment, | ||
240 | data->bus->number); | ||
241 | else /* non-bridge PCI device */ | ||
242 | acpi_pci_irq_add_prt(device->handle, data->id.segment, | ||
243 | data->id.bus); | ||
244 | } | ||
245 | |||
246 | end: | ||
247 | kfree(buffer.pointer); | ||
248 | if (result) { | ||
249 | pci_dev_put(data->dev); | ||
250 | kfree(data); | ||
251 | } | ||
252 | return result; | ||
253 | } | ||
254 | |||
255 | static int acpi_pci_unbind(struct acpi_device *device) | ||
256 | { | ||
257 | int result = 0; | ||
258 | acpi_status status; | ||
259 | struct acpi_pci_data *data; | ||
260 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
261 | |||
262 | |||
263 | if (!device || !device->parent) | ||
264 | return -EINVAL; | ||
265 | |||
266 | status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); | ||
267 | if (ACPI_FAILURE(status)) | 87 | if (ACPI_FAILURE(status)) |
268 | return -ENODEV; | 88 | goto out; |
269 | 89 | ||
270 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n", | 90 | if (dev->subordinate) |
271 | (char *) buffer.pointer)); | 91 | bus = dev->subordinate; |
272 | kfree(buffer.pointer); | 92 | else |
93 | bus = dev->bus; | ||
273 | 94 | ||
274 | status = | 95 | acpi_pci_irq_add_prt(device->handle, bus); |
275 | acpi_get_data(device->handle, acpi_pci_data_handler, | ||
276 | (void **)&data); | ||
277 | if (ACPI_FAILURE(status)) { | ||
278 | result = -ENODEV; | ||
279 | goto end; | ||
280 | } | ||
281 | 96 | ||
282 | status = acpi_detach_data(device->handle, acpi_pci_data_handler); | 97 | out: |
283 | if (ACPI_FAILURE(status)) { | 98 | pci_dev_put(dev); |
284 | ACPI_EXCEPTION((AE_INFO, status, | 99 | return 0; |
285 | "Unable to detach data from device %s", | ||
286 | acpi_device_bid(device))); | ||
287 | result = -ENODEV; | ||
288 | goto end; | ||
289 | } | ||
290 | if (data->dev->subordinate) { | ||
291 | acpi_pci_irq_del_prt(data->id.segment, data->bus->number); | ||
292 | } | ||
293 | pci_dev_put(data->dev); | ||
294 | kfree(data); | ||
295 | |||
296 | end: | ||
297 | return result; | ||
298 | } | 100 | } |
299 | 101 | ||
300 | int | 102 | int acpi_pci_bind_root(struct acpi_device *device) |
301 | acpi_pci_bind_root(struct acpi_device *device, | ||
302 | struct acpi_pci_id *id, struct pci_bus *bus) | ||
303 | { | 103 | { |
304 | int result = 0; | ||
305 | acpi_status status; | ||
306 | struct acpi_pci_data *data = NULL; | ||
307 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
308 | |||
309 | if (!device || !id || !bus) { | ||
310 | return -EINVAL; | ||
311 | } | ||
312 | |||
313 | data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); | ||
314 | if (!data) | ||
315 | return -ENOMEM; | ||
316 | |||
317 | data->id = *id; | ||
318 | data->bus = bus; | ||
319 | device->ops.bind = acpi_pci_bind; | 104 | device->ops.bind = acpi_pci_bind; |
320 | device->ops.unbind = acpi_pci_unbind; | 105 | device->ops.unbind = acpi_pci_unbind; |
321 | 106 | ||
322 | status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); | 107 | return 0; |
323 | if (ACPI_FAILURE(status)) { | ||
324 | kfree (data); | ||
325 | return -ENODEV; | ||
326 | } | ||
327 | |||
328 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to " | ||
329 | "%04x:%02x\n", (char *)buffer.pointer, | ||
330 | id->segment, id->bus)); | ||
331 | |||
332 | status = acpi_attach_data(device->handle, acpi_pci_data_handler, data); | ||
333 | if (ACPI_FAILURE(status)) { | ||
334 | ACPI_EXCEPTION((AE_INFO, status, | ||
335 | "Unable to attach ACPI-PCI context to device %s", | ||
336 | (char *)buffer.pointer)); | ||
337 | result = -ENODEV; | ||
338 | goto end; | ||
339 | } | ||
340 | |||
341 | end: | ||
342 | kfree(buffer.pointer); | ||
343 | if (result != 0) | ||
344 | kfree(data); | ||
345 | |||
346 | return result; | ||
347 | } | 108 | } |
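
The rewritten pci_bind.c no longer attaches a private acpi_pci_data object to each handle; both acpi_pci_bind() and acpi_pci_unbind() simply look up the pci_dev with acpi_get_pci_dev() (added to pci_root.c below) and read segment, bus and devfn from it. The usage pattern it relies on is sketched here; foo_report() and its messages are illustrative, not part of the patch.

/* Sketch only: the lookup pattern the new pci_bind.c is built on.
 * acpi_get_pci_dev() returns the pci_dev with its refcount raised,
 * so every successful call is paired with pci_dev_put(). */
#include <linux/pci.h>
#include <acpi/acpi_bus.h>

static void foo_report(struct acpi_device *adev)
{
	struct pci_dev *dev = acpi_get_pci_dev(adev->handle);

	if (!dev)
		return;		/* not a PCI device, or not present */

	dev_info(&dev->dev, "ACPI node [%s] is %04x:%02x:%02x.%d\n",
		 acpi_device_bid(adev), pci_domain_nr(dev->bus),
		 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));

	if (dev->subordinate)	/* a P2P bridge, as acpi_pci_bind() checks */
		dev_info(&dev->dev, "bridge to bus %02x\n",
			 dev->subordinate->number);

	pci_dev_put(dev);	/* drop the reference taken by the lookup */
}
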
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 2faa9e2ac893..b794eb88ab90 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -182,7 +182,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
 	}
 }
 
-static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
+static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
 				  struct acpi_pci_routing_table *prt)
 {
 	struct acpi_prt_entry *entry;
@@ -196,8 +196,8 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
 	 * 1=INTA, 2=INTB.  We use the PCI encoding throughout, so convert
 	 * it here.
 	 */
-	entry->id.segment = segment;
-	entry->id.bus = bus;
+	entry->id.segment = pci_domain_nr(bus);
+	entry->id.bus = bus->number;
 	entry->id.device = (prt->address >> 16) & 0xFFFF;
 	entry->pin = prt->pin + 1;
 
@@ -242,7 +242,7 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
 	return 0;
 }
 
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
 {
 	acpi_status status;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -271,7 +271,7 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
 
 	entry = buffer.pointer;
 	while (entry && (entry->length > 0)) {
-		acpi_pci_irq_add_entry(handle, segment, bus, entry);
+		acpi_pci_irq_add_entry(handle, bus, entry);
 		entry = (struct acpi_pci_routing_table *)
 			((unsigned long)entry + entry->length);
 	}
@@ -280,16 +280,17 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
 	return 0;
 }
 
-void acpi_pci_irq_del_prt(int segment, int bus)
+void acpi_pci_irq_del_prt(struct pci_bus *bus)
 {
 	struct acpi_prt_entry *entry, *tmp;
 
 	printk(KERN_DEBUG
 	       "ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
-	       segment, bus);
+	       pci_domain_nr(bus), bus->number);
 	spin_lock(&acpi_prt_lock);
 	list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
-		if (segment == entry->id.segment && bus == entry->id.bus) {
+		if (pci_domain_nr(bus) == entry->id.segment
+		    && bus->number == entry->id.bus) {
 			list_del(&entry->list);
 			kfree(entry);
 		}
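
After this change the _PRT helpers take the struct pci_bus itself; the segment/bus pair is derived inside via pci_domain_nr(bus) and bus->number rather than being passed around as two ints. A short sketch of a caller, mirroring what acpi_pci_bind()/acpi_pci_unbind() now do, is below; the foo_* wrappers are illustrative names only.

/* Sketch only: driving the reworked _PRT helpers from a bridge device.
 * Mirrors the acpi_pci_bind()/acpi_pci_unbind() usage above; the foo_*
 * names are made up, and the prototypes come from acpi_drivers.h. */
#include <linux/pci.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

static int foo_setup_prt(acpi_handle handle, struct pci_dev *dev)
{
	/* _PRT entries under a P2P bridge apply to its secondary bus. */
	struct pci_bus *bus = dev->subordinate ? dev->subordinate : dev->bus;

	return acpi_pci_irq_add_prt(handle, bus);
}

static void foo_teardown_prt(struct pci_dev *dev)
{
	/* Teardown needs only the same bus pointer. */
	if (dev->subordinate)
		acpi_pci_irq_del_prt(dev->subordinate);
}
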
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 196f97d00956..55b5b90c2a44 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -63,9 +63,10 @@ static struct acpi_driver acpi_pci_root_driver = { | |||
63 | 63 | ||
64 | struct acpi_pci_root { | 64 | struct acpi_pci_root { |
65 | struct list_head node; | 65 | struct list_head node; |
66 | struct acpi_device * device; | 66 | struct acpi_device *device; |
67 | struct acpi_pci_id id; | ||
68 | struct pci_bus *bus; | 67 | struct pci_bus *bus; |
68 | u16 segment; | ||
69 | u8 bus_nr; | ||
69 | 70 | ||
70 | u32 osc_support_set; /* _OSC state of support bits */ | 71 | u32 osc_support_set; /* _OSC state of support bits */ |
71 | u32 osc_control_set; /* _OSC state of control bits */ | 72 | u32 osc_control_set; /* _OSC state of control bits */ |
@@ -82,7 +83,7 @@ static DEFINE_MUTEX(osc_lock); | |||
82 | int acpi_pci_register_driver(struct acpi_pci_driver *driver) | 83 | int acpi_pci_register_driver(struct acpi_pci_driver *driver) |
83 | { | 84 | { |
84 | int n = 0; | 85 | int n = 0; |
85 | struct list_head *entry; | 86 | struct acpi_pci_root *root; |
86 | 87 | ||
87 | struct acpi_pci_driver **pptr = &sub_driver; | 88 | struct acpi_pci_driver **pptr = &sub_driver; |
88 | while (*pptr) | 89 | while (*pptr) |
@@ -92,9 +93,7 @@ int acpi_pci_register_driver(struct acpi_pci_driver *driver) | |||
92 | if (!driver->add) | 93 | if (!driver->add) |
93 | return 0; | 94 | return 0; |
94 | 95 | ||
95 | list_for_each(entry, &acpi_pci_roots) { | 96 | list_for_each_entry(root, &acpi_pci_roots, node) { |
96 | struct acpi_pci_root *root; | ||
97 | root = list_entry(entry, struct acpi_pci_root, node); | ||
98 | driver->add(root->device->handle); | 97 | driver->add(root->device->handle); |
99 | n++; | 98 | n++; |
100 | } | 99 | } |
@@ -106,7 +105,7 @@ EXPORT_SYMBOL(acpi_pci_register_driver); | |||
106 | 105 | ||
107 | void acpi_pci_unregister_driver(struct acpi_pci_driver *driver) | 106 | void acpi_pci_unregister_driver(struct acpi_pci_driver *driver) |
108 | { | 107 | { |
109 | struct list_head *entry; | 108 | struct acpi_pci_root *root; |
110 | 109 | ||
111 | struct acpi_pci_driver **pptr = &sub_driver; | 110 | struct acpi_pci_driver **pptr = &sub_driver; |
112 | while (*pptr) { | 111 | while (*pptr) { |
@@ -120,28 +119,48 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver) | |||
120 | if (!driver->remove) | 119 | if (!driver->remove) |
121 | return; | 120 | return; |
122 | 121 | ||
123 | list_for_each(entry, &acpi_pci_roots) { | 122 | list_for_each_entry(root, &acpi_pci_roots, node) |
124 | struct acpi_pci_root *root; | ||
125 | root = list_entry(entry, struct acpi_pci_root, node); | ||
126 | driver->remove(root->device->handle); | 123 | driver->remove(root->device->handle); |
127 | } | ||
128 | } | 124 | } |
129 | 125 | ||
130 | EXPORT_SYMBOL(acpi_pci_unregister_driver); | 126 | EXPORT_SYMBOL(acpi_pci_unregister_driver); |
131 | 127 | ||
132 | acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus) | 128 | acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus) |
133 | { | 129 | { |
134 | struct acpi_pci_root *tmp; | 130 | struct acpi_pci_root *root; |
135 | 131 | ||
136 | list_for_each_entry(tmp, &acpi_pci_roots, node) { | 132 | list_for_each_entry(root, &acpi_pci_roots, node) |
137 | if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus)) | 133 | if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus)) |
138 | return tmp->device->handle; | 134 | return root->device->handle; |
139 | } | ||
140 | return NULL; | 135 | return NULL; |
141 | } | 136 | } |
142 | 137 | ||
143 | EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle); | 138 | EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle); |
144 | 139 | ||
140 | /** | ||
141 | * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge | ||
142 | * @handle - the ACPI CA node in question. | ||
143 | * | ||
144 | * Note: we could make this API take a struct acpi_device * instead, but | ||
145 | * for now, it's more convenient to operate on an acpi_handle. | ||
146 | */ | ||
147 | int acpi_is_root_bridge(acpi_handle handle) | ||
148 | { | ||
149 | int ret; | ||
150 | struct acpi_device *device; | ||
151 | |||
152 | ret = acpi_bus_get_device(handle, &device); | ||
153 | if (ret) | ||
154 | return 0; | ||
155 | |||
156 | ret = acpi_match_device_ids(device, root_device_ids); | ||
157 | if (ret) | ||
158 | return 0; | ||
159 | else | ||
160 | return 1; | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(acpi_is_root_bridge); | ||
163 | |||
145 | static acpi_status | 164 | static acpi_status |
146 | get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) | 165 | get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) |
147 | { | 166 | { |
@@ -161,19 +180,22 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) | |||
161 | return AE_OK; | 180 | return AE_OK; |
162 | } | 181 | } |
163 | 182 | ||
164 | static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum) | 183 | static acpi_status try_get_root_bridge_busnr(acpi_handle handle, |
184 | unsigned long long *bus) | ||
165 | { | 185 | { |
166 | acpi_status status; | 186 | acpi_status status; |
187 | int busnum; | ||
167 | 188 | ||
168 | *busnum = -1; | 189 | busnum = -1; |
169 | status = | 190 | status = |
170 | acpi_walk_resources(handle, METHOD_NAME__CRS, | 191 | acpi_walk_resources(handle, METHOD_NAME__CRS, |
171 | get_root_bridge_busnr_callback, busnum); | 192 | get_root_bridge_busnr_callback, &busnum); |
172 | if (ACPI_FAILURE(status)) | 193 | if (ACPI_FAILURE(status)) |
173 | return status; | 194 | return status; |
174 | /* Check if we really get a bus number from _CRS */ | 195 | /* Check if we really get a bus number from _CRS */ |
175 | if (*busnum == -1) | 196 | if (busnum == -1) |
176 | return AE_ERROR; | 197 | return AE_ERROR; |
198 | *bus = busnum; | ||
177 | return AE_OK; | 199 | return AE_OK; |
178 | } | 200 | } |
179 | 201 | ||
@@ -298,6 +320,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) | |||
298 | static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) | 320 | static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) |
299 | { | 321 | { |
300 | struct acpi_pci_root *root; | 322 | struct acpi_pci_root *root; |
323 | |||
301 | list_for_each_entry(root, &acpi_pci_roots, node) { | 324 | list_for_each_entry(root, &acpi_pci_roots, node) { |
302 | if (root->device->handle == handle) | 325 | if (root->device->handle == handle) |
303 | return root; | 326 | return root; |
@@ -305,6 +328,87 @@ static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) | |||
305 | return NULL; | 328 | return NULL; |
306 | } | 329 | } |
307 | 330 | ||
331 | struct acpi_handle_node { | ||
332 | struct list_head node; | ||
333 | acpi_handle handle; | ||
334 | }; | ||
335 | |||
336 | /** | ||
337 | * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev | ||
338 | * @handle: the handle in question | ||
339 | * | ||
340 | * Given an ACPI CA handle, the desired PCI device is located in the | ||
341 | * list of PCI devices. | ||
342 | * | ||
343 | * If the device is found, its reference count is increased and this | ||
344 | * function returns a pointer to its data structure. The caller must | ||
345 | * decrement the reference count by calling pci_dev_put(). | ||
346 | * If no device is found, %NULL is returned. | ||
347 | */ | ||
348 | struct pci_dev *acpi_get_pci_dev(acpi_handle handle) | ||
349 | { | ||
350 | int dev, fn; | ||
351 | unsigned long long adr; | ||
352 | acpi_status status; | ||
353 | acpi_handle phandle; | ||
354 | struct pci_bus *pbus; | ||
355 | struct pci_dev *pdev = NULL; | ||
356 | struct acpi_handle_node *node, *tmp; | ||
357 | struct acpi_pci_root *root; | ||
358 | LIST_HEAD(device_list); | ||
359 | |||
360 | /* | ||
361 | * Walk up the ACPI CA namespace until we reach a PCI root bridge. | ||
362 | */ | ||
363 | phandle = handle; | ||
364 | while (!acpi_is_root_bridge(phandle)) { | ||
365 | node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL); | ||
366 | if (!node) | ||
367 | goto out; | ||
368 | |||
369 | INIT_LIST_HEAD(&node->node); | ||
370 | node->handle = phandle; | ||
371 | list_add(&node->node, &device_list); | ||
372 | |||
373 | status = acpi_get_parent(phandle, &phandle); | ||
374 | if (ACPI_FAILURE(status)) | ||
375 | goto out; | ||
376 | } | ||
377 | |||
378 | root = acpi_pci_find_root(phandle); | ||
379 | if (!root) | ||
380 | goto out; | ||
381 | |||
382 | pbus = root->bus; | ||
383 | |||
384 | /* | ||
385 | * Now, walk back down the PCI device tree until we return to our | ||
386 | * original handle. Assumes that everything between the PCI root | ||
387 | * bridge and the device we're looking for must be a P2P bridge. | ||
388 | */ | ||
389 | list_for_each_entry(node, &device_list, node) { | ||
390 | acpi_handle hnd = node->handle; | ||
391 | status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr); | ||
392 | if (ACPI_FAILURE(status)) | ||
393 | goto out; | ||
394 | dev = (adr >> 16) & 0xffff; | ||
395 | fn = adr & 0xffff; | ||
396 | |||
397 | pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn)); | ||
398 | if (!pdev || hnd == handle) | ||
399 | break; | ||
400 | |||
401 | pbus = pdev->subordinate; | ||
402 | pci_dev_put(pdev); | ||
403 | } | ||
404 | out: | ||
405 | list_for_each_entry_safe(node, tmp, &device_list, node) | ||
406 | kfree(node); | ||
407 | |||
408 | return pdev; | ||
409 | } | ||
410 | EXPORT_SYMBOL_GPL(acpi_get_pci_dev); | ||
411 | |||
308 | /** | 412 | /** |
309 | * acpi_pci_osc_control_set - commit requested control to Firmware | 413 | * acpi_pci_osc_control_set - commit requested control to Firmware |
310 | * @handle: acpi_handle for the target ACPI object | 414 | * @handle: acpi_handle for the target ACPI object |
@@ -363,31 +467,46 @@ EXPORT_SYMBOL(acpi_pci_osc_control_set); | |||
363 | 467 | ||
364 | static int __devinit acpi_pci_root_add(struct acpi_device *device) | 468 | static int __devinit acpi_pci_root_add(struct acpi_device *device) |
365 | { | 469 | { |
366 | int result = 0; | 470 | unsigned long long segment, bus; |
367 | struct acpi_pci_root *root = NULL; | 471 | acpi_status status; |
368 | struct acpi_pci_root *tmp; | 472 | int result; |
369 | acpi_status status = AE_OK; | 473 | struct acpi_pci_root *root; |
370 | unsigned long long value = 0; | 474 | acpi_handle handle; |
371 | acpi_handle handle = NULL; | ||
372 | struct acpi_device *child; | 475 | struct acpi_device *child; |
373 | u32 flags, base_flags; | 476 | u32 flags, base_flags; |
374 | 477 | ||
478 | segment = 0; | ||
479 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, | ||
480 | &segment); | ||
481 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | ||
482 | printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); | ||
483 | return -ENODEV; | ||
484 | } | ||
375 | 485 | ||
376 | if (!device) | 486 | /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ |
377 | return -EINVAL; | 487 | bus = 0; |
488 | status = try_get_root_bridge_busnr(device->handle, &bus); | ||
489 | if (ACPI_FAILURE(status)) { | ||
490 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); | ||
491 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | ||
492 | printk(KERN_ERR PREFIX | ||
493 | "no bus number in _CRS and can't evaluate _BBN\n"); | ||
494 | return -ENODEV; | ||
495 | } | ||
496 | } | ||
378 | 497 | ||
379 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | 498 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); |
380 | if (!root) | 499 | if (!root) |
381 | return -ENOMEM; | 500 | return -ENOMEM; |
382 | INIT_LIST_HEAD(&root->node); | ||
383 | 501 | ||
502 | INIT_LIST_HEAD(&root->node); | ||
384 | root->device = device; | 503 | root->device = device; |
504 | root->segment = segment & 0xFFFF; | ||
505 | root->bus_nr = bus & 0xFF; | ||
385 | strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); | 506 | strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); |
386 | strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); | 507 | strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); |
387 | device->driver_data = root; | 508 | device->driver_data = root; |
388 | 509 | ||
389 | device->ops.bind = acpi_pci_bind; | ||
390 | |||
391 | /* | 510 | /* |
392 | * All supported architectures that use ACPI have support for | 511 | * All supported architectures that use ACPI have support for |
393 | * PCI domains, so we indicate this in _OSC support capabilities. | 512 | * PCI domains, so we indicate this in _OSC support capabilities. |
@@ -395,79 +514,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
395 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; | 514 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; |
396 | acpi_pci_osc_support(root, flags); | 515 | acpi_pci_osc_support(root, flags); |
397 | 516 | ||
398 | /* | ||
399 | * Segment | ||
400 | * ------- | ||
401 | * Obtained via _SEG, if exists, otherwise assumed to be zero (0). | ||
402 | */ | ||
403 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, | ||
404 | &value); | ||
405 | switch (status) { | ||
406 | case AE_OK: | ||
407 | root->id.segment = (u16) value; | ||
408 | break; | ||
409 | case AE_NOT_FOUND: | ||
410 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
411 | "Assuming segment 0 (no _SEG)\n")); | ||
412 | root->id.segment = 0; | ||
413 | break; | ||
414 | default: | ||
415 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SEG")); | ||
416 | result = -ENODEV; | ||
417 | goto end; | ||
418 | } | ||
419 | |||
420 | /* | ||
421 | * Bus | ||
422 | * --- | ||
423 | * Obtained via _BBN, if exists, otherwise assumed to be zero (0). | ||
424 | */ | ||
425 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, | ||
426 | &value); | ||
427 | switch (status) { | ||
428 | case AE_OK: | ||
429 | root->id.bus = (u16) value; | ||
430 | break; | ||
431 | case AE_NOT_FOUND: | ||
432 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming bus 0 (no _BBN)\n")); | ||
433 | root->id.bus = 0; | ||
434 | break; | ||
435 | default: | ||
436 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BBN")); | ||
437 | result = -ENODEV; | ||
438 | goto end; | ||
439 | } | ||
440 | |||
441 | /* Some systems have wrong _BBN */ | ||
442 | list_for_each_entry(tmp, &acpi_pci_roots, node) { | ||
443 | if ((tmp->id.segment == root->id.segment) | ||
444 | && (tmp->id.bus == root->id.bus)) { | ||
445 | int bus = 0; | ||
446 | acpi_status status; | ||
447 | |||
448 | printk(KERN_ERR PREFIX | ||
449 | "Wrong _BBN value, reboot" | ||
450 | " and use option 'pci=noacpi'\n"); | ||
451 | |||
452 | status = try_get_root_bridge_busnr(device->handle, &bus); | ||
453 | if (ACPI_FAILURE(status)) | ||
454 | break; | ||
455 | if (bus != root->id.bus) { | ||
456 | printk(KERN_INFO PREFIX | ||
457 | "PCI _CRS %d overrides _BBN 0\n", bus); | ||
458 | root->id.bus = bus; | ||
459 | } | ||
460 | break; | ||
461 | } | ||
462 | } | ||
463 | /* | ||
464 | * Device & Function | ||
465 | * ----------------- | ||
466 | * Obtained from _ADR (which has already been evaluated for us). | ||
467 | */ | ||
468 | root->id.device = device->pnp.bus_address >> 16; | ||
469 | root->id.function = device->pnp.bus_address & 0xFFFF; | ||
470 | |||
471 | /* | 517 | /* |
472 | * TBD: Need PCI interface for enumeration/configuration of roots. | 518 | * TBD: Need PCI interface for enumeration/configuration of roots. |
473 | */ | 519 | */ |
@@ -477,7 +523,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
477 | 523 | ||
478 | printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", | 524 | printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", |
479 | acpi_device_name(device), acpi_device_bid(device), | 525 | acpi_device_name(device), acpi_device_bid(device), |
480 | root->id.segment, root->id.bus); | 526 | root->segment, root->bus_nr); |
481 | 527 | ||
482 | /* | 528 | /* |
483 | * Scan the Root Bridge | 529 | * Scan the Root Bridge |
@@ -486,11 +532,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
486 | * PCI namespace does not get created until this call is made (and | 532 | * PCI namespace does not get created until this call is made (and |
487 | * thus the root bridge's pci_dev does not exist). | 533 | * thus the root bridge's pci_dev does not exist). |
488 | */ | 534 | */ |
489 | root->bus = pci_acpi_scan_root(device, root->id.segment, root->id.bus); | 535 | root->bus = pci_acpi_scan_root(device, segment, bus); |
490 | if (!root->bus) { | 536 | if (!root->bus) { |
491 | printk(KERN_ERR PREFIX | 537 | printk(KERN_ERR PREFIX |
492 | "Bus %04x:%02x not present in PCI namespace\n", | 538 | "Bus %04x:%02x not present in PCI namespace\n", |
493 | root->id.segment, root->id.bus); | 539 | root->segment, root->bus_nr); |
494 | result = -ENODEV; | 540 | result = -ENODEV; |
495 | goto end; | 541 | goto end; |
496 | } | 542 | } |
@@ -500,7 +546,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
500 | * ----------------------- | 546 | * ----------------------- |
501 | * Thus binding the ACPI and PCI devices. | 547 | * Thus binding the ACPI and PCI devices. |
502 | */ | 548 | */ |
503 | result = acpi_pci_bind_root(device, &root->id, root->bus); | 549 | result = acpi_pci_bind_root(device); |
504 | if (result) | 550 | if (result) |
505 | goto end; | 551 | goto end; |
506 | 552 | ||
@@ -511,8 +557,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
511 | */ | 557 | */ |
512 | status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); | 558 | status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); |
513 | if (ACPI_SUCCESS(status)) | 559 | if (ACPI_SUCCESS(status)) |
514 | result = acpi_pci_irq_add_prt(device->handle, root->id.segment, | 560 | result = acpi_pci_irq_add_prt(device->handle, root->bus); |
515 | root->id.bus); | ||
516 | 561 | ||
517 | /* | 562 | /* |
518 | * Scan and bind all _ADR-Based Devices | 563 | * Scan and bind all _ADR-Based Devices |
@@ -531,42 +576,28 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
531 | if (flags != base_flags) | 576 | if (flags != base_flags) |
532 | acpi_pci_osc_support(root, flags); | 577 | acpi_pci_osc_support(root, flags); |
533 | 578 | ||
534 | end: | 579 | return 0; |
535 | if (result) { | ||
536 | if (!list_empty(&root->node)) | ||
537 | list_del(&root->node); | ||
538 | kfree(root); | ||
539 | } | ||
540 | 580 | ||
581 | end: | ||
582 | if (!list_empty(&root->node)) | ||
583 | list_del(&root->node); | ||
584 | kfree(root); | ||
541 | return result; | 585 | return result; |
542 | } | 586 | } |
543 | 587 | ||
544 | static int acpi_pci_root_start(struct acpi_device *device) | 588 | static int acpi_pci_root_start(struct acpi_device *device) |
545 | { | 589 | { |
546 | struct acpi_pci_root *root; | 590 | struct acpi_pci_root *root = acpi_driver_data(device); |
547 | 591 | ||
548 | 592 | pci_bus_add_devices(root->bus); | |
549 | list_for_each_entry(root, &acpi_pci_roots, node) { | 593 | return 0; |
550 | if (root->device == device) { | ||
551 | pci_bus_add_devices(root->bus); | ||
552 | return 0; | ||
553 | } | ||
554 | } | ||
555 | return -ENODEV; | ||
556 | } | 594 | } |
557 | 595 | ||
558 | static int acpi_pci_root_remove(struct acpi_device *device, int type) | 596 | static int acpi_pci_root_remove(struct acpi_device *device, int type) |
559 | { | 597 | { |
560 | struct acpi_pci_root *root = NULL; | 598 | struct acpi_pci_root *root = acpi_driver_data(device); |
561 | |||
562 | |||
563 | if (!device || !acpi_driver_data(device)) | ||
564 | return -EINVAL; | ||
565 | |||
566 | root = acpi_driver_data(device); | ||
567 | 599 | ||
568 | kfree(root); | 600 | kfree(root); |
569 | |||
570 | return 0; | 601 | return 0; |
571 | } | 602 | } |
572 | 603 | ||
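
The pci_root.c portion above drops the global acpi_pci_roots list walk in acpi_pci_root_start()/acpi_pci_root_remove(): the root structure (now carrying plain segment/bus_nr fields instead of the old root->id) is stored in device->driver_data at add time and fetched back with acpi_driver_data(). A minimal user-space sketch of that pattern — all fake_* names and types are illustrative stand-ins, not the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for struct acpi_device / struct acpi_pci_root. */
    struct fake_device {
            void *driver_data;              /* per-device private pointer */
    };

    struct fake_root {
            struct fake_device *device;
            unsigned int segment;           /* was root->id.segment */
            unsigned int bus_nr;            /* was root->id.bus */
    };

    static int fake_root_add(struct fake_device *dev, unsigned int seg, unsigned int bus)
    {
            struct fake_root *root = calloc(1, sizeof(*root));

            if (!root)
                    return -1;
            root->device = dev;
            root->segment = seg & 0xFFFF;   /* same masking as the patch */
            root->bus_nr = bus & 0xFF;
            dev->driver_data = root;        /* no global list walk needed later */
            return 0;
    }

    static int fake_root_start(struct fake_device *dev)
    {
            struct fake_root *root = dev->driver_data;  /* acpi_driver_data() analogue */

            printf("starting root %04x:%02x\n", root->segment, root->bus_nr);
            return 0;
    }

    int main(void)
    {
            struct fake_device dev = { 0 };

            if (fake_root_add(&dev, 0, 0x80) == 0)
                    fake_root_start(&dev);
            free(dev.driver_data);
            return 0;
    }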
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 56665a63bf19..d74365d4a6e7 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -194,7 +194,7 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) | |||
194 | 194 | ||
195 | static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) | 195 | static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) |
196 | { | 196 | { |
197 | int result = 0, state; | 197 | int result = 0; |
198 | int found = 0; | 198 | int found = 0; |
199 | acpi_status status = AE_OK; | 199 | acpi_status status = AE_OK; |
200 | struct acpi_power_resource *resource = NULL; | 200 | struct acpi_power_resource *resource = NULL; |
@@ -236,18 +236,6 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) | |||
236 | if (ACPI_FAILURE(status)) | 236 | if (ACPI_FAILURE(status)) |
237 | return -ENODEV; | 237 | return -ENODEV; |
238 | 238 | ||
239 | if (!acpi_power_nocheck) { | ||
240 | /* | ||
241 | * If acpi_power_nocheck is set, it is unnecessary to check | ||
242 | * the power state after power transition. | ||
243 | */ | ||
244 | result = acpi_power_get_state(resource->device->handle, | ||
245 | &state); | ||
246 | if (result) | ||
247 | return result; | ||
248 | if (state != ACPI_POWER_RESOURCE_STATE_ON) | ||
249 | return -ENOEXEC; | ||
250 | } | ||
251 | /* Update the power resource's _device_ power state */ | 239 | /* Update the power resource's _device_ power state */ |
252 | resource->device->power.state = ACPI_STATE_D0; | 240 | resource->device->power.state = ACPI_STATE_D0; |
253 | 241 | ||
@@ -258,7 +246,7 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev) | |||
258 | 246 | ||
259 | static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) | 247 | static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) |
260 | { | 248 | { |
261 | int result = 0, state; | 249 | int result = 0; |
262 | acpi_status status = AE_OK; | 250 | acpi_status status = AE_OK; |
263 | struct acpi_power_resource *resource = NULL; | 251 | struct acpi_power_resource *resource = NULL; |
264 | struct list_head *node, *next; | 252 | struct list_head *node, *next; |
@@ -293,18 +281,6 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) | |||
293 | if (ACPI_FAILURE(status)) | 281 | if (ACPI_FAILURE(status)) |
294 | return -ENODEV; | 282 | return -ENODEV; |
295 | 283 | ||
296 | if (!acpi_power_nocheck) { | ||
297 | /* | ||
298 | * If acpi_power_nocheck is set, it is unnecessary to check | ||
299 | * the power state after power transition. | ||
300 | */ | ||
301 | result = acpi_power_get_state(handle, &state); | ||
302 | if (result) | ||
303 | return result; | ||
304 | if (state != ACPI_POWER_RESOURCE_STATE_OFF) | ||
305 | return -ENOEXEC; | ||
306 | } | ||
307 | |||
308 | /* Update the power resource's _device_ power state */ | 284 | /* Update the power resource's _device_ power state */ |
309 | resource->device->power.state = ACPI_STATE_D3; | 285 | resource->device->power.state = ACPI_STATE_D3; |
310 | 286 | ||
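
With the acpi_power_nocheck blocks removed above, acpi_power_on() and acpi_power_off_device() no longer re-read the resource state after the transition; they trust the status returned by the _ON/_OFF evaluation and only update the cached device power state. A hedged, stand-alone sketch of the simplified flow (evaluate_on_method() and the fake_* names are stand-ins for the AML evaluation, not real ACPI calls):

    #include <stdio.h>

    enum { D0, D3 };

    /* Stand-in for evaluating the _ON control method; 0 means success (AE_OK). */
    static int evaluate_on_method(void)
    {
            return 0;
    }

    struct fake_resource {
            int power_state;        /* cached device power state */
    };

    /* Simplified acpi_power_on(): no post-transition state re-check. */
    static int fake_power_on(struct fake_resource *res)
    {
            if (evaluate_on_method() != 0)
                    return -1;      /* -ENODEV in the driver */

            res->power_state = D0;  /* trust the method's status */
            return 0;
    }

    int main(void)
    {
            struct fake_resource res = { D3 };

            if (fake_power_on(&res) == 0)
                    printf("resource now in D%d\n", res.power_state == D0 ? 0 : 3);
            return 0;
    }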
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 23f0fb84f1c1..84e0f3c07442 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -89,7 +89,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr); | |||
89 | 89 | ||
90 | static const struct acpi_device_id processor_device_ids[] = { | 90 | static const struct acpi_device_id processor_device_ids[] = { |
91 | {ACPI_PROCESSOR_OBJECT_HID, 0}, | 91 | {ACPI_PROCESSOR_OBJECT_HID, 0}, |
92 | {ACPI_PROCESSOR_HID, 0}, | 92 | {"ACPI0007", 0}, |
93 | {"", 0}, | 93 | {"", 0}, |
94 | }; | 94 | }; |
95 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); | 95 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); |
@@ -596,7 +596,21 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
596 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 596 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
597 | "No bus mastering arbitration control\n")); | 597 | "No bus mastering arbitration control\n")); |
598 | 598 | ||
599 | if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) { | 599 | if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { |
600 | /* Declared with "Processor" statement; match ProcessorID */ | ||
601 | status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); | ||
602 | if (ACPI_FAILURE(status)) { | ||
603 | printk(KERN_ERR PREFIX "Evaluating processor object\n"); | ||
604 | return -ENODEV; | ||
605 | } | ||
606 | |||
607 | /* | ||
608 | * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. | ||
609 | * >>> 'acpi_get_processor_id(acpi_id, &id)' in | ||
610 | * arch/xxx/acpi.c | ||
611 | */ | ||
612 | pr->acpi_id = object.processor.proc_id; | ||
613 | } else { | ||
600 | /* | 614 | /* |
601 | * Declared with "Device" statement; match _UID. | 615 | * Declared with "Device" statement; match _UID. |
602 | * Note that we don't handle string _UIDs yet. | 616 | * Note that we don't handle string _UIDs yet. |
@@ -611,20 +625,6 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
611 | } | 625 | } |
612 | device_declaration = 1; | 626 | device_declaration = 1; |
613 | pr->acpi_id = value; | 627 | pr->acpi_id = value; |
614 | } else { | ||
615 | /* Declared with "Processor" statement; match ProcessorID */ | ||
616 | status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); | ||
617 | if (ACPI_FAILURE(status)) { | ||
618 | printk(KERN_ERR PREFIX "Evaluating processor object\n"); | ||
619 | return -ENODEV; | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. | ||
624 | * >>> 'acpi_get_processor_id(acpi_id, &id)' in | ||
625 | * arch/xxx/acpi.c | ||
626 | */ | ||
627 | pr->acpi_id = object.processor.proc_id; | ||
628 | } | 628 | } |
629 | cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id); | 629 | cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id); |
630 | 630 | ||
@@ -649,7 +649,16 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
649 | return -ENODEV; | 649 | return -ENODEV; |
650 | } | 650 | } |
651 | } | 651 | } |
652 | 652 | /* | |
653 | * On some boxes several processors use the same processor bus id. | ||
654 | * But they are located in different scopes. For example: | ||
655 | * \_SB.SCK0.CPU0 | ||
656 | * \_SB.SCK1.CPU0 | ||
657 | * Rename the processor device bus id. The new bus id is | ||
658 | * generated in the following format: | ||
659 | * CPU+CPU ID. | ||
660 | */ | ||
661 | sprintf(acpi_device_bid(device), "CPU%X", pr->id); | ||
653 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, | 662 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, |
654 | pr->acpi_id)); | 663 | pr->acpi_id)); |
655 | 664 | ||
@@ -731,6 +740,8 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device) | |||
731 | /* _PDC call should be done before doing anything else (if reqd.). */ | 740 | /* _PDC call should be done before doing anything else (if reqd.). */ |
732 | arch_acpi_processor_init_pdc(pr); | 741 | arch_acpi_processor_init_pdc(pr); |
733 | acpi_processor_set_pdc(pr); | 742 | acpi_processor_set_pdc(pr); |
743 | arch_acpi_processor_cleanup_pdc(pr); | ||
744 | |||
734 | #ifdef CONFIG_CPU_FREQ | 745 | #ifdef CONFIG_CPU_FREQ |
735 | acpi_processor_ppc_has_changed(pr); | 746 | acpi_processor_ppc_has_changed(pr); |
736 | #endif | 747 | #endif |
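
The renamed bus id above is produced with sprintf(acpi_device_bid(device), "CPU%X", pr->id), so two CPU0 objects sitting in different scopes end up with distinct names keyed on the logical CPU id, printed in hex. A small stand-alone check of the formatting (the buffer size and the sample ids are just examples):

    #include <stdio.h>

    int main(void)
    {
            char bid[8];
            unsigned int ids[] = { 0, 9, 10, 15 };

            for (int i = 0; i < 4; i++) {
                    /* same format string as the patch: "CPU" + hex CPU id */
                    snprintf(bid, sizeof(bid), "CPU%X", ids[i]);
                    printf("pr->id=%u -> bus id \"%s\"\n", ids[i], bid);
            }
            return 0;       /* prints CPU0, CPU9, CPUA, CPUF */
    }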
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 10a2d913635a..0efa59e7e3af 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -139,7 +139,7 @@ static void acpi_safe_halt(void) | |||
139 | * are affected too. We pick the most conservative approach: we assume | 139 | * are affected too. We pick the most conservative approach: we assume |
140 | * that the local APIC stops in both C2 and C3. | 140 | * that the local APIC stops in both C2 and C3. |
141 | */ | 141 | */ |
142 | static void acpi_timer_check_state(int state, struct acpi_processor *pr, | 142 | static void lapic_timer_check_state(int state, struct acpi_processor *pr, |
143 | struct acpi_processor_cx *cx) | 143 | struct acpi_processor_cx *cx) |
144 | { | 144 | { |
145 | struct acpi_processor_power *pwr = &pr->power; | 145 | struct acpi_processor_power *pwr = &pr->power; |
@@ -162,7 +162,7 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr, | |||
162 | pr->power.timer_broadcast_on_state = state; | 162 | pr->power.timer_broadcast_on_state = state; |
163 | } | 163 | } |
164 | 164 | ||
165 | static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) | 165 | static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) |
166 | { | 166 | { |
167 | unsigned long reason; | 167 | unsigned long reason; |
168 | 168 | ||
@@ -173,7 +173,7 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) | |||
173 | } | 173 | } |
174 | 174 | ||
175 | /* Power(C) State timer broadcast control */ | 175 | /* Power(C) State timer broadcast control */ |
176 | static void acpi_state_timer_broadcast(struct acpi_processor *pr, | 176 | static void lapic_timer_state_broadcast(struct acpi_processor *pr, |
177 | struct acpi_processor_cx *cx, | 177 | struct acpi_processor_cx *cx, |
178 | int broadcast) | 178 | int broadcast) |
179 | { | 179 | { |
@@ -190,10 +190,10 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr, | |||
190 | 190 | ||
191 | #else | 191 | #else |
192 | 192 | ||
193 | static void acpi_timer_check_state(int state, struct acpi_processor *pr, | 193 | static void lapic_timer_check_state(int state, struct acpi_processor *pr, |
194 | struct acpi_processor_cx *cstate) { } | 194 | struct acpi_processor_cx *cstate) { } |
195 | static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } | 195 | static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } |
196 | static void acpi_state_timer_broadcast(struct acpi_processor *pr, | 196 | static void lapic_timer_state_broadcast(struct acpi_processor *pr, |
197 | struct acpi_processor_cx *cx, | 197 | struct acpi_processor_cx *cx, |
198 | int broadcast) | 198 | int broadcast) |
199 | { | 199 | { |
@@ -515,7 +515,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) | |||
515 | static void acpi_processor_power_verify_c3(struct acpi_processor *pr, | 515 | static void acpi_processor_power_verify_c3(struct acpi_processor *pr, |
516 | struct acpi_processor_cx *cx) | 516 | struct acpi_processor_cx *cx) |
517 | { | 517 | { |
518 | static int bm_check_flag; | 518 | static int bm_check_flag = -1; |
519 | static int bm_control_flag = -1; | ||
519 | 520 | ||
520 | 521 | ||
521 | if (!cx->address) | 522 | if (!cx->address) |
@@ -545,12 +546,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, | |||
545 | } | 546 | } |
546 | 547 | ||
547 | /* All the logic here assumes flags.bm_check is same across all CPUs */ | 548 | /* All the logic here assumes flags.bm_check is same across all CPUs */ |
548 | if (!bm_check_flag) { | 549 | if (bm_check_flag == -1) { |
549 | /* Determine whether bm_check is needed based on CPU */ | 550 | /* Determine whether bm_check is needed based on CPU */ |
550 | acpi_processor_power_init_bm_check(&(pr->flags), pr->id); | 551 | acpi_processor_power_init_bm_check(&(pr->flags), pr->id); |
551 | bm_check_flag = pr->flags.bm_check; | 552 | bm_check_flag = pr->flags.bm_check; |
553 | bm_control_flag = pr->flags.bm_control; | ||
552 | } else { | 554 | } else { |
553 | pr->flags.bm_check = bm_check_flag; | 555 | pr->flags.bm_check = bm_check_flag; |
556 | pr->flags.bm_control = bm_control_flag; | ||
554 | } | 557 | } |
555 | 558 | ||
556 | if (pr->flags.bm_check) { | 559 | if (pr->flags.bm_check) { |
@@ -614,29 +617,25 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) | |||
614 | switch (cx->type) { | 617 | switch (cx->type) { |
615 | case ACPI_STATE_C1: | 618 | case ACPI_STATE_C1: |
616 | cx->valid = 1; | 619 | cx->valid = 1; |
617 | acpi_timer_check_state(i, pr, cx); | ||
618 | break; | 620 | break; |
619 | 621 | ||
620 | case ACPI_STATE_C2: | 622 | case ACPI_STATE_C2: |
621 | acpi_processor_power_verify_c2(cx); | 623 | acpi_processor_power_verify_c2(cx); |
622 | if (cx->valid) | ||
623 | acpi_timer_check_state(i, pr, cx); | ||
624 | break; | 624 | break; |
625 | 625 | ||
626 | case ACPI_STATE_C3: | 626 | case ACPI_STATE_C3: |
627 | acpi_processor_power_verify_c3(pr, cx); | 627 | acpi_processor_power_verify_c3(pr, cx); |
628 | if (cx->valid) | ||
629 | acpi_timer_check_state(i, pr, cx); | ||
630 | break; | 628 | break; |
631 | } | 629 | } |
632 | if (cx->valid) | 630 | if (!cx->valid) |
633 | tsc_check_state(cx->type); | 631 | continue; |
634 | 632 | ||
635 | if (cx->valid) | 633 | lapic_timer_check_state(i, pr, cx); |
636 | working++; | 634 | tsc_check_state(cx->type); |
635 | working++; | ||
637 | } | 636 | } |
638 | 637 | ||
639 | acpi_propagate_timer_broadcast(pr); | 638 | lapic_timer_propagate_broadcast(pr); |
640 | 639 | ||
641 | return (working); | 640 | return (working); |
642 | } | 641 | } |
@@ -839,7 +838,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
839 | return 0; | 838 | return 0; |
840 | } | 839 | } |
841 | 840 | ||
842 | acpi_state_timer_broadcast(pr, cx, 1); | 841 | lapic_timer_state_broadcast(pr, cx, 1); |
843 | kt1 = ktime_get_real(); | 842 | kt1 = ktime_get_real(); |
844 | acpi_idle_do_entry(cx); | 843 | acpi_idle_do_entry(cx); |
845 | kt2 = ktime_get_real(); | 844 | kt2 = ktime_get_real(); |
@@ -847,7 +846,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
847 | 846 | ||
848 | local_irq_enable(); | 847 | local_irq_enable(); |
849 | cx->usage++; | 848 | cx->usage++; |
850 | acpi_state_timer_broadcast(pr, cx, 0); | 849 | lapic_timer_state_broadcast(pr, cx, 0); |
851 | 850 | ||
852 | return idle_time; | 851 | return idle_time; |
853 | } | 852 | } |
@@ -892,7 +891,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
892 | * Must be done before busmaster disable as we might need to | 891 | * Must be done before busmaster disable as we might need to |
893 | * access HPET ! | 892 | * access HPET ! |
894 | */ | 893 | */ |
895 | acpi_state_timer_broadcast(pr, cx, 1); | 894 | lapic_timer_state_broadcast(pr, cx, 1); |
896 | 895 | ||
897 | if (cx->type == ACPI_STATE_C3) | 896 | if (cx->type == ACPI_STATE_C3) |
898 | ACPI_FLUSH_CPU_CACHE(); | 897 | ACPI_FLUSH_CPU_CACHE(); |
@@ -914,7 +913,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
914 | 913 | ||
915 | cx->usage++; | 914 | cx->usage++; |
916 | 915 | ||
917 | acpi_state_timer_broadcast(pr, cx, 0); | 916 | lapic_timer_state_broadcast(pr, cx, 0); |
918 | cx->time += sleep_ticks; | 917 | cx->time += sleep_ticks; |
919 | return idle_time; | 918 | return idle_time; |
920 | } | 919 | } |
@@ -981,7 +980,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
981 | * Must be done before busmaster disable as we might need to | 980 | * Must be done before busmaster disable as we might need to |
982 | * access HPET ! | 981 | * access HPET ! |
983 | */ | 982 | */ |
984 | acpi_state_timer_broadcast(pr, cx, 1); | 983 | lapic_timer_state_broadcast(pr, cx, 1); |
985 | 984 | ||
986 | kt1 = ktime_get_real(); | 985 | kt1 = ktime_get_real(); |
987 | /* | 986 | /* |
@@ -1026,7 +1025,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
1026 | 1025 | ||
1027 | cx->usage++; | 1026 | cx->usage++; |
1028 | 1027 | ||
1029 | acpi_state_timer_broadcast(pr, cx, 0); | 1028 | lapic_timer_state_broadcast(pr, cx, 0); |
1030 | cx->time += sleep_ticks; | 1029 | cx->time += sleep_ticks; |
1031 | return idle_time; | 1030 | return idle_time; |
1032 | } | 1031 | } |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 8ff510b91d88..781435d7e369 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -95,7 +95,7 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha | |||
95 | } | 95 | } |
96 | static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); | 96 | static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); |
97 | 97 | ||
98 | static int acpi_bus_hot_remove_device(void *context) | 98 | static void acpi_bus_hot_remove_device(void *context) |
99 | { | 99 | { |
100 | struct acpi_device *device; | 100 | struct acpi_device *device; |
101 | acpi_handle handle = context; | 101 | acpi_handle handle = context; |
@@ -104,10 +104,10 @@ static int acpi_bus_hot_remove_device(void *context) | |||
104 | acpi_status status = AE_OK; | 104 | acpi_status status = AE_OK; |
105 | 105 | ||
106 | if (acpi_bus_get_device(handle, &device)) | 106 | if (acpi_bus_get_device(handle, &device)) |
107 | return 0; | 107 | return; |
108 | 108 | ||
109 | if (!device) | 109 | if (!device) |
110 | return 0; | 110 | return; |
111 | 111 | ||
112 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 112 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
113 | "Hot-removing device %s...\n", dev_name(&device->dev))); | 113 | "Hot-removing device %s...\n", dev_name(&device->dev))); |
@@ -115,7 +115,7 @@ static int acpi_bus_hot_remove_device(void *context) | |||
115 | if (acpi_bus_trim(device, 1)) { | 115 | if (acpi_bus_trim(device, 1)) { |
116 | printk(KERN_ERR PREFIX | 116 | printk(KERN_ERR PREFIX |
117 | "Removing device failed\n"); | 117 | "Removing device failed\n"); |
118 | return -1; | 118 | return; |
119 | } | 119 | } |
120 | 120 | ||
121 | /* power off device */ | 121 | /* power off device */ |
@@ -142,9 +142,10 @@ static int acpi_bus_hot_remove_device(void *context) | |||
142 | */ | 142 | */ |
143 | status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); | 143 | status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); |
144 | if (ACPI_FAILURE(status)) | 144 | if (ACPI_FAILURE(status)) |
145 | return -ENODEV; | 145 | printk(KERN_WARNING PREFIX |
146 | "Eject device failed\n"); | ||
146 | 147 | ||
147 | return 0; | 148 | return; |
148 | } | 149 | } |
149 | 150 | ||
150 | static ssize_t | 151 | static ssize_t |
@@ -155,7 +156,6 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, | |||
155 | acpi_status status; | 156 | acpi_status status; |
156 | acpi_object_type type = 0; | 157 | acpi_object_type type = 0; |
157 | struct acpi_device *acpi_device = to_acpi_device(d); | 158 | struct acpi_device *acpi_device = to_acpi_device(d); |
158 | struct task_struct *task; | ||
159 | 159 | ||
160 | if ((!count) || (buf[0] != '1')) { | 160 | if ((!count) || (buf[0] != '1')) { |
161 | return -EINVAL; | 161 | return -EINVAL; |
@@ -172,11 +172,7 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, | |||
172 | goto err; | 172 | goto err; |
173 | } | 173 | } |
174 | 174 | ||
175 | /* remove the device in another thread to fix the deadlock issue */ | 175 | acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle); |
176 | task = kthread_run(acpi_bus_hot_remove_device, | ||
177 | acpi_device->handle, "acpi_hot_remove_device"); | ||
178 | if (IS_ERR(task)) | ||
179 | ret = PTR_ERR(task); | ||
180 | err: | 176 | err: |
181 | return ret; | 177 | return ret; |
182 | } | 178 | } |
@@ -198,12 +194,12 @@ acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *b | |||
198 | int result; | 194 | int result; |
199 | 195 | ||
200 | result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); | 196 | result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); |
201 | if(result) | 197 | if (result) |
202 | goto end; | 198 | goto end; |
203 | 199 | ||
204 | result = sprintf(buf, "%s\n", (char*)path.pointer); | 200 | result = sprintf(buf, "%s\n", (char*)path.pointer); |
205 | kfree(path.pointer); | 201 | kfree(path.pointer); |
206 | end: | 202 | end: |
207 | return result; | 203 | return result; |
208 | } | 204 | } |
209 | static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); | 205 | static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); |
@@ -217,21 +213,21 @@ static int acpi_device_setup_files(struct acpi_device *dev) | |||
217 | /* | 213 | /* |
218 | * Devices gotten from FADT don't have a "path" attribute | 214 | * Devices gotten from FADT don't have a "path" attribute |
219 | */ | 215 | */ |
220 | if(dev->handle) { | 216 | if (dev->handle) { |
221 | result = device_create_file(&dev->dev, &dev_attr_path); | 217 | result = device_create_file(&dev->dev, &dev_attr_path); |
222 | if(result) | 218 | if (result) |
223 | goto end; | 219 | goto end; |
224 | } | 220 | } |
225 | 221 | ||
226 | if(dev->flags.hardware_id) { | 222 | if (dev->flags.hardware_id) { |
227 | result = device_create_file(&dev->dev, &dev_attr_hid); | 223 | result = device_create_file(&dev->dev, &dev_attr_hid); |
228 | if(result) | 224 | if (result) |
229 | goto end; | 225 | goto end; |
230 | } | 226 | } |
231 | 227 | ||
232 | if (dev->flags.hardware_id || dev->flags.compatible_ids){ | 228 | if (dev->flags.hardware_id || dev->flags.compatible_ids) { |
233 | result = device_create_file(&dev->dev, &dev_attr_modalias); | 229 | result = device_create_file(&dev->dev, &dev_attr_modalias); |
234 | if(result) | 230 | if (result) |
235 | goto end; | 231 | goto end; |
236 | } | 232 | } |
237 | 233 | ||
@@ -242,7 +238,7 @@ static int acpi_device_setup_files(struct acpi_device *dev) | |||
242 | status = acpi_get_handle(dev->handle, "_EJ0", &temp); | 238 | status = acpi_get_handle(dev->handle, "_EJ0", &temp); |
243 | if (ACPI_SUCCESS(status)) | 239 | if (ACPI_SUCCESS(status)) |
244 | result = device_create_file(&dev->dev, &dev_attr_eject); | 240 | result = device_create_file(&dev->dev, &dev_attr_eject); |
245 | end: | 241 | end: |
246 | return result; | 242 | return result; |
247 | } | 243 | } |
248 | 244 | ||
@@ -262,9 +258,9 @@ static void acpi_device_remove_files(struct acpi_device *dev) | |||
262 | if (dev->flags.hardware_id || dev->flags.compatible_ids) | 258 | if (dev->flags.hardware_id || dev->flags.compatible_ids) |
263 | device_remove_file(&dev->dev, &dev_attr_modalias); | 259 | device_remove_file(&dev->dev, &dev_attr_modalias); |
264 | 260 | ||
265 | if(dev->flags.hardware_id) | 261 | if (dev->flags.hardware_id) |
266 | device_remove_file(&dev->dev, &dev_attr_hid); | 262 | device_remove_file(&dev->dev, &dev_attr_hid); |
267 | if(dev->handle) | 263 | if (dev->handle) |
268 | device_remove_file(&dev->dev, &dev_attr_path); | 264 | device_remove_file(&dev->dev, &dev_attr_path); |
269 | } | 265 | } |
270 | /* -------------------------------------------------------------------------- | 266 | /* -------------------------------------------------------------------------- |
@@ -512,7 +508,7 @@ static int acpi_device_register(struct acpi_device *device, | |||
512 | break; | 508 | break; |
513 | } | 509 | } |
514 | } | 510 | } |
515 | if(!found) { | 511 | if (!found) { |
516 | acpi_device_bus_id = new_bus_id; | 512 | acpi_device_bus_id = new_bus_id; |
517 | strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device"); | 513 | strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device"); |
518 | acpi_device_bus_id->instance_no = 0; | 514 | acpi_device_bus_id->instance_no = 0; |
@@ -530,22 +526,21 @@ static int acpi_device_register(struct acpi_device *device, | |||
530 | if (device->parent) | 526 | if (device->parent) |
531 | device->dev.parent = &parent->dev; | 527 | device->dev.parent = &parent->dev; |
532 | device->dev.bus = &acpi_bus_type; | 528 | device->dev.bus = &acpi_bus_type; |
533 | device_initialize(&device->dev); | ||
534 | device->dev.release = &acpi_device_release; | 529 | device->dev.release = &acpi_device_release; |
535 | result = device_add(&device->dev); | 530 | result = device_register(&device->dev); |
536 | if(result) { | 531 | if (result) { |
537 | dev_err(&device->dev, "Error adding device\n"); | 532 | dev_err(&device->dev, "Error registering device\n"); |
538 | goto end; | 533 | goto end; |
539 | } | 534 | } |
540 | 535 | ||
541 | result = acpi_device_setup_files(device); | 536 | result = acpi_device_setup_files(device); |
542 | if(result) | 537 | if (result) |
543 | printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", | 538 | printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", |
544 | dev_name(&device->dev)); | 539 | dev_name(&device->dev)); |
545 | 540 | ||
546 | device->removal_type = ACPI_BUS_REMOVAL_NORMAL; | 541 | device->removal_type = ACPI_BUS_REMOVAL_NORMAL; |
547 | return 0; | 542 | return 0; |
548 | end: | 543 | end: |
549 | mutex_lock(&acpi_device_lock); | 544 | mutex_lock(&acpi_device_lock); |
550 | if (device->parent) | 545 | if (device->parent) |
551 | list_del(&device->node); | 546 | list_del(&device->node); |
@@ -577,7 +572,7 @@ static void acpi_device_unregister(struct acpi_device *device, int type) | |||
577 | * @device: the device to add and initialize | 572 | * @device: the device to add and initialize |
578 | * @driver: driver for the device | 573 | * @driver: driver for the device |
579 | * | 574 | * |
580 | * Used to initialize a device via its device driver. Called whenever a | 575 | * Used to initialize a device via its device driver. Called whenever a |
581 | * driver is bound to a device. Invokes the driver's add() ops. | 576 | * driver is bound to a device. Invokes the driver's add() ops. |
582 | */ | 577 | */ |
583 | static int | 578 | static int |
@@ -585,7 +580,6 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver) | |||
585 | { | 580 | { |
586 | int result = 0; | 581 | int result = 0; |
587 | 582 | ||
588 | |||
589 | if (!device || !driver) | 583 | if (!device || !driver) |
590 | return -EINVAL; | 584 | return -EINVAL; |
591 | 585 | ||
@@ -802,7 +796,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) | |||
802 | if (!acpi_match_device_ids(device, button_device_ids)) | 796 | if (!acpi_match_device_ids(device, button_device_ids)) |
803 | device->wakeup.flags.run_wake = 1; | 797 | device->wakeup.flags.run_wake = 1; |
804 | 798 | ||
805 | end: | 799 | end: |
806 | if (ACPI_FAILURE(status)) | 800 | if (ACPI_FAILURE(status)) |
807 | device->flags.wake_capable = 0; | 801 | device->flags.wake_capable = 0; |
808 | return 0; | 802 | return 0; |
@@ -1070,7 +1064,7 @@ static void acpi_device_set_id(struct acpi_device *device, | |||
1070 | break; | 1064 | break; |
1071 | } | 1065 | } |
1072 | 1066 | ||
1073 | /* | 1067 | /* |
1074 | * \_SB | 1068 | * \_SB |
1075 | * ---- | 1069 | * ---- |
1076 | * Fix for the system root bus device -- the only root-level device. | 1070 | * Fix for the system root bus device -- the only root-level device. |
@@ -1320,7 +1314,7 @@ acpi_add_single_object(struct acpi_device **child, | |||
1320 | device->parent->ops.bind(device); | 1314 | device->parent->ops.bind(device); |
1321 | } | 1315 | } |
1322 | 1316 | ||
1323 | end: | 1317 | end: |
1324 | if (!result) | 1318 | if (!result) |
1325 | *child = device; | 1319 | *child = device; |
1326 | else { | 1320 | else { |
@@ -1464,7 +1458,6 @@ acpi_bus_add(struct acpi_device **child, | |||
1464 | 1458 | ||
1465 | return result; | 1459 | return result; |
1466 | } | 1460 | } |
1467 | |||
1468 | EXPORT_SYMBOL(acpi_bus_add); | 1461 | EXPORT_SYMBOL(acpi_bus_add); |
1469 | 1462 | ||
1470 | int acpi_bus_start(struct acpi_device *device) | 1463 | int acpi_bus_start(struct acpi_device *device) |
@@ -1484,7 +1477,6 @@ int acpi_bus_start(struct acpi_device *device) | |||
1484 | } | 1477 | } |
1485 | return result; | 1478 | return result; |
1486 | } | 1479 | } |
1487 | |||
1488 | EXPORT_SYMBOL(acpi_bus_start); | 1480 | EXPORT_SYMBOL(acpi_bus_start); |
1489 | 1481 | ||
1490 | int acpi_bus_trim(struct acpi_device *start, int rmdevice) | 1482 | int acpi_bus_trim(struct acpi_device *start, int rmdevice) |
@@ -1542,7 +1534,6 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice) | |||
1542 | } | 1534 | } |
1543 | EXPORT_SYMBOL_GPL(acpi_bus_trim); | 1535 | EXPORT_SYMBOL_GPL(acpi_bus_trim); |
1544 | 1536 | ||
1545 | |||
1546 | static int acpi_bus_scan_fixed(struct acpi_device *root) | 1537 | static int acpi_bus_scan_fixed(struct acpi_device *root) |
1547 | { | 1538 | { |
1548 | int result = 0; | 1539 | int result = 0; |
@@ -1610,6 +1601,6 @@ int __init acpi_scan_init(void) | |||
1610 | if (result) | 1601 | if (result) |
1611 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); | 1602 | acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); |
1612 | 1603 | ||
1613 | Done: | 1604 | Done: |
1614 | return result; | 1605 | return result; |
1615 | } | 1606 | } |
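
The scan.c eject path above no longer spawns a kthread and propagates its return value; acpi_bus_hot_remove_device() is queued through acpi_os_hotplug_execute() as a fire-and-forget void callback, and failures are only reported via printk. A minimal sketch of that callback shape — fake_hotplug_execute() simply calls the function inline, whereas the kernel defers it to a hotplug work context:

    #include <stdio.h>

    typedef void (*hotplug_cb)(void *context);

    /* Stand-in for acpi_os_hotplug_execute(): the real one queues the
     * callback for deferred execution instead of calling it directly. */
    static int fake_hotplug_execute(hotplug_cb fn, void *context)
    {
            fn(context);
            return 0;
    }

    /* void handler: errors are logged, not returned to the caller */
    static void fake_hot_remove_device(void *context)
    {
            const char *name = context;

            if (!name) {
                    printf("warning: eject of unknown device failed\n");
                    return;
            }
            printf("hot-removing device %s\n", name);
    }

    int main(void)
    {
            fake_hotplug_execute(fake_hot_remove_device, "PCI0");
            return 0;
    }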
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 1bdfb37377e3..8851315ce858 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -76,6 +76,7 @@ MODULE_LICENSE("GPL"); | |||
76 | static int brightness_switch_enabled = 1; | 76 | static int brightness_switch_enabled = 1; |
77 | module_param(brightness_switch_enabled, bool, 0644); | 77 | module_param(brightness_switch_enabled, bool, 0644); |
78 | 78 | ||
79 | static int register_count = 0; | ||
79 | static int acpi_video_bus_add(struct acpi_device *device); | 80 | static int acpi_video_bus_add(struct acpi_device *device); |
80 | static int acpi_video_bus_remove(struct acpi_device *device, int type); | 81 | static int acpi_video_bus_remove(struct acpi_device *device, int type); |
81 | static int acpi_video_resume(struct acpi_device *device); | 82 | static int acpi_video_resume(struct acpi_device *device); |
@@ -586,6 +587,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
586 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"), | 587 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"), |
587 | }, | 588 | }, |
588 | }, | 589 | }, |
590 | { | ||
591 | .callback = video_set_bqc_offset, | ||
592 | .ident = "Acer Aspire 7720", | ||
593 | .matches = { | ||
594 | DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), | ||
595 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), | ||
596 | }, | ||
597 | }, | ||
589 | {} | 598 | {} |
590 | }; | 599 | }; |
591 | 600 | ||
@@ -976,6 +985,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
976 | device->backlight->props.max_brightness = device->brightness->count-3; | 985 | device->backlight->props.max_brightness = device->brightness->count-3; |
977 | kfree(name); | 986 | kfree(name); |
978 | 987 | ||
988 | result = sysfs_create_link(&device->backlight->dev.kobj, | ||
989 | &device->dev->dev.kobj, "device"); | ||
990 | if (result) | ||
991 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
992 | |||
979 | device->cdev = thermal_cooling_device_register("LCD", | 993 | device->cdev = thermal_cooling_device_register("LCD", |
980 | device->dev, &video_cooling_ops); | 994 | device->dev, &video_cooling_ops); |
981 | if (IS_ERR(device->cdev)) | 995 | if (IS_ERR(device->cdev)) |
@@ -1054,15 +1068,15 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video) | |||
1054 | static int acpi_video_bus_check(struct acpi_video_bus *video) | 1068 | static int acpi_video_bus_check(struct acpi_video_bus *video) |
1055 | { | 1069 | { |
1056 | acpi_status status = -ENOENT; | 1070 | acpi_status status = -ENOENT; |
1057 | struct device *dev; | 1071 | struct pci_dev *dev; |
1058 | 1072 | ||
1059 | if (!video) | 1073 | if (!video) |
1060 | return -EINVAL; | 1074 | return -EINVAL; |
1061 | 1075 | ||
1062 | dev = acpi_get_physical_pci_device(video->device->handle); | 1076 | dev = acpi_get_pci_dev(video->device->handle); |
1063 | if (!dev) | 1077 | if (!dev) |
1064 | return -ENODEV; | 1078 | return -ENODEV; |
1065 | put_device(dev); | 1079 | pci_dev_put(dev); |
1066 | 1080 | ||
1067 | /* Since there is no HID, CID and so on for VGA driver, we have | 1081 | /* Since there is no HID, CID and so on for VGA driver, we have |
1068 | * to check well known required nodes. | 1082 | * to check well known required nodes. |
@@ -1990,6 +2004,7 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) | |||
1990 | status = acpi_remove_notify_handler(device->dev->handle, | 2004 | status = acpi_remove_notify_handler(device->dev->handle, |
1991 | ACPI_DEVICE_NOTIFY, | 2005 | ACPI_DEVICE_NOTIFY, |
1992 | acpi_video_device_notify); | 2006 | acpi_video_device_notify); |
2007 | sysfs_remove_link(&device->backlight->dev.kobj, "device"); | ||
1993 | backlight_device_unregister(device->backlight); | 2008 | backlight_device_unregister(device->backlight); |
1994 | if (device->cdev) { | 2009 | if (device->cdev) { |
1995 | sysfs_remove_link(&device->dev->dev.kobj, | 2010 | sysfs_remove_link(&device->dev->dev.kobj, |
@@ -2318,6 +2333,13 @@ static int __init intel_opregion_present(void) | |||
2318 | int acpi_video_register(void) | 2333 | int acpi_video_register(void) |
2319 | { | 2334 | { |
2320 | int result = 0; | 2335 | int result = 0; |
2336 | if (register_count) { | ||
2337 | /* | ||
2338 | * if acpi_video_register has already been called, | ||
2339 | * don't register the acpi_video_bus again and return no error. | ||
2340 | */ | ||
2341 | return 0; | ||
2342 | } | ||
2321 | 2343 | ||
2322 | acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir); | 2344 | acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir); |
2323 | if (!acpi_video_dir) | 2345 | if (!acpi_video_dir) |
@@ -2329,10 +2351,35 @@ int acpi_video_register(void) | |||
2329 | return -ENODEV; | 2351 | return -ENODEV; |
2330 | } | 2352 | } |
2331 | 2353 | ||
2354 | /* | ||
2355 | * When the acpi_video_bus is loaded successfully, increase | ||
2356 | * the reference counter. | ||
2357 | */ | ||
2358 | register_count = 1; | ||
2359 | |||
2332 | return 0; | 2360 | return 0; |
2333 | } | 2361 | } |
2334 | EXPORT_SYMBOL(acpi_video_register); | 2362 | EXPORT_SYMBOL(acpi_video_register); |
2335 | 2363 | ||
2364 | void acpi_video_unregister(void) | ||
2365 | { | ||
2366 | if (!register_count) { | ||
2367 | /* | ||
2368 | * If the acpi video bus is already unloaded, don't | ||
2369 | * unload it again and return directly. | ||
2370 | */ | ||
2371 | return; | ||
2372 | } | ||
2373 | acpi_bus_unregister_driver(&acpi_video_bus); | ||
2374 | |||
2375 | remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir); | ||
2376 | |||
2377 | register_count = 0; | ||
2378 | |||
2379 | return; | ||
2380 | } | ||
2381 | EXPORT_SYMBOL(acpi_video_unregister); | ||
2382 | |||
2336 | /* | 2383 | /* |
2337 | * This is kind of nasty. Hardware using Intel chipsets may require | 2384 | * This is kind of nasty. Hardware using Intel chipsets may require |
2338 | * the video opregion code to be run first in order to initialise | 2385 | * the video opregion code to be run first in order to initialise |
@@ -2350,16 +2397,12 @@ static int __init acpi_video_init(void) | |||
2350 | return acpi_video_register(); | 2397 | return acpi_video_register(); |
2351 | } | 2398 | } |
2352 | 2399 | ||
2353 | void acpi_video_exit(void) | 2400 | static void __exit acpi_video_exit(void) |
2354 | { | 2401 | { |
2355 | 2402 | acpi_video_unregister(); | |
2356 | acpi_bus_unregister_driver(&acpi_video_bus); | ||
2357 | |||
2358 | remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir); | ||
2359 | 2403 | ||
2360 | return; | 2404 | return; |
2361 | } | 2405 | } |
2362 | EXPORT_SYMBOL(acpi_video_exit); | ||
2363 | 2406 | ||
2364 | module_init(acpi_video_init); | 2407 | module_init(acpi_video_init); |
2365 | module_exit(acpi_video_exit); | 2408 | module_exit(acpi_video_exit); |
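
The register_count flag added above makes acpi_video_register()/acpi_video_unregister() safe to call more than once: a second register is a silent success and unregister only tears things down if something was actually registered. A stand-alone sketch of the guard (fake_* names are illustrative):

    #include <stdio.h>

    static int register_count;

    static int fake_video_register(void)
    {
            if (register_count)
                    return 0;       /* already registered: succeed silently */

            printf("registering video bus driver\n");
            register_count = 1;
            return 0;
    }

    static void fake_video_unregister(void)
    {
            if (!register_count)
                    return;         /* nothing to undo */

            printf("unregistering video bus driver\n");
            register_count = 0;
    }

    int main(void)
    {
            fake_video_register();
            fake_video_register();   /* no-op */
            fake_video_unregister();
            fake_video_unregister(); /* no-op */
            return 0;
    }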
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 09737275e25f..7cd2b63435ea 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * assigned | 10 | * assigned |
11 | * | 11 | * |
12 | * After PCI devices are glued with ACPI devices | 12 | * After PCI devices are glued with ACPI devices |
13 | * acpi_get_physical_pci_device() can be called to identify ACPI graphics | 13 | * acpi_get_pci_dev() can be called to identify ACPI graphics |
14 | * devices for which a real graphics card is plugged in | 14 | * devices for which a real graphics card is plugged in |
15 | * | 15 | * |
16 | * Now acpi_video_get_capabilities() can be called to check which | 16 | * Now acpi_video_get_capabilities() can be called to check which |
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | #include <linux/acpi.h> | 37 | #include <linux/acpi.h> |
38 | #include <linux/dmi.h> | 38 | #include <linux/dmi.h> |
39 | #include <linux/pci.h> | ||
39 | 40 | ||
40 | ACPI_MODULE_NAME("video"); | 41 | ACPI_MODULE_NAME("video"); |
41 | #define _COMPONENT ACPI_VIDEO_COMPONENT | 42 | #define _COMPONENT ACPI_VIDEO_COMPONENT |
@@ -109,7 +110,7 @@ static acpi_status | |||
109 | find_video(acpi_handle handle, u32 lvl, void *context, void **rv) | 110 | find_video(acpi_handle handle, u32 lvl, void *context, void **rv) |
110 | { | 111 | { |
111 | long *cap = context; | 112 | long *cap = context; |
112 | struct device *dev; | 113 | struct pci_dev *dev; |
113 | struct acpi_device *acpi_dev; | 114 | struct acpi_device *acpi_dev; |
114 | 115 | ||
115 | const struct acpi_device_id video_ids[] = { | 116 | const struct acpi_device_id video_ids[] = { |
@@ -120,10 +121,10 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
120 | return AE_OK; | 121 | return AE_OK; |
121 | 122 | ||
122 | if (!acpi_match_device_ids(acpi_dev, video_ids)) { | 123 | if (!acpi_match_device_ids(acpi_dev, video_ids)) { |
123 | dev = acpi_get_physical_pci_device(handle); | 124 | dev = acpi_get_pci_dev(handle); |
124 | if (!dev) | 125 | if (!dev) |
125 | return AE_OK; | 126 | return AE_OK; |
126 | put_device(dev); | 127 | pci_dev_put(dev); |
127 | *cap |= acpi_is_video_device(acpi_dev); | 128 | *cap |= acpi_is_video_device(acpi_dev); |
128 | } | 129 | } |
129 | return AE_OK; | 130 | return AE_OK; |
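
Both video.c and video_detect.c now take a struct pci_dev reference via acpi_get_pci_dev() and drop it with pci_dev_put() rather than put_device(); in both callers the device is only needed as an existence test, so the reference is released immediately. A tiny refcount sketch of that acquire/release discipline — the fake_* types and the refcount field are illustrative, not the kernel's implementation:

    #include <stdio.h>

    struct fake_pci_dev {
            int refcount;
    };

    static struct fake_pci_dev gpu = { .refcount = 1 };

    /* acpi_get_pci_dev() analogue: returns a referenced device or NULL */
    static struct fake_pci_dev *fake_get_pci_dev(int present)
    {
            if (!present)
                    return NULL;
            gpu.refcount++;
            return &gpu;
    }

    /* pci_dev_put() analogue: drop the reference taken above */
    static void fake_pci_dev_put(struct fake_pci_dev *dev)
    {
            if (dev)
                    dev->refcount--;
    }

    int main(void)
    {
            struct fake_pci_dev *dev = fake_get_pci_dev(1);

            if (!dev)
                    return 1;       /* no real graphics card behind this handle */
            fake_pci_dev_put(dev);  /* only needed it as an existence check */
            printf("refcount back to %d\n", dev->refcount);
            return 0;
    }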
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 2aa1908e5ce0..b17c57f85032 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -679,6 +679,14 @@ config PATA_PLATFORM | |||
679 | 679 | ||
680 | If unsure, say N. | 680 | If unsure, say N. |
681 | 681 | ||
682 | config PATA_AT91 | ||
683 | tristate "PATA support for AT91SAM9260" | ||
684 | depends on ARM && ARCH_AT91 | ||
685 | help | ||
686 | This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. | ||
687 | |||
688 | If unsure, say N. | ||
689 | |||
682 | config PATA_OF_PLATFORM | 690 | config PATA_OF_PLATFORM |
683 | tristate "OpenFirmware platform device PATA support" | 691 | tristate "OpenFirmware platform device PATA support" |
684 | depends on PATA_PLATFORM && PPC_OF | 692 | depends on PATA_PLATFORM && PPC_OF |
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 1558059874f0..38906f9bbb4e 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile | |||
@@ -72,6 +72,7 @@ obj-$(CONFIG_PATA_SCH) += pata_sch.o | |||
72 | obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o | 72 | obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o |
73 | obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o | 73 | obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o |
74 | obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o | 74 | obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o |
75 | obj-$(CONFIG_PATA_AT91) += pata_at91.o | ||
75 | obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o | 76 | obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o |
76 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o | 77 | obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o |
77 | # Should be last but two libata driver | 78 | # Should be last but two libata driver |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ca4d208ddf3b..045a486a09ea 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -125,19 +125,19 @@ MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link spe | |||
125 | 125 | ||
126 | static int atapi_enabled = 1; | 126 | static int atapi_enabled = 1; |
127 | module_param(atapi_enabled, int, 0444); | 127 | module_param(atapi_enabled, int, 0444); |
128 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); | 128 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])"); |
129 | 129 | ||
130 | static int atapi_dmadir = 0; | 130 | static int atapi_dmadir = 0; |
131 | module_param(atapi_dmadir, int, 0444); | 131 | module_param(atapi_dmadir, int, 0444); |
132 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); | 132 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)"); |
133 | 133 | ||
134 | int atapi_passthru16 = 1; | 134 | int atapi_passthru16 = 1; |
135 | module_param(atapi_passthru16, int, 0444); | 135 | module_param(atapi_passthru16, int, 0444); |
136 | MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); | 136 | MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])"); |
137 | 137 | ||
138 | int libata_fua = 0; | 138 | int libata_fua = 0; |
139 | module_param_named(fua, libata_fua, int, 0444); | 139 | module_param_named(fua, libata_fua, int, 0444); |
140 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); | 140 | MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)"); |
141 | 141 | ||
142 | static int ata_ignore_hpa; | 142 | static int ata_ignore_hpa; |
143 | module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); | 143 | module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); |
@@ -153,11 +153,11 @@ MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); | |||
153 | 153 | ||
154 | int libata_noacpi = 0; | 154 | int libata_noacpi = 0; |
155 | module_param_named(noacpi, libata_noacpi, int, 0444); | 155 | module_param_named(noacpi, libata_noacpi, int, 0444); |
156 | MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set"); | 156 | MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)"); |
157 | 157 | ||
158 | int libata_allow_tpm = 0; | 158 | int libata_allow_tpm = 0; |
159 | module_param_named(allow_tpm, libata_allow_tpm, int, 0444); | 159 | module_param_named(allow_tpm, libata_allow_tpm, int, 0444); |
160 | MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands"); | 160 | MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); |
161 | 161 | ||
162 | MODULE_AUTHOR("Jeff Garzik"); | 162 | MODULE_AUTHOR("Jeff Garzik"); |
163 | MODULE_DESCRIPTION("Library module for ATA devices"); | 163 | MODULE_DESCRIPTION("Library module for ATA devices"); |
@@ -1993,11 +1993,17 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) | |||
1993 | * Check if the current speed of the device requires IORDY. Used | 1993 | * Check if the current speed of the device requires IORDY. Used |
1994 | * by various controllers for chip configuration. | 1994 | * by various controllers for chip configuration. |
1995 | */ | 1995 | */ |
1996 | |||
1997 | unsigned int ata_pio_need_iordy(const struct ata_device *adev) | 1996 | unsigned int ata_pio_need_iordy(const struct ata_device *adev) |
1998 | { | 1997 | { |
1999 | /* Controller doesn't support IORDY. Probably a pointless check | 1998 | /* Don't set IORDY if we're preparing for reset. IORDY may |
2000 | as the caller should know this */ | 1999 | * lead to controller lock up on certain controllers if the |
2000 | * port is not occupied. See bko#11703 for details. | ||
2001 | */ | ||
2002 | if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) | ||
2003 | return 0; | ||
2004 | /* Controller doesn't support IORDY. Probably a pointless | ||
2005 | * check as the caller should know this. | ||
2006 | */ | ||
2001 | if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) | 2007 | if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) |
2002 | return 0; | 2008 | return 0; |
2003 | /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ | 2009 | /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ |
@@ -2020,7 +2026,6 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) | |||
2020 | * Compute the highest mode possible if we are not using iordy. Return | 2026 | * Compute the highest mode possible if we are not using iordy. Return |
2021 | * -1 if no iordy mode is available. | 2027 | * -1 if no iordy mode is available. |
2022 | */ | 2028 | */ |
2023 | |||
2024 | static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) | 2029 | static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) |
2025 | { | 2030 | { |
2026 | /* If we have no drive specific rule, then PIO 2 is non IORDY */ | 2031 | /* If we have no drive specific rule, then PIO 2 is non IORDY */ |
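
The libata-core.c change above makes ata_pio_need_iordy() bail out early while the port is being reset (ATA_PFLAG_RESETTING), because asserting IORDY against an unoccupied port can lock up some controllers (bko#11703); only after that does it fall through to the existing capability checks. A stand-alone sketch of the flag-gated early return — the flag values and fake_* names are made up for illustration:

    #include <stdio.h>

    #define FAKE_PFLAG_RESETTING  (1u << 0)
    #define FAKE_FLAG_NO_IORDY    (1u << 1)

    struct fake_port {
            unsigned int pflags;
            unsigned int flags;
    };

    static unsigned int fake_pio_need_iordy(const struct fake_port *ap)
    {
            /* never ask for IORDY while a reset is in progress */
            if (ap->pflags & FAKE_PFLAG_RESETTING)
                    return 0;
            /* controller cannot do IORDY at all */
            if (ap->flags & FAKE_FLAG_NO_IORDY)
                    return 0;
            return 1;       /* the real code also checks the drive and PIO mode */
    }

    int main(void)
    {
            struct fake_port resetting = { .pflags = FAKE_PFLAG_RESETTING };
            struct fake_port normal = { 0 };

            printf("resetting: %u, normal: %u\n",
                   fake_pio_need_iordy(&resetting), fake_pio_need_iordy(&normal));
            return 0;
    }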
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c new file mode 100644 index 000000000000..4b27617be26d --- /dev/null +++ b/drivers/ata/pata_at91.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | * PATA driver for AT91SAM9260 Static Memory Controller | ||
3 | * with CompactFlash interface in True IDE mode | ||
4 | * | ||
5 | * Copyright (C) 2009 Matyukevich Sergey | ||
6 | * | ||
7 | * Based on: | ||
8 | * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c | ||
9 | * * pata_at32 driver by Kristoffer Nyborg Gregertsen | ||
10 | * * at91_ide driver by Stanislaw Gruszka | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License version 2 | ||
14 | * as published by the Free Software Foundation. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/blkdev.h> | ||
22 | #include <scsi/scsi_host.h> | ||
23 | #include <linux/ata.h> | ||
24 | #include <linux/clk.h> | ||
25 | #include <linux/libata.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/ata_platform.h> | ||
28 | |||
29 | #include <mach/at91sam9260_matrix.h> | ||
30 | #include <mach/at91sam9_smc.h> | ||
31 | #include <mach/at91sam9260.h> | ||
32 | #include <mach/board.h> | ||
33 | #include <mach/gpio.h> | ||
34 | |||
35 | |||
36 | #define DRV_NAME "pata_at91" | ||
37 | #define DRV_VERSION "0.1" | ||
38 | |||
39 | #define CF_IDE_OFFSET 0x00c00000 | ||
40 | #define CF_ALT_IDE_OFFSET 0x00e00000 | ||
41 | #define CF_IDE_RES_SIZE 0x08 | ||
42 | |||
43 | struct at91_ide_info { | ||
44 | unsigned long mode; | ||
45 | unsigned int cs; | ||
46 | |||
47 | void __iomem *ide_addr; | ||
48 | void __iomem *alt_addr; | ||
49 | }; | ||
50 | |||
51 | const struct ata_timing initial_timing = | ||
52 | {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; | ||
53 | |||
54 | static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz) | ||
55 | { | ||
56 | unsigned long mul; | ||
57 | |||
58 | /* | ||
59 | * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] = | ||
60 | * x * (f / 1_000_000_000) = | ||
61 | * x * ((f * 65536) / 1_000_000_000) / 65536 = | ||
62 | * x * (((f / 10_000) * 65536) / 100_000) / 65536 = | ||
63 | */ | ||
64 | |||
65 | mul = (mck_hz / 10000) << 16; | ||
66 | mul /= 100000; | ||
67 | |||
68 | return (ns * mul + 65536) >> 16; /* rounding */ | ||
69 | } | ||
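
calc_mck_cycles() above converts a timing in nanoseconds into master-clock cycles using 16.16 fixed-point arithmetic, so the driver never divides by 10^9 directly; the +65536 before the shift effectively rounds fractional cycles up so a requested timing is never shortened. A stand-alone check of the arithmetic using a 100 MHz MCK, which is just an example frequency:

    #include <stdio.h>

    /* Same arithmetic as calc_mck_cycles() in the patch. */
    static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
    {
            unsigned long mul = (mck_hz / 10000) << 16;  /* f scaled by 2^16 ... */

            mul /= 100000;                               /* ... and by 10^9 overall */
            return (ns * mul + 65536) >> 16;             /* round fractional cycles up */
    }

    int main(void)
    {
            /* 100 MHz MCK -> 10 ns per cycle */
            printf("290 ns @ 100 MHz = %u cycles\n", calc_mck_cycles(290, 100000000));
            printf("75 ns  @ 100 MHz = %u cycles\n", calc_mck_cycles(75, 100000000));
            return 0;       /* prints 29 cycles, then 8 cycles (7.5 rounded up) */
    }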
70 | |||
71 | static void set_smc_mode(struct at91_ide_info *info) | ||
72 | { | ||
73 | at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); | ||
74 | return; | ||
75 | } | ||
76 | |||
77 | static void set_smc_timing(struct device *dev, | ||
78 | struct at91_ide_info *info, const struct ata_timing *ata) | ||
79 | { | ||
80 | int read_cycle, write_cycle, active, recover; | ||
81 | int nrd_setup, nrd_pulse, nrd_recover; | ||
82 | int nwe_setup, nwe_pulse; | ||
83 | |||
84 | int ncs_write_setup, ncs_write_pulse; | ||
85 | int ncs_read_setup, ncs_read_pulse; | ||
86 | |||
87 | unsigned int mck_hz; | ||
88 | struct clk *mck; | ||
89 | |||
90 | read_cycle = ata->cyc8b; | ||
91 | nrd_setup = ata->setup; | ||
92 | nrd_pulse = ata->act8b; | ||
93 | nrd_recover = ata->rec8b; | ||
94 | |||
95 | mck = clk_get(NULL, "mck"); | ||
96 | BUG_ON(IS_ERR(mck)); | ||
97 | mck_hz = clk_get_rate(mck); | ||
98 | |||
99 | read_cycle = calc_mck_cycles(read_cycle, mck_hz); | ||
100 | nrd_setup = calc_mck_cycles(nrd_setup, mck_hz); | ||
101 | nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz); | ||
102 | nrd_recover = calc_mck_cycles(nrd_recover, mck_hz); | ||
103 | |||
104 | clk_put(mck); | ||
105 | |||
106 | active = nrd_setup + nrd_pulse; | ||
107 | recover = read_cycle - active; | ||
108 | |||
109 | /* Need at least two cycles recovery */ | ||
110 | if (recover < 2) | ||
111 | read_cycle = active + 2; | ||
112 | |||
113 | /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ | ||
114 | ncs_read_setup = 1; | ||
115 | ncs_read_pulse = read_cycle - 2; | ||
116 | |||
117 | /* Write timings same as read timings */ | ||
118 | write_cycle = read_cycle; | ||
119 | nwe_setup = nrd_setup; | ||
120 | nwe_pulse = nrd_pulse; | ||
121 | ncs_write_setup = ncs_read_setup; | ||
122 | ncs_write_pulse = ncs_read_pulse; | ||
123 | |||
124 | dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n", | ||
125 | nrd_setup, nrd_pulse, read_cycle); | ||
126 | dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n", | ||
127 | nwe_setup, nwe_pulse, write_cycle); | ||
128 | dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n", | ||
129 | ncs_read_setup, ncs_read_pulse); | ||
130 | dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n", | ||
131 | ncs_write_setup, ncs_write_pulse); | ||
132 | |||
133 | at91_sys_write(AT91_SMC_SETUP(info->cs), | ||
134 | AT91_SMC_NWESETUP_(nwe_setup) | | ||
135 | AT91_SMC_NRDSETUP_(nrd_setup) | | ||
136 | AT91_SMC_NCS_WRSETUP_(ncs_write_setup) | | ||
137 | AT91_SMC_NCS_RDSETUP_(ncs_read_setup)); | ||
138 | |||
139 | at91_sys_write(AT91_SMC_PULSE(info->cs), | ||
140 | AT91_SMC_NWEPULSE_(nwe_pulse) | | ||
141 | AT91_SMC_NRDPULSE_(nrd_pulse) | | ||
142 | AT91_SMC_NCS_WRPULSE_(ncs_write_pulse) | | ||
143 | AT91_SMC_NCS_RDPULSE_(ncs_read_pulse)); | ||
144 | |||
145 | at91_sys_write(AT91_SMC_CYCLE(info->cs), | ||
146 | AT91_SMC_NWECYCLE_(write_cycle) | | ||
147 | AT91_SMC_NRDCYCLE_(read_cycle)); | ||
148 | |||
149 | return; | ||
150 | } | ||
151 | |||
152 | static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
153 | { | ||
154 | struct at91_ide_info *info = ap->host->private_data; | ||
155 | struct ata_timing timing; | ||
156 | int ret; | ||
157 | |||
158 | /* Compute ATA timing and set it to SMC */ | ||
159 | ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0); | ||
160 | if (ret) { | ||
161 | dev_warn(ap->dev, "Failed to compute ATA timing %d, " | ||
162 | "set PIO_0 timing\n", ret); | ||
163 | set_smc_timing(ap->dev, info, &initial_timing); | ||
164 | } else { | ||
165 | set_smc_timing(ap->dev, info, &timing); | ||
166 | } | ||
167 | |||
168 | /* Setup SMC mode */ | ||
169 | set_smc_mode(info); | ||
170 | |||
171 | return; | ||
172 | } | ||
173 | |||
174 | static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev, | ||
175 | unsigned char *buf, unsigned int buflen, int rw) | ||
176 | { | ||
177 | struct at91_ide_info *info = dev->link->ap->host->private_data; | ||
178 | unsigned int consumed; | ||
179 | unsigned long flags; | ||
180 | unsigned int mode; | ||
181 | |||
182 | local_irq_save(flags); | ||
183 | mode = at91_sys_read(AT91_SMC_MODE(info->cs)); | ||
184 | |||
185 | /* set 16bit mode before writing data */ | ||
186 | at91_sys_write(AT91_SMC_MODE(info->cs), | ||
187 | (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16); | ||
188 | |||
189 | consumed = ata_sff_data_xfer(dev, buf, buflen, rw); | ||
190 | |||
191 | /* restore 8bit mode after data is written */ | ||
192 | at91_sys_write(AT91_SMC_MODE(info->cs), | ||
193 | (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8); | ||
194 | |||
195 | local_irq_restore(flags); | ||
196 | return consumed; | ||
197 | } | ||
198 | |||
199 | static struct scsi_host_template pata_at91_sht = { | ||
200 | ATA_PIO_SHT(DRV_NAME), | ||
201 | }; | ||
202 | |||
203 | static struct ata_port_operations pata_at91_port_ops = { | ||
204 | .inherits = &ata_sff_port_ops, | ||
205 | |||
206 | .sff_data_xfer = pata_at91_data_xfer_noirq, | ||
207 | .set_piomode = pata_at91_set_piomode, | ||
208 | .cable_detect = ata_cable_40wire, | ||
209 | .port_start = ATA_OP_NULL, | ||
210 | }; | ||
211 | |||
212 | static int __devinit pata_at91_probe(struct platform_device *pdev) | ||
213 | { | ||
214 | struct at91_cf_data *board = pdev->dev.platform_data; | ||
215 | struct device *dev = &pdev->dev; | ||
216 | struct at91_ide_info *info; | ||
217 | struct resource *mem_res; | ||
218 | struct ata_host *host; | ||
219 | struct ata_port *ap; | ||
220 | int irq_flags = 0; | ||
221 | int irq = 0; | ||
222 | int ret; | ||
223 | |||
224 | /* get platform resources: IO/CTL memories and irq/rst pins */ | ||
225 | |||
226 | if (pdev->num_resources != 1) { | ||
227 | dev_err(&pdev->dev, "invalid number of resources\n"); | ||
228 | return -EINVAL; | ||
229 | } | ||
230 | |||
231 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
232 | |||
233 | if (!mem_res) { | ||
234 | dev_err(dev, "failed to get mem resource\n"); | ||
235 | return -EINVAL; | ||
236 | } | ||
237 | |||
238 | irq = board->irq_pin; | ||
239 | |||
240 | /* init ata host */ | ||
241 | |||
242 | host = ata_host_alloc(dev, 1); | ||
243 | |||
244 | if (!host) | ||
245 | return -ENOMEM; | ||
246 | |||
247 | ap = host->ports[0]; | ||
248 | ap->ops = &pata_at91_port_ops; | ||
249 | ap->flags |= ATA_FLAG_SLAVE_POSS; | ||
250 | ap->pio_mask = ATA_PIO4; | ||
251 | |||
252 | if (!irq) { | ||
253 | ap->flags |= ATA_FLAG_PIO_POLLING; | ||
254 | ata_port_desc(ap, "no IRQ, using PIO polling"); | ||
255 | } | ||
256 | |||
257 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
258 | |||
259 | if (!info) { | ||
260 | dev_err(dev, "failed to allocate memory for private data\n"); | ||
261 | return -ENOMEM; | ||
262 | } | ||
263 | |||
264 | info->cs = board->chipselect; | ||
265 | info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | | ||
266 | AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | | ||
267 | AT91_SMC_DBW_8 | AT91_SMC_TDF_(0); | ||
268 | |||
269 | info->ide_addr = devm_ioremap(dev, | ||
270 | mem_res->start + CF_IDE_OFFSET, CF_IDE_RES_SIZE); | ||
271 | |||
272 | if (!info->ide_addr) { | ||
273 | dev_err(dev, "failed to map IO base\n"); | ||
274 | ret = -ENOMEM; | ||
275 | goto err_ide_ioremap; | ||
276 | } | ||
277 | |||
278 | info->alt_addr = devm_ioremap(dev, | ||
279 | mem_res->start + CF_ALT_IDE_OFFSET, CF_IDE_RES_SIZE); | ||
280 | |||
281 | if (!info->alt_addr) { | ||
282 | dev_err(dev, "failed to map CTL base\n"); | ||
283 | ret = -ENOMEM; | ||
284 | goto err_alt_ioremap; | ||
285 | } | ||
286 | |||
287 | ap->ioaddr.cmd_addr = info->ide_addr; | ||
288 | ap->ioaddr.ctl_addr = info->alt_addr + 0x06; | ||
289 | ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; | ||
290 | |||
291 | ata_sff_std_ports(&ap->ioaddr); | ||
292 | |||
293 | ata_port_desc(ap, "mmio cmd 0x%llx ctl 0x%llx", | ||
294 | (unsigned long long)mem_res->start + CF_IDE_OFFSET, | ||
295 | (unsigned long long)mem_res->start + CF_ALT_IDE_OFFSET); | ||
296 | |||
297 | host->private_data = info; | ||
298 | |||
299 | return ata_host_activate(host, irq ? gpio_to_irq(irq) : 0, | ||
300 | irq ? ata_sff_interrupt : NULL, | ||
301 | irq_flags, &pata_at91_sht); | ||
302 | |||
303 | err_alt_ioremap: | ||
304 | devm_iounmap(dev, info->ide_addr); | ||
305 | |||
306 | err_ide_ioremap: | ||
307 | kfree(info); | ||
308 | |||
309 | return ret; | ||
310 | } | ||
311 | |||
312 | static int __devexit pata_at91_remove(struct platform_device *pdev) | ||
313 | { | ||
314 | 	struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
315 | 	struct device *dev = &pdev->dev; | ||
316 | 	struct at91_ide_info *info; | ||
317 | |||
318 | 	if (!host) | ||
319 | 		return 0; | ||
320 | 	info = host->private_data; | ||
321 | 	ata_host_detach(host); | ||
322 | |||
323 | if (!info) | ||
324 | return 0; | ||
325 | |||
326 | devm_iounmap(dev, info->ide_addr); | ||
327 | devm_iounmap(dev, info->alt_addr); | ||
328 | |||
329 | kfree(info); | ||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static struct platform_driver pata_at91_driver = { | ||
334 | .probe = pata_at91_probe, | ||
335 | .remove = __devexit_p(pata_at91_remove), | ||
336 | .driver = { | ||
337 | .name = DRV_NAME, | ||
338 | .owner = THIS_MODULE, | ||
339 | }, | ||
340 | }; | ||
341 | |||
342 | static int __init pata_at91_init(void) | ||
343 | { | ||
344 | return platform_driver_register(&pata_at91_driver); | ||
345 | } | ||
346 | |||
347 | static void __exit pata_at91_exit(void) | ||
348 | { | ||
349 | platform_driver_unregister(&pata_at91_driver); | ||
350 | } | ||
351 | |||
352 | |||
353 | module_init(pata_at91_init); | ||
354 | module_exit(pata_at91_exit); | ||
355 | |||
356 | |||
357 | MODULE_LICENSE("GPL"); | ||
358 | MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC"); | ||
359 | MODULE_AUTHOR("Matyukevich Sergey"); | ||
360 | MODULE_VERSION(DRV_VERSION); | ||
361 | |||
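The SMC timing setup in the pata_at91 listing above converts nanosecond ATA timings into master-clock (MCK) cycles and then forces at least two recovery cycles per read cycle. A minimal sketch of the ns-to-cycles conversion it relies on, assuming a round-up division; the driver's real calc_mck_cycles() is not shown in this listing and may differ in detail:

    #include <linux/math64.h>   /* div_u64() */
    #include <linux/time.h>     /* NSEC_PER_SEC */

    /* nanoseconds -> MCK cycles, rounded up (illustrative helper only) */
    static unsigned long ns_to_mck_cycles(unsigned long ns, unsigned long mck_hz)
    {
            u64 ticks = (u64)ns * mck_hz;

            return div_u64(ticks + NSEC_PER_SEC - 1, NSEC_PER_SEC);
    }

With a 100 MHz MCK, for example, a 30 ns pulse rounds up to 3 cycles; the two-cycle recovery clamp then stretches read_cycle whenever setup plus pulse leaves less than two cycles of slack.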
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 36b8629203be..94eaa432c40a 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -1378,6 +1378,37 @@ static int sata_fsl_remove(struct of_device *ofdev) | |||
1378 | return 0; | 1378 | return 0; |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | #ifdef CONFIG_PM | ||
1382 | static int sata_fsl_suspend(struct of_device *op, pm_message_t state) | ||
1383 | { | ||
1384 | struct ata_host *host = dev_get_drvdata(&op->dev); | ||
1385 | return ata_host_suspend(host, state); | ||
1386 | } | ||
1387 | |||
1388 | static int sata_fsl_resume(struct of_device *op) | ||
1389 | { | ||
1390 | struct ata_host *host = dev_get_drvdata(&op->dev); | ||
1391 | struct sata_fsl_host_priv *host_priv = host->private_data; | ||
1392 | int ret; | ||
1393 | void __iomem *hcr_base = host_priv->hcr_base; | ||
1394 | struct ata_port *ap = host->ports[0]; | ||
1395 | struct sata_fsl_port_priv *pp = ap->private_data; | ||
1396 | |||
1397 | ret = sata_fsl_init_controller(host); | ||
1398 | if (ret) { | ||
1399 | dev_printk(KERN_ERR, &op->dev, | ||
1400 | "Error initialize hardware\n"); | ||
1401 | return ret; | ||
1402 | } | ||
1403 | |||
1404 | 	/* Recover the CHBA register in the host controller cmd register set */ | ||
1405 | iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA); | ||
1406 | |||
1407 | ata_host_resume(host); | ||
1408 | return 0; | ||
1409 | } | ||
1410 | #endif | ||
1411 | |||
1381 | static struct of_device_id fsl_sata_match[] = { | 1412 | static struct of_device_id fsl_sata_match[] = { |
1382 | { | 1413 | { |
1383 | .compatible = "fsl,pq-sata", | 1414 | .compatible = "fsl,pq-sata", |
@@ -1392,6 +1423,10 @@ static struct of_platform_driver fsl_sata_driver = { | |||
1392 | .match_table = fsl_sata_match, | 1423 | .match_table = fsl_sata_match, |
1393 | .probe = sata_fsl_probe, | 1424 | .probe = sata_fsl_probe, |
1394 | .remove = sata_fsl_remove, | 1425 | .remove = sata_fsl_remove, |
1426 | #ifdef CONFIG_PM | ||
1427 | .suspend = sata_fsl_suspend, | ||
1428 | .resume = sata_fsl_resume, | ||
1429 | #endif | ||
1395 | }; | 1430 | }; |
1396 | 1431 | ||
1397 | static int __init sata_fsl_init(void) | 1432 | static int __init sata_fsl_init(void) |
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c index 140ea10ecb88..c02db01f736e 100644 --- a/drivers/char/bsr.c +++ b/drivers/char/bsr.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/cdev.h> | 27 | #include <linux/cdev.h> |
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <asm/pgtable.h> | ||
30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
31 | 32 | ||
32 | /* | 33 | /* |
@@ -75,12 +76,13 @@ static struct class *bsr_class; | |||
75 | static int bsr_major; | 76 | static int bsr_major; |
76 | 77 | ||
77 | enum { | 78 | enum { |
78 | BSR_8 = 0, | 79 | BSR_8 = 0, |
79 | BSR_16 = 1, | 80 | BSR_16 = 1, |
80 | BSR_64 = 2, | 81 | BSR_64 = 2, |
81 | BSR_128 = 3, | 82 | BSR_128 = 3, |
82 | BSR_UNKNOWN = 4, | 83 | BSR_4096 = 4, |
83 | BSR_MAX = 5, | 84 | BSR_UNKNOWN = 5, |
85 | BSR_MAX = 6, | ||
84 | }; | 86 | }; |
85 | 87 | ||
86 | static unsigned bsr_types[BSR_MAX]; | 88 | static unsigned bsr_types[BSR_MAX]; |
@@ -117,15 +119,22 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) | |||
117 | { | 119 | { |
118 | unsigned long size = vma->vm_end - vma->vm_start; | 120 | unsigned long size = vma->vm_end - vma->vm_start; |
119 | struct bsr_dev *dev = filp->private_data; | 121 | struct bsr_dev *dev = filp->private_data; |
122 | int ret; | ||
120 | 123 | ||
121 | if (size > dev->bsr_len || (size & (PAGE_SIZE-1))) | ||
122 | return -EINVAL; | ||
123 | |||
124 | vma->vm_flags |= (VM_IO | VM_DONTEXPAND); | ||
125 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 124 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
126 | 125 | ||
127 | if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT, | 126 | /* check for the case of a small BSR device and map one 4k page for it*/ |
128 | size, vma->vm_page_prot)) | 127 | if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE) |
128 | ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12, | ||
129 | vma->vm_page_prot); | ||
130 | else if (size <= dev->bsr_len) | ||
131 | ret = io_remap_pfn_range(vma, vma->vm_start, | ||
132 | dev->bsr_addr >> PAGE_SHIFT, | ||
133 | size, vma->vm_page_prot); | ||
134 | else | ||
135 | return -EINVAL; | ||
136 | |||
137 | if (ret) | ||
129 | return -EAGAIN; | 138 | return -EAGAIN; |
130 | 139 | ||
131 | return 0; | 140 | return 0; |
@@ -205,6 +214,11 @@ static int bsr_add_node(struct device_node *bn) | |||
205 | cur->bsr_stride = bsr_stride[i]; | 214 | cur->bsr_stride = bsr_stride[i]; |
206 | cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); | 215 | cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs); |
207 | 216 | ||
217 | 	/* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */ | ||
218 | /* we can only map 4k of it, so only advertise the 4k in sysfs */ | ||
219 | if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE) | ||
220 | cur->bsr_len = 4096; | ||
221 | |||
208 | switch(cur->bsr_bytes) { | 222 | switch(cur->bsr_bytes) { |
209 | case 8: | 223 | case 8: |
210 | cur->bsr_type = BSR_8; | 224 | cur->bsr_type = BSR_8; |
@@ -218,9 +232,11 @@ static int bsr_add_node(struct device_node *bn) | |||
218 | case 128: | 232 | case 128: |
219 | cur->bsr_type = BSR_128; | 233 | cur->bsr_type = BSR_128; |
220 | break; | 234 | break; |
235 | case 4096: | ||
236 | cur->bsr_type = BSR_4096; | ||
237 | break; | ||
221 | default: | 238 | default: |
222 | cur->bsr_type = BSR_UNKNOWN; | 239 | cur->bsr_type = BSR_UNKNOWN; |
223 | printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes); | ||
224 | } | 240 | } |
225 | 241 | ||
226 | cur->bsr_num = bsr_types[cur->bsr_type]; | 242 | cur->bsr_num = bsr_types[cur->bsr_type]; |
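The bsr_mmap() rework above lets a BSR region smaller than the kernel page size be handed to user space as a single 4k mapping (remap_4k_pfn() on 64k-page kernels), while larger regions keep going through io_remap_pfn_range(). A hedged user-space sketch of how such a device might be mapped; the device node name is purely illustrative:

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/bsr_dev0", O_RDWR);   /* hypothetical node name */
            if (fd < 0)
                    return 1;

            /* one 4k page is enough for a small (<= 4096 byte) BSR block */
            void *bsr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (bsr == MAP_FAILED) {
                    close(fd);
                    return 1;
            }

            /* ... use the barrier-synchronization registers ... */

            munmap(bsr, 4096);
            close(fd);
            return 0;
    }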
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 9533f43a30bb..52d953eb30c3 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
@@ -1048,8 +1048,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp) | |||
1048 | if (retval) | 1048 | if (retval) |
1049 | return retval; | 1049 | return retval; |
1050 | 1050 | ||
1051 | /* unmark here for very high baud rate (ex. 921600 bps) used */ | ||
1052 | tty->low_latency = 1; | ||
1053 | return 0; | 1051 | return 0; |
1054 | } | 1052 | } |
1055 | 1053 | ||
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index d6102b644b55..574f1c79b6e6 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c | |||
@@ -1591,8 +1591,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file) | |||
1591 | 1591 | ||
1592 | /* Enable interrupt downlink for channel */ | 1592 | /* Enable interrupt downlink for channel */ |
1593 | if (port->port.count == 1) { | 1593 | if (port->port.count == 1) { |
1594 | /* FIXME: is this needed now ? */ | ||
1595 | tty->low_latency = 1; | ||
1596 | tty->driver_data = port; | 1594 | tty->driver_data = port; |
1597 | tty_port_tty_set(&port->port, tty); | 1595 | tty_port_tty_set(&port->port, tty); |
1598 | DBG1("open: %d", port->token_dl); | 1596 | DBG1("open: %d", port->token_dl); |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 1386625fc4ca..a2e67e6df3a1 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -467,7 +467,6 @@ static unsigned int free_tbuf_count(struct slgt_info *info); | |||
467 | static unsigned int tbuf_bytes(struct slgt_info *info); | 467 | static unsigned int tbuf_bytes(struct slgt_info *info); |
468 | static void reset_tbufs(struct slgt_info *info); | 468 | static void reset_tbufs(struct slgt_info *info); |
469 | static void tdma_reset(struct slgt_info *info); | 469 | static void tdma_reset(struct slgt_info *info); |
470 | static void tdma_start(struct slgt_info *info); | ||
471 | static void tx_load(struct slgt_info *info, const char *buf, unsigned int count); | 470 | static void tx_load(struct slgt_info *info, const char *buf, unsigned int count); |
472 | 471 | ||
473 | static void get_signals(struct slgt_info *info); | 472 | static void get_signals(struct slgt_info *info); |
@@ -795,6 +794,18 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
795 | } | 794 | } |
796 | } | 795 | } |
797 | 796 | ||
797 | static void update_tx_timer(struct slgt_info *info) | ||
798 | { | ||
799 | /* | ||
800 | * use worst case speed of 1200bps to calculate transmit timeout | ||
801 | * based on data in buffers (tbuf_bytes) and FIFO (128 bytes) | ||
802 | */ | ||
803 | if (info->params.mode == MGSL_MODE_HDLC) { | ||
804 | int timeout = (tbuf_bytes(info) * 7) + 1000; | ||
805 | mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout)); | ||
806 | } | ||
807 | } | ||
808 | |||
798 | static int write(struct tty_struct *tty, | 809 | static int write(struct tty_struct *tty, |
799 | const unsigned char *buf, int count) | 810 | const unsigned char *buf, int count) |
800 | { | 811 | { |
@@ -838,8 +849,18 @@ start: | |||
838 | spin_lock_irqsave(&info->lock,flags); | 849 | spin_lock_irqsave(&info->lock,flags); |
839 | if (!info->tx_active) | 850 | if (!info->tx_active) |
840 | tx_start(info); | 851 | tx_start(info); |
841 | else | 852 | else if (!(rd_reg32(info, TDCSR) & BIT0)) { |
842 | tdma_start(info); | 853 | /* transmit still active but transmit DMA stopped */ |
854 | unsigned int i = info->tbuf_current; | ||
855 | if (!i) | ||
856 | i = info->tbuf_count; | ||
857 | i--; | ||
858 | /* if DMA buf unsent must try later after tx idle */ | ||
859 | if (desc_count(info->tbufs[i])) | ||
860 | ret = 0; | ||
861 | } | ||
862 | if (ret > 0) | ||
863 | update_tx_timer(info); | ||
843 | spin_unlock_irqrestore(&info->lock,flags); | 864 | spin_unlock_irqrestore(&info->lock,flags); |
844 | } | 865 | } |
845 | 866 | ||
@@ -1502,10 +1523,9 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1502 | /* save start time for transmit timeout detection */ | 1523 | /* save start time for transmit timeout detection */ |
1503 | dev->trans_start = jiffies; | 1524 | dev->trans_start = jiffies; |
1504 | 1525 | ||
1505 | /* start hardware transmitter if necessary */ | ||
1506 | spin_lock_irqsave(&info->lock,flags); | 1526 | spin_lock_irqsave(&info->lock,flags); |
1507 | if (!info->tx_active) | 1527 | tx_start(info); |
1508 | tx_start(info); | 1528 | update_tx_timer(info); |
1509 | spin_unlock_irqrestore(&info->lock,flags); | 1529 | spin_unlock_irqrestore(&info->lock,flags); |
1510 | 1530 | ||
1511 | return 0; | 1531 | return 0; |
@@ -3946,50 +3966,19 @@ static void tx_start(struct slgt_info *info) | |||
3946 | slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE); | 3966 | slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE); |
3947 | /* clear tx idle and underrun status bits */ | 3967 | /* clear tx idle and underrun status bits */ |
3948 | wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER)); | 3968 | wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER)); |
3949 | if (info->params.mode == MGSL_MODE_HDLC) | ||
3950 | mod_timer(&info->tx_timer, jiffies + | ||
3951 | msecs_to_jiffies(5000)); | ||
3952 | } else { | 3969 | } else { |
3953 | slgt_irq_off(info, IRQ_TXDATA); | 3970 | slgt_irq_off(info, IRQ_TXDATA); |
3954 | slgt_irq_on(info, IRQ_TXIDLE); | 3971 | slgt_irq_on(info, IRQ_TXIDLE); |
3955 | /* clear tx idle status bit */ | 3972 | /* clear tx idle status bit */ |
3956 | wr_reg16(info, SSR, IRQ_TXIDLE); | 3973 | wr_reg16(info, SSR, IRQ_TXIDLE); |
3957 | } | 3974 | } |
3958 | tdma_start(info); | 3975 | /* set 1st descriptor address and start DMA */ |
3976 | wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc); | ||
3977 | wr_reg32(info, TDCSR, BIT2 + BIT0); | ||
3959 | info->tx_active = true; | 3978 | info->tx_active = true; |
3960 | } | 3979 | } |
3961 | } | 3980 | } |
3962 | 3981 | ||
3963 | /* | ||
3964 | * start transmit DMA if inactive and there are unsent buffers | ||
3965 | */ | ||
3966 | static void tdma_start(struct slgt_info *info) | ||
3967 | { | ||
3968 | unsigned int i; | ||
3969 | |||
3970 | if (rd_reg32(info, TDCSR) & BIT0) | ||
3971 | return; | ||
3972 | |||
3973 | /* transmit DMA inactive, check for unsent buffers */ | ||
3974 | i = info->tbuf_start; | ||
3975 | while (!desc_count(info->tbufs[i])) { | ||
3976 | if (++i == info->tbuf_count) | ||
3977 | i = 0; | ||
3978 | if (i == info->tbuf_current) | ||
3979 | return; | ||
3980 | } | ||
3981 | info->tbuf_start = i; | ||
3982 | |||
3983 | /* there are unsent buffers, start transmit DMA */ | ||
3984 | |||
3985 | /* reset needed if previous error condition */ | ||
3986 | tdma_reset(info); | ||
3987 | |||
3988 | /* set 1st descriptor address */ | ||
3989 | wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc); | ||
3990 | wr_reg32(info, TDCSR, BIT2 + BIT0); /* IRQ + DMA enable */ | ||
3991 | } | ||
3992 | |||
3993 | static void tx_stop(struct slgt_info *info) | 3982 | static void tx_stop(struct slgt_info *info) |
3994 | { | 3983 | { |
3995 | unsigned short val; | 3984 | unsigned short val; |
@@ -5004,8 +4993,7 @@ static void tx_timeout(unsigned long context) | |||
5004 | info->icount.txtimeout++; | 4993 | info->icount.txtimeout++; |
5005 | } | 4994 | } |
5006 | spin_lock_irqsave(&info->lock,flags); | 4995 | spin_lock_irqsave(&info->lock,flags); |
5007 | info->tx_active = false; | 4996 | tx_stop(info); |
5008 | info->tx_count = 0; | ||
5009 | spin_unlock_irqrestore(&info->lock,flags); | 4997 | spin_unlock_irqrestore(&info->lock,flags); |
5010 | 4998 | ||
5011 | #if SYNCLINK_GENERIC_HDLC | 4999 | #if SYNCLINK_GENERIC_HDLC |
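The new update_tx_timer() above sizes the transmit timeout from the amount of queued data at the assumed worst-case rate of 1200 bits/s: about 150 bytes/s, i.e. roughly 6.7 ms per byte, rounded up to 7 ms, plus a fixed 1000 ms of slack that also covers the 128-byte FIFO (128 * 7 ms is just under a second). Restated as a sketch, with buffered_bytes standing in for tbuf_bytes(info):

    /* ~7 ms per buffered byte at 1200 bps, plus ~1 s of slack for FIFO/latency */
    unsigned int timeout_ms = buffered_bytes * 7 + 1000;

    mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout_ms));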
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 62dadfc95e34..4e862a75f7ff 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c | |||
@@ -193,7 +193,7 @@ int tty_port_block_til_ready(struct tty_port *port, | |||
193 | { | 193 | { |
194 | int do_clocal = 0, retval; | 194 | int do_clocal = 0, retval; |
195 | unsigned long flags; | 195 | unsigned long flags; |
196 | DECLARE_WAITQUEUE(wait, current); | 196 | DEFINE_WAIT(wait); |
197 | int cd; | 197 | int cd; |
198 | 198 | ||
199 | /* block if port is in the process of being closed */ | 199 | /* block if port is in the process of being closed */ |
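The tty_port change swaps DECLARE_WAITQUEUE() for DEFINE_WAIT(), whose wait entry uses autoremove_wake_function and is meant to be driven with prepare_to_wait()/finish_wait() rather than add_wait_queue(). A minimal sketch of that idiom; whether tty_port_block_til_ready() follows exactly this loop is not visible in the hunk, and ready_to_open() is a stand-in for the real carrier/close checks:

    DEFINE_WAIT(wait);

    for (;;) {
            prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
            if (ready_to_open(port) || signal_pending(current))
                    break;
            schedule();
    }
    finish_wait(&port->open_wait, &wait);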
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 9aa9ea9822c8..88dab52926f4 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -432,23 +432,27 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
432 | list_splice_init(&txd->tx_list, &dc->free_list); | 432 | list_splice_init(&txd->tx_list, &dc->free_list); |
433 | list_move(&desc->desc_node, &dc->free_list); | 433 | list_move(&desc->desc_node, &dc->free_list); |
434 | 434 | ||
435 | /* | ||
436 | * We use dma_unmap_page() regardless of how the buffers were | ||
437 | * mapped before they were submitted... | ||
438 | */ | ||
439 | if (!ds) { | 435 | if (!ds) { |
440 | dma_addr_t dmaaddr; | 436 | dma_addr_t dmaaddr; |
441 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 437 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
442 | dmaaddr = is_dmac64(dc) ? | 438 | dmaaddr = is_dmac64(dc) ? |
443 | desc->hwdesc.DAR : desc->hwdesc32.DAR; | 439 | desc->hwdesc.DAR : desc->hwdesc32.DAR; |
444 | dma_unmap_page(chan2parent(&dc->chan), dmaaddr, | 440 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
445 | desc->len, DMA_FROM_DEVICE); | 441 | dma_unmap_single(chan2parent(&dc->chan), |
442 | dmaaddr, desc->len, DMA_FROM_DEVICE); | ||
443 | else | ||
444 | dma_unmap_page(chan2parent(&dc->chan), | ||
445 | dmaaddr, desc->len, DMA_FROM_DEVICE); | ||
446 | } | 446 | } |
447 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 447 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
448 | dmaaddr = is_dmac64(dc) ? | 448 | dmaaddr = is_dmac64(dc) ? |
449 | desc->hwdesc.SAR : desc->hwdesc32.SAR; | 449 | desc->hwdesc.SAR : desc->hwdesc32.SAR; |
450 | dma_unmap_page(chan2parent(&dc->chan), dmaaddr, | 450 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
451 | desc->len, DMA_TO_DEVICE); | 451 | dma_unmap_single(chan2parent(&dc->chan), |
452 | dmaaddr, desc->len, DMA_TO_DEVICE); | ||
453 | else | ||
454 | dma_unmap_page(chan2parent(&dc->chan), | ||
455 | dmaaddr, desc->len, DMA_TO_DEVICE); | ||
452 | } | 456 | } |
453 | } | 457 | } |
454 | 458 | ||
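With the txx9dmac change above, completion honours DMA_COMPL_SRC_UNMAP_SINGLE and DMA_COMPL_DEST_UNMAP_SINGLE, so buffers the client mapped with dma_map_single() are unmapped with the matching call instead of dma_unmap_page(). A hedged sketch of how a dmaengine client would request that at submission time; dev, the buffers and len are illustrative:

    enum dma_ctrl_flags flags = DMA_CTRL_ACK |
                                DMA_COMPL_SRC_UNMAP_SINGLE |
                                DMA_COMPL_DEST_UNMAP_SINGLE;
    dma_addr_t src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
    dma_addr_t dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    /* the completion handler will now unmap both with dma_unmap_single() */
    tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
    if (tx)
            cookie = tx->tx_submit(tx);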
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index c36bf40568cf..858fe6037223 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -754,13 +754,13 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt) | |||
754 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | 754 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) |
755 | { | 755 | { |
756 | int bit; | 756 | int bit; |
757 | enum dev_type edac_cap = EDAC_NONE; | 757 | enum dev_type edac_cap = EDAC_FLAG_NONE; |
758 | 758 | ||
759 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F) | 759 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F) |
760 | ? 19 | 760 | ? 19 |
761 | : 17; | 761 | : 17; |
762 | 762 | ||
763 | if (pvt->dclr0 >> BIT(bit)) | 763 | if (pvt->dclr0 & BIT(bit)) |
764 | edac_cap = EDAC_FLAG_SECDED; | 764 | edac_cap = EDAC_FLAG_SECDED; |
765 | 765 | ||
766 | return edac_cap; | 766 | return edac_cap; |
@@ -1269,7 +1269,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) | |||
1269 | if (channels == 0) | 1269 | if (channels == 0) |
1270 | channels = 1; | 1270 | channels = 1; |
1271 | 1271 | ||
1272 | debugf0("DIMM count= %d\n", channels); | 1272 | debugf0("MCT channel count: %d\n", channels); |
1273 | 1273 | ||
1274 | return channels; | 1274 | return channels; |
1275 | 1275 | ||
@@ -2966,7 +2966,12 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | |||
2966 | " Use of the override can cause " | 2966 | " Use of the override can cause " |
2967 | "unknown side effects.\n"); | 2967 | "unknown side effects.\n"); |
2968 | ret = -ENODEV; | 2968 | ret = -ENODEV; |
2969 | } | 2969 | } else |
2970 | /* | ||
2971 | * enable further driver loading if ECC enable is | ||
2972 | * overridden. | ||
2973 | */ | ||
2974 | ret = 0; | ||
2970 | } else { | 2975 | } else { |
2971 | amd64_printk(KERN_INFO, | 2976 | amd64_printk(KERN_INFO, |
2972 | "ECC is enabled by BIOS, Proceeding " | 2977 | "ECC is enabled by BIOS, Proceeding " |
@@ -3006,7 +3011,6 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | |||
3006 | 3011 | ||
3007 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; | 3012 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; |
3008 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | 3013 | mci->edac_ctl_cap = EDAC_FLAG_NONE; |
3009 | mci->edac_cap = EDAC_FLAG_NONE; | ||
3010 | 3014 | ||
3011 | if (pvt->nbcap & K8_NBCAP_SECDED) | 3015 | if (pvt->nbcap & K8_NBCAP_SECDED) |
3012 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; | 3016 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; |
@@ -3052,7 +3056,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, | |||
3052 | if (!pvt) | 3056 | if (!pvt) |
3053 | goto err_exit; | 3057 | goto err_exit; |
3054 | 3058 | ||
3055 | pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl); | 3059 | pvt->mc_node_id = get_node_id(dram_f2_ctl); |
3056 | 3060 | ||
3057 | pvt->dram_f2_ctl = dram_f2_ctl; | 3061 | pvt->dram_f2_ctl = dram_f2_ctl; |
3058 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | 3062 | pvt->ext_model = boot_cpu_data.x86_model >> 4; |
@@ -3179,8 +3183,7 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev, | |||
3179 | { | 3183 | { |
3180 | int ret = 0; | 3184 | int ret = 0; |
3181 | 3185 | ||
3182 | debugf0("(MC node=%d,mc_type='%s')\n", | 3186 | debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev), |
3183 | get_mc_node_id_from_pdev(pdev), | ||
3184 | get_amd_family_name(mc_type->driver_data)); | 3187 | get_amd_family_name(mc_type->driver_data)); |
3185 | 3188 | ||
3186 | ret = pci_enable_device(pdev); | 3189 | ret = pci_enable_device(pdev); |
@@ -3319,15 +3322,17 @@ static int __init amd64_edac_init(void) | |||
3319 | 3322 | ||
3320 | err = amd64_init_2nd_stage(pvt_lookup[nb]); | 3323 | err = amd64_init_2nd_stage(pvt_lookup[nb]); |
3321 | if (err) | 3324 | if (err) |
3322 | goto err_exit; | 3325 | goto err_2nd_stage; |
3323 | } | 3326 | } |
3324 | 3327 | ||
3325 | amd64_setup_pci_device(); | 3328 | amd64_setup_pci_device(); |
3326 | 3329 | ||
3327 | return 0; | 3330 | return 0; |
3328 | 3331 | ||
3332 | err_2nd_stage: | ||
3333 | debugf0("2nd stage failed\n"); | ||
3334 | |||
3329 | err_exit: | 3335 | err_exit: |
3330 | debugf0("'finish_setup' stage failed\n"); | ||
3331 | pci_unregister_driver(&amd64_pci_driver); | 3336 | pci_unregister_driver(&amd64_pci_driver); |
3332 | 3337 | ||
3333 | return err; | 3338 | return err; |
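One of the amd64_edac fixes above is easy to miss: the DCLR0 ECC-capability test used a right shift where a bit mask was intended. A small illustration with BIT() expanded; the bit number comes from the hunk, the variable name is illustrative:

    #define BIT(n)  (1UL << (n))

    /*
     * BIT(19) is 0x80000, so "dclr0 >> BIT(19)" shifts by 524288 -- far past
     * the register width -- instead of testing bit 19 (bit 17 on pre-rev-F
     * parts).  The corrected form masks the intended bit:
     */
    int ecc_capable = (dclr0 & BIT(19)) != 0;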
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index a159957e167b..ba73015af8e4 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
@@ -444,7 +444,7 @@ enum { | |||
444 | #define K8_MSR_MC4ADDR 0x0412 | 444 | #define K8_MSR_MC4ADDR 0x0412 |
445 | 445 | ||
446 | /* AMD sets the first MC device at device ID 0x18. */ | 446 | /* AMD sets the first MC device at device ID 0x18. */ |
447 | static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev) | 447 | static inline int get_node_id(struct pci_dev *pdev) |
448 | { | 448 | { |
449 | return PCI_SLOT(pdev->devfn) - 0x18; | 449 | return PCI_SLOT(pdev->devfn) - 0x18; |
450 | } | 450 | } |
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index c961fe415aef..39b393d38bb3 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -81,6 +81,7 @@ config DRM_I830 | |||
81 | 81 | ||
82 | config DRM_I915 | 82 | config DRM_I915 |
83 | tristate "i915 driver" | 83 | tristate "i915 driver" |
84 | depends on AGP_INTEL | ||
84 | select FB_CFB_FILLRECT | 85 | select FB_CFB_FILLRECT |
85 | select FB_CFB_COPYAREA | 86 | select FB_CFB_COPYAREA |
86 | select FB_CFB_IMAGEBLIT | 87 | select FB_CFB_IMAGEBLIT |
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 4e89ab08b7b8..fe23f29f7cba 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ | |||
16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
17 | 17 | ||
18 | obj-$(CONFIG_DRM) += drm.o | 18 | obj-$(CONFIG_DRM) += drm.o |
19 | obj-$(CONFIG_DRM_TTM) += ttm/ | ||
19 | obj-$(CONFIG_DRM_TDFX) += tdfx/ | 20 | obj-$(CONFIG_DRM_TDFX) += tdfx/ |
20 | obj-$(CONFIG_DRM_R128) += r128/ | 21 | obj-$(CONFIG_DRM_R128) += r128/ |
21 | obj-$(CONFIG_DRM_RADEON)+= radeon/ | 22 | obj-$(CONFIG_DRM_RADEON)+= radeon/ |
@@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/ | |||
26 | obj-$(CONFIG_DRM_SIS) += sis/ | 27 | obj-$(CONFIG_DRM_SIS) += sis/ |
27 | obj-$(CONFIG_DRM_SAVAGE)+= savage/ | 28 | obj-$(CONFIG_DRM_SAVAGE)+= savage/ |
28 | obj-$(CONFIG_DRM_VIA) +=via/ | 29 | obj-$(CONFIG_DRM_VIA) +=via/ |
29 | obj-$(CONFIG_DRM_TTM) += ttm/ | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7d0835226f6e..80cc6d06d61b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -294,10 +294,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
294 | unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; | 294 | unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; |
295 | unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; | 295 | unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; |
296 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; | 296 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; |
297 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo; | 297 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; |
298 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo; | 298 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; |
299 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf); | 299 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; |
300 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; | 300 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); |
301 | 301 | ||
302 | /* ignore tiny modes */ | 302 | /* ignore tiny modes */ |
303 | if (hactive < 64 || vactive < 64) | 303 | if (hactive < 64 || vactive < 64) |
@@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
347 | mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? | 347 | mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? |
348 | DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; | 348 | DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; |
349 | 349 | ||
350 | mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; | 350 | mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; |
351 | mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; | 351 | mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; |
352 | 352 | ||
353 | if (quirks & EDID_QUIRK_DETAILED_IN_CM) { | 353 | if (quirks & EDID_QUIRK_DETAILED_IN_CM) { |
354 | mode->width_mm *= 10; | 354 | mode->width_mm *= 10; |
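The drm_edid.c hunk corrects how the upper bits of the detailed-timing sync fields and the physical image size are extracted. For reference, byte 11 of the EDID 1.3 detailed timing descriptor (hsync_vsync_offset_pulse_width_hi) packs the high two bits of four different fields, so each pair must be masked from the top of the byte and shifted up above the low bits; a hedged restatement for the horizontal fields, with variable names shortened:

    /*
     * descriptor byte 11:
     *   bits 7:6  hsync offset,      upper 2 of 10 bits
     *   bits 5:4  hsync pulse width, upper 2 of 10 bits
     *   bits 3:2  vsync offset,      upper 2 of 6 bits
     *   bits 1:0  vsync pulse width, upper 2 of 6 bits
     */
    hsync_offset      = ((hi & 0xc0) << 2) | hsync_offset_lo;
    hsync_pulse_width = ((hi & 0x30) << 4) | hsync_pulse_width_lo;

Byte 14 (width_height_mm_hi) is split the same way: its high nibble carries the upper four bits of the width in mm and its low nibble the upper four bits of the height, which is what the swapped masks at the end of the hunk restore.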
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 51c5a050aa73..30d6b99fb302 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
13 | intel_crt.o \ | 13 | intel_crt.o \ |
14 | intel_lvds.o \ | 14 | intel_lvds.o \ |
15 | intel_bios.o \ | 15 | intel_bios.o \ |
16 | intel_dp.o \ | ||
17 | intel_dp_i2c.o \ | ||
16 | intel_hdmi.o \ | 18 | intel_hdmi.o \ |
17 | intel_sdvo.o \ | 19 | intel_sdvo.o \ |
18 | intel_modes.o \ | 20 | intel_modes.o \ |
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index e747ac42fe3a..288fc50627e2 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h | |||
@@ -37,7 +37,7 @@ struct intel_dvo_device { | |||
37 | /* GPIO register used for i2c bus to control this device */ | 37 | /* GPIO register used for i2c bus to control this device */ |
38 | u32 gpio; | 38 | u32 gpio; |
39 | int slave_addr; | 39 | int slave_addr; |
40 | struct intel_i2c_chan *i2c_bus; | 40 | struct i2c_adapter *i2c_bus; |
41 | 41 | ||
42 | const struct intel_dvo_dev_ops *dev_ops; | 42 | const struct intel_dvo_dev_ops *dev_ops; |
43 | void *dev_priv; | 43 | void *dev_priv; |
@@ -52,7 +52,7 @@ struct intel_dvo_dev_ops { | |||
52 | * Returns NULL if the device does not exist. | 52 | * Returns NULL if the device does not exist. |
53 | */ | 53 | */ |
54 | bool (*init)(struct intel_dvo_device *dvo, | 54 | bool (*init)(struct intel_dvo_device *dvo, |
55 | struct intel_i2c_chan *i2cbus); | 55 | struct i2c_adapter *i2cbus); |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * Called to allow the output a chance to create properties after the | 58 | * Called to allow the output a chance to create properties after the |
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 03d4b4973b02..621815b531db 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c | |||
@@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); | |||
176 | 176 | ||
177 | static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) | 177 | static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) |
178 | { | 178 | { |
179 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 179 | struct i2c_adapter *adapter = dvo->i2c_bus; |
180 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
180 | u8 out_buf[2]; | 181 | u8 out_buf[2]; |
181 | u8 in_buf[2]; | 182 | u8 in_buf[2]; |
182 | 183 | ||
183 | struct i2c_msg msgs[] = { | 184 | struct i2c_msg msgs[] = { |
184 | { | 185 | { |
185 | .addr = i2cbus->slave_addr, | 186 | .addr = dvo->slave_addr, |
186 | .flags = 0, | 187 | .flags = 0, |
187 | .len = 1, | 188 | .len = 1, |
188 | .buf = out_buf, | 189 | .buf = out_buf, |
189 | }, | 190 | }, |
190 | { | 191 | { |
191 | .addr = i2cbus->slave_addr, | 192 | .addr = dvo->slave_addr, |
192 | .flags = I2C_M_RD, | 193 | .flags = I2C_M_RD, |
193 | .len = 1, | 194 | .len = 1, |
194 | .buf = in_buf, | 195 | .buf = in_buf, |
@@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) | |||
208 | 209 | ||
209 | static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) | 210 | static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) |
210 | { | 211 | { |
211 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 212 | struct i2c_adapter *adapter = dvo->i2c_bus; |
213 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
212 | uint8_t out_buf[2]; | 214 | uint8_t out_buf[2]; |
213 | struct i2c_msg msg = { | 215 | struct i2c_msg msg = { |
214 | .addr = i2cbus->slave_addr, | 216 | .addr = dvo->slave_addr, |
215 | .flags = 0, | 217 | .flags = 0, |
216 | .len = 2, | 218 | .len = 2, |
217 | .buf = out_buf, | 219 | .buf = out_buf, |
@@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) | |||
228 | 230 | ||
229 | /** Probes for a CH7017 on the given bus and slave address. */ | 231 | /** Probes for a CH7017 on the given bus and slave address. */ |
230 | static bool ch7017_init(struct intel_dvo_device *dvo, | 232 | static bool ch7017_init(struct intel_dvo_device *dvo, |
231 | struct intel_i2c_chan *i2cbus) | 233 | struct i2c_adapter *adapter) |
232 | { | 234 | { |
235 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
233 | struct ch7017_priv *priv; | 236 | struct ch7017_priv *priv; |
234 | uint8_t val; | 237 | uint8_t val; |
235 | 238 | ||
@@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, | |||
237 | if (priv == NULL) | 240 | if (priv == NULL) |
238 | return false; | 241 | return false; |
239 | 242 | ||
240 | dvo->i2c_bus = i2cbus; | 243 | dvo->i2c_bus = adapter; |
241 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
242 | dvo->dev_priv = priv; | 244 | dvo->dev_priv = priv; |
243 | 245 | ||
244 | if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) | 246 | if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) |
@@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, | |||
248 | val != CH7018_DEVICE_ID_VALUE && | 250 | val != CH7018_DEVICE_ID_VALUE && |
249 | val != CH7019_DEVICE_ID_VALUE) { | 251 | val != CH7019_DEVICE_ID_VALUE) { |
250 | DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", | 252 | DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", |
251 | val, i2cbus->adapter.name,i2cbus->slave_addr); | 253 | val, i2cbus->adapter.name,dvo->slave_addr); |
252 | goto fail; | 254 | goto fail; |
253 | } | 255 | } |
254 | 256 | ||
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index d2fd95dbd034..a9b896289680 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c | |||
@@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid) | |||
123 | static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | 123 | static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) |
124 | { | 124 | { |
125 | struct ch7xxx_priv *ch7xxx= dvo->dev_priv; | 125 | struct ch7xxx_priv *ch7xxx= dvo->dev_priv; |
126 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 126 | struct i2c_adapter *adapter = dvo->i2c_bus; |
127 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
127 | u8 out_buf[2]; | 128 | u8 out_buf[2]; |
128 | u8 in_buf[2]; | 129 | u8 in_buf[2]; |
129 | 130 | ||
130 | struct i2c_msg msgs[] = { | 131 | struct i2c_msg msgs[] = { |
131 | { | 132 | { |
132 | .addr = i2cbus->slave_addr, | 133 | .addr = dvo->slave_addr, |
133 | .flags = 0, | 134 | .flags = 0, |
134 | .len = 1, | 135 | .len = 1, |
135 | .buf = out_buf, | 136 | .buf = out_buf, |
136 | }, | 137 | }, |
137 | { | 138 | { |
138 | .addr = i2cbus->slave_addr, | 139 | .addr = dvo->slave_addr, |
139 | .flags = I2C_M_RD, | 140 | .flags = I2C_M_RD, |
140 | .len = 1, | 141 | .len = 1, |
141 | .buf = in_buf, | 142 | .buf = in_buf, |
@@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
152 | 153 | ||
153 | if (!ch7xxx->quiet) { | 154 | if (!ch7xxx->quiet) { |
154 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 155 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", |
155 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 156 | addr, i2cbus->adapter.name, dvo->slave_addr); |
156 | } | 157 | } |
157 | return false; | 158 | return false; |
158 | } | 159 | } |
@@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
161 | static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | 162 | static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) |
162 | { | 163 | { |
163 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | 164 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; |
164 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 165 | struct i2c_adapter *adapter = dvo->i2c_bus; |
166 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
165 | uint8_t out_buf[2]; | 167 | uint8_t out_buf[2]; |
166 | struct i2c_msg msg = { | 168 | struct i2c_msg msg = { |
167 | .addr = i2cbus->slave_addr, | 169 | .addr = dvo->slave_addr, |
168 | .flags = 0, | 170 | .flags = 0, |
169 | .len = 2, | 171 | .len = 2, |
170 | .buf = out_buf, | 172 | .buf = out_buf, |
@@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | |||
178 | 180 | ||
179 | if (!ch7xxx->quiet) { | 181 | if (!ch7xxx->quiet) { |
180 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 182 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", |
181 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 183 | addr, i2cbus->adapter.name, dvo->slave_addr); |
182 | } | 184 | } |
183 | 185 | ||
184 | return false; | 186 | return false; |
185 | } | 187 | } |
186 | 188 | ||
187 | static bool ch7xxx_init(struct intel_dvo_device *dvo, | 189 | static bool ch7xxx_init(struct intel_dvo_device *dvo, |
188 | struct intel_i2c_chan *i2cbus) | 190 | struct i2c_adapter *adapter) |
189 | { | 191 | { |
190 | /* this will detect the CH7xxx chip on the specified i2c bus */ | 192 | /* this will detect the CH7xxx chip on the specified i2c bus */ |
191 | struct ch7xxx_priv *ch7xxx; | 193 | struct ch7xxx_priv *ch7xxx; |
@@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, | |||
196 | if (ch7xxx == NULL) | 198 | if (ch7xxx == NULL) |
197 | return false; | 199 | return false; |
198 | 200 | ||
199 | dvo->i2c_bus = i2cbus; | 201 | dvo->i2c_bus = adapter; |
200 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
201 | dvo->dev_priv = ch7xxx; | 202 | dvo->dev_priv = ch7xxx; |
202 | ch7xxx->quiet = true; | 203 | ch7xxx->quiet = true; |
203 | 204 | ||
@@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, | |||
207 | name = ch7xxx_get_id(vendor); | 208 | name = ch7xxx_get_id(vendor); |
208 | if (!name) { | 209 | if (!name) { |
209 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", | 210 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", |
210 | vendor, i2cbus->adapter.name, i2cbus->slave_addr); | 211 | vendor, adapter->name, dvo->slave_addr); |
211 | goto out; | 212 | goto out; |
212 | } | 213 | } |
213 | 214 | ||
@@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, | |||
217 | 218 | ||
218 | if (device != CH7xxx_DID) { | 219 | if (device != CH7xxx_DID) { |
219 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", | 220 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", |
220 | vendor, i2cbus->adapter.name, i2cbus->slave_addr); | 221 | vendor, adapter->name, dvo->slave_addr); |
221 | goto out; | 222 | goto out; |
222 | } | 223 | } |
223 | 224 | ||
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index 0c8d375e8e37..aa176f9921fe 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c | |||
@@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo); | |||
169 | static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) | 169 | static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) |
170 | { | 170 | { |
171 | struct ivch_priv *priv = dvo->dev_priv; | 171 | struct ivch_priv *priv = dvo->dev_priv; |
172 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 172 | struct i2c_adapter *adapter = dvo->i2c_bus; |
173 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
173 | u8 out_buf[1]; | 174 | u8 out_buf[1]; |
174 | u8 in_buf[2]; | 175 | u8 in_buf[2]; |
175 | 176 | ||
176 | struct i2c_msg msgs[] = { | 177 | struct i2c_msg msgs[] = { |
177 | { | 178 | { |
178 | .addr = i2cbus->slave_addr, | 179 | .addr = dvo->slave_addr, |
179 | .flags = I2C_M_RD, | 180 | .flags = I2C_M_RD, |
180 | .len = 0, | 181 | .len = 0, |
181 | }, | 182 | }, |
@@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) | |||
186 | .buf = out_buf, | 187 | .buf = out_buf, |
187 | }, | 188 | }, |
188 | { | 189 | { |
189 | .addr = i2cbus->slave_addr, | 190 | .addr = dvo->slave_addr, |
190 | .flags = I2C_M_RD | I2C_M_NOSTART, | 191 | .flags = I2C_M_RD | I2C_M_NOSTART, |
191 | .len = 2, | 192 | .len = 2, |
192 | .buf = in_buf, | 193 | .buf = in_buf, |
@@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) | |||
202 | 203 | ||
203 | if (!priv->quiet) { | 204 | if (!priv->quiet) { |
204 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 205 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", |
205 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 206 | addr, i2cbus->adapter.name, dvo->slave_addr); |
206 | } | 207 | } |
207 | return false; | 208 | return false; |
208 | } | 209 | } |
@@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) | |||
211 | static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) | 212 | static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) |
212 | { | 213 | { |
213 | struct ivch_priv *priv = dvo->dev_priv; | 214 | struct ivch_priv *priv = dvo->dev_priv; |
214 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 215 | struct i2c_adapter *adapter = dvo->i2c_bus; |
216 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
215 | u8 out_buf[3]; | 217 | u8 out_buf[3]; |
216 | struct i2c_msg msg = { | 218 | struct i2c_msg msg = { |
217 | .addr = i2cbus->slave_addr, | 219 | .addr = dvo->slave_addr, |
218 | .flags = 0, | 220 | .flags = 0, |
219 | .len = 3, | 221 | .len = 3, |
220 | .buf = out_buf, | 222 | .buf = out_buf, |
@@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) | |||
229 | 231 | ||
230 | if (!priv->quiet) { | 232 | if (!priv->quiet) { |
231 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 233 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", |
232 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 234 | addr, i2cbus->adapter.name, dvo->slave_addr); |
233 | } | 235 | } |
234 | 236 | ||
235 | return false; | 237 | return false; |
@@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) | |||
237 | 239 | ||
238 | /** Probes the given bus and slave address for an ivch */ | 240 | /** Probes the given bus and slave address for an ivch */ |
239 | static bool ivch_init(struct intel_dvo_device *dvo, | 241 | static bool ivch_init(struct intel_dvo_device *dvo, |
240 | struct intel_i2c_chan *i2cbus) | 242 | struct i2c_adapter *adapter) |
241 | { | 243 | { |
242 | struct ivch_priv *priv; | 244 | struct ivch_priv *priv; |
243 | uint16_t temp; | 245 | uint16_t temp; |
@@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, | |||
246 | if (priv == NULL) | 248 | if (priv == NULL) |
247 | return false; | 249 | return false; |
248 | 250 | ||
249 | dvo->i2c_bus = i2cbus; | 251 | dvo->i2c_bus = adapter; |
250 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
251 | dvo->dev_priv = priv; | 252 | dvo->dev_priv = priv; |
252 | priv->quiet = true; | 253 | priv->quiet = true; |
253 | 254 | ||
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 033a4bb070b2..e1c1f7341e5c 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c | |||
@@ -76,19 +76,20 @@ struct sil164_priv { | |||
76 | static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | 76 | static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) |
77 | { | 77 | { |
78 | struct sil164_priv *sil = dvo->dev_priv; | 78 | struct sil164_priv *sil = dvo->dev_priv; |
79 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 79 | struct i2c_adapter *adapter = dvo->i2c_bus; |
80 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
80 | u8 out_buf[2]; | 81 | u8 out_buf[2]; |
81 | u8 in_buf[2]; | 82 | u8 in_buf[2]; |
82 | 83 | ||
83 | struct i2c_msg msgs[] = { | 84 | struct i2c_msg msgs[] = { |
84 | { | 85 | { |
85 | .addr = i2cbus->slave_addr, | 86 | .addr = dvo->slave_addr, |
86 | .flags = 0, | 87 | .flags = 0, |
87 | .len = 1, | 88 | .len = 1, |
88 | .buf = out_buf, | 89 | .buf = out_buf, |
89 | }, | 90 | }, |
90 | { | 91 | { |
91 | .addr = i2cbus->slave_addr, | 92 | .addr = dvo->slave_addr, |
92 | .flags = I2C_M_RD, | 93 | .flags = I2C_M_RD, |
93 | .len = 1, | 94 | .len = 1, |
94 | .buf = in_buf, | 95 | .buf = in_buf, |
@@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
105 | 106 | ||
106 | if (!sil->quiet) { | 107 | if (!sil->quiet) { |
107 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 108 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", |
108 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 109 | addr, i2cbus->adapter.name, dvo->slave_addr); |
109 | } | 110 | } |
110 | return false; | 111 | return false; |
111 | } | 112 | } |
@@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
113 | static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | 114 | static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) |
114 | { | 115 | { |
115 | struct sil164_priv *sil= dvo->dev_priv; | 116 | struct sil164_priv *sil= dvo->dev_priv; |
116 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 117 | struct i2c_adapter *adapter = dvo->i2c_bus; |
118 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
117 | uint8_t out_buf[2]; | 119 | uint8_t out_buf[2]; |
118 | struct i2c_msg msg = { | 120 | struct i2c_msg msg = { |
119 | .addr = i2cbus->slave_addr, | 121 | .addr = dvo->slave_addr, |
120 | .flags = 0, | 122 | .flags = 0, |
121 | .len = 2, | 123 | .len = 2, |
122 | .buf = out_buf, | 124 | .buf = out_buf, |
@@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | |||
130 | 132 | ||
131 | if (!sil->quiet) { | 133 | if (!sil->quiet) { |
132 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 134 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", |
133 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 135 | addr, i2cbus->adapter.name, dvo->slave_addr); |
134 | } | 136 | } |
135 | 137 | ||
136 | return false; | 138 | return false; |
@@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | |||
138 | 140 | ||
139 | /* Silicon Image 164 driver for chip on i2c bus */ | 141 | /* Silicon Image 164 driver for chip on i2c bus */ |
140 | static bool sil164_init(struct intel_dvo_device *dvo, | 142 | static bool sil164_init(struct intel_dvo_device *dvo, |
141 | struct intel_i2c_chan *i2cbus) | 143 | struct i2c_adapter *adapter) |
142 | { | 144 | { |
143 | /* this will detect the SIL164 chip on the specified i2c bus */ | 145 | /* this will detect the SIL164 chip on the specified i2c bus */ |
144 | struct sil164_priv *sil; | 146 | struct sil164_priv *sil; |
@@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, | |||
148 | if (sil == NULL) | 150 | if (sil == NULL) |
149 | return false; | 151 | return false; |
150 | 152 | ||
151 | dvo->i2c_bus = i2cbus; | 153 | dvo->i2c_bus = adapter; |
152 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
153 | dvo->dev_priv = sil; | 154 | dvo->dev_priv = sil; |
154 | sil->quiet = true; | 155 | sil->quiet = true; |
155 | 156 | ||
@@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, | |||
158 | 159 | ||
159 | if (ch != (SIL164_VID & 0xff)) { | 160 | if (ch != (SIL164_VID & 0xff)) { |
160 | DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", | 161 | DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", |
161 | ch, i2cbus->adapter.name, i2cbus->slave_addr); | 162 | ch, adapter->name, dvo->slave_addr); |
162 | goto out; | 163 | goto out; |
163 | } | 164 | } |
164 | 165 | ||
@@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, | |||
167 | 168 | ||
168 | if (ch != (SIL164_DID & 0xff)) { | 169 | if (ch != (SIL164_DID & 0xff)) { |
169 | DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", | 170 | DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", |
170 | ch, i2cbus->adapter.name, i2cbus->slave_addr); | 171 | ch, adapter->name, dvo->slave_addr); |
171 | goto out; | 172 | goto out; |
172 | } | 173 | } |
173 | sil->quiet = false; | 174 | sil->quiet = false; |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 207fda806ebf..9ecc907384ec 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c | |||
@@ -101,19 +101,20 @@ struct tfp410_priv { | |||
101 | static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | 101 | static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) |
102 | { | 102 | { |
103 | struct tfp410_priv *tfp = dvo->dev_priv; | 103 | struct tfp410_priv *tfp = dvo->dev_priv; |
104 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 104 | struct i2c_adapter *adapter = dvo->i2c_bus; |
105 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
105 | u8 out_buf[2]; | 106 | u8 out_buf[2]; |
106 | u8 in_buf[2]; | 107 | u8 in_buf[2]; |
107 | 108 | ||
108 | struct i2c_msg msgs[] = { | 109 | struct i2c_msg msgs[] = { |
109 | { | 110 | { |
110 | .addr = i2cbus->slave_addr, | 111 | .addr = dvo->slave_addr, |
111 | .flags = 0, | 112 | .flags = 0, |
112 | .len = 1, | 113 | .len = 1, |
113 | .buf = out_buf, | 114 | .buf = out_buf, |
114 | }, | 115 | }, |
115 | { | 116 | { |
116 | .addr = i2cbus->slave_addr, | 117 | .addr = dvo->slave_addr, |
117 | .flags = I2C_M_RD, | 118 | .flags = I2C_M_RD, |
118 | .len = 1, | 119 | .len = 1, |
119 | .buf = in_buf, | 120 | .buf = in_buf, |
@@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
130 | 131 | ||
131 | if (!tfp->quiet) { | 132 | if (!tfp->quiet) { |
132 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | 133 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", |
133 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 134 | addr, i2cbus->adapter.name, dvo->slave_addr); |
134 | } | 135 | } |
135 | return false; | 136 | return false; |
136 | } | 137 | } |
@@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | |||
138 | static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | 139 | static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) |
139 | { | 140 | { |
140 | struct tfp410_priv *tfp = dvo->dev_priv; | 141 | struct tfp410_priv *tfp = dvo->dev_priv; |
141 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | 142 | struct i2c_adapter *adapter = dvo->i2c_bus; |
143 | struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); | ||
142 | uint8_t out_buf[2]; | 144 | uint8_t out_buf[2]; |
143 | struct i2c_msg msg = { | 145 | struct i2c_msg msg = { |
144 | .addr = i2cbus->slave_addr, | 146 | .addr = dvo->slave_addr, |
145 | .flags = 0, | 147 | .flags = 0, |
146 | .len = 2, | 148 | .len = 2, |
147 | .buf = out_buf, | 149 | .buf = out_buf, |
@@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | |||
155 | 157 | ||
156 | if (!tfp->quiet) { | 158 | if (!tfp->quiet) { |
157 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | 159 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", |
158 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | 160 | addr, i2cbus->adapter.name, dvo->slave_addr); |
159 | } | 161 | } |
160 | 162 | ||
161 | return false; | 163 | return false; |
@@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr) | |||
174 | 176 | ||
175 | /* Ti TFP410 driver for chip on i2c bus */ | 177 | /* Ti TFP410 driver for chip on i2c bus */ |
176 | static bool tfp410_init(struct intel_dvo_device *dvo, | 178 | static bool tfp410_init(struct intel_dvo_device *dvo, |
177 | struct intel_i2c_chan *i2cbus) | 179 | struct i2c_adapter *adapter) |
178 | { | 180 | { |
179 | /* this will detect the tfp410 chip on the specified i2c bus */ | 181 | /* this will detect the tfp410 chip on the specified i2c bus */ |
180 | struct tfp410_priv *tfp; | 182 | struct tfp410_priv *tfp; |
@@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo, | |||
184 | if (tfp == NULL) | 186 | if (tfp == NULL) |
185 | return false; | 187 | return false; |
186 | 188 | ||
187 | dvo->i2c_bus = i2cbus; | 189 | dvo->i2c_bus = adapter; |
188 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
189 | dvo->dev_priv = tfp; | 190 | dvo->dev_priv = tfp; |
190 | tfp->quiet = true; | 191 | tfp->quiet = true; |
191 | 192 | ||
192 | if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { | 193 | if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { |
193 | DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", | 194 | DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", |
194 | id, i2cbus->adapter.name, i2cbus->slave_addr); | 195 | id, adapter->name, dvo->slave_addr); |
195 | goto out; | 196 | goto out; |
196 | } | 197 | } |
197 | 198 | ||
198 | if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { | 199 | if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { |
199 | DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", | 200 | DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", |
200 | id, i2cbus->adapter.name, i2cbus->slave_addr); | 201 | id, adapter->name, dvo->slave_addr); |
201 | goto out; | 202 | goto out; |
202 | } | 203 | } |
203 | tfp->quiet = false; | 204 | tfp->quiet = false; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 98560e1e899a..e3cb4025e323 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -67,8 +67,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
67 | 67 | ||
68 | pci_save_state(dev->pdev); | 68 | pci_save_state(dev->pdev); |
69 | 69 | ||
70 | i915_save_state(dev); | ||
71 | |||
72 | /* If KMS is active, we do the leavevt stuff here */ | 70 | /* If KMS is active, we do the leavevt stuff here */ |
73 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 71 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
74 | if (i915_gem_idle(dev)) | 72 | if (i915_gem_idle(dev)) |
@@ -77,6 +75,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
77 | drm_irq_uninstall(dev); | 75 | drm_irq_uninstall(dev); |
78 | } | 76 | } |
79 | 77 | ||
78 | i915_save_state(dev); | ||
79 | |||
80 | intel_opregion_free(dev, 1); | 80 | intel_opregion_free(dev, 1); |
81 | 81 | ||
82 | if (state.event == PM_EVENT_SUSPEND) { | 82 | if (state.event == PM_EVENT_SUSPEND) { |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7a84f04e8439..bb4c2d387b6c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -306,6 +306,17 @@ typedef struct drm_i915_private { | |||
306 | u32 saveCURBPOS; | 306 | u32 saveCURBPOS; |
307 | u32 saveCURBBASE; | 307 | u32 saveCURBBASE; |
308 | u32 saveCURSIZE; | 308 | u32 saveCURSIZE; |
309 | u32 saveDP_B; | ||
310 | u32 saveDP_C; | ||
311 | u32 saveDP_D; | ||
312 | u32 savePIPEA_GMCH_DATA_M; | ||
313 | u32 savePIPEB_GMCH_DATA_M; | ||
314 | u32 savePIPEA_GMCH_DATA_N; | ||
315 | u32 savePIPEB_GMCH_DATA_N; | ||
316 | u32 savePIPEA_DP_LINK_M; | ||
317 | u32 savePIPEB_DP_LINK_M; | ||
318 | u32 savePIPEA_DP_LINK_N; | ||
319 | u32 savePIPEB_DP_LINK_N; | ||
309 | 320 | ||
310 | struct { | 321 | struct { |
311 | struct drm_mm gtt_space; | 322 | struct drm_mm gtt_space; |
@@ -857,6 +868,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
857 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ | 868 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ |
858 | IS_I915GM(dev))) | 869 | IS_I915GM(dev))) |
859 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 870 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
871 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | ||
860 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) | 872 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) |
861 | 873 | ||
862 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 874 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fd2b8bdffe3f..876b65cb7629 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1006,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1006 | 1006 | ||
1007 | mutex_lock(&dev->struct_mutex); | 1007 | mutex_lock(&dev->struct_mutex); |
1008 | #if WATCH_BUF | 1008 | #if WATCH_BUF |
1009 | DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", | 1009 | DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", |
1010 | obj, obj->size, read_domains, write_domain); | 1010 | obj, obj->size, read_domains, write_domain); |
1011 | #endif | 1011 | #endif |
1012 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 1012 | if (read_domains & I915_GEM_DOMAIN_GTT) { |
@@ -1050,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | #if WATCH_BUF | 1052 | #if WATCH_BUF |
1053 | DRM_INFO("%s: sw_finish %d (%p %d)\n", | 1053 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", |
1054 | __func__, args->handle, obj, obj->size); | 1054 | __func__, args->handle, obj, obj->size); |
1055 | #endif | 1055 | #endif |
1056 | obj_priv = obj->driver_private; | 1056 | obj_priv = obj->driver_private; |
@@ -2423,7 +2423,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2423 | } | 2423 | } |
2424 | 2424 | ||
2425 | #if WATCH_BUF | 2425 | #if WATCH_BUF |
2426 | DRM_INFO("Binding object of size %d at 0x%08x\n", | 2426 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2427 | obj->size, obj_priv->gtt_offset); | 2427 | obj->size, obj_priv->gtt_offset); |
2428 | #endif | 2428 | #endif |
2429 | ret = i915_gem_object_get_pages(obj); | 2429 | ret = i915_gem_object_get_pages(obj); |
@@ -4227,6 +4227,7 @@ i915_gem_lastclose(struct drm_device *dev) | |||
4227 | void | 4227 | void |
4228 | i915_gem_load(struct drm_device *dev) | 4228 | i915_gem_load(struct drm_device *dev) |
4229 | { | 4229 | { |
4230 | int i; | ||
4230 | drm_i915_private_t *dev_priv = dev->dev_private; | 4231 | drm_i915_private_t *dev_priv = dev->dev_private; |
4231 | 4232 | ||
4232 | spin_lock_init(&dev_priv->mm.active_list_lock); | 4233 | spin_lock_init(&dev_priv->mm.active_list_lock); |
@@ -4246,6 +4247,18 @@ i915_gem_load(struct drm_device *dev) | |||
4246 | else | 4247 | else |
4247 | dev_priv->num_fence_regs = 8; | 4248 | dev_priv->num_fence_regs = 8; |
4248 | 4249 | ||
4250 | /* Initialize fence registers to zero */ | ||
4251 | if (IS_I965G(dev)) { | ||
4252 | for (i = 0; i < 16; i++) | ||
4253 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); | ||
4254 | } else { | ||
4255 | for (i = 0; i < 8; i++) | ||
4256 | I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); | ||
4257 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
4258 | for (i = 0; i < 8; i++) | ||
4259 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | ||
4260 | } | ||
4261 | |||
4249 | i915_gem_detect_bit_6_swizzle(dev); | 4262 | i915_gem_detect_bit_6_swizzle(dev); |
4250 | } | 4263 | } |
4251 | 4264 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 8d0b943e2c5a..e602614bd3f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, | |||
87 | chunk_len = page_len - chunk; | 87 | chunk_len = page_len - chunk; |
88 | if (chunk_len > 128) | 88 | if (chunk_len > 128) |
89 | chunk_len = 128; | 89 | chunk_len = 128; |
90 | i915_gem_dump_page(obj_priv->page_list[page], | 90 | i915_gem_dump_page(obj_priv->pages[page], |
91 | chunk, chunk + chunk_len, | 91 | chunk, chunk + chunk_len, |
92 | obj_priv->gtt_offset + | 92 | obj_priv->gtt_offset + |
93 | page * PAGE_SIZE, | 93 | page * PAGE_SIZE, |
@@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
143 | uint32_t *backing_map = NULL; | 143 | uint32_t *backing_map = NULL; |
144 | int bad_count = 0; | 144 | int bad_count = 0; |
145 | 145 | ||
146 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", | 146 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", |
147 | __func__, obj, obj_priv->gtt_offset, handle, | 147 | __func__, obj, obj_priv->gtt_offset, handle, |
148 | obj->size / 1024); | 148 | obj->size / 1024); |
149 | 149 | ||
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { | 157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { |
158 | int i; | 158 | int i; |
159 | 159 | ||
160 | backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); | 160 | backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); |
161 | 161 | ||
162 | if (backing_map == NULL) { | 162 | if (backing_map == NULL) { |
163 | DRM_ERROR("failed to map backing page\n"); | 163 | DRM_ERROR("failed to map backing page\n"); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 5c1ceec49f5b..daeae62e1c28 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
114 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | 114 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; |
115 | 115 | ||
116 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | 116 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ |
117 | #ifdef CONFIG_PNP | ||
117 | if (mchbar_addr && | 118 | if (mchbar_addr && |
118 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { | 119 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { |
119 | ret = 0; | 120 | ret = 0; |
120 | goto out_put; | 121 | goto out_put; |
121 | } | 122 | } |
123 | #endif | ||
122 | 124 | ||
123 | /* Get some space for it */ | 125 | /* Get some space for it */ |
124 | ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, | 126 | ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b86b7b7130c6..228546f6eaa4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -232,7 +232,17 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
232 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 232 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
233 | hotplug_work); | 233 | hotplug_work); |
234 | struct drm_device *dev = dev_priv->dev; | 234 | struct drm_device *dev = dev_priv->dev; |
235 | 235 | struct drm_mode_config *mode_config = &dev->mode_config; | |
236 | struct drm_connector *connector; | ||
237 | |||
238 | if (mode_config->num_connector) { | ||
239 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
240 | struct intel_output *intel_output = to_intel_output(connector); | ||
241 | |||
242 | if (intel_output->hot_plug) | ||
243 | (*intel_output->hot_plug) (intel_output); | ||
244 | } | ||
245 | } | ||
236 | /* Just fire off a uevent and let userspace tell us what to do */ | 246 | /* Just fire off a uevent and let userspace tell us what to do */ |
237 | drm_sysfs_hotplug_event(dev); | 247 | drm_sysfs_hotplug_event(dev); |
238 | } | 248 | } |
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index dc425e74a268..e4b4e8898e39 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -419,7 +419,7 @@ void intel_opregion_free(struct drm_device *dev, int suspend) | |||
419 | return; | 419 | return; |
420 | 420 | ||
421 | if (!suspend) | 421 | if (!suspend) |
422 | acpi_video_exit(); | 422 | acpi_video_unregister(); |
423 | 423 | ||
424 | opregion->acpi->drdy = 0; | 424 | opregion->acpi->drdy = 0; |
425 | 425 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f6237a0b1133..88bf7521405f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -569,6 +569,19 @@ | |||
569 | #define C0DRB3 0x10206 | 569 | #define C0DRB3 0x10206 |
570 | #define C1DRB3 0x10606 | 570 | #define C1DRB3 0x10606 |
571 | 571 | ||
572 | /* Clocking configuration register */ | ||
573 | #define CLKCFG 0x10c00 | ||
574 | #define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */ | ||
575 | #define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ | ||
576 | #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ | ||
577 | #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ | ||
578 | #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ | ||
579 | #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ | ||
580 | /* this is a guess, could be 5 as well */ | ||
581 | #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ | ||
582 | #define CLKCFG_FSB_1600_ALT (5 << 0) /* hrawclk 400 */ | ||
583 | #define CLKCFG_FSB_MASK (7 << 0) | ||
584 | |||
572 | /** GM965 GM45 render standby register */ | 585 | /** GM965 GM45 render standby register */ |
573 | #define MCHBAR_RENDER_STANDBY 0x111B8 | 586 | #define MCHBAR_RENDER_STANDBY 0x111B8 |
574 | 587 | ||
@@ -834,9 +847,25 @@ | |||
834 | #define HORIZ_INTERP_MASK (3 << 6) | 847 | #define HORIZ_INTERP_MASK (3 << 6) |
835 | #define HORIZ_AUTO_SCALE (1 << 5) | 848 | #define HORIZ_AUTO_SCALE (1 << 5) |
836 | #define PANEL_8TO6_DITHER_ENABLE (1 << 3) | 849 | #define PANEL_8TO6_DITHER_ENABLE (1 << 3) |
850 | #define PFIT_FILTER_FUZZY (0 << 24) | ||
851 | #define PFIT_SCALING_AUTO (0 << 26) | ||
852 | #define PFIT_SCALING_PROGRAMMED (1 << 26) | ||
853 | #define PFIT_SCALING_PILLAR (2 << 26) | ||
854 | #define PFIT_SCALING_LETTER (3 << 26) | ||
837 | #define PFIT_PGM_RATIOS 0x61234 | 855 | #define PFIT_PGM_RATIOS 0x61234 |
838 | #define PFIT_VERT_SCALE_MASK 0xfff00000 | 856 | #define PFIT_VERT_SCALE_MASK 0xfff00000 |
839 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 | 857 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 |
858 | /* Pre-965 */ | ||
859 | #define PFIT_VERT_SCALE_SHIFT 20 | ||
860 | #define PFIT_VERT_SCALE_MASK 0xfff00000 | ||
861 | #define PFIT_HORIZ_SCALE_SHIFT 4 | ||
862 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 | ||
863 | /* 965+ */ | ||
864 | #define PFIT_VERT_SCALE_SHIFT_965 16 | ||
865 | #define PFIT_VERT_SCALE_MASK_965 0x1fff0000 | ||
866 | #define PFIT_HORIZ_SCALE_SHIFT_965 0 | ||
867 | #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff | ||
868 | |||
840 | #define PFIT_AUTO_RATIOS 0x61238 | 869 | #define PFIT_AUTO_RATIOS 0x61238 |
841 | 870 | ||
842 | /* Backlight control */ | 871 | /* Backlight control */ |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a98e2831ed31..8d8e083d14ab 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -322,6 +322,20 @@ int i915_save_state(struct drm_device *dev) | |||
322 | dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); | 322 | dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); |
323 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); | 323 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); |
324 | 324 | ||
325 | /* Display Port state */ | ||
326 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
327 | dev_priv->saveDP_B = I915_READ(DP_B); | ||
328 | dev_priv->saveDP_C = I915_READ(DP_C); | ||
329 | dev_priv->saveDP_D = I915_READ(DP_D); | ||
330 | dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M); | ||
331 | dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M); | ||
332 | dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N); | ||
333 | dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N); | ||
334 | dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M); | ||
335 | dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M); | ||
336 | dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N); | ||
337 | dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N); | ||
338 | } | ||
325 | /* FIXME: save TV & SDVO state */ | 339 | /* FIXME: save TV & SDVO state */ |
326 | 340 | ||
327 | /* FBC state */ | 341 | /* FBC state */ |
@@ -404,7 +418,19 @@ int i915_restore_state(struct drm_device *dev) | |||
404 | for (i = 0; i < 8; i++) | 418 | for (i = 0; i < 8; i++) |
405 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | 419 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); |
406 | } | 420 | } |
407 | 421 | ||
422 | /* Display port ratios (must be done before clock is set) */ | ||
423 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
424 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); | ||
425 | I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); | ||
426 | I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); | ||
427 | I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); | ||
428 | I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); | ||
429 | I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); | ||
430 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); | ||
431 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); | ||
432 | } | ||
433 | |||
408 | /* Pipe & plane A info */ | 434 | /* Pipe & plane A info */ |
409 | /* Prime the clock */ | 435 | /* Prime the clock */ |
410 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { | 436 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { |
@@ -518,6 +544,12 @@ int i915_restore_state(struct drm_device *dev) | |||
518 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); | 544 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); |
519 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); | 545 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); |
520 | 546 | ||
547 | /* Display Port state */ | ||
548 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
549 | I915_WRITE(DP_B, dev_priv->saveDP_B); | ||
550 | I915_WRITE(DP_C, dev_priv->saveDP_C); | ||
551 | I915_WRITE(DP_D, dev_priv->saveDP_D); | ||
552 | } | ||
521 | /* FIXME: restore TV & SDVO state */ | 553 | /* FIXME: restore TV & SDVO state */ |
522 | 554 | ||
523 | /* FBC info */ | 555 | /* FBC info */ |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index cdd126d068a7..716409a57244 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -99,9 +99,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
99 | { | 99 | { |
100 | struct bdb_lvds_options *lvds_options; | 100 | struct bdb_lvds_options *lvds_options; |
101 | struct bdb_lvds_lfp_data *lvds_lfp_data; | 101 | struct bdb_lvds_lfp_data *lvds_lfp_data; |
102 | struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; | ||
102 | struct bdb_lvds_lfp_data_entry *entry; | 103 | struct bdb_lvds_lfp_data_entry *entry; |
103 | struct lvds_dvo_timing *dvo_timing; | 104 | struct lvds_dvo_timing *dvo_timing; |
104 | struct drm_display_mode *panel_fixed_mode; | 105 | struct drm_display_mode *panel_fixed_mode; |
106 | int lfp_data_size; | ||
105 | 107 | ||
106 | /* Defaults if we can't find VBT info */ | 108 | /* Defaults if we can't find VBT info */ |
107 | dev_priv->lvds_dither = 0; | 109 | dev_priv->lvds_dither = 0; |
@@ -119,9 +121,17 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
119 | if (!lvds_lfp_data) | 121 | if (!lvds_lfp_data) |
120 | return; | 122 | return; |
121 | 123 | ||
124 | lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); | ||
125 | if (!lvds_lfp_data_ptrs) | ||
126 | return; | ||
127 | |||
122 | dev_priv->lvds_vbt = 1; | 128 | dev_priv->lvds_vbt = 1; |
123 | 129 | ||
124 | entry = &lvds_lfp_data->data[lvds_options->panel_type]; | 130 | lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - |
131 | lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; | ||
132 | entry = (struct bdb_lvds_lfp_data_entry *) | ||
133 | ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * | ||
134 | lvds_options->panel_type)); | ||
125 | dvo_timing = &entry->dvo_timing; | 135 | dvo_timing = &entry->dvo_timing; |
126 | 136 | ||
127 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 137 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
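[Editor's note] The intel_bios.c change above derives the size of each LFP data entry from the spacing between consecutive dvo_timing offsets in the pointer block, apparently so that VBTs whose entries are larger than struct bdb_lvds_lfp_data_entry still index the right panel. As a purely hypothetical worked example: if ptr[1].dvo_timing_offset - ptr[0].dvo_timing_offset came out to 51 bytes and panel_type were 2, the entry would be read at data + 2 * 51 = data + 102 rather than at data + 2 * sizeof(struct bdb_lvds_lfp_data_entry); the numbers are illustrative only, not taken from the patch.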
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3e1c78162119..73e7b9cecac8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "intel_drv.h" | 29 | #include "intel_drv.h" |
30 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "intel_dp.h" | ||
32 | 33 | ||
33 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
34 | 35 | ||
@@ -127,19 +128,6 @@ struct intel_limit { | |||
127 | #define I9XX_P2_LVDS_FAST 7 | 128 | #define I9XX_P2_LVDS_FAST 7 |
128 | #define I9XX_P2_LVDS_SLOW_LIMIT 112000 | 129 | #define I9XX_P2_LVDS_SLOW_LIMIT 112000 |
129 | 130 | ||
130 | #define INTEL_LIMIT_I8XX_DVO_DAC 0 | ||
131 | #define INTEL_LIMIT_I8XX_LVDS 1 | ||
132 | #define INTEL_LIMIT_I9XX_SDVO_DAC 2 | ||
133 | #define INTEL_LIMIT_I9XX_LVDS 3 | ||
134 | #define INTEL_LIMIT_G4X_SDVO 4 | ||
135 | #define INTEL_LIMIT_G4X_HDMI_DAC 5 | ||
136 | #define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 | ||
137 | #define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 | ||
138 | #define INTEL_LIMIT_IGD_SDVO_DAC 8 | ||
139 | #define INTEL_LIMIT_IGD_LVDS 9 | ||
140 | #define INTEL_LIMIT_IGDNG_SDVO_DAC 10 | ||
141 | #define INTEL_LIMIT_IGDNG_LVDS 11 | ||
142 | |||
143 | /*The parameter is for SDVO on G4x platform*/ | 131 | /*The parameter is for SDVO on G4x platform*/ |
144 | #define G4X_DOT_SDVO_MIN 25000 | 132 | #define G4X_DOT_SDVO_MIN 25000 |
145 | #define G4X_DOT_SDVO_MAX 270000 | 133 | #define G4X_DOT_SDVO_MAX 270000 |
@@ -218,6 +206,25 @@ struct intel_limit { | |||
218 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 | 206 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 |
219 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 | 207 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 |
220 | 208 | ||
209 | /*The parameter is for DISPLAY PORT on G4x platform*/ | ||
210 | #define G4X_DOT_DISPLAY_PORT_MIN 161670 | ||
211 | #define G4X_DOT_DISPLAY_PORT_MAX 227000 | ||
212 | #define G4X_N_DISPLAY_PORT_MIN 1 | ||
213 | #define G4X_N_DISPLAY_PORT_MAX 2 | ||
214 | #define G4X_M_DISPLAY_PORT_MIN 97 | ||
215 | #define G4X_M_DISPLAY_PORT_MAX 108 | ||
216 | #define G4X_M1_DISPLAY_PORT_MIN 0x10 | ||
217 | #define G4X_M1_DISPLAY_PORT_MAX 0x12 | ||
218 | #define G4X_M2_DISPLAY_PORT_MIN 0x05 | ||
219 | #define G4X_M2_DISPLAY_PORT_MAX 0x06 | ||
220 | #define G4X_P_DISPLAY_PORT_MIN 10 | ||
221 | #define G4X_P_DISPLAY_PORT_MAX 20 | ||
222 | #define G4X_P1_DISPLAY_PORT_MIN 1 | ||
223 | #define G4X_P1_DISPLAY_PORT_MAX 2 | ||
224 | #define G4X_P2_DISPLAY_PORT_SLOW 10 | ||
225 | #define G4X_P2_DISPLAY_PORT_FAST 10 | ||
226 | #define G4X_P2_DISPLAY_PORT_LIMIT 0 | ||
227 | |||
221 | /* IGDNG */ | 228 | /* IGDNG */ |
222 | /* as we calculate clock using (register_value + 2) for | 229 | /* as we calculate clock using (register_value + 2) for |
223 | N/M1/M2, so here the range value for them is (actual_value-2). | 230 | N/M1/M2, so here the range value for them is (actual_value-2). |
@@ -256,8 +263,11 @@ static bool | |||
256 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 263 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
257 | int target, int refclk, intel_clock_t *best_clock); | 264 | int target, int refclk, intel_clock_t *best_clock); |
258 | 265 | ||
259 | static const intel_limit_t intel_limits[] = { | 266 | static bool |
260 | { /* INTEL_LIMIT_I8XX_DVO_DAC */ | 267 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
268 | int target, int refclk, intel_clock_t *best_clock); | ||
269 | |||
270 | static const intel_limit_t intel_limits_i8xx_dvo = { | ||
261 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 271 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
262 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 272 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, |
263 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 273 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, |
@@ -269,8 +279,9 @@ static const intel_limit_t intel_limits[] = { | |||
269 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 279 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
270 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 280 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, |
271 | .find_pll = intel_find_best_PLL, | 281 | .find_pll = intel_find_best_PLL, |
272 | }, | 282 | }; |
273 | { /* INTEL_LIMIT_I8XX_LVDS */ | 283 | |
284 | static const intel_limit_t intel_limits_i8xx_lvds = { | ||
274 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 285 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
275 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 286 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, |
276 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 287 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, |
@@ -282,8 +293,9 @@ static const intel_limit_t intel_limits[] = { | |||
282 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 293 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
283 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 294 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, |
284 | .find_pll = intel_find_best_PLL, | 295 | .find_pll = intel_find_best_PLL, |
285 | }, | 296 | }; |
286 | { /* INTEL_LIMIT_I9XX_SDVO_DAC */ | 297 | |
298 | static const intel_limit_t intel_limits_i9xx_sdvo = { | ||
287 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 299 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
288 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 300 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, |
289 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 301 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, |
@@ -295,8 +307,9 @@ static const intel_limit_t intel_limits[] = { | |||
295 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 307 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
296 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 308 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
297 | .find_pll = intel_find_best_PLL, | 309 | .find_pll = intel_find_best_PLL, |
298 | }, | 310 | }; |
299 | { /* INTEL_LIMIT_I9XX_LVDS */ | 311 | |
312 | static const intel_limit_t intel_limits_i9xx_lvds = { | ||
300 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 313 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
301 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 314 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, |
302 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 315 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, |
@@ -311,9 +324,10 @@ static const intel_limit_t intel_limits[] = { | |||
311 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 324 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
312 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | 325 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, |
313 | .find_pll = intel_find_best_PLL, | 326 | .find_pll = intel_find_best_PLL, |
314 | }, | 327 | }; |
328 | |||
315 | /* below parameter and function is for G4X Chipset Family*/ | 329 | /* below parameter and function is for G4X Chipset Family*/ |
316 | { /* INTEL_LIMIT_G4X_SDVO */ | 330 | static const intel_limit_t intel_limits_g4x_sdvo = { |
317 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, | 331 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, |
318 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 332 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, |
319 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, | 333 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, |
@@ -327,8 +341,9 @@ static const intel_limit_t intel_limits[] = { | |||
327 | .p2_fast = G4X_P2_SDVO_FAST | 341 | .p2_fast = G4X_P2_SDVO_FAST |
328 | }, | 342 | }, |
329 | .find_pll = intel_g4x_find_best_PLL, | 343 | .find_pll = intel_g4x_find_best_PLL, |
330 | }, | 344 | }; |
331 | { /* INTEL_LIMIT_G4X_HDMI_DAC */ | 345 | |
346 | static const intel_limit_t intel_limits_g4x_hdmi = { | ||
332 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, | 347 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, |
333 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 348 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, |
334 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, | 349 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, |
@@ -342,8 +357,9 @@ static const intel_limit_t intel_limits[] = { | |||
342 | .p2_fast = G4X_P2_HDMI_DAC_FAST | 357 | .p2_fast = G4X_P2_HDMI_DAC_FAST |
343 | }, | 358 | }, |
344 | .find_pll = intel_g4x_find_best_PLL, | 359 | .find_pll = intel_g4x_find_best_PLL, |
345 | }, | 360 | }; |
346 | { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ | 361 | |
362 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | ||
347 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, | 363 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, |
348 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, | 364 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, |
349 | .vco = { .min = G4X_VCO_MIN, | 365 | .vco = { .min = G4X_VCO_MIN, |
@@ -365,8 +381,9 @@ static const intel_limit_t intel_limits[] = { | |||
365 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | 381 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST |
366 | }, | 382 | }, |
367 | .find_pll = intel_g4x_find_best_PLL, | 383 | .find_pll = intel_g4x_find_best_PLL, |
368 | }, | 384 | }; |
369 | { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ | 385 | |
386 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | ||
370 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, | 387 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, |
371 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, | 388 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, |
372 | .vco = { .min = G4X_VCO_MIN, | 389 | .vco = { .min = G4X_VCO_MIN, |
@@ -388,8 +405,32 @@ static const intel_limit_t intel_limits[] = { | |||
388 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | 405 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST |
389 | }, | 406 | }, |
390 | .find_pll = intel_g4x_find_best_PLL, | 407 | .find_pll = intel_g4x_find_best_PLL, |
391 | }, | 408 | }; |
392 | { /* INTEL_LIMIT_IGD_SDVO */ | 409 | |
410 | static const intel_limit_t intel_limits_g4x_display_port = { | ||
411 | .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, | ||
412 | .max = G4X_DOT_DISPLAY_PORT_MAX }, | ||
413 | .vco = { .min = G4X_VCO_MIN, | ||
414 | .max = G4X_VCO_MAX}, | ||
415 | .n = { .min = G4X_N_DISPLAY_PORT_MIN, | ||
416 | .max = G4X_N_DISPLAY_PORT_MAX }, | ||
417 | .m = { .min = G4X_M_DISPLAY_PORT_MIN, | ||
418 | .max = G4X_M_DISPLAY_PORT_MAX }, | ||
419 | .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, | ||
420 | .max = G4X_M1_DISPLAY_PORT_MAX }, | ||
421 | .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, | ||
422 | .max = G4X_M2_DISPLAY_PORT_MAX }, | ||
423 | .p = { .min = G4X_P_DISPLAY_PORT_MIN, | ||
424 | .max = G4X_P_DISPLAY_PORT_MAX }, | ||
425 | .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, | ||
426 | .max = G4X_P1_DISPLAY_PORT_MAX}, | ||
427 | .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, | ||
428 | .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, | ||
429 | .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, | ||
430 | .find_pll = intel_find_pll_g4x_dp, | ||
431 | }; | ||
432 | |||
433 | static const intel_limit_t intel_limits_igd_sdvo = { | ||
393 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, | 434 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, |
394 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | 435 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, |
395 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | 436 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, |
@@ -401,8 +442,9 @@ static const intel_limit_t intel_limits[] = { | |||
401 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 442 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
402 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 443 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
403 | .find_pll = intel_find_best_PLL, | 444 | .find_pll = intel_find_best_PLL, |
404 | }, | 445 | }; |
405 | { /* INTEL_LIMIT_IGD_LVDS */ | 446 | |
447 | static const intel_limit_t intel_limits_igd_lvds = { | ||
406 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 448 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
407 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | 449 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, |
408 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | 450 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, |
@@ -415,8 +457,9 @@ static const intel_limit_t intel_limits[] = { | |||
415 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 457 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
416 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | 458 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, |
417 | .find_pll = intel_find_best_PLL, | 459 | .find_pll = intel_find_best_PLL, |
418 | }, | 460 | }; |
419 | { /* INTEL_LIMIT_IGDNG_SDVO_DAC */ | 461 | |
462 | static const intel_limit_t intel_limits_igdng_sdvo = { | ||
420 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, | 463 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, |
421 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, | 464 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, |
422 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, | 465 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, |
@@ -429,8 +472,9 @@ static const intel_limit_t intel_limits[] = { | |||
429 | .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, | 472 | .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, |
430 | .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, | 473 | .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, |
431 | .find_pll = intel_igdng_find_best_PLL, | 474 | .find_pll = intel_igdng_find_best_PLL, |
432 | }, | 475 | }; |
433 | { /* INTEL_LIMIT_IGDNG_LVDS */ | 476 | |
477 | static const intel_limit_t intel_limits_igdng_lvds = { | ||
434 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, | 478 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, |
435 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, | 479 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, |
436 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, | 480 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, |
@@ -443,16 +487,15 @@ static const intel_limit_t intel_limits[] = { | |||
443 | .p2_slow = IGDNG_P2_LVDS_SLOW, | 487 | .p2_slow = IGDNG_P2_LVDS_SLOW, |
444 | .p2_fast = IGDNG_P2_LVDS_FAST }, | 488 | .p2_fast = IGDNG_P2_LVDS_FAST }, |
445 | .find_pll = intel_igdng_find_best_PLL, | 489 | .find_pll = intel_igdng_find_best_PLL, |
446 | }, | ||
447 | }; | 490 | }; |
448 | 491 | ||
449 | static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) | 492 | static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) |
450 | { | 493 | { |
451 | const intel_limit_t *limit; | 494 | const intel_limit_t *limit; |
452 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 495 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
453 | limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS]; | 496 | limit = &intel_limits_igdng_lvds; |
454 | else | 497 | else |
455 | limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC]; | 498 | limit = &intel_limits_igdng_sdvo; |
456 | 499 | ||
457 | return limit; | 500 | return limit; |
458 | } | 501 | } |
@@ -467,19 +510,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | |||
467 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 510 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == |
468 | LVDS_CLKB_POWER_UP) | 511 | LVDS_CLKB_POWER_UP) |
469 | /* LVDS with dual channel */ | 512 | /* LVDS with dual channel */ |
470 | limit = &intel_limits | 513 | limit = &intel_limits_g4x_dual_channel_lvds; |
471 | [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; | ||
472 | else | 514 | else |
473 | /* LVDS with single channel */ | 515 | /* LVDS with single channel */ |
474 | limit = &intel_limits | 516 | limit = &intel_limits_g4x_single_channel_lvds; |
475 | [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; | ||
476 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || | 517 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || |
477 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { | 518 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { |
478 | limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; | 519 | limit = &intel_limits_g4x_hdmi; |
479 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { | 520 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { |
480 | limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; | 521 | limit = &intel_limits_g4x_sdvo; |
522 | } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
523 | limit = &intel_limits_g4x_display_port; | ||
481 | } else /* The option is for other outputs */ | 524 | } else /* The option is for other outputs */ |
482 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | 525 | limit = &intel_limits_i9xx_sdvo; |
483 | 526 | ||
484 | return limit; | 527 | return limit; |
485 | } | 528 | } |
@@ -495,19 +538,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |||
495 | limit = intel_g4x_limit(crtc); | 538 | limit = intel_g4x_limit(crtc); |
496 | } else if (IS_I9XX(dev) && !IS_IGD(dev)) { | 539 | } else if (IS_I9XX(dev) && !IS_IGD(dev)) { |
497 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 540 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
498 | limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; | 541 | limit = &intel_limits_i9xx_lvds; |
499 | else | 542 | else |
500 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | 543 | limit = &intel_limits_i9xx_sdvo; |
501 | } else if (IS_IGD(dev)) { | 544 | } else if (IS_IGD(dev)) { |
502 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 545 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
503 | limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; | 546 | limit = &intel_limits_igd_lvds; |
504 | else | 547 | else |
505 | limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; | 548 | limit = &intel_limits_igd_sdvo; |
506 | } else { | 549 | } else { |
507 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 550 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
508 | limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; | 551 | limit = &intel_limits_i8xx_lvds; |
509 | else | 552 | else |
510 | limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; | 553 | limit = &intel_limits_i8xx_dvo; |
511 | } | 554 | } |
512 | return limit; | 555 | return limit; |
513 | } | 556 | } |
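[Editor's note] The refactor above replaces the index-based intel_limits[] array with one named intel_limit_t per output type, so intel_limit()/intel_g4x_limit() hand back a table whose .find_pll callback already matches the output (the new DisplayPort table carries intel_find_pll_g4x_dp). A minimal sketch of that dispatch pattern follows; the types, names and numbers are the editor's placeholders, not the driver's:

	#include <stdbool.h>

	struct clock_out { int dot; };

	struct limit {
		int dot_min, dot_max;
		bool (*find_pll)(const struct limit *l, int target,
				 struct clock_out *best);
	};

	/* DisplayPort-style search: only two legal dot clocks, chosen by threshold. */
	static bool find_pll_two_rates(const struct limit *l, int target,
				       struct clock_out *best)
	{
		best->dot = (target < 200000) ? 161670 : 270000;
		return best->dot >= l->dot_min && best->dot <= l->dot_max;
	}

	static const struct limit limits_dp_like = {
		.dot_min = 161670, .dot_max = 270000,
		.find_pll = find_pll_two_rates,
	};

	/* Callers no longer index a global array; the chosen table supplies the
	 * search routine along with its ranges. */
	static bool pick_clock(const struct limit *l, int target, struct clock_out *best)
	{
		return l->find_pll(l, target, best);
	}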
@@ -764,6 +807,35 @@ out: | |||
764 | return found; | 807 | return found; |
765 | } | 808 | } |
766 | 809 | ||
810 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ | ||
811 | static bool | ||
812 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
813 | int target, int refclk, intel_clock_t *best_clock) | ||
814 | { | ||
815 | intel_clock_t clock; | ||
816 | if (target < 200000) { | ||
817 | clock.dot = 161670; | ||
818 | clock.p = 20; | ||
819 | clock.p1 = 2; | ||
820 | clock.p2 = 10; | ||
821 | clock.n = 0x01; | ||
822 | clock.m = 97; | ||
823 | clock.m1 = 0x10; | ||
824 | clock.m2 = 0x05; | ||
825 | } else { | ||
826 | clock.dot = 270000; | ||
827 | clock.p = 10; | ||
828 | clock.p1 = 1; | ||
829 | clock.p2 = 10; | ||
830 | clock.n = 0x02; | ||
831 | clock.m = 108; | ||
832 | clock.m1 = 0x12; | ||
833 | clock.m2 = 0x06; | ||
834 | } | ||
835 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | ||
836 | return true; | ||
837 | } | ||
838 | |||
767 | void | 839 | void |
768 | intel_wait_for_vblank(struct drm_device *dev) | 840 | intel_wait_for_vblank(struct drm_device *dev) |
769 | { | 841 | { |
@@ -1541,7 +1613,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1541 | intel_clock_t clock; | 1613 | intel_clock_t clock; |
1542 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | 1614 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; |
1543 | bool ok, is_sdvo = false, is_dvo = false; | 1615 | bool ok, is_sdvo = false, is_dvo = false; |
1544 | bool is_crt = false, is_lvds = false, is_tv = false; | 1616 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
1545 | struct drm_mode_config *mode_config = &dev->mode_config; | 1617 | struct drm_mode_config *mode_config = &dev->mode_config; |
1546 | struct drm_connector *connector; | 1618 | struct drm_connector *connector; |
1547 | const intel_limit_t *limit; | 1619 | const intel_limit_t *limit; |
@@ -1585,6 +1657,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1585 | case INTEL_OUTPUT_ANALOG: | 1657 | case INTEL_OUTPUT_ANALOG: |
1586 | is_crt = true; | 1658 | is_crt = true; |
1587 | break; | 1659 | break; |
1660 | case INTEL_OUTPUT_DISPLAYPORT: | ||
1661 | is_dp = true; | ||
1662 | break; | ||
1588 | } | 1663 | } |
1589 | 1664 | ||
1590 | num_outputs++; | 1665 | num_outputs++; |
@@ -1600,6 +1675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1600 | } else { | 1675 | } else { |
1601 | refclk = 48000; | 1676 | refclk = 48000; |
1602 | } | 1677 | } |
1678 | |||
1603 | 1679 | ||
1604 | /* | 1680 | /* |
1605 | * Returns a set of divisors for the desired target clock with the given | 1681 | * Returns a set of divisors for the desired target clock with the given |
@@ -1662,6 +1738,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1662 | else if (IS_IGDNG(dev)) | 1738 | else if (IS_IGDNG(dev)) |
1663 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | 1739 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
1664 | } | 1740 | } |
1741 | if (is_dp) | ||
1742 | dpll |= DPLL_DVO_HIGH_SPEED; | ||
1665 | 1743 | ||
1666 | /* compute bitmask from p1 value */ | 1744 | /* compute bitmask from p1 value */ |
1667 | if (IS_IGD(dev)) | 1745 | if (IS_IGD(dev)) |
@@ -1809,6 +1887,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1809 | I915_WRITE(lvds_reg, lvds); | 1887 | I915_WRITE(lvds_reg, lvds); |
1810 | I915_READ(lvds_reg); | 1888 | I915_READ(lvds_reg); |
1811 | } | 1889 | } |
1890 | if (is_dp) | ||
1891 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | ||
1812 | 1892 | ||
1813 | I915_WRITE(fp_reg, fp); | 1893 | I915_WRITE(fp_reg, fp); |
1814 | I915_WRITE(dpll_reg, dpll); | 1894 | I915_WRITE(dpll_reg, dpll); |
@@ -2475,6 +2555,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2475 | found = intel_sdvo_init(dev, SDVOB); | 2555 | found = intel_sdvo_init(dev, SDVOB); |
2476 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 2556 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
2477 | intel_hdmi_init(dev, SDVOB); | 2557 | intel_hdmi_init(dev, SDVOB); |
2558 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | ||
2559 | intel_dp_init(dev, DP_B); | ||
2478 | } | 2560 | } |
2479 | 2561 | ||
2480 | /* Before G4X SDVOC doesn't have its own detect register */ | 2562 | /* Before G4X SDVOC doesn't have its own detect register */ |
@@ -2487,7 +2569,11 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2487 | found = intel_sdvo_init(dev, SDVOC); | 2569 | found = intel_sdvo_init(dev, SDVOC); |
2488 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 2570 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
2489 | intel_hdmi_init(dev, SDVOC); | 2571 | intel_hdmi_init(dev, SDVOC); |
2572 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | ||
2573 | intel_dp_init(dev, DP_C); | ||
2490 | } | 2574 | } |
2575 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | ||
2576 | intel_dp_init(dev, DP_D); | ||
2491 | } else | 2577 | } else |
2492 | intel_dvo_init(dev); | 2578 | intel_dvo_init(dev); |
2493 | 2579 | ||
@@ -2530,6 +2616,11 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2530 | (1 << 1)); | 2616 | (1 << 1)); |
2531 | clone_mask = (1 << INTEL_OUTPUT_TVOUT); | 2617 | clone_mask = (1 << INTEL_OUTPUT_TVOUT); |
2532 | break; | 2618 | break; |
2619 | case INTEL_OUTPUT_DISPLAYPORT: | ||
2620 | crtc_mask = ((1 << 0) | | ||
2621 | (1 << 1)); | ||
2622 | clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); | ||
2623 | break; | ||
2533 | } | 2624 | } |
2534 | encoder->possible_crtcs = crtc_mask; | 2625 | encoder->possible_crtcs = crtc_mask; |
2535 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); | 2626 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c new file mode 100644 index 000000000000..8f8d37d5663a --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -0,0 +1,1153 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Keith Packard <keithp@keithp.com> | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include <linux/i2c.h> | ||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "drm_crtc.h" | ||
32 | #include "drm_crtc_helper.h" | ||
33 | #include "intel_drv.h" | ||
34 | #include "i915_drm.h" | ||
35 | #include "i915_drv.h" | ||
36 | #include "intel_dp.h" | ||
37 | |||
38 | #define DP_LINK_STATUS_SIZE 6 | ||
39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | ||
40 | |||
41 | #define DP_LINK_CONFIGURATION_SIZE 9 | ||
42 | |||
43 | struct intel_dp_priv { | ||
44 | uint32_t output_reg; | ||
45 | uint32_t DP; | ||
46 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
47 | uint32_t save_DP; | ||
48 | uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
49 | bool has_audio; | ||
50 | int dpms_mode; | ||
51 | uint8_t link_bw; | ||
52 | uint8_t lane_count; | ||
53 | uint8_t dpcd[4]; | ||
54 | struct intel_output *intel_output; | ||
55 | struct i2c_adapter adapter; | ||
56 | struct i2c_algo_dp_aux_data algo; | ||
57 | }; | ||
58 | |||
59 | static void | ||
60 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | ||
61 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); | ||
62 | |||
63 | static void | ||
64 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); | ||
65 | |||
66 | static int | ||
67 | intel_dp_max_lane_count(struct intel_output *intel_output) | ||
68 | { | ||
69 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
70 | int max_lane_count = 4; | ||
71 | |||
72 | if (dp_priv->dpcd[0] >= 0x11) { | ||
73 | max_lane_count = dp_priv->dpcd[2] & 0x1f; | ||
74 | switch (max_lane_count) { | ||
75 | case 1: case 2: case 4: | ||
76 | break; | ||
77 | default: | ||
78 | max_lane_count = 4; | ||
79 | } | ||
80 | } | ||
81 | return max_lane_count; | ||
82 | } | ||
83 | |||
84 | static int | ||
85 | intel_dp_max_link_bw(struct intel_output *intel_output) | ||
86 | { | ||
87 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
88 | int max_link_bw = dp_priv->dpcd[1]; | ||
89 | |||
90 | switch (max_link_bw) { | ||
91 | case DP_LINK_BW_1_62: | ||
92 | case DP_LINK_BW_2_7: | ||
93 | break; | ||
94 | default: | ||
95 | max_link_bw = DP_LINK_BW_1_62; | ||
96 | break; | ||
97 | } | ||
98 | return max_link_bw; | ||
99 | } | ||
100 | |||
101 | static int | ||
102 | intel_dp_link_clock(uint8_t link_bw) | ||
103 | { | ||
104 | if (link_bw == DP_LINK_BW_2_7) | ||
105 | return 270000; | ||
106 | else | ||
107 | return 162000; | ||
108 | } | ||
109 | |||
110 | /* I think this is a fiction */ | ||
111 | static int | ||
112 | intel_dp_link_required(int pixel_clock) | ||
113 | { | ||
114 | return pixel_clock * 3; | ||
115 | } | ||
116 | |||
117 | static int | ||
118 | intel_dp_mode_valid(struct drm_connector *connector, | ||
119 | struct drm_display_mode *mode) | ||
120 | { | ||
121 | struct intel_output *intel_output = to_intel_output(connector); | ||
122 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); | ||
123 | int max_lanes = intel_dp_max_lane_count(intel_output); | ||
124 | |||
125 | if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) | ||
126 | return MODE_CLOCK_HIGH; | ||
127 | |||
128 | if (mode->clock < 10000) | ||
129 | return MODE_CLOCK_LOW; | ||
130 | |||
131 | return MODE_OK; | ||
132 | } | ||
133 | |||
134 | static uint32_t | ||
135 | pack_aux(uint8_t *src, int src_bytes) | ||
136 | { | ||
137 | int i; | ||
138 | uint32_t v = 0; | ||
139 | |||
140 | if (src_bytes > 4) | ||
141 | src_bytes = 4; | ||
142 | for (i = 0; i < src_bytes; i++) | ||
143 | v |= ((uint32_t) src[i]) << ((3-i) * 8); | ||
144 | return v; | ||
145 | } | ||
146 | |||
147 | static void | ||
148 | unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) | ||
149 | { | ||
150 | int i; | ||
151 | if (dst_bytes > 4) | ||
152 | dst_bytes = 4; | ||
153 | for (i = 0; i < dst_bytes; i++) | ||
154 | dst[i] = src >> ((3-i) * 8); | ||
155 | } | ||
156 | |||
157 | /* hrawclock is 1/4 the FSB frequency */ | ||
158 | static int | ||
159 | intel_hrawclk(struct drm_device *dev) | ||
160 | { | ||
161 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
162 | uint32_t clkcfg; | ||
163 | |||
164 | clkcfg = I915_READ(CLKCFG); | ||
165 | switch (clkcfg & CLKCFG_FSB_MASK) { | ||
166 | case CLKCFG_FSB_400: | ||
167 | return 100; | ||
168 | case CLKCFG_FSB_533: | ||
169 | return 133; | ||
170 | case CLKCFG_FSB_667: | ||
171 | return 166; | ||
172 | case CLKCFG_FSB_800: | ||
173 | return 200; | ||
174 | case CLKCFG_FSB_1067: | ||
175 | return 266; | ||
176 | case CLKCFG_FSB_1333: | ||
177 | return 333; | ||
178 | /* these two are just a guess; one of them might be right */ | ||
179 | case CLKCFG_FSB_1600: | ||
180 | case CLKCFG_FSB_1600_ALT: | ||
181 | return 400; | ||
182 | default: | ||
183 | return 133; | ||
184 | } | ||
185 | } | ||
186 | |||
187 | static int | ||
188 | intel_dp_aux_ch(struct intel_output *intel_output, | ||
189 | uint8_t *send, int send_bytes, | ||
190 | uint8_t *recv, int recv_size) | ||
191 | { | ||
192 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
193 | uint32_t output_reg = dp_priv->output_reg; | ||
194 | struct drm_device *dev = intel_output->base.dev; | ||
195 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
196 | uint32_t ch_ctl = output_reg + 0x10; | ||
197 | uint32_t ch_data = ch_ctl + 4; | ||
198 | int i; | ||
199 | int recv_bytes; | ||
200 | uint32_t ctl; | ||
201 | uint32_t status; | ||
202 | uint32_t aux_clock_divider; | ||
203 | int try; | ||
204 | |||
205 | /* The clock divider is based off the hrawclk, | ||
206 | * and would like to run at 2MHz. So, take the | ||
207 | * hrawclk value and divide by 2 and use that | ||
208 | */ | ||
209 | aux_clock_divider = intel_hrawclk(dev) / 2; | ||
210 | /* Must try at least 3 times according to DP spec */ | ||
211 | for (try = 0; try < 5; try++) { | ||
212 | /* Load the send data into the aux channel data registers */ | ||
213 | for (i = 0; i < send_bytes; i += 4) { | ||
214 | uint32_t d = pack_aux(send + i, send_bytes - i); | ||
215 | |||
216 | I915_WRITE(ch_data + i, d); | ||
217 | } | ||
218 | |||
219 | ctl = (DP_AUX_CH_CTL_SEND_BUSY | | ||
220 | DP_AUX_CH_CTL_TIME_OUT_400us | | ||
221 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | ||
222 | (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | ||
223 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | ||
224 | DP_AUX_CH_CTL_DONE | | ||
225 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
226 | DP_AUX_CH_CTL_RECEIVE_ERROR); | ||
227 | |||
228 | /* Send the command and wait for it to complete */ | ||
229 | I915_WRITE(ch_ctl, ctl); | ||
230 | (void) I915_READ(ch_ctl); | ||
231 | for (;;) { | ||
232 | udelay(100); | ||
233 | status = I915_READ(ch_ctl); | ||
234 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) | ||
235 | break; | ||
236 | } | ||
237 | |||
238 | /* Clear done status and any errors */ | ||
239 | I915_WRITE(ch_ctl, (ctl | | ||
240 | DP_AUX_CH_CTL_DONE | | ||
241 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
242 | DP_AUX_CH_CTL_RECEIVE_ERROR)); | ||
243 | (void) I915_READ(ch_ctl); | ||
244 | if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) | ||
245 | break; | ||
246 | } | ||
247 | |||
248 | if ((status & DP_AUX_CH_CTL_DONE) == 0) { | ||
249 | printk(KERN_ERR "dp_aux_ch not done status 0x%08x\n", status); | ||
250 | return -EBUSY; | ||
251 | } | ||
252 | |||
253 | /* Check for timeout or receive error. | ||
254 | * Timeouts occur when the sink is not connected | ||
255 | */ | ||
256 | if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { | ||
257 | printk(KERN_ERR "dp_aux_ch receive error status 0x%08x\n", status); | ||
258 | return -EIO; | ||
259 | } | ||
260 | if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { | ||
261 | printk(KERN_ERR "dp_aux_ch timeout status 0x%08x\n", status); | ||
262 | return -ETIMEDOUT; | ||
263 | } | ||
264 | |||
265 | /* Unload any bytes sent back from the other side */ | ||
266 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> | ||
267 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); | ||
268 | |||
269 | if (recv_bytes > recv_size) | ||
270 | recv_bytes = recv_size; | ||
271 | |||
272 | for (i = 0; i < recv_bytes; i += 4) { | ||
273 | uint32_t d = I915_READ(ch_data + i); | ||
274 | |||
275 | unpack_aux(d, recv + i, recv_bytes - i); | ||
276 | } | ||
277 | |||
278 | return recv_bytes; | ||
279 | } | ||
280 | |||
281 | /* Write data to the aux channel in native mode */ | ||
282 | static int | ||
283 | intel_dp_aux_native_write(struct intel_output *intel_output, | ||
284 | uint16_t address, uint8_t *send, int send_bytes) | ||
285 | { | ||
286 | int ret; | ||
287 | uint8_t msg[20]; | ||
288 | int msg_bytes; | ||
289 | uint8_t ack; | ||
290 | |||
291 | if (send_bytes > 16) | ||
292 | return -1; | ||
293 | msg[0] = AUX_NATIVE_WRITE << 4; | ||
294 | msg[1] = address >> 8; | ||
295 | msg[2] = address; | ||
296 | msg[3] = send_bytes - 1; | ||
297 | memcpy(&msg[4], send, send_bytes); | ||
298 | msg_bytes = send_bytes + 4; | ||
299 | for (;;) { | ||
300 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); | ||
301 | if (ret < 0) | ||
302 | return ret; | ||
303 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | ||
304 | break; | ||
305 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | ||
306 | udelay(100); | ||
307 | else | ||
308 | return -EIO; | ||
309 | } | ||
310 | return send_bytes; | ||
311 | } | ||
312 | |||
313 | /* Write a single byte to the aux channel in native mode */ | ||
314 | static int | ||
315 | intel_dp_aux_native_write_1(struct intel_output *intel_output, | ||
316 | uint16_t address, uint8_t byte) | ||
317 | { | ||
318 | return intel_dp_aux_native_write(intel_output, address, &byte, 1); | ||
319 | } | ||
320 | |||
321 | /* read bytes from a native aux channel */ | ||
322 | static int | ||
323 | intel_dp_aux_native_read(struct intel_output *intel_output, | ||
324 | uint16_t address, uint8_t *recv, int recv_bytes) | ||
325 | { | ||
326 | uint8_t msg[4]; | ||
327 | int msg_bytes; | ||
328 | uint8_t reply[20]; | ||
329 | int reply_bytes; | ||
330 | uint8_t ack; | ||
331 | int ret; | ||
332 | |||
333 | msg[0] = AUX_NATIVE_READ << 4; | ||
334 | msg[1] = address >> 8; | ||
335 | msg[2] = address & 0xff; | ||
336 | msg[3] = recv_bytes - 1; | ||
337 | |||
338 | msg_bytes = 4; | ||
339 | reply_bytes = recv_bytes + 1; | ||
340 | |||
341 | for (;;) { | ||
342 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, | ||
343 | reply, reply_bytes); | ||
344 | if (ret == 0) | ||
345 | return -EPROTO; | ||
346 | if (ret < 0) | ||
347 | return ret; | ||
348 | ack = reply[0]; | ||
349 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { | ||
350 | memcpy(recv, reply + 1, ret - 1); | ||
351 | return ret - 1; | ||
352 | } | ||
353 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | ||
354 | udelay(100); | ||
355 | else | ||
356 | return -EIO; | ||
357 | } | ||
358 | } | ||
359 | |||
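[Editor's note] A usage sketch for the native-read helper above: fetching the first few DPCD bytes (revision, max link rate, max lane count) into the dpcd[4] cache declared in struct intel_dp_priv. The DPCD address 0x000, the 4-byte size and the helper name example_read_dpcd are the editor's illustration, not part of the patch:

	static bool example_read_dpcd(struct intel_output *intel_output,
				      struct intel_dp_priv *dp_priv)
	{
		int ret;

		/* DPCD starts at address 0; these are the bytes that
		 * intel_dp_max_link_bw()/intel_dp_max_lane_count() consult. */
		ret = intel_dp_aux_native_read(intel_output, 0x000,
					       dp_priv->dpcd,
					       sizeof(dp_priv->dpcd));
		return ret == sizeof(dp_priv->dpcd);
	}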
360 | static int | ||
361 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, | ||
362 | uint8_t *send, int send_bytes, | ||
363 | uint8_t *recv, int recv_bytes) | ||
364 | { | ||
365 | struct intel_dp_priv *dp_priv = container_of(adapter, | ||
366 | struct intel_dp_priv, | ||
367 | adapter); | ||
368 | struct intel_output *intel_output = dp_priv->intel_output; | ||
369 | |||
370 | return intel_dp_aux_ch(intel_output, | ||
371 | send, send_bytes, recv, recv_bytes); | ||
372 | } | ||
373 | |||
374 | static int | ||
375 | intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | ||
376 | { | ||
377 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
378 | |||
379 | DRM_ERROR("i2c_init %s\n", name); | ||
380 | dp_priv->algo.running = false; | ||
381 | dp_priv->algo.address = 0; | ||
382 | dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; | ||
383 | |||
384 | memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); | ||
385 | dp_priv->adapter.owner = THIS_MODULE; | ||
386 | dp_priv->adapter.class = I2C_CLASS_DDC; | ||
387 | strncpy (dp_priv->adapter.name, name, sizeof dp_priv->adapter.name - 1); | ||
388 | dp_priv->adapter.name[sizeof dp_priv->adapter.name - 1] = '\0'; | ||
389 | dp_priv->adapter.algo_data = &dp_priv->algo; | ||
390 | dp_priv->adapter.dev.parent = &intel_output->base.kdev; | ||
391 | |||
392 | return i2c_dp_aux_add_bus(&dp_priv->adapter); | ||
393 | } | ||
394 | |||
395 | static bool | ||
396 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
397 | struct drm_display_mode *adjusted_mode) | ||
398 | { | ||
399 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
400 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
401 | int lane_count, clock; | ||
402 | int max_lane_count = intel_dp_max_lane_count(intel_output); | ||
403 | int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; | ||
404 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | ||
405 | |||
406 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | ||
407 | for (clock = 0; clock <= max_clock; clock++) { | ||
408 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | ||
409 | |||
410 | if (intel_dp_link_required(mode->clock) <= link_avail) { | ||
411 | dp_priv->link_bw = bws[clock]; | ||
412 | dp_priv->lane_count = lane_count; | ||
413 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | ||
414 | printk(KERN_ERR "link bw %02x lane count %d clock %d\n", | ||
415 | dp_priv->link_bw, dp_priv->lane_count, | ||
416 | adjusted_mode->clock); | ||
417 | return true; | ||
418 | } | ||
419 | } | ||
420 | } | ||
421 | return false; | ||
422 | } | ||
423 | |||
424 | struct intel_dp_m_n { | ||
425 | uint32_t tu; | ||
426 | uint32_t gmch_m; | ||
427 | uint32_t gmch_n; | ||
428 | uint32_t link_m; | ||
429 | uint32_t link_n; | ||
430 | }; | ||
431 | |||
432 | static void | ||
433 | intel_reduce_ratio(uint32_t *num, uint32_t *den) | ||
434 | { | ||
435 | while (*num > 0xffffff || *den > 0xffffff) { | ||
436 | *num >>= 1; | ||
437 | *den >>= 1; | ||
438 | } | ||
439 | } | ||
440 | |||
441 | static void | ||
442 | intel_dp_compute_m_n(int bytes_per_pixel, | ||
443 | int nlanes, | ||
444 | int pixel_clock, | ||
445 | int link_clock, | ||
446 | struct intel_dp_m_n *m_n) | ||
447 | { | ||
448 | m_n->tu = 64; | ||
449 | m_n->gmch_m = pixel_clock * bytes_per_pixel; | ||
450 | m_n->gmch_n = link_clock * nlanes; | ||
451 | intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | ||
452 | m_n->link_m = pixel_clock; | ||
453 | m_n->link_n = link_clock; | ||
454 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); | ||
455 | } | ||
456 | |||
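As a worked example of the ratio computed above (hypothetical numbers, not taken from any real mode): a 108000 kHz pixel clock at 3 bytes per pixel over 4 lanes of a 162000 kHz link gives gmch_m = 324000 and gmch_n = 648000; both already fit in 24 bits, so intel_reduce_ratio() leaves them alone and the data M/N ratio is effectively 1:2. The same arithmetic as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Halve both terms until each fits in 24 bits, as intel_reduce_ratio() does. */
    static void reduce_ratio(uint32_t *num, uint32_t *den)
    {
            while (*num > 0xffffff || *den > 0xffffff) {
                    *num >>= 1;
                    *den >>= 1;
            }
    }

    int main(void)
    {
            uint32_t gmch_m = 108000 * 3;      /* pixel_clock * bytes_per_pixel */
            uint32_t gmch_n = 162000 * 4;      /* link_clock * nlanes */

            reduce_ratio(&gmch_m, &gmch_n);
            printf("gmch_m=%u gmch_n=%u\n", gmch_m, gmch_n);   /* prints 324000 648000 */
            return 0;
    }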
457 | void | ||
458 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||
459 | struct drm_display_mode *adjusted_mode) | ||
460 | { | ||
461 | struct drm_device *dev = crtc->dev; | ||
462 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
463 | struct drm_connector *connector; | ||
464 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
465 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
466 | int lane_count = 4; | ||
467 | struct intel_dp_m_n m_n; | ||
468 | |||
469 | /* | ||
470 | * Find the lane count in the intel_output private | ||
471 | */ | ||
472 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
473 | struct intel_output *intel_output = to_intel_output(connector); | ||
474 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
475 | |||
476 | if (!connector->encoder || connector->encoder->crtc != crtc) | ||
477 | continue; | ||
478 | |||
479 | if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { | ||
480 | lane_count = dp_priv->lane_count; | ||
481 | break; | ||
482 | } | ||
483 | } | ||
484 | |||
485 | /* | ||
486 | * Compute the GMCH and Link ratios. The '3' here is | ||
487 | * the number of bytes_per_pixel post-LUT, which we always | ||
488 | * set up for 8-bits of R/G/B, or 3 bytes total. | ||
489 | */ | ||
490 | intel_dp_compute_m_n(3, lane_count, | ||
491 | mode->clock, adjusted_mode->clock, &m_n); | ||
492 | |||
493 | if (intel_crtc->pipe == 0) { | ||
494 | I915_WRITE(PIPEA_GMCH_DATA_M, | ||
495 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
496 | m_n.gmch_m); | ||
497 | I915_WRITE(PIPEA_GMCH_DATA_N, | ||
498 | m_n.gmch_n); | ||
499 | I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); | ||
500 | I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); | ||
501 | } else { | ||
502 | I915_WRITE(PIPEB_GMCH_DATA_M, | ||
503 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
504 | m_n.gmch_m); | ||
505 | I915_WRITE(PIPEB_GMCH_DATA_N, | ||
506 | m_n.gmch_n); | ||
507 | I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); | ||
508 | I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); | ||
509 | } | ||
510 | } | ||
511 | |||
512 | static void | ||
513 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
514 | struct drm_display_mode *adjusted_mode) | ||
515 | { | ||
516 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
517 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
518 | struct drm_crtc *crtc = intel_output->enc.crtc; | ||
519 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
520 | |||
521 | dp_priv->DP = (DP_LINK_TRAIN_OFF | | ||
522 | DP_VOLTAGE_0_4 | | ||
523 | DP_PRE_EMPHASIS_0 | | ||
524 | DP_SYNC_VS_HIGH | | ||
525 | DP_SYNC_HS_HIGH); | ||
526 | |||
527 | switch (dp_priv->lane_count) { | ||
528 | case 1: | ||
529 | dp_priv->DP |= DP_PORT_WIDTH_1; | ||
530 | break; | ||
531 | case 2: | ||
532 | dp_priv->DP |= DP_PORT_WIDTH_2; | ||
533 | break; | ||
534 | case 4: | ||
535 | dp_priv->DP |= DP_PORT_WIDTH_4; | ||
536 | break; | ||
537 | } | ||
538 | if (dp_priv->has_audio) | ||
539 | dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE; | ||
540 | |||
541 | memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | ||
542 | dp_priv->link_configuration[0] = dp_priv->link_bw; | ||
543 | dp_priv->link_configuration[1] = dp_priv->lane_count; | ||
544 | |||
545 | /* | ||
546 | * Check for DPCD version >= 1.1, | ||
547 | * enable enhanced framing in that case | ||
548 | */ | ||
549 | if (dp_priv->dpcd[0] >= 0x11) { | ||
550 | dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
551 | dp_priv->DP |= DP_ENHANCED_FRAMING; | ||
552 | } | ||
553 | |||
554 | if (intel_crtc->pipe == 1) | ||
555 | dp_priv->DP |= DP_PIPEB_SELECT; | ||
556 | } | ||
557 | |||
558 | |||
559 | static void | ||
560 | intel_dp_dpms(struct drm_encoder *encoder, int mode) | ||
561 | { | ||
562 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
563 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
564 | struct drm_device *dev = intel_output->base.dev; | ||
565 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
566 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | ||
567 | |||
568 | if (mode != DRM_MODE_DPMS_ON) { | ||
569 | if (dp_reg & DP_PORT_EN) | ||
570 | intel_dp_link_down(intel_output, dp_priv->DP); | ||
571 | } else { | ||
572 | if (!(dp_reg & DP_PORT_EN)) | ||
573 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | ||
574 | } | ||
575 | dp_priv->dpms_mode = mode; | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Fetch AUX CH registers 0x202 - 0x207 which contain | ||
580 | * link status information | ||
581 | */ | ||
582 | static bool | ||
583 | intel_dp_get_link_status(struct intel_output *intel_output, | ||
584 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | ||
585 | { | ||
586 | int ret; | ||
587 | |||
588 | ret = intel_dp_aux_native_read(intel_output, | ||
589 | DP_LANE0_1_STATUS, | ||
590 | link_status, DP_LINK_STATUS_SIZE); | ||
591 | if (ret != DP_LINK_STATUS_SIZE) | ||
592 | return false; | ||
593 | return true; | ||
594 | } | ||
595 | |||
596 | static uint8_t | ||
597 | intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
598 | int r) | ||
599 | { | ||
600 | return link_status[r - DP_LANE0_1_STATUS]; | ||
601 | } | ||
602 | |||
603 | static void | ||
604 | intel_dp_save(struct drm_connector *connector) | ||
605 | { | ||
606 | struct intel_output *intel_output = to_intel_output(connector); | ||
607 | struct drm_device *dev = intel_output->base.dev; | ||
608 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
609 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
610 | |||
611 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); | ||
612 | intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, | ||
613 | dp_priv->save_link_configuration, | ||
614 | sizeof (dp_priv->save_link_configuration)); | ||
615 | } | ||
616 | |||
617 | static uint8_t | ||
618 | intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
619 | int lane) | ||
620 | { | ||
621 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
622 | int s = ((lane & 1) ? | ||
623 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : | ||
624 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); | ||
625 | uint8_t l = intel_dp_link_status(link_status, i); | ||
626 | |||
627 | return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | ||
628 | } | ||
629 | |||
630 | static uint8_t | ||
631 | intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
632 | int lane) | ||
633 | { | ||
634 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
635 | int s = ((lane & 1) ? | ||
636 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : | ||
637 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); | ||
638 | uint8_t l = intel_dp_link_status(link_status, i); | ||
639 | |||
640 | return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | ||
641 | } | ||
642 | |||
643 | |||
644 | #if 0 | ||
645 | static char *voltage_names[] = { | ||
646 | "0.4V", "0.6V", "0.8V", "1.2V" | ||
647 | }; | ||
648 | static char *pre_emph_names[] = { | ||
649 | "0dB", "3.5dB", "6dB", "9.5dB" | ||
650 | }; | ||
651 | static char *link_train_names[] = { | ||
652 | "pattern 1", "pattern 2", "idle", "off" | ||
653 | }; | ||
654 | #endif | ||
655 | |||
656 | /* | ||
657 | * These are source-specific values; current Intel hardware supports | ||
658 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB | ||
659 | */ | ||
660 | #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 | ||
661 | |||
662 | static uint8_t | ||
663 | intel_dp_pre_emphasis_max(uint8_t voltage_swing) | ||
664 | { | ||
665 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
666 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
667 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
668 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
669 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
670 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
671 | return DP_TRAIN_PRE_EMPHASIS_3_5; | ||
672 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
673 | default: | ||
674 | return DP_TRAIN_PRE_EMPHASIS_0; | ||
675 | } | ||
676 | } | ||
677 | |||
678 | static void | ||
679 | intel_get_adjust_train(struct intel_output *intel_output, | ||
680 | uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
681 | int lane_count, | ||
682 | uint8_t train_set[4]) | ||
683 | { | ||
684 | uint8_t v = 0; | ||
685 | uint8_t p = 0; | ||
686 | int lane; | ||
687 | |||
688 | for (lane = 0; lane < lane_count; lane++) { | ||
689 | uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); | ||
690 | uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); | ||
691 | |||
692 | if (this_v > v) | ||
693 | v = this_v; | ||
694 | if (this_p > p) | ||
695 | p = this_p; | ||
696 | } | ||
697 | |||
698 | if (v >= I830_DP_VOLTAGE_MAX) | ||
699 | v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; | ||
700 | |||
701 | if (p >= intel_dp_pre_emphasis_max(v)) | ||
702 | p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | ||
703 | |||
704 | for (lane = 0; lane < 4; lane++) | ||
705 | train_set[lane] = v | p; | ||
706 | } | ||
707 | |||
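To make the bit-twiddling above concrete, here is a hand-worked decode (0x26 is a hypothetical status byte, not from real hardware): if DP_ADJUST_REQUEST_LANE0_1 reads back 0x26, lane 0 requests voltage-swing level 2 and pre-emphasis level 1, while lane 1 requests swing level 2 and pre-emphasis 0. intel_get_adjust_train() then takes the per-lane maxima and re-encodes them in the DP_TRAINING_LANEx_SET layout, swing in bits 1:0 and pre-emphasis in bits 4:3. A small sketch of that decode/re-encode (hypothetical helper):

    #include <stdint.h>

    /* Sketch: decode one DP_ADJUST_REQUEST_LANE0_1 byte for a given lane and
     * build the matching DP_TRAINING_LANEx_SET value. */
    static uint8_t adjust_to_train_set(uint8_t adjust, int lane)
    {
            int shift = (lane & 1) ? 4 : 0;
            uint8_t swing = (adjust >> shift) & 3;          /* requested voltage swing */
            uint8_t preemph = (adjust >> (shift + 2)) & 3;  /* requested pre-emphasis */

            return swing | (preemph << 3);   /* DP_TRAIN_*_SHIFT layout */
    }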
708 | static uint32_t | ||
709 | intel_dp_signal_levels(uint8_t train_set, int lane_count) | ||
710 | { | ||
711 | uint32_t signal_levels = 0; | ||
712 | |||
713 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
714 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
715 | default: | ||
716 | signal_levels |= DP_VOLTAGE_0_4; | ||
717 | break; | ||
718 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
719 | signal_levels |= DP_VOLTAGE_0_6; | ||
720 | break; | ||
721 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
722 | signal_levels |= DP_VOLTAGE_0_8; | ||
723 | break; | ||
724 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
725 | signal_levels |= DP_VOLTAGE_1_2; | ||
726 | break; | ||
727 | } | ||
728 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { | ||
729 | case DP_TRAIN_PRE_EMPHASIS_0: | ||
730 | default: | ||
731 | signal_levels |= DP_PRE_EMPHASIS_0; | ||
732 | break; | ||
733 | case DP_TRAIN_PRE_EMPHASIS_3_5: | ||
734 | signal_levels |= DP_PRE_EMPHASIS_3_5; | ||
735 | break; | ||
736 | case DP_TRAIN_PRE_EMPHASIS_6: | ||
737 | signal_levels |= DP_PRE_EMPHASIS_6; | ||
738 | break; | ||
739 | case DP_TRAIN_PRE_EMPHASIS_9_5: | ||
740 | signal_levels |= DP_PRE_EMPHASIS_9_5; | ||
741 | break; | ||
742 | } | ||
743 | return signal_levels; | ||
744 | } | ||
745 | |||
746 | static uint8_t | ||
747 | intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
748 | int lane) | ||
749 | { | ||
750 | int i = DP_LANE0_1_STATUS + (lane >> 1); | ||
751 | int s = (lane & 1) * 4; | ||
752 | uint8_t l = intel_dp_link_status(link_status, i); | ||
753 | |||
754 | return (l >> s) & 0xf; | ||
755 | } | ||
756 | |||
757 | /* Check whether clock recovery is done on all lanes */ | ||
758 | static bool | ||
759 | intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | ||
760 | { | ||
761 | int lane; | ||
762 | uint8_t lane_status; | ||
763 | |||
764 | for (lane = 0; lane < lane_count; lane++) { | ||
765 | lane_status = intel_get_lane_status(link_status, lane); | ||
766 | if ((lane_status & DP_LANE_CR_DONE) == 0) | ||
767 | return false; | ||
768 | } | ||
769 | return true; | ||
770 | } | ||
771 | |||
772 | /* Check to see if channel eq is done on all channels */ | ||
773 | #define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ | ||
774 | DP_LANE_CHANNEL_EQ_DONE|\ | ||
775 | DP_LANE_SYMBOL_LOCKED) | ||
776 | static bool | ||
777 | intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | ||
778 | { | ||
779 | uint8_t lane_align; | ||
780 | uint8_t lane_status; | ||
781 | int lane; | ||
782 | |||
783 | lane_align = intel_dp_link_status(link_status, | ||
784 | DP_LANE_ALIGN_STATUS_UPDATED); | ||
785 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | ||
786 | return false; | ||
787 | for (lane = 0; lane < lane_count; lane++) { | ||
788 | lane_status = intel_get_lane_status(link_status, lane); | ||
789 | if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) | ||
790 | return false; | ||
791 | } | ||
792 | return true; | ||
793 | } | ||
794 | |||
795 | static bool | ||
796 | intel_dp_set_link_train(struct intel_output *intel_output, | ||
797 | uint32_t dp_reg_value, | ||
798 | uint8_t dp_train_pat, | ||
799 | uint8_t train_set[4], | ||
800 | bool first) | ||
801 | { | ||
802 | struct drm_device *dev = intel_output->base.dev; | ||
803 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
804 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
805 | int ret; | ||
806 | |||
807 | I915_WRITE(dp_priv->output_reg, dp_reg_value); | ||
808 | POSTING_READ(dp_priv->output_reg); | ||
809 | if (first) | ||
810 | intel_wait_for_vblank(dev); | ||
811 | |||
812 | intel_dp_aux_native_write_1(intel_output, | ||
813 | DP_TRAINING_PATTERN_SET, | ||
814 | dp_train_pat); | ||
815 | |||
816 | ret = intel_dp_aux_native_write(intel_output, | ||
817 | DP_TRAINING_LANE0_SET, train_set, 4); | ||
818 | if (ret != 4) | ||
819 | return false; | ||
820 | |||
821 | return true; | ||
822 | } | ||
823 | |||
824 | static void | ||
825 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | ||
826 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) | ||
827 | { | ||
828 | struct drm_device *dev = intel_output->base.dev; | ||
829 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
830 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
831 | uint8_t train_set[4]; | ||
832 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
833 | int i; | ||
834 | uint8_t voltage; | ||
835 | bool clock_recovery = false; | ||
836 | bool channel_eq = false; | ||
837 | bool first = true; | ||
838 | int tries; | ||
839 | |||
840 | /* Write the link configuration data */ | ||
841 | intel_dp_aux_native_write(intel_output, DP_LINK_BW_SET, | ||
842 | link_configuration, DP_LINK_CONFIGURATION_SIZE); | ||
843 | |||
844 | DP |= DP_PORT_EN; | ||
845 | DP &= ~DP_LINK_TRAIN_MASK; | ||
846 | memset(train_set, 0, 4); | ||
847 | voltage = 0xff; | ||
848 | tries = 0; | ||
849 | clock_recovery = false; | ||
850 | for (;;) { | ||
851 | /* Use train_set[0] to set the voltage and pre-emphasis values */ | ||
852 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | ||
853 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | ||
854 | |||
855 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, | ||
856 | DP_TRAINING_PATTERN_1, train_set, first)) | ||
857 | break; | ||
858 | first = false; | ||
859 | /* Set training pattern 1 */ | ||
860 | |||
861 | udelay(100); | ||
862 | if (!intel_dp_get_link_status(intel_output, link_status)) | ||
863 | break; | ||
864 | |||
865 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { | ||
866 | clock_recovery = true; | ||
867 | break; | ||
868 | } | ||
869 | |||
870 | /* Check to see if we've tried the max voltage */ | ||
871 | for (i = 0; i < dp_priv->lane_count; i++) | ||
872 | if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | ||
873 | break; | ||
874 | if (i == dp_priv->lane_count) | ||
875 | break; | ||
876 | |||
877 | /* Check to see if we've tried the same voltage 5 times */ | ||
878 | if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | ||
879 | ++tries; | ||
880 | if (tries == 5) | ||
881 | break; | ||
882 | } else | ||
883 | tries = 0; | ||
884 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
885 | |||
886 | /* Compute new train_set as requested by target */ | ||
887 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | ||
888 | } | ||
889 | |||
890 | /* channel equalization */ | ||
891 | tries = 0; | ||
892 | channel_eq = false; | ||
893 | for (;;) { | ||
894 | /* Use train_set[0] to set the voltage and pre-emphasis values */ | ||
895 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | ||
896 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | ||
897 | |||
898 | /* channel eq pattern */ | ||
899 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, | ||
900 | DP_TRAINING_PATTERN_2, train_set, | ||
901 | false)) | ||
902 | break; | ||
903 | |||
904 | udelay(400); | ||
905 | if (!intel_dp_get_link_status(intel_output, link_status)) | ||
906 | break; | ||
907 | |||
908 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { | ||
909 | channel_eq = true; | ||
910 | break; | ||
911 | } | ||
912 | |||
913 | /* Try 5 times */ | ||
914 | if (tries > 5) | ||
915 | break; | ||
916 | |||
917 | /* Compute new train_set as requested by target */ | ||
918 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | ||
919 | ++tries; | ||
920 | } | ||
921 | |||
922 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); | ||
923 | POSTING_READ(dp_priv->output_reg); | ||
924 | intel_dp_aux_native_write_1(intel_output, | ||
925 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | ||
926 | } | ||
927 | |||
928 | static void | ||
929 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | ||
930 | { | ||
931 | struct drm_device *dev = intel_output->base.dev; | ||
932 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
933 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
934 | |||
935 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); | ||
936 | POSTING_READ(dp_priv->output_reg); | ||
937 | } | ||
938 | |||
939 | static void | ||
940 | intel_dp_restore(struct drm_connector *connector) | ||
941 | { | ||
942 | struct intel_output *intel_output = to_intel_output(connector); | ||
943 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
944 | |||
945 | if (dp_priv->save_DP & DP_PORT_EN) | ||
946 | intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); | ||
947 | else | ||
948 | intel_dp_link_down(intel_output, dp_priv->save_DP); | ||
949 | } | ||
950 | |||
951 | /* | ||
952 | * According to DP spec | ||
953 | * 5.1.2: | ||
954 | * 1. Read DPCD | ||
955 | * 2. Configure link according to Receiver Capabilities | ||
956 | * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 | ||
957 | * 4. Check link status on receipt of hot-plug interrupt | ||
958 | */ | ||
959 | |||
960 | static void | ||
961 | intel_dp_check_link_status(struct intel_output *intel_output) | ||
962 | { | ||
963 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
964 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
965 | |||
966 | if (!intel_output->enc.crtc) | ||
967 | return; | ||
968 | |||
969 | if (!intel_dp_get_link_status(intel_output, link_status)) { | ||
970 | intel_dp_link_down(intel_output, dp_priv->DP); | ||
971 | return; | ||
972 | } | ||
973 | |||
974 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) | ||
975 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | ||
976 | } | ||
977 | |||
978 | /** | ||
979 | * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection. | ||
980 | * | ||
981 | * \return connector_status_connected if a DP sink is detected. | ||
982 | * \return connector_status_disconnected otherwise. | ||
983 | */ | ||
984 | static enum drm_connector_status | ||
985 | intel_dp_detect(struct drm_connector *connector) | ||
986 | { | ||
987 | struct intel_output *intel_output = to_intel_output(connector); | ||
988 | struct drm_device *dev = intel_output->base.dev; | ||
989 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
990 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
991 | uint32_t temp, bit; | ||
992 | enum drm_connector_status status; | ||
993 | |||
994 | dp_priv->has_audio = false; | ||
995 | |||
996 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
997 | |||
998 | I915_WRITE(PORT_HOTPLUG_EN, | ||
999 | temp | | ||
1000 | DPB_HOTPLUG_INT_EN | | ||
1001 | DPC_HOTPLUG_INT_EN | | ||
1002 | DPD_HOTPLUG_INT_EN); | ||
1003 | |||
1004 | POSTING_READ(PORT_HOTPLUG_EN); | ||
1005 | |||
1006 | switch (dp_priv->output_reg) { | ||
1007 | case DP_B: | ||
1008 | bit = DPB_HOTPLUG_INT_STATUS; | ||
1009 | break; | ||
1010 | case DP_C: | ||
1011 | bit = DPC_HOTPLUG_INT_STATUS; | ||
1012 | break; | ||
1013 | case DP_D: | ||
1014 | bit = DPD_HOTPLUG_INT_STATUS; | ||
1015 | break; | ||
1016 | default: | ||
1017 | return connector_status_unknown; | ||
1018 | } | ||
1019 | |||
1020 | temp = I915_READ(PORT_HOTPLUG_STAT); | ||
1021 | |||
1022 | if ((temp & bit) == 0) | ||
1023 | return connector_status_disconnected; | ||
1024 | |||
1025 | status = connector_status_disconnected; | ||
1026 | if (intel_dp_aux_native_read(intel_output, | ||
1027 | 0x000, dp_priv->dpcd, | ||
1028 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | ||
1029 | { | ||
1030 | if (dp_priv->dpcd[0] != 0) | ||
1031 | status = connector_status_connected; | ||
1032 | } | ||
1033 | return status; | ||
1034 | } | ||
1035 | |||
1036 | static int intel_dp_get_modes(struct drm_connector *connector) | ||
1037 | { | ||
1038 | struct intel_output *intel_output = to_intel_output(connector); | ||
1039 | |||
1040 | /* We should parse the EDID data and find out if it has an audio sink | ||
1041 | */ | ||
1042 | |||
1043 | return intel_ddc_get_modes(intel_output); | ||
1044 | } | ||
1045 | |||
1046 | static void | ||
1047 | intel_dp_destroy (struct drm_connector *connector) | ||
1048 | { | ||
1049 | struct intel_output *intel_output = to_intel_output(connector); | ||
1050 | |||
1051 | if (intel_output->i2c_bus) | ||
1052 | intel_i2c_destroy(intel_output->i2c_bus); | ||
1053 | drm_sysfs_connector_remove(connector); | ||
1054 | drm_connector_cleanup(connector); | ||
1055 | kfree(intel_output); | ||
1056 | } | ||
1057 | |||
1058 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | ||
1059 | .dpms = intel_dp_dpms, | ||
1060 | .mode_fixup = intel_dp_mode_fixup, | ||
1061 | .prepare = intel_encoder_prepare, | ||
1062 | .mode_set = intel_dp_mode_set, | ||
1063 | .commit = intel_encoder_commit, | ||
1064 | }; | ||
1065 | |||
1066 | static const struct drm_connector_funcs intel_dp_connector_funcs = { | ||
1067 | .dpms = drm_helper_connector_dpms, | ||
1068 | .save = intel_dp_save, | ||
1069 | .restore = intel_dp_restore, | ||
1070 | .detect = intel_dp_detect, | ||
1071 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
1072 | .destroy = intel_dp_destroy, | ||
1073 | }; | ||
1074 | |||
1075 | static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { | ||
1076 | .get_modes = intel_dp_get_modes, | ||
1077 | .mode_valid = intel_dp_mode_valid, | ||
1078 | .best_encoder = intel_best_encoder, | ||
1079 | }; | ||
1080 | |||
1081 | static void intel_dp_enc_destroy(struct drm_encoder *encoder) | ||
1082 | { | ||
1083 | drm_encoder_cleanup(encoder); | ||
1084 | } | ||
1085 | |||
1086 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { | ||
1087 | .destroy = intel_dp_enc_destroy, | ||
1088 | }; | ||
1089 | |||
1090 | void | ||
1091 | intel_dp_hot_plug(struct intel_output *intel_output) | ||
1092 | { | ||
1093 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1094 | |||
1095 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | ||
1096 | intel_dp_check_link_status(intel_output); | ||
1097 | } | ||
1098 | |||
1099 | void | ||
1100 | intel_dp_init(struct drm_device *dev, int output_reg) | ||
1101 | { | ||
1102 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1103 | struct drm_connector *connector; | ||
1104 | struct intel_output *intel_output; | ||
1105 | struct intel_dp_priv *dp_priv; | ||
1106 | |||
1107 | intel_output = kcalloc(sizeof(struct intel_output) + | ||
1108 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | ||
1109 | if (!intel_output) | ||
1110 | return; | ||
1111 | |||
1112 | dp_priv = (struct intel_dp_priv *)(intel_output + 1); | ||
1113 | |||
1114 | connector = &intel_output->base; | ||
1115 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | ||
1116 | DRM_MODE_CONNECTOR_DisplayPort); | ||
1117 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | ||
1118 | |||
1119 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | ||
1120 | |||
1121 | connector->interlace_allowed = true; | ||
1122 | connector->doublescan_allowed = 0; | ||
1123 | |||
1124 | dp_priv->intel_output = intel_output; | ||
1125 | dp_priv->output_reg = output_reg; | ||
1126 | dp_priv->has_audio = false; | ||
1127 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; | ||
1128 | intel_output->dev_priv = dp_priv; | ||
1129 | |||
1130 | drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, | ||
1131 | DRM_MODE_ENCODER_TMDS); | ||
1132 | drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); | ||
1133 | |||
1134 | drm_mode_connector_attach_encoder(&intel_output->base, | ||
1135 | &intel_output->enc); | ||
1136 | drm_sysfs_connector_add(connector); | ||
1137 | |||
1138 | /* Set up the DDC bus. */ | ||
1139 | intel_dp_i2c_init(intel_output, | ||
1140 | (output_reg == DP_B) ? "DPDDC-B" : | ||
1141 | (output_reg == DP_C) ? "DPDDC-C" : "DPDDC-D"); | ||
1142 | intel_output->ddc_bus = &dp_priv->adapter; | ||
1143 | intel_output->hot_plug = intel_dp_hot_plug; | ||
1144 | |||
1145 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | ||
1146 | * 0xd. Failure to do so will result in spurious interrupts being | ||
1147 | * generated on the port when a cable is not attached. | ||
1148 | */ | ||
1149 | if (IS_G4X(dev) && !IS_GM45(dev)) { | ||
1150 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); | ||
1151 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | ||
1152 | } | ||
1153 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h new file mode 100644 index 000000000000..2b38054d3b6d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.h | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Keith Packard | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef _INTEL_DP_H_ | ||
24 | #define _INTEL_DP_H_ | ||
25 | |||
26 | /* From the VESA DisplayPort spec */ | ||
27 | |||
28 | #define AUX_NATIVE_WRITE 0x8 | ||
29 | #define AUX_NATIVE_READ 0x9 | ||
30 | #define AUX_I2C_WRITE 0x0 | ||
31 | #define AUX_I2C_READ 0x1 | ||
32 | #define AUX_I2C_STATUS 0x2 | ||
33 | #define AUX_I2C_MOT 0x4 | ||
34 | |||
35 | #define AUX_NATIVE_REPLY_ACK (0x0 << 4) | ||
36 | #define AUX_NATIVE_REPLY_NACK (0x1 << 4) | ||
37 | #define AUX_NATIVE_REPLY_DEFER (0x2 << 4) | ||
38 | #define AUX_NATIVE_REPLY_MASK (0x3 << 4) | ||
39 | |||
40 | #define AUX_I2C_REPLY_ACK (0x0 << 6) | ||
41 | #define AUX_I2C_REPLY_NACK (0x1 << 6) | ||
42 | #define AUX_I2C_REPLY_DEFER (0x2 << 6) | ||
43 | #define AUX_I2C_REPLY_MASK (0x3 << 6) | ||
44 | |||
45 | /* AUX CH addresses */ | ||
46 | #define DP_LINK_BW_SET 0x100 | ||
47 | # define DP_LINK_BW_1_62 0x06 | ||
48 | # define DP_LINK_BW_2_7 0x0a | ||
49 | |||
50 | #define DP_LANE_COUNT_SET 0x101 | ||
51 | # define DP_LANE_COUNT_MASK 0x0f | ||
52 | # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) | ||
53 | |||
54 | #define DP_TRAINING_PATTERN_SET 0x102 | ||
55 | |||
56 | # define DP_TRAINING_PATTERN_DISABLE 0 | ||
57 | # define DP_TRAINING_PATTERN_1 1 | ||
58 | # define DP_TRAINING_PATTERN_2 2 | ||
59 | # define DP_TRAINING_PATTERN_MASK 0x3 | ||
60 | |||
61 | # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) | ||
62 | # define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) | ||
63 | # define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) | ||
64 | # define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) | ||
65 | # define DP_LINK_QUAL_PATTERN_MASK (3 << 2) | ||
66 | |||
67 | # define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) | ||
68 | # define DP_LINK_SCRAMBLING_DISABLE (1 << 5) | ||
69 | |||
70 | # define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) | ||
71 | # define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) | ||
72 | # define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) | ||
73 | # define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) | ||
74 | |||
75 | #define DP_TRAINING_LANE0_SET 0x103 | ||
76 | #define DP_TRAINING_LANE1_SET 0x104 | ||
77 | #define DP_TRAINING_LANE2_SET 0x105 | ||
78 | #define DP_TRAINING_LANE3_SET 0x106 | ||
79 | |||
80 | # define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 | ||
81 | # define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 | ||
82 | # define DP_TRAIN_MAX_SWING_REACHED (1 << 2) | ||
83 | # define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) | ||
84 | # define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) | ||
85 | # define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) | ||
86 | # define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) | ||
87 | |||
88 | # define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) | ||
89 | # define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) | ||
90 | # define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) | ||
91 | # define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) | ||
92 | # define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) | ||
93 | |||
94 | # define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 | ||
95 | # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) | ||
96 | |||
97 | #define DP_DOWNSPREAD_CTRL 0x107 | ||
98 | # define DP_SPREAD_AMP_0_5 (1 << 4) | ||
99 | |||
100 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 | ||
101 | # define DP_SET_ANSI_8B10B (1 << 0) | ||
102 | |||
103 | #define DP_LANE0_1_STATUS 0x202 | ||
104 | #define DP_LANE2_3_STATUS 0x203 | ||
105 | |||
106 | # define DP_LANE_CR_DONE (1 << 0) | ||
107 | # define DP_LANE_CHANNEL_EQ_DONE (1 << 1) | ||
108 | # define DP_LANE_SYMBOL_LOCKED (1 << 2) | ||
109 | |||
110 | #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 | ||
111 | |||
112 | #define DP_INTERLANE_ALIGN_DONE (1 << 0) | ||
113 | #define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) | ||
114 | #define DP_LINK_STATUS_UPDATED (1 << 7) | ||
115 | |||
116 | #define DP_SINK_STATUS 0x205 | ||
117 | |||
118 | #define DP_RECEIVE_PORT_0_STATUS (1 << 0) | ||
119 | #define DP_RECEIVE_PORT_1_STATUS (1 << 1) | ||
120 | |||
121 | #define DP_ADJUST_REQUEST_LANE0_1 0x206 | ||
122 | #define DP_ADJUST_REQUEST_LANE2_3 0x207 | ||
123 | |||
124 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 | ||
125 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 | ||
126 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c | ||
127 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 | ||
128 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 | ||
129 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 | ||
130 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 | ||
131 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 | ||
132 | |||
133 | struct i2c_algo_dp_aux_data { | ||
134 | bool running; | ||
135 | u16 address; | ||
136 | int (*aux_ch) (struct i2c_adapter *adapter, | ||
137 | uint8_t *send, int send_bytes, | ||
138 | uint8_t *recv, int recv_bytes); | ||
139 | }; | ||
140 | |||
141 | int | ||
142 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter); | ||
143 | |||
144 | #endif /* _INTEL_DP_H_ */ | ||
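The i2c_algo_dp_aux_data hook declared above is the whole contract between this header and its users: supply an aux_ch callback that moves raw AUX bytes, point the adapter's algo_data at the structure, and register the adapter with i2c_dp_aux_add_bus(), as intel_dp_i2c_init() does earlier in this patch. A minimal, hypothetical sketch of another client (my_aux_ch, my_algo, my_adapter and my_register_ddc are illustrative names only):

    #include <linux/module.h>
    #include <linux/string.h>
    #include <linux/i2c.h>
    #include "intel_dp.h"

    /* Hypothetical AUX transfer hook; a real driver would talk to its hardware
     * here and return the number of reply bytes or a negative errno. */
    static int my_aux_ch(struct i2c_adapter *adapter,
                         uint8_t *send, int send_bytes,
                         uint8_t *recv, int recv_bytes)
    {
            return recv_bytes;
    }

    static struct i2c_algo_dp_aux_data my_algo = {
            .running = false,
            .address = 0,
            .aux_ch  = my_aux_ch,
    };

    static struct i2c_adapter my_adapter;

    static int my_register_ddc(struct device *parent)
    {
            my_adapter.owner = THIS_MODULE;
            my_adapter.class = I2C_CLASS_DDC;
            strlcpy(my_adapter.name, "DPDDC-sketch", sizeof(my_adapter.name));
            my_adapter.algo_data = &my_algo;
            my_adapter.dev.parent = parent;
            return i2c_dp_aux_add_bus(&my_adapter);
    }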
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c new file mode 100644 index 000000000000..4e60f14b1a6d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_i2c.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 Keith Packard | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/i2c.h> | ||
31 | #include "intel_dp.h" | ||
32 | |||
33 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ | ||
34 | |||
35 | #define MODE_I2C_START 1 | ||
36 | #define MODE_I2C_WRITE 2 | ||
37 | #define MODE_I2C_READ 4 | ||
38 | #define MODE_I2C_STOP 8 | ||
39 | |||
40 | static int | ||
41 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, | ||
42 | uint8_t write_byte, uint8_t *read_byte) | ||
43 | { | ||
44 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
45 | uint16_t address = algo_data->address; | ||
46 | uint8_t msg[5]; | ||
47 | uint8_t reply[2]; | ||
48 | int msg_bytes; | ||
49 | int reply_bytes; | ||
50 | int ret; | ||
51 | |||
52 | /* Set up the command byte */ | ||
53 | if (mode & MODE_I2C_READ) | ||
54 | msg[0] = AUX_I2C_READ << 4; | ||
55 | else | ||
56 | msg[0] = AUX_I2C_WRITE << 4; | ||
57 | |||
58 | if (!(mode & MODE_I2C_STOP)) | ||
59 | msg[0] |= AUX_I2C_MOT << 4; | ||
60 | |||
61 | msg[1] = address >> 8; | ||
62 | msg[2] = address; | ||
63 | |||
64 | switch (mode) { | ||
65 | case MODE_I2C_WRITE: | ||
66 | msg[3] = 0; | ||
67 | msg[4] = write_byte; | ||
68 | msg_bytes = 5; | ||
69 | reply_bytes = 1; | ||
70 | break; | ||
71 | case MODE_I2C_READ: | ||
72 | msg[3] = 0; | ||
73 | msg_bytes = 4; | ||
74 | reply_bytes = 2; | ||
75 | break; | ||
76 | default: | ||
77 | msg_bytes = 3; | ||
78 | reply_bytes = 1; | ||
79 | break; | ||
80 | } | ||
81 | |||
82 | for (;;) { | ||
83 | ret = (*algo_data->aux_ch)(adapter, | ||
84 | msg, msg_bytes, | ||
85 | reply, reply_bytes); | ||
86 | if (ret < 0) { | ||
87 | printk(KERN_ERR "aux_ch failed %d\n", ret); | ||
88 | return ret; | ||
89 | } | ||
90 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
91 | case AUX_I2C_REPLY_ACK: | ||
92 | if (mode == MODE_I2C_READ) { | ||
93 | *read_byte = reply[1]; | ||
94 | } | ||
95 | return reply_bytes - 1; | ||
96 | case AUX_I2C_REPLY_NACK: | ||
97 | printk(KERN_ERR "aux_ch nack\n"); | ||
98 | return -EREMOTEIO; | ||
99 | case AUX_I2C_REPLY_DEFER: | ||
100 | printk(KERN_ERR "aux_ch defer\n"); | ||
101 | udelay(100); | ||
102 | break; | ||
103 | default: | ||
104 | printk(KERN_ERR "aux_ch invalid reply 0x%02x\n", reply[0]); | ||
105 | return -EREMOTEIO; | ||
106 | } | ||
107 | } | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * I2C over AUX CH | ||
112 | */ | ||
113 | |||
114 | /* | ||
115 | * Send the address. If the I2C link is running, this 'restarts' | ||
116 | * the connection with the new address; this is used for doing | ||
117 | * a write followed by a read (as needed for DDC) | ||
118 | */ | ||
119 | static int | ||
120 | i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) | ||
121 | { | ||
122 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
123 | int mode = MODE_I2C_START; | ||
124 | int ret; | ||
125 | |||
126 | if (reading) | ||
127 | mode |= MODE_I2C_READ; | ||
128 | else | ||
129 | mode |= MODE_I2C_WRITE; | ||
130 | algo_data->address = address; | ||
131 | algo_data->running = true; | ||
132 | ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Stop the I2C transaction. This closes out the link, sending | ||
138 | * a bare address packet with the MOT bit turned off | ||
139 | */ | ||
140 | static void | ||
141 | i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) | ||
142 | { | ||
143 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
144 | int mode = MODE_I2C_STOP; | ||
145 | |||
146 | if (reading) | ||
147 | mode |= MODE_I2C_READ; | ||
148 | else | ||
149 | mode |= MODE_I2C_WRITE; | ||
150 | if (algo_data->running) { | ||
151 | (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); | ||
152 | algo_data->running = false; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Write a single byte to the current I2C address; the | ||
158 | * I2C link must be running or this returns -EIO | ||
159 | */ | ||
160 | static int | ||
161 | i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) | ||
162 | { | ||
163 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
164 | int ret; | ||
165 | |||
166 | if (!algo_data->running) | ||
167 | return -EIO; | ||
168 | |||
169 | ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); | ||
170 | return ret; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * Read a single byte from the current I2C address; the | ||
175 | * I2C link must be running or this returns -EIO | ||
176 | */ | ||
177 | static int | ||
178 | i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) | ||
179 | { | ||
180 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
181 | int ret; | ||
182 | |||
183 | if (!algo_data->running) | ||
184 | return -EIO; | ||
185 | |||
186 | ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); | ||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | static int | ||
191 | i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, | ||
192 | struct i2c_msg *msgs, | ||
193 | int num) | ||
194 | { | ||
195 | int ret = 0; | ||
196 | bool reading = false; | ||
197 | int m; | ||
198 | int b; | ||
199 | |||
200 | for (m = 0; m < num; m++) { | ||
201 | u16 len = msgs[m].len; | ||
202 | u8 *buf = msgs[m].buf; | ||
203 | reading = (msgs[m].flags & I2C_M_RD) != 0; | ||
204 | ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading); | ||
205 | if (ret < 0) | ||
206 | break; | ||
207 | if (reading) { | ||
208 | for (b = 0; b < len; b++) { | ||
209 | ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]); | ||
210 | if (ret < 0) | ||
211 | break; | ||
212 | } | ||
213 | } else { | ||
214 | for (b = 0; b < len; b++) { | ||
215 | ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]); | ||
216 | if (ret < 0) | ||
217 | break; | ||
218 | } | ||
219 | } | ||
220 | if (ret < 0) | ||
221 | break; | ||
222 | } | ||
223 | if (ret >= 0) | ||
224 | ret = num; | ||
225 | i2c_algo_dp_aux_stop(adapter, reading); | ||
226 | printk(KERN_ERR "dp_aux_xfer return %d\n", ret); | ||
227 | return ret; | ||
228 | } | ||
229 | |||
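Once registered, the adapter behaves like any other i2c bus: an i2c_transfer() call lands in the master_xfer hook above, which turns each message into an address cycle followed by byte-at-a-time AUX transactions. A hedged sketch of a DDC/EDID read through such an adapter (read_edid_block is an illustrative helper, not part of this patch):

    #include <linux/errno.h>
    #include <linux/i2c.h>

    /* Sketch: read the first 128 bytes of EDID at I2C address 0x50 through the
     * DP AUX adapter; each msg becomes an address cycle plus byte transfers. */
    static int read_edid_block(struct i2c_adapter *adapter, uint8_t *buf)
    {
            uint8_t offset = 0;
            struct i2c_msg msgs[] = {
                    { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
                    { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
            };

            return i2c_transfer(adapter, msgs, 2) == 2 ? 0 : -EIO;
    }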
230 | static u32 | ||
231 | i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) | ||
232 | { | ||
233 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | ||
234 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | | ||
235 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | | ||
236 | I2C_FUNC_10BIT_ADDR; | ||
237 | } | ||
238 | |||
239 | static const struct i2c_algorithm i2c_dp_aux_algo = { | ||
240 | .master_xfer = i2c_algo_dp_aux_xfer, | ||
241 | .functionality = i2c_algo_dp_aux_functionality, | ||
242 | }; | ||
243 | |||
244 | static void | ||
245 | i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) | ||
246 | { | ||
247 | (void) i2c_algo_dp_aux_address(adapter, 0, false); | ||
248 | (void) i2c_algo_dp_aux_stop(adapter, false); | ||
249 | |||
250 | } | ||
251 | |||
252 | static int | ||
253 | i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) | ||
254 | { | ||
255 | adapter->algo = &i2c_dp_aux_algo; | ||
256 | adapter->retries = 3; | ||
257 | i2c_dp_aux_reset_bus(adapter); | ||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | int | ||
262 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter) | ||
263 | { | ||
264 | int error; | ||
265 | |||
266 | error = i2c_dp_aux_prepare_bus(adapter); | ||
267 | if (error) | ||
268 | return error; | ||
269 | error = i2c_add_adapter(adapter); | ||
270 | return error; | ||
271 | } | ||
272 | EXPORT_SYMBOL(i2c_dp_aux_add_bus); | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd4b9c5f715e..004541c935a8 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -54,6 +54,7 @@ | |||
54 | #define INTEL_OUTPUT_LVDS 4 | 54 | #define INTEL_OUTPUT_LVDS 4 |
55 | #define INTEL_OUTPUT_TVOUT 5 | 55 | #define INTEL_OUTPUT_TVOUT 5 |
56 | #define INTEL_OUTPUT_HDMI 6 | 56 | #define INTEL_OUTPUT_HDMI 6 |
57 | #define INTEL_OUTPUT_DISPLAYPORT 7 | ||
57 | 58 | ||
58 | #define INTEL_DVO_CHIP_NONE 0 | 59 | #define INTEL_DVO_CHIP_NONE 0 |
59 | #define INTEL_DVO_CHIP_LVDS 1 | 60 | #define INTEL_DVO_CHIP_LVDS 1 |
@@ -65,7 +66,6 @@ struct intel_i2c_chan { | |||
65 | u32 reg; /* GPIO reg */ | 66 | u32 reg; /* GPIO reg */ |
66 | struct i2c_adapter adapter; | 67 | struct i2c_adapter adapter; |
67 | struct i2c_algo_bit_data algo; | 68 | struct i2c_algo_bit_data algo; |
68 | u8 slave_addr; | ||
69 | }; | 69 | }; |
70 | 70 | ||
71 | struct intel_framebuffer { | 71 | struct intel_framebuffer { |
@@ -79,11 +79,12 @@ struct intel_output { | |||
79 | 79 | ||
80 | struct drm_encoder enc; | 80 | struct drm_encoder enc; |
81 | int type; | 81 | int type; |
82 | struct intel_i2c_chan *i2c_bus; /* for control functions */ | 82 | struct i2c_adapter *i2c_bus; |
83 | struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ | 83 | struct i2c_adapter *ddc_bus; |
84 | bool load_detect_temp; | 84 | bool load_detect_temp; |
85 | bool needs_tv_clock; | 85 | bool needs_tv_clock; |
86 | void *dev_priv; | 86 | void *dev_priv; |
87 | void (*hot_plug)(struct intel_output *); | ||
87 | }; | 88 | }; |
88 | 89 | ||
89 | struct intel_crtc { | 90 | struct intel_crtc { |
@@ -104,9 +105,9 @@ struct intel_crtc { | |||
104 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) | 105 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) |
105 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 106 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
106 | 107 | ||
107 | struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | 108 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
108 | const char *name); | 109 | const char *name); |
109 | void intel_i2c_destroy(struct intel_i2c_chan *chan); | 110 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
110 | int intel_ddc_get_modes(struct intel_output *intel_output); | 111 | int intel_ddc_get_modes(struct intel_output *intel_output); |
111 | extern bool intel_ddc_probe(struct intel_output *intel_output); | 112 | extern bool intel_ddc_probe(struct intel_output *intel_output); |
112 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 113 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); |
@@ -116,6 +117,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | |||
116 | extern void intel_dvo_init(struct drm_device *dev); | 117 | extern void intel_dvo_init(struct drm_device *dev); |
117 | extern void intel_tv_init(struct drm_device *dev); | 118 | extern void intel_tv_init(struct drm_device *dev); |
118 | extern void intel_lvds_init(struct drm_device *dev); | 119 | extern void intel_lvds_init(struct drm_device *dev); |
120 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | ||
121 | void | ||
122 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||
123 | struct drm_display_mode *adjusted_mode); | ||
119 | 124 | ||
120 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 125 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
121 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 126 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 1ee3007d6ec0..13bff20930e8 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev) | |||
384 | { | 384 | { |
385 | struct intel_output *intel_output; | 385 | struct intel_output *intel_output; |
386 | struct intel_dvo_device *dvo; | 386 | struct intel_dvo_device *dvo; |
387 | struct intel_i2c_chan *i2cbus = NULL; | 387 | struct i2c_adapter *i2cbus = NULL; |
388 | int ret = 0; | 388 | int ret = 0; |
389 | int i; | 389 | int i; |
390 | int gpio_inited = 0; | ||
391 | int encoder_type = DRM_MODE_ENCODER_NONE; | 390 | int encoder_type = DRM_MODE_ENCODER_NONE; |
392 | intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); | 391 | intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); |
393 | if (!intel_output) | 392 | if (!intel_output) |
@@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev) | |||
420 | * It appears that everything is on GPIOE except for panels | 419 | * It appears that everything is on GPIOE except for panels |
421 | * on i830 laptops, which are on GPIOB (DVOA). | 420 | * on i830 laptops, which are on GPIOB (DVOA). |
422 | */ | 421 | */ |
423 | if (gpio_inited != gpio) { | 422 | if (i2cbus != NULL) |
424 | if (i2cbus != NULL) | 423 | intel_i2c_destroy(i2cbus); |
425 | intel_i2c_destroy(i2cbus); | 424 | if (!(i2cbus = intel_i2c_create(dev, gpio, |
426 | if (!(i2cbus = intel_i2c_create(dev, gpio, | 425 | gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { |
427 | gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { | 426 | continue; |
428 | continue; | ||
429 | } | ||
430 | gpio_inited = gpio; | ||
431 | } | 427 | } |
432 | 428 | ||
433 | if (dvo->dev_ops!= NULL) | 429 | if (dvo->dev_ops!= NULL) |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4ea2a651b92c..9e30daae37dc 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "drm.h" | 32 | #include "drm.h" |
33 | #include "drm_crtc.h" | 33 | #include "drm_crtc.h" |
34 | #include "drm_edid.h" | ||
34 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
35 | #include "i915_drm.h" | 36 | #include "i915_drm.h" |
36 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
@@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
56 | sdvox = SDVO_ENCODING_HDMI | | 57 | sdvox = SDVO_ENCODING_HDMI | |
57 | SDVO_BORDER_ENABLE | | 58 | SDVO_BORDER_ENABLE | |
58 | SDVO_VSYNC_ACTIVE_HIGH | | 59 | SDVO_VSYNC_ACTIVE_HIGH | |
59 | SDVO_HSYNC_ACTIVE_HIGH | | 60 | SDVO_HSYNC_ACTIVE_HIGH; |
60 | SDVO_NULL_PACKETS_DURING_VSYNC; | ||
61 | 61 | ||
62 | if (hdmi_priv->has_hdmi_sink) | 62 | if (hdmi_priv->has_hdmi_sink) |
63 | sdvox |= SDVO_AUDIO_ENABLE; | 63 | sdvox |= SDVO_AUDIO_ENABLE; |
@@ -129,20 +129,26 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
129 | return true; | 129 | return true; |
130 | } | 130 | } |
131 | 131 | ||
132 | static void | 132 | static enum drm_connector_status |
133 | intel_hdmi_sink_detect(struct drm_connector *connector) | 133 | intel_hdmi_edid_detect(struct drm_connector *connector) |
134 | { | 134 | { |
135 | struct intel_output *intel_output = to_intel_output(connector); | 135 | struct intel_output *intel_output = to_intel_output(connector); |
136 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 136 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; |
137 | struct edid *edid = NULL; | 137 | struct edid *edid = NULL; |
138 | enum drm_connector_status status = connector_status_disconnected; | ||
138 | 139 | ||
139 | edid = drm_get_edid(&intel_output->base, | 140 | edid = drm_get_edid(&intel_output->base, |
140 | &intel_output->ddc_bus->adapter); | 141 | intel_output->ddc_bus); |
141 | if (edid != NULL) { | 142 | hdmi_priv->has_hdmi_sink = false; |
142 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 143 | if (edid) { |
143 | kfree(edid); | 144 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
145 | status = connector_status_connected; | ||
146 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | ||
147 | } | ||
144 | intel_output->base.display_info.raw_edid = NULL; | 148 | intel_output->base.display_info.raw_edid = NULL; |
149 | kfree(edid); | ||
145 | } | 150 | } |
151 | return status; | ||
146 | } | 152 | } |
147 | 153 | ||
148 | static enum drm_connector_status | 154 | static enum drm_connector_status |
@@ -154,11 +160,7 @@ igdng_hdmi_detect(struct drm_connector *connector) | |||
154 | /* FIXME hotplug detect */ | 160 | /* FIXME hotplug detect */ |
155 | 161 | ||
156 | hdmi_priv->has_hdmi_sink = false; | 162 | hdmi_priv->has_hdmi_sink = false; |
157 | intel_hdmi_sink_detect(connector); | 163 | return intel_hdmi_edid_detect(connector); |
158 | if (hdmi_priv->has_hdmi_sink) | ||
159 | return connector_status_connected; | ||
160 | else | ||
161 | return connector_status_disconnected; | ||
162 | } | 164 | } |
163 | 165 | ||
164 | static enum drm_connector_status | 166 | static enum drm_connector_status |
@@ -201,10 +203,9 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
201 | return connector_status_unknown; | 203 | return connector_status_unknown; |
202 | } | 204 | } |
203 | 205 | ||
204 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { | 206 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) |
205 | intel_hdmi_sink_detect(connector); | 207 | return intel_hdmi_edid_detect(connector); |
206 | return connector_status_connected; | 208 | else |
207 | } else | ||
208 | return connector_status_disconnected; | 209 | return connector_status_disconnected; |
209 | } | 210 | } |
210 | 211 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index f7061f68d050..62b8bead7652 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -124,6 +124,7 @@ static void set_data(void *data, int state_high) | |||
124 | * @output: driver specific output device | 124 | * @output: driver specific output device |
125 | * @reg: GPIO reg to use | 125 | * @reg: GPIO reg to use |
126 | * @name: name for this bus | 126 | * @name: name for this bus |
127 | * @slave_addr: slave address (if fixed) | ||
127 | * | 128 | * |
128 | * Creates and registers a new i2c bus with the Linux i2c layer, for use | 129 | * Creates and registers a new i2c bus with the Linux i2c layer, for use |
129 | * in output probing and control (e.g. DDC or SDVO control functions). | 130 | * in output probing and control (e.g. DDC or SDVO control functions). |
@@ -139,8 +140,8 @@ static void set_data(void *data, int state_high) | |||
139 | * %GPIOH | 140 | * %GPIOH |
140 | * see PRM for details on how these different busses are used. | 141 | * see PRM for details on how these different busses are used. |
141 | */ | 142 | */ |
142 | struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | 143 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
143 | const char *name) | 144 | const char *name) |
144 | { | 145 | { |
145 | struct intel_i2c_chan *chan; | 146 | struct intel_i2c_chan *chan; |
146 | 147 | ||
@@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | |||
174 | intel_i2c_quirk_set(dev, false); | 175 | intel_i2c_quirk_set(dev, false); |
175 | udelay(20); | 176 | udelay(20); |
176 | 177 | ||
177 | return chan; | 178 | return &chan->adapter; |
178 | 179 | ||
179 | out_free: | 180 | out_free: |
180 | kfree(chan); | 181 | kfree(chan); |
@@ -187,11 +188,16 @@ out_free: | |||
187 | * | 188 | * |
188 | * Unregister the adapter from the i2c layer, then free the structure. | 189 | * Unregister the adapter from the i2c layer, then free the structure. |
189 | */ | 190 | */ |
190 | void intel_i2c_destroy(struct intel_i2c_chan *chan) | 191 | void intel_i2c_destroy(struct i2c_adapter *adapter) |
191 | { | 192 | { |
192 | if (!chan) | 193 | struct intel_i2c_chan *chan; |
194 | |||
195 | if (!adapter) | ||
193 | return; | 196 | return; |
194 | 197 | ||
198 | chan = container_of(adapter, | ||
199 | struct intel_i2c_chan, | ||
200 | adapter); | ||
195 | i2c_del_adapter(&chan->adapter); | 201 | i2c_del_adapter(&chan->adapter); |
196 | kfree(chan); | 202 | kfree(chan); |
197 | } | 203 | } |
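
The intel_i2c.c change above stops exposing struct intel_i2c_chan and instead hands callers the embedded struct i2c_adapter, recovering the wrapper with container_of() on teardown. A minimal sketch of that embed-and-recover pattern, using hypothetical names rather than the driver's own code:

    #include <linux/i2c.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>

    struct my_chan {
            struct i2c_adapter adapter;     /* handed out to callers */
            u32 reg;                        /* driver-private state */
    };

    static void my_destroy(struct i2c_adapter *adapter)
    {
            /* recover the wrapper from the embedded adapter member */
            struct my_chan *chan = container_of(adapter, struct my_chan, adapter);

            i2c_del_adapter(&chan->adapter);
            kfree(chan);
    }
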
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f073ed8432e8..9564ca44a977 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -39,6 +39,21 @@ | |||
39 | 39 | ||
40 | #define I915_LVDS "i915_lvds" | 40 | #define I915_LVDS "i915_lvds" |
41 | 41 | ||
42 | /* | ||
43 | * the following four scaling options are defined. | ||
44 | * #define DRM_MODE_SCALE_NON_GPU 0 | ||
45 | * #define DRM_MODE_SCALE_FULLSCREEN 1 | ||
46 | * #define DRM_MODE_SCALE_NO_SCALE 2 | ||
47 | * #define DRM_MODE_SCALE_ASPECT 3 | ||
48 | */ | ||
49 | |||
50 | /* Private structure for the integrated LVDS support */ | ||
51 | struct intel_lvds_priv { | ||
52 | int fitting_mode; | ||
53 | u32 pfit_control; | ||
54 | u32 pfit_pgm_ratios; | ||
55 | }; | ||
56 | |||
42 | /** | 57 | /** |
43 | * Sets the backlight level. | 58 | * Sets the backlight level. |
44 | * | 59 | * |
@@ -213,10 +228,27 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
213 | struct drm_display_mode *mode, | 228 | struct drm_display_mode *mode, |
214 | struct drm_display_mode *adjusted_mode) | 229 | struct drm_display_mode *adjusted_mode) |
215 | { | 230 | { |
231 | /* | ||
232 | * floating point operation is not supported. So the PANEL_RATIO_FACTOR | ||
233 | * is defined, which avoids the floating point computation when | ||
234 | * calculating the panel ratio. | ||
235 | */ | ||
236 | #define PANEL_RATIO_FACTOR 8192 | ||
216 | struct drm_device *dev = encoder->dev; | 237 | struct drm_device *dev = encoder->dev; |
217 | struct drm_i915_private *dev_priv = dev->dev_private; | 238 | struct drm_i915_private *dev_priv = dev->dev_private; |
218 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 239 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
219 | struct drm_encoder *tmp_encoder; | 240 | struct drm_encoder *tmp_encoder; |
241 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
242 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | ||
243 | u32 pfit_control = 0, pfit_pgm_ratios = 0; | ||
244 | int left_border = 0, right_border = 0, top_border = 0; | ||
245 | int bottom_border = 0; | ||
246 | bool border = 0; | ||
247 | int panel_ratio, desired_ratio, vert_scale, horiz_scale; | ||
248 | int horiz_ratio, vert_ratio; | ||
249 | u32 hsync_width, vsync_width; | ||
250 | u32 hblank_width, vblank_width; | ||
251 | u32 hsync_pos, vsync_pos; | ||
220 | 252 | ||
221 | /* Should never happen!! */ | 253 | /* Should never happen!! */ |
222 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { | 254 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { |
@@ -232,7 +264,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
232 | return false; | 264 | return false; |
233 | } | 265 | } |
234 | } | 266 | } |
235 | 267 | /* If we don't have a panel mode, there is nothing we can do */ | |
268 | if (dev_priv->panel_fixed_mode == NULL) | ||
269 | return true; | ||
236 | /* | 270 | /* |
237 | * If we have timings from the BIOS for the panel, put them in | 271 | * If we have timings from the BIOS for the panel, put them in |
238 | * to the adjusted mode. The CRTC will be set up for this mode, | 272 | * to the adjusted mode. The CRTC will be set up for this mode, |
@@ -256,6 +290,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
256 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | 290 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); |
257 | } | 291 | } |
258 | 292 | ||
293 | /* Make sure pre-965s set dither correctly */ | ||
294 | if (!IS_I965G(dev)) { | ||
295 | if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) | ||
296 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
297 | } | ||
298 | |||
299 | /* Native modes don't need fitting */ | ||
300 | if (adjusted_mode->hdisplay == mode->hdisplay && | ||
301 | adjusted_mode->vdisplay == mode->vdisplay) { | ||
302 | pfit_pgm_ratios = 0; | ||
303 | border = 0; | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | /* 965+ wants fuzzy fitting */ | ||
308 | if (IS_I965G(dev)) | ||
309 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | | ||
310 | PFIT_FILTER_FUZZY; | ||
311 | |||
312 | hsync_width = adjusted_mode->crtc_hsync_end - | ||
313 | adjusted_mode->crtc_hsync_start; | ||
314 | vsync_width = adjusted_mode->crtc_vsync_end - | ||
315 | adjusted_mode->crtc_vsync_start; | ||
316 | hblank_width = adjusted_mode->crtc_hblank_end - | ||
317 | adjusted_mode->crtc_hblank_start; | ||
318 | vblank_width = adjusted_mode->crtc_vblank_end - | ||
319 | adjusted_mode->crtc_vblank_start; | ||
320 | /* | ||
321 | * Deal with panel fitting options. Figure out how to stretch the | ||
322 | * image based on its aspect ratio & the current panel fitting mode. | ||
323 | */ | ||
324 | panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / | ||
325 | adjusted_mode->vdisplay; | ||
326 | desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / | ||
327 | mode->vdisplay; | ||
328 | /* | ||
329 | * Enable automatic panel scaling for non-native modes so that they fill | ||
330 | * the screen. Should be enabled before the pipe is enabled, according | ||
331 | * to register description and PRM. | ||
332 | * Change the value here to see the borders for debugging | ||
333 | */ | ||
334 | I915_WRITE(BCLRPAT_A, 0); | ||
335 | I915_WRITE(BCLRPAT_B, 0); | ||
336 | |||
337 | switch (lvds_priv->fitting_mode) { | ||
338 | case DRM_MODE_SCALE_NO_SCALE: | ||
339 | /* | ||
340 | * For centered modes, we have to calculate border widths & | ||
341 | * heights and modify the values programmed into the CRTC. | ||
342 | */ | ||
343 | left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; | ||
344 | right_border = left_border; | ||
345 | if (mode->hdisplay & 1) | ||
346 | right_border++; | ||
347 | top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; | ||
348 | bottom_border = top_border; | ||
349 | if (mode->vdisplay & 1) | ||
350 | bottom_border++; | ||
351 | /* Set active & border values */ | ||
352 | adjusted_mode->crtc_hdisplay = mode->hdisplay; | ||
353 | /* Keep the border even */ | ||
354 | if (right_border & 1) | ||
355 | right_border++; | ||
356 | /* use the border directly instead of border minus one */ | ||
357 | adjusted_mode->crtc_hblank_start = mode->hdisplay + | ||
358 | right_border; | ||
359 | /* keep the hblank width constant */ | ||
360 | adjusted_mode->crtc_hblank_end = | ||
361 | adjusted_mode->crtc_hblank_start + hblank_width; | ||
362 | /* get the hsync pos relative to hblank start */ | ||
363 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
364 | /* keep the hsync pos even */ | ||
365 | if (hsync_pos & 1) | ||
366 | hsync_pos++; | ||
367 | adjusted_mode->crtc_hsync_start = | ||
368 | adjusted_mode->crtc_hblank_start + hsync_pos; | ||
369 | /* keep the hsync width constant */ | ||
370 | adjusted_mode->crtc_hsync_end = | ||
371 | adjusted_mode->crtc_hsync_start + hsync_width; | ||
372 | adjusted_mode->crtc_vdisplay = mode->vdisplay; | ||
373 | /* use the border instead of border minus one */ | ||
374 | adjusted_mode->crtc_vblank_start = mode->vdisplay + | ||
375 | bottom_border; | ||
376 | /* keep the vblank width constant */ | ||
377 | adjusted_mode->crtc_vblank_end = | ||
378 | adjusted_mode->crtc_vblank_start + vblank_width; | ||
379 | /* get the vsync start position relative to vblank start */ | ||
380 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
381 | adjusted_mode->crtc_vsync_start = | ||
382 | adjusted_mode->crtc_vblank_start + vsync_pos; | ||
383 | /* keep the vsync width constant */ | ||
384 | adjusted_mode->crtc_vsync_end = | ||
385 | adjusted_mode->crtc_vblank_start + vsync_width; | ||
386 | border = 1; | ||
387 | break; | ||
388 | case DRM_MODE_SCALE_ASPECT: | ||
389 | /* Scale but preserve the aspect ratio */ | ||
390 | pfit_control |= PFIT_ENABLE; | ||
391 | if (IS_I965G(dev)) { | ||
392 | /* 965+ is easy, it does everything in hw */ | ||
393 | if (panel_ratio > desired_ratio) | ||
394 | pfit_control |= PFIT_SCALING_PILLAR; | ||
395 | else if (panel_ratio < desired_ratio) | ||
396 | pfit_control |= PFIT_SCALING_LETTER; | ||
397 | else | ||
398 | pfit_control |= PFIT_SCALING_AUTO; | ||
399 | } else { | ||
400 | /* | ||
401 | * For earlier chips we have to calculate the scaling | ||
402 | * ratio by hand and program it into the | ||
403 | * PFIT_PGM_RATIO register | ||
404 | */ | ||
405 | u32 horiz_bits, vert_bits, bits = 12; | ||
406 | horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ | ||
407 | adjusted_mode->hdisplay; | ||
408 | vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ | ||
409 | adjusted_mode->vdisplay; | ||
410 | horiz_scale = adjusted_mode->hdisplay * | ||
411 | PANEL_RATIO_FACTOR / mode->hdisplay; | ||
412 | vert_scale = adjusted_mode->vdisplay * | ||
413 | PANEL_RATIO_FACTOR / mode->vdisplay; | ||
414 | |||
415 | /* retain aspect ratio */ | ||
416 | if (panel_ratio > desired_ratio) { /* Pillar */ | ||
417 | u32 scaled_width; | ||
418 | scaled_width = mode->hdisplay * vert_scale / | ||
419 | PANEL_RATIO_FACTOR; | ||
420 | horiz_ratio = vert_ratio; | ||
421 | pfit_control |= (VERT_AUTO_SCALE | | ||
422 | VERT_INTERP_BILINEAR | | ||
423 | HORIZ_INTERP_BILINEAR); | ||
424 | /* Pillar will have left/right borders */ | ||
425 | left_border = (adjusted_mode->hdisplay - | ||
426 | scaled_width) / 2; | ||
427 | right_border = left_border; | ||
428 | if (mode->hdisplay & 1) /* odd resolutions */ | ||
429 | right_border++; | ||
430 | /* keep the border even */ | ||
431 | if (right_border & 1) | ||
432 | right_border++; | ||
433 | adjusted_mode->crtc_hdisplay = scaled_width; | ||
434 | /* use border instead of border minus one */ | ||
435 | adjusted_mode->crtc_hblank_start = | ||
436 | scaled_width + right_border; | ||
437 | /* keep the hblank width constant */ | ||
438 | adjusted_mode->crtc_hblank_end = | ||
439 | adjusted_mode->crtc_hblank_start + | ||
440 | hblank_width; | ||
441 | /* | ||
442 | * get the hsync start pos relative to | ||
443 | * hblank start | ||
444 | */ | ||
445 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
446 | /* keep the hsync_pos even */ | ||
447 | if (hsync_pos & 1) | ||
448 | hsync_pos++; | ||
449 | adjusted_mode->crtc_hsync_start = | ||
450 | adjusted_mode->crtc_hblank_start + | ||
451 | hsync_pos; | ||
452 | /* keep the hsync width constant */ | ||
453 | adjusted_mode->crtc_hsync_end = | ||
454 | adjusted_mode->crtc_hsync_start + | ||
455 | hsync_width; | ||
456 | border = 1; | ||
457 | } else if (panel_ratio < desired_ratio) { /* letter */ | ||
458 | u32 scaled_height = mode->vdisplay * | ||
459 | horiz_scale / PANEL_RATIO_FACTOR; | ||
460 | vert_ratio = horiz_ratio; | ||
461 | pfit_control |= (HORIZ_AUTO_SCALE | | ||
462 | VERT_INTERP_BILINEAR | | ||
463 | HORIZ_INTERP_BILINEAR); | ||
464 | /* Letterbox will have top/bottom border */ | ||
465 | top_border = (adjusted_mode->vdisplay - | ||
466 | scaled_height) / 2; | ||
467 | bottom_border = top_border; | ||
468 | if (mode->vdisplay & 1) | ||
469 | bottom_border++; | ||
470 | adjusted_mode->crtc_vdisplay = scaled_height; | ||
471 | /* use border instead of border minus one */ | ||
472 | adjusted_mode->crtc_vblank_start = | ||
473 | scaled_height + bottom_border; | ||
474 | /* keep the vblank width constant */ | ||
475 | adjusted_mode->crtc_vblank_end = | ||
476 | adjusted_mode->crtc_vblank_start + | ||
477 | vblank_width; | ||
478 | /* | ||
479 | * get the vsync start pos relative to | ||
480 | * vblank start | ||
481 | */ | ||
482 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
483 | adjusted_mode->crtc_vsync_start = | ||
484 | adjusted_mode->crtc_vblank_start + | ||
485 | vsync_pos; | ||
486 | /* keep the vsync width constant */ | ||
487 | adjusted_mode->crtc_vsync_end = | ||
488 | adjusted_mode->crtc_vsync_start + | ||
489 | vsync_width; | ||
490 | border = 1; | ||
491 | } else { | ||
492 | /* Aspects match, let hw scale both directions */ | ||
493 | pfit_control |= (VERT_AUTO_SCALE | | ||
494 | HORIZ_AUTO_SCALE | | ||
495 | VERT_INTERP_BILINEAR | | ||
496 | HORIZ_INTERP_BILINEAR); | ||
497 | } | ||
498 | horiz_bits = (1 << bits) * horiz_ratio / | ||
499 | PANEL_RATIO_FACTOR; | ||
500 | vert_bits = (1 << bits) * vert_ratio / | ||
501 | PANEL_RATIO_FACTOR; | ||
502 | pfit_pgm_ratios = | ||
503 | ((vert_bits << PFIT_VERT_SCALE_SHIFT) & | ||
504 | PFIT_VERT_SCALE_MASK) | | ||
505 | ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & | ||
506 | PFIT_HORIZ_SCALE_MASK); | ||
507 | } | ||
508 | break; | ||
509 | |||
510 | case DRM_MODE_SCALE_FULLSCREEN: | ||
511 | /* | ||
512 | * Full scaling, even if it changes the aspect ratio. | ||
513 | * Fortunately this is all done for us in hw. | ||
514 | */ | ||
515 | pfit_control |= PFIT_ENABLE; | ||
516 | if (IS_I965G(dev)) | ||
517 | pfit_control |= PFIT_SCALING_AUTO; | ||
518 | else | ||
519 | pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | | ||
520 | VERT_INTERP_BILINEAR | | ||
521 | HORIZ_INTERP_BILINEAR); | ||
522 | break; | ||
523 | default: | ||
524 | break; | ||
525 | } | ||
526 | |||
527 | out: | ||
528 | lvds_priv->pfit_control = pfit_control; | ||
529 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; | ||
259 | /* | 530 | /* |
260 | * XXX: It would be nice to support lower refresh rates on the | 531 | * XXX: It would be nice to support lower refresh rates on the |
261 | * panels to reduce power consumption, and perhaps match the | 532 | * panels to reduce power consumption, and perhaps match the |
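
The mode_fixup hunk above avoids floating point by scaling every ratio with PANEL_RATIO_FACTOR (8192) before dividing, then packing the result into the 12-bit fields of PFIT_PGM_RATIOS. A standalone sketch of the same integer arithmetic, with hypothetical panel and mode sizes (not driver code):

    #include <stdio.h>

    #define PANEL_RATIO_FACTOR 8192 /* fixed-point scale, as in the patch */

    int main(void)
    {
            /* hypothetical 1280x800 panel driven with a 1024x768 mode */
            unsigned int panel_w = 1280, panel_h = 800;
            unsigned int mode_w = 1024, mode_h = 768;

            unsigned int panel_ratio   = panel_w * PANEL_RATIO_FACTOR / panel_h; /* 13107 */
            unsigned int desired_ratio = mode_w  * PANEL_RATIO_FACTOR / mode_h;  /* 10922 */

            if (panel_ratio > desired_ratio)
                    printf("panel is wider: pillarbox (left/right borders)\n");
            else if (panel_ratio < desired_ratio)
                    printf("panel is taller: letterbox (top/bottom borders)\n");
            else
                    printf("ratios match: scale both directions\n");

            /* pack a ratio into a 12-bit field, as PFIT_PGM_RATIOS expects */
            unsigned int vert_ratio = mode_h * PANEL_RATIO_FACTOR / panel_h;       /* 7864 */
            unsigned int vert_bits  = (1 << 12) * vert_ratio / PANEL_RATIO_FACTOR; /* 3932 */
            printf("12-bit vertical ratio field: %u\n", vert_bits);
            return 0;
    }
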
@@ -301,8 +572,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
301 | { | 572 | { |
302 | struct drm_device *dev = encoder->dev; | 573 | struct drm_device *dev = encoder->dev; |
303 | struct drm_i915_private *dev_priv = dev->dev_private; | 574 | struct drm_i915_private *dev_priv = dev->dev_private; |
304 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 575 | struct intel_output *intel_output = enc_to_intel_output(encoder); |
305 | u32 pfit_control; | 576 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; |
306 | 577 | ||
307 | /* | 578 | /* |
308 | * The LVDS pin pair will already have been turned on in the | 579 | * The LVDS pin pair will already have been turned on in the |
@@ -319,22 +590,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
319 | * screen. Should be enabled before the pipe is enabled, according to | 590 | * screen. Should be enabled before the pipe is enabled, according to |
320 | * register description and PRM. | 591 | * register description and PRM. |
321 | */ | 592 | */ |
322 | if (mode->hdisplay != adjusted_mode->hdisplay || | 593 | I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios); |
323 | mode->vdisplay != adjusted_mode->vdisplay) | 594 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); |
324 | pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | | ||
325 | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | | ||
326 | HORIZ_INTERP_BILINEAR); | ||
327 | else | ||
328 | pfit_control = 0; | ||
329 | |||
330 | if (!IS_I965G(dev)) { | ||
331 | if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) | ||
332 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
333 | } | ||
334 | else | ||
335 | pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; | ||
336 | |||
337 | I915_WRITE(PFIT_CONTROL, pfit_control); | ||
338 | } | 595 | } |
339 | 596 | ||
340 | /** | 597 | /** |
@@ -406,6 +663,34 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
406 | struct drm_property *property, | 663 | struct drm_property *property, |
407 | uint64_t value) | 664 | uint64_t value) |
408 | { | 665 | { |
666 | struct drm_device *dev = connector->dev; | ||
667 | struct intel_output *intel_output = | ||
668 | to_intel_output(connector); | ||
669 | |||
670 | if (property == dev->mode_config.scaling_mode_property && | ||
671 | connector->encoder) { | ||
672 | struct drm_crtc *crtc = connector->encoder->crtc; | ||
673 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | ||
674 | if (value == DRM_MODE_SCALE_NON_GPU) { | ||
675 | DRM_DEBUG_KMS(I915_LVDS, | ||
676 | "non_GPU property is unsupported\n"); | ||
677 | return 0; | ||
678 | } | ||
679 | if (lvds_priv->fitting_mode == value) { | ||
680 | /* the LVDS scaling property is not changed */ | ||
681 | return 0; | ||
682 | } | ||
683 | lvds_priv->fitting_mode = value; | ||
684 | if (crtc && crtc->enabled) { | ||
685 | /* | ||
686 | * If the CRTC is enabled, the display will be changed | ||
687 | * according to the new panel fitting mode. | ||
688 | */ | ||
689 | drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
690 | crtc->x, crtc->y, crtc->fb); | ||
691 | } | ||
692 | } | ||
693 | |||
409 | return 0; | 694 | return 0; |
410 | } | 695 | } |
411 | 696 | ||
@@ -456,7 +741,7 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
456 | .callback = intel_no_lvds_dmi_callback, | 741 | .callback = intel_no_lvds_dmi_callback, |
457 | .ident = "Apple Mac Mini (Core series)", | 742 | .ident = "Apple Mac Mini (Core series)", |
458 | .matches = { | 743 | .matches = { |
459 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | 744 | DMI_MATCH(DMI_SYS_VENDOR, "Apple"), |
460 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), | 745 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), |
461 | }, | 746 | }, |
462 | }, | 747 | }, |
@@ -464,7 +749,7 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
464 | .callback = intel_no_lvds_dmi_callback, | 749 | .callback = intel_no_lvds_dmi_callback, |
465 | .ident = "Apple Mac Mini (Core 2 series)", | 750 | .ident = "Apple Mac Mini (Core 2 series)", |
466 | .matches = { | 751 | .matches = { |
467 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | 752 | DMI_MATCH(DMI_SYS_VENDOR, "Apple"), |
468 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), | 753 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), |
469 | }, | 754 | }, |
470 | }, | 755 | }, |
@@ -518,6 +803,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
518 | struct drm_encoder *encoder; | 803 | struct drm_encoder *encoder; |
519 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 804 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
520 | struct drm_crtc *crtc; | 805 | struct drm_crtc *crtc; |
806 | struct intel_lvds_priv *lvds_priv; | ||
521 | u32 lvds; | 807 | u32 lvds; |
522 | int pipe, gpio = GPIOC; | 808 | int pipe, gpio = GPIOC; |
523 | 809 | ||
@@ -531,7 +817,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
531 | gpio = PCH_GPIOC; | 817 | gpio = PCH_GPIOC; |
532 | } | 818 | } |
533 | 819 | ||
534 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | 820 | intel_output = kzalloc(sizeof(struct intel_output) + |
821 | sizeof(struct intel_lvds_priv), GFP_KERNEL); | ||
535 | if (!intel_output) { | 822 | if (!intel_output) { |
536 | return; | 823 | return; |
537 | } | 824 | } |
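
intel_lvds_init now allocates struct intel_output and its LVDS-private data in a single kzalloc() and, a little further down, points dev_priv just past the outer struct. A small sketch of that trailing-allocation pattern with stand-in struct names (not the driver's own types):

    #include <linux/slab.h>

    struct lvds_private {
            int fitting_mode;
    };

    struct outer_output {                   /* stands in for struct intel_output */
            void *dev_priv;
            /* ... other common output fields ... */
    };

    static struct outer_output *alloc_output_with_priv(void)
    {
            struct outer_output *out;

            /* one allocation for both; the private block follows the outer struct */
            out = kzalloc(sizeof(*out) + sizeof(struct lvds_private), GFP_KERNEL);
            if (!out)
                    return NULL;

            out->dev_priv = (struct lvds_private *)(out + 1);
            return out;
    }
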
@@ -553,7 +840,18 @@ void intel_lvds_init(struct drm_device *dev) | |||
553 | connector->interlace_allowed = false; | 840 | connector->interlace_allowed = false; |
554 | connector->doublescan_allowed = false; | 841 | connector->doublescan_allowed = false; |
555 | 842 | ||
843 | lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); | ||
844 | intel_output->dev_priv = lvds_priv; | ||
845 | /* create the scaling mode property */ | ||
846 | drm_mode_create_scaling_mode_property(dev); | ||
847 | /* | ||
848 | * the initial panel fitting mode will be FULL_SCREEN. | ||
849 | */ | ||
556 | 850 | ||
851 | drm_connector_attach_property(&intel_output->base, | ||
852 | dev->mode_config.scaling_mode_property, | ||
853 | DRM_MODE_SCALE_FULLSCREEN); | ||
854 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | ||
557 | /* | 855 | /* |
558 | * LVDS discovery: | 856 | * LVDS discovery: |
559 | * 1) check for EDID on DDC | 857 | * 1) check for EDID on DDC |
@@ -649,5 +947,5 @@ failed: | |||
649 | if (intel_output->ddc_bus) | 947 | if (intel_output->ddc_bus) |
650 | intel_i2c_destroy(intel_output->ddc_bus); | 948 | intel_i2c_destroy(intel_output->ddc_bus); |
651 | drm_connector_cleanup(connector); | 949 | drm_connector_cleanup(connector); |
652 | kfree(connector); | 950 | kfree(intel_output); |
653 | } | 951 | } |
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index e0910fefce87..67e2f4632a24 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
53 | } | 53 | } |
54 | }; | 54 | }; |
55 | 55 | ||
56 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); | 56 | intel_i2c_quirk_set(intel_output->base.dev, true); |
57 | ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); | 57 | ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); |
58 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); | 58 | intel_i2c_quirk_set(intel_output->base.dev, false); |
59 | |||
60 | if (ret == 2) | 59 | if (ret == 2) |
61 | return true; | 60 | return true; |
62 | 61 | ||
@@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output) | |||
74 | struct edid *edid; | 73 | struct edid *edid; |
75 | int ret = 0; | 74 | int ret = 0; |
76 | 75 | ||
77 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); | 76 | intel_i2c_quirk_set(intel_output->base.dev, true); |
78 | edid = drm_get_edid(&intel_output->base, | 77 | edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); |
79 | &intel_output->ddc_bus->adapter); | 78 | intel_i2c_quirk_set(intel_output->base.dev, false); |
80 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); | ||
81 | if (edid) { | 79 | if (edid) { |
82 | drm_mode_connector_update_edid_property(&intel_output->base, | 80 | drm_mode_connector_update_edid_property(&intel_output->base, |
83 | edid); | 81 | edid); |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9a00adb3a508..f03473779feb 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -38,8 +38,7 @@ | |||
38 | #undef SDVO_DEBUG | 38 | #undef SDVO_DEBUG |
39 | #define I915_SDVO "i915_sdvo" | 39 | #define I915_SDVO "i915_sdvo" |
40 | struct intel_sdvo_priv { | 40 | struct intel_sdvo_priv { |
41 | struct intel_i2c_chan *i2c_bus; | 41 | u8 slave_addr; |
42 | int slaveaddr; | ||
43 | 42 | ||
44 | /* Register for the SDVO device: SDVOB or SDVOC */ | 43 | /* Register for the SDVO device: SDVOB or SDVOC */ |
45 | int output_device; | 44 | int output_device; |
@@ -146,13 +145,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
146 | 145 | ||
147 | struct i2c_msg msgs[] = { | 146 | struct i2c_msg msgs[] = { |
148 | { | 147 | { |
149 | .addr = sdvo_priv->i2c_bus->slave_addr, | 148 | .addr = sdvo_priv->slave_addr >> 1, |
150 | .flags = 0, | 149 | .flags = 0, |
151 | .len = 1, | 150 | .len = 1, |
152 | .buf = out_buf, | 151 | .buf = out_buf, |
153 | }, | 152 | }, |
154 | { | 153 | { |
155 | .addr = sdvo_priv->i2c_bus->slave_addr, | 154 | .addr = sdvo_priv->slave_addr >> 1, |
156 | .flags = I2C_M_RD, | 155 | .flags = I2C_M_RD, |
157 | .len = 1, | 156 | .len = 1, |
158 | .buf = buf, | 157 | .buf = buf, |
@@ -162,7 +161,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
162 | out_buf[0] = addr; | 161 | out_buf[0] = addr; |
163 | out_buf[1] = 0; | 162 | out_buf[1] = 0; |
164 | 163 | ||
165 | if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) | 164 | if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) |
166 | { | 165 | { |
167 | *ch = buf[0]; | 166 | *ch = buf[0]; |
168 | return true; | 167 | return true; |
@@ -175,10 +174,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
175 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | 174 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, |
176 | u8 ch) | 175 | u8 ch) |
177 | { | 176 | { |
177 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
178 | u8 out_buf[2]; | 178 | u8 out_buf[2]; |
179 | struct i2c_msg msgs[] = { | 179 | struct i2c_msg msgs[] = { |
180 | { | 180 | { |
181 | .addr = intel_output->i2c_bus->slave_addr, | 181 | .addr = sdvo_priv->slave_addr >> 1, |
182 | .flags = 0, | 182 | .flags = 0, |
183 | .len = 2, | 183 | .len = 2, |
184 | .buf = out_buf, | 184 | .buf = out_buf, |
@@ -188,7 +188,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | |||
188 | out_buf[0] = addr; | 188 | out_buf[0] = addr; |
189 | out_buf[1] = ch; | 189 | out_buf[1] = ch; |
190 | 190 | ||
191 | if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) | 191 | if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) |
192 | { | 192 | { |
193 | return true; | 193 | return true; |
194 | } | 194 | } |
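
The SDVO hunks above keep the raw 8-bit slave address in sdvo_priv and shift it right by one whenever an i2c_msg is built, because the i2c core addresses devices in 7-bit form. A tiny illustration with a hypothetical address value:

    #include <linux/i2c.h>

    static u8 reg_index[1] = { 0x00 };      /* register to read (hypothetical) */

    /* sdvo_priv->slave_addr stores the 8-bit form, e.g. 0x70; i2c_msg wants 7-bit */
    static struct i2c_msg read_msg = {
            .addr  = 0x70 >> 1,             /* == 0x38 */
            .flags = I2C_M_RD,
            .len   = sizeof(reg_index),
            .buf   = reg_index,
    };
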
@@ -1369,9 +1369,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1369 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1369 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; |
1370 | struct edid *edid = NULL; | 1370 | struct edid *edid = NULL; |
1371 | 1371 | ||
1372 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | ||
1373 | edid = drm_get_edid(&intel_output->base, | 1372 | edid = drm_get_edid(&intel_output->base, |
1374 | &intel_output->ddc_bus->adapter); | 1373 | intel_output->ddc_bus); |
1375 | if (edid != NULL) { | 1374 | if (edid != NULL) { |
1376 | sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); | 1375 | sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); |
1377 | kfree(edid); | 1376 | kfree(edid); |
@@ -1549,7 +1548,6 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1549 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1548 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
1550 | { | 1549 | { |
1551 | struct intel_output *intel_output = to_intel_output(connector); | 1550 | struct intel_output *intel_output = to_intel_output(connector); |
1552 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1553 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1551 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1554 | 1552 | ||
1555 | /* | 1553 | /* |
@@ -1557,8 +1555,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1557 | * Assume that the preferred modes are | 1555 | * Assume that the preferred modes are |
1558 | * arranged in priority order. | 1556 | * arranged in priority order. |
1559 | */ | 1557 | */ |
1560 | /* set the bus switch and get the modes */ | ||
1561 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | ||
1562 | intel_ddc_get_modes(intel_output); | 1558 | intel_ddc_get_modes(intel_output); |
1563 | if (list_empty(&connector->probed_modes) == false) | 1559 | if (list_empty(&connector->probed_modes) == false) |
1564 | return; | 1560 | return; |
@@ -1709,7 +1705,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) | |||
1709 | 1705 | ||
1710 | list_for_each_entry(connector, | 1706 | list_for_each_entry(connector, |
1711 | &dev->mode_config.connector_list, head) { | 1707 | &dev->mode_config.connector_list, head) { |
1712 | if (to_intel_output(connector)->ddc_bus == chan) { | 1708 | if (to_intel_output(connector)->ddc_bus == &chan->adapter) { |
1713 | intel_output = to_intel_output(connector); | 1709 | intel_output = to_intel_output(connector); |
1714 | break; | 1710 | break; |
1715 | } | 1711 | } |
@@ -1723,7 +1719,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | |||
1723 | struct intel_output *intel_output; | 1719 | struct intel_output *intel_output; |
1724 | struct intel_sdvo_priv *sdvo_priv; | 1720 | struct intel_sdvo_priv *sdvo_priv; |
1725 | struct i2c_algo_bit_data *algo_data; | 1721 | struct i2c_algo_bit_data *algo_data; |
1726 | struct i2c_algorithm *algo; | 1722 | const struct i2c_algorithm *algo; |
1727 | 1723 | ||
1728 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; | 1724 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; |
1729 | intel_output = | 1725 | intel_output = |
@@ -1733,7 +1729,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | |||
1733 | return -EINVAL; | 1729 | return -EINVAL; |
1734 | 1730 | ||
1735 | sdvo_priv = intel_output->dev_priv; | 1731 | sdvo_priv = intel_output->dev_priv; |
1736 | algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo; | 1732 | algo = intel_output->i2c_bus->algo; |
1737 | 1733 | ||
1738 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | 1734 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); |
1739 | return algo->master_xfer(i2c_adap, msgs, num); | 1735 | return algo->master_xfer(i2c_adap, msgs, num); |
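
intel_sdvo_master_xfer above is the heart of the DDC wrapping scheme: the bit-banging adapter's algorithm is replaced by intel_sdvo_i2c_bit_algo, whose master_xfer first switches the SDVO control bus to the DDC channel and then delegates to the original bit algorithm. A rough sketch of that wrapping pattern; select_ddc_channel() is a made-up placeholder, not a real driver hook:

    #include <linux/i2c.h>

    static struct i2c_algorithm wrapped_bit_algo;
    static const struct i2c_algorithm *orig_algo;

    /* hypothetical hook that programs the SDVO control-bus switch */
    static void select_ddc_channel(struct i2c_adapter *adap) { }

    static int wrapped_master_xfer(struct i2c_adapter *adap,
                                   struct i2c_msg *msgs, int num)
    {
            select_ddc_channel(adap);
            return orig_algo->master_xfer(adap, msgs, num);
    }

    static void wrap_adapter(struct i2c_adapter *adap)
    {
            orig_algo = adap->algo;
            wrapped_bit_algo.master_xfer   = wrapped_master_xfer;
            wrapped_bit_algo.functionality = orig_algo->functionality;
            adap->algo = &wrapped_bit_algo;
    }
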
@@ -1785,13 +1781,11 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1785 | struct drm_connector *connector; | 1781 | struct drm_connector *connector; |
1786 | struct intel_output *intel_output; | 1782 | struct intel_output *intel_output; |
1787 | struct intel_sdvo_priv *sdvo_priv; | 1783 | struct intel_sdvo_priv *sdvo_priv; |
1788 | struct intel_i2c_chan *i2cbus = NULL; | 1784 | |
1789 | struct intel_i2c_chan *ddcbus = NULL; | ||
1790 | int connector_type; | 1785 | int connector_type; |
1791 | u8 ch[0x40]; | 1786 | u8 ch[0x40]; |
1792 | int i; | 1787 | int i; |
1793 | int encoder_type, output_id; | 1788 | int encoder_type; |
1794 | u8 slave_addr; | ||
1795 | 1789 | ||
1796 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 1790 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); |
1797 | if (!intel_output) { | 1791 | if (!intel_output) { |
@@ -1799,29 +1793,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1799 | } | 1793 | } |
1800 | 1794 | ||
1801 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); | 1795 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); |
1796 | sdvo_priv->output_device = output_device; | ||
1797 | |||
1798 | intel_output->dev_priv = sdvo_priv; | ||
1802 | intel_output->type = INTEL_OUTPUT_SDVO; | 1799 | intel_output->type = INTEL_OUTPUT_SDVO; |
1803 | 1800 | ||
1804 | /* setup the DDC bus. */ | 1801 | /* setup the DDC bus. */ |
1805 | if (output_device == SDVOB) | 1802 | if (output_device == SDVOB) |
1806 | i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); | 1803 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); |
1807 | else | 1804 | else |
1808 | i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); | 1805 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); |
1809 | 1806 | ||
1810 | if (!i2cbus) | 1807 | if (!intel_output->i2c_bus) |
1811 | goto err_inteloutput; | 1808 | goto err_inteloutput; |
1812 | 1809 | ||
1813 | slave_addr = intel_sdvo_get_slave_addr(dev, output_device); | 1810 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); |
1814 | sdvo_priv->i2c_bus = i2cbus; | ||
1815 | 1811 | ||
1816 | if (output_device == SDVOB) { | 1812 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ |
1817 | output_id = 1; | 1813 | intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; |
1818 | } else { | ||
1819 | output_id = 2; | ||
1820 | } | ||
1821 | sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1; | ||
1822 | sdvo_priv->output_device = output_device; | ||
1823 | intel_output->i2c_bus = i2cbus; | ||
1824 | intel_output->dev_priv = sdvo_priv; | ||
1825 | 1814 | ||
1826 | /* Read the regs to test if we can talk to the device */ | 1815 | /* Read the regs to test if we can talk to the device */ |
1827 | for (i = 0; i < 0x40; i++) { | 1816 | for (i = 0; i < 0x40; i++) { |
@@ -1835,17 +1824,15 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1835 | 1824 | ||
1836 | /* setup the DDC bus. */ | 1825 | /* setup the DDC bus. */ |
1837 | if (output_device == SDVOB) | 1826 | if (output_device == SDVOB) |
1838 | ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 1827 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
1839 | else | 1828 | else |
1840 | ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 1829 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
1841 | 1830 | ||
1842 | if (ddcbus == NULL) | 1831 | if (intel_output->ddc_bus == NULL) |
1843 | goto err_i2c; | 1832 | goto err_i2c; |
1844 | 1833 | ||
1845 | intel_sdvo_i2c_bit_algo.functionality = | 1834 | /* Wrap with our custom algo which switches to DDC mode */ |
1846 | intel_output->i2c_bus->adapter.algo->functionality; | 1835 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
1847 | ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo; | ||
1848 | intel_output->ddc_bus = ddcbus; | ||
1849 | 1836 | ||
1850 | /* In default case sdvo lvds is false */ | 1837 | /* In default case sdvo lvds is false */ |
1851 | sdvo_priv->is_lvds = false; | 1838 | sdvo_priv->is_lvds = false; |
@@ -1965,9 +1952,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1965 | return true; | 1952 | return true; |
1966 | 1953 | ||
1967 | err_i2c: | 1954 | err_i2c: |
1968 | if (ddcbus != NULL) | 1955 | if (intel_output->ddc_bus != NULL) |
1969 | intel_i2c_destroy(intel_output->ddc_bus); | 1956 | intel_i2c_destroy(intel_output->ddc_bus); |
1970 | intel_i2c_destroy(intel_output->i2c_bus); | 1957 | if (intel_output->i2c_bus != NULL) |
1958 | intel_i2c_destroy(intel_output->i2c_bus); | ||
1971 | err_inteloutput: | 1959 | err_inteloutput: |
1972 | kfree(intel_output); | 1960 | kfree(intel_output); |
1973 | 1961 | ||
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index ea68992e4416..a43c98e3f077 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
1383 | /* | 1383 | /* |
1384 | * Detect TV by polling) | 1384 | * Detect TV by polling) |
1385 | */ | 1385 | */ |
1386 | if (intel_output->load_detect_temp) { | 1386 | save_tv_dac = tv_dac; |
1387 | /* TV not currently running, prod it with destructive detect */ | 1387 | tv_ctl = I915_READ(TV_CTL); |
1388 | save_tv_dac = tv_dac; | 1388 | save_tv_ctl = tv_ctl; |
1389 | tv_ctl = I915_READ(TV_CTL); | 1389 | tv_ctl &= ~TV_ENC_ENABLE; |
1390 | save_tv_ctl = tv_ctl; | 1390 | tv_ctl &= ~TV_TEST_MODE_MASK; |
1391 | tv_ctl &= ~TV_ENC_ENABLE; | 1391 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; |
1392 | tv_ctl &= ~TV_TEST_MODE_MASK; | 1392 | tv_dac &= ~TVDAC_SENSE_MASK; |
1393 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; | 1393 | tv_dac &= ~DAC_A_MASK; |
1394 | tv_dac &= ~TVDAC_SENSE_MASK; | 1394 | tv_dac &= ~DAC_B_MASK; |
1395 | tv_dac &= ~DAC_A_MASK; | 1395 | tv_dac &= ~DAC_C_MASK; |
1396 | tv_dac &= ~DAC_B_MASK; | 1396 | tv_dac |= (TVDAC_STATE_CHG_EN | |
1397 | tv_dac &= ~DAC_C_MASK; | 1397 | TVDAC_A_SENSE_CTL | |
1398 | tv_dac |= (TVDAC_STATE_CHG_EN | | 1398 | TVDAC_B_SENSE_CTL | |
1399 | TVDAC_A_SENSE_CTL | | 1399 | TVDAC_C_SENSE_CTL | |
1400 | TVDAC_B_SENSE_CTL | | 1400 | DAC_CTL_OVERRIDE | |
1401 | TVDAC_C_SENSE_CTL | | 1401 | DAC_A_0_7_V | |
1402 | DAC_CTL_OVERRIDE | | 1402 | DAC_B_0_7_V | |
1403 | DAC_A_0_7_V | | 1403 | DAC_C_0_7_V); |
1404 | DAC_B_0_7_V | | 1404 | I915_WRITE(TV_CTL, tv_ctl); |
1405 | DAC_C_0_7_V); | 1405 | I915_WRITE(TV_DAC, tv_dac); |
1406 | I915_WRITE(TV_CTL, tv_ctl); | 1406 | intel_wait_for_vblank(dev); |
1407 | I915_WRITE(TV_DAC, tv_dac); | 1407 | tv_dac = I915_READ(TV_DAC); |
1408 | intel_wait_for_vblank(dev); | 1408 | I915_WRITE(TV_DAC, save_tv_dac); |
1409 | tv_dac = I915_READ(TV_DAC); | 1409 | I915_WRITE(TV_CTL, save_tv_ctl); |
1410 | I915_WRITE(TV_DAC, save_tv_dac); | 1410 | intel_wait_for_vblank(dev); |
1411 | I915_WRITE(TV_CTL, save_tv_ctl); | ||
1412 | intel_wait_for_vblank(dev); | ||
1413 | } | ||
1414 | /* | 1411 | /* |
1415 | * A B C | 1412 | * A B C |
1416 | * 0 1 1 Composite | 1413 | * 0 1 1 Composite |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f30aa7274a54..f97563db4e59 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -35,6 +35,23 @@ | |||
35 | #include "atom.h" | 35 | #include "atom.h" |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Clear GPU surface registers. | ||
39 | */ | ||
40 | static void radeon_surface_init(struct radeon_device *rdev) | ||
41 | { | ||
42 | /* FIXME: check this out */ | ||
43 | if (rdev->family < CHIP_R600) { | ||
44 | int i; | ||
45 | |||
46 | for (i = 0; i < 8; i++) { | ||
47 | WREG32(RADEON_SURFACE0_INFO + | ||
48 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), | ||
49 | 0); | ||
50 | } | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /* | ||
38 | * GPU scratch registers helpers function. | 55 | * GPU scratch registers helpers function. |
39 | */ | 56 | */ |
40 | static void radeon_scratch_init(struct radeon_device *rdev) | 57 | static void radeon_scratch_init(struct radeon_device *rdev) |
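
radeon_surface_init above walks the eight surface-info registers using the distance between SURFACE0_INFO and SURFACE1_INFO as the stride. The same indexing idiom in isolation, with placeholder offsets (the real values live in the radeon register headers):

    #include <stdint.h>

    #define SURFACE0_INFO 0x0b0c    /* placeholder offsets, not the real ones */
    #define SURFACE1_INFO 0x0b1c

    /* MMIO offset of the info register for surface i, i = 0..7 */
    static inline uint32_t surface_info_reg(int i)
    {
            return SURFACE0_INFO + i * (SURFACE1_INFO - SURFACE0_INFO);
    }
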
@@ -496,6 +513,8 @@ int radeon_device_init(struct radeon_device *rdev, | |||
496 | radeon_errata(rdev); | 513 | radeon_errata(rdev); |
497 | /* Initialize scratch registers */ | 514 | /* Initialize scratch registers */ |
498 | radeon_scratch_init(rdev); | 515 | radeon_scratch_init(rdev); |
516 | /* Initialize surface registers */ | ||
517 | radeon_surface_init(rdev); | ||
499 | 518 | ||
500 | /* TODO: disable VGA need to use VGA request */ | 519 | /* TODO: disable VGA need to use VGA request */ |
501 | /* BIOS*/ | 520 | /* BIOS*/ |
@@ -604,9 +623,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
604 | if (r) { | 623 | if (r) { |
605 | return r; | 624 | return r; |
606 | } | 625 | } |
607 | if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) { | ||
608 | rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private; | ||
609 | } | ||
610 | if (!ret) { | 626 | if (!ret) { |
611 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); | 627 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); |
612 | } | 628 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 09c9fb9f6210..84ba69f48784 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -345,7 +345,7 @@ static void __exit radeon_exit(void) | |||
345 | drm_exit(driver); | 345 | drm_exit(driver); |
346 | } | 346 | } |
347 | 347 | ||
348 | late_initcall(radeon_init); | 348 | module_init(radeon_init); |
349 | module_exit(radeon_exit); | 349 | module_exit(radeon_exit); |
350 | 350 | ||
351 | MODULE_AUTHOR(DRIVER_AUTHOR); | 351 | MODULE_AUTHOR(DRIVER_AUTHOR); |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fa86d398945e..9e8f191eb64a 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -478,14 +478,16 @@ int radeonfb_create(struct radeon_device *rdev, | |||
478 | { | 478 | { |
479 | struct fb_info *info; | 479 | struct fb_info *info; |
480 | struct radeon_fb_device *rfbdev; | 480 | struct radeon_fb_device *rfbdev; |
481 | struct drm_framebuffer *fb; | 481 | struct drm_framebuffer *fb = NULL; |
482 | struct radeon_framebuffer *rfb; | 482 | struct radeon_framebuffer *rfb; |
483 | struct drm_mode_fb_cmd mode_cmd; | 483 | struct drm_mode_fb_cmd mode_cmd; |
484 | struct drm_gem_object *gobj = NULL; | 484 | struct drm_gem_object *gobj = NULL; |
485 | struct radeon_object *robj = NULL; | 485 | struct radeon_object *robj = NULL; |
486 | struct device *device = &rdev->pdev->dev; | 486 | struct device *device = &rdev->pdev->dev; |
487 | int size, aligned_size, ret; | 487 | int size, aligned_size, ret; |
488 | u64 fb_gpuaddr; | ||
488 | void *fbptr = NULL; | 489 | void *fbptr = NULL; |
490 | unsigned long tmp; | ||
489 | 491 | ||
490 | mode_cmd.width = surface_width; | 492 | mode_cmd.width = surface_width; |
491 | mode_cmd.height = surface_height; | 493 | mode_cmd.height = surface_height; |
@@ -498,11 +500,12 @@ int radeonfb_create(struct radeon_device *rdev, | |||
498 | aligned_size = ALIGN(size, PAGE_SIZE); | 500 | aligned_size = ALIGN(size, PAGE_SIZE); |
499 | 501 | ||
500 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 502 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
501 | RADEON_GEM_DOMAIN_VRAM, | 503 | RADEON_GEM_DOMAIN_VRAM, |
502 | false, ttm_bo_type_kernel, | 504 | false, ttm_bo_type_kernel, |
503 | false, &gobj); | 505 | false, &gobj); |
504 | if (ret) { | 506 | if (ret) { |
505 | printk(KERN_ERR "failed to allocate framebuffer\n"); | 507 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", |
508 | surface_width, surface_height); | ||
506 | ret = -ENOMEM; | 509 | ret = -ENOMEM; |
507 | goto out; | 510 | goto out; |
508 | } | 511 | } |
@@ -515,12 +518,19 @@ int radeonfb_create(struct radeon_device *rdev, | |||
515 | ret = -ENOMEM; | 518 | ret = -ENOMEM; |
516 | goto out_unref; | 519 | goto out_unref; |
517 | } | 520 | } |
521 | ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); | ||
522 | if (ret) { | ||
523 | printk(KERN_ERR "failed to pin framebuffer\n"); | ||
524 | ret = -ENOMEM; | ||
525 | goto out_unref; | ||
526 | } | ||
518 | 527 | ||
519 | list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); | 528 | list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); |
520 | 529 | ||
521 | rfb = to_radeon_framebuffer(fb); | 530 | rfb = to_radeon_framebuffer(fb); |
522 | *rfb_p = rfb; | 531 | *rfb_p = rfb; |
523 | rdev->fbdev_rfb = rfb; | 532 | rdev->fbdev_rfb = rfb; |
533 | rdev->fbdev_robj = robj; | ||
524 | 534 | ||
525 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); | 535 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); |
526 | if (info == NULL) { | 536 | if (info == NULL) { |
@@ -541,13 +551,13 @@ int radeonfb_create(struct radeon_device *rdev, | |||
541 | info->fix.xpanstep = 1; /* doing it in hw */ | 551 | info->fix.xpanstep = 1; /* doing it in hw */ |
542 | info->fix.ypanstep = 1; /* doing it in hw */ | 552 | info->fix.ypanstep = 1; /* doing it in hw */ |
543 | info->fix.ywrapstep = 0; | 553 | info->fix.ywrapstep = 0; |
544 | info->fix.accel = FB_ACCEL_I830; | 554 | info->fix.accel = FB_ACCEL_NONE; |
545 | info->fix.type_aux = 0; | 555 | info->fix.type_aux = 0; |
546 | info->flags = FBINFO_DEFAULT; | 556 | info->flags = FBINFO_DEFAULT; |
547 | info->fbops = &radeonfb_ops; | 557 | info->fbops = &radeonfb_ops; |
548 | info->fix.line_length = fb->pitch; | 558 | info->fix.line_length = fb->pitch; |
549 | info->screen_base = fbptr; | 559 | tmp = fb_gpuaddr - rdev->mc.vram_location; |
550 | info->fix.smem_start = (unsigned long)fbptr; | 560 | info->fix.smem_start = rdev->mc.aper_base + tmp; |
551 | info->fix.smem_len = size; | 561 | info->fix.smem_len = size; |
552 | info->screen_base = fbptr; | 562 | info->screen_base = fbptr; |
553 | info->screen_size = size; | 563 | info->screen_size = size; |
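
The radeonfb_create changes above stop advertising the CPU kmap pointer as smem_start: the framebuffer's pinned GPU address minus the VRAM base gives its offset into VRAM, and adding the PCI aperture base yields an address userspace can actually mmap. The arithmetic with hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical values for illustration only */
            uint64_t fb_gpuaddr    = 0xf8100000;    /* where the framebuffer BO was pinned */
            uint64_t vram_location = 0xf8000000;    /* GPU address of the start of VRAM */
            uint64_t aper_base     = 0xd0000000;    /* PCI aperture (BAR) base of VRAM */

            uint64_t offset     = fb_gpuaddr - vram_location;       /* 0x100000 into VRAM */
            uint64_t smem_start = aper_base + offset;               /* 0xd0100000 */

            printf("smem_start = %#llx\n", (unsigned long long)smem_start);
            return 0;
    }
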
@@ -562,8 +572,8 @@ int radeonfb_create(struct radeon_device *rdev, | |||
562 | info->var.width = -1; | 572 | info->var.width = -1; |
563 | info->var.xres = fb_width; | 573 | info->var.xres = fb_width; |
564 | info->var.yres = fb_height; | 574 | info->var.yres = fb_height; |
565 | info->fix.mmio_start = pci_resource_start(rdev->pdev, 2); | 575 | info->fix.mmio_start = 0; |
566 | info->fix.mmio_len = pci_resource_len(rdev->pdev, 2); | 576 | info->fix.mmio_len = 0; |
567 | info->pixmap.size = 64*1024; | 577 | info->pixmap.size = 64*1024; |
568 | info->pixmap.buf_align = 8; | 578 | info->pixmap.buf_align = 8; |
569 | info->pixmap.access_align = 32; | 579 | info->pixmap.access_align = 32; |
@@ -644,7 +654,7 @@ out_unref: | |||
644 | if (robj) { | 654 | if (robj) { |
645 | radeon_object_kunmap(robj); | 655 | radeon_object_kunmap(robj); |
646 | } | 656 | } |
647 | if (ret) { | 657 | if (fb && ret) { |
648 | list_del(&fb->filp_head); | 658 | list_del(&fb->filp_head); |
649 | drm_gem_object_unreference(gobj); | 659 | drm_gem_object_unreference(gobj); |
650 | drm_framebuffer_cleanup(fb); | 660 | drm_framebuffer_cleanup(fb); |
@@ -813,6 +823,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | |||
813 | robj = rfb->obj->driver_private; | 823 | robj = rfb->obj->driver_private; |
814 | unregister_framebuffer(info); | 824 | unregister_framebuffer(info); |
815 | radeon_object_kunmap(robj); | 825 | radeon_object_kunmap(robj); |
826 | radeon_object_unpin(robj); | ||
816 | framebuffer_release(info); | 827 | framebuffer_release(info); |
817 | } | 828 | } |
818 | 829 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 983e8df5e000..bac0d06c52ac 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -223,7 +223,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |||
223 | { | 223 | { |
224 | uint32_t flags; | 224 | uint32_t flags; |
225 | uint32_t tmp; | 225 | uint32_t tmp; |
226 | void *fbptr; | ||
227 | int r; | 226 | int r; |
228 | 227 | ||
229 | flags = radeon_object_flags_from_domain(domain); | 228 | flags = radeon_object_flags_from_domain(domain); |
@@ -242,10 +241,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |||
242 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); | 241 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); |
243 | return r; | 242 | return r; |
244 | } | 243 | } |
245 | if (robj->rdev->fbdev_robj == robj) { | ||
246 | mutex_lock(&robj->rdev->fbdev_info->lock); | ||
247 | radeon_object_kunmap(robj); | ||
248 | } | ||
249 | tmp = robj->tobj.mem.placement; | 244 | tmp = robj->tobj.mem.placement; |
250 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); | 245 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); |
251 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; | 246 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; |
@@ -261,23 +256,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |||
261 | DRM_ERROR("radeon: failed to pin object.\n"); | 256 | DRM_ERROR("radeon: failed to pin object.\n"); |
262 | } | 257 | } |
263 | radeon_object_unreserve(robj); | 258 | radeon_object_unreserve(robj); |
264 | if (robj->rdev->fbdev_robj == robj) { | ||
265 | if (!r) { | ||
266 | r = radeon_object_kmap(robj, &fbptr); | ||
267 | } | ||
268 | if (!r) { | ||
269 | robj->rdev->fbdev_info->screen_base = fbptr; | ||
270 | robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; | ||
271 | } | ||
272 | mutex_unlock(&robj->rdev->fbdev_info->lock); | ||
273 | } | ||
274 | return r; | 259 | return r; |
275 | } | 260 | } |
276 | 261 | ||
277 | void radeon_object_unpin(struct radeon_object *robj) | 262 | void radeon_object_unpin(struct radeon_object *robj) |
278 | { | 263 | { |
279 | uint32_t flags; | 264 | uint32_t flags; |
280 | void *fbptr; | ||
281 | int r; | 265 | int r; |
282 | 266 | ||
283 | spin_lock(&robj->tobj.lock); | 267 | spin_lock(&robj->tobj.lock); |
@@ -297,10 +281,6 @@ void radeon_object_unpin(struct radeon_object *robj) | |||
297 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); | 281 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); |
298 | return; | 282 | return; |
299 | } | 283 | } |
300 | if (robj->rdev->fbdev_robj == robj) { | ||
301 | mutex_lock(&robj->rdev->fbdev_info->lock); | ||
302 | radeon_object_kunmap(robj); | ||
303 | } | ||
304 | flags = robj->tobj.mem.placement; | 284 | flags = robj->tobj.mem.placement; |
305 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; | 285 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; |
306 | r = ttm_buffer_object_validate(&robj->tobj, | 286 | r = ttm_buffer_object_validate(&robj->tobj, |
@@ -310,16 +290,6 @@ void radeon_object_unpin(struct radeon_object *robj) | |||
310 | DRM_ERROR("radeon: failed to unpin buffer.\n"); | 290 | DRM_ERROR("radeon: failed to unpin buffer.\n"); |
311 | } | 291 | } |
312 | radeon_object_unreserve(robj); | 292 | radeon_object_unreserve(robj); |
313 | if (robj->rdev->fbdev_robj == robj) { | ||
314 | if (!r) { | ||
315 | r = radeon_object_kmap(robj, &fbptr); | ||
316 | } | ||
317 | if (!r) { | ||
318 | robj->rdev->fbdev_info->screen_base = fbptr; | ||
319 | robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; | ||
320 | } | ||
321 | mutex_unlock(&robj->rdev->fbdev_info->lock); | ||
322 | } | ||
323 | } | 293 | } |
324 | 294 | ||
325 | int radeon_object_wait(struct radeon_object *robj) | 295 | int radeon_object_wait(struct radeon_object *robj) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 517c84559633..bdec583901eb 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
35 | #include <linux/wait.h> | 35 | #include <linux/wait.h> |
36 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
37 | #include <linux/version.h> | ||
38 | #include <linux/module.h> | 37 | #include <linux/module.h> |
39 | 38 | ||
40 | void ttm_bo_free_old_node(struct ttm_buffer_object *bo) | 39 | void ttm_bo_free_old_node(struct ttm_buffer_object *bo) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 27b146c54fbc..40b75032ea47 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <ttm/ttm_bo_driver.h> | 32 | #include <ttm/ttm_bo_driver.h> |
33 | #include <ttm/ttm_placement.h> | 33 | #include <ttm/ttm_placement.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <linux/version.h> | ||
36 | #include <linux/rbtree.h> | 35 | #include <linux/rbtree.h> |
37 | #include <linux/module.h> | 36 | #include <linux/module.h> |
38 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 0331fa74cd3f..75dc8bd24592 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -28,7 +28,6 @@ | |||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/version.h> | ||
32 | #include <linux/vmalloc.h> | 31 | #include <linux/vmalloc.h> |
33 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
34 | #include <linux/highmem.h> | 33 | #include <linux/highmem.h> |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 3c259ee7ddda..8206442fbabd 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -326,6 +326,16 @@ config I2C_DAVINCI | |||
326 | devices such as DaVinci NIC. | 326 | devices such as DaVinci NIC. |
327 | For details please see http://www.ti.com/davinci | 327 | For details please see http://www.ti.com/davinci |
328 | 328 | ||
329 | config I2C_DESIGNWARE | ||
330 | tristate "Synopsys DesignWare" | ||
331 | depends on HAVE_CLK | ||
332 | help | ||
333 | If you say yes to this option, support will be included for the | ||
334 | Synopsys DesignWare I2C adapter. Only master mode is supported. | ||
335 | |||
336 | This driver can also be built as a module. If so, the module | ||
337 | will be called i2c-designware. | ||
338 | |||
329 | config I2C_GPIO | 339 | config I2C_GPIO |
330 | tristate "GPIO-based bitbanging I2C" | 340 | tristate "GPIO-based bitbanging I2C" |
331 | depends on GENERIC_GPIO | 341 | depends on GENERIC_GPIO |
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index edeabf003106..e654263bfc01 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile | |||
@@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o | |||
30 | obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o | 30 | obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o |
31 | obj-$(CONFIG_I2C_CPM) += i2c-cpm.o | 31 | obj-$(CONFIG_I2C_CPM) += i2c-cpm.o |
32 | obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o | 32 | obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o |
33 | obj-$(CONFIG_I2C_DESIGNWARE) += i2c-designware.o | ||
33 | obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o | 34 | obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o |
34 | obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o | 35 | obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o |
35 | obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o | 36 | obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o |
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c new file mode 100644 index 000000000000..b444762e9b9f --- /dev/null +++ b/drivers/i2c/busses/i2c-designware.c | |||
@@ -0,0 +1,624 @@ | |||
1 | /* | ||
2 | * Synopsys DesignWare I2C adapter driver (master only). | ||
3 | * | ||
4 | * Based on the TI DAVINCI I2C adapter driver. | ||
5 | * | ||
6 | * Copyright (C) 2006 Texas Instruments. | ||
7 | * Copyright (C) 2007 MontaVista Software Inc. | ||
8 | * Copyright (C) 2009 Provigent Ltd. | ||
9 | * | ||
10 | * ---------------------------------------------------------------------------- | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | * ---------------------------------------------------------------------------- | ||
26 | * | ||
27 | */ | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/i2c.h> | ||
32 | #include <linux/clk.h> | ||
33 | #include <linux/errno.h> | ||
34 | #include <linux/sched.h> | ||
35 | #include <linux/err.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | #include <linux/io.h> | ||
39 | |||
40 | /* | ||
41 | * Registers offset | ||
42 | */ | ||
43 | #define DW_IC_CON 0x0 | ||
44 | #define DW_IC_TAR 0x4 | ||
45 | #define DW_IC_DATA_CMD 0x10 | ||
46 | #define DW_IC_SS_SCL_HCNT 0x14 | ||
47 | #define DW_IC_SS_SCL_LCNT 0x18 | ||
48 | #define DW_IC_FS_SCL_HCNT 0x1c | ||
49 | #define DW_IC_FS_SCL_LCNT 0x20 | ||
50 | #define DW_IC_INTR_STAT 0x2c | ||
51 | #define DW_IC_INTR_MASK 0x30 | ||
52 | #define DW_IC_CLR_INTR 0x40 | ||
53 | #define DW_IC_ENABLE 0x6c | ||
54 | #define DW_IC_STATUS 0x70 | ||
55 | #define DW_IC_TXFLR 0x74 | ||
56 | #define DW_IC_RXFLR 0x78 | ||
57 | #define DW_IC_COMP_PARAM_1 0xf4 | ||
58 | #define DW_IC_TX_ABRT_SOURCE 0x80 | ||
59 | |||
60 | #define DW_IC_CON_MASTER 0x1 | ||
61 | #define DW_IC_CON_SPEED_STD 0x2 | ||
62 | #define DW_IC_CON_SPEED_FAST 0x4 | ||
63 | #define DW_IC_CON_10BITADDR_MASTER 0x10 | ||
64 | #define DW_IC_CON_RESTART_EN 0x20 | ||
65 | #define DW_IC_CON_SLAVE_DISABLE 0x40 | ||
66 | |||
67 | #define DW_IC_INTR_TX_EMPTY 0x10 | ||
68 | #define DW_IC_INTR_TX_ABRT 0x40 | ||
69 | #define DW_IC_INTR_STOP_DET 0x200 | ||
70 | |||
71 | #define DW_IC_STATUS_ACTIVITY 0x1 | ||
72 | |||
73 | #define DW_IC_ERR_TX_ABRT 0x1 | ||
74 | |||
75 | /* | ||
76 | * status codes | ||
77 | */ | ||
78 | #define STATUS_IDLE 0x0 | ||
79 | #define STATUS_WRITE_IN_PROGRESS 0x1 | ||
80 | #define STATUS_READ_IN_PROGRESS 0x2 | ||
81 | |||
82 | #define TIMEOUT 20 /* ms */ | ||
83 | |||
84 | /* | ||
85 | * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register | ||
86 | * | ||
87 | * only expected abort codes are listed here | ||
88 | * refer to the datasheet for the full list | ||
89 | */ | ||
90 | #define ABRT_7B_ADDR_NOACK 0 | ||
91 | #define ABRT_10ADDR1_NOACK 1 | ||
92 | #define ABRT_10ADDR2_NOACK 2 | ||
93 | #define ABRT_TXDATA_NOACK 3 | ||
94 | #define ABRT_GCALL_NOACK 4 | ||
95 | #define ABRT_GCALL_READ 5 | ||
96 | #define ABRT_SBYTE_ACKDET 7 | ||
97 | #define ABRT_SBYTE_NORSTRT 9 | ||
98 | #define ABRT_10B_RD_NORSTRT 10 | ||
99 | #define ARB_MASTER_DIS 11 | ||
100 | #define ARB_LOST 12 | ||
101 | |||
102 | static char *abort_sources[] = { | ||
103 | [ABRT_7B_ADDR_NOACK] = | ||
104 | "slave address not acknowledged (7bit mode)", | ||
105 | [ABRT_10ADDR1_NOACK] = | ||
106 | "first address byte not acknowledged (10bit mode)", | ||
107 | [ABRT_10ADDR2_NOACK] = | ||
108 | "second address byte not acknowledged (10bit mode)", | ||
109 | [ABRT_TXDATA_NOACK] = | ||
110 | "data not acknowledged", | ||
111 | [ABRT_GCALL_NOACK] = | ||
112 | "no acknowledgement for a general call", | ||
113 | [ABRT_GCALL_READ] = | ||
114 | "read after general call", | ||
115 | [ABRT_SBYTE_ACKDET] = | ||
116 | "start byte acknowledged", | ||
117 | [ABRT_SBYTE_NORSTRT] = | ||
118 | "trying to send start byte when restart is disabled", | ||
119 | [ABRT_10B_RD_NORSTRT] = | ||
120 | "trying to read when restart is disabled (10bit mode)", | ||
121 | [ARB_MASTER_DIS] = | ||
122 | "trying to use a disabled adapter", | ||
123 | [ARB_LOST] = | ||
124 | "lost arbitration", | ||
125 | }; | ||
126 | |||
127 | /** | ||
128 | * struct dw_i2c_dev - private i2c-designware data | ||
129 | * @dev: driver model device node | ||
130 | * @base: IO registers pointer | ||
131 | * @cmd_complete: tx completion indicator | ||
132 | * @pump_msg: continue in progress transfers | ||
133 | * @lock: protect this struct and IO registers | ||
134 | * @clk: input reference clock | ||
135 | * @cmd_err: run time hardware error code | ||
136 | * @msgs: points to an array of messages currently being transferred | ||
137 | * @msgs_num: the number of elements in msgs | ||
138 | * @msg_write_idx: the element index of the current tx message in the msgs | ||
139 | * array | ||
140 | * @tx_buf_len: the length of the current tx buffer | ||
141 | * @tx_buf: the current tx buffer | ||
142 | * @msg_read_idx: the element index of the current rx message in the msgs | ||
143 | * array | ||
144 | * @rx_buf_len: the length of the current rx buffer | ||
145 | * @rx_buf: the current rx buffer | ||
146 | * @msg_err: error status of the current transfer | ||
147 | * @status: i2c master status, one of STATUS_* | ||
148 | * @abort_source: copy of the TX_ABRT_SOURCE register | ||
149 | * @irq: interrupt number for the i2c master | ||
150 | * @adapter: i2c subsystem adapter node | ||
151 | * @tx_fifo_depth: depth of the hardware tx fifo | ||
152 | * @rx_fifo_depth: depth of the hardware rx fifo | ||
153 | */ | ||
154 | struct dw_i2c_dev { | ||
155 | struct device *dev; | ||
156 | void __iomem *base; | ||
157 | struct completion cmd_complete; | ||
158 | struct tasklet_struct pump_msg; | ||
159 | struct mutex lock; | ||
160 | struct clk *clk; | ||
161 | int cmd_err; | ||
162 | struct i2c_msg *msgs; | ||
163 | int msgs_num; | ||
164 | int msg_write_idx; | ||
165 | u16 tx_buf_len; | ||
166 | u8 *tx_buf; | ||
167 | int msg_read_idx; | ||
168 | u16 rx_buf_len; | ||
169 | u8 *rx_buf; | ||
170 | int msg_err; | ||
171 | unsigned int status; | ||
172 | u16 abort_source; | ||
173 | int irq; | ||
174 | struct i2c_adapter adapter; | ||
175 | unsigned int tx_fifo_depth; | ||
176 | unsigned int rx_fifo_depth; | ||
177 | }; | ||
178 | |||
179 | /** | ||
180 | * i2c_dw_init() - initialize the designware i2c master hardware | ||
181 | * @dev: device private data | ||
182 | * | ||
183 | * This function configures and enables the I2C master. | ||
184 | * It is called during driver initialization, and again to recover from a | ||
185 | * timeout at run time. | ||
186 | */ | ||
187 | static void i2c_dw_init(struct dw_i2c_dev *dev) | ||
188 | { | ||
189 | u32 input_clock_khz = clk_get_rate(dev->clk) / 1000; | ||
190 | u16 ic_con; | ||
191 | |||
192 | /* Disable the adapter */ | ||
193 | writeb(0, dev->base + DW_IC_ENABLE); | ||
194 | |||
195 | /* set standard and fast speed dividers for high/low periods */ | ||
196 | writew((input_clock_khz * 40 / 10000)+1, /* std speed high, 4us */ | ||
197 | dev->base + DW_IC_SS_SCL_HCNT); | ||
198 | writew((input_clock_khz * 47 / 10000)+1, /* std speed low, 4.7us */ | ||
199 | dev->base + DW_IC_SS_SCL_LCNT); | ||
200 | writew((input_clock_khz * 6 / 10000)+1, /* fast speed high, 0.6us */ | ||
201 | dev->base + DW_IC_FS_SCL_HCNT); | ||
202 | writew((input_clock_khz * 13 / 10000)+1, /* fast speed low, 1.3us */ | ||
203 | dev->base + DW_IC_FS_SCL_LCNT); | ||
204 | |||
205 | /* configure the i2c master */ | ||
206 | ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE | | ||
207 | DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST; | ||
208 | writew(ic_con, dev->base + DW_IC_CON); | ||
209 | } | ||
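The four SCL count registers written above are simple scaled products of the input clock: each count is input_clock_khz * (period in tenths of a microsecond) / 10000, plus one to bias the truncating division toward meeting rather than undershooting the bus timing. A rough worked example, assuming a hypothetical 33 MHz reference clock (input_clock_khz = 33000, not a value taken from this patch):

	33000 * 40 / 10000 + 1 = 133 cycles  (~4.0 us, standard-speed SCL high)
	33000 * 47 / 10000 + 1 = 156 cycles  (~4.7 us, standard-speed SCL low)
	33000 *  6 / 10000 + 1 =  20 cycles  (~0.6 us, fast-speed SCL high)
	33000 * 13 / 10000 + 1 =  43 cycles  (~1.3 us, fast-speed SCL low)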
210 | |||
211 | /* | ||
212 | * Waiting for bus not busy | ||
213 | */ | ||
214 | static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) | ||
215 | { | ||
216 | int timeout = TIMEOUT; | ||
217 | |||
218 | while (readb(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) { | ||
219 | if (timeout <= 0) { | ||
220 | dev_warn(dev->dev, "timeout waiting for bus ready\n"); | ||
221 | return -ETIMEDOUT; | ||
222 | } | ||
223 | timeout--; | ||
224 | mdelay(1); | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * Initiate low level master read/write transaction. | ||
232 | * This function is called from i2c_dw_xfer when starting a transfer. | ||
233 | * This function is also called from dw_i2c_pump_msg to continue a transfer | ||
234 | * that is longer than the size of the TX FIFO. | ||
235 | */ | ||
236 | static void | ||
237 | i2c_dw_xfer_msg(struct i2c_adapter *adap) | ||
238 | { | ||
239 | struct dw_i2c_dev *dev = i2c_get_adapdata(adap); | ||
240 | struct i2c_msg *msgs = dev->msgs; | ||
241 | int num = dev->msgs_num; | ||
242 | u16 ic_con, intr_mask; | ||
243 | int tx_limit = dev->tx_fifo_depth - readb(dev->base + DW_IC_TXFLR); | ||
244 | int rx_limit = dev->rx_fifo_depth - readb(dev->base + DW_IC_RXFLR); | ||
245 | u16 addr = msgs[dev->msg_write_idx].addr; | ||
246 | u16 buf_len = dev->tx_buf_len; | ||
247 | |||
248 | if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { | ||
249 | /* Disable the adapter */ | ||
250 | writeb(0, dev->base + DW_IC_ENABLE); | ||
251 | |||
252 | /* set the slave (target) address */ | ||
253 | writew(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR); | ||
254 | |||
255 | /* if the slave address is ten bit address, enable 10BITADDR */ | ||
256 | ic_con = readw(dev->base + DW_IC_CON); | ||
257 | if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) | ||
258 | ic_con |= DW_IC_CON_10BITADDR_MASTER; | ||
259 | else | ||
260 | ic_con &= ~DW_IC_CON_10BITADDR_MASTER; | ||
261 | writew(ic_con, dev->base + DW_IC_CON); | ||
262 | |||
263 | /* Enable the adapter */ | ||
264 | writeb(1, dev->base + DW_IC_ENABLE); | ||
265 | } | ||
266 | |||
267 | for (; dev->msg_write_idx < num; dev->msg_write_idx++) { | ||
268 | /* if target address has changed, we need to | ||
269 | * reprogram the target address in the i2c | ||
270 | * adapter when we are done with this transfer | ||
271 | */ | ||
272 | if (msgs[dev->msg_write_idx].addr != addr) | ||
273 | return; | ||
274 | |||
275 | if (msgs[dev->msg_write_idx].len == 0) { | ||
276 | dev_err(dev->dev, | ||
277 | "%s: invalid message length\n", __func__); | ||
278 | dev->msg_err = -EINVAL; | ||
279 | return; | ||
280 | } | ||
281 | |||
282 | if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { | ||
283 | /* new i2c_msg */ | ||
284 | dev->tx_buf = msgs[dev->msg_write_idx].buf; | ||
285 | buf_len = msgs[dev->msg_write_idx].len; | ||
286 | } | ||
287 | |||
288 | while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { | ||
289 | if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { | ||
290 | writew(0x100, dev->base + DW_IC_DATA_CMD); | ||
291 | rx_limit--; | ||
292 | } else | ||
293 | writew(*(dev->tx_buf++), | ||
294 | dev->base + DW_IC_DATA_CMD); | ||
295 | tx_limit--; buf_len--; | ||
296 | } | ||
297 | } | ||
298 | |||
299 | intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT; | ||
300 | if (buf_len > 0) { /* more bytes to be written */ | ||
301 | intr_mask |= DW_IC_INTR_TX_EMPTY; | ||
302 | dev->status |= STATUS_WRITE_IN_PROGRESS; | ||
303 | } else | ||
304 | dev->status &= ~STATUS_WRITE_IN_PROGRESS; | ||
305 | writew(intr_mask, dev->base + DW_IC_INTR_MASK); | ||
306 | |||
307 | dev->tx_buf_len = buf_len; | ||
308 | } | ||
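To make the FIFO-continuation behaviour above concrete: suppose a single 64-byte write on a controller whose TX FIFO is 16 entries deep (the depth is read from DW_IC_COMP_PARAM_1 at probe time; 16 is only an illustrative value). The first call queues 16 bytes, leaves tx_buf_len at 48, sets STATUS_WRITE_IN_PROGRESS and unmasks DW_IC_INTR_TX_EMPTY. Each TX_EMPTY interrupt then schedules the pump_msg tasklet, which calls back into this function to top the FIFO up again, until tx_buf_len reaches zero and only STOP_DET/TX_ABRT remain unmasked.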
309 | |||
310 | static void | ||
311 | i2c_dw_read(struct i2c_adapter *adap) | ||
312 | { | ||
313 | struct dw_i2c_dev *dev = i2c_get_adapdata(adap); | ||
314 | struct i2c_msg *msgs = dev->msgs; | ||
315 | int num = dev->msgs_num; | ||
316 | u16 addr = msgs[dev->msg_read_idx].addr; | ||
317 | int rx_valid = readw(dev->base + DW_IC_RXFLR); | ||
318 | |||
319 | for (; dev->msg_read_idx < num; dev->msg_read_idx++) { | ||
320 | u16 len; | ||
321 | u8 *buf; | ||
322 | |||
323 | if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) | ||
324 | continue; | ||
325 | |||
326 | /* different i2c client, reprogram the i2c adapter */ | ||
327 | if (msgs[dev->msg_read_idx].addr != addr) | ||
328 | return; | ||
329 | |||
330 | if (!(dev->status & STATUS_READ_IN_PROGRESS)) { | ||
331 | len = msgs[dev->msg_read_idx].len; | ||
332 | buf = msgs[dev->msg_read_idx].buf; | ||
333 | } else { | ||
334 | len = dev->rx_buf_len; | ||
335 | buf = dev->rx_buf; | ||
336 | } | ||
337 | |||
338 | for (; len > 0 && rx_valid > 0; len--, rx_valid--) | ||
339 | *buf++ = readb(dev->base + DW_IC_DATA_CMD); | ||
340 | |||
341 | if (len > 0) { | ||
342 | dev->status |= STATUS_READ_IN_PROGRESS; | ||
343 | dev->rx_buf_len = len; | ||
344 | dev->rx_buf = buf; | ||
345 | return; | ||
346 | } else | ||
347 | dev->status &= ~STATUS_READ_IN_PROGRESS; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Prepare controller for a transaction and call i2c_dw_xfer_msg | ||
353 | */ | ||
354 | static int | ||
355 | i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | ||
356 | { | ||
357 | struct dw_i2c_dev *dev = i2c_get_adapdata(adap); | ||
358 | int ret; | ||
359 | |||
360 | dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); | ||
361 | |||
362 | mutex_lock(&dev->lock); | ||
363 | |||
364 | INIT_COMPLETION(dev->cmd_complete); | ||
365 | dev->msgs = msgs; | ||
366 | dev->msgs_num = num; | ||
367 | dev->cmd_err = 0; | ||
368 | dev->msg_write_idx = 0; | ||
369 | dev->msg_read_idx = 0; | ||
370 | dev->msg_err = 0; | ||
371 | dev->status = STATUS_IDLE; | ||
372 | |||
373 | ret = i2c_dw_wait_bus_not_busy(dev); | ||
374 | if (ret < 0) | ||
375 | goto done; | ||
376 | |||
377 | /* start the transfers */ | ||
378 | i2c_dw_xfer_msg(adap); | ||
379 | |||
380 | /* wait for tx to complete */ | ||
381 | ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ); | ||
382 | if (ret == 0) { | ||
383 | dev_err(dev->dev, "controller timed out\n"); | ||
384 | i2c_dw_init(dev); | ||
385 | ret = -ETIMEDOUT; | ||
386 | goto done; | ||
387 | } else if (ret < 0) | ||
388 | goto done; | ||
389 | |||
390 | if (dev->msg_err) { | ||
391 | ret = dev->msg_err; | ||
392 | goto done; | ||
393 | } | ||
394 | |||
395 | /* no error */ | ||
396 | if (likely(!dev->cmd_err)) { | ||
397 | /* read rx fifo, and disable the adapter */ | ||
398 | do { | ||
399 | i2c_dw_read(adap); | ||
400 | } while (dev->status & STATUS_READ_IN_PROGRESS); | ||
401 | writeb(0, dev->base + DW_IC_ENABLE); | ||
402 | ret = num; | ||
403 | goto done; | ||
404 | } | ||
405 | |||
406 | /* We have an error */ | ||
407 | if (dev->cmd_err == DW_IC_ERR_TX_ABRT) { | ||
408 | unsigned long abort_source = dev->abort_source; | ||
409 | int i; | ||
410 | |||
411 | for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) { | ||
412 | dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); | ||
413 | } | ||
414 | } | ||
415 | ret = -EIO; | ||
416 | |||
417 | done: | ||
418 | mutex_unlock(&dev->lock); | ||
419 | |||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | static u32 i2c_dw_func(struct i2c_adapter *adap) | ||
424 | { | ||
425 | return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR; | ||
426 | } | ||
427 | |||
428 | static void dw_i2c_pump_msg(unsigned long data) | ||
429 | { | ||
430 | struct dw_i2c_dev *dev = (struct dw_i2c_dev *) data; | ||
431 | u16 intr_mask; | ||
432 | |||
433 | i2c_dw_read(&dev->adapter); | ||
434 | i2c_dw_xfer_msg(&dev->adapter); | ||
435 | |||
436 | intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT; | ||
437 | if (dev->status & STATUS_WRITE_IN_PROGRESS) | ||
438 | intr_mask |= DW_IC_INTR_TX_EMPTY; | ||
439 | writew(intr_mask, dev->base + DW_IC_INTR_MASK); | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Interrupt service routine. This gets called whenever an I2C interrupt | ||
444 | * occurs. | ||
445 | */ | ||
446 | static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) | ||
447 | { | ||
448 | struct dw_i2c_dev *dev = dev_id; | ||
449 | u16 stat; | ||
450 | |||
451 | stat = readw(dev->base + DW_IC_INTR_STAT); | ||
452 | dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat); | ||
453 | if (stat & DW_IC_INTR_TX_ABRT) { | ||
454 | dev->abort_source = readw(dev->base + DW_IC_TX_ABRT_SOURCE); | ||
455 | dev->cmd_err |= DW_IC_ERR_TX_ABRT; | ||
456 | dev->status = STATUS_IDLE; | ||
457 | } else if (stat & DW_IC_INTR_TX_EMPTY) | ||
458 | tasklet_schedule(&dev->pump_msg); | ||
459 | |||
460 | readb(dev->base + DW_IC_CLR_INTR); /* clear interrupts */ | ||
461 | writew(0, dev->base + DW_IC_INTR_MASK); /* disable interrupts */ | ||
462 | if (stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) | ||
463 | complete(&dev->cmd_complete); | ||
464 | |||
465 | return IRQ_HANDLED; | ||
466 | } | ||
467 | |||
468 | static struct i2c_algorithm i2c_dw_algo = { | ||
469 | .master_xfer = i2c_dw_xfer, | ||
470 | .functionality = i2c_dw_func, | ||
471 | }; | ||
472 | |||
473 | static int __devinit dw_i2c_probe(struct platform_device *pdev) | ||
474 | { | ||
475 | struct dw_i2c_dev *dev; | ||
476 | struct i2c_adapter *adap; | ||
477 | struct resource *mem, *irq, *ioarea; | ||
478 | int r; | ||
479 | |||
480 | /* NOTE: driver uses the static register mapping */ | ||
481 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
482 | if (!mem) { | ||
483 | dev_err(&pdev->dev, "no mem resource?\n"); | ||
484 | return -EINVAL; | ||
485 | } | ||
486 | |||
487 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
488 | if (!irq) { | ||
489 | dev_err(&pdev->dev, "no irq resource?\n"); | ||
490 | return -EINVAL; | ||
491 | } | ||
492 | |||
493 | ioarea = request_mem_region(mem->start, resource_size(mem), | ||
494 | pdev->name); | ||
495 | if (!ioarea) { | ||
496 | dev_err(&pdev->dev, "I2C region already claimed\n"); | ||
497 | return -EBUSY; | ||
498 | } | ||
499 | |||
500 | dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL); | ||
501 | if (!dev) { | ||
502 | r = -ENOMEM; | ||
503 | goto err_release_region; | ||
504 | } | ||
505 | |||
506 | init_completion(&dev->cmd_complete); | ||
507 | tasklet_init(&dev->pump_msg, dw_i2c_pump_msg, (unsigned long) dev); | ||
508 | mutex_init(&dev->lock); | ||
509 | dev->dev = get_device(&pdev->dev); | ||
510 | dev->irq = irq->start; | ||
511 | platform_set_drvdata(pdev, dev); | ||
512 | |||
513 | dev->clk = clk_get(&pdev->dev, NULL); | ||
514 | if (IS_ERR(dev->clk)) { | ||
515 | r = -ENODEV; | ||
516 | goto err_free_mem; | ||
517 | } | ||
518 | clk_enable(dev->clk); | ||
519 | |||
520 | dev->base = ioremap(mem->start, resource_size(mem)); | ||
521 | if (dev->base == NULL) { | ||
522 | dev_err(&pdev->dev, "failure mapping io resources\n"); | ||
523 | r = -EBUSY; | ||
524 | goto err_unuse_clocks; | ||
525 | } | ||
526 | { | ||
527 | u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1); | ||
528 | |||
529 | dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1; | ||
530 | dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1; | ||
531 | } | ||
532 | i2c_dw_init(dev); | ||
533 | |||
534 | writew(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */ | ||
535 | r = request_irq(dev->irq, i2c_dw_isr, 0, pdev->name, dev); | ||
536 | if (r) { | ||
537 | dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq); | ||
538 | goto err_iounmap; | ||
539 | } | ||
540 | |||
541 | adap = &dev->adapter; | ||
542 | i2c_set_adapdata(adap, dev); | ||
543 | adap->owner = THIS_MODULE; | ||
544 | adap->class = I2C_CLASS_HWMON; | ||
545 | strlcpy(adap->name, "Synopsys DesignWare I2C adapter", | ||
546 | sizeof(adap->name)); | ||
547 | adap->algo = &i2c_dw_algo; | ||
548 | adap->dev.parent = &pdev->dev; | ||
549 | |||
550 | adap->nr = pdev->id; | ||
551 | r = i2c_add_numbered_adapter(adap); | ||
552 | if (r) { | ||
553 | dev_err(&pdev->dev, "failure adding adapter\n"); | ||
554 | goto err_free_irq; | ||
555 | } | ||
556 | |||
557 | return 0; | ||
558 | |||
559 | err_free_irq: | ||
560 | free_irq(dev->irq, dev); | ||
561 | err_iounmap: | ||
562 | iounmap(dev->base); | ||
563 | err_unuse_clocks: | ||
564 | clk_disable(dev->clk); | ||
565 | clk_put(dev->clk); | ||
566 | dev->clk = NULL; | ||
567 | err_free_mem: | ||
568 | platform_set_drvdata(pdev, NULL); | ||
569 | put_device(&pdev->dev); | ||
570 | kfree(dev); | ||
571 | err_release_region: | ||
572 | release_mem_region(mem->start, resource_size(mem)); | ||
573 | |||
574 | return r; | ||
575 | } | ||
576 | |||
577 | static int __devexit dw_i2c_remove(struct platform_device *pdev) | ||
578 | { | ||
579 | struct dw_i2c_dev *dev = platform_get_drvdata(pdev); | ||
580 | struct resource *mem; | ||
581 | |||
582 | platform_set_drvdata(pdev, NULL); | ||
583 | i2c_del_adapter(&dev->adapter); | ||
584 | put_device(&pdev->dev); | ||
585 | |||
586 | clk_disable(dev->clk); | ||
587 | clk_put(dev->clk); | ||
588 | dev->clk = NULL; | ||
589 | |||
590 | writeb(0, dev->base + DW_IC_ENABLE); | ||
591 | free_irq(dev->irq, dev); | ||
592 | kfree(dev); | ||
593 | |||
594 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
595 | release_mem_region(mem->start, resource_size(mem)); | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | /* work with hotplug and coldplug */ | ||
600 | MODULE_ALIAS("platform:i2c_designware"); | ||
601 | |||
602 | static struct platform_driver dw_i2c_driver = { | ||
603 | .remove = __devexit_p(dw_i2c_remove), | ||
604 | .driver = { | ||
605 | .name = "i2c_designware", | ||
606 | .owner = THIS_MODULE, | ||
607 | }, | ||
608 | }; | ||
609 | |||
610 | static int __init dw_i2c_init_driver(void) | ||
611 | { | ||
612 | return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe); | ||
613 | } | ||
614 | module_init(dw_i2c_init_driver); | ||
615 | |||
616 | static void __exit dw_i2c_exit_driver(void) | ||
617 | { | ||
618 | platform_driver_unregister(&dw_i2c_driver); | ||
619 | } | ||
620 | module_exit(dw_i2c_exit_driver); | ||
621 | |||
622 | MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>"); | ||
623 | MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter"); | ||
624 | MODULE_LICENSE("GPL"); | ||
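Since dw_i2c_probe() binds purely by platform device name and expects a memory region, an interrupt, an input clock reachable through clk_get(&pdev->dev, NULL) and a bus id taken from pdev->id, hooking the controller up from board code might look roughly like the sketch below (the register base, IRQ number and bus id are placeholders, and the clock is assumed to be supplied by the platform's clock framework):

	#include <linux/platform_device.h>
	#include <linux/ioport.h>

	static struct resource dw_i2c_resources[] = {
		{
			.start	= 0xffe30000,			/* placeholder register base */
			.end	= 0xffe30000 + 0xff,		/* covers the DW_IC_* registers */
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 32,				/* placeholder interrupt line */
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device dw_i2c_device = {
		.name		= "i2c_designware",		/* must match the driver name above */
		.id		= 0,				/* becomes the numbered i2c bus */
		.num_resources	= ARRAY_SIZE(dw_i2c_resources),
		.resource	= dw_i2c_resources,
	};

	/* typically registered from the board's init code */
	platform_device_register(&dw_i2c_device);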
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c index 03c86209446f..680e5975217f 100644 --- a/drivers/ide/cmd64x.c +++ b/drivers/ide/cmd64x.c | |||
@@ -389,8 +389,7 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { | |||
389 | .init_chipset = init_chipset_cmd64x, | 389 | .init_chipset = init_chipset_cmd64x, |
390 | .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, | 390 | .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, |
391 | .port_ops = &cmd648_port_ops, | 391 | .port_ops = &cmd648_port_ops, |
392 | .host_flags = IDE_HFLAG_SERIALIZE | | 392 | .host_flags = IDE_HFLAG_ABUSE_PREFETCH, |
393 | IDE_HFLAG_ABUSE_PREFETCH, | ||
394 | .pio_mask = ATA_PIO5, | 393 | .pio_mask = ATA_PIO5, |
395 | .mwdma_mask = ATA_MWDMA2, | 394 | .mwdma_mask = ATA_MWDMA2, |
396 | .udma_mask = ATA_UDMA2, | 395 | .udma_mask = ATA_UDMA2, |
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c index bd066bb9d611..09f98ed0731f 100644 --- a/drivers/ide/cs5520.c +++ b/drivers/ide/cs5520.c | |||
@@ -135,6 +135,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic | |||
135 | 135 | ||
136 | ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); | 136 | ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); |
137 | hw[0].irq = 14; | 137 | hw[0].irq = 14; |
138 | hw[1].irq = 15; | ||
138 | 139 | ||
139 | return ide_host_add(d, hws, 2, NULL); | 140 | return ide_host_add(d, hws, 2, NULL); |
140 | } | 141 | } |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 4a19686fcfe9..f0ede5953af8 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -876,9 +876,12 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, | |||
876 | return stat; | 876 | return stat; |
877 | 877 | ||
878 | /* | 878 | /* |
879 | * Sanity check the given block size | 879 | * Sanity check the given block size, in so far as making |
880 | * sure the sectors_per_frame we give to the caller won't | ||
881 | * end up being bogus. | ||
880 | */ | 882 | */ |
881 | blocklen = be32_to_cpu(capbuf.blocklen); | 883 | blocklen = be32_to_cpu(capbuf.blocklen); |
884 | blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; | ||
882 | switch (blocklen) { | 885 | switch (blocklen) { |
883 | case 512: | 886 | case 512: |
884 | case 1024: | 887 | case 1024: |
@@ -886,10 +889,9 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, | |||
886 | case 4096: | 889 | case 4096: |
887 | break; | 890 | break; |
888 | default: | 891 | default: |
889 | printk(KERN_ERR PFX "%s: weird block size %u\n", | 892 | printk_once(KERN_ERR PFX "%s: weird block size %u; " |
893 | "setting default block size to 2048\n", | ||
890 | drive->name, blocklen); | 894 | drive->name, blocklen); |
891 | printk(KERN_ERR PFX "%s: default to 2kb block size\n", | ||
892 | drive->name); | ||
893 | blocklen = 2048; | 895 | blocklen = 2048; |
894 | break; | 896 | break; |
895 | } | 897 | } |
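The new masking line rounds the firmware-reported block length down to a whole number of 512-byte kernel sectors before the switch (SECTOR_BITS being the 512-byte sector shift of 9), so a raw-mode value such as 2352 becomes (2352 >> 9) << 9 = 2048 and is accepted quietly, keeping the sectors_per_frame derived from it sane; sizes that are still not 512/1024/2048/4096 after rounding fall through to the (now printk_once) warning and the 2048-byte default.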
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 219e6fb78dc6..ee58c88dee5a 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -361,9 +361,6 @@ static int ide_tune_dma(ide_drive_t *drive) | |||
361 | if (__ide_dma_bad_drive(drive)) | 361 | if (__ide_dma_bad_drive(drive)) |
362 | return 0; | 362 | return 0; |
363 | 363 | ||
364 | if (ide_id_dma_bug(drive)) | ||
365 | return 0; | ||
366 | |||
367 | if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) | 364 | if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) |
368 | return config_drive_for_dma(drive); | 365 | return config_drive_for_dma(drive); |
369 | 366 | ||
@@ -394,24 +391,6 @@ static int ide_dma_check(ide_drive_t *drive) | |||
394 | return -1; | 391 | return -1; |
395 | } | 392 | } |
396 | 393 | ||
397 | int ide_id_dma_bug(ide_drive_t *drive) | ||
398 | { | ||
399 | u16 *id = drive->id; | ||
400 | |||
401 | if (id[ATA_ID_FIELD_VALID] & 4) { | ||
402 | if ((id[ATA_ID_UDMA_MODES] >> 8) && | ||
403 | (id[ATA_ID_MWDMA_MODES] >> 8)) | ||
404 | goto err_out; | ||
405 | } else if ((id[ATA_ID_MWDMA_MODES] >> 8) && | ||
406 | (id[ATA_ID_SWDMA_MODES] >> 8)) | ||
407 | goto err_out; | ||
408 | |||
409 | return 0; | ||
410 | err_out: | ||
411 | printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name); | ||
412 | return 1; | ||
413 | } | ||
414 | |||
415 | int ide_set_dma(ide_drive_t *drive) | 394 | int ide_set_dma(ide_drive_t *drive) |
416 | { | 395 | { |
417 | int rc; | 396 | int rc; |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 1059f809b809..93b7886a2d6e 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -476,10 +476,14 @@ void do_ide_request(struct request_queue *q) | |||
476 | 476 | ||
477 | if (!ide_lock_port(hwif)) { | 477 | if (!ide_lock_port(hwif)) { |
478 | ide_hwif_t *prev_port; | 478 | ide_hwif_t *prev_port; |
479 | |||
480 | WARN_ON_ONCE(hwif->rq); | ||
481 | repeat: | 479 | repeat: |
482 | prev_port = hwif->host->cur_port; | 480 | prev_port = hwif->host->cur_port; |
481 | |||
482 | if (drive->dev_flags & IDE_DFLAG_BLOCKED) | ||
483 | rq = hwif->rq; | ||
484 | else | ||
485 | WARN_ON_ONCE(hwif->rq); | ||
486 | |||
483 | if (drive->dev_flags & IDE_DFLAG_SLEEPING && | 487 | if (drive->dev_flags & IDE_DFLAG_SLEEPING && |
484 | time_after(drive->sleep, jiffies)) { | 488 | time_after(drive->sleep, jiffies)) { |
485 | ide_unlock_port(hwif); | 489 | ide_unlock_port(hwif); |
@@ -506,43 +510,29 @@ repeat: | |||
506 | hwif->cur_dev = drive; | 510 | hwif->cur_dev = drive; |
507 | drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); | 511 | drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); |
508 | 512 | ||
509 | spin_unlock_irq(&hwif->lock); | 513 | if (rq == NULL) { |
510 | spin_lock_irq(q->queue_lock); | 514 | spin_unlock_irq(&hwif->lock); |
511 | /* | 515 | spin_lock_irq(q->queue_lock); |
512 | * we know that the queue isn't empty, but this can happen | 516 | /* |
513 | * if the q->prep_rq_fn() decides to kill a request | 517 | * we know that the queue isn't empty, but this can |
514 | */ | 518 | * happen if ->prep_rq_fn() decides to kill a request |
515 | if (!rq) | 519 | */ |
516 | rq = blk_fetch_request(drive->queue); | 520 | rq = blk_fetch_request(drive->queue); |
521 | spin_unlock_irq(q->queue_lock); | ||
522 | spin_lock_irq(&hwif->lock); | ||
517 | 523 | ||
518 | spin_unlock_irq(q->queue_lock); | 524 | if (rq == NULL) { |
519 | spin_lock_irq(&hwif->lock); | 525 | ide_unlock_port(hwif); |
520 | 526 | goto out; | |
521 | if (!rq) { | 527 | } |
522 | ide_unlock_port(hwif); | ||
523 | goto out; | ||
524 | } | 528 | } |
525 | 529 | ||
526 | /* | 530 | /* |
527 | * Sanity: don't accept a request that isn't a PM request | 531 | * Sanity: don't accept a request that isn't a PM request |
528 | * if we are currently power managed. This is very important as | 532 | * if we are currently power managed. |
529 | * blk_stop_queue() doesn't prevent the blk_fetch_request() | ||
530 | * above to return us whatever is in the queue. Since we call | ||
531 | * ide_do_request() ourselves, we end up taking requests while | ||
532 | * the queue is blocked... | ||
533 | * | ||
534 | * We let requests forced at head of queue with ide-preempt | ||
535 | * though. I hope that doesn't happen too much, hopefully not | ||
536 | * unless the subdriver triggers such a thing in its own PM | ||
537 | * state machine. | ||
538 | */ | 533 | */ |
539 | if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && | 534 | BUG_ON((drive->dev_flags & IDE_DFLAG_BLOCKED) && |
540 | blk_pm_request(rq) == 0 && | 535 | blk_pm_request(rq) == 0); |
541 | (rq->cmd_flags & REQ_PREEMPT) == 0) { | ||
542 | /* there should be no pending command at this point */ | ||
543 | ide_unlock_port(hwif); | ||
544 | goto plug_device; | ||
545 | } | ||
546 | 536 | ||
547 | hwif->rq = rq; | 537 | hwif->rq = rq; |
548 | 538 | ||
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index fa047150a1c6..2892b242bbe1 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c | |||
@@ -210,6 +210,7 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list); | |||
210 | */ | 210 | */ |
211 | static const struct drive_list_entry ivb_list[] = { | 211 | static const struct drive_list_entry ivb_list[] = { |
212 | { "QUANTUM FIREBALLlct10 05" , "A03.0900" }, | 212 | { "QUANTUM FIREBALLlct10 05" , "A03.0900" }, |
213 | { "QUANTUM FIREBALLlct20 30" , "APL.0900" }, | ||
213 | { "TSSTcorp CDDVDW SH-S202J" , "SB00" }, | 214 | { "TSSTcorp CDDVDW SH-S202J" , "SB00" }, |
214 | { "TSSTcorp CDDVDW SH-S202J" , "SB01" }, | 215 | { "TSSTcorp CDDVDW SH-S202J" , "SB01" }, |
215 | { "TSSTcorp CDDVDW SH-S202N" , "SB00" }, | 216 | { "TSSTcorp CDDVDW SH-S202N" , "SB00" }, |
@@ -329,9 +330,6 @@ int ide_driveid_update(ide_drive_t *drive) | |||
329 | 330 | ||
330 | kfree(id); | 331 | kfree(id); |
331 | 332 | ||
332 | if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive)) | ||
333 | ide_dma_off(drive); | ||
334 | |||
335 | return 1; | 333 | return 1; |
336 | out_err: | 334 | out_err: |
337 | if (rc == 2) | 335 | if (rc == 2) |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 51af4eea0d36..1bb106f6221a 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -818,6 +818,24 @@ static int ide_port_setup_devices(ide_hwif_t *hwif) | |||
818 | return j; | 818 | return j; |
819 | } | 819 | } |
820 | 820 | ||
821 | static void ide_host_enable_irqs(struct ide_host *host) | ||
822 | { | ||
823 | ide_hwif_t *hwif; | ||
824 | int i; | ||
825 | |||
826 | ide_host_for_each_port(i, hwif, host) { | ||
827 | if (hwif == NULL) | ||
828 | continue; | ||
829 | |||
830 | /* clear any pending IRQs */ | ||
831 | hwif->tp_ops->read_status(hwif); | ||
832 | |||
833 | /* unmask IRQs */ | ||
834 | if (hwif->io_ports.ctl_addr) | ||
835 | hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); | ||
836 | } | ||
837 | } | ||
838 | |||
821 | /* | 839 | /* |
822 | * This routine sets up the IRQ for an IDE interface. | 840 | * This routine sets up the IRQ for an IDE interface. |
823 | */ | 841 | */ |
@@ -831,9 +849,6 @@ static int init_irq (ide_hwif_t *hwif) | |||
831 | if (irq_handler == NULL) | 849 | if (irq_handler == NULL) |
832 | irq_handler = ide_intr; | 850 | irq_handler = ide_intr; |
833 | 851 | ||
834 | if (io_ports->ctl_addr) | ||
835 | hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); | ||
836 | |||
837 | if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) | 852 | if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) |
838 | goto out_up; | 853 | goto out_up; |
839 | 854 | ||
@@ -1404,6 +1419,8 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, | |||
1404 | ide_port_tune_devices(hwif); | 1419 | ide_port_tune_devices(hwif); |
1405 | } | 1420 | } |
1406 | 1421 | ||
1422 | ide_host_enable_irqs(host); | ||
1423 | |||
1407 | ide_host_for_each_port(i, hwif, host) { | 1424 | ide_host_for_each_port(i, hwif, host) { |
1408 | if (hwif == NULL) | 1425 | if (hwif == NULL) |
1409 | continue; | 1426 | continue; |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index ce511d8748ce..5be1bd4fc7ed 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -514,7 +514,7 @@ static struct notifier_block nb = { | |||
514 | .notifier_call = netevent_callback | 514 | .notifier_call = netevent_callback |
515 | }; | 515 | }; |
516 | 516 | ||
517 | static int addr_init(void) | 517 | static int __init addr_init(void) |
518 | { | 518 | { |
519 | addr_wq = create_singlethread_workqueue("ib_addr"); | 519 | addr_wq = create_singlethread_workqueue("ib_addr"); |
520 | if (!addr_wq) | 520 | if (!addr_wq) |
@@ -524,7 +524,7 @@ static int addr_init(void) | |||
524 | return 0; | 524 | return 0; |
525 | } | 525 | } |
526 | 526 | ||
527 | static void addr_cleanup(void) | 527 | static void __exit addr_cleanup(void) |
528 | { | 528 | { |
529 | unregister_netevent_notifier(&nb); | 529 | unregister_netevent_notifier(&nb); |
530 | destroy_workqueue(addr_wq); | 530 | destroy_workqueue(addr_wq); |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 851de83ff455..075317884b53 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -2960,7 +2960,7 @@ static void cma_remove_one(struct ib_device *device) | |||
2960 | kfree(cma_dev); | 2960 | kfree(cma_dev); |
2961 | } | 2961 | } |
2962 | 2962 | ||
2963 | static int cma_init(void) | 2963 | static int __init cma_init(void) |
2964 | { | 2964 | { |
2965 | int ret, low, high, remaining; | 2965 | int ret, low, high, remaining; |
2966 | 2966 | ||
@@ -2990,7 +2990,7 @@ err: | |||
2990 | return ret; | 2990 | return ret; |
2991 | } | 2991 | } |
2992 | 2992 | ||
2993 | static void cma_cleanup(void) | 2993 | static void __exit cma_cleanup(void) |
2994 | { | 2994 | { |
2995 | ib_unregister_client(&cma_client); | 2995 | ib_unregister_client(&cma_client); |
2996 | unregister_netdevice_notifier(&cma_nb); | 2996 | unregister_netdevice_notifier(&cma_nb); |
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 9209c5332dfe..8b92f85d4dd0 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c | |||
@@ -319,7 +319,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port, | |||
319 | ib_device); | 319 | ib_device); |
320 | struct hipz_query_port *rblock; | 320 | struct hipz_query_port *rblock; |
321 | 321 | ||
322 | if (index > 255) { | 322 | if (index < 0 || index > 255) { |
323 | ehca_err(&shca->ib_device, "Invalid index: %x.", index); | 323 | ehca_err(&shca->ib_device, "Invalid index: %x.", index); |
324 | return -EINVAL; | 324 | return -EINVAL; |
325 | } | 325 | } |
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index ce4e6eff4792..fab18a2c74a8 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -52,7 +52,7 @@ | |||
52 | #include "ehca_tools.h" | 52 | #include "ehca_tools.h" |
53 | #include "hcp_if.h" | 53 | #include "hcp_if.h" |
54 | 54 | ||
55 | #define HCAD_VERSION "0027" | 55 | #define HCAD_VERSION "0028" |
56 | 56 | ||
57 | MODULE_LICENSE("Dual BSD/GPL"); | 57 | MODULE_LICENSE("Dual BSD/GPL"); |
58 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); | 58 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); |
@@ -506,6 +506,7 @@ static int ehca_init_device(struct ehca_shca *shca) | |||
506 | shca->ib_device.detach_mcast = ehca_detach_mcast; | 506 | shca->ib_device.detach_mcast = ehca_detach_mcast; |
507 | shca->ib_device.process_mad = ehca_process_mad; | 507 | shca->ib_device.process_mad = ehca_process_mad; |
508 | shca->ib_device.mmap = ehca_mmap; | 508 | shca->ib_device.mmap = ehca_mmap; |
509 | shca->ib_device.dma_ops = &ehca_dma_mapping_ops; | ||
509 | 510 | ||
510 | if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { | 511 | if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { |
511 | shca->ib_device.uverbs_cmd_mask |= | 512 | shca->ib_device.uverbs_cmd_mask |= |
@@ -1028,17 +1029,23 @@ static int __init ehca_module_init(void) | |||
1028 | goto module_init1; | 1029 | goto module_init1; |
1029 | } | 1030 | } |
1030 | 1031 | ||
1032 | ret = ehca_create_busmap(); | ||
1033 | if (ret) { | ||
1034 | ehca_gen_err("Cannot create busmap."); | ||
1035 | goto module_init2; | ||
1036 | } | ||
1037 | |||
1031 | ret = ibmebus_register_driver(&ehca_driver); | 1038 | ret = ibmebus_register_driver(&ehca_driver); |
1032 | if (ret) { | 1039 | if (ret) { |
1033 | ehca_gen_err("Cannot register eHCA device driver"); | 1040 | ehca_gen_err("Cannot register eHCA device driver"); |
1034 | ret = -EINVAL; | 1041 | ret = -EINVAL; |
1035 | goto module_init2; | 1042 | goto module_init3; |
1036 | } | 1043 | } |
1037 | 1044 | ||
1038 | ret = register_memory_notifier(&ehca_mem_nb); | 1045 | ret = register_memory_notifier(&ehca_mem_nb); |
1039 | if (ret) { | 1046 | if (ret) { |
1040 | ehca_gen_err("Failed registering memory add/remove notifier"); | 1047 | ehca_gen_err("Failed registering memory add/remove notifier"); |
1041 | goto module_init3; | 1048 | goto module_init4; |
1042 | } | 1049 | } |
1043 | 1050 | ||
1044 | if (ehca_poll_all_eqs != 1) { | 1051 | if (ehca_poll_all_eqs != 1) { |
@@ -1053,9 +1060,12 @@ static int __init ehca_module_init(void) | |||
1053 | 1060 | ||
1054 | return 0; | 1061 | return 0; |
1055 | 1062 | ||
1056 | module_init3: | 1063 | module_init4: |
1057 | ibmebus_unregister_driver(&ehca_driver); | 1064 | ibmebus_unregister_driver(&ehca_driver); |
1058 | 1065 | ||
1066 | module_init3: | ||
1067 | ehca_destroy_busmap(); | ||
1068 | |||
1059 | module_init2: | 1069 | module_init2: |
1060 | ehca_destroy_slab_caches(); | 1070 | ehca_destroy_slab_caches(); |
1061 | 1071 | ||
@@ -1073,6 +1083,8 @@ static void __exit ehca_module_exit(void) | |||
1073 | 1083 | ||
1074 | unregister_memory_notifier(&ehca_mem_nb); | 1084 | unregister_memory_notifier(&ehca_mem_nb); |
1075 | 1085 | ||
1086 | ehca_destroy_busmap(); | ||
1087 | |||
1076 | ehca_destroy_slab_caches(); | 1088 | ehca_destroy_slab_caches(); |
1077 | 1089 | ||
1078 | ehca_destroy_comp_pool(); | 1090 | ehca_destroy_comp_pool(); |
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 72f83f7df614..7663a2a9f130 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
@@ -53,6 +53,38 @@ | |||
53 | /* max number of rpages (per hcall register_rpages) */ | 53 | /* max number of rpages (per hcall register_rpages) */ |
54 | #define MAX_RPAGES 512 | 54 | #define MAX_RPAGES 512 |
55 | 55 | ||
56 | /* DMEM toleration management */ | ||
57 | #define EHCA_SECTSHIFT SECTION_SIZE_BITS | ||
58 | #define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT) | ||
59 | #define EHCA_HUGEPAGESHIFT 34 | ||
60 | #define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT) | ||
61 | #define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT) | ||
62 | #define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL | ||
63 | #define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */ | ||
64 | #define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2) | ||
65 | #define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT) | ||
66 | #define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */ | ||
67 | #define EHCA_DIR_MAP_SIZE (0x10000) | ||
68 | #define EHCA_ENT_MAP_SIZE (0x10000) | ||
69 | #define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1) | ||
70 | |||
71 | static unsigned long ehca_mr_len; | ||
72 | |||
73 | /* | ||
74 | * Memory map data structures | ||
75 | */ | ||
76 | struct ehca_dir_bmap { | ||
77 | u64 ent[EHCA_MAP_ENTRIES]; | ||
78 | }; | ||
79 | struct ehca_top_bmap { | ||
80 | struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES]; | ||
81 | }; | ||
82 | struct ehca_bmap { | ||
83 | struct ehca_top_bmap *top[EHCA_MAP_ENTRIES]; | ||
84 | }; | ||
85 | |||
86 | static struct ehca_bmap *ehca_bmap; | ||
87 | |||
56 | static struct kmem_cache *mr_cache; | 88 | static struct kmem_cache *mr_cache; |
57 | static struct kmem_cache *mw_cache; | 89 | static struct kmem_cache *mw_cache; |
58 | 90 | ||
@@ -68,6 +100,8 @@ enum ehca_mr_pgsize { | |||
68 | #define EHCA_MR_PGSHIFT1M 20 | 100 | #define EHCA_MR_PGSHIFT1M 20 |
69 | #define EHCA_MR_PGSHIFT16M 24 | 101 | #define EHCA_MR_PGSHIFT16M 24 |
70 | 102 | ||
103 | static u64 ehca_map_vaddr(void *caddr); | ||
104 | |||
71 | static u32 ehca_encode_hwpage_size(u32 pgsize) | 105 | static u32 ehca_encode_hwpage_size(u32 pgsize) |
72 | { | 106 | { |
73 | int log = ilog2(pgsize); | 107 | int log = ilog2(pgsize); |
@@ -135,7 +169,8 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) | |||
135 | goto get_dma_mr_exit0; | 169 | goto get_dma_mr_exit0; |
136 | } | 170 | } |
137 | 171 | ||
138 | ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE, | 172 | ret = ehca_reg_maxmr(shca, e_maxmr, |
173 | (void *)ehca_map_vaddr((void *)KERNELBASE), | ||
139 | mr_access_flags, e_pd, | 174 | mr_access_flags, e_pd, |
140 | &e_maxmr->ib.ib_mr.lkey, | 175 | &e_maxmr->ib.ib_mr.lkey, |
141 | &e_maxmr->ib.ib_mr.rkey); | 176 | &e_maxmr->ib.ib_mr.rkey); |
@@ -251,7 +286,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, | |||
251 | 286 | ||
252 | ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, | 287 | ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, |
253 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, | 288 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, |
254 | &e_mr->ib.ib_mr.rkey); | 289 | &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); |
255 | if (ret) { | 290 | if (ret) { |
256 | ib_mr = ERR_PTR(ret); | 291 | ib_mr = ERR_PTR(ret); |
257 | goto reg_phys_mr_exit1; | 292 | goto reg_phys_mr_exit1; |
@@ -370,7 +405,7 @@ reg_user_mr_fallback: | |||
370 | 405 | ||
371 | ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, | 406 | ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, |
372 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, | 407 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, |
373 | &e_mr->ib.ib_mr.rkey); | 408 | &e_mr->ib.ib_mr.rkey, EHCA_REG_MR); |
374 | if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { | 409 | if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { |
375 | ehca_warn(pd->device, "failed to register mr " | 410 | ehca_warn(pd->device, "failed to register mr " |
376 | "with hwpage_size=%llx", hwpage_size); | 411 | "with hwpage_size=%llx", hwpage_size); |
@@ -794,7 +829,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, | |||
794 | ret = ehca_reg_mr(shca, e_fmr, NULL, | 829 | ret = ehca_reg_mr(shca, e_fmr, NULL, |
795 | fmr_attr->max_pages * (1 << fmr_attr->page_shift), | 830 | fmr_attr->max_pages * (1 << fmr_attr->page_shift), |
796 | mr_access_flags, e_pd, &pginfo, | 831 | mr_access_flags, e_pd, &pginfo, |
797 | &tmp_lkey, &tmp_rkey); | 832 | &tmp_lkey, &tmp_rkey, EHCA_REG_MR); |
798 | if (ret) { | 833 | if (ret) { |
799 | ib_fmr = ERR_PTR(ret); | 834 | ib_fmr = ERR_PTR(ret); |
800 | goto alloc_fmr_exit1; | 835 | goto alloc_fmr_exit1; |
@@ -983,6 +1018,10 @@ free_fmr_exit0: | |||
983 | 1018 | ||
984 | /*----------------------------------------------------------------------*/ | 1019 | /*----------------------------------------------------------------------*/ |
985 | 1020 | ||
1021 | static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca, | ||
1022 | struct ehca_mr *e_mr, | ||
1023 | struct ehca_mr_pginfo *pginfo); | ||
1024 | |||
986 | int ehca_reg_mr(struct ehca_shca *shca, | 1025 | int ehca_reg_mr(struct ehca_shca *shca, |
987 | struct ehca_mr *e_mr, | 1026 | struct ehca_mr *e_mr, |
988 | u64 *iova_start, | 1027 | u64 *iova_start, |
@@ -991,7 +1030,8 @@ int ehca_reg_mr(struct ehca_shca *shca, | |||
991 | struct ehca_pd *e_pd, | 1030 | struct ehca_pd *e_pd, |
992 | struct ehca_mr_pginfo *pginfo, | 1031 | struct ehca_mr_pginfo *pginfo, |
993 | u32 *lkey, /*OUT*/ | 1032 | u32 *lkey, /*OUT*/ |
994 | u32 *rkey) /*OUT*/ | 1033 | u32 *rkey, /*OUT*/ |
1034 | enum ehca_reg_type reg_type) | ||
995 | { | 1035 | { |
996 | int ret; | 1036 | int ret; |
997 | u64 h_ret; | 1037 | u64 h_ret; |
@@ -1015,7 +1055,13 @@ int ehca_reg_mr(struct ehca_shca *shca, | |||
1015 | 1055 | ||
1016 | e_mr->ipz_mr_handle = hipzout.handle; | 1056 | e_mr->ipz_mr_handle = hipzout.handle; |
1017 | 1057 | ||
1018 | ret = ehca_reg_mr_rpages(shca, e_mr, pginfo); | 1058 | if (reg_type == EHCA_REG_BUSMAP_MR) |
1059 | ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo); | ||
1060 | else if (reg_type == EHCA_REG_MR) | ||
1061 | ret = ehca_reg_mr_rpages(shca, e_mr, pginfo); | ||
1062 | else | ||
1063 | ret = -EINVAL; | ||
1064 | |||
1019 | if (ret) | 1065 | if (ret) |
1020 | goto ehca_reg_mr_exit1; | 1066 | goto ehca_reg_mr_exit1; |
1021 | 1067 | ||
@@ -1316,7 +1362,7 @@ int ehca_rereg_mr(struct ehca_shca *shca, | |||
1316 | e_mr->fmr_map_cnt = save_mr.fmr_map_cnt; | 1362 | e_mr->fmr_map_cnt = save_mr.fmr_map_cnt; |
1317 | 1363 | ||
1318 | ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl, | 1364 | ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl, |
1319 | e_pd, pginfo, lkey, rkey); | 1365 | e_pd, pginfo, lkey, rkey, EHCA_REG_MR); |
1320 | if (ret) { | 1366 | if (ret) { |
1321 | u32 offset = (u64)(&e_mr->flags) - (u64)e_mr; | 1367 | u32 offset = (u64)(&e_mr->flags) - (u64)e_mr; |
1322 | memcpy(&e_mr->flags, &(save_mr.flags), | 1368 | memcpy(&e_mr->flags, &(save_mr.flags), |
@@ -1409,7 +1455,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca, | |||
1409 | ret = ehca_reg_mr(shca, e_fmr, NULL, | 1455 | ret = ehca_reg_mr(shca, e_fmr, NULL, |
1410 | (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), | 1456 | (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), |
1411 | e_fmr->acl, e_pd, &pginfo, &tmp_lkey, | 1457 | e_fmr->acl, e_pd, &pginfo, &tmp_lkey, |
1412 | &tmp_rkey); | 1458 | &tmp_rkey, EHCA_REG_MR); |
1413 | if (ret) { | 1459 | if (ret) { |
1414 | u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; | 1460 | u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; |
1415 | memcpy(&e_fmr->flags, &(save_mr.flags), | 1461 | memcpy(&e_fmr->flags, &(save_mr.flags), |
@@ -1478,6 +1524,90 @@ ehca_reg_smr_exit0: | |||
1478 | } /* end ehca_reg_smr() */ | 1524 | } /* end ehca_reg_smr() */ |
1479 | 1525 | ||
1480 | /*----------------------------------------------------------------------*/ | 1526 | /*----------------------------------------------------------------------*/ |
1527 | static inline void *ehca_calc_sectbase(int top, int dir, int idx) | ||
1528 | { | ||
1529 | unsigned long ret = idx; | ||
1530 | ret |= dir << EHCA_DIR_INDEX_SHIFT; | ||
1531 | ret |= top << EHCA_TOP_INDEX_SHIFT; | ||
1532 | return abs_to_virt(ret << SECTION_SIZE_BITS); | ||
1533 | } | ||
1534 | |||
1535 | #define ehca_bmap_valid(entry) \ | ||
1536 | ((u64)entry != (u64)EHCA_INVAL_ADDR) | ||
1537 | |||
1538 | static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage, | ||
1539 | struct ehca_shca *shca, struct ehca_mr *mr, | ||
1540 | struct ehca_mr_pginfo *pginfo) | ||
1541 | { | ||
1542 | u64 h_ret = 0; | ||
1543 | unsigned long page = 0; | ||
1544 | u64 rpage = virt_to_abs(kpage); | ||
1545 | int page_count; | ||
1546 | |||
1547 | void *sectbase = ehca_calc_sectbase(top, dir, idx); | ||
1548 | if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) { | ||
1549 | ehca_err(&shca->ib_device, "reg_mr_section will probably fail: " | ||
1550 | "hwpage_size does not fit to " | ||
1551 | "section start address"); | ||
1552 | } | ||
1553 | page_count = EHCA_SECTSIZE / pginfo->hwpage_size; | ||
1554 | |||
1555 | while (page < page_count) { | ||
1556 | u64 rnum; | ||
1557 | for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count); | ||
1558 | rnum++) { | ||
1559 | void *pg = sectbase + ((page++) * pginfo->hwpage_size); | ||
1560 | kpage[rnum] = virt_to_abs(pg); | ||
1561 | } | ||
1562 | |||
1563 | h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr, | ||
1564 | ehca_encode_hwpage_size(pginfo->hwpage_size), | ||
1565 | 0, rpage, rnum); | ||
1566 | |||
1567 | if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) { | ||
1568 | ehca_err(&shca->ib_device, "register_rpage_mr failed"); | ||
1569 | return h_ret; | ||
1570 | } | ||
1571 | } | ||
1572 | return h_ret; | ||
1573 | } | ||
1574 | |||
1575 | static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage, | ||
1576 | struct ehca_shca *shca, struct ehca_mr *mr, | ||
1577 | struct ehca_mr_pginfo *pginfo) | ||
1578 | { | ||
1579 | u64 hret = H_SUCCESS; | ||
1580 | int idx; | ||
1581 | |||
1582 | for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) { | ||
1583 | if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx])) | ||
1584 | continue; | ||
1585 | |||
1586 | hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr, | ||
1587 | pginfo); | ||
1588 | if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) | ||
1589 | return hret; | ||
1590 | } | ||
1591 | return hret; | ||
1592 | } | ||
1593 | |||
1594 | static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca, | ||
1595 | struct ehca_mr *mr, | ||
1596 | struct ehca_mr_pginfo *pginfo) | ||
1597 | { | ||
1598 | u64 hret = H_SUCCESS; | ||
1599 | int dir; | ||
1600 | |||
1601 | for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) { | ||
1602 | if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) | ||
1603 | continue; | ||
1604 | |||
1605 | hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo); | ||
1606 | if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) | ||
1607 | return hret; | ||
1608 | } | ||
1609 | return hret; | ||
1610 | } | ||
1481 | 1611 | ||
1482 | /* register internal max-MR to internal SHCA */ | 1612 | /* register internal max-MR to internal SHCA */ |
1483 | int ehca_reg_internal_maxmr( | 1613 | int ehca_reg_internal_maxmr( |
@@ -1495,6 +1625,11 @@ int ehca_reg_internal_maxmr( | |||
1495 | u32 num_hwpages; | 1625 | u32 num_hwpages; |
1496 | u64 hw_pgsize; | 1626 | u64 hw_pgsize; |
1497 | 1627 | ||
1628 | if (!ehca_bmap) { | ||
1629 | ret = -EFAULT; | ||
1630 | goto ehca_reg_internal_maxmr_exit0; | ||
1631 | } | ||
1632 | |||
1498 | e_mr = ehca_mr_new(); | 1633 | e_mr = ehca_mr_new(); |
1499 | if (!e_mr) { | 1634 | if (!e_mr) { |
1500 | ehca_err(&shca->ib_device, "out of memory"); | 1635 | ehca_err(&shca->ib_device, "out of memory"); |
@@ -1504,8 +1639,8 @@ int ehca_reg_internal_maxmr( | |||
1504 | e_mr->flags |= EHCA_MR_FLAG_MAXMR; | 1639 | e_mr->flags |= EHCA_MR_FLAG_MAXMR; |
1505 | 1640 | ||
1506 | /* register internal max-MR on HCA */ | 1641 | /* register internal max-MR on HCA */ |
1507 | size_maxmr = (u64)high_memory - PAGE_OFFSET; | 1642 | size_maxmr = ehca_mr_len; |
1508 | iova_start = (u64 *)KERNELBASE; | 1643 | iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE); |
1509 | ib_pbuf.addr = 0; | 1644 | ib_pbuf.addr = 0; |
1510 | ib_pbuf.size = size_maxmr; | 1645 | ib_pbuf.size = size_maxmr; |
1511 | num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, | 1646 | num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, |
@@ -1524,7 +1659,7 @@ int ehca_reg_internal_maxmr( | |||
1524 | 1659 | ||
1525 | ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, | 1660 | ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, |
1526 | &pginfo, &e_mr->ib.ib_mr.lkey, | 1661 | &pginfo, &e_mr->ib.ib_mr.lkey, |
1527 | &e_mr->ib.ib_mr.rkey); | 1662 | &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR); |
1528 | if (ret) { | 1663 | if (ret) { |
1529 | ehca_err(&shca->ib_device, "reg of internal max MR failed, " | 1664 | ehca_err(&shca->ib_device, "reg of internal max MR failed, " |
1530 | "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x " | 1665 | "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x " |
@@ -2077,8 +2212,8 @@ int ehca_mr_is_maxmr(u64 size, | |||
2077 | u64 *iova_start) | 2212 | u64 *iova_start) |
2078 | { | 2213 | { |
2079 | /* a MR is treated as max-MR only if it fits following: */ | 2214 | /* a MR is treated as max-MR only if it fits following: */ |
2080 | if ((size == ((u64)high_memory - PAGE_OFFSET)) && | 2215 | if ((size == ehca_mr_len) && |
2081 | (iova_start == (void *)KERNELBASE)) { | 2216 | (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) { |
2082 | ehca_gen_dbg("this is a max-MR"); | 2217 | ehca_gen_dbg("this is a max-MR"); |
2083 | return 1; | 2218 | return 1; |
2084 | } else | 2219 | } else |
@@ -2184,3 +2319,350 @@ void ehca_cleanup_mrmw_cache(void) | |||
2184 | if (mw_cache) | 2319 | if (mw_cache) |
2185 | kmem_cache_destroy(mw_cache); | 2320 | kmem_cache_destroy(mw_cache); |
2186 | } | 2321 | } |
2322 | |||
2323 | static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap, | ||
2324 | int dir) | ||
2325 | { | ||
2326 | if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) { | ||
2327 | ehca_top_bmap->dir[dir] = | ||
2328 | kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL); | ||
2329 | if (!ehca_top_bmap->dir[dir]) | ||
2330 | return -ENOMEM; | ||
2331 | /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ | ||
2332 | memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE); | ||
2333 | } | ||
2334 | return 0; | ||
2335 | } | ||
2336 | |||
2337 | static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir) | ||
2338 | { | ||
2339 | if (!ehca_bmap_valid(ehca_bmap->top[top])) { | ||
2340 | ehca_bmap->top[top] = | ||
2341 | kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL); | ||
2342 | if (!ehca_bmap->top[top]) | ||
2343 | return -ENOMEM; | ||
2344 | /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ | ||
2345 | memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE); | ||
2346 | } | ||
2347 | return ehca_init_top_bmap(ehca_bmap->top[top], dir); | ||
2348 | } | ||
2349 | |||
2350 | static inline int ehca_calc_index(unsigned long i, unsigned long s) | ||
2351 | { | ||
2352 | return (i >> s) & EHCA_INDEX_MASK; | ||
2353 | } | ||
2354 | |||
2355 | void ehca_destroy_busmap(void) | ||
2356 | { | ||
2357 | int top, dir; | ||
2358 | |||
2359 | if (!ehca_bmap) | ||
2360 | return; | ||
2361 | |||
2362 | for (top = 0; top < EHCA_MAP_ENTRIES; top++) { | ||
2363 | if (!ehca_bmap_valid(ehca_bmap->top[top])) | ||
2364 | continue; | ||
2365 | for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) { | ||
2366 | if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) | ||
2367 | continue; | ||
2368 | |||
2369 | kfree(ehca_bmap->top[top]->dir[dir]); | ||
2370 | } | ||
2371 | |||
2372 | kfree(ehca_bmap->top[top]); | ||
2373 | } | ||
2374 | |||
2375 | kfree(ehca_bmap); | ||
2376 | ehca_bmap = NULL; | ||
2377 | } | ||
2378 | |||
2379 | static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages) | ||
2380 | { | ||
2381 | unsigned long i, start_section, end_section; | ||
2382 | int top, dir, idx; | ||
2383 | |||
2384 | if (!nr_pages) | ||
2385 | return 0; | ||
2386 | |||
2387 | if (!ehca_bmap) { | ||
2388 | ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL); | ||
2389 | if (!ehca_bmap) | ||
2390 | return -ENOMEM; | ||
2391 | /* Set map block to 0xFF according to EHCA_INVAL_ADDR */ | ||
2392 | memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE); | ||
2393 | } | ||
2394 | |||
2395 | start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE; | ||
2396 | end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE; | ||
2397 | for (i = start_section; i < end_section; i++) { | ||
2398 | int ret; | ||
2399 | top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT); | ||
2400 | dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT); | ||
2401 | idx = i & EHCA_INDEX_MASK; | ||
2402 | |||
2403 | ret = ehca_init_bmap(ehca_bmap, top, dir); | ||
2404 | if (ret) { | ||
2405 | ehca_destroy_busmap(); | ||
2406 | return ret; | ||
2407 | } | ||
2408 | ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len; | ||
2409 | ehca_mr_len += EHCA_SECTSIZE; | ||
2410 | } | ||
2411 | return 0; | ||
2412 | } | ||
2413 | |||
2414 | static int ehca_is_hugepage(unsigned long pfn) | ||
2415 | { | ||
2416 | int page_order; | ||
2417 | |||
2418 | if (pfn & EHCA_HUGEPAGE_PFN_MASK) | ||
2419 | return 0; | ||
2420 | |||
2421 | page_order = compound_order(pfn_to_page(pfn)); | ||
2422 | if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT) | ||
2423 | return 0; | ||
2424 | |||
2425 | return 1; | ||
2426 | } | ||
2427 | |||
2428 | static int ehca_create_busmap_callback(unsigned long initial_pfn, | ||
2429 | unsigned long total_nr_pages, void *arg) | ||
2430 | { | ||
2431 | int ret; | ||
2432 | unsigned long pfn, start_pfn, end_pfn, nr_pages; | ||
2433 | |||
2434 | if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE) | ||
2435 | return ehca_update_busmap(initial_pfn, total_nr_pages); | ||
2436 | |||
2437 | /* Given chunk is >= 16GB -> check for hugepages */ | ||
2438 | start_pfn = initial_pfn; | ||
2439 | end_pfn = initial_pfn + total_nr_pages; | ||
2440 | pfn = start_pfn; | ||
2441 | |||
2442 | while (pfn < end_pfn) { | ||
2443 | if (ehca_is_hugepage(pfn)) { | ||
2444 | /* Add mem found in front of the hugepage */ | ||
2445 | nr_pages = pfn - start_pfn; | ||
2446 | ret = ehca_update_busmap(start_pfn, nr_pages); | ||
2447 | if (ret) | ||
2448 | return ret; | ||
2449 | /* Skip the hugepage */ | ||
2450 | pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE); | ||
2451 | start_pfn = pfn; | ||
2452 | } else | ||
2453 | pfn += (EHCA_SECTSIZE / PAGE_SIZE); | ||
2454 | } | ||
2455 | |||
2456 | /* Add mem found behind the hugepage(s) */ | ||
2457 | nr_pages = pfn - start_pfn; | ||
2458 | return ehca_update_busmap(start_pfn, nr_pages); | ||
2459 | } | ||
2460 | |||
2461 | int ehca_create_busmap(void) | ||
2462 | { | ||
2463 | int ret; | ||
2464 | |||
2465 | ehca_mr_len = 0; | ||
2466 | ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL, | ||
2467 | ehca_create_busmap_callback); | ||
2468 | return ret; | ||
2469 | } | ||
2470 | |||
2471 | static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca, | ||
2472 | struct ehca_mr *e_mr, | ||
2473 | struct ehca_mr_pginfo *pginfo) | ||
2474 | { | ||
2475 | int top; | ||
2476 | u64 hret, *kpage; | ||
2477 | |||
2478 | kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL); | ||
2479 | if (!kpage) { | ||
2480 | ehca_err(&shca->ib_device, "kpage alloc failed"); | ||
2481 | return -ENOMEM; | ||
2482 | } | ||
2483 | for (top = 0; top < EHCA_MAP_ENTRIES; top++) { | ||
2484 | if (!ehca_bmap_valid(ehca_bmap->top[top])) | ||
2485 | continue; | ||
2486 | hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo); | ||
2487 | if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS)) | ||
2488 | break; | ||
2489 | } | ||
2490 | |||
2491 | ehca_free_fw_ctrlblock(kpage); | ||
2492 | |||
2493 | if (hret == H_SUCCESS) | ||
2494 | return 0; /* Everything is fine */ | ||
2495 | else { | ||
2496 | ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, " | ||
2497 | "h_ret=%lli e_mr=%p top=%x lkey=%x " | ||
2498 | "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top, | ||
2499 | e_mr->ib.ib_mr.lkey, | ||
2500 | shca->ipz_hca_handle.handle, | ||
2501 | e_mr->ipz_mr_handle.handle); | ||
2502 | return ehca2ib_return_code(hret); | ||
2503 | } | ||
2504 | } | ||
2505 | |||
2506 | static u64 ehca_map_vaddr(void *caddr) | ||
2507 | { | ||
2508 | int top, dir, idx; | ||
2509 | unsigned long abs_addr, offset; | ||
2510 | u64 entry; | ||
2511 | |||
2512 | if (!ehca_bmap) | ||
2513 | return EHCA_INVAL_ADDR; | ||
2514 | |||
2515 | abs_addr = virt_to_abs(caddr); | ||
2516 | top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT); | ||
2517 | if (!ehca_bmap_valid(ehca_bmap->top[top])) | ||
2518 | return EHCA_INVAL_ADDR; | ||
2519 | |||
2520 | dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT); | ||
2521 | if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir])) | ||
2522 | return EHCA_INVAL_ADDR; | ||
2523 | |||
2524 | idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT); | ||
2525 | |||
2526 | entry = ehca_bmap->top[top]->dir[dir]->ent[idx]; | ||
2527 | if (ehca_bmap_valid(entry)) { | ||
2528 | offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1); | ||
2529 | return entry | offset; | ||
2530 | } else | ||
2531 | return EHCA_INVAL_ADDR; | ||
2532 | } | ||
2533 | |||
2534 | static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr) | ||
2535 | { | ||
2536 | return dma_addr == EHCA_INVAL_ADDR; | ||
2537 | } | ||
2538 | |||
2539 | static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr, | ||
2540 | size_t size, enum dma_data_direction direction) | ||
2541 | { | ||
2542 | if (cpu_addr) | ||
2543 | return ehca_map_vaddr(cpu_addr); | ||
2544 | else | ||
2545 | return EHCA_INVAL_ADDR; | ||
2546 | } | ||
2547 | |||
2548 | static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, | ||
2549 | enum dma_data_direction direction) | ||
2550 | { | ||
2551 | /* This is only a stub; nothing to be done here */ | ||
2552 | } | ||
2553 | |||
2554 | static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page, | ||
2555 | unsigned long offset, size_t size, | ||
2556 | enum dma_data_direction direction) | ||
2557 | { | ||
2558 | u64 addr; | ||
2559 | |||
2560 | if (offset + size > PAGE_SIZE) | ||
2561 | return EHCA_INVAL_ADDR; | ||
2562 | |||
2563 | addr = ehca_map_vaddr(page_address(page)); | ||
2564 | if (!ehca_dma_mapping_error(dev, addr)) | ||
2565 | addr += offset; | ||
2566 | |||
2567 | return addr; | ||
2568 | } | ||
2569 | |||
2570 | static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, | ||
2571 | enum dma_data_direction direction) | ||
2572 | { | ||
2573 | /* This is only a stub; nothing to be done here */ | ||
2574 | } | ||
2575 | |||
2576 | static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl, | ||
2577 | int nents, enum dma_data_direction direction) | ||
2578 | { | ||
2579 | struct scatterlist *sg; | ||
2580 | int i; | ||
2581 | |||
2582 | for_each_sg(sgl, sg, nents, i) { | ||
2583 | u64 addr; | ||
2584 | addr = ehca_map_vaddr(sg_virt(sg)); | ||
2585 | if (ehca_dma_mapping_error(dev, addr)) | ||
2586 | return 0; | ||
2587 | |||
2588 | sg->dma_address = addr; | ||
2589 | sg->dma_length = sg->length; | ||
2590 | } | ||
2591 | return nents; | ||
2592 | } | ||
2593 | |||
2594 | static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, | ||
2595 | int nents, enum dma_data_direction direction) | ||
2596 | { | ||
2597 | /* This is only a stub; nothing to be done here */ | ||
2598 | } | ||
2599 | |||
2600 | static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg) | ||
2601 | { | ||
2602 | return sg->dma_address; | ||
2603 | } | ||
2604 | |||
2605 | static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg) | ||
2606 | { | ||
2607 | return sg->length; | ||
2608 | } | ||
2609 | |||
2610 | static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr, | ||
2611 | size_t size, | ||
2612 | enum dma_data_direction dir) | ||
2613 | { | ||
2614 | dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); | ||
2615 | } | ||
2616 | |||
2617 | static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr, | ||
2618 | size_t size, | ||
2619 | enum dma_data_direction dir) | ||
2620 | { | ||
2621 | dma_sync_single_for_device(dev->dma_device, addr, size, dir); | ||
2622 | } | ||
2623 | |||
2624 | static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size, | ||
2625 | u64 *dma_handle, gfp_t flag) | ||
2626 | { | ||
2627 | struct page *p; | ||
2628 | void *addr = NULL; | ||
2629 | u64 dma_addr; | ||
2630 | |||
2631 | p = alloc_pages(flag, get_order(size)); | ||
2632 | if (p) { | ||
2633 | addr = page_address(p); | ||
2634 | dma_addr = ehca_map_vaddr(addr); | ||
2635 | if (ehca_dma_mapping_error(dev, dma_addr)) { | ||
2636 | free_pages((unsigned long)addr, get_order(size)); | ||
2637 | return NULL; | ||
2638 | } | ||
2639 | if (dma_handle) | ||
2640 | *dma_handle = dma_addr; | ||
2641 | return addr; | ||
2642 | } | ||
2643 | return NULL; | ||
2644 | } | ||
2645 | |||
2646 | static void ehca_dma_free_coherent(struct ib_device *dev, size_t size, | ||
2647 | void *cpu_addr, u64 dma_handle) | ||
2648 | { | ||
2649 | if (cpu_addr && size) | ||
2650 | free_pages((unsigned long)cpu_addr, get_order(size)); | ||
2651 | } | ||
2652 | |||
2653 | |||
2654 | struct ib_dma_mapping_ops ehca_dma_mapping_ops = { | ||
2655 | .mapping_error = ehca_dma_mapping_error, | ||
2656 | .map_single = ehca_dma_map_single, | ||
2657 | .unmap_single = ehca_dma_unmap_single, | ||
2658 | .map_page = ehca_dma_map_page, | ||
2659 | .unmap_page = ehca_dma_unmap_page, | ||
2660 | .map_sg = ehca_dma_map_sg, | ||
2661 | .unmap_sg = ehca_dma_unmap_sg, | ||
2662 | .dma_address = ehca_dma_address, | ||
2663 | .dma_len = ehca_dma_len, | ||
2664 | .sync_single_for_cpu = ehca_dma_sync_single_for_cpu, | ||
2665 | .sync_single_for_device = ehca_dma_sync_single_for_device, | ||
2666 | .alloc_coherent = ehca_dma_alloc_coherent, | ||
2667 | .free_coherent = ehca_dma_free_coherent, | ||
2668 | }; | ||
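For context, a minimal sketch (not part of the patch) of how an upper-layer call such as ib_dma_map_single() is assumed to reach the handlers above: ehca is expected to point the device's dma_ops at ehca_dma_mapping_ops, and the core wrapper then dispatches through that table. The wrapper below mirrors the include/rdma/ib_verbs.h pattern of this era and is illustrative only.

#include <rdma/ib_verbs.h>	/* assumed: struct ib_device, ib_dma_mapping_ops */

static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* Sketch: when a driver such as ehca installs a dma_ops table,
	 * the mapping goes through it (here, ehca_dma_map_single above);
	 * otherwise fall back to the generic streaming DMA mapping.
	 */
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}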
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h index bc8f4e31c123..50d8b51306dd 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.h +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h | |||
@@ -42,6 +42,11 @@ | |||
42 | #ifndef _EHCA_MRMW_H_ | 42 | #ifndef _EHCA_MRMW_H_ |
43 | #define _EHCA_MRMW_H_ | 43 | #define _EHCA_MRMW_H_ |
44 | 44 | ||
45 | enum ehca_reg_type { | ||
46 | EHCA_REG_MR, | ||
47 | EHCA_REG_BUSMAP_MR | ||
48 | }; | ||
49 | |||
45 | int ehca_reg_mr(struct ehca_shca *shca, | 50 | int ehca_reg_mr(struct ehca_shca *shca, |
46 | struct ehca_mr *e_mr, | 51 | struct ehca_mr *e_mr, |
47 | u64 *iova_start, | 52 | u64 *iova_start, |
@@ -50,7 +55,8 @@ int ehca_reg_mr(struct ehca_shca *shca, | |||
50 | struct ehca_pd *e_pd, | 55 | struct ehca_pd *e_pd, |
51 | struct ehca_mr_pginfo *pginfo, | 56 | struct ehca_mr_pginfo *pginfo, |
52 | u32 *lkey, | 57 | u32 *lkey, |
53 | u32 *rkey); | 58 | u32 *rkey, |
59 | enum ehca_reg_type reg_type); | ||
54 | 60 | ||
55 | int ehca_reg_mr_rpages(struct ehca_shca *shca, | 61 | int ehca_reg_mr_rpages(struct ehca_shca *shca, |
56 | struct ehca_mr *e_mr, | 62 | struct ehca_mr *e_mr, |
@@ -118,4 +124,9 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, | |||
118 | 124 | ||
119 | void ehca_mr_deletenew(struct ehca_mr *mr); | 125 | void ehca_mr_deletenew(struct ehca_mr *mr); |
120 | 126 | ||
127 | int ehca_create_busmap(void); | ||
128 | |||
129 | void ehca_destroy_busmap(void); | ||
130 | |||
131 | extern struct ib_dma_mapping_ops ehca_dma_mapping_ops; | ||
121 | #endif /*_EHCA_MRMW_H_*/ | 132 | #endif /*_EHCA_MRMW_H_*/ |
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index d606edf10858..065b20899876 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -352,10 +352,14 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, | |||
352 | 352 | ||
353 | BUG_ON(!mtts); | 353 | BUG_ON(!mtts); |
354 | 354 | ||
355 | dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, | ||
356 | list_len * sizeof (u64), DMA_TO_DEVICE); | ||
357 | |||
355 | for (i = 0; i < list_len; ++i) | 358 | for (i = 0; i < list_len; ++i) |
356 | mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT); | 359 | mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT); |
357 | 360 | ||
358 | dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE); | 361 | dma_sync_single_for_device(&dev->pdev->dev, dma_handle, |
362 | list_len * sizeof (u64), DMA_TO_DEVICE); | ||
359 | } | 363 | } |
360 | 364 | ||
361 | int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, | 365 | int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, |
@@ -803,12 +807,15 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | |||
803 | 807 | ||
804 | wmb(); | 808 | wmb(); |
805 | 809 | ||
810 | dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle, | ||
811 | list_len * sizeof(u64), DMA_TO_DEVICE); | ||
812 | |||
806 | for (i = 0; i < list_len; ++i) | 813 | for (i = 0; i < list_len; ++i) |
807 | fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] | | 814 | fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] | |
808 | MTHCA_MTT_FLAG_PRESENT); | 815 | MTHCA_MTT_FLAG_PRESENT); |
809 | 816 | ||
810 | dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle, | 817 | dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle, |
811 | list_len * sizeof(u64), DMA_TO_DEVICE); | 818 | list_len * sizeof(u64), DMA_TO_DEVICE); |
812 | 819 | ||
813 | fmr->mem.arbel.mpt->key = cpu_to_be32(key); | 820 | fmr->mem.arbel.mpt->key = cpu_to_be32(key); |
814 | fmr->mem.arbel.mpt->lkey = cpu_to_be32(key); | 821 | fmr->mem.arbel.mpt->lkey = cpu_to_be32(key); |
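The two mthca hunks above replace the old dma_sync_single() call with the paired ownership transfers required when the CPU rewrites a streaming DMA_TO_DEVICE mapping. A minimal, hedged sketch of that general pattern using the plain DMA API follows; the device, buffer and mapping names are hypothetical stand-ins, not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Hypothetical helper showing the sync_for_cpu / modify / sync_for_device
 * ordering used above; 'dev', 'buf', 'dma_handle' and 'values' stand in
 * for an existing DMA_TO_DEVICE streaming mapping and its backing buffer.
 */
static void update_mapped_table(struct device *dev, __be64 *buf,
				dma_addr_t dma_handle, int n,
				const u64 *values)
{
	int i;

	/* Give the CPU ownership of the mapped region before writing it. */
	dma_sync_single_for_cpu(dev, dma_handle, n * sizeof(u64),
				DMA_TO_DEVICE);

	for (i = 0; i < n; ++i)
		buf[i] = cpu_to_be64(values[i]);

	/* Hand ownership back so the device observes the updated entries. */
	dma_sync_single_for_device(dev, dma_handle, n * sizeof(u64),
				   DMA_TO_DEVICE);
}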
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 11c7d6642014..114b802771ad 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -472,6 +472,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
472 | 472 | ||
473 | static void nes_retrans_expired(struct nes_cm_node *cm_node) | 473 | static void nes_retrans_expired(struct nes_cm_node *cm_node) |
474 | { | 474 | { |
475 | struct iw_cm_id *cm_id = cm_node->cm_id; | ||
475 | switch (cm_node->state) { | 476 | switch (cm_node->state) { |
476 | case NES_CM_STATE_SYN_RCVD: | 477 | case NES_CM_STATE_SYN_RCVD: |
477 | case NES_CM_STATE_CLOSING: | 478 | case NES_CM_STATE_CLOSING: |
@@ -479,7 +480,9 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node) | |||
479 | break; | 480 | break; |
480 | case NES_CM_STATE_LAST_ACK: | 481 | case NES_CM_STATE_LAST_ACK: |
481 | case NES_CM_STATE_FIN_WAIT1: | 482 | case NES_CM_STATE_FIN_WAIT1: |
482 | case NES_CM_STATE_MPAREJ_RCVD: | 483 | if (cm_node->cm_id) |
484 | cm_id->rem_ref(cm_id); | ||
485 | cm_node->state = NES_CM_STATE_CLOSED; | ||
483 | send_reset(cm_node, NULL); | 486 | send_reset(cm_node, NULL); |
484 | break; | 487 | break; |
485 | default: | 488 | default: |
@@ -1406,6 +1409,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1406 | case NES_CM_STATE_CLOSED: | 1409 | case NES_CM_STATE_CLOSED: |
1407 | drop_packet(skb); | 1410 | drop_packet(skb); |
1408 | break; | 1411 | break; |
1412 | case NES_CM_STATE_FIN_WAIT1: | ||
1409 | case NES_CM_STATE_LAST_ACK: | 1413 | case NES_CM_STATE_LAST_ACK: |
1410 | cm_node->cm_id->rem_ref(cm_node->cm_id); | 1414 | cm_node->cm_id->rem_ref(cm_node->cm_id); |
1411 | case NES_CM_STATE_TIME_WAIT: | 1415 | case NES_CM_STATE_TIME_WAIT: |
@@ -1413,8 +1417,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1413 | rem_ref_cm_node(cm_node->cm_core, cm_node); | 1417 | rem_ref_cm_node(cm_node->cm_core, cm_node); |
1414 | drop_packet(skb); | 1418 | drop_packet(skb); |
1415 | break; | 1419 | break; |
1416 | case NES_CM_STATE_FIN_WAIT1: | ||
1417 | nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__); | ||
1418 | default: | 1420 | default: |
1419 | drop_packet(skb); | 1421 | drop_packet(skb); |
1420 | break; | 1422 | break; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 64d5cfd8f380..21e0fd336cf7 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -654,7 +654,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop | |||
654 | default: | 654 | default: |
655 | props->max_qp_rd_atom = 0; | 655 | props->max_qp_rd_atom = 0; |
656 | } | 656 | } |
657 | props->max_qp_init_rd_atom = props->max_qp_wr; | 657 | props->max_qp_init_rd_atom = props->max_qp_rd_atom; |
658 | props->atomic_cap = IB_ATOMIC_NONE; | 658 | props->atomic_cap = IB_ATOMIC_NONE; |
659 | props->max_map_per_fmr = 1; | 659 | props->max_map_per_fmr = 1; |
660 | 660 | ||
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 6e149f4a1fff..a0f68386c12f 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c | |||
@@ -378,6 +378,17 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, | |||
378 | dev->ofdev.dev.bus = &macio_bus_type; | 378 | dev->ofdev.dev.bus = &macio_bus_type; |
379 | dev->ofdev.dev.release = macio_release_dev; | 379 | dev->ofdev.dev.release = macio_release_dev; |
380 | 380 | ||
381 | #ifdef CONFIG_PCI | ||
382 | /* Set the DMA ops to the ones from the PCI device; this could be | ||
383 | * fishy if we didn't know that on PowerMac it's always direct ops | ||
384 | * or iommu ops that will work fine. | ||
385 | */ | ||
386 | dev->ofdev.dev.archdata.dma_ops = | ||
387 | chip->lbus.pdev->dev.archdata.dma_ops; | ||
388 | dev->ofdev.dev.archdata.dma_data = | ||
389 | chip->lbus.pdev->dev.archdata.dma_data; | ||
390 | #endif /* CONFIG_PCI */ | ||
391 | |||
381 | #ifdef DEBUG | 392 | #ifdef DEBUG |
382 | printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", | 393 | printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", |
383 | dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj); | 394 | dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj); |
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 36e0675be9f7..020f9573fd82 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
@@ -231,6 +231,17 @@ config DM_MIRROR | |||
231 | Allow volume managers to mirror logical volumes, also | 231 | Allow volume managers to mirror logical volumes, also |
232 | needed for live data migration tools such as 'pvmove'. | 232 | needed for live data migration tools such as 'pvmove'. |
233 | 233 | ||
234 | config DM_LOG_USERSPACE | ||
235 | tristate "Mirror userspace logging (EXPERIMENTAL)" | ||
236 | depends on DM_MIRROR && EXPERIMENTAL && NET | ||
237 | select CONNECTOR | ||
238 | ---help--- | ||
239 | The userspace logging module provides a mechanism for | ||
240 | relaying the dm-dirty-log API to userspace. Log designs | ||
241 | which are more suited to userspace implementation (e.g. | ||
242 | shared storage logs) or experimental logs can be implemented | ||
243 | by leveraging this framework. | ||
244 | |||
234 | config DM_ZERO | 245 | config DM_ZERO |
235 | tristate "Zero target" | 246 | tristate "Zero target" |
236 | depends on BLK_DEV_DM | 247 | depends on BLK_DEV_DM |
@@ -249,6 +260,25 @@ config DM_MULTIPATH | |||
249 | ---help--- | 260 | ---help--- |
250 | Allow volume managers to support multipath hardware. | 261 | Allow volume managers to support multipath hardware. |
251 | 262 | ||
263 | config DM_MULTIPATH_QL | ||
264 | tristate "I/O Path Selector based on the number of in-flight I/Os" | ||
265 | depends on DM_MULTIPATH | ||
266 | ---help--- | ||
267 | This path selector is a dynamic load balancer which selects | ||
268 | the path with the least number of in-flight I/Os. | ||
269 | |||
270 | If unsure, say N. | ||
271 | |||
272 | config DM_MULTIPATH_ST | ||
273 | tristate "I/O Path Selector based on the service time" | ||
274 | depends on DM_MULTIPATH | ||
275 | ---help--- | ||
276 | This path selector is a dynamic load balancer which selects | ||
277 | the path expected to complete the incoming I/O in the shortest | ||
278 | time. | ||
279 | |||
280 | If unsure, say N. | ||
281 | |||
252 | config DM_DELAY | 282 | config DM_DELAY |
253 | tristate "I/O delaying target (EXPERIMENTAL)" | 283 | tristate "I/O delaying target (EXPERIMENTAL)" |
254 | depends on BLK_DEV_DM && EXPERIMENTAL | 284 | depends on BLK_DEV_DM && EXPERIMENTAL |
diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 45cc5951d928..1dc4185bd781 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile | |||
@@ -8,6 +8,8 @@ dm-multipath-y += dm-path-selector.o dm-mpath.o | |||
8 | dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \ | 8 | dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \ |
9 | dm-snap-persistent.o | 9 | dm-snap-persistent.o |
10 | dm-mirror-y += dm-raid1.o | 10 | dm-mirror-y += dm-raid1.o |
11 | dm-log-userspace-y \ | ||
12 | += dm-log-userspace-base.o dm-log-userspace-transfer.o | ||
11 | md-mod-y += md.o bitmap.o | 13 | md-mod-y += md.o bitmap.o |
12 | raid456-y += raid5.o | 14 | raid456-y += raid5.o |
13 | raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \ | 15 | raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \ |
@@ -36,8 +38,11 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o | |||
36 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o | 38 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o |
37 | obj-$(CONFIG_DM_DELAY) += dm-delay.o | 39 | obj-$(CONFIG_DM_DELAY) += dm-delay.o |
38 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o | 40 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o |
41 | obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o | ||
42 | obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o | ||
39 | obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o | 43 | obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o |
40 | obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o | 44 | obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o |
45 | obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o | ||
41 | obj-$(CONFIG_DM_ZERO) += dm-zero.o | 46 | obj-$(CONFIG_DM_ZERO) += dm-zero.o |
42 | 47 | ||
43 | quiet_cmd_unroll = UNROLL $@ | 48 | quiet_cmd_unroll = UNROLL $@ |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 53394e863c74..9933eb861c71 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1132,6 +1132,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1132 | goto bad_crypt_queue; | 1132 | goto bad_crypt_queue; |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | ti->num_flush_requests = 1; | ||
1135 | ti->private = cc; | 1136 | ti->private = cc; |
1136 | return 0; | 1137 | return 0; |
1137 | 1138 | ||
@@ -1189,6 +1190,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, | |||
1189 | union map_info *map_context) | 1190 | union map_info *map_context) |
1190 | { | 1191 | { |
1191 | struct dm_crypt_io *io; | 1192 | struct dm_crypt_io *io; |
1193 | struct crypt_config *cc; | ||
1194 | |||
1195 | if (unlikely(bio_empty_barrier(bio))) { | ||
1196 | cc = ti->private; | ||
1197 | bio->bi_bdev = cc->dev->bdev; | ||
1198 | return DM_MAPIO_REMAPPED; | ||
1199 | } | ||
1192 | 1200 | ||
1193 | io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); | 1201 | io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); |
1194 | 1202 | ||
@@ -1305,9 +1313,17 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | |||
1305 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | 1313 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
1306 | } | 1314 | } |
1307 | 1315 | ||
1316 | static int crypt_iterate_devices(struct dm_target *ti, | ||
1317 | iterate_devices_callout_fn fn, void *data) | ||
1318 | { | ||
1319 | struct crypt_config *cc = ti->private; | ||
1320 | |||
1321 | return fn(ti, cc->dev, cc->start, data); | ||
1322 | } | ||
1323 | |||
1308 | static struct target_type crypt_target = { | 1324 | static struct target_type crypt_target = { |
1309 | .name = "crypt", | 1325 | .name = "crypt", |
1310 | .version= {1, 6, 0}, | 1326 | .version = {1, 7, 0}, |
1311 | .module = THIS_MODULE, | 1327 | .module = THIS_MODULE, |
1312 | .ctr = crypt_ctr, | 1328 | .ctr = crypt_ctr, |
1313 | .dtr = crypt_dtr, | 1329 | .dtr = crypt_dtr, |
@@ -1318,6 +1334,7 @@ static struct target_type crypt_target = { | |||
1318 | .resume = crypt_resume, | 1334 | .resume = crypt_resume, |
1319 | .message = crypt_message, | 1335 | .message = crypt_message, |
1320 | .merge = crypt_merge, | 1336 | .merge = crypt_merge, |
1337 | .iterate_devices = crypt_iterate_devices, | ||
1321 | }; | 1338 | }; |
1322 | 1339 | ||
1323 | static int __init dm_crypt_init(void) | 1340 | static int __init dm_crypt_init(void) |
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 559dbb52bc85..4e5b843cd4d7 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -197,6 +197,7 @@ out: | |||
197 | mutex_init(&dc->timer_lock); | 197 | mutex_init(&dc->timer_lock); |
198 | atomic_set(&dc->may_delay, 1); | 198 | atomic_set(&dc->may_delay, 1); |
199 | 199 | ||
200 | ti->num_flush_requests = 1; | ||
200 | ti->private = dc; | 201 | ti->private = dc; |
201 | return 0; | 202 | return 0; |
202 | 203 | ||
@@ -278,8 +279,9 @@ static int delay_map(struct dm_target *ti, struct bio *bio, | |||
278 | 279 | ||
279 | if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { | 280 | if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { |
280 | bio->bi_bdev = dc->dev_write->bdev; | 281 | bio->bi_bdev = dc->dev_write->bdev; |
281 | bio->bi_sector = dc->start_write + | 282 | if (bio_sectors(bio)) |
282 | (bio->bi_sector - ti->begin); | 283 | bio->bi_sector = dc->start_write + |
284 | (bio->bi_sector - ti->begin); | ||
283 | 285 | ||
284 | return delay_bio(dc, dc->write_delay, bio); | 286 | return delay_bio(dc, dc->write_delay, bio); |
285 | } | 287 | } |
@@ -316,9 +318,26 @@ static int delay_status(struct dm_target *ti, status_type_t type, | |||
316 | return 0; | 318 | return 0; |
317 | } | 319 | } |
318 | 320 | ||
321 | static int delay_iterate_devices(struct dm_target *ti, | ||
322 | iterate_devices_callout_fn fn, void *data) | ||
323 | { | ||
324 | struct delay_c *dc = ti->private; | ||
325 | int ret = 0; | ||
326 | |||
327 | ret = fn(ti, dc->dev_read, dc->start_read, data); | ||
328 | if (ret) | ||
329 | goto out; | ||
330 | |||
331 | if (dc->dev_write) | ||
332 | ret = fn(ti, dc->dev_write, dc->start_write, data); | ||
333 | |||
334 | out: | ||
335 | return ret; | ||
336 | } | ||
337 | |||
319 | static struct target_type delay_target = { | 338 | static struct target_type delay_target = { |
320 | .name = "delay", | 339 | .name = "delay", |
321 | .version = {1, 0, 2}, | 340 | .version = {1, 1, 0}, |
322 | .module = THIS_MODULE, | 341 | .module = THIS_MODULE, |
323 | .ctr = delay_ctr, | 342 | .ctr = delay_ctr, |
324 | .dtr = delay_dtr, | 343 | .dtr = delay_dtr, |
@@ -326,6 +345,7 @@ static struct target_type delay_target = { | |||
326 | .presuspend = delay_presuspend, | 345 | .presuspend = delay_presuspend, |
327 | .resume = delay_resume, | 346 | .resume = delay_resume, |
328 | .status = delay_status, | 347 | .status = delay_status, |
348 | .iterate_devices = delay_iterate_devices, | ||
329 | }; | 349 | }; |
330 | 350 | ||
331 | static int __init dm_delay_init(void) | 351 | static int __init dm_delay_init(void) |
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 75d8081a9041..c3ae51584b12 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
@@ -216,7 +216,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | |||
216 | return -EINVAL; | 216 | return -EINVAL; |
217 | } | 217 | } |
218 | 218 | ||
219 | type = get_type(argv[1]); | 219 | type = get_type(&persistent); |
220 | if (!type) { | 220 | if (!type) { |
221 | ti->error = "Exception store type not recognised"; | 221 | ti->error = "Exception store type not recognised"; |
222 | r = -EINVAL; | 222 | r = -EINVAL; |
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index c92701dc5001..2442c8c07898 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h | |||
@@ -156,7 +156,7 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) | |||
156 | */ | 156 | */ |
157 | static inline sector_t get_dev_size(struct block_device *bdev) | 157 | static inline sector_t get_dev_size(struct block_device *bdev) |
158 | { | 158 | { |
159 | return bdev->bd_inode->i_size >> SECTOR_SHIFT; | 159 | return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; |
160 | } | 160 | } |
161 | 161 | ||
162 | static inline chunk_t sector_to_chunk(struct dm_exception_store *store, | 162 | static inline chunk_t sector_to_chunk(struct dm_exception_store *store, |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index e73aabd61cd7..3a2e6a2f8bdd 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -22,6 +22,7 @@ struct dm_io_client { | |||
22 | /* FIXME: can we shrink this ? */ | 22 | /* FIXME: can we shrink this ? */ |
23 | struct io { | 23 | struct io { |
24 | unsigned long error_bits; | 24 | unsigned long error_bits; |
25 | unsigned long eopnotsupp_bits; | ||
25 | atomic_t count; | 26 | atomic_t count; |
26 | struct task_struct *sleeper; | 27 | struct task_struct *sleeper; |
27 | struct dm_io_client *client; | 28 | struct dm_io_client *client; |
@@ -107,8 +108,11 @@ static inline unsigned bio_get_region(struct bio *bio) | |||
107 | *---------------------------------------------------------------*/ | 108 | *---------------------------------------------------------------*/ |
108 | static void dec_count(struct io *io, unsigned int region, int error) | 109 | static void dec_count(struct io *io, unsigned int region, int error) |
109 | { | 110 | { |
110 | if (error) | 111 | if (error) { |
111 | set_bit(region, &io->error_bits); | 112 | set_bit(region, &io->error_bits); |
113 | if (error == -EOPNOTSUPP) | ||
114 | set_bit(region, &io->eopnotsupp_bits); | ||
115 | } | ||
112 | 116 | ||
113 | if (atomic_dec_and_test(&io->count)) { | 117 | if (atomic_dec_and_test(&io->count)) { |
114 | if (io->sleeper) | 118 | if (io->sleeper) |
@@ -360,7 +364,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, | |||
360 | return -EIO; | 364 | return -EIO; |
361 | } | 365 | } |
362 | 366 | ||
367 | retry: | ||
363 | io.error_bits = 0; | 368 | io.error_bits = 0; |
369 | io.eopnotsupp_bits = 0; | ||
364 | atomic_set(&io.count, 1); /* see dispatch_io() */ | 370 | atomic_set(&io.count, 1); /* see dispatch_io() */ |
365 | io.sleeper = current; | 371 | io.sleeper = current; |
366 | io.client = client; | 372 | io.client = client; |
@@ -377,6 +383,11 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, | |||
377 | } | 383 | } |
378 | set_current_state(TASK_RUNNING); | 384 | set_current_state(TASK_RUNNING); |
379 | 385 | ||
386 | if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { | ||
387 | rw &= ~(1 << BIO_RW_BARRIER); | ||
388 | goto retry; | ||
389 | } | ||
390 | |||
380 | if (error_bits) | 391 | if (error_bits) |
381 | *error_bits = io.error_bits; | 392 | *error_bits = io.error_bits; |
382 | 393 | ||
@@ -397,6 +408,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, | |||
397 | 408 | ||
398 | io = mempool_alloc(client->pool, GFP_NOIO); | 409 | io = mempool_alloc(client->pool, GFP_NOIO); |
399 | io->error_bits = 0; | 410 | io->error_bits = 0; |
411 | io->eopnotsupp_bits = 0; | ||
400 | atomic_set(&io->count, 1); /* see dispatch_io() */ | 412 | atomic_set(&io->count, 1); /* see dispatch_io() */ |
401 | io->sleeper = NULL; | 413 | io->sleeper = NULL; |
402 | io->client = client; | 414 | io->client = client; |
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 1128d3fba797..7f77f18fcafa 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -276,7 +276,7 @@ retry: | |||
276 | up_write(&_hash_lock); | 276 | up_write(&_hash_lock); |
277 | } | 277 | } |
278 | 278 | ||
279 | static int dm_hash_rename(const char *old, const char *new) | 279 | static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) |
280 | { | 280 | { |
281 | char *new_name, *old_name; | 281 | char *new_name, *old_name; |
282 | struct hash_cell *hc; | 282 | struct hash_cell *hc; |
@@ -333,7 +333,7 @@ static int dm_hash_rename(const char *old, const char *new) | |||
333 | dm_table_put(table); | 333 | dm_table_put(table); |
334 | } | 334 | } |
335 | 335 | ||
336 | dm_kobject_uevent(hc->md); | 336 | dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie); |
337 | 337 | ||
338 | dm_put(hc->md); | 338 | dm_put(hc->md); |
339 | up_write(&_hash_lock); | 339 | up_write(&_hash_lock); |
@@ -680,6 +680,9 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size) | |||
680 | 680 | ||
681 | __hash_remove(hc); | 681 | __hash_remove(hc); |
682 | up_write(&_hash_lock); | 682 | up_write(&_hash_lock); |
683 | |||
684 | dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr); | ||
685 | |||
683 | dm_put(md); | 686 | dm_put(md); |
684 | param->data_size = 0; | 687 | param->data_size = 0; |
685 | return 0; | 688 | return 0; |
@@ -715,7 +718,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size) | |||
715 | return r; | 718 | return r; |
716 | 719 | ||
717 | param->data_size = 0; | 720 | param->data_size = 0; |
718 | return dm_hash_rename(param->name, new_name); | 721 | return dm_hash_rename(param->event_nr, param->name, new_name); |
719 | } | 722 | } |
720 | 723 | ||
721 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) | 724 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) |
@@ -842,8 +845,11 @@ static int do_resume(struct dm_ioctl *param) | |||
842 | if (dm_suspended(md)) | 845 | if (dm_suspended(md)) |
843 | r = dm_resume(md); | 846 | r = dm_resume(md); |
844 | 847 | ||
845 | if (!r) | 848 | |
849 | if (!r) { | ||
850 | dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr); | ||
846 | r = __dev_status(md, param); | 851 | r = __dev_status(md, param); |
852 | } | ||
847 | 853 | ||
848 | dm_put(md); | 854 | dm_put(md); |
849 | return r; | 855 | return r; |
@@ -1044,6 +1050,12 @@ static int populate_table(struct dm_table *table, | |||
1044 | next = spec->next; | 1050 | next = spec->next; |
1045 | } | 1051 | } |
1046 | 1052 | ||
1053 | r = dm_table_set_type(table); | ||
1054 | if (r) { | ||
1055 | DMWARN("unable to set table type"); | ||
1056 | return r; | ||
1057 | } | ||
1058 | |||
1047 | return dm_table_complete(table); | 1059 | return dm_table_complete(table); |
1048 | } | 1060 | } |
1049 | 1061 | ||
@@ -1089,6 +1101,13 @@ static int table_load(struct dm_ioctl *param, size_t param_size) | |||
1089 | goto out; | 1101 | goto out; |
1090 | } | 1102 | } |
1091 | 1103 | ||
1104 | r = dm_table_alloc_md_mempools(t); | ||
1105 | if (r) { | ||
1106 | DMWARN("unable to allocate mempools for this table"); | ||
1107 | dm_table_destroy(t); | ||
1108 | goto out; | ||
1109 | } | ||
1110 | |||
1092 | down_write(&_hash_lock); | 1111 | down_write(&_hash_lock); |
1093 | hc = dm_get_mdptr(md); | 1112 | hc = dm_get_mdptr(md); |
1094 | if (!hc || hc->md != md) { | 1113 | if (!hc || hc->md != md) { |
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 79fb53e51c70..9184b6deb868 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c | |||
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
53 | goto bad; | 53 | goto bad; |
54 | } | 54 | } |
55 | 55 | ||
56 | ti->num_flush_requests = 1; | ||
56 | ti->private = lc; | 57 | ti->private = lc; |
57 | return 0; | 58 | return 0; |
58 | 59 | ||
@@ -81,7 +82,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) | |||
81 | struct linear_c *lc = ti->private; | 82 | struct linear_c *lc = ti->private; |
82 | 83 | ||
83 | bio->bi_bdev = lc->dev->bdev; | 84 | bio->bi_bdev = lc->dev->bdev; |
84 | bio->bi_sector = linear_map_sector(ti, bio->bi_sector); | 85 | if (bio_sectors(bio)) |
86 | bio->bi_sector = linear_map_sector(ti, bio->bi_sector); | ||
85 | } | 87 | } |
86 | 88 | ||
87 | static int linear_map(struct dm_target *ti, struct bio *bio, | 89 | static int linear_map(struct dm_target *ti, struct bio *bio, |
@@ -132,9 +134,17 @@ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | |||
132 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | 134 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
133 | } | 135 | } |
134 | 136 | ||
137 | static int linear_iterate_devices(struct dm_target *ti, | ||
138 | iterate_devices_callout_fn fn, void *data) | ||
139 | { | ||
140 | struct linear_c *lc = ti->private; | ||
141 | |||
142 | return fn(ti, lc->dev, lc->start, data); | ||
143 | } | ||
144 | |||
135 | static struct target_type linear_target = { | 145 | static struct target_type linear_target = { |
136 | .name = "linear", | 146 | .name = "linear", |
137 | .version= {1, 0, 3}, | 147 | .version = {1, 1, 0}, |
138 | .module = THIS_MODULE, | 148 | .module = THIS_MODULE, |
139 | .ctr = linear_ctr, | 149 | .ctr = linear_ctr, |
140 | .dtr = linear_dtr, | 150 | .dtr = linear_dtr, |
@@ -142,6 +152,7 @@ static struct target_type linear_target = { | |||
142 | .status = linear_status, | 152 | .status = linear_status, |
143 | .ioctl = linear_ioctl, | 153 | .ioctl = linear_ioctl, |
144 | .merge = linear_merge, | 154 | .merge = linear_merge, |
155 | .iterate_devices = linear_iterate_devices, | ||
145 | }; | 156 | }; |
146 | 157 | ||
147 | int __init dm_linear_init(void) | 158 | int __init dm_linear_init(void) |
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c new file mode 100644 index 000000000000..e69b96560997 --- /dev/null +++ b/drivers/md/dm-log-userspace-base.c | |||
@@ -0,0 +1,696 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2009 Red Hat, Inc. | ||
3 | * | ||
4 | * This file is released under the LGPL. | ||
5 | */ | ||
6 | |||
7 | #include <linux/bio.h> | ||
8 | #include <linux/dm-dirty-log.h> | ||
9 | #include <linux/device-mapper.h> | ||
10 | #include <linux/dm-log-userspace.h> | ||
11 | |||
12 | #include "dm-log-userspace-transfer.h" | ||
13 | |||
14 | struct flush_entry { | ||
15 | int type; | ||
16 | region_t region; | ||
17 | struct list_head list; | ||
18 | }; | ||
19 | |||
20 | struct log_c { | ||
21 | struct dm_target *ti; | ||
22 | uint32_t region_size; | ||
23 | region_t region_count; | ||
24 | char uuid[DM_UUID_LEN]; | ||
25 | |||
26 | char *usr_argv_str; | ||
27 | uint32_t usr_argc; | ||
28 | |||
29 | /* | ||
30 | * in_sync_hint gets set when doing is_remote_recovering. It | ||
31 | * represents the first region that needs recovery. IOW, the | ||
32 | * first zero bit of sync_bits. This can be useful for to limit | ||
33 | * traffic for calls like is_remote_recovering and get_resync_work, | ||
34 | * but be take care in its use for anything else. | ||
35 | */ | ||
36 | uint64_t in_sync_hint; | ||
37 | |||
38 | spinlock_t flush_lock; | ||
39 | struct list_head flush_list; /* only for clear and mark requests */ | ||
40 | }; | ||
41 | |||
42 | static mempool_t *flush_entry_pool; | ||
43 | |||
44 | static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data) | ||
45 | { | ||
46 | return kmalloc(sizeof(struct flush_entry), gfp_mask); | ||
47 | } | ||
48 | |||
49 | static void flush_entry_free(void *element, void *pool_data) | ||
50 | { | ||
51 | kfree(element); | ||
52 | } | ||
53 | |||
54 | static int userspace_do_request(struct log_c *lc, const char *uuid, | ||
55 | int request_type, char *data, size_t data_size, | ||
56 | char *rdata, size_t *rdata_size) | ||
57 | { | ||
58 | int r; | ||
59 | |||
60 | /* | ||
61 | * If the server isn't there, -ESRCH is returned, | ||
62 | * and we must keep trying until the server is | ||
63 | * restored. | ||
64 | */ | ||
65 | retry: | ||
66 | r = dm_consult_userspace(uuid, request_type, data, | ||
67 | data_size, rdata, rdata_size); | ||
68 | |||
69 | if (r != -ESRCH) | ||
70 | return r; | ||
71 | |||
72 | DMERR(" Userspace log server not found."); | ||
73 | while (1) { | ||
74 | set_current_state(TASK_INTERRUPTIBLE); | ||
75 | schedule_timeout(2*HZ); | ||
76 | DMWARN("Attempting to contact userspace log server..."); | ||
77 | r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str, | ||
78 | strlen(lc->usr_argv_str) + 1, | ||
79 | NULL, NULL); | ||
80 | if (!r) | ||
81 | break; | ||
82 | } | ||
83 | DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete"); | ||
84 | r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL, | ||
85 | 0, NULL, NULL); | ||
86 | if (!r) | ||
87 | goto retry; | ||
88 | |||
89 | DMERR("Error trying to resume userspace log: %d", r); | ||
90 | |||
91 | return -ESRCH; | ||
92 | } | ||
93 | |||
94 | static int build_constructor_string(struct dm_target *ti, | ||
95 | unsigned argc, char **argv, | ||
96 | char **ctr_str) | ||
97 | { | ||
98 | int i, str_size; | ||
99 | char *str = NULL; | ||
100 | |||
101 | *ctr_str = NULL; | ||
102 | |||
103 | for (i = 0, str_size = 0; i < argc; i++) | ||
104 | str_size += strlen(argv[i]) + 1; /* +1 for space between args */ | ||
105 | |||
106 | str_size += 20; /* Max number of chars in a printed u64 number */ | ||
107 | |||
108 | str = kzalloc(str_size, GFP_KERNEL); | ||
109 | if (!str) { | ||
110 | DMWARN("Unable to allocate memory for constructor string"); | ||
111 | return -ENOMEM; | ||
112 | } | ||
113 | |||
114 | for (i = 0, str_size = 0; i < argc; i++) | ||
115 | str_size += sprintf(str + str_size, "%s ", argv[i]); | ||
116 | str_size += sprintf(str + str_size, "%llu", | ||
117 | (unsigned long long)ti->len); | ||
118 | |||
119 | *ctr_str = str; | ||
120 | return str_size; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * userspace_ctr | ||
125 | * | ||
126 | * argv contains: | ||
127 | * <UUID> <other args> | ||
128 | * Where 'other args' are the userspace-implementation-specific log | ||
129 | * arguments. An example might be: | ||
130 | * <UUID> clustered_disk <arg count> <log dev> <region_size> [[no]sync] | ||
131 | * | ||
132 | * So, this module will strip off the <UUID> for identification purposes | ||
133 | * when communicating with userspace about a log; but will pass on everything | ||
134 | * else. | ||
135 | */ | ||
136 | static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, | ||
137 | unsigned argc, char **argv) | ||
138 | { | ||
139 | int r = 0; | ||
140 | int str_size; | ||
141 | char *ctr_str = NULL; | ||
142 | struct log_c *lc = NULL; | ||
143 | uint64_t rdata; | ||
144 | size_t rdata_size = sizeof(rdata); | ||
145 | |||
146 | if (argc < 3) { | ||
147 | DMWARN("Too few arguments to userspace dirty log"); | ||
148 | return -EINVAL; | ||
149 | } | ||
150 | |||
151 | lc = kmalloc(sizeof(*lc), GFP_KERNEL); | ||
152 | if (!lc) { | ||
153 | DMWARN("Unable to allocate userspace log context."); | ||
154 | return -ENOMEM; | ||
155 | } | ||
156 | |||
157 | lc->ti = ti; | ||
158 | |||
159 | if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { | ||
160 | DMWARN("UUID argument too long."); | ||
161 | kfree(lc); | ||
162 | return -EINVAL; | ||
163 | } | ||
164 | |||
165 | strncpy(lc->uuid, argv[0], DM_UUID_LEN); | ||
166 | spin_lock_init(&lc->flush_lock); | ||
167 | INIT_LIST_HEAD(&lc->flush_list); | ||
168 | |||
169 | str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str); | ||
170 | if (str_size < 0) { | ||
171 | kfree(lc); | ||
172 | return str_size; | ||
173 | } | ||
174 | |||
175 | /* Send table string */ | ||
176 | r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR, | ||
177 | ctr_str, str_size, NULL, NULL); | ||
178 | |||
179 | if (r == -ESRCH) { | ||
180 | DMERR("Userspace log server not found"); | ||
181 | goto out; | ||
182 | } | ||
183 | |||
184 | /* Since the region size does not change, get it now */ | ||
185 | rdata_size = sizeof(rdata); | ||
186 | r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE, | ||
187 | NULL, 0, (char *)&rdata, &rdata_size); | ||
188 | |||
189 | if (r) { | ||
190 | DMERR("Failed to get region size of dirty log"); | ||
191 | goto out; | ||
192 | } | ||
193 | |||
194 | lc->region_size = (uint32_t)rdata; | ||
195 | lc->region_count = dm_sector_div_up(ti->len, lc->region_size); | ||
196 | |||
197 | out: | ||
198 | if (r) { | ||
199 | kfree(lc); | ||
200 | kfree(ctr_str); | ||
201 | } else { | ||
202 | lc->usr_argv_str = ctr_str; | ||
203 | lc->usr_argc = argc; | ||
204 | log->context = lc; | ||
205 | } | ||
206 | |||
207 | return r; | ||
208 | } | ||
209 | |||
210 | static void userspace_dtr(struct dm_dirty_log *log) | ||
211 | { | ||
212 | int r; | ||
213 | struct log_c *lc = log->context; | ||
214 | |||
215 | r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR, | ||
216 | NULL, 0, | ||
217 | NULL, NULL); | ||
218 | |||
219 | kfree(lc->usr_argv_str); | ||
220 | kfree(lc); | ||
221 | |||
222 | return; | ||
223 | } | ||
224 | |||
225 | static int userspace_presuspend(struct dm_dirty_log *log) | ||
226 | { | ||
227 | int r; | ||
228 | struct log_c *lc = log->context; | ||
229 | |||
230 | r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND, | ||
231 | NULL, 0, | ||
232 | NULL, NULL); | ||
233 | |||
234 | return r; | ||
235 | } | ||
236 | |||
237 | static int userspace_postsuspend(struct dm_dirty_log *log) | ||
238 | { | ||
239 | int r; | ||
240 | struct log_c *lc = log->context; | ||
241 | |||
242 | r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND, | ||
243 | NULL, 0, | ||
244 | NULL, NULL); | ||
245 | |||
246 | return r; | ||
247 | } | ||
248 | |||
249 | static int userspace_resume(struct dm_dirty_log *log) | ||
250 | { | ||
251 | int r; | ||
252 | struct log_c *lc = log->context; | ||
253 | |||
254 | lc->in_sync_hint = 0; | ||
255 | r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME, | ||
256 | NULL, 0, | ||
257 | NULL, NULL); | ||
258 | |||
259 | return r; | ||
260 | } | ||
261 | |||
262 | static uint32_t userspace_get_region_size(struct dm_dirty_log *log) | ||
263 | { | ||
264 | struct log_c *lc = log->context; | ||
265 | |||
266 | return lc->region_size; | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * userspace_is_clean | ||
271 | * | ||
272 | * Check whether a region is clean. If there is any sort of | ||
273 | * failure when consulting the server, we return not clean. | ||
274 | * | ||
275 | * Returns: 1 if clean, 0 otherwise | ||
276 | */ | ||
277 | static int userspace_is_clean(struct dm_dirty_log *log, region_t region) | ||
278 | { | ||
279 | int r; | ||
280 | uint64_t region64 = (uint64_t)region; | ||
281 | int64_t is_clean; | ||
282 | size_t rdata_size; | ||
283 | struct log_c *lc = log->context; | ||
284 | |||
285 | rdata_size = sizeof(is_clean); | ||
286 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN, | ||
287 | (char *)®ion64, sizeof(region64), | ||
288 | (char *)&is_clean, &rdata_size); | ||
289 | |||
290 | return (r) ? 0 : (int)is_clean; | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * userspace_in_sync | ||
295 | * | ||
296 | * Check if the region is in-sync. If there is any sort | ||
297 | * of failure when consulting the server, we assume that | ||
298 | * the region is not in sync. | ||
299 | * | ||
300 | * If 'can_block' is set, return immediately | ||
301 | * | ||
302 | * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK | ||
303 | */ | ||
304 | static int userspace_in_sync(struct dm_dirty_log *log, region_t region, | ||
305 | int can_block) | ||
306 | { | ||
307 | int r; | ||
308 | uint64_t region64 = region; | ||
309 | int64_t in_sync; | ||
310 | size_t rdata_size; | ||
311 | struct log_c *lc = log->context; | ||
312 | |||
313 | /* | ||
314 | * We can never respond directly - even if in_sync_hint is | ||
315 | * set. This is because another machine could see a device | ||
316 | * failure and mark the region out-of-sync. If we don't go | ||
317 | * to userspace to ask, we might think the region is in-sync | ||
318 | * and allow a read to pick up data that is stale. (This is | ||
319 | * very unlikely if a device actually fails; but it is very | ||
320 | * likely if a connection to one device from one machine fails.) | ||
321 | * | ||
322 | * There still might be a problem if the mirror caches the region | ||
323 | * state as in-sync... but then this call would not be made. So, | ||
324 | * that is a mirror problem. | ||
325 | */ | ||
326 | if (!can_block) | ||
327 | return -EWOULDBLOCK; | ||
328 | |||
329 | rdata_size = sizeof(in_sync); | ||
330 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC, | ||
331 | (char *)®ion64, sizeof(region64), | ||
332 | (char *)&in_sync, &rdata_size); | ||
333 | return (r) ? 0 : (int)in_sync; | ||
334 | } | ||
335 | |||
336 | /* | ||
337 | * userspace_flush | ||
338 | * | ||
339 | * This function is ok to block. | ||
340 | * The flush happens in two stages. First, it sends all | ||
341 | * clear/mark requests that are on the list. Then it | ||
342 | * tells the server to commit them. This gives the | ||
343 | * server a chance to optimise the commit, instead of | ||
344 | * doing it for every request. | ||
345 | * | ||
346 | * Additionally, we could implement another thread that | ||
347 | * sends the requests up to the server - reducing the | ||
348 | * load on flush. Then the flush would have fewer entries in | ||
349 | * the list and be responsible only for the final commit. | ||
350 | * | ||
351 | * Returns: 0 on success, < 0 on failure | ||
352 | */ | ||
353 | static int userspace_flush(struct dm_dirty_log *log) | ||
354 | { | ||
355 | int r = 0; | ||
356 | unsigned long flags; | ||
357 | struct log_c *lc = log->context; | ||
358 | LIST_HEAD(flush_list); | ||
359 | struct flush_entry *fe, *tmp_fe; | ||
360 | |||
361 | spin_lock_irqsave(&lc->flush_lock, flags); | ||
362 | list_splice_init(&lc->flush_list, &flush_list); | ||
363 | spin_unlock_irqrestore(&lc->flush_lock, flags); | ||
364 | |||
365 | if (list_empty(&flush_list)) | ||
366 | return 0; | ||
367 | |||
368 | /* | ||
369 | * FIXME: Count up requests, group request types, | ||
370 | * allocate memory to stick all requests in and | ||
371 | * send to server in one go. Failing the allocation, | ||
372 | * do it one by one. | ||
373 | */ | ||
374 | |||
375 | list_for_each_entry(fe, &flush_list, list) { | ||
376 | r = userspace_do_request(lc, lc->uuid, fe->type, | ||
377 | (char *)&fe->region, | ||
378 | sizeof(fe->region), | ||
379 | NULL, NULL); | ||
380 | if (r) | ||
381 | goto fail; | ||
382 | } | ||
383 | |||
384 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, | ||
385 | NULL, 0, NULL, NULL); | ||
386 | |||
387 | fail: | ||
388 | /* | ||
389 | * We can safely remove these entries, even if failure. | ||
390 | * Calling code will receive an error and will know that | ||
391 | * the log facility has failed. | ||
392 | */ | ||
393 | list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) { | ||
394 | list_del(&fe->list); | ||
395 | mempool_free(fe, flush_entry_pool); | ||
396 | } | ||
397 | |||
398 | if (r) | ||
399 | dm_table_event(lc->ti->table); | ||
400 | |||
401 | return r; | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * userspace_mark_region | ||
406 | * | ||
407 | * This function should avoid blocking unless absolutely required. | ||
408 | * (Memory allocation is valid for blocking.) | ||
409 | */ | ||
410 | static void userspace_mark_region(struct dm_dirty_log *log, region_t region) | ||
411 | { | ||
412 | unsigned long flags; | ||
413 | struct log_c *lc = log->context; | ||
414 | struct flush_entry *fe; | ||
415 | |||
416 | /* Wait for an allocation, but _never_ fail */ | ||
417 | fe = mempool_alloc(flush_entry_pool, GFP_NOIO); | ||
418 | BUG_ON(!fe); | ||
419 | |||
420 | spin_lock_irqsave(&lc->flush_lock, flags); | ||
421 | fe->type = DM_ULOG_MARK_REGION; | ||
422 | fe->region = region; | ||
423 | list_add(&fe->list, &lc->flush_list); | ||
424 | spin_unlock_irqrestore(&lc->flush_lock, flags); | ||
425 | |||
426 | return; | ||
427 | } | ||
428 | |||
429 | /* | ||
430 | * userspace_clear_region | ||
431 | * | ||
432 | * This function must not block. | ||
433 | * So, the alloc can't block. In the worst case, it is ok to | ||
434 | * fail. It would simply mean we can't clear the region. | ||
435 | * Does nothing to current sync context, but does mean | ||
436 | * the region will be re-sync'ed on a reload of the mirror | ||
437 | * even though it is in-sync. | ||
438 | */ | ||
439 | static void userspace_clear_region(struct dm_dirty_log *log, region_t region) | ||
440 | { | ||
441 | unsigned long flags; | ||
442 | struct log_c *lc = log->context; | ||
443 | struct flush_entry *fe; | ||
444 | |||
445 | /* | ||
446 | * If we fail to allocate, we skip the clearing of | ||
447 | * the region. This doesn't hurt us in any way, except | ||
448 | * to cause the region to be resync'ed when the | ||
449 | * device is activated next time. | ||
450 | */ | ||
451 | fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC); | ||
452 | if (!fe) { | ||
453 | DMERR("Failed to allocate memory to clear region."); | ||
454 | return; | ||
455 | } | ||
456 | |||
457 | spin_lock_irqsave(&lc->flush_lock, flags); | ||
458 | fe->type = DM_ULOG_CLEAR_REGION; | ||
459 | fe->region = region; | ||
460 | list_add(&fe->list, &lc->flush_list); | ||
461 | spin_unlock_irqrestore(&lc->flush_lock, flags); | ||
462 | |||
463 | return; | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * userspace_get_resync_work | ||
468 | * | ||
469 | * Get a region that needs recovery. It is valid to return | ||
470 | * an error for this function. | ||
471 | * | ||
472 | * Returns: 1 if region filled, 0 if no work, <0 on error | ||
473 | */ | ||
474 | static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region) | ||
475 | { | ||
476 | int r; | ||
477 | size_t rdata_size; | ||
478 | struct log_c *lc = log->context; | ||
479 | struct { | ||
480 | int64_t i; /* 64-bit for mixed-arch compatibility */ | ||
481 | region_t r; | ||
482 | } pkg; | ||
483 | |||
484 | if (lc->in_sync_hint >= lc->region_count) | ||
485 | return 0; | ||
486 | |||
487 | rdata_size = sizeof(pkg); | ||
488 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK, | ||
489 | NULL, 0, | ||
490 | (char *)&pkg, &rdata_size); | ||
491 | |||
492 | *region = pkg.r; | ||
493 | return (r) ? r : (int)pkg.i; | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * userspace_set_region_sync | ||
498 | * | ||
499 | * Set the sync status of a given region. This function | ||
500 | * must not fail. | ||
501 | */ | ||
502 | static void userspace_set_region_sync(struct dm_dirty_log *log, | ||
503 | region_t region, int in_sync) | ||
504 | { | ||
505 | int r; | ||
506 | struct log_c *lc = log->context; | ||
507 | struct { | ||
508 | region_t r; | ||
509 | int64_t i; | ||
510 | } pkg; | ||
511 | |||
512 | pkg.r = region; | ||
513 | pkg.i = (int64_t)in_sync; | ||
514 | |||
515 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC, | ||
516 | (char *)&pkg, sizeof(pkg), | ||
517 | NULL, NULL); | ||
518 | |||
519 | /* | ||
520 | * It would be nice to be able to report failures. | ||
521 | * However, it is easy enough to detect and resolve. | ||
522 | */ | ||
523 | return; | ||
524 | } | ||
525 | |||
526 | /* | ||
527 | * userspace_get_sync_count | ||
528 | * | ||
529 | * If there is any sort of failure when consulting the server, | ||
530 | * we assume that the sync count is zero. | ||
531 | * | ||
532 | * Returns: sync count on success, 0 on failure | ||
533 | */ | ||
534 | static region_t userspace_get_sync_count(struct dm_dirty_log *log) | ||
535 | { | ||
536 | int r; | ||
537 | size_t rdata_size; | ||
538 | uint64_t sync_count; | ||
539 | struct log_c *lc = log->context; | ||
540 | |||
541 | rdata_size = sizeof(sync_count); | ||
542 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT, | ||
543 | NULL, 0, | ||
544 | (char *)&sync_count, &rdata_size); | ||
545 | |||
546 | if (r) | ||
547 | return 0; | ||
548 | |||
549 | if (sync_count >= lc->region_count) | ||
550 | lc->in_sync_hint = lc->region_count; | ||
551 | |||
552 | return (region_t)sync_count; | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * userspace_status | ||
557 | * | ||
558 | * Returns: amount of space consumed | ||
559 | */ | ||
560 | static int userspace_status(struct dm_dirty_log *log, status_type_t status_type, | ||
561 | char *result, unsigned maxlen) | ||
562 | { | ||
563 | int r = 0; | ||
564 | size_t sz = (size_t)maxlen; | ||
565 | struct log_c *lc = log->context; | ||
566 | |||
567 | switch (status_type) { | ||
568 | case STATUSTYPE_INFO: | ||
569 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO, | ||
570 | NULL, 0, | ||
571 | result, &sz); | ||
572 | |||
573 | if (r) { | ||
574 | sz = 0; | ||
575 | DMEMIT("%s 1 COM_FAILURE", log->type->name); | ||
576 | } | ||
577 | break; | ||
578 | case STATUSTYPE_TABLE: | ||
579 | sz = 0; | ||
580 | DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1, | ||
581 | lc->uuid, lc->usr_argv_str); | ||
582 | break; | ||
583 | } | ||
584 | return (r) ? 0 : (int)sz; | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * userspace_is_remote_recovering | ||
589 | * | ||
590 | * Returns: 1 if region recovering, 0 otherwise | ||
591 | */ | ||
592 | static int userspace_is_remote_recovering(struct dm_dirty_log *log, | ||
593 | region_t region) | ||
594 | { | ||
595 | int r; | ||
596 | uint64_t region64 = region; | ||
597 | struct log_c *lc = log->context; | ||
598 | static unsigned long long limit; | ||
599 | struct { | ||
600 | int64_t is_recovering; | ||
601 | uint64_t in_sync_hint; | ||
602 | } pkg; | ||
603 | size_t rdata_size = sizeof(pkg); | ||
604 | |||
605 | /* | ||
606 | * Once the mirror has been reported to be in-sync, | ||
607 | * it will never again ask for recovery work. So, | ||
608 | * we can safely say there is not a remote machine | ||
609 | * recovering if the device is in-sync. (in_sync_hint | ||
610 | * must be reset at resume time.) | ||
611 | */ | ||
612 | if (region < lc->in_sync_hint) | ||
613 | return 0; | ||
614 | else if (jiffies < limit) | ||
615 | return 1; | ||
616 | |||
617 | limit = jiffies + (HZ / 4); | ||
618 | r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING, | ||
619 | (char *)®ion64, sizeof(region64), | ||
620 | (char *)&pkg, &rdata_size); | ||
621 | if (r) | ||
622 | return 1; | ||
623 | |||
624 | lc->in_sync_hint = pkg.in_sync_hint; | ||
625 | |||
626 | return (int)pkg.is_recovering; | ||
627 | } | ||
628 | |||
629 | static struct dm_dirty_log_type _userspace_type = { | ||
630 | .name = "userspace", | ||
631 | .module = THIS_MODULE, | ||
632 | .ctr = userspace_ctr, | ||
633 | .dtr = userspace_dtr, | ||
634 | .presuspend = userspace_presuspend, | ||
635 | .postsuspend = userspace_postsuspend, | ||
636 | .resume = userspace_resume, | ||
637 | .get_region_size = userspace_get_region_size, | ||
638 | .is_clean = userspace_is_clean, | ||
639 | .in_sync = userspace_in_sync, | ||
640 | .flush = userspace_flush, | ||
641 | .mark_region = userspace_mark_region, | ||
642 | .clear_region = userspace_clear_region, | ||
643 | .get_resync_work = userspace_get_resync_work, | ||
644 | .set_region_sync = userspace_set_region_sync, | ||
645 | .get_sync_count = userspace_get_sync_count, | ||
646 | .status = userspace_status, | ||
647 | .is_remote_recovering = userspace_is_remote_recovering, | ||
648 | }; | ||
649 | |||
650 | static int __init userspace_dirty_log_init(void) | ||
651 | { | ||
652 | int r = 0; | ||
653 | |||
654 | flush_entry_pool = mempool_create(100, flush_entry_alloc, | ||
655 | flush_entry_free, NULL); | ||
656 | |||
657 | if (!flush_entry_pool) { | ||
658 | DMWARN("Unable to create flush_entry_pool: No memory."); | ||
659 | return -ENOMEM; | ||
660 | } | ||
661 | |||
662 | r = dm_ulog_tfr_init(); | ||
663 | if (r) { | ||
664 | DMWARN("Unable to initialize userspace log communications"); | ||
665 | mempool_destroy(flush_entry_pool); | ||
666 | return r; | ||
667 | } | ||
668 | |||
669 | r = dm_dirty_log_type_register(&_userspace_type); | ||
670 | if (r) { | ||
671 | DMWARN("Couldn't register userspace dirty log type"); | ||
672 | dm_ulog_tfr_exit(); | ||
673 | mempool_destroy(flush_entry_pool); | ||
674 | return r; | ||
675 | } | ||
676 | |||
677 | DMINFO("version 1.0.0 loaded"); | ||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | static void __exit userspace_dirty_log_exit(void) | ||
682 | { | ||
683 | dm_dirty_log_type_unregister(&_userspace_type); | ||
684 | dm_ulog_tfr_exit(); | ||
685 | mempool_destroy(flush_entry_pool); | ||
686 | |||
687 | DMINFO("version 1.0.0 unloaded"); | ||
688 | return; | ||
689 | } | ||
690 | |||
691 | module_init(userspace_dirty_log_init); | ||
692 | module_exit(userspace_dirty_log_exit); | ||
693 | |||
694 | MODULE_DESCRIPTION(DM_NAME " userspace dirty log link"); | ||
695 | MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>"); | ||
696 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c new file mode 100644 index 000000000000..0ca1ee768a1f --- /dev/null +++ b/drivers/md/dm-log-userspace-transfer.c | |||
@@ -0,0 +1,276 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2009 Red Hat, Inc. | ||
3 | * | ||
4 | * This file is released under the LGPL. | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <net/sock.h> | ||
10 | #include <linux/workqueue.h> | ||
11 | #include <linux/connector.h> | ||
12 | #include <linux/device-mapper.h> | ||
13 | #include <linux/dm-log-userspace.h> | ||
14 | |||
15 | #include "dm-log-userspace-transfer.h" | ||
16 | |||
17 | static uint32_t dm_ulog_seq; | ||
18 | |||
19 | /* | ||
20 | * Netlink/Connector is an unreliable protocol. How long should | ||
21 | * we wait for a response before assuming it was lost and retrying? | ||
22 | * (If we do receive a response after this time, it will be discarded | ||
23 | * and the response to the resent request will be waited for.) | ||
24 | */ | ||
25 | #define DM_ULOG_RETRY_TIMEOUT (15 * HZ) | ||
26 | |||
27 | /* | ||
28 | * Pre-allocated space for speed | ||
29 | */ | ||
30 | #define DM_ULOG_PREALLOCED_SIZE 512 | ||
31 | static struct cn_msg *prealloced_cn_msg; | ||
32 | static struct dm_ulog_request *prealloced_ulog_tfr; | ||
33 | |||
34 | static struct cb_id ulog_cn_id = { | ||
35 | .idx = CN_IDX_DM, | ||
36 | .val = CN_VAL_DM_USERSPACE_LOG | ||
37 | }; | ||
38 | |||
39 | static DEFINE_MUTEX(dm_ulog_lock); | ||
40 | |||
41 | struct receiving_pkg { | ||
42 | struct list_head list; | ||
43 | struct completion complete; | ||
44 | |||
45 | uint32_t seq; | ||
46 | |||
47 | int error; | ||
48 | size_t *data_size; | ||
49 | char *data; | ||
50 | }; | ||
51 | |||
52 | static DEFINE_SPINLOCK(receiving_list_lock); | ||
53 | static struct list_head receiving_list; | ||
54 | |||
55 | static int dm_ulog_sendto_server(struct dm_ulog_request *tfr) | ||
56 | { | ||
57 | int r; | ||
58 | struct cn_msg *msg = prealloced_cn_msg; | ||
59 | |||
60 | memset(msg, 0, sizeof(struct cn_msg)); | ||
61 | |||
62 | msg->id.idx = ulog_cn_id.idx; | ||
63 | msg->id.val = ulog_cn_id.val; | ||
64 | msg->ack = 0; | ||
65 | msg->seq = tfr->seq; | ||
66 | msg->len = sizeof(struct dm_ulog_request) + tfr->data_size; | ||
67 | |||
68 | r = cn_netlink_send(msg, 0, gfp_any()); | ||
69 | |||
70 | return r; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Parameters for this function can be either msg or tfr, but not | ||
75 | * both. This function fills in the reply for a waiting request. | ||
76 | * If just msg is given, then the reply is simply an ACK from userspace | ||
77 | * that the request was received. | ||
78 | * | ||
79 | * Returns: 0 on success, -ENOENT on failure | ||
80 | */ | ||
81 | static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr) | ||
82 | { | ||
83 | uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0; | ||
84 | struct receiving_pkg *pkg; | ||
85 | |||
86 | /* | ||
87 | * The 'receiving_pkg' entries in this list are allocated | ||
88 | * on the stack in 'dm_consult_userspace'. | ||
89 | * Each process that is waiting for a reply from the user | ||
90 | * space server will have an entry in this list. | ||
91 | * | ||
92 | * We are safe to do it this way because the stack space | ||
93 | * is unique to each process, but still addressable by | ||
94 | * other processes. | ||
95 | */ | ||
96 | list_for_each_entry(pkg, &receiving_list, list) { | ||
97 | if (rtn_seq != pkg->seq) | ||
98 | continue; | ||
99 | |||
100 | if (msg) { | ||
101 | pkg->error = -msg->ack; | ||
102 | /* | ||
103 | * If we are trying again, we will need to know our | ||
104 | * storage capacity. Otherwise, along with the | ||
105 | * error code, we make explicit that we have no data. | ||
106 | */ | ||
107 | if (pkg->error != -EAGAIN) | ||
108 | *(pkg->data_size) = 0; | ||
109 | } else if (tfr->data_size > *(pkg->data_size)) { | ||
110 | DMERR("Insufficient space to receive package [%u] " | ||
111 | "(%u vs %lu)", tfr->request_type, | ||
112 | tfr->data_size, *(pkg->data_size)); | ||
113 | |||
114 | *(pkg->data_size) = 0; | ||
115 | pkg->error = -ENOSPC; | ||
116 | } else { | ||
117 | pkg->error = tfr->error; | ||
118 | memcpy(pkg->data, tfr->data, tfr->data_size); | ||
119 | *(pkg->data_size) = tfr->data_size; | ||
120 | } | ||
121 | complete(&pkg->complete); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | return -ENOENT; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * This is the connector callback that delivers data | ||
130 | * that was sent from userspace. | ||
131 | */ | ||
132 | static void cn_ulog_callback(void *data) | ||
133 | { | ||
134 | struct cn_msg *msg = (struct cn_msg *)data; | ||
135 | struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); | ||
136 | |||
137 | spin_lock(&receiving_list_lock); | ||
138 | if (msg->len == 0) | ||
139 | fill_pkg(msg, NULL); | ||
140 | else if (msg->len < sizeof(*tfr)) | ||
141 | DMERR("Incomplete message received (expected %u, got %u): [%u]", | ||
142 | (unsigned)sizeof(*tfr), msg->len, msg->seq); | ||
143 | else | ||
144 | fill_pkg(NULL, tfr); | ||
145 | spin_unlock(&receiving_list_lock); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * dm_consult_userspace | ||
150 | * @uuid: log's uuid (must be DM_UUID_LEN in size) | ||
151 | * @request_type: found in include/linux/dm-log-userspace.h | ||
152 | * @data: data to tx to the server | ||
153 | * @data_size: size of data in bytes | ||
154 | * @rdata: place to put return data from server | ||
155 | * @rdata_size: value-result (amount of space given/amount of space used) | ||
156 | * | ||
157 | * rdata_size is undefined on failure. | ||
158 | * | ||
159 | * Memory used to communicate with userspace is zeroed | ||
160 | * before populating to ensure that no unwanted bits leak | ||
161 | * from kernel space to user-space. All userspace log communications | ||
162 | * between kernel and user space go through this function. | ||
163 | * | ||
164 | * Returns: 0 on success, -EXXX on failure | ||
165 | **/ | ||
166 | int dm_consult_userspace(const char *uuid, int request_type, | ||
167 | char *data, size_t data_size, | ||
168 | char *rdata, size_t *rdata_size) | ||
169 | { | ||
170 | int r = 0; | ||
171 | size_t dummy = 0; | ||
172 | int overhead_size = | ||
173 | sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg); | ||
174 | struct dm_ulog_request *tfr = prealloced_ulog_tfr; | ||
175 | struct receiving_pkg pkg; | ||
176 | |||
177 | if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { | ||
178 | DMINFO("Size of tfr exceeds preallocated size"); | ||
179 | return -EINVAL; | ||
180 | } | ||
181 | |||
182 | if (!rdata_size) | ||
183 | rdata_size = &dummy; | ||
184 | resend: | ||
185 | /* | ||
186 | * We serialize the sending of requests so we can | ||
187 | * use the preallocated space. | ||
188 | */ | ||
189 | mutex_lock(&dm_ulog_lock); | ||
190 | |||
191 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); | ||
192 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); | ||
193 | tfr->seq = dm_ulog_seq++; | ||
194 | |||
195 | /* | ||
196 | * Must be valid request type (all other bits set to | ||
197 | * zero). This reserves other bits for possible future | ||
198 | * use. | ||
199 | */ | ||
200 | tfr->request_type = request_type & DM_ULOG_REQUEST_MASK; | ||
201 | |||
202 | tfr->data_size = data_size; | ||
203 | if (data && data_size) | ||
204 | memcpy(tfr->data, data, data_size); | ||
205 | |||
206 | memset(&pkg, 0, sizeof(pkg)); | ||
207 | init_completion(&pkg.complete); | ||
208 | pkg.seq = tfr->seq; | ||
209 | pkg.data_size = rdata_size; | ||
210 | pkg.data = rdata; | ||
211 | spin_lock(&receiving_list_lock); | ||
212 | list_add(&(pkg.list), &receiving_list); | ||
213 | spin_unlock(&receiving_list_lock); | ||
214 | |||
215 | r = dm_ulog_sendto_server(tfr); | ||
216 | |||
217 | mutex_unlock(&dm_ulog_lock); | ||
218 | |||
219 | if (r) { | ||
220 | DMERR("Unable to send log request [%u] to userspace: %d", | ||
221 | request_type, r); | ||
222 | spin_lock(&receiving_list_lock); | ||
223 | list_del_init(&(pkg.list)); | ||
224 | spin_unlock(&receiving_list_lock); | ||
225 | |||
226 | goto out; | ||
227 | } | ||
228 | |||
229 | r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT); | ||
230 | spin_lock(&receiving_list_lock); | ||
231 | list_del_init(&(pkg.list)); | ||
232 | spin_unlock(&receiving_list_lock); | ||
233 | if (!r) { | ||
234 | DMWARN("[%s] Request timed out: [%u/%u] - retrying", | ||
235 | (strlen(uuid) > 8) ? | ||
236 | (uuid + (strlen(uuid) - 8)) : (uuid), | ||
237 | request_type, pkg.seq); | ||
238 | goto resend; | ||
239 | } | ||
240 | |||
241 | r = pkg.error; | ||
242 | if (r == -EAGAIN) | ||
243 | goto resend; | ||
244 | |||
245 | out: | ||
246 | return r; | ||
247 | } | ||
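Users of this routine, such as the userspace log module earlier in this patch, pass the log's uuid plus a request type from include/linux/dm-log-userspace.h and a small return buffer. A hedged sketch of such a wrapper (the function name is illustrative; the call pattern mirrors userspace_do_request() shown earlier):

	/* Illustrative only: ask the server whether one region is clean. */
	static int example_is_clean(struct log_c *lc, uint64_t region)
	{
		int r;
		int64_t is_clean;
		size_t rdata_size = sizeof(is_clean);

		r = dm_consult_userspace(lc->uuid, DM_ULOG_IS_CLEAN,
					 (char *)&region, sizeof(region),
					 (char *)&is_clean, &rdata_size);

		return r ? 0 : (int)is_clean;
	}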
248 | |||
249 | int dm_ulog_tfr_init(void) | ||
250 | { | ||
251 | int r; | ||
252 | void *prealloced; | ||
253 | |||
254 | INIT_LIST_HEAD(&receiving_list); | ||
255 | |||
256 | prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL); | ||
257 | if (!prealloced) | ||
258 | return -ENOMEM; | ||
259 | |||
260 | prealloced_cn_msg = prealloced; | ||
261 | prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg); | ||
262 | |||
263 | r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback); | ||
264 | if (r) { | ||
265 | cn_del_callback(&ulog_cn_id); | ||
266 | return r; | ||
267 | } | ||
268 | |||
269 | return 0; | ||
270 | } | ||
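Both the connector header and the log request share the single buffer allocated above and are transmitted back-to-back by dm_ulog_sendto_server(). A rough picture of the layout (for illustration; placement follows the assignments above):

	prealloced (DM_ULOG_PREALLOCED_SIZE bytes)
	+---------------+--------------------------+---------------------+
	| struct cn_msg | struct dm_ulog_request   | tfr->data[]         |
	+---------------+--------------------------+---------------------+
	^ prealloced_cn_msg
	                ^ prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg)

	msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;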
271 | |||
272 | void dm_ulog_tfr_exit(void) | ||
273 | { | ||
274 | cn_del_callback(&ulog_cn_id); | ||
275 | kfree(prealloced_cn_msg); | ||
276 | } | ||
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h new file mode 100644 index 000000000000..c26d8e4e2710 --- /dev/null +++ b/drivers/md/dm-log-userspace-transfer.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2009 Red Hat, Inc. | ||
3 | * | ||
4 | * This file is released under the LGPL. | ||
5 | */ | ||
6 | |||
7 | #ifndef __DM_LOG_USERSPACE_TRANSFER_H__ | ||
8 | #define __DM_LOG_USERSPACE_TRANSFER_H__ | ||
9 | |||
10 | #define DM_MSG_PREFIX "dm-log-userspace" | ||
11 | |||
12 | int dm_ulog_tfr_init(void); | ||
13 | void dm_ulog_tfr_exit(void); | ||
14 | int dm_consult_userspace(const char *uuid, int request_type, | ||
15 | char *data, size_t data_size, | ||
16 | char *rdata, size_t *rdata_size); | ||
17 | |||
18 | #endif /* __DM_LOG_USERSPACE_TRANSFER_H__ */ | ||
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 6fa8ccf91c70..9443896ede07 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -412,11 +412,12 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, | |||
412 | /* | 412 | /* |
413 | * Buffer holds both header and bitset. | 413 | * Buffer holds both header and bitset. |
414 | */ | 414 | */ |
415 | buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + | 415 | buf_size = |
416 | bitset_size, | 416 | dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size, |
417 | ti->limits.logical_block_size); | 417 | bdev_logical_block_size(lc->header_location. |
418 | bdev)); | ||
418 | 419 | ||
419 | if (buf_size > dev->bdev->bd_inode->i_size) { | 420 | if (buf_size > i_size_read(dev->bdev->bd_inode)) { |
420 | DMWARN("log device %s too small: need %llu bytes", | 421 | DMWARN("log device %s too small: need %llu bytes", |
421 | dev->name, (unsigned long long)buf_size); | 422 | dev->name, (unsigned long long)buf_size); |
422 | kfree(lc); | 423 | kfree(lc); |
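The rounding granularity changes here from the target's logical block size to that of the log device itself, since the header/bitset buffer is read from and written to that device. As a worked example (assuming LOG_OFFSET is 2 sectors, i.e. a 1024-byte header area): with a 2500-byte bitset on a log device advertising 4096-byte logical blocks,

	buf_size = dm_round_up((2 << SECTOR_SHIFT) + 2500, 4096)
	         = dm_round_up(3524, 4096)
	         = 4096

whereas rounding to a 512-byte value would have produced 3584 bytes, which is not a whole multiple of that device's block size.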
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 6a386ab4f7eb..c70604a20897 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/device-mapper.h> | 8 | #include <linux/device-mapper.h> |
9 | 9 | ||
10 | #include "dm-path-selector.h" | 10 | #include "dm-path-selector.h" |
11 | #include "dm-bio-record.h" | ||
12 | #include "dm-uevent.h" | 11 | #include "dm-uevent.h" |
13 | 12 | ||
14 | #include <linux/ctype.h> | 13 | #include <linux/ctype.h> |
@@ -35,6 +34,7 @@ struct pgpath { | |||
35 | 34 | ||
36 | struct dm_path path; | 35 | struct dm_path path; |
37 | struct work_struct deactivate_path; | 36 | struct work_struct deactivate_path; |
37 | struct work_struct activate_path; | ||
38 | }; | 38 | }; |
39 | 39 | ||
40 | #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) | 40 | #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) |
@@ -64,8 +64,6 @@ struct multipath { | |||
64 | spinlock_t lock; | 64 | spinlock_t lock; |
65 | 65 | ||
66 | const char *hw_handler_name; | 66 | const char *hw_handler_name; |
67 | struct work_struct activate_path; | ||
68 | struct pgpath *pgpath_to_activate; | ||
69 | unsigned nr_priority_groups; | 67 | unsigned nr_priority_groups; |
70 | struct list_head priority_groups; | 68 | struct list_head priority_groups; |
71 | unsigned pg_init_required; /* pg_init needs calling? */ | 69 | unsigned pg_init_required; /* pg_init needs calling? */ |
@@ -84,7 +82,7 @@ struct multipath { | |||
84 | unsigned pg_init_count; /* Number of times pg_init called */ | 82 | unsigned pg_init_count; /* Number of times pg_init called */ |
85 | 83 | ||
86 | struct work_struct process_queued_ios; | 84 | struct work_struct process_queued_ios; |
87 | struct bio_list queued_ios; | 85 | struct list_head queued_ios; |
88 | unsigned queue_size; | 86 | unsigned queue_size; |
89 | 87 | ||
90 | struct work_struct trigger_event; | 88 | struct work_struct trigger_event; |
@@ -101,7 +99,7 @@ struct multipath { | |||
101 | */ | 99 | */ |
102 | struct dm_mpath_io { | 100 | struct dm_mpath_io { |
103 | struct pgpath *pgpath; | 101 | struct pgpath *pgpath; |
104 | struct dm_bio_details details; | 102 | size_t nr_bytes; |
105 | }; | 103 | }; |
106 | 104 | ||
107 | typedef int (*action_fn) (struct pgpath *pgpath); | 105 | typedef int (*action_fn) (struct pgpath *pgpath); |
@@ -128,6 +126,7 @@ static struct pgpath *alloc_pgpath(void) | |||
128 | if (pgpath) { | 126 | if (pgpath) { |
129 | pgpath->is_active = 1; | 127 | pgpath->is_active = 1; |
130 | INIT_WORK(&pgpath->deactivate_path, deactivate_path); | 128 | INIT_WORK(&pgpath->deactivate_path, deactivate_path); |
129 | INIT_WORK(&pgpath->activate_path, activate_path); | ||
131 | } | 130 | } |
132 | 131 | ||
133 | return pgpath; | 132 | return pgpath; |
@@ -160,7 +159,6 @@ static struct priority_group *alloc_priority_group(void) | |||
160 | 159 | ||
161 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | 160 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) |
162 | { | 161 | { |
163 | unsigned long flags; | ||
164 | struct pgpath *pgpath, *tmp; | 162 | struct pgpath *pgpath, *tmp; |
165 | struct multipath *m = ti->private; | 163 | struct multipath *m = ti->private; |
166 | 164 | ||
@@ -169,10 +167,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | |||
169 | if (m->hw_handler_name) | 167 | if (m->hw_handler_name) |
170 | scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); | 168 | scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); |
171 | dm_put_device(ti, pgpath->path.dev); | 169 | dm_put_device(ti, pgpath->path.dev); |
172 | spin_lock_irqsave(&m->lock, flags); | ||
173 | if (m->pgpath_to_activate == pgpath) | ||
174 | m->pgpath_to_activate = NULL; | ||
175 | spin_unlock_irqrestore(&m->lock, flags); | ||
176 | free_pgpath(pgpath); | 170 | free_pgpath(pgpath); |
177 | } | 171 | } |
178 | } | 172 | } |
@@ -198,11 +192,11 @@ static struct multipath *alloc_multipath(struct dm_target *ti) | |||
198 | m = kzalloc(sizeof(*m), GFP_KERNEL); | 192 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
199 | if (m) { | 193 | if (m) { |
200 | INIT_LIST_HEAD(&m->priority_groups); | 194 | INIT_LIST_HEAD(&m->priority_groups); |
195 | INIT_LIST_HEAD(&m->queued_ios); | ||
201 | spin_lock_init(&m->lock); | 196 | spin_lock_init(&m->lock); |
202 | m->queue_io = 1; | 197 | m->queue_io = 1; |
203 | INIT_WORK(&m->process_queued_ios, process_queued_ios); | 198 | INIT_WORK(&m->process_queued_ios, process_queued_ios); |
204 | INIT_WORK(&m->trigger_event, trigger_event); | 199 | INIT_WORK(&m->trigger_event, trigger_event); |
205 | INIT_WORK(&m->activate_path, activate_path); | ||
206 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); | 200 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); |
207 | if (!m->mpio_pool) { | 201 | if (!m->mpio_pool) { |
208 | kfree(m); | 202 | kfree(m); |
@@ -250,11 +244,12 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath) | |||
250 | m->pg_init_count = 0; | 244 | m->pg_init_count = 0; |
251 | } | 245 | } |
252 | 246 | ||
253 | static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg) | 247 | static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg, |
248 | size_t nr_bytes) | ||
254 | { | 249 | { |
255 | struct dm_path *path; | 250 | struct dm_path *path; |
256 | 251 | ||
257 | path = pg->ps.type->select_path(&pg->ps, &m->repeat_count); | 252 | path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes); |
258 | if (!path) | 253 | if (!path) |
259 | return -ENXIO; | 254 | return -ENXIO; |
260 | 255 | ||
@@ -266,7 +261,7 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg) | |||
266 | return 0; | 261 | return 0; |
267 | } | 262 | } |
268 | 263 | ||
269 | static void __choose_pgpath(struct multipath *m) | 264 | static void __choose_pgpath(struct multipath *m, size_t nr_bytes) |
270 | { | 265 | { |
271 | struct priority_group *pg; | 266 | struct priority_group *pg; |
272 | unsigned bypassed = 1; | 267 | unsigned bypassed = 1; |
@@ -278,12 +273,12 @@ static void __choose_pgpath(struct multipath *m) | |||
278 | if (m->next_pg) { | 273 | if (m->next_pg) { |
279 | pg = m->next_pg; | 274 | pg = m->next_pg; |
280 | m->next_pg = NULL; | 275 | m->next_pg = NULL; |
281 | if (!__choose_path_in_pg(m, pg)) | 276 | if (!__choose_path_in_pg(m, pg, nr_bytes)) |
282 | return; | 277 | return; |
283 | } | 278 | } |
284 | 279 | ||
285 | /* Don't change PG until it has no remaining paths */ | 280 | /* Don't change PG until it has no remaining paths */ |
286 | if (m->current_pg && !__choose_path_in_pg(m, m->current_pg)) | 281 | if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes)) |
287 | return; | 282 | return; |
288 | 283 | ||
289 | /* | 284 | /* |
@@ -295,7 +290,7 @@ static void __choose_pgpath(struct multipath *m) | |||
295 | list_for_each_entry(pg, &m->priority_groups, list) { | 290 | list_for_each_entry(pg, &m->priority_groups, list) { |
296 | if (pg->bypassed == bypassed) | 291 | if (pg->bypassed == bypassed) |
297 | continue; | 292 | continue; |
298 | if (!__choose_path_in_pg(m, pg)) | 293 | if (!__choose_path_in_pg(m, pg, nr_bytes)) |
299 | return; | 294 | return; |
300 | } | 295 | } |
301 | } while (bypassed--); | 296 | } while (bypassed--); |
@@ -322,19 +317,21 @@ static int __must_push_back(struct multipath *m) | |||
322 | dm_noflush_suspending(m->ti)); | 317 | dm_noflush_suspending(m->ti)); |
323 | } | 318 | } |
324 | 319 | ||
325 | static int map_io(struct multipath *m, struct bio *bio, | 320 | static int map_io(struct multipath *m, struct request *clone, |
326 | struct dm_mpath_io *mpio, unsigned was_queued) | 321 | struct dm_mpath_io *mpio, unsigned was_queued) |
327 | { | 322 | { |
328 | int r = DM_MAPIO_REMAPPED; | 323 | int r = DM_MAPIO_REMAPPED; |
324 | size_t nr_bytes = blk_rq_bytes(clone); | ||
329 | unsigned long flags; | 325 | unsigned long flags; |
330 | struct pgpath *pgpath; | 326 | struct pgpath *pgpath; |
327 | struct block_device *bdev; | ||
331 | 328 | ||
332 | spin_lock_irqsave(&m->lock, flags); | 329 | spin_lock_irqsave(&m->lock, flags); |
333 | 330 | ||
334 | /* Do we need to select a new pgpath? */ | 331 | /* Do we need to select a new pgpath? */ |
335 | if (!m->current_pgpath || | 332 | if (!m->current_pgpath || |
336 | (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) | 333 | (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) |
337 | __choose_pgpath(m); | 334 | __choose_pgpath(m, nr_bytes); |
338 | 335 | ||
339 | pgpath = m->current_pgpath; | 336 | pgpath = m->current_pgpath; |
340 | 337 | ||
@@ -344,21 +341,28 @@ static int map_io(struct multipath *m, struct bio *bio, | |||
344 | if ((pgpath && m->queue_io) || | 341 | if ((pgpath && m->queue_io) || |
345 | (!pgpath && m->queue_if_no_path)) { | 342 | (!pgpath && m->queue_if_no_path)) { |
346 | /* Queue for the daemon to resubmit */ | 343 | /* Queue for the daemon to resubmit */ |
347 | bio_list_add(&m->queued_ios, bio); | 344 | list_add_tail(&clone->queuelist, &m->queued_ios); |
348 | m->queue_size++; | 345 | m->queue_size++; |
349 | if ((m->pg_init_required && !m->pg_init_in_progress) || | 346 | if ((m->pg_init_required && !m->pg_init_in_progress) || |
350 | !m->queue_io) | 347 | !m->queue_io) |
351 | queue_work(kmultipathd, &m->process_queued_ios); | 348 | queue_work(kmultipathd, &m->process_queued_ios); |
352 | pgpath = NULL; | 349 | pgpath = NULL; |
353 | r = DM_MAPIO_SUBMITTED; | 350 | r = DM_MAPIO_SUBMITTED; |
354 | } else if (pgpath) | 351 | } else if (pgpath) { |
355 | bio->bi_bdev = pgpath->path.dev->bdev; | 352 | bdev = pgpath->path.dev->bdev; |
356 | else if (__must_push_back(m)) | 353 | clone->q = bdev_get_queue(bdev); |
354 | clone->rq_disk = bdev->bd_disk; | ||
355 | } else if (__must_push_back(m)) | ||
357 | r = DM_MAPIO_REQUEUE; | 356 | r = DM_MAPIO_REQUEUE; |
358 | else | 357 | else |
359 | r = -EIO; /* Failed */ | 358 | r = -EIO; /* Failed */ |
360 | 359 | ||
361 | mpio->pgpath = pgpath; | 360 | mpio->pgpath = pgpath; |
361 | mpio->nr_bytes = nr_bytes; | ||
362 | |||
363 | if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io) | ||
364 | pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, | ||
365 | nr_bytes); | ||
362 | 366 | ||
363 | spin_unlock_irqrestore(&m->lock, flags); | 367 | spin_unlock_irqrestore(&m->lock, flags); |
364 | 368 | ||
@@ -396,30 +400,31 @@ static void dispatch_queued_ios(struct multipath *m) | |||
396 | { | 400 | { |
397 | int r; | 401 | int r; |
398 | unsigned long flags; | 402 | unsigned long flags; |
399 | struct bio *bio = NULL, *next; | ||
400 | struct dm_mpath_io *mpio; | 403 | struct dm_mpath_io *mpio; |
401 | union map_info *info; | 404 | union map_info *info; |
405 | struct request *clone, *n; | ||
406 | LIST_HEAD(cl); | ||
402 | 407 | ||
403 | spin_lock_irqsave(&m->lock, flags); | 408 | spin_lock_irqsave(&m->lock, flags); |
404 | bio = bio_list_get(&m->queued_ios); | 409 | list_splice_init(&m->queued_ios, &cl); |
405 | spin_unlock_irqrestore(&m->lock, flags); | 410 | spin_unlock_irqrestore(&m->lock, flags); |
406 | 411 | ||
407 | while (bio) { | 412 | list_for_each_entry_safe(clone, n, &cl, queuelist) { |
408 | next = bio->bi_next; | 413 | list_del_init(&clone->queuelist); |
409 | bio->bi_next = NULL; | ||
410 | 414 | ||
411 | info = dm_get_mapinfo(bio); | 415 | info = dm_get_rq_mapinfo(clone); |
412 | mpio = info->ptr; | 416 | mpio = info->ptr; |
413 | 417 | ||
414 | r = map_io(m, bio, mpio, 1); | 418 | r = map_io(m, clone, mpio, 1); |
415 | if (r < 0) | 419 | if (r < 0) { |
416 | bio_endio(bio, r); | 420 | mempool_free(mpio, m->mpio_pool); |
417 | else if (r == DM_MAPIO_REMAPPED) | 421 | dm_kill_unmapped_request(clone, r); |
418 | generic_make_request(bio); | 422 | } else if (r == DM_MAPIO_REMAPPED) |
419 | else if (r == DM_MAPIO_REQUEUE) | 423 | dm_dispatch_request(clone); |
420 | bio_endio(bio, -EIO); | 424 | else if (r == DM_MAPIO_REQUEUE) { |
421 | 425 | mempool_free(mpio, m->mpio_pool); | |
422 | bio = next; | 426 | dm_requeue_unmapped_request(clone); |
427 | } | ||
423 | } | 428 | } |
424 | } | 429 | } |
425 | 430 | ||
@@ -427,8 +432,8 @@ static void process_queued_ios(struct work_struct *work) | |||
427 | { | 432 | { |
428 | struct multipath *m = | 433 | struct multipath *m = |
429 | container_of(work, struct multipath, process_queued_ios); | 434 | container_of(work, struct multipath, process_queued_ios); |
430 | struct pgpath *pgpath = NULL; | 435 | struct pgpath *pgpath = NULL, *tmp; |
431 | unsigned init_required = 0, must_queue = 1; | 436 | unsigned must_queue = 1; |
432 | unsigned long flags; | 437 | unsigned long flags; |
433 | 438 | ||
434 | spin_lock_irqsave(&m->lock, flags); | 439 | spin_lock_irqsave(&m->lock, flags); |
@@ -437,7 +442,7 @@ static void process_queued_ios(struct work_struct *work) | |||
437 | goto out; | 442 | goto out; |
438 | 443 | ||
439 | if (!m->current_pgpath) | 444 | if (!m->current_pgpath) |
440 | __choose_pgpath(m); | 445 | __choose_pgpath(m, 0); |
441 | 446 | ||
442 | pgpath = m->current_pgpath; | 447 | pgpath = m->current_pgpath; |
443 | 448 | ||
@@ -446,19 +451,15 @@ static void process_queued_ios(struct work_struct *work) | |||
446 | must_queue = 0; | 451 | must_queue = 0; |
447 | 452 | ||
448 | if (m->pg_init_required && !m->pg_init_in_progress && pgpath) { | 453 | if (m->pg_init_required && !m->pg_init_in_progress && pgpath) { |
449 | m->pgpath_to_activate = pgpath; | ||
450 | m->pg_init_count++; | 454 | m->pg_init_count++; |
451 | m->pg_init_required = 0; | 455 | m->pg_init_required = 0; |
452 | m->pg_init_in_progress = 1; | 456 | list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) { |
453 | init_required = 1; | 457 | if (queue_work(kmpath_handlerd, &tmp->activate_path)) |
458 | m->pg_init_in_progress++; | ||
459 | } | ||
454 | } | 460 | } |
455 | |||
456 | out: | 461 | out: |
457 | spin_unlock_irqrestore(&m->lock, flags); | 462 | spin_unlock_irqrestore(&m->lock, flags); |
458 | |||
459 | if (init_required) | ||
460 | queue_work(kmpath_handlerd, &m->activate_path); | ||
461 | |||
462 | if (!must_queue) | 463 | if (!must_queue) |
463 | dispatch_queued_ios(m); | 464 | dispatch_queued_ios(m); |
464 | } | 465 | } |
@@ -553,6 +554,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg, | |||
553 | return -EINVAL; | 554 | return -EINVAL; |
554 | } | 555 | } |
555 | 556 | ||
557 | if (ps_argc > as->argc) { | ||
558 | dm_put_path_selector(pst); | ||
559 | ti->error = "not enough arguments for path selector"; | ||
560 | return -EINVAL; | ||
561 | } | ||
562 | |||
556 | r = pst->create(&pg->ps, ps_argc, as->argv); | 563 | r = pst->create(&pg->ps, ps_argc, as->argv); |
557 | if (r) { | 564 | if (r) { |
558 | dm_put_path_selector(pst); | 565 | dm_put_path_selector(pst); |
@@ -591,9 +598,20 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | |||
591 | } | 598 | } |
592 | 599 | ||
593 | if (m->hw_handler_name) { | 600 | if (m->hw_handler_name) { |
594 | r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev), | 601 | struct request_queue *q = bdev_get_queue(p->path.dev->bdev); |
595 | m->hw_handler_name); | 602 | |
603 | r = scsi_dh_attach(q, m->hw_handler_name); | ||
604 | if (r == -EBUSY) { | ||
605 | /* | ||
606 | * Already attached to a different hw_handler, | ||
607 | * try to reattach with the correct one. | ||
608 | */ | ||
609 | scsi_dh_detach(q); | ||
610 | r = scsi_dh_attach(q, m->hw_handler_name); | ||
611 | } | ||
612 | |||
596 | if (r < 0) { | 613 | if (r < 0) { |
614 | ti->error = "error attaching hardware handler"; | ||
597 | dm_put_device(ti, p->path.dev); | 615 | dm_put_device(ti, p->path.dev); |
598 | goto bad; | 616 | goto bad; |
599 | } | 617 | } |
@@ -699,6 +717,11 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) | |||
699 | if (!hw_argc) | 717 | if (!hw_argc) |
700 | return 0; | 718 | return 0; |
701 | 719 | ||
720 | if (hw_argc > as->argc) { | ||
721 | ti->error = "not enough arguments for hardware handler"; | ||
722 | return -EINVAL; | ||
723 | } | ||
724 | |||
702 | m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); | 725 | m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); |
703 | request_module("scsi_dh_%s", m->hw_handler_name); | 726 | request_module("scsi_dh_%s", m->hw_handler_name); |
704 | if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { | 727 | if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { |
@@ -823,6 +846,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, | |||
823 | goto bad; | 846 | goto bad; |
824 | } | 847 | } |
825 | 848 | ||
849 | ti->num_flush_requests = 1; | ||
850 | |||
826 | return 0; | 851 | return 0; |
827 | 852 | ||
828 | bad: | 853 | bad: |
@@ -836,25 +861,29 @@ static void multipath_dtr(struct dm_target *ti) | |||
836 | 861 | ||
837 | flush_workqueue(kmpath_handlerd); | 862 | flush_workqueue(kmpath_handlerd); |
838 | flush_workqueue(kmultipathd); | 863 | flush_workqueue(kmultipathd); |
864 | flush_scheduled_work(); | ||
839 | free_multipath(m); | 865 | free_multipath(m); |
840 | } | 866 | } |
841 | 867 | ||
842 | /* | 868 | /* |
843 | * Map bios, recording original fields for later in case we have to resubmit | 869 | * Map cloned requests |
844 | */ | 870 | */ |
845 | static int multipath_map(struct dm_target *ti, struct bio *bio, | 871 | static int multipath_map(struct dm_target *ti, struct request *clone, |
846 | union map_info *map_context) | 872 | union map_info *map_context) |
847 | { | 873 | { |
848 | int r; | 874 | int r; |
849 | struct dm_mpath_io *mpio; | 875 | struct dm_mpath_io *mpio; |
850 | struct multipath *m = (struct multipath *) ti->private; | 876 | struct multipath *m = (struct multipath *) ti->private; |
851 | 877 | ||
852 | mpio = mempool_alloc(m->mpio_pool, GFP_NOIO); | 878 | mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC); |
853 | dm_bio_record(&mpio->details, bio); | 879 | if (!mpio) |
880 | /* ENOMEM, requeue */ | ||
881 | return DM_MAPIO_REQUEUE; | ||
882 | memset(mpio, 0, sizeof(*mpio)); | ||
854 | 883 | ||
855 | map_context->ptr = mpio; | 884 | map_context->ptr = mpio; |
856 | bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); | 885 | clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; |
857 | r = map_io(m, bio, mpio, 0); | 886 | r = map_io(m, clone, mpio, 0); |
858 | if (r < 0 || r == DM_MAPIO_REQUEUE) | 887 | if (r < 0 || r == DM_MAPIO_REQUEUE) |
859 | mempool_free(mpio, m->mpio_pool); | 888 | mempool_free(mpio, m->mpio_pool); |
860 | 889 | ||
@@ -924,9 +953,13 @@ static int reinstate_path(struct pgpath *pgpath) | |||
924 | 953 | ||
925 | pgpath->is_active = 1; | 954 | pgpath->is_active = 1; |
926 | 955 | ||
927 | m->current_pgpath = NULL; | 956 | if (!m->nr_valid_paths++ && m->queue_size) { |
928 | if (!m->nr_valid_paths++ && m->queue_size) | 957 | m->current_pgpath = NULL; |
929 | queue_work(kmultipathd, &m->process_queued_ios); | 958 | queue_work(kmultipathd, &m->process_queued_ios); |
959 | } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { | ||
960 | if (queue_work(kmpath_handlerd, &pgpath->activate_path)) | ||
961 | m->pg_init_in_progress++; | ||
962 | } | ||
930 | 963 | ||
931 | dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, | 964 | dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, |
932 | pgpath->path.dev->name, m->nr_valid_paths); | 965 | pgpath->path.dev->name, m->nr_valid_paths); |
@@ -1102,87 +1135,70 @@ static void pg_init_done(struct dm_path *path, int errors) | |||
1102 | 1135 | ||
1103 | spin_lock_irqsave(&m->lock, flags); | 1136 | spin_lock_irqsave(&m->lock, flags); |
1104 | if (errors) { | 1137 | if (errors) { |
1105 | DMERR("Could not failover device. Error %d.", errors); | 1138 | if (pgpath == m->current_pgpath) { |
1106 | m->current_pgpath = NULL; | 1139 | DMERR("Could not failover device. Error %d.", errors); |
1107 | m->current_pg = NULL; | 1140 | m->current_pgpath = NULL; |
1141 | m->current_pg = NULL; | ||
1142 | } | ||
1108 | } else if (!m->pg_init_required) { | 1143 | } else if (!m->pg_init_required) { |
1109 | m->queue_io = 0; | 1144 | m->queue_io = 0; |
1110 | pg->bypassed = 0; | 1145 | pg->bypassed = 0; |
1111 | } | 1146 | } |
1112 | 1147 | ||
1113 | m->pg_init_in_progress = 0; | 1148 | m->pg_init_in_progress--; |
1114 | queue_work(kmultipathd, &m->process_queued_ios); | 1149 | if (!m->pg_init_in_progress) |
1150 | queue_work(kmultipathd, &m->process_queued_ios); | ||
1115 | spin_unlock_irqrestore(&m->lock, flags); | 1151 | spin_unlock_irqrestore(&m->lock, flags); |
1116 | } | 1152 | } |
1117 | 1153 | ||
1118 | static void activate_path(struct work_struct *work) | 1154 | static void activate_path(struct work_struct *work) |
1119 | { | 1155 | { |
1120 | int ret; | 1156 | int ret; |
1121 | struct multipath *m = | 1157 | struct pgpath *pgpath = |
1122 | container_of(work, struct multipath, activate_path); | 1158 | container_of(work, struct pgpath, activate_path); |
1123 | struct dm_path *path; | ||
1124 | unsigned long flags; | ||
1125 | 1159 | ||
1126 | spin_lock_irqsave(&m->lock, flags); | 1160 | ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev)); |
1127 | path = &m->pgpath_to_activate->path; | 1161 | pg_init_done(&pgpath->path, ret); |
1128 | m->pgpath_to_activate = NULL; | ||
1129 | spin_unlock_irqrestore(&m->lock, flags); | ||
1130 | if (!path) | ||
1131 | return; | ||
1132 | ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); | ||
1133 | pg_init_done(path, ret); | ||
1134 | } | 1162 | } |
1135 | 1163 | ||
1136 | /* | 1164 | /* |
1137 | * end_io handling | 1165 | * end_io handling |
1138 | */ | 1166 | */ |
1139 | static int do_end_io(struct multipath *m, struct bio *bio, | 1167 | static int do_end_io(struct multipath *m, struct request *clone, |
1140 | int error, struct dm_mpath_io *mpio) | 1168 | int error, struct dm_mpath_io *mpio) |
1141 | { | 1169 | { |
1170 | /* | ||
1171 | * We don't queue any clone request inside the multipath target | ||
1172 | * during end I/O handling, since those clone requests don't have | ||
1173 | * bio clones. If we queue them inside the multipath target, | ||
1174 | * we need to make bio clones, that requires memory allocation. | ||
1175 | * (See drivers/md/dm.c:end_clone_bio() about why the clone requests | ||
1176 | * don't have bio clones.) | ||
1177 | * Instead of queueing the clone request here, we queue the original | ||
1178 | * request into dm core, which will remake a clone request and | ||
1179 | * clone bios for it and resubmit it later. | ||
1180 | */ | ||
1181 | int r = DM_ENDIO_REQUEUE; | ||
1142 | unsigned long flags; | 1182 | unsigned long flags; |
1143 | 1183 | ||
1144 | if (!error) | 1184 | if (!error && !clone->errors) |
1145 | return 0; /* I/O complete */ | 1185 | return 0; /* I/O complete */ |
1146 | 1186 | ||
1147 | if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) | ||
1148 | return error; | ||
1149 | |||
1150 | if (error == -EOPNOTSUPP) | 1187 | if (error == -EOPNOTSUPP) |
1151 | return error; | 1188 | return error; |
1152 | 1189 | ||
1153 | spin_lock_irqsave(&m->lock, flags); | ||
1154 | if (!m->nr_valid_paths) { | ||
1155 | if (__must_push_back(m)) { | ||
1156 | spin_unlock_irqrestore(&m->lock, flags); | ||
1157 | return DM_ENDIO_REQUEUE; | ||
1158 | } else if (!m->queue_if_no_path) { | ||
1159 | spin_unlock_irqrestore(&m->lock, flags); | ||
1160 | return -EIO; | ||
1161 | } else { | ||
1162 | spin_unlock_irqrestore(&m->lock, flags); | ||
1163 | goto requeue; | ||
1164 | } | ||
1165 | } | ||
1166 | spin_unlock_irqrestore(&m->lock, flags); | ||
1167 | |||
1168 | if (mpio->pgpath) | 1190 | if (mpio->pgpath) |
1169 | fail_path(mpio->pgpath); | 1191 | fail_path(mpio->pgpath); |
1170 | 1192 | ||
1171 | requeue: | ||
1172 | dm_bio_restore(&mpio->details, bio); | ||
1173 | |||
1174 | /* queue for the daemon to resubmit or fail */ | ||
1175 | spin_lock_irqsave(&m->lock, flags); | 1193 | spin_lock_irqsave(&m->lock, flags); |
1176 | bio_list_add(&m->queued_ios, bio); | 1194 | if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m)) |
1177 | m->queue_size++; | 1195 | r = -EIO; |
1178 | if (!m->queue_io) | ||
1179 | queue_work(kmultipathd, &m->process_queued_ios); | ||
1180 | spin_unlock_irqrestore(&m->lock, flags); | 1196 | spin_unlock_irqrestore(&m->lock, flags); |
1181 | 1197 | ||
1182 | return DM_ENDIO_INCOMPLETE; /* io not complete */ | 1198 | return r; |
1183 | } | 1199 | } |
1184 | 1200 | ||
1185 | static int multipath_end_io(struct dm_target *ti, struct bio *bio, | 1201 | static int multipath_end_io(struct dm_target *ti, struct request *clone, |
1186 | int error, union map_info *map_context) | 1202 | int error, union map_info *map_context) |
1187 | { | 1203 | { |
1188 | struct multipath *m = ti->private; | 1204 | struct multipath *m = ti->private; |
@@ -1191,14 +1207,13 @@ static int multipath_end_io(struct dm_target *ti, struct bio *bio, | |||
1191 | struct path_selector *ps; | 1207 | struct path_selector *ps; |
1192 | int r; | 1208 | int r; |
1193 | 1209 | ||
1194 | r = do_end_io(m, bio, error, mpio); | 1210 | r = do_end_io(m, clone, error, mpio); |
1195 | if (pgpath) { | 1211 | if (pgpath) { |
1196 | ps = &pgpath->pg->ps; | 1212 | ps = &pgpath->pg->ps; |
1197 | if (ps->type->end_io) | 1213 | if (ps->type->end_io) |
1198 | ps->type->end_io(ps, &pgpath->path); | 1214 | ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); |
1199 | } | 1215 | } |
1200 | if (r != DM_ENDIO_INCOMPLETE) | 1216 | mempool_free(mpio, m->mpio_pool); |
1201 | mempool_free(mpio, m->mpio_pool); | ||
1202 | 1217 | ||
1203 | return r; | 1218 | return r; |
1204 | } | 1219 | } |
@@ -1411,7 +1426,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, | |||
1411 | spin_lock_irqsave(&m->lock, flags); | 1426 | spin_lock_irqsave(&m->lock, flags); |
1412 | 1427 | ||
1413 | if (!m->current_pgpath) | 1428 | if (!m->current_pgpath) |
1414 | __choose_pgpath(m); | 1429 | __choose_pgpath(m, 0); |
1415 | 1430 | ||
1416 | if (m->current_pgpath) { | 1431 | if (m->current_pgpath) { |
1417 | bdev = m->current_pgpath->path.dev->bdev; | 1432 | bdev = m->current_pgpath->path.dev->bdev; |
@@ -1428,22 +1443,113 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, | |||
1428 | return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); | 1443 | return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); |
1429 | } | 1444 | } |
1430 | 1445 | ||
1446 | static int multipath_iterate_devices(struct dm_target *ti, | ||
1447 | iterate_devices_callout_fn fn, void *data) | ||
1448 | { | ||
1449 | struct multipath *m = ti->private; | ||
1450 | struct priority_group *pg; | ||
1451 | struct pgpath *p; | ||
1452 | int ret = 0; | ||
1453 | |||
1454 | list_for_each_entry(pg, &m->priority_groups, list) { | ||
1455 | list_for_each_entry(p, &pg->pgpaths, list) { | ||
1456 | ret = fn(ti, p->path.dev, ti->begin, data); | ||
1457 | if (ret) | ||
1458 | goto out; | ||
1459 | } | ||
1460 | } | ||
1461 | |||
1462 | out: | ||
1463 | return ret; | ||
1464 | } | ||
1465 | |||
1466 | static int __pgpath_busy(struct pgpath *pgpath) | ||
1467 | { | ||
1468 | struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); | ||
1469 | |||
1470 | return dm_underlying_device_busy(q); | ||
1471 | } | ||
1472 | |||
1473 | /* | ||
1474 | * We return "busy" only when we can map I/Os but underlying devices | ||
1475 | * are busy (so even if we map I/Os now, the I/Os will wait on | ||
1476 | * the underlying queue). | ||
1477 | * In other words, if we want to kill I/Os or queue them inside us | ||
1478 | * due to map unavailability, we don't return "busy". Otherwise, | ||
1479 | * dm core won't give us the I/Os and we can't do what we want. | ||
1480 | */ | ||
1481 | static int multipath_busy(struct dm_target *ti) | ||
1482 | { | ||
1483 | int busy = 0, has_active = 0; | ||
1484 | struct multipath *m = ti->private; | ||
1485 | struct priority_group *pg; | ||
1486 | struct pgpath *pgpath; | ||
1487 | unsigned long flags; | ||
1488 | |||
1489 | spin_lock_irqsave(&m->lock, flags); | ||
1490 | |||
1491 | /* Guess which priority_group will be used at next mapping time */ | ||
1492 | if (unlikely(!m->current_pgpath && m->next_pg)) | ||
1493 | pg = m->next_pg; | ||
1494 | else if (likely(m->current_pg)) | ||
1495 | pg = m->current_pg; | ||
1496 | else | ||
1497 | /* | ||
1498 | * We don't know which pg will be used at next mapping time. | ||
1499 | * We don't call __choose_pgpath() here to avoid triggering | ||
1500 | * pg_init just by busy checking. | ||
1501 | * So we don't know whether underlying devices we will be using | ||
1502 | * at next mapping time are busy or not. Just try mapping. | ||
1503 | */ | ||
1504 | goto out; | ||
1505 | |||
1506 | /* | ||
1507 | * If there is one non-busy active path at least, the path selector | ||
1508 | * will be able to select it. So we consider such a pg as not busy. | ||
1509 | */ | ||
1510 | busy = 1; | ||
1511 | list_for_each_entry(pgpath, &pg->pgpaths, list) | ||
1512 | if (pgpath->is_active) { | ||
1513 | has_active = 1; | ||
1514 | |||
1515 | if (!__pgpath_busy(pgpath)) { | ||
1516 | busy = 0; | ||
1517 | break; | ||
1518 | } | ||
1519 | } | ||
1520 | |||
1521 | if (!has_active) | ||
1522 | /* | ||
1523 | * No active path in this pg, so this pg won't be used and | ||
1524 | * the current_pg will be changed at next mapping time. | ||
1525 | * We need to try mapping to determine it. | ||
1526 | */ | ||
1527 | busy = 0; | ||
1528 | |||
1529 | out: | ||
1530 | spin_unlock_irqrestore(&m->lock, flags); | ||
1531 | |||
1532 | return busy; | ||
1533 | } | ||
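A path group is therefore reported busy only when it could service I/O but every usable path underneath is already saturated. The request-based dm core is expected to consult this hook from its request function and hold the request back instead of mapping it, roughly like the following (a hedged paraphrase, not the actual dm.c code):

	if (ti->type->busy && ti->type->busy(ti))
		goto delay_and_retry;	/* leave the request queued, try again later */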
1534 | |||
1431 | /*----------------------------------------------------------------- | 1535 | /*----------------------------------------------------------------- |
1432 | * Module setup | 1536 | * Module setup |
1433 | *---------------------------------------------------------------*/ | 1537 | *---------------------------------------------------------------*/ |
1434 | static struct target_type multipath_target = { | 1538 | static struct target_type multipath_target = { |
1435 | .name = "multipath", | 1539 | .name = "multipath", |
1436 | .version = {1, 0, 5}, | 1540 | .version = {1, 1, 0}, |
1437 | .module = THIS_MODULE, | 1541 | .module = THIS_MODULE, |
1438 | .ctr = multipath_ctr, | 1542 | .ctr = multipath_ctr, |
1439 | .dtr = multipath_dtr, | 1543 | .dtr = multipath_dtr, |
1440 | .map = multipath_map, | 1544 | .map_rq = multipath_map, |
1441 | .end_io = multipath_end_io, | 1545 | .rq_end_io = multipath_end_io, |
1442 | .presuspend = multipath_presuspend, | 1546 | .presuspend = multipath_presuspend, |
1443 | .resume = multipath_resume, | 1547 | .resume = multipath_resume, |
1444 | .status = multipath_status, | 1548 | .status = multipath_status, |
1445 | .message = multipath_message, | 1549 | .message = multipath_message, |
1446 | .ioctl = multipath_ioctl, | 1550 | .ioctl = multipath_ioctl, |
1551 | .iterate_devices = multipath_iterate_devices, | ||
1552 | .busy = multipath_busy, | ||
1447 | }; | 1553 | }; |
1448 | 1554 | ||
1449 | static int __init dm_multipath_init(void) | 1555 | static int __init dm_multipath_init(void) |
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h index 27357b85d73d..e7d1fa8b0459 100644 --- a/drivers/md/dm-path-selector.h +++ b/drivers/md/dm-path-selector.h | |||
@@ -56,7 +56,8 @@ struct path_selector_type { | |||
56 | * the path fails. | 56 | * the path fails. |
57 | */ | 57 | */ |
58 | struct dm_path *(*select_path) (struct path_selector *ps, | 58 | struct dm_path *(*select_path) (struct path_selector *ps, |
59 | unsigned *repeat_count); | 59 | unsigned *repeat_count, |
60 | size_t nr_bytes); | ||
60 | 61 | ||
61 | /* | 62 | /* |
62 | * Notify the selector that a path has failed. | 63 | * Notify the selector that a path has failed. |
@@ -75,7 +76,10 @@ struct path_selector_type { | |||
75 | int (*status) (struct path_selector *ps, struct dm_path *path, | 76 | int (*status) (struct path_selector *ps, struct dm_path *path, |
76 | status_type_t type, char *result, unsigned int maxlen); | 77 | status_type_t type, char *result, unsigned int maxlen); |
77 | 78 | ||
78 | int (*end_io) (struct path_selector *ps, struct dm_path *path); | 79 | int (*start_io) (struct path_selector *ps, struct dm_path *path, |
80 | size_t nr_bytes); | ||
81 | int (*end_io) (struct path_selector *ps, struct dm_path *path, | ||
82 | size_t nr_bytes); | ||
79 | }; | 83 | }; |
80 | 84 | ||
81 | /* Register a path selector */ | 85 | /* Register a path selector */ |
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c new file mode 100644 index 000000000000..f92b6cea9d9c --- /dev/null +++ b/drivers/md/dm-queue-length.c | |||
@@ -0,0 +1,263 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved. | ||
3 | * Copyright (C) 2006-2009 NEC Corporation. | ||
4 | * | ||
5 | * dm-queue-length.c | ||
6 | * | ||
7 | * Module Author: Stefan Bader, IBM | ||
8 | * Modified by: Kiyoshi Ueda, NEC | ||
9 | * | ||
10 | * This file is released under the GPL. | ||
11 | * | ||
12 | * queue-length path selector - choose a path with the least number of | ||
13 | * in-flight I/Os. | ||
14 | */ | ||
15 | |||
16 | #include "dm.h" | ||
17 | #include "dm-path-selector.h" | ||
18 | |||
19 | #include <linux/slab.h> | ||
20 | #include <linux/ctype.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <asm/atomic.h> | ||
24 | |||
25 | #define DM_MSG_PREFIX "multipath queue-length" | ||
26 | #define QL_MIN_IO 128 | ||
27 | #define QL_VERSION "0.1.0" | ||
28 | |||
29 | struct selector { | ||
30 | struct list_head valid_paths; | ||
31 | struct list_head failed_paths; | ||
32 | }; | ||
33 | |||
34 | struct path_info { | ||
35 | struct list_head list; | ||
36 | struct dm_path *path; | ||
37 | unsigned repeat_count; | ||
38 | atomic_t qlen; /* the number of in-flight I/Os */ | ||
39 | }; | ||
40 | |||
41 | static struct selector *alloc_selector(void) | ||
42 | { | ||
43 | struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
44 | |||
45 | if (s) { | ||
46 | INIT_LIST_HEAD(&s->valid_paths); | ||
47 | INIT_LIST_HEAD(&s->failed_paths); | ||
48 | } | ||
49 | |||
50 | return s; | ||
51 | } | ||
52 | |||
53 | static int ql_create(struct path_selector *ps, unsigned argc, char **argv) | ||
54 | { | ||
55 | struct selector *s = alloc_selector(); | ||
56 | |||
57 | if (!s) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | ps->context = s; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static void ql_free_paths(struct list_head *paths) | ||
65 | { | ||
66 | struct path_info *pi, *next; | ||
67 | |||
68 | list_for_each_entry_safe(pi, next, paths, list) { | ||
69 | list_del(&pi->list); | ||
70 | kfree(pi); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | static void ql_destroy(struct path_selector *ps) | ||
75 | { | ||
76 | struct selector *s = ps->context; | ||
77 | |||
78 | ql_free_paths(&s->valid_paths); | ||
79 | ql_free_paths(&s->failed_paths); | ||
80 | kfree(s); | ||
81 | ps->context = NULL; | ||
82 | } | ||
83 | |||
84 | static int ql_status(struct path_selector *ps, struct dm_path *path, | ||
85 | status_type_t type, char *result, unsigned maxlen) | ||
86 | { | ||
87 | unsigned sz = 0; | ||
88 | struct path_info *pi; | ||
89 | |||
90 | /* When called with NULL path, return selector status/args. */ | ||
91 | if (!path) | ||
92 | DMEMIT("0 "); | ||
93 | else { | ||
94 | pi = path->pscontext; | ||
95 | |||
96 | switch (type) { | ||
97 | case STATUSTYPE_INFO: | ||
98 | DMEMIT("%d ", atomic_read(&pi->qlen)); | ||
99 | break; | ||
100 | case STATUSTYPE_TABLE: | ||
101 | DMEMIT("%u ", pi->repeat_count); | ||
102 | break; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | return sz; | ||
107 | } | ||
108 | |||
109 | static int ql_add_path(struct path_selector *ps, struct dm_path *path, | ||
110 | int argc, char **argv, char **error) | ||
111 | { | ||
112 | struct selector *s = ps->context; | ||
113 | struct path_info *pi; | ||
114 | unsigned repeat_count = QL_MIN_IO; | ||
115 | |||
116 | /* | ||
117 | * Arguments: [<repeat_count>] | ||
118 | * <repeat_count>: The number of I/Os before switching path. | ||
119 | * If not given, default (QL_MIN_IO) is used. | ||
120 | */ | ||
121 | if (argc > 1) { | ||
122 | *error = "queue-length ps: incorrect number of arguments"; | ||
123 | return -EINVAL; | ||
124 | } | ||
125 | |||
126 | if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) { | ||
127 | *error = "queue-length ps: invalid repeat count"; | ||
128 | return -EINVAL; | ||
129 | } | ||
130 | |||
131 | /* Allocate the path information structure */ | ||
132 | pi = kmalloc(sizeof(*pi), GFP_KERNEL); | ||
133 | if (!pi) { | ||
134 | *error = "queue-length ps: Error allocating path information"; | ||
135 | return -ENOMEM; | ||
136 | } | ||
137 | |||
138 | pi->path = path; | ||
139 | pi->repeat_count = repeat_count; | ||
140 | atomic_set(&pi->qlen, 0); | ||
141 | |||
142 | path->pscontext = pi; | ||
143 | |||
144 | list_add_tail(&pi->list, &s->valid_paths); | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static void ql_fail_path(struct path_selector *ps, struct dm_path *path) | ||
150 | { | ||
151 | struct selector *s = ps->context; | ||
152 | struct path_info *pi = path->pscontext; | ||
153 | |||
154 | list_move(&pi->list, &s->failed_paths); | ||
155 | } | ||
156 | |||
157 | static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path) | ||
158 | { | ||
159 | struct selector *s = ps->context; | ||
160 | struct path_info *pi = path->pscontext; | ||
161 | |||
162 | list_move_tail(&pi->list, &s->valid_paths); | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Select a path having the minimum number of in-flight I/Os | ||
169 | */ | ||
170 | static struct dm_path *ql_select_path(struct path_selector *ps, | ||
171 | unsigned *repeat_count, size_t nr_bytes) | ||
172 | { | ||
173 | struct selector *s = ps->context; | ||
174 | struct path_info *pi = NULL, *best = NULL; | ||
175 | |||
176 | if (list_empty(&s->valid_paths)) | ||
177 | return NULL; | ||
178 | |||
179 | /* Change preferred (first in list) path to evenly balance. */ | ||
180 | list_move_tail(s->valid_paths.next, &s->valid_paths); | ||
181 | |||
182 | list_for_each_entry(pi, &s->valid_paths, list) { | ||
183 | if (!best || | ||
184 | (atomic_read(&pi->qlen) < atomic_read(&best->qlen))) | ||
185 | best = pi; | ||
186 | |||
187 | if (!atomic_read(&best->qlen)) | ||
188 | break; | ||
189 | } | ||
190 | |||
191 | if (!best) | ||
192 | return NULL; | ||
193 | |||
194 | *repeat_count = best->repeat_count; | ||
195 | |||
196 | return best->path; | ||
197 | } | ||
198 | |||
199 | static int ql_start_io(struct path_selector *ps, struct dm_path *path, | ||
200 | size_t nr_bytes) | ||
201 | { | ||
202 | struct path_info *pi = path->pscontext; | ||
203 | |||
204 | atomic_inc(&pi->qlen); | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int ql_end_io(struct path_selector *ps, struct dm_path *path, | ||
210 | size_t nr_bytes) | ||
211 | { | ||
212 | struct path_info *pi = path->pscontext; | ||
213 | |||
214 | atomic_dec(&pi->qlen); | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static struct path_selector_type ql_ps = { | ||
220 | .name = "queue-length", | ||
221 | .module = THIS_MODULE, | ||
222 | .table_args = 1, | ||
223 | .info_args = 1, | ||
224 | .create = ql_create, | ||
225 | .destroy = ql_destroy, | ||
226 | .status = ql_status, | ||
227 | .add_path = ql_add_path, | ||
228 | .fail_path = ql_fail_path, | ||
229 | .reinstate_path = ql_reinstate_path, | ||
230 | .select_path = ql_select_path, | ||
231 | .start_io = ql_start_io, | ||
232 | .end_io = ql_end_io, | ||
233 | }; | ||
234 | |||
235 | static int __init dm_ql_init(void) | ||
236 | { | ||
237 | int r = dm_register_path_selector(&ql_ps); | ||
238 | |||
239 | if (r < 0) | ||
240 | DMERR("register failed %d", r); | ||
241 | |||
242 | DMINFO("version " QL_VERSION " loaded"); | ||
243 | |||
244 | return r; | ||
245 | } | ||
246 | |||
247 | static void __exit dm_ql_exit(void) | ||
248 | { | ||
249 | int r = dm_unregister_path_selector(&ql_ps); | ||
250 | |||
251 | if (r < 0) | ||
252 | DMERR("unregister failed %d", r); | ||
253 | } | ||
254 | |||
255 | module_init(dm_ql_init); | ||
256 | module_exit(dm_ql_exit); | ||
257 | |||
258 | MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>"); | ||
259 | MODULE_DESCRIPTION( | ||
260 | "(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n" | ||
261 | DM_NAME " path selector to balance the number of in-flight I/Os" | ||
262 | ); | ||
263 | MODULE_LICENSE("GPL"); | ||
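queue-length depends on the start_io/end_io bracketing that this patch adds to the multipath target: map_io() charges each request to the chosen path and multipath_end_io() releases it, so qlen counts in-flight requests per path. A condensed view of that contract, taken from the dm-mpath.c hunks above (names unchanged):

	/* map_io(): after the clone has been remapped to a path */
	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);	/* ql_start_io(): qlen++ */

	/* multipath_end_io(): when the clone completes */
	if (ps->type->end_io)
		ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);	/* ql_end_io(): qlen-- */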
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 076fbb4e967a..ce8868c768cc 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -1283,9 +1283,23 @@ static int mirror_status(struct dm_target *ti, status_type_t type, | |||
1283 | return 0; | 1283 | return 0; |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | static int mirror_iterate_devices(struct dm_target *ti, | ||
1287 | iterate_devices_callout_fn fn, void *data) | ||
1288 | { | ||
1289 | struct mirror_set *ms = ti->private; | ||
1290 | int ret = 0; | ||
1291 | unsigned i; | ||
1292 | |||
1293 | for (i = 0; !ret && i < ms->nr_mirrors; i++) | ||
1294 | ret = fn(ti, ms->mirror[i].dev, | ||
1295 | ms->mirror[i].offset, data); | ||
1296 | |||
1297 | return ret; | ||
1298 | } | ||
1299 | |||
1286 | static struct target_type mirror_target = { | 1300 | static struct target_type mirror_target = { |
1287 | .name = "mirror", | 1301 | .name = "mirror", |
1288 | .version = {1, 0, 20}, | 1302 | .version = {1, 12, 0}, |
1289 | .module = THIS_MODULE, | 1303 | .module = THIS_MODULE, |
1290 | .ctr = mirror_ctr, | 1304 | .ctr = mirror_ctr, |
1291 | .dtr = mirror_dtr, | 1305 | .dtr = mirror_dtr, |
@@ -1295,6 +1309,7 @@ static struct target_type mirror_target = { | |||
1295 | .postsuspend = mirror_postsuspend, | 1309 | .postsuspend = mirror_postsuspend, |
1296 | .resume = mirror_resume, | 1310 | .resume = mirror_resume, |
1297 | .status = mirror_status, | 1311 | .status = mirror_status, |
1312 | .iterate_devices = mirror_iterate_devices, | ||
1298 | }; | 1313 | }; |
1299 | 1314 | ||
1300 | static int __init dm_mirror_init(void) | 1315 | static int __init dm_mirror_init(void) |
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 7b899be0b087..36dbe29f2fd6 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c | |||
@@ -283,7 +283,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region) | |||
283 | 283 | ||
284 | nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); | 284 | nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); |
285 | if (unlikely(!nreg)) | 285 | if (unlikely(!nreg)) |
286 | nreg = kmalloc(sizeof(*nreg), GFP_NOIO); | 286 | nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL); |
287 | 287 | ||
288 | nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? | 288 | nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? |
289 | DM_RH_CLEAN : DM_RH_NOSYNC; | 289 | DM_RH_CLEAN : DM_RH_NOSYNC; |
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index cdfbf65b28cb..24752f449bef 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c | |||
@@ -161,7 +161,7 @@ static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p) | |||
161 | } | 161 | } |
162 | 162 | ||
163 | static struct dm_path *rr_select_path(struct path_selector *ps, | 163 | static struct dm_path *rr_select_path(struct path_selector *ps, |
164 | unsigned *repeat_count) | 164 | unsigned *repeat_count, size_t nr_bytes) |
165 | { | 165 | { |
166 | struct selector *s = (struct selector *) ps->context; | 166 | struct selector *s = (struct selector *) ps->context; |
167 | struct path_info *pi = NULL; | 167 | struct path_info *pi = NULL; |
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c new file mode 100644 index 000000000000..cfa668f46c40 --- /dev/null +++ b/drivers/md/dm-service-time.c | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * Module Author: Kiyoshi Ueda | ||
5 | * | ||
6 | * This file is released under the GPL. | ||
7 | * | ||
8 | * Throughput oriented path selector. | ||
9 | */ | ||
10 | |||
11 | #include "dm.h" | ||
12 | #include "dm-path-selector.h" | ||
13 | |||
14 | #define DM_MSG_PREFIX "multipath service-time" | ||
15 | #define ST_MIN_IO 1 | ||
16 | #define ST_MAX_RELATIVE_THROUGHPUT 100 | ||
17 | #define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7 | ||
18 | #define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT) | ||
19 | #define ST_VERSION "0.2.0" | ||
20 | |||
21 | struct selector { | ||
22 | struct list_head valid_paths; | ||
23 | struct list_head failed_paths; | ||
24 | }; | ||
25 | |||
26 | struct path_info { | ||
27 | struct list_head list; | ||
28 | struct dm_path *path; | ||
29 | unsigned repeat_count; | ||
30 | unsigned relative_throughput; | ||
31 | atomic_t in_flight_size; /* Total size of in-flight I/Os */ | ||
32 | }; | ||
33 | |||
34 | static struct selector *alloc_selector(void) | ||
35 | { | ||
36 | struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
37 | |||
38 | if (s) { | ||
39 | INIT_LIST_HEAD(&s->valid_paths); | ||
40 | INIT_LIST_HEAD(&s->failed_paths); | ||
41 | } | ||
42 | |||
43 | return s; | ||
44 | } | ||
45 | |||
46 | static int st_create(struct path_selector *ps, unsigned argc, char **argv) | ||
47 | { | ||
48 | struct selector *s = alloc_selector(); | ||
49 | |||
50 | if (!s) | ||
51 | return -ENOMEM; | ||
52 | |||
53 | ps->context = s; | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static void free_paths(struct list_head *paths) | ||
58 | { | ||
59 | struct path_info *pi, *next; | ||
60 | |||
61 | list_for_each_entry_safe(pi, next, paths, list) { | ||
62 | list_del(&pi->list); | ||
63 | kfree(pi); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | static void st_destroy(struct path_selector *ps) | ||
68 | { | ||
69 | struct selector *s = ps->context; | ||
70 | |||
71 | free_paths(&s->valid_paths); | ||
72 | free_paths(&s->failed_paths); | ||
73 | kfree(s); | ||
74 | ps->context = NULL; | ||
75 | } | ||
76 | |||
77 | static int st_status(struct path_selector *ps, struct dm_path *path, | ||
78 | status_type_t type, char *result, unsigned maxlen) | ||
79 | { | ||
80 | unsigned sz = 0; | ||
81 | struct path_info *pi; | ||
82 | |||
83 | if (!path) | ||
84 | DMEMIT("0 "); | ||
85 | else { | ||
86 | pi = path->pscontext; | ||
87 | |||
88 | switch (type) { | ||
89 | case STATUSTYPE_INFO: | ||
90 | DMEMIT("%d %u ", atomic_read(&pi->in_flight_size), | ||
91 | pi->relative_throughput); | ||
92 | break; | ||
93 | case STATUSTYPE_TABLE: | ||
94 | DMEMIT("%u %u ", pi->repeat_count, | ||
95 | pi->relative_throughput); | ||
96 | break; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | return sz; | ||
101 | } | ||
102 | |||
103 | static int st_add_path(struct path_selector *ps, struct dm_path *path, | ||
104 | int argc, char **argv, char **error) | ||
105 | { | ||
106 | struct selector *s = ps->context; | ||
107 | struct path_info *pi; | ||
108 | unsigned repeat_count = ST_MIN_IO; | ||
109 | unsigned relative_throughput = 1; | ||
110 | |||
111 | /* | ||
112 | * Arguments: [<repeat_count> [<relative_throughput>]] | ||
113 | * <repeat_count>: The number of I/Os before switching path. | ||
114 | * If not given, default (ST_MIN_IO) is used. | ||
115 | * <relative_throughput>: The relative throughput value of | ||
116 | * the path among all paths in the path-group. | ||
117 | * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT> | ||
118 | * If not given, minimum value '1' is used. | ||
119 | * If '0' is given, the path isn't selected while | ||
120 | * other paths having a positive value are | ||
121 | * available. | ||
122 | */ | ||
123 | if (argc > 2) { | ||
124 | *error = "service-time ps: incorrect number of arguments"; | ||
125 | return -EINVAL; | ||
126 | } | ||
127 | |||
128 | if (argc && (sscanf(argv[0], "%u", &repeat_count) != 1)) { | ||
129 | *error = "service-time ps: invalid repeat count"; | ||
130 | return -EINVAL; | ||
131 | } | ||
132 | |||
133 | if ((argc == 2) && | ||
134 | (sscanf(argv[1], "%u", &relative_throughput) != 1 || | ||
135 | relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) { | ||
136 | *error = "service-time ps: invalid relative_throughput value"; | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | |||
140 | /* allocate the path */ | ||
141 | pi = kmalloc(sizeof(*pi), GFP_KERNEL); | ||
142 | if (!pi) { | ||
143 | *error = "service-time ps: Error allocating path context"; | ||
144 | return -ENOMEM; | ||
145 | } | ||
146 | |||
147 | pi->path = path; | ||
148 | pi->repeat_count = repeat_count; | ||
149 | pi->relative_throughput = relative_throughput; | ||
150 | atomic_set(&pi->in_flight_size, 0); | ||
151 | |||
152 | path->pscontext = pi; | ||
153 | |||
154 | list_add_tail(&pi->list, &s->valid_paths); | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static void st_fail_path(struct path_selector *ps, struct dm_path *path) | ||
160 | { | ||
161 | struct selector *s = ps->context; | ||
162 | struct path_info *pi = path->pscontext; | ||
163 | |||
164 | list_move(&pi->list, &s->failed_paths); | ||
165 | } | ||
166 | |||
167 | static int st_reinstate_path(struct path_selector *ps, struct dm_path *path) | ||
168 | { | ||
169 | struct selector *s = ps->context; | ||
170 | struct path_info *pi = path->pscontext; | ||
171 | |||
172 | list_move_tail(&pi->list, &s->valid_paths); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Compare the estimated service time of 2 paths, pi1 and pi2, | ||
179 | * for the incoming I/O. | ||
180 | * | ||
181 | * Returns: | ||
182 | * < 0 : pi1 is better | ||
183 | * 0 : no difference between pi1 and pi2 | ||
184 | * > 0 : pi2 is better | ||
185 | * | ||
186 | * Description: | ||
187 | * Basically, the service time is estimated by: | ||
188 | * ('pi->in-flight-size' + 'incoming') / 'pi->relative_throughput' | ||
189 | * To reduce the amount of calculation, some optimizations are made. | ||
190 | * (See comments inline) | ||
191 | */ | ||
192 | static int st_compare_load(struct path_info *pi1, struct path_info *pi2, | ||
193 | size_t incoming) | ||
194 | { | ||
195 | size_t sz1, sz2, st1, st2; | ||
196 | |||
197 | sz1 = atomic_read(&pi1->in_flight_size); | ||
198 | sz2 = atomic_read(&pi2->in_flight_size); | ||
199 | |||
200 | /* | ||
201 | * Case 1: Both have same throughput value. Choose less loaded path. | ||
202 | */ | ||
203 | if (pi1->relative_throughput == pi2->relative_throughput) | ||
204 | return sz1 - sz2; | ||
205 | |||
206 | /* | ||
207 | * Case 2a: Both have same load. Choose higher throughput path. | ||
208 | * Case 2b: One path has no throughput value. Choose the other one. | ||
209 | */ | ||
210 | if (sz1 == sz2 || | ||
211 | !pi1->relative_throughput || !pi2->relative_throughput) | ||
212 | return pi2->relative_throughput - pi1->relative_throughput; | ||
213 | |||
214 | /* | ||
215 | * Case 3: Calculate service time. Choose faster path. | ||
216 | * Service time using pi1: | ||
217 | * st1 = (sz1 + incoming) / pi1->relative_throughput | ||
218 | * Service time using pi2: | ||
219 | * st2 = (sz2 + incoming) / pi2->relative_throughput | ||
220 | * | ||
221 | * To avoid the division, transform the expression to use | ||
222 | * multiplication. | ||
223 | * Because ->relative_throughput > 0 here, if st1 < st2, | ||
224 | * the expressions below are equivalent: | ||
225 | * (sz1 + incoming) / pi1->relative_throughput < | ||
226 | * (sz2 + incoming) / pi2->relative_throughput | ||
227 | * (sz1 + incoming) * pi2->relative_throughput < | ||
228 | * (sz2 + incoming) * pi1->relative_throughput | ||
229 | * So use the latter one. | ||
230 | */ | ||
231 | sz1 += incoming; | ||
232 | sz2 += incoming; | ||
233 | if (unlikely(sz1 >= ST_MAX_INFLIGHT_SIZE || | ||
234 | sz2 >= ST_MAX_INFLIGHT_SIZE)) { | ||
235 | /* | ||
236 | * The size may be too large to multiply by pi->relative_throughput | ||
237 | * without overflowing. | ||
238 | * To avoid the overflow and mis-selection, shift both down. | ||
239 | */ | ||
240 | sz1 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT; | ||
241 | sz2 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT; | ||
242 | } | ||
243 | st1 = sz1 * pi2->relative_throughput; | ||
244 | st2 = sz2 * pi1->relative_throughput; | ||
245 | if (st1 != st2) | ||
246 | return st1 - st2; | ||
247 | |||
248 | /* | ||
249 | * Case 4: Service time is equal. Choose higher throughput path. | ||
250 | */ | ||
251 | return pi2->relative_throughput - pi1->relative_throughput; | ||
252 | } | ||
253 | |||
254 | static struct dm_path *st_select_path(struct path_selector *ps, | ||
255 | unsigned *repeat_count, size_t nr_bytes) | ||
256 | { | ||
257 | struct selector *s = ps->context; | ||
258 | struct path_info *pi = NULL, *best = NULL; | ||
259 | |||
260 | if (list_empty(&s->valid_paths)) | ||
261 | return NULL; | ||
262 | |||
263 | /* Change preferred (first in list) path to evenly balance. */ | ||
264 | list_move_tail(s->valid_paths.next, &s->valid_paths); | ||
265 | |||
266 | list_for_each_entry(pi, &s->valid_paths, list) | ||
267 | if (!best || (st_compare_load(pi, best, nr_bytes) < 0)) | ||
268 | best = pi; | ||
269 | |||
270 | if (!best) | ||
271 | return NULL; | ||
272 | |||
273 | *repeat_count = best->repeat_count; | ||
274 | |||
275 | return best->path; | ||
276 | } | ||
277 | |||
278 | static int st_start_io(struct path_selector *ps, struct dm_path *path, | ||
279 | size_t nr_bytes) | ||
280 | { | ||
281 | struct path_info *pi = path->pscontext; | ||
282 | |||
283 | atomic_add(nr_bytes, &pi->in_flight_size); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | static int st_end_io(struct path_selector *ps, struct dm_path *path, | ||
289 | size_t nr_bytes) | ||
290 | { | ||
291 | struct path_info *pi = path->pscontext; | ||
292 | |||
293 | atomic_sub(nr_bytes, &pi->in_flight_size); | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static struct path_selector_type st_ps = { | ||
299 | .name = "service-time", | ||
300 | .module = THIS_MODULE, | ||
301 | .table_args = 2, | ||
302 | .info_args = 2, | ||
303 | .create = st_create, | ||
304 | .destroy = st_destroy, | ||
305 | .status = st_status, | ||
306 | .add_path = st_add_path, | ||
307 | .fail_path = st_fail_path, | ||
308 | .reinstate_path = st_reinstate_path, | ||
309 | .select_path = st_select_path, | ||
310 | .start_io = st_start_io, | ||
311 | .end_io = st_end_io, | ||
312 | }; | ||
313 | |||
314 | static int __init dm_st_init(void) | ||
315 | { | ||
316 | int r = dm_register_path_selector(&st_ps); | ||
317 | |||
318 | if (r < 0) | ||
319 | DMERR("register failed %d", r); | ||
320 | |||
321 | DMINFO("version " ST_VERSION " loaded"); | ||
322 | |||
323 | return r; | ||
324 | } | ||
325 | |||
326 | static void __exit dm_st_exit(void) | ||
327 | { | ||
328 | int r = dm_unregister_path_selector(&st_ps); | ||
329 | |||
330 | if (r < 0) | ||
331 | DMERR("unregister failed %d", r); | ||
332 | } | ||
333 | |||
334 | module_init(dm_st_init); | ||
335 | module_exit(dm_st_exit); | ||
336 | |||
337 | MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector"); | ||
338 | MODULE_AUTHOR("Kiyoshi Ueda <k-ueda@ct.jp.nec.com>"); | ||
339 | MODULE_LICENSE("GPL"); | ||
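The selector above estimates service time as (in-flight size + incoming size) / relative_throughput and avoids the division by cross-multiplying, shifting both sizes down first when the product could overflow. A stand-alone sketch of that comparison, with made-up path names and sample numbers purely for illustration (not part of the patch):

/*
 * User-space sketch of the rule st_compare_load() implements; the struct,
 * path names and values below are illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_path {
	const char *name;		/* hypothetical identifier */
	size_t in_flight;		/* bytes currently in flight */
	unsigned relative_throughput;	/* 0..100; 0 means "avoid if possible" */
};

/* < 0: a is better, > 0: b is better, 0: no preference */
static int compare_load(const struct fake_path *a, const struct fake_path *b,
			size_t incoming)
{
	size_t sta, stb;

	if (a->relative_throughput == b->relative_throughput)
		return a->in_flight < b->in_flight ? -1 :
		       a->in_flight > b->in_flight ?  1 : 0;

	if (a->in_flight == b->in_flight ||
	    !a->relative_throughput || !b->relative_throughput)
		return (int)b->relative_throughput - (int)a->relative_throughput;

	/* cross-multiply instead of dividing by the throughput values */
	sta = (a->in_flight + incoming) * b->relative_throughput;
	stb = (b->in_flight + incoming) * a->relative_throughput;
	if (sta != stb)
		return sta < stb ? -1 : 1;

	return (int)b->relative_throughput - (int)a->relative_throughput;
}

int main(void)
{
	struct fake_path fast = { "path0", 1024 * 1024, 100 };
	struct fake_path slow = { "path1",   64 * 1024,  25 };

	/* estimated service time: ~11141 on path0 vs ~5243 on path1 */
	printf("preferred: %s\n",
	       compare_load(&fast, &slow, 64 * 1024) < 0 ? fast.name : slow.name);
	return 0;
}

With these sample values the lightly loaded slower path wins over the busy faster one, which is exactly the trade-off the selector is designed to make.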
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 2662a41337e7..6e3fe4f14934 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -636,7 +636,7 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
636 | /* | 636 | /* |
637 | * Commit exceptions to disk. | 637 | * Commit exceptions to disk. |
638 | */ | 638 | */ |
639 | if (ps->valid && area_io(ps, WRITE)) | 639 | if (ps->valid && area_io(ps, WRITE_BARRIER)) |
640 | ps->valid = 0; | 640 | ps->valid = 0; |
641 | 641 | ||
642 | /* | 642 | /* |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index d73f17fc7778..d573165cd2b7 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -678,6 +678,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
678 | 678 | ||
679 | ti->private = s; | 679 | ti->private = s; |
680 | ti->split_io = s->store->chunk_size; | 680 | ti->split_io = s->store->chunk_size; |
681 | ti->num_flush_requests = 1; | ||
681 | 682 | ||
682 | return 0; | 683 | return 0; |
683 | 684 | ||
@@ -1030,6 +1031,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, | |||
1030 | chunk_t chunk; | 1031 | chunk_t chunk; |
1031 | struct dm_snap_pending_exception *pe = NULL; | 1032 | struct dm_snap_pending_exception *pe = NULL; |
1032 | 1033 | ||
1034 | if (unlikely(bio_empty_barrier(bio))) { | ||
1035 | bio->bi_bdev = s->store->cow->bdev; | ||
1036 | return DM_MAPIO_REMAPPED; | ||
1037 | } | ||
1038 | |||
1033 | chunk = sector_to_chunk(s->store, bio->bi_sector); | 1039 | chunk = sector_to_chunk(s->store, bio->bi_sector); |
1034 | 1040 | ||
1035 | /* Full snapshots are not usable */ | 1041 | /* Full snapshots are not usable */ |
@@ -1338,6 +1344,8 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1338 | } | 1344 | } |
1339 | 1345 | ||
1340 | ti->private = dev; | 1346 | ti->private = dev; |
1347 | ti->num_flush_requests = 1; | ||
1348 | |||
1341 | return 0; | 1349 | return 0; |
1342 | } | 1350 | } |
1343 | 1351 | ||
@@ -1353,6 +1361,9 @@ static int origin_map(struct dm_target *ti, struct bio *bio, | |||
1353 | struct dm_dev *dev = ti->private; | 1361 | struct dm_dev *dev = ti->private; |
1354 | bio->bi_bdev = dev->bdev; | 1362 | bio->bi_bdev = dev->bdev; |
1355 | 1363 | ||
1364 | if (unlikely(bio_empty_barrier(bio))) | ||
1365 | return DM_MAPIO_REMAPPED; | ||
1366 | |||
1356 | /* Only tell snapshots if this is a write */ | 1367 | /* Only tell snapshots if this is a write */ |
1357 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; | 1368 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; |
1358 | } | 1369 | } |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 41569bc60abc..b240e85ae39a 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -167,6 +167,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
167 | sc->stripes = stripes; | 167 | sc->stripes = stripes; |
168 | sc->stripe_width = width; | 168 | sc->stripe_width = width; |
169 | ti->split_io = chunk_size; | 169 | ti->split_io = chunk_size; |
170 | ti->num_flush_requests = stripes; | ||
170 | 171 | ||
171 | sc->chunk_mask = ((sector_t) chunk_size) - 1; | 172 | sc->chunk_mask = ((sector_t) chunk_size) - 1; |
172 | for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) | 173 | for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) |
@@ -211,10 +212,18 @@ static int stripe_map(struct dm_target *ti, struct bio *bio, | |||
211 | union map_info *map_context) | 212 | union map_info *map_context) |
212 | { | 213 | { |
213 | struct stripe_c *sc = (struct stripe_c *) ti->private; | 214 | struct stripe_c *sc = (struct stripe_c *) ti->private; |
215 | sector_t offset, chunk; | ||
216 | uint32_t stripe; | ||
214 | 217 | ||
215 | sector_t offset = bio->bi_sector - ti->begin; | 218 | if (unlikely(bio_empty_barrier(bio))) { |
216 | sector_t chunk = offset >> sc->chunk_shift; | 219 | BUG_ON(map_context->flush_request >= sc->stripes); |
217 | uint32_t stripe = sector_div(chunk, sc->stripes); | 220 | bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev; |
221 | return DM_MAPIO_REMAPPED; | ||
222 | } | ||
223 | |||
224 | offset = bio->bi_sector - ti->begin; | ||
225 | chunk = offset >> sc->chunk_shift; | ||
226 | stripe = sector_div(chunk, sc->stripes); | ||
218 | 227 | ||
219 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; | 228 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; |
220 | bio->bi_sector = sc->stripe[stripe].physical_start + | 229 | bio->bi_sector = sc->stripe[stripe].physical_start + |
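For reference, the non-barrier path above reduces a target-relative sector to a stripe index, a chunk index on that stripe and an offset within the chunk. A stand-alone sketch of the same arithmetic, with a made-up chunk size, stripe count and sample sector:

/*
 * User-space sketch of the striped mapping; all values are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 10000;	/* offset from the target start */
	unsigned chunk_shift = 7;		/* 128-sector (64KiB) chunks */
	unsigned stripes = 4;

	unsigned long long chunk = sector >> chunk_shift;
	unsigned stripe = chunk % stripes;	/* sector_div() in the kernel */
	unsigned long long offset = sector & ((1ULL << chunk_shift) - 1);

	/* the bio would go to device 'stripe', (chunk / stripes) chunks in */
	printf("stripe %u, chunk-on-stripe %llu, offset-in-chunk %llu\n",
	       stripe, chunk / stripes, offset);
	return 0;
}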
@@ -304,15 +313,31 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, | |||
304 | return error; | 313 | return error; |
305 | } | 314 | } |
306 | 315 | ||
316 | static int stripe_iterate_devices(struct dm_target *ti, | ||
317 | iterate_devices_callout_fn fn, void *data) | ||
318 | { | ||
319 | struct stripe_c *sc = ti->private; | ||
320 | int ret = 0; | ||
321 | unsigned i = 0; | ||
322 | |||
323 | do | ||
324 | ret = fn(ti, sc->stripe[i].dev, | ||
325 | sc->stripe[i].physical_start, data); | ||
326 | while (!ret && ++i < sc->stripes); | ||
327 | |||
328 | return ret; | ||
329 | } | ||
330 | |||
307 | static struct target_type stripe_target = { | 331 | static struct target_type stripe_target = { |
308 | .name = "striped", | 332 | .name = "striped", |
309 | .version = {1, 1, 0}, | 333 | .version = {1, 2, 0}, |
310 | .module = THIS_MODULE, | 334 | .module = THIS_MODULE, |
311 | .ctr = stripe_ctr, | 335 | .ctr = stripe_ctr, |
312 | .dtr = stripe_dtr, | 336 | .dtr = stripe_dtr, |
313 | .map = stripe_map, | 337 | .map = stripe_map, |
314 | .end_io = stripe_end_io, | 338 | .end_io = stripe_end_io, |
315 | .status = stripe_status, | 339 | .status = stripe_status, |
340 | .iterate_devices = stripe_iterate_devices, | ||
316 | }; | 341 | }; |
317 | 342 | ||
318 | int __init dm_stripe_init(void) | 343 | int __init dm_stripe_init(void) |
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index a2a45e6c7c8b..4b045903a4e2 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c | |||
@@ -57,12 +57,21 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) | |||
57 | return strlen(buf); | 57 | return strlen(buf); |
58 | } | 58 | } |
59 | 59 | ||
60 | static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) | ||
61 | { | ||
62 | sprintf(buf, "%d\n", dm_suspended(md)); | ||
63 | |||
64 | return strlen(buf); | ||
65 | } | ||
66 | |||
60 | static DM_ATTR_RO(name); | 67 | static DM_ATTR_RO(name); |
61 | static DM_ATTR_RO(uuid); | 68 | static DM_ATTR_RO(uuid); |
69 | static DM_ATTR_RO(suspended); | ||
62 | 70 | ||
63 | static struct attribute *dm_attrs[] = { | 71 | static struct attribute *dm_attrs[] = { |
64 | &dm_attr_name.attr, | 72 | &dm_attr_name.attr, |
65 | &dm_attr_uuid.attr, | 73 | &dm_attr_uuid.attr, |
74 | &dm_attr_suspended.attr, | ||
66 | NULL, | 75 | NULL, |
67 | }; | 76 | }; |
68 | 77 | ||
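The new attribute sits next to 'name' and 'uuid' in the device's dm sysfs directory. A minimal user-space reader, assuming a device node named dm-0 (substitute the real one), might look like:

/* Reads the new 'suspended' attribute; the dm-0 path is an assumption. */
#include <stdio.h>

int main(void)
{
	char buf[8];
	FILE *f = fopen("/sys/block/dm-0/dm/suspended", "r");

	if (!f) {
		perror("open");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("suspended: %s", buf);	/* prints "0" or "1" */
	fclose(f);
	return 0;
}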
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e9a73bb242b0..4899ebe767c8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -41,6 +41,7 @@ | |||
41 | struct dm_table { | 41 | struct dm_table { |
42 | struct mapped_device *md; | 42 | struct mapped_device *md; |
43 | atomic_t holders; | 43 | atomic_t holders; |
44 | unsigned type; | ||
44 | 45 | ||
45 | /* btree table */ | 46 | /* btree table */ |
46 | unsigned int depth; | 47 | unsigned int depth; |
@@ -62,15 +63,11 @@ struct dm_table { | |||
62 | /* a list of devices used by this table */ | 63 | /* a list of devices used by this table */ |
63 | struct list_head devices; | 64 | struct list_head devices; |
64 | 65 | ||
65 | /* | ||
66 | * These are optimistic limits taken from all the | ||
67 | * targets, some targets will need smaller limits. | ||
68 | */ | ||
69 | struct io_restrictions limits; | ||
70 | |||
71 | /* events get handed up using this callback */ | 66 | /* events get handed up using this callback */ |
72 | void (*event_fn)(void *); | 67 | void (*event_fn)(void *); |
73 | void *event_context; | 68 | void *event_context; |
69 | |||
70 | struct dm_md_mempools *mempools; | ||
74 | }; | 71 | }; |
75 | 72 | ||
76 | /* | 73 | /* |
@@ -89,43 +86,6 @@ static unsigned int int_log(unsigned int n, unsigned int base) | |||
89 | } | 86 | } |
90 | 87 | ||
91 | /* | 88 | /* |
92 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
93 | */ | ||
94 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) | ||
95 | |||
96 | /* | ||
97 | * Combine two io_restrictions, always taking the lower value. | ||
98 | */ | ||
99 | static void combine_restrictions_low(struct io_restrictions *lhs, | ||
100 | struct io_restrictions *rhs) | ||
101 | { | ||
102 | lhs->max_sectors = | ||
103 | min_not_zero(lhs->max_sectors, rhs->max_sectors); | ||
104 | |||
105 | lhs->max_phys_segments = | ||
106 | min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments); | ||
107 | |||
108 | lhs->max_hw_segments = | ||
109 | min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); | ||
110 | |||
111 | lhs->logical_block_size = max(lhs->logical_block_size, | ||
112 | rhs->logical_block_size); | ||
113 | |||
114 | lhs->max_segment_size = | ||
115 | min_not_zero(lhs->max_segment_size, rhs->max_segment_size); | ||
116 | |||
117 | lhs->max_hw_sectors = | ||
118 | min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); | ||
119 | |||
120 | lhs->seg_boundary_mask = | ||
121 | min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); | ||
122 | |||
123 | lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn); | ||
124 | |||
125 | lhs->no_cluster |= rhs->no_cluster; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Calculate the index of the child node of the n'th node k'th key. | 89 | * Calculate the index of the child node of the n'th node k'th key. |
130 | */ | 90 | */ |
131 | static inline unsigned int get_child(unsigned int n, unsigned int k) | 91 | static inline unsigned int get_child(unsigned int n, unsigned int k) |
@@ -267,6 +227,8 @@ static void free_devices(struct list_head *devices) | |||
267 | list_for_each_safe(tmp, next, devices) { | 227 | list_for_each_safe(tmp, next, devices) { |
268 | struct dm_dev_internal *dd = | 228 | struct dm_dev_internal *dd = |
269 | list_entry(tmp, struct dm_dev_internal, list); | 229 | list_entry(tmp, struct dm_dev_internal, list); |
230 | DMWARN("dm_table_destroy: dm_put_device call missing for %s", | ||
231 | dd->dm_dev.name); | ||
270 | kfree(dd); | 232 | kfree(dd); |
271 | } | 233 | } |
272 | } | 234 | } |
@@ -296,12 +258,10 @@ void dm_table_destroy(struct dm_table *t) | |||
296 | vfree(t->highs); | 258 | vfree(t->highs); |
297 | 259 | ||
298 | /* free the device list */ | 260 | /* free the device list */ |
299 | if (t->devices.next != &t->devices) { | 261 | if (t->devices.next != &t->devices) |
300 | DMWARN("devices still present during destroy: " | ||
301 | "dm_table_remove_device calls missing"); | ||
302 | |||
303 | free_devices(&t->devices); | 262 | free_devices(&t->devices); |
304 | } | 263 | |
264 | dm_free_md_mempools(t->mempools); | ||
305 | 265 | ||
306 | kfree(t); | 266 | kfree(t); |
307 | } | 267 | } |
@@ -385,15 +345,48 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) | |||
385 | /* | 345 | /* |
386 | * If possible, this checks an area of a destination device is valid. | 346 | * If possible, this checks an area of a destination device is valid. |
387 | */ | 347 | */ |
388 | static int check_device_area(struct dm_dev_internal *dd, sector_t start, | 348 | static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, |
389 | sector_t len) | 349 | sector_t start, void *data) |
390 | { | 350 | { |
391 | sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT; | 351 | struct queue_limits *limits = data; |
352 | struct block_device *bdev = dev->bdev; | ||
353 | sector_t dev_size = | ||
354 | i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; | ||
355 | unsigned short logical_block_size_sectors = | ||
356 | limits->logical_block_size >> SECTOR_SHIFT; | ||
357 | char b[BDEVNAME_SIZE]; | ||
392 | 358 | ||
393 | if (!dev_size) | 359 | if (!dev_size) |
394 | return 1; | 360 | return 1; |
395 | 361 | ||
396 | return ((start < dev_size) && (len <= (dev_size - start))); | 362 | if ((start >= dev_size) || (start + ti->len > dev_size)) { |
363 | DMWARN("%s: %s too small for target", | ||
364 | dm_device_name(ti->table->md), bdevname(bdev, b)); | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | if (logical_block_size_sectors <= 1) | ||
369 | return 1; | ||
370 | |||
371 | if (start & (logical_block_size_sectors - 1)) { | ||
372 | DMWARN("%s: start=%llu not aligned to h/w " | ||
373 | "logical block size %hu of %s", | ||
374 | dm_device_name(ti->table->md), | ||
375 | (unsigned long long)start, | ||
376 | limits->logical_block_size, bdevname(bdev, b)); | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | if (ti->len & (logical_block_size_sectors - 1)) { | ||
381 | DMWARN("%s: len=%llu not aligned to h/w " | ||
382 | "logical block size %hu of %s", | ||
383 | dm_device_name(ti->table->md), | ||
384 | (unsigned long long)ti->len, | ||
385 | limits->logical_block_size, bdevname(bdev, b)); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | return 1; | ||
397 | } | 390 | } |
398 | 391 | ||
399 | /* | 392 | /* |
@@ -479,38 +472,32 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, | |||
479 | } | 472 | } |
480 | atomic_inc(&dd->count); | 473 | atomic_inc(&dd->count); |
481 | 474 | ||
482 | if (!check_device_area(dd, start, len)) { | ||
483 | DMWARN("device %s too small for target", path); | ||
484 | dm_put_device(ti, &dd->dm_dev); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | *result = &dd->dm_dev; | 475 | *result = &dd->dm_dev; |
489 | |||
490 | return 0; | 476 | return 0; |
491 | } | 477 | } |
492 | 478 | ||
493 | void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) | 479 | /* |
480 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
481 | */ | ||
482 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) | ||
483 | |||
484 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | ||
485 | sector_t start, void *data) | ||
494 | { | 486 | { |
487 | struct queue_limits *limits = data; | ||
488 | struct block_device *bdev = dev->bdev; | ||
495 | struct request_queue *q = bdev_get_queue(bdev); | 489 | struct request_queue *q = bdev_get_queue(bdev); |
496 | struct io_restrictions *rs = &ti->limits; | ||
497 | char b[BDEVNAME_SIZE]; | 490 | char b[BDEVNAME_SIZE]; |
498 | 491 | ||
499 | if (unlikely(!q)) { | 492 | if (unlikely(!q)) { |
500 | DMWARN("%s: Cannot set limits for nonexistent device %s", | 493 | DMWARN("%s: Cannot set limits for nonexistent device %s", |
501 | dm_device_name(ti->table->md), bdevname(bdev, b)); | 494 | dm_device_name(ti->table->md), bdevname(bdev, b)); |
502 | return; | 495 | return 0; |
503 | } | 496 | } |
504 | 497 | ||
505 | /* | 498 | if (blk_stack_limits(limits, &q->limits, start) < 0) |
506 | * Combine the device limits low. | 499 | DMWARN("%s: target device %s is misaligned", |
507 | * | 500 | dm_device_name(ti->table->md), bdevname(bdev, b)); |
508 | * FIXME: if we move an io_restriction struct | ||
509 | * into q this would just be a call to | ||
510 | * combine_restrictions_low() | ||
511 | */ | ||
512 | rs->max_sectors = | ||
513 | min_not_zero(rs->max_sectors, queue_max_sectors(q)); | ||
514 | 501 | ||
515 | /* | 502 | /* |
516 | * Check if merge fn is supported. | 503 | * Check if merge fn is supported. |
@@ -519,48 +506,21 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) | |||
519 | */ | 506 | */ |
520 | 507 | ||
521 | if (q->merge_bvec_fn && !ti->type->merge) | 508 | if (q->merge_bvec_fn && !ti->type->merge) |
522 | rs->max_sectors = | 509 | limits->max_sectors = |
523 | min_not_zero(rs->max_sectors, | 510 | min_not_zero(limits->max_sectors, |
524 | (unsigned int) (PAGE_SIZE >> 9)); | 511 | (unsigned int) (PAGE_SIZE >> 9)); |
525 | 512 | return 0; | |
526 | rs->max_phys_segments = | ||
527 | min_not_zero(rs->max_phys_segments, | ||
528 | queue_max_phys_segments(q)); | ||
529 | |||
530 | rs->max_hw_segments = | ||
531 | min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q)); | ||
532 | |||
533 | rs->logical_block_size = max(rs->logical_block_size, | ||
534 | queue_logical_block_size(q)); | ||
535 | |||
536 | rs->max_segment_size = | ||
537 | min_not_zero(rs->max_segment_size, queue_max_segment_size(q)); | ||
538 | |||
539 | rs->max_hw_sectors = | ||
540 | min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q)); | ||
541 | |||
542 | rs->seg_boundary_mask = | ||
543 | min_not_zero(rs->seg_boundary_mask, | ||
544 | queue_segment_boundary(q)); | ||
545 | |||
546 | rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q)); | ||
547 | |||
548 | rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); | ||
549 | } | 513 | } |
550 | EXPORT_SYMBOL_GPL(dm_set_device_limits); | 514 | EXPORT_SYMBOL_GPL(dm_set_device_limits); |
551 | 515 | ||
552 | int dm_get_device(struct dm_target *ti, const char *path, sector_t start, | 516 | int dm_get_device(struct dm_target *ti, const char *path, sector_t start, |
553 | sector_t len, fmode_t mode, struct dm_dev **result) | 517 | sector_t len, fmode_t mode, struct dm_dev **result) |
554 | { | 518 | { |
555 | int r = __table_get_device(ti->table, ti, path, | 519 | return __table_get_device(ti->table, ti, path, |
556 | start, len, mode, result); | 520 | start, len, mode, result); |
557 | |||
558 | if (!r) | ||
559 | dm_set_device_limits(ti, (*result)->bdev); | ||
560 | |||
561 | return r; | ||
562 | } | 521 | } |
563 | 522 | ||
523 | |||
564 | /* | 524 | /* |
565 | * Decrement a devices use count and remove it if necessary. | 525 | * Decrement a devices use count and remove it if necessary. |
566 | */ | 526 | */ |
@@ -675,24 +635,78 @@ int dm_split_args(int *argc, char ***argvp, char *input) | |||
675 | return 0; | 635 | return 0; |
676 | } | 636 | } |
677 | 637 | ||
678 | static void check_for_valid_limits(struct io_restrictions *rs) | 638 | /* |
639 | * Impose necessary and sufficient conditions on a device's table such | ||
640 | * that any incoming bio which respects its logical_block_size can be | ||
641 | * processed successfully. If it falls across the boundary between | ||
642 | * two or more targets, the size of each piece it gets split into must | ||
643 | * be compatible with the logical_block_size of the target processing it. | ||
644 | */ | ||
645 | static int validate_hardware_logical_block_alignment(struct dm_table *table, | ||
646 | struct queue_limits *limits) | ||
679 | { | 647 | { |
680 | if (!rs->max_sectors) | 648 | /* |
681 | rs->max_sectors = SAFE_MAX_SECTORS; | 649 | * This function uses arithmetic modulo the logical_block_size |
682 | if (!rs->max_hw_sectors) | 650 | * (in units of 512-byte sectors). |
683 | rs->max_hw_sectors = SAFE_MAX_SECTORS; | 651 | */ |
684 | if (!rs->max_phys_segments) | 652 | unsigned short device_logical_block_size_sects = |
685 | rs->max_phys_segments = MAX_PHYS_SEGMENTS; | 653 | limits->logical_block_size >> SECTOR_SHIFT; |
686 | if (!rs->max_hw_segments) | 654 | |
687 | rs->max_hw_segments = MAX_HW_SEGMENTS; | 655 | /* |
688 | if (!rs->logical_block_size) | 656 | * Offset of the start of the next table entry, mod logical_block_size. |
689 | rs->logical_block_size = 1 << SECTOR_SHIFT; | 657 | */ |
690 | if (!rs->max_segment_size) | 658 | unsigned short next_target_start = 0; |
691 | rs->max_segment_size = MAX_SEGMENT_SIZE; | 659 | |
692 | if (!rs->seg_boundary_mask) | 660 | /* |
693 | rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; | 661 | * Given an aligned bio that extends beyond the end of a |
694 | if (!rs->bounce_pfn) | 662 | * target, how many sectors must the next target handle? |
695 | rs->bounce_pfn = -1; | 663 | */ |
664 | unsigned short remaining = 0; | ||
665 | |||
666 | struct dm_target *uninitialized_var(ti); | ||
667 | struct queue_limits ti_limits; | ||
668 | unsigned i = 0; | ||
669 | |||
670 | /* | ||
671 | * Check each entry in the table in turn. | ||
672 | */ | ||
673 | while (i < dm_table_get_num_targets(table)) { | ||
674 | ti = dm_table_get_target(table, i++); | ||
675 | |||
676 | blk_set_default_limits(&ti_limits); | ||
677 | |||
678 | /* combine all target devices' limits */ | ||
679 | if (ti->type->iterate_devices) | ||
680 | ti->type->iterate_devices(ti, dm_set_device_limits, | ||
681 | &ti_limits); | ||
682 | |||
683 | /* | ||
684 | * If the remaining sectors fall entirely within this | ||
685 | * table entry, are they compatible with its logical_block_size? | ||
686 | */ | ||
687 | if (remaining < ti->len && | ||
688 | remaining & ((ti_limits.logical_block_size >> | ||
689 | SECTOR_SHIFT) - 1)) | ||
690 | break; /* Error */ | ||
691 | |||
692 | next_target_start = | ||
693 | (unsigned short) ((next_target_start + ti->len) & | ||
694 | (device_logical_block_size_sects - 1)); | ||
695 | remaining = next_target_start ? | ||
696 | device_logical_block_size_sects - next_target_start : 0; | ||
697 | } | ||
698 | |||
699 | if (remaining) { | ||
700 | DMWARN("%s: table line %u (start sect %llu len %llu) " | ||
701 | "not aligned to h/w logical block size %hu", | ||
702 | dm_device_name(table->md), i, | ||
703 | (unsigned long long) ti->begin, | ||
704 | (unsigned long long) ti->len, | ||
705 | limits->logical_block_size); | ||
706 | return -EINVAL; | ||
707 | } | ||
708 | |||
709 | return 0; | ||
696 | } | 710 | } |
697 | 711 | ||
698 | int dm_table_add_target(struct dm_table *t, const char *type, | 712 | int dm_table_add_target(struct dm_table *t, const char *type, |
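The check above boils down to requiring that every boundary between adjacent table entries lands on a multiple of the logical block size expressed in sectors. A simplified stand-alone sketch of that rule, using one made-up block size for the whole table instead of the per-target limits the kernel gathers:

/*
 * Simplified sketch of the alignment rule; table layout and block size
 * are example values only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long target_len[] = { 1024, 1020, 4096 };	/* sectors */
	unsigned lbs_sectors = 8;		/* 4096-byte logical blocks */
	unsigned long long boundary = 0;
	unsigned i;

	for (i = 0; i < 3; i++) {
		boundary += target_len[i];
		if (boundary & (lbs_sectors - 1))
			printf("table line %u ends at sector %llu: misaligned\n",
			       i + 1, boundary);
	}
	return 0;
}

Here the second entry's 1020-sector length pushes every later boundary off a 4KiB multiple, which is the kind of situation the new validation warns about and rejects.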
@@ -747,9 +761,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
747 | 761 | ||
748 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; | 762 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; |
749 | 763 | ||
750 | /* FIXME: the plan is to combine high here and then have | ||
751 | * the merge fn apply the target level restrictions. */ | ||
752 | combine_restrictions_low(&t->limits, &tgt->limits); | ||
753 | return 0; | 764 | return 0; |
754 | 765 | ||
755 | bad: | 766 | bad: |
@@ -758,6 +769,104 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
758 | return r; | 769 | return r; |
759 | } | 770 | } |
760 | 771 | ||
772 | int dm_table_set_type(struct dm_table *t) | ||
773 | { | ||
774 | unsigned i; | ||
775 | unsigned bio_based = 0, request_based = 0; | ||
776 | struct dm_target *tgt; | ||
777 | struct dm_dev_internal *dd; | ||
778 | struct list_head *devices; | ||
779 | |||
780 | for (i = 0; i < t->num_targets; i++) { | ||
781 | tgt = t->targets + i; | ||
782 | if (dm_target_request_based(tgt)) | ||
783 | request_based = 1; | ||
784 | else | ||
785 | bio_based = 1; | ||
786 | |||
787 | if (bio_based && request_based) { | ||
788 | DMWARN("Inconsistent table: different target types" | ||
789 | " can't be mixed up"); | ||
790 | return -EINVAL; | ||
791 | } | ||
792 | } | ||
793 | |||
794 | if (bio_based) { | ||
795 | /* We must use this table as bio-based */ | ||
796 | t->type = DM_TYPE_BIO_BASED; | ||
797 | return 0; | ||
798 | } | ||
799 | |||
800 | BUG_ON(!request_based); /* No targets in this table */ | ||
801 | |||
802 | /* Non-request-stackable devices can't be used for request-based dm */ | ||
803 | devices = dm_table_get_devices(t); | ||
804 | list_for_each_entry(dd, devices, list) { | ||
805 | if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) { | ||
806 | DMWARN("table load rejected: including" | ||
807 | " non-request-stackable devices"); | ||
808 | return -EINVAL; | ||
809 | } | ||
810 | } | ||
811 | |||
812 | /* | ||
813 | * Request-based dm supports only tables that have a single target now. | ||
814 | * To support multiple targets, request splitting support is needed, | ||
815 | * and that needs lots of changes in the block-layer. | ||
816 | * (e.g. request completion process for partial completion.) | ||
817 | */ | ||
818 | if (t->num_targets > 1) { | ||
819 | DMWARN("Request-based dm doesn't support multiple targets yet"); | ||
820 | return -EINVAL; | ||
821 | } | ||
822 | |||
823 | t->type = DM_TYPE_REQUEST_BASED; | ||
824 | |||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | unsigned dm_table_get_type(struct dm_table *t) | ||
829 | { | ||
830 | return t->type; | ||
831 | } | ||
832 | |||
833 | bool dm_table_bio_based(struct dm_table *t) | ||
834 | { | ||
835 | return dm_table_get_type(t) == DM_TYPE_BIO_BASED; | ||
836 | } | ||
837 | |||
838 | bool dm_table_request_based(struct dm_table *t) | ||
839 | { | ||
840 | return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; | ||
841 | } | ||
842 | |||
843 | int dm_table_alloc_md_mempools(struct dm_table *t) | ||
844 | { | ||
845 | unsigned type = dm_table_get_type(t); | ||
846 | |||
847 | if (unlikely(type == DM_TYPE_NONE)) { | ||
848 | DMWARN("no table type is set, can't allocate mempools"); | ||
849 | return -EINVAL; | ||
850 | } | ||
851 | |||
852 | t->mempools = dm_alloc_md_mempools(type); | ||
853 | if (!t->mempools) | ||
854 | return -ENOMEM; | ||
855 | |||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | void dm_table_free_md_mempools(struct dm_table *t) | ||
860 | { | ||
861 | dm_free_md_mempools(t->mempools); | ||
862 | t->mempools = NULL; | ||
863 | } | ||
864 | |||
865 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) | ||
866 | { | ||
867 | return t->mempools; | ||
868 | } | ||
869 | |||
761 | static int setup_indexes(struct dm_table *t) | 870 | static int setup_indexes(struct dm_table *t) |
762 | { | 871 | { |
763 | int i; | 872 | int i; |
@@ -792,8 +901,6 @@ int dm_table_complete(struct dm_table *t) | |||
792 | int r = 0; | 901 | int r = 0; |
793 | unsigned int leaf_nodes; | 902 | unsigned int leaf_nodes; |
794 | 903 | ||
795 | check_for_valid_limits(&t->limits); | ||
796 | |||
797 | /* how many indexes will the btree have ? */ | 904 | /* how many indexes will the btree have ? */ |
798 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); | 905 | leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); |
799 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); | 906 | t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); |
@@ -869,6 +976,57 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) | |||
869 | } | 976 | } |
870 | 977 | ||
871 | /* | 978 | /* |
979 | * Establish the new table's queue_limits and validate them. | ||
980 | */ | ||
981 | int dm_calculate_queue_limits(struct dm_table *table, | ||
982 | struct queue_limits *limits) | ||
983 | { | ||
984 | struct dm_target *uninitialized_var(ti); | ||
985 | struct queue_limits ti_limits; | ||
986 | unsigned i = 0; | ||
987 | |||
988 | blk_set_default_limits(limits); | ||
989 | |||
990 | while (i < dm_table_get_num_targets(table)) { | ||
991 | blk_set_default_limits(&ti_limits); | ||
992 | |||
993 | ti = dm_table_get_target(table, i++); | ||
994 | |||
995 | if (!ti->type->iterate_devices) | ||
996 | goto combine_limits; | ||
997 | |||
998 | /* | ||
999 | * Combine queue limits of all the devices this target uses. | ||
1000 | */ | ||
1001 | ti->type->iterate_devices(ti, dm_set_device_limits, | ||
1002 | &ti_limits); | ||
1003 | |||
1004 | /* | ||
1005 | * Check each device area is consistent with the target's | ||
1006 | * overall queue limits. | ||
1007 | */ | ||
1008 | if (!ti->type->iterate_devices(ti, device_area_is_valid, | ||
1009 | &ti_limits)) | ||
1010 | return -EINVAL; | ||
1011 | |||
1012 | combine_limits: | ||
1013 | /* | ||
1014 | * Merge this target's queue limits into the overall limits | ||
1015 | * for the table. | ||
1016 | */ | ||
1017 | if (blk_stack_limits(limits, &ti_limits, 0) < 0) | ||
1018 | DMWARN("%s: target device " | ||
1019 | "(start sect %llu len %llu) " | ||
1020 | "is misaligned", | ||
1021 | dm_device_name(table->md), | ||
1022 | (unsigned long long) ti->begin, | ||
1023 | (unsigned long long) ti->len); | ||
1024 | } | ||
1025 | |||
1026 | return validate_hardware_logical_block_alignment(table, limits); | ||
1027 | } | ||
1028 | |||
1029 | /* | ||
872 | * Set the integrity profile for this device if all devices used have | 1030 | * Set the integrity profile for this device if all devices used have |
873 | * matching profiles. | 1031 | * matching profiles. |
874 | */ | 1032 | */ |
@@ -907,27 +1065,42 @@ no_integrity: | |||
907 | return; | 1065 | return; |
908 | } | 1066 | } |
909 | 1067 | ||
910 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) | 1068 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1069 | struct queue_limits *limits) | ||
911 | { | 1070 | { |
912 | /* | 1071 | /* |
913 | * Make sure we obey the optimistic sub devices | 1072 | * Each target device in the table has a data area that should normally |
914 | * restrictions. | 1073 | * be aligned such that the DM device's alignment_offset is 0. |
1074 | * FIXME: Propagate alignment_offsets up the stack and warn of | ||
1075 | * sub-optimal or inconsistent settings. | ||
1076 | */ | ||
1077 | limits->alignment_offset = 0; | ||
1078 | limits->misaligned = 0; | ||
1079 | |||
1080 | /* | ||
1081 | * Copy table's limits to the DM device's request_queue | ||
915 | */ | 1082 | */ |
916 | blk_queue_max_sectors(q, t->limits.max_sectors); | 1083 | q->limits = *limits; |
917 | blk_queue_max_phys_segments(q, t->limits.max_phys_segments); | 1084 | |
918 | blk_queue_max_hw_segments(q, t->limits.max_hw_segments); | 1085 | if (limits->no_cluster) |
919 | blk_queue_logical_block_size(q, t->limits.logical_block_size); | ||
920 | blk_queue_max_segment_size(q, t->limits.max_segment_size); | ||
921 | blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors); | ||
922 | blk_queue_segment_boundary(q, t->limits.seg_boundary_mask); | ||
923 | blk_queue_bounce_limit(q, t->limits.bounce_pfn); | ||
924 | |||
925 | if (t->limits.no_cluster) | ||
926 | queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); | 1086 | queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); |
927 | else | 1087 | else |
928 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); | 1088 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); |
929 | 1089 | ||
930 | dm_table_set_integrity(t); | 1090 | dm_table_set_integrity(t); |
1091 | |||
1092 | /* | ||
1093 | * QUEUE_FLAG_STACKABLE must be set after all queue settings are | ||
1094 | * visible to other CPUs because, once the flag is set, incoming bios | ||
1095 | * are processed by request-based dm, which refers to the queue | ||
1096 | * settings. | ||
1097 | * Until the flag is set, bios are passed to bio-based dm and queued to | ||
1098 | * md->deferred where queue settings are not needed yet. | ||
1099 | * Those bios are passed to request-based dm at resume time. | ||
1100 | */ | ||
1101 | smp_mb(); | ||
1102 | if (dm_table_request_based(t)) | ||
1103 | queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); | ||
931 | } | 1104 | } |
932 | 1105 | ||
933 | unsigned int dm_table_get_num_targets(struct dm_table *t) | 1106 | unsigned int dm_table_get_num_targets(struct dm_table *t) |
@@ -1023,6 +1196,20 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) | |||
1023 | return r; | 1196 | return r; |
1024 | } | 1197 | } |
1025 | 1198 | ||
1199 | int dm_table_any_busy_target(struct dm_table *t) | ||
1200 | { | ||
1201 | unsigned i; | ||
1202 | struct dm_target *ti; | ||
1203 | |||
1204 | for (i = 0; i < t->num_targets; i++) { | ||
1205 | ti = t->targets + i; | ||
1206 | if (ti->type->busy && ti->type->busy(ti)) | ||
1207 | return 1; | ||
1208 | } | ||
1209 | |||
1210 | return 0; | ||
1211 | } | ||
1212 | |||
1026 | void dm_table_unplug_all(struct dm_table *t) | 1213 | void dm_table_unplug_all(struct dm_table *t) |
1027 | { | 1214 | { |
1028 | struct dm_dev_internal *dd; | 1215 | struct dm_dev_internal *dd; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 48db308fae67..3c6d4ee8921d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -24,6 +24,13 @@ | |||
24 | 24 | ||
25 | #define DM_MSG_PREFIX "core" | 25 | #define DM_MSG_PREFIX "core" |
26 | 26 | ||
27 | /* | ||
28 | * Cookies are numeric values sent with CHANGE and REMOVE | ||
29 | * uevents while resuming, removing or renaming the device. | ||
30 | */ | ||
31 | #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" | ||
32 | #define DM_COOKIE_LENGTH 24 | ||
33 | |||
27 | static const char *_name = DM_NAME; | 34 | static const char *_name = DM_NAME; |
28 | 35 | ||
29 | static unsigned int major = 0; | 36 | static unsigned int major = 0; |
@@ -71,7 +78,7 @@ struct dm_rq_target_io { | |||
71 | */ | 78 | */ |
72 | struct dm_rq_clone_bio_info { | 79 | struct dm_rq_clone_bio_info { |
73 | struct bio *orig; | 80 | struct bio *orig; |
74 | struct request *rq; | 81 | struct dm_rq_target_io *tio; |
75 | }; | 82 | }; |
76 | 83 | ||
77 | union map_info *dm_get_mapinfo(struct bio *bio) | 84 | union map_info *dm_get_mapinfo(struct bio *bio) |
@@ -81,6 +88,14 @@ union map_info *dm_get_mapinfo(struct bio *bio) | |||
81 | return NULL; | 88 | return NULL; |
82 | } | 89 | } |
83 | 90 | ||
91 | union map_info *dm_get_rq_mapinfo(struct request *rq) | ||
92 | { | ||
93 | if (rq && rq->end_io_data) | ||
94 | return &((struct dm_rq_target_io *)rq->end_io_data)->info; | ||
95 | return NULL; | ||
96 | } | ||
97 | EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); | ||
98 | |||
84 | #define MINOR_ALLOCED ((void *)-1) | 99 | #define MINOR_ALLOCED ((void *)-1) |
85 | 100 | ||
86 | /* | 101 | /* |
@@ -157,13 +172,31 @@ struct mapped_device { | |||
157 | * freeze/thaw support require holding onto a super block | 172 | * freeze/thaw support require holding onto a super block |
158 | */ | 173 | */ |
159 | struct super_block *frozen_sb; | 174 | struct super_block *frozen_sb; |
160 | struct block_device *suspended_bdev; | 175 | struct block_device *bdev; |
161 | 176 | ||
162 | /* forced geometry settings */ | 177 | /* forced geometry settings */ |
163 | struct hd_geometry geometry; | 178 | struct hd_geometry geometry; |
164 | 179 | ||
180 | /* marker of flush suspend for request-based dm */ | ||
181 | struct request suspend_rq; | ||
182 | |||
183 | /* For saving the address of __make_request for request based dm */ | ||
184 | make_request_fn *saved_make_request_fn; | ||
185 | |||
165 | /* sysfs handle */ | 186 | /* sysfs handle */ |
166 | struct kobject kobj; | 187 | struct kobject kobj; |
188 | |||
189 | /* zero-length barrier that will be cloned and submitted to targets */ | ||
190 | struct bio barrier_bio; | ||
191 | }; | ||
192 | |||
193 | /* | ||
194 | * For mempools pre-allocation at the table loading time. | ||
195 | */ | ||
196 | struct dm_md_mempools { | ||
197 | mempool_t *io_pool; | ||
198 | mempool_t *tio_pool; | ||
199 | struct bio_set *bs; | ||
167 | }; | 200 | }; |
168 | 201 | ||
169 | #define MIN_IOS 256 | 202 | #define MIN_IOS 256 |
@@ -391,14 +424,29 @@ static void free_io(struct mapped_device *md, struct dm_io *io) | |||
391 | mempool_free(io, md->io_pool); | 424 | mempool_free(io, md->io_pool); |
392 | } | 425 | } |
393 | 426 | ||
394 | static struct dm_target_io *alloc_tio(struct mapped_device *md) | 427 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) |
395 | { | 428 | { |
396 | return mempool_alloc(md->tio_pool, GFP_NOIO); | 429 | mempool_free(tio, md->tio_pool); |
397 | } | 430 | } |
398 | 431 | ||
399 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) | 432 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md) |
400 | { | 433 | { |
401 | mempool_free(tio, md->tio_pool); | 434 | return mempool_alloc(md->tio_pool, GFP_ATOMIC); |
435 | } | ||
436 | |||
437 | static void free_rq_tio(struct dm_rq_target_io *tio) | ||
438 | { | ||
439 | mempool_free(tio, tio->md->tio_pool); | ||
440 | } | ||
441 | |||
442 | static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md) | ||
443 | { | ||
444 | return mempool_alloc(md->io_pool, GFP_ATOMIC); | ||
445 | } | ||
446 | |||
447 | static void free_bio_info(struct dm_rq_clone_bio_info *info) | ||
448 | { | ||
449 | mempool_free(info, info->tio->md->io_pool); | ||
402 | } | 450 | } |
403 | 451 | ||
404 | static void start_io_acct(struct dm_io *io) | 452 | static void start_io_acct(struct dm_io *io) |
@@ -464,12 +512,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio) | |||
464 | struct dm_table *dm_get_table(struct mapped_device *md) | 512 | struct dm_table *dm_get_table(struct mapped_device *md) |
465 | { | 513 | { |
466 | struct dm_table *t; | 514 | struct dm_table *t; |
515 | unsigned long flags; | ||
467 | 516 | ||
468 | read_lock(&md->map_lock); | 517 | read_lock_irqsave(&md->map_lock, flags); |
469 | t = md->map; | 518 | t = md->map; |
470 | if (t) | 519 | if (t) |
471 | dm_table_get(t); | 520 | dm_table_get(t); |
472 | read_unlock(&md->map_lock); | 521 | read_unlock_irqrestore(&md->map_lock, flags); |
473 | 522 | ||
474 | return t; | 523 | return t; |
475 | } | 524 | } |
@@ -536,9 +585,11 @@ static void dec_pending(struct dm_io *io, int error) | |||
536 | * Target requested pushing back the I/O. | 585 | * Target requested pushing back the I/O. |
537 | */ | 586 | */ |
538 | spin_lock_irqsave(&md->deferred_lock, flags); | 587 | spin_lock_irqsave(&md->deferred_lock, flags); |
539 | if (__noflush_suspending(md)) | 588 | if (__noflush_suspending(md)) { |
540 | bio_list_add_head(&md->deferred, io->bio); | 589 | if (!bio_barrier(io->bio)) |
541 | else | 590 | bio_list_add_head(&md->deferred, |
591 | io->bio); | ||
592 | } else | ||
542 | /* noflush suspend was interrupted. */ | 593 | /* noflush suspend was interrupted. */ |
543 | io->error = -EIO; | 594 | io->error = -EIO; |
544 | spin_unlock_irqrestore(&md->deferred_lock, flags); | 595 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
@@ -553,7 +604,8 @@ static void dec_pending(struct dm_io *io, int error) | |||
553 | * a per-device variable for error reporting. | 604 | * a per-device variable for error reporting. |
554 | * Note that you can't touch the bio after end_io_acct | 605 | * Note that you can't touch the bio after end_io_acct |
555 | */ | 606 | */ |
556 | md->barrier_error = io_error; | 607 | if (!md->barrier_error && io_error != -EOPNOTSUPP) |
608 | md->barrier_error = io_error; | ||
557 | end_io_acct(io); | 609 | end_io_acct(io); |
558 | } else { | 610 | } else { |
559 | end_io_acct(io); | 611 | end_io_acct(io); |
@@ -607,6 +659,262 @@ static void clone_endio(struct bio *bio, int error) | |||
607 | dec_pending(io, error); | 659 | dec_pending(io, error); |
608 | } | 660 | } |
609 | 661 | ||
662 | /* | ||
663 | * Partial completion handling for request-based dm | ||
664 | */ | ||
665 | static void end_clone_bio(struct bio *clone, int error) | ||
666 | { | ||
667 | struct dm_rq_clone_bio_info *info = clone->bi_private; | ||
668 | struct dm_rq_target_io *tio = info->tio; | ||
669 | struct bio *bio = info->orig; | ||
670 | unsigned int nr_bytes = info->orig->bi_size; | ||
671 | |||
672 | bio_put(clone); | ||
673 | |||
674 | if (tio->error) | ||
675 | /* | ||
676 | * An error has already been detected on the request. | ||
677 | * Once error occurred, just let clone->end_io() handle | ||
678 | * the remainder. | ||
679 | */ | ||
680 | return; | ||
681 | else if (error) { | ||
682 | /* | ||
683 | * Don't report the error to the upper layer yet. | ||
684 | * The error handling decision is made by the target driver | ||
685 | * when the request is completed. | ||
686 | */ | ||
687 | tio->error = error; | ||
688 | return; | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * I/O for the bio successfully completed. | ||
693 | * Report the data completion to the upper layer. | ||
694 | */ | ||
695 | |||
696 | /* | ||
697 | * bios are processed from the head of the list. | ||
698 | * So the completing bio should always be rq->bio. | ||
699 | * If it's not, something is wrong. | ||
700 | */ | ||
701 | if (tio->orig->bio != bio) | ||
702 | DMERR("bio completion is going in the middle of the request"); | ||
703 | |||
704 | /* | ||
705 | * Update the original request. | ||
706 | * Do not use blk_end_request() here, because it may complete | ||
707 | * the original request before the clone, and break the ordering. | ||
708 | */ | ||
709 | blk_update_request(tio->orig, 0, nr_bytes); | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Don't touch any member of the md after calling this function because | ||
714 | * the md may be freed in dm_put() at the end of this function. | ||
715 | * Or do dm_get() before calling this function and dm_put() later. | ||
716 | */ | ||
717 | static void rq_completed(struct mapped_device *md, int run_queue) | ||
718 | { | ||
719 | int wakeup_waiters = 0; | ||
720 | struct request_queue *q = md->queue; | ||
721 | unsigned long flags; | ||
722 | |||
723 | spin_lock_irqsave(q->queue_lock, flags); | ||
724 | if (!queue_in_flight(q)) | ||
725 | wakeup_waiters = 1; | ||
726 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
727 | |||
728 | /* nudge anyone waiting on suspend queue */ | ||
729 | if (wakeup_waiters) | ||
730 | wake_up(&md->wait); | ||
731 | |||
732 | if (run_queue) | ||
733 | blk_run_queue(q); | ||
734 | |||
735 | /* | ||
736 | * dm_put() must be at the end of this function. See the comment above | ||
737 | */ | ||
738 | dm_put(md); | ||
739 | } | ||
740 | |||
741 | static void dm_unprep_request(struct request *rq) | ||
742 | { | ||
743 | struct request *clone = rq->special; | ||
744 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
745 | |||
746 | rq->special = NULL; | ||
747 | rq->cmd_flags &= ~REQ_DONTPREP; | ||
748 | |||
749 | blk_rq_unprep_clone(clone); | ||
750 | free_rq_tio(tio); | ||
751 | } | ||
752 | |||
753 | /* | ||
754 | * Requeue the original request of a clone. | ||
755 | */ | ||
756 | void dm_requeue_unmapped_request(struct request *clone) | ||
757 | { | ||
758 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
759 | struct mapped_device *md = tio->md; | ||
760 | struct request *rq = tio->orig; | ||
761 | struct request_queue *q = rq->q; | ||
762 | unsigned long flags; | ||
763 | |||
764 | dm_unprep_request(rq); | ||
765 | |||
766 | spin_lock_irqsave(q->queue_lock, flags); | ||
767 | if (elv_queue_empty(q)) | ||
768 | blk_plug_device(q); | ||
769 | blk_requeue_request(q, rq); | ||
770 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
771 | |||
772 | rq_completed(md, 0); | ||
773 | } | ||
774 | EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); | ||
775 | |||
776 | static void __stop_queue(struct request_queue *q) | ||
777 | { | ||
778 | blk_stop_queue(q); | ||
779 | } | ||
780 | |||
781 | static void stop_queue(struct request_queue *q) | ||
782 | { | ||
783 | unsigned long flags; | ||
784 | |||
785 | spin_lock_irqsave(q->queue_lock, flags); | ||
786 | __stop_queue(q); | ||
787 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
788 | } | ||
789 | |||
790 | static void __start_queue(struct request_queue *q) | ||
791 | { | ||
792 | if (blk_queue_stopped(q)) | ||
793 | blk_start_queue(q); | ||
794 | } | ||
795 | |||
796 | static void start_queue(struct request_queue *q) | ||
797 | { | ||
798 | unsigned long flags; | ||
799 | |||
800 | spin_lock_irqsave(q->queue_lock, flags); | ||
801 | __start_queue(q); | ||
802 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
803 | } | ||
804 | |||
805 | /* | ||
806 | * Complete the clone and the original request. | ||
807 | * Must be called without queue lock. | ||
808 | */ | ||
809 | static void dm_end_request(struct request *clone, int error) | ||
810 | { | ||
811 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
812 | struct mapped_device *md = tio->md; | ||
813 | struct request *rq = tio->orig; | ||
814 | |||
815 | if (blk_pc_request(rq)) { | ||
816 | rq->errors = clone->errors; | ||
817 | rq->resid_len = clone->resid_len; | ||
818 | |||
819 | if (rq->sense) | ||
820 | /* | ||
821 | * We are using the sense buffer of the original | ||
822 | * request. | ||
823 | * So setting the length of the sense data is enough. | ||
824 | */ | ||
825 | rq->sense_len = clone->sense_len; | ||
826 | } | ||
827 | |||
828 | BUG_ON(clone->bio); | ||
829 | free_rq_tio(tio); | ||
830 | |||
831 | blk_end_request_all(rq, error); | ||
832 | |||
833 | rq_completed(md, 1); | ||
834 | } | ||
835 | |||
836 | /* | ||
837 | * Request completion handler for request-based dm | ||
838 | */ | ||
839 | static void dm_softirq_done(struct request *rq) | ||
840 | { | ||
841 | struct request *clone = rq->completion_data; | ||
842 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
843 | dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; | ||
844 | int error = tio->error; | ||
845 | |||
846 | if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io) | ||
847 | error = rq_end_io(tio->ti, clone, error, &tio->info); | ||
848 | |||
849 | if (error <= 0) | ||
850 | /* The target wants to complete the I/O */ | ||
851 | dm_end_request(clone, error); | ||
852 | else if (error == DM_ENDIO_INCOMPLETE) | ||
853 | /* The target will handle the I/O */ | ||
854 | return; | ||
855 | else if (error == DM_ENDIO_REQUEUE) | ||
856 | /* The target wants to requeue the I/O */ | ||
857 | dm_requeue_unmapped_request(clone); | ||
858 | else { | ||
859 | DMWARN("unimplemented target endio return value: %d", error); | ||
860 | BUG(); | ||
861 | } | ||
862 | } | ||
863 | |||
864 | /* | ||
865 | * Complete the clone and the original request with the error status | ||
866 | * through softirq context. | ||
867 | */ | ||
868 | static void dm_complete_request(struct request *clone, int error) | ||
869 | { | ||
870 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
871 | struct request *rq = tio->orig; | ||
872 | |||
873 | tio->error = error; | ||
874 | rq->completion_data = clone; | ||
875 | blk_complete_request(rq); | ||
876 | } | ||
877 | |||
878 | /* | ||
879 | * Complete the not-mapped clone and the original request with the error status | ||
880 | * through softirq context. | ||
881 | * Target's rq_end_io() function isn't called. | ||
882 | * This may be used when the target's map_rq() function fails. | ||
883 | */ | ||
884 | void dm_kill_unmapped_request(struct request *clone, int error) | ||
885 | { | ||
886 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
887 | struct request *rq = tio->orig; | ||
888 | |||
889 | rq->cmd_flags |= REQ_FAILED; | ||
890 | dm_complete_request(clone, error); | ||
891 | } | ||
892 | EXPORT_SYMBOL_GPL(dm_kill_unmapped_request); | ||
893 | |||
894 | /* | ||
895 | * Called with the queue lock held | ||
896 | */ | ||
897 | static void end_clone_request(struct request *clone, int error) | ||
898 | { | ||
899 | /* | ||
900 | * This only cleans up the information of the queue in which | ||
901 | * the clone was dispatched. | ||
902 | * The clone is *NOT* actually freed here because it is allocated from | ||
903 | * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags. | ||
904 | */ | ||
905 | __blk_put_request(clone->q, clone); | ||
906 | |||
907 | /* | ||
908 | * Actual request completion is done in a softirq context which doesn't | ||
909 | * hold the queue lock. Otherwise, deadlock could occur because: | ||
910 | * - another request may be submitted by the upper level driver | ||
911 | * of the stacking during the completion | ||
912 | * - the submission which requires queue lock may be done | ||
913 | * against this queue | ||
914 | */ | ||
915 | dm_complete_request(clone, error); | ||
916 | } | ||
917 | |||
610 | static sector_t max_io_len(struct mapped_device *md, | 918 | static sector_t max_io_len(struct mapped_device *md, |
611 | sector_t sector, struct dm_target *ti) | 919 | sector_t sector, struct dm_target *ti) |
612 | { | 920 | { |
@@ -634,11 +942,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, | |||
634 | sector_t sector; | 942 | sector_t sector; |
635 | struct mapped_device *md; | 943 | struct mapped_device *md; |
636 | 944 | ||
637 | /* | ||
638 | * Sanity checks. | ||
639 | */ | ||
640 | BUG_ON(!clone->bi_size); | ||
641 | |||
642 | clone->bi_end_io = clone_endio; | 945 | clone->bi_end_io = clone_endio; |
643 | clone->bi_private = tio; | 946 | clone->bi_private = tio; |
644 | 947 | ||
@@ -752,6 +1055,48 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
752 | return clone; | 1055 | return clone; |
753 | } | 1056 | } |
754 | 1057 | ||
1058 | static struct dm_target_io *alloc_tio(struct clone_info *ci, | ||
1059 | struct dm_target *ti) | ||
1060 | { | ||
1061 | struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO); | ||
1062 | |||
1063 | tio->io = ci->io; | ||
1064 | tio->ti = ti; | ||
1065 | memset(&tio->info, 0, sizeof(tio->info)); | ||
1066 | |||
1067 | return tio; | ||
1068 | } | ||
1069 | |||
1070 | static void __flush_target(struct clone_info *ci, struct dm_target *ti, | ||
1071 | unsigned flush_nr) | ||
1072 | { | ||
1073 | struct dm_target_io *tio = alloc_tio(ci, ti); | ||
1074 | struct bio *clone; | ||
1075 | |||
1076 | tio->info.flush_request = flush_nr; | ||
1077 | |||
1078 | clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); | ||
1079 | __bio_clone(clone, ci->bio); | ||
1080 | clone->bi_destructor = dm_bio_destructor; | ||
1081 | |||
1082 | __map_bio(ti, clone, tio); | ||
1083 | } | ||
1084 | |||
1085 | static int __clone_and_map_empty_barrier(struct clone_info *ci) | ||
1086 | { | ||
1087 | unsigned target_nr = 0, flush_nr; | ||
1088 | struct dm_target *ti; | ||
1089 | |||
1090 | while ((ti = dm_table_get_target(ci->map, target_nr++))) | ||
1091 | for (flush_nr = 0; flush_nr < ti->num_flush_requests; | ||
1092 | flush_nr++) | ||
1093 | __flush_target(ci, ti, flush_nr); | ||
1094 | |||
1095 | ci->sector_count = 0; | ||
1096 | |||
1097 | return 0; | ||
1098 | } | ||
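The empty-barrier fan-out above only reaches targets that advertise flush requests. A sketch of how a target constructor might opt in, using the ti->num_flush_requests field shown in this hunk; the constructor itself is hypothetical:

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* Ask dm core for one empty-barrier clone per barrier. */
	ti->num_flush_requests = 1;

	return 0;
}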
1099 | |||
755 | static int __clone_and_map(struct clone_info *ci) | 1100 | static int __clone_and_map(struct clone_info *ci) |
756 | { | 1101 | { |
757 | struct bio *clone, *bio = ci->bio; | 1102 | struct bio *clone, *bio = ci->bio; |
@@ -759,6 +1104,9 @@ static int __clone_and_map(struct clone_info *ci) | |||
759 | sector_t len = 0, max; | 1104 | sector_t len = 0, max; |
760 | struct dm_target_io *tio; | 1105 | struct dm_target_io *tio; |
761 | 1106 | ||
1107 | if (unlikely(bio_empty_barrier(bio))) | ||
1108 | return __clone_and_map_empty_barrier(ci); | ||
1109 | |||
762 | ti = dm_table_find_target(ci->map, ci->sector); | 1110 | ti = dm_table_find_target(ci->map, ci->sector); |
763 | if (!dm_target_is_valid(ti)) | 1111 | if (!dm_target_is_valid(ti)) |
764 | return -EIO; | 1112 | return -EIO; |
@@ -768,10 +1116,7 @@ static int __clone_and_map(struct clone_info *ci) | |||
768 | /* | 1116 | /* |
769 | * Allocate a target io object. | 1117 | * Allocate a target io object. |
770 | */ | 1118 | */ |
771 | tio = alloc_tio(ci->md); | 1119 | tio = alloc_tio(ci, ti); |
772 | tio->io = ci->io; | ||
773 | tio->ti = ti; | ||
774 | memset(&tio->info, 0, sizeof(tio->info)); | ||
775 | 1120 | ||
776 | if (ci->sector_count <= max) { | 1121 | if (ci->sector_count <= max) { |
777 | /* | 1122 | /* |
@@ -827,10 +1172,7 @@ static int __clone_and_map(struct clone_info *ci) | |||
827 | 1172 | ||
828 | max = max_io_len(ci->md, ci->sector, ti); | 1173 | max = max_io_len(ci->md, ci->sector, ti); |
829 | 1174 | ||
830 | tio = alloc_tio(ci->md); | 1175 | tio = alloc_tio(ci, ti); |
831 | tio->io = ci->io; | ||
832 | tio->ti = ti; | ||
833 | memset(&tio->info, 0, sizeof(tio->info)); | ||
834 | } | 1176 | } |
835 | 1177 | ||
836 | len = min(remaining, max); | 1178 | len = min(remaining, max); |
@@ -865,7 +1207,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) | |||
865 | if (!bio_barrier(bio)) | 1207 | if (!bio_barrier(bio)) |
866 | bio_io_error(bio); | 1208 | bio_io_error(bio); |
867 | else | 1209 | else |
868 | md->barrier_error = -EIO; | 1210 | if (!md->barrier_error) |
1211 | md->barrier_error = -EIO; | ||
869 | return; | 1212 | return; |
870 | } | 1213 | } |
871 | 1214 | ||
@@ -878,6 +1221,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) | |||
878 | ci.io->md = md; | 1221 | ci.io->md = md; |
879 | ci.sector = bio->bi_sector; | 1222 | ci.sector = bio->bi_sector; |
880 | ci.sector_count = bio_sectors(bio); | 1223 | ci.sector_count = bio_sectors(bio); |
1224 | if (unlikely(bio_empty_barrier(bio))) | ||
1225 | ci.sector_count = 1; | ||
881 | ci.idx = bio->bi_idx; | 1226 | ci.idx = bio->bi_idx; |
882 | 1227 | ||
883 | start_io_acct(ci.io); | 1228 | start_io_acct(ci.io); |
@@ -925,6 +1270,16 @@ static int dm_merge_bvec(struct request_queue *q, | |||
925 | */ | 1270 | */ |
926 | if (max_size && ti->type->merge) | 1271 | if (max_size && ti->type->merge) |
927 | max_size = ti->type->merge(ti, bvm, biovec, max_size); | 1272 | max_size = ti->type->merge(ti, bvm, biovec, max_size); |
1273 | /* | ||
1274 | * If the target doesn't support a merge method and some of the devices | ||
1275 | * provided their merge_bvec method (we know this by looking at | ||
1276 | * queue_max_hw_sectors), then we can't allow bios with multiple vector | ||
1277 | * entries. So always set max_size to 0, and the code below allows | ||
1278 | * just one page. | ||
1279 | */ | ||
1280 | else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) | ||
1281 | |||
1282 | max_size = 0; | ||
928 | 1283 | ||
929 | out_table: | 1284 | out_table: |
930 | dm_table_put(map); | 1285 | dm_table_put(map); |
@@ -943,7 +1298,7 @@ out: | |||
943 | * The request function that just remaps the bio built up by | 1298 | * The request function that just remaps the bio built up by |
944 | * dm_merge_bvec. | 1299 | * dm_merge_bvec. |
945 | */ | 1300 | */ |
946 | static int dm_request(struct request_queue *q, struct bio *bio) | 1301 | static int _dm_request(struct request_queue *q, struct bio *bio) |
947 | { | 1302 | { |
948 | int rw = bio_data_dir(bio); | 1303 | int rw = bio_data_dir(bio); |
949 | struct mapped_device *md = q->queuedata; | 1304 | struct mapped_device *md = q->queuedata; |
@@ -980,12 +1335,274 @@ static int dm_request(struct request_queue *q, struct bio *bio) | |||
980 | return 0; | 1335 | return 0; |
981 | } | 1336 | } |
982 | 1337 | ||
1338 | static int dm_make_request(struct request_queue *q, struct bio *bio) | ||
1339 | { | ||
1340 | struct mapped_device *md = q->queuedata; | ||
1341 | |||
1342 | if (unlikely(bio_barrier(bio))) { | ||
1343 | bio_endio(bio, -EOPNOTSUPP); | ||
1344 | return 0; | ||
1345 | } | ||
1346 | |||
1347 | return md->saved_make_request_fn(q, bio); /* call __make_request() */ | ||
1348 | } | ||
1349 | |||
1350 | static int dm_request_based(struct mapped_device *md) | ||
1351 | { | ||
1352 | return blk_queue_stackable(md->queue); | ||
1353 | } | ||
1354 | |||
1355 | static int dm_request(struct request_queue *q, struct bio *bio) | ||
1356 | { | ||
1357 | struct mapped_device *md = q->queuedata; | ||
1358 | |||
1359 | if (dm_request_based(md)) | ||
1360 | return dm_make_request(q, bio); | ||
1361 | |||
1362 | return _dm_request(q, bio); | ||
1363 | } | ||
1364 | |||
1365 | void dm_dispatch_request(struct request *rq) | ||
1366 | { | ||
1367 | int r; | ||
1368 | |||
1369 | if (blk_queue_io_stat(rq->q)) | ||
1370 | rq->cmd_flags |= REQ_IO_STAT; | ||
1371 | |||
1372 | rq->start_time = jiffies; | ||
1373 | r = blk_insert_cloned_request(rq->q, rq); | ||
1374 | if (r) | ||
1375 | dm_complete_request(rq, r); | ||
1376 | } | ||
1377 | EXPORT_SYMBOL_GPL(dm_dispatch_request); | ||
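dm_dispatch_request() is exported so a request-based target that answered DM_MAPIO_SUBMITTED can hand clones back to the block layer itself. A deferred-dispatch sketch under that assumption; the list handling and all example_* names are invented here:

static void example_dispatch_deferred(struct list_head *deferred,
				      struct block_device *bdev)
{
	struct request *clone, *next;

	list_for_each_entry_safe(clone, next, deferred, queuelist) {
		list_del_init(&clone->queuelist);
		/* Point the clone at the underlying queue before dispatch. */
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
		dm_dispatch_request(clone);
	}
}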
1378 | |||
1379 | static void dm_rq_bio_destructor(struct bio *bio) | ||
1380 | { | ||
1381 | struct dm_rq_clone_bio_info *info = bio->bi_private; | ||
1382 | struct mapped_device *md = info->tio->md; | ||
1383 | |||
1384 | free_bio_info(info); | ||
1385 | bio_free(bio, md->bs); | ||
1386 | } | ||
1387 | |||
1388 | static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, | ||
1389 | void *data) | ||
1390 | { | ||
1391 | struct dm_rq_target_io *tio = data; | ||
1392 | struct mapped_device *md = tio->md; | ||
1393 | struct dm_rq_clone_bio_info *info = alloc_bio_info(md); | ||
1394 | |||
1395 | if (!info) | ||
1396 | return -ENOMEM; | ||
1397 | |||
1398 | info->orig = bio_orig; | ||
1399 | info->tio = tio; | ||
1400 | bio->bi_end_io = end_clone_bio; | ||
1401 | bio->bi_private = info; | ||
1402 | bio->bi_destructor = dm_rq_bio_destructor; | ||
1403 | |||
1404 | return 0; | ||
1405 | } | ||
1406 | |||
1407 | static int setup_clone(struct request *clone, struct request *rq, | ||
1408 | struct dm_rq_target_io *tio) | ||
1409 | { | ||
1410 | int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, | ||
1411 | dm_rq_bio_constructor, tio); | ||
1412 | |||
1413 | if (r) | ||
1414 | return r; | ||
1415 | |||
1416 | clone->cmd = rq->cmd; | ||
1417 | clone->cmd_len = rq->cmd_len; | ||
1418 | clone->sense = rq->sense; | ||
1419 | clone->buffer = rq->buffer; | ||
1420 | clone->end_io = end_clone_request; | ||
1421 | clone->end_io_data = tio; | ||
1422 | |||
1423 | return 0; | ||
1424 | } | ||
1425 | |||
1426 | static int dm_rq_flush_suspending(struct mapped_device *md) | ||
1427 | { | ||
1428 | return !md->suspend_rq.special; | ||
1429 | } | ||
1430 | |||
1431 | /* | ||
1432 | * Called with the queue lock held. | ||
1433 | */ | ||
1434 | static int dm_prep_fn(struct request_queue *q, struct request *rq) | ||
1435 | { | ||
1436 | struct mapped_device *md = q->queuedata; | ||
1437 | struct dm_rq_target_io *tio; | ||
1438 | struct request *clone; | ||
1439 | |||
1440 | if (unlikely(rq == &md->suspend_rq)) { | ||
1441 | if (dm_rq_flush_suspending(md)) | ||
1442 | return BLKPREP_OK; | ||
1443 | else | ||
1444 | /* The flush suspend was interrupted */ | ||
1445 | return BLKPREP_KILL; | ||
1446 | } | ||
1447 | |||
1448 | if (unlikely(rq->special)) { | ||
1449 | DMWARN("Already has something in rq->special."); | ||
1450 | return BLKPREP_KILL; | ||
1451 | } | ||
1452 | |||
1453 | tio = alloc_rq_tio(md); /* Only one for each original request */ | ||
1454 | if (!tio) | ||
1455 | /* -ENOMEM */ | ||
1456 | return BLKPREP_DEFER; | ||
1457 | |||
1458 | tio->md = md; | ||
1459 | tio->ti = NULL; | ||
1460 | tio->orig = rq; | ||
1461 | tio->error = 0; | ||
1462 | memset(&tio->info, 0, sizeof(tio->info)); | ||
1463 | |||
1464 | clone = &tio->clone; | ||
1465 | if (setup_clone(clone, rq, tio)) { | ||
1466 | /* -ENOMEM */ | ||
1467 | free_rq_tio(tio); | ||
1468 | return BLKPREP_DEFER; | ||
1469 | } | ||
1470 | |||
1471 | rq->special = clone; | ||
1472 | rq->cmd_flags |= REQ_DONTPREP; | ||
1473 | |||
1474 | return BLKPREP_OK; | ||
1475 | } | ||
1476 | |||
1477 | static void map_request(struct dm_target *ti, struct request *rq, | ||
1478 | struct mapped_device *md) | ||
1479 | { | ||
1480 | int r; | ||
1481 | struct request *clone = rq->special; | ||
1482 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
1483 | |||
1484 | /* | ||
1485 | * Hold the md reference here for the in-flight I/O. | ||
1486 | * We can't rely on the reference count held by the device opener, | ||
1487 | * because the device may be closed during the request completion | ||
1488 | * when all bios are completed. | ||
1489 | * See the comment in rq_completed() too. | ||
1490 | */ | ||
1491 | dm_get(md); | ||
1492 | |||
1493 | tio->ti = ti; | ||
1494 | r = ti->type->map_rq(ti, clone, &tio->info); | ||
1495 | switch (r) { | ||
1496 | case DM_MAPIO_SUBMITTED: | ||
1497 | /* The target has taken the I/O to submit by itself later */ | ||
1498 | break; | ||
1499 | case DM_MAPIO_REMAPPED: | ||
1500 | /* The target has remapped the I/O so dispatch it */ | ||
1501 | dm_dispatch_request(clone); | ||
1502 | break; | ||
1503 | case DM_MAPIO_REQUEUE: | ||
1504 | /* The target wants to requeue the I/O */ | ||
1505 | dm_requeue_unmapped_request(clone); | ||
1506 | break; | ||
1507 | default: | ||
1508 | if (r > 0) { | ||
1509 | DMWARN("unimplemented target map return value: %d", r); | ||
1510 | BUG(); | ||
1511 | } | ||
1512 | |||
1513 | /* The target wants to complete the I/O */ | ||
1514 | dm_kill_unmapped_request(clone, r); | ||
1515 | break; | ||
1516 | } | ||
1517 | } | ||
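map_request() above expects the target's map_rq hook to follow the DM_MAPIO_* contract. A minimal sketch of such a hook that remaps the clone onto one underlying device, assuming the dm_map_request_fn signature of this series; the private structure is hypothetical:

struct example_rq_target {
	struct dm_dev *dev;	/* hypothetical single underlying device */
};

static int example_map_rq(struct dm_target *ti, struct request *clone,
			  union map_info *map_context)
{
	struct example_rq_target *ert = ti->private;

	if (!ert->dev)
		return DM_MAPIO_REQUEUE;	/* no device yet, retry later */

	/* Redirect the clone to the underlying queue; dm dispatches it. */
	clone->q = bdev_get_queue(ert->dev->bdev);
	clone->rq_disk = ert->dev->bdev->bd_disk;

	return DM_MAPIO_REMAPPED;
}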
1518 | |||
1519 | /* | ||
1520 | * q->request_fn for request-based dm. | ||
1521 | * Called with the queue lock held. | ||
1522 | */ | ||
1523 | static void dm_request_fn(struct request_queue *q) | ||
1524 | { | ||
1525 | struct mapped_device *md = q->queuedata; | ||
1526 | struct dm_table *map = dm_get_table(md); | ||
1527 | struct dm_target *ti; | ||
1528 | struct request *rq; | ||
1529 | |||
1530 | /* | ||
1531 | * For noflush suspend, check blk_queue_stopped() to immediately | ||
1532 | * quit I/O dispatching. | ||
1533 | */ | ||
1534 | while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { | ||
1535 | rq = blk_peek_request(q); | ||
1536 | if (!rq) | ||
1537 | goto plug_and_out; | ||
1538 | |||
1539 | if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */ | ||
1540 | if (queue_in_flight(q)) | ||
1541 | /* Not quiet yet. Wait more */ | ||
1542 | goto plug_and_out; | ||
1543 | |||
1544 | /* This device should be quiet now */ | ||
1545 | __stop_queue(q); | ||
1546 | blk_start_request(rq); | ||
1547 | __blk_end_request_all(rq, 0); | ||
1548 | wake_up(&md->wait); | ||
1549 | goto out; | ||
1550 | } | ||
1551 | |||
1552 | ti = dm_table_find_target(map, blk_rq_pos(rq)); | ||
1553 | if (ti->type->busy && ti->type->busy(ti)) | ||
1554 | goto plug_and_out; | ||
1555 | |||
1556 | blk_start_request(rq); | ||
1557 | spin_unlock(q->queue_lock); | ||
1558 | map_request(ti, rq, md); | ||
1559 | spin_lock_irq(q->queue_lock); | ||
1560 | } | ||
1561 | |||
1562 | goto out; | ||
1563 | |||
1564 | plug_and_out: | ||
1565 | if (!elv_queue_empty(q)) | ||
1566 | /* Some requests still remain, retry later */ | ||
1567 | blk_plug_device(q); | ||
1568 | |||
1569 | out: | ||
1570 | dm_table_put(map); | ||
1571 | |||
1572 | return; | ||
1573 | } | ||
1574 | |||
1575 | int dm_underlying_device_busy(struct request_queue *q) | ||
1576 | { | ||
1577 | return blk_lld_busy(q); | ||
1578 | } | ||
1579 | EXPORT_SYMBOL_GPL(dm_underlying_device_busy); | ||
1580 | |||
1581 | static int dm_lld_busy(struct request_queue *q) | ||
1582 | { | ||
1583 | int r; | ||
1584 | struct mapped_device *md = q->queuedata; | ||
1585 | struct dm_table *map = dm_get_table(md); | ||
1586 | |||
1587 | if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) | ||
1588 | r = 1; | ||
1589 | else | ||
1590 | r = dm_table_any_busy_target(map); | ||
1591 | |||
1592 | dm_table_put(map); | ||
1593 | |||
1594 | return r; | ||
1595 | } | ||
1596 | |||
983 | static void dm_unplug_all(struct request_queue *q) | 1597 | static void dm_unplug_all(struct request_queue *q) |
984 | { | 1598 | { |
985 | struct mapped_device *md = q->queuedata; | 1599 | struct mapped_device *md = q->queuedata; |
986 | struct dm_table *map = dm_get_table(md); | 1600 | struct dm_table *map = dm_get_table(md); |
987 | 1601 | ||
988 | if (map) { | 1602 | if (map) { |
1603 | if (dm_request_based(md)) | ||
1604 | generic_unplug_device(q); | ||
1605 | |||
989 | dm_table_unplug_all(map); | 1606 | dm_table_unplug_all(map); |
990 | dm_table_put(map); | 1607 | dm_table_put(map); |
991 | } | 1608 | } |
@@ -1000,7 +1617,16 @@ static int dm_any_congested(void *congested_data, int bdi_bits) | |||
1000 | if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { | 1617 | if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { |
1001 | map = dm_get_table(md); | 1618 | map = dm_get_table(md); |
1002 | if (map) { | 1619 | if (map) { |
1003 | r = dm_table_any_congested(map, bdi_bits); | 1620 | /* |
1621 | * Request-based dm cares only about its own queue when | ||
1622 | * queried for the congestion status of the request_queue. | ||
1623 | */ | ||
1624 | if (dm_request_based(md)) | ||
1625 | r = md->queue->backing_dev_info.state & | ||
1626 | bdi_bits; | ||
1627 | else | ||
1628 | r = dm_table_any_congested(map, bdi_bits); | ||
1629 | |||
1004 | dm_table_put(map); | 1630 | dm_table_put(map); |
1005 | } | 1631 | } |
1006 | } | 1632 | } |
@@ -1123,30 +1749,32 @@ static struct mapped_device *alloc_dev(int minor) | |||
1123 | INIT_LIST_HEAD(&md->uevent_list); | 1749 | INIT_LIST_HEAD(&md->uevent_list); |
1124 | spin_lock_init(&md->uevent_lock); | 1750 | spin_lock_init(&md->uevent_lock); |
1125 | 1751 | ||
1126 | md->queue = blk_alloc_queue(GFP_KERNEL); | 1752 | md->queue = blk_init_queue(dm_request_fn, NULL); |
1127 | if (!md->queue) | 1753 | if (!md->queue) |
1128 | goto bad_queue; | 1754 | goto bad_queue; |
1129 | 1755 | ||
1756 | /* | ||
1757 | * Request-based dm devices cannot be stacked on top of bio-based dm | ||
1758 | * devices. The type of this dm device has not been decided yet, | ||
1759 | * although we initialized the queue using blk_init_queue(). | ||
1760 | * The type is decided at the first table loading time. | ||
1761 | * To prevent problematic device stacking, clear the queue flag | ||
1762 | * for request stacking support until then. | ||
1763 | * | ||
1764 | * This queue is new, so no concurrency on the queue_flags. | ||
1765 | */ | ||
1766 | queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); | ||
1767 | md->saved_make_request_fn = md->queue->make_request_fn; | ||
1130 | md->queue->queuedata = md; | 1768 | md->queue->queuedata = md; |
1131 | md->queue->backing_dev_info.congested_fn = dm_any_congested; | 1769 | md->queue->backing_dev_info.congested_fn = dm_any_congested; |
1132 | md->queue->backing_dev_info.congested_data = md; | 1770 | md->queue->backing_dev_info.congested_data = md; |
1133 | blk_queue_make_request(md->queue, dm_request); | 1771 | blk_queue_make_request(md->queue, dm_request); |
1134 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); | ||
1135 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); | 1772 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); |
1136 | md->queue->unplug_fn = dm_unplug_all; | 1773 | md->queue->unplug_fn = dm_unplug_all; |
1137 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); | 1774 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); |
1138 | 1775 | blk_queue_softirq_done(md->queue, dm_softirq_done); | |
1139 | md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache); | 1776 | blk_queue_prep_rq(md->queue, dm_prep_fn); |
1140 | if (!md->io_pool) | 1777 | blk_queue_lld_busy(md->queue, dm_lld_busy); |
1141 | goto bad_io_pool; | ||
1142 | |||
1143 | md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache); | ||
1144 | if (!md->tio_pool) | ||
1145 | goto bad_tio_pool; | ||
1146 | |||
1147 | md->bs = bioset_create(16, 0); | ||
1148 | if (!md->bs) | ||
1149 | goto bad_no_bioset; | ||
1150 | 1778 | ||
1151 | md->disk = alloc_disk(1); | 1779 | md->disk = alloc_disk(1); |
1152 | if (!md->disk) | 1780 | if (!md->disk) |
@@ -1170,6 +1798,10 @@ static struct mapped_device *alloc_dev(int minor) | |||
1170 | if (!md->wq) | 1798 | if (!md->wq) |
1171 | goto bad_thread; | 1799 | goto bad_thread; |
1172 | 1800 | ||
1801 | md->bdev = bdget_disk(md->disk, 0); | ||
1802 | if (!md->bdev) | ||
1803 | goto bad_bdev; | ||
1804 | |||
1173 | /* Populate the mapping, nobody knows we exist yet */ | 1805 | /* Populate the mapping, nobody knows we exist yet */ |
1174 | spin_lock(&_minor_lock); | 1806 | spin_lock(&_minor_lock); |
1175 | old_md = idr_replace(&_minor_idr, md, minor); | 1807 | old_md = idr_replace(&_minor_idr, md, minor); |
@@ -1179,15 +1811,11 @@ static struct mapped_device *alloc_dev(int minor) | |||
1179 | 1811 | ||
1180 | return md; | 1812 | return md; |
1181 | 1813 | ||
1814 | bad_bdev: | ||
1815 | destroy_workqueue(md->wq); | ||
1182 | bad_thread: | 1816 | bad_thread: |
1183 | put_disk(md->disk); | 1817 | put_disk(md->disk); |
1184 | bad_disk: | 1818 | bad_disk: |
1185 | bioset_free(md->bs); | ||
1186 | bad_no_bioset: | ||
1187 | mempool_destroy(md->tio_pool); | ||
1188 | bad_tio_pool: | ||
1189 | mempool_destroy(md->io_pool); | ||
1190 | bad_io_pool: | ||
1191 | blk_cleanup_queue(md->queue); | 1819 | blk_cleanup_queue(md->queue); |
1192 | bad_queue: | 1820 | bad_queue: |
1193 | free_minor(minor); | 1821 | free_minor(minor); |
@@ -1204,14 +1832,15 @@ static void free_dev(struct mapped_device *md) | |||
1204 | { | 1832 | { |
1205 | int minor = MINOR(disk_devt(md->disk)); | 1833 | int minor = MINOR(disk_devt(md->disk)); |
1206 | 1834 | ||
1207 | if (md->suspended_bdev) { | 1835 | unlock_fs(md); |
1208 | unlock_fs(md); | 1836 | bdput(md->bdev); |
1209 | bdput(md->suspended_bdev); | ||
1210 | } | ||
1211 | destroy_workqueue(md->wq); | 1837 | destroy_workqueue(md->wq); |
1212 | mempool_destroy(md->tio_pool); | 1838 | if (md->tio_pool) |
1213 | mempool_destroy(md->io_pool); | 1839 | mempool_destroy(md->tio_pool); |
1214 | bioset_free(md->bs); | 1840 | if (md->io_pool) |
1841 | mempool_destroy(md->io_pool); | ||
1842 | if (md->bs) | ||
1843 | bioset_free(md->bs); | ||
1215 | blk_integrity_unregister(md->disk); | 1844 | blk_integrity_unregister(md->disk); |
1216 | del_gendisk(md->disk); | 1845 | del_gendisk(md->disk); |
1217 | free_minor(minor); | 1846 | free_minor(minor); |
@@ -1226,6 +1855,29 @@ static void free_dev(struct mapped_device *md) | |||
1226 | kfree(md); | 1855 | kfree(md); |
1227 | } | 1856 | } |
1228 | 1857 | ||
1858 | static void __bind_mempools(struct mapped_device *md, struct dm_table *t) | ||
1859 | { | ||
1860 | struct dm_md_mempools *p; | ||
1861 | |||
1862 | if (md->io_pool && md->tio_pool && md->bs) | ||
1863 | /* the md already has necessary mempools */ | ||
1864 | goto out; | ||
1865 | |||
1866 | p = dm_table_get_md_mempools(t); | ||
1867 | BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); | ||
1868 | |||
1869 | md->io_pool = p->io_pool; | ||
1870 | p->io_pool = NULL; | ||
1871 | md->tio_pool = p->tio_pool; | ||
1872 | p->tio_pool = NULL; | ||
1873 | md->bs = p->bs; | ||
1874 | p->bs = NULL; | ||
1875 | |||
1876 | out: | ||
1877 | /* mempool bind completed, the table no longer needs its mempools */ | ||
1878 | dm_table_free_md_mempools(t); | ||
1879 | } | ||
1880 | |||
1229 | /* | 1881 | /* |
1230 | * Bind a table to the device. | 1882 | * Bind a table to the device. |
1231 | */ | 1883 | */ |
@@ -1249,15 +1901,17 @@ static void __set_size(struct mapped_device *md, sector_t size) | |||
1249 | { | 1901 | { |
1250 | set_capacity(md->disk, size); | 1902 | set_capacity(md->disk, size); |
1251 | 1903 | ||
1252 | mutex_lock(&md->suspended_bdev->bd_inode->i_mutex); | 1904 | mutex_lock(&md->bdev->bd_inode->i_mutex); |
1253 | i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); | 1905 | i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); |
1254 | mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex); | 1906 | mutex_unlock(&md->bdev->bd_inode->i_mutex); |
1255 | } | 1907 | } |
1256 | 1908 | ||
1257 | static int __bind(struct mapped_device *md, struct dm_table *t) | 1909 | static int __bind(struct mapped_device *md, struct dm_table *t, |
1910 | struct queue_limits *limits) | ||
1258 | { | 1911 | { |
1259 | struct request_queue *q = md->queue; | 1912 | struct request_queue *q = md->queue; |
1260 | sector_t size; | 1913 | sector_t size; |
1914 | unsigned long flags; | ||
1261 | 1915 | ||
1262 | size = dm_table_get_size(t); | 1916 | size = dm_table_get_size(t); |
1263 | 1917 | ||
@@ -1267,8 +1921,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t) | |||
1267 | if (size != get_capacity(md->disk)) | 1921 | if (size != get_capacity(md->disk)) |
1268 | memset(&md->geometry, 0, sizeof(md->geometry)); | 1922 | memset(&md->geometry, 0, sizeof(md->geometry)); |
1269 | 1923 | ||
1270 | if (md->suspended_bdev) | 1924 | __set_size(md, size); |
1271 | __set_size(md, size); | ||
1272 | 1925 | ||
1273 | if (!size) { | 1926 | if (!size) { |
1274 | dm_table_destroy(t); | 1927 | dm_table_destroy(t); |
@@ -1277,10 +1930,22 @@ static int __bind(struct mapped_device *md, struct dm_table *t) | |||
1277 | 1930 | ||
1278 | dm_table_event_callback(t, event_callback, md); | 1931 | dm_table_event_callback(t, event_callback, md); |
1279 | 1932 | ||
1280 | write_lock(&md->map_lock); | 1933 | /* |
1934 | * The queue hasn't been stopped yet, if the old table type wasn't | ||
1935 | * for request-based during suspension. So stop it to prevent | ||
1936 | * I/O mapping before resume. | ||
1937 | * This must be done before setting the queue restrictions, | ||
1938 | * because request-based dm may be run just after the setting. | ||
1939 | */ | ||
1940 | if (dm_table_request_based(t) && !blk_queue_stopped(q)) | ||
1941 | stop_queue(q); | ||
1942 | |||
1943 | __bind_mempools(md, t); | ||
1944 | |||
1945 | write_lock_irqsave(&md->map_lock, flags); | ||
1281 | md->map = t; | 1946 | md->map = t; |
1282 | dm_table_set_restrictions(t, q); | 1947 | dm_table_set_restrictions(t, q, limits); |
1283 | write_unlock(&md->map_lock); | 1948 | write_unlock_irqrestore(&md->map_lock, flags); |
1284 | 1949 | ||
1285 | return 0; | 1950 | return 0; |
1286 | } | 1951 | } |
@@ -1288,14 +1953,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t) | |||
1288 | static void __unbind(struct mapped_device *md) | 1953 | static void __unbind(struct mapped_device *md) |
1289 | { | 1954 | { |
1290 | struct dm_table *map = md->map; | 1955 | struct dm_table *map = md->map; |
1956 | unsigned long flags; | ||
1291 | 1957 | ||
1292 | if (!map) | 1958 | if (!map) |
1293 | return; | 1959 | return; |
1294 | 1960 | ||
1295 | dm_table_event_callback(map, NULL, NULL); | 1961 | dm_table_event_callback(map, NULL, NULL); |
1296 | write_lock(&md->map_lock); | 1962 | write_lock_irqsave(&md->map_lock, flags); |
1297 | md->map = NULL; | 1963 | md->map = NULL; |
1298 | write_unlock(&md->map_lock); | 1964 | write_unlock_irqrestore(&md->map_lock, flags); |
1299 | dm_table_destroy(map); | 1965 | dm_table_destroy(map); |
1300 | } | 1966 | } |
1301 | 1967 | ||
@@ -1399,6 +2065,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
1399 | { | 2065 | { |
1400 | int r = 0; | 2066 | int r = 0; |
1401 | DECLARE_WAITQUEUE(wait, current); | 2067 | DECLARE_WAITQUEUE(wait, current); |
2068 | struct request_queue *q = md->queue; | ||
2069 | unsigned long flags; | ||
1402 | 2070 | ||
1403 | dm_unplug_all(md->queue); | 2071 | dm_unplug_all(md->queue); |
1404 | 2072 | ||
@@ -1408,7 +2076,14 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
1408 | set_current_state(interruptible); | 2076 | set_current_state(interruptible); |
1409 | 2077 | ||
1410 | smp_mb(); | 2078 | smp_mb(); |
1411 | if (!atomic_read(&md->pending)) | 2079 | if (dm_request_based(md)) { |
2080 | spin_lock_irqsave(q->queue_lock, flags); | ||
2081 | if (!queue_in_flight(q) && blk_queue_stopped(q)) { | ||
2082 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2083 | break; | ||
2084 | } | ||
2085 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2086 | } else if (!atomic_read(&md->pending)) | ||
1412 | break; | 2087 | break; |
1413 | 2088 | ||
1414 | if (interruptible == TASK_INTERRUPTIBLE && | 2089 | if (interruptible == TASK_INTERRUPTIBLE && |
@@ -1426,34 +2101,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
1426 | return r; | 2101 | return r; |
1427 | } | 2102 | } |
1428 | 2103 | ||
1429 | static int dm_flush(struct mapped_device *md) | 2104 | static void dm_flush(struct mapped_device *md) |
1430 | { | 2105 | { |
1431 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | 2106 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); |
1432 | return 0; | 2107 | |
2108 | bio_init(&md->barrier_bio); | ||
2109 | md->barrier_bio.bi_bdev = md->bdev; | ||
2110 | md->barrier_bio.bi_rw = WRITE_BARRIER; | ||
2111 | __split_and_process_bio(md, &md->barrier_bio); | ||
2112 | |||
2113 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | ||
1433 | } | 2114 | } |
1434 | 2115 | ||
1435 | static void process_barrier(struct mapped_device *md, struct bio *bio) | 2116 | static void process_barrier(struct mapped_device *md, struct bio *bio) |
1436 | { | 2117 | { |
1437 | int error = dm_flush(md); | 2118 | md->barrier_error = 0; |
1438 | |||
1439 | if (unlikely(error)) { | ||
1440 | bio_endio(bio, error); | ||
1441 | return; | ||
1442 | } | ||
1443 | if (bio_empty_barrier(bio)) { | ||
1444 | bio_endio(bio, 0); | ||
1445 | return; | ||
1446 | } | ||
1447 | |||
1448 | __split_and_process_bio(md, bio); | ||
1449 | 2119 | ||
1450 | error = dm_flush(md); | 2120 | dm_flush(md); |
1451 | 2121 | ||
1452 | if (!error && md->barrier_error) | 2122 | if (!bio_empty_barrier(bio)) { |
1453 | error = md->barrier_error; | 2123 | __split_and_process_bio(md, bio); |
2124 | dm_flush(md); | ||
2125 | } | ||
1454 | 2126 | ||
1455 | if (md->barrier_error != DM_ENDIO_REQUEUE) | 2127 | if (md->barrier_error != DM_ENDIO_REQUEUE) |
1456 | bio_endio(bio, error); | 2128 | bio_endio(bio, md->barrier_error); |
2129 | else { | ||
2130 | spin_lock_irq(&md->deferred_lock); | ||
2131 | bio_list_add_head(&md->deferred, bio); | ||
2132 | spin_unlock_irq(&md->deferred_lock); | ||
2133 | } | ||
1457 | } | 2134 | } |
1458 | 2135 | ||
1459 | /* | 2136 | /* |
@@ -1479,10 +2156,14 @@ static void dm_wq_work(struct work_struct *work) | |||
1479 | 2156 | ||
1480 | up_write(&md->io_lock); | 2157 | up_write(&md->io_lock); |
1481 | 2158 | ||
1482 | if (bio_barrier(c)) | 2159 | if (dm_request_based(md)) |
1483 | process_barrier(md, c); | 2160 | generic_make_request(c); |
1484 | else | 2161 | else { |
1485 | __split_and_process_bio(md, c); | 2162 | if (bio_barrier(c)) |
2163 | process_barrier(md, c); | ||
2164 | else | ||
2165 | __split_and_process_bio(md, c); | ||
2166 | } | ||
1486 | 2167 | ||
1487 | down_write(&md->io_lock); | 2168 | down_write(&md->io_lock); |
1488 | } | 2169 | } |
@@ -1502,6 +2183,7 @@ static void dm_queue_flush(struct mapped_device *md) | |||
1502 | */ | 2183 | */ |
1503 | int dm_swap_table(struct mapped_device *md, struct dm_table *table) | 2184 | int dm_swap_table(struct mapped_device *md, struct dm_table *table) |
1504 | { | 2185 | { |
2186 | struct queue_limits limits; | ||
1505 | int r = -EINVAL; | 2187 | int r = -EINVAL; |
1506 | 2188 | ||
1507 | mutex_lock(&md->suspend_lock); | 2189 | mutex_lock(&md->suspend_lock); |
@@ -1510,19 +2192,96 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) | |||
1510 | if (!dm_suspended(md)) | 2192 | if (!dm_suspended(md)) |
1511 | goto out; | 2193 | goto out; |
1512 | 2194 | ||
1513 | /* without bdev, the device size cannot be changed */ | 2195 | r = dm_calculate_queue_limits(table, &limits); |
1514 | if (!md->suspended_bdev) | 2196 | if (r) |
1515 | if (get_capacity(md->disk) != dm_table_get_size(table)) | 2197 | goto out; |
1516 | goto out; | 2198 | |
2199 | /* cannot change the device type, once a table is bound */ | ||
2200 | if (md->map && | ||
2201 | (dm_table_get_type(md->map) != dm_table_get_type(table))) { | ||
2202 | DMWARN("can't change the device type after a table is bound"); | ||
2203 | goto out; | ||
2204 | } | ||
2205 | |||
2206 | /* | ||
2207 | * It is enough that blk_queue_ordered() is called only once when | ||
2208 | * the first bio-based table is bound. | ||
2209 | * | ||
2210 | * This setting should be moved to alloc_dev() when request-based dm | ||
2211 | * supports barrier. | ||
2212 | */ | ||
2213 | if (!md->map && dm_table_bio_based(table)) | ||
2214 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); | ||
1517 | 2215 | ||
1518 | __unbind(md); | 2216 | __unbind(md); |
1519 | r = __bind(md, table); | 2217 | r = __bind(md, table, &limits); |
1520 | 2218 | ||
1521 | out: | 2219 | out: |
1522 | mutex_unlock(&md->suspend_lock); | 2220 | mutex_unlock(&md->suspend_lock); |
1523 | return r; | 2221 | return r; |
1524 | } | 2222 | } |
1525 | 2223 | ||
2224 | static void dm_rq_invalidate_suspend_marker(struct mapped_device *md) | ||
2225 | { | ||
2226 | md->suspend_rq.special = (void *)0x1; | ||
2227 | } | ||
2228 | |||
2229 | static void dm_rq_abort_suspend(struct mapped_device *md, int noflush) | ||
2230 | { | ||
2231 | struct request_queue *q = md->queue; | ||
2232 | unsigned long flags; | ||
2233 | |||
2234 | spin_lock_irqsave(q->queue_lock, flags); | ||
2235 | if (!noflush) | ||
2236 | dm_rq_invalidate_suspend_marker(md); | ||
2237 | __start_queue(q); | ||
2238 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2239 | } | ||
2240 | |||
2241 | static void dm_rq_start_suspend(struct mapped_device *md, int noflush) | ||
2242 | { | ||
2243 | struct request *rq = &md->suspend_rq; | ||
2244 | struct request_queue *q = md->queue; | ||
2245 | |||
2246 | if (noflush) | ||
2247 | stop_queue(q); | ||
2248 | else { | ||
2249 | blk_rq_init(q, rq); | ||
2250 | blk_insert_request(q, rq, 0, NULL); | ||
2251 | } | ||
2252 | } | ||
2253 | |||
2254 | static int dm_rq_suspend_available(struct mapped_device *md, int noflush) | ||
2255 | { | ||
2256 | int r = 1; | ||
2257 | struct request *rq = &md->suspend_rq; | ||
2258 | struct request_queue *q = md->queue; | ||
2259 | unsigned long flags; | ||
2260 | |||
2261 | if (noflush) | ||
2262 | return r; | ||
2263 | |||
2264 | /* The marker must be protected by queue lock if it is in use */ | ||
2265 | spin_lock_irqsave(q->queue_lock, flags); | ||
2266 | if (unlikely(rq->ref_count)) { | ||
2267 | /* | ||
2268 | * This can happen, when the previous flush suspend was | ||
2269 | * interrupted, the marker is still in the queue and | ||
2270 | * this flush suspend has been invoked, because we don't | ||
2271 | * remove the marker at the time of suspend interruption. | ||
2272 | * We have only one marker per mapped_device, so we can't | ||
2273 | * start another flush suspend while it is in use. | ||
2274 | */ | ||
2275 | BUG_ON(!rq->special); /* The marker should be invalidated */ | ||
2276 | DMWARN("Invalidating the previous flush suspend is still in" | ||
2277 | " progress. Please retry later."); | ||
2278 | r = 0; | ||
2279 | } | ||
2280 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2281 | |||
2282 | return r; | ||
2283 | } | ||
2284 | |||
1526 | /* | 2285 | /* |
1527 | * Functions to lock and unlock any filesystem running on the | 2286 | * Functions to lock and unlock any filesystem running on the |
1528 | * device. | 2287 | * device. |
@@ -1533,7 +2292,7 @@ static int lock_fs(struct mapped_device *md) | |||
1533 | 2292 | ||
1534 | WARN_ON(md->frozen_sb); | 2293 | WARN_ON(md->frozen_sb); |
1535 | 2294 | ||
1536 | md->frozen_sb = freeze_bdev(md->suspended_bdev); | 2295 | md->frozen_sb = freeze_bdev(md->bdev); |
1537 | if (IS_ERR(md->frozen_sb)) { | 2296 | if (IS_ERR(md->frozen_sb)) { |
1538 | r = PTR_ERR(md->frozen_sb); | 2297 | r = PTR_ERR(md->frozen_sb); |
1539 | md->frozen_sb = NULL; | 2298 | md->frozen_sb = NULL; |
@@ -1542,9 +2301,6 @@ static int lock_fs(struct mapped_device *md) | |||
1542 | 2301 | ||
1543 | set_bit(DMF_FROZEN, &md->flags); | 2302 | set_bit(DMF_FROZEN, &md->flags); |
1544 | 2303 | ||
1545 | /* don't bdput right now, we don't want the bdev | ||
1546 | * to go away while it is locked. | ||
1547 | */ | ||
1548 | return 0; | 2304 | return 0; |
1549 | } | 2305 | } |
1550 | 2306 | ||
@@ -1553,7 +2309,7 @@ static void unlock_fs(struct mapped_device *md) | |||
1553 | if (!test_bit(DMF_FROZEN, &md->flags)) | 2309 | if (!test_bit(DMF_FROZEN, &md->flags)) |
1554 | return; | 2310 | return; |
1555 | 2311 | ||
1556 | thaw_bdev(md->suspended_bdev, md->frozen_sb); | 2312 | thaw_bdev(md->bdev, md->frozen_sb); |
1557 | md->frozen_sb = NULL; | 2313 | md->frozen_sb = NULL; |
1558 | clear_bit(DMF_FROZEN, &md->flags); | 2314 | clear_bit(DMF_FROZEN, &md->flags); |
1559 | } | 2315 | } |
@@ -1565,6 +2321,53 @@ static void unlock_fs(struct mapped_device *md) | |||
1565 | * dm_bind_table, dm_suspend must be called to flush any in | 2321 | * dm_bind_table, dm_suspend must be called to flush any in |
1566 | * flight bios and ensure that any further io gets deferred. | 2322 | * flight bios and ensure that any further io gets deferred. |
1567 | */ | 2323 | */ |
2324 | /* | ||
2325 | * Suspend mechanism in request-based dm. | ||
2326 | * | ||
2327 | * After the suspend starts, further incoming requests are kept in | ||
2328 | * the request_queue and deferred. | ||
2329 | * Remaining requests in the request_queue at the start of suspend are flushed | ||
2330 | * if it is flush suspend. | ||
2331 | * The suspend completes when the following conditions have been satisfied, | ||
2332 | * so wait for it: | ||
2333 | * 1. q->in_flight is 0 (which means no in_flight request) | ||
2334 | * 2. queue has been stopped (which means no request dispatching) | ||
2335 | * | ||
2336 | * | ||
2337 | * Noflush suspend | ||
2338 | * --------------- | ||
2339 | * Noflush suspend doesn't need to dispatch remaining requests. | ||
2340 | * So stop the queue immediately. Then, wait for all in_flight requests | ||
2341 | * to be completed or requeued. | ||
2342 | * | ||
2343 | * To abort noflush suspend, start the queue. | ||
2344 | * | ||
2345 | * | ||
2346 | * Flush suspend | ||
2347 | * ------------- | ||
2348 | * Flush suspend needs to dispatch remaining requests. So stop the queue | ||
2350 | * after the remaining requests are completed. (A requeued request must also be | ||
2350 | * re-dispatched and completed. Until then, we can't stop the queue.) | ||
2351 | * | ||
2352 | * While the remaining requests are being flushed, further incoming requests | ||
2353 | * are also inserted into the same queue. To distinguish which requests are to be | ||
2354 | * flushed, we insert a marker request to the queue at the time of starting | ||
2355 | * flush suspend, like a barrier. | ||
2356 | * The dispatching is blocked when the marker is found on the top of the queue. | ||
2357 | * And the queue is stopped when all in_flight requests are completed, since | ||
2358 | * that means the remaining requests are completely flushed. | ||
2359 | * Then, the marker is removed from the queue. | ||
2360 | * | ||
2361 | * To abort flush suspend, we also need to take care of the marker, not only | ||
2362 | * starting the queue. | ||
2363 | * We don't remove the marker forcibly from the queue since it's against | ||
2364 | * the block-layer manner. Instead, we put an invalidated mark on the marker. | ||
2365 | * When the invalidated marker is found on the top of the queue, it is | ||
2366 | * immediately removed from the queue, so it doesn't block dispatching. | ||
2367 | * Because we have only one marker per mapped_device, we can't start another | ||
2368 | * flush suspend until the invalidated marker is removed from the queue. | ||
2369 | * So fail and return with -EBUSY in such a case. | ||
2370 | */ | ||
1568 | int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | 2371 | int dm_suspend(struct mapped_device *md, unsigned suspend_flags) |
1569 | { | 2372 | { |
1570 | struct dm_table *map = NULL; | 2373 | struct dm_table *map = NULL; |
@@ -1579,6 +2382,11 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1579 | goto out_unlock; | 2382 | goto out_unlock; |
1580 | } | 2383 | } |
1581 | 2384 | ||
2385 | if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) { | ||
2386 | r = -EBUSY; | ||
2387 | goto out_unlock; | ||
2388 | } | ||
2389 | |||
1582 | map = dm_get_table(md); | 2390 | map = dm_get_table(md); |
1583 | 2391 | ||
1584 | /* | 2392 | /* |
@@ -1591,24 +2399,14 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1591 | /* This does not get reverted if there's an error later. */ | 2399 | /* This does not get reverted if there's an error later. */ |
1592 | dm_table_presuspend_targets(map); | 2400 | dm_table_presuspend_targets(map); |
1593 | 2401 | ||
1594 | /* bdget() can stall if the pending I/Os are not flushed */ | 2402 | /* |
1595 | if (!noflush) { | 2403 | * Flush I/O to the device. noflush supersedes do_lockfs, |
1596 | md->suspended_bdev = bdget_disk(md->disk, 0); | 2404 | * because lock_fs() needs to flush I/Os. |
1597 | if (!md->suspended_bdev) { | 2405 | */ |
1598 | DMWARN("bdget failed in dm_suspend"); | 2406 | if (!noflush && do_lockfs) { |
1599 | r = -ENOMEM; | 2407 | r = lock_fs(md); |
2408 | if (r) | ||
1600 | goto out; | 2409 | goto out; |
1601 | } | ||
1602 | |||
1603 | /* | ||
1604 | * Flush I/O to the device. noflush supersedes do_lockfs, | ||
1605 | * because lock_fs() needs to flush I/Os. | ||
1606 | */ | ||
1607 | if (do_lockfs) { | ||
1608 | r = lock_fs(md); | ||
1609 | if (r) | ||
1610 | goto out; | ||
1611 | } | ||
1612 | } | 2410 | } |
1613 | 2411 | ||
1614 | /* | 2412 | /* |
@@ -1634,6 +2432,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1634 | 2432 | ||
1635 | flush_workqueue(md->wq); | 2433 | flush_workqueue(md->wq); |
1636 | 2434 | ||
2435 | if (dm_request_based(md)) | ||
2436 | dm_rq_start_suspend(md, noflush); | ||
2437 | |||
1637 | /* | 2438 | /* |
1638 | * At this point no more requests are entering target request routines. | 2439 | * At this point no more requests are entering target request routines. |
1639 | * We call dm_wait_for_completion to wait for all existing requests | 2440 | * We call dm_wait_for_completion to wait for all existing requests |
@@ -1650,6 +2451,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1650 | if (r < 0) { | 2451 | if (r < 0) { |
1651 | dm_queue_flush(md); | 2452 | dm_queue_flush(md); |
1652 | 2453 | ||
2454 | if (dm_request_based(md)) | ||
2455 | dm_rq_abort_suspend(md, noflush); | ||
2456 | |||
1653 | unlock_fs(md); | 2457 | unlock_fs(md); |
1654 | goto out; /* pushback list is already flushed, so skip flush */ | 2458 | goto out; /* pushback list is already flushed, so skip flush */ |
1655 | } | 2459 | } |
@@ -1665,11 +2469,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1665 | set_bit(DMF_SUSPENDED, &md->flags); | 2469 | set_bit(DMF_SUSPENDED, &md->flags); |
1666 | 2470 | ||
1667 | out: | 2471 | out: |
1668 | if (r && md->suspended_bdev) { | ||
1669 | bdput(md->suspended_bdev); | ||
1670 | md->suspended_bdev = NULL; | ||
1671 | } | ||
1672 | |||
1673 | dm_table_put(map); | 2472 | dm_table_put(map); |
1674 | 2473 | ||
1675 | out_unlock: | 2474 | out_unlock: |
@@ -1696,21 +2495,20 @@ int dm_resume(struct mapped_device *md) | |||
1696 | 2495 | ||
1697 | dm_queue_flush(md); | 2496 | dm_queue_flush(md); |
1698 | 2497 | ||
1699 | unlock_fs(md); | 2498 | /* |
2499 | * Flushing deferred I/Os must be done after targets are resumed | ||
2500 | * so that mapping of targets can work correctly. | ||
2501 | * Request-based dm is queueing the deferred I/Os in its request_queue. | ||
2502 | */ | ||
2503 | if (dm_request_based(md)) | ||
2504 | start_queue(md->queue); | ||
1700 | 2505 | ||
1701 | if (md->suspended_bdev) { | 2506 | unlock_fs(md); |
1702 | bdput(md->suspended_bdev); | ||
1703 | md->suspended_bdev = NULL; | ||
1704 | } | ||
1705 | 2507 | ||
1706 | clear_bit(DMF_SUSPENDED, &md->flags); | 2508 | clear_bit(DMF_SUSPENDED, &md->flags); |
1707 | 2509 | ||
1708 | dm_table_unplug_all(map); | 2510 | dm_table_unplug_all(map); |
1709 | |||
1710 | dm_kobject_uevent(md); | ||
1711 | |||
1712 | r = 0; | 2511 | r = 0; |
1713 | |||
1714 | out: | 2512 | out: |
1715 | dm_table_put(map); | 2513 | dm_table_put(map); |
1716 | mutex_unlock(&md->suspend_lock); | 2514 | mutex_unlock(&md->suspend_lock); |
@@ -1721,9 +2519,19 @@ out: | |||
1721 | /*----------------------------------------------------------------- | 2519 | /*----------------------------------------------------------------- |
1722 | * Event notification. | 2520 | * Event notification. |
1723 | *---------------------------------------------------------------*/ | 2521 | *---------------------------------------------------------------*/ |
1724 | void dm_kobject_uevent(struct mapped_device *md) | 2522 | void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, |
1725 | { | 2523 | unsigned cookie) |
1726 | kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE); | 2524 | { |
2525 | char udev_cookie[DM_COOKIE_LENGTH]; | ||
2526 | char *envp[] = { udev_cookie, NULL }; | ||
2527 | |||
2528 | if (!cookie) | ||
2529 | kobject_uevent(&disk_to_dev(md->disk)->kobj, action); | ||
2530 | else { | ||
2531 | snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", | ||
2532 | DM_COOKIE_ENV_VAR_NAME, cookie); | ||
2533 | kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); | ||
2534 | } | ||
1727 | } | 2535 | } |
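With the extra arguments, callers can tag an event with a userspace-supplied cookie so udev rules can match on the environment variable named by DM_COOKIE_ENV_VAR_NAME. A caller sketch; in practice the cookie arrives through the dm ioctl layer, and the wrapper name here is invented:

static void example_notify_change(struct mapped_device *md, unsigned cookie)
{
	/* A zero cookie falls back to a plain uevent without the env var. */
	dm_kobject_uevent(md, KOBJ_CHANGE, cookie);
}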
1728 | 2536 | ||
1729 | uint32_t dm_next_uevent_seq(struct mapped_device *md) | 2537 | uint32_t dm_next_uevent_seq(struct mapped_device *md) |
@@ -1777,6 +2585,10 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) | |||
1777 | if (&md->kobj != kobj) | 2585 | if (&md->kobj != kobj) |
1778 | return NULL; | 2586 | return NULL; |
1779 | 2587 | ||
2588 | if (test_bit(DMF_FREEING, &md->flags) || | ||
2589 | test_bit(DMF_DELETING, &md->flags)) | ||
2590 | return NULL; | ||
2591 | |||
1780 | dm_get(md); | 2592 | dm_get(md); |
1781 | return md; | 2593 | return md; |
1782 | } | 2594 | } |
@@ -1797,6 +2609,61 @@ int dm_noflush_suspending(struct dm_target *ti) | |||
1797 | } | 2609 | } |
1798 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); | 2610 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); |
1799 | 2611 | ||
2612 | struct dm_md_mempools *dm_alloc_md_mempools(unsigned type) | ||
2613 | { | ||
2614 | struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); | ||
2615 | |||
2616 | if (!pools) | ||
2617 | return NULL; | ||
2618 | |||
2619 | pools->io_pool = (type == DM_TYPE_BIO_BASED) ? | ||
2620 | mempool_create_slab_pool(MIN_IOS, _io_cache) : | ||
2621 | mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache); | ||
2622 | if (!pools->io_pool) | ||
2623 | goto free_pools_and_out; | ||
2624 | |||
2625 | pools->tio_pool = (type == DM_TYPE_BIO_BASED) ? | ||
2626 | mempool_create_slab_pool(MIN_IOS, _tio_cache) : | ||
2627 | mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); | ||
2628 | if (!pools->tio_pool) | ||
2629 | goto free_io_pool_and_out; | ||
2630 | |||
2631 | pools->bs = (type == DM_TYPE_BIO_BASED) ? | ||
2632 | bioset_create(16, 0) : bioset_create(MIN_IOS, 0); | ||
2633 | if (!pools->bs) | ||
2634 | goto free_tio_pool_and_out; | ||
2635 | |||
2636 | return pools; | ||
2637 | |||
2638 | free_tio_pool_and_out: | ||
2639 | mempool_destroy(pools->tio_pool); | ||
2640 | |||
2641 | free_io_pool_and_out: | ||
2642 | mempool_destroy(pools->io_pool); | ||
2643 | |||
2644 | free_pools_and_out: | ||
2645 | kfree(pools); | ||
2646 | |||
2647 | return NULL; | ||
2648 | } | ||
2649 | |||
2650 | void dm_free_md_mempools(struct dm_md_mempools *pools) | ||
2651 | { | ||
2652 | if (!pools) | ||
2653 | return; | ||
2654 | |||
2655 | if (pools->io_pool) | ||
2656 | mempool_destroy(pools->io_pool); | ||
2657 | |||
2658 | if (pools->tio_pool) | ||
2659 | mempool_destroy(pools->tio_pool); | ||
2660 | |||
2661 | if (pools->bs) | ||
2662 | bioset_free(pools->bs); | ||
2663 | |||
2664 | kfree(pools); | ||
2665 | } | ||
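Which pool flavour to allocate follows from the table type. A sketch of that decision using the dm_target_request_based() test and DM_TYPE_* constants added to dm.h below; the helper name is invented, and the real wiring lives in dm-table.c (not shown in this hunk):

static struct dm_md_mempools *example_pools_for_target(struct dm_target *ti)
{
	unsigned type = dm_target_request_based(ti) ?
				DM_TYPE_REQUEST_BASED : DM_TYPE_BIO_BASED;

	/* NULL on allocation failure; dm_free_md_mempools() is NULL-safe. */
	return dm_alloc_md_mempools(type);
}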
2666 | |||
1800 | static struct block_device_operations dm_blk_dops = { | 2667 | static struct block_device_operations dm_blk_dops = { |
1801 | .open = dm_blk_open, | 2668 | .open = dm_blk_open, |
1802 | .release = dm_blk_close, | 2669 | .release = dm_blk_close, |
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a31506d93e91..23278ae80f08 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -23,6 +23,13 @@ | |||
23 | #define DM_SUSPEND_NOFLUSH_FLAG (1 << 1) | 23 | #define DM_SUSPEND_NOFLUSH_FLAG (1 << 1) |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * Type of table and mapped_device's mempool | ||
27 | */ | ||
28 | #define DM_TYPE_NONE 0 | ||
29 | #define DM_TYPE_BIO_BASED 1 | ||
30 | #define DM_TYPE_REQUEST_BASED 2 | ||
31 | |||
32 | /* | ||
26 | * List of devices that a metadevice uses and should open/close. | 33 | * List of devices that a metadevice uses and should open/close. |
27 | */ | 34 | */ |
28 | struct dm_dev_internal { | 35 | struct dm_dev_internal { |
@@ -32,6 +39,7 @@ struct dm_dev_internal { | |||
32 | }; | 39 | }; |
33 | 40 | ||
34 | struct dm_table; | 41 | struct dm_table; |
42 | struct dm_md_mempools; | ||
35 | 43 | ||
36 | /*----------------------------------------------------------------- | 44 | /*----------------------------------------------------------------- |
37 | * Internal table functions. | 45 | * Internal table functions. |
@@ -41,18 +49,34 @@ void dm_table_event_callback(struct dm_table *t, | |||
41 | void (*fn)(void *), void *context); | 49 | void (*fn)(void *), void *context); |
42 | struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); | 50 | struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); |
43 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); | 51 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); |
44 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q); | 52 | int dm_calculate_queue_limits(struct dm_table *table, |
53 | struct queue_limits *limits); | ||
54 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | ||
55 | struct queue_limits *limits); | ||
45 | struct list_head *dm_table_get_devices(struct dm_table *t); | 56 | struct list_head *dm_table_get_devices(struct dm_table *t); |
46 | void dm_table_presuspend_targets(struct dm_table *t); | 57 | void dm_table_presuspend_targets(struct dm_table *t); |
47 | void dm_table_postsuspend_targets(struct dm_table *t); | 58 | void dm_table_postsuspend_targets(struct dm_table *t); |
48 | int dm_table_resume_targets(struct dm_table *t); | 59 | int dm_table_resume_targets(struct dm_table *t); |
49 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); | 60 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); |
61 | int dm_table_any_busy_target(struct dm_table *t); | ||
62 | int dm_table_set_type(struct dm_table *t); | ||
63 | unsigned dm_table_get_type(struct dm_table *t); | ||
64 | bool dm_table_bio_based(struct dm_table *t); | ||
65 | bool dm_table_request_based(struct dm_table *t); | ||
66 | int dm_table_alloc_md_mempools(struct dm_table *t); | ||
67 | void dm_table_free_md_mempools(struct dm_table *t); | ||
68 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); | ||
50 | 69 | ||
51 | /* | 70 | /* |
52 | * To check the return value from dm_table_find_target(). | 71 | * To check the return value from dm_table_find_target(). |
53 | */ | 72 | */ |
54 | #define dm_target_is_valid(t) ((t)->table) | 73 | #define dm_target_is_valid(t) ((t)->table) |
55 | 74 | ||
75 | /* | ||
76 | * To check whether the target type is request-based or not (bio-based). | ||
77 | */ | ||
78 | #define dm_target_request_based(t) ((t)->type->map_rq != NULL) | ||
79 | |||
56 | /*----------------------------------------------------------------- | 80 | /*----------------------------------------------------------------- |
57 | * A registry of target types. | 81 | * A registry of target types. |
58 | *---------------------------------------------------------------*/ | 82 | *---------------------------------------------------------------*/ |
@@ -92,9 +116,16 @@ void dm_stripe_exit(void); | |||
92 | int dm_open_count(struct mapped_device *md); | 116 | int dm_open_count(struct mapped_device *md); |
93 | int dm_lock_for_deletion(struct mapped_device *md); | 117 | int dm_lock_for_deletion(struct mapped_device *md); |
94 | 118 | ||
95 | void dm_kobject_uevent(struct mapped_device *md); | 119 | void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, |
120 | unsigned cookie); | ||
96 | 121 | ||
97 | int dm_kcopyd_init(void); | 122 | int dm_kcopyd_init(void); |
98 | void dm_kcopyd_exit(void); | 123 | void dm_kcopyd_exit(void); |
99 | 124 | ||
125 | /* | ||
126 | * Mempool operations | ||
127 | */ | ||
128 | struct dm_md_mempools *dm_alloc_md_mempools(unsigned type); | ||
129 | void dm_free_md_mempools(struct dm_md_mempools *pools); | ||
130 | |||
100 | #endif | 131 | #endif |
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c index 3fe158ac7bbf..4216328552f6 100644 --- a/drivers/media/common/ir-keymaps.c +++ b/drivers/media/common/ir-keymaps.c | |||
@@ -2750,3 +2750,26 @@ IR_KEYTAB_TYPE ir_codes_dm1105_nec[IR_KEYTAB_SIZE] = { | |||
2750 | [0x1b] = KEY_B, /*recall*/ | 2750 | [0x1b] = KEY_B, /*recall*/ |
2751 | }; | 2751 | }; |
2752 | EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec); | 2752 | EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec); |
2753 | |||
2754 | /* EVGA inDtube | ||
2755 | Devin Heitmueller <devin.heitmueller@gmail.com> | ||
2756 | */ | ||
2757 | IR_KEYTAB_TYPE ir_codes_evga_indtube[IR_KEYTAB_SIZE] = { | ||
2758 | [0x12] = KEY_POWER, | ||
2759 | [0x02] = KEY_MODE, /* TV */ | ||
2760 | [0x14] = KEY_MUTE, | ||
2761 | [0x1a] = KEY_CHANNELUP, | ||
2762 | [0x16] = KEY_TV2, /* PIP */ | ||
2763 | [0x1d] = KEY_VOLUMEUP, | ||
2764 | [0x05] = KEY_CHANNELDOWN, | ||
2765 | [0x0f] = KEY_PLAYPAUSE, | ||
2766 | [0x19] = KEY_VOLUMEDOWN, | ||
2767 | [0x1c] = KEY_REWIND, | ||
2768 | [0x0d] = KEY_RECORD, | ||
2769 | [0x18] = KEY_FORWARD, | ||
2770 | [0x1e] = KEY_PREVIOUS, | ||
2771 | [0x1b] = KEY_STOP, | ||
2772 | [0x1f] = KEY_NEXT, | ||
2773 | [0x13] = KEY_CAMERA, | ||
2774 | }; | ||
2775 | EXPORT_SYMBOL_GPL(ir_codes_evga_indtube); | ||
diff --git a/drivers/media/dvb/frontends/stv0900.h b/drivers/media/dvb/frontends/stv0900.h index 8a1332c2031d..bf4e9b633044 100644 --- a/drivers/media/dvb/frontends/stv0900.h +++ b/drivers/media/dvb/frontends/stv0900.h | |||
@@ -29,6 +29,11 @@ | |||
29 | #include <linux/dvb/frontend.h> | 29 | #include <linux/dvb/frontend.h> |
30 | #include "dvb_frontend.h" | 30 | #include "dvb_frontend.h" |
31 | 31 | ||
32 | struct stv0900_reg { | ||
33 | u16 addr; | ||
34 | u8 val; | ||
35 | }; | ||
36 | |||
32 | struct stv0900_config { | 37 | struct stv0900_config { |
33 | u8 demod_address; | 38 | u8 demod_address; |
34 | u32 xtal; | 39 | u32 xtal; |
@@ -38,7 +43,7 @@ struct stv0900_config { | |||
38 | 43 | ||
39 | u8 path1_mode; | 44 | u8 path1_mode; |
40 | u8 path2_mode; | 45 | u8 path2_mode; |
41 | 46 | struct stv0900_reg *ts_config_regs; | |
42 | u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */ | 47 | u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */ |
43 | u8 tun2_maddress; | 48 | u8 tun2_maddress; |
44 | u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */ | 49 | u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */ |
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c index 8499bcf7f251..1da045fbb4ef 100644 --- a/drivers/media/dvb/frontends/stv0900_core.c +++ b/drivers/media/dvb/frontends/stv0900_core.c | |||
@@ -149,31 +149,31 @@ void stv0900_write_reg(struct stv0900_internal *i_params, u16 reg_addr, | |||
149 | dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret); | 149 | dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret); |
150 | } | 150 | } |
151 | 151 | ||
152 | u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg_addr) | 152 | u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg) |
153 | { | 153 | { |
154 | u8 data[2]; | ||
155 | int ret; | 154 | int ret; |
156 | struct i2c_msg i2cmsg = { | 155 | u8 b0[] = { MSB(reg), LSB(reg) }; |
157 | .addr = i_params->i2c_addr, | 156 | u8 buf = 0; |
158 | .flags = 0, | 157 | struct i2c_msg msg[] = { |
159 | .len = 2, | 158 | { |
160 | .buf = data, | 159 | .addr = i_params->i2c_addr, |
160 | .flags = 0, | ||
161 | .buf = b0, | ||
162 | .len = 2, | ||
163 | }, { | ||
164 | .addr = i_params->i2c_addr, | ||
165 | .flags = I2C_M_RD, | ||
166 | .buf = &buf, | ||
167 | .len = 1, | ||
168 | }, | ||
161 | }; | 169 | }; |
162 | 170 | ||
163 | data[0] = MSB(reg_addr); | 171 | ret = i2c_transfer(i_params->i2c_adap, msg, 2); |
164 | data[1] = LSB(reg_addr); | 172 | if (ret != 2) |
165 | 173 | dprintk(KERN_ERR "%s: i2c error %d, reg[0x%02x]\n", | |
166 | ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1); | 174 | __func__, ret, reg); |
167 | if (ret != 1) | ||
168 | dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret); | ||
169 | |||
170 | i2cmsg.flags = I2C_M_RD; | ||
171 | i2cmsg.len = 1; | ||
172 | ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1); | ||
173 | if (ret != 1) | ||
174 | dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret); | ||
175 | 175 | ||
176 | return data[0]; | 176 | return buf; |
177 | } | 177 | } |
178 | 178 | ||
179 | void extract_mask_pos(u32 label, u8 *mask, u8 *pos) | 179 | void extract_mask_pos(u32 label, u8 *mask, u8 *pos) |
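The stv0900_read_reg() rewrite above folds the two separate i2c_transfer() calls into a single transaction: a two-byte register-address write followed by an I2C_M_RD message under a repeated start, with the failing register included in the error message. A minimal sketch of that generic pattern, assuming a 16-bit register address and a one-byte payload; the function and variable names are illustrative, not part of the driver:

	#include <linux/i2c.h>

	/* Combined write-then-read register access using a repeated start. */
	static int read_reg16(struct i2c_adapter *adap, u8 addr, u16 reg, u8 *val)
	{
		u8 wbuf[2] = { reg >> 8, reg & 0xff };		/* MSB first */
		struct i2c_msg msg[2] = {
			{ .addr = addr, .flags = 0,        .buf = wbuf, .len = 2 },
			{ .addr = addr, .flags = I2C_M_RD, .buf = val,  .len = 1 },
		};

		/* i2c_transfer() returns the number of messages completed. */
		return i2c_transfer(adap, msg, 2) == 2 ? 0 : -EIO;
	}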
@@ -712,6 +712,44 @@ static s32 stv0900_carr_get_quality(struct dvb_frontend *fe, | |||
712 | return c_n; | 712 | return c_n; |
713 | } | 713 | } |
714 | 714 | ||
715 | static int stv0900_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) | ||
716 | { | ||
717 | struct stv0900_state *state = fe->demodulator_priv; | ||
718 | struct stv0900_internal *i_params = state->internal; | ||
719 | enum fe_stv0900_demod_num demod = state->demod; | ||
720 | u8 err_val1, err_val0; | ||
721 | s32 err_field1, err_field0; | ||
722 | u32 header_err_val = 0; | ||
723 | |||
724 | *ucblocks = 0x0; | ||
725 | if (stv0900_get_standard(fe, demod) == STV0900_DVBS2_STANDARD) { | ||
726 | /* DVB-S2 delineator error count */ | ||
727 | |||
728 | /* retrieving number of erroneous headers */ | ||
729 | dmd_reg(err_field0, R0900_P1_BBFCRCKO0, | ||
730 | R0900_P2_BBFCRCKO0); | ||
731 | dmd_reg(err_field1, R0900_P1_BBFCRCKO1, | ||
732 | R0900_P2_BBFCRCKO1); | ||
733 | |||
734 | err_val1 = stv0900_read_reg(i_params, err_field1); | ||
735 | err_val0 = stv0900_read_reg(i_params, err_field0); | ||
736 | header_err_val = (err_val1<<8) | err_val0; | ||
737 | |||
738 | /* retrieving number of erroneous packets */ | ||
739 | dmd_reg(err_field0, R0900_P1_UPCRCKO0, | ||
740 | R0900_P2_UPCRCKO0); | ||
741 | dmd_reg(err_field1, R0900_P1_UPCRCKO1, | ||
742 | R0900_P2_UPCRCKO1); | ||
743 | |||
744 | err_val1 = stv0900_read_reg(i_params, err_field1); | ||
745 | err_val0 = stv0900_read_reg(i_params, err_field0); | ||
746 | *ucblocks = (err_val1<<8) | err_val0; | ||
747 | *ucblocks += header_err_val; | ||
748 | } | ||
749 | |||
750 | return 0; | ||
751 | } | ||
752 | |||
715 | static int stv0900_read_snr(struct dvb_frontend *fe, u16 *snr) | 753 | static int stv0900_read_snr(struct dvb_frontend *fe, u16 *snr) |
716 | { | 754 | { |
717 | *snr = stv0900_carr_get_quality(fe, | 755 | *snr = stv0900_carr_get_quality(fe, |
@@ -1355,7 +1393,7 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe, | |||
1355 | struct stv0900_state *state = fe->demodulator_priv; | 1393 | struct stv0900_state *state = fe->demodulator_priv; |
1356 | enum fe_stv0900_error error = STV0900_NO_ERROR; | 1394 | enum fe_stv0900_error error = STV0900_NO_ERROR; |
1357 | enum fe_stv0900_error demodError = STV0900_NO_ERROR; | 1395 | enum fe_stv0900_error demodError = STV0900_NO_ERROR; |
1358 | int selosci; | 1396 | int selosci, i; |
1359 | 1397 | ||
1360 | struct stv0900_inode *temp_int = find_inode(state->i2c_adap, | 1398 | struct stv0900_inode *temp_int = find_inode(state->i2c_adap, |
1361 | state->config->demod_address); | 1399 | state->config->demod_address); |
@@ -1402,7 +1440,23 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe, | |||
1402 | stv0900_write_bits(state->internal, F0900_P1_ROLLOFF_CONTROL, p_init->rolloff); | 1440 | stv0900_write_bits(state->internal, F0900_P1_ROLLOFF_CONTROL, p_init->rolloff); |
1403 | stv0900_write_bits(state->internal, F0900_P2_ROLLOFF_CONTROL, p_init->rolloff); | 1441 | stv0900_write_bits(state->internal, F0900_P2_ROLLOFF_CONTROL, p_init->rolloff); |
1404 | 1442 | ||
1405 | stv0900_set_ts_parallel_serial(state->internal, p_init->path1_ts_clock, p_init->path2_ts_clock); | 1443 | state->internal->ts_config = p_init->ts_config; |
1444 | if (state->internal->ts_config == NULL) | ||
1445 | stv0900_set_ts_parallel_serial(state->internal, | ||
1446 | p_init->path1_ts_clock, | ||
1447 | p_init->path2_ts_clock); | ||
1448 | else { | ||
1449 | for (i = 0; state->internal->ts_config[i].addr != 0xffff; i++) | ||
1450 | stv0900_write_reg(state->internal, | ||
1451 | state->internal->ts_config[i].addr, | ||
1452 | state->internal->ts_config[i].val); | ||
1453 | |||
1454 | stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 1); | ||
1455 | stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 0); | ||
1456 | stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 1); | ||
1457 | stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 0); | ||
1458 | } | ||
1459 | |||
1406 | stv0900_write_bits(state->internal, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress); | 1460 | stv0900_write_bits(state->internal, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress); |
1407 | switch (p_init->tuner1_adc) { | 1461 | switch (p_init->tuner1_adc) { |
1408 | case 1: | 1462 | case 1: |
@@ -1882,6 +1936,7 @@ static struct dvb_frontend_ops stv0900_ops = { | |||
1882 | .read_ber = stv0900_read_ber, | 1936 | .read_ber = stv0900_read_ber, |
1883 | .read_signal_strength = stv0900_read_signal_strength, | 1937 | .read_signal_strength = stv0900_read_signal_strength, |
1884 | .read_snr = stv0900_read_snr, | 1938 | .read_snr = stv0900_read_snr, |
1939 | .read_ucblocks = stv0900_read_ucblocks, | ||
1885 | }; | 1940 | }; |
1886 | 1941 | ||
1887 | struct dvb_frontend *stv0900_attach(const struct stv0900_config *config, | 1942 | struct dvb_frontend *stv0900_attach(const struct stv0900_config *config, |
@@ -1915,6 +1970,7 @@ struct dvb_frontend *stv0900_attach(const struct stv0900_config *config, | |||
1915 | init_params.tun1_iq_inversion = STV0900_IQ_NORMAL; | 1970 | init_params.tun1_iq_inversion = STV0900_IQ_NORMAL; |
1916 | init_params.tuner1_adc = config->tun1_adc; | 1971 | init_params.tuner1_adc = config->tun1_adc; |
1917 | init_params.path2_ts_clock = config->path2_mode; | 1972 | init_params.path2_ts_clock = config->path2_mode; |
1973 | init_params.ts_config = config->ts_config_regs; | ||
1918 | init_params.tun2_maddress = config->tun2_maddress; | 1974 | init_params.tun2_maddress = config->tun2_maddress; |
1919 | init_params.tuner2_adc = config->tun2_adc; | 1975 | init_params.tuner2_adc = config->tun2_adc; |
1920 | init_params.tun2_iq_inversion = STV0900_IQ_SWAPPED; | 1976 | init_params.tun2_iq_inversion = STV0900_IQ_SWAPPED; |
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h index 67dc8ec634e2..5ed7a145c7d3 100644 --- a/drivers/media/dvb/frontends/stv0900_priv.h +++ b/drivers/media/dvb/frontends/stv0900_priv.h | |||
@@ -271,6 +271,7 @@ struct stv0900_init_params{ | |||
271 | 271 | ||
272 | /* IQ from the tuner2 to the demod */ | 272 | /* IQ from the tuner2 to the demod */ |
273 | enum stv0900_iq_inversion tun2_iq_inversion; | 273 | enum stv0900_iq_inversion tun2_iq_inversion; |
274 | struct stv0900_reg *ts_config; | ||
274 | }; | 275 | }; |
275 | 276 | ||
276 | struct stv0900_search_params { | 277 | struct stv0900_search_params { |
@@ -363,6 +364,7 @@ struct stv0900_internal{ | |||
363 | u8 i2c_addr; | 364 | u8 i2c_addr; |
364 | u8 clkmode;/* 0 for CLKI, 2 for XTALI */ | 365 | u8 clkmode;/* 0 for CLKI, 2 for XTALI */ |
365 | u8 chip_id; | 366 | u8 chip_id; |
367 | struct stv0900_reg *ts_config; | ||
366 | enum fe_stv0900_error errs; | 368 | enum fe_stv0900_error errs; |
367 | int dmds_used; | 369 | int dmds_used; |
368 | }; | 370 | }; |
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c index 96ef745a2e4e..488bdfb34fb3 100644 --- a/drivers/media/dvb/frontends/stv090x.c +++ b/drivers/media/dvb/frontends/stv090x.c | |||
@@ -2674,7 +2674,7 @@ static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_mod | |||
2674 | 2674 | ||
2675 | static u8 stv090x_optimize_carloop_short(struct stv090x_state *state) | 2675 | static u8 stv090x_optimize_carloop_short(struct stv090x_state *state) |
2676 | { | 2676 | { |
2677 | struct stv090x_short_frame_crloop *short_crl; | 2677 | struct stv090x_short_frame_crloop *short_crl = NULL; |
2678 | s32 index = 0; | 2678 | s32 index = 0; |
2679 | u8 aclc = 0x0b; | 2679 | u8 aclc = 0x0b; |
2680 | 2680 | ||
@@ -2694,10 +2694,13 @@ static u8 stv090x_optimize_carloop_short(struct stv090x_state *state) | |||
2694 | break; | 2694 | break; |
2695 | } | 2695 | } |
2696 | 2696 | ||
2697 | if (state->dev_ver >= 0x30) | 2697 | if (state->dev_ver >= 0x30) { |
2698 | short_crl = stv090x_s2_short_crl_cut20; | 2698 | /* Cut 3.0 and up */ |
2699 | else if (state->dev_ver >= 0x20) | ||
2700 | short_crl = stv090x_s2_short_crl_cut30; | 2699 | short_crl = stv090x_s2_short_crl_cut30; |
2700 | } else { | ||
2701 | /* Cut 2.0 and up: we don't support cuts older than 2.0 */ | ||
2702 | short_crl = stv090x_s2_short_crl_cut20; | ||
2703 | } | ||
2701 | 2704 | ||
2702 | if (state->srate <= 3000000) | 2705 | if (state->srate <= 3000000) |
2703 | aclc = short_crl[index].crl_2; | 2706 | aclc = short_crl[index].crl_2; |
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c index 4302c563a6b8..cc8862ce4aae 100644 --- a/drivers/media/dvb/frontends/tda10048.c +++ b/drivers/media/dvb/frontends/tda10048.c | |||
@@ -210,6 +210,7 @@ static struct pll_tab { | |||
210 | { TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 }, | 210 | { TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 }, |
211 | { TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 }, | 211 | { TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 }, |
212 | { TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 }, | 212 | { TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 }, |
213 | { TDA10048_CLK_16000, TDA10048_IF_3800, 10, 3, 0 }, | ||
213 | { TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 }, | 214 | { TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 }, |
214 | { TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 }, | 215 | { TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 }, |
215 | { TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 }, | 216 | { TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 }, |
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c index 32be382f0e97..a246903c3341 100644 --- a/drivers/media/dvb/siano/smscoreapi.c +++ b/drivers/media/dvb/siano/smscoreapi.c | |||
@@ -1422,8 +1422,8 @@ int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum, | |||
1422 | struct smscore_gpio_config *pGpioConfig) { | 1422 | struct smscore_gpio_config *pGpioConfig) { |
1423 | 1423 | ||
1424 | u32 totalLen; | 1424 | u32 totalLen; |
1425 | u32 TranslatedPinNum; | 1425 | u32 TranslatedPinNum = 0; |
1426 | u32 GroupNum; | 1426 | u32 GroupNum = 0; |
1427 | u32 ElectricChar; | 1427 | u32 ElectricChar; |
1428 | u32 groupCfg; | 1428 | u32 groupCfg; |
1429 | void *buffer; | 1429 | void *buffer; |
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 393623818ade..3cd76dddb6aa 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c | |||
@@ -322,7 +322,9 @@ static int vidioc_g_tuner(struct file *file, void *priv, | |||
322 | v->rangehigh = FREQ_MAX * FREQ_MUL; | 322 | v->rangehigh = FREQ_MAX * FREQ_MUL; |
323 | v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; | 323 | v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; |
324 | if (r->tunchk & TEA5764_TUNCHK_STEREO) | 324 | if (r->tunchk & TEA5764_TUNCHK_STEREO) |
325 | v->rxsubchans = V4L2_TUNER_SUB_STEREO; | 325 | v->rxsubchans = V4L2_TUNER_SUB_STEREO; |
326 | else | ||
327 | v->rxsubchans = V4L2_TUNER_SUB_MONO; | ||
326 | v->audmode = tea5764_get_audout_mode(radio); | 328 | v->audmode = tea5764_get_audout_mode(radio); |
327 | v->signal = TEA5764_TUNCHK_LEVEL(r->tunchk) * 0xffff / 0xf; | 329 | v->signal = TEA5764_TUNCHK_LEVEL(r->tunchk) * 0xffff / 0xf; |
328 | v->afc = TEA5764_TUNCHK_IFCNT(r->tunchk); | 330 | v->afc = TEA5764_TUNCHK_IFCNT(r->tunchk); |
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 94f440535c64..061e147f6f26 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
@@ -866,9 +866,13 @@ config USB_W9968CF | |||
866 | module will be called w9968cf. | 866 | module will be called w9968cf. |
867 | 867 | ||
868 | config USB_OV511 | 868 | config USB_OV511 |
869 | tristate "USB OV511 Camera support" | 869 | tristate "USB OV511 Camera support (DEPRECATED)" |
870 | depends on VIDEO_V4L1 | 870 | depends on VIDEO_V4L1 |
871 | ---help--- | 871 | ---help--- |
872 | This driver is DEPRECATED; please use the gspca ov519 module | ||
873 | instead. Note that for ov511 / ov518 support with the gspca module | ||
874 | you need at least version 0.6.0 of libv4l. | ||
875 | |||
872 | Say Y here if you want to connect this type of camera to your | 876 | Say Y here if you want to connect this type of camera to your |
873 | computer's USB port. See <file:Documentation/video4linux/ov511.txt> | 877 | computer's USB port. See <file:Documentation/video4linux/ov511.txt> |
874 | for more information and for a list of supported cameras. | 878 | for more information and for a list of supported cameras. |
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c index 8e35c3aed544..5136df198338 100644 --- a/drivers/media/video/cx18/cx18-controls.c +++ b/drivers/media/video/cx18/cx18-controls.c | |||
@@ -61,6 +61,8 @@ int cx18_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl) | |||
61 | 61 | ||
62 | switch (qctrl->id) { | 62 | switch (qctrl->id) { |
63 | /* Standard V4L2 controls */ | 63 | /* Standard V4L2 controls */ |
64 | case V4L2_CID_USER_CLASS: | ||
65 | return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0); | ||
64 | case V4L2_CID_BRIGHTNESS: | 66 | case V4L2_CID_BRIGHTNESS: |
65 | case V4L2_CID_HUE: | 67 | case V4L2_CID_HUE: |
66 | case V4L2_CID_SATURATION: | 68 | case V4L2_CID_SATURATION: |
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c index 6a9464079b4c..28f48f41f218 100644 --- a/drivers/media/video/cx231xx/cx231xx-avcore.c +++ b/drivers/media/video/cx231xx/cx231xx-avcore.c | |||
@@ -1052,22 +1052,13 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev, | |||
1052 | /* Set resolution of the video */ | 1052 | /* Set resolution of the video */ |
1053 | int cx231xx_resolution_set(struct cx231xx *dev) | 1053 | int cx231xx_resolution_set(struct cx231xx *dev) |
1054 | { | 1054 | { |
1055 | int width, height; | ||
1056 | u32 hscale, vscale; | ||
1057 | int status = 0; | ||
1058 | |||
1059 | width = dev->width; | ||
1060 | height = dev->height; | ||
1061 | |||
1062 | get_scale(dev, width, height, &hscale, &vscale); | ||
1063 | |||
1064 | /* set horizontal scale */ | 1055 | /* set horizontal scale */ |
1065 | status = vid_blk_write_word(dev, HSCALE_CTRL, hscale); | 1056 | int status = vid_blk_write_word(dev, HSCALE_CTRL, dev->hscale); |
1057 | if (status) | ||
1058 | return status; | ||
1066 | 1059 | ||
1067 | /* set vertical scale */ | 1060 | /* set vertical scale */ |
1068 | status = vid_blk_write_word(dev, VSCALE_CTRL, vscale); | 1061 | return vid_blk_write_word(dev, VSCALE_CTRL, dev->vscale); |
1069 | |||
1070 | return status; | ||
1071 | } | 1062 | } |
1072 | 1063 | ||
1073 | /****************************************************************************** | 1064 | /****************************************************************************** |
@@ -2055,7 +2046,7 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type) | |||
2055 | 2046 | ||
2056 | int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type) | 2047 | int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type) |
2057 | { | 2048 | { |
2058 | int rc; | 2049 | int rc = -1; |
2059 | u32 ep_mask = -1; | 2050 | u32 ep_mask = -1; |
2060 | struct pcb_config *pcb_config; | 2051 | struct pcb_config *pcb_config; |
2061 | 2052 | ||
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c index a23ae73fe634..609bae6098d3 100644 --- a/drivers/media/video/cx231xx/cx231xx-video.c +++ b/drivers/media/video/cx231xx/cx231xx-video.c | |||
@@ -893,9 +893,9 @@ static int check_dev(struct cx231xx *dev) | |||
893 | return 0; | 893 | return 0; |
894 | } | 894 | } |
895 | 895 | ||
896 | void get_scale(struct cx231xx *dev, | 896 | static void get_scale(struct cx231xx *dev, |
897 | unsigned int width, unsigned int height, | 897 | unsigned int width, unsigned int height, |
898 | unsigned int *hscale, unsigned int *vscale) | 898 | unsigned int *hscale, unsigned int *vscale) |
899 | { | 899 | { |
900 | unsigned int maxw = norm_maxw(dev); | 900 | unsigned int maxw = norm_maxw(dev); |
901 | unsigned int maxh = norm_maxh(dev); | 901 | unsigned int maxh = norm_maxh(dev); |
@@ -907,10 +907,6 @@ void get_scale(struct cx231xx *dev, | |||
907 | *vscale = (((unsigned long)maxh) << 12) / height - 4096L; | 907 | *vscale = (((unsigned long)maxh) << 12) / height - 4096L; |
908 | if (*vscale >= 0x4000) | 908 | if (*vscale >= 0x4000) |
909 | *vscale = 0x3fff; | 909 | *vscale = 0x3fff; |
910 | |||
911 | dev->hscale = *hscale; | ||
912 | dev->vscale = *vscale; | ||
913 | |||
914 | } | 910 | } |
915 | 911 | ||
916 | /* ------------------------------------------------------------------ | 912 | /* ------------------------------------------------------------------ |
@@ -955,8 +951,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
955 | { | 951 | { |
956 | struct cx231xx_fh *fh = priv; | 952 | struct cx231xx_fh *fh = priv; |
957 | struct cx231xx *dev = fh->dev; | 953 | struct cx231xx *dev = fh->dev; |
958 | int width = f->fmt.pix.width; | 954 | unsigned int width = f->fmt.pix.width; |
959 | int height = f->fmt.pix.height; | 955 | unsigned int height = f->fmt.pix.height; |
960 | unsigned int maxw = norm_maxw(dev); | 956 | unsigned int maxw = norm_maxw(dev); |
961 | unsigned int maxh = norm_maxh(dev); | 957 | unsigned int maxh = norm_maxh(dev); |
962 | unsigned int hscale, vscale; | 958 | unsigned int hscale, vscale; |
@@ -971,17 +967,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
971 | 967 | ||
972 | /* width must be even because of the YUYV format | 968 | /* width must be even because of the YUYV format |
973 | height must be even because of interlacing */ | 969 | height must be even because of interlacing */ |
974 | height &= 0xfffe; | 970 | v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0); |
975 | width &= 0xfffe; | ||
976 | |||
977 | if (unlikely(height < 32)) | ||
978 | height = 32; | ||
979 | if (unlikely(height > maxh)) | ||
980 | height = maxh; | ||
981 | if (unlikely(width < 48)) | ||
982 | width = 48; | ||
983 | if (unlikely(width > maxw)) | ||
984 | width = maxw; | ||
985 | 971 | ||
986 | get_scale(dev, width, height, &hscale, &vscale); | 972 | get_scale(dev, width, height, &hscale, &vscale); |
987 | 973 | ||
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h index e38eb2d425f7..a0f823ac6b8d 100644 --- a/drivers/media/video/cx231xx/cx231xx.h +++ b/drivers/media/video/cx231xx/cx231xx.h | |||
@@ -722,9 +722,6 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input); | |||
722 | int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input); | 722 | int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input); |
723 | int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev); | 723 | int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev); |
724 | int cx231xx_set_audio_input(struct cx231xx *dev, u8 input); | 724 | int cx231xx_set_audio_input(struct cx231xx *dev, u8 input); |
725 | void get_scale(struct cx231xx *dev, | ||
726 | unsigned int width, unsigned int height, | ||
727 | unsigned int *hscale, unsigned int *vscale); | ||
728 | 725 | ||
729 | /* Provided by cx231xx-video.c */ | 726 | /* Provided by cx231xx-video.c */ |
730 | int cx231xx_register_extension(struct cx231xx_ops *dev); | 727 | int cx231xx_register_extension(struct cx231xx_ops *dev); |
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c index 8ded52946334..4c8e95853fa3 100644 --- a/drivers/media/video/cx2341x.c +++ b/drivers/media/video/cx2341x.c | |||
@@ -500,6 +500,8 @@ int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params, | |||
500 | int err; | 500 | int err; |
501 | 501 | ||
502 | switch (qctrl->id) { | 502 | switch (qctrl->id) { |
503 | case V4L2_CID_MPEG_CLASS: | ||
504 | return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0); | ||
503 | case V4L2_CID_MPEG_STREAM_TYPE: | 505 | case V4L2_CID_MPEG_STREAM_TYPE: |
504 | return v4l2_ctrl_query_fill(qctrl, | 506 | return v4l2_ctrl_query_fill(qctrl, |
505 | V4L2_MPEG_STREAM_TYPE_MPEG2_PS, | 507 | V4L2_MPEG_STREAM_TYPE_MPEG2_PS, |
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c index e236df23370e..48a975134ac5 100644 --- a/drivers/media/video/cx23885/cx23885-dvb.c +++ b/drivers/media/video/cx23885/cx23885-dvb.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "dibx000_common.h" | 45 | #include "dibx000_common.h" |
46 | #include "zl10353.h" | 46 | #include "zl10353.h" |
47 | #include "stv0900.h" | 47 | #include "stv0900.h" |
48 | #include "stv0900_reg.h" | ||
48 | #include "stv6110.h" | 49 | #include "stv6110.h" |
49 | #include "lnbh24.h" | 50 | #include "lnbh24.h" |
50 | #include "cx24116.h" | 51 | #include "cx24116.h" |
@@ -242,12 +243,22 @@ static struct tda18271_std_map hauppauge_tda18271_std_map = { | |||
242 | .if_lvl = 6, .rfagc_top = 0x37 }, | 243 | .if_lvl = 6, .rfagc_top = 0x37 }, |
243 | }; | 244 | }; |
244 | 245 | ||
246 | static struct tda18271_std_map hauppauge_hvr1200_tda18271_std_map = { | ||
247 | .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4, | ||
248 | .if_lvl = 1, .rfagc_top = 0x37, }, | ||
249 | .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5, | ||
250 | .if_lvl = 1, .rfagc_top = 0x37, }, | ||
251 | .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6, | ||
252 | .if_lvl = 1, .rfagc_top = 0x37, }, | ||
253 | }; | ||
254 | |||
245 | static struct tda18271_config hauppauge_tda18271_config = { | 255 | static struct tda18271_config hauppauge_tda18271_config = { |
246 | .std_map = &hauppauge_tda18271_std_map, | 256 | .std_map = &hauppauge_tda18271_std_map, |
247 | .gate = TDA18271_GATE_ANALOG, | 257 | .gate = TDA18271_GATE_ANALOG, |
248 | }; | 258 | }; |
249 | 259 | ||
250 | static struct tda18271_config hauppauge_hvr1200_tuner_config = { | 260 | static struct tda18271_config hauppauge_hvr1200_tuner_config = { |
261 | .std_map = &hauppauge_hvr1200_tda18271_std_map, | ||
251 | .gate = TDA18271_GATE_ANALOG, | 262 | .gate = TDA18271_GATE_ANALOG, |
252 | }; | 263 | }; |
253 | 264 | ||
@@ -370,13 +381,25 @@ static struct zl10353_config dvico_fusionhdtv_xc3028 = { | |||
370 | .disable_i2c_gate_ctrl = 1, | 381 | .disable_i2c_gate_ctrl = 1, |
371 | }; | 382 | }; |
372 | 383 | ||
384 | static struct stv0900_reg stv0900_ts_regs[] = { | ||
385 | { R0900_TSGENERAL, 0x00 }, | ||
386 | { R0900_P1_TSSPEED, 0x40 }, | ||
387 | { R0900_P2_TSSPEED, 0x40 }, | ||
388 | { R0900_P1_TSCFGM, 0xc0 }, | ||
389 | { R0900_P2_TSCFGM, 0xc0 }, | ||
390 | { R0900_P1_TSCFGH, 0xe0 }, | ||
391 | { R0900_P2_TSCFGH, 0xe0 }, | ||
392 | { R0900_P1_TSCFGL, 0x20 }, | ||
393 | { R0900_P2_TSCFGL, 0x20 }, | ||
394 | { 0xffff, 0xff }, /* terminate */ | ||
395 | }; | ||
396 | |||
373 | static struct stv0900_config netup_stv0900_config = { | 397 | static struct stv0900_config netup_stv0900_config = { |
374 | .demod_address = 0x68, | 398 | .demod_address = 0x68, |
375 | .xtal = 27000000, | 399 | .xtal = 27000000, |
376 | .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ | 400 | .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ |
377 | .diseqc_mode = 2,/* 2/3 PWM */ | 401 | .diseqc_mode = 2,/* 2/3 PWM */ |
378 | .path1_mode = 2,/*Serial continues clock */ | 402 | .ts_config_regs = stv0900_ts_regs, |
379 | .path2_mode = 2,/*Serial continues clock */ | ||
380 | .tun1_maddress = 0,/* 0x60 */ | 403 | .tun1_maddress = 0,/* 0x60 */ |
381 | .tun2_maddress = 3,/* 0x63 */ | 404 | .tun2_maddress = 3,/* 0x63 */ |
382 | .tun1_adc = 1,/* 1 Vpp */ | 405 | .tun1_adc = 1,/* 1 Vpp */ |
@@ -736,7 +759,8 @@ static int dvb_register(struct cx23885_tsport *port) | |||
736 | if (!dvb_attach(lnbh24_attach, | 759 | if (!dvb_attach(lnbh24_attach, |
737 | fe0->dvb.frontend, | 760 | fe0->dvb.frontend, |
738 | &i2c_bus->i2c_adap, | 761 | &i2c_bus->i2c_adap, |
739 | LNBH24_PCL, 0, 0x09)) | 762 | LNBH24_PCL, |
763 | LNBH24_TTX, 0x09)) | ||
740 | printk(KERN_ERR | 764 | printk(KERN_ERR |
741 | "No LNBH24 found!\n"); | 765 | "No LNBH24 found!\n"); |
742 | 766 | ||
@@ -756,7 +780,8 @@ static int dvb_register(struct cx23885_tsport *port) | |||
756 | if (!dvb_attach(lnbh24_attach, | 780 | if (!dvb_attach(lnbh24_attach, |
757 | fe0->dvb.frontend, | 781 | fe0->dvb.frontend, |
758 | &i2c_bus->i2c_adap, | 782 | &i2c_bus->i2c_adap, |
759 | LNBH24_PCL, 0, 0x0a)) | 783 | LNBH24_PCL, |
784 | LNBH24_TTX, 0x0a)) | ||
760 | printk(KERN_ERR | 785 | printk(KERN_ERR |
761 | "No LNBH24 found!\n"); | 786 | "No LNBH24 found!\n"); |
762 | 787 | ||
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c index 66bbd2e71105..70836af3ab48 100644 --- a/drivers/media/video/cx23885/cx23885-video.c +++ b/drivers/media/video/cx23885/cx23885-video.c | |||
@@ -963,15 +963,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
963 | } | 963 | } |
964 | 964 | ||
965 | f->fmt.pix.field = field; | 965 | f->fmt.pix.field = field; |
966 | if (f->fmt.pix.height < 32) | 966 | v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, |
967 | f->fmt.pix.height = 32; | 967 | &f->fmt.pix.height, 32, maxh, 0, 0); |
968 | if (f->fmt.pix.height > maxh) | ||
969 | f->fmt.pix.height = maxh; | ||
970 | if (f->fmt.pix.width < 48) | ||
971 | f->fmt.pix.width = 48; | ||
972 | if (f->fmt.pix.width > maxw) | ||
973 | f->fmt.pix.width = maxw; | ||
974 | f->fmt.pix.width &= ~0x03; | ||
975 | f->fmt.pix.bytesperline = | 968 | f->fmt.pix.bytesperline = |
976 | (f->fmt.pix.width * fmt->depth) >> 3; | 969 | (f->fmt.pix.width * fmt->depth) >> 3; |
977 | f->fmt.pix.sizeimage = | 970 | f->fmt.pix.sizeimage = |
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c index 94b7a52629d0..a5cc1c1fc2d6 100644 --- a/drivers/media/video/cx88/cx88-cards.c +++ b/drivers/media/video/cx88/cx88-cards.c | |||
@@ -1524,33 +1524,45 @@ static const struct cx88_board cx88_boards[] = { | |||
1524 | }, | 1524 | }, |
1525 | .mpeg = CX88_MPEG_DVB, | 1525 | .mpeg = CX88_MPEG_DVB, |
1526 | }, | 1526 | }, |
1527 | /* Terry Wu <terrywu2009@gmail.com> */ | ||
1528 | /* TV Audio : set GPIO 2, 18, 19 value to 0, 1, 0 */ | ||
1529 | /* FM Audio : set GPIO 2, 18, 19 value to 0, 0, 0 */ | ||
1530 | /* Line-in Audio : set GPIO 2, 18, 19 value to 0, 1, 1 */ | ||
1531 | /* Mute Audio : set GPIO 2 value to 1 */ | ||
1527 | [CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = { | 1532 | [CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = { |
1528 | .name = "Winfast TV2000 XP Global", | 1533 | .name = "Leadtek TV2000 XP Global", |
1529 | .tuner_type = TUNER_XC2028, | 1534 | .tuner_type = TUNER_XC2028, |
1530 | .tuner_addr = 0x61, | 1535 | .tuner_addr = 0x61, |
1536 | .radio_type = TUNER_XC2028, | ||
1537 | .radio_addr = 0x61, | ||
1531 | .input = { { | 1538 | .input = { { |
1532 | .type = CX88_VMUX_TELEVISION, | 1539 | .type = CX88_VMUX_TELEVISION, |
1533 | .vmux = 0, | 1540 | .vmux = 0, |
1534 | .gpio0 = 0x0400, /* pin 2:mute = 0 (off?) */ | 1541 | .gpio0 = 0x0400, /* pin 2 = 0 */ |
1535 | .gpio1 = 0x0000, | 1542 | .gpio1 = 0x0000, |
1536 | .gpio2 = 0x0800, /* pin 19:audio = 0 (tv) */ | 1543 | .gpio2 = 0x0C04, /* pin 18 = 1, pin 19 = 0 */ |
1537 | 1544 | .gpio3 = 0x0000, | |
1538 | }, { | 1545 | }, { |
1539 | .type = CX88_VMUX_COMPOSITE1, | 1546 | .type = CX88_VMUX_COMPOSITE1, |
1540 | .vmux = 1, | 1547 | .vmux = 1, |
1541 | .gpio0 = 0x0400, /* probably? or 0x0404 to turn mute on */ | 1548 | .gpio0 = 0x0400, /* pin 2 = 0 */ |
1542 | .gpio1 = 0x0000, | 1549 | .gpio1 = 0x0000, |
1543 | .gpio2 = 0x0808, /* pin 19:audio = 1 (line) */ | 1550 | .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */ |
1544 | 1551 | .gpio3 = 0x0000, | |
1545 | }, { | 1552 | }, { |
1546 | .type = CX88_VMUX_SVIDEO, | 1553 | .type = CX88_VMUX_SVIDEO, |
1547 | .vmux = 2, | 1554 | .vmux = 2, |
1555 | .gpio0 = 0x0400, /* pin 2 = 0 */ | ||
1556 | .gpio1 = 0x0000, | ||
1557 | .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */ | ||
1558 | .gpio3 = 0x0000, | ||
1548 | } }, | 1559 | } }, |
1549 | .radio = { | 1560 | .radio = { |
1550 | .type = CX88_RADIO, | 1561 | .type = CX88_RADIO, |
1551 | .gpio0 = 0x004ff, | 1562 | .gpio0 = 0x0400, /* pin 2 = 0 */ |
1552 | .gpio1 = 0x010ff, | 1563 | .gpio1 = 0x0000, |
1553 | .gpio2 = 0x0ff, | 1564 | .gpio2 = 0x0C00, /* pin 18 = 0, pin 19 = 0 */ |
1565 | .gpio3 = 0x0000, | ||
1554 | }, | 1566 | }, |
1555 | }, | 1567 | }, |
1556 | [CX88_BOARD_POWERCOLOR_REAL_ANGEL] = { | 1568 | [CX88_BOARD_POWERCOLOR_REAL_ANGEL] = { |
@@ -2438,6 +2450,41 @@ static const struct cx88_subid cx88_subids[] = { | |||
2438 | .subvendor = 0x107d, | 2450 | .subvendor = 0x107d, |
2439 | .subdevice = 0x6654, | 2451 | .subdevice = 0x6654, |
2440 | .card = CX88_BOARD_WINFAST_DTV1800H, | 2452 | .card = CX88_BOARD_WINFAST_DTV1800H, |
2453 | }, { | ||
2454 | /* PVR2000 PAL Model [107d:6630] */ | ||
2455 | .subvendor = 0x107d, | ||
2456 | .subdevice = 0x6630, | ||
2457 | .card = CX88_BOARD_LEADTEK_PVR2000, | ||
2458 | }, { | ||
2459 | /* PVR2000 PAL Model [107d:6638] */ | ||
2460 | .subvendor = 0x107d, | ||
2461 | .subdevice = 0x6638, | ||
2462 | .card = CX88_BOARD_LEADTEK_PVR2000, | ||
2463 | }, { | ||
2464 | /* PVR2000 NTSC Model [107d:6631] */ | ||
2465 | .subvendor = 0x107d, | ||
2466 | .subdevice = 0x6631, | ||
2467 | .card = CX88_BOARD_LEADTEK_PVR2000, | ||
2468 | }, { | ||
2469 | /* PVR2000 NTSC Model [107d:6637] */ | ||
2470 | .subvendor = 0x107d, | ||
2471 | .subdevice = 0x6637, | ||
2472 | .card = CX88_BOARD_LEADTEK_PVR2000, | ||
2473 | }, { | ||
2474 | /* PVR2000 NTSC Model [107d:663d] */ | ||
2475 | .subvendor = 0x107d, | ||
2476 | .subdevice = 0x663d, | ||
2477 | .card = CX88_BOARD_LEADTEK_PVR2000, | ||
2478 | }, { | ||
2479 | /* DV2000 NTSC Model [107d:6621] */ | ||
2480 | .subvendor = 0x107d, | ||
2481 | .subdevice = 0x6621, | ||
2482 | .card = CX88_BOARD_WINFAST_DV2000, | ||
2483 | }, { | ||
2484 | /* TV2000 XP Global [107d:6618] */ | ||
2485 | .subvendor = 0x107d, | ||
2486 | .subdevice = 0x6618, | ||
2487 | .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL, | ||
2441 | }, | 2488 | }, |
2442 | }; | 2489 | }; |
2443 | 2490 | ||
@@ -2446,12 +2493,6 @@ static const struct cx88_subid cx88_subids[] = { | |||
2446 | 2493 | ||
2447 | static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data) | 2494 | static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data) |
2448 | { | 2495 | { |
2449 | /* This is just for the "Winfast 2000XP Expert" board ATM; I don't have data on | ||
2450 | * any others. | ||
2451 | * | ||
2452 | * Byte 0 is 1 on the NTSC board. | ||
2453 | */ | ||
2454 | |||
2455 | if (eeprom_data[4] != 0x7d || | 2496 | if (eeprom_data[4] != 0x7d || |
2456 | eeprom_data[5] != 0x10 || | 2497 | eeprom_data[5] != 0x10 || |
2457 | eeprom_data[7] != 0x66) { | 2498 | eeprom_data[7] != 0x66) { |
@@ -2459,8 +2500,19 @@ static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data) | |||
2459 | return; | 2500 | return; |
2460 | } | 2501 | } |
2461 | 2502 | ||
2462 | core->board.tuner_type = (eeprom_data[6] == 0x13) ? | 2503 | /* Terry Wu <terrywu2009@gmail.com> */ |
2463 | TUNER_PHILIPS_FM1236_MK3 : TUNER_PHILIPS_FM1216ME_MK3; | 2504 | switch (eeprom_data[6]) { |
2505 | case 0x13: /* SSID 6613 for TV2000 XP Expert NTSC Model */ | ||
2506 | case 0x21: /* SSID 6621 for DV2000 NTSC Model */ | ||
2507 | case 0x31: /* SSID 6631 for PVR2000 NTSC Model */ | ||
2508 | case 0x37: /* SSID 6637 for PVR2000 NTSC Model */ | ||
2509 | case 0x3d: /* SSID 663d for PVR2000 NTSC Model */ | ||
2510 | core->board.tuner_type = TUNER_PHILIPS_FM1236_MK3; | ||
2511 | break; | ||
2512 | default: | ||
2513 | core->board.tuner_type = TUNER_PHILIPS_FM1216ME_MK3; | ||
2514 | break; | ||
2515 | } | ||
2464 | 2516 | ||
2465 | info_printk(core, "Leadtek Winfast 2000XP Expert config: " | 2517 | info_printk(core, "Leadtek Winfast 2000XP Expert config: " |
2466 | "tuner=%d, eeprom[0]=0x%02x\n", | 2518 | "tuner=%d, eeprom[0]=0x%02x\n", |
@@ -2713,7 +2765,6 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core, | |||
2713 | { | 2765 | { |
2714 | /* Board-specific callbacks */ | 2766 | /* Board-specific callbacks */ |
2715 | switch (core->boardnr) { | 2767 | switch (core->boardnr) { |
2716 | case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: | ||
2717 | case CX88_BOARD_POWERCOLOR_REAL_ANGEL: | 2768 | case CX88_BOARD_POWERCOLOR_REAL_ANGEL: |
2718 | case CX88_BOARD_GENIATECH_X8000_MT: | 2769 | case CX88_BOARD_GENIATECH_X8000_MT: |
2719 | case CX88_BOARD_KWORLD_ATSC_120: | 2770 | case CX88_BOARD_KWORLD_ATSC_120: |
@@ -2725,6 +2776,7 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core, | |||
2725 | case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO: | 2776 | case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO: |
2726 | case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: | 2777 | case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: |
2727 | return cx88_dvico_xc2028_callback(core, command, arg); | 2778 | return cx88_dvico_xc2028_callback(core, command, arg); |
2779 | case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: | ||
2728 | case CX88_BOARD_WINFAST_DTV1800H: | 2780 | case CX88_BOARD_WINFAST_DTV1800H: |
2729 | return cx88_xc3028_winfast1800h_callback(core, command, arg); | 2781 | return cx88_xc3028_winfast1800h_callback(core, command, arg); |
2730 | } | 2782 | } |
@@ -2914,6 +2966,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core) | |||
2914 | udelay(1000); | 2966 | udelay(1000); |
2915 | break; | 2967 | break; |
2916 | 2968 | ||
2969 | case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: | ||
2917 | case CX88_BOARD_WINFAST_DTV1800H: | 2970 | case CX88_BOARD_WINFAST_DTV1800H: |
2918 | /* GPIO 12 (xc3028 tuner reset) */ | 2971 | /* GPIO 12 (xc3028 tuner reset) */ |
2919 | cx_set(MO_GP1_IO, 0x1010); | 2972 | cx_set(MO_GP1_IO, 0x1010); |
@@ -2950,6 +3003,7 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl) | |||
2950 | case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: | 3003 | case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: |
2951 | ctl->demod = XC3028_FE_OREN538; | 3004 | ctl->demod = XC3028_FE_OREN538; |
2952 | break; | 3005 | break; |
3006 | case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL: | ||
2953 | case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME: | 3007 | case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME: |
2954 | case CX88_BOARD_PROLINK_PV_8000GT: | 3008 | case CX88_BOARD_PROLINK_PV_8000GT: |
2955 | /* | 3009 | /* |
@@ -2993,6 +3047,8 @@ static void cx88_card_setup(struct cx88_core *core) | |||
2993 | if (0 == core->i2c_rc) | 3047 | if (0 == core->i2c_rc) |
2994 | gdi_eeprom(core, eeprom); | 3048 | gdi_eeprom(core, eeprom); |
2995 | break; | 3049 | break; |
3050 | case CX88_BOARD_LEADTEK_PVR2000: | ||
3051 | case CX88_BOARD_WINFAST_DV2000: | ||
2996 | case CX88_BOARD_WINFAST2000XP_EXPERT: | 3052 | case CX88_BOARD_WINFAST2000XP_EXPERT: |
2997 | if (0 == core->i2c_rc) | 3053 | if (0 == core->i2c_rc) |
2998 | leadtek_eeprom(core, eeprom); | 3054 | leadtek_eeprom(core, eeprom); |
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index 0ccac702bea4..b12770848c00 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c | |||
@@ -1111,15 +1111,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
1111 | } | 1111 | } |
1112 | 1112 | ||
1113 | f->fmt.pix.field = field; | 1113 | f->fmt.pix.field = field; |
1114 | if (f->fmt.pix.height < 32) | 1114 | v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, |
1115 | f->fmt.pix.height = 32; | 1115 | &f->fmt.pix.height, 32, maxh, 0, 0); |
1116 | if (f->fmt.pix.height > maxh) | ||
1117 | f->fmt.pix.height = maxh; | ||
1118 | if (f->fmt.pix.width < 48) | ||
1119 | f->fmt.pix.width = 48; | ||
1120 | if (f->fmt.pix.width > maxw) | ||
1121 | f->fmt.pix.width = maxw; | ||
1122 | f->fmt.pix.width &= ~0x03; | ||
1123 | f->fmt.pix.bytesperline = | 1116 | f->fmt.pix.bytesperline = |
1124 | (f->fmt.pix.width * fmt->depth) >> 3; | 1117 | (f->fmt.pix.width * fmt->depth) >> 3; |
1125 | f->fmt.pix.sizeimage = | 1118 | f->fmt.pix.sizeimage = |
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 00cc791a9e44..c43fdb9bc888 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c | |||
@@ -139,6 +139,24 @@ static struct em28xx_reg_seq kworld_330u_digital[] = { | |||
139 | { -1, -1, -1, -1}, | 139 | { -1, -1, -1, -1}, |
140 | }; | 140 | }; |
141 | 141 | ||
142 | /* Evga inDtube | ||
143 | GPIO0 - Enable digital power (s5h1409) - low to enable | ||
144 | GPIO1 - Enable analog power (tvp5150/emp202) - low to enable | ||
145 | GPIO4 - xc3028 reset | ||
146 | GPO3 - s5h1409 reset | ||
147 | */ | ||
148 | static struct em28xx_reg_seq evga_indtube_analog[] = { | ||
149 | {EM28XX_R08_GPIO, 0x79, 0xff, 60}, | ||
150 | { -1, -1, -1, -1}, | ||
151 | }; | ||
152 | |||
153 | static struct em28xx_reg_seq evga_indtube_digital[] = { | ||
154 | {EM28XX_R08_GPIO, 0x7a, 0xff, 1}, | ||
155 | {EM2880_R04_GPO, 0x04, 0xff, 10}, | ||
156 | {EM2880_R04_GPO, 0x0c, 0xff, 1}, | ||
157 | { -1, -1, -1, -1}, | ||
158 | }; | ||
159 | |||
142 | /* Callback for the most boards */ | 160 | /* Callback for the most boards */ |
143 | static struct em28xx_reg_seq default_tuner_gpio[] = { | 161 | static struct em28xx_reg_seq default_tuner_gpio[] = { |
144 | {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, | 162 | {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, |
@@ -1449,6 +1467,33 @@ struct em28xx_board em28xx_boards[] = { | |||
1449 | .gpio = terratec_av350_unmute_gpio, | 1467 | .gpio = terratec_av350_unmute_gpio, |
1450 | } }, | 1468 | } }, |
1451 | }, | 1469 | }, |
1470 | [EM2882_BOARD_EVGA_INDTUBE] = { | ||
1471 | .name = "Evga inDtube", | ||
1472 | .tuner_type = TUNER_XC2028, | ||
1473 | .tuner_gpio = default_tuner_gpio, | ||
1474 | .decoder = EM28XX_TVP5150, | ||
1475 | .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */ | ||
1476 | .mts_firmware = 1, | ||
1477 | .has_dvb = 1, | ||
1478 | .dvb_gpio = evga_indtube_digital, | ||
1479 | .ir_codes = ir_codes_evga_indtube, | ||
1480 | .input = { { | ||
1481 | .type = EM28XX_VMUX_TELEVISION, | ||
1482 | .vmux = TVP5150_COMPOSITE0, | ||
1483 | .amux = EM28XX_AMUX_VIDEO, | ||
1484 | .gpio = evga_indtube_analog, | ||
1485 | }, { | ||
1486 | .type = EM28XX_VMUX_COMPOSITE1, | ||
1487 | .vmux = TVP5150_COMPOSITE1, | ||
1488 | .amux = EM28XX_AMUX_LINE_IN, | ||
1489 | .gpio = evga_indtube_analog, | ||
1490 | }, { | ||
1491 | .type = EM28XX_VMUX_SVIDEO, | ||
1492 | .vmux = TVP5150_SVIDEO, | ||
1493 | .amux = EM28XX_AMUX_LINE_IN, | ||
1494 | .gpio = evga_indtube_analog, | ||
1495 | } }, | ||
1496 | }, | ||
1452 | }; | 1497 | }; |
1453 | const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); | 1498 | const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); |
1454 | 1499 | ||
@@ -1571,6 +1616,7 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = { | |||
1571 | {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF}, | 1616 | {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF}, |
1572 | {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, | 1617 | {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, |
1573 | {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, | 1618 | {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, |
1619 | {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, | ||
1574 | }; | 1620 | }; |
1575 | 1621 | ||
1576 | /* I2C devicelist hash table for devices with generic USB IDs */ | 1622 | /* I2C devicelist hash table for devices with generic USB IDs */ |
@@ -1834,6 +1880,10 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) | |||
1834 | ctl->demod = XC3028_FE_CHINA; | 1880 | ctl->demod = XC3028_FE_CHINA; |
1835 | ctl->fname = XC2028_DEFAULT_FIRMWARE; | 1881 | ctl->fname = XC2028_DEFAULT_FIRMWARE; |
1836 | break; | 1882 | break; |
1883 | case EM2882_BOARD_EVGA_INDTUBE: | ||
1884 | ctl->demod = XC3028_FE_CHINA; | ||
1885 | ctl->fname = XC3028L_DEFAULT_FIRMWARE; | ||
1886 | break; | ||
1837 | default: | 1887 | default: |
1838 | ctl->demod = XC3028_FE_OREN538; | 1888 | ctl->demod = XC3028_FE_OREN538; |
1839 | } | 1889 | } |
@@ -2101,6 +2151,12 @@ void em28xx_card_setup(struct em28xx *dev) | |||
2101 | case EM2880_BOARD_MSI_DIGIVOX_AD: | 2151 | case EM2880_BOARD_MSI_DIGIVOX_AD: |
2102 | if (!em28xx_hint_board(dev)) | 2152 | if (!em28xx_hint_board(dev)) |
2103 | em28xx_set_model(dev); | 2153 | em28xx_set_model(dev); |
2154 | |||
2155 | /* In cases where we had to use a board hint, the call to | ||
2156 | em28xx_set_mode() in em28xx_pre_card_setup() was a no-op, | ||
2157 | so make the call now so the analog GPIOs are set properly | ||
2158 | before probing the i2c bus. */ | ||
2159 | em28xx_set_mode(dev, EM28XX_ANALOG_MODE); | ||
2104 | break; | 2160 | break; |
2105 | } | 2161 | } |
2106 | 2162 | ||
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c index 563dd2b1c8e9..e7b47c8da8f3 100644 --- a/drivers/media/video/em28xx/em28xx-dvb.c +++ b/drivers/media/video/em28xx/em28xx-dvb.c | |||
@@ -445,6 +445,7 @@ static int dvb_init(struct em28xx *dev) | |||
445 | } | 445 | } |
446 | break; | 446 | break; |
447 | case EM2883_BOARD_KWORLD_HYBRID_330U: | 447 | case EM2883_BOARD_KWORLD_HYBRID_330U: |
448 | case EM2882_BOARD_EVGA_INDTUBE: | ||
448 | dvb->frontend = dvb_attach(s5h1409_attach, | 449 | dvb->frontend = dvb_attach(s5h1409_attach, |
449 | &em28xx_s5h1409_with_xc3028, | 450 | &em28xx_s5h1409_with_xc3028, |
450 | &dev->i2c_adap); | 451 | &dev->i2c_adap); |
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index 882796e84dbc..8fe1beecfffa 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c | |||
@@ -687,8 +687,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
687 | { | 687 | { |
688 | struct em28xx_fh *fh = priv; | 688 | struct em28xx_fh *fh = priv; |
689 | struct em28xx *dev = fh->dev; | 689 | struct em28xx *dev = fh->dev; |
690 | int width = f->fmt.pix.width; | 690 | unsigned int width = f->fmt.pix.width; |
691 | int height = f->fmt.pix.height; | 691 | unsigned int height = f->fmt.pix.height; |
692 | unsigned int maxw = norm_maxw(dev); | 692 | unsigned int maxw = norm_maxw(dev); |
693 | unsigned int maxh = norm_maxh(dev); | 693 | unsigned int maxh = norm_maxh(dev); |
694 | unsigned int hscale, vscale; | 694 | unsigned int hscale, vscale; |
@@ -701,34 +701,20 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
701 | return -EINVAL; | 701 | return -EINVAL; |
702 | } | 702 | } |
703 | 703 | ||
704 | /* width must even because of the YUYV format | ||
705 | height must be even because of interlacing */ | ||
706 | height &= 0xfffe; | ||
707 | width &= 0xfffe; | ||
708 | |||
709 | if (unlikely(height < 32)) | ||
710 | height = 32; | ||
711 | if (unlikely(height > maxh)) | ||
712 | height = maxh; | ||
713 | if (unlikely(width < 48)) | ||
714 | width = 48; | ||
715 | if (unlikely(width > maxw)) | ||
716 | width = maxw; | ||
717 | |||
718 | if (dev->board.is_em2800) { | 704 | if (dev->board.is_em2800) { |
719 | /* the em2800 can only scale down to 50% */ | 705 | /* the em2800 can only scale down to 50% */ |
720 | if (height % (maxh / 2)) | 706 | height = height > (3 * maxh / 4) ? maxh : maxh / 2; |
721 | height = maxh; | 707 | width = width > (3 * maxw / 4) ? maxw : maxw / 2; |
722 | if (width % (maxw / 2)) | 708 | /* According to empiatech support, the MaxPacketSize is too small |
723 | width = maxw; | 709 | * to support framesizes larger than 640x480 @ 30 fps or 640x576 |
724 | /* according to empiatech support */ | 710 | * @ 25 fps. As this would cut off a part of the image, we prefer |
725 | /* the MaxPacketSize is to small to support */ | 711 | * 360x576 or 360x480 for now */ |
726 | /* framesizes larger than 640x480 @ 30 fps */ | ||
727 | /* or 640x576 @ 25 fps. As this would cut */ | ||
728 | /* of a part of the image we prefer */ | ||
729 | /* 360x576 or 360x480 for now */ | ||
730 | if (width == maxw && height == maxh) | 712 | if (width == maxw && height == maxh) |
731 | width /= 2; | 713 | width /= 2; |
714 | } else { | ||
715 | /* width must be even because of the YUYV format | ||
716 | height must be even because of interlacing */ | ||
717 | v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0); | ||
732 | } | 718 | } |
733 | 719 | ||
734 | get_scale(dev, width, height, &hscale, &vscale); | 720 | get_scale(dev, width, height, &hscale, &vscale); |
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h index 8bf81be1da61..813ce45c2f99 100644 --- a/drivers/media/video/em28xx/em28xx.h +++ b/drivers/media/video/em28xx/em28xx.h | |||
@@ -106,6 +106,7 @@ | |||
106 | #define EM2860_BOARD_TERRATEC_GRABBY 67 | 106 | #define EM2860_BOARD_TERRATEC_GRABBY 67 |
107 | #define EM2860_BOARD_TERRATEC_AV350 68 | 107 | #define EM2860_BOARD_TERRATEC_AV350 68 |
108 | #define EM2882_BOARD_KWORLD_ATSC_315U 69 | 108 | #define EM2882_BOARD_KWORLD_ATSC_315U 69 |
109 | #define EM2882_BOARD_EVGA_INDTUBE 70 | ||
109 | 110 | ||
110 | /* Limits minimum and default number of buffers */ | 111 | /* Limits minimum and default number of buffers */ |
111 | #define EM28XX_MIN_BUF 4 | 112 | #define EM28XX_MIN_BUF 4 |
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index f7e0355ad644..1e89600986c8 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c | |||
@@ -1042,13 +1042,11 @@ static int vidioc_queryctrl(struct file *file, void *priv, | |||
1042 | for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { | 1042 | for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { |
1043 | if (gspca_dev->ctrl_dis & (1 << i)) | 1043 | if (gspca_dev->ctrl_dis & (1 << i)) |
1044 | continue; | 1044 | continue; |
1045 | if (ctrls->qctrl.id < id) | 1045 | if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id) |
1046 | continue; | 1046 | continue; |
1047 | if (ctrls != NULL) { | 1047 | if (ctrls && gspca_dev->sd_desc->ctrls[i].qctrl.id |
1048 | if (gspca_dev->sd_desc->ctrls[i].qctrl.id | ||
1049 | > ctrls->qctrl.id) | 1048 | > ctrls->qctrl.id) |
1050 | continue; | 1049 | continue; |
1051 | } | ||
1052 | ctrls = &gspca_dev->sd_desc->ctrls[i]; | 1050 | ctrls = &gspca_dev->sd_desc->ctrls[i]; |
1053 | } | 1051 | } |
1054 | } else { | 1052 | } else { |
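The vidioc_queryctrl() change above fixes the V4L2_CTRL_FLAG_NEXT_CTRL scan: the old loop dereferenced the best-match pointer (ctrls->qctrl.id) before its NULL check, and compared against it rather than against the candidate table entry. The rewrite tests the table entry against the requested id and only dereferences the best-match pointer once it is non-NULL. A sketch of the corrected selection logic; "table" and "nctrls" are illustrative stand-ins for the subdriver descriptor fields, and the disabled-control masking of the real code is omitted:

	/* Return the control with the lowest id that is not below 'id',
	 * or NULL when no such control exists. */
	static const struct ctrl *next_ctrl(const struct ctrl *table,
					    int nctrls, u32 id)
	{
		const struct ctrl *best = NULL;
		int i;

		for (i = 0; i < nctrls; i++) {
			if (table[i].qctrl.id < id)
				continue;
			if (best && table[i].qctrl.id > best->qctrl.id)
				continue;
			best = &table[i];
		}
		return best;
	}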
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c index 188866ac6cef..2f6e135d94bc 100644 --- a/drivers/media/video/gspca/ov519.c +++ b/drivers/media/video/gspca/ov519.c | |||
@@ -50,12 +50,18 @@ static int i2c_detect_tries = 10; | |||
50 | struct sd { | 50 | struct sd { |
51 | struct gspca_dev gspca_dev; /* !! must be the first item */ | 51 | struct gspca_dev gspca_dev; /* !! must be the first item */ |
52 | 52 | ||
53 | __u8 packet_nr; | ||
54 | |||
53 | char bridge; | 55 | char bridge; |
54 | #define BRIDGE_OV511 0 | 56 | #define BRIDGE_OV511 0 |
55 | #define BRIDGE_OV511PLUS 1 | 57 | #define BRIDGE_OV511PLUS 1 |
56 | #define BRIDGE_OV518 2 | 58 | #define BRIDGE_OV518 2 |
57 | #define BRIDGE_OV518PLUS 3 | 59 | #define BRIDGE_OV518PLUS 3 |
58 | #define BRIDGE_OV519 4 | 60 | #define BRIDGE_OV519 4 |
61 | #define BRIDGE_MASK 7 | ||
62 | |||
63 | char invert_led; | ||
64 | #define BRIDGE_INVERT_LED 8 | ||
59 | 65 | ||
60 | /* Determined by sensor type */ | 66 | /* Determined by sensor type */ |
61 | __u8 sif; | 67 | __u8 sif; |
@@ -65,22 +71,25 @@ struct sd { | |||
65 | __u8 colors; | 71 | __u8 colors; |
66 | __u8 hflip; | 72 | __u8 hflip; |
67 | __u8 vflip; | 73 | __u8 vflip; |
74 | __u8 autobrightness; | ||
75 | __u8 freq; | ||
68 | 76 | ||
69 | __u8 stopped; /* Streaming is temporarily paused */ | 77 | __u8 stopped; /* Streaming is temporarily paused */ |
70 | 78 | ||
71 | __u8 frame_rate; /* current Framerate (OV519 only) */ | 79 | __u8 frame_rate; /* current Framerate */ |
72 | __u8 clockdiv; /* clockdiv override for OV519 only */ | 80 | __u8 clockdiv; /* clockdiv override */ |
73 | 81 | ||
74 | char sensor; /* Type of image sensor chip (SEN_*) */ | 82 | char sensor; /* Type of image sensor chip (SEN_*) */ |
75 | #define SEN_UNKNOWN 0 | 83 | #define SEN_UNKNOWN 0 |
76 | #define SEN_OV6620 1 | 84 | #define SEN_OV6620 1 |
77 | #define SEN_OV6630 2 | 85 | #define SEN_OV6630 2 |
78 | #define SEN_OV7610 3 | 86 | #define SEN_OV66308AF 3 |
79 | #define SEN_OV7620 4 | 87 | #define SEN_OV7610 4 |
80 | #define SEN_OV7640 5 | 88 | #define SEN_OV7620 5 |
81 | #define SEN_OV7670 6 | 89 | #define SEN_OV7640 6 |
82 | #define SEN_OV76BE 7 | 90 | #define SEN_OV7670 7 |
83 | #define SEN_OV8610 8 | 91 | #define SEN_OV76BE 8 |
92 | #define SEN_OV8610 9 | ||
84 | }; | 93 | }; |
85 | 94 | ||
86 | /* V4L2 controls supported by the driver */ | 95 | /* V4L2 controls supported by the driver */ |
@@ -94,11 +103,17 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val); | |||
94 | static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val); | 103 | static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val); |
95 | static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); | 104 | static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); |
96 | static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); | 105 | static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); |
106 | static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val); | ||
107 | static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val); | ||
108 | static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); | ||
109 | static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); | ||
97 | static void setbrightness(struct gspca_dev *gspca_dev); | 110 | static void setbrightness(struct gspca_dev *gspca_dev); |
98 | static void setcontrast(struct gspca_dev *gspca_dev); | 111 | static void setcontrast(struct gspca_dev *gspca_dev); |
99 | static void setcolors(struct gspca_dev *gspca_dev); | 112 | static void setcolors(struct gspca_dev *gspca_dev); |
113 | static void setautobrightness(struct sd *sd); | ||
114 | static void setfreq(struct sd *sd); | ||
100 | 115 | ||
101 | static struct ctrl sd_ctrls[] = { | 116 | static const struct ctrl sd_ctrls[] = { |
102 | { | 117 | { |
103 | { | 118 | { |
104 | .id = V4L2_CID_BRIGHTNESS, | 119 | .id = V4L2_CID_BRIGHTNESS, |
@@ -141,7 +156,7 @@ static struct ctrl sd_ctrls[] = { | |||
141 | .set = sd_setcolors, | 156 | .set = sd_setcolors, |
142 | .get = sd_getcolors, | 157 | .get = sd_getcolors, |
143 | }, | 158 | }, |
144 | /* next controls work with ov7670 only */ | 159 | /* The flip controls work with ov7670 only */ |
145 | #define HFLIP_IDX 3 | 160 | #define HFLIP_IDX 3 |
146 | { | 161 | { |
147 | { | 162 | { |
@@ -172,6 +187,51 @@ static struct ctrl sd_ctrls[] = { | |||
172 | .set = sd_setvflip, | 187 | .set = sd_setvflip, |
173 | .get = sd_getvflip, | 188 | .get = sd_getvflip, |
174 | }, | 189 | }, |
190 | #define AUTOBRIGHT_IDX 5 | ||
191 | { | ||
192 | { | ||
193 | .id = V4L2_CID_AUTOBRIGHTNESS, | ||
194 | .type = V4L2_CTRL_TYPE_BOOLEAN, | ||
195 | .name = "Auto Brightness", | ||
196 | .minimum = 0, | ||
197 | .maximum = 1, | ||
198 | .step = 1, | ||
199 | #define AUTOBRIGHT_DEF 1 | ||
200 | .default_value = AUTOBRIGHT_DEF, | ||
201 | }, | ||
202 | .set = sd_setautobrightness, | ||
203 | .get = sd_getautobrightness, | ||
204 | }, | ||
205 | #define FREQ_IDX 6 | ||
206 | { | ||
207 | { | ||
208 | .id = V4L2_CID_POWER_LINE_FREQUENCY, | ||
209 | .type = V4L2_CTRL_TYPE_MENU, | ||
210 | .name = "Light frequency filter", | ||
211 | .minimum = 0, | ||
212 | .maximum = 2, /* 0: 0, 1: 50Hz, 2: 60Hz */ | ||
213 | .step = 1, | ||
214 | #define FREQ_DEF 0 | ||
215 | .default_value = FREQ_DEF, | ||
216 | }, | ||
217 | .set = sd_setfreq, | ||
218 | .get = sd_getfreq, | ||
219 | }, | ||
220 | #define OV7670_FREQ_IDX 7 | ||
221 | { | ||
222 | { | ||
223 | .id = V4L2_CID_POWER_LINE_FREQUENCY, | ||
224 | .type = V4L2_CTRL_TYPE_MENU, | ||
225 | .name = "Light frequency filter", | ||
226 | .minimum = 0, | ||
227 | .maximum = 3, /* 0: 0, 1: 50Hz, 2: 60Hz, 3: Auto */ | ||
228 | .step = 1, | ||
229 | #define OV7670_FREQ_DEF 3 | ||
230 | .default_value = OV7670_FREQ_DEF, | ||
231 | }, | ||
232 | .set = sd_setfreq, | ||
233 | .get = sd_getfreq, | ||
234 | }, | ||
175 | }; | 235 | }; |
176 | 236 | ||
177 | static const struct v4l2_pix_format ov519_vga_mode[] = { | 237 | static const struct v4l2_pix_format ov519_vga_mode[] = { |
@@ -187,11 +247,21 @@ static const struct v4l2_pix_format ov519_vga_mode[] = { | |||
187 | .priv = 0}, | 247 | .priv = 0}, |
188 | }; | 248 | }; |
189 | static const struct v4l2_pix_format ov519_sif_mode[] = { | 249 | static const struct v4l2_pix_format ov519_sif_mode[] = { |
250 | {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
251 | .bytesperline = 160, | ||
252 | .sizeimage = 160 * 120 * 3 / 8 + 590, | ||
253 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
254 | .priv = 3}, | ||
190 | {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | 255 | {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, |
191 | .bytesperline = 176, | 256 | .bytesperline = 176, |
192 | .sizeimage = 176 * 144 * 3 / 8 + 590, | 257 | .sizeimage = 176 * 144 * 3 / 8 + 590, |
193 | .colorspace = V4L2_COLORSPACE_JPEG, | 258 | .colorspace = V4L2_COLORSPACE_JPEG, |
194 | .priv = 1}, | 259 | .priv = 1}, |
260 | {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | ||
261 | .bytesperline = 320, | ||
262 | .sizeimage = 320 * 240 * 3 / 8 + 590, | ||
263 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
264 | .priv = 2}, | ||
195 | {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | 265 | {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, |
196 | .bytesperline = 352, | 266 | .bytesperline = 352, |
197 | .sizeimage = 352 * 288 * 3 / 8 + 590, | 267 | .sizeimage = 352 * 288 * 3 / 8 + 590, |
@@ -199,42 +269,118 @@ static const struct v4l2_pix_format ov519_sif_mode[] = { | |||
199 | .priv = 0}, | 269 | .priv = 0}, |
200 | }; | 270 | }; |
201 | 271 | ||
272 | /* Note some of the sizeimage values for the ov511 / ov518 may seem | ||
273 | larger than necessary; however, they need to be this big as the ov511 / | ||
274 | ov518 always fills the entire isoc frame, using 0 padding bytes when | ||
275 | it doesn't have any data. So with low framerates the amount of data | ||
276 | transferred can become quite large (libv4l will remove all the 0 padding | ||
277 | in userspace). */ | ||
202 | static const struct v4l2_pix_format ov518_vga_mode[] = { | 278 | static const struct v4l2_pix_format ov518_vga_mode[] = { |
203 | {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, | 279 | {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, |
204 | .bytesperline = 320, | 280 | .bytesperline = 320, |
205 | .sizeimage = 320 * 240 * 3 / 8 + 590, | 281 | .sizeimage = 320 * 240 * 3, |
206 | .colorspace = V4L2_COLORSPACE_JPEG, | 282 | .colorspace = V4L2_COLORSPACE_JPEG, |
207 | .priv = 1}, | 283 | .priv = 1}, |
208 | {640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, | 284 | {640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, |
209 | .bytesperline = 640, | 285 | .bytesperline = 640, |
210 | .sizeimage = 640 * 480 * 3 / 8 + 590, | 286 | .sizeimage = 640 * 480 * 2, |
211 | .colorspace = V4L2_COLORSPACE_JPEG, | 287 | .colorspace = V4L2_COLORSPACE_JPEG, |
212 | .priv = 0}, | 288 | .priv = 0}, |
213 | }; | 289 | }; |
214 | static const struct v4l2_pix_format ov518_sif_mode[] = { | 290 | static const struct v4l2_pix_format ov518_sif_mode[] = { |
291 | {160, 120, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, | ||
292 | .bytesperline = 160, | ||
293 | .sizeimage = 70000, | ||
294 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
295 | .priv = 3}, | ||
215 | {176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, | 296 | {176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, |
216 | .bytesperline = 176, | 297 | .bytesperline = 176, |
217 | .sizeimage = 40000, | 298 | .sizeimage = 70000, |
218 | .colorspace = V4L2_COLORSPACE_JPEG, | 299 | .colorspace = V4L2_COLORSPACE_JPEG, |
219 | .priv = 1}, | 300 | .priv = 1}, |
301 | {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, | ||
302 | .bytesperline = 320, | ||
303 | .sizeimage = 320 * 240 * 3, | ||
304 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
305 | .priv = 2}, | ||
220 | {352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, | 306 | {352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE, |
221 | .bytesperline = 352, | 307 | .bytesperline = 352, |
222 | .sizeimage = 352 * 288 * 3 / 8 + 590, | 308 | .sizeimage = 352 * 288 * 3, |
223 | .colorspace = V4L2_COLORSPACE_JPEG, | 309 | .colorspace = V4L2_COLORSPACE_JPEG, |
224 | .priv = 0}, | 310 | .priv = 0}, |
225 | }; | 311 | }; |
226 | 312 | ||
313 | static const struct v4l2_pix_format ov511_vga_mode[] = { | ||
314 | {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE, | ||
315 | .bytesperline = 320, | ||
316 | .sizeimage = 320 * 240 * 3, | ||
317 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
318 | .priv = 1}, | ||
319 | {640, 480, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE, | ||
320 | .bytesperline = 640, | ||
321 | .sizeimage = 640 * 480 * 2, | ||
322 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
323 | .priv = 0}, | ||
324 | }; | ||
325 | static const struct v4l2_pix_format ov511_sif_mode[] = { | ||
326 | {160, 120, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE, | ||
327 | .bytesperline = 160, | ||
328 | .sizeimage = 70000, | ||
329 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
330 | .priv = 3}, | ||
331 | {176, 144, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE, | ||
332 | .bytesperline = 176, | ||
333 | .sizeimage = 70000, | ||
334 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
335 | .priv = 1}, | ||
336 | {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE, | ||
337 | .bytesperline = 320, | ||
338 | .sizeimage = 320 * 240 * 3, | ||
339 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
340 | .priv = 2}, | ||
341 | {352, 288, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE, | ||
342 | .bytesperline = 352, | ||
343 | .sizeimage = 352 * 288 * 3, | ||
344 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
345 | .priv = 0}, | ||
346 | }; | ||
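The sizeimage reservations in the tables above follow from the zero-padding behaviour described in the comment earlier: the bridge pads every isoc frame, so the buffer must cover the worst-case raw transfer rather than the nominal YUV420 payload. A minimal stand-alone sketch of that arithmetic, using the 352x288 entry as an example (values illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	int w = 352, h = 288;
	int yuv420   = w * h * 3 / 2;	/* nominal YUV420 payload: 152064 bytes */
	int reserved = w * h * 3;	/* sizeimage used above:   304128 bytes */

	printf("payload %d, reserved %d\n", yuv420, reserved);
	return 0;
}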
227 | 347 | ||
228 | /* Registers common to OV511 / OV518 */ | 348 | /* Registers common to OV511 / OV518 */ |
349 | #define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */ | ||
229 | #define R51x_SYS_RESET 0x50 | 350 | #define R51x_SYS_RESET 0x50 |
351 | /* Reset type flags */ | ||
352 | #define OV511_RESET_OMNICE 0x08 | ||
230 | #define R51x_SYS_INIT 0x53 | 353 | #define R51x_SYS_INIT 0x53 |
231 | #define R51x_SYS_SNAP 0x52 | 354 | #define R51x_SYS_SNAP 0x52 |
232 | #define R51x_SYS_CUST_ID 0x5F | 355 | #define R51x_SYS_CUST_ID 0x5F |
233 | #define R51x_COMP_LUT_BEGIN 0x80 | 356 | #define R51x_COMP_LUT_BEGIN 0x80 |
234 | 357 | ||
235 | /* OV511 Camera interface register numbers */ | 358 | /* OV511 Camera interface register numbers */ |
359 | #define R511_CAM_DELAY 0x10 | ||
360 | #define R511_CAM_EDGE 0x11 | ||
361 | #define R511_CAM_PXCNT 0x12 | ||
362 | #define R511_CAM_LNCNT 0x13 | ||
363 | #define R511_CAM_PXDIV 0x14 | ||
364 | #define R511_CAM_LNDIV 0x15 | ||
365 | #define R511_CAM_UV_EN 0x16 | ||
366 | #define R511_CAM_LINE_MODE 0x17 | ||
367 | #define R511_CAM_OPTS 0x18 | ||
368 | |||
369 | #define R511_SNAP_FRAME 0x19 | ||
370 | #define R511_SNAP_PXCNT 0x1A | ||
371 | #define R511_SNAP_LNCNT 0x1B | ||
372 | #define R511_SNAP_PXDIV 0x1C | ||
373 | #define R511_SNAP_LNDIV 0x1D | ||
374 | #define R511_SNAP_UV_EN 0x1E | ||
376 | #define R511_SNAP_OPTS 0x1F | ||
377 | |||
378 | #define R511_DRAM_FLOW_CTL 0x20 | ||
379 | #define R511_FIFO_OPTS 0x31 | ||
380 | #define R511_I2C_CTL 0x40 | ||
236 | #define R511_SYS_LED_CTL 0x55 /* OV511+ only */ | 381 | #define R511_SYS_LED_CTL 0x55 /* OV511+ only */ |
237 | #define OV511_RESET_NOREGS 0x3F /* All but OV511 & regs */ | 382 | #define R511_COMP_EN 0x78 |
383 | #define R511_COMP_LUT_EN 0x79 | ||
238 | 384 | ||
239 | /* OV518 Camera interface register numbers */ | 385 | /* OV518 Camera interface register numbers */ |
240 | #define R518_GPIO_OUT 0x56 /* OV518(+) only */ | 386 | #define R518_GPIO_OUT 0x56 /* OV518(+) only */ |
@@ -383,7 +529,7 @@ static const struct ov_i2c_regvals norm_6x20[] = { | |||
383 | { 0x28, 0x05 }, | 529 | { 0x28, 0x05 }, |
384 | { 0x2a, 0x04 }, /* Disable framerate adjust */ | 530 | { 0x2a, 0x04 }, /* Disable framerate adjust */ |
385 | /* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */ | 531 | /* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */ |
386 | { 0x2d, 0x99 }, | 532 | { 0x2d, 0x85 }, |
387 | { 0x33, 0xa0 }, /* Color Processing Parameter */ | 533 | { 0x33, 0xa0 }, /* Color Processing Parameter */ |
388 | { 0x34, 0xd2 }, /* Max A/D range */ | 534 | { 0x34, 0xd2 }, /* Max A/D range */ |
389 | { 0x38, 0x8b }, | 535 | { 0x38, 0x8b }, |
@@ -416,7 +562,7 @@ static const struct ov_i2c_regvals norm_6x30[] = { | |||
416 | { 0x07, 0x2d }, /* Sharpness */ | 562 | { 0x07, 0x2d }, /* Sharpness */ |
417 | { 0x0c, 0x20 }, | 563 | { 0x0c, 0x20 }, |
418 | { 0x0d, 0x20 }, | 564 | { 0x0d, 0x20 }, |
419 | { 0x0e, 0x20 }, | 565 | { 0x0e, 0xa0 }, /* Was 0x20, bit7 enables a 2x gain which we need */ |
420 | { 0x0f, 0x05 }, | 566 | { 0x0f, 0x05 }, |
421 | { 0x10, 0x9a }, | 567 | { 0x10, 0x9a }, |
422 | { 0x11, 0x00 }, /* Pixel clock = fastest */ | 568 | { 0x11, 0x00 }, /* Pixel clock = fastest */ |
@@ -558,7 +704,7 @@ static const struct ov_i2c_regvals norm_7620[] = { | |||
558 | { 0x23, 0x00 }, | 704 | { 0x23, 0x00 }, |
559 | { 0x26, 0xa2 }, | 705 | { 0x26, 0xa2 }, |
560 | { 0x27, 0xea }, | 706 | { 0x27, 0xea }, |
561 | { 0x28, 0x20 }, | 707 | { 0x28, 0x22 }, /* Was 0x20, bit1 enables a 2x gain which we need */ |
562 | { 0x29, 0x00 }, | 708 | { 0x29, 0x00 }, |
563 | { 0x2a, 0x10 }, | 709 | { 0x2a, 0x10 }, |
564 | { 0x2b, 0x00 }, | 710 | { 0x2b, 0x00 }, |
@@ -999,13 +1145,128 @@ static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n) | |||
999 | return ret; | 1145 | return ret; |
1000 | } | 1146 | } |
1001 | 1147 | ||
1148 | static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value) | ||
1149 | { | ||
1150 | int rc, retries; | ||
1151 | |||
1152 | PDEBUG(D_USBO, "i2c 0x%02x -> [0x%02x]", value, reg); | ||
1153 | |||
1154 | /* Three byte write cycle */ | ||
1155 | for (retries = 6; ; ) { | ||
1156 | /* Select camera register */ | ||
1157 | rc = reg_w(sd, R51x_I2C_SADDR_3, reg); | ||
1158 | if (rc < 0) | ||
1159 | return rc; | ||
1160 | |||
1161 | /* Write "value" to I2C data port of OV511 */ | ||
1162 | rc = reg_w(sd, R51x_I2C_DATA, value); | ||
1163 | if (rc < 0) | ||
1164 | return rc; | ||
1165 | |||
1166 | /* Initiate 3-byte write cycle */ | ||
1167 | rc = reg_w(sd, R511_I2C_CTL, 0x01); | ||
1168 | if (rc < 0) | ||
1169 | return rc; | ||
1170 | |||
1171 | do | ||
1172 | rc = reg_r(sd, R511_I2C_CTL); | ||
1173 | while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ | ||
1174 | |||
1175 | if (rc < 0) | ||
1176 | return rc; | ||
1177 | |||
1178 | if ((rc & 2) == 0) /* Ack? */ | ||
1179 | break; | ||
1180 | if (--retries < 0) { | ||
1181 | PDEBUG(D_USBO, "i2c write retries exhausted"); | ||
1182 | return -1; | ||
1183 | } | ||
1184 | } | ||
1185 | |||
1186 | return 0; | ||
1187 | } | ||
1188 | |||
1189 | static int ov511_i2c_r(struct sd *sd, __u8 reg) | ||
1190 | { | ||
1191 | int rc, value, retries; | ||
1192 | |||
1193 | /* Two byte write cycle */ | ||
1194 | for (retries = 6; ; ) { | ||
1195 | /* Select camera register */ | ||
1196 | rc = reg_w(sd, R51x_I2C_SADDR_2, reg); | ||
1197 | if (rc < 0) | ||
1198 | return rc; | ||
1199 | |||
1200 | /* Initiate 2-byte write cycle */ | ||
1201 | rc = reg_w(sd, R511_I2C_CTL, 0x03); | ||
1202 | if (rc < 0) | ||
1203 | return rc; | ||
1204 | |||
1205 | do | ||
1206 | rc = reg_r(sd, R511_I2C_CTL); | ||
1207 | while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ | ||
1208 | |||
1209 | if (rc < 0) | ||
1210 | return rc; | ||
1211 | |||
1212 | if ((rc & 2) == 0) /* Ack? */ | ||
1213 | break; | ||
1214 | |||
1215 | /* I2C abort */ | ||
1216 | reg_w(sd, R511_I2C_CTL, 0x10); | ||
1217 | |||
1218 | if (--retries < 0) { | ||
1219 | PDEBUG(D_USBI, "i2c write retries exhausted"); | ||
1220 | return -1; | ||
1221 | } | ||
1222 | } | ||
1223 | |||
1224 | /* Two byte read cycle */ | ||
1225 | for (retries = 6; ; ) { | ||
1226 | /* Initiate 2-byte read cycle */ | ||
1227 | rc = reg_w(sd, R511_I2C_CTL, 0x05); | ||
1228 | if (rc < 0) | ||
1229 | return rc; | ||
1230 | |||
1231 | do | ||
1232 | rc = reg_r(sd, R511_I2C_CTL); | ||
1233 | while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ | ||
1234 | |||
1235 | if (rc < 0) | ||
1236 | return rc; | ||
1237 | |||
1238 | if ((rc & 2) == 0) /* Ack? */ | ||
1239 | break; | ||
1240 | |||
1241 | /* I2C abort */ | ||
1242 | rc = reg_w(sd, R511_I2C_CTL, 0x10); | ||
1243 | if (rc < 0) | ||
1244 | return rc; | ||
1245 | |||
1246 | if (--retries < 0) { | ||
1247 | PDEBUG(D_USBI, "i2c read retries exhausted"); | ||
1248 | return -1; | ||
1249 | } | ||
1250 | } | ||
1251 | |||
1252 | value = reg_r(sd, R51x_I2C_DATA); | ||
1253 | |||
1254 | PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, value); | ||
1255 | |||
1256 | /* This is needed to make i2c_w() work */ | ||
1257 | rc = reg_w(sd, R511_I2C_CTL, 0x05); | ||
1258 | if (rc < 0) | ||
1259 | return rc; | ||
1260 | |||
1261 | return value; | ||
1262 | } | ||
1002 | 1263 | ||
1003 | /* | 1264 | /* |
1004 | * The OV518 I2C I/O procedure is different, hence, this function. | 1265 | * The OV518 I2C I/O procedure is different, hence, this function. |
1005 | * This is normally only called from i2c_w(). Note that this function | 1266 | * This is normally only called from i2c_w(). Note that this function |
1006 | * always succeeds regardless of whether the sensor is present and working. | 1267 | * always succeeds regardless of whether the sensor is present and working. |
1007 | */ | 1268 | */ |
1008 | static int i2c_w(struct sd *sd, | 1269 | static int ov518_i2c_w(struct sd *sd, |
1009 | __u8 reg, | 1270 | __u8 reg, |
1010 | __u8 value) | 1271 | __u8 value) |
1011 | { | 1272 | { |
@@ -1040,7 +1301,7 @@ static int i2c_w(struct sd *sd, | |||
1040 | * This is normally only called from i2c_r(). Note that this function | 1301 | * This is normally only called from i2c_r(). Note that this function |
1041 | * always succeeds regardless of whether the sensor is present and working. | 1302 | * always succeeds regardless of whether the sensor is present and working. |
1042 | */ | 1303 | */ |
1043 | static int i2c_r(struct sd *sd, __u8 reg) | 1304 | static int ov518_i2c_r(struct sd *sd, __u8 reg) |
1044 | { | 1305 | { |
1045 | int rc, value; | 1306 | int rc, value; |
1046 | 1307 | ||
@@ -1063,6 +1324,34 @@ static int i2c_r(struct sd *sd, __u8 reg) | |||
1063 | return value; | 1324 | return value; |
1064 | } | 1325 | } |
1065 | 1326 | ||
1327 | static int i2c_w(struct sd *sd, __u8 reg, __u8 value) | ||
1328 | { | ||
1329 | switch (sd->bridge) { | ||
1330 | case BRIDGE_OV511: | ||
1331 | case BRIDGE_OV511PLUS: | ||
1332 | return ov511_i2c_w(sd, reg, value); | ||
1333 | case BRIDGE_OV518: | ||
1334 | case BRIDGE_OV518PLUS: | ||
1335 | case BRIDGE_OV519: | ||
1336 | return ov518_i2c_w(sd, reg, value); | ||
1337 | } | ||
1338 | return -1; /* Should never happen */ | ||
1339 | } | ||
1340 | |||
1341 | static int i2c_r(struct sd *sd, __u8 reg) | ||
1342 | { | ||
1343 | switch (sd->bridge) { | ||
1344 | case BRIDGE_OV511: | ||
1345 | case BRIDGE_OV511PLUS: | ||
1346 | return ov511_i2c_r(sd, reg); | ||
1347 | case BRIDGE_OV518: | ||
1348 | case BRIDGE_OV518PLUS: | ||
1349 | case BRIDGE_OV519: | ||
1350 | return ov518_i2c_r(sd, reg); | ||
1351 | } | ||
1352 | return -1; /* Should never happen */ | ||
1353 | } | ||
1354 | |||
1066 | /* Writes bits at positions specified by mask to an I2C reg. Bits that are in | 1355 | /* Writes bits at positions specified by mask to an I2C reg. Bits that are in |
1067 | * the same position as 1's in "mask" are cleared and set to "value". Bits | 1356 | * the same position as 1's in "mask" are cleared and set to "value". Bits |
1068 | * that are in the same position as 0's in "mask" are preserved, regardless | 1357 | * that are in the same position as 0's in "mask" are preserved, regardless |
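The comment above spells out the read-modify-write semantics of i2c_w_mask(); a minimal sketch of a helper with those semantics, built only on the i2c_r()/i2c_w() wrappers shown earlier (the in-tree implementation may differ in detail):

static int i2c_w_mask_sketch(struct sd *sd, __u8 reg, __u8 value, __u8 mask)
{
	int ret;

	ret = i2c_r(sd, reg);		/* current register contents */
	if (ret < 0)
		return ret;
	/* bits under 1's in mask come from value, the rest are preserved */
	value = (ret & ~mask) | (value & mask);
	return i2c_w(sd, reg, value);
}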
@@ -1242,7 +1531,6 @@ static int ov8xx0_configure(struct sd *sd) | |||
1242 | } | 1531 | } |
1243 | 1532 | ||
1244 | /* Set sensor-specific vars */ | 1533 | /* Set sensor-specific vars */ |
1245 | /* sd->sif = 0; already done */ | ||
1246 | return 0; | 1534 | return 0; |
1247 | } | 1535 | } |
1248 | 1536 | ||
@@ -1279,15 +1567,13 @@ static int ov7xx0_configure(struct sd *sd) | |||
1279 | } | 1567 | } |
1280 | } else if ((rc & 3) == 1) { | 1568 | } else if ((rc & 3) == 1) { |
1281 | /* I don't know what's different about the 76BE yet. */ | 1569 | /* I don't know what's different about the 76BE yet. */ |
1282 | if (i2c_r(sd, 0x15) & 1) | 1570 | if (i2c_r(sd, 0x15) & 1) { |
1283 | PDEBUG(D_PROBE, "Sensor is an OV7620AE"); | 1571 | PDEBUG(D_PROBE, "Sensor is an OV7620AE"); |
1284 | else | 1572 | sd->sensor = SEN_OV7620; |
1573 | } else { | ||
1285 | PDEBUG(D_PROBE, "Sensor is an OV76BE"); | 1574 | PDEBUG(D_PROBE, "Sensor is an OV76BE"); |
1286 | 1575 | sd->sensor = SEN_OV76BE; | |
1287 | /* OV511+ will return all zero isoc data unless we | 1576 | } |
1288 | * configure the sensor as a 7620. Someone needs to | ||
1289 | * find the exact reg. setting that causes this. */ | ||
1290 | sd->sensor = SEN_OV76BE; | ||
1291 | } else if ((rc & 3) == 0) { | 1577 | } else if ((rc & 3) == 0) { |
1292 | /* try to read product id registers */ | 1578 | /* try to read product id registers */ |
1293 | high = i2c_r(sd, 0x0a); | 1579 | high = i2c_r(sd, 0x0a); |
@@ -1333,7 +1619,6 @@ static int ov7xx0_configure(struct sd *sd) | |||
1333 | } | 1619 | } |
1334 | 1620 | ||
1335 | /* Set sensor-specific vars */ | 1621 | /* Set sensor-specific vars */ |
1336 | /* sd->sif = 0; already done */ | ||
1337 | return 0; | 1622 | return 0; |
1338 | } | 1623 | } |
1339 | 1624 | ||
@@ -1362,13 +1647,14 @@ static int ov6xx0_configure(struct sd *sd) | |||
1362 | break; | 1647 | break; |
1363 | case 0x01: | 1648 | case 0x01: |
1364 | sd->sensor = SEN_OV6620; | 1649 | sd->sensor = SEN_OV6620; |
1650 | PDEBUG(D_PROBE, "Sensor is an OV6620"); | ||
1365 | break; | 1651 | break; |
1366 | case 0x02: | 1652 | case 0x02: |
1367 | sd->sensor = SEN_OV6630; | 1653 | sd->sensor = SEN_OV6630; |
1368 | PDEBUG(D_PROBE, "Sensor is an OV66308AE"); | 1654 | PDEBUG(D_PROBE, "Sensor is an OV66308AE"); |
1369 | break; | 1655 | break; |
1370 | case 0x03: | 1656 | case 0x03: |
1371 | sd->sensor = SEN_OV6630; | 1657 | sd->sensor = SEN_OV66308AF; |
1372 | PDEBUG(D_PROBE, "Sensor is an OV66308AF"); | 1658 | PDEBUG(D_PROBE, "Sensor is an OV66308AF"); |
1373 | break; | 1659 | break; |
1374 | case 0x90: | 1660 | case 0x90: |
@@ -1391,6 +1677,9 @@ static int ov6xx0_configure(struct sd *sd) | |||
1391 | /* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */ | 1677 | /* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */ |
1392 | static void ov51x_led_control(struct sd *sd, int on) | 1678 | static void ov51x_led_control(struct sd *sd, int on) |
1393 | { | 1679 | { |
1680 | if (sd->invert_led) | ||
1681 | on = !on; | ||
1682 | |||
1394 | switch (sd->bridge) { | 1683 | switch (sd->bridge) { |
1395 | /* OV511 has no LED control */ | 1684 | /* OV511 has no LED control */ |
1396 | case BRIDGE_OV511PLUS: | 1685 | case BRIDGE_OV511PLUS: |
@@ -1406,9 +1695,31 @@ static void ov51x_led_control(struct sd *sd, int on) | |||
1406 | } | 1695 | } |
1407 | } | 1696 | } |
1408 | 1697 | ||
1409 | /* OV518 quantization tables are 8x4 (instead of 8x8) */ | 1698 | static int ov51x_upload_quan_tables(struct sd *sd) |
1410 | static int ov518_upload_quan_tables(struct sd *sd) | ||
1411 | { | 1699 | { |
1700 | const unsigned char yQuanTable511[] = { | ||
1701 | 0, 1, 1, 2, 2, 3, 3, 4, | ||
1702 | 1, 1, 1, 2, 2, 3, 4, 4, | ||
1703 | 1, 1, 2, 2, 3, 4, 4, 4, | ||
1704 | 2, 2, 2, 3, 4, 4, 4, 4, | ||
1705 | 2, 2, 3, 4, 4, 5, 5, 5, | ||
1706 | 3, 3, 4, 4, 5, 5, 5, 5, | ||
1707 | 3, 4, 4, 4, 5, 5, 5, 5, | ||
1708 | 4, 4, 4, 4, 5, 5, 5, 5 | ||
1709 | }; | ||
1710 | |||
1711 | const unsigned char uvQuanTable511[] = { | ||
1712 | 0, 2, 2, 3, 4, 4, 4, 4, | ||
1713 | 2, 2, 2, 4, 4, 4, 4, 4, | ||
1714 | 2, 2, 3, 4, 4, 4, 4, 4, | ||
1715 | 3, 4, 4, 4, 4, 4, 4, 4, | ||
1716 | 4, 4, 4, 4, 4, 4, 4, 4, | ||
1717 | 4, 4, 4, 4, 4, 4, 4, 4, | ||
1718 | 4, 4, 4, 4, 4, 4, 4, 4, | ||
1719 | 4, 4, 4, 4, 4, 4, 4, 4 | ||
1720 | }; | ||
1721 | |||
1722 | /* OV518 quantization tables are 8x4 (instead of 8x8) */ | ||
1412 | const unsigned char yQuanTable518[] = { | 1723 | const unsigned char yQuanTable518[] = { |
1413 | 5, 4, 5, 6, 6, 7, 7, 7, | 1724 | 5, 4, 5, 6, 6, 7, 7, 7, |
1414 | 5, 5, 5, 5, 6, 7, 7, 7, | 1725 | 5, 5, 5, 5, 6, 7, 7, 7, |
@@ -1423,14 +1734,23 @@ static int ov518_upload_quan_tables(struct sd *sd) | |||
1423 | 7, 7, 7, 7, 7, 7, 8, 8 | 1734 | 7, 7, 7, 7, 7, 7, 8, 8 |
1424 | }; | 1735 | }; |
1425 | 1736 | ||
1426 | const unsigned char *pYTable = yQuanTable518; | 1737 | const unsigned char *pYTable, *pUVTable; |
1427 | const unsigned char *pUVTable = uvQuanTable518; | ||
1428 | unsigned char val0, val1; | 1738 | unsigned char val0, val1; |
1429 | int i, rc, reg = R51x_COMP_LUT_BEGIN; | 1739 | int i, size, rc, reg = R51x_COMP_LUT_BEGIN; |
1430 | 1740 | ||
1431 | PDEBUG(D_PROBE, "Uploading quantization tables"); | 1741 | PDEBUG(D_PROBE, "Uploading quantization tables"); |
1432 | 1742 | ||
1433 | for (i = 0; i < 16; i++) { | 1743 | if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS) { |
1744 | pYTable = yQuanTable511; | ||
1745 | pUVTable = uvQuanTable511; | ||
1746 | size = 32; | ||
1747 | } else { | ||
1748 | pYTable = yQuanTable518; | ||
1749 | pUVTable = uvQuanTable518; | ||
1750 | size = 16; | ||
1751 | } | ||
1752 | |||
1753 | for (i = 0; i < size; i++) { | ||
1434 | val0 = *pYTable++; | 1754 | val0 = *pYTable++; |
1435 | val1 = *pYTable++; | 1755 | val1 = *pYTable++; |
1436 | val0 &= 0x0f; | 1756 | val0 &= 0x0f; |
@@ -1445,7 +1765,7 @@ static int ov518_upload_quan_tables(struct sd *sd) | |||
1445 | val0 &= 0x0f; | 1765 | val0 &= 0x0f; |
1446 | val1 &= 0x0f; | 1766 | val1 &= 0x0f; |
1447 | val0 |= val1 << 4; | 1767 | val0 |= val1 << 4; |
1448 | rc = reg_w(sd, reg + 16, val0); | 1768 | rc = reg_w(sd, reg + size, val0); |
1449 | if (rc < 0) | 1769 | if (rc < 0) |
1450 | return rc; | 1770 | return rc; |
1451 | 1771 | ||
@@ -1455,6 +1775,87 @@ static int ov518_upload_quan_tables(struct sd *sd) | |||
1455 | return 0; | 1775 | return 0; |
1456 | } | 1776 | } |
1457 | 1777 | ||
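For reference, the LUT upload loop above packs two 4-bit quantizer coefficients into each register write; a tiny check of that nibble packing, using the first Y-table pair of the OV511 tables as an example (the helper name is illustrative only):

#include <assert.h>

static unsigned char pack_quant_pair(unsigned char first, unsigned char second)
{
	/* first entry in the low nibble, second entry in the high nibble */
	return (unsigned char)((first & 0x0f) | ((second & 0x0f) << 4));
}

int main(void)
{
	assert(pack_quant_pair(0, 1) == 0x10);	/* yQuanTable511[0], [1] */
	return 0;
}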
1778 | /* This initializes the OV511/OV511+ and the sensor */ | ||
1779 | static int ov511_configure(struct gspca_dev *gspca_dev) | ||
1780 | { | ||
1781 | struct sd *sd = (struct sd *) gspca_dev; | ||
1782 | int rc; | ||
1783 | |||
1784 | /* For 511 and 511+ */ | ||
1785 | const struct ov_regvals init_511[] = { | ||
1786 | { R51x_SYS_RESET, 0x7f }, | ||
1787 | { R51x_SYS_INIT, 0x01 }, | ||
1788 | { R51x_SYS_RESET, 0x7f }, | ||
1789 | { R51x_SYS_INIT, 0x01 }, | ||
1790 | { R51x_SYS_RESET, 0x3f }, | ||
1791 | { R51x_SYS_INIT, 0x01 }, | ||
1792 | { R51x_SYS_RESET, 0x3d }, | ||
1793 | }; | ||
1794 | |||
1795 | const struct ov_regvals norm_511[] = { | ||
1796 | { R511_DRAM_FLOW_CTL, 0x01 }, | ||
1797 | { R51x_SYS_SNAP, 0x00 }, | ||
1798 | { R51x_SYS_SNAP, 0x02 }, | ||
1799 | { R51x_SYS_SNAP, 0x00 }, | ||
1800 | { R511_FIFO_OPTS, 0x1f }, | ||
1801 | { R511_COMP_EN, 0x00 }, | ||
1802 | { R511_COMP_LUT_EN, 0x03 }, | ||
1803 | }; | ||
1804 | |||
1805 | const struct ov_regvals norm_511_p[] = { | ||
1806 | { R511_DRAM_FLOW_CTL, 0xff }, | ||
1807 | { R51x_SYS_SNAP, 0x00 }, | ||
1808 | { R51x_SYS_SNAP, 0x02 }, | ||
1809 | { R51x_SYS_SNAP, 0x00 }, | ||
1810 | { R511_FIFO_OPTS, 0xff }, | ||
1811 | { R511_COMP_EN, 0x00 }, | ||
1812 | { R511_COMP_LUT_EN, 0x03 }, | ||
1813 | }; | ||
1814 | |||
1815 | const struct ov_regvals compress_511[] = { | ||
1816 | { 0x70, 0x1f }, | ||
1817 | { 0x71, 0x05 }, | ||
1818 | { 0x72, 0x06 }, | ||
1819 | { 0x73, 0x06 }, | ||
1820 | { 0x74, 0x14 }, | ||
1821 | { 0x75, 0x03 }, | ||
1822 | { 0x76, 0x04 }, | ||
1823 | { 0x77, 0x04 }, | ||
1824 | }; | ||
1825 | |||
1826 | PDEBUG(D_PROBE, "Device custom id %x", reg_r(sd, R51x_SYS_CUST_ID)); | ||
1827 | |||
1828 | rc = write_regvals(sd, init_511, ARRAY_SIZE(init_511)); | ||
1829 | if (rc < 0) | ||
1830 | return rc; | ||
1831 | |||
1832 | switch (sd->bridge) { | ||
1833 | case BRIDGE_OV511: | ||
1834 | rc = write_regvals(sd, norm_511, ARRAY_SIZE(norm_511)); | ||
1835 | if (rc < 0) | ||
1836 | return rc; | ||
1837 | break; | ||
1838 | case BRIDGE_OV511PLUS: | ||
1839 | rc = write_regvals(sd, norm_511_p, ARRAY_SIZE(norm_511_p)); | ||
1840 | if (rc < 0) | ||
1841 | return rc; | ||
1842 | break; | ||
1843 | } | ||
1844 | |||
1845 | /* Init compression */ | ||
1846 | rc = write_regvals(sd, compress_511, ARRAY_SIZE(compress_511)); | ||
1847 | if (rc < 0) | ||
1848 | return rc; | ||
1849 | |||
1850 | rc = ov51x_upload_quan_tables(sd); | ||
1851 | if (rc < 0) { | ||
1852 | PDEBUG(D_ERR, "Error uploading quantization tables"); | ||
1853 | return rc; | ||
1854 | } | ||
1855 | |||
1856 | return 0; | ||
1857 | } | ||
1858 | |||
1458 | /* This initializes the OV518/OV518+ and the sensor */ | 1859 | /* This initializes the OV518/OV518+ and the sensor */ |
1459 | static int ov518_configure(struct gspca_dev *gspca_dev) | 1860 | static int ov518_configure(struct gspca_dev *gspca_dev) |
1460 | { | 1861 | { |
@@ -1462,7 +1863,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev) | |||
1462 | int rc; | 1863 | int rc; |
1463 | 1864 | ||
1464 | /* For 518 and 518+ */ | 1865 | /* For 518 and 518+ */ |
1465 | static struct ov_regvals init_518[] = { | 1866 | const struct ov_regvals init_518[] = { |
1466 | { R51x_SYS_RESET, 0x40 }, | 1867 | { R51x_SYS_RESET, 0x40 }, |
1467 | { R51x_SYS_INIT, 0xe1 }, | 1868 | { R51x_SYS_INIT, 0xe1 }, |
1468 | { R51x_SYS_RESET, 0x3e }, | 1869 | { R51x_SYS_RESET, 0x3e }, |
@@ -1473,7 +1874,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev) | |||
1473 | { 0x5d, 0x03 }, | 1874 | { 0x5d, 0x03 }, |
1474 | }; | 1875 | }; |
1475 | 1876 | ||
1476 | static struct ov_regvals norm_518[] = { | 1877 | const struct ov_regvals norm_518[] = { |
1477 | { R51x_SYS_SNAP, 0x02 }, /* Reset */ | 1878 | { R51x_SYS_SNAP, 0x02 }, /* Reset */ |
1478 | { R51x_SYS_SNAP, 0x01 }, /* Enable */ | 1879 | { R51x_SYS_SNAP, 0x01 }, /* Enable */ |
1479 | { 0x31, 0x0f }, | 1880 | { 0x31, 0x0f }, |
@@ -1486,7 +1887,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev) | |||
1486 | { 0x2f, 0x80 }, | 1887 | { 0x2f, 0x80 }, |
1487 | }; | 1888 | }; |
1488 | 1889 | ||
1489 | static struct ov_regvals norm_518_p[] = { | 1890 | const struct ov_regvals norm_518_p[] = { |
1490 | { R51x_SYS_SNAP, 0x02 }, /* Reset */ | 1891 | { R51x_SYS_SNAP, 0x02 }, /* Reset */ |
1491 | { R51x_SYS_SNAP, 0x01 }, /* Enable */ | 1892 | { R51x_SYS_SNAP, 0x01 }, /* Enable */ |
1492 | { 0x31, 0x0f }, | 1893 | { 0x31, 0x0f }, |
@@ -1531,7 +1932,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev) | |||
1531 | break; | 1932 | break; |
1532 | } | 1933 | } |
1533 | 1934 | ||
1534 | rc = ov518_upload_quan_tables(sd); | 1935 | rc = ov51x_upload_quan_tables(sd); |
1535 | if (rc < 0) { | 1936 | if (rc < 0) { |
1536 | PDEBUG(D_ERR, "Error uploading quantization tables"); | 1937 | PDEBUG(D_ERR, "Error uploading quantization tables"); |
1537 | return rc; | 1938 | return rc; |
@@ -1573,9 +1974,14 @@ static int sd_config(struct gspca_dev *gspca_dev, | |||
1573 | struct cam *cam; | 1974 | struct cam *cam; |
1574 | int ret = 0; | 1975 | int ret = 0; |
1575 | 1976 | ||
1576 | sd->bridge = id->driver_info; | 1977 | sd->bridge = id->driver_info & BRIDGE_MASK; |
1978 | sd->invert_led = id->driver_info & BRIDGE_INVERT_LED; | ||
1577 | 1979 | ||
1578 | switch (sd->bridge) { | 1980 | switch (sd->bridge) { |
1981 | case BRIDGE_OV511: | ||
1982 | case BRIDGE_OV511PLUS: | ||
1983 | ret = ov511_configure(gspca_dev); | ||
1984 | break; | ||
1579 | case BRIDGE_OV518: | 1985 | case BRIDGE_OV518: |
1580 | case BRIDGE_OV518PLUS: | 1986 | case BRIDGE_OV518PLUS: |
1581 | ret = ov518_configure(gspca_dev); | 1987 | ret = ov518_configure(gspca_dev); |
@@ -1634,6 +2040,16 @@ static int sd_config(struct gspca_dev *gspca_dev, | |||
1634 | 2040 | ||
1635 | cam = &gspca_dev->cam; | 2041 | cam = &gspca_dev->cam; |
1636 | switch (sd->bridge) { | 2042 | switch (sd->bridge) { |
2043 | case BRIDGE_OV511: | ||
2044 | case BRIDGE_OV511PLUS: | ||
2045 | if (!sd->sif) { | ||
2046 | cam->cam_mode = ov511_vga_mode; | ||
2047 | cam->nmodes = ARRAY_SIZE(ov511_vga_mode); | ||
2048 | } else { | ||
2049 | cam->cam_mode = ov511_sif_mode; | ||
2050 | cam->nmodes = ARRAY_SIZE(ov511_sif_mode); | ||
2051 | } | ||
2052 | break; | ||
1637 | case BRIDGE_OV518: | 2053 | case BRIDGE_OV518: |
1638 | case BRIDGE_OV518PLUS: | 2054 | case BRIDGE_OV518PLUS: |
1639 | if (!sd->sif) { | 2055 | if (!sd->sif) { |
@@ -1655,13 +2071,28 @@ static int sd_config(struct gspca_dev *gspca_dev, | |||
1655 | break; | 2071 | break; |
1656 | } | 2072 | } |
1657 | sd->brightness = BRIGHTNESS_DEF; | 2073 | sd->brightness = BRIGHTNESS_DEF; |
1658 | sd->contrast = CONTRAST_DEF; | 2074 | if (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF) |
2075 | sd->contrast = 200; /* The default is too low for the ov6630 */ | ||
2076 | else | ||
2077 | sd->contrast = CONTRAST_DEF; | ||
1659 | sd->colors = COLOR_DEF; | 2078 | sd->colors = COLOR_DEF; |
1660 | sd->hflip = HFLIP_DEF; | 2079 | sd->hflip = HFLIP_DEF; |
1661 | sd->vflip = VFLIP_DEF; | 2080 | sd->vflip = VFLIP_DEF; |
1662 | if (sd->sensor != SEN_OV7670) | 2081 | sd->autobrightness = AUTOBRIGHT_DEF; |
1663 | gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | 2082 | if (sd->sensor == SEN_OV7670) { |
1664 | | (1 << VFLIP_IDX); | 2083 | sd->freq = OV7670_FREQ_DEF; |
2084 | gspca_dev->ctrl_dis = 1 << FREQ_IDX; | ||
2085 | } else { | ||
2086 | sd->freq = FREQ_DEF; | ||
2087 | gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | | ||
2088 | (1 << OV7670_FREQ_IDX); | ||
2089 | } | ||
2090 | if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670) | ||
2091 | gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX; | ||
2092 | /* OV8610 Frequency filter control should work but needs testing */ | ||
2093 | if (sd->sensor == SEN_OV8610) | ||
2094 | gspca_dev->ctrl_dis |= 1 << FREQ_IDX; | ||
2095 | |||
1665 | return 0; | 2096 | return 0; |
1666 | error: | 2097 | error: |
1667 | PDEBUG(D_ERR, "OV519 Config failed"); | 2098 | PDEBUG(D_ERR, "OV519 Config failed"); |
@@ -1680,6 +2111,7 @@ static int sd_init(struct gspca_dev *gspca_dev) | |||
1680 | return -EIO; | 2111 | return -EIO; |
1681 | break; | 2112 | break; |
1682 | case SEN_OV6630: | 2113 | case SEN_OV6630: |
2114 | case SEN_OV66308AF: | ||
1683 | if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30))) | 2115 | if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30))) |
1684 | return -EIO; | 2116 | return -EIO; |
1685 | break; | 2117 | break; |
@@ -1688,6 +2120,8 @@ static int sd_init(struct gspca_dev *gspca_dev) | |||
1688 | /* case SEN_OV76BE: */ | 2120 | /* case SEN_OV76BE: */ |
1689 | if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610))) | 2121 | if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610))) |
1690 | return -EIO; | 2122 | return -EIO; |
2123 | if (i2c_w_mask(sd, 0x0e, 0x00, 0x40)) | ||
2124 | return -EIO; | ||
1691 | break; | 2125 | break; |
1692 | case SEN_OV7620: | 2126 | case SEN_OV7620: |
1693 | if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620))) | 2127 | if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620))) |
@@ -1709,6 +2143,126 @@ static int sd_init(struct gspca_dev *gspca_dev) | |||
1709 | return 0; | 2143 | return 0; |
1710 | } | 2144 | } |
1711 | 2145 | ||
2146 | /* Set up the OV511/OV511+ with the given image parameters. | ||
2147 | * | ||
2148 | * Do not put any sensor-specific code in here (including I2C I/O functions) | ||
2149 | */ | ||
2150 | static int ov511_mode_init_regs(struct sd *sd) | ||
2151 | { | ||
2152 | int hsegs, vsegs, packet_size, fps, needed; | ||
2153 | int interlaced = 0; | ||
2154 | struct usb_host_interface *alt; | ||
2155 | struct usb_interface *intf; | ||
2156 | |||
2157 | intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); | ||
2158 | alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); | ||
2159 | if (!alt) { | ||
2160 | PDEBUG(D_ERR, "Couldn't get altsetting"); | ||
2161 | return -EIO; | ||
2162 | } | ||
2163 | |||
2164 | packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); | ||
2165 | reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5); | ||
2166 | |||
2167 | reg_w(sd, R511_CAM_UV_EN, 0x01); | ||
2168 | reg_w(sd, R511_SNAP_UV_EN, 0x01); | ||
2169 | reg_w(sd, R511_SNAP_OPTS, 0x03); | ||
2170 | |||
2171 | /* Here I'm assuming that snapshot size == image size. | ||
2172 | * I hope that's always true. --claudio | ||
2173 | */ | ||
2174 | hsegs = (sd->gspca_dev.width >> 3) - 1; | ||
2175 | vsegs = (sd->gspca_dev.height >> 3) - 1; | ||
2176 | |||
2177 | reg_w(sd, R511_CAM_PXCNT, hsegs); | ||
2178 | reg_w(sd, R511_CAM_LNCNT, vsegs); | ||
2179 | reg_w(sd, R511_CAM_PXDIV, 0x00); | ||
2180 | reg_w(sd, R511_CAM_LNDIV, 0x00); | ||
2181 | |||
2182 | /* YUV420, low pass filter on */ | ||
2183 | reg_w(sd, R511_CAM_OPTS, 0x03); | ||
2184 | |||
2185 | /* Snapshot additions */ | ||
2186 | reg_w(sd, R511_SNAP_PXCNT, hsegs); | ||
2187 | reg_w(sd, R511_SNAP_LNCNT, vsegs); | ||
2188 | reg_w(sd, R511_SNAP_PXDIV, 0x00); | ||
2189 | reg_w(sd, R511_SNAP_LNDIV, 0x00); | ||
2190 | |||
2191 | /******** Set the framerate ********/ | ||
2192 | if (frame_rate > 0) | ||
2193 | sd->frame_rate = frame_rate; | ||
2194 | |||
2195 | switch (sd->sensor) { | ||
2196 | case SEN_OV6620: | ||
2197 | /* No framerate control, doesn't like higher rates yet */ | ||
2198 | sd->clockdiv = 3; | ||
2199 | break; | ||
2200 | |||
2201 | /* Note: once the FIXMEs in mode_init_ov_sensor_regs() are fixed | ||
2202 | for more sensors, we need to do this for them too */ | ||
2203 | case SEN_OV7620: | ||
2204 | case SEN_OV7640: | ||
2205 | case SEN_OV76BE: | ||
2206 | if (sd->gspca_dev.width == 320) | ||
2207 | interlaced = 1; | ||
2208 | /* Fall through */ | ||
2209 | case SEN_OV6630: | ||
2210 | case SEN_OV7610: | ||
2211 | case SEN_OV7670: | ||
2212 | switch (sd->frame_rate) { | ||
2213 | case 30: | ||
2214 | case 25: | ||
2215 | /* Not enough bandwidth to do 640x480 @ 30 fps */ | ||
2216 | if (sd->gspca_dev.width != 640) { | ||
2217 | sd->clockdiv = 0; | ||
2218 | break; | ||
2219 | } | ||
2220 | /* Fall through for 640x480 case */ | ||
2221 | default: | ||
2222 | /* case 20: */ | ||
2223 | /* case 15: */ | ||
2224 | sd->clockdiv = 1; | ||
2225 | break; | ||
2226 | case 10: | ||
2227 | sd->clockdiv = 2; | ||
2228 | break; | ||
2229 | case 5: | ||
2230 | sd->clockdiv = 5; | ||
2231 | break; | ||
2232 | } | ||
2233 | if (interlaced) { | ||
2234 | sd->clockdiv = (sd->clockdiv + 1) * 2 - 1; | ||
2235 | /* Higher than 10 does not work */ | ||
2236 | if (sd->clockdiv > 10) | ||
2237 | sd->clockdiv = 10; | ||
2238 | } | ||
2239 | break; | ||
2240 | |||
2241 | case SEN_OV8610: | ||
2242 | /* No framerate control ?? */ | ||
2243 | sd->clockdiv = 0; | ||
2244 | break; | ||
2245 | } | ||
2246 | |||
2247 | /* Check if we have enough bandwidth to disable compression */ | ||
2248 | fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1; | ||
2249 | needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2; | ||
2250 | /* 1400 is a conservative estimate of the max nr of isoc packets/sec */ | ||
2251 | if (needed > 1400 * packet_size) { | ||
2252 | /* Enable Y and UV quantization and compression */ | ||
2253 | reg_w(sd, R511_COMP_EN, 0x07); | ||
2254 | reg_w(sd, R511_COMP_LUT_EN, 0x03); | ||
2255 | } else { | ||
2256 | reg_w(sd, R511_COMP_EN, 0x06); | ||
2257 | reg_w(sd, R511_COMP_LUT_EN, 0x00); | ||
2258 | } | ||
2259 | |||
2260 | reg_w(sd, R51x_SYS_RESET, OV511_RESET_OMNICE); | ||
2261 | reg_w(sd, R51x_SYS_RESET, 0); | ||
2262 | |||
2263 | return 0; | ||
2264 | } | ||
2265 | |||
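A quick numeric check of the compression heuristic at the end of ov511_mode_init_regs(), assuming a 961-byte wMaxPacketSize for the largest alt setting (all numbers illustrative):

#include <stdio.h>

int main(void)
{
	int packet_size = 961;			/* assumed isoc packet size */
	int budget = 1400 * packet_size;	/* conservative isoc budget, ~1.35 MB/s */
	int clockdiv = 1, fps, needed;

	fps = 30 / (clockdiv + 1) + 1;		/* 16 fps, non-interlaced */

	needed = fps * 640 * 480 * 3 / 2;	/* 7372800: over budget, keep compression */
	printf("640x480: %d vs %d\n", needed, budget);

	needed = fps * 160 * 120 * 3 / 2;	/* 460800: under budget, raw is fine */
	printf("160x120: %d vs %d\n", needed, budget);
	return 0;
}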
1712 | /* Sets up the OV518/OV518+ with the given image parameters | 2266 | /* Sets up the OV518/OV518+ with the given image parameters |
1713 | * | 2267 | * |
1714 | * OV518 needs a completely different approach, until we can figure out what | 2268 | * OV518 needs a completely different approach, until we can figure out what |
@@ -1718,7 +2272,19 @@ static int sd_init(struct gspca_dev *gspca_dev) | |||
1718 | */ | 2272 | */ |
1719 | static int ov518_mode_init_regs(struct sd *sd) | 2273 | static int ov518_mode_init_regs(struct sd *sd) |
1720 | { | 2274 | { |
1721 | int hsegs, vsegs; | 2275 | int hsegs, vsegs, packet_size; |
2276 | struct usb_host_interface *alt; | ||
2277 | struct usb_interface *intf; | ||
2278 | |||
2279 | intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); | ||
2280 | alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); | ||
2281 | if (!alt) { | ||
2282 | PDEBUG(D_ERR, "Couldn't get altsetting"); | ||
2283 | return -EIO; | ||
2284 | } | ||
2285 | |||
2286 | packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); | ||
2287 | ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2); | ||
1722 | 2288 | ||
1723 | /******** Set the mode ********/ | 2289 | /******** Set the mode ********/ |
1724 | 2290 | ||
@@ -1755,20 +2321,30 @@ static int ov518_mode_init_regs(struct sd *sd) | |||
1755 | /* Windows driver does this here; who knows why */ | 2321 | /* Windows driver does this here; who knows why */ |
1756 | reg_w(sd, 0x2f, 0x80); | 2322 | reg_w(sd, 0x2f, 0x80); |
1757 | 2323 | ||
1758 | /******** Set the framerate (to 30 FPS) ********/ | 2324 | /******** Set the framerate ********/ |
1759 | if (sd->bridge == BRIDGE_OV518PLUS) | 2325 | sd->clockdiv = 1; |
1760 | sd->clockdiv = 1; | ||
1761 | else | ||
1762 | sd->clockdiv = 0; | ||
1763 | 2326 | ||
1764 | /* Mode independent, but framerate dependent, regs */ | 2327 | /* Mode independent, but framerate dependent, regs */ |
1765 | reg_w(sd, 0x51, 0x04); /* Clock divider; lower==faster */ | 2328 | /* 0x51: Clock divider; Only works on some cams which use 2 crystals */ |
2329 | reg_w(sd, 0x51, 0x04); | ||
1766 | reg_w(sd, 0x22, 0x18); | 2330 | reg_w(sd, 0x22, 0x18); |
1767 | reg_w(sd, 0x23, 0xff); | 2331 | reg_w(sd, 0x23, 0xff); |
1768 | 2332 | ||
1769 | if (sd->bridge == BRIDGE_OV518PLUS) | 2333 | if (sd->bridge == BRIDGE_OV518PLUS) { |
1770 | reg_w(sd, 0x21, 0x19); | 2334 | switch (sd->sensor) { |
1771 | else | 2335 | case SEN_OV7620: |
2336 | if (sd->gspca_dev.width == 320) { | ||
2337 | reg_w(sd, 0x20, 0x00); | ||
2338 | reg_w(sd, 0x21, 0x19); | ||
2339 | } else { | ||
2340 | reg_w(sd, 0x20, 0x60); | ||
2341 | reg_w(sd, 0x21, 0x1f); | ||
2342 | } | ||
2343 | break; | ||
2344 | default: | ||
2345 | reg_w(sd, 0x21, 0x19); | ||
2346 | } | ||
2347 | } else | ||
1772 | reg_w(sd, 0x71, 0x17); /* Compression-related? */ | 2348 | reg_w(sd, 0x71, 0x17); /* Compression-related? */ |
1773 | 2349 | ||
1774 | /* FIXME: Sensor-specific */ | 2350 | /* FIXME: Sensor-specific */ |
@@ -1879,7 +2455,11 @@ static int ov519_mode_init_regs(struct sd *sd) | |||
1879 | 2455 | ||
1880 | reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4); | 2456 | reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4); |
1881 | reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3); | 2457 | reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3); |
1882 | reg_w(sd, OV519_R12_X_OFFSETL, 0x00); | 2458 | if (sd->sensor == SEN_OV7670 && |
2459 | sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv) | ||
2460 | reg_w(sd, OV519_R12_X_OFFSETL, 0x04); | ||
2461 | else | ||
2462 | reg_w(sd, OV519_R12_X_OFFSETL, 0x00); | ||
1883 | reg_w(sd, OV519_R13_X_OFFSETH, 0x00); | 2463 | reg_w(sd, OV519_R13_X_OFFSETH, 0x00); |
1884 | reg_w(sd, OV519_R14_Y_OFFSETL, 0x00); | 2464 | reg_w(sd, OV519_R14_Y_OFFSETL, 0x00); |
1885 | reg_w(sd, OV519_R15_Y_OFFSETH, 0x00); | 2465 | reg_w(sd, OV519_R15_Y_OFFSETH, 0x00); |
@@ -1971,7 +2551,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd) | |||
1971 | int qvga; | 2551 | int qvga; |
1972 | 2552 | ||
1973 | gspca_dev = &sd->gspca_dev; | 2553 | gspca_dev = &sd->gspca_dev; |
1974 | qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; | 2554 | qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1; |
1975 | 2555 | ||
1976 | /******** Mode (VGA/QVGA) and sensor specific regs ********/ | 2556 | /******** Mode (VGA/QVGA) and sensor specific regs ********/ |
1977 | switch (sd->sensor) { | 2557 | switch (sd->sensor) { |
@@ -1983,21 +2563,16 @@ static int mode_init_ov_sensor_regs(struct sd *sd) | |||
1983 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); | 2563 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); |
1984 | break; | 2564 | break; |
1985 | case SEN_OV7620: | 2565 | case SEN_OV7620: |
1986 | /* i2c_w(sd, 0x2b, 0x00); */ | 2566 | case SEN_OV76BE: |
1987 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); | 2567 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); |
1988 | i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); | 2568 | i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); |
1989 | i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); | 2569 | i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); |
1990 | i2c_w(sd, 0x25, qvga ? 0x30 : 0x60); | 2570 | i2c_w(sd, 0x25, qvga ? 0x30 : 0x60); |
1991 | i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); | 2571 | i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); |
1992 | i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0); | 2572 | i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0); |
1993 | i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); | 2573 | i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); |
1994 | break; | 2574 | break; |
1995 | case SEN_OV76BE: | ||
1996 | /* i2c_w(sd, 0x2b, 0x00); */ | ||
1997 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); | ||
1998 | break; | ||
1999 | case SEN_OV7640: | 2575 | case SEN_OV7640: |
2000 | /* i2c_w(sd, 0x2b, 0x00); */ | ||
2001 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); | 2576 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); |
2002 | i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); | 2577 | i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); |
2003 | /* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */ | 2578 | /* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */ |
@@ -2016,6 +2591,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd) | |||
2016 | break; | 2591 | break; |
2017 | case SEN_OV6620: | 2592 | case SEN_OV6620: |
2018 | case SEN_OV6630: | 2593 | case SEN_OV6630: |
2594 | case SEN_OV66308AF: | ||
2019 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); | 2595 | i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); |
2020 | break; | 2596 | break; |
2021 | default: | 2597 | default: |
@@ -2023,10 +2599,6 @@ static int mode_init_ov_sensor_regs(struct sd *sd) | |||
2023 | } | 2599 | } |
2024 | 2600 | ||
2025 | /******** Palette-specific regs ********/ | 2601 | /******** Palette-specific regs ********/ |
2026 | if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) { | ||
2027 | /* not valid on the OV6620/OV7620/6630? */ | ||
2028 | i2c_w_mask(sd, 0x0e, 0x00, 0x40); | ||
2029 | } | ||
2030 | 2602 | ||
2031 | /* The OV518 needs special treatment. Although both the OV518 | 2603 | /* The OV518 needs special treatment. Although both the OV518 |
2032 | * and the OV6630 support a 16-bit video bus, only the 8 bit Y | 2604 | * and the OV6630 support a 16-bit video bus, only the 8 bit Y |
@@ -2036,25 +2608,12 @@ static int mode_init_ov_sensor_regs(struct sd *sd) | |||
2036 | 2608 | ||
2037 | /* OV7640 is 8-bit only */ | 2609 | /* OV7640 is 8-bit only */ |
2038 | 2610 | ||
2039 | if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV7640) | 2611 | if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV66308AF && |
2612 | sd->sensor != SEN_OV7640) | ||
2040 | i2c_w_mask(sd, 0x13, 0x00, 0x20); | 2613 | i2c_w_mask(sd, 0x13, 0x00, 0x20); |
2041 | 2614 | ||
2042 | /******** Clock programming ********/ | 2615 | /******** Clock programming ********/ |
2043 | /* The OV6620 needs special handling. This prevents the | 2616 | i2c_w(sd, 0x11, sd->clockdiv); |
2044 | * severe banding that normally occurs */ | ||
2045 | if (sd->sensor == SEN_OV6620) { | ||
2046 | |||
2047 | /* Clock down */ | ||
2048 | i2c_w(sd, 0x2a, 0x04); | ||
2049 | i2c_w(sd, 0x11, sd->clockdiv); | ||
2050 | i2c_w(sd, 0x2a, 0x84); | ||
2051 | /* This next setting is critical. It seems to improve | ||
2052 | * the gain or the contrast. The "reserved" bits seem | ||
2053 | * to have some effect in this case. */ | ||
2054 | i2c_w(sd, 0x2d, 0x85); | ||
2055 | } else { | ||
2056 | i2c_w(sd, 0x11, sd->clockdiv); | ||
2057 | } | ||
2058 | 2617 | ||
2059 | /******** Special Features ********/ | 2618 | /******** Special Features ********/ |
2060 | /* no evidence this is possible with OV7670, either */ | 2619 | /* no evidence this is possible with OV7670, either */ |
@@ -2098,13 +2657,14 @@ static void sethvflip(struct sd *sd) | |||
2098 | static int set_ov_sensor_window(struct sd *sd) | 2657 | static int set_ov_sensor_window(struct sd *sd) |
2099 | { | 2658 | { |
2100 | struct gspca_dev *gspca_dev; | 2659 | struct gspca_dev *gspca_dev; |
2101 | int qvga; | 2660 | int qvga, crop; |
2102 | int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale; | 2661 | int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale; |
2103 | int ret, hstart, hstop, vstop, vstart; | 2662 | int ret, hstart, hstop, vstop, vstart; |
2104 | __u8 v; | 2663 | __u8 v; |
2105 | 2664 | ||
2106 | gspca_dev = &sd->gspca_dev; | 2665 | gspca_dev = &sd->gspca_dev; |
2107 | qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; | 2666 | qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1; |
2667 | crop = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 2; | ||
2108 | 2668 | ||
2109 | /* The different sensor ICs handle setting up of window differently. | 2669 | /* The different sensor ICs handle setting up of window differently. |
2110 | * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */ | 2670 | * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */ |
@@ -2123,14 +2683,19 @@ static int set_ov_sensor_window(struct sd *sd) | |||
2123 | break; | 2683 | break; |
2124 | case SEN_OV6620: | 2684 | case SEN_OV6620: |
2125 | case SEN_OV6630: | 2685 | case SEN_OV6630: |
2686 | case SEN_OV66308AF: | ||
2126 | hwsbase = 0x38; | 2687 | hwsbase = 0x38; |
2127 | hwebase = 0x3a; | 2688 | hwebase = 0x3a; |
2128 | vwsbase = 0x05; | 2689 | vwsbase = 0x05; |
2129 | vwebase = 0x06; | 2690 | vwebase = 0x06; |
2130 | if (qvga) { | 2691 | if (sd->sensor == SEN_OV66308AF && qvga) |
2131 | /* HDG: this fixes U and V getting swapped */ | 2692 | /* HDG: this fixes U and V getting swapped */ |
2132 | hwsbase--; | 2693 | hwsbase++; |
2133 | vwsbase--; | 2694 | if (crop) { |
2695 | hwsbase += 8; | ||
2696 | hwebase += 8; | ||
2697 | vwsbase += 11; | ||
2698 | vwebase += 11; | ||
2134 | } | 2699 | } |
2135 | break; | 2700 | break; |
2136 | case SEN_OV7620: | 2701 | case SEN_OV7620: |
@@ -2155,6 +2720,7 @@ static int set_ov_sensor_window(struct sd *sd) | |||
2155 | switch (sd->sensor) { | 2720 | switch (sd->sensor) { |
2156 | case SEN_OV6620: | 2721 | case SEN_OV6620: |
2157 | case SEN_OV6630: | 2722 | case SEN_OV6630: |
2723 | case SEN_OV66308AF: | ||
2158 | if (qvga) { /* QCIF */ | 2724 | if (qvga) { /* QCIF */ |
2159 | hwscale = 0; | 2725 | hwscale = 0; |
2160 | vwscale = 0; | 2726 | vwscale = 0; |
@@ -2207,7 +2773,7 @@ static int set_ov_sensor_window(struct sd *sd) | |||
2207 | if (qvga) { /* QVGA from ov7670.c by | 2773 | if (qvga) { /* QVGA from ov7670.c by |
2208 | * Jonathan Corbet */ | 2774 | * Jonathan Corbet */ |
2209 | hstart = 164; | 2775 | hstart = 164; |
2210 | hstop = 20; | 2776 | hstop = 28; |
2211 | vstart = 14; | 2777 | vstart = 14; |
2212 | vstop = 494; | 2778 | vstop = 494; |
2213 | } else { /* VGA */ | 2779 | } else { /* VGA */ |
@@ -2233,7 +2799,6 @@ static int set_ov_sensor_window(struct sd *sd) | |||
2233 | msleep(10); /* need to sleep between read and write to | 2799 | msleep(10); /* need to sleep between read and write to |
2234 | * same reg! */ | 2800 | * same reg! */ |
2235 | i2c_w(sd, OV7670_REG_VREF, v); | 2801 | i2c_w(sd, OV7670_REG_VREF, v); |
2236 | sethvflip(sd); | ||
2237 | } else { | 2802 | } else { |
2238 | i2c_w(sd, 0x17, hwsbase); | 2803 | i2c_w(sd, 0x17, hwsbase); |
2239 | i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale)); | 2804 | i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale)); |
@@ -2250,6 +2815,10 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
2250 | int ret = 0; | 2815 | int ret = 0; |
2251 | 2816 | ||
2252 | switch (sd->bridge) { | 2817 | switch (sd->bridge) { |
2818 | case BRIDGE_OV511: | ||
2819 | case BRIDGE_OV511PLUS: | ||
2820 | ret = ov511_mode_init_regs(sd); | ||
2821 | break; | ||
2253 | case BRIDGE_OV518: | 2822 | case BRIDGE_OV518: |
2254 | case BRIDGE_OV518PLUS: | 2823 | case BRIDGE_OV518PLUS: |
2255 | ret = ov518_mode_init_regs(sd); | 2824 | ret = ov518_mode_init_regs(sd); |
@@ -2268,6 +2837,9 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
2268 | setcontrast(gspca_dev); | 2837 | setcontrast(gspca_dev); |
2269 | setbrightness(gspca_dev); | 2838 | setbrightness(gspca_dev); |
2270 | setcolors(gspca_dev); | 2839 | setcolors(gspca_dev); |
2840 | sethvflip(sd); | ||
2841 | setautobrightness(sd); | ||
2842 | setfreq(sd); | ||
2271 | 2843 | ||
2272 | ret = ov51x_restart(sd); | 2844 | ret = ov51x_restart(sd); |
2273 | if (ret < 0) | 2845 | if (ret < 0) |
@@ -2287,23 +2859,88 @@ static void sd_stopN(struct gspca_dev *gspca_dev) | |||
2287 | ov51x_led_control(sd, 0); | 2859 | ov51x_led_control(sd, 0); |
2288 | } | 2860 | } |
2289 | 2861 | ||
2290 | static void ov518_pkt_scan(struct gspca_dev *gspca_dev, | 2862 | static void ov511_pkt_scan(struct gspca_dev *gspca_dev, |
2291 | struct gspca_frame *frame, /* target */ | 2863 | struct gspca_frame *frame, /* target */ |
2292 | __u8 *data, /* isoc packet */ | 2864 | __u8 *in, /* isoc packet */ |
2293 | int len) /* iso packet length */ | 2865 | int len) /* iso packet length */ |
2294 | { | 2866 | { |
2295 | PDEBUG(D_STREAM, "ov518_pkt_scan: %d bytes", len); | 2867 | struct sd *sd = (struct sd *) gspca_dev; |
2296 | 2868 | ||
2297 | if (len & 7) { | 2869 | /* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th |
2298 | len--; | 2870 | * byte non-zero. The EOF packet has image width/height in the |
2299 | PDEBUG(D_STREAM, "packet number: %d\n", (int)data[len]); | 2871 | * 10th and 11th bytes. The 9th byte is given as follows: |
2872 | * | ||
2873 | * bit 7: EOF | ||
2874 | * 6: compression enabled | ||
2875 | * 5: 422/420/400 modes | ||
2876 | * 4: 422/420/400 modes | ||
2877 | * 3: 1 | ||
2878 | * 2: snapshot button on | ||
2879 | * 1: snapshot frame | ||
2880 | * 0: even/odd field | ||
2881 | */ | ||
2882 | if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) && | ||
2883 | (in[8] & 0x08)) { | ||
2884 | if (in[8] & 0x80) { | ||
2885 | /* Frame end */ | ||
2886 | if ((in[9] + 1) * 8 != gspca_dev->width || | ||
2887 | (in[10] + 1) * 8 != gspca_dev->height) { | ||
2888 | PDEBUG(D_ERR, "Invalid frame size, got: %dx%d," | ||
2889 | " requested: %dx%d\n", | ||
2890 | (in[9] + 1) * 8, (in[10] + 1) * 8, | ||
2891 | gspca_dev->width, gspca_dev->height); | ||
2892 | gspca_dev->last_packet_type = DISCARD_PACKET; | ||
2893 | return; | ||
2894 | } | ||
2895 | /* Add 11 byte footer to frame, might be useful */ | ||
2896 | gspca_frame_add(gspca_dev, LAST_PACKET, frame, in, 11); | ||
2897 | return; | ||
2898 | } else { | ||
2899 | /* Frame start */ | ||
2900 | gspca_frame_add(gspca_dev, FIRST_PACKET, frame, in, 0); | ||
2901 | sd->packet_nr = 0; | ||
2902 | } | ||
2300 | } | 2903 | } |
2301 | 2904 | ||
2905 | /* Ignore the packet number */ | ||
2906 | len--; | ||
2907 | |||
2908 | /* intermediate packet */ | ||
2909 | gspca_frame_add(gspca_dev, INTER_PACKET, frame, in, len); | ||
2910 | } | ||
2911 | |||
2912 | static void ov518_pkt_scan(struct gspca_dev *gspca_dev, | ||
2913 | struct gspca_frame *frame, /* target */ | ||
2914 | __u8 *data, /* isoc packet */ | ||
2915 | int len) /* iso packet length */ | ||
2916 | { | ||
2917 | struct sd *sd = (struct sd *) gspca_dev; | ||
2918 | |||
2302 | /* A false positive here is likely, until OVT gives me | 2919 | /* A false positive here is likely, until OVT gives me |
2303 | * the definitive SOF/EOF format */ | 2920 | * the definitive SOF/EOF format */ |
2304 | if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) { | 2921 | if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) { |
2305 | gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0); | 2922 | gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0); |
2306 | gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0); | 2923 | gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0); |
2924 | sd->packet_nr = 0; | ||
2925 | } | ||
2926 | |||
2927 | if (gspca_dev->last_packet_type == DISCARD_PACKET) | ||
2928 | return; | ||
2929 | |||
2930 | /* Does this device use packet numbers ? */ | ||
2931 | if (len & 7) { | ||
2932 | len--; | ||
2933 | if (sd->packet_nr == data[len]) | ||
2934 | sd->packet_nr++; | ||
2935 | /* The last few packets of the frame (which are all 0's | ||
2936 | except that they may contain part of the footer) are | ||
2937 | numbered 0 */ | ||
2938 | else if (sd->packet_nr == 0 || data[len]) { | ||
2939 | PDEBUG(D_ERR, "Invalid packet nr: %d (expect: %d)", | ||
2940 | (int)data[len], (int)sd->packet_nr); | ||
2941 | gspca_dev->last_packet_type = DISCARD_PACKET; | ||
2942 | return; | ||
2943 | } | ||
2307 | } | 2944 | } |
2308 | 2945 | ||
2309 | /* intermediate packet */ | 2946 | /* intermediate packet */ |
@@ -2364,6 +3001,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, | |||
2364 | switch (sd->bridge) { | 3001 | switch (sd->bridge) { |
2365 | case BRIDGE_OV511: | 3002 | case BRIDGE_OV511: |
2366 | case BRIDGE_OV511PLUS: | 3003 | case BRIDGE_OV511PLUS: |
3004 | ov511_pkt_scan(gspca_dev, frame, data, len); | ||
2367 | break; | 3005 | break; |
2368 | case BRIDGE_OV518: | 3006 | case BRIDGE_OV518: |
2369 | case BRIDGE_OV518PLUS: | 3007 | case BRIDGE_OV518PLUS: |
@@ -2389,13 +3027,13 @@ static void setbrightness(struct gspca_dev *gspca_dev) | |||
2389 | case SEN_OV76BE: | 3027 | case SEN_OV76BE: |
2390 | case SEN_OV6620: | 3028 | case SEN_OV6620: |
2391 | case SEN_OV6630: | 3029 | case SEN_OV6630: |
3030 | case SEN_OV66308AF: | ||
2392 | case SEN_OV7640: | 3031 | case SEN_OV7640: |
2393 | i2c_w(sd, OV7610_REG_BRT, val); | 3032 | i2c_w(sd, OV7610_REG_BRT, val); |
2394 | break; | 3033 | break; |
2395 | case SEN_OV7620: | 3034 | case SEN_OV7620: |
2396 | /* 7620 doesn't like manual changes when in auto mode */ | 3035 | /* 7620 doesn't like manual changes when in auto mode */ |
2397 | /*fixme | 3036 | if (!sd->autobrightness) |
2398 | * if (!sd->auto_brt) */ | ||
2399 | i2c_w(sd, OV7610_REG_BRT, val); | 3037 | i2c_w(sd, OV7610_REG_BRT, val); |
2400 | break; | 3038 | break; |
2401 | case SEN_OV7670: | 3039 | case SEN_OV7670: |
@@ -2418,6 +3056,7 @@ static void setcontrast(struct gspca_dev *gspca_dev) | |||
2418 | i2c_w(sd, OV7610_REG_CNT, val); | 3056 | i2c_w(sd, OV7610_REG_CNT, val); |
2419 | break; | 3057 | break; |
2420 | case SEN_OV6630: | 3058 | case SEN_OV6630: |
3059 | case SEN_OV66308AF: | ||
2421 | i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f); | 3060 | i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f); |
2422 | break; | 3061 | break; |
2423 | case SEN_OV8610: { | 3062 | case SEN_OV8610: { |
@@ -2462,6 +3101,7 @@ static void setcolors(struct gspca_dev *gspca_dev) | |||
2462 | case SEN_OV76BE: | 3101 | case SEN_OV76BE: |
2463 | case SEN_OV6620: | 3102 | case SEN_OV6620: |
2464 | case SEN_OV6630: | 3103 | case SEN_OV6630: |
3104 | case SEN_OV66308AF: | ||
2465 | i2c_w(sd, OV7610_REG_SAT, val); | 3105 | i2c_w(sd, OV7610_REG_SAT, val); |
2466 | break; | 3106 | break; |
2467 | case SEN_OV7620: | 3107 | case SEN_OV7620: |
@@ -2482,6 +3122,72 @@ static void setcolors(struct gspca_dev *gspca_dev) | |||
2482 | } | 3122 | } |
2483 | } | 3123 | } |
2484 | 3124 | ||
3125 | static void setautobrightness(struct sd *sd) | ||
3126 | { | ||
3127 | if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670) | ||
3128 | return; | ||
3129 | |||
3130 | i2c_w_mask(sd, 0x2d, sd->autobrightness ? 0x10 : 0x00, 0x10); | ||
3131 | } | ||
3132 | |||
3133 | static void setfreq(struct sd *sd) | ||
3134 | { | ||
3135 | if (sd->sensor == SEN_OV7670) { | ||
3136 | switch (sd->freq) { | ||
3137 | case 0: /* Banding filter disabled */ | ||
3138 | i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_BFILT); | ||
3139 | break; | ||
3140 | case 1: /* 50 hz */ | ||
3141 | i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT, | ||
3142 | OV7670_COM8_BFILT); | ||
3143 | i2c_w_mask(sd, OV7670_REG_COM11, 0x08, 0x18); | ||
3144 | break; | ||
3145 | case 2: /* 60 hz */ | ||
3146 | i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT, | ||
3147 | OV7670_COM8_BFILT); | ||
3148 | i2c_w_mask(sd, OV7670_REG_COM11, 0x00, 0x18); | ||
3149 | break; | ||
3150 | case 3: /* Auto hz */ | ||
3151 | i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT, | ||
3152 | OV7670_COM8_BFILT); | ||
3153 | i2c_w_mask(sd, OV7670_REG_COM11, OV7670_COM11_HZAUTO, | ||
3154 | 0x18); | ||
3155 | break; | ||
3156 | } | ||
3157 | } else { | ||
3158 | switch (sd->freq) { | ||
3159 | case 0: /* Banding filter disabled */ | ||
3160 | i2c_w_mask(sd, 0x2d, 0x00, 0x04); | ||
3161 | i2c_w_mask(sd, 0x2a, 0x00, 0x80); | ||
3162 | break; | ||
3163 | case 1: /* 50 hz (filter on and framerate adj) */ | ||
3164 | i2c_w_mask(sd, 0x2d, 0x04, 0x04); | ||
3165 | i2c_w_mask(sd, 0x2a, 0x80, 0x80); | ||
3166 | /* 20 fps -> 16.667 fps */ | ||
3167 | if (sd->sensor == SEN_OV6620 || | ||
3168 | sd->sensor == SEN_OV6630 || | ||
3169 | sd->sensor == SEN_OV66308AF) | ||
3170 | i2c_w(sd, 0x2b, 0x5e); | ||
3171 | else | ||
3172 | i2c_w(sd, 0x2b, 0xac); | ||
3173 | break; | ||
3174 | case 2: /* 60 hz (filter on, ...) */ | ||
3175 | i2c_w_mask(sd, 0x2d, 0x04, 0x04); | ||
3176 | if (sd->sensor == SEN_OV6620 || | ||
3177 | sd->sensor == SEN_OV6630 || | ||
3178 | sd->sensor == SEN_OV66308AF) { | ||
3179 | /* 20 fps -> 15 fps */ | ||
3180 | i2c_w_mask(sd, 0x2a, 0x80, 0x80); | ||
3181 | i2c_w(sd, 0x2b, 0xa8); | ||
3182 | } else { | ||
3183 | /* no framerate adj. */ | ||
3184 | i2c_w_mask(sd, 0x2a, 0x00, 0x80); | ||
3185 | } | ||
3186 | break; | ||
3187 | } | ||
3188 | } | ||
3189 | } | ||
3190 | |||
2485 | static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) | 3191 | static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) |
2486 | { | 3192 | { |
2487 | struct sd *sd = (struct sd *) gspca_dev; | 3193 | struct sd *sd = (struct sd *) gspca_dev; |
@@ -2572,6 +3278,71 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val) | |||
2572 | return 0; | 3278 | return 0; |
2573 | } | 3279 | } |
2574 | 3280 | ||
3281 | static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val) | ||
3282 | { | ||
3283 | struct sd *sd = (struct sd *) gspca_dev; | ||
3284 | |||
3285 | sd->autobrightness = val; | ||
3286 | if (gspca_dev->streaming) | ||
3287 | setautobrightness(sd); | ||
3288 | return 0; | ||
3289 | } | ||
3290 | |||
3291 | static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val) | ||
3292 | { | ||
3293 | struct sd *sd = (struct sd *) gspca_dev; | ||
3294 | |||
3295 | *val = sd->autobrightness; | ||
3296 | return 0; | ||
3297 | } | ||
3298 | |||
3299 | static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val) | ||
3300 | { | ||
3301 | struct sd *sd = (struct sd *) gspca_dev; | ||
3302 | |||
3303 | sd->freq = val; | ||
3304 | if (gspca_dev->streaming) | ||
3305 | setfreq(sd); | ||
3306 | return 0; | ||
3307 | } | ||
3308 | |||
3309 | static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) | ||
3310 | { | ||
3311 | struct sd *sd = (struct sd *) gspca_dev; | ||
3312 | |||
3313 | *val = sd->freq; | ||
3314 | return 0; | ||
3315 | } | ||
3316 | |||
3317 | static int sd_querymenu(struct gspca_dev *gspca_dev, | ||
3318 | struct v4l2_querymenu *menu) | ||
3319 | { | ||
3320 | struct sd *sd = (struct sd *) gspca_dev; | ||
3321 | |||
3322 | switch (menu->id) { | ||
3323 | case V4L2_CID_POWER_LINE_FREQUENCY: | ||
3324 | switch (menu->index) { | ||
3325 | case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ | ||
3326 | strcpy((char *) menu->name, "NoFlicker"); | ||
3327 | return 0; | ||
3328 | case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ | ||
3329 | strcpy((char *) menu->name, "50 Hz"); | ||
3330 | return 0; | ||
3331 | case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ | ||
3332 | strcpy((char *) menu->name, "60 Hz"); | ||
3333 | return 0; | ||
3334 | case 3: | ||
3335 | if (sd->sensor != SEN_OV7670) | ||
3336 | return -EINVAL; | ||
3337 | |||
3338 | strcpy((char *) menu->name, "Automatic"); | ||
3339 | return 0; | ||
3340 | } | ||
3341 | break; | ||
3342 | } | ||
3343 | return -EINVAL; | ||
3344 | } | ||
3345 | |||
2575 | /* sub-driver description */ | 3346 | /* sub-driver description */ |
2576 | static const struct sd_desc sd_desc = { | 3347 | static const struct sd_desc sd_desc = { |
2577 | .name = MODULE_NAME, | 3348 | .name = MODULE_NAME, |
@@ -2582,6 +3353,7 @@ static const struct sd_desc sd_desc = { | |||
2582 | .start = sd_start, | 3353 | .start = sd_start, |
2583 | .stopN = sd_stopN, | 3354 | .stopN = sd_stopN, |
2584 | .pkt_scan = sd_pkt_scan, | 3355 | .pkt_scan = sd_pkt_scan, |
3356 | .querymenu = sd_querymenu, | ||
2585 | }; | 3357 | }; |
2586 | 3358 | ||
2587 | /* -- module initialisation -- */ | 3359 | /* -- module initialisation -- */ |
@@ -2590,17 +3362,22 @@ static const __devinitdata struct usb_device_id device_table[] = { | |||
2590 | {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, | 3362 | {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, |
2591 | {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, | 3363 | {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, |
2592 | {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, | 3364 | {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, |
2593 | {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 }, | 3365 | {USB_DEVICE(0x041e, 0x4064), |
2594 | {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 }, | 3366 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, |
3367 | {USB_DEVICE(0x041e, 0x4068), | ||
3368 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | ||
2595 | {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, | 3369 | {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, |
2596 | {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, | 3370 | {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, |
2597 | {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, | 3371 | {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, |
3372 | {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, | ||
2598 | {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, | 3373 | {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, |
2599 | {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, | 3374 | {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, |
2600 | {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, | 3375 | {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, |
2601 | {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, | 3376 | {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, |
2602 | {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, | 3377 | {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, |
3378 | {USB_DEVICE(0x05a9, 0xa511), .driver_info = BRIDGE_OV511PLUS }, | ||
2603 | {USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS }, | 3379 | {USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS }, |
3380 | {USB_DEVICE(0x0813, 0x0002), .driver_info = BRIDGE_OV511PLUS }, | ||
2604 | {} | 3381 | {} |
2605 | }; | 3382 | }; |
2606 | 3383 | ||
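For illustration, not part of the commit: the sd_querymenu() handler added above only maps V4L2_CID_POWER_LINE_FREQUENCY menu indexes to display names, with index 3 ("Automatic") accepted only on the OV7670. A minimal standalone sketch of that mapping; freq_menu_name() and the main() harness are invented for the example.

	#include <stdio.h>
	#include <string.h>

	static int freq_menu_name(int index, int sensor_is_ov7670,
				  char *name, size_t len)
	{
		static const char *names[] = { "NoFliker", "50 Hz", "60 Hz", "Automatic" };

		if (index < 0 || index > 3)
			return -1;			/* the driver returns -EINVAL */
		if (index == 3 && !sensor_is_ov7670)
			return -1;			/* "Automatic" is OV7670 only */
		strncpy(name, names[index], len - 1);
		name[len - 1] = '\0';
		return 0;
	}

	int main(void)
	{
		char buf[32];
		int i;

		for (i = 0; i <= 3; i++)
			if (freq_menu_name(i, 1, buf, sizeof(buf)) == 0)
				printf("menu %d: %s\n", i, buf);
		return 0;
	}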
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index dc6a6f11354a..0d02f41fa7d0 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c | |||
@@ -46,6 +46,7 @@ struct sd { | |||
46 | u8 gamma; | 46 | u8 gamma; |
47 | u8 vflip; /* ov7630/ov7648 only */ | 47 | u8 vflip; /* ov7630/ov7648 only */ |
48 | u8 infrared; /* mt9v111 only */ | 48 | u8 infrared; /* mt9v111 only */ |
49 | u8 freq; /* ov76xx only */ | ||
49 | u8 quality; /* image quality */ | 50 | u8 quality; /* image quality */ |
50 | #define QUALITY_MIN 60 | 51 | #define QUALITY_MIN 60 |
51 | #define QUALITY_MAX 95 | 52 | #define QUALITY_MAX 95 |
@@ -96,8 +97,11 @@ static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); | |||
96 | static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); | 97 | static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); |
97 | static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val); | 98 | static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val); |
98 | static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val); | 99 | static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val); |
100 | static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); | ||
101 | static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); | ||
99 | 102 | ||
100 | static struct ctrl sd_ctrls[] = { | 103 | static struct ctrl sd_ctrls[] = { |
104 | #define BRIGHTNESS_IDX 0 | ||
101 | { | 105 | { |
102 | { | 106 | { |
103 | .id = V4L2_CID_BRIGHTNESS, | 107 | .id = V4L2_CID_BRIGHTNESS, |
@@ -113,6 +117,7 @@ static struct ctrl sd_ctrls[] = { | |||
113 | .set = sd_setbrightness, | 117 | .set = sd_setbrightness, |
114 | .get = sd_getbrightness, | 118 | .get = sd_getbrightness, |
115 | }, | 119 | }, |
120 | #define CONTRAST_IDX 1 | ||
116 | { | 121 | { |
117 | { | 122 | { |
118 | .id = V4L2_CID_CONTRAST, | 123 | .id = V4L2_CID_CONTRAST, |
@@ -128,20 +133,22 @@ static struct ctrl sd_ctrls[] = { | |||
128 | .set = sd_setcontrast, | 133 | .set = sd_setcontrast, |
129 | .get = sd_getcontrast, | 134 | .get = sd_getcontrast, |
130 | }, | 135 | }, |
136 | #define COLOR_IDX 2 | ||
131 | { | 137 | { |
132 | { | 138 | { |
133 | .id = V4L2_CID_SATURATION, | 139 | .id = V4L2_CID_SATURATION, |
134 | .type = V4L2_CTRL_TYPE_INTEGER, | 140 | .type = V4L2_CTRL_TYPE_INTEGER, |
135 | .name = "Color", | 141 | .name = "Saturation", |
136 | .minimum = 0, | 142 | .minimum = 0, |
137 | .maximum = 40, | 143 | .maximum = 40, |
138 | .step = 1, | 144 | .step = 1, |
139 | #define COLOR_DEF 32 | 145 | #define COLOR_DEF 25 |
140 | .default_value = COLOR_DEF, | 146 | .default_value = COLOR_DEF, |
141 | }, | 147 | }, |
142 | .set = sd_setcolors, | 148 | .set = sd_setcolors, |
143 | .get = sd_getcolors, | 149 | .get = sd_getcolors, |
144 | }, | 150 | }, |
151 | #define BLUE_BALANCE_IDX 3 | ||
145 | { | 152 | { |
146 | { | 153 | { |
147 | .id = V4L2_CID_BLUE_BALANCE, | 154 | .id = V4L2_CID_BLUE_BALANCE, |
@@ -156,6 +163,7 @@ static struct ctrl sd_ctrls[] = { | |||
156 | .set = sd_setblue_balance, | 163 | .set = sd_setblue_balance, |
157 | .get = sd_getblue_balance, | 164 | .get = sd_getblue_balance, |
158 | }, | 165 | }, |
166 | #define RED_BALANCE_IDX 4 | ||
159 | { | 167 | { |
160 | { | 168 | { |
161 | .id = V4L2_CID_RED_BALANCE, | 169 | .id = V4L2_CID_RED_BALANCE, |
@@ -170,6 +178,7 @@ static struct ctrl sd_ctrls[] = { | |||
170 | .set = sd_setred_balance, | 178 | .set = sd_setred_balance, |
171 | .get = sd_getred_balance, | 179 | .get = sd_getred_balance, |
172 | }, | 180 | }, |
181 | #define GAMMA_IDX 5 | ||
173 | { | 182 | { |
174 | { | 183 | { |
175 | .id = V4L2_CID_GAMMA, | 184 | .id = V4L2_CID_GAMMA, |
@@ -184,7 +193,7 @@ static struct ctrl sd_ctrls[] = { | |||
184 | .set = sd_setgamma, | 193 | .set = sd_setgamma, |
185 | .get = sd_getgamma, | 194 | .get = sd_getgamma, |
186 | }, | 195 | }, |
187 | #define AUTOGAIN_IDX 5 | 196 | #define AUTOGAIN_IDX 6 |
188 | { | 197 | { |
189 | { | 198 | { |
190 | .id = V4L2_CID_AUTOGAIN, | 199 | .id = V4L2_CID_AUTOGAIN, |
@@ -200,7 +209,7 @@ static struct ctrl sd_ctrls[] = { | |||
200 | .get = sd_getautogain, | 209 | .get = sd_getautogain, |
201 | }, | 210 | }, |
202 | /* ov7630/ov7648 only */ | 211 | /* ov7630/ov7648 only */ |
203 | #define VFLIP_IDX 6 | 212 | #define VFLIP_IDX 7 |
204 | { | 213 | { |
205 | { | 214 | { |
206 | .id = V4L2_CID_VFLIP, | 215 | .id = V4L2_CID_VFLIP, |
@@ -209,14 +218,14 @@ static struct ctrl sd_ctrls[] = { | |||
209 | .minimum = 0, | 218 | .minimum = 0, |
210 | .maximum = 1, | 219 | .maximum = 1, |
211 | .step = 1, | 220 | .step = 1, |
212 | #define VFLIP_DEF 0 /* vflip def = 1 for ov7630 */ | 221 | #define VFLIP_DEF 0 |
213 | .default_value = VFLIP_DEF, | 222 | .default_value = VFLIP_DEF, |
214 | }, | 223 | }, |
215 | .set = sd_setvflip, | 224 | .set = sd_setvflip, |
216 | .get = sd_getvflip, | 225 | .get = sd_getvflip, |
217 | }, | 226 | }, |
218 | /* mt9v111 only */ | 227 | /* mt9v111 only */ |
219 | #define INFRARED_IDX 7 | 228 | #define INFRARED_IDX 8 |
220 | { | 229 | { |
221 | { | 230 | { |
222 | .id = V4L2_CID_INFRARED, | 231 | .id = V4L2_CID_INFRARED, |
@@ -231,28 +240,44 @@ static struct ctrl sd_ctrls[] = { | |||
231 | .set = sd_setinfrared, | 240 | .set = sd_setinfrared, |
232 | .get = sd_getinfrared, | 241 | .get = sd_getinfrared, |
233 | }, | 242 | }, |
243 | /* ov7630/ov7648/ov7660 only */ | ||
244 | #define FREQ_IDX 9 | ||
245 | { | ||
246 | { | ||
247 | .id = V4L2_CID_POWER_LINE_FREQUENCY, | ||
248 | .type = V4L2_CTRL_TYPE_MENU, | ||
249 | .name = "Light frequency filter", | ||
250 | .minimum = 0, | ||
251 | .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */ | ||
252 | .step = 1, | ||
253 | #define FREQ_DEF 2 | ||
254 | .default_value = FREQ_DEF, | ||
255 | }, | ||
256 | .set = sd_setfreq, | ||
257 | .get = sd_getfreq, | ||
258 | }, | ||
234 | }; | 259 | }; |
235 | 260 | ||
236 | /* table of the disabled controls */ | 261 | /* table of the disabled controls */ |
237 | static __u32 ctrl_dis[] = { | 262 | static __u32 ctrl_dis[] = { |
238 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), | 263 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), |
239 | /* SENSOR_HV7131R 0 */ | 264 | /* SENSOR_HV7131R 0 */ |
240 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), | 265 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), |
241 | /* SENSOR_MI0360 1 */ | 266 | /* SENSOR_MI0360 1 */ |
242 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), | 267 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), |
243 | /* SENSOR_MO4000 2 */ | 268 | /* SENSOR_MO4000 2 */ |
244 | (1 << VFLIP_IDX), | 269 | (1 << VFLIP_IDX) | (1 << FREQ_IDX), |
245 | /* SENSOR_MT9V111 3 */ | 270 | /* SENSOR_MT9V111 3 */ |
246 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), | 271 | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), |
247 | /* SENSOR_OM6802 4 */ | 272 | /* SENSOR_OM6802 4 */ |
248 | (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX), | 273 | (1 << INFRARED_IDX), |
249 | /* SENSOR_OV7630 5 */ | 274 | /* SENSOR_OV7630 5 */ |
250 | (1 << INFRARED_IDX), | 275 | (1 << INFRARED_IDX), |
251 | /* SENSOR_OV7648 6 */ | 276 | /* SENSOR_OV7648 6 */ |
252 | (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), | 277 | (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), |
253 | /* SENSOR_OV7660 7 */ | 278 | /* SENSOR_OV7660 7 */ |
254 | (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), | 279 | (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | |
255 | /* SENSOR_SP80708 8 */ | 280 | (1 << FREQ_IDX), /* SENSOR_SP80708 8 */ |
256 | }; | 281 | }; |
257 | 282 | ||
258 | static const struct v4l2_pix_format vga_mode[] = { | 283 | static const struct v4l2_pix_format vga_mode[] = { |
@@ -268,7 +293,8 @@ static const struct v4l2_pix_format vga_mode[] = { | |||
268 | .priv = 1}, | 293 | .priv = 1}, |
269 | {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, | 294 | {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, |
270 | .bytesperline = 640, | 295 | .bytesperline = 640, |
271 | .sizeimage = 640 * 480 * 3 / 8 + 590, | 296 | /* Note 3 / 8 is not large enough, not even 5 / 8 is ?! */ |
297 | .sizeimage = 640 * 480 * 3 / 4 + 590, | ||
272 | .colorspace = V4L2_COLORSPACE_JPEG, | 298 | .colorspace = V4L2_COLORSPACE_JPEG, |
273 | .priv = 0}, | 299 | .priv = 0}, |
274 | }; | 300 | }; |
@@ -604,7 +630,9 @@ static const u8 ov7630_sensor_init[][8] = { | |||
604 | /* win: i2c_r from 00 to 80 */ | 630 | /* win: i2c_r from 00 to 80 */ |
605 | {0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10}, | 631 | {0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10}, |
606 | {0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10}, | 632 | {0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10}, |
607 | {0xd1, 0x21, 0x11, 0x00, 0x48, 0xc0, 0x00, 0x10}, | 633 | /* HDG: 0x11 was 0x00 change to 0x01 for better exposure (15 fps instead of 30) |
634 | 0x13 was 0xc0 change to 0xc3 for auto gain and exposure */ | ||
635 | {0xd1, 0x21, 0x11, 0x01, 0x48, 0xc3, 0x00, 0x10}, | ||
608 | {0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10}, | 636 | {0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10}, |
609 | {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10}, | 637 | {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10}, |
610 | {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, | 638 | {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, |
@@ -638,9 +666,8 @@ static const u8 ov7630_sensor_init[][8] = { | |||
638 | {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, | 666 | {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, |
639 | {0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10}, | 667 | {0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10}, |
640 | /* */ | 668 | /* */ |
641 | {0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10}, | 669 | /* {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */ |
642 | {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, | 670 | /* {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */ |
643 | {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, | ||
644 | /* */ | 671 | /* */ |
645 | {0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10}, | 672 | {0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10}, |
646 | /* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */ | 673 | /* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */ |
@@ -673,7 +700,7 @@ static const u8 ov7648_sensor_init[][8] = { | |||
673 | {0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10}, | 700 | {0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10}, |
674 | /* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */ | 701 | /* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */ |
675 | /* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */ | 702 | /* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */ |
676 | {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, | 703 | /* {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, set by setfreq */ |
677 | /*...*/ | 704 | /*...*/ |
678 | /* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */ | 705 | /* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */ |
679 | /* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN | 706 | /* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN |
@@ -1294,11 +1321,9 @@ static int sd_config(struct gspca_dev *gspca_dev, | |||
1294 | sd->gamma = GAMMA_DEF; | 1321 | sd->gamma = GAMMA_DEF; |
1295 | sd->autogain = AUTOGAIN_DEF; | 1322 | sd->autogain = AUTOGAIN_DEF; |
1296 | sd->ag_cnt = -1; | 1323 | sd->ag_cnt = -1; |
1297 | if (sd->sensor != SENSOR_OV7630) | 1324 | sd->vflip = VFLIP_DEF; |
1298 | sd->vflip = 0; | ||
1299 | else | ||
1300 | sd->vflip = 1; | ||
1301 | sd->infrared = INFRARED_DEF; | 1325 | sd->infrared = INFRARED_DEF; |
1326 | sd->freq = FREQ_DEF; | ||
1302 | sd->quality = QUALITY_DEF; | 1327 | sd->quality = QUALITY_DEF; |
1303 | sd->jpegqual = 80; | 1328 | sd->jpegqual = 80; |
1304 | 1329 | ||
@@ -1569,7 +1594,7 @@ static void setautogain(struct gspca_dev *gspca_dev) | |||
1569 | else | 1594 | else |
1570 | comb = 0xa0; | 1595 | comb = 0xa0; |
1571 | if (sd->autogain) | 1596 | if (sd->autogain) |
1572 | comb |= 0x02; | 1597 | comb |= 0x03; |
1573 | i2c_w1(&sd->gspca_dev, 0x13, comb); | 1598 | i2c_w1(&sd->gspca_dev, 0x13, comb); |
1574 | return; | 1599 | return; |
1575 | } | 1600 | } |
@@ -1585,12 +1610,15 @@ static void setvflip(struct sd *sd) | |||
1585 | { | 1610 | { |
1586 | u8 comn; | 1611 | u8 comn; |
1587 | 1612 | ||
1588 | if (sd->sensor == SENSOR_OV7630) | 1613 | if (sd->sensor == SENSOR_OV7630) { |
1589 | comn = 0x02; | 1614 | comn = 0x02; |
1590 | else | 1615 | if (!sd->vflip) |
1616 | comn |= 0x80; | ||
1617 | } else { | ||
1591 | comn = 0x06; | 1618 | comn = 0x06; |
1592 | if (sd->vflip) | 1619 | if (sd->vflip) |
1593 | comn |= 0x80; | 1620 | comn |= 0x80; |
1621 | } | ||
1594 | i2c_w1(&sd->gspca_dev, 0x75, comn); | 1622 | i2c_w1(&sd->gspca_dev, 0x75, comn); |
1595 | } | 1623 | } |
1596 | 1624 | ||
@@ -1602,6 +1630,58 @@ static void setinfrared(struct sd *sd) | |||
1602 | sd->infrared ? 0x66 : 0x64); | 1630 | sd->infrared ? 0x66 : 0x64); |
1603 | } | 1631 | } |
1604 | 1632 | ||
1633 | static void setfreq(struct gspca_dev *gspca_dev) | ||
1634 | { | ||
1635 | struct sd *sd = (struct sd *) gspca_dev; | ||
1636 | |||
1637 | if (sd->sensor == SENSOR_OV7660) { | ||
1638 | switch (sd->freq) { | ||
1639 | case 0: /* Banding filter disabled */ | ||
1640 | i2c_w1(gspca_dev, 0x13, 0xdf); | ||
1641 | break; | ||
1642 | case 1: /* 50 hz */ | ||
1643 | i2c_w1(gspca_dev, 0x13, 0xff); | ||
1644 | i2c_w1(gspca_dev, 0x3b, 0x0a); | ||
1645 | break; | ||
1646 | case 2: /* 60 hz */ | ||
1647 | i2c_w1(gspca_dev, 0x13, 0xff); | ||
1648 | i2c_w1(gspca_dev, 0x3b, 0x02); | ||
1649 | break; | ||
1650 | } | ||
1651 | } else { | ||
1652 | u8 reg2a = 0, reg2b = 0, reg2d = 0; | ||
1653 | |||
1654 | /* Get reg2a / reg2d base values */ | ||
1655 | switch (sd->sensor) { | ||
1656 | case SENSOR_OV7630: | ||
1657 | reg2a = 0x08; | ||
1658 | reg2d = 0x01; | ||
1659 | break; | ||
1660 | case SENSOR_OV7648: | ||
1661 | reg2a = 0x11; | ||
1662 | reg2d = 0x81; | ||
1663 | break; | ||
1664 | } | ||
1665 | |||
1666 | switch (sd->freq) { | ||
1667 | case 0: /* Banding filter disabled */ | ||
1668 | break; | ||
1669 | case 1: /* 50 hz (filter on and framerate adj) */ | ||
1670 | reg2a |= 0x80; | ||
1671 | reg2b = 0xac; | ||
1672 | reg2d |= 0x04; | ||
1673 | break; | ||
1674 | case 2: /* 60 hz (filter on, no framerate adj) */ | ||
1675 | reg2a |= 0x80; | ||
1676 | reg2d |= 0x04; | ||
1677 | break; | ||
1678 | } | ||
1679 | i2c_w1(gspca_dev, 0x2a, reg2a); | ||
1680 | i2c_w1(gspca_dev, 0x2b, reg2b); | ||
1681 | i2c_w1(gspca_dev, 0x2d, reg2d); | ||
1682 | } | ||
1683 | } | ||
1684 | |||
1605 | static void setjpegqual(struct gspca_dev *gspca_dev) | 1685 | static void setjpegqual(struct gspca_dev *gspca_dev) |
1606 | { | 1686 | { |
1607 | struct sd *sd = (struct sd *) gspca_dev; | 1687 | struct sd *sd = (struct sd *) gspca_dev; |
@@ -1828,6 +1908,7 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
1828 | setbrightness(gspca_dev); | 1908 | setbrightness(gspca_dev); |
1829 | setcontrast(gspca_dev); | 1909 | setcontrast(gspca_dev); |
1830 | setautogain(gspca_dev); | 1910 | setautogain(gspca_dev); |
1911 | setfreq(gspca_dev); | ||
1831 | return 0; | 1912 | return 0; |
1832 | } | 1913 | } |
1833 | 1914 | ||
@@ -2131,6 +2212,24 @@ static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val) | |||
2131 | return 0; | 2212 | return 0; |
2132 | } | 2213 | } |
2133 | 2214 | ||
2215 | static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val) | ||
2216 | { | ||
2217 | struct sd *sd = (struct sd *) gspca_dev; | ||
2218 | |||
2219 | sd->freq = val; | ||
2220 | if (gspca_dev->streaming) | ||
2221 | setfreq(gspca_dev); | ||
2222 | return 0; | ||
2223 | } | ||
2224 | |||
2225 | static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) | ||
2226 | { | ||
2227 | struct sd *sd = (struct sd *) gspca_dev; | ||
2228 | |||
2229 | *val = sd->freq; | ||
2230 | return 0; | ||
2231 | } | ||
2232 | |||
2134 | static int sd_set_jcomp(struct gspca_dev *gspca_dev, | 2233 | static int sd_set_jcomp(struct gspca_dev *gspca_dev, |
2135 | struct v4l2_jpegcompression *jcomp) | 2234 | struct v4l2_jpegcompression *jcomp) |
2136 | { | 2235 | { |
@@ -2159,6 +2258,27 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev, | |||
2159 | return 0; | 2258 | return 0; |
2160 | } | 2259 | } |
2161 | 2260 | ||
2261 | static int sd_querymenu(struct gspca_dev *gspca_dev, | ||
2262 | struct v4l2_querymenu *menu) | ||
2263 | { | ||
2264 | switch (menu->id) { | ||
2265 | case V4L2_CID_POWER_LINE_FREQUENCY: | ||
2266 | switch (menu->index) { | ||
2267 | case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ | ||
2268 | strcpy((char *) menu->name, "NoFliker"); | ||
2269 | return 0; | ||
2270 | case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ | ||
2271 | strcpy((char *) menu->name, "50 Hz"); | ||
2272 | return 0; | ||
2273 | case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ | ||
2274 | strcpy((char *) menu->name, "60 Hz"); | ||
2275 | return 0; | ||
2276 | } | ||
2277 | break; | ||
2278 | } | ||
2279 | return -EINVAL; | ||
2280 | } | ||
2281 | |||
2162 | /* sub-driver description */ | 2282 | /* sub-driver description */ |
2163 | static const struct sd_desc sd_desc = { | 2283 | static const struct sd_desc sd_desc = { |
2164 | .name = MODULE_NAME, | 2284 | .name = MODULE_NAME, |
@@ -2173,6 +2293,7 @@ static const struct sd_desc sd_desc = { | |||
2173 | .dq_callback = do_autogain, | 2293 | .dq_callback = do_autogain, |
2174 | .get_jcomp = sd_get_jcomp, | 2294 | .get_jcomp = sd_get_jcomp, |
2175 | .set_jcomp = sd_set_jcomp, | 2295 | .set_jcomp = sd_set_jcomp, |
2296 | .querymenu = sd_querymenu, | ||
2176 | }; | 2297 | }; |
2177 | 2298 | ||
2178 | /* -- module initialisation -- */ | 2299 | /* -- module initialisation -- */ |
@@ -2233,7 +2354,7 @@ static const __devinitdata struct usb_device_id device_table[] = { | |||
2233 | {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)}, | 2354 | {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)}, |
2234 | #endif | 2355 | #endif |
2235 | {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)}, | 2356 | {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)}, |
2236 | /* {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x??)}, */ | 2357 | {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x21)}, |
2237 | {USB_DEVICE(0x0c45, 0x6143), BSI(SN9C120, SP80708, 0x18)}, | 2358 | {USB_DEVICE(0x0c45, 0x6143), BSI(SN9C120, SP80708, 0x18)}, |
2238 | {} | 2359 | {} |
2239 | }; | 2360 | }; |
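For illustration, not part of the commit: for the OV7630/OV7648 the new setfreq() starts from sensor-specific base values for registers 0x2a/0x2d and then ORs in the banding-filter bits before writing 0x2a/0x2b/0x2d. A standalone sketch of that selection; the struct and main() are invented, the values come from the hunk above.

	#include <stdio.h>

	struct banding_regs { unsigned char reg2a, reg2b, reg2d; };

	static struct banding_regs pick_banding_regs(int sensor_is_ov7648, int freq)
	{
		struct banding_regs r;

		/* sensor-specific base values */
		r.reg2a = sensor_is_ov7648 ? 0x11 : 0x08;
		r.reg2b = 0x00;
		r.reg2d = sensor_is_ov7648 ? 0x81 : 0x01;

		switch (freq) {
		case 1:				/* 50 Hz: filter on, framerate adjusted */
			r.reg2a |= 0x80;
			r.reg2b = 0xac;
			r.reg2d |= 0x04;
			break;
		case 2:				/* 60 Hz: filter on, no framerate adjust */
			r.reg2a |= 0x80;
			r.reg2d |= 0x04;
			break;
		default:			/* 0: banding filter disabled */
			break;
		}
		return r;
	}

	int main(void)
	{
		struct banding_regs r = pick_banding_regs(0, 1);	/* OV7630, 50 Hz */

		printf("2a=%02x 2b=%02x 2d=%02x\n", r.reg2a, r.reg2b, r.reg2d);
		return 0;
	}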
diff --git a/drivers/media/video/gspca/stv06xx/Makefile b/drivers/media/video/gspca/stv06xx/Makefile index feeaa94ab588..2f3c3a606ce4 100644 --- a/drivers/media/video/gspca/stv06xx/Makefile +++ b/drivers/media/video/gspca/stv06xx/Makefile | |||
@@ -3,7 +3,8 @@ obj-$(CONFIG_USB_STV06XX) += gspca_stv06xx.o | |||
3 | gspca_stv06xx-objs := stv06xx.o \ | 3 | gspca_stv06xx-objs := stv06xx.o \ |
4 | stv06xx_vv6410.o \ | 4 | stv06xx_vv6410.o \ |
5 | stv06xx_hdcs.o \ | 5 | stv06xx_hdcs.o \ |
6 | stv06xx_pb0100.o | 6 | stv06xx_pb0100.o \ |
7 | stv06xx_st6422.o | ||
7 | 8 | ||
8 | EXTRA_CFLAGS += -Idrivers/media/video/gspca | 9 | EXTRA_CFLAGS += -Idrivers/media/video/gspca |
9 | 10 | ||
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c index e573c3406324..0da8e0de0456 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx.c +++ b/drivers/media/video/gspca/stv06xx/stv06xx.c | |||
@@ -92,11 +92,10 @@ static int stv06xx_write_sensor_finish(struct sd *sd) | |||
92 | { | 92 | { |
93 | int err = 0; | 93 | int err = 0; |
94 | 94 | ||
95 | if (IS_850(sd)) { | 95 | if (sd->bridge == BRIDGE_STV610) { |
96 | struct usb_device *udev = sd->gspca_dev.dev; | 96 | struct usb_device *udev = sd->gspca_dev.dev; |
97 | __u8 *buf = sd->gspca_dev.usb_buf; | 97 | __u8 *buf = sd->gspca_dev.usb_buf; |
98 | 98 | ||
99 | /* Quickam Web needs an extra packet */ | ||
100 | buf[0] = 0; | 99 | buf[0] = 0; |
101 | err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | 100 | err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), |
102 | 0x04, 0x40, 0x1704, 0, buf, 1, | 101 | 0x04, 0x40, 0x1704, 0, buf, 1, |
@@ -253,7 +252,7 @@ static int stv06xx_init(struct gspca_dev *gspca_dev) | |||
253 | 252 | ||
254 | err = sd->sensor->init(sd); | 253 | err = sd->sensor->init(sd); |
255 | 254 | ||
256 | if (dump_sensor) | 255 | if (dump_sensor && sd->sensor->dump) |
257 | sd->sensor->dump(sd); | 256 | sd->sensor->dump(sd); |
258 | 257 | ||
259 | return (err < 0) ? err : 0; | 258 | return (err < 0) ? err : 0; |
@@ -318,6 +317,8 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev, | |||
318 | __u8 *data, /* isoc packet */ | 317 | __u8 *data, /* isoc packet */ |
319 | int len) /* iso packet length */ | 318 | int len) /* iso packet length */ |
320 | { | 319 | { |
320 | struct sd *sd = (struct sd *) gspca_dev; | ||
321 | |||
321 | PDEBUG(D_PACK, "Packet of length %d arrived", len); | 322 | PDEBUG(D_PACK, "Packet of length %d arrived", len); |
322 | 323 | ||
323 | /* A packet may contain several frames | 324 | /* A packet may contain several frames |
@@ -343,14 +344,29 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev, | |||
343 | if (len < chunk_len) { | 344 | if (len < chunk_len) { |
344 | PDEBUG(D_ERR, "URB packet length is smaller" | 345 | PDEBUG(D_ERR, "URB packet length is smaller" |
345 | " than the specified chunk length"); | 346 | " than the specified chunk length"); |
347 | gspca_dev->last_packet_type = DISCARD_PACKET; | ||
346 | return; | 348 | return; |
347 | } | 349 | } |
348 | 350 | ||
351 | /* First byte seems to be 02=data, 2nd byte is unknown??? */ | ||
352 | if (sd->bridge == BRIDGE_ST6422 && (id & 0xFF00) == 0x0200) | ||

353 | goto frame_data; | ||
354 | |||
349 | switch (id) { | 355 | switch (id) { |
350 | case 0x0200: | 356 | case 0x0200: |
351 | case 0x4200: | 357 | case 0x4200: |
358 | frame_data: | ||
352 | PDEBUG(D_PACK, "Frame data packet detected"); | 359 | PDEBUG(D_PACK, "Frame data packet detected"); |
353 | 360 | ||
361 | if (sd->to_skip) { | ||
362 | int skip = (sd->to_skip < chunk_len) ? | ||
363 | sd->to_skip : chunk_len; | ||
364 | data += skip; | ||
365 | len -= skip; | ||
366 | chunk_len -= skip; | ||
367 | sd->to_skip -= skip; | ||
368 | } | ||
369 | |||
354 | gspca_frame_add(gspca_dev, INTER_PACKET, frame, | 370 | gspca_frame_add(gspca_dev, INTER_PACKET, frame, |
355 | data, chunk_len); | 371 | data, chunk_len); |
356 | break; | 372 | break; |
@@ -365,6 +381,9 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev, | |||
365 | gspca_frame_add(gspca_dev, FIRST_PACKET, | 381 | gspca_frame_add(gspca_dev, FIRST_PACKET, |
366 | frame, data, 0); | 382 | frame, data, 0); |
367 | 383 | ||
384 | if (sd->bridge == BRIDGE_ST6422) | ||
385 | sd->to_skip = gspca_dev->width * 4; | ||
386 | |||
368 | if (chunk_len) | 387 | if (chunk_len) |
369 | PDEBUG(D_ERR, "Chunk length is " | 388 | PDEBUG(D_ERR, "Chunk length is " |
370 | "non-zero on a SOF"); | 389 | "non-zero on a SOF"); |
@@ -395,8 +414,12 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev, | |||
395 | /* Unknown chunk with 2 bytes of data, | 414 | /* Unknown chunk with 2 bytes of data, |
396 | occurs 2-3 times per USB interrupt */ | 415 | occurs 2-3 times per USB interrupt */ |
397 | break; | 416 | break; |
417 | case 0x42ff: | ||
418 | PDEBUG(D_PACK, "Chunk 0x42ff detected"); | ||
419 | /* Special chunk seen sometimes on the ST6422 */ | ||
420 | break; | ||
398 | default: | 421 | default: |
399 | PDEBUG(D_PACK, "Unknown chunk %d detected", id); | 422 | PDEBUG(D_PACK, "Unknown chunk 0x%04x detected", id); |
400 | /* Unknown chunk */ | 423 | /* Unknown chunk */ |
401 | } | 424 | } |
402 | data += chunk_len; | 425 | data += chunk_len; |
@@ -428,11 +451,16 @@ static int stv06xx_config(struct gspca_dev *gspca_dev, | |||
428 | 451 | ||
429 | cam = &gspca_dev->cam; | 452 | cam = &gspca_dev->cam; |
430 | sd->desc = sd_desc; | 453 | sd->desc = sd_desc; |
454 | sd->bridge = id->driver_info; | ||
431 | gspca_dev->sd_desc = &sd->desc; | 455 | gspca_dev->sd_desc = &sd->desc; |
432 | 456 | ||
433 | if (dump_bridge) | 457 | if (dump_bridge) |
434 | stv06xx_dump_bridge(sd); | 458 | stv06xx_dump_bridge(sd); |
435 | 459 | ||
460 | sd->sensor = &stv06xx_sensor_st6422; | ||
461 | if (!sd->sensor->probe(sd)) | ||
462 | return 0; | ||
463 | |||
436 | sd->sensor = &stv06xx_sensor_vv6410; | 464 | sd->sensor = &stv06xx_sensor_vv6410; |
437 | if (!sd->sensor->probe(sd)) | 465 | if (!sd->sensor->probe(sd)) |
438 | return 0; | 466 | return 0; |
@@ -457,9 +485,20 @@ static int stv06xx_config(struct gspca_dev *gspca_dev, | |||
457 | 485 | ||
458 | /* -- module initialisation -- */ | 486 | /* -- module initialisation -- */ |
459 | static const __devinitdata struct usb_device_id device_table[] = { | 487 | static const __devinitdata struct usb_device_id device_table[] = { |
460 | {USB_DEVICE(0x046d, 0x0840)}, /* QuickCam Express */ | 488 | /* QuickCam Express */ |
461 | {USB_DEVICE(0x046d, 0x0850)}, /* LEGO cam / QuickCam Web */ | 489 | {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, |
462 | {USB_DEVICE(0x046d, 0x0870)}, /* Dexxa WebCam USB */ | 490 | /* LEGO cam / QuickCam Web */ |
491 | {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 }, | ||
492 | /* Dexxa WebCam USB */ | ||
493 | {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 }, | ||
494 | /* QuickCam Messenger */ | ||
495 | {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 }, | ||
496 | /* QuickCam Communicate */ | ||
497 | {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, | ||
498 | /* QuickCam Messenger (new) */ | ||
499 | {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, | ||
500 | /* QuickCam Messenger (new) */ | ||
501 | {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 }, | ||
463 | {} | 502 | {} |
464 | }; | 503 | }; |
465 | MODULE_DEVICE_TABLE(usb, device_table); | 504 | MODULE_DEVICE_TABLE(usb, device_table); |
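For illustration, not part of the commit: the ST6422 delivers four garbage lines at the start of every frame, so pkt_scan now arms a byte counter (width * 4) on each start-of-frame chunk and drops that many bytes from the following data chunks. A standalone model of the skip bookkeeping; consume() stands in for gspca_frame_add() and is invented for the example.

	#include <stdio.h>

	static int to_skip;			/* bytes still to drop in this frame */

	static void start_of_frame(int width)
	{
		to_skip = width * 4;		/* first 4 lines are garbage */
	}

	static void frame_data(const unsigned char *data, int chunk_len,
			       void (*consume)(const unsigned char *, int))
	{
		if (to_skip) {
			int skip = (to_skip < chunk_len) ? to_skip : chunk_len;

			data += skip;
			chunk_len -= skip;
			to_skip -= skip;
		}
		if (chunk_len)
			consume(data, chunk_len);
	}

	static void print_len(const unsigned char *data, int len)
	{
		(void)data;
		printf("forwarded %d bytes\n", len);
	}

	int main(void)
	{
		unsigned char buf[1024] = { 0 };

		start_of_frame(162);			/* 162-wide mode: skip 648 bytes */
		frame_data(buf, 500, print_len);	/* fully swallowed by the skip */
		frame_data(buf, 500, print_len);	/* 148 skipped, 352 forwarded */
		return 0;
	}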
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h index 1207e7d17f14..9df7137fe67e 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx.h +++ b/drivers/media/video/gspca/stv06xx/stv06xx.h | |||
@@ -93,6 +93,17 @@ struct sd { | |||
93 | 93 | ||
94 | /* Sensor private data */ | 94 | /* Sensor private data */ |
95 | void *sensor_priv; | 95 | void *sensor_priv; |
96 | |||
97 | /* The first 4 lines produced by the st6422 are no good; this keeps | ||
98 | track of how many bytes we still need to skip during a frame */ | ||
99 | int to_skip; | ||
100 | |||
101 | /* Bridge / Camera type */ | ||
102 | u8 bridge; | ||
103 | #define BRIDGE_STV600 0 | ||
104 | #define BRIDGE_STV602 1 | ||
105 | #define BRIDGE_STV610 2 | ||
106 | #define BRIDGE_ST6422 3 /* With integrated sensor */ | ||
96 | }; | 107 | }; |
97 | 108 | ||
98 | int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data); | 109 | int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data); |
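For illustration, not part of the commit: the bridge type now travels from the USB device table (driver_info) into sd->bridge at config time and replaces the old IS_850()/IS_870() product-ID macros. A standalone model of that flow; the structs and table entries are simplified stand-ins.

	#include <stdio.h>

	enum { BRIDGE_STV600, BRIDGE_STV602, BRIDGE_STV610, BRIDGE_ST6422 };

	struct id_entry { unsigned short vendor, product; unsigned char bridge; };
	struct sd { unsigned char bridge; };

	static const struct id_entry device_table[] = {
		{ 0x046d, 0x0840, BRIDGE_STV600 },	/* QuickCam Express */
		{ 0x046d, 0x0850, BRIDGE_STV610 },	/* LEGO cam / QuickCam Web */
		{ 0x046d, 0x08f0, BRIDGE_ST6422 },	/* QuickCam Messenger */
	};

	static void config(struct sd *sd, const struct id_entry *id)
	{
		sd->bridge = id->bridge;		/* in the driver: id->driver_info */
	}

	int main(void)
	{
		struct sd sd;

		config(&sd, &device_table[1]);
		if (sd.bridge == BRIDGE_STV610)		/* replaces: if (IS_850(sd)) */
			printf("QuickCam Web: send the extra finish packet\n");
		return 0;
	}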
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c index b16903814203..3039ec208f3a 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c +++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c | |||
@@ -434,7 +434,7 @@ static int hdcs_probe_1x00(struct sd *sd) | |||
434 | hdcs->exp.er = 100; | 434 | hdcs->exp.er = 100; |
435 | 435 | ||
436 | /* | 436 | /* |
437 | * Frame rate on HDCS-1000 0x46D:0x840 depends on PSMP: | 437 | * Frame rate on HDCS-1000 with STV600 depends on PSMP: |
438 | * 4 = doesn't work at all | 438 | * 4 = doesn't work at all |
439 | * 5 = 7.8 fps, | 439 | * 5 = 7.8 fps, |
440 | * 6 = 6.9 fps, | 440 | * 6 = 6.9 fps, |
@@ -443,7 +443,7 @@ static int hdcs_probe_1x00(struct sd *sd) | |||
443 | * 15 = 4.4 fps, | 443 | * 15 = 4.4 fps, |
444 | * 31 = 2.8 fps | 444 | * 31 = 2.8 fps |
445 | * | 445 | * |
446 | * Frame rate on HDCS-1000 0x46D:0x870 depends on PSMP: | 446 | * Frame rate on HDCS-1000 with STV602 depends on PSMP: |
447 | * 15 = doesn't work at all | 447 | * 15 = doesn't work at all |
448 | * 18 = doesn't work at all | 448 | * 18 = doesn't work at all |
449 | * 19 = 7.3 fps | 449 | * 19 = 7.3 fps |
@@ -453,7 +453,7 @@ static int hdcs_probe_1x00(struct sd *sd) | |||
453 | * 24 = 6.3 fps | 453 | * 24 = 6.3 fps |
454 | * 30 = 5.4 fps | 454 | * 30 = 5.4 fps |
455 | */ | 455 | */ |
456 | hdcs->psmp = IS_870(sd) ? 20 : 5; | 456 | hdcs->psmp = (sd->bridge == BRIDGE_STV602) ? 20 : 5; |
457 | 457 | ||
458 | sd->sensor_priv = hdcs; | 458 | sd->sensor_priv = hdcs; |
459 | 459 | ||
@@ -530,7 +530,7 @@ static int hdcs_init(struct sd *sd) | |||
530 | int i, err = 0; | 530 | int i, err = 0; |
531 | 531 | ||
532 | /* Set the STV0602AA in STV0600 emulation mode */ | 532 | /* Set the STV0602AA in STV0600 emulation mode */ |
533 | if (IS_870(sd)) | 533 | if (sd->bridge == BRIDGE_STV602) |
534 | stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1); | 534 | stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1); |
535 | 535 | ||
536 | /* Execute the bridge init */ | 536 | /* Execute the bridge init */ |
@@ -558,7 +558,7 @@ static int hdcs_init(struct sd *sd) | |||
558 | return err; | 558 | return err; |
559 | 559 | ||
560 | /* Set PGA sample duration | 560 | /* Set PGA sample duration |
561 | (was 0x7E for IS_870, but caused slow framerate with HDCS-1020) */ | 561 | (was 0x7E for the STV602, but caused slow framerate with HDCS-1020) */ |
562 | if (IS_1020(sd)) | 562 | if (IS_1020(sd)) |
563 | err = stv06xx_write_sensor(sd, HDCS_TCTRL, | 563 | err = stv06xx_write_sensor(sd, HDCS_TCTRL, |
564 | (HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp); | 564 | (HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp); |
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h index e88c42f7d2f8..934b9cebc1ab 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h +++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h | |||
@@ -32,14 +32,13 @@ | |||
32 | 32 | ||
33 | #include "stv06xx.h" | 33 | #include "stv06xx.h" |
34 | 34 | ||
35 | #define IS_850(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x850) | ||
36 | #define IS_870(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x870) | ||
37 | #define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020) | 35 | #define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020) |
38 | 36 | ||
39 | extern const struct stv06xx_sensor stv06xx_sensor_vv6410; | 37 | extern const struct stv06xx_sensor stv06xx_sensor_vv6410; |
40 | extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00; | 38 | extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00; |
41 | extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020; | 39 | extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020; |
42 | extern const struct stv06xx_sensor stv06xx_sensor_pb0100; | 40 | extern const struct stv06xx_sensor stv06xx_sensor_pb0100; |
41 | extern const struct stv06xx_sensor stv06xx_sensor_st6422; | ||
43 | 42 | ||
44 | struct stv06xx_sensor { | 43 | struct stv06xx_sensor { |
45 | /* Defines the name of a sensor */ | 44 | /* Defines the name of a sensor */ |
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c new file mode 100644 index 000000000000..87cb5b9ddfa7 --- /dev/null +++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c | |||
@@ -0,0 +1,453 @@ | |||
1 | /* | ||
2 | * Support for the sensor part which is integrated (I think) into the | ||
3 | * st6422 stv06xx-like bridge; as it's integrated there are no i2c writes | ||
4 | * but instead direct bridge writes. | ||
5 | * | ||
6 | * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com> | ||
7 | * | ||
8 | * Strongly based on qc-usb-messenger, which is: | ||
9 | * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher | ||
10 | * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland | ||
11 | * Copyright (c) 2002, 2003 Tuukka Toivonen | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "stv06xx_st6422.h" | ||
30 | |||
31 | static struct v4l2_pix_format st6422_mode[] = { | ||
32 | /* Note we actually get 124 lines of data, of which we skip the first | ||
33 | 4 as they are garbage */ | ||
34 | { | ||
35 | 162, | ||
36 | 120, | ||
37 | V4L2_PIX_FMT_SGRBG8, | ||
38 | V4L2_FIELD_NONE, | ||
39 | .sizeimage = 162 * 120, | ||
40 | .bytesperline = 162, | ||
41 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
42 | .priv = 1 | ||
43 | }, | ||
44 | /* Note we actually get 248 lines of data, of which we skip the first | ||
45 | 4 as they are garbage, and we tell the app it only gets the | ||
46 | first 240 of the 244 lines it actually gets, so that it ignores | ||
47 | the last 4. */ | ||
48 | { | ||
49 | 324, | ||
50 | 240, | ||
51 | V4L2_PIX_FMT_SGRBG8, | ||
52 | V4L2_FIELD_NONE, | ||
53 | .sizeimage = 324 * 244, | ||
54 | .bytesperline = 324, | ||
55 | .colorspace = V4L2_COLORSPACE_SRGB, | ||
56 | .priv = 0 | ||
57 | }, | ||
58 | }; | ||
59 | |||
60 | static const struct ctrl st6422_ctrl[] = { | ||
61 | #define BRIGHTNESS_IDX 0 | ||
62 | { | ||
63 | { | ||
64 | .id = V4L2_CID_BRIGHTNESS, | ||
65 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
66 | .name = "Brightness", | ||
67 | .minimum = 0, | ||
68 | .maximum = 31, | ||
69 | .step = 1, | ||
70 | .default_value = 3 | ||
71 | }, | ||
72 | .set = st6422_set_brightness, | ||
73 | .get = st6422_get_brightness | ||
74 | }, | ||
75 | #define CONTRAST_IDX 1 | ||
76 | { | ||
77 | { | ||
78 | .id = V4L2_CID_CONTRAST, | ||
79 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
80 | .name = "Contrast", | ||
81 | .minimum = 0, | ||
82 | .maximum = 15, | ||
83 | .step = 1, | ||
84 | .default_value = 11 | ||
85 | }, | ||
86 | .set = st6422_set_contrast, | ||
87 | .get = st6422_get_contrast | ||
88 | }, | ||
89 | #define GAIN_IDX 2 | ||
90 | { | ||
91 | { | ||
92 | .id = V4L2_CID_GAIN, | ||
93 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
94 | .name = "Gain", | ||
95 | .minimum = 0, | ||
96 | .maximum = 255, | ||
97 | .step = 1, | ||
98 | .default_value = 64 | ||
99 | }, | ||
100 | .set = st6422_set_gain, | ||
101 | .get = st6422_get_gain | ||
102 | }, | ||
103 | #define EXPOSURE_IDX 3 | ||
104 | { | ||
105 | { | ||
106 | .id = V4L2_CID_EXPOSURE, | ||
107 | .type = V4L2_CTRL_TYPE_INTEGER, | ||
108 | .name = "Exposure", | ||
109 | .minimum = 0, | ||
110 | .maximum = 1023, | ||
111 | .step = 1, | ||
112 | .default_value = 256 | ||
113 | }, | ||
114 | .set = st6422_set_exposure, | ||
115 | .get = st6422_get_exposure | ||
116 | }, | ||
117 | }; | ||
118 | |||
119 | static int st6422_probe(struct sd *sd) | ||
120 | { | ||
121 | int i; | ||
122 | s32 *sensor_settings; | ||
123 | |||
124 | if (sd->bridge != BRIDGE_ST6422) | ||
125 | return -ENODEV; | ||
126 | |||
127 | info("st6422 sensor detected"); | ||
128 | |||
129 | sensor_settings = kmalloc(ARRAY_SIZE(st6422_ctrl) * sizeof(s32), | ||
130 | GFP_KERNEL); | ||
131 | if (!sensor_settings) | ||
132 | return -ENOMEM; | ||
133 | |||
134 | sd->gspca_dev.cam.cam_mode = st6422_mode; | ||
135 | sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode); | ||
136 | sd->desc.ctrls = st6422_ctrl; | ||
137 | sd->desc.nctrls = ARRAY_SIZE(st6422_ctrl); | ||
138 | sd->sensor_priv = sensor_settings; | ||
139 | |||
140 | for (i = 0; i < sd->desc.nctrls; i++) | ||
141 | sensor_settings[i] = st6422_ctrl[i].qctrl.default_value; | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static int st6422_init(struct sd *sd) | ||
147 | { | ||
148 | int err = 0, i; | ||
149 | |||
150 | const u16 st6422_bridge_init[][2] = { | ||
151 | { STV_ISO_ENABLE, 0x00 }, /* disable capture */ | ||
152 | { 0x1436, 0x00 }, | ||
153 | { 0x1432, 0x03 }, /* 0x00-0x1F brightness */ | ||
154 | { 0x143a, 0xF9 }, /* 0x00-0x0F contrast */ | ||
155 | { 0x0509, 0x38 }, /* R */ | ||
156 | { 0x050a, 0x38 }, /* G */ | ||
157 | { 0x050b, 0x38 }, /* B */ | ||
158 | { 0x050c, 0x2A }, | ||
159 | { 0x050d, 0x01 }, | ||
160 | |||
161 | |||
162 | { 0x1431, 0x00 }, /* 0x00-0x07 ??? */ | ||
163 | { 0x1433, 0x34 }, /* 160x120, 0x00-0x01 night filter */ | ||
164 | { 0x1438, 0x18 }, /* 640x480 */ | ||
165 | /* 18 bayes */ | ||
166 | /* 10 compressed? */ | ||
167 | |||
168 | { 0x1439, 0x00 }, | ||
169 | /* anti-flicker?? 0xa2 gives a perfect picture against a monitor */ | ||
170 | |||
171 | { 0x143b, 0x05 }, | ||
172 | { 0x143c, 0x00 }, /* 0x00-0x01 - ??? */ | ||
173 | |||
174 | |||
175 | /* shutter time 0x0000-0x03FF */ | ||
176 | /* low value gives good pictures on moving objects (but requires much light) */ | ||
177 | /* high value gives good pictures in darkness (but tends to be overexposed) */ | ||
178 | { 0x143e, 0x01 }, | ||
179 | { 0x143d, 0x00 }, | ||
180 | |||
181 | { 0x1442, 0xe2 }, | ||
182 | /* write: 1x1x xxxx */ | ||
183 | /* read: 1x1x xxxx */ | ||
184 | /* bit 5 == button pressed and hold if 0 */ | ||
185 | /* write 0xe2,0xea */ | ||
186 | |||
187 | /* 0x144a */ | ||
188 | /* 0x00 init */ | ||
189 | /* bit 7 == button has been pressed, but not handled */ | ||
190 | |||
191 | /* interrupt */ | ||
192 | /* if(urb->iso_frame_desc[i].status == 0x80) { */ | ||
193 | /* if(urb->iso_frame_desc[i].status == 0x88) { */ | ||
194 | |||
195 | { 0x1500, 0xd0 }, | ||
196 | { 0x1500, 0xd0 }, | ||
197 | { 0x1500, 0x50 }, /* 0x00 - 0xFF 0x80 == compr ? */ | ||
198 | |||
199 | { 0x1501, 0xaf }, | ||
200 | /* high val -> bright areas become darker. */ | ||
201 | /* low val -> bright areas become lighter. */ | ||
202 | { 0x1502, 0xc2 }, | ||
203 | /* high val -> bright areas become darker. */ | ||
204 | /* low val -> bright areas become lighter. */ | ||
205 | { 0x1503, 0x45 }, | ||
206 | /* high val -> bright areas become darker. */ | ||
207 | /* low val -> bright areas become lighter. */ | ||
208 | |||
209 | { 0x1505, 0x02 }, | ||
210 | /* 2 : 324x248 80352 bytes */ | ||
211 | /* 7 : 248x162 40176 bytes */ | ||
212 | /* c+f: 162*124 20088 bytes */ | ||
213 | |||
214 | { 0x150e, 0x8e }, | ||
215 | { 0x150f, 0x37 }, | ||
216 | { 0x15c0, 0x00 }, | ||
217 | { 0x15c1, 1023 }, /* 160x120, ISOC_PACKET_SIZE */ | ||
218 | { 0x15c3, 0x08 }, /* 0x04/0x14 ... test pictures ??? */ | ||
219 | |||
220 | |||
221 | { 0x143f, 0x01 }, /* commit settings */ | ||
222 | |||
223 | }; | ||
224 | |||
225 | for (i = 0; i < ARRAY_SIZE(st6422_bridge_init) && !err; i++) { | ||
226 | err = stv06xx_write_bridge(sd, st6422_bridge_init[i][0], | ||
227 | st6422_bridge_init[i][1]); | ||
228 | } | ||
229 | |||
230 | return err; | ||
231 | } | ||
232 | |||
233 | static void st6422_disconnect(struct sd *sd) | ||
234 | { | ||
235 | sd->sensor = NULL; | ||
236 | kfree(sd->sensor_priv); | ||
237 | } | ||
238 | |||
239 | static int st6422_start(struct sd *sd) | ||
240 | { | ||
241 | int err, packet_size; | ||
242 | struct cam *cam = &sd->gspca_dev.cam; | ||
243 | s32 *sensor_settings = sd->sensor_priv; | ||
244 | struct usb_host_interface *alt; | ||
245 | struct usb_interface *intf; | ||
246 | |||
247 | intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); | ||
248 | alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); | ||
249 | if (!alt) { | ||
250 | PDEBUG(D_ERR, "Couldn't get altsetting"); | ||
251 | return -EIO; | ||
252 | } | ||
253 | |||
254 | packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); | ||
255 | err = stv06xx_write_bridge(sd, 0x15c1, packet_size); | ||
256 | if (err < 0) | ||
257 | return err; | ||
258 | |||
259 | if (cam->cam_mode[sd->gspca_dev.curr_mode].priv) | ||
260 | err = stv06xx_write_bridge(sd, 0x1505, 0x0f); | ||
261 | else | ||
262 | err = stv06xx_write_bridge(sd, 0x1505, 0x02); | ||
263 | if (err < 0) | ||
264 | return err; | ||
265 | |||
266 | err = st6422_set_brightness(&sd->gspca_dev, | ||
267 | sensor_settings[BRIGHTNESS_IDX]); | ||
268 | if (err < 0) | ||
269 | return err; | ||
270 | |||
271 | err = st6422_set_contrast(&sd->gspca_dev, | ||
272 | sensor_settings[CONTRAST_IDX]); | ||
273 | if (err < 0) | ||
274 | return err; | ||
275 | |||
276 | err = st6422_set_exposure(&sd->gspca_dev, | ||
277 | sensor_settings[EXPOSURE_IDX]); | ||
278 | if (err < 0) | ||
279 | return err; | ||
280 | |||
281 | err = st6422_set_gain(&sd->gspca_dev, | ||
282 | sensor_settings[GAIN_IDX]); | ||
283 | if (err < 0) | ||
284 | return err; | ||
285 | |||
286 | PDEBUG(D_STREAM, "Starting stream"); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static int st6422_stop(struct sd *sd) | ||
292 | { | ||
293 | PDEBUG(D_STREAM, "Halting stream"); | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val) | ||
299 | { | ||
300 | struct sd *sd = (struct sd *) gspca_dev; | ||
301 | s32 *sensor_settings = sd->sensor_priv; | ||
302 | |||
303 | *val = sensor_settings[BRIGHTNESS_IDX]; | ||
304 | |||
305 | PDEBUG(D_V4L2, "Read brightness %d", *val); | ||
306 | |||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val) | ||
311 | { | ||
312 | int err; | ||
313 | struct sd *sd = (struct sd *) gspca_dev; | ||
314 | s32 *sensor_settings = sd->sensor_priv; | ||
315 | |||
316 | sensor_settings[BRIGHTNESS_IDX] = val; | ||
317 | |||
318 | if (!gspca_dev->streaming) | ||
319 | return 0; | ||
320 | |||
321 | /* val goes from 0 -> 31 */ | ||
322 | PDEBUG(D_V4L2, "Set brightness to %d", val); | ||
323 | err = stv06xx_write_bridge(sd, 0x1432, val); | ||
324 | if (err < 0) | ||
325 | return err; | ||
326 | |||
327 | /* commit settings */ | ||
328 | err = stv06xx_write_bridge(sd, 0x143f, 0x01); | ||
329 | return (err < 0) ? err : 0; | ||
330 | } | ||
331 | |||
332 | static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val) | ||
333 | { | ||
334 | struct sd *sd = (struct sd *) gspca_dev; | ||
335 | s32 *sensor_settings = sd->sensor_priv; | ||
336 | |||
337 | *val = sensor_settings[CONTRAST_IDX]; | ||
338 | |||
339 | PDEBUG(D_V4L2, "Read contrast %d", *val); | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val) | ||
345 | { | ||
346 | int err; | ||
347 | struct sd *sd = (struct sd *) gspca_dev; | ||
348 | s32 *sensor_settings = sd->sensor_priv; | ||
349 | |||
350 | sensor_settings[CONTRAST_IDX] = val; | ||
351 | |||
352 | if (!gspca_dev->streaming) | ||
353 | return 0; | ||
354 | |||
355 | /* Val goes from 0 -> 15 */ | ||
356 | PDEBUG(D_V4L2, "Set contrast to %d\n", val); | ||
357 | err = stv06xx_write_bridge(sd, 0x143a, 0xf0 | val); | ||
358 | if (err < 0) | ||
359 | return err; | ||
360 | |||
361 | /* commit settings */ | ||
362 | err = stv06xx_write_bridge(sd, 0x143f, 0x01); | ||
363 | return (err < 0) ? err : 0; | ||
364 | } | ||
365 | |||
366 | static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val) | ||
367 | { | ||
368 | struct sd *sd = (struct sd *) gspca_dev; | ||
369 | s32 *sensor_settings = sd->sensor_priv; | ||
370 | |||
371 | *val = sensor_settings[GAIN_IDX]; | ||
372 | |||
373 | PDEBUG(D_V4L2, "Read gain %d", *val); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val) | ||
379 | { | ||
380 | int err; | ||
381 | struct sd *sd = (struct sd *) gspca_dev; | ||
382 | s32 *sensor_settings = sd->sensor_priv; | ||
383 | |||
384 | sensor_settings[GAIN_IDX] = val; | ||
385 | |||
386 | if (!gspca_dev->streaming) | ||
387 | return 0; | ||
388 | |||
389 | PDEBUG(D_V4L2, "Set gain to %d", val); | ||
390 | |||
391 | /* Set red, green, blue, gain */ | ||
392 | err = stv06xx_write_bridge(sd, 0x0509, val); | ||
393 | if (err < 0) | ||
394 | return err; | ||
395 | |||
396 | err = stv06xx_write_bridge(sd, 0x050a, val); | ||
397 | if (err < 0) | ||
398 | return err; | ||
399 | |||
400 | err = stv06xx_write_bridge(sd, 0x050b, val); | ||
401 | if (err < 0) | ||
402 | return err; | ||
403 | |||
404 | /* 2 mystery writes */ | ||
405 | err = stv06xx_write_bridge(sd, 0x050c, 0x2a); | ||
406 | if (err < 0) | ||
407 | return err; | ||
408 | |||
409 | err = stv06xx_write_bridge(sd, 0x050d, 0x01); | ||
410 | if (err < 0) | ||
411 | return err; | ||
412 | |||
413 | /* commit settings */ | ||
414 | err = stv06xx_write_bridge(sd, 0x143f, 0x01); | ||
415 | return (err < 0) ? err : 0; | ||
416 | } | ||
417 | |||
418 | static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val) | ||
419 | { | ||
420 | struct sd *sd = (struct sd *) gspca_dev; | ||
421 | s32 *sensor_settings = sd->sensor_priv; | ||
422 | |||
423 | *val = sensor_settings[EXPOSURE_IDX]; | ||
424 | |||
425 | PDEBUG(D_V4L2, "Read exposure %d", *val); | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val) | ||
431 | { | ||
432 | int err; | ||
433 | struct sd *sd = (struct sd *) gspca_dev; | ||
434 | s32 *sensor_settings = sd->sensor_priv; | ||
435 | |||
436 | sensor_settings[EXPOSURE_IDX] = val; | ||
437 | |||
438 | if (!gspca_dev->streaming) | ||
439 | return 0; | ||
440 | |||
441 | PDEBUG(D_V4L2, "Set exposure to %d\n", val); | ||
442 | err = stv06xx_write_bridge(sd, 0x143d, val & 0xff); | ||
443 | if (err < 0) | ||
444 | return err; | ||
445 | |||
446 | err = stv06xx_write_bridge(sd, 0x143e, val >> 8); | ||
447 | if (err < 0) | ||
448 | return err; | ||
449 | |||
450 | /* commit settings */ | ||
451 | err = stv06xx_write_bridge(sd, 0x143f, 0x01); | ||
452 | return (err < 0) ? err : 0; | ||
453 | } | ||
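For illustration, not part of the commit: the st6422 control handlers above cache every value in sensor_settings[] and only write the bridge registers while streaming; st6422_start() then replays the cached values. A minimal standalone model of that pattern; write_reg() stands in for stv06xx_write_bridge() and just logs the access.

	#include <stdio.h>

	enum { BRIGHTNESS_IDX, CONTRAST_IDX, NCTRLS };

	static int settings[NCTRLS] = { 3, 11 };	/* control default values */
	static int streaming;

	static int write_reg(int reg, int val)
	{
		printf("bridge write 0x%04x = 0x%02x\n", reg, val);
		return 0;
	}

	static int set_brightness(int val)
	{
		settings[BRIGHTNESS_IDX] = val;		/* always remember the value */
		if (!streaming)
			return 0;			/* hardware untouched until start */
		if (write_reg(0x1432, val) < 0)
			return -1;
		return write_reg(0x143f, 0x01);		/* commit settings */
	}

	static int start(void)
	{
		streaming = 1;
		return set_brightness(settings[BRIGHTNESS_IDX]);	/* replay the cache */
	}

	int main(void)
	{
		set_brightness(10);	/* before streaming: cached only */
		start();		/* now the cached value reaches the bridge */
		return 0;
	}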
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h new file mode 100644 index 000000000000..b2d45fe50522 --- /dev/null +++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Support for the sensor part which is integrated (I think) into the | ||
3 | * st6422 stv06xx-like bridge; as it's integrated there are no i2c writes | ||
4 | * but instead direct bridge writes. | ||
5 | * | ||
6 | * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com> | ||
7 | * | ||
8 | * Strongly based on qc-usb-messenger, which is: | ||
9 | * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher | ||
10 | * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland | ||
11 | * Copyright (c) 2002, 2003 Tuukka Toivonen | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #ifndef STV06XX_ST6422_H_ | ||
30 | #define STV06XX_ST6422_H_ | ||
31 | |||
32 | #include "stv06xx_sensor.h" | ||
33 | |||
34 | static int st6422_probe(struct sd *sd); | ||
35 | static int st6422_start(struct sd *sd); | ||
36 | static int st6422_init(struct sd *sd); | ||
37 | static int st6422_stop(struct sd *sd); | ||
38 | static void st6422_disconnect(struct sd *sd); | ||
39 | |||
40 | /* V4L2 controls supported by the driver */ | ||
41 | static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val); | ||
42 | static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val); | ||
43 | static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val); | ||
44 | static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val); | ||
45 | static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val); | ||
46 | static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val); | ||
47 | static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val); | ||
48 | static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val); | ||
49 | |||
50 | const struct stv06xx_sensor stv06xx_sensor_st6422 = { | ||
51 | .name = "ST6422", | ||
52 | .init = st6422_init, | ||
53 | .probe = st6422_probe, | ||
54 | .start = st6422_start, | ||
55 | .stop = st6422_stop, | ||
56 | .disconnect = st6422_disconnect, | ||
57 | }; | ||
58 | |||
59 | #endif | ||
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c index 84995bcf4a75..a3b77ed3f089 100644 --- a/drivers/media/video/ivtv/ivtv-controls.c +++ b/drivers/media/video/ivtv/ivtv-controls.c | |||
@@ -60,6 +60,8 @@ int ivtv_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl) | |||
60 | 60 | ||
61 | switch (qctrl->id) { | 61 | switch (qctrl->id) { |
62 | /* Standard V4L2 controls */ | 62 | /* Standard V4L2 controls */ |
63 | case V4L2_CID_USER_CLASS: | ||
64 | return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0); | ||
63 | case V4L2_CID_BRIGHTNESS: | 65 | case V4L2_CID_BRIGHTNESS: |
64 | case V4L2_CID_HUE: | 66 | case V4L2_CID_HUE: |
65 | case V4L2_CID_SATURATION: | 67 | case V4L2_CID_SATURATION: |
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 459c04cbf69d..4d794b42d6cd 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c | |||
@@ -280,15 +280,9 @@ static int mt9m001_try_fmt(struct soc_camera_device *icd, | |||
280 | { | 280 | { |
281 | struct v4l2_pix_format *pix = &f->fmt.pix; | 281 | struct v4l2_pix_format *pix = &f->fmt.pix; |
282 | 282 | ||
283 | if (pix->height < 32 + icd->y_skip_top) | 283 | v4l_bound_align_image(&pix->width, 48, 1280, 1, |
284 | pix->height = 32 + icd->y_skip_top; | 284 | &pix->height, 32 + icd->y_skip_top, |
285 | if (pix->height > 1024 + icd->y_skip_top) | 285 | 1024 + icd->y_skip_top, 0, 0); |
286 | pix->height = 1024 + icd->y_skip_top; | ||
287 | if (pix->width < 48) | ||
288 | pix->width = 48; | ||
289 | if (pix->width > 1280) | ||
290 | pix->width = 1280; | ||
291 | pix->width &= ~0x01; /* has to be even, unsure why was ~3 */ | ||
292 | 286 | ||
293 | return 0; | 287 | return 0; |
294 | } | 288 | } |
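For illustration, not part of the commit: the open-coded clamping above is folded into one v4l_bound_align_image() call, which bounds width/height into a range and aligns them (the alignment arguments are log2 values, so 1 means "multiple of 2"). A rough standalone approximation of the effect for this driver; it is not the real v4l2-core implementation.

	#include <stdio.h>

	static unsigned int bound_align(unsigned int v, unsigned int min,
					unsigned int max, unsigned int align_exp)
	{
		if (v < min)
			v = min;
		if (v > max)
			v = max;
		return v & ~((1U << align_exp) - 1);	/* align to 2^align_exp */
	}

	int main(void)
	{
		unsigned int width = 1300, height = 20, y_skip_top = 1;

		/* mirrors the mt9m001 bounds: width 48..1280 even,
		   height (32..1024) + y_skip_top, no height alignment */
		width = bound_align(width, 48, 1280, 1);
		height = bound_align(height, 32 + y_skip_top, 1024 + y_skip_top, 0);
		printf("%ux%u\n", width, height);	/* prints 1280x33 */
		return 0;
	}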
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index f72aeb7c4deb..4207fb342670 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c | |||
@@ -385,17 +385,9 @@ static int mt9t031_try_fmt(struct soc_camera_device *icd, | |||
385 | { | 385 | { |
386 | struct v4l2_pix_format *pix = &f->fmt.pix; | 386 | struct v4l2_pix_format *pix = &f->fmt.pix; |
387 | 387 | ||
388 | if (pix->height < MT9T031_MIN_HEIGHT) | 388 | v4l_bound_align_image( |
389 | pix->height = MT9T031_MIN_HEIGHT; | 389 | &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1, |
390 | if (pix->height > MT9T031_MAX_HEIGHT) | 390 | &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0); |
391 | pix->height = MT9T031_MAX_HEIGHT; | ||
392 | if (pix->width < MT9T031_MIN_WIDTH) | ||
393 | pix->width = MT9T031_MIN_WIDTH; | ||
394 | if (pix->width > MT9T031_MAX_WIDTH) | ||
395 | pix->width = MT9T031_MAX_WIDTH; | ||
396 | |||
397 | pix->width &= ~0x01; /* has to be even */ | ||
398 | pix->height &= ~0x01; /* has to be even */ | ||
399 | 391 | ||
400 | return 0; | 392 | return 0; |
401 | } | 393 | } |
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index be20d312b1dc..dbdcc86ae50d 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c | |||
@@ -364,15 +364,9 @@ static int mt9v022_try_fmt(struct soc_camera_device *icd, | |||
364 | { | 364 | { |
365 | struct v4l2_pix_format *pix = &f->fmt.pix; | 365 | struct v4l2_pix_format *pix = &f->fmt.pix; |
366 | 366 | ||
367 | if (pix->height < 32 + icd->y_skip_top) | 367 | v4l_bound_align_image(&pix->width, 48, 752, 2 /* ? */, |
368 | pix->height = 32 + icd->y_skip_top; | 368 | &pix->height, 32 + icd->y_skip_top, |
369 | if (pix->height > 480 + icd->y_skip_top) | 369 | 480 + icd->y_skip_top, 0, 0); |
370 | pix->height = 480 + icd->y_skip_top; | ||
371 | if (pix->width < 48) | ||
372 | pix->width = 48; | ||
373 | if (pix->width > 752) | ||
374 | pix->width = 752; | ||
375 | pix->width &= ~0x03; /* ? */ | ||
376 | 370 | ||
377 | return 0; | 371 | return 0; |
378 | } | 372 | } |
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c index 08cfd3e4ae8a..0bc2cf573c76 100644 --- a/drivers/media/video/ov511.c +++ b/drivers/media/video/ov511.c | |||
@@ -211,8 +211,6 @@ static const int i2c_detect_tries = 5; | |||
211 | static struct usb_device_id device_table [] = { | 211 | static struct usb_device_id device_table [] = { |
212 | { USB_DEVICE(VEND_OMNIVISION, PROD_OV511) }, | 212 | { USB_DEVICE(VEND_OMNIVISION, PROD_OV511) }, |
213 | { USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) }, | 213 | { USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) }, |
214 | { USB_DEVICE(VEND_OMNIVISION, PROD_OV518) }, | ||
215 | { USB_DEVICE(VEND_OMNIVISION, PROD_OV518PLUS) }, | ||
216 | { USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) }, | 214 | { USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) }, |
217 | { } /* Terminating entry */ | 215 | { } /* Terminating entry */ |
218 | }; | 216 | }; |
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c index 10ef1a2c13ea..416933ca607d 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-audio.c +++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c | |||
@@ -48,11 +48,13 @@ static const int routing_scheme0[] = { | |||
48 | MSP_DSP_IN_SCART), | 48 | MSP_DSP_IN_SCART), |
49 | }; | 49 | }; |
50 | 50 | ||
51 | static const struct routing_scheme routing_schemes[] = { | 51 | static const struct routing_scheme routing_def0 = { |
52 | [PVR2_ROUTING_SCHEME_HAUPPAUGE] = { | 52 | .def = routing_scheme0, |
53 | .def = routing_scheme0, | 53 | .cnt = ARRAY_SIZE(routing_scheme0), |
54 | .cnt = ARRAY_SIZE(routing_scheme0), | 54 | }; |
55 | }, | 55 | |
56 | static const struct routing_scheme *routing_schemes[] = { | ||
57 | [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0, | ||
56 | }; | 58 | }; |
57 | 59 | ||
58 | void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | 60 | void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) |
@@ -65,7 +67,7 @@ void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | |||
65 | pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo"); | 67 | pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo"); |
66 | 68 | ||
67 | if ((sid < ARRAY_SIZE(routing_schemes)) && | 69 | if ((sid < ARRAY_SIZE(routing_schemes)) && |
68 | ((sp = routing_schemes + sid) != NULL) && | 70 | ((sp = routing_schemes[sid]) != NULL) && |
69 | (hdw->input_val >= 0) && | 71 | (hdw->input_val >= 0) && |
70 | (hdw->input_val < sp->cnt)) { | 72 | (hdw->input_val < sp->cnt)) { |
71 | input = sp->def[hdw->input_val]; | 73 | input = sp->def[hdw->input_val]; |
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c index 9023adf3fdcc..68980e19409f 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c +++ b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c | |||
@@ -49,11 +49,13 @@ static const int routing_scheme1[] = { | |||
49 | [PVR2_CVAL_INPUT_SVIDEO] = 0, | 49 | [PVR2_CVAL_INPUT_SVIDEO] = 0, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static const struct routing_scheme routing_schemes[] = { | 52 | static const struct routing_scheme routing_def1 = { |
53 | [PVR2_ROUTING_SCHEME_ONAIR] = { | 53 | .def = routing_scheme1, |
54 | .def = routing_scheme1, | 54 | .cnt = ARRAY_SIZE(routing_scheme1), |
55 | .cnt = ARRAY_SIZE(routing_scheme1), | 55 | }; |
56 | }, | 56 | |
57 | static const struct routing_scheme *routing_schemes[] = { | ||
58 | [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1, | ||
57 | }; | 59 | }; |
58 | 60 | ||
59 | 61 | ||
@@ -65,12 +67,11 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | |||
65 | u32 input; | 67 | u32 input; |
66 | pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", | 68 | pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", |
67 | hdw->input_val); | 69 | hdw->input_val); |
68 | if ((sid < ARRAY_SIZE(routing_schemes)) && | 70 | sp = (sid < ARRAY_SIZE(routing_schemes)) ? |
69 | ((sp = routing_schemes + sid) != NULL) && | 71 | routing_schemes[sid] : NULL; |
70 | (hdw->input_val >= 0) && | 72 | if ((sp == NULL) || |
71 | (hdw->input_val < sp->cnt)) { | 73 | (hdw->input_val < 0) || |
72 | input = sp->def[hdw->input_val]; | 74 | (hdw->input_val >= sp->cnt)) { |
73 | } else { | ||
74 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | 75 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, |
75 | "*** WARNING *** subdev v4l2 set_input:" | 76 | "*** WARNING *** subdev v4l2 set_input:" |
76 | " Invalid routing scheme (%u)" | 77 | " Invalid routing scheme (%u)" |
@@ -78,6 +79,7 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | |||
78 | sid, hdw->input_val); | 79 | sid, hdw->input_val); |
79 | return; | 80 | return; |
80 | } | 81 | } |
82 | input = sp->def[hdw->input_val]; | ||
81 | sd->ops->audio->s_routing(sd, input, 0, 0); | 83 | sd->ops->audio->s_routing(sd, input, 0, 0); |
82 | } | 84 | } |
83 | } | 85 | } |
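
In the old form the guard "(sp = routing_schemes + sid) != NULL" could never fail: pointer arithmetic into a static array does not yield NULL, so an unsupported scheme id silently indexed a zero-filled entry. Turning the table into an array of pointers makes unpopulated designated-initializer slots real NULL pointers that the new check can catch. An illustrative sketch of the resulting lookup shape (names are made up, not taken from the pvrusb2 sources):

struct routing_scheme {
        const int *def;
        unsigned int cnt;
};

static const int scheme_a_map[] = { 1, 2, 3 };

static const struct routing_scheme scheme_a = {
        .def = scheme_a_map,
        .cnt = ARRAY_SIZE(scheme_a_map),
};

/* Sparse table: slots with no initializer are NULL and therefore
 * detectable, unlike elements of an array of structs. */
static const struct routing_scheme *schemes[] = {
        [2] = &scheme_a,
};

static int lookup_input(unsigned int sid, int input_val)
{
        const struct routing_scheme *sp;

        sp = (sid < ARRAY_SIZE(schemes)) ? schemes[sid] : NULL;
        if (sp == NULL || input_val < 0 || input_val >= sp->cnt)
                return -EINVAL;
        return sp->def[input_val];
}
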
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c index 05e52358ae49..82c135835753 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c +++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c | |||
@@ -68,6 +68,11 @@ static const struct routing_scheme_item routing_scheme0[] = { | |||
68 | }, | 68 | }, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static const struct routing_scheme routing_def0 = { | ||
72 | .def = routing_scheme0, | ||
73 | .cnt = ARRAY_SIZE(routing_scheme0), | ||
74 | }; | ||
75 | |||
71 | /* Specific to gotview device */ | 76 | /* Specific to gotview device */ |
72 | static const struct routing_scheme_item routing_schemegv[] = { | 77 | static const struct routing_scheme_item routing_schemegv[] = { |
73 | [PVR2_CVAL_INPUT_TV] = { | 78 | [PVR2_CVAL_INPUT_TV] = { |
@@ -90,15 +95,14 @@ static const struct routing_scheme_item routing_schemegv[] = { | |||
90 | }, | 95 | }, |
91 | }; | 96 | }; |
92 | 97 | ||
93 | static const struct routing_scheme routing_schemes[] = { | 98 | static const struct routing_scheme routing_defgv = { |
94 | [PVR2_ROUTING_SCHEME_HAUPPAUGE] = { | 99 | .def = routing_schemegv, |
95 | .def = routing_scheme0, | 100 | .cnt = ARRAY_SIZE(routing_schemegv), |
96 | .cnt = ARRAY_SIZE(routing_scheme0), | 101 | }; |
97 | }, | 102 | |
98 | [PVR2_ROUTING_SCHEME_GOTVIEW] = { | 103 | static const struct routing_scheme *routing_schemes[] = { |
99 | .def = routing_schemegv, | 104 | [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0, |
100 | .cnt = ARRAY_SIZE(routing_schemegv), | 105 | [PVR2_ROUTING_SCHEME_GOTVIEW] = &routing_defgv, |
101 | }, | ||
102 | }; | 106 | }; |
103 | 107 | ||
104 | void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | 108 | void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) |
@@ -110,13 +114,11 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | |||
110 | const struct routing_scheme *sp; | 114 | const struct routing_scheme *sp; |
111 | unsigned int sid = hdw->hdw_desc->signal_routing_scheme; | 115 | unsigned int sid = hdw->hdw_desc->signal_routing_scheme; |
112 | 116 | ||
113 | if ((sid < ARRAY_SIZE(routing_schemes)) && | 117 | sp = (sid < ARRAY_SIZE(routing_schemes)) ? |
114 | ((sp = routing_schemes + sid) != NULL) && | 118 | routing_schemes[sid] : NULL; |
115 | (hdw->input_val >= 0) && | 119 | if ((sp == NULL) || |
116 | (hdw->input_val < sp->cnt)) { | 120 | (hdw->input_val < 0) || |
117 | vid_input = sp->def[hdw->input_val].vid; | 121 | (hdw->input_val >= sp->cnt)) { |
118 | aud_input = sp->def[hdw->input_val].aud; | ||
119 | } else { | ||
120 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | 122 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, |
121 | "*** WARNING *** subdev cx2584x set_input:" | 123 | "*** WARNING *** subdev cx2584x set_input:" |
122 | " Invalid routing scheme (%u)" | 124 | " Invalid routing scheme (%u)" |
@@ -124,7 +126,8 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | |||
124 | sid, hdw->input_val); | 126 | sid, hdw->input_val); |
125 | return; | 127 | return; |
126 | } | 128 | } |
127 | 129 | vid_input = sp->def[hdw->input_val].vid; | |
130 | aud_input = sp->def[hdw->input_val].aud; | ||
128 | pvr2_trace(PVR2_TRACE_CHIPS, | 131 | pvr2_trace(PVR2_TRACE_CHIPS, |
129 | "subdev cx2584x set_input vid=0x%x aud=0x%x", | 132 | "subdev cx2584x set_input vid=0x%x aud=0x%x", |
130 | vid_input, aud_input); | 133 | vid_input, aud_input); |
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c index 0c745b142fb7..cbc388729d77 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c | |||
@@ -85,8 +85,8 @@ MODULE_PARM_DESC(video_std,"specify initial video standard"); | |||
85 | module_param_array(tolerance, int, NULL, 0444); | 85 | module_param_array(tolerance, int, NULL, 0444); |
86 | MODULE_PARM_DESC(tolerance,"specify stream error tolerance"); | 86 | MODULE_PARM_DESC(tolerance,"specify stream error tolerance"); |
87 | 87 | ||
88 | /* US Broadcast channel 7 (175.25 MHz) */ | 88 | /* US Broadcast channel 3 (61.25 MHz), to help with testing */ |
89 | static int default_tv_freq = 175250000L; | 89 | static int default_tv_freq = 61250000L; |
90 | /* 104.3 MHz, a usable FM station for my area */ | 90 | /* 104.3 MHz, a usable FM station for my area */ |
91 | static int default_radio_freq = 104300000L; | 91 | static int default_radio_freq = 104300000L; |
92 | 92 | ||
@@ -1987,6 +1987,34 @@ static unsigned int pvr2_copy_i2c_addr_list( | |||
1987 | } | 1987 | } |
1988 | 1988 | ||
1989 | 1989 | ||
1990 | static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw) | ||
1991 | { | ||
1992 | /* | ||
1993 | Mike Isely <isely@pobox.com> 19-Nov-2006 - This bit of nuttiness | ||
1994 | for cx25840 causes that module to correctly set up its video | ||
1995 | scaling. This is really a problem in the cx25840 module itself, | ||
1996 | but we work around it here. The problem has not been seen in | ||
1997 | ivtv because there VBI is supported and set up. We don't do VBI | ||
1998 | here (at least not yet) and thus we never attempted to even set | ||
1999 | it up. | ||
2000 | */ | ||
2001 | struct v4l2_format fmt; | ||
2002 | if (hdw->decoder_client_id != PVR2_CLIENT_ID_CX25840) { | ||
2003 | /* We're not using a cx25840 so don't enable the hack */ | ||
2004 | return; | ||
2005 | } | ||
2006 | |||
2007 | pvr2_trace(PVR2_TRACE_INIT, | ||
2008 | "Module ID %u:" | ||
2009 | " Executing cx25840 VBI hack", | ||
2010 | hdw->decoder_client_id); | ||
2011 | memset(&fmt, 0, sizeof(fmt)); | ||
2012 | fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; | ||
2013 | v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id, | ||
2014 | video, s_fmt, &fmt); | ||
2015 | } | ||
2016 | |||
2017 | |||
1990 | static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, | 2018 | static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, |
1991 | const struct pvr2_device_client_desc *cd) | 2019 | const struct pvr2_device_client_desc *cd) |
1992 | { | 2020 | { |
@@ -2078,30 +2106,6 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, | |||
2078 | /* client-specific setup... */ | 2106 | /* client-specific setup... */ |
2079 | switch (mid) { | 2107 | switch (mid) { |
2080 | case PVR2_CLIENT_ID_CX25840: | 2108 | case PVR2_CLIENT_ID_CX25840: |
2081 | hdw->decoder_client_id = mid; | ||
2082 | { | ||
2083 | /* | ||
2084 | Mike Isely <isely@pobox.com> 19-Nov-2006 - This | ||
2085 | bit of nuttiness for cx25840 causes that module | ||
2086 | to correctly set up its video scaling. This is | ||
2087 | really a problem in the cx25840 module itself, | ||
2088 | but we work around it here. The problem has not | ||
2089 | been seen in ivtv because there VBI is supported | ||
2090 | and set up. We don't do VBI here (at least not | ||
2091 | yet) and thus we never attempted to even set it | ||
2092 | up. | ||
2093 | */ | ||
2094 | struct v4l2_format fmt; | ||
2095 | pvr2_trace(PVR2_TRACE_INIT, | ||
2096 | "Module ID %u:" | ||
2097 | " Executing cx25840 VBI hack", | ||
2098 | mid); | ||
2099 | memset(&fmt, 0, sizeof(fmt)); | ||
2100 | fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; | ||
2101 | v4l2_device_call_all(&hdw->v4l2_dev, mid, | ||
2102 | video, s_fmt, &fmt); | ||
2103 | } | ||
2104 | break; | ||
2105 | case PVR2_CLIENT_ID_SAA7115: | 2109 | case PVR2_CLIENT_ID_SAA7115: |
2106 | hdw->decoder_client_id = mid; | 2110 | hdw->decoder_client_id = mid; |
2107 | break; | 2111 | break; |
@@ -2202,6 +2206,8 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw) | |||
2202 | cptr->info->set_value(cptr,~0,cptr->info->default_value); | 2206 | cptr->info->set_value(cptr,~0,cptr->info->default_value); |
2203 | } | 2207 | } |
2204 | 2208 | ||
2209 | pvr2_hdw_cx25840_vbi_hack(hdw); | ||
2210 | |||
2205 | /* Set up special default values for the television and radio | 2211 | /* Set up special default values for the television and radio |
2206 | frequencies here. It's not really important what these defaults | 2212 | frequencies here. It's not really important what these defaults |
2207 | are, but I set them to something usable in the Chicago area just | 2213 | are, but I set them to something usable in the Chicago area just |
@@ -2954,6 +2960,7 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw) | |||
2954 | vs = hdw->std_mask_cur; | 2960 | vs = hdw->std_mask_cur; |
2955 | v4l2_device_call_all(&hdw->v4l2_dev, 0, | 2961 | v4l2_device_call_all(&hdw->v4l2_dev, 0, |
2956 | core, s_std, vs); | 2962 | core, s_std, vs); |
2963 | pvr2_hdw_cx25840_vbi_hack(hdw); | ||
2957 | } | 2964 | } |
2958 | hdw->tuner_signal_stale = !0; | 2965 | hdw->tuner_signal_stale = !0; |
2959 | hdw->cropcap_stale = !0; | 2966 | hdw->cropcap_stale = !0; |
@@ -4076,6 +4083,7 @@ int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw) | |||
4076 | if (hdw->decoder_client_id) { | 4083 | if (hdw->decoder_client_id) { |
4077 | v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id, | 4084 | v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id, |
4078 | core, reset, 0); | 4085 | core, reset, 0); |
4086 | pvr2_hdw_cx25840_vbi_hack(hdw); | ||
4079 | return 0; | 4087 | return 0; |
4080 | } | 4088 | } |
4081 | pvr2_trace(PVR2_TRACE_INIT, | 4089 | pvr2_trace(PVR2_TRACE_INIT, |
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c index d2fe7c8f2c3a..4c96cf48c796 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c +++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c | |||
@@ -54,6 +54,11 @@ static const int routing_scheme0[] = { | |||
54 | [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, | 54 | [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, |
55 | }; | 55 | }; |
56 | 56 | ||
57 | static const struct routing_scheme routing_def0 = { | ||
58 | .def = routing_scheme0, | ||
59 | .cnt = ARRAY_SIZE(routing_scheme0), | ||
60 | }; | ||
61 | |||
57 | static const int routing_scheme1[] = { | 62 | static const int routing_scheme1[] = { |
58 | [PVR2_CVAL_INPUT_TV] = SAA7115_COMPOSITE4, | 63 | [PVR2_CVAL_INPUT_TV] = SAA7115_COMPOSITE4, |
59 | [PVR2_CVAL_INPUT_RADIO] = SAA7115_COMPOSITE5, | 64 | [PVR2_CVAL_INPUT_RADIO] = SAA7115_COMPOSITE5, |
@@ -61,15 +66,14 @@ static const int routing_scheme1[] = { | |||
61 | [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, /* or SVIDEO0, it seems */ | 66 | [PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, /* or SVIDEO0, it seems */ |
62 | }; | 67 | }; |
63 | 68 | ||
64 | static const struct routing_scheme routing_schemes[] = { | 69 | static const struct routing_scheme routing_def1 = { |
65 | [PVR2_ROUTING_SCHEME_HAUPPAUGE] = { | 70 | .def = routing_scheme1, |
66 | .def = routing_scheme0, | 71 | .cnt = ARRAY_SIZE(routing_scheme1), |
67 | .cnt = ARRAY_SIZE(routing_scheme0), | 72 | }; |
68 | }, | 73 | |
69 | [PVR2_ROUTING_SCHEME_ONAIR] = { | 74 | static const struct routing_scheme *routing_schemes[] = { |
70 | .def = routing_scheme1, | 75 | [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0, |
71 | .cnt = ARRAY_SIZE(routing_scheme1), | 76 | [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1, |
72 | }, | ||
73 | }; | 77 | }; |
74 | 78 | ||
75 | void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | 79 | void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) |
@@ -81,12 +85,12 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | |||
81 | 85 | ||
82 | pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", | 86 | pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)", |
83 | hdw->input_val); | 87 | hdw->input_val); |
84 | if ((sid < ARRAY_SIZE(routing_schemes)) && | 88 | |
85 | ((sp = routing_schemes + sid) != NULL) && | 89 | sp = (sid < ARRAY_SIZE(routing_schemes)) ? |
86 | (hdw->input_val >= 0) && | 90 | routing_schemes[sid] : NULL; |
87 | (hdw->input_val < sp->cnt)) { | 91 | if ((sp == NULL) || |
88 | input = sp->def[hdw->input_val]; | 92 | (hdw->input_val < 0) || |
89 | } else { | 93 | (hdw->input_val >= sp->cnt)) { |
90 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | 94 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, |
91 | "*** WARNING *** subdev v4l2 set_input:" | 95 | "*** WARNING *** subdev v4l2 set_input:" |
92 | " Invalid routing scheme (%u)" | 96 | " Invalid routing scheme (%u)" |
@@ -94,6 +98,7 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd) | |||
94 | sid, hdw->input_val); | 98 | sid, hdw->input_val); |
95 | return; | 99 | return; |
96 | } | 100 | } |
101 | input = sp->def[hdw->input_val]; | ||
97 | sd->ops->video->s_routing(sd, input, 0, 0); | 102 | sd->ops->video->s_routing(sd, input, 0, 0); |
98 | } | 103 | } |
99 | } | 104 | } |
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index f60de40fd21f..46e0d8ad880f 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c | |||
@@ -163,13 +163,6 @@ | |||
163 | CICR0_EOFM | CICR0_FOM) | 163 | CICR0_EOFM | CICR0_FOM) |
164 | 164 | ||
165 | /* | 165 | /* |
166 | * YUV422P picture size should be a multiple of 16, so the heuristic aligns | ||
167 | * height, width on 4 byte boundaries to reach the 16 multiple for the size. | ||
168 | */ | ||
169 | #define YUV422P_X_Y_ALIGN 4 | ||
170 | #define YUV422P_SIZE_ALIGN YUV422P_X_Y_ALIGN * YUV422P_X_Y_ALIGN | ||
171 | |||
172 | /* | ||
173 | * Structures | 166 | * Structures |
174 | */ | 167 | */ |
175 | enum pxa_camera_active_dma { | 168 | enum pxa_camera_active_dma { |
@@ -1398,28 +1391,15 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, | |||
1398 | return -EINVAL; | 1391 | return -EINVAL; |
1399 | } | 1392 | } |
1400 | 1393 | ||
1401 | /* limit to pxa hardware capabilities */ | ||
1402 | if (pix->height < 32) | ||
1403 | pix->height = 32; | ||
1404 | if (pix->height > 2048) | ||
1405 | pix->height = 2048; | ||
1406 | if (pix->width < 48) | ||
1407 | pix->width = 48; | ||
1408 | if (pix->width > 2048) | ||
1409 | pix->width = 2048; | ||
1410 | pix->width &= ~0x01; | ||
1411 | |||
1412 | /* | 1394 | /* |
1413 | * YUV422P planar format requires images size to be a 16 bytes | 1395 | * Limit to pxa hardware capabilities. YUV422P planar format requires |
1414 | * multiple. If not, zeros will be inserted between Y and U planes, and | 1396 | * images size to be a multiple of 16 bytes. If not, zeros will be |
1415 | * U and V planes, and YUV422P standard would be violated. | 1397 | * inserted between Y and U planes, and U and V planes, which violates |
1398 | * the YUV422P standard. | ||
1416 | */ | 1399 | */ |
1417 | if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P) { | 1400 | v4l_bound_align_image(&pix->width, 48, 2048, 1, |
1418 | if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN)) | 1401 | &pix->height, 32, 2048, 0, |
1419 | pix->height = ALIGN(pix->height, YUV422P_X_Y_ALIGN); | 1402 | xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0); |
1420 | if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN)) | ||
1421 | pix->width = ALIGN(pix->width, YUV422P_X_Y_ALIGN); | ||
1422 | } | ||
1423 | 1403 | ||
1424 | pix->bytesperline = pix->width * | 1404 | pix->bytesperline = pix->width * |
1425 | DIV_ROUND_UP(xlate->host_fmt->depth, 8); | 1405 | DIV_ROUND_UP(xlate->host_fmt->depth, 8); |
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c index e305c1674cee..ba87128542e0 100644 --- a/drivers/media/video/saa7134/saa7134-video.c +++ b/drivers/media/video/saa7134/saa7134-video.c | |||
@@ -1640,15 +1640,8 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv, | |||
1640 | } | 1640 | } |
1641 | 1641 | ||
1642 | f->fmt.pix.field = field; | 1642 | f->fmt.pix.field = field; |
1643 | if (f->fmt.pix.width < 48) | 1643 | v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, |
1644 | f->fmt.pix.width = 48; | 1644 | &f->fmt.pix.height, 32, maxh, 0, 0); |
1645 | if (f->fmt.pix.height < 32) | ||
1646 | f->fmt.pix.height = 32; | ||
1647 | if (f->fmt.pix.width > maxw) | ||
1648 | f->fmt.pix.width = maxw; | ||
1649 | if (f->fmt.pix.height > maxh) | ||
1650 | f->fmt.pix.height = maxh; | ||
1651 | f->fmt.pix.width &= ~0x03; | ||
1652 | f->fmt.pix.bytesperline = | 1645 | f->fmt.pix.bytesperline = |
1653 | (f->fmt.pix.width * fmt->depth) >> 3; | 1646 | (f->fmt.pix.width * fmt->depth) >> 3; |
1654 | f->fmt.pix.sizeimage = | 1647 | f->fmt.pix.sizeimage = |
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index d369e8409ab8..0db88a53d92c 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c | |||
@@ -689,16 +689,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, | |||
689 | 689 | ||
690 | /* FIXME: calculate using depth and bus width */ | 690 | /* FIXME: calculate using depth and bus width */ |
691 | 691 | ||
692 | if (f->fmt.pix.height < 4) | 692 | v4l_bound_align_image(&f->fmt.pix.width, 2, 2560, 1, |
693 | f->fmt.pix.height = 4; | 693 | &f->fmt.pix.height, 4, 1920, 2, 0); |
694 | if (f->fmt.pix.height > 1920) | ||
695 | f->fmt.pix.height = 1920; | ||
696 | if (f->fmt.pix.width < 2) | ||
697 | f->fmt.pix.width = 2; | ||
698 | if (f->fmt.pix.width > 2560) | ||
699 | f->fmt.pix.width = 2560; | ||
700 | f->fmt.pix.width &= ~0x01; | ||
701 | f->fmt.pix.height &= ~0x03; | ||
702 | 694 | ||
703 | f->fmt.pix.bytesperline = f->fmt.pix.width * | 695 | f->fmt.pix.bytesperline = f->fmt.pix.width * |
704 | DIV_ROUND_UP(xlate->host_fmt->depth, 8); | 696 | DIV_ROUND_UP(xlate->host_fmt->depth, 8); |
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c index b30c49248217..b90e9da3167d 100644 --- a/drivers/media/video/tcm825x.c +++ b/drivers/media/video/tcm825x.c | |||
@@ -878,7 +878,7 @@ static int tcm825x_probe(struct i2c_client *client, | |||
878 | return rval; | 878 | return rval; |
879 | } | 879 | } |
880 | 880 | ||
881 | static int __exit tcm825x_remove(struct i2c_client *client) | 881 | static int tcm825x_remove(struct i2c_client *client) |
882 | { | 882 | { |
883 | struct tcm825x_sensor *sensor = i2c_get_clientdata(client); | 883 | struct tcm825x_sensor *sensor = i2c_get_clientdata(client); |
884 | 884 | ||
@@ -902,7 +902,7 @@ static struct i2c_driver tcm825x_i2c_driver = { | |||
902 | .name = TCM825X_NAME, | 902 | .name = TCM825X_NAME, |
903 | }, | 903 | }, |
904 | .probe = tcm825x_probe, | 904 | .probe = tcm825x_probe, |
905 | .remove = __exit_p(tcm825x_remove), | 905 | .remove = tcm825x_remove, |
906 | .id_table = tcm825x_id, | 906 | .id_table = tcm825x_id, |
907 | }; | 907 | }; |
908 | 908 | ||
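
Dropping __exit/__exit_p() here is more than cleanup: a remove() handler can run whenever the device is unbound (for example through sysfs unbind), even when the driver is built in, and in that configuration __exit code is discarded while __exit_p() collapses to NULL. A hedged sketch of the shape the driver ends up with (the probe handler and state struct are hypothetical):

static int example_remove(struct i2c_client *client)
{
        struct example_state *st = i2c_get_clientdata(client);

        /* release whatever probe() allocated */
        kfree(st);
        return 0;
}

static struct i2c_driver example_driver = {
        .driver = {
                .name = "example",
        },
        .probe    = example_probe,
        .remove   = example_remove,     /* plain function, no __exit_p() */
        .id_table = example_id,
};
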
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig index e4cb99c1f94b..adb1c044ad7d 100644 --- a/drivers/media/video/usbvideo/Kconfig +++ b/drivers/media/video/usbvideo/Kconfig | |||
@@ -38,10 +38,13 @@ config USB_KONICAWC | |||
38 | module will be called konicawc. | 38 | module will be called konicawc. |
39 | 39 | ||
40 | config USB_QUICKCAM_MESSENGER | 40 | config USB_QUICKCAM_MESSENGER |
41 | tristate "USB Logitech Quickcam Messenger" | 41 | tristate "USB Logitech Quickcam Messenger (DEPRECATED)" |
42 | depends on VIDEO_V4L1 | 42 | depends on VIDEO_V4L1 |
43 | select VIDEO_USBVIDEO | 43 | select VIDEO_USBVIDEO |
44 | ---help--- | 44 | ---help--- |
45 | This driver is DEPRECATED please use the gspca stv06xx module | ||
46 | instead. | ||
47 | |||
45 | Say Y or M here to enable support for the USB Logitech Quickcam | 48 | Say Y or M here to enable support for the USB Logitech Quickcam |
46 | Messenger webcam. | 49 | Messenger webcam. |
47 | 50 | ||
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index f96475626da7..b91d66a767d7 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c | |||
@@ -802,6 +802,17 @@ struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, | |||
802 | /* Decrease the module use count to match the first try_module_get. */ | 802 | /* Decrease the module use count to match the first try_module_get. */ |
803 | module_put(client->driver->driver.owner); | 803 | module_put(client->driver->driver.owner); |
804 | 804 | ||
805 | if (sd) { | ||
806 | /* We return errors from v4l2_subdev_call only if we have the | ||
807 | callback as the .s_config is not mandatory */ | ||
808 | int err = v4l2_subdev_call(sd, core, s_config, 0, NULL); | ||
809 | |||
810 | if (err && err != -ENOIOCTLCMD) { | ||
811 | v4l2_device_unregister_subdev(sd); | ||
812 | sd = NULL; | ||
813 | } | ||
814 | } | ||
815 | |||
805 | error: | 816 | error: |
806 | /* If we have a client but no subdev, then something went wrong and | 817 | /* If we have a client but no subdev, then something went wrong and |
807 | we must unregister the client. */ | 818 | we must unregister the client. */ |
@@ -852,6 +863,17 @@ struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev, | |||
852 | /* Decrease the module use count to match the first try_module_get. */ | 863 | /* Decrease the module use count to match the first try_module_get. */ |
853 | module_put(client->driver->driver.owner); | 864 | module_put(client->driver->driver.owner); |
854 | 865 | ||
866 | if (sd) { | ||
867 | /* We return errors from v4l2_subdev_call only if we have the | ||
868 | callback as the .s_config is not mandatory */ | ||
869 | int err = v4l2_subdev_call(sd, core, s_config, 0, NULL); | ||
870 | |||
871 | if (err && err != -ENOIOCTLCMD) { | ||
872 | v4l2_device_unregister_subdev(sd); | ||
873 | sd = NULL; | ||
874 | } | ||
875 | } | ||
876 | |||
855 | error: | 877 | error: |
856 | /* If we have a client but no subdev, then something went wrong and | 878 | /* If we have a client but no subdev, then something went wrong and |
857 | we must unregister the client. */ | 879 | we must unregister the client. */ |
@@ -872,6 +894,89 @@ struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev | |||
872 | } | 894 | } |
873 | EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr); | 895 | EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr); |
874 | 896 | ||
897 | /* Load an i2c sub-device. */ | ||
898 | struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, | ||
899 | struct i2c_adapter *adapter, const char *module_name, | ||
900 | struct i2c_board_info *info, const unsigned short *probe_addrs) | ||
901 | { | ||
902 | struct v4l2_subdev *sd = NULL; | ||
903 | struct i2c_client *client; | ||
904 | |||
905 | BUG_ON(!v4l2_dev); | ||
906 | |||
907 | if (module_name) | ||
908 | request_module(module_name); | ||
909 | |||
910 | /* Create the i2c client */ | ||
911 | if (info->addr == 0 && probe_addrs) | ||
912 | client = i2c_new_probed_device(adapter, info, probe_addrs); | ||
913 | else | ||
914 | client = i2c_new_device(adapter, info); | ||
915 | |||
916 | /* Note: by loading the module first we are certain that c->driver | ||
917 | will be set if the driver was found. If the module was not loaded | ||
918 | first, then the i2c core tries to delay-load the module for us, | ||
919 | and then c->driver is still NULL until the module is finally | ||
920 | loaded. This delay-load mechanism doesn't work if other drivers | ||
921 | want to use the i2c device, so explicitly loading the module | ||
922 | is the best alternative. */ | ||
923 | if (client == NULL || client->driver == NULL) | ||
924 | goto error; | ||
925 | |||
926 | /* Lock the module so we can safely get the v4l2_subdev pointer */ | ||
927 | if (!try_module_get(client->driver->driver.owner)) | ||
928 | goto error; | ||
929 | sd = i2c_get_clientdata(client); | ||
930 | |||
931 | /* Register with the v4l2_device which increases the module's | ||
932 | use count as well. */ | ||
933 | if (v4l2_device_register_subdev(v4l2_dev, sd)) | ||
934 | sd = NULL; | ||
935 | /* Decrease the module use count to match the first try_module_get. */ | ||
936 | module_put(client->driver->driver.owner); | ||
937 | |||
938 | if (sd) { | ||
939 | /* We return errors from v4l2_subdev_call only if we have the | ||
940 | callback as the .s_config is not mandatory */ | ||
941 | int err = v4l2_subdev_call(sd, core, s_config, | ||
942 | info->irq, info->platform_data); | ||
943 | |||
944 | if (err && err != -ENOIOCTLCMD) { | ||
945 | v4l2_device_unregister_subdev(sd); | ||
946 | sd = NULL; | ||
947 | } | ||
948 | } | ||
949 | |||
950 | error: | ||
951 | /* If we have a client but no subdev, then something went wrong and | ||
952 | we must unregister the client. */ | ||
953 | if (client && sd == NULL) | ||
954 | i2c_unregister_device(client); | ||
955 | return sd; | ||
956 | } | ||
957 | EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); | ||
958 | |||
959 | struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, | ||
960 | struct i2c_adapter *adapter, | ||
961 | const char *module_name, const char *client_type, | ||
962 | int irq, void *platform_data, | ||
963 | u8 addr, const unsigned short *probe_addrs) | ||
964 | { | ||
965 | struct i2c_board_info info; | ||
966 | |||
967 | /* Setup the i2c board info with the device type and | ||
968 | the device address. */ | ||
969 | memset(&info, 0, sizeof(info)); | ||
970 | strlcpy(info.type, client_type, sizeof(info.type)); | ||
971 | info.addr = addr; | ||
972 | info.irq = irq; | ||
973 | info.platform_data = platform_data; | ||
974 | |||
975 | return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name, | ||
976 | &info, probe_addrs); | ||
977 | } | ||
978 | EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg); | ||
979 | |||
875 | /* Return i2c client address of v4l2_subdev. */ | 980 | /* Return i2c client address of v4l2_subdev. */ |
876 | unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) | 981 | unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) |
877 | { | 982 | { |
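
For bridge drivers the practical entry points are the two new helpers: v4l2_i2c_new_subdev_board() when the caller already has an i2c_board_info, and v4l2_i2c_new_subdev_cfg() when it only has a client type, address, IRQ and platform data. A hypothetical caller might look like this (device names, address and structure fields are illustrative, not taken from any in-tree driver):

static int example_attach_decoder(struct example_dev *dev)
{
        struct v4l2_subdev *sd;

        sd = v4l2_i2c_new_subdev_cfg(&dev->v4l2_dev, &dev->i2c_adap,
                                     "saa7115", "saa7115",
                                     0, NULL,      /* no IRQ, no platform data */
                                     0x21, NULL);  /* fixed address, no probe list */
        if (sd == NULL)
                return -ENODEV;

        dev->decoder = sd;
        return 0;
}
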
@@ -916,4 +1021,78 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) | |||
916 | } | 1021 | } |
917 | EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs); | 1022 | EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs); |
918 | 1023 | ||
919 | #endif | 1024 | #endif /* defined(CONFIG_I2C) */ |
1025 | |||
1026 | /* Clamp x to be between min and max, aligned to a multiple of 2^align. min | ||
1027 | * and max don't have to be aligned, but there must be at least one valid | ||
1028 | * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples | ||
1029 | * of 16 between 17 and 31. */ | ||
1030 | static unsigned int clamp_align(unsigned int x, unsigned int min, | ||
1031 | unsigned int max, unsigned int align) | ||
1032 | { | ||
1033 | /* Bits that must be zero to be aligned */ | ||
1034 | unsigned int mask = ~((1 << align) - 1); | ||
1035 | |||
1036 | /* Round to nearest aligned value */ | ||
1037 | if (align) | ||
1038 | x = (x + (1 << (align - 1))) & mask; | ||
1039 | |||
1040 | /* Clamp to aligned value of min and max */ | ||
1041 | if (x < min) | ||
1042 | x = (min + ~mask) & mask; | ||
1043 | else if (x > max) | ||
1044 | x = max & mask; | ||
1045 | |||
1046 | return x; | ||
1047 | } | ||
1048 | |||
1049 | /* Bound an image to have a width between wmin and wmax, and height between | ||
1050 | * hmin and hmax, inclusive. Additionally, the width will be a multiple of | ||
1051 | * 2^walign, the height will be a multiple of 2^halign, and the overall size | ||
1052 | * (width*height) will be a multiple of 2^salign. The image may be shrunk | ||
1053 | * or enlarged to fit the alignment constraints. | ||
1054 | * | ||
1055 | * The width or height maximum must not be smaller than the corresponding | ||
1056 | * minimum. The alignments must not be so high there are no possible image | ||
1057 | * sizes within the allowed bounds. wmin and hmin must be at least 1 | ||
1058 | * (don't use 0). If you don't care about a certain alignment, specify 0, | ||
1059 | * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If | ||
1060 | * you only want to adjust downward, specify a maximum that's the same as | ||
1061 | * the initial value. | ||
1062 | */ | ||
1063 | void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, | ||
1064 | unsigned int walign, | ||
1065 | u32 *h, unsigned int hmin, unsigned int hmax, | ||
1066 | unsigned int halign, unsigned int salign) | ||
1067 | { | ||
1068 | *w = clamp_align(*w, wmin, wmax, walign); | ||
1069 | *h = clamp_align(*h, hmin, hmax, halign); | ||
1070 | |||
1071 | /* Usually we don't need to align the size and are done now. */ | ||
1072 | if (!salign) | ||
1073 | return; | ||
1074 | |||
1075 | /* How much alignment do we have? */ | ||
1076 | walign = __ffs(*w); | ||
1077 | halign = __ffs(*h); | ||
1078 | /* Enough to satisfy the image alignment? */ | ||
1079 | if (walign + halign < salign) { | ||
1080 | /* Max walign where there is still a valid width */ | ||
1081 | unsigned int wmaxa = __fls(wmax ^ (wmin - 1)); | ||
1082 | /* Max halign where there is still a valid height */ | ||
1083 | unsigned int hmaxa = __fls(hmax ^ (hmin - 1)); | ||
1084 | |||
1085 | /* up the smaller alignment until we have enough */ | ||
1086 | do { | ||
1087 | if (halign >= hmaxa || | ||
1088 | (walign <= halign && walign < wmaxa)) { | ||
1089 | *w = clamp_align(*w, wmin, wmax, walign + 1); | ||
1090 | walign = __ffs(*w); | ||
1091 | } else { | ||
1092 | *h = clamp_align(*h, hmin, hmax, halign + 1); | ||
1093 | halign = __ffs(*h); | ||
1094 | } | ||
1095 | } while (halign + walign < salign); | ||
1096 | } | ||
1097 | } | ||
1098 | EXPORT_SYMBOL_GPL(v4l_bound_align_image); | ||
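
A worked example of the new helper, using made-up request values and the PXA-style constraints from earlier in the series (width a multiple of 2, height unconstrained, width * height a multiple of 16):

u32 w = 639, h = 479;

v4l_bound_align_image(&w, 48, 2048, 1,  /* width:  48..2048, multiple of 2  */
                      &h, 32, 2048, 0,  /* height: 32..2048, any value      */
                      4);               /* width * height a multiple of 16  */

/* w is rounded to 640 and h stays 479; 640 is already divisible by 16, so
 * the size constraint is satisfied and neither dimension needs any further
 * shrinking or growing. */
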
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c index fbfefae7886f..cd7266858462 100644 --- a/drivers/media/video/vivi.c +++ b/drivers/media/video/vivi.c | |||
@@ -883,15 +883,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, | |||
883 | maxh = norm_maxh(); | 883 | maxh = norm_maxh(); |
884 | 884 | ||
885 | f->fmt.pix.field = field; | 885 | f->fmt.pix.field = field; |
886 | if (f->fmt.pix.height < 32) | 886 | v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, |
887 | f->fmt.pix.height = 32; | 887 | &f->fmt.pix.height, 32, maxh, 0, 0); |
888 | if (f->fmt.pix.height > maxh) | ||
889 | f->fmt.pix.height = maxh; | ||
890 | if (f->fmt.pix.width < 48) | ||
891 | f->fmt.pix.width = 48; | ||
892 | if (f->fmt.pix.width > maxw) | ||
893 | f->fmt.pix.width = maxw; | ||
894 | f->fmt.pix.width &= ~0x03; | ||
895 | f->fmt.pix.bytesperline = | 888 | f->fmt.pix.bytesperline = |
896 | (f->fmt.pix.width * fmt->depth) >> 3; | 889 | (f->fmt.pix.width * fmt->depth) >> 3; |
897 | f->fmt.pix.sizeimage = | 890 | f->fmt.pix.sizeimage = |
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c index f59b2bd07e89..6c3f23e31b5c 100644 --- a/drivers/media/video/w9968cf.c +++ b/drivers/media/video/w9968cf.c | |||
@@ -460,7 +460,7 @@ static int w9968cf_set_picture(struct w9968cf_device*, struct video_picture); | |||
460 | static int w9968cf_set_window(struct w9968cf_device*, struct video_window); | 460 | static int w9968cf_set_window(struct w9968cf_device*, struct video_window); |
461 | static int w9968cf_postprocess_frame(struct w9968cf_device*, | 461 | static int w9968cf_postprocess_frame(struct w9968cf_device*, |
462 | struct w9968cf_frame_t*); | 462 | struct w9968cf_frame_t*); |
463 | static int w9968cf_adjust_window_size(struct w9968cf_device*, u16* w, u16* h); | 463 | static int w9968cf_adjust_window_size(struct w9968cf_device*, u32 *w, u32 *h); |
464 | static void w9968cf_init_framelist(struct w9968cf_device*); | 464 | static void w9968cf_init_framelist(struct w9968cf_device*); |
465 | static void w9968cf_push_frame(struct w9968cf_device*, u8 f_num); | 465 | static void w9968cf_push_frame(struct w9968cf_device*, u8 f_num); |
466 | static void w9968cf_pop_frame(struct w9968cf_device*,struct w9968cf_frame_t**); | 466 | static void w9968cf_pop_frame(struct w9968cf_device*,struct w9968cf_frame_t**); |
@@ -1763,8 +1763,7 @@ w9968cf_set_window(struct w9968cf_device* cam, struct video_window win) | |||
1763 | #define UNSC(x) ((x) >> 10) | 1763 | #define UNSC(x) ((x) >> 10) |
1764 | 1764 | ||
1765 | /* Make sure we are using a supported resolution */ | 1765 | /* Make sure we are using a supported resolution */ |
1766 | if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width, | 1766 | if ((err = w9968cf_adjust_window_size(cam, &win.width, &win.height))) |
1767 | (u16*)&win.height))) | ||
1768 | goto error; | 1767 | goto error; |
1769 | 1768 | ||
1770 | /* Scaling factors */ | 1769 | /* Scaling factors */ |
@@ -1914,12 +1913,9 @@ error: | |||
1914 | Return 0 on success, -1 otherwise. | 1913 | Return 0 on success, -1 otherwise. |
1915 | --------------------------------------------------------------------------*/ | 1914 | --------------------------------------------------------------------------*/ |
1916 | static int | 1915 | static int |
1917 | w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height) | 1916 | w9968cf_adjust_window_size(struct w9968cf_device *cam, u32 *width, u32 *height) |
1918 | { | 1917 | { |
1919 | u16 maxw, maxh; | 1918 | unsigned int maxw, maxh, align; |
1920 | |||
1921 | if ((*width < cam->minwidth) || (*height < cam->minheight)) | ||
1922 | return -ERANGE; | ||
1923 | 1919 | ||
1924 | maxw = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) && | 1920 | maxw = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) && |
1925 | w9968cf_vpp ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth) | 1921 | w9968cf_vpp ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth) |
@@ -1927,16 +1923,10 @@ w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height) | |||
1927 | maxh = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) && | 1923 | maxh = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) && |
1928 | w9968cf_vpp ? max((u16)W9968CF_MAX_HEIGHT, cam->maxheight) | 1924 | w9968cf_vpp ? max((u16)W9968CF_MAX_HEIGHT, cam->maxheight) |
1929 | : cam->maxheight; | 1925 | : cam->maxheight; |
1926 | align = (cam->vpp_flag & VPP_DECOMPRESSION) ? 4 : 0; | ||
1930 | 1927 | ||
1931 | if (*width > maxw) | 1928 | v4l_bound_align_image(width, cam->minwidth, maxw, align, |
1932 | *width = maxw; | 1929 | height, cam->minheight, maxh, align, 0); |
1933 | if (*height > maxh) | ||
1934 | *height = maxh; | ||
1935 | |||
1936 | if (cam->vpp_flag & VPP_DECOMPRESSION) { | ||
1937 | *width &= ~15L; /* multiple of 16 */ | ||
1938 | *height &= ~15L; | ||
1939 | } | ||
1940 | 1930 | ||
1941 | PDBGG("Window size adjusted w=%u, h=%u ", *width, *height) | 1931 | PDBGG("Window size adjusted w=%u, h=%u ", *width, *height) |
1942 | 1932 | ||
@@ -3043,8 +3033,8 @@ static long w9968cf_v4l_ioctl(struct file *filp, | |||
3043 | if (win.clipcount != 0 || win.flags != 0) | 3033 | if (win.clipcount != 0 || win.flags != 0) |
3044 | return -EINVAL; | 3034 | return -EINVAL; |
3045 | 3035 | ||
3046 | if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width, | 3036 | if ((err = w9968cf_adjust_window_size(cam, &win.width, |
3047 | (u16*)&win.height))) { | 3037 | &win.height))) { |
3048 | DBG(4, "Resolution not supported (%ux%u). " | 3038 | DBG(4, "Resolution not supported (%ux%u). " |
3049 | "VIDIOCSWIN failed", win.width, win.height) | 3039 | "VIDIOCSWIN failed", win.width, win.height) |
3050 | return err; | 3040 | return err; |
@@ -3116,6 +3106,7 @@ static long w9968cf_v4l_ioctl(struct file *filp, | |||
3116 | { | 3106 | { |
3117 | struct video_mmap mmap; | 3107 | struct video_mmap mmap; |
3118 | struct w9968cf_frame_t* fr; | 3108 | struct w9968cf_frame_t* fr; |
3109 | u32 w, h; | ||
3119 | int err = 0; | 3110 | int err = 0; |
3120 | 3111 | ||
3121 | if (copy_from_user(&mmap, arg, sizeof(mmap))) | 3112 | if (copy_from_user(&mmap, arg, sizeof(mmap))) |
@@ -3164,8 +3155,10 @@ static long w9968cf_v4l_ioctl(struct file *filp, | |||
3164 | } | 3155 | } |
3165 | } | 3156 | } |
3166 | 3157 | ||
3167 | if ((err = w9968cf_adjust_window_size(cam, (u16*)&mmap.width, | 3158 | w = mmap.width; h = mmap.height; |
3168 | (u16*)&mmap.height))) { | 3159 | err = w9968cf_adjust_window_size(cam, &w, &h); |
3160 | mmap.width = w; mmap.height = h; | ||
3161 | if (err) { | ||
3169 | DBG(4, "Resolution not supported (%dx%d). " | 3162 | DBG(4, "Resolution not supported (%dx%d). " |
3170 | "VIDIOCMCAPTURE failed", | 3163 | "VIDIOCMCAPTURE failed", |
3171 | mmap.width, mmap.height) | 3164 | mmap.width, mmap.height) |
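
The w9968cf changes follow from v4l_bound_align_image() taking u32 pointers, and they retire a fragile idiom along the way: casting the address of a 32-bit field to u16 * only happens to work on little-endian machines. Copying through correctly typed temporaries, as the VIDIOCMCAPTURE path now does, is the safe shape (illustrative fragment):

u32 w = mmap.width, h = mmap.height;    /* struct video_mmap uses plain int fields */
int err;

err = w9968cf_adjust_window_size(cam, &w, &h);
mmap.width = w;
mmap.height = h;
if (err)
        return err;
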
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c index 643cccaa1aab..3d7df32a3d87 100644 --- a/drivers/media/video/zoran/zoran_driver.c +++ b/drivers/media/video/zoran/zoran_driver.c | |||
@@ -2088,16 +2088,10 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh, | |||
2088 | return -EINVAL; | 2088 | return -EINVAL; |
2089 | } | 2089 | } |
2090 | 2090 | ||
2091 | bpp = (zoran_formats[i].depth + 7) / 8; | 2091 | bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8); |
2092 | fmt->fmt.pix.width &= ~((bpp == 2) ? 1 : 3); | 2092 | v4l_bound_align_image( |
2093 | if (fmt->fmt.pix.width > BUZ_MAX_WIDTH) | 2093 | &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2, |
2094 | fmt->fmt.pix.width = BUZ_MAX_WIDTH; | 2094 | &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0); |
2095 | if (fmt->fmt.pix.width < BUZ_MIN_WIDTH) | ||
2096 | fmt->fmt.pix.width = BUZ_MIN_WIDTH; | ||
2097 | if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT) | ||
2098 | fmt->fmt.pix.height = BUZ_MAX_HEIGHT; | ||
2099 | if (fmt->fmt.pix.height < BUZ_MIN_HEIGHT) | ||
2100 | fmt->fmt.pix.height = BUZ_MIN_HEIGHT; | ||
2101 | mutex_unlock(&zr->resource_lock); | 2095 | mutex_unlock(&zr->resource_lock); |
2102 | 2096 | ||
2103 | return 0; | 2097 | return 0; |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 20e0b447e8e8..55ff25244af4 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -3518,7 +3518,7 @@ retry_page: | |||
3518 | } else | 3518 | } else |
3519 | mptsas_volume_delete(ioc, sas_info->fw.id); | 3519 | mptsas_volume_delete(ioc, sas_info->fw.id); |
3520 | } | 3520 | } |
3521 | mutex_lock(&ioc->sas_device_info_mutex); | 3521 | mutex_unlock(&ioc->sas_device_info_mutex); |
3522 | 3522 | ||
3523 | /* expanders */ | 3523 | /* expanders */ |
3524 | mutex_lock(&ioc->sas_topology_mutex); | 3524 | mutex_lock(&ioc->sas_topology_mutex); |
@@ -3549,7 +3549,7 @@ retry_page: | |||
3549 | goto redo_expander_scan; | 3549 | goto redo_expander_scan; |
3550 | } | 3550 | } |
3551 | } | 3551 | } |
3552 | mutex_lock(&ioc->sas_topology_mutex); | 3552 | mutex_unlock(&ioc->sas_topology_mutex); |
3553 | } | 3553 | } |
3554 | 3554 | ||
3555 | /** | 3555 | /** |
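
Both mptsas hunks fix the same pairing bug: the scan loops end by calling mutex_lock() on a mutex that is already held, which self-deadlocks, where a mutex_unlock() was clearly intended. The intended shape, sketched with illustrative names:

mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
        /* ... refresh or delete each device ... */
}
mutex_unlock(&ioc->sas_device_info_mutex);      /* release, do not re-lock */
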
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index cd1008c19cd7..ca54996ffd0e 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c | |||
@@ -101,6 +101,12 @@ | |||
101 | #define twl_has_usb() false | 101 | #define twl_has_usb() false |
102 | #endif | 102 | #endif |
103 | 103 | ||
104 | #if defined(CONFIG_TWL4030_WATCHDOG) || \ | ||
105 | defined(CONFIG_TWL4030_WATCHDOG_MODULE) | ||
106 | #define twl_has_watchdog() true | ||
107 | #else | ||
108 | #define twl_has_watchdog() false | ||
109 | #endif | ||
104 | 110 | ||
105 | /* Triton Core internal information (BEGIN) */ | 111 | /* Triton Core internal information (BEGIN) */ |
106 | 112 | ||
@@ -526,6 +532,12 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) | |||
526 | usb_transceiver = child; | 532 | usb_transceiver = child; |
527 | } | 533 | } |
528 | 534 | ||
535 | if (twl_has_watchdog()) { | ||
536 | child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0); | ||
537 | if (IS_ERR(child)) | ||
538 | return PTR_ERR(child); | ||
539 | } | ||
540 | |||
529 | if (twl_has_regulator()) { | 541 | if (twl_has_regulator()) { |
530 | /* | 542 | /* |
531 | child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); | 543 | child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); |
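
The new child is only a platform device named "twl4030_wdt"; the corresponding watchdog driver binds to it purely by name. A sketch of the consumer side (hypothetical identifiers, not the actual twl4030_wdt driver):

static struct platform_driver twl4030_wdt_sketch = {
        .probe  = twl4030_wdt_sketch_probe,     /* hypothetical */
        .driver = {
                .name   = "twl4030_wdt",        /* must match the add_child() name above */
                .owner  = THIS_MODULE,
        },
};
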
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 1dc721517e4c..c155bd3ec9f1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1725,6 +1725,7 @@ config TLAN | |||
1725 | 1725 | ||
1726 | config KS8842 | 1726 | config KS8842 |
1727 | tristate "Micrel KSZ8842" | 1727 | tristate "Micrel KSZ8842" |
1728 | depends on HAS_IOMEM | ||
1728 | help | 1729 | help |
1729 | This platform driver is for Micrel KSZ8842 chip. | 1730 | This platform driver is for Micrel KSZ8842 chip. |
1730 | 1731 | ||
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 38f1c3375d7f..b70cc99962fc 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev) | |||
6825 | return 0; | 6825 | return 0; |
6826 | } | 6826 | } |
6827 | 6827 | ||
6828 | static u32 | ||
6829 | bnx2_get_link(struct net_device *dev) | ||
6830 | { | ||
6831 | struct bnx2 *bp = netdev_priv(dev); | ||
6832 | |||
6833 | return bp->link_up; | ||
6834 | } | ||
6835 | |||
6828 | static int | 6836 | static int |
6829 | bnx2_get_eeprom_len(struct net_device *dev) | 6837 | bnx2_get_eeprom_len(struct net_device *dev) |
6830 | { | 6838 | { |
@@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = { | |||
7392 | .get_wol = bnx2_get_wol, | 7400 | .get_wol = bnx2_get_wol, |
7393 | .set_wol = bnx2_set_wol, | 7401 | .set_wol = bnx2_set_wol, |
7394 | .nway_reset = bnx2_nway_reset, | 7402 | .nway_reset = bnx2_nway_reset, |
7395 | .get_link = ethtool_op_get_link, | 7403 | .get_link = bnx2_get_link, |
7396 | .get_eeprom_len = bnx2_get_eeprom_len, | 7404 | .get_eeprom_len = bnx2_get_eeprom_len, |
7397 | .get_eeprom = bnx2_get_eeprom, | 7405 | .get_eeprom = bnx2_get_eeprom, |
7398 | .set_eeprom = bnx2_set_eeprom, | 7406 | .set_eeprom = bnx2_set_eeprom, |
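
The generic ethtool_op_get_link() only reports the net-device carrier flag; switching to a driver callback lets ethtool report bnx2's own link tracking (bp->link_up), which can differ from the carrier state around resets. For contrast, the generic helper is essentially:

u32 ethtool_op_get_link(struct net_device *dev)
{
        return netif_carrier_ok(dev) ? 1 : 0;
}
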
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index d5e18812bf49..33821a81cbf8 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING | |||
36 | If unsure, say Y. | 36 | If unsure, say Y. |
37 | 37 | ||
38 | config CAN_SJA1000 | 38 | config CAN_SJA1000 |
39 | depends on CAN_DEV | 39 | depends on CAN_DEV && HAS_IOMEM |
40 | tristate "Philips SJA1000" | 40 | tristate "Philips SJA1000" |
41 | ---help--- | 41 | ---help--- |
42 | Driver for the SJA1000 CAN controllers from Philips or NXP | 42 | Driver for the SJA1000 CAN controllers from Philips or NXP |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 44f77eb1180f..4d1515f45ba2 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -25,8 +25,6 @@ | |||
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/ethtool.h> | 26 | #include <linux/ethtool.h> |
27 | #include <linux/if_vlan.h> | 27 | #include <linux/if_vlan.h> |
28 | #include <linux/module.h> | ||
29 | |||
30 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 28 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
31 | #define BCM_VLAN 1 | 29 | #define BCM_VLAN 1 |
32 | #endif | 30 | #endif |
@@ -2521,9 +2519,9 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) | |||
2521 | struct cnic_dev *cdev; | 2519 | struct cnic_dev *cdev; |
2522 | struct cnic_local *cp; | 2520 | struct cnic_local *cp; |
2523 | struct cnic_eth_dev *ethdev = NULL; | 2521 | struct cnic_eth_dev *ethdev = NULL; |
2524 | struct cnic_eth_dev *(*probe)(void *) = NULL; | 2522 | struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; |
2525 | 2523 | ||
2526 | probe = __symbol_get("bnx2_cnic_probe"); | 2524 | probe = symbol_get(bnx2_cnic_probe); |
2527 | if (probe) { | 2525 | if (probe) { |
2528 | ethdev = (*probe)(dev); | 2526 | ethdev = (*probe)(dev); |
2529 | symbol_put_addr(probe); | 2527 | symbol_put_addr(probe); |
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 06380963a34e..d1bce27ee99e 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h | |||
@@ -296,4 +296,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); | |||
296 | 296 | ||
297 | extern int cnic_unregister_driver(int ulp_type); | 297 | extern int cnic_unregister_driver(int ulp_type); |
298 | 298 | ||
299 | extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev); | ||
300 | |||
299 | #endif | 301 | #endif |
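
With the extern declaration added to cnic_if.h, the cnic change above can use symbol_get(bnx2_cnic_probe), which resolves the symbol with its real C type instead of the untyped pointer returned by __symbol_get("bnx2_cnic_probe"), so a signature mismatch now fails at compile time. The idiom, sketched:

struct cnic_eth_dev *(*probe)(struct net_device *);

probe = symbol_get(bnx2_cnic_probe);    /* takes a module reference if found */
if (probe) {
        struct cnic_eth_dev *ethdev = probe(dev);

        symbol_put_addr(probe);         /* drop the reference */
        /* ... use ethdev ... */
}
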
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 5887e4764d22..f96948be0a44 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -399,11 +399,14 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
399 | if (!mtts) | 399 | if (!mtts) |
400 | return -ENOMEM; | 400 | return -ENOMEM; |
401 | 401 | ||
402 | dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, | ||
403 | npages * sizeof (u64), DMA_TO_DEVICE); | ||
404 | |||
402 | for (i = 0; i < npages; ++i) | 405 | for (i = 0; i < npages; ++i) |
403 | mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | 406 | mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); |
404 | 407 | ||
405 | dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, | 408 | dma_sync_single_for_device(&dev->pdev->dev, dma_handle, |
406 | npages * sizeof (u64), DMA_TO_DEVICE); | 409 | npages * sizeof (u64), DMA_TO_DEVICE); |
407 | 410 | ||
408 | return 0; | 411 | return 0; |
409 | } | 412 | } |
@@ -547,11 +550,14 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list | |||
547 | /* Make sure MPT status is visible before writing MTT entries */ | 550 | /* Make sure MPT status is visible before writing MTT entries */ |
548 | wmb(); | 551 | wmb(); |
549 | 552 | ||
553 | dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, | ||
554 | npages * sizeof(u64), DMA_TO_DEVICE); | ||
555 | |||
550 | for (i = 0; i < npages; ++i) | 556 | for (i = 0; i < npages; ++i) |
551 | fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | 557 | fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); |
552 | 558 | ||
553 | dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, | 559 | dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, |
554 | npages * sizeof(u64), DMA_TO_DEVICE); | 560 | npages * sizeof(u64), DMA_TO_DEVICE); |
555 | 561 | ||
556 | fmr->mpt->key = cpu_to_be32(key); | 562 | fmr->mpt->key = cpu_to_be32(key); |
557 | fmr->mpt->lkey = cpu_to_be32(key); | 563 | fmr->mpt->lkey = cpu_to_be32(key); |
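
Both mlx4 hunks apply the standard streaming-DMA rule for CPU writes: sync the buffer to the CPU before touching it, fill it, then sync it back to the device; the old code issued only a sync_for_cpu, and after the writes at that. The pattern in general form (illustrative helper, not from the mlx4 sources):

static void fill_dma_buffer(struct device *dev, dma_addr_t handle,
                            __be64 *buf, size_t n, const u64 *src)
{
        size_t i;

        dma_sync_single_for_cpu(dev, handle, n * sizeof(*buf), DMA_TO_DEVICE);

        for (i = 0; i < n; i++)
                buf[i] = cpu_to_be64(src[i]);

        dma_sync_single_for_device(dev, handle, n * sizeof(*buf), DMA_TO_DEVICE);
}
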
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index bdb143d2b5c7..055bb61d6e77 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -944,28 +944,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) | |||
944 | u32 val = 0; | 944 | u32 val = 0; |
945 | int retries = 60; | 945 | int retries = 60; |
946 | 946 | ||
947 | if (!pegtune_val) { | 947 | if (pegtune_val) |
948 | do { | 948 | return 0; |
949 | val = NXRD32(adapter, CRB_CMDPEG_STATE); | ||
950 | 949 | ||
951 | if (val == PHAN_INITIALIZE_COMPLETE || | 950 | do { |
952 | val == PHAN_INITIALIZE_ACK) | 951 | val = NXRD32(adapter, CRB_CMDPEG_STATE); |
953 | return 0; | ||
954 | 952 | ||
955 | msleep(500); | 953 | switch (val) { |
954 | case PHAN_INITIALIZE_COMPLETE: | ||
955 | case PHAN_INITIALIZE_ACK: | ||
956 | return 0; | ||
957 | case PHAN_INITIALIZE_FAILED: | ||
958 | goto out_err; | ||
959 | default: | ||
960 | break; | ||
961 | } | ||
956 | 962 | ||
957 | } while (--retries); | 963 | msleep(500); |
958 | 964 | ||
959 | if (!retries) { | 965 | } while (--retries); |
960 | pegtune_val = NXRD32(adapter, | ||
961 | NETXEN_ROMUSB_GLB_PEGTUNE_DONE); | ||
962 | printk(KERN_WARNING "netxen_phantom_init: init failed, " | ||
963 | "pegtune_val=%x\n", pegtune_val); | ||
964 | return -1; | ||
965 | } | ||
966 | } | ||
967 | 966 | ||
968 | return 0; | 967 | NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); |
968 | |||
969 | out_err: | ||
970 | dev_warn(&adapter->pdev->dev, "firmware init failed\n"); | ||
971 | return -EIO; | ||
969 | } | 972 | } |
970 | 973 | ||
971 | static int | 974 | static int |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 71daa3d5f114..2919a2d12bf4 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) | |||
705 | first_driver = (adapter->ahw.pci_func == 0); | 705 | first_driver = (adapter->ahw.pci_func == 0); |
706 | 706 | ||
707 | if (!first_driver) | 707 | if (!first_driver) |
708 | return 0; | 708 | goto wait_init; |
709 | 709 | ||
710 | first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); | 710 | first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); |
711 | 711 | ||
@@ -752,6 +752,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw) | |||
752 | | (_NETXEN_NIC_LINUX_SUBVERSION); | 752 | | (_NETXEN_NIC_LINUX_SUBVERSION); |
753 | NXWR32(adapter, CRB_DRIVER_VERSION, val); | 753 | NXWR32(adapter, CRB_DRIVER_VERSION, val); |
754 | 754 | ||
755 | wait_init: | ||
755 | /* Handshake with the card before we register the devices. */ | 756 | /* Handshake with the card before we register the devices. */ |
756 | err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | 757 | err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); |
757 | if (err) { | 758 | if (err) { |
@@ -1178,6 +1179,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
1178 | free_netdev(netdev); | 1179 | free_netdev(netdev); |
1179 | } | 1180 | } |
1180 | 1181 | ||
1182 | #ifdef CONFIG_PM | ||
1181 | static int | 1183 | static int |
1182 | netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) | 1184 | netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) |
1183 | { | 1185 | { |
@@ -1242,6 +1244,7 @@ netxen_nic_resume(struct pci_dev *pdev) | |||
1242 | 1244 | ||
1243 | return 0; | 1245 | return 0; |
1244 | } | 1246 | } |
1247 | #endif | ||
1245 | 1248 | ||
1246 | static int netxen_nic_open(struct net_device *netdev) | 1249 | static int netxen_nic_open(struct net_device *netdev) |
1247 | { | 1250 | { |
@@ -1771,8 +1774,10 @@ static struct pci_driver netxen_driver = { | |||
1771 | .id_table = netxen_pci_tbl, | 1774 | .id_table = netxen_pci_tbl, |
1772 | .probe = netxen_nic_probe, | 1775 | .probe = netxen_nic_probe, |
1773 | .remove = __devexit_p(netxen_nic_remove), | 1776 | .remove = __devexit_p(netxen_nic_remove), |
1777 | #ifdef CONFIG_PM | ||
1774 | .suspend = netxen_nic_suspend, | 1778 | .suspend = netxen_nic_suspend, |
1775 | .resume = netxen_nic_resume | 1779 | .resume = netxen_nic_resume |
1780 | #endif | ||
1776 | }; | 1781 | }; |
1777 | 1782 | ||
1778 | /* Driver Registration on NetXen card */ | 1783 | /* Driver Registration on NetXen card */ |
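
Wrapping both the callbacks and the .suspend/.resume initializers in CONFIG_PM keeps !CONFIG_PM builds free of "defined but not used" warnings and dead code. The resulting pattern, reduced to a generic sketch (names hypothetical):

#ifdef CONFIG_PM
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        /* quiesce the device, save state */
        return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
        /* restore state, restart the device */
        return 0;
}
#endif

static struct pci_driver foo_driver = {
        .name    = "foo",
        .probe   = foo_probe,
        .remove  = __devexit_p(foo_remove),
#ifdef CONFIG_PM
        .suspend = foo_suspend,
        .resume  = foo_resume,
#endif
};
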
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index bbc6d4d3cc94..3e4b67aaa6ea 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3142 | (void __iomem *)port_regs; | 3142 | (void __iomem *)port_regs; |
3143 | u32 delay = 10; | 3143 | u32 delay = 10; |
3144 | int status = 0; | 3144 | int status = 0; |
3145 | unsigned long hw_flags = 0; | ||
3145 | 3146 | ||
3146 | if(ql_mii_setup(qdev)) | 3147 | if(ql_mii_setup(qdev)) |
3147 | return -1; | 3148 | return -1; |
@@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3150 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 3151 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, |
3151 | (ISP_SERIAL_PORT_IF_WE | | 3152 | (ISP_SERIAL_PORT_IF_WE | |
3152 | (ISP_SERIAL_PORT_IF_WE << 16))); | 3153 | (ISP_SERIAL_PORT_IF_WE << 16))); |
3153 | 3154 | /* Give the PHY time to come out of reset. */ | |
3155 | mdelay(100); | ||
3154 | qdev->port_link_state = LS_DOWN; | 3156 | qdev->port_link_state = LS_DOWN; |
3155 | netif_carrier_off(qdev->ndev); | 3157 | netif_carrier_off(qdev->ndev); |
3156 | 3158 | ||
@@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3350 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); | 3352 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); |
3351 | if (value & PORT_STATUS_IC) | 3353 | if (value & PORT_STATUS_IC) |
3352 | break; | 3354 | break; |
3355 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3353 | msleep(500); | 3356 | msleep(500); |
3357 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3354 | } while (--delay); | 3358 | } while (--delay); |
3355 | 3359 | ||
3356 | if (delay == 0) { | 3360 | if (delay == 0) { |
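The qla3xxx hunk above releases qdev->hw_lock around msleep() because a driver must not sleep while holding a spinlock taken with interrupts disabled. A minimal sketch of the resulting unlock-sleep-relock polling pattern, with hypothetical lock/register names (not the driver's actual code):

spin_lock_irqsave(&lock, flags);
do {
	if (readl(status_reg) & READY_BIT)	/* condition being polled for */
		break;
	spin_unlock_irqrestore(&lock, flags);	/* drop the lock before sleeping */
	msleep(500);
	spin_lock_irqsave(&lock, flags);	/* re-take it before re-checking */
} while (--retries);
spin_unlock_irqrestore(&lock, flags);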
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index fbc63d5e459f..eb159587d0bf 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
@@ -354,7 +354,7 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | |||
354 | status = acpi_run_hpp(handle, hpp); | 354 | status = acpi_run_hpp(handle, hpp); |
355 | if (ACPI_SUCCESS(status)) | 355 | if (ACPI_SUCCESS(status)) |
356 | break; | 356 | break; |
357 | if (acpi_root_bridge(handle)) | 357 | if (acpi_is_root_bridge(handle)) |
358 | break; | 358 | break; |
359 | status = acpi_get_parent(handle, &phandle); | 359 | status = acpi_get_parent(handle, &phandle); |
360 | if (ACPI_FAILURE(status)) | 360 | if (ACPI_FAILURE(status)) |
@@ -428,7 +428,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
428 | status = acpi_run_oshp(handle); | 428 | status = acpi_run_oshp(handle); |
429 | if (ACPI_SUCCESS(status)) | 429 | if (ACPI_SUCCESS(status)) |
430 | goto got_one; | 430 | goto got_one; |
431 | if (acpi_root_bridge(handle)) | 431 | if (acpi_is_root_bridge(handle)) |
432 | break; | 432 | break; |
433 | chandle = handle; | 433 | chandle = handle; |
434 | status = acpi_get_parent(chandle, &handle); | 434 | status = acpi_get_parent(chandle, &handle); |
@@ -449,42 +449,6 @@ got_one: | |||
449 | } | 449 | } |
450 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); | 450 | EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); |
451 | 451 | ||
452 | /* acpi_root_bridge - check to see if this acpi object is a root bridge | ||
453 | * | ||
454 | * @handle - the acpi object in question. | ||
455 | */ | ||
456 | int acpi_root_bridge(acpi_handle handle) | ||
457 | { | ||
458 | acpi_status status; | ||
459 | struct acpi_device_info *info; | ||
460 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
461 | int i; | ||
462 | |||
463 | status = acpi_get_object_info(handle, &buffer); | ||
464 | if (ACPI_SUCCESS(status)) { | ||
465 | info = buffer.pointer; | ||
466 | if ((info->valid & ACPI_VALID_HID) && | ||
467 | !strcmp(PCI_ROOT_HID_STRING, | ||
468 | info->hardware_id.value)) { | ||
469 | kfree(buffer.pointer); | ||
470 | return 1; | ||
471 | } | ||
472 | if (info->valid & ACPI_VALID_CID) { | ||
473 | for (i=0; i < info->compatibility_id.count; i++) { | ||
474 | if (!strcmp(PCI_ROOT_HID_STRING, | ||
475 | info->compatibility_id.id[i].value)) { | ||
476 | kfree(buffer.pointer); | ||
477 | return 1; | ||
478 | } | ||
479 | } | ||
480 | } | ||
481 | kfree(buffer.pointer); | ||
482 | } | ||
483 | return 0; | ||
484 | } | ||
485 | EXPORT_SYMBOL_GPL(acpi_root_bridge); | ||
486 | |||
487 | |||
488 | static int is_ejectable(acpi_handle handle) | 452 | static int is_ejectable(acpi_handle handle) |
489 | { | 453 | { |
490 | acpi_status status; | 454 | acpi_status status; |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 3a6064bce561..0cb0f830a993 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -678,18 +678,9 @@ static void remove_bridge(acpi_handle handle) | |||
678 | 678 | ||
679 | static struct pci_dev * get_apic_pci_info(acpi_handle handle) | 679 | static struct pci_dev * get_apic_pci_info(acpi_handle handle) |
680 | { | 680 | { |
681 | struct acpi_pci_id id; | ||
682 | struct pci_bus *bus; | ||
683 | struct pci_dev *dev; | 681 | struct pci_dev *dev; |
684 | 682 | ||
685 | if (ACPI_FAILURE(acpi_get_pci_id(handle, &id))) | 683 | dev = acpi_get_pci_dev(handle); |
686 | return NULL; | ||
687 | |||
688 | bus = pci_find_bus(id.segment, id.bus); | ||
689 | if (!bus) | ||
690 | return NULL; | ||
691 | |||
692 | dev = pci_get_slot(bus, PCI_DEVFN(id.device, id.function)); | ||
693 | if (!dev) | 684 | if (!dev) |
694 | return NULL; | 685 | return NULL; |
695 | 686 | ||
@@ -1396,19 +1387,16 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus) | |||
1396 | /* Program resources in newly inserted bridge */ | 1387 | /* Program resources in newly inserted bridge */ |
1397 | static int acpiphp_configure_bridge (acpi_handle handle) | 1388 | static int acpiphp_configure_bridge (acpi_handle handle) |
1398 | { | 1389 | { |
1399 | struct acpi_pci_id pci_id; | 1390 | struct pci_dev *dev; |
1400 | struct pci_bus *bus; | 1391 | struct pci_bus *bus; |
1401 | 1392 | ||
1402 | if (ACPI_FAILURE(acpi_get_pci_id(handle, &pci_id))) { | 1393 | dev = acpi_get_pci_dev(handle); |
1394 | if (!dev) { | ||
1403 | err("cannot get PCI domain and bus number for bridge\n"); | 1395 | err("cannot get PCI domain and bus number for bridge\n"); |
1404 | return -EINVAL; | 1396 | return -EINVAL; |
1405 | } | 1397 | } |
1406 | bus = pci_find_bus(pci_id.segment, pci_id.bus); | 1398 | |
1407 | if (!bus) { | 1399 | bus = dev->bus; |
1408 | err("cannot find bus %d:%d\n", | ||
1409 | pci_id.segment, pci_id.bus); | ||
1410 | return -EINVAL; | ||
1411 | } | ||
1412 | 1400 | ||
1413 | pci_bus_size_bridges(bus); | 1401 | pci_bus_size_bridges(bus); |
1414 | pci_bus_assign_resources(bus); | 1402 | pci_bus_assign_resources(bus); |
@@ -1416,6 +1404,7 @@ static int acpiphp_configure_bridge (acpi_handle handle) | |||
1416 | acpiphp_set_hpp_values(handle, bus); | 1404 | acpiphp_set_hpp_values(handle, bus); |
1417 | pci_enable_bridges(bus); | 1405 | pci_enable_bridges(bus); |
1418 | acpiphp_configure_ioapics(handle); | 1406 | acpiphp_configure_ioapics(handle); |
1407 | pci_dev_put(dev); | ||
1419 | return 0; | 1408 | return 0; |
1420 | } | 1409 | } |
1421 | 1410 | ||
@@ -1631,7 +1620,7 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
1631 | { | 1620 | { |
1632 | int *count = (int *)context; | 1621 | int *count = (int *)context; |
1633 | 1622 | ||
1634 | if (acpi_root_bridge(handle)) { | 1623 | if (acpi_is_root_bridge(handle)) { |
1635 | acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, | 1624 | acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, |
1636 | handle_hotplug_event_bridge, NULL); | 1625 | handle_hotplug_event_bridge, NULL); |
1637 | (*count)++; | 1626 | (*count)++; |
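In the acpiphp conversion above, acpi_get_pci_dev() appears to hand back a reference-counted struct pci_dev, which is why acpiphp_configure_bridge() now ends with pci_dev_put(). A minimal usage sketch of that get/use/put pattern, assuming those semantics:

struct pci_dev *dev;

dev = acpi_get_pci_dev(handle);		/* takes a reference on success */
if (!dev)
	return -ENODEV;
/* ... use dev (e.g. dev->bus) while the reference is held ... */
pci_dev_put(dev);			/* balance the reference when done */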
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 178853a07440..e53eacd75c8d 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/sysdev.h> | 39 | #include <linux/sysdev.h> |
40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
41 | #include <asm/iommu.h> | 41 | #include <asm/iommu.h> |
42 | #include <asm/e820.h> | ||
42 | #include "pci.h" | 43 | #include "pci.h" |
43 | 44 | ||
44 | #define ROOT_SIZE VTD_PAGE_SIZE | 45 | #define ROOT_SIZE VTD_PAGE_SIZE |
@@ -217,6 +218,14 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
217 | return (pte->val & 3) != 0; | 218 | return (pte->val & 3) != 0; |
218 | } | 219 | } |
219 | 220 | ||
221 | /* | ||
222 | * This domain is a static identity mapping domain. | ||
223 | * 1. This domain creates a static 1:1 mapping to all usable memory. | ||
224 | * 2. It maps to each iommu if successful. | ||
225 | * 3. Each iommu maps to this domain if successful. | ||
226 | */ | ||
227 | struct dmar_domain *si_domain; | ||
228 | |||
220 | /* devices under the same p2p bridge are owned in one domain */ | 229 | /* devices under the same p2p bridge are owned in one domain */ |
221 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) | 230 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) |
222 | 231 | ||
@@ -225,6 +234,9 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
225 | */ | 234 | */ |
226 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) | 235 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) |
227 | 236 | ||
237 | /* si_domain contains multiple devices */ | ||
238 | #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2) | ||
239 | |||
228 | struct dmar_domain { | 240 | struct dmar_domain { |
229 | int id; /* domain id */ | 241 | int id; /* domain id */ |
230 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ | 242 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ |
@@ -435,12 +447,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu) | |||
435 | return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 447 | return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
436 | } | 448 | } |
437 | 449 | ||
438 | /* in native case, each domain is related to only one iommu */ | 450 | /* This function only returns a single iommu in a domain */
439 | static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) | 451 | static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) |
440 | { | 452 | { |
441 | int iommu_id; | 453 | int iommu_id; |
442 | 454 | ||
455 | /* si_domain and vm domain should not get here. */ | ||
443 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); | 456 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); |
457 | BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); | ||
444 | 458 | ||
445 | iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | 459 | iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); |
446 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) | 460 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) |
@@ -1189,48 +1203,71 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
1189 | free_context_table(iommu); | 1203 | free_context_table(iommu); |
1190 | } | 1204 | } |
1191 | 1205 | ||
1192 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | 1206 | static struct dmar_domain *alloc_domain(void) |
1193 | { | 1207 | { |
1194 | unsigned long num; | ||
1195 | unsigned long ndomains; | ||
1196 | struct dmar_domain *domain; | 1208 | struct dmar_domain *domain; |
1197 | unsigned long flags; | ||
1198 | 1209 | ||
1199 | domain = alloc_domain_mem(); | 1210 | domain = alloc_domain_mem(); |
1200 | if (!domain) | 1211 | if (!domain) |
1201 | return NULL; | 1212 | return NULL; |
1202 | 1213 | ||
1214 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | ||
1215 | domain->flags = 0; | ||
1216 | |||
1217 | return domain; | ||
1218 | } | ||
1219 | |||
1220 | static int iommu_attach_domain(struct dmar_domain *domain, | ||
1221 | struct intel_iommu *iommu) | ||
1222 | { | ||
1223 | int num; | ||
1224 | unsigned long ndomains; | ||
1225 | unsigned long flags; | ||
1226 | |||
1203 | ndomains = cap_ndoms(iommu->cap); | 1227 | ndomains = cap_ndoms(iommu->cap); |
1204 | 1228 | ||
1205 | spin_lock_irqsave(&iommu->lock, flags); | 1229 | spin_lock_irqsave(&iommu->lock, flags); |
1230 | |||
1206 | num = find_first_zero_bit(iommu->domain_ids, ndomains); | 1231 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
1207 | if (num >= ndomains) { | 1232 | if (num >= ndomains) { |
1208 | spin_unlock_irqrestore(&iommu->lock, flags); | 1233 | spin_unlock_irqrestore(&iommu->lock, flags); |
1209 | free_domain_mem(domain); | ||
1210 | printk(KERN_ERR "IOMMU: no free domain ids\n"); | 1234 | printk(KERN_ERR "IOMMU: no free domain ids\n"); |
1211 | return NULL; | 1235 | return -ENOMEM; |
1212 | } | 1236 | } |
1213 | 1237 | ||
1214 | set_bit(num, iommu->domain_ids); | ||
1215 | domain->id = num; | 1238 | domain->id = num; |
1216 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | 1239 | set_bit(num, iommu->domain_ids); |
1217 | set_bit(iommu->seq_id, &domain->iommu_bmp); | 1240 | set_bit(iommu->seq_id, &domain->iommu_bmp); |
1218 | domain->flags = 0; | ||
1219 | iommu->domains[num] = domain; | 1241 | iommu->domains[num] = domain; |
1220 | spin_unlock_irqrestore(&iommu->lock, flags); | 1242 | spin_unlock_irqrestore(&iommu->lock, flags); |
1221 | 1243 | ||
1222 | return domain; | 1244 | return 0; |
1223 | } | 1245 | } |
1224 | 1246 | ||
1225 | static void iommu_free_domain(struct dmar_domain *domain) | 1247 | static void iommu_detach_domain(struct dmar_domain *domain, |
1248 | struct intel_iommu *iommu) | ||
1226 | { | 1249 | { |
1227 | unsigned long flags; | 1250 | unsigned long flags; |
1228 | struct intel_iommu *iommu; | 1251 | int num, ndomains; |
1229 | 1252 | int found = 0; | |
1230 | iommu = domain_get_iommu(domain); | ||
1231 | 1253 | ||
1232 | spin_lock_irqsave(&iommu->lock, flags); | 1254 | spin_lock_irqsave(&iommu->lock, flags); |
1233 | clear_bit(domain->id, iommu->domain_ids); | 1255 | ndomains = cap_ndoms(iommu->cap); |
1256 | num = find_first_bit(iommu->domain_ids, ndomains); | ||
1257 | for (; num < ndomains; ) { | ||
1258 | if (iommu->domains[num] == domain) { | ||
1259 | found = 1; | ||
1260 | break; | ||
1261 | } | ||
1262 | num = find_next_bit(iommu->domain_ids, | ||
1263 | cap_ndoms(iommu->cap), num+1); | ||
1264 | } | ||
1265 | |||
1266 | if (found) { | ||
1267 | clear_bit(num, iommu->domain_ids); | ||
1268 | clear_bit(iommu->seq_id, &domain->iommu_bmp); | ||
1269 | iommu->domains[num] = NULL; | ||
1270 | } | ||
1234 | spin_unlock_irqrestore(&iommu->lock, flags); | 1271 | spin_unlock_irqrestore(&iommu->lock, flags); |
1235 | } | 1272 | } |
1236 | 1273 | ||
@@ -1350,6 +1387,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1350 | 1387 | ||
1351 | static void domain_exit(struct dmar_domain *domain) | 1388 | static void domain_exit(struct dmar_domain *domain) |
1352 | { | 1389 | { |
1390 | struct dmar_drhd_unit *drhd; | ||
1391 | struct intel_iommu *iommu; | ||
1353 | u64 end; | 1392 | u64 end; |
1354 | 1393 | ||
1355 | /* Domain 0 is reserved, so dont process it */ | 1394 | /* Domain 0 is reserved, so dont process it */ |
@@ -1368,7 +1407,10 @@ static void domain_exit(struct dmar_domain *domain) | |||
1368 | /* free page tables */ | 1407 | /* free page tables */ |
1369 | dma_pte_free_pagetable(domain, 0, end); | 1408 | dma_pte_free_pagetable(domain, 0, end); |
1370 | 1409 | ||
1371 | iommu_free_domain(domain); | 1410 | for_each_active_iommu(iommu, drhd) |
1411 | if (test_bit(iommu->seq_id, &domain->iommu_bmp)) | ||
1412 | iommu_detach_domain(domain, iommu); | ||
1413 | |||
1372 | free_domain_mem(domain); | 1414 | free_domain_mem(domain); |
1373 | } | 1415 | } |
1374 | 1416 | ||
@@ -1408,7 +1450,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
1408 | id = domain->id; | 1450 | id = domain->id; |
1409 | pgd = domain->pgd; | 1451 | pgd = domain->pgd; |
1410 | 1452 | ||
1411 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { | 1453 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
1454 | domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { | ||
1412 | int found = 0; | 1455 | int found = 0; |
1413 | 1456 | ||
1414 | /* find an available domain id for this device in iommu */ | 1457 | /* find an available domain id for this device in iommu */ |
@@ -1433,6 +1476,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
1433 | } | 1476 | } |
1434 | 1477 | ||
1435 | set_bit(num, iommu->domain_ids); | 1478 | set_bit(num, iommu->domain_ids); |
1479 | set_bit(iommu->seq_id, &domain->iommu_bmp); | ||
1436 | iommu->domains[num] = domain; | 1480 | iommu->domains[num] = domain; |
1437 | id = num; | 1481 | id = num; |
1438 | } | 1482 | } |
@@ -1675,6 +1719,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1675 | unsigned long flags; | 1719 | unsigned long flags; |
1676 | int bus = 0, devfn = 0; | 1720 | int bus = 0, devfn = 0; |
1677 | int segment; | 1721 | int segment; |
1722 | int ret; | ||
1678 | 1723 | ||
1679 | domain = find_domain(pdev); | 1724 | domain = find_domain(pdev); |
1680 | if (domain) | 1725 | if (domain) |
@@ -1707,6 +1752,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1707 | } | 1752 | } |
1708 | } | 1753 | } |
1709 | 1754 | ||
1755 | domain = alloc_domain(); | ||
1756 | if (!domain) | ||
1757 | goto error; | ||
1758 | |||
1710 | /* Allocate new domain for the device */ | 1759 | /* Allocate new domain for the device */ |
1711 | drhd = dmar_find_matched_drhd_unit(pdev); | 1760 | drhd = dmar_find_matched_drhd_unit(pdev); |
1712 | if (!drhd) { | 1761 | if (!drhd) { |
@@ -1716,9 +1765,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1716 | } | 1765 | } |
1717 | iommu = drhd->iommu; | 1766 | iommu = drhd->iommu; |
1718 | 1767 | ||
1719 | domain = iommu_alloc_domain(iommu); | 1768 | ret = iommu_attach_domain(domain, iommu); |
1720 | if (!domain) | 1769 | if (ret) { |
1770 | domain_exit(domain); | ||
1721 | goto error; | 1771 | goto error; |
1772 | } | ||
1722 | 1773 | ||
1723 | if (domain_init(domain, gaw)) { | 1774 | if (domain_init(domain, gaw)) { |
1724 | domain_exit(domain); | 1775 | domain_exit(domain); |
@@ -1792,6 +1843,8 @@ error: | |||
1792 | return find_domain(pdev); | 1843 | return find_domain(pdev); |
1793 | } | 1844 | } |
1794 | 1845 | ||
1846 | static int iommu_identity_mapping; | ||
1847 | |||
1795 | static int iommu_prepare_identity_map(struct pci_dev *pdev, | 1848 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
1796 | unsigned long long start, | 1849 | unsigned long long start, |
1797 | unsigned long long end) | 1850 | unsigned long long end) |
@@ -1804,8 +1857,11 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
1804 | printk(KERN_INFO | 1857 | printk(KERN_INFO |
1805 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | 1858 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
1806 | pci_name(pdev), start, end); | 1859 | pci_name(pdev), start, end); |
1807 | /* page table init */ | 1860 | if (iommu_identity_mapping) |
1808 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | 1861 | domain = si_domain; |
1862 | else | ||
1863 | /* page table init */ | ||
1864 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); | ||
1809 | if (!domain) | 1865 | if (!domain) |
1810 | return -ENOMEM; | 1866 | return -ENOMEM; |
1811 | 1867 | ||
@@ -1952,7 +2008,110 @@ static int __init init_context_pass_through(void) | |||
1952 | return 0; | 2008 | return 0; |
1953 | } | 2009 | } |
1954 | 2010 | ||
1955 | static int __init init_dmars(void) | 2011 | static int md_domain_init(struct dmar_domain *domain, int guest_width); |
2012 | static int si_domain_init(void) | ||
2013 | { | ||
2014 | struct dmar_drhd_unit *drhd; | ||
2015 | struct intel_iommu *iommu; | ||
2016 | int ret = 0; | ||
2017 | |||
2018 | si_domain = alloc_domain(); | ||
2019 | if (!si_domain) | ||
2020 | return -EFAULT; | ||
2021 | |||
2022 | |||
2023 | for_each_active_iommu(iommu, drhd) { | ||
2024 | ret = iommu_attach_domain(si_domain, iommu); | ||
2025 | if (ret) { | ||
2026 | domain_exit(si_domain); | ||
2027 | return -EFAULT; | ||
2028 | } | ||
2029 | } | ||
2030 | |||
2031 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | ||
2032 | domain_exit(si_domain); | ||
2033 | return -EFAULT; | ||
2034 | } | ||
2035 | |||
2036 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; | ||
2037 | |||
2038 | return 0; | ||
2039 | } | ||
2040 | |||
2041 | static void domain_remove_one_dev_info(struct dmar_domain *domain, | ||
2042 | struct pci_dev *pdev); | ||
2043 | static int identity_mapping(struct pci_dev *pdev) | ||
2044 | { | ||
2045 | struct device_domain_info *info; | ||
2046 | |||
2047 | if (likely(!iommu_identity_mapping)) | ||
2048 | return 0; | ||
2049 | |||
2050 | |||
2051 | list_for_each_entry(info, &si_domain->devices, link) | ||
2052 | if (info->dev == pdev) | ||
2053 | return 1; | ||
2054 | return 0; | ||
2055 | } | ||
2056 | |||
2057 | static int domain_add_dev_info(struct dmar_domain *domain, | ||
2058 | struct pci_dev *pdev) | ||
2059 | { | ||
2060 | struct device_domain_info *info; | ||
2061 | unsigned long flags; | ||
2062 | |||
2063 | info = alloc_devinfo_mem(); | ||
2064 | if (!info) | ||
2065 | return -ENOMEM; | ||
2066 | |||
2067 | info->segment = pci_domain_nr(pdev->bus); | ||
2068 | info->bus = pdev->bus->number; | ||
2069 | info->devfn = pdev->devfn; | ||
2070 | info->dev = pdev; | ||
2071 | info->domain = domain; | ||
2072 | |||
2073 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2074 | list_add(&info->link, &domain->devices); | ||
2075 | list_add(&info->global, &device_domain_list); | ||
2076 | pdev->dev.archdata.iommu = info; | ||
2077 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2078 | |||
2079 | return 0; | ||
2080 | } | ||
2081 | |||
2082 | static int iommu_prepare_static_identity_mapping(void) | ||
2083 | { | ||
2084 | int i; | ||
2085 | struct pci_dev *pdev = NULL; | ||
2086 | int ret; | ||
2087 | |||
2088 | ret = si_domain_init(); | ||
2089 | if (ret) | ||
2090 | return -EFAULT; | ||
2091 | |||
2092 | printk(KERN_INFO "IOMMU: Setting identity map:\n"); | ||
2093 | for_each_pci_dev(pdev) { | ||
2094 | for (i = 0; i < e820.nr_map; i++) { | ||
2095 | struct e820entry *ei = &e820.map[i]; | ||
2096 | |||
2097 | if (ei->type == E820_RAM) { | ||
2098 | ret = iommu_prepare_identity_map(pdev, | ||
2099 | ei->addr, ei->addr + ei->size); | ||
2100 | if (ret) { | ||
2101 | printk(KERN_INFO "1:1 mapping to one domain failed.\n"); | ||
2102 | return -EFAULT; | ||
2103 | } | ||
2104 | } | ||
2105 | } | ||
2106 | ret = domain_add_dev_info(si_domain, pdev); | ||
2107 | if (ret) | ||
2108 | return ret; | ||
2109 | } | ||
2110 | |||
2111 | return 0; | ||
2112 | } | ||
2113 | |||
2114 | int __init init_dmars(void) | ||
1956 | { | 2115 | { |
1957 | struct dmar_drhd_unit *drhd; | 2116 | struct dmar_drhd_unit *drhd; |
1958 | struct dmar_rmrr_unit *rmrr; | 2117 | struct dmar_rmrr_unit *rmrr; |
@@ -1962,6 +2121,13 @@ static int __init init_dmars(void) | |||
1962 | int pass_through = 1; | 2121 | int pass_through = 1; |
1963 | 2122 | ||
1964 | /* | 2123 | /* |
2124 | * If pass through cannot be enabled, the iommu tries to use identity | ||
2125 | * mapping. | ||
2126 | */ | ||
2127 | if (iommu_pass_through) | ||
2128 | iommu_identity_mapping = 1; | ||
2129 | |||
2130 | /* | ||
1965 | * for each drhd | 2131 | * for each drhd |
1966 | * allocate root | 2132 | * allocate root |
1967 | * initialize and program root entry to not present | 2133 | * initialize and program root entry to not present |
@@ -2090,9 +2256,12 @@ static int __init init_dmars(void) | |||
2090 | 2256 | ||
2091 | /* | 2257 | /* |
2092 | * If pass through is not set or not enabled, setup context entries for | 2258 | * If pass through is not set or not enabled, setup context entries for |
2093 | * identity mappings for rmrr, gfx, and isa. | 2259 | * identity mappings for rmrr, gfx, and isa and may fall back to static |
2260 | * identity mapping if iommu_identity_mapping is set. | ||
2094 | */ | 2261 | */ |
2095 | if (!iommu_pass_through) { | 2262 | if (!iommu_pass_through) { |
2263 | if (iommu_identity_mapping) | ||
2264 | iommu_prepare_static_identity_mapping(); | ||
2096 | /* | 2265 | /* |
2097 | * For each rmrr | 2266 | * For each rmrr |
2098 | * for each dev attached to rmrr | 2267 | * for each dev attached to rmrr |
@@ -2107,6 +2276,7 @@ static int __init init_dmars(void) | |||
2107 | * endfor | 2276 | * endfor |
2108 | * endfor | 2277 | * endfor |
2109 | */ | 2278 | */ |
2279 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); | ||
2110 | for_each_rmrr_units(rmrr) { | 2280 | for_each_rmrr_units(rmrr) { |
2111 | for (i = 0; i < rmrr->devices_cnt; i++) { | 2281 | for (i = 0; i < rmrr->devices_cnt; i++) { |
2112 | pdev = rmrr->devices[i]; | 2282 | pdev = rmrr->devices[i]; |
@@ -2248,6 +2418,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
2248 | return domain; | 2418 | return domain; |
2249 | } | 2419 | } |
2250 | 2420 | ||
2421 | static int iommu_dummy(struct pci_dev *pdev) | ||
2422 | { | ||
2423 | return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; | ||
2424 | } | ||
2425 | |||
2426 | /* Check if the pdev needs to go through the non-identity map and unmap process. */ | ||
2427 | static int iommu_no_mapping(struct pci_dev *pdev) | ||
2428 | { | ||
2429 | int found; | ||
2430 | |||
2431 | if (!iommu_identity_mapping) | ||
2432 | return iommu_dummy(pdev); | ||
2433 | |||
2434 | found = identity_mapping(pdev); | ||
2435 | if (found) { | ||
2436 | if (pdev->dma_mask > DMA_BIT_MASK(32)) | ||
2437 | return 1; | ||
2438 | else { | ||
2439 | /* | ||
2440 | * A 32-bit DMA device is removed from si_domain and falls back | ||
2441 | * to non-identity mapping. | ||
2442 | */ | ||
2443 | domain_remove_one_dev_info(si_domain, pdev); | ||
2444 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", | ||
2445 | pci_name(pdev)); | ||
2446 | return 0; | ||
2447 | } | ||
2448 | } else { | ||
2449 | /* | ||
2450 | * A 64-bit DMA device detached from a VM domain is put | ||
2451 | * back into si_domain for identity mapping. | ||
2452 | */ | ||
2453 | if (pdev->dma_mask > DMA_BIT_MASK(32)) { | ||
2454 | int ret; | ||
2455 | ret = domain_add_dev_info(si_domain, pdev); | ||
2456 | if (!ret) { | ||
2457 | printk(KERN_INFO "64bit %s uses identity mapping\n", | ||
2458 | pci_name(pdev)); | ||
2459 | return 1; | ||
2460 | } | ||
2461 | } | ||
2462 | } | ||
2463 | |||
2464 | return iommu_dummy(pdev); | ||
2465 | } | ||
2466 | |||
2251 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | 2467 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
2252 | size_t size, int dir, u64 dma_mask) | 2468 | size_t size, int dir, u64 dma_mask) |
2253 | { | 2469 | { |
@@ -2260,7 +2476,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2260 | struct intel_iommu *iommu; | 2476 | struct intel_iommu *iommu; |
2261 | 2477 | ||
2262 | BUG_ON(dir == DMA_NONE); | 2478 | BUG_ON(dir == DMA_NONE); |
2263 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2479 | |
2480 | if (iommu_no_mapping(pdev)) | ||
2264 | return paddr; | 2481 | return paddr; |
2265 | 2482 | ||
2266 | domain = get_valid_domain_for_dev(pdev); | 2483 | domain = get_valid_domain_for_dev(pdev); |
@@ -2401,8 +2618,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
2401 | struct iova *iova; | 2618 | struct iova *iova; |
2402 | struct intel_iommu *iommu; | 2619 | struct intel_iommu *iommu; |
2403 | 2620 | ||
2404 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2621 | if (iommu_no_mapping(pdev)) |
2405 | return; | 2622 | return; |
2623 | |||
2406 | domain = find_domain(pdev); | 2624 | domain = find_domain(pdev); |
2407 | BUG_ON(!domain); | 2625 | BUG_ON(!domain); |
2408 | 2626 | ||
@@ -2492,7 +2710,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2492 | struct scatterlist *sg; | 2710 | struct scatterlist *sg; |
2493 | struct intel_iommu *iommu; | 2711 | struct intel_iommu *iommu; |
2494 | 2712 | ||
2495 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2713 | if (iommu_no_mapping(pdev)) |
2496 | return; | 2714 | return; |
2497 | 2715 | ||
2498 | domain = find_domain(pdev); | 2716 | domain = find_domain(pdev); |
@@ -2553,7 +2771,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
2553 | struct intel_iommu *iommu; | 2771 | struct intel_iommu *iommu; |
2554 | 2772 | ||
2555 | BUG_ON(dir == DMA_NONE); | 2773 | BUG_ON(dir == DMA_NONE); |
2556 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2774 | if (iommu_no_mapping(pdev)) |
2557 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); | 2775 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); |
2558 | 2776 | ||
2559 | domain = get_valid_domain_for_dev(pdev); | 2777 | domain = get_valid_domain_for_dev(pdev); |
@@ -2951,31 +3169,6 @@ int __init intel_iommu_init(void) | |||
2951 | return 0; | 3169 | return 0; |
2952 | } | 3170 | } |
2953 | 3171 | ||
2954 | static int vm_domain_add_dev_info(struct dmar_domain *domain, | ||
2955 | struct pci_dev *pdev) | ||
2956 | { | ||
2957 | struct device_domain_info *info; | ||
2958 | unsigned long flags; | ||
2959 | |||
2960 | info = alloc_devinfo_mem(); | ||
2961 | if (!info) | ||
2962 | return -ENOMEM; | ||
2963 | |||
2964 | info->segment = pci_domain_nr(pdev->bus); | ||
2965 | info->bus = pdev->bus->number; | ||
2966 | info->devfn = pdev->devfn; | ||
2967 | info->dev = pdev; | ||
2968 | info->domain = domain; | ||
2969 | |||
2970 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2971 | list_add(&info->link, &domain->devices); | ||
2972 | list_add(&info->global, &device_domain_list); | ||
2973 | pdev->dev.archdata.iommu = info; | ||
2974 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2975 | |||
2976 | return 0; | ||
2977 | } | ||
2978 | |||
2979 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | 3172 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
2980 | struct pci_dev *pdev) | 3173 | struct pci_dev *pdev) |
2981 | { | 3174 | { |
@@ -3003,7 +3196,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | |||
3003 | } | 3196 | } |
3004 | } | 3197 | } |
3005 | 3198 | ||
3006 | static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, | 3199 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
3007 | struct pci_dev *pdev) | 3200 | struct pci_dev *pdev) |
3008 | { | 3201 | { |
3009 | struct device_domain_info *info; | 3202 | struct device_domain_info *info; |
@@ -3136,7 +3329,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void) | |||
3136 | return domain; | 3329 | return domain; |
3137 | } | 3330 | } |
3138 | 3331 | ||
3139 | static int vm_domain_init(struct dmar_domain *domain, int guest_width) | 3332 | static int md_domain_init(struct dmar_domain *domain, int guest_width) |
3140 | { | 3333 | { |
3141 | int adjust_width; | 3334 | int adjust_width; |
3142 | 3335 | ||
@@ -3227,7 +3420,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
3227 | "intel_iommu_domain_init: dmar_domain == NULL\n"); | 3420 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
3228 | return -ENOMEM; | 3421 | return -ENOMEM; |
3229 | } | 3422 | } |
3230 | if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 3423 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
3231 | printk(KERN_ERR | 3424 | printk(KERN_ERR |
3232 | "intel_iommu_domain_init() failed\n"); | 3425 | "intel_iommu_domain_init() failed\n"); |
3233 | vm_domain_exit(dmar_domain); | 3426 | vm_domain_exit(dmar_domain); |
@@ -3262,8 +3455,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
3262 | 3455 | ||
3263 | old_domain = find_domain(pdev); | 3456 | old_domain = find_domain(pdev); |
3264 | if (old_domain) { | 3457 | if (old_domain) { |
3265 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) | 3458 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
3266 | vm_domain_remove_one_dev_info(old_domain, pdev); | 3459 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) |
3460 | domain_remove_one_dev_info(old_domain, pdev); | ||
3267 | else | 3461 | else |
3268 | domain_remove_dev_info(old_domain); | 3462 | domain_remove_dev_info(old_domain); |
3269 | } | 3463 | } |
@@ -3285,7 +3479,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
3285 | return -EFAULT; | 3479 | return -EFAULT; |
3286 | } | 3480 | } |
3287 | 3481 | ||
3288 | ret = vm_domain_add_dev_info(dmar_domain, pdev); | 3482 | ret = domain_add_dev_info(dmar_domain, pdev); |
3289 | if (ret) | 3483 | if (ret) |
3290 | return ret; | 3484 | return ret; |
3291 | 3485 | ||
@@ -3299,7 +3493,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain, | |||
3299 | struct dmar_domain *dmar_domain = domain->priv; | 3493 | struct dmar_domain *dmar_domain = domain->priv; |
3300 | struct pci_dev *pdev = to_pci_dev(dev); | 3494 | struct pci_dev *pdev = to_pci_dev(dev); |
3301 | 3495 | ||
3302 | vm_domain_remove_one_dev_info(dmar_domain, pdev); | 3496 | domain_remove_one_dev_info(dmar_domain, pdev); |
3303 | } | 3497 | } |
3304 | 3498 | ||
3305 | static int intel_iommu_map_range(struct iommu_domain *domain, | 3499 | static int intel_iommu_map_range(struct iommu_domain *domain, |
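Condensing the intel-iommu changes above: devices placed in si_domain get a static 1:1 mapping, so their DMA addresses equal physical addresses, while 32-bit-only devices are dropped back to the translated path. A simplified, editorial sketch of that per-device decision (not the patch's exact code):

static bool device_uses_identity_map(struct pci_dev *pdev)
{
	if (!iommu_identity_mapping)
		return false;			/* static identity mapping not enabled */
	if (pdev->dma_mask <= DMA_BIT_MASK(32))
		return false;			/* 32-bit device: must use translated DMA */
	return true;				/* 64-bit-capable device: stays in si_domain */
}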
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 1e83c8c5f985..4f5b8712931f 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/intel-iommu.h> | 10 | #include <linux/intel-iommu.h> |
11 | #include "intr_remapping.h" | 11 | #include "intr_remapping.h" |
12 | #include <acpi/acpi.h> | 12 | #include <acpi/acpi.h> |
13 | #include <asm/pci-direct.h> | ||
14 | #include "pci.h" | ||
13 | 15 | ||
14 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | 16 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; |
15 | static int ir_ioapic_num; | 17 | static int ir_ioapic_num; |
@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
314 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 316 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
315 | irte = &iommu->ir_table->base[index]; | 317 | irte = &iommu->ir_table->base[index]; |
316 | 318 | ||
317 | set_64bit((unsigned long *)irte, irte_modified->low); | 319 | set_64bit((unsigned long *)&irte->low, irte_modified->low); |
320 | set_64bit((unsigned long *)&irte->high, irte_modified->high); | ||
318 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); | 321 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
319 | 322 | ||
320 | rc = qi_flush_iec(iommu, index, 0); | 323 | rc = qi_flush_iec(iommu, index, 0); |
@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) | |||
369 | return drhd->iommu; | 372 | return drhd->iommu; |
370 | } | 373 | } |
371 | 374 | ||
375 | static int clear_entries(struct irq_2_iommu *irq_iommu) | ||
376 | { | ||
377 | struct irte *start, *entry, *end; | ||
378 | struct intel_iommu *iommu; | ||
379 | int index; | ||
380 | |||
381 | if (irq_iommu->sub_handle) | ||
382 | return 0; | ||
383 | |||
384 | iommu = irq_iommu->iommu; | ||
385 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
386 | |||
387 | start = iommu->ir_table->base + index; | ||
388 | end = start + (1 << irq_iommu->irte_mask); | ||
389 | |||
390 | for (entry = start; entry < end; entry++) { | ||
391 | set_64bit((unsigned long *)&entry->low, 0); | ||
392 | set_64bit((unsigned long *)&entry->high, 0); | ||
393 | } | ||
394 | |||
395 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
396 | } | ||
397 | |||
372 | int free_irte(int irq) | 398 | int free_irte(int irq) |
373 | { | 399 | { |
374 | int rc = 0; | 400 | int rc = 0; |
375 | int index, i; | ||
376 | struct irte *irte; | ||
377 | struct intel_iommu *iommu; | ||
378 | struct irq_2_iommu *irq_iommu; | 401 | struct irq_2_iommu *irq_iommu; |
379 | unsigned long flags; | 402 | unsigned long flags; |
380 | 403 | ||
@@ -385,16 +408,7 @@ int free_irte(int irq) | |||
385 | return -1; | 408 | return -1; |
386 | } | 409 | } |
387 | 410 | ||
388 | iommu = irq_iommu->iommu; | 411 | rc = clear_entries(irq_iommu); |
389 | |||
390 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | ||
391 | irte = &iommu->ir_table->base[index]; | ||
392 | |||
393 | if (!irq_iommu->sub_handle) { | ||
394 | for (i = 0; i < (1 << irq_iommu->irte_mask); i++) | ||
395 | set_64bit((unsigned long *)(irte + i), 0); | ||
396 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | ||
397 | } | ||
398 | 412 | ||
399 | irq_iommu->iommu = NULL; | 413 | irq_iommu->iommu = NULL; |
400 | irq_iommu->irte_index = 0; | 414 | irq_iommu->irte_index = 0; |
@@ -406,6 +420,91 @@ int free_irte(int irq) | |||
406 | return rc; | 420 | return rc; |
407 | } | 421 | } |
408 | 422 | ||
423 | /* | ||
424 | * source validation type | ||
425 | */ | ||
426 | #define SVT_NO_VERIFY 0x0 /* no verification is required */ | ||
427 | #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ | ||
428 | #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ | ||
429 | |||
430 | /* | ||
431 | * source-id qualifier | ||
432 | */ | ||
433 | #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ | ||
434 | #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore | ||
435 | * the third least significant bit | ||
436 | */ | ||
437 | #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore | ||
438 | * the second and third least significant bits | ||
439 | */ | ||
440 | #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore | ||
441 | * the three least significant bits | ||
442 | */ | ||
443 | |||
444 | /* | ||
445 | * set SVT, SQ and SID fields of irte to verify | ||
446 | * source ids of interrupt requests | ||
447 | */ | ||
448 | static void set_irte_sid(struct irte *irte, unsigned int svt, | ||
449 | unsigned int sq, unsigned int sid) | ||
450 | { | ||
451 | irte->svt = svt; | ||
452 | irte->sq = sq; | ||
453 | irte->sid = sid; | ||
454 | } | ||
455 | |||
456 | int set_ioapic_sid(struct irte *irte, int apic) | ||
457 | { | ||
458 | int i; | ||
459 | u16 sid = 0; | ||
460 | |||
461 | if (!irte) | ||
462 | return -1; | ||
463 | |||
464 | for (i = 0; i < MAX_IO_APICS; i++) { | ||
465 | if (ir_ioapic[i].id == apic) { | ||
466 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | ||
467 | break; | ||
468 | } | ||
469 | } | ||
470 | |||
471 | if (sid == 0) { | ||
472 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); | ||
473 | return -1; | ||
474 | } | ||
475 | |||
476 | set_irte_sid(irte, 1, 0, sid); | ||
477 | |||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | int set_msi_sid(struct irte *irte, struct pci_dev *dev) | ||
482 | { | ||
483 | struct pci_dev *bridge; | ||
484 | |||
485 | if (!irte || !dev) | ||
486 | return -1; | ||
487 | |||
488 | /* PCIe device or Root Complex integrated PCI device */ | ||
489 | if (dev->is_pcie || !dev->bus->parent) { | ||
490 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
491 | (dev->bus->number << 8) | dev->devfn); | ||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | bridge = pci_find_upstream_pcie_bridge(dev); | ||
496 | if (bridge) { | ||
497 | if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */ | ||
498 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, | ||
499 | (bridge->bus->number << 8) | dev->bus->number); | ||
500 | else /* this is a legacy PCI bridge */ | ||
501 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | ||
502 | (bridge->bus->number << 8) | bridge->devfn); | ||
503 | } | ||
504 | |||
505 | return 0; | ||
506 | } | ||
507 | |||
409 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | 508 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) |
410 | { | 509 | { |
411 | u64 addr; | 510 | u64 addr; |
@@ -612,6 +711,35 @@ error: | |||
612 | return -1; | 711 | return -1; |
613 | } | 712 | } |
614 | 713 | ||
714 | static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | ||
715 | struct intel_iommu *iommu) | ||
716 | { | ||
717 | struct acpi_dmar_pci_path *path; | ||
718 | u8 bus; | ||
719 | int count; | ||
720 | |||
721 | bus = scope->bus; | ||
722 | path = (struct acpi_dmar_pci_path *)(scope + 1); | ||
723 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) | ||
724 | / sizeof(struct acpi_dmar_pci_path); | ||
725 | |||
726 | while (--count > 0) { | ||
727 | /* | ||
728 | * Access PCI config space directly because the PCI | ||
729 | * subsystem isn't initialized yet. | ||
730 | */ | ||
731 | bus = read_pci_config_byte(bus, path->dev, path->fn, | ||
732 | PCI_SECONDARY_BUS); | ||
733 | path++; | ||
734 | } | ||
735 | |||
736 | ir_ioapic[ir_ioapic_num].bus = bus; | ||
737 | ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn); | ||
738 | ir_ioapic[ir_ioapic_num].iommu = iommu; | ||
739 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | ||
740 | ir_ioapic_num++; | ||
741 | } | ||
742 | |||
615 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | 743 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, |
616 | struct intel_iommu *iommu) | 744 | struct intel_iommu *iommu) |
617 | { | 745 | { |
@@ -636,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | |||
636 | " 0x%Lx\n", scope->enumeration_id, | 764 | " 0x%Lx\n", scope->enumeration_id, |
637 | drhd->address); | 765 | drhd->address); |
638 | 766 | ||
639 | ir_ioapic[ir_ioapic_num].iommu = iommu; | 767 | ir_parse_one_ioapic_scope(scope, iommu); |
640 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | ||
641 | ir_ioapic_num++; | ||
642 | } | 768 | } |
643 | start += scope->length; | 769 | start += scope->length; |
644 | } | 770 | } |
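The new set_ioapic_sid()/set_msi_sid() helpers above program a source-id into each IRTE so the hardware can verify which requester is allowed to signal a given interrupt. The 16-bit source-id itself is simply the requester's bus/devfn pair; a one-function sketch of how it is composed:

static u16 make_sid(u8 bus, u8 devfn)
{
	return (bus << 8) | devfn;	/* bus number in the high byte, devfn in the low byte */
}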
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h index ca48f0df8ac9..63a263c18415 100644 --- a/drivers/pci/intr_remapping.h +++ b/drivers/pci/intr_remapping.h | |||
@@ -3,6 +3,8 @@ | |||
3 | struct ioapic_scope { | 3 | struct ioapic_scope { |
4 | struct intel_iommu *iommu; | 4 | struct intel_iommu *iommu; |
5 | unsigned int id; | 5 | unsigned int id; |
6 | unsigned int bus; /* PCI bus number */ | ||
7 | unsigned int devfn; /* PCI devfn number */ | ||
6 | }; | 8 | }; |
7 | 9 | ||
8 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) | 10 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index c682ac536415..fee6a4022bc1 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -34,10 +34,27 @@ config ACER_WMI | |||
34 | If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M | 34 | If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M |
35 | here. | 35 | here. |
36 | 36 | ||
37 | config ACERHDF | ||
38 | tristate "Acer Aspire One temperature and fan driver" | ||
39 | depends on THERMAL && THERMAL_HWMON && ACPI | ||
40 | ---help--- | ||
41 | This is a driver for Acer Aspire One netbooks. It allows access to | ||
42 | the temperature sensor and control of the fan. | ||
43 | |||
44 | After loading this driver the BIOS is still in control of the fan. | ||
45 | To let the kernel handle the fan, do: | ||
46 | echo -n enabled > /sys/class/thermal/thermal_zone0/mode | ||
47 | |||
48 | For more information about this driver see | ||
49 | <http://piie.net/files/acerhdf_README.txt> | ||
50 | |||
51 | If you have an Acer Aspire One netbook, say Y or M | ||
52 | here. | ||
53 | |||
37 | config ASUS_LAPTOP | 54 | config ASUS_LAPTOP |
38 | tristate "Asus Laptop Extras (EXPERIMENTAL)" | 55 | tristate "Asus Laptop Extras" |
39 | depends on ACPI | 56 | depends on ACPI |
40 | depends on EXPERIMENTAL && !ACPI_ASUS | 57 | depends on !ACPI_ASUS |
41 | select LEDS_CLASS | 58 | select LEDS_CLASS |
42 | select NEW_LEDS | 59 | select NEW_LEDS |
43 | select BACKLIGHT_CLASS_DEVICE | 60 | select BACKLIGHT_CLASS_DEVICE |
@@ -45,12 +62,12 @@ config ASUS_LAPTOP | |||
45 | ---help--- | 62 | ---help--- |
46 | This is the new Linux driver for Asus laptops. It may also support some | 63 | This is the new Linux driver for Asus laptops. It may also support some |
47 | MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate | 64 | MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate |
48 | standard ACPI events that go through /proc/acpi/events. It also adds | 65 | standard ACPI events and input events. It also adds |
49 | support for video output switching, LCD backlight control, Bluetooth and | 66 | support for video output switching, LCD backlight control, Bluetooth and |
50 | Wlan control, and most importantly, allows you to blink those fancy LEDs. | 67 | Wlan control, and most importantly, allows you to blink those fancy LEDs. |
51 | 68 | ||
52 | For more information and a userspace daemon for handling the extra | 69 | For more information and a userspace daemon for handling the extra |
53 | buttons see <http://acpi4asus.sf.net/>. | 70 | buttons see <http://acpi4asus.sf.net>. |
54 | 71 | ||
55 | If you have an ACPI-compatible ASUS laptop, say Y or M here. | 72 | If you have an ACPI-compatible ASUS laptop, say Y or M here. |
56 | 73 | ||
@@ -340,9 +357,14 @@ config EEEPC_LAPTOP | |||
340 | depends on RFKILL || RFKILL = n | 357 | depends on RFKILL || RFKILL = n |
341 | select BACKLIGHT_CLASS_DEVICE | 358 | select BACKLIGHT_CLASS_DEVICE |
342 | select HWMON | 359 | select HWMON |
360 | select HOTPLUG | ||
361 | select HOTPLUG_PCI if PCI | ||
343 | ---help--- | 362 | ---help--- |
344 | This driver supports the Fn-Fx keys on Eee PC laptops. | 363 | This driver supports the Fn-Fx keys on Eee PC laptops. |
345 | It also adds the ability to switch camera/wlan on/off. | 364 | |
365 | It also gives access to some extra laptop functionality, such as | ||
366 | Bluetooth and backlight control, and allows powering some other | ||
367 | devices on and off. | ||
346 | 368 | ||
347 | If you have an Eee PC laptop, say Y or M here. | 369 | If you have an Eee PC laptop, say Y or M here. |
348 | 370 | ||
@@ -369,7 +391,7 @@ config ACPI_WMI | |||
369 | any ACPI-WMI devices. | 391 | any ACPI-WMI devices. |
370 | 392 | ||
371 | config ACPI_ASUS | 393 | config ACPI_ASUS |
372 | tristate "ASUS/Medion Laptop Extras" | 394 | tristate "ASUS/Medion Laptop Extras (DEPRECATED)" |
373 | depends on ACPI | 395 | depends on ACPI |
374 | select BACKLIGHT_CLASS_DEVICE | 396 | select BACKLIGHT_CLASS_DEVICE |
375 | ---help--- | 397 | ---help--- |
@@ -390,7 +412,7 @@ config ACPI_ASUS | |||
390 | parameters. | 412 | parameters. |
391 | 413 | ||
392 | More information and a userspace daemon for handling the extra buttons | 414 | More information and a userspace daemon for handling the extra buttons |
393 | at <http://sourceforge.net/projects/acpi4asus/>. | 415 | at <http://acpi4asus.sf.net>. |
394 | 416 | ||
395 | If you have an ACPI-compatible ASUS laptop, say Y or M here. This | 417 | If you have an ACPI-compatible ASUS laptop, say Y or M here. This |
396 | driver is still under development, so if your laptop is unsupported or | 418 | driver is still under development, so if your laptop is unsupported or |
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index e40c7bd1b87e..641b8bfa5538 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | |||
9 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | 9 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o |
10 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o | 10 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o |
11 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o | 11 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o |
12 | obj-$(CONFIG_ACERHDF) += acerhdf.o | ||
12 | obj-$(CONFIG_HP_WMI) += hp-wmi.o | 13 | obj-$(CONFIG_HP_WMI) += hp-wmi.o |
13 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o | 14 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o |
14 | obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o | 15 | obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o |
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c new file mode 100644 index 000000000000..bdfee177eefb --- /dev/null +++ b/drivers/platform/x86/acerhdf.c | |||
@@ -0,0 +1,602 @@ | |||
1 | /* | ||
2 | * acerhdf - A driver which monitors the temperature | ||
3 | * of the aspire one netbook, turns on/off the fan | ||
4 | * as soon as the upper/lower threshold is reached. | ||
5 | * | ||
6 | * (C) 2009 - Peter Feuerer peter (a) piie.net | ||
7 | * http://piie.net | ||
8 | * 2009 Borislav Petkov <petkovbb@gmail.com> | ||
9 | * | ||
10 | * Inspired by and many thanks to: | ||
11 | * o acerfand - Rachel Greenham | ||
12 | * o acer_ec.pl - Michael Kurz michi.kurz (at) googlemail.com | ||
13 | * - Petr Tomasek tomasek (#) etf,cuni,cz | ||
14 | * - Carlos Corbacho cathectic (at) gmail.com | ||
15 | * o lkml - Matthew Garrett | ||
16 | * - Borislav Petkov | ||
17 | * - Andreas Mohr | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or modify | ||
20 | * it under the terms of the GNU General Public License as published by | ||
21 | * the Free Software Foundation; either version 2 of the License, or | ||
22 | * (at your option) any later version. | ||
23 | * | ||
24 | * This program is distributed in the hope that it will be useful, | ||
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
27 | * GNU General Public License for more details. | ||
28 | * | ||
29 | * You should have received a copy of the GNU General Public License | ||
30 | * along with this program; if not, write to the Free Software | ||
31 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
32 | */ | ||
33 | |||
34 | #define pr_fmt(fmt) "acerhdf: " fmt | ||
35 | |||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/fs.h> | ||
39 | #include <linux/dmi.h> | ||
40 | #include <acpi/acpi_drivers.h> | ||
41 | #include <linux/sched.h> | ||
42 | #include <linux/thermal.h> | ||
43 | #include <linux/platform_device.h> | ||
44 | |||
45 | /* | ||
46 | * The driver is started with "kernel mode off" by default. That means the BIOS | ||
47 | * is still in control of the fan. In this mode the driver allows reading the | ||
48 | * CPU temperature, and a userspace tool may take over control of the fan. | ||
49 | * If the driver is switched to "kernel mode" (e.g. via module parameter) the | ||
50 | * driver is in full control of the fan. If you want the module to be started in | ||
51 | * kernel mode by default, define the following: | ||
52 | */ | ||
53 | #undef START_IN_KERNEL_MODE | ||
54 | |||
55 | #define DRV_VER "0.5.13" | ||
56 | |||
57 | /* | ||
58 | * According to the Atom N270 datasheet, | ||
59 | * (http://download.intel.com/design/processor/datashts/320032.pdf) the | ||
60 | * CPU's optimal operating limits denoted in junction temperature as | ||
61 | * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So, | ||
62 | * assume 89°C is the critical temperature. | ||
63 | */ | ||
64 | #define ACERHDF_TEMP_CRIT 89 | ||
65 | #define ACERHDF_FAN_OFF 0 | ||
66 | #define ACERHDF_FAN_AUTO 1 | ||
67 | |||
68 | /* | ||
69 | * No matter what value the user puts into the fanon variable, turn on the fan | ||
70 | * at 80 degrees Celsius to prevent hardware damage | ||
71 | */ | ||
72 | #define ACERHDF_MAX_FANON 80 | ||
73 | |||
74 | /* | ||
75 | * Maximum interval between two temperature checks is 15 seconds, as the die | ||
76 | * can get hot really fast under heavy load (plus we shouldn't forget about | ||
77 | * possible impact of _external_ aggressive sources such as heaters, sun etc.) | ||
78 | */ | ||
79 | #define ACERHDF_MAX_INTERVAL 15 | ||
80 | |||
81 | #ifdef START_IN_KERNEL_MODE | ||
82 | static int kernelmode = 1; | ||
83 | #else | ||
84 | static int kernelmode; | ||
85 | #endif | ||
86 | |||
87 | static unsigned int interval = 10; | ||
88 | static unsigned int fanon = 63; | ||
89 | static unsigned int fanoff = 58; | ||
90 | static unsigned int verbose; | ||
91 | static unsigned int fanstate = ACERHDF_FAN_AUTO; | ||
92 | static char force_bios[16]; | ||
93 | static unsigned int prev_interval; | ||
94 | struct thermal_zone_device *thz_dev; | ||
95 | struct thermal_cooling_device *cl_dev; | ||
96 | struct platform_device *acerhdf_dev; | ||
97 | |||
98 | module_param(kernelmode, uint, 0); | ||
99 | MODULE_PARM_DESC(kernelmode, "Kernel mode fan control on / off"); | ||
100 | module_param(interval, uint, 0600); | ||
101 | MODULE_PARM_DESC(interval, "Polling interval of temperature check"); | ||
102 | module_param(fanon, uint, 0600); | ||
103 | MODULE_PARM_DESC(fanon, "Turn the fan on above this temperature"); | ||
104 | module_param(fanoff, uint, 0600); | ||
105 | MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature"); | ||
106 | module_param(verbose, uint, 0600); | ||
107 | MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); | ||
108 | module_param_string(force_bios, force_bios, 16, 0); | ||
109 | MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check"); | ||
110 | |||
111 | /* BIOS settings */ | ||
112 | struct bios_settings_t { | ||
113 | const char *vendor; | ||
114 | const char *version; | ||
115 | unsigned char fanreg; | ||
116 | unsigned char tempreg; | ||
117 | unsigned char fancmd[2]; /* fan off and auto commands */ | ||
118 | }; | ||
119 | |||
120 | /* Register addresses and values for different BIOS versions */ | ||
121 | static const struct bios_settings_t bios_tbl[] = { | ||
122 | {"Acer", "v0.3109", 0x55, 0x58, {0x1f, 0x00} }, | ||
123 | {"Acer", "v0.3114", 0x55, 0x58, {0x1f, 0x00} }, | ||
124 | {"Acer", "v0.3301", 0x55, 0x58, {0xaf, 0x00} }, | ||
125 | {"Acer", "v0.3304", 0x55, 0x58, {0xaf, 0x00} }, | ||
126 | {"Acer", "v0.3305", 0x55, 0x58, {0xaf, 0x00} }, | ||
127 | {"Acer", "v0.3308", 0x55, 0x58, {0x21, 0x00} }, | ||
128 | {"Acer", "v0.3309", 0x55, 0x58, {0x21, 0x00} }, | ||
129 | {"Acer", "v0.3310", 0x55, 0x58, {0x21, 0x00} }, | ||
130 | {"Gateway", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, | ||
131 | {"Packard Bell", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, | ||
132 | {"", "", 0, 0, {0, 0} } | ||
133 | }; | ||
134 | |||
135 | static const struct bios_settings_t *bios_cfg __read_mostly; | ||
136 | |||
137 | |||
138 | static int acerhdf_get_temp(int *temp) | ||
139 | { | ||
140 | u8 read_temp; | ||
141 | |||
142 | if (ec_read(bios_cfg->tempreg, &read_temp)) | ||
143 | return -EINVAL; | ||
144 | |||
145 | *temp = read_temp; | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int acerhdf_get_fanstate(int *state) | ||
151 | { | ||
152 | u8 fan; | ||
153 | bool tmp; | ||
154 | |||
155 | if (ec_read(bios_cfg->fanreg, &fan)) | ||
156 | return -EINVAL; | ||
157 | |||
158 | tmp = (fan == bios_cfg->fancmd[ACERHDF_FAN_OFF]); | ||
159 | *state = tmp ? ACERHDF_FAN_OFF : ACERHDF_FAN_AUTO; | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static void acerhdf_change_fanstate(int state) | ||
165 | { | ||
166 | unsigned char cmd; | ||
167 | |||
168 | if (verbose) | ||
169 | pr_notice("fan %s\n", (state == ACERHDF_FAN_OFF) ? | ||
170 | "OFF" : "ON"); | ||
171 | |||
172 | if ((state != ACERHDF_FAN_OFF) && (state != ACERHDF_FAN_AUTO)) { | ||
173 | pr_err("invalid fan state %d requested, setting to auto!\n", | ||
174 | state); | ||
175 | state = ACERHDF_FAN_AUTO; | ||
176 | } | ||
177 | |||
178 | cmd = bios_cfg->fancmd[state]; | ||
179 | fanstate = state; | ||
180 | |||
181 | ec_write(bios_cfg->fanreg, cmd); | ||
182 | } | ||
183 | |||
184 | static void acerhdf_check_param(struct thermal_zone_device *thermal) | ||
185 | { | ||
186 | if (fanon > ACERHDF_MAX_FANON) { | ||
187 | pr_err("fanon temperature too high, set to %d\n", | ||
188 | ACERHDF_MAX_FANON); | ||
189 | fanon = ACERHDF_MAX_FANON; | ||
190 | } | ||
191 | |||
192 | if (kernelmode && prev_interval != interval) { | ||
193 | if (interval > ACERHDF_MAX_INTERVAL) { | ||
194 | pr_err("interval too high, set to %d\n", | ||
195 | ACERHDF_MAX_INTERVAL); | ||
196 | interval = ACERHDF_MAX_INTERVAL; | ||
197 | } | ||
198 | if (verbose) | ||
199 | pr_notice("interval changed to: %d\n", | ||
200 | interval); | ||
201 | thermal->polling_delay = interval*1000; | ||
202 | prev_interval = interval; | ||
203 | } | ||
204 | } | ||
205 | |||
206 | /* | ||
207 | * This is the thermal zone callback which does the delayed polling of the fan | ||
208 | * state. We check /sysfs-originating settings here in acerhdf_check_param(), | ||
209 | * at most once per polling interval, since we can't do that in the respective | ||
210 | * accessors of the module parameters. | ||
211 | */ | ||
212 | static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, | ||
213 | unsigned long *t) | ||
214 | { | ||
215 | int temp, err = 0; | ||
216 | |||
217 | acerhdf_check_param(thermal); | ||
218 | |||
219 | err = acerhdf_get_temp(&temp); | ||
220 | if (err) | ||
221 | return err; | ||
222 | |||
223 | if (verbose) | ||
224 | pr_notice("temp %d\n", temp); | ||
225 | |||
226 | *t = temp; | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static int acerhdf_bind(struct thermal_zone_device *thermal, | ||
231 | struct thermal_cooling_device *cdev) | ||
232 | { | ||
233 | /* if the cooling device is the one from acerhdf bind it */ | ||
234 | if (cdev != cl_dev) | ||
235 | return 0; | ||
236 | |||
237 | if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) { | ||
238 | pr_err("error binding cooling dev\n"); | ||
239 | return -EINVAL; | ||
240 | } | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static int acerhdf_unbind(struct thermal_zone_device *thermal, | ||
245 | struct thermal_cooling_device *cdev) | ||
246 | { | ||
247 | if (cdev != cl_dev) | ||
248 | return 0; | ||
249 | |||
250 | if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) { | ||
251 | pr_err("error unbinding cooling dev\n"); | ||
252 | return -EINVAL; | ||
253 | } | ||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | static inline void acerhdf_revert_to_bios_mode(void) | ||
258 | { | ||
259 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
260 | kernelmode = 0; | ||
261 | if (thz_dev) | ||
262 | thz_dev->polling_delay = 0; | ||
263 | pr_notice("kernel mode fan control OFF\n"); | ||
264 | } | ||
265 | static inline void acerhdf_enable_kernelmode(void) | ||
266 | { | ||
267 | kernelmode = 1; | ||
268 | |||
269 | thz_dev->polling_delay = interval*1000; | ||
270 | thermal_zone_device_update(thz_dev); | ||
271 | pr_notice("kernel mode fan control ON\n"); | ||
272 | } | ||
273 | |||
274 | static int acerhdf_get_mode(struct thermal_zone_device *thermal, | ||
275 | enum thermal_device_mode *mode) | ||
276 | { | ||
277 | if (verbose) | ||
278 | pr_notice("kernel mode fan control %d\n", kernelmode); | ||
279 | |||
280 | *mode = (kernelmode) ? THERMAL_DEVICE_ENABLED | ||
281 | : THERMAL_DEVICE_DISABLED; | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * set operation mode; | ||
288 | * enabled: the thermal layer of the kernel takes care of | ||
289 | * the temperature and the fan. | ||
290 | * disabled: the BIOS takes control of the fan. | ||
291 | */ | ||
292 | static int acerhdf_set_mode(struct thermal_zone_device *thermal, | ||
293 | enum thermal_device_mode mode) | ||
294 | { | ||
295 | if (mode == THERMAL_DEVICE_DISABLED && kernelmode) | ||
296 | acerhdf_revert_to_bios_mode(); | ||
297 | else if (mode == THERMAL_DEVICE_ENABLED && !kernelmode) | ||
298 | acerhdf_enable_kernelmode(); | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip, | ||
304 | enum thermal_trip_type *type) | ||
305 | { | ||
306 | if (trip == 0) | ||
307 | *type = THERMAL_TRIP_ACTIVE; | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip, | ||
313 | unsigned long *temp) | ||
314 | { | ||
315 | if (trip == 0) | ||
316 | *temp = fanon; | ||
317 | |||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal, | ||
322 | unsigned long *temperature) | ||
323 | { | ||
324 | *temperature = ACERHDF_TEMP_CRIT; | ||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | /* bind callback functions to thermalzone */ | ||
329 | struct thermal_zone_device_ops acerhdf_dev_ops = { | ||
330 | .bind = acerhdf_bind, | ||
331 | .unbind = acerhdf_unbind, | ||
332 | .get_temp = acerhdf_get_ec_temp, | ||
333 | .get_mode = acerhdf_get_mode, | ||
334 | .set_mode = acerhdf_set_mode, | ||
335 | .get_trip_type = acerhdf_get_trip_type, | ||
336 | .get_trip_temp = acerhdf_get_trip_temp, | ||
337 | .get_crit_temp = acerhdf_get_crit_temp, | ||
338 | }; | ||
339 | |||
340 | |||
341 | /* | ||
342 | * cooling device callback functions | ||
343 | * get maximal fan cooling state | ||
344 | */ | ||
345 | static int acerhdf_get_max_state(struct thermal_cooling_device *cdev, | ||
346 | unsigned long *state) | ||
347 | { | ||
348 | *state = 1; | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | static int acerhdf_get_cur_state(struct thermal_cooling_device *cdev, | ||
354 | unsigned long *state) | ||
355 | { | ||
356 | int err = 0, tmp; | ||
357 | |||
358 | err = acerhdf_get_fanstate(&tmp); | ||
359 | if (err) | ||
360 | return err; | ||
361 | |||
362 | *state = (tmp == ACERHDF_FAN_AUTO) ? 1 : 0; | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | /* change current fan state - is overwritten when running in kernel mode */ | ||
367 | static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev, | ||
368 | unsigned long state) | ||
369 | { | ||
370 | int cur_temp, cur_state, err = 0; | ||
371 | |||
372 | if (!kernelmode) | ||
373 | return 0; | ||
374 | |||
375 | err = acerhdf_get_temp(&cur_temp); | ||
376 | if (err) { | ||
377 | pr_err("error reading temperature, hand off control to BIOS\n"); | ||
378 | goto err_out; | ||
379 | } | ||
380 | |||
381 | err = acerhdf_get_fanstate(&cur_state); | ||
382 | if (err) { | ||
383 | pr_err("error reading fan state, hand off control to BIOS\n"); | ||
384 | goto err_out; | ||
385 | } | ||
386 | |||
387 | if (state == 0) { | ||
388 | /* turn fan off only if below fanoff temperature */ | ||
389 | if ((cur_state == ACERHDF_FAN_AUTO) && | ||
390 | (cur_temp < fanoff)) | ||
391 | acerhdf_change_fanstate(ACERHDF_FAN_OFF); | ||
392 | } else { | ||
393 | if (cur_state == ACERHDF_FAN_OFF) | ||
394 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
395 | } | ||
396 | return 0; | ||
397 | |||
398 | err_out: | ||
399 | acerhdf_revert_to_bios_mode(); | ||
400 | return -EINVAL; | ||
401 | } | ||
402 | |||
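The fanon/fanoff pair used by acerhdf_set_cur_state() above forms a simple hysteresis: the cooling device is switched to AUTO at the fanon trip point and only switched OFF again once the temperature has dropped below fanoff. A minimal userspace sketch of that behaviour (the thresholds below are made up; the real ones are the fanon/fanoff module parameters):

/*
 * Standalone sketch, not kernel code: illustrates the fanon/fanoff
 * hysteresis.  Thresholds are hypothetical.
 */
#include <stdio.h>

enum { FAN_OFF, FAN_AUTO };

static int fanon = 60, fanoff = 53;	/* hypothetical thresholds */

static int next_state(int cur_state, int temp)
{
	if (cur_state == FAN_OFF && temp >= fanon)
		return FAN_AUTO;	/* trip point reached: fan on */
	if (cur_state == FAN_AUTO && temp < fanoff)
		return FAN_OFF;		/* only off again below fanoff */
	return cur_state;
}

int main(void)
{
	int temps[] = { 50, 58, 61, 59, 55, 52 };
	int state = FAN_OFF;
	unsigned int i;

	for (i = 0; i < sizeof(temps) / sizeof(temps[0]); i++) {
		state = next_state(state, temps[i]);
		printf("temp=%d fan=%s\n", temps[i],
		       state == FAN_AUTO ? "AUTO" : "OFF");
	}
	return 0;
}
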
403 | /* bind fan callbacks to fan device */ | ||
404 | struct thermal_cooling_device_ops acerhdf_cooling_ops = { | ||
405 | .get_max_state = acerhdf_get_max_state, | ||
406 | .get_cur_state = acerhdf_get_cur_state, | ||
407 | .set_cur_state = acerhdf_set_cur_state, | ||
408 | }; | ||
409 | |||
410 | /* suspend / resume functionality */ | ||
411 | static int acerhdf_suspend(struct platform_device *dev, pm_message_t state) | ||
412 | { | ||
413 | if (kernelmode) | ||
414 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
415 | |||
416 | if (verbose) | ||
417 | pr_notice("going suspend\n"); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | static int acerhdf_resume(struct platform_device *device) | ||
423 | { | ||
424 | if (verbose) | ||
425 | pr_notice("resuming\n"); | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static int __devinit acerhdf_probe(struct platform_device *device) | ||
431 | { | ||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | static int acerhdf_remove(struct platform_device *device) | ||
436 | { | ||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | struct platform_driver acerhdf_drv = { | ||
441 | .driver = { | ||
442 | .name = "acerhdf", | ||
443 | .owner = THIS_MODULE, | ||
444 | }, | ||
445 | .probe = acerhdf_probe, | ||
446 | .remove = acerhdf_remove, | ||
447 | .suspend = acerhdf_suspend, | ||
448 | .resume = acerhdf_resume, | ||
449 | }; | ||
450 | |||
451 | |||
452 | /* check hardware */ | ||
453 | static int acerhdf_check_hardware(void) | ||
454 | { | ||
455 | char const *vendor, *version, *product; | ||
456 | int i; | ||
457 | |||
458 | /* get BIOS data */ | ||
459 | vendor = dmi_get_system_info(DMI_SYS_VENDOR); | ||
460 | version = dmi_get_system_info(DMI_BIOS_VERSION); | ||
461 | product = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
462 | |||
463 | pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER); | ||
464 | |||
465 | if (!force_bios[0]) { | ||
466 | if (strncmp(product, "AO", 2)) { | ||
467 | pr_err("no Aspire One hardware found\n"); | ||
468 | return -EINVAL; | ||
469 | } | ||
470 | } else { | ||
471 | pr_info("forcing BIOS version: %s\n", version); | ||
472 | version = force_bios; | ||
473 | kernelmode = 0; | ||
474 | } | ||
475 | |||
476 | if (verbose) | ||
477 | pr_info("BIOS info: %s %s, product: %s\n", | ||
478 | vendor, version, product); | ||
479 | |||
480 | /* search BIOS version and vendor in BIOS settings table */ | ||
481 | for (i = 0; bios_tbl[i].version[0]; i++) { | ||
482 | if (!strcmp(bios_tbl[i].vendor, vendor) && | ||
483 | !strcmp(bios_tbl[i].version, version)) { | ||
484 | bios_cfg = &bios_tbl[i]; | ||
485 | break; | ||
486 | } | ||
487 | } | ||
488 | |||
489 | if (!bios_cfg) { | ||
490 | pr_err("unknown (unsupported) BIOS version %s/%s, " | ||
491 | "please report, aborting!\n", vendor, version); | ||
492 | return -EINVAL; | ||
493 | } | ||
494 | |||
495 | /* | ||
496 | * if started with kernel mode off, prevent the kernel from switching | ||
497 | * off the fan | ||
498 | */ | ||
499 | if (!kernelmode) { | ||
500 | pr_notice("Fan control off, to enable do:\n"); | ||
501 | pr_notice("echo -n \"enabled\" > " | ||
502 | "/sys/class/thermal/thermal_zone0/mode\n"); | ||
503 | } | ||
504 | |||
505 | return 0; | ||
506 | } | ||
507 | |||
508 | static int acerhdf_register_platform(void) | ||
509 | { | ||
510 | int err = 0; | ||
511 | |||
512 | err = platform_driver_register(&acerhdf_drv); | ||
513 | if (err) | ||
514 | return err; | ||
515 | |||
516 | acerhdf_dev = platform_device_alloc("acerhdf", -1); | ||
517 | platform_device_add(acerhdf_dev); | ||
518 | |||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static void acerhdf_unregister_platform(void) | ||
523 | { | ||
524 | if (!acerhdf_dev) | ||
525 | return; | ||
526 | |||
527 | platform_device_del(acerhdf_dev); | ||
528 | platform_driver_unregister(&acerhdf_drv); | ||
529 | } | ||
530 | |||
531 | static int acerhdf_register_thermal(void) | ||
532 | { | ||
533 | cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL, | ||
534 | &acerhdf_cooling_ops); | ||
535 | |||
536 | if (IS_ERR(cl_dev)) | ||
537 | return -EINVAL; | ||
538 | |||
539 | thz_dev = thermal_zone_device_register("acerhdf", 1, NULL, | ||
540 | &acerhdf_dev_ops, 0, 0, 0, | ||
541 | (kernelmode) ? interval*1000 : 0); | ||
542 | if (IS_ERR(thz_dev)) | ||
543 | return -EINVAL; | ||
544 | |||
545 | return 0; | ||
546 | } | ||
547 | |||
548 | static void acerhdf_unregister_thermal(void) | ||
549 | { | ||
550 | if (cl_dev) { | ||
551 | thermal_cooling_device_unregister(cl_dev); | ||
552 | cl_dev = NULL; | ||
553 | } | ||
554 | |||
555 | if (thz_dev) { | ||
556 | thermal_zone_device_unregister(thz_dev); | ||
557 | thz_dev = NULL; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | static int __init acerhdf_init(void) | ||
562 | { | ||
563 | int err = 0; | ||
564 | |||
565 | err = acerhdf_check_hardware(); | ||
566 | if (err) | ||
567 | goto out_err; | ||
568 | |||
569 | err = acerhdf_register_platform(); | ||
570 | if (err) | ||
571 | goto err_unreg; | ||
572 | |||
573 | err = acerhdf_register_thermal(); | ||
574 | if (err) | ||
575 | goto err_unreg; | ||
576 | |||
577 | return 0; | ||
578 | |||
579 | err_unreg: | ||
580 | acerhdf_unregister_thermal(); | ||
581 | acerhdf_unregister_platform(); | ||
582 | |||
583 | out_err: | ||
584 | return -ENODEV; | ||
585 | } | ||
586 | |||
587 | static void __exit acerhdf_exit(void) | ||
588 | { | ||
589 | acerhdf_change_fanstate(ACERHDF_FAN_AUTO); | ||
590 | acerhdf_unregister_thermal(); | ||
591 | acerhdf_unregister_platform(); | ||
592 | } | ||
593 | |||
594 | MODULE_LICENSE("GPL"); | ||
595 | MODULE_AUTHOR("Peter Feuerer"); | ||
596 | MODULE_DESCRIPTION("Aspire One temperature and fan driver"); | ||
597 | MODULE_ALIAS("dmi:*:*Acer*:*:"); | ||
598 | MODULE_ALIAS("dmi:*:*Gateway*:*:"); | ||
599 | MODULE_ALIAS("dmi:*:*Packard Bell*:*:"); | ||
600 | |||
601 | module_init(acerhdf_init); | ||
602 | module_exit(acerhdf_exit); | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index bfc1a8892a32..db657bbeec90 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -33,6 +33,8 @@ | |||
33 | * Sam Lin - GPS support | 33 | * Sam Lin - GPS support |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
37 | |||
36 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
37 | #include <linux/module.h> | 39 | #include <linux/module.h> |
38 | #include <linux/init.h> | 40 | #include <linux/init.h> |
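Most of the remaining changes in this file swap the driver's private ASUS_WARNING/ASUS_ERR/ASUS_NOTICE printk wrappers for the generic pr_warning()/pr_err()/pr_notice() helpers, which pick up the pr_fmt() prefix defined above. A minimal userspace sketch (not the kernel implementation) of how that prefixing composes:

/*
 * Userspace illustration of the pr_fmt() convention: every pr_*()
 * message is prefixed with the module name at compile time.  "<4>"
 * stands in for the KERN_WARNING loglevel marker.
 */
#include <stdio.h>

#define KBUILD_MODNAME "asus_laptop"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_warning(fmt, ...) printf("<4>" pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: <4>asus_laptop: Error reading Wireless status */
	pr_warning("Error reading Wireless status\n");
	return 0;
}
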
@@ -53,9 +55,10 @@ | |||
53 | #define ASUS_HOTK_NAME "Asus Laptop Support" | 55 | #define ASUS_HOTK_NAME "Asus Laptop Support" |
54 | #define ASUS_HOTK_CLASS "hotkey" | 56 | #define ASUS_HOTK_CLASS "hotkey" |
55 | #define ASUS_HOTK_DEVICE_NAME "Hotkey" | 57 | #define ASUS_HOTK_DEVICE_NAME "Hotkey" |
56 | #define ASUS_HOTK_FILE "asus-laptop" | 58 | #define ASUS_HOTK_FILE KBUILD_MODNAME |
57 | #define ASUS_HOTK_PREFIX "\\_SB.ATKD." | 59 | #define ASUS_HOTK_PREFIX "\\_SB.ATKD." |
58 | 60 | ||
61 | |||
59 | /* | 62 | /* |
60 | * Some events we use, same for all Asus | 63 | * Some events we use, same for all Asus |
61 | */ | 64 | */ |
@@ -207,13 +210,17 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids); | |||
207 | 210 | ||
208 | static int asus_hotk_add(struct acpi_device *device); | 211 | static int asus_hotk_add(struct acpi_device *device); |
209 | static int asus_hotk_remove(struct acpi_device *device, int type); | 212 | static int asus_hotk_remove(struct acpi_device *device, int type); |
213 | static void asus_hotk_notify(struct acpi_device *device, u32 event); | ||
214 | |||
210 | static struct acpi_driver asus_hotk_driver = { | 215 | static struct acpi_driver asus_hotk_driver = { |
211 | .name = ASUS_HOTK_NAME, | 216 | .name = ASUS_HOTK_NAME, |
212 | .class = ASUS_HOTK_CLASS, | 217 | .class = ASUS_HOTK_CLASS, |
213 | .ids = asus_device_ids, | 218 | .ids = asus_device_ids, |
219 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
214 | .ops = { | 220 | .ops = { |
215 | .add = asus_hotk_add, | 221 | .add = asus_hotk_add, |
216 | .remove = asus_hotk_remove, | 222 | .remove = asus_hotk_remove, |
223 | .notify = asus_hotk_notify, | ||
217 | }, | 224 | }, |
218 | }; | 225 | }; |
219 | 226 | ||
@@ -323,7 +330,7 @@ static int read_wireless_status(int mask) | |||
323 | 330 | ||
324 | rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status); | 331 | rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status); |
325 | if (ACPI_FAILURE(rv)) | 332 | if (ACPI_FAILURE(rv)) |
326 | printk(ASUS_WARNING "Error reading Wireless status\n"); | 333 | pr_warning("Error reading Wireless status\n"); |
327 | else | 334 | else |
328 | return (status & mask) ? 1 : 0; | 335 | return (status & mask) ? 1 : 0; |
329 | 336 | ||
@@ -337,7 +344,7 @@ static int read_gps_status(void) | |||
337 | 344 | ||
338 | rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); | 345 | rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); |
339 | if (ACPI_FAILURE(rv)) | 346 | if (ACPI_FAILURE(rv)) |
340 | printk(ASUS_WARNING "Error reading GPS status\n"); | 347 | pr_warning("Error reading GPS status\n"); |
341 | else | 348 | else |
342 | return status ? 1 : 0; | 349 | return status ? 1 : 0; |
343 | 350 | ||
@@ -377,7 +384,7 @@ static void write_status(acpi_handle handle, int out, int mask) | |||
377 | } | 384 | } |
378 | 385 | ||
379 | if (write_acpi_int(handle, NULL, out, NULL)) | 386 | if (write_acpi_int(handle, NULL, out, NULL)) |
380 | printk(ASUS_WARNING " write failed %x\n", mask); | 387 | pr_warning(" write failed %x\n", mask); |
381 | } | 388 | } |
382 | 389 | ||
383 | /* /sys/class/led handlers */ | 390 | /* /sys/class/led handlers */ |
@@ -420,7 +427,7 @@ static int set_lcd_state(int value) | |||
420 | NULL, NULL, NULL); | 427 | NULL, NULL, NULL); |
421 | 428 | ||
422 | if (ACPI_FAILURE(status)) | 429 | if (ACPI_FAILURE(status)) |
423 | printk(ASUS_WARNING "Error switching LCD\n"); | 430 | pr_warning("Error switching LCD\n"); |
424 | } | 431 | } |
425 | 432 | ||
426 | write_status(NULL, lcd, LCD_ON); | 433 | write_status(NULL, lcd, LCD_ON); |
@@ -444,7 +451,7 @@ static int read_brightness(struct backlight_device *bd) | |||
444 | 451 | ||
445 | rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); | 452 | rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); |
446 | if (ACPI_FAILURE(rv)) | 453 | if (ACPI_FAILURE(rv)) |
447 | printk(ASUS_WARNING "Error reading brightness\n"); | 454 | pr_warning("Error reading brightness\n"); |
448 | 455 | ||
449 | return value; | 456 | return value; |
450 | } | 457 | } |
@@ -457,7 +464,7 @@ static int set_brightness(struct backlight_device *bd, int value) | |||
457 | /* 0 <= value <= 15 */ | 464 | /* 0 <= value <= 15 */ |
458 | 465 | ||
459 | if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { | 466 | if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { |
460 | printk(ASUS_WARNING "Error changing brightness\n"); | 467 | pr_warning("Error changing brightness\n"); |
461 | ret = -EIO; | 468 | ret = -EIO; |
462 | } | 469 | } |
463 | 470 | ||
@@ -587,7 +594,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, | |||
587 | rv = parse_arg(buf, count, &value); | 594 | rv = parse_arg(buf, count, &value); |
588 | if (rv > 0) { | 595 | if (rv > 0) { |
589 | if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) | 596 | if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) |
590 | printk(ASUS_WARNING "LED display write failed\n"); | 597 | pr_warning("LED display write failed\n"); |
591 | else | 598 | else |
592 | hotk->ledd_status = (u32) value; | 599 | hotk->ledd_status = (u32) value; |
593 | } | 600 | } |
@@ -632,7 +639,7 @@ static void set_display(int value) | |||
632 | { | 639 | { |
633 | /* no sanity check needed for now */ | 640 | /* no sanity check needed for now */ |
634 | if (write_acpi_int(display_set_handle, NULL, value, NULL)) | 641 | if (write_acpi_int(display_set_handle, NULL, value, NULL)) |
635 | printk(ASUS_WARNING "Error setting display\n"); | 642 | pr_warning("Error setting display\n"); |
636 | return; | 643 | return; |
637 | } | 644 | } |
638 | 645 | ||
@@ -647,7 +654,7 @@ static int read_display(void) | |||
647 | rv = acpi_evaluate_integer(display_get_handle, NULL, | 654 | rv = acpi_evaluate_integer(display_get_handle, NULL, |
648 | NULL, &value); | 655 | NULL, &value); |
649 | if (ACPI_FAILURE(rv)) | 656 | if (ACPI_FAILURE(rv)) |
650 | printk(ASUS_WARNING "Error reading display status\n"); | 657 | pr_warning("Error reading display status\n"); |
651 | } | 658 | } |
652 | 659 | ||
653 | value &= 0x0F; /* needed for some models, shouldn't hurt others */ | 660 | value &= 0x0F; /* needed for some models, shouldn't hurt others */ |
@@ -689,7 +696,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr, | |||
689 | static void set_light_sens_switch(int value) | 696 | static void set_light_sens_switch(int value) |
690 | { | 697 | { |
691 | if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) | 698 | if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) |
692 | printk(ASUS_WARNING "Error setting light sensor switch\n"); | 699 | pr_warning("Error setting light sensor switch\n"); |
693 | hotk->light_switch = value; | 700 | hotk->light_switch = value; |
694 | } | 701 | } |
695 | 702 | ||
@@ -714,7 +721,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, | |||
714 | static void set_light_sens_level(int value) | 721 | static void set_light_sens_level(int value) |
715 | { | 722 | { |
716 | if (write_acpi_int(ls_level_handle, NULL, value, NULL)) | 723 | if (write_acpi_int(ls_level_handle, NULL, value, NULL)) |
717 | printk(ASUS_WARNING "Error setting light sensor level\n"); | 724 | pr_warning("Error setting light sensor level\n"); |
718 | hotk->light_level = value; | 725 | hotk->light_level = value; |
719 | } | 726 | } |
720 | 727 | ||
@@ -812,7 +819,7 @@ static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode) | |||
812 | return -EINVAL; | 819 | return -EINVAL; |
813 | } | 820 | } |
814 | 821 | ||
815 | static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) | 822 | static void asus_hotk_notify(struct acpi_device *device, u32 event) |
816 | { | 823 | { |
817 | static struct key_entry *key; | 824 | static struct key_entry *key; |
818 | u16 count; | 825 | u16 count; |
@@ -975,11 +982,11 @@ static int asus_hotk_get_info(void) | |||
975 | */ | 982 | */ |
976 | status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); | 983 | status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); |
977 | if (ACPI_FAILURE(status)) | 984 | if (ACPI_FAILURE(status)) |
978 | printk(ASUS_WARNING "Couldn't get the DSDT table header\n"); | 985 | pr_warning("Couldn't get the DSDT table header\n"); |
979 | 986 | ||
980 | /* We have to write 0 on init this far for all ASUS models */ | 987 | /* We have to write 0 on init this far for all ASUS models */ |
981 | if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { | 988 | if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { |
982 | printk(ASUS_ERR "Hotkey initialization failed\n"); | 989 | pr_err("Hotkey initialization failed\n"); |
983 | return -ENODEV; | 990 | return -ENODEV; |
984 | } | 991 | } |
985 | 992 | ||
@@ -987,9 +994,9 @@ static int asus_hotk_get_info(void) | |||
987 | status = | 994 | status = |
988 | acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result); | 995 | acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result); |
989 | if (ACPI_FAILURE(status)) | 996 | if (ACPI_FAILURE(status)) |
990 | printk(ASUS_WARNING "Error calling BSTS\n"); | 997 | pr_warning("Error calling BSTS\n"); |
991 | else if (bsts_result) | 998 | else if (bsts_result) |
992 | printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n", | 999 | pr_notice("BSTS called, 0x%02x returned\n", |
993 | (uint) bsts_result); | 1000 | (uint) bsts_result); |
994 | 1001 | ||
995 | /* This too ... */ | 1002 | /* This too ... */ |
@@ -1020,7 +1027,7 @@ static int asus_hotk_get_info(void) | |||
1020 | return -ENOMEM; | 1027 | return -ENOMEM; |
1021 | 1028 | ||
1022 | if (*string) | 1029 | if (*string) |
1023 | printk(ASUS_NOTICE " %s model detected\n", string); | 1030 | pr_notice(" %s model detected\n", string); |
1024 | 1031 | ||
1025 | ASUS_HANDLE_INIT(mled_set); | 1032 | ASUS_HANDLE_INIT(mled_set); |
1026 | ASUS_HANDLE_INIT(tled_set); | 1033 | ASUS_HANDLE_INIT(tled_set); |
@@ -1077,7 +1084,7 @@ static int asus_input_init(void) | |||
1077 | 1084 | ||
1078 | hotk->inputdev = input_allocate_device(); | 1085 | hotk->inputdev = input_allocate_device(); |
1079 | if (!hotk->inputdev) { | 1086 | if (!hotk->inputdev) { |
1080 | printk(ASUS_INFO "Unable to allocate input device\n"); | 1087 | pr_info("Unable to allocate input device\n"); |
1081 | return 0; | 1088 | return 0; |
1082 | } | 1089 | } |
1083 | hotk->inputdev->name = "Asus Laptop extra buttons"; | 1090 | hotk->inputdev->name = "Asus Laptop extra buttons"; |
@@ -1096,7 +1103,7 @@ static int asus_input_init(void) | |||
1096 | } | 1103 | } |
1097 | result = input_register_device(hotk->inputdev); | 1104 | result = input_register_device(hotk->inputdev); |
1098 | if (result) { | 1105 | if (result) { |
1099 | printk(ASUS_INFO "Unable to register input device\n"); | 1106 | pr_info("Unable to register input device\n"); |
1100 | input_free_device(hotk->inputdev); | 1107 | input_free_device(hotk->inputdev); |
1101 | } | 1108 | } |
1102 | return result; | 1109 | return result; |
@@ -1113,7 +1120,7 @@ static int asus_hotk_check(void) | |||
1113 | if (hotk->device->status.present) { | 1120 | if (hotk->device->status.present) { |
1114 | result = asus_hotk_get_info(); | 1121 | result = asus_hotk_get_info(); |
1115 | } else { | 1122 | } else { |
1116 | printk(ASUS_ERR "Hotkey device not present, aborting\n"); | 1123 | pr_err("Hotkey device not present, aborting\n"); |
1117 | return -EINVAL; | 1124 | return -EINVAL; |
1118 | } | 1125 | } |
1119 | 1126 | ||
@@ -1124,13 +1131,12 @@ static int asus_hotk_found; | |||
1124 | 1131 | ||
1125 | static int asus_hotk_add(struct acpi_device *device) | 1132 | static int asus_hotk_add(struct acpi_device *device) |
1126 | { | 1133 | { |
1127 | acpi_status status = AE_OK; | ||
1128 | int result; | 1134 | int result; |
1129 | 1135 | ||
1130 | if (!device) | 1136 | if (!device) |
1131 | return -EINVAL; | 1137 | return -EINVAL; |
1132 | 1138 | ||
1133 | printk(ASUS_NOTICE "Asus Laptop Support version %s\n", | 1139 | pr_notice("Asus Laptop Support version %s\n", |
1134 | ASUS_LAPTOP_VERSION); | 1140 | ASUS_LAPTOP_VERSION); |
1135 | 1141 | ||
1136 | hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL); | 1142 | hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL); |
@@ -1149,15 +1155,6 @@ static int asus_hotk_add(struct acpi_device *device) | |||
1149 | 1155 | ||
1150 | asus_hotk_add_fs(); | 1156 | asus_hotk_add_fs(); |
1151 | 1157 | ||
1152 | /* | ||
1153 | * We install the handler, it will receive the hotk in parameter, so, we | ||
1154 | * could add other data to the hotk struct | ||
1155 | */ | ||
1156 | status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY, | ||
1157 | asus_hotk_notify, hotk); | ||
1158 | if (ACPI_FAILURE(status)) | ||
1159 | printk(ASUS_ERR "Error installing notify handler\n"); | ||
1160 | |||
1161 | asus_hotk_found = 1; | 1158 | asus_hotk_found = 1; |
1162 | 1159 | ||
1163 | /* WLED and BLED are on by default */ | 1160 | /* WLED and BLED are on by default */ |
@@ -1198,16 +1195,9 @@ end: | |||
1198 | 1195 | ||
1199 | static int asus_hotk_remove(struct acpi_device *device, int type) | 1196 | static int asus_hotk_remove(struct acpi_device *device, int type) |
1200 | { | 1197 | { |
1201 | acpi_status status = 0; | ||
1202 | |||
1203 | if (!device || !acpi_driver_data(device)) | 1198 | if (!device || !acpi_driver_data(device)) |
1204 | return -EINVAL; | 1199 | return -EINVAL; |
1205 | 1200 | ||
1206 | status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY, | ||
1207 | asus_hotk_notify); | ||
1208 | if (ACPI_FAILURE(status)) | ||
1209 | printk(ASUS_ERR "Error removing notify handler\n"); | ||
1210 | |||
1211 | kfree(hotk->name); | 1201 | kfree(hotk->name); |
1212 | kfree(hotk); | 1202 | kfree(hotk); |
1213 | 1203 | ||
@@ -1260,8 +1250,7 @@ static int asus_backlight_init(struct device *dev) | |||
1260 | bd = backlight_device_register(ASUS_HOTK_FILE, dev, | 1250 | bd = backlight_device_register(ASUS_HOTK_FILE, dev, |
1261 | NULL, &asusbl_ops); | 1251 | NULL, &asusbl_ops); |
1262 | if (IS_ERR(bd)) { | 1252 | if (IS_ERR(bd)) { |
1263 | printk(ASUS_ERR | 1253 | pr_err("Could not register asus backlight device\n"); |
1264 | "Could not register asus backlight device\n"); | ||
1265 | asus_backlight_device = NULL; | 1254 | asus_backlight_device = NULL; |
1266 | return PTR_ERR(bd); | 1255 | return PTR_ERR(bd); |
1267 | } | 1256 | } |
@@ -1334,7 +1323,6 @@ out: | |||
1334 | 1323 | ||
1335 | static int __init asus_laptop_init(void) | 1324 | static int __init asus_laptop_init(void) |
1336 | { | 1325 | { |
1337 | struct device *dev; | ||
1338 | int result; | 1326 | int result; |
1339 | 1327 | ||
1340 | if (acpi_disabled) | 1328 | if (acpi_disabled) |
@@ -1356,24 +1344,10 @@ static int __init asus_laptop_init(void) | |||
1356 | return -ENODEV; | 1344 | return -ENODEV; |
1357 | } | 1345 | } |
1358 | 1346 | ||
1359 | dev = acpi_get_physical_device(hotk->device->handle); | ||
1360 | |||
1361 | if (!acpi_video_backlight_support()) { | ||
1362 | result = asus_backlight_init(dev); | ||
1363 | if (result) | ||
1364 | goto fail_backlight; | ||
1365 | } else | ||
1366 | printk(ASUS_INFO "Brightness ignored, must be controlled by " | ||
1367 | "ACPI video driver\n"); | ||
1368 | |||
1369 | result = asus_input_init(); | 1347 | result = asus_input_init(); |
1370 | if (result) | 1348 | if (result) |
1371 | goto fail_input; | 1349 | goto fail_input; |
1372 | 1350 | ||
1373 | result = asus_led_init(dev); | ||
1374 | if (result) | ||
1375 | goto fail_led; | ||
1376 | |||
1377 | /* Register platform stuff */ | 1351 | /* Register platform stuff */ |
1378 | result = platform_driver_register(&asuspf_driver); | 1352 | result = platform_driver_register(&asuspf_driver); |
1379 | if (result) | 1353 | if (result) |
@@ -1394,8 +1368,27 @@ static int __init asus_laptop_init(void) | |||
1394 | if (result) | 1368 | if (result) |
1395 | goto fail_sysfs; | 1369 | goto fail_sysfs; |
1396 | 1370 | ||
1371 | result = asus_led_init(&asuspf_device->dev); | ||
1372 | if (result) | ||
1373 | goto fail_led; | ||
1374 | |||
1375 | if (!acpi_video_backlight_support()) { | ||
1376 | result = asus_backlight_init(&asuspf_device->dev); | ||
1377 | if (result) | ||
1378 | goto fail_backlight; | ||
1379 | } else | ||
1380 | pr_info("Brightness ignored, must be controlled by " | ||
1381 | "ACPI video driver\n"); | ||
1382 | |||
1397 | return 0; | 1383 | return 0; |
1398 | 1384 | ||
1385 | fail_backlight: | ||
1386 | asus_led_exit(); | ||
1387 | |||
1388 | fail_led: | ||
1389 | sysfs_remove_group(&asuspf_device->dev.kobj, | ||
1390 | &asuspf_attribute_group); | ||
1391 | |||
1399 | fail_sysfs: | 1392 | fail_sysfs: |
1400 | platform_device_del(asuspf_device); | 1393 | platform_device_del(asuspf_device); |
1401 | 1394 | ||
@@ -1406,15 +1399,9 @@ fail_platform_device1: | |||
1406 | platform_driver_unregister(&asuspf_driver); | 1399 | platform_driver_unregister(&asuspf_driver); |
1407 | 1400 | ||
1408 | fail_platform_driver: | 1401 | fail_platform_driver: |
1409 | asus_led_exit(); | ||
1410 | |||
1411 | fail_led: | ||
1412 | asus_input_exit(); | 1402 | asus_input_exit(); |
1413 | 1403 | ||
1414 | fail_input: | 1404 | fail_input: |
1415 | asus_backlight_exit(); | ||
1416 | |||
1417 | fail_backlight: | ||
1418 | 1405 | ||
1419 | return result; | 1406 | return result; |
1420 | } | 1407 | } |
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index ba1f7497e4b9..ddf5240ade8c 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c | |||
@@ -455,6 +455,8 @@ static struct asus_hotk *hotk; | |||
455 | */ | 455 | */ |
456 | static int asus_hotk_add(struct acpi_device *device); | 456 | static int asus_hotk_add(struct acpi_device *device); |
457 | static int asus_hotk_remove(struct acpi_device *device, int type); | 457 | static int asus_hotk_remove(struct acpi_device *device, int type); |
458 | static void asus_hotk_notify(struct acpi_device *device, u32 event); | ||
459 | |||
458 | static const struct acpi_device_id asus_device_ids[] = { | 460 | static const struct acpi_device_id asus_device_ids[] = { |
459 | {"ATK0100", 0}, | 461 | {"ATK0100", 0}, |
460 | {"", 0}, | 462 | {"", 0}, |
@@ -465,9 +467,11 @@ static struct acpi_driver asus_hotk_driver = { | |||
465 | .name = "asus_acpi", | 467 | .name = "asus_acpi", |
466 | .class = ACPI_HOTK_CLASS, | 468 | .class = ACPI_HOTK_CLASS, |
467 | .ids = asus_device_ids, | 469 | .ids = asus_device_ids, |
470 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
468 | .ops = { | 471 | .ops = { |
469 | .add = asus_hotk_add, | 472 | .add = asus_hotk_add, |
470 | .remove = asus_hotk_remove, | 473 | .remove = asus_hotk_remove, |
474 | .notify = asus_hotk_notify, | ||
471 | }, | 475 | }, |
472 | }; | 476 | }; |
473 | 477 | ||
@@ -1101,12 +1105,20 @@ static int asus_hotk_remove_fs(struct acpi_device *device) | |||
1101 | return 0; | 1105 | return 0; |
1102 | } | 1106 | } |
1103 | 1107 | ||
1104 | static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) | 1108 | static void asus_hotk_notify(struct acpi_device *device, u32 event) |
1105 | { | 1109 | { |
1106 | /* TODO Find a better way to handle events count. */ | 1110 | /* TODO Find a better way to handle events count. */ |
1107 | if (!hotk) | 1111 | if (!hotk) |
1108 | return; | 1112 | return; |
1109 | 1113 | ||
1114 | /* | ||
1115 | * The BIOS *should* be sending us device events, but apparently | ||
1116 | * Asus uses system events instead, so just ignore any device | ||
1117 | * events we get. | ||
1118 | */ | ||
1119 | if (event > ACPI_MAX_SYS_NOTIFY) | ||
1120 | return; | ||
1121 | |||
1110 | if ((event & ~((u32) BR_UP)) < 16) | 1122 | if ((event & ~((u32) BR_UP)) < 16) |
1111 | hotk->brightness = (event & ~((u32) BR_UP)); | 1123 | hotk->brightness = (event & ~((u32) BR_UP)); |
1112 | else if ((event & ~((u32) BR_DOWN)) < 16) | 1124 | else if ((event & ~((u32) BR_DOWN)) < 16) |
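The new check against ACPI_MAX_SYS_NOTIFY relies on the split of the ACPI notify value space: system notifications occupy 0x00..0x7f, while device-specific notifications start at 0x80. A small userspace illustration of the filter (the 0x7f constant mirrors the kernel's definition of ACPI_MAX_SYS_NOTIFY at the time; treat it as an assumption here):

/* Standalone sketch of the event filter added in asus_hotk_notify(). */
#include <stdio.h>

#define ACPI_MAX_SYS_NOTIFY 0x7f	/* assumed value, from <acpi/actypes.h> */

static int handled(unsigned int event)
{
	/* device-specific events (>= 0x80) are ignored by the handler */
	return event <= ACPI_MAX_SYS_NOTIFY;
}

int main(void)
{
	printf("event 0x30: %s\n", handled(0x30) ? "handled" : "ignored");
	printf("event 0x80: %s\n", handled(0x80) ? "handled" : "ignored");
	return 0;
}
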
@@ -1346,15 +1358,6 @@ static int asus_hotk_add(struct acpi_device *device) | |||
1346 | if (result) | 1358 | if (result) |
1347 | goto end; | 1359 | goto end; |
1348 | 1360 | ||
1349 | /* | ||
1350 | * We install the handler, it will receive the hotk in parameter, so, we | ||
1351 | * could add other data to the hotk struct | ||
1352 | */ | ||
1353 | status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, | ||
1354 | asus_hotk_notify, hotk); | ||
1355 | if (ACPI_FAILURE(status)) | ||
1356 | printk(KERN_ERR " Error installing notify handler\n"); | ||
1357 | |||
1358 | /* For laptops without GPLV: init the hotk->brightness value */ | 1361 | /* For laptops without GPLV: init the hotk->brightness value */ |
1359 | if ((!hotk->methods->brightness_get) | 1362 | if ((!hotk->methods->brightness_get) |
1360 | && (!hotk->methods->brightness_status) | 1363 | && (!hotk->methods->brightness_status) |
@@ -1389,16 +1392,9 @@ end: | |||
1389 | 1392 | ||
1390 | static int asus_hotk_remove(struct acpi_device *device, int type) | 1393 | static int asus_hotk_remove(struct acpi_device *device, int type) |
1391 | { | 1394 | { |
1392 | acpi_status status = 0; | ||
1393 | |||
1394 | if (!device || !acpi_driver_data(device)) | 1395 | if (!device || !acpi_driver_data(device)) |
1395 | return -EINVAL; | 1396 | return -EINVAL; |
1396 | 1397 | ||
1397 | status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, | ||
1398 | asus_hotk_notify); | ||
1399 | if (ACPI_FAILURE(status)) | ||
1400 | printk(KERN_ERR "Asus ACPI: Error removing notify handler\n"); | ||
1401 | |||
1402 | asus_hotk_remove_fs(device); | 1398 | asus_hotk_remove_fs(device); |
1403 | 1399 | ||
1404 | kfree(hotk); | 1400 | kfree(hotk); |
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 2fab94162147..0f900cc9fa7a 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
@@ -46,10 +46,53 @@ struct key_entry { | |||
46 | u16 keycode; | 46 | u16 keycode; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | enum { KE_KEY, KE_SW, KE_END }; | 49 | enum { KE_KEY, KE_SW, KE_IGNORE, KE_END }; |
50 | |||
51 | /* | ||
52 | * Certain keys are flagged as KE_IGNORE. All of these are either | ||
53 | * notifications (rather than requests for change) or are also sent | ||
54 | * via the keyboard controller so should not be sent again. | ||
55 | */ | ||
50 | 56 | ||
51 | static struct key_entry dell_wmi_keymap[] = { | 57 | static struct key_entry dell_wmi_keymap[] = { |
52 | {KE_KEY, 0xe045, KEY_PROG1}, | 58 | {KE_KEY, 0xe045, KEY_PROG1}, |
59 | {KE_KEY, 0xe009, KEY_EJECTCD}, | ||
60 | |||
61 | /* These also contain the brightness level at offset 6 */ | ||
62 | {KE_KEY, 0xe006, KEY_BRIGHTNESSUP}, | ||
63 | {KE_KEY, 0xe005, KEY_BRIGHTNESSDOWN}, | ||
64 | |||
65 | /* Battery health status button */ | ||
66 | {KE_KEY, 0xe007, KEY_BATTERY}, | ||
67 | |||
68 | /* This is actually for all radios. Although physically a | ||
69 | * switch, the notification does not provide an indication of | ||
70 | * state and so it should be reported as a key */ | ||
71 | {KE_KEY, 0xe008, KEY_WLAN}, | ||
72 | |||
73 | /* The next device is at offset 6, the active devices are at | ||
74 | offset 8 and the attached devices at offset 10 */ | ||
75 | {KE_KEY, 0xe00b, KEY_DISPLAYTOGGLE}, | ||
76 | |||
77 | {KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE}, | ||
78 | |||
79 | /* BIOS error detected */ | ||
80 | {KE_IGNORE, 0xe00d, KEY_RESERVED}, | ||
81 | |||
82 | /* Wifi Catcher */ | ||
83 | {KE_KEY, 0xe011, KEY_PROG2}, | ||
84 | |||
85 | /* Ambient light sensor toggle */ | ||
86 | {KE_IGNORE, 0xe013, KEY_RESERVED}, | ||
87 | |||
88 | {KE_IGNORE, 0xe020, KEY_MUTE}, | ||
89 | {KE_IGNORE, 0xe02e, KEY_VOLUMEDOWN}, | ||
90 | {KE_IGNORE, 0xe030, KEY_VOLUMEUP}, | ||
91 | {KE_IGNORE, 0xe033, KEY_KBDILLUMUP}, | ||
92 | {KE_IGNORE, 0xe034, KEY_KBDILLUMDOWN}, | ||
93 | {KE_IGNORE, 0xe03a, KEY_CAPSLOCK}, | ||
94 | {KE_IGNORE, 0xe045, KEY_NUMLOCK}, | ||
95 | {KE_IGNORE, 0xe046, KEY_SCROLLLOCK}, | ||
53 | {KE_END, 0} | 96 | {KE_END, 0} |
54 | }; | 97 | }; |
55 | 98 | ||
@@ -122,15 +165,20 @@ static void dell_wmi_notify(u32 value, void *context) | |||
122 | 165 | ||
123 | if (obj && obj->type == ACPI_TYPE_BUFFER) { | 166 | if (obj && obj->type == ACPI_TYPE_BUFFER) { |
124 | int *buffer = (int *)obj->buffer.pointer; | 167 | int *buffer = (int *)obj->buffer.pointer; |
125 | key = dell_wmi_get_entry_by_scancode(buffer[1]); | 168 | /* |
169 | * The upper bytes of the event may contain | ||
170 | * additional information, so mask them off for the | ||
171 | * scancode lookup | ||
172 | */ | ||
173 | key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF); | ||
126 | if (key) { | 174 | if (key) { |
127 | input_report_key(dell_wmi_input_dev, key->keycode, 1); | 175 | input_report_key(dell_wmi_input_dev, key->keycode, 1); |
128 | input_sync(dell_wmi_input_dev); | 176 | input_sync(dell_wmi_input_dev); |
129 | input_report_key(dell_wmi_input_dev, key->keycode, 0); | 177 | input_report_key(dell_wmi_input_dev, key->keycode, 0); |
130 | input_sync(dell_wmi_input_dev); | 178 | input_sync(dell_wmi_input_dev); |
131 | } else | 179 | } else if (buffer[1] & 0xFFFF) |
132 | printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n", | 180 | printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n", |
133 | buffer[1]); | 181 | buffer[1] & 0xFFFF); |
134 | } | 182 | } |
135 | } | 183 | } |
136 | 184 | ||
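The masking added to dell_wmi_notify() is the key change in this file: only the low 16 bits of the event word are the scancode, the upper bytes may carry additional information. A standalone sketch of the masked lookup (the keymap entries and the sample event value are illustrative, not taken from real hardware traces):

/* Userspace sketch of the masked scancode lookup. */
#include <stdio.h>

enum { KE_KEY, KE_IGNORE, KE_END };

struct key_entry { int type; unsigned short code; const char *key; };

static struct key_entry keymap[] = {
	{ KE_KEY,    0xe006, "KEY_BRIGHTNESSUP" },
	{ KE_IGNORE, 0xe020, "KEY_MUTE" },
	{ KE_END,    0,      NULL },
};

static const struct key_entry *lookup(unsigned int event)
{
	unsigned short scancode = event & 0xFFFF;	/* strip upper bytes */
	const struct key_entry *k;

	for (k = keymap; k->type != KE_END; k++)
		if (k->code == scancode)
			return k;
	return NULL;
}

int main(void)
{
	/* 0x0001e006: scancode 0xe006 with extra data in the upper bytes */
	const struct key_entry *k = lookup(0x0001e006);

	printf("%s\n", k ? k->key : "unknown key");
	return 0;
}
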
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 8153b3e59189..ec560f16d720 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -16,6 +16,8 @@ | |||
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | |||
19 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | 22 | #include <linux/module.h> |
21 | #include <linux/init.h> | 23 | #include <linux/init.h> |
@@ -31,6 +33,7 @@ | |||
31 | #include <linux/input.h> | 33 | #include <linux/input.h> |
32 | #include <linux/rfkill.h> | 34 | #include <linux/rfkill.h> |
33 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
36 | #include <linux/pci_hotplug.h> | ||
34 | 37 | ||
35 | #define EEEPC_LAPTOP_VERSION "0.1" | 38 | #define EEEPC_LAPTOP_VERSION "0.1" |
36 | 39 | ||
@@ -40,11 +43,6 @@ | |||
40 | #define EEEPC_HOTK_DEVICE_NAME "Hotkey" | 43 | #define EEEPC_HOTK_DEVICE_NAME "Hotkey" |
41 | #define EEEPC_HOTK_HID "ASUS010" | 44 | #define EEEPC_HOTK_HID "ASUS010" |
42 | 45 | ||
43 | #define EEEPC_LOG EEEPC_HOTK_FILE ": " | ||
44 | #define EEEPC_ERR KERN_ERR EEEPC_LOG | ||
45 | #define EEEPC_WARNING KERN_WARNING EEEPC_LOG | ||
46 | #define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG | ||
47 | #define EEEPC_INFO KERN_INFO EEEPC_LOG | ||
48 | 46 | ||
49 | /* | 47 | /* |
50 | * Definitions for Asus EeePC | 48 | * Definitions for Asus EeePC |
@@ -62,7 +60,10 @@ enum { | |||
62 | DISABLE_ASL_GPS = 0x0020, | 60 | DISABLE_ASL_GPS = 0x0020, |
63 | DISABLE_ASL_DISPLAYSWITCH = 0x0040, | 61 | DISABLE_ASL_DISPLAYSWITCH = 0x0040, |
64 | DISABLE_ASL_MODEM = 0x0080, | 62 | DISABLE_ASL_MODEM = 0x0080, |
65 | DISABLE_ASL_CARDREADER = 0x0100 | 63 | DISABLE_ASL_CARDREADER = 0x0100, |
64 | DISABLE_ASL_3G = 0x0200, | ||
65 | DISABLE_ASL_WIMAX = 0x0400, | ||
66 | DISABLE_ASL_HWCF = 0x0800 | ||
66 | }; | 67 | }; |
67 | 68 | ||
68 | enum { | 69 | enum { |
@@ -87,7 +88,13 @@ enum { | |||
87 | CM_ASL_USBPORT3, | 88 | CM_ASL_USBPORT3, |
88 | CM_ASL_MODEM, | 89 | CM_ASL_MODEM, |
89 | CM_ASL_CARDREADER, | 90 | CM_ASL_CARDREADER, |
90 | CM_ASL_LID | 91 | CM_ASL_3G, |
92 | CM_ASL_WIMAX, | ||
93 | CM_ASL_HWCF, | ||
94 | CM_ASL_LID, | ||
95 | CM_ASL_TYPE, | ||
96 | CM_ASL_PANELPOWER, /*P901*/ | ||
97 | CM_ASL_TPD | ||
91 | }; | 98 | }; |
92 | 99 | ||
93 | static const char *cm_getv[] = { | 100 | static const char *cm_getv[] = { |
@@ -96,7 +103,8 @@ static const char *cm_getv[] = { | |||
96 | NULL, "PBLG", NULL, NULL, | 103 | NULL, "PBLG", NULL, NULL, |
97 | "CFVG", NULL, NULL, NULL, | 104 | "CFVG", NULL, NULL, NULL, |
98 | "USBG", NULL, NULL, "MODG", | 105 | "USBG", NULL, NULL, "MODG", |
99 | "CRDG", "LIDG" | 106 | "CRDG", "M3GG", "WIMG", "HWCF", |
107 | "LIDG", "TYPE", "PBPG", "TPDG" | ||
100 | }; | 108 | }; |
101 | 109 | ||
102 | static const char *cm_setv[] = { | 110 | static const char *cm_setv[] = { |
@@ -105,7 +113,8 @@ static const char *cm_setv[] = { | |||
105 | "SDSP", "PBLS", "HDPS", NULL, | 113 | "SDSP", "PBLS", "HDPS", NULL, |
106 | "CFVS", NULL, NULL, NULL, | 114 | "CFVS", NULL, NULL, NULL, |
107 | "USBG", NULL, NULL, "MODS", | 115 | "USBG", NULL, NULL, "MODS", |
108 | "CRDS", NULL | 116 | "CRDS", "M3GS", "WIMS", NULL, |
117 | NULL, NULL, "PBPS", "TPDS" | ||
109 | }; | 118 | }; |
110 | 119 | ||
111 | #define EEEPC_EC "\\_SB.PCI0.SBRG.EC0." | 120 | #define EEEPC_EC "\\_SB.PCI0.SBRG.EC0." |
@@ -130,8 +139,10 @@ struct eeepc_hotk { | |||
130 | u16 event_count[128]; /* count for each event */ | 139 | u16 event_count[128]; /* count for each event */ |
131 | struct input_dev *inputdev; | 140 | struct input_dev *inputdev; |
132 | u16 *keycode_map; | 141 | u16 *keycode_map; |
133 | struct rfkill *eeepc_wlan_rfkill; | 142 | struct rfkill *wlan_rfkill; |
134 | struct rfkill *eeepc_bluetooth_rfkill; | 143 | struct rfkill *bluetooth_rfkill; |
144 | struct rfkill *wwan3g_rfkill; | ||
145 | struct hotplug_slot *hotplug_slot; | ||
135 | }; | 146 | }; |
136 | 147 | ||
137 | /* The actual device the driver binds to */ | 148 | /* The actual device the driver binds to */ |
@@ -181,6 +192,7 @@ static struct key_entry eeepc_keymap[] = { | |||
181 | static int eeepc_hotk_add(struct acpi_device *device); | 192 | static int eeepc_hotk_add(struct acpi_device *device); |
182 | static int eeepc_hotk_remove(struct acpi_device *device, int type); | 193 | static int eeepc_hotk_remove(struct acpi_device *device, int type); |
183 | static int eeepc_hotk_resume(struct acpi_device *device); | 194 | static int eeepc_hotk_resume(struct acpi_device *device); |
195 | static void eeepc_hotk_notify(struct acpi_device *device, u32 event); | ||
184 | 196 | ||
185 | static const struct acpi_device_id eeepc_device_ids[] = { | 197 | static const struct acpi_device_id eeepc_device_ids[] = { |
186 | {EEEPC_HOTK_HID, 0}, | 198 | {EEEPC_HOTK_HID, 0}, |
@@ -192,13 +204,24 @@ static struct acpi_driver eeepc_hotk_driver = { | |||
192 | .name = EEEPC_HOTK_NAME, | 204 | .name = EEEPC_HOTK_NAME, |
193 | .class = EEEPC_HOTK_CLASS, | 205 | .class = EEEPC_HOTK_CLASS, |
194 | .ids = eeepc_device_ids, | 206 | .ids = eeepc_device_ids, |
207 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
195 | .ops = { | 208 | .ops = { |
196 | .add = eeepc_hotk_add, | 209 | .add = eeepc_hotk_add, |
197 | .remove = eeepc_hotk_remove, | 210 | .remove = eeepc_hotk_remove, |
198 | .resume = eeepc_hotk_resume, | 211 | .resume = eeepc_hotk_resume, |
212 | .notify = eeepc_hotk_notify, | ||
199 | }, | 213 | }, |
200 | }; | 214 | }; |
201 | 215 | ||
216 | /* PCI hotplug ops */ | ||
217 | static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value); | ||
218 | |||
219 | static struct hotplug_slot_ops eeepc_hotplug_slot_ops = { | ||
220 | .owner = THIS_MODULE, | ||
221 | .get_adapter_status = eeepc_get_adapter_status, | ||
222 | .get_power_status = eeepc_get_adapter_status, | ||
223 | }; | ||
224 | |||
202 | /* The backlight device /sys/class/backlight */ | 225 | /* The backlight device /sys/class/backlight */ |
203 | static struct backlight_device *eeepc_backlight_device; | 226 | static struct backlight_device *eeepc_backlight_device; |
204 | 227 | ||
@@ -260,20 +283,20 @@ static int set_acpi(int cm, int value) | |||
260 | if (method == NULL) | 283 | if (method == NULL) |
261 | return -ENODEV; | 284 | return -ENODEV; |
262 | if (write_acpi_int(ehotk->handle, method, value, NULL)) | 285 | if (write_acpi_int(ehotk->handle, method, value, NULL)) |
263 | printk(EEEPC_WARNING "Error writing %s\n", method); | 286 | pr_warning("Error writing %s\n", method); |
264 | } | 287 | } |
265 | return 0; | 288 | return 0; |
266 | } | 289 | } |
267 | 290 | ||
268 | static int get_acpi(int cm) | 291 | static int get_acpi(int cm) |
269 | { | 292 | { |
270 | int value = -1; | 293 | int value = -ENODEV; |
271 | if ((ehotk->cm_supported & (0x1 << cm))) { | 294 | if ((ehotk->cm_supported & (0x1 << cm))) { |
272 | const char *method = cm_getv[cm]; | 295 | const char *method = cm_getv[cm]; |
273 | if (method == NULL) | 296 | if (method == NULL) |
274 | return -ENODEV; | 297 | return -ENODEV; |
275 | if (read_acpi_int(ehotk->handle, method, &value)) | 298 | if (read_acpi_int(ehotk->handle, method, &value)) |
276 | printk(EEEPC_WARNING "Error reading %s\n", method); | 299 | pr_warning("Error reading %s\n", method); |
277 | } | 300 | } |
278 | return value; | 301 | return value; |
279 | } | 302 | } |
@@ -318,6 +341,15 @@ static const struct rfkill_ops eeepc_rfkill_ops = { | |||
318 | .set_block = eeepc_rfkill_set, | 341 | .set_block = eeepc_rfkill_set, |
319 | }; | 342 | }; |
320 | 343 | ||
344 | static void __init eeepc_enable_camera(void) | ||
345 | { | ||
346 | /* | ||
347 | * If the following call to set_acpi() fails, it's because there's no | ||
348 | * camera so we can ignore the error. | ||
349 | */ | ||
350 | set_acpi(CM_ASL_CAMERA, 1); | ||
351 | } | ||
352 | |||
321 | /* | 353 | /* |
322 | * Sys helpers | 354 | * Sys helpers |
323 | */ | 355 | */ |
@@ -336,13 +368,19 @@ static ssize_t store_sys_acpi(int cm, const char *buf, size_t count) | |||
336 | 368 | ||
337 | rv = parse_arg(buf, count, &value); | 369 | rv = parse_arg(buf, count, &value); |
338 | if (rv > 0) | 370 | if (rv > 0) |
339 | set_acpi(cm, value); | 371 | value = set_acpi(cm, value); |
372 | if (value < 0) | ||
373 | return value; | ||
340 | return rv; | 374 | return rv; |
341 | } | 375 | } |
342 | 376 | ||
343 | static ssize_t show_sys_acpi(int cm, char *buf) | 377 | static ssize_t show_sys_acpi(int cm, char *buf) |
344 | { | 378 | { |
345 | return sprintf(buf, "%d\n", get_acpi(cm)); | 379 | int value = get_acpi(cm); |
380 | |||
381 | if (value < 0) | ||
382 | return value; | ||
383 | return sprintf(buf, "%d\n", value); | ||
346 | } | 384 | } |
347 | 385 | ||
348 | #define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \ | 386 | #define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \ |
@@ -369,13 +407,88 @@ static ssize_t show_sys_acpi(int cm, char *buf) | |||
369 | EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA); | 407 | EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA); |
370 | EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER); | 408 | EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER); |
371 | EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH); | 409 | EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH); |
372 | EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV); | 410 | |
411 | struct eeepc_cpufv { | ||
412 | int num; | ||
413 | int cur; | ||
414 | }; | ||
415 | |||
416 | static int get_cpufv(struct eeepc_cpufv *c) | ||
417 | { | ||
418 | c->cur = get_acpi(CM_ASL_CPUFV); | ||
419 | c->num = (c->cur >> 8) & 0xff; | ||
420 | c->cur &= 0xff; | ||
421 | if (c->cur < 0 || c->num <= 0 || c->num > 12) | ||
422 | return -ENODEV; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
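The CM_ASL_CPUFV value unpacked by get_cpufv() packs two fields into one integer: bits 15..8 give the number of available frequency/voltage modes and bits 7..0 the currently selected mode, which is also how show_cpufv() re-encodes it for sysfs. A tiny userspace sketch with a made-up raw value:

/* Userspace sketch of the CPUFV encoding; the raw value is hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned int raw = 0x0301;		/* hypothetical ACPI value */
	unsigned int num = (raw >> 8) & 0xff;	/* 3 modes available */
	unsigned int cur = raw & 0xff;		/* mode 1 currently selected */

	printf("cpufv: %u modes, current %u, sysfs shows %#x\n",
	       num, cur, (num << 8) | cur);
	return 0;
}
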
426 | static ssize_t show_available_cpufv(struct device *dev, | ||
427 | struct device_attribute *attr, | ||
428 | char *buf) | ||
429 | { | ||
430 | struct eeepc_cpufv c; | ||
431 | int i; | ||
432 | ssize_t len = 0; | ||
433 | |||
434 | if (get_cpufv(&c)) | ||
435 | return -ENODEV; | ||
436 | for (i = 0; i < c.num; i++) | ||
437 | len += sprintf(buf + len, "%d ", i); | ||
438 | len += sprintf(buf + len, "\n"); | ||
439 | return len; | ||
440 | } | ||
441 | |||
442 | static ssize_t show_cpufv(struct device *dev, | ||
443 | struct device_attribute *attr, | ||
444 | char *buf) | ||
445 | { | ||
446 | struct eeepc_cpufv c; | ||
447 | |||
448 | if (get_cpufv(&c)) | ||
449 | return -ENODEV; | ||
450 | return sprintf(buf, "%#x\n", (c.num << 8) | c.cur); | ||
451 | } | ||
452 | |||
453 | static ssize_t store_cpufv(struct device *dev, | ||
454 | struct device_attribute *attr, | ||
455 | const char *buf, size_t count) | ||
456 | { | ||
457 | struct eeepc_cpufv c; | ||
458 | int rv, value; | ||
459 | |||
460 | if (get_cpufv(&c)) | ||
461 | return -ENODEV; | ||
462 | rv = parse_arg(buf, count, &value); | ||
463 | if (rv < 0) | ||
464 | return rv; | ||
465 | if (!rv || value < 0 || value >= c.num) | ||
466 | return -EINVAL; | ||
467 | set_acpi(CM_ASL_CPUFV, value); | ||
468 | return rv; | ||
469 | } | ||
470 | |||
471 | static struct device_attribute dev_attr_cpufv = { | ||
472 | .attr = { | ||
473 | .name = "cpufv", | ||
474 | .mode = 0644 }, | ||
475 | .show = show_cpufv, | ||
476 | .store = store_cpufv | ||
477 | }; | ||
478 | |||
479 | static struct device_attribute dev_attr_available_cpufv = { | ||
480 | .attr = { | ||
481 | .name = "available_cpufv", | ||
482 | .mode = 0444 }, | ||
483 | .show = show_available_cpufv | ||
484 | }; | ||
373 | 485 | ||
374 | static struct attribute *platform_attributes[] = { | 486 | static struct attribute *platform_attributes[] = { |
375 | &dev_attr_camera.attr, | 487 | &dev_attr_camera.attr, |
376 | &dev_attr_cardr.attr, | 488 | &dev_attr_cardr.attr, |
377 | &dev_attr_disp.attr, | 489 | &dev_attr_disp.attr, |
378 | &dev_attr_cpufv.attr, | 490 | &dev_attr_cpufv.attr, |
491 | &dev_attr_available_cpufv.attr, | ||
379 | NULL | 492 | NULL |
380 | }; | 493 | }; |
381 | 494 | ||
@@ -441,6 +554,28 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode) | |||
441 | return -EINVAL; | 554 | return -EINVAL; |
442 | } | 555 | } |
443 | 556 | ||
557 | static void cmsg_quirk(int cm, const char *name) | ||
558 | { | ||
559 | int dummy; | ||
560 | |||
561 | /* Some BIOSes do not report cm although it is available. | ||
562 | Check if cm_getv[cm] works and, if yes, assume cm should be set. */ | ||
563 | if (!(ehotk->cm_supported & (1 << cm)) | ||
564 | && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) { | ||
565 | pr_info("%s (%x) not reported by BIOS," | ||
566 | " enabling anyway\n", name, 1 << cm); | ||
567 | ehotk->cm_supported |= 1 << cm; | ||
568 | } | ||
569 | } | ||
570 | |||
571 | static void cmsg_quirks(void) | ||
572 | { | ||
573 | cmsg_quirk(CM_ASL_LID, "LID"); | ||
574 | cmsg_quirk(CM_ASL_TYPE, "TYPE"); | ||
575 | cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER"); | ||
576 | cmsg_quirk(CM_ASL_TPD, "TPD"); | ||
577 | } | ||
578 | |||
444 | static int eeepc_hotk_check(void) | 579 | static int eeepc_hotk_check(void) |
445 | { | 580 | { |
446 | const struct key_entry *key; | 581 | const struct key_entry *key; |
@@ -453,26 +588,24 @@ static int eeepc_hotk_check(void) | |||
453 | if (ehotk->device->status.present) { | 588 | if (ehotk->device->status.present) { |
454 | if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag, | 589 | if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag, |
455 | &buffer)) { | 590 | &buffer)) { |
456 | printk(EEEPC_ERR "Hotkey initialization failed\n"); | 591 | pr_err("Hotkey initialization failed\n"); |
457 | return -ENODEV; | 592 | return -ENODEV; |
458 | } else { | 593 | } else { |
459 | printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n", | 594 | pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag); |
460 | ehotk->init_flag); | ||
461 | } | 595 | } |
462 | /* get control methods supported */ | 596 | /* get control methods supported */ |
463 | if (read_acpi_int(ehotk->handle, "CMSG" | 597 | if (read_acpi_int(ehotk->handle, "CMSG" |
464 | , &ehotk->cm_supported)) { | 598 | , &ehotk->cm_supported)) { |
465 | printk(EEEPC_ERR | 599 | pr_err("Get control methods supported failed\n"); |
466 | "Get control methods supported failed\n"); | ||
467 | return -ENODEV; | 600 | return -ENODEV; |
468 | } else { | 601 | } else { |
469 | printk(EEEPC_INFO | 602 | cmsg_quirks(); |
470 | "Get control methods supported: 0x%x\n", | 603 | pr_info("Get control methods supported: 0x%x\n", |
471 | ehotk->cm_supported); | 604 | ehotk->cm_supported); |
472 | } | 605 | } |
473 | ehotk->inputdev = input_allocate_device(); | 606 | ehotk->inputdev = input_allocate_device(); |
474 | if (!ehotk->inputdev) { | 607 | if (!ehotk->inputdev) { |
475 | printk(EEEPC_INFO "Unable to allocate input device\n"); | 608 | pr_info("Unable to allocate input device\n"); |
476 | return 0; | 609 | return 0; |
477 | } | 610 | } |
478 | ehotk->inputdev->name = "Asus EeePC extra buttons"; | 611 | ehotk->inputdev->name = "Asus EeePC extra buttons"; |
@@ -491,12 +624,12 @@ static int eeepc_hotk_check(void) | |||
491 | } | 624 | } |
492 | result = input_register_device(ehotk->inputdev); | 625 | result = input_register_device(ehotk->inputdev); |
493 | if (result) { | 626 | if (result) { |
494 | printk(EEEPC_INFO "Unable to register input device\n"); | 627 | pr_info("Unable to register input device\n"); |
495 | input_free_device(ehotk->inputdev); | 628 | input_free_device(ehotk->inputdev); |
496 | return 0; | 629 | return 0; |
497 | } | 630 | } |
498 | } else { | 631 | } else { |
499 | printk(EEEPC_ERR "Hotkey device not present, aborting\n"); | 632 | pr_err("Hotkey device not present, aborting\n"); |
500 | return -EINVAL; | 633 | return -EINVAL; |
501 | } | 634 | } |
502 | return 0; | 635 | return 0; |
@@ -514,6 +647,19 @@ static int notify_brn(void) | |||
514 | return -1; | 647 | return -1; |
515 | } | 648 | } |
516 | 649 | ||
650 | static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot, | ||
651 | u8 *value) | ||
652 | { | ||
653 | int val = get_acpi(CM_ASL_WLAN); | ||
654 | |||
655 | if (val == 1 || val == 0) | ||
656 | *value = val; | ||
657 | else | ||
658 | return -EINVAL; | ||
659 | |||
660 | return 0; | ||
661 | } | ||
662 | |||
517 | static void eeepc_rfkill_hotplug(void) | 663 | static void eeepc_rfkill_hotplug(void) |
518 | { | 664 | { |
519 | struct pci_dev *dev; | 665 | struct pci_dev *dev; |
@@ -521,7 +667,7 @@ static void eeepc_rfkill_hotplug(void) | |||
521 | bool blocked; | 667 | bool blocked; |
522 | 668 | ||
523 | if (!bus) { | 669 | if (!bus) { |
524 | printk(EEEPC_WARNING "Unable to find PCI bus 1?\n"); | 670 | pr_warning("Unable to find PCI bus 1?\n"); |
525 | return; | 671 | return; |
526 | } | 672 | } |
527 | 673 | ||
@@ -537,7 +683,7 @@ static void eeepc_rfkill_hotplug(void) | |||
537 | if (dev) { | 683 | if (dev) { |
538 | pci_bus_assign_resources(bus); | 684 | pci_bus_assign_resources(bus); |
539 | if (pci_bus_add_device(dev)) | 685 | if (pci_bus_add_device(dev)) |
540 | printk(EEEPC_ERR "Unable to hotplug wifi\n"); | 686 | pr_err("Unable to hotplug wifi\n"); |
541 | } | 687 | } |
542 | } else { | 688 | } else { |
543 | dev = pci_get_slot(bus, 0); | 689 | dev = pci_get_slot(bus, 0); |
@@ -547,7 +693,7 @@ static void eeepc_rfkill_hotplug(void) | |||
547 | } | 693 | } |
548 | } | 694 | } |
549 | 695 | ||
550 | rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked); | 696 | rfkill_set_sw_state(ehotk->wlan_rfkill, blocked); |
551 | } | 697 | } |
552 | 698 | ||
553 | static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) | 699 | static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) |
@@ -558,7 +704,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) | |||
558 | eeepc_rfkill_hotplug(); | 704 | eeepc_rfkill_hotplug(); |
559 | } | 705 | } |
560 | 706 | ||
561 | static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) | 707 | static void eeepc_hotk_notify(struct acpi_device *device, u32 event) |
562 | { | 708 | { |
563 | static struct key_entry *key; | 709 | static struct key_entry *key; |
564 | u16 count; | 710 | u16 count; |
@@ -566,6 +712,8 @@ static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) | |||
566 | 712 | ||
567 | if (!ehotk) | 713 | if (!ehotk) |
568 | return; | 714 | return; |
715 | if (event > ACPI_MAX_SYS_NOTIFY) | ||
716 | return; | ||
569 | if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) | 717 | if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) |
570 | brn = notify_brn(); | 718 | brn = notify_brn(); |
571 | count = ehotk->event_count[event % 128]++; | 719 | count = ehotk->event_count[event % 128]++; |
@@ -618,8 +766,7 @@ static int eeepc_register_rfkill_notifier(char *node) | |||
618 | eeepc_rfkill_notify, | 766 | eeepc_rfkill_notify, |
619 | NULL); | 767 | NULL); |
620 | if (ACPI_FAILURE(status)) | 768 | if (ACPI_FAILURE(status)) |
621 | printk(EEEPC_WARNING | 769 | pr_warning("Failed to register notify on %s\n", node); |
622 | "Failed to register notify on %s\n", node); | ||
623 | } else | 770 | } else |
624 | return -ENODEV; | 771 | return -ENODEV; |
625 | 772 | ||
@@ -638,20 +785,66 @@ static void eeepc_unregister_rfkill_notifier(char *node) | |||
638 | ACPI_SYSTEM_NOTIFY, | 785 | ACPI_SYSTEM_NOTIFY, |
639 | eeepc_rfkill_notify); | 786 | eeepc_rfkill_notify); |
640 | if (ACPI_FAILURE(status)) | 787 | if (ACPI_FAILURE(status)) |
641 | printk(EEEPC_ERR | 788 | pr_err("Error removing rfkill notify handler %s\n", |
642 | "Error removing rfkill notify handler %s\n", | ||
643 | node); | 789 | node); |
644 | } | 790 | } |
645 | } | 791 | } |
646 | 792 | ||
793 | static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot) | ||
794 | { | ||
795 | kfree(hotplug_slot->info); | ||
796 | kfree(hotplug_slot); | ||
797 | } | ||
798 | |||
799 | static int eeepc_setup_pci_hotplug(void) | ||
800 | { | ||
801 | int ret = -ENOMEM; | ||
802 | struct pci_bus *bus = pci_find_bus(0, 1); | ||
803 | |||
804 | if (!bus) { | ||
805 | pr_err("Unable to find wifi PCI bus\n"); | ||
806 | return -ENODEV; | ||
807 | } | ||
808 | |||
809 | ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); | ||
810 | if (!ehotk->hotplug_slot) | ||
811 | goto error_slot; | ||
812 | |||
813 | ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), | ||
814 | GFP_KERNEL); | ||
815 | if (!ehotk->hotplug_slot->info) | ||
816 | goto error_info; | ||
817 | |||
818 | ehotk->hotplug_slot->private = ehotk; | ||
819 | ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug; | ||
820 | ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops; | ||
821 | eeepc_get_adapter_status(ehotk->hotplug_slot, | ||
822 | &ehotk->hotplug_slot->info->adapter_status); | ||
823 | |||
824 | ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi"); | ||
825 | if (ret) { | ||
826 | pr_err("Unable to register hotplug slot - %d\n", ret); | ||
827 | goto error_register; | ||
828 | } | ||
829 | |||
830 | return 0; | ||
831 | |||
832 | error_register: | ||
833 | kfree(ehotk->hotplug_slot->info); | ||
834 | error_info: | ||
835 | kfree(ehotk->hotplug_slot); | ||
836 | ehotk->hotplug_slot = NULL; | ||
837 | error_slot: | ||
838 | return ret; | ||
839 | } | ||
840 | |||
647 | static int eeepc_hotk_add(struct acpi_device *device) | 841 | static int eeepc_hotk_add(struct acpi_device *device) |
648 | { | 842 | { |
649 | acpi_status status = AE_OK; | ||
650 | int result; | 843 | int result; |
651 | 844 | ||
652 | if (!device) | 845 | if (!device) |
653 | return -EINVAL; | 846 | return -EINVAL; |
654 | printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n"); | 847 | pr_notice(EEEPC_HOTK_NAME "\n"); |
655 | ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL); | 848 | ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL); |
656 | if (!ehotk) | 849 | if (!ehotk) |
657 | return -ENOMEM; | 850 | return -ENOMEM; |
@@ -664,58 +857,9 @@ static int eeepc_hotk_add(struct acpi_device *device) | |||
664 | result = eeepc_hotk_check(); | 857 | result = eeepc_hotk_check(); |
665 | if (result) | 858 | if (result) |
666 | goto ehotk_fail; | 859 | goto ehotk_fail; |
667 | status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY, | ||
668 | eeepc_hotk_notify, ehotk); | ||
669 | if (ACPI_FAILURE(status)) | ||
670 | printk(EEEPC_ERR "Error installing notify handler\n"); | ||
671 | |||
672 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); | ||
673 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); | ||
674 | |||
675 | if (get_acpi(CM_ASL_WLAN) != -1) { | ||
676 | ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan", | ||
677 | &device->dev, | ||
678 | RFKILL_TYPE_WLAN, | ||
679 | &eeepc_rfkill_ops, | ||
680 | (void *)CM_ASL_WLAN); | ||
681 | |||
682 | if (!ehotk->eeepc_wlan_rfkill) | ||
683 | goto wlan_fail; | ||
684 | |||
685 | rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill, | ||
686 | get_acpi(CM_ASL_WLAN) != 1); | ||
687 | result = rfkill_register(ehotk->eeepc_wlan_rfkill); | ||
688 | if (result) | ||
689 | goto wlan_fail; | ||
690 | } | ||
691 | |||
692 | if (get_acpi(CM_ASL_BLUETOOTH) != -1) { | ||
693 | ehotk->eeepc_bluetooth_rfkill = | ||
694 | rfkill_alloc("eeepc-bluetooth", | ||
695 | &device->dev, | ||
696 | RFKILL_TYPE_BLUETOOTH, | ||
697 | &eeepc_rfkill_ops, | ||
698 | (void *)CM_ASL_BLUETOOTH); | ||
699 | |||
700 | if (!ehotk->eeepc_bluetooth_rfkill) | ||
701 | goto bluetooth_fail; | ||
702 | |||
703 | rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill, | ||
704 | get_acpi(CM_ASL_BLUETOOTH) != 1); | ||
705 | result = rfkill_register(ehotk->eeepc_bluetooth_rfkill); | ||
706 | if (result) | ||
707 | goto bluetooth_fail; | ||
708 | } | ||
709 | 860 | ||
710 | return 0; | 861 | return 0; |
711 | 862 | ||
712 | bluetooth_fail: | ||
713 | rfkill_destroy(ehotk->eeepc_bluetooth_rfkill); | ||
714 | rfkill_unregister(ehotk->eeepc_wlan_rfkill); | ||
715 | wlan_fail: | ||
716 | rfkill_destroy(ehotk->eeepc_wlan_rfkill); | ||
717 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); | ||
718 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); | ||
719 | ehotk_fail: | 863 | ehotk_fail: |
720 | kfree(ehotk); | 864 | kfree(ehotk); |
721 | ehotk = NULL; | 865 | ehotk = NULL; |
@@ -725,17 +869,8 @@ static int eeepc_hotk_add(struct acpi_device *device) | |||
725 | 869 | ||
726 | static int eeepc_hotk_remove(struct acpi_device *device, int type) | 870 | static int eeepc_hotk_remove(struct acpi_device *device, int type) |
727 | { | 871 | { |
728 | acpi_status status = 0; | ||
729 | |||
730 | if (!device || !acpi_driver_data(device)) | 872 | if (!device || !acpi_driver_data(device)) |
731 | return -EINVAL; | 873 | return -EINVAL; |
732 | status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY, | ||
733 | eeepc_hotk_notify); | ||
734 | if (ACPI_FAILURE(status)) | ||
735 | printk(EEEPC_ERR "Error removing notify handler\n"); | ||
736 | |||
737 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); | ||
738 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); | ||
739 | 874 | ||
740 | kfree(ehotk); | 875 | kfree(ehotk); |
741 | return 0; | 876 | return 0; |
@@ -743,7 +878,7 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type) | |||
743 | 878 | ||
744 | static int eeepc_hotk_resume(struct acpi_device *device) | 879 | static int eeepc_hotk_resume(struct acpi_device *device) |
745 | { | 880 | { |
746 | if (ehotk->eeepc_wlan_rfkill) { | 881 | if (ehotk->wlan_rfkill) { |
747 | bool wlan; | 882 | bool wlan; |
748 | 883 | ||
749 | /* Workaround - it seems that _PTS disables the wireless | 884 | /* Workaround - it seems that _PTS disables the wireless |
@@ -755,14 +890,13 @@ static int eeepc_hotk_resume(struct acpi_device *device) | |||
755 | wlan = get_acpi(CM_ASL_WLAN); | 890 | wlan = get_acpi(CM_ASL_WLAN); |
756 | set_acpi(CM_ASL_WLAN, wlan); | 891 | set_acpi(CM_ASL_WLAN, wlan); |
757 | 892 | ||
758 | rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, | 893 | rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1); |
759 | wlan != 1); | ||
760 | 894 | ||
761 | eeepc_rfkill_hotplug(); | 895 | eeepc_rfkill_hotplug(); |
762 | } | 896 | } |
763 | 897 | ||
764 | if (ehotk->eeepc_bluetooth_rfkill) | 898 | if (ehotk->bluetooth_rfkill) |
765 | rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill, | 899 | rfkill_set_sw_state(ehotk->bluetooth_rfkill, |
766 | get_acpi(CM_ASL_BLUETOOTH) != 1); | 900 | get_acpi(CM_ASL_BLUETOOTH) != 1); |
767 | 901 | ||
768 | return 0; | 902 | return 0; |
@@ -884,10 +1018,16 @@ static void eeepc_backlight_exit(void) | |||
884 | 1018 | ||
885 | static void eeepc_rfkill_exit(void) | 1019 | static void eeepc_rfkill_exit(void) |
886 | { | 1020 | { |
887 | if (ehotk->eeepc_wlan_rfkill) | 1021 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6"); |
888 | rfkill_unregister(ehotk->eeepc_wlan_rfkill); | 1022 | eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7"); |
889 | if (ehotk->eeepc_bluetooth_rfkill) | 1023 | if (ehotk->wlan_rfkill) |
890 | rfkill_unregister(ehotk->eeepc_bluetooth_rfkill); | 1024 | rfkill_unregister(ehotk->wlan_rfkill); |
1025 | if (ehotk->bluetooth_rfkill) | ||
1026 | rfkill_unregister(ehotk->bluetooth_rfkill); | ||
1027 | if (ehotk->wwan3g_rfkill) | ||
1028 | rfkill_unregister(ehotk->wwan3g_rfkill); | ||
1029 | if (ehotk->hotplug_slot) | ||
1030 | pci_hp_deregister(ehotk->hotplug_slot); | ||
891 | } | 1031 | } |
892 | 1032 | ||
893 | static void eeepc_input_exit(void) | 1033 | static void eeepc_input_exit(void) |
@@ -922,6 +1062,75 @@ static void __exit eeepc_laptop_exit(void) | |||
922 | platform_driver_unregister(&platform_driver); | 1062 | platform_driver_unregister(&platform_driver); |
923 | } | 1063 | } |
924 | 1064 | ||
1065 | static int eeepc_new_rfkill(struct rfkill **rfkill, | ||
1066 | const char *name, struct device *dev, | ||
1067 | enum rfkill_type type, int cm) | ||
1068 | { | ||
1069 | int result; | ||
1070 | |||
1071 | result = get_acpi(cm); | ||
1072 | if (result < 0) | ||
1073 | return result; | ||
1074 | |||
1075 | *rfkill = rfkill_alloc(name, dev, type, | ||
1076 | &eeepc_rfkill_ops, (void *)(unsigned long)cm); | ||
1077 | |||
1078 | if (!*rfkill) | ||
1079 | return -EINVAL; | ||
1080 | |||
1081 | rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1); | ||
1082 | result = rfkill_register(*rfkill); | ||
1083 | if (result) { | ||
1084 | rfkill_destroy(*rfkill); | ||
1085 | *rfkill = NULL; | ||
1086 | return result; | ||
1087 | } | ||
1088 | return 0; | ||
1089 | } | ||
1090 | |||
1091 | |||
1092 | static int eeepc_rfkill_init(struct device *dev) | ||
1093 | { | ||
1094 | int result = 0; | ||
1095 | |||
1096 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); | ||
1097 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); | ||
1098 | |||
1099 | result = eeepc_new_rfkill(&ehotk->wlan_rfkill, | ||
1100 | "eeepc-wlan", dev, | ||
1101 | RFKILL_TYPE_WLAN, CM_ASL_WLAN); | ||
1102 | |||
1103 | if (result && result != -ENODEV) | ||
1104 | goto exit; | ||
1105 | |||
1106 | result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill, | ||
1107 | "eeepc-bluetooth", dev, | ||
1108 | RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH); | ||
1109 | |||
1110 | if (result && result != -ENODEV) | ||
1111 | goto exit; | ||
1112 | |||
1113 | result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill, | ||
1114 | "eeepc-wwan3g", dev, | ||
1115 | RFKILL_TYPE_WWAN, CM_ASL_3G); | ||
1116 | |||
1117 | if (result && result != -ENODEV) | ||
1118 | goto exit; | ||
1119 | |||
1120 | result = eeepc_setup_pci_hotplug(); | ||
1121 | /* | ||
1122 | * If we get -EBUSY then something else is handling the PCI hotplug - | ||
1123 | * don't fail in this case | ||
1124 | */ | ||
1125 | if (result == -EBUSY) | ||
1126 | result = 0; | ||
1127 | |||
1128 | exit: | ||
1129 | if (result && result != -ENODEV) | ||
1130 | eeepc_rfkill_exit(); | ||
1131 | return result; | ||
1132 | } | ||
1133 | |||
925 | static int eeepc_backlight_init(struct device *dev) | 1134 | static int eeepc_backlight_init(struct device *dev) |
926 | { | 1135 | { |
927 | struct backlight_device *bd; | 1136 | struct backlight_device *bd; |
@@ -929,8 +1138,7 @@ static int eeepc_backlight_init(struct device *dev) | |||
929 | bd = backlight_device_register(EEEPC_HOTK_FILE, dev, | 1138 | bd = backlight_device_register(EEEPC_HOTK_FILE, dev, |
930 | NULL, &eeepcbl_ops); | 1139 | NULL, &eeepcbl_ops); |
931 | if (IS_ERR(bd)) { | 1140 | if (IS_ERR(bd)) { |
932 | printk(EEEPC_ERR | 1141 | pr_err("Could not register eeepc backlight device\n"); |
933 | "Could not register eeepc backlight device\n"); | ||
934 | eeepc_backlight_device = NULL; | 1142 | eeepc_backlight_device = NULL; |
935 | return PTR_ERR(bd); | 1143 | return PTR_ERR(bd); |
936 | } | 1144 | } |
@@ -949,8 +1157,7 @@ static int eeepc_hwmon_init(struct device *dev) | |||
949 | 1157 | ||
950 | hwmon = hwmon_device_register(dev); | 1158 | hwmon = hwmon_device_register(dev); |
951 | if (IS_ERR(hwmon)) { | 1159 | if (IS_ERR(hwmon)) { |
952 | printk(EEEPC_ERR | 1160 | pr_err("Could not register eeepc hwmon device\n"); |
953 | "Could not register eeepc hwmon device\n"); | ||
954 | eeepc_hwmon_device = NULL; | 1161 | eeepc_hwmon_device = NULL; |
955 | return PTR_ERR(hwmon); | 1162 | return PTR_ERR(hwmon); |
956 | } | 1163 | } |
@@ -976,19 +1183,9 @@ static int __init eeepc_laptop_init(void) | |||
976 | acpi_bus_unregister_driver(&eeepc_hotk_driver); | 1183 | acpi_bus_unregister_driver(&eeepc_hotk_driver); |
977 | return -ENODEV; | 1184 | return -ENODEV; |
978 | } | 1185 | } |
979 | dev = acpi_get_physical_device(ehotk->device->handle); | ||
980 | 1186 | ||
981 | if (!acpi_video_backlight_support()) { | 1187 | eeepc_enable_camera(); |
982 | result = eeepc_backlight_init(dev); | ||
983 | if (result) | ||
984 | goto fail_backlight; | ||
985 | } else | ||
986 | printk(EEEPC_INFO "Backlight controlled by ACPI video " | ||
987 | "driver\n"); | ||
988 | 1188 | ||
989 | result = eeepc_hwmon_init(dev); | ||
990 | if (result) | ||
991 | goto fail_hwmon; | ||
992 | /* Register platform stuff */ | 1189 | /* Register platform stuff */ |
993 | result = platform_driver_register(&platform_driver); | 1190 | result = platform_driver_register(&platform_driver); |
994 | if (result) | 1191 | if (result) |
@@ -1005,7 +1202,33 @@ static int __init eeepc_laptop_init(void) | |||
1005 | &platform_attribute_group); | 1202 | &platform_attribute_group); |
1006 | if (result) | 1203 | if (result) |
1007 | goto fail_sysfs; | 1204 | goto fail_sysfs; |
1205 | |||
1206 | dev = &platform_device->dev; | ||
1207 | |||
1208 | if (!acpi_video_backlight_support()) { | ||
1209 | result = eeepc_backlight_init(dev); | ||
1210 | if (result) | ||
1211 | goto fail_backlight; | ||
1212 | } else | ||
1213 | pr_info("Backlight controlled by ACPI video " | ||
1214 | "driver\n"); | ||
1215 | |||
1216 | result = eeepc_hwmon_init(dev); | ||
1217 | if (result) | ||
1218 | goto fail_hwmon; | ||
1219 | |||
1220 | result = eeepc_rfkill_init(dev); | ||
1221 | if (result) | ||
1222 | goto fail_rfkill; | ||
1223 | |||
1008 | return 0; | 1224 | return 0; |
1225 | fail_rfkill: | ||
1226 | eeepc_hwmon_exit(); | ||
1227 | fail_hwmon: | ||
1228 | eeepc_backlight_exit(); | ||
1229 | fail_backlight: | ||
1230 | sysfs_remove_group(&platform_device->dev.kobj, | ||
1231 | &platform_attribute_group); | ||
1009 | fail_sysfs: | 1232 | fail_sysfs: |
1010 | platform_device_del(platform_device); | 1233 | platform_device_del(platform_device); |
1011 | fail_platform_device2: | 1234 | fail_platform_device2: |
@@ -1013,12 +1236,7 @@ fail_platform_device2: | |||
1013 | fail_platform_device1: | 1236 | fail_platform_device1: |
1014 | platform_driver_unregister(&platform_driver); | 1237 | platform_driver_unregister(&platform_driver); |
1015 | fail_platform_driver: | 1238 | fail_platform_driver: |
1016 | eeepc_hwmon_exit(); | ||
1017 | fail_hwmon: | ||
1018 | eeepc_backlight_exit(); | ||
1019 | fail_backlight: | ||
1020 | eeepc_input_exit(); | 1239 | eeepc_input_exit(); |
1021 | eeepc_rfkill_exit(); | ||
1022 | return result; | 1240 | return result; |
1023 | } | 1241 | } |
1024 | 1242 | ||
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 16fffe44e333..4ac2311c00af 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -47,7 +47,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
47 | #define HPWMI_DISPLAY_QUERY 0x1 | 47 | #define HPWMI_DISPLAY_QUERY 0x1 |
48 | #define HPWMI_HDDTEMP_QUERY 0x2 | 48 | #define HPWMI_HDDTEMP_QUERY 0x2 |
49 | #define HPWMI_ALS_QUERY 0x3 | 49 | #define HPWMI_ALS_QUERY 0x3 |
50 | #define HPWMI_DOCK_QUERY 0x4 | 50 | #define HPWMI_HARDWARE_QUERY 0x4 |
51 | #define HPWMI_WIRELESS_QUERY 0x5 | 51 | #define HPWMI_WIRELESS_QUERY 0x5 |
52 | #define HPWMI_HOTKEY_QUERY 0xc | 52 | #define HPWMI_HOTKEY_QUERY 0xc |
53 | 53 | ||
@@ -75,10 +75,9 @@ struct key_entry { | |||
75 | u16 keycode; | 75 | u16 keycode; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | enum { KE_KEY, KE_SW, KE_END }; | 78 | enum { KE_KEY, KE_END }; |
79 | 79 | ||
80 | static struct key_entry hp_wmi_keymap[] = { | 80 | static struct key_entry hp_wmi_keymap[] = { |
81 | {KE_SW, 0x01, SW_DOCK}, | ||
82 | {KE_KEY, 0x02, KEY_BRIGHTNESSUP}, | 81 | {KE_KEY, 0x02, KEY_BRIGHTNESSUP}, |
83 | {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN}, | 82 | {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN}, |
84 | {KE_KEY, 0x20e6, KEY_PROG1}, | 83 | {KE_KEY, 0x20e6, KEY_PROG1}, |
@@ -151,7 +150,22 @@ static int hp_wmi_als_state(void) | |||
151 | 150 | ||
152 | static int hp_wmi_dock_state(void) | 151 | static int hp_wmi_dock_state(void) |
153 | { | 152 | { |
154 | return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0); | 153 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0); |
154 | |||
155 | if (ret < 0) | ||
156 | return ret; | ||
157 | |||
158 | return ret & 0x1; | ||
159 | } | ||
160 | |||
161 | static int hp_wmi_tablet_state(void) | ||
162 | { | ||
163 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0); | ||
164 | |||
165 | if (ret < 0) | ||
166 | return ret; | ||
167 | |||
168 | return (ret & 0x4) ? 1 : 0; | ||
155 | } | 169 | } |
156 | 170 | ||
157 | static int hp_wmi_set_block(void *data, bool blocked) | 171 | static int hp_wmi_set_block(void *data, bool blocked) |
@@ -232,6 +246,15 @@ static ssize_t show_dock(struct device *dev, struct device_attribute *attr, | |||
232 | return sprintf(buf, "%d\n", value); | 246 | return sprintf(buf, "%d\n", value); |
233 | } | 247 | } |
234 | 248 | ||
249 | static ssize_t show_tablet(struct device *dev, struct device_attribute *attr, | ||
250 | char *buf) | ||
251 | { | ||
252 | int value = hp_wmi_tablet_state(); | ||
253 | if (value < 0) | ||
254 | return -EINVAL; | ||
255 | return sprintf(buf, "%d\n", value); | ||
256 | } | ||
257 | |||
235 | static ssize_t set_als(struct device *dev, struct device_attribute *attr, | 258 | static ssize_t set_als(struct device *dev, struct device_attribute *attr, |
236 | const char *buf, size_t count) | 259 | const char *buf, size_t count) |
237 | { | 260 | { |
@@ -244,6 +267,7 @@ static DEVICE_ATTR(display, S_IRUGO, show_display, NULL); | |||
244 | static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL); | 267 | static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL); |
245 | static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als); | 268 | static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als); |
246 | static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL); | 269 | static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL); |
270 | static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL); | ||
247 | 271 | ||
248 | static struct key_entry *hp_wmi_get_entry_by_scancode(int code) | 272 | static struct key_entry *hp_wmi_get_entry_by_scancode(int code) |
249 | { | 273 | { |
@@ -326,13 +350,13 @@ static void hp_wmi_notify(u32 value, void *context) | |||
326 | key->keycode, 0); | 350 | key->keycode, 0); |
327 | input_sync(hp_wmi_input_dev); | 351 | input_sync(hp_wmi_input_dev); |
328 | break; | 352 | break; |
329 | case KE_SW: | ||
330 | input_report_switch(hp_wmi_input_dev, | ||
331 | key->keycode, | ||
332 | hp_wmi_dock_state()); | ||
333 | input_sync(hp_wmi_input_dev); | ||
334 | break; | ||
335 | } | 353 | } |
354 | } else if (eventcode == 0x1) { | ||
355 | input_report_switch(hp_wmi_input_dev, SW_DOCK, | ||
356 | hp_wmi_dock_state()); | ||
357 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, | ||
358 | hp_wmi_tablet_state()); | ||
359 | input_sync(hp_wmi_input_dev); | ||
336 | } else if (eventcode == 0x5) { | 360 | } else if (eventcode == 0x5) { |
337 | if (wifi_rfkill) | 361 | if (wifi_rfkill) |
338 | rfkill_set_sw_state(wifi_rfkill, | 362 | rfkill_set_sw_state(wifi_rfkill, |
@@ -369,18 +393,19 @@ static int __init hp_wmi_input_setup(void) | |||
369 | set_bit(EV_KEY, hp_wmi_input_dev->evbit); | 393 | set_bit(EV_KEY, hp_wmi_input_dev->evbit); |
370 | set_bit(key->keycode, hp_wmi_input_dev->keybit); | 394 | set_bit(key->keycode, hp_wmi_input_dev->keybit); |
371 | break; | 395 | break; |
372 | case KE_SW: | ||
373 | set_bit(EV_SW, hp_wmi_input_dev->evbit); | ||
374 | set_bit(key->keycode, hp_wmi_input_dev->swbit); | ||
375 | |||
376 | /* Set initial dock state */ | ||
377 | input_report_switch(hp_wmi_input_dev, key->keycode, | ||
378 | hp_wmi_dock_state()); | ||
379 | input_sync(hp_wmi_input_dev); | ||
380 | break; | ||
381 | } | 396 | } |
382 | } | 397 | } |
383 | 398 | ||
399 | set_bit(EV_SW, hp_wmi_input_dev->evbit); | ||
400 | set_bit(SW_DOCK, hp_wmi_input_dev->swbit); | ||
401 | set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); | ||
402 | |||
403 | /* Set initial hardware state */ | ||
404 | input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); | ||
405 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, | ||
406 | hp_wmi_tablet_state()); | ||
407 | input_sync(hp_wmi_input_dev); | ||
408 | |||
384 | err = input_register_device(hp_wmi_input_dev); | 409 | err = input_register_device(hp_wmi_input_dev); |
385 | 410 | ||
386 | if (err) { | 411 | if (err) { |
@@ -397,6 +422,7 @@ static void cleanup_sysfs(struct platform_device *device) | |||
397 | device_remove_file(&device->dev, &dev_attr_hddtemp); | 422 | device_remove_file(&device->dev, &dev_attr_hddtemp); |
398 | device_remove_file(&device->dev, &dev_attr_als); | 423 | device_remove_file(&device->dev, &dev_attr_als); |
399 | device_remove_file(&device->dev, &dev_attr_dock); | 424 | device_remove_file(&device->dev, &dev_attr_dock); |
425 | device_remove_file(&device->dev, &dev_attr_tablet); | ||
400 | } | 426 | } |
401 | 427 | ||
402 | static int __init hp_wmi_bios_setup(struct platform_device *device) | 428 | static int __init hp_wmi_bios_setup(struct platform_device *device) |
@@ -416,6 +442,9 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) | |||
416 | err = device_create_file(&device->dev, &dev_attr_dock); | 442 | err = device_create_file(&device->dev, &dev_attr_dock); |
417 | if (err) | 443 | if (err) |
418 | goto add_sysfs_error; | 444 | goto add_sysfs_error; |
445 | err = device_create_file(&device->dev, &dev_attr_tablet); | ||
446 | if (err) | ||
447 | goto add_sysfs_error; | ||
419 | 448 | ||
420 | if (wireless & 0x1) { | 449 | if (wireless & 0x1) { |
421 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, | 450 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, |
@@ -485,23 +514,17 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device) | |||
485 | 514 | ||
486 | static int hp_wmi_resume_handler(struct platform_device *device) | 515 | static int hp_wmi_resume_handler(struct platform_device *device) |
487 | { | 516 | { |
488 | struct key_entry *key; | ||
489 | |||
490 | /* | 517 | /* |
491 | * Docking state may have changed while suspended, so trigger | 518 | * Hardware state may have changed while suspended, so trigger |
492 | * an input event for the current state. As this is a switch, | 519 | * input events for the current state. As this is a switch, |
493 | * the input layer will only actually pass it on if the state | 520 | * the input layer will only actually pass it on if the state |
494 | * changed. | 521 | * changed. |
495 | */ | 522 | */ |
496 | for (key = hp_wmi_keymap; key->type != KE_END; key++) { | 523 | |
497 | switch (key->type) { | 524 | input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); |
498 | case KE_SW: | 525 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, |
499 | input_report_switch(hp_wmi_input_dev, key->keycode, | 526 | hp_wmi_tablet_state()); |
500 | hp_wmi_dock_state()); | 527 | input_sync(hp_wmi_input_dev); |
501 | input_sync(hp_wmi_input_dev); | ||
502 | break; | ||
503 | } | ||
504 | } | ||
505 | 528 | ||
506 | return 0; | 529 | return 0; |
507 | } | 530 | } |
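
For reference, the dock and tablet switch states handled above are also exported through the "dock" and "tablet" sysfs attributes added earlier in this patch. Below is a minimal userspace sketch for reading them; the /sys/devices/platform/hp-wmi path is an assumption based on the platform device name, not something stated in this patch.

#include <stdio.h>

/* Sketch only: read the hp-wmi "dock" and "tablet" sysfs attributes.
 * The sysfs path is assumed from the platform device name. */
static int read_hp_wmi_attr(const char *name)
{
	char path[128];
	FILE *f;
	int val = -1;

	snprintf(path, sizeof(path), "/sys/devices/platform/hp-wmi/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("dock: %d, tablet: %d\n",
	       read_hp_wmi_attr("dock"), read_hp_wmi_attr("tablet"));
	return 0;
}
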
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 40d64c03278c..a463fd72c495 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define TPACPI_VERSION "0.23" | 24 | #define TPACPI_VERSION "0.23" |
25 | #define TPACPI_SYSFS_VERSION 0x020300 | 25 | #define TPACPI_SYSFS_VERSION 0x020400 |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Changelog: | 28 | * Changelog: |
@@ -257,6 +257,8 @@ static struct { | |||
257 | u32 wan:1; | 257 | u32 wan:1; |
258 | u32 uwb:1; | 258 | u32 uwb:1; |
259 | u32 fan_ctrl_status_undef:1; | 259 | u32 fan_ctrl_status_undef:1; |
260 | u32 second_fan:1; | ||
261 | u32 beep_needs_two_args:1; | ||
260 | u32 input_device_registered:1; | 262 | u32 input_device_registered:1; |
261 | u32 platform_drv_registered:1; | 263 | u32 platform_drv_registered:1; |
262 | u32 platform_drv_attrs_registered:1; | 264 | u32 platform_drv_attrs_registered:1; |
@@ -277,8 +279,10 @@ struct thinkpad_id_data { | |||
277 | char *bios_version_str; /* Something like 1ZET51WW (1.03z) */ | 279 | char *bios_version_str; /* Something like 1ZET51WW (1.03z) */ |
278 | char *ec_version_str; /* Something like 1ZHT51WW-1.04a */ | 280 | char *ec_version_str; /* Something like 1ZHT51WW-1.04a */ |
279 | 281 | ||
280 | u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */ | 282 | u16 bios_model; /* 1Y = 0x5931, 0 = unknown */ |
281 | u16 ec_model; | 283 | u16 ec_model; |
284 | u16 bios_release; /* 1ZETK1WW = 0x314b, 0 = unknown */ | ||
285 | u16 ec_release; | ||
282 | 286 | ||
283 | char *model_str; /* ThinkPad T43 */ | 287 | char *model_str; /* ThinkPad T43 */ |
284 | char *nummodel_str; /* 9384A9C for a 9384-A9C model */ | 288 | char *nummodel_str; /* 9384A9C for a 9384-A9C model */ |
@@ -355,6 +359,73 @@ static void tpacpi_log_usertask(const char * const what) | |||
355 | } \ | 359 | } \ |
356 | } while (0) | 360 | } while (0) |
357 | 361 | ||
362 | /* | ||
363 | * Quirk handling helpers | ||
364 | * | ||
365 | * ThinkPad IDs and versions seen in the field so far | ||
366 | * are two characters from the set [0-9A-Z], i.e. base 36. | ||
367 | * | ||
368 | * We use values well outside that range as specials. | ||
369 | */ | ||
370 | |||
371 | #define TPACPI_MATCH_ANY 0xffffU | ||
372 | #define TPACPI_MATCH_UNKNOWN 0U | ||
373 | |||
374 | /* TPID('1', 'Y') == 0x5931 */ | ||
375 | #define TPID(__c1, __c2) (((__c2) << 8) | (__c1)) | ||
376 | |||
377 | #define TPACPI_Q_IBM(__id1, __id2, __quirk) \ | ||
378 | { .vendor = PCI_VENDOR_ID_IBM, \ | ||
379 | .bios = TPID(__id1, __id2), \ | ||
380 | .ec = TPACPI_MATCH_ANY, \ | ||
381 | .quirks = (__quirk) } | ||
382 | |||
383 | #define TPACPI_Q_LNV(__id1, __id2, __quirk) \ | ||
384 | { .vendor = PCI_VENDOR_ID_LENOVO, \ | ||
385 | .bios = TPID(__id1, __id2), \ | ||
386 | .ec = TPACPI_MATCH_ANY, \ | ||
387 | .quirks = (__quirk) } | ||
388 | |||
389 | struct tpacpi_quirk { | ||
390 | unsigned int vendor; | ||
391 | u16 bios; | ||
392 | u16 ec; | ||
393 | unsigned long quirks; | ||
394 | }; | ||
395 | |||
396 | /** | ||
397 | * tpacpi_check_quirks() - search for the BIOS/EC version in a quirk list | ||
398 | * @qlist: array of &struct tpacpi_quirk | ||
399 | * @qlist_size: number of elements in @qlist | ||
400 | * | ||
401 | * Iterates over a quirks list until one is found that matches the | ||
402 | * ThinkPad's vendor, BIOS and EC model. | ||
403 | * | ||
404 | * Returns 0 if nothing matches, otherwise returns the quirks field of | ||
405 | * the matching &struct tpacpi_quirk entry. | ||
406 | * | ||
407 | * The match criterion is: vendor, bios and ec must all match. | ||
408 | */ | ||
409 | static unsigned long __init tpacpi_check_quirks( | ||
410 | const struct tpacpi_quirk *qlist, | ||
411 | unsigned int qlist_size) | ||
412 | { | ||
413 | while (qlist_size) { | ||
414 | if ((qlist->vendor == thinkpad_id.vendor || | ||
415 | qlist->vendor == TPACPI_MATCH_ANY) && | ||
416 | (qlist->bios == thinkpad_id.bios_model || | ||
417 | qlist->bios == TPACPI_MATCH_ANY) && | ||
418 | (qlist->ec == thinkpad_id.ec_model || | ||
419 | qlist->ec == TPACPI_MATCH_ANY)) | ||
420 | return qlist->quirks; | ||
421 | |||
422 | qlist_size--; | ||
423 | qlist++; | ||
424 | } | ||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | |||
358 | /**************************************************************************** | 429 | /**************************************************************************** |
359 | **************************************************************************** | 430 | **************************************************************************** |
360 | * | 431 | * |
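
A minimal sketch of how a subdriver consumes the quirk helpers above; the quirk bit, table contents and function names here are invented for illustration only (the real users added by this patch are the LED, beep and fan subdrivers further below).

/* Illustration only -- hypothetical quirk bit and table. */
#define TPACPI_EXAMPLE_Q1	0x0001

static const struct tpacpi_quirk example_quirk_table[] __initconst = {
	TPACPI_Q_IBM('1', 'Y', TPACPI_EXAMPLE_Q1),	/* T43 */
	TPACPI_Q_LNV('7', 'M', TPACPI_EXAMPLE_Q1),	/* X60/X61 */
};

static int __init example_init(struct ibm_init_struct *iibm)
{
	unsigned long quirks = tpacpi_check_quirks(example_quirk_table,
					ARRAY_SIZE(example_quirk_table));

	if (quirks & TPACPI_EXAMPLE_Q1)
		printk(TPACPI_INFO "example quirk active on this model\n");
	return 0;
}
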
@@ -2880,7 +2951,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
2880 | /* update bright_acpimode... */ | 2951 | /* update bright_acpimode... */ |
2881 | tpacpi_check_std_acpi_brightness_support(); | 2952 | tpacpi_check_std_acpi_brightness_support(); |
2882 | 2953 | ||
2883 | if (tp_features.bright_acpimode) { | 2954 | if (tp_features.bright_acpimode && acpi_video_backlight_support()) { |
2884 | printk(TPACPI_INFO | 2955 | printk(TPACPI_INFO |
2885 | "This ThinkPad has standard ACPI backlight " | 2956 | "This ThinkPad has standard ACPI backlight " |
2886 | "brightness control, supported by the ACPI " | 2957 | "brightness control, supported by the ACPI " |
@@ -4773,7 +4844,7 @@ TPACPI_HANDLE(led, ec, "SLED", /* 570 */ | |||
4773 | "LED", /* all others */ | 4844 | "LED", /* all others */ |
4774 | ); /* R30, R31 */ | 4845 | ); /* R30, R31 */ |
4775 | 4846 | ||
4776 | #define TPACPI_LED_NUMLEDS 8 | 4847 | #define TPACPI_LED_NUMLEDS 16 |
4777 | static struct tpacpi_led_classdev *tpacpi_leds; | 4848 | static struct tpacpi_led_classdev *tpacpi_leds; |
4778 | static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS]; | 4849 | static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS]; |
4779 | static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { | 4850 | static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { |
@@ -4786,15 +4857,20 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { | |||
4786 | "tpacpi::dock_batt", | 4857 | "tpacpi::dock_batt", |
4787 | "tpacpi::unknown_led", | 4858 | "tpacpi::unknown_led", |
4788 | "tpacpi::standby", | 4859 | "tpacpi::standby", |
4860 | "tpacpi::dock_status1", | ||
4861 | "tpacpi::dock_status2", | ||
4862 | "tpacpi::unknown_led2", | ||
4863 | "tpacpi::unknown_led3", | ||
4864 | "tpacpi::thinkvantage", | ||
4789 | }; | 4865 | }; |
4790 | #define TPACPI_SAFE_LEDS 0x0081U | 4866 | #define TPACPI_SAFE_LEDS 0x1081U |
4791 | 4867 | ||
4792 | static inline bool tpacpi_is_led_restricted(const unsigned int led) | 4868 | static inline bool tpacpi_is_led_restricted(const unsigned int led) |
4793 | { | 4869 | { |
4794 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS | 4870 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS |
4795 | return false; | 4871 | return false; |
4796 | #else | 4872 | #else |
4797 | return (TPACPI_SAFE_LEDS & (1 << led)) == 0; | 4873 | return (1U & (TPACPI_SAFE_LEDS >> led)) == 0; |
4798 | #endif | 4874 | #endif |
4799 | } | 4875 | } |
4800 | 4876 | ||
@@ -4956,6 +5032,10 @@ static int __init tpacpi_init_led(unsigned int led) | |||
4956 | 5032 | ||
4957 | tpacpi_leds[led].led = led; | 5033 | tpacpi_leds[led].led = led; |
4958 | 5034 | ||
5035 | /* LEDs with no name don't get registered */ | ||
5036 | if (!tpacpi_led_names[led]) | ||
5037 | return 0; | ||
5038 | |||
4959 | tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set; | 5039 | tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set; |
4960 | tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set; | 5040 | tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set; |
4961 | if (led_supported == TPACPI_LED_570) | 5041 | if (led_supported == TPACPI_LED_570) |
@@ -4974,10 +5054,59 @@ static int __init tpacpi_init_led(unsigned int led) | |||
4974 | return rc; | 5054 | return rc; |
4975 | } | 5055 | } |
4976 | 5056 | ||
5057 | static const struct tpacpi_quirk led_useful_qtable[] __initconst = { | ||
5058 | TPACPI_Q_IBM('1', 'E', 0x009f), /* A30 */ | ||
5059 | TPACPI_Q_IBM('1', 'N', 0x009f), /* A31 */ | ||
5060 | TPACPI_Q_IBM('1', 'G', 0x009f), /* A31 */ | ||
5061 | |||
5062 | TPACPI_Q_IBM('1', 'I', 0x0097), /* T30 */ | ||
5063 | TPACPI_Q_IBM('1', 'R', 0x0097), /* T40, T41, T42, R50, R51 */ | ||
5064 | TPACPI_Q_IBM('7', '0', 0x0097), /* T43, R52 */ | ||
5065 | TPACPI_Q_IBM('1', 'Y', 0x0097), /* T43 */ | ||
5066 | TPACPI_Q_IBM('1', 'W', 0x0097), /* R50e */ | ||
5067 | TPACPI_Q_IBM('1', 'V', 0x0097), /* R51 */ | ||
5068 | TPACPI_Q_IBM('7', '8', 0x0097), /* R51e */ | ||
5069 | TPACPI_Q_IBM('7', '6', 0x0097), /* R52 */ | ||
5070 | |||
5071 | TPACPI_Q_IBM('1', 'K', 0x00bf), /* X30 */ | ||
5072 | TPACPI_Q_IBM('1', 'Q', 0x00bf), /* X31, X32 */ | ||
5073 | TPACPI_Q_IBM('1', 'U', 0x00bf), /* X40 */ | ||
5074 | TPACPI_Q_IBM('7', '4', 0x00bf), /* X41 */ | ||
5075 | TPACPI_Q_IBM('7', '5', 0x00bf), /* X41t */ | ||
5076 | |||
5077 | TPACPI_Q_IBM('7', '9', 0x1f97), /* T60 (1) */ | ||
5078 | TPACPI_Q_IBM('7', '7', 0x1f97), /* Z60* (1) */ | ||
5079 | TPACPI_Q_IBM('7', 'F', 0x1f97), /* Z61* (1) */ | ||
5080 | TPACPI_Q_IBM('7', 'B', 0x1fb7), /* X60 (1) */ | ||
5081 | |||
5082 | /* (1) - may have excess leds enabled on MSB */ | ||
5083 | |||
5084 | /* Defaults (order matters, keep last, don't reorder!) */ | ||
5085 | { /* Lenovo */ | ||
5086 | .vendor = PCI_VENDOR_ID_LENOVO, | ||
5087 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, | ||
5088 | .quirks = 0x1fffU, | ||
5089 | }, | ||
5090 | { /* IBM ThinkPads with no EC version string */ | ||
5091 | .vendor = PCI_VENDOR_ID_IBM, | ||
5092 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_UNKNOWN, | ||
5093 | .quirks = 0x00ffU, | ||
5094 | }, | ||
5095 | { /* IBM ThinkPads with EC version string */ | ||
5096 | .vendor = PCI_VENDOR_ID_IBM, | ||
5097 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, | ||
5098 | .quirks = 0x00bfU, | ||
5099 | }, | ||
5100 | }; | ||
5101 | |||
5102 | #undef TPACPI_LEDQ_IBM | ||
5103 | #undef TPACPI_LEDQ_LNV | ||
5104 | |||
4977 | static int __init led_init(struct ibm_init_struct *iibm) | 5105 | static int __init led_init(struct ibm_init_struct *iibm) |
4978 | { | 5106 | { |
4979 | unsigned int i; | 5107 | unsigned int i; |
4980 | int rc; | 5108 | int rc; |
5109 | unsigned long useful_leds; | ||
4981 | 5110 | ||
4982 | vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n"); | 5111 | vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n"); |
4983 | 5112 | ||
@@ -4999,6 +5128,9 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
4999 | vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", | 5128 | vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", |
5000 | str_supported(led_supported), led_supported); | 5129 | str_supported(led_supported), led_supported); |
5001 | 5130 | ||
5131 | if (led_supported == TPACPI_LED_NONE) | ||
5132 | return 1; | ||
5133 | |||
5002 | tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS, | 5134 | tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS, |
5003 | GFP_KERNEL); | 5135 | GFP_KERNEL); |
5004 | if (!tpacpi_leds) { | 5136 | if (!tpacpi_leds) { |
@@ -5006,8 +5138,12 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
5006 | return -ENOMEM; | 5138 | return -ENOMEM; |
5007 | } | 5139 | } |
5008 | 5140 | ||
5141 | useful_leds = tpacpi_check_quirks(led_useful_qtable, | ||
5142 | ARRAY_SIZE(led_useful_qtable)); | ||
5143 | |||
5009 | for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { | 5144 | for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { |
5010 | if (!tpacpi_is_led_restricted(i)) { | 5145 | if (!tpacpi_is_led_restricted(i) && |
5146 | test_bit(i, &useful_leds)) { | ||
5011 | rc = tpacpi_init_led(i); | 5147 | rc = tpacpi_init_led(i); |
5012 | if (rc < 0) { | 5148 | if (rc < 0) { |
5013 | led_exit(); | 5149 | led_exit(); |
@@ -5017,12 +5153,11 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
5017 | } | 5153 | } |
5018 | 5154 | ||
5019 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS | 5155 | #ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS |
5020 | if (led_supported != TPACPI_LED_NONE) | 5156 | printk(TPACPI_NOTICE |
5021 | printk(TPACPI_NOTICE | 5157 | "warning: userspace override of important " |
5022 | "warning: userspace override of important " | 5158 | "firmware LEDs is enabled\n"); |
5023 | "firmware LEDs is enabled\n"); | ||
5024 | #endif | 5159 | #endif |
5025 | return (led_supported != TPACPI_LED_NONE)? 0 : 1; | 5160 | return 0; |
5026 | } | 5161 | } |
5027 | 5162 | ||
5028 | #define str_led_status(s) \ | 5163 | #define str_led_status(s) \ |
@@ -5052,7 +5187,7 @@ static int led_read(char *p) | |||
5052 | } | 5187 | } |
5053 | 5188 | ||
5054 | len += sprintf(p + len, "commands:\t" | 5189 | len += sprintf(p + len, "commands:\t" |
5055 | "<led> on, <led> off, <led> blink (<led> is 0-7)\n"); | 5190 | "<led> on, <led> off, <led> blink (<led> is 0-15)\n"); |
5056 | 5191 | ||
5057 | return len; | 5192 | return len; |
5058 | } | 5193 | } |
@@ -5067,7 +5202,7 @@ static int led_write(char *buf) | |||
5067 | return -ENODEV; | 5202 | return -ENODEV; |
5068 | 5203 | ||
5069 | while ((cmd = next_cmd(&buf))) { | 5204 | while ((cmd = next_cmd(&buf))) { |
5070 | if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7) | 5205 | if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15) |
5071 | return -EINVAL; | 5206 | return -EINVAL; |
5072 | 5207 | ||
5073 | if (strstr(cmd, "off")) { | 5208 | if (strstr(cmd, "off")) { |
@@ -5101,8 +5236,17 @@ static struct ibm_struct led_driver_data = { | |||
5101 | 5236 | ||
5102 | TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */ | 5237 | TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */ |
5103 | 5238 | ||
5239 | #define TPACPI_BEEP_Q1 0x0001 | ||
5240 | |||
5241 | static const struct tpacpi_quirk beep_quirk_table[] __initconst = { | ||
5242 | TPACPI_Q_IBM('I', 'M', TPACPI_BEEP_Q1), /* 570 */ | ||
5243 | TPACPI_Q_IBM('I', 'U', TPACPI_BEEP_Q1), /* 570E - unverified */ | ||
5244 | }; | ||
5245 | |||
5104 | static int __init beep_init(struct ibm_init_struct *iibm) | 5246 | static int __init beep_init(struct ibm_init_struct *iibm) |
5105 | { | 5247 | { |
5248 | unsigned long quirks; | ||
5249 | |||
5106 | vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n"); | 5250 | vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n"); |
5107 | 5251 | ||
5108 | TPACPI_ACPIHANDLE_INIT(beep); | 5252 | TPACPI_ACPIHANDLE_INIT(beep); |
@@ -5110,6 +5254,11 @@ static int __init beep_init(struct ibm_init_struct *iibm) | |||
5110 | vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n", | 5254 | vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n", |
5111 | str_supported(beep_handle != NULL)); | 5255 | str_supported(beep_handle != NULL)); |
5112 | 5256 | ||
5257 | quirks = tpacpi_check_quirks(beep_quirk_table, | ||
5258 | ARRAY_SIZE(beep_quirk_table)); | ||
5259 | |||
5260 | tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1); | ||
5261 | |||
5113 | return (beep_handle)? 0 : 1; | 5262 | return (beep_handle)? 0 : 1; |
5114 | } | 5263 | } |
5115 | 5264 | ||
@@ -5141,8 +5290,15 @@ static int beep_write(char *buf) | |||
5141 | /* beep_cmd set */ | 5290 | /* beep_cmd set */ |
5142 | } else | 5291 | } else |
5143 | return -EINVAL; | 5292 | return -EINVAL; |
5144 | if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0)) | 5293 | if (tp_features.beep_needs_two_args) { |
5145 | return -EIO; | 5294 | if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", |
5295 | beep_cmd, 0)) | ||
5296 | return -EIO; | ||
5297 | } else { | ||
5298 | if (!acpi_evalf(beep_handle, NULL, NULL, "vd", | ||
5299 | beep_cmd)) | ||
5300 | return -EIO; | ||
5301 | } | ||
5146 | } | 5302 | } |
5147 | 5303 | ||
5148 | return 0; | 5304 | return 0; |
@@ -5569,6 +5725,10 @@ static struct ibm_struct ecdump_driver_data = { | |||
5569 | * Bit 3-0: backlight brightness level | 5725 | * Bit 3-0: backlight brightness level |
5570 | * | 5726 | * |
5571 | * brightness_get_raw returns status data in the HBRV layout | 5727 | * brightness_get_raw returns status data in the HBRV layout |
5728 | * | ||
5729 | * WARNING: The X61 has been verified to use HBRV for something else, so | ||
5730 | * this should be used _only_ on IBM ThinkPads, and maybe with some careful | ||
5731 | * testing on the very early *60 Lenovo models... | ||
5572 | */ | 5732 | */ |
5573 | 5733 | ||
5574 | enum { | 5734 | enum { |
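
As a small worked illustration of the layout described above: only bits 3-0 carry the brightness level, so a raw HBRV-layout status byte reduces to a level as sketched below (the mask is derived from the bit description in the comment, not a symbol defined by this patch).

/* Sketch: extract the brightness level from an HBRV-layout status byte. */
static inline unsigned int hbrv_to_level(u8 status)
{
	return status & 0x0f;	/* bits 3-0: backlight brightness level */
}
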
@@ -5869,6 +6029,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm) | |||
5869 | brightness_mode); | 6029 | brightness_mode); |
5870 | } | 6030 | } |
5871 | 6031 | ||
6032 | /* Safety */ | ||
6033 | if (thinkpad_id.vendor != PCI_VENDOR_ID_IBM && | ||
6034 | (brightness_mode == TPACPI_BRGHT_MODE_ECNVRAM || | ||
6035 | brightness_mode == TPACPI_BRGHT_MODE_EC)) | ||
6036 | return -EINVAL; | ||
6037 | |||
5872 | if (tpacpi_brightness_get_raw(&b) < 0) | 6038 | if (tpacpi_brightness_get_raw(&b) < 0) |
5873 | return 1; | 6039 | return 1; |
5874 | 6040 | ||
@@ -6161,6 +6327,21 @@ static struct ibm_struct volume_driver_data = { | |||
6161 | * For firmware bugs, refer to: | 6327 | * For firmware bugs, refer to: |
6162 | * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues | 6328 | * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues |
6163 | * | 6329 | * |
6330 | * ---- | ||
6331 | * | ||
6332 | * ThinkPad EC register 0x31 bit 0 (only on select models) | ||
6333 | * | ||
6334 | * When bit 0 of EC register 0x31 is zero, the tachometer registers | ||
6335 | * show the speed of the main fan. When bit 0 of EC register 0x31 | ||
6336 | * is one, the tachometer registers show the speed of the auxiliary | ||
6337 | * fan. | ||
6338 | * | ||
6339 | * Fan control seems to affect both fans, regardless of the state | ||
6340 | * of this bit. | ||
6341 | * | ||
6342 | * So far, only the firmware for the X60/X61 non-tablet versions | ||
6343 | * seems to support this (firmware TP-7M). | ||
6344 | * | ||
6164 | * TPACPI_FAN_WR_ACPI_FANS: | 6345 | * TPACPI_FAN_WR_ACPI_FANS: |
6165 | * ThinkPad X31, X40, X41. Not available in the X60. | 6346 | * ThinkPad X31, X40, X41. Not available in the X60. |
6166 | * | 6347 | * |
@@ -6187,6 +6368,8 @@ enum { /* Fan control constants */ | |||
6187 | fan_status_offset = 0x2f, /* EC register 0x2f */ | 6368 | fan_status_offset = 0x2f, /* EC register 0x2f */ |
6188 | fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM) | 6369 | fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM) |
6189 | * 0x84 must be read before 0x85 */ | 6370 | * 0x84 must be read before 0x85 */ |
6371 | fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M) | ||
6372 | bit 0 selects which fan is active */ | ||
6190 | 6373 | ||
6191 | TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */ | 6374 | TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */ |
6192 | TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */ | 6375 | TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */ |
@@ -6249,30 +6432,18 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */ | |||
6249 | * We assume 0x07 really means auto mode while this quirk is active, | 6432 | * We assume 0x07 really means auto mode while this quirk is active, |
6250 | * as this is far more likely than the ThinkPad being in level 7, | 6433 | * as this is far more likely than the ThinkPad being in level 7, |
6251 | * which is only used by the firmware during thermal emergencies. | 6434 | * which is only used by the firmware during thermal emergencies. |
6435 | * | ||
6436 | * Enable for TP-1Y (T43), TP-78 (R51e), TP-76 (R52), | ||
6437 | * TP-70 (T43, R52), which are known to be buggy. | ||
6252 | */ | 6438 | */ |
6253 | 6439 | ||
6254 | static void fan_quirk1_detect(void) | 6440 | static void fan_quirk1_setup(void) |
6255 | { | 6441 | { |
6256 | /* In some ThinkPads, neither the EC nor the ACPI | ||
6257 | * DSDT initialize the HFSP register, and it ends up | ||
6258 | * being initially set to 0x07 when it *could* be | ||
6259 | * either 0x07 or 0x80. | ||
6260 | * | ||
6261 | * Enable for TP-1Y (T43), TP-78 (R51e), | ||
6262 | * TP-76 (R52), TP-70 (T43, R52), which are known | ||
6263 | * to be buggy. */ | ||
6264 | if (fan_control_initial_status == 0x07) { | 6442 | if (fan_control_initial_status == 0x07) { |
6265 | switch (thinkpad_id.ec_model) { | 6443 | printk(TPACPI_NOTICE |
6266 | case 0x5931: /* TP-1Y */ | 6444 | "fan_init: initial fan status is unknown, " |
6267 | case 0x3837: /* TP-78 */ | 6445 | "assuming it is in auto mode\n"); |
6268 | case 0x3637: /* TP-76 */ | 6446 | tp_features.fan_ctrl_status_undef = 1; |
6269 | case 0x3037: /* TP-70 */ | ||
6270 | printk(TPACPI_NOTICE | ||
6271 | "fan_init: initial fan status is unknown, " | ||
6272 | "assuming it is in auto mode\n"); | ||
6273 | tp_features.fan_ctrl_status_undef = 1; | ||
6274 | ;; | ||
6275 | } | ||
6276 | } | 6447 | } |
6277 | } | 6448 | } |
6278 | 6449 | ||
@@ -6292,6 +6463,38 @@ static void fan_quirk1_handle(u8 *fan_status) | |||
6292 | } | 6463 | } |
6293 | } | 6464 | } |
6294 | 6465 | ||
6466 | /* Select main fan on X60/X61, NOOP on others */ | ||
6467 | static bool fan_select_fan1(void) | ||
6468 | { | ||
6469 | if (tp_features.second_fan) { | ||
6470 | u8 val; | ||
6471 | |||
6472 | if (ec_read(fan_select_offset, &val) < 0) | ||
6473 | return false; | ||
6474 | val &= 0xFEU; | ||
6475 | if (ec_write(fan_select_offset, val) < 0) | ||
6476 | return false; | ||
6477 | } | ||
6478 | return true; | ||
6479 | } | ||
6480 | |||
6481 | /* Select secondary fan on X60/X61 */ | ||
6482 | static bool fan_select_fan2(void) | ||
6483 | { | ||
6484 | u8 val; | ||
6485 | |||
6486 | if (!tp_features.second_fan) | ||
6487 | return false; | ||
6488 | |||
6489 | if (ec_read(fan_select_offset, &val) < 0) | ||
6490 | return false; | ||
6491 | val |= 0x01U; | ||
6492 | if (ec_write(fan_select_offset, val) < 0) | ||
6493 | return false; | ||
6494 | |||
6495 | return true; | ||
6496 | } | ||
6497 | |||
6295 | /* | 6498 | /* |
6296 | * Call with fan_mutex held | 6499 | * Call with fan_mutex held |
6297 | */ | 6500 | */ |
@@ -6369,6 +6572,8 @@ static int fan_get_speed(unsigned int *speed) | |||
6369 | switch (fan_status_access_mode) { | 6572 | switch (fan_status_access_mode) { |
6370 | case TPACPI_FAN_RD_TPEC: | 6573 | case TPACPI_FAN_RD_TPEC: |
6371 | /* all except 570, 600e/x, 770e, 770x */ | 6574 | /* all except 570, 600e/x, 770e, 770x */ |
6575 | if (unlikely(!fan_select_fan1())) | ||
6576 | return -EIO; | ||
6372 | if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) || | 6577 | if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) || |
6373 | !acpi_ec_read(fan_rpm_offset + 1, &hi))) | 6578 | !acpi_ec_read(fan_rpm_offset + 1, &hi))) |
6374 | return -EIO; | 6579 | return -EIO; |
@@ -6385,6 +6590,34 @@ static int fan_get_speed(unsigned int *speed) | |||
6385 | return 0; | 6590 | return 0; |
6386 | } | 6591 | } |
6387 | 6592 | ||
6593 | static int fan2_get_speed(unsigned int *speed) | ||
6594 | { | ||
6595 | u8 hi, lo; | ||
6596 | bool rc; | ||
6597 | |||
6598 | switch (fan_status_access_mode) { | ||
6599 | case TPACPI_FAN_RD_TPEC: | ||
6600 | /* all except 570, 600e/x, 770e, 770x */ | ||
6601 | if (unlikely(!fan_select_fan2())) | ||
6602 | return -EIO; | ||
6603 | rc = !acpi_ec_read(fan_rpm_offset, &lo) || | ||
6604 | !acpi_ec_read(fan_rpm_offset + 1, &hi); | ||
6605 | fan_select_fan1(); /* play it safe */ | ||
6606 | if (rc) | ||
6607 | return -EIO; | ||
6608 | |||
6609 | if (likely(speed)) | ||
6610 | *speed = (hi << 8) | lo; | ||
6611 | |||
6612 | break; | ||
6613 | |||
6614 | default: | ||
6615 | return -ENXIO; | ||
6616 | } | ||
6617 | |||
6618 | return 0; | ||
6619 | } | ||
6620 | |||
6388 | static int fan_set_level(int level) | 6621 | static int fan_set_level(int level) |
6389 | { | 6622 | { |
6390 | if (!fan_control_allowed) | 6623 | if (!fan_control_allowed) |
@@ -6790,6 +7023,25 @@ static struct device_attribute dev_attr_fan_fan1_input = | |||
6790 | __ATTR(fan1_input, S_IRUGO, | 7023 | __ATTR(fan1_input, S_IRUGO, |
6791 | fan_fan1_input_show, NULL); | 7024 | fan_fan1_input_show, NULL); |
6792 | 7025 | ||
7026 | /* sysfs fan fan2_input ------------------------------------------------ */ | ||
7027 | static ssize_t fan_fan2_input_show(struct device *dev, | ||
7028 | struct device_attribute *attr, | ||
7029 | char *buf) | ||
7030 | { | ||
7031 | int res; | ||
7032 | unsigned int speed; | ||
7033 | |||
7034 | res = fan2_get_speed(&speed); | ||
7035 | if (res < 0) | ||
7036 | return res; | ||
7037 | |||
7038 | return snprintf(buf, PAGE_SIZE, "%u\n", speed); | ||
7039 | } | ||
7040 | |||
7041 | static struct device_attribute dev_attr_fan_fan2_input = | ||
7042 | __ATTR(fan2_input, S_IRUGO, | ||
7043 | fan_fan2_input_show, NULL); | ||
7044 | |||
6793 | /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ | 7045 | /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ |
6794 | static ssize_t fan_fan_watchdog_show(struct device_driver *drv, | 7046 | static ssize_t fan_fan_watchdog_show(struct device_driver *drv, |
6795 | char *buf) | 7047 | char *buf) |
@@ -6823,6 +7075,7 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO, | |||
6823 | static struct attribute *fan_attributes[] = { | 7075 | static struct attribute *fan_attributes[] = { |
6824 | &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, | 7076 | &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, |
6825 | &dev_attr_fan_fan1_input.attr, | 7077 | &dev_attr_fan_fan1_input.attr, |
7078 | NULL, /* for fan2_input */ | ||
6826 | NULL | 7079 | NULL |
6827 | }; | 7080 | }; |
6828 | 7081 | ||
@@ -6830,9 +7083,36 @@ static const struct attribute_group fan_attr_group = { | |||
6830 | .attrs = fan_attributes, | 7083 | .attrs = fan_attributes, |
6831 | }; | 7084 | }; |
6832 | 7085 | ||
7086 | #define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */ | ||
7087 | #define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ | ||
7088 | |||
7089 | #define TPACPI_FAN_QI(__id1, __id2, __quirks) \ | ||
7090 | { .vendor = PCI_VENDOR_ID_IBM, \ | ||
7091 | .bios = TPACPI_MATCH_ANY, \ | ||
7092 | .ec = TPID(__id1, __id2), \ | ||
7093 | .quirks = __quirks } | ||
7094 | |||
7095 | #define TPACPI_FAN_QL(__id1, __id2, __quirks) \ | ||
7096 | { .vendor = PCI_VENDOR_ID_LENOVO, \ | ||
7097 | .bios = TPACPI_MATCH_ANY, \ | ||
7098 | .ec = TPID(__id1, __id2), \ | ||
7099 | .quirks = __quirks } | ||
7100 | |||
7101 | static const struct tpacpi_quirk fan_quirk_table[] __initconst = { | ||
7102 | TPACPI_FAN_QI('1', 'Y', TPACPI_FAN_Q1), | ||
7103 | TPACPI_FAN_QI('7', '8', TPACPI_FAN_Q1), | ||
7104 | TPACPI_FAN_QI('7', '6', TPACPI_FAN_Q1), | ||
7105 | TPACPI_FAN_QI('7', '0', TPACPI_FAN_Q1), | ||
7106 | TPACPI_FAN_QL('7', 'M', TPACPI_FAN_2FAN), | ||
7107 | }; | ||
7108 | |||
7109 | #undef TPACPI_FAN_QL | ||
7110 | #undef TPACPI_FAN_QI | ||
7111 | |||
6833 | static int __init fan_init(struct ibm_init_struct *iibm) | 7112 | static int __init fan_init(struct ibm_init_struct *iibm) |
6834 | { | 7113 | { |
6835 | int rc; | 7114 | int rc; |
7115 | unsigned long quirks; | ||
6836 | 7116 | ||
6837 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN, | 7117 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN, |
6838 | "initializing fan subdriver\n"); | 7118 | "initializing fan subdriver\n"); |
@@ -6843,12 +7123,16 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
6843 | fan_control_commands = 0; | 7123 | fan_control_commands = 0; |
6844 | fan_watchdog_maxinterval = 0; | 7124 | fan_watchdog_maxinterval = 0; |
6845 | tp_features.fan_ctrl_status_undef = 0; | 7125 | tp_features.fan_ctrl_status_undef = 0; |
7126 | tp_features.second_fan = 0; | ||
6846 | fan_control_desired_level = 7; | 7127 | fan_control_desired_level = 7; |
6847 | 7128 | ||
6848 | TPACPI_ACPIHANDLE_INIT(fans); | 7129 | TPACPI_ACPIHANDLE_INIT(fans); |
6849 | TPACPI_ACPIHANDLE_INIT(gfan); | 7130 | TPACPI_ACPIHANDLE_INIT(gfan); |
6850 | TPACPI_ACPIHANDLE_INIT(sfan); | 7131 | TPACPI_ACPIHANDLE_INIT(sfan); |
6851 | 7132 | ||
7133 | quirks = tpacpi_check_quirks(fan_quirk_table, | ||
7134 | ARRAY_SIZE(fan_quirk_table)); | ||
7135 | |||
6852 | if (gfan_handle) { | 7136 | if (gfan_handle) { |
6853 | /* 570, 600e/x, 770e, 770x */ | 7137 | /* 570, 600e/x, 770e, 770x */ |
6854 | fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; | 7138 | fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; |
@@ -6858,7 +7142,13 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
6858 | if (likely(acpi_ec_read(fan_status_offset, | 7142 | if (likely(acpi_ec_read(fan_status_offset, |
6859 | &fan_control_initial_status))) { | 7143 | &fan_control_initial_status))) { |
6860 | fan_status_access_mode = TPACPI_FAN_RD_TPEC; | 7144 | fan_status_access_mode = TPACPI_FAN_RD_TPEC; |
6861 | fan_quirk1_detect(); | 7145 | if (quirks & TPACPI_FAN_Q1) |
7146 | fan_quirk1_setup(); | ||
7147 | if (quirks & TPACPI_FAN_2FAN) { | ||
7148 | tp_features.second_fan = 1; | ||
7149 | dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN, | ||
7150 | "secondary fan support enabled\n"); | ||
7151 | } | ||
6862 | } else { | 7152 | } else { |
6863 | printk(TPACPI_ERR | 7153 | printk(TPACPI_ERR |
6864 | "ThinkPad ACPI EC access misbehaving, " | 7154 | "ThinkPad ACPI EC access misbehaving, " |
@@ -6914,6 +7204,11 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
6914 | 7204 | ||
6915 | if (fan_status_access_mode != TPACPI_FAN_NONE || | 7205 | if (fan_status_access_mode != TPACPI_FAN_NONE || |
6916 | fan_control_access_mode != TPACPI_FAN_WR_NONE) { | 7206 | fan_control_access_mode != TPACPI_FAN_WR_NONE) { |
7207 | if (tp_features.second_fan) { | ||
7208 | /* attach second fan tachometer */ | ||
7209 | fan_attributes[ARRAY_SIZE(fan_attributes)-2] = | ||
7210 | &dev_attr_fan_fan2_input.attr; | ||
7211 | } | ||
6917 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, | 7212 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, |
6918 | &fan_attr_group); | 7213 | &fan_attr_group); |
6919 | if (rc < 0) | 7214 | if (rc < 0) |
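
The NULL entry added to fan_attributes[] earlier in this patch exists precisely so the block above can patch in fan2_input before sysfs_create_group() runs. In isolation the pattern looks like the sketch below; the attribute and flag names are generic placeholders, not symbols from this patch.

/* Generic sketch of the reserved-slot pattern used for fan2_input. */
static struct attribute *example_attrs[] = {
	&dev_attr_example_always.attr,
	NULL,	/* reserved slot for an optional attribute */
	NULL	/* array terminator expected by sysfs */
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static int __init example_sysfs_init(struct kobject *kobj, bool optional_ok)
{
	if (optional_ok)	/* fill the reserved slot before registration */
		example_attrs[ARRAY_SIZE(example_attrs) - 2] =
			&dev_attr_example_optional.attr;

	return sysfs_create_group(kobj, &example_attr_group);
}
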
@@ -7385,6 +7680,24 @@ err_out: | |||
7385 | 7680 | ||
7386 | /* Probing */ | 7681 | /* Probing */ |
7387 | 7682 | ||
7683 | static bool __pure __init tpacpi_is_fw_digit(const char c) | ||
7684 | { | ||
7685 | return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z'); | ||
7686 | } | ||
7687 | |||
7688 | /* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lack (#.##c) */ | ||
7689 | static bool __pure __init tpacpi_is_valid_fw_id(const char* const s, | ||
7690 | const char t) | ||
7691 | { | ||
7692 | return s && strlen(s) >= 8 && | ||
7693 | tpacpi_is_fw_digit(s[0]) && | ||
7694 | tpacpi_is_fw_digit(s[1]) && | ||
7695 | s[2] == t && s[3] == 'T' && | ||
7696 | tpacpi_is_fw_digit(s[4]) && | ||
7697 | tpacpi_is_fw_digit(s[5]) && | ||
7698 | s[6] == 'W' && s[7] == 'W'; | ||
7699 | } | ||
7700 | |||
7388 | /* returns 0 - probe ok, or < 0 - probe error. | 7701 | /* returns 0 - probe ok, or < 0 - probe error. |
7389 | * Probe ok doesn't mean thinkpad found. | 7702 | * Probe ok doesn't mean thinkpad found. |
7390 | * On error, kfree() cleanup on tp->* is not performed, caller must do it */ | 7703 | * On error, kfree() cleanup on tp->* is not performed, caller must do it */ |
@@ -7411,10 +7724,15 @@ static int __must_check __init get_thinkpad_model_data( | |||
7411 | tp->bios_version_str = kstrdup(s, GFP_KERNEL); | 7724 | tp->bios_version_str = kstrdup(s, GFP_KERNEL); |
7412 | if (s && !tp->bios_version_str) | 7725 | if (s && !tp->bios_version_str) |
7413 | return -ENOMEM; | 7726 | return -ENOMEM; |
7414 | if (!tp->bios_version_str) | 7727 | |
7728 | /* Really ancient ThinkPad 240X will fail this, which is fine */ | ||
7729 | if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E')) | ||
7415 | return 0; | 7730 | return 0; |
7731 | |||
7416 | tp->bios_model = tp->bios_version_str[0] | 7732 | tp->bios_model = tp->bios_version_str[0] |
7417 | | (tp->bios_version_str[1] << 8); | 7733 | | (tp->bios_version_str[1] << 8); |
7734 | tp->bios_release = (tp->bios_version_str[4] << 8) | ||
7735 | | tp->bios_version_str[5]; | ||
7418 | 7736 | ||
7419 | /* | 7737 | /* |
7420 | * ThinkPad T23 or newer, A31 or newer, R50e or newer, | 7738 | * ThinkPad T23 or newer, A31 or newer, R50e or newer, |
@@ -7433,8 +7751,21 @@ static int __must_check __init get_thinkpad_model_data( | |||
7433 | tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); | 7751 | tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); |
7434 | if (!tp->ec_version_str) | 7752 | if (!tp->ec_version_str) |
7435 | return -ENOMEM; | 7753 | return -ENOMEM; |
7436 | tp->ec_model = ec_fw_string[0] | 7754 | |
7437 | | (ec_fw_string[1] << 8); | 7755 | if (tpacpi_is_valid_fw_id(ec_fw_string, 'H')) { |
7756 | tp->ec_model = ec_fw_string[0] | ||
7757 | | (ec_fw_string[1] << 8); | ||
7758 | tp->ec_release = (ec_fw_string[4] << 8) | ||
7759 | | ec_fw_string[5]; | ||
7760 | } else { | ||
7761 | printk(TPACPI_NOTICE | ||
7762 | "ThinkPad firmware release %s " | ||
7763 | "doesn't match the known patterns\n", | ||
7764 | ec_fw_string); | ||
7765 | printk(TPACPI_NOTICE | ||
7766 | "please report this to %s\n", | ||
7767 | TPACPI_MAIL); | ||
7768 | } | ||
7438 | break; | 7769 | break; |
7439 | } | 7770 | } |
7440 | } | 7771 | } |
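
Using the sample version strings mentioned in the struct thinkpad_id_data comments earlier in this patch ("1ZET51WW (1.03z)" for the BIOS and "1ZHT51WW-1.04a" for the EC), the new parsing works out as follows; the concrete numbers are shown purely as a worked example.

/* Worked example, based on the sample strings in the struct comments:
 *   BIOS "1ZET51WW":  s[2]=='E', s[3]=='T'  -> tpacpi_is_valid_fw_id(s, 'E') is true
 *     bios_model   = '1' | ('Z' << 8)   = 0x5a31  (i.e. TPID('1', 'Z'))
 *     bios_release = ('5' << 8) | '1'   = 0x3531
 *   EC "1ZHT51WW-1.04a":  s[2]=='H', s[3]=='T' -> tpacpi_is_valid_fw_id(s, 'H') is true
 *     ec_model     = '1' | ('Z' << 8)   = 0x5a31
 *     ec_release   = ('5' << 8) | '1'   = 0x3531
 */
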
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 7f207f335bec..ef3a2cd3a7a0 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -287,6 +287,25 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | |||
287 | ACPI_DECODE_16); | 287 | ACPI_DECODE_16); |
288 | } | 288 | } |
289 | 289 | ||
290 | static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, | ||
291 | struct acpi_resource *res) | ||
292 | { | ||
293 | struct acpi_resource_extended_address64 *p = &res->data.ext_address64; | ||
294 | |||
295 | if (p->producer_consumer == ACPI_PRODUCER) | ||
296 | return; | ||
297 | |||
298 | if (p->resource_type == ACPI_MEMORY_RANGE) | ||
299 | pnpacpi_parse_allocated_memresource(dev, | ||
300 | p->minimum, p->address_length, | ||
301 | p->info.mem.write_protect); | ||
302 | else if (p->resource_type == ACPI_IO_RANGE) | ||
303 | pnpacpi_parse_allocated_ioresource(dev, | ||
304 | p->minimum, p->address_length, | ||
305 | p->granularity == 0xfff ? ACPI_DECODE_10 : | ||
306 | ACPI_DECODE_16); | ||
307 | } | ||
308 | |||
290 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | 309 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, |
291 | void *data) | 310 | void *data) |
292 | { | 311 | { |
@@ -400,8 +419,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
400 | break; | 419 | break; |
401 | 420 | ||
402 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | 421 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: |
403 | if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER) | 422 | pnpacpi_parse_allocated_ext_address_space(dev, res); |
404 | return AE_OK; | ||
405 | break; | 423 | break; |
406 | 424 | ||
407 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 425 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
@@ -630,6 +648,28 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, | |||
630 | IORESOURCE_IO_FIXED); | 648 | IORESOURCE_IO_FIXED); |
631 | } | 649 | } |
632 | 650 | ||
651 | static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, | ||
652 | unsigned int option_flags, | ||
653 | struct acpi_resource *r) | ||
654 | { | ||
655 | struct acpi_resource_extended_address64 *p = &r->data.ext_address64; | ||
656 | unsigned char flags = 0; | ||
657 | |||
658 | if (p->address_length == 0) | ||
659 | return; | ||
660 | |||
661 | if (p->resource_type == ACPI_MEMORY_RANGE) { | ||
662 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | ||
663 | flags = IORESOURCE_MEM_WRITEABLE; | ||
664 | pnp_register_mem_resource(dev, option_flags, p->minimum, | ||
665 | p->minimum, 0, p->address_length, | ||
666 | flags); | ||
667 | } else if (p->resource_type == ACPI_IO_RANGE) | ||
668 | pnp_register_port_resource(dev, option_flags, p->minimum, | ||
669 | p->minimum, 0, p->address_length, | ||
670 | IORESOURCE_IO_FIXED); | ||
671 | } | ||
672 | |||
633 | struct acpipnp_parse_option_s { | 673 | struct acpipnp_parse_option_s { |
634 | struct pnp_dev *dev; | 674 | struct pnp_dev *dev; |
635 | unsigned int option_flags; | 675 | unsigned int option_flags; |
@@ -711,6 +751,7 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, | |||
711 | break; | 751 | break; |
712 | 752 | ||
713 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | 753 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: |
754 | pnpacpi_parse_ext_address_option(dev, option_flags, res); | ||
714 | break; | 755 | break; |
715 | 756 | ||
716 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 757 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
@@ -765,6 +806,7 @@ static int pnpacpi_supported_resource(struct acpi_resource *res) | |||
765 | case ACPI_RESOURCE_TYPE_ADDRESS16: | 806 | case ACPI_RESOURCE_TYPE_ADDRESS16: |
766 | case ACPI_RESOURCE_TYPE_ADDRESS32: | 807 | case ACPI_RESOURCE_TYPE_ADDRESS32: |
767 | case ACPI_RESOURCE_TYPE_ADDRESS64: | 808 | case ACPI_RESOURCE_TYPE_ADDRESS64: |
809 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | ||
768 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 810 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
769 | return 1; | 811 | return 1; |
770 | } | 812 | } |
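These helpers give ACPI Extended Address Space descriptors the same treatment as the existing ADDRESS16/32/64 paths: producer resources are ignored, consumer memory ranges become mem resources (writeable when the descriptor says ACPI_READ_WRITE_MEMORY), and consumer I/O ranges become fixed port resources, with granularity == 0xfff used as the usual 10-bit vs 16-bit decode hint. For context, a hedged sketch of how such descriptors are reached: pnpacpi_allocated_resource() above is installed as the per-descriptor callback of the standard ACPICA resource walk.

	/* Sketch: count extended-address descriptors in a device's _CRS.
	 * acpi_walk_resources() and METHOD_NAME__CRS are standard ACPICA APIs;
	 * the callback shape matches pnpacpi_allocated_resource() above. */
	static acpi_status count_ext_addr(struct acpi_resource *res, void *data)
	{
		unsigned int *n = data;

		if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
			(*n)++;
		return AE_OK;
	}

	static unsigned int probe_ext_addr(acpi_handle handle)
	{
		unsigned int n = 0;

		acpi_walk_resources(handle, METHOD_NAME__CRS, count_ext_addr, &n);
		return n;	/* number of extended address descriptors seen */
	}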
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 33da1127992a..7eda34838bfe 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig | |||
@@ -82,6 +82,14 @@ config BATTERY_DA9030 | |||
82 | Say Y here to enable support for batteries charger integrated into | 82 | Say Y here to enable support for batteries charger integrated into |
83 | DA9030 PMIC. | 83 | DA9030 PMIC. |
84 | 84 | ||
85 | config BATTERY_MAX17040 | ||
86 | tristate "Maxim MAX17040 Fuel Gauge" | ||
87 | depends on I2C | ||
88 | help | ||
89 | The MAX17040 is a fuel-gauge system for lithium-ion (Li+) batteries | ||
90 | in handheld and portable equipment. The MAX17040 is configured | ||
91 | to operate with a single lithium cell. | ||
92 | |||
85 | config CHARGER_PCF50633 | 93 | config CHARGER_PCF50633 |
86 | tristate "NXP PCF50633 MBC" | 94 | tristate "NXP PCF50633 MBC" |
87 | depends on MFD_PCF50633 | 95 | depends on MFD_PCF50633 |
diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 2fcf41d13e5c..daf3179689aa 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile | |||
@@ -25,4 +25,5 @@ obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o | |||
25 | obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o | 25 | obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o |
26 | obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o | 26 | obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o |
27 | obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o | 27 | obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o |
28 | obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o \ No newline at end of file | 28 | obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o |
29 | obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o | ||
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c index 1662bb0f23a5..3364198134a1 100644 --- a/drivers/power/da9030_battery.c +++ b/drivers/power/da9030_battery.c | |||
@@ -22,8 +22,6 @@ | |||
22 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
23 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
24 | 24 | ||
25 | #define DA9030_STATUS_CHDET (1 << 3) | ||
26 | |||
27 | #define DA9030_FAULT_LOG 0x0a | 25 | #define DA9030_FAULT_LOG 0x0a |
28 | #define DA9030_FAULT_LOG_OVER_TEMP (1 << 7) | 26 | #define DA9030_FAULT_LOG_OVER_TEMP (1 << 7) |
29 | #define DA9030_FAULT_LOG_VBAT_OVER (1 << 4) | 27 | #define DA9030_FAULT_LOG_VBAT_OVER (1 << 4) |
@@ -244,6 +242,8 @@ static void da9030_set_charge(struct da9030_charger *charger, int on) | |||
244 | } | 242 | } |
245 | 243 | ||
246 | da903x_write(charger->master, DA9030_CHARGE_CONTROL, val); | 244 | da903x_write(charger->master, DA9030_CHARGE_CONTROL, val); |
245 | |||
246 | power_supply_changed(&charger->psy); | ||
247 | } | 247 | } |
248 | 248 | ||
249 | static void da9030_charger_check_state(struct da9030_charger *charger) | 249 | static void da9030_charger_check_state(struct da9030_charger *charger) |
@@ -258,6 +258,12 @@ static void da9030_charger_check_state(struct da9030_charger *charger) | |||
258 | da9030_set_charge(charger, 1); | 258 | da9030_set_charge(charger, 1); |
259 | } | 259 | } |
260 | } else { | 260 | } else { |
261 | /* Charger has been pulled out */ | ||
262 | if (!charger->chdet) { | ||
263 | da9030_set_charge(charger, 0); | ||
264 | return; | ||
265 | } | ||
266 | |||
261 | if (charger->adc.vbat_res >= | 267 | if (charger->adc.vbat_res >= |
262 | charger->thresholds.vbat_charge_stop) { | 268 | charger->thresholds.vbat_charge_stop) { |
263 | da9030_set_charge(charger, 0); | 269 | da9030_set_charge(charger, 0); |
@@ -395,13 +401,11 @@ static int da9030_battery_event(struct notifier_block *nb, unsigned long event, | |||
395 | { | 401 | { |
396 | struct da9030_charger *charger = | 402 | struct da9030_charger *charger = |
397 | container_of(nb, struct da9030_charger, nb); | 403 | container_of(nb, struct da9030_charger, nb); |
398 | int status; | ||
399 | 404 | ||
400 | switch (event) { | 405 | switch (event) { |
401 | case DA9030_EVENT_CHDET: | 406 | case DA9030_EVENT_CHDET: |
402 | status = da903x_query_status(charger->master, | 407 | cancel_delayed_work_sync(&charger->work); |
403 | DA9030_STATUS_CHDET); | 408 | schedule_work(&charger->work.work); |
404 | da9030_set_charge(charger, status); | ||
405 | break; | 409 | break; |
406 | case DA9030_EVENT_VBATMON: | 410 | case DA9030_EVENT_VBATMON: |
407 | da9030_battery_vbat_event(charger); | 411 | da9030_battery_vbat_event(charger); |
@@ -565,7 +569,8 @@ static int da9030_battery_remove(struct platform_device *dev) | |||
565 | da903x_unregister_notifier(charger->master, &charger->nb, | 569 | da903x_unregister_notifier(charger->master, &charger->nb, |
566 | DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | | 570 | DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON | |
567 | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); | 571 | DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT); |
568 | cancel_delayed_work(&charger->work); | 572 | cancel_delayed_work_sync(&charger->work); |
573 | da9030_set_charge(charger, 0); | ||
569 | power_supply_unregister(&charger->psy); | 574 | power_supply_unregister(&charger->psy); |
570 | 575 | ||
571 | kfree(charger); | 576 | kfree(charger); |
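With this change a CHDET (charger detect) interrupt no longer flips charging on or off directly from the notifier; it cancels any queued monitor work and runs the handler immediately, so da9030_charger_check_state() decides with fresh ADC readings and the new "charger has been pulled out" branch can shut charging down. Module removal likewise synchronizes against the work before forcing the charger off. The cancel-then-run-now idiom on a delayed_work, in isolation (a sketch, not driver code):

	#include <linux/workqueue.h>

	/* Sketch: force an immediate re-evaluation of a periodic delayed work. */
	static void kick_monitor(struct delayed_work *dwork)
	{
		cancel_delayed_work_sync(dwork);	/* wait out any running instance */
		schedule_work(&dwork->work);		/* run the handler right away */
	}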
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index a52d4a11652d..520b5c49ff30 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c | |||
@@ -62,6 +62,10 @@ static unsigned int cache_time = 1000; | |||
62 | module_param(cache_time, uint, 0644); | 62 | module_param(cache_time, uint, 0644); |
63 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 63 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
64 | 64 | ||
65 | static unsigned int pmod_enabled; | ||
66 | module_param(pmod_enabled, bool, 0644); | ||
67 | MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit"); | ||
68 | |||
65 | /* Some batteries have their rated capacity stored as N * 10 mAh, while | 69 | /* Some batteries have their rated capacity stored as N * 10 mAh, while |
66 | * others use an index into this table. */ | 70 | * others use an index into this table. */ |
67 | static int rated_capacities[] = { | 71 | static int rated_capacities[] = { |
@@ -259,6 +263,17 @@ static void ds2760_battery_update_status(struct ds2760_device_info *di) | |||
259 | power_supply_changed(&di->bat); | 263 | power_supply_changed(&di->bat); |
260 | } | 264 | } |
261 | 265 | ||
266 | static void ds2760_battery_write_status(struct ds2760_device_info *di, | ||
267 | char status) | ||
268 | { | ||
269 | if (status == di->raw[DS2760_STATUS_REG]) | ||
270 | return; | ||
271 | |||
272 | w1_ds2760_write(di->w1_dev, &status, DS2760_STATUS_WRITE_REG, 1); | ||
273 | w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1); | ||
274 | w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1); | ||
275 | } | ||
276 | |||
262 | static void ds2760_battery_work(struct work_struct *work) | 277 | static void ds2760_battery_work(struct work_struct *work) |
263 | { | 278 | { |
264 | struct ds2760_device_info *di = container_of(work, | 279 | struct ds2760_device_info *di = container_of(work, |
@@ -342,9 +357,9 @@ static enum power_supply_property ds2760_battery_props[] = { | |||
342 | 357 | ||
343 | static int ds2760_battery_probe(struct platform_device *pdev) | 358 | static int ds2760_battery_probe(struct platform_device *pdev) |
344 | { | 359 | { |
360 | char status; | ||
345 | int retval = 0; | 361 | int retval = 0; |
346 | struct ds2760_device_info *di; | 362 | struct ds2760_device_info *di; |
347 | struct ds2760_platform_data *pdata; | ||
348 | 363 | ||
349 | di = kzalloc(sizeof(*di), GFP_KERNEL); | 364 | di = kzalloc(sizeof(*di), GFP_KERNEL); |
350 | if (!di) { | 365 | if (!di) { |
@@ -354,14 +369,13 @@ static int ds2760_battery_probe(struct platform_device *pdev) | |||
354 | 369 | ||
355 | platform_set_drvdata(pdev, di); | 370 | platform_set_drvdata(pdev, di); |
356 | 371 | ||
357 | pdata = pdev->dev.platform_data; | 372 | di->dev = &pdev->dev; |
358 | di->dev = &pdev->dev; | 373 | di->w1_dev = pdev->dev.parent; |
359 | di->w1_dev = pdev->dev.parent; | 374 | di->bat.name = dev_name(&pdev->dev); |
360 | di->bat.name = dev_name(&pdev->dev); | 375 | di->bat.type = POWER_SUPPLY_TYPE_BATTERY; |
361 | di->bat.type = POWER_SUPPLY_TYPE_BATTERY; | 376 | di->bat.properties = ds2760_battery_props; |
362 | di->bat.properties = ds2760_battery_props; | 377 | di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props); |
363 | di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props); | 378 | di->bat.get_property = ds2760_battery_get_property; |
364 | di->bat.get_property = ds2760_battery_get_property; | ||
365 | di->bat.external_power_changed = | 379 | di->bat.external_power_changed = |
366 | ds2760_battery_external_power_changed; | 380 | ds2760_battery_external_power_changed; |
367 | 381 | ||
@@ -373,6 +387,16 @@ static int ds2760_battery_probe(struct platform_device *pdev) | |||
373 | goto batt_failed; | 387 | goto batt_failed; |
374 | } | 388 | } |
375 | 389 | ||
390 | /* enable sleep mode feature */ | ||
391 | ds2760_battery_read_status(di); | ||
392 | status = di->raw[DS2760_STATUS_REG]; | ||
393 | if (pmod_enabled) | ||
394 | status |= DS2760_STATUS_PMOD; | ||
395 | else | ||
396 | status &= ~DS2760_STATUS_PMOD; | ||
397 | |||
398 | ds2760_battery_write_status(di, status); | ||
399 | |||
376 | INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work); | 400 | INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work); |
377 | di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev)); | 401 | di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev)); |
378 | if (!di->monitor_wqueue) { | 402 | if (!di->monitor_wqueue) { |
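The new pmod_enabled parameter controls the DS2760 sleep-mode (PMOD) bit: probe reads the status register, sets or clears DS2760_STATUS_PMOD, and ds2760_battery_write_status() writes it back through the one-wire status write plus an EEPROM store/recall so the setting persists, skipping the write entirely if nothing changed. Loading the module with pmod_enabled=1 therefore enables sleep mode once at probe time. The bit handling in isolation (a sketch that restates the probe hunk using the driver's own names):

	/* Sketch: update the PMOD bit only when it actually changes. */
	static void ds2760_set_pmod(struct ds2760_device_info *di, bool enable)
	{
		char status = di->raw[DS2760_STATUS_REG];

		if (enable)
			status |= DS2760_STATUS_PMOD;
		else
			status &= ~DS2760_STATUS_PMOD;

		ds2760_battery_write_status(di, status);	/* no-op if unchanged */
	}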
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c new file mode 100644 index 000000000000..87b98bf27ae1 --- /dev/null +++ b/drivers/power/max17040_battery.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* | ||
2 | * max17040_battery.c | ||
3 | * fuel-gauge systems for lithium-ion (Li+) batteries | ||
4 | * | ||
5 | * Copyright (C) 2009 Samsung Electronics | ||
6 | * Minkyu Kang <mk7.kang@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/i2c.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/power_supply.h> | ||
21 | #include <linux/max17040_battery.h> | ||
22 | |||
23 | #define MAX17040_VCELL_MSB 0x02 | ||
24 | #define MAX17040_VCELL_LSB 0x03 | ||
25 | #define MAX17040_SOC_MSB 0x04 | ||
26 | #define MAX17040_SOC_LSB 0x05 | ||
27 | #define MAX17040_MODE_MSB 0x06 | ||
28 | #define MAX17040_MODE_LSB 0x07 | ||
29 | #define MAX17040_VER_MSB 0x08 | ||
30 | #define MAX17040_VER_LSB 0x09 | ||
31 | #define MAX17040_RCOMP_MSB 0x0C | ||
32 | #define MAX17040_RCOMP_LSB 0x0D | ||
33 | #define MAX17040_CMD_MSB 0xFE | ||
34 | #define MAX17040_CMD_LSB 0xFF | ||
35 | |||
36 | #define MAX17040_DELAY 1000 | ||
37 | #define MAX17040_BATTERY_FULL 95 | ||
38 | |||
39 | struct max17040_chip { | ||
40 | struct i2c_client *client; | ||
41 | struct delayed_work work; | ||
42 | struct power_supply battery; | ||
43 | struct max17040_platform_data *pdata; | ||
44 | |||
45 | /* battery online state */ | ||
46 | int online; | ||
47 | /* battery voltage */ | ||
48 | int vcell; | ||
49 | /* battery capacity (state of charge, %) */ | ||
50 | int soc; | ||
51 | /* charging status (POWER_SUPPLY_STATUS_*) */ | ||
52 | int status; | ||
53 | }; | ||
54 | |||
55 | static int max17040_get_property(struct power_supply *psy, | ||
56 | enum power_supply_property psp, | ||
57 | union power_supply_propval *val) | ||
58 | { | ||
59 | struct max17040_chip *chip = container_of(psy, | ||
60 | struct max17040_chip, battery); | ||
61 | |||
62 | switch (psp) { | ||
63 | case POWER_SUPPLY_PROP_STATUS: | ||
64 | val->intval = chip->status; | ||
65 | break; | ||
66 | case POWER_SUPPLY_PROP_ONLINE: | ||
67 | val->intval = chip->online; | ||
68 | break; | ||
69 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | ||
70 | val->intval = chip->vcell; | ||
71 | break; | ||
72 | case POWER_SUPPLY_PROP_CAPACITY: | ||
73 | val->intval = chip->soc; | ||
74 | break; | ||
75 | default: | ||
76 | return -EINVAL; | ||
77 | } | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | static int max17040_write_reg(struct i2c_client *client, int reg, u8 value) | ||
82 | { | ||
83 | int ret; | ||
84 | |||
85 | ret = i2c_smbus_write_byte_data(client, reg, value); | ||
86 | |||
87 | if (ret < 0) | ||
88 | dev_err(&client->dev, "%s: err %d\n", __func__, ret); | ||
89 | |||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | static int max17040_read_reg(struct i2c_client *client, int reg) | ||
94 | { | ||
95 | int ret; | ||
96 | |||
97 | ret = i2c_smbus_read_byte_data(client, reg); | ||
98 | |||
99 | if (ret < 0) | ||
100 | dev_err(&client->dev, "%s: err %d\n", __func__, ret); | ||
101 | |||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | static void max17040_reset(struct i2c_client *client) | ||
106 | { | ||
107 | max17040_write_reg(client, MAX17040_CMD_MSB, 0x54); | ||
108 | max17040_write_reg(client, MAX17040_CMD_LSB, 0x00); | ||
109 | } | ||
110 | |||
111 | static void max17040_get_vcell(struct i2c_client *client) | ||
112 | { | ||
113 | struct max17040_chip *chip = i2c_get_clientdata(client); | ||
114 | u8 msb; | ||
115 | u8 lsb; | ||
116 | |||
117 | msb = max17040_read_reg(client, MAX17040_VCELL_MSB); | ||
118 | lsb = max17040_read_reg(client, MAX17040_VCELL_LSB); | ||
119 | |||
120 | chip->vcell = (msb << 4) + (lsb >> 4); | ||
121 | } | ||
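VCELL is a 12-bit ADC reading split across two registers: the MSB register carries the top eight bits and the upper nibble of the LSB register carries the remaining four, hence (msb << 4) + (lsb >> 4). The driver stores the raw code; per the MAX17040 datasheet the LSB weight is 1.25 mV (a datasheet value, not something this patch states), so a conversion helper would look like:

	/* Sketch: raw 12-bit VCELL code to microvolts, assuming 1.25 mV/LSB. */
	static int max17040_vcell_to_uV(u8 msb, u8 lsb)
	{
		int raw = (msb << 4) | (lsb >> 4);	/* 12-bit ADC code */

		return raw * 1250;			/* 1.25 mV = 1250 uV */
	}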
122 | |||
123 | static void max17040_get_soc(struct i2c_client *client) | ||
124 | { | ||
125 | struct max17040_chip *chip = i2c_get_clientdata(client); | ||
126 | u8 msb; | ||
127 | u8 lsb; | ||
128 | |||
129 | msb = max17040_read_reg(client, MAX17040_SOC_MSB); | ||
130 | lsb = max17040_read_reg(client, MAX17040_SOC_LSB); | ||
131 | |||
132 | chip->soc = msb; | ||
133 | } | ||
134 | |||
135 | static void max17040_get_version(struct i2c_client *client) | ||
136 | { | ||
137 | u8 msb; | ||
138 | u8 lsb; | ||
139 | |||
140 | msb = max17040_read_reg(client, MAX17040_VER_MSB); | ||
141 | lsb = max17040_read_reg(client, MAX17040_VER_LSB); | ||
142 | |||
143 | dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb); | ||
144 | } | ||
145 | |||
146 | static void max17040_get_online(struct i2c_client *client) | ||
147 | { | ||
148 | struct max17040_chip *chip = i2c_get_clientdata(client); | ||
149 | |||
150 | if (chip->pdata->battery_online) | ||
151 | chip->online = chip->pdata->battery_online(); | ||
152 | else | ||
153 | chip->online = 1; | ||
154 | } | ||
155 | |||
156 | static void max17040_get_status(struct i2c_client *client) | ||
157 | { | ||
158 | struct max17040_chip *chip = i2c_get_clientdata(client); | ||
159 | |||
160 | if (!chip->pdata->charger_online || !chip->pdata->charger_enable) { | ||
161 | chip->status = POWER_SUPPLY_STATUS_UNKNOWN; | ||
162 | return; | ||
163 | } | ||
164 | |||
165 | if (chip->pdata->charger_online()) { | ||
166 | if (chip->pdata->charger_enable()) | ||
167 | chip->status = POWER_SUPPLY_STATUS_CHARGING; | ||
168 | else | ||
169 | chip->status = POWER_SUPPLY_STATUS_NOT_CHARGING; | ||
170 | } else { | ||
171 | chip->status = POWER_SUPPLY_STATUS_DISCHARGING; | ||
172 | } | ||
173 | |||
174 | if (chip->soc > MAX17040_BATTERY_FULL) | ||
175 | chip->status = POWER_SUPPLY_STATUS_FULL; | ||
176 | } | ||
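The status logic trusts only the board callbacks: without both charger_online and charger_enable it reports UNKNOWN; otherwise online plus enabled maps to CHARGING, online plus disabled to NOT_CHARGING, offline to DISCHARGING, and any of those is overridden with FULL once the state of charge passes MAX17040_BATTERY_FULL (95). The same decision as a standalone helper (a sketch; the parameter names are illustrative):

	static int max17040_decide_status(bool have_callbacks, bool charger_online,
					  bool charger_enable, int soc)
	{
		int status;

		if (!have_callbacks)
			return POWER_SUPPLY_STATUS_UNKNOWN;

		if (charger_online)
			status = charger_enable ? POWER_SUPPLY_STATUS_CHARGING
						: POWER_SUPPLY_STATUS_NOT_CHARGING;
		else
			status = POWER_SUPPLY_STATUS_DISCHARGING;

		if (soc > MAX17040_BATTERY_FULL)	/* above 95 % reads as full */
			status = POWER_SUPPLY_STATUS_FULL;

		return status;
	}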
177 | |||
178 | static void max17040_work(struct work_struct *work) | ||
179 | { | ||
180 | struct max17040_chip *chip; | ||
181 | |||
182 | chip = container_of(work, struct max17040_chip, work.work); | ||
183 | |||
184 | max17040_get_vcell(chip->client); | ||
185 | max17040_get_soc(chip->client); | ||
186 | max17040_get_online(chip->client); | ||
187 | max17040_get_status(chip->client); | ||
188 | |||
189 | schedule_delayed_work(&chip->work, MAX17040_DELAY); | ||
190 | } | ||
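The worker re-arms itself with schedule_delayed_work(&chip->work, MAX17040_DELAY). That second argument is in jiffies, and MAX17040_DELAY is the bare constant 1000, so the real poll period depends on HZ (1 s at HZ=1000, 4 s at HZ=250). A HZ-independent re-arm would convert from milliseconds; MAX17040_POLL_MS below is a hypothetical name, not from this patch:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	#define MAX17040_POLL_MS	1000

	/* Sketch: re-arm the poll with a wall-clock interval instead of raw jiffies. */
	static void max17040_rearm(struct delayed_work *dwork)
	{
		schedule_delayed_work(dwork, msecs_to_jiffies(MAX17040_POLL_MS));
	}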
191 | |||
192 | static enum power_supply_property max17040_battery_props[] = { | ||
193 | POWER_SUPPLY_PROP_STATUS, | ||
194 | POWER_SUPPLY_PROP_ONLINE, | ||
195 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | ||
196 | POWER_SUPPLY_PROP_CAPACITY, | ||
197 | }; | ||
198 | |||
199 | static int __devinit max17040_probe(struct i2c_client *client, | ||
200 | const struct i2c_device_id *id) | ||
201 | { | ||
202 | struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); | ||
203 | struct max17040_chip *chip; | ||
204 | int ret; | ||
205 | |||
206 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) | ||
207 | return -EIO; | ||
208 | |||
209 | chip = kzalloc(sizeof(*chip), GFP_KERNEL); | ||
210 | if (!chip) | ||
211 | return -ENOMEM; | ||
212 | |||
213 | chip->client = client; | ||
214 | chip->pdata = client->dev.platform_data; | ||
215 | |||
216 | i2c_set_clientdata(client, chip); | ||
217 | |||
218 | chip->battery.name = "battery"; | ||
219 | chip->battery.type = POWER_SUPPLY_TYPE_BATTERY; | ||
220 | chip->battery.get_property = max17040_get_property; | ||
221 | chip->battery.properties = max17040_battery_props; | ||
222 | chip->battery.num_properties = ARRAY_SIZE(max17040_battery_props); | ||
223 | |||
224 | ret = power_supply_register(&client->dev, &chip->battery); | ||
225 | if (ret) { | ||
226 | dev_err(&client->dev, "failed: power supply register\n"); | ||
227 | i2c_set_clientdata(client, NULL); | ||
228 | kfree(chip); | ||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | max17040_reset(client); | ||
233 | max17040_get_version(client); | ||
234 | |||
235 | INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work); | ||
236 | schedule_delayed_work(&chip->work, MAX17040_DELAY); | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int __devexit max17040_remove(struct i2c_client *client) | ||
242 | { | ||
243 | struct max17040_chip *chip = i2c_get_clientdata(client); | ||
244 | |||
245 | power_supply_unregister(&chip->battery); | ||
246 | cancel_delayed_work(&chip->work); | ||
247 | i2c_set_clientdata(client, NULL); | ||
248 | kfree(chip); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | #ifdef CONFIG_PM | ||
253 | |||
254 | static int max17040_suspend(struct i2c_client *client, | ||
255 | pm_message_t state) | ||
256 | { | ||
257 | struct max17040_chip *chip = i2c_get_clientdata(client); | ||
258 | |||
259 | cancel_delayed_work(&chip->work); | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static int max17040_resume(struct i2c_client *client) | ||
264 | { | ||
265 | struct max17040_chip *chip = i2c_get_clientdata(client); | ||
266 | |||
267 | schedule_delayed_work(&chip->work, MAX17040_DELAY); | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | #else | ||
272 | |||
273 | #define max17040_suspend NULL | ||
274 | #define max17040_resume NULL | ||
275 | |||
276 | #endif /* CONFIG_PM */ | ||
277 | |||
278 | static const struct i2c_device_id max17040_id[] = { | ||
279 | { "max17040", 0 }, | ||
280 | { } | ||
281 | }; | ||
282 | MODULE_DEVICE_TABLE(i2c, max17040_id); | ||
283 | |||
284 | static struct i2c_driver max17040_i2c_driver = { | ||
285 | .driver = { | ||
286 | .name = "max17040", | ||
287 | }, | ||
288 | .probe = max17040_probe, | ||
289 | .remove = __devexit_p(max17040_remove), | ||
290 | .suspend = max17040_suspend, | ||
291 | .resume = max17040_resume, | ||
292 | .id_table = max17040_id, | ||
293 | }; | ||
294 | |||
295 | static int __init max17040_init(void) | ||
296 | { | ||
297 | return i2c_add_driver(&max17040_i2c_driver); | ||
298 | } | ||
299 | module_init(max17040_init); | ||
300 | |||
301 | static void __exit max17040_exit(void) | ||
302 | { | ||
303 | i2c_del_driver(&max17040_i2c_driver); | ||
304 | } | ||
305 | module_exit(max17040_exit); | ||
306 | |||
307 | MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>"); | ||
308 | MODULE_DESCRIPTION("MAX17040 Fuel Gauge"); | ||
309 | MODULE_LICENSE("GPL"); | ||
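The driver gets its hooks from client->dev.platform_data: board code passes a struct max17040_platform_data (from the <linux/max17040_battery.h> header this series adds) whose optional battery_online, charger_online and charger_enable callbacks feed the online and status logic above. A hedged board-file sketch follows; the GPIO numbers, helper names and bus wiring are made up for illustration, and 0x36 is the address the part normally answers on:

	static int board_charger_online(void)
	{
		return gpio_get_value(GPIO_CHG_DETECT);	/* hypothetical GPIO */
	}

	static int board_charger_enable(void)
	{
		return gpio_get_value(GPIO_CHG_ENABLE);	/* hypothetical GPIO */
	}

	static struct max17040_platform_data board_max17040_pdata = {
		.charger_online	= board_charger_online,
		.charger_enable	= board_charger_enable,
		/* .battery_online left NULL: the driver then reports online = 1 */
	};

	static struct i2c_board_info board_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("max17040", 0x36),
			.platform_data	= &board_max17040_pdata,
		},
	};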
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 6a19ed9a1194..9c23122f755f 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -258,10 +258,21 @@ config SCSI_SCAN_ASYNC | |||
258 | or async on the kernel's command line. | 258 | or async on the kernel's command line. |
259 | 259 | ||
260 | config SCSI_WAIT_SCAN | 260 | config SCSI_WAIT_SCAN |
261 | tristate | 261 | tristate # No prompt here, this is an invisible symbol. |
262 | default m | 262 | default m |
263 | depends on SCSI | 263 | depends on SCSI |
264 | depends on MODULES | 264 | depends on MODULES |
265 | # scsi_wait_scan is a loadable module which waits until all the async scans are | ||
266 | # complete. The idea is to use it in initrd/ initramfs scripts. You modprobe | ||
267 | # it after all the modprobes of the root SCSI drivers and it will wait until | ||
268 | # they have all finished scanning their buses before allowing the boot to | ||
269 | # proceed. (This method is not applicable if targets boot independently in | ||
270 | # parallel with the initiator, or with transports with non-deterministic target | ||
271 | # discovery schemes, or if a transport driver does not support scsi_wait_scan.) | ||
272 | # | ||
273 | # This symbol is not exposed as a prompt because little is to be gained by | ||
274 | # disabling it, whereas people who accidentally switch it off may wonder why | ||
275 | # their mkinitrd gets into trouble. | ||
265 | 276 | ||
266 | menu "SCSI Transports" | 277 | menu "SCSI Transports" |
267 | depends on SCSI | 278 | depends on SCSI |
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index b62b482e55e7..1e9f7141102b 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig | |||
@@ -1,6 +1,8 @@ | |||
1 | config SCSI_BNX2_ISCSI | 1 | config SCSI_BNX2_ISCSI |
2 | tristate "Broadcom NetXtreme II iSCSI support" | 2 | tristate "Broadcom NetXtreme II iSCSI support" |
3 | select SCSI_ISCSI_ATTRS | 3 | select SCSI_ISCSI_ATTRS |
4 | select NETDEVICES | ||
5 | select NETDEV_1000 | ||
4 | select CNIC | 6 | select CNIC |
5 | depends on PCI | 7 | depends on PCI |
6 | ---help--- | 8 | ---help--- |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c index 99c912547902..344fd53b9954 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c +++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c | |||
@@ -206,6 +206,31 @@ int cxgb3i_ddp_find_page_index(unsigned long pgsz) | |||
206 | return DDP_PGIDX_MAX; | 206 | return DDP_PGIDX_MAX; |
207 | } | 207 | } |
208 | 208 | ||
209 | /** | ||
210 | * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE | ||
211 | * returns 0 if the page table was adjusted, -EINVAL if PAGE_SIZE is too small. | ||
212 | */ | ||
213 | int cxgb3i_ddp_adjust_page_table(void) | ||
214 | { | ||
215 | int i; | ||
216 | unsigned int base_order, order; | ||
217 | |||
218 | if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { | ||
219 | ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n", | ||
220 | PAGE_SIZE, 1UL << ddp_page_shift[0]); | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | |||
224 | base_order = get_order(1UL << ddp_page_shift[0]); | ||
225 | order = get_order(1 << PAGE_SHIFT); | ||
226 | for (i = 0; i < DDP_PGIDX_MAX; i++) { | ||
227 | /* first is the kernel page size, then just doubling the size */ | ||
228 | ddp_page_order[i] = order - base_order + i; | ||
229 | ddp_page_shift[i] = PAGE_SHIFT + i; | ||
230 | } | ||
231 | return 0; | ||
232 | } | ||
233 | |||
209 | static inline void ddp_gl_unmap(struct pci_dev *pdev, | 234 | static inline void ddp_gl_unmap(struct pci_dev *pdev, |
210 | struct cxgb3i_gather_list *gl) | 235 | struct cxgb3i_gather_list *gl) |
211 | { | 236 | { |
@@ -598,30 +623,40 @@ int cxgb3i_adapter_ddp_info(struct t3cdev *tdev, | |||
598 | * release all the resource held by the ddp pagepod manager for a given | 623 | * release all the resource held by the ddp pagepod manager for a given |
599 | * adapter if needed | 624 | * adapter if needed |
600 | */ | 625 | */ |
601 | void cxgb3i_ddp_cleanup(struct t3cdev *tdev) | 626 | |
627 | static void ddp_cleanup(struct kref *kref) | ||
602 | { | 628 | { |
629 | struct cxgb3i_ddp_info *ddp = container_of(kref, | ||
630 | struct cxgb3i_ddp_info, | ||
631 | refcnt); | ||
603 | int i = 0; | 632 | int i = 0; |
633 | |||
634 | ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev); | ||
635 | |||
636 | ddp->tdev->ulp_iscsi = NULL; | ||
637 | while (i < ddp->nppods) { | ||
638 | struct cxgb3i_gather_list *gl = ddp->gl_map[i]; | ||
639 | if (gl) { | ||
640 | int npods = (gl->nelem + PPOD_PAGES_MAX - 1) | ||
641 | >> PPOD_PAGES_SHIFT; | ||
642 | ddp_log_info("t3dev 0x%p, ddp %d + %d.\n", | ||
643 | ddp->tdev, i, npods); | ||
644 | kfree(gl); | ||
645 | ddp_free_gl_skb(ddp, i, npods); | ||
646 | i += npods; | ||
647 | } else | ||
648 | i++; | ||
649 | } | ||
650 | cxgb3i_free_big_mem(ddp); | ||
651 | } | ||
652 | |||
653 | void cxgb3i_ddp_cleanup(struct t3cdev *tdev) | ||
654 | { | ||
604 | struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; | 655 | struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; |
605 | 656 | ||
606 | ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); | 657 | ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); |
607 | 658 | if (ddp) | |
608 | if (ddp) { | 659 | kref_put(&ddp->refcnt, ddp_cleanup); |
609 | tdev->ulp_iscsi = NULL; | ||
610 | while (i < ddp->nppods) { | ||
611 | struct cxgb3i_gather_list *gl = ddp->gl_map[i]; | ||
612 | if (gl) { | ||
613 | int npods = (gl->nelem + PPOD_PAGES_MAX - 1) | ||
614 | >> PPOD_PAGES_SHIFT; | ||
615 | ddp_log_info("t3dev 0x%p, ddp %d + %d.\n", | ||
616 | tdev, i, npods); | ||
617 | kfree(gl); | ||
618 | ddp_free_gl_skb(ddp, i, npods); | ||
619 | i += npods; | ||
620 | } else | ||
621 | i++; | ||
622 | } | ||
623 | cxgb3i_free_big_mem(ddp); | ||
624 | } | ||
625 | } | 660 | } |
626 | 661 | ||
627 | /** | 662 | /** |
@@ -631,12 +666,13 @@ void cxgb3i_ddp_cleanup(struct t3cdev *tdev) | |||
631 | */ | 666 | */ |
632 | static void ddp_init(struct t3cdev *tdev) | 667 | static void ddp_init(struct t3cdev *tdev) |
633 | { | 668 | { |
634 | struct cxgb3i_ddp_info *ddp; | 669 | struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; |
635 | struct ulp_iscsi_info uinfo; | 670 | struct ulp_iscsi_info uinfo; |
636 | unsigned int ppmax, bits; | 671 | unsigned int ppmax, bits; |
637 | int i, err; | 672 | int i, err; |
638 | 673 | ||
639 | if (tdev->ulp_iscsi) { | 674 | if (ddp) { |
675 | kref_get(&ddp->refcnt); | ||
640 | ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", | 676 | ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", |
641 | tdev, tdev->ulp_iscsi); | 677 | tdev, tdev->ulp_iscsi); |
642 | return; | 678 | return; |
@@ -670,6 +706,7 @@ static void ddp_init(struct t3cdev *tdev) | |||
670 | ppmax * | 706 | ppmax * |
671 | sizeof(struct cxgb3i_gather_list *)); | 707 | sizeof(struct cxgb3i_gather_list *)); |
672 | spin_lock_init(&ddp->map_lock); | 708 | spin_lock_init(&ddp->map_lock); |
709 | kref_init(&ddp->refcnt); | ||
673 | 710 | ||
674 | ddp->tdev = tdev; | 711 | ddp->tdev = tdev; |
675 | ddp->pdev = uinfo.pdev; | 712 | ddp->pdev = uinfo.pdev; |
@@ -715,6 +752,17 @@ void cxgb3i_ddp_init(struct t3cdev *tdev) | |||
715 | { | 752 | { |
716 | if (page_idx == DDP_PGIDX_MAX) { | 753 | if (page_idx == DDP_PGIDX_MAX) { |
717 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); | 754 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); |
755 | |||
756 | if (page_idx == DDP_PGIDX_MAX) { | ||
757 | ddp_log_info("system PAGE_SIZE %lu, update hw.\n", | ||
758 | PAGE_SIZE); | ||
759 | if (cxgb3i_ddp_adjust_page_table() < 0) { | ||
760 | ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n", | ||
761 | PAGE_SIZE); | ||
762 | return; | ||
763 | } | ||
764 | page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); | ||
765 | } | ||
718 | ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", | 766 | ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", |
719 | PAGE_SIZE, page_idx); | 767 | PAGE_SIZE, page_idx); |
720 | } | 768 | } |
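Moving the per-adapter ddp_info behind a kref lets several ports of the same t3cdev share it: ddp_init() now takes an extra reference when the structure already exists, and cxgb3i_ddp_cleanup() only runs the real teardown (ddp_cleanup()) when the final kref_put() drops the count to zero. The kref pattern in its generic form (a sketch, not driver code):

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct shared_obj {
		struct kref refcnt;
		/* ... payload ... */
	};

	static void shared_obj_release(struct kref *kref)
	{
		struct shared_obj *obj = container_of(kref, struct shared_obj, refcnt);

		kfree(obj);			/* runs only on the final put */
	}

	static struct shared_obj *shared_obj_alloc(void)
	{
		struct shared_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			kref_init(&obj->refcnt);	/* count starts at 1 */
		return obj;
	}

	static void shared_obj_put(struct shared_obj *obj)
	{
		kref_put(&obj->refcnt, shared_obj_release);
	}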
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h index 0d296de7cf32..87dd56b422bf 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h +++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h | |||
@@ -54,6 +54,7 @@ struct cxgb3i_gather_list { | |||
54 | * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload | 54 | * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload |
55 | * | 55 | * |
56 | * @list: list head to link elements | 56 | * @list: list head to link elements |
57 | * @refcnt: ref. count | ||
57 | * @tdev: pointer to t3cdev used by cxgb3 driver | 58 | * @tdev: pointer to t3cdev used by cxgb3 driver |
58 | * @max_txsz: max tx packet size for ddp | 59 | * @max_txsz: max tx packet size for ddp |
59 | * @max_rxsz: max rx packet size for ddp | 60 | * @max_rxsz: max rx packet size for ddp |
@@ -70,6 +71,7 @@ struct cxgb3i_gather_list { | |||
70 | */ | 71 | */ |
71 | struct cxgb3i_ddp_info { | 72 | struct cxgb3i_ddp_info { |
72 | struct list_head list; | 73 | struct list_head list; |
74 | struct kref refcnt; | ||
73 | struct t3cdev *tdev; | 75 | struct t3cdev *tdev; |
74 | struct pci_dev *pdev; | 76 | struct pci_dev *pdev; |
75 | unsigned int max_txsz; | 77 | unsigned int max_txsz; |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index c15878e88157..0a5609bb5817 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -45,8 +45,6 @@ | |||
45 | 45 | ||
46 | #include "fcoe.h" | 46 | #include "fcoe.h" |
47 | 47 | ||
48 | static int debug_fcoe; | ||
49 | |||
50 | MODULE_AUTHOR("Open-FCoE.org"); | 48 | MODULE_AUTHOR("Open-FCoE.org"); |
51 | MODULE_DESCRIPTION("FCoE"); | 49 | MODULE_DESCRIPTION("FCoE"); |
52 | MODULE_LICENSE("GPL v2"); | 50 | MODULE_LICENSE("GPL v2"); |
@@ -305,23 +303,22 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) | |||
305 | #ifdef NETIF_F_FCOE_CRC | 303 | #ifdef NETIF_F_FCOE_CRC |
306 | if (netdev->features & NETIF_F_FCOE_CRC) { | 304 | if (netdev->features & NETIF_F_FCOE_CRC) { |
307 | lp->crc_offload = 1; | 305 | lp->crc_offload = 1; |
308 | printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n", | 306 | FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n"); |
309 | netdev->name); | ||
310 | } | 307 | } |
311 | #endif | 308 | #endif |
312 | #ifdef NETIF_F_FSO | 309 | #ifdef NETIF_F_FSO |
313 | if (netdev->features & NETIF_F_FSO) { | 310 | if (netdev->features & NETIF_F_FSO) { |
314 | lp->seq_offload = 1; | 311 | lp->seq_offload = 1; |
315 | lp->lso_max = netdev->gso_max_size; | 312 | lp->lso_max = netdev->gso_max_size; |
316 | printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n", | 313 | FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", |
317 | netdev->name, lp->lso_max); | 314 | lp->lso_max); |
318 | } | 315 | } |
319 | #endif | 316 | #endif |
320 | if (netdev->fcoe_ddp_xid) { | 317 | if (netdev->fcoe_ddp_xid) { |
321 | lp->lro_enabled = 1; | 318 | lp->lro_enabled = 1; |
322 | lp->lro_xid = netdev->fcoe_ddp_xid; | 319 | lp->lro_xid = netdev->fcoe_ddp_xid; |
323 | printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n", | 320 | FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n", |
324 | netdev->name, lp->lro_xid); | 321 | lp->lro_xid); |
325 | } | 322 | } |
326 | skb_queue_head_init(&fc->fcoe_pending_queue); | 323 | skb_queue_head_init(&fc->fcoe_pending_queue); |
327 | fc->fcoe_pending_queue_active = 0; | 324 | fc->fcoe_pending_queue_active = 0; |
@@ -407,7 +404,8 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, | |||
407 | /* add the new host to the SCSI-ml */ | 404 | /* add the new host to the SCSI-ml */ |
408 | rc = scsi_add_host(lp->host, dev); | 405 | rc = scsi_add_host(lp->host, dev); |
409 | if (rc) { | 406 | if (rc) { |
410 | FC_DBG("fcoe_shost_config:error on scsi_add_host\n"); | 407 | FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: " |
408 | "error on scsi_add_host\n"); | ||
411 | return rc; | 409 | return rc; |
412 | } | 410 | } |
413 | sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", | 411 | sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", |
@@ -448,8 +446,7 @@ static int fcoe_if_destroy(struct net_device *netdev) | |||
448 | 446 | ||
449 | BUG_ON(!netdev); | 447 | BUG_ON(!netdev); |
450 | 448 | ||
451 | printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n", | 449 | FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); |
452 | netdev->name); | ||
453 | 450 | ||
454 | lp = fcoe_hostlist_lookup(netdev); | 451 | lp = fcoe_hostlist_lookup(netdev); |
455 | if (!lp) | 452 | if (!lp) |
@@ -560,8 +557,7 @@ static int fcoe_if_create(struct net_device *netdev) | |||
560 | 557 | ||
561 | BUG_ON(!netdev); | 558 | BUG_ON(!netdev); |
562 | 559 | ||
563 | printk(KERN_DEBUG "fcoe_if_create:interface on %s\n", | 560 | FCOE_NETDEV_DBG(netdev, "Create Interface\n"); |
564 | netdev->name); | ||
565 | 561 | ||
566 | lp = fcoe_hostlist_lookup(netdev); | 562 | lp = fcoe_hostlist_lookup(netdev); |
567 | if (lp) | 563 | if (lp) |
@@ -570,7 +566,7 @@ static int fcoe_if_create(struct net_device *netdev) | |||
570 | shost = libfc_host_alloc(&fcoe_shost_template, | 566 | shost = libfc_host_alloc(&fcoe_shost_template, |
571 | sizeof(struct fcoe_softc)); | 567 | sizeof(struct fcoe_softc)); |
572 | if (!shost) { | 568 | if (!shost) { |
573 | FC_DBG("Could not allocate host structure\n"); | 569 | FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); |
574 | return -ENOMEM; | 570 | return -ENOMEM; |
575 | } | 571 | } |
576 | lp = shost_priv(shost); | 572 | lp = shost_priv(shost); |
@@ -579,7 +575,8 @@ static int fcoe_if_create(struct net_device *netdev) | |||
579 | /* configure fc_lport, e.g., em */ | 575 | /* configure fc_lport, e.g., em */ |
580 | rc = fcoe_lport_config(lp); | 576 | rc = fcoe_lport_config(lp); |
581 | if (rc) { | 577 | if (rc) { |
582 | FC_DBG("Could not configure lport\n"); | 578 | FCOE_NETDEV_DBG(netdev, "Could not configure lport for the " |
579 | "interface\n"); | ||
583 | goto out_host_put; | 580 | goto out_host_put; |
584 | } | 581 | } |
585 | 582 | ||
@@ -593,28 +590,32 @@ static int fcoe_if_create(struct net_device *netdev) | |||
593 | /* configure lport network properties */ | 590 | /* configure lport network properties */ |
594 | rc = fcoe_netdev_config(lp, netdev); | 591 | rc = fcoe_netdev_config(lp, netdev); |
595 | if (rc) { | 592 | if (rc) { |
596 | FC_DBG("Could not configure netdev for the interface\n"); | 593 | FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the " |
594 | "interface\n"); | ||
597 | goto out_netdev_cleanup; | 595 | goto out_netdev_cleanup; |
598 | } | 596 | } |
599 | 597 | ||
600 | /* configure lport scsi host properties */ | 598 | /* configure lport scsi host properties */ |
601 | rc = fcoe_shost_config(lp, shost, &netdev->dev); | 599 | rc = fcoe_shost_config(lp, shost, &netdev->dev); |
602 | if (rc) { | 600 | if (rc) { |
603 | FC_DBG("Could not configure shost for lport\n"); | 601 | FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " |
602 | "interface\n"); | ||
604 | goto out_netdev_cleanup; | 603 | goto out_netdev_cleanup; |
605 | } | 604 | } |
606 | 605 | ||
607 | /* lport exch manager allocation */ | 606 | /* lport exch manager allocation */ |
608 | rc = fcoe_em_config(lp); | 607 | rc = fcoe_em_config(lp); |
609 | if (rc) { | 608 | if (rc) { |
610 | FC_DBG("Could not configure em for lport\n"); | 609 | FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the " |
610 | "interface\n"); | ||
611 | goto out_netdev_cleanup; | 611 | goto out_netdev_cleanup; |
612 | } | 612 | } |
613 | 613 | ||
614 | /* Initialize the library */ | 614 | /* Initialize the library */ |
615 | rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); | 615 | rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ); |
616 | if (rc) { | 616 | if (rc) { |
617 | FC_DBG("Could not configure libfc for lport!\n"); | 617 | FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " |
618 | "interface\n"); | ||
618 | goto out_lp_destroy; | 619 | goto out_lp_destroy; |
619 | } | 620 | } |
620 | 621 | ||
@@ -653,7 +654,7 @@ static int __init fcoe_if_init(void) | |||
653 | fc_attach_transport(&fcoe_transport_function); | 654 | fc_attach_transport(&fcoe_transport_function); |
654 | 655 | ||
655 | if (!scsi_transport_fcoe_sw) { | 656 | if (!scsi_transport_fcoe_sw) { |
656 | printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n"); | 657 | printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); |
657 | return -ENODEV; | 658 | return -ENODEV; |
658 | } | 659 | } |
659 | 660 | ||
@@ -714,7 +715,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) | |||
714 | unsigned targ_cpu = smp_processor_id(); | 715 | unsigned targ_cpu = smp_processor_id(); |
715 | #endif /* CONFIG_SMP */ | 716 | #endif /* CONFIG_SMP */ |
716 | 717 | ||
717 | printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu); | 718 | FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); |
718 | 719 | ||
719 | /* Prevent any new skbs from being queued for this CPU. */ | 720 | /* Prevent any new skbs from being queued for this CPU. */ |
720 | p = &per_cpu(fcoe_percpu, cpu); | 721 | p = &per_cpu(fcoe_percpu, cpu); |
@@ -736,8 +737,8 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) | |||
736 | p0 = &per_cpu(fcoe_percpu, targ_cpu); | 737 | p0 = &per_cpu(fcoe_percpu, targ_cpu); |
737 | spin_lock_bh(&p0->fcoe_rx_list.lock); | 738 | spin_lock_bh(&p0->fcoe_rx_list.lock); |
738 | if (p0->thread) { | 739 | if (p0->thread) { |
739 | FC_DBG("Moving frames from CPU %d to CPU %d\n", | 740 | FCOE_DBG("Moving frames from CPU %d to CPU %d\n", |
740 | cpu, targ_cpu); | 741 | cpu, targ_cpu); |
741 | 742 | ||
742 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) | 743 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) |
743 | __skb_queue_tail(&p0->fcoe_rx_list, skb); | 744 | __skb_queue_tail(&p0->fcoe_rx_list, skb); |
@@ -803,12 +804,12 @@ static int fcoe_cpu_callback(struct notifier_block *nfb, | |||
803 | switch (action) { | 804 | switch (action) { |
804 | case CPU_ONLINE: | 805 | case CPU_ONLINE: |
805 | case CPU_ONLINE_FROZEN: | 806 | case CPU_ONLINE_FROZEN: |
806 | FC_DBG("CPU %x online: Create Rx thread\n", cpu); | 807 | FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); |
807 | fcoe_percpu_thread_create(cpu); | 808 | fcoe_percpu_thread_create(cpu); |
808 | break; | 809 | break; |
809 | case CPU_DEAD: | 810 | case CPU_DEAD: |
810 | case CPU_DEAD_FROZEN: | 811 | case CPU_DEAD_FROZEN: |
811 | FC_DBG("CPU %x offline: Remove Rx thread\n", cpu); | 812 | FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); |
812 | fcoe_percpu_thread_destroy(cpu); | 813 | fcoe_percpu_thread_destroy(cpu); |
813 | break; | 814 | break; |
814 | default: | 815 | default: |
@@ -846,24 +847,21 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
846 | fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); | 847 | fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); |
847 | lp = fc->ctlr.lp; | 848 | lp = fc->ctlr.lp; |
848 | if (unlikely(lp == NULL)) { | 849 | if (unlikely(lp == NULL)) { |
849 | FC_DBG("cannot find hba structure"); | 850 | FCOE_NETDEV_DBG(dev, "Cannot find hba structure"); |
850 | goto err2; | 851 | goto err2; |
851 | } | 852 | } |
852 | if (!lp->link_up) | 853 | if (!lp->link_up) |
853 | goto err2; | 854 | goto err2; |
854 | 855 | ||
855 | if (unlikely(debug_fcoe)) { | 856 | FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p " |
856 | FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p " | 857 | "data:%p tail:%p end:%p sum:%d dev:%s", |
857 | "end:%p sum:%d dev:%s", skb->len, skb->data_len, | 858 | skb->len, skb->data_len, skb->head, skb->data, |
858 | skb->head, skb->data, skb_tail_pointer(skb), | 859 | skb_tail_pointer(skb), skb_end_pointer(skb), |
859 | skb_end_pointer(skb), skb->csum, | 860 | skb->csum, skb->dev ? skb->dev->name : "<NULL>"); |
860 | skb->dev ? skb->dev->name : "<NULL>"); | ||
861 | |||
862 | } | ||
863 | 861 | ||
864 | /* check for FCOE packet type */ | 862 | /* check for FCOE packet type */ |
865 | if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { | 863 | if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { |
866 | FC_DBG("wrong FC type frame"); | 864 | FCOE_NETDEV_DBG(dev, "Wrong FC type frame"); |
867 | goto err; | 865 | goto err; |
868 | } | 866 | } |
869 | 867 | ||
@@ -901,8 +899,9 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
901 | * the first CPU now. For non-SMP systems this | 899 | * the first CPU now. For non-SMP systems this |
902 | * will check the same CPU twice. | 900 | * will check the same CPU twice. |
903 | */ | 901 | */ |
904 | FC_DBG("CPU is online, but no receive thread ready " | 902 | FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread " |
905 | "for incoming skb- using first online CPU.\n"); | 903 | "ready for incoming skb- using first online " |
904 | "CPU.\n"); | ||
906 | 905 | ||
907 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | 906 | spin_unlock_bh(&fps->fcoe_rx_list.lock); |
908 | cpu = first_cpu(cpu_online_map); | 907 | cpu = first_cpu(cpu_online_map); |
@@ -1201,19 +1200,17 @@ int fcoe_percpu_receive_thread(void *arg) | |||
1201 | fr = fcoe_dev_from_skb(skb); | 1200 | fr = fcoe_dev_from_skb(skb); |
1202 | lp = fr->fr_dev; | 1201 | lp = fr->fr_dev; |
1203 | if (unlikely(lp == NULL)) { | 1202 | if (unlikely(lp == NULL)) { |
1204 | FC_DBG("invalid HBA Structure"); | 1203 | FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure"); |
1205 | kfree_skb(skb); | 1204 | kfree_skb(skb); |
1206 | continue; | 1205 | continue; |
1207 | } | 1206 | } |
1208 | 1207 | ||
1209 | if (unlikely(debug_fcoe)) { | 1208 | FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " |
1210 | FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " | 1209 | "head:%p data:%p tail:%p end:%p sum:%d dev:%s", |
1211 | "tail:%p end:%p sum:%d dev:%s", | 1210 | skb->len, skb->data_len, |
1212 | skb->len, skb->data_len, | 1211 | skb->head, skb->data, skb_tail_pointer(skb), |
1213 | skb->head, skb->data, skb_tail_pointer(skb), | 1212 | skb_end_pointer(skb), skb->csum, |
1214 | skb_end_pointer(skb), skb->csum, | 1213 | skb->dev ? skb->dev->name : "<NULL>"); |
1215 | skb->dev ? skb->dev->name : "<NULL>"); | ||
1216 | } | ||
1217 | 1214 | ||
1218 | /* | 1215 | /* |
1219 | * Save source MAC address before discarding header. | 1216 | * Save source MAC address before discarding header. |
@@ -1233,7 +1230,7 @@ int fcoe_percpu_receive_thread(void *arg) | |||
1233 | stats = fc_lport_get_stats(lp); | 1230 | stats = fc_lport_get_stats(lp); |
1234 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { | 1231 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { |
1235 | if (stats->ErrorFrames < 5) | 1232 | if (stats->ErrorFrames < 5) |
1236 | printk(KERN_WARNING "FCoE version " | 1233 | printk(KERN_WARNING "fcoe: FCoE version " |
1237 | "mismatch: The frame has " | 1234 | "mismatch: The frame has " |
1238 | "version %x, but the " | 1235 | "version %x, but the " |
1239 | "initiator supports version " | 1236 | "initiator supports version " |
@@ -1286,7 +1283,7 @@ int fcoe_percpu_receive_thread(void *arg) | |||
1286 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { | 1283 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { |
1287 | if (le32_to_cpu(fr_crc(fp)) != | 1284 | if (le32_to_cpu(fr_crc(fp)) != |
1288 | ~crc32(~0, skb->data, fr_len)) { | 1285 | ~crc32(~0, skb->data, fr_len)) { |
1289 | if (debug_fcoe || stats->InvalidCRCCount < 5) | 1286 | if (stats->InvalidCRCCount < 5) |
1290 | printk(KERN_WARNING "fcoe: dropping " | 1287 | printk(KERN_WARNING "fcoe: dropping " |
1291 | "frame with CRC error\n"); | 1288 | "frame with CRC error\n"); |
1292 | stats->InvalidCRCCount++; | 1289 | stats->InvalidCRCCount++; |
@@ -1432,7 +1429,8 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
1432 | case NETDEV_REGISTER: | 1429 | case NETDEV_REGISTER: |
1433 | break; | 1430 | break; |
1434 | default: | 1431 | default: |
1435 | FC_DBG("Unknown event %ld from netdev netlink\n", event); | 1432 | FCOE_NETDEV_DBG(real_dev, "Unknown event %ld " |
1433 | "from netdev netlink\n", event); | ||
1436 | } | 1434 | } |
1437 | if (link_possible && !fcoe_link_ok(lp)) | 1435 | if (link_possible && !fcoe_link_ok(lp)) |
1438 | fcoe_ctlr_link_up(&fc->ctlr); | 1436 | fcoe_ctlr_link_up(&fc->ctlr); |
@@ -1505,8 +1503,8 @@ static int fcoe_ethdrv_get(const struct net_device *netdev) | |||
1505 | 1503 | ||
1506 | owner = fcoe_netdev_to_module_owner(netdev); | 1504 | owner = fcoe_netdev_to_module_owner(netdev); |
1507 | if (owner) { | 1505 | if (owner) { |
1508 | printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n", | 1506 | FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n", |
1509 | module_name(owner), netdev->name); | 1507 | module_name(owner)); |
1510 | return try_module_get(owner); | 1508 | return try_module_get(owner); |
1511 | } | 1509 | } |
1512 | return -ENODEV; | 1510 | return -ENODEV; |
@@ -1527,8 +1525,8 @@ static int fcoe_ethdrv_put(const struct net_device *netdev) | |||
1527 | 1525 | ||
1528 | owner = fcoe_netdev_to_module_owner(netdev); | 1526 | owner = fcoe_netdev_to_module_owner(netdev); |
1529 | if (owner) { | 1527 | if (owner) { |
1530 | printk(KERN_DEBUG "fcoe:release driver module %s for %s\n", | 1528 | FCOE_NETDEV_DBG(netdev, "Release driver module %s\n", |
1531 | module_name(owner), netdev->name); | 1529 | module_name(owner)); |
1532 | module_put(owner); | 1530 | module_put(owner); |
1533 | return 0; | 1531 | return 0; |
1534 | } | 1532 | } |
@@ -1559,7 +1557,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) | |||
1559 | } | 1557 | } |
1560 | rc = fcoe_if_destroy(netdev); | 1558 | rc = fcoe_if_destroy(netdev); |
1561 | if (rc) { | 1559 | if (rc) { |
1562 | printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n", | 1560 | printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n", |
1563 | netdev->name); | 1561 | netdev->name); |
1564 | rc = -EIO; | 1562 | rc = -EIO; |
1565 | goto out_putdev; | 1563 | goto out_putdev; |
@@ -1598,7 +1596,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) | |||
1598 | 1596 | ||
1599 | rc = fcoe_if_create(netdev); | 1597 | rc = fcoe_if_create(netdev); |
1600 | if (rc) { | 1598 | if (rc) { |
1601 | printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n", | 1599 | printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", |
1602 | netdev->name); | 1600 | netdev->name); |
1603 | fcoe_ethdrv_put(netdev); | 1601 | fcoe_ethdrv_put(netdev); |
1604 | rc = -EIO; | 1602 | rc = -EIO; |
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index a1eb8c1988b0..0d724fa0898f 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h | |||
@@ -40,6 +40,30 @@ | |||
40 | #define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ | 40 | #define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ |
41 | #define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ | 41 | #define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ |
42 | 42 | ||
43 | unsigned int fcoe_debug_logging; | ||
44 | module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); | ||
45 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | ||
46 | |||
47 | #define FCOE_LOGGING 0x01 /* General logging, not categorized */ | ||
48 | #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ | ||
49 | |||
50 | #define FCOE_CHECK_LOGGING(LEVEL, CMD) \ | ||
51 | do { \ | ||
52 | if (unlikely(fcoe_debug_logging & LEVEL)) \ | ||
53 | do { \ | ||
54 | CMD; \ | ||
55 | } while (0); \ | ||
56 | } while (0); | ||
57 | |||
58 | #define FCOE_DBG(fmt, args...) \ | ||
59 | FCOE_CHECK_LOGGING(FCOE_LOGGING, \ | ||
60 | printk(KERN_INFO "fcoe: " fmt, ##args);) | ||
61 | |||
62 | #define FCOE_NETDEV_DBG(netdev, fmt, args...) \ | ||
63 | FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ | ||
64 | printk(KERN_INFO "fcoe: %s: " fmt, \ | ||
65 | netdev->name, ##args);) | ||
66 | |||
43 | /* | 67 | /* |
44 | * this percpu struct for fcoe | 68 | * this percpu struct for fcoe |
45 | */ | 69 | */ |
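The new macros replace the old debug_fcoe flag with a bit mask exposed as the debug_logging module parameter, so categories can be flipped at load time (modprobe fcoe debug_logging=3) or at runtime through /sys/module/fcoe/parameters/debug_logging. Two details worth noting in the hunk above: the outer do { ... } while (0) already carries a trailing semicolon, which is unusual for a statement-like macro, and fcoe_debug_logging is defined in the header without static, which only works as long as fcoe.h is included from a single .c file. The same pattern, reduced to its essentials for a hypothetical driver "foo":

	static unsigned int foo_debug_logging;
	module_param_named(debug_logging, foo_debug_logging, int, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");

	#define FOO_GENERAL_LOGGING	0x01
	#define FOO_IO_LOGGING		0x02

	#define FOO_DBG(mask, fmt, args...)				\
	do {								\
		if (unlikely(foo_debug_logging & (mask)))		\
			printk(KERN_INFO "foo: " fmt, ##args);		\
	} while (0)

	/* e.g. FOO_DBG(FOO_IO_LOGGING, "queued %d frames\n", n); enabled with
	 *   echo 2 > /sys/module/foo/parameters/debug_logging */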
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 2f5bc7fd3fa9..f544340d318b 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
@@ -56,15 +56,28 @@ static void fcoe_ctlr_recv_work(struct work_struct *); | |||
56 | 56 | ||
57 | static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; | 57 | static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; |
58 | 58 | ||
59 | static u32 fcoe_ctlr_debug; /* 1 for basic, 2 for noisy debug */ | 59 | unsigned int libfcoe_debug_logging; |
60 | module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); | ||
61 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); | ||
60 | 62 | ||
61 | #define FIP_DBG_LVL(level, fmt, args...) \ | 63 | #define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ |
64 | #define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ | ||
65 | |||
66 | #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ | ||
67 | do { \ | ||
68 | if (unlikely(libfcoe_debug_logging & LEVEL)) \ | ||
62 | do { \ | 69 | do { \ |
63 | if (fcoe_ctlr_debug >= (level)) \ | 70 | CMD; \ |
64 | FC_DBG(fmt, ##args); \ | 71 | } while (0); \ |
65 | } while (0) | 72 | } while (0); |
73 | |||
74 | #define LIBFCOE_DBG(fmt, args...) \ | ||
75 | LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ | ||
76 | printk(KERN_INFO "libfcoe: " fmt, ##args);) | ||
66 | 77 | ||
67 | #define FIP_DBG(fmt, args...) FIP_DBG_LVL(1, fmt, ##args) | 78 | #define LIBFCOE_FIP_DBG(fmt, args...) \ |
79 | LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ | ||
80 | printk(KERN_INFO "fip: " fmt, ##args);) | ||
68 | 81 | ||
69 | /* | 82 | /* |
70 | * Return non-zero if FCF fcoe_size has been validated. | 83 | * Return non-zero if FCF fcoe_size has been validated. |
@@ -243,7 +256,7 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) | |||
243 | fip->last_link = 1; | 256 | fip->last_link = 1; |
244 | fip->link = 1; | 257 | fip->link = 1; |
245 | spin_unlock_bh(&fip->lock); | 258 | spin_unlock_bh(&fip->lock); |
246 | FIP_DBG("%s", "setting AUTO mode.\n"); | 259 | LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n"); |
247 | fc_linkup(fip->lp); | 260 | fc_linkup(fip->lp); |
248 | fcoe_ctlr_solicit(fip, NULL); | 261 | fcoe_ctlr_solicit(fip, NULL); |
249 | } else | 262 | } else |
@@ -614,7 +627,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
614 | ((struct fip_mac_desc *)desc)->fd_mac, | 627 | ((struct fip_mac_desc *)desc)->fd_mac, |
615 | ETH_ALEN); | 628 | ETH_ALEN); |
616 | if (!is_valid_ether_addr(fcf->fcf_mac)) { | 629 | if (!is_valid_ether_addr(fcf->fcf_mac)) { |
617 | FIP_DBG("invalid MAC addr in FIP adv\n"); | 630 | LIBFCOE_FIP_DBG("Invalid MAC address " |
631 | "in FIP adv\n"); | ||
618 | return -EINVAL; | 632 | return -EINVAL; |
619 | } | 633 | } |
620 | break; | 634 | break; |
@@ -647,8 +661,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
647 | case FIP_DT_LOGO: | 661 | case FIP_DT_LOGO: |
648 | case FIP_DT_ELP: | 662 | case FIP_DT_ELP: |
649 | default: | 663 | default: |
650 | FIP_DBG("unexpected descriptor type %x in FIP adv\n", | 664 | LIBFCOE_FIP_DBG("unexpected descriptor type %x " |
651 | desc->fip_dtype); | 665 | "in FIP adv\n", desc->fip_dtype); |
652 | /* standard says ignore unknown descriptors >= 128 */ | 666 | /* standard says ignore unknown descriptors >= 128 */ |
653 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) | 667 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) |
654 | return -EINVAL; | 668 | return -EINVAL; |
@@ -664,8 +678,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf) | |||
664 | return 0; | 678 | return 0; |
665 | 679 | ||
666 | len_err: | 680 | len_err: |
667 | FIP_DBG("FIP length error in descriptor type %x len %zu\n", | 681 | LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", |
668 | desc->fip_dtype, dlen); | 682 | desc->fip_dtype, dlen); |
669 | return -EINVAL; | 683 | return -EINVAL; |
670 | } | 684 | } |
671 | 685 | ||
@@ -728,9 +742,10 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
728 | } | 742 | } |
729 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); | 743 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); |
730 | fcf->time = jiffies; | 744 | fcf->time = jiffies; |
731 | FIP_DBG_LVL(found ? 2 : 1, "%s FCF for fab %llx map %x val %d\n", | 745 | if (!found) { |
732 | found ? "old" : "new", | 746 | LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n", |
733 | fcf->fabric_name, fcf->fc_map, mtu_valid); | 747 | fcf->fabric_name, fcf->fc_map, mtu_valid); |
748 | } | ||
734 | 749 | ||
735 | /* | 750 | /* |
736 | * If this advertisement is not solicited and our max receive size | 751 | * If this advertisement is not solicited and our max receive size |
@@ -807,7 +822,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
807 | ((struct fip_mac_desc *)desc)->fd_mac, | 822 | ((struct fip_mac_desc *)desc)->fd_mac, |
808 | ETH_ALEN); | 823 | ETH_ALEN); |
809 | if (!is_valid_ether_addr(granted_mac)) { | 824 | if (!is_valid_ether_addr(granted_mac)) { |
810 | FIP_DBG("invalid MAC addrs in FIP ELS\n"); | 825 | LIBFCOE_FIP_DBG("Invalid MAC address " |
826 | "in FIP ELS\n"); | ||
811 | goto drop; | 827 | goto drop; |
812 | } | 828 | } |
813 | break; | 829 | break; |
@@ -825,8 +841,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
825 | els_dtype = desc->fip_dtype; | 841 | els_dtype = desc->fip_dtype; |
826 | break; | 842 | break; |
827 | default: | 843 | default: |
828 | FIP_DBG("unexpected descriptor type %x " | 844 | LIBFCOE_FIP_DBG("unexpected descriptor type %x " |
829 | "in FIP adv\n", desc->fip_dtype); | 845 | "in FIP adv\n", desc->fip_dtype); |
830 | /* standard says ignore unknown descriptors >= 128 */ | 846 | /* standard says ignore unknown descriptors >= 128 */ |
831 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) | 847 | if (desc->fip_dtype < FIP_DT_VENDOR_BASE) |
832 | goto drop; | 848 | goto drop; |
@@ -867,8 +883,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
867 | return; | 883 | return; |
868 | 884 | ||
869 | len_err: | 885 | len_err: |
870 | FIP_DBG("FIP length error in descriptor type %x len %zu\n", | 886 | LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n", |
871 | desc->fip_dtype, dlen); | 887 | desc->fip_dtype, dlen); |
872 | drop: | 888 | drop: |
873 | kfree_skb(skb); | 889 | kfree_skb(skb); |
874 | } | 890 | } |
@@ -894,7 +910,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
894 | struct fc_lport *lp = fip->lp; | 910 | struct fc_lport *lp = fip->lp; |
895 | u32 desc_mask; | 911 | u32 desc_mask; |
896 | 912 | ||
897 | FIP_DBG("Clear Virtual Link received\n"); | 913 | LIBFCOE_FIP_DBG("Clear Virtual Link received\n"); |
898 | if (!fcf) | 914 | if (!fcf) |
899 | return; | 915 | return; |
900 | if (!fcf || !fc_host_port_id(lp->host)) | 916 | if (!fcf || !fc_host_port_id(lp->host)) |
@@ -952,9 +968,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
952 | * reset only if all required descriptors were present and valid. | 968 | * reset only if all required descriptors were present and valid. |
953 | */ | 969 | */ |
954 | if (desc_mask) { | 970 | if (desc_mask) { |
955 | FIP_DBG("missing descriptors mask %x\n", desc_mask); | 971 | LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask); |
956 | } else { | 972 | } else { |
957 | FIP_DBG("performing Clear Virtual Link\n"); | 973 | LIBFCOE_FIP_DBG("performing Clear Virtual Link\n"); |
958 | fcoe_ctlr_reset(fip, FIP_ST_ENABLED); | 974 | fcoe_ctlr_reset(fip, FIP_ST_ENABLED); |
959 | } | 975 | } |
960 | } | 976 | } |
@@ -1002,10 +1018,6 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1002 | op = ntohs(fiph->fip_op); | 1018 | op = ntohs(fiph->fip_op); |
1003 | sub = fiph->fip_subcode; | 1019 | sub = fiph->fip_subcode; |
1004 | 1020 | ||
1005 | FIP_DBG_LVL(2, "ver %x op %x/%x dl %x fl %x\n", | ||
1006 | FIP_VER_DECAPS(fiph->fip_ver), op, sub, | ||
1007 | ntohs(fiph->fip_dl_len), ntohs(fiph->fip_flags)); | ||
1008 | |||
1009 | if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) | 1021 | if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) |
1010 | goto drop; | 1022 | goto drop; |
1011 | if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) | 1023 | if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) |
@@ -1017,7 +1029,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1017 | fip->map_dest = 0; | 1029 | fip->map_dest = 0; |
1018 | fip->state = FIP_ST_ENABLED; | 1030 | fip->state = FIP_ST_ENABLED; |
1019 | state = FIP_ST_ENABLED; | 1031 | state = FIP_ST_ENABLED; |
1020 | FIP_DBG("using FIP mode\n"); | 1032 | LIBFCOE_FIP_DBG("Using FIP mode\n"); |
1021 | } | 1033 | } |
1022 | spin_unlock_bh(&fip->lock); | 1034 | spin_unlock_bh(&fip->lock); |
1023 | if (state != FIP_ST_ENABLED) | 1035 | if (state != FIP_ST_ENABLED) |
@@ -1052,14 +1064,15 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
1052 | struct fcoe_fcf *best = NULL; | 1064 | struct fcoe_fcf *best = NULL; |
1053 | 1065 | ||
1054 | list_for_each_entry(fcf, &fip->fcfs, list) { | 1066 | list_for_each_entry(fcf, &fip->fcfs, list) { |
1055 | FIP_DBG("consider FCF for fab %llx VFID %d map %x val %d\n", | 1067 | LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x " |
1056 | fcf->fabric_name, fcf->vfid, | 1068 | "val %d\n", fcf->fabric_name, fcf->vfid, |
1057 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); | 1069 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); |
1058 | if (!fcoe_ctlr_fcf_usable(fcf)) { | 1070 | if (!fcoe_ctlr_fcf_usable(fcf)) { |
1059 | FIP_DBG("FCF for fab %llx map %x %svalid %savailable\n", | 1071 | LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid " |
1060 | fcf->fabric_name, fcf->fc_map, | 1072 | "%savailable\n", fcf->fabric_name, |
1061 | (fcf->flags & FIP_FL_SOL) ? "" : "in", | 1073 | fcf->fc_map, (fcf->flags & FIP_FL_SOL) |
1062 | (fcf->flags & FIP_FL_AVAIL) ? "" : "un"); | 1074 | ? "" : "in", (fcf->flags & FIP_FL_AVAIL) |
1075 | ? "" : "un"); | ||
1063 | continue; | 1076 | continue; |
1064 | } | 1077 | } |
1065 | if (!best) { | 1078 | if (!best) { |
@@ -1069,7 +1082,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
1069 | if (fcf->fabric_name != best->fabric_name || | 1082 | if (fcf->fabric_name != best->fabric_name || |
1070 | fcf->vfid != best->vfid || | 1083 | fcf->vfid != best->vfid || |
1071 | fcf->fc_map != best->fc_map) { | 1084 | fcf->fc_map != best->fc_map) { |
1072 | FIP_DBG("conflicting fabric, VFID, or FC-MAP\n"); | 1085 | LIBFCOE_FIP_DBG("Conflicting fabric, VFID, " |
1086 | "or FC-MAP\n"); | ||
1073 | return; | 1087 | return; |
1074 | } | 1088 | } |
1075 | if (fcf->pri < best->pri) | 1089 | if (fcf->pri < best->pri) |
@@ -1113,7 +1127,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
1113 | if (sel != fcf) { | 1127 | if (sel != fcf) { |
1114 | fcf = sel; /* the old FCF may have been freed */ | 1128 | fcf = sel; /* the old FCF may have been freed */ |
1115 | if (sel) { | 1129 | if (sel) { |
1116 | printk(KERN_INFO "host%d: FIP selected " | 1130 | printk(KERN_INFO "libfcoe: host%d: FIP selected " |
1117 | "Fibre-Channel Forwarder MAC %s\n", | 1131 | "Fibre-Channel Forwarder MAC %s\n", |
1118 | fip->lp->host->host_no, | 1132 | fip->lp->host->host_no, |
1119 | print_mac(buf, sel->fcf_mac)); | 1133 | print_mac(buf, sel->fcf_mac)); |
@@ -1123,7 +1137,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
1123 | fip->ctlr_ka_time = jiffies + sel->fka_period; | 1137 | fip->ctlr_ka_time = jiffies + sel->fka_period; |
1124 | fip->link = 1; | 1138 | fip->link = 1; |
1125 | } else { | 1139 | } else { |
1126 | printk(KERN_NOTICE "host%d: " | 1140 | printk(KERN_NOTICE "libfcoe: host%d: " |
1127 | "FIP Fibre-Channel Forwarder timed out. " | 1141 | "FIP Fibre-Channel Forwarder timed out. " |
1128 | "Starting FCF discovery.\n", | 1142 | "Starting FCF discovery.\n", |
1129 | fip->lp->host->host_no); | 1143 | fip->lp->host->host_no); |
@@ -1247,7 +1261,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
1247 | return -EINVAL; | 1261 | return -EINVAL; |
1248 | } | 1262 | } |
1249 | fip->state = FIP_ST_NON_FIP; | 1263 | fip->state = FIP_ST_NON_FIP; |
1250 | FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); | 1264 | LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n"); |
1251 | 1265 | ||
1252 | /* | 1266 | /* |
1253 | * FLOGI accepted. | 1267 | * FLOGI accepted. |
@@ -1276,7 +1290,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa) | |||
1276 | memcpy(fip->dest_addr, sa, ETH_ALEN); | 1290 | memcpy(fip->dest_addr, sa, ETH_ALEN); |
1277 | fip->map_dest = 0; | 1291 | fip->map_dest = 0; |
1278 | if (fip->state == FIP_ST_NON_FIP) | 1292 | if (fip->state == FIP_ST_NON_FIP) |
1279 | FIP_DBG("received FLOGI REQ, " | 1293 | LIBFCOE_FIP_DBG("received FLOGI REQ, " |
1280 | "using non-FIP mode\n"); | 1294 | "using non-FIP mode\n"); |
1281 | fip->state = FIP_ST_NON_FIP; | 1295 | fip->state = FIP_ST_NON_FIP; |
1282 | } | 1296 | } |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 89d41a424b33..5fd2da494d08 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include "scsi_logging.h" | 40 | #include "scsi_logging.h" |
41 | 41 | ||
42 | 42 | ||
43 | static int scsi_host_next_hn; /* host_no for next new host */ | 43 | static atomic_t scsi_host_next_hn; /* host_no for next new host */ |
44 | 44 | ||
45 | 45 | ||
46 | static void scsi_host_cls_release(struct device *dev) | 46 | static void scsi_host_cls_release(struct device *dev) |
@@ -333,7 +333,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
333 | 333 | ||
334 | mutex_init(&shost->scan_mutex); | 334 | mutex_init(&shost->scan_mutex); |
335 | 335 | ||
336 | shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */ | 336 | /* |
337 | * subtract one because atomic_inc_return() returns the value after the | ||
338 | * increment, but we need the host number as it was before the increment | ||
339 | */ | ||
340 | shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; | ||
337 | shost->dma_channel = 0xff; | 341 | shost->dma_channel = 0xff; |
338 | 342 | ||
339 | /* These three are default values which can be overridden */ | 343 | /* These three are default values which can be overridden */ |
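The replacement above relies only on atomic_inc_return() returning the post-increment value, which is why one is subtracted. The same fetch-and-increment idiom in isolation (the counter and function names here are hypothetical):

        /* Sketch: lock-free fetch-and-increment. atomic_inc_return() yields
         * the value *after* the increment, so subtracting one gives each
         * caller the number the counter held just before its own increment;
         * two concurrent callers can no longer collide the way the old
         * non-atomic post-increment could. */
        static atomic_t example_next_hn = ATOMIC_INIT(0);

        static unsigned int example_alloc_host_no(void)
        {
                return atomic_inc_return(&example_next_hn) - 1;
        }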
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index b4b805e8d7db..166d96450a0e 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -2254,10 +2254,13 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, | |||
2254 | continue; | 2254 | continue; |
2255 | if (crq->node_name && tgt->ids.node_name != crq->node_name) | 2255 | if (crq->node_name && tgt->ids.node_name != crq->node_name) |
2256 | continue; | 2256 | continue; |
2257 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2257 | if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO) |
2258 | tgt->logo_rcvd = 1; | ||
2259 | if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) { | ||
2260 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | ||
2261 | ibmvfc_reinit_host(vhost); | ||
2262 | } | ||
2258 | } | 2263 | } |
2259 | |||
2260 | ibmvfc_reinit_host(vhost); | ||
2261 | break; | 2264 | break; |
2262 | case IBMVFC_AE_LINK_DOWN: | 2265 | case IBMVFC_AE_LINK_DOWN: |
2263 | case IBMVFC_AE_ADAPTER_FAILED: | 2266 | case IBMVFC_AE_ADAPTER_FAILED: |
@@ -2783,27 +2786,27 @@ static void ibmvfc_tasklet(void *data) | |||
2783 | 2786 | ||
2784 | spin_lock_irqsave(vhost->host->host_lock, flags); | 2787 | spin_lock_irqsave(vhost->host->host_lock, flags); |
2785 | while (!done) { | 2788 | while (!done) { |
2786 | /* Pull all the valid messages off the CRQ */ | ||
2787 | while ((crq = ibmvfc_next_crq(vhost)) != NULL) { | ||
2788 | ibmvfc_handle_crq(crq, vhost); | ||
2789 | crq->valid = 0; | ||
2790 | } | ||
2791 | |||
2792 | /* Pull all the valid messages off the async CRQ */ | 2789 | /* Pull all the valid messages off the async CRQ */ |
2793 | while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | 2790 | while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { |
2794 | ibmvfc_handle_async(async, vhost); | 2791 | ibmvfc_handle_async(async, vhost); |
2795 | async->valid = 0; | 2792 | async->valid = 0; |
2796 | } | 2793 | } |
2797 | 2794 | ||
2798 | vio_enable_interrupts(vdev); | 2795 | /* Pull all the valid messages off the CRQ */ |
2799 | if ((crq = ibmvfc_next_crq(vhost)) != NULL) { | 2796 | while ((crq = ibmvfc_next_crq(vhost)) != NULL) { |
2800 | vio_disable_interrupts(vdev); | ||
2801 | ibmvfc_handle_crq(crq, vhost); | 2797 | ibmvfc_handle_crq(crq, vhost); |
2802 | crq->valid = 0; | 2798 | crq->valid = 0; |
2803 | } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | 2799 | } |
2800 | |||
2801 | vio_enable_interrupts(vdev); | ||
2802 | if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | ||
2804 | vio_disable_interrupts(vdev); | 2803 | vio_disable_interrupts(vdev); |
2805 | ibmvfc_handle_async(async, vhost); | 2804 | ibmvfc_handle_async(async, vhost); |
2806 | async->valid = 0; | 2805 | async->valid = 0; |
2806 | } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { | ||
2807 | vio_disable_interrupts(vdev); | ||
2808 | ibmvfc_handle_crq(crq, vhost); | ||
2809 | crq->valid = 0; | ||
2807 | } else | 2810 | } else |
2808 | done = 1; | 2811 | done = 1; |
2809 | } | 2812 | } |
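The reordered loop above keeps the usual drain / re-enable-interrupts / re-check shape, closing the window in which an entry could arrive after the queues look empty but before interrupts are back on. A stripped-down sketch of that pattern (every name below is a placeholder, not an ibmvfc symbol):

        struct example_dev;
        struct example_entry;
        struct example_entry *example_next_entry(struct example_dev *dev);
        void example_handle(struct example_entry *e);
        void example_enable_irqs(struct example_dev *dev);
        void example_disable_irqs(struct example_dev *dev);

        /* Drain with interrupts masked, re-enable them, then peek once more:
         * anything that slipped in during the window is handled now, with
         * interrupts masked again so only one pass runs at a time. */
        static void example_poll_loop(struct example_dev *dev)
        {
                struct example_entry *e;
                int done = 0;

                while (!done) {
                        while ((e = example_next_entry(dev)) != NULL)
                                example_handle(e);

                        example_enable_irqs(dev);
                        e = example_next_entry(dev);
                        if (e) {
                                example_disable_irqs(dev);
                                example_handle(e);
                        } else {
                                done = 1;
                        }
                }
        }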
@@ -2927,7 +2930,11 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) | |||
2927 | break; | 2930 | break; |
2928 | case IBMVFC_MAD_FAILED: | 2931 | case IBMVFC_MAD_FAILED: |
2929 | default: | 2932 | default: |
2930 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 2933 | if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED) |
2934 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | ||
2935 | else if (tgt->logo_rcvd) | ||
2936 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | ||
2937 | else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | ||
2931 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); | 2938 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); |
2932 | else | 2939 | else |
2933 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2940 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
@@ -3054,6 +3061,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt) | |||
3054 | return; | 3061 | return; |
3055 | 3062 | ||
3056 | kref_get(&tgt->kref); | 3063 | kref_get(&tgt->kref); |
3064 | tgt->logo_rcvd = 0; | ||
3057 | evt = ibmvfc_get_event(vhost); | 3065 | evt = ibmvfc_get_event(vhost); |
3058 | vhost->discovery_threads++; | 3066 | vhost->discovery_threads++; |
3059 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); | 3067 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index c2668d7d67f5..007fa1c9ef14 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
@@ -605,6 +605,7 @@ struct ibmvfc_target { | |||
605 | int need_login; | 605 | int need_login; |
606 | int add_rport; | 606 | int add_rport; |
607 | int init_retries; | 607 | int init_retries; |
608 | int logo_rcvd; | ||
608 | u32 cancel_key; | 609 | u32 cancel_key; |
609 | struct ibmvfc_service_parms service_parms; | 610 | struct ibmvfc_service_parms service_parms; |
610 | struct ibmvfc_service_parms service_parms_change; | 611 | struct ibmvfc_service_parms service_parms_change; |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 0f8bc772b112..5f045505a1f4 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -131,13 +131,13 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { | |||
131 | }; | 131 | }; |
132 | 132 | ||
133 | static const struct ipr_chip_t ipr_chip[] = { | 133 | static const struct ipr_chip_t ipr_chip[] = { |
134 | { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] }, | 134 | { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
135 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }, | 135 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
136 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] }, | 136 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
137 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] }, | 137 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, |
138 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] }, | 138 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] }, |
139 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] }, | 139 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] }, |
140 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] } | 140 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] } |
141 | }; | 141 | }; |
142 | 142 | ||
143 | static int ipr_max_bus_speeds [] = { | 143 | static int ipr_max_bus_speeds [] = { |
@@ -7367,6 +7367,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
7367 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | 7367 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); |
7368 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); | 7368 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); |
7369 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 7369 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
7370 | init_waitqueue_head(&ioa_cfg->msi_wait_q); | ||
7370 | ioa_cfg->sdt_state = INACTIVE; | 7371 | ioa_cfg->sdt_state = INACTIVE; |
7371 | if (ipr_enable_cache) | 7372 | if (ipr_enable_cache) |
7372 | ioa_cfg->cache_state = CACHE_ENABLED; | 7373 | ioa_cfg->cache_state = CACHE_ENABLED; |
@@ -7398,25 +7399,108 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
7398 | } | 7399 | } |
7399 | 7400 | ||
7400 | /** | 7401 | /** |
7401 | * ipr_get_chip_cfg - Find adapter chip configuration | 7402 | * ipr_get_chip_info - Find adapter chip information |
7402 | * @dev_id: PCI device id struct | 7403 | * @dev_id: PCI device id struct |
7403 | * | 7404 | * |
7404 | * Return value: | 7405 | * Return value: |
7405 | * ptr to chip config on success / NULL on failure | 7406 | * ptr to chip information on success / NULL on failure |
7406 | **/ | 7407 | **/ |
7407 | static const struct ipr_chip_cfg_t * __devinit | 7408 | static const struct ipr_chip_t * __devinit |
7408 | ipr_get_chip_cfg(const struct pci_device_id *dev_id) | 7409 | ipr_get_chip_info(const struct pci_device_id *dev_id) |
7409 | { | 7410 | { |
7410 | int i; | 7411 | int i; |
7411 | 7412 | ||
7412 | for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) | 7413 | for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) |
7413 | if (ipr_chip[i].vendor == dev_id->vendor && | 7414 | if (ipr_chip[i].vendor == dev_id->vendor && |
7414 | ipr_chip[i].device == dev_id->device) | 7415 | ipr_chip[i].device == dev_id->device) |
7415 | return ipr_chip[i].cfg; | 7416 | return &ipr_chip[i]; |
7416 | return NULL; | 7417 | return NULL; |
7417 | } | 7418 | } |
7418 | 7419 | ||
7419 | /** | 7420 | /** |
7421 | * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). | ||
7422 | * @pdev: PCI device struct | ||
7423 | * | ||
7424 | * Description: Simply set the msi_received flag to 1 indicating that | ||
7425 | * Message Signaled Interrupts are supported. | ||
7426 | * | ||
7427 | * Return value: | ||
7428 | * IRQ_HANDLED | ||
7429 | **/ | ||
7430 | static irqreturn_t __devinit ipr_test_intr(int irq, void *devp) | ||
7431 | { | ||
7432 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; | ||
7433 | unsigned long lock_flags = 0; | ||
7434 | irqreturn_t rc = IRQ_HANDLED; | ||
7435 | |||
7436 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
7437 | |||
7438 | ioa_cfg->msi_received = 1; | ||
7439 | wake_up(&ioa_cfg->msi_wait_q); | ||
7440 | |||
7441 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
7442 | return rc; | ||
7443 | } | ||
7444 | |||
7445 | /** | ||
7446 | * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. | ||
7447 | * @pdev: PCI device struct | ||
7448 | * | ||
7449 | * Description: The return value from pci_enable_msi() cannot always be | ||
7450 | * trusted. This routine sets up and initiates a test interrupt to determine | ||
7451 | * if the interrupt is received via the ipr_test_intr() service routine. | ||
7452 | * If the test fails, the driver will fall back to LSI. | ||
7453 | * | ||
7454 | * Return value: | ||
7455 | * 0 on success / non-zero on failure | ||
7456 | **/ | ||
7457 | static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, | ||
7458 | struct pci_dev *pdev) | ||
7459 | { | ||
7460 | int rc; | ||
7461 | volatile u32 int_reg; | ||
7462 | unsigned long lock_flags = 0; | ||
7463 | |||
7464 | ENTER; | ||
7465 | |||
7466 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
7467 | init_waitqueue_head(&ioa_cfg->msi_wait_q); | ||
7468 | ioa_cfg->msi_received = 0; | ||
7469 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); | ||
7470 | writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); | ||
7471 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
7472 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
7473 | |||
7474 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
7475 | if (rc) { | ||
7476 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); | ||
7477 | return rc; | ||
7478 | } else if (ipr_debug) | ||
7479 | dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); | ||
7480 | |||
7481 | writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); | ||
7482 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); | ||
7483 | wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); | ||
7484 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); | ||
7485 | |||
7486 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
7487 | if (!ioa_cfg->msi_received) { | ||
7488 | /* MSI test failed */ | ||
7489 | dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); | ||
7490 | rc = -EOPNOTSUPP; | ||
7491 | } else if (ipr_debug) | ||
7492 | dev_info(&pdev->dev, "MSI test succeeded.\n"); | ||
7493 | |||
7494 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
7495 | |||
7496 | free_irq(pdev->irq, ioa_cfg); | ||
7497 | |||
7498 | LEAVE; | ||
7499 | |||
7500 | return rc; | ||
7501 | } | ||
7502 | |||
7503 | /** | ||
7420 | * ipr_probe_ioa - Allocates memory and does first stage of initialization | 7504 | * ipr_probe_ioa - Allocates memory and does first stage of initialization |
7421 | * @pdev: PCI device struct | 7505 | * @pdev: PCI device struct |
7422 | * @dev_id: PCI device id struct | 7506 | * @dev_id: PCI device id struct |
@@ -7441,11 +7525,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7441 | goto out; | 7525 | goto out; |
7442 | } | 7526 | } |
7443 | 7527 | ||
7444 | if (!(rc = pci_enable_msi(pdev))) | ||
7445 | dev_info(&pdev->dev, "MSI enabled\n"); | ||
7446 | else if (ipr_debug) | ||
7447 | dev_info(&pdev->dev, "Cannot enable MSI\n"); | ||
7448 | |||
7449 | dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); | 7528 | dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); |
7450 | 7529 | ||
7451 | host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); | 7530 | host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); |
@@ -7461,14 +7540,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7461 | ata_host_init(&ioa_cfg->ata_host, &pdev->dev, | 7540 | ata_host_init(&ioa_cfg->ata_host, &pdev->dev, |
7462 | sata_port_info.flags, &ipr_sata_ops); | 7541 | sata_port_info.flags, &ipr_sata_ops); |
7463 | 7542 | ||
7464 | ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id); | 7543 | ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); |
7465 | 7544 | ||
7466 | if (!ioa_cfg->chip_cfg) { | 7545 | if (!ioa_cfg->ipr_chip) { |
7467 | dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", | 7546 | dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", |
7468 | dev_id->vendor, dev_id->device); | 7547 | dev_id->vendor, dev_id->device); |
7469 | goto out_scsi_host_put; | 7548 | goto out_scsi_host_put; |
7470 | } | 7549 | } |
7471 | 7550 | ||
7551 | ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; | ||
7552 | |||
7472 | if (ipr_transop_timeout) | 7553 | if (ipr_transop_timeout) |
7473 | ioa_cfg->transop_timeout = ipr_transop_timeout; | 7554 | ioa_cfg->transop_timeout = ipr_transop_timeout; |
7474 | else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) | 7555 | else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) |
@@ -7519,6 +7600,18 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7519 | goto cleanup_nomem; | 7600 | goto cleanup_nomem; |
7520 | } | 7601 | } |
7521 | 7602 | ||
7603 | /* Enable MSI style interrupts if they are supported. */ | ||
7604 | if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) { | ||
7605 | rc = ipr_test_msi(ioa_cfg, pdev); | ||
7606 | if (rc == -EOPNOTSUPP) | ||
7607 | pci_disable_msi(pdev); | ||
7608 | else if (rc) | ||
7609 | goto out_msi_disable; | ||
7610 | else | ||
7611 | dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq); | ||
7612 | } else if (ipr_debug) | ||
7613 | dev_info(&pdev->dev, "Cannot enable MSI.\n"); | ||
7614 | |||
7522 | /* Save away PCI config space for use following IOA reset */ | 7615 | /* Save away PCI config space for use following IOA reset */ |
7523 | rc = pci_save_state(pdev); | 7616 | rc = pci_save_state(pdev); |
7524 | 7617 | ||
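Together with the ipr_test_msi() body added earlier in this file, the block above amounts to a verify-before-trust sequence for MSI. A condensed sketch of that flow (the example_* helpers are placeholders; pci_enable_msi() and pci_disable_msi() are the real kernel calls):

        #include <linux/pci.h>

        struct example_dev;
        int example_fire_test_interrupt(struct pci_dev *pdev, struct example_dev *dev);
        int example_request_legacy_irq(struct pci_dev *pdev, struct example_dev *dev);

        /* Sketch: enable MSI, fire a test interrupt at ourselves and wait for
         * the handler to set a flag; keep MSI only if the interrupt really
         * arrived, otherwise drop back to legacy (LSI/INTx) interrupts. */
        static int example_setup_irq(struct pci_dev *pdev, struct example_dev *dev)
        {
                if (!pci_enable_msi(pdev)) {
                        if (!example_fire_test_interrupt(pdev, dev))
                                return 0;               /* MSI verified, keep it */
                        pci_disable_msi(pdev);          /* unreliable, fall back */
                }
                return example_request_legacy_irq(pdev, dev);
        }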
@@ -7556,7 +7649,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7556 | ioa_cfg->ioa_unit_checked = 1; | 7649 | ioa_cfg->ioa_unit_checked = 1; |
7557 | 7650 | ||
7558 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); | 7651 | ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); |
7559 | rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg); | 7652 | rc = request_irq(pdev->irq, ipr_isr, |
7653 | ioa_cfg->msi_received ? 0 : IRQF_SHARED, | ||
7654 | IPR_NAME, ioa_cfg); | ||
7560 | 7655 | ||
7561 | if (rc) { | 7656 | if (rc) { |
7562 | dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", | 7657 | dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", |
@@ -7583,12 +7678,13 @@ cleanup_nolog: | |||
7583 | ipr_free_mem(ioa_cfg); | 7678 | ipr_free_mem(ioa_cfg); |
7584 | cleanup_nomem: | 7679 | cleanup_nomem: |
7585 | iounmap(ipr_regs); | 7680 | iounmap(ipr_regs); |
7681 | out_msi_disable: | ||
7682 | pci_disable_msi(pdev); | ||
7586 | out_release_regions: | 7683 | out_release_regions: |
7587 | pci_release_regions(pdev); | 7684 | pci_release_regions(pdev); |
7588 | out_scsi_host_put: | 7685 | out_scsi_host_put: |
7589 | scsi_host_put(host); | 7686 | scsi_host_put(host); |
7590 | out_disable: | 7687 | out_disable: |
7591 | pci_disable_msi(pdev); | ||
7592 | pci_disable_device(pdev); | 7688 | pci_disable_device(pdev); |
7593 | goto out; | 7689 | goto out; |
7594 | } | 7690 | } |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 79a3ae4fb2c7..4b63dd6b1c81 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -37,8 +37,8 @@ | |||
37 | /* | 37 | /* |
38 | * Literals | 38 | * Literals |
39 | */ | 39 | */ |
40 | #define IPR_DRIVER_VERSION "2.4.2" | 40 | #define IPR_DRIVER_VERSION "2.4.3" |
41 | #define IPR_DRIVER_DATE "(January 21, 2009)" | 41 | #define IPR_DRIVER_DATE "(June 10, 2009)" |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding | 44 | * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding |
@@ -1025,6 +1025,9 @@ struct ipr_chip_cfg_t { | |||
1025 | struct ipr_chip_t { | 1025 | struct ipr_chip_t { |
1026 | u16 vendor; | 1026 | u16 vendor; |
1027 | u16 device; | 1027 | u16 device; |
1028 | u16 intr_type; | ||
1029 | #define IPR_USE_LSI 0x00 | ||
1030 | #define IPR_USE_MSI 0x01 | ||
1028 | const struct ipr_chip_cfg_t *cfg; | 1031 | const struct ipr_chip_cfg_t *cfg; |
1029 | }; | 1032 | }; |
1030 | 1033 | ||
@@ -1094,6 +1097,7 @@ struct ipr_ioa_cfg { | |||
1094 | u8 needs_hard_reset:1; | 1097 | u8 needs_hard_reset:1; |
1095 | u8 dual_raid:1; | 1098 | u8 dual_raid:1; |
1096 | u8 needs_warm_reset:1; | 1099 | u8 needs_warm_reset:1; |
1100 | u8 msi_received:1; | ||
1097 | 1101 | ||
1098 | u8 revid; | 1102 | u8 revid; |
1099 | 1103 | ||
@@ -1159,6 +1163,7 @@ struct ipr_ioa_cfg { | |||
1159 | 1163 | ||
1160 | unsigned int transop_timeout; | 1164 | unsigned int transop_timeout; |
1161 | const struct ipr_chip_cfg_t *chip_cfg; | 1165 | const struct ipr_chip_cfg_t *chip_cfg; |
1166 | const struct ipr_chip_t *ipr_chip; | ||
1162 | 1167 | ||
1163 | void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ | 1168 | void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ |
1164 | unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ | 1169 | unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ |
@@ -1179,6 +1184,7 @@ struct ipr_ioa_cfg { | |||
1179 | struct work_struct work_q; | 1184 | struct work_struct work_q; |
1180 | 1185 | ||
1181 | wait_queue_head_t reset_wait_q; | 1186 | wait_queue_head_t reset_wait_q; |
1187 | wait_queue_head_t msi_wait_q; | ||
1182 | 1188 | ||
1183 | struct ipr_dump *dump; | 1189 | struct ipr_dump *dump; |
1184 | enum ipr_sdt_state sdt_state; | 1190 | enum ipr_sdt_state sdt_state; |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index b7c092d63bbe..518dbd91df85 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -253,8 +253,6 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn, | |||
253 | 253 | ||
254 | if (r < 0) { | 254 | if (r < 0) { |
255 | iscsi_tcp_segment_unmap(segment); | 255 | iscsi_tcp_segment_unmap(segment); |
256 | if (copied || r == -EAGAIN) | ||
257 | break; | ||
258 | return r; | 256 | return r; |
259 | } | 257 | } |
260 | copied += r; | 258 | copied += r; |
@@ -275,11 +273,17 @@ static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn) | |||
275 | 273 | ||
276 | while (1) { | 274 | while (1) { |
277 | rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); | 275 | rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment); |
278 | if (rc < 0) { | 276 | /* |
277 | * We may not have been able to send data because the conn | ||
278 | * is getting stopped. libiscsi will know, so propagate the error | ||
279 | * for it to do the right thing. | ||
280 | */ | ||
281 | if (rc == -EAGAIN) | ||
282 | return rc; | ||
283 | else if (rc < 0) { | ||
279 | rc = ISCSI_ERR_XMIT_FAILED; | 284 | rc = ISCSI_ERR_XMIT_FAILED; |
280 | goto error; | 285 | goto error; |
281 | } | 286 | } else if (rc == 0) |
282 | if (rc == 0) | ||
283 | break; | 287 | break; |
284 | 288 | ||
285 | consumed += rc; | 289 | consumed += rc; |
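The comment in the hunk above defines the error contract this change relies on; compressed into a sketch (all names are placeholders, not libiscsi symbols), the transmit path now separates "try again later" from a genuine failure:

        struct example_conn;
        int example_xmit_segment(struct example_conn *conn);
        #define EXAMPLE_ERR_XMIT_FAILED 1

        /* -EAGAIN is passed through untouched so the layer above can requeue
         * the transmit once the connection settles; any other negative return
         * is reported as a transmit failure; zero or positive means progress
         * was made and the loop continues. */
        static int example_xmit(struct example_conn *conn)
        {
                int rc = example_xmit_segment(conn);

                if (rc == -EAGAIN)
                        return rc;
                if (rc < 0)
                        return EXAMPLE_ERR_XMIT_FAILED;
                return rc;
        }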
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 4c880656990b..6fabf66972b9 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
@@ -45,14 +45,6 @@ | |||
45 | 45 | ||
46 | #define FC_DISC_DELAY 3 | 46 | #define FC_DISC_DELAY 3 |
47 | 47 | ||
48 | static int fc_disc_debug; | ||
49 | |||
50 | #define FC_DEBUG_DISC(fmt...) \ | ||
51 | do { \ | ||
52 | if (fc_disc_debug) \ | ||
53 | FC_DBG(fmt); \ | ||
54 | } while (0) | ||
55 | |||
56 | static void fc_disc_gpn_ft_req(struct fc_disc *); | 48 | static void fc_disc_gpn_ft_req(struct fc_disc *); |
57 | static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); | 49 | static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); |
58 | static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, | 50 | static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, |
@@ -137,8 +129,8 @@ static void fc_disc_rport_callback(struct fc_lport *lport, | |||
137 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 129 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
138 | struct fc_disc *disc = &lport->disc; | 130 | struct fc_disc *disc = &lport->disc; |
139 | 131 | ||
140 | FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event, | 132 | FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event, |
141 | rport->port_id); | 133 | rport->port_id); |
142 | 134 | ||
143 | switch (event) { | 135 | switch (event) { |
144 | case RPORT_EV_CREATED: | 136 | case RPORT_EV_CREATED: |
@@ -191,8 +183,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
191 | 183 | ||
192 | lport = disc->lport; | 184 | lport = disc->lport; |
193 | 185 | ||
194 | FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n", | 186 | FC_DISC_DBG(disc, "Received an RSCN event\n"); |
195 | fc_host_port_id(lport->host)); | ||
196 | 187 | ||
197 | /* make sure the frame contains an RSCN message */ | 188 | /* make sure the frame contains an RSCN message */ |
198 | rp = fc_frame_payload_get(fp, sizeof(*rp)); | 189 | rp = fc_frame_payload_get(fp, sizeof(*rp)); |
@@ -225,8 +216,8 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
225 | */ | 216 | */ |
226 | switch (fmt) { | 217 | switch (fmt) { |
227 | case ELS_ADDR_FMT_PORT: | 218 | case ELS_ADDR_FMT_PORT: |
228 | FC_DEBUG_DISC("Port address format for port (%6x)\n", | 219 | FC_DISC_DBG(disc, "Port address format for port " |
229 | ntoh24(pp->rscn_fid)); | 220 | "(%6x)\n", ntoh24(pp->rscn_fid)); |
230 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); | 221 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); |
231 | if (!dp) { | 222 | if (!dp) { |
232 | redisc = 1; | 223 | redisc = 1; |
@@ -243,19 +234,19 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
243 | case ELS_ADDR_FMT_DOM: | 234 | case ELS_ADDR_FMT_DOM: |
244 | case ELS_ADDR_FMT_FAB: | 235 | case ELS_ADDR_FMT_FAB: |
245 | default: | 236 | default: |
246 | FC_DEBUG_DISC("Address format is (%d)\n", fmt); | 237 | FC_DISC_DBG(disc, "Address format is (%d)\n", fmt); |
247 | redisc = 1; | 238 | redisc = 1; |
248 | break; | 239 | break; |
249 | } | 240 | } |
250 | } | 241 | } |
251 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); | 242 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); |
252 | if (redisc) { | 243 | if (redisc) { |
253 | FC_DEBUG_DISC("RSCN received: rediscovering\n"); | 244 | FC_DISC_DBG(disc, "RSCN received: rediscovering\n"); |
254 | fc_disc_restart(disc); | 245 | fc_disc_restart(disc); |
255 | } else { | 246 | } else { |
256 | FC_DEBUG_DISC("RSCN received: not rediscovering. " | 247 | FC_DISC_DBG(disc, "RSCN received: not rediscovering. " |
257 | "redisc %d state %d in_prog %d\n", | 248 | "redisc %d state %d in_prog %d\n", |
258 | redisc, lport->state, disc->pending); | 249 | redisc, lport->state, disc->pending); |
259 | list_for_each_entry_safe(dp, next, &disc_ports, peers) { | 250 | list_for_each_entry_safe(dp, next, &disc_ports, peers) { |
260 | list_del(&dp->peers); | 251 | list_del(&dp->peers); |
261 | rport = lport->tt.rport_lookup(lport, dp->ids.port_id); | 252 | rport = lport->tt.rport_lookup(lport, dp->ids.port_id); |
@@ -270,7 +261,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
270 | fc_frame_free(fp); | 261 | fc_frame_free(fp); |
271 | return; | 262 | return; |
272 | reject: | 263 | reject: |
273 | FC_DEBUG_DISC("Received a bad RSCN frame\n"); | 264 | FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); |
274 | rjt_data.fp = NULL; | 265 | rjt_data.fp = NULL; |
275 | rjt_data.reason = ELS_RJT_LOGIC; | 266 | rjt_data.reason = ELS_RJT_LOGIC; |
276 | rjt_data.explan = ELS_EXPL_NONE; | 267 | rjt_data.explan = ELS_EXPL_NONE; |
@@ -302,7 +293,8 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
302 | mutex_unlock(&disc->disc_mutex); | 293 | mutex_unlock(&disc->disc_mutex); |
303 | break; | 294 | break; |
304 | default: | 295 | default: |
305 | FC_DBG("Received an unsupported request. opcode (%x)\n", op); | 296 | FC_DISC_DBG(disc, "Received an unsupported request, " |
297 | "the opcode is (%x)\n", op); | ||
306 | break; | 298 | break; |
307 | } | 299 | } |
308 | } | 300 | } |
@@ -320,12 +312,10 @@ static void fc_disc_restart(struct fc_disc *disc) | |||
320 | struct fc_rport_libfc_priv *rdata, *next; | 312 | struct fc_rport_libfc_priv *rdata, *next; |
321 | struct fc_lport *lport = disc->lport; | 313 | struct fc_lport *lport = disc->lport; |
322 | 314 | ||
323 | FC_DEBUG_DISC("Restarting discovery for port (%6x)\n", | 315 | FC_DISC_DBG(disc, "Restarting discovery\n"); |
324 | fc_host_port_id(lport->host)); | ||
325 | 316 | ||
326 | list_for_each_entry_safe(rdata, next, &disc->rports, peers) { | 317 | list_for_each_entry_safe(rdata, next, &disc->rports, peers) { |
327 | rport = PRIV_TO_RPORT(rdata); | 318 | rport = PRIV_TO_RPORT(rdata); |
328 | FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id); | ||
329 | list_del(&rdata->peers); | 319 | list_del(&rdata->peers); |
330 | lport->tt.rport_logoff(rport); | 320 | lport->tt.rport_logoff(rport); |
331 | } | 321 | } |
@@ -485,8 +475,7 @@ static void fc_disc_done(struct fc_disc *disc) | |||
485 | struct fc_lport *lport = disc->lport; | 475 | struct fc_lport *lport = disc->lport; |
486 | enum fc_disc_event event; | 476 | enum fc_disc_event event; |
487 | 477 | ||
488 | FC_DEBUG_DISC("Discovery complete for port (%6x)\n", | 478 | FC_DISC_DBG(disc, "Discovery complete\n"); |
489 | fc_host_port_id(lport->host)); | ||
490 | 479 | ||
491 | event = disc->event; | 480 | event = disc->event; |
492 | disc->event = DISC_EV_NONE; | 481 | disc->event = DISC_EV_NONE; |
@@ -510,10 +499,10 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) | |||
510 | { | 499 | { |
511 | struct fc_lport *lport = disc->lport; | 500 | struct fc_lport *lport = disc->lport; |
512 | unsigned long delay = 0; | 501 | unsigned long delay = 0; |
513 | if (fc_disc_debug) | 502 | |
514 | FC_DBG("Error %ld, retries %d/%d\n", | 503 | FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n", |
515 | PTR_ERR(fp), disc->retry_count, | 504 | PTR_ERR(fp), disc->retry_count, |
516 | FC_DISC_RETRY_LIMIT); | 505 | FC_DISC_RETRY_LIMIT); |
517 | 506 | ||
518 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { | 507 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { |
519 | /* | 508 | /* |
@@ -649,9 +638,9 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) | |||
649 | &disc->rogue_rports); | 638 | &disc->rogue_rports); |
650 | lport->tt.rport_login(rport); | 639 | lport->tt.rport_login(rport); |
651 | } else | 640 | } else |
652 | FC_DBG("Failed to allocate memory for " | 641 | printk(KERN_WARNING "libfc: Failed to allocate " |
653 | "the newly discovered port (%6x)\n", | 642 | "memory for the newly discovered port " |
654 | dp.ids.port_id); | 643 | "(%6x)\n", dp.ids.port_id); |
655 | } | 644 | } |
656 | 645 | ||
657 | if (np->fp_flags & FC_NS_FID_LAST) { | 646 | if (np->fp_flags & FC_NS_FID_LAST) { |
@@ -671,9 +660,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) | |||
671 | */ | 660 | */ |
672 | if (error == 0 && len > 0 && len < sizeof(*np)) { | 661 | if (error == 0 && len > 0 && len < sizeof(*np)) { |
673 | if (np != &disc->partial_buf) { | 662 | if (np != &disc->partial_buf) { |
674 | FC_DEBUG_DISC("Partial buffer remains " | 663 | FC_DISC_DBG(disc, "Partial buffer remains " |
675 | "for discovery by (%6x)\n", | 664 | "for discovery\n"); |
676 | fc_host_port_id(lport->host)); | ||
677 | memcpy(&disc->partial_buf, np, len); | 665 | memcpy(&disc->partial_buf, np, len); |
678 | } | 666 | } |
679 | disc->buf_len = (unsigned char) len; | 667 | disc->buf_len = (unsigned char) len; |
@@ -721,8 +709,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
721 | int error; | 709 | int error; |
722 | 710 | ||
723 | mutex_lock(&disc->disc_mutex); | 711 | mutex_lock(&disc->disc_mutex); |
724 | FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n", | 712 | FC_DISC_DBG(disc, "Received a GPN_FT response\n"); |
725 | fc_host_port_id(disc->lport->host)); | ||
726 | 713 | ||
727 | if (IS_ERR(fp)) { | 714 | if (IS_ERR(fp)) { |
728 | fc_disc_error(disc, fp); | 715 | fc_disc_error(disc, fp); |
@@ -738,30 +725,30 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
738 | disc->seq_count == 0) { | 725 | disc->seq_count == 0) { |
739 | cp = fc_frame_payload_get(fp, sizeof(*cp)); | 726 | cp = fc_frame_payload_get(fp, sizeof(*cp)); |
740 | if (!cp) { | 727 | if (!cp) { |
741 | FC_DBG("GPN_FT response too short, len %d\n", | 728 | FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n", |
742 | fr_len(fp)); | 729 | fr_len(fp)); |
743 | } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { | 730 | } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { |
744 | 731 | ||
745 | /* Accepted, parse the response. */ | 732 | /* Accepted, parse the response. */ |
746 | buf = cp + 1; | 733 | buf = cp + 1; |
747 | len -= sizeof(*cp); | 734 | len -= sizeof(*cp); |
748 | } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { | 735 | } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { |
749 | FC_DBG("GPN_FT rejected reason %x exp %x " | 736 | FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x " |
750 | "(check zoning)\n", cp->ct_reason, | 737 | "(check zoning)\n", cp->ct_reason, |
751 | cp->ct_explan); | 738 | cp->ct_explan); |
752 | disc->event = DISC_EV_FAILED; | 739 | disc->event = DISC_EV_FAILED; |
753 | fc_disc_done(disc); | 740 | fc_disc_done(disc); |
754 | } else { | 741 | } else { |
755 | FC_DBG("GPN_FT unexpected response code %x\n", | 742 | FC_DISC_DBG(disc, "GPN_FT unexpected response code " |
756 | ntohs(cp->ct_cmd)); | 743 | "%x\n", ntohs(cp->ct_cmd)); |
757 | } | 744 | } |
758 | } else if (fr_sof(fp) == FC_SOF_N3 && | 745 | } else if (fr_sof(fp) == FC_SOF_N3 && |
759 | seq_cnt == disc->seq_count) { | 746 | seq_cnt == disc->seq_count) { |
760 | buf = fh + 1; | 747 | buf = fh + 1; |
761 | } else { | 748 | } else { |
762 | FC_DBG("GPN_FT unexpected frame - out of sequence? " | 749 | FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? " |
763 | "seq_cnt %x expected %x sof %x eof %x\n", | 750 | "seq_cnt %x expected %x sof %x eof %x\n", |
764 | seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); | 751 | seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); |
765 | } | 752 | } |
766 | if (buf) { | 753 | if (buf) { |
767 | error = fc_disc_gpn_ft_parse(disc, buf, len); | 754 | error = fc_disc_gpn_ft_parse(disc, buf, len); |
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 7af9bceb8aa9..2bc22be5f849 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -32,18 +32,7 @@ | |||
32 | #include <scsi/libfc.h> | 32 | #include <scsi/libfc.h> |
33 | #include <scsi/fc_encode.h> | 33 | #include <scsi/fc_encode.h> |
34 | 34 | ||
35 | /* | 35 | static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ |
36 | * fc_exch_debug can be set in debugger or at compile time to get more logs. | ||
37 | */ | ||
38 | static int fc_exch_debug; | ||
39 | |||
40 | #define FC_DEBUG_EXCH(fmt...) \ | ||
41 | do { \ | ||
42 | if (fc_exch_debug) \ | ||
43 | FC_DBG(fmt); \ | ||
44 | } while (0) | ||
45 | |||
46 | static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ | ||
47 | 36 | ||
48 | /* | 37 | /* |
49 | * Structure and function definitions for managing Fibre Channel Exchanges | 38 | * Structure and function definitions for managing Fibre Channel Exchanges |
@@ -333,8 +322,8 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, | |||
333 | if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) | 322 | if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) |
334 | return; | 323 | return; |
335 | 324 | ||
336 | FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n", | 325 | FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n"); |
337 | ep->xid); | 326 | |
338 | if (schedule_delayed_work(&ep->timeout_work, | 327 | if (schedule_delayed_work(&ep->timeout_work, |
339 | msecs_to_jiffies(timer_msec))) | 328 | msecs_to_jiffies(timer_msec))) |
340 | fc_exch_hold(ep); /* hold for timer */ | 329 | fc_exch_hold(ep); /* hold for timer */ |
@@ -545,7 +534,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, | |||
545 | /* alloc a new xid */ | 534 | /* alloc a new xid */ |
546 | xid = fc_em_alloc_xid(mp, fp); | 535 | xid = fc_em_alloc_xid(mp, fp); |
547 | if (!xid) { | 536 | if (!xid) { |
548 | printk(KERN_ERR "fc_em_alloc_xid() failed\n"); | 537 | printk(KERN_WARNING "libfc: Failed to allocate an exhange\n"); |
549 | goto err; | 538 | goto err; |
550 | } | 539 | } |
551 | } | 540 | } |
@@ -820,8 +809,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) | |||
820 | struct fc_exch *ep = fc_seq_exch(sp); | 809 | struct fc_exch *ep = fc_seq_exch(sp); |
821 | 810 | ||
822 | sp = fc_seq_alloc(ep, ep->seq_id++); | 811 | sp = fc_seq_alloc(ep, ep->seq_id++); |
823 | FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n", | 812 | FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", |
824 | ep->xid, ep->f_ctl, sp->id); | 813 | ep->f_ctl, sp->id); |
825 | return sp; | 814 | return sp; |
826 | } | 815 | } |
827 | /* | 816 | /* |
@@ -901,7 +890,7 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, | |||
901 | fc_exch_els_rec(sp, els_data->fp); | 890 | fc_exch_els_rec(sp, els_data->fp); |
902 | break; | 891 | break; |
903 | default: | 892 | default: |
904 | FC_DBG("Invalid ELS CMD:%x\n", els_cmd); | 893 | FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); |
905 | } | 894 | } |
906 | } | 895 | } |
907 | EXPORT_SYMBOL(fc_seq_els_rsp_send); | 896 | EXPORT_SYMBOL(fc_seq_els_rsp_send); |
@@ -1134,7 +1123,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
1134 | lp->tt.lport_recv(lp, sp, fp); | 1123 | lp->tt.lport_recv(lp, sp, fp); |
1135 | fc_exch_release(ep); /* release from lookup */ | 1124 | fc_exch_release(ep); /* release from lookup */ |
1136 | } else { | 1125 | } else { |
1137 | FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject); | 1126 | FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject); |
1138 | fc_frame_free(fp); | 1127 | fc_frame_free(fp); |
1139 | } | 1128 | } |
1140 | } | 1129 | } |
@@ -1242,10 +1231,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
1242 | sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ | 1231 | sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ |
1243 | if (!sp) { | 1232 | if (!sp) { |
1244 | atomic_inc(&mp->stats.xid_not_found); | 1233 | atomic_inc(&mp->stats.xid_not_found); |
1245 | FC_DEBUG_EXCH("seq lookup failed\n"); | 1234 | FC_EM_DBG(mp, "seq lookup failed\n"); |
1246 | } else { | 1235 | } else { |
1247 | atomic_inc(&mp->stats.non_bls_resp); | 1236 | atomic_inc(&mp->stats.non_bls_resp); |
1248 | FC_DEBUG_EXCH("non-BLS response to sequence"); | 1237 | FC_EM_DBG(mp, "non-BLS response to sequence"); |
1249 | } | 1238 | } |
1250 | fc_frame_free(fp); | 1239 | fc_frame_free(fp); |
1251 | } | 1240 | } |
@@ -1266,8 +1255,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) | |||
1266 | int rc = 1, has_rec = 0; | 1255 | int rc = 1, has_rec = 0; |
1267 | 1256 | ||
1268 | fh = fc_frame_header_get(fp); | 1257 | fh = fc_frame_header_get(fp); |
1269 | FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n", | 1258 | FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl, |
1270 | fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); | 1259 | fc_exch_rctl_name(fh->fh_r_ctl)); |
1271 | 1260 | ||
1272 | if (cancel_delayed_work_sync(&ep->timeout_work)) | 1261 | if (cancel_delayed_work_sync(&ep->timeout_work)) |
1273 | fc_exch_release(ep); /* release from pending timer hold */ | 1262 | fc_exch_release(ep); /* release from pending timer hold */ |
@@ -1359,9 +1348,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
1359 | case FC_RCTL_ACK_0: | 1348 | case FC_RCTL_ACK_0: |
1360 | break; | 1349 | break; |
1361 | default: | 1350 | default: |
1362 | FC_DEBUG_EXCH("BLS rctl %x - %s received", | 1351 | FC_EXCH_DBG(ep, "BLS rctl %x - %s received", |
1363 | fh->fh_r_ctl, | 1352 | fh->fh_r_ctl, |
1364 | fc_exch_rctl_name(fh->fh_r_ctl)); | 1353 | fc_exch_rctl_name(fh->fh_r_ctl)); |
1365 | break; | 1354 | break; |
1366 | } | 1355 | } |
1367 | fc_frame_free(fp); | 1356 | fc_frame_free(fp); |
@@ -1599,7 +1588,8 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
1599 | 1588 | ||
1600 | if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) | 1589 | if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) |
1601 | goto cleanup; | 1590 | goto cleanup; |
1602 | FC_DBG("Cannot process RRQ, because of frame error %d\n", err); | 1591 | FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, " |
1592 | "frame error %d\n", err); | ||
1603 | return; | 1593 | return; |
1604 | } | 1594 | } |
1605 | 1595 | ||
@@ -1608,12 +1598,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
1608 | 1598 | ||
1609 | switch (op) { | 1599 | switch (op) { |
1610 | case ELS_LS_RJT: | 1600 | case ELS_LS_RJT: |
1611 | FC_DBG("LS_RJT for RRQ"); | 1601 | FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); |
1612 | /* fall through */ | 1602 | /* fall through */ |
1613 | case ELS_LS_ACC: | 1603 | case ELS_LS_ACC: |
1614 | goto cleanup; | 1604 | goto cleanup; |
1615 | default: | 1605 | default: |
1616 | FC_DBG("unexpected response op %x for RRQ", op); | 1606 | FC_EXCH_DBG(aborted_ep, "unexpected response op %x " |
1607 | "for RRQ", op); | ||
1617 | return; | 1608 | return; |
1618 | } | 1609 | } |
1619 | 1610 | ||
@@ -1740,8 +1731,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, | |||
1740 | size_t len; | 1731 | size_t len; |
1741 | 1732 | ||
1742 | if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { | 1733 | if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { |
1743 | FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n", | 1734 | FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", |
1744 | min_xid, max_xid); | 1735 | min_xid, max_xid); |
1745 | return NULL; | 1736 | return NULL; |
1746 | } | 1737 | } |
1747 | 1738 | ||
@@ -1878,7 +1869,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
1878 | 1869 | ||
1879 | /* lport lock ? */ | 1870 | /* lport lock ? */ |
1880 | if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { | 1871 | if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { |
1881 | FC_DBG("fc_lport or EM is not allocated and configured"); | 1872 | FC_LPORT_DBG(lp, "Receiving frames for an lport that " |
1873 | "has not been initialized correctly\n"); | ||
1882 | fc_frame_free(fp); | 1874 | fc_frame_free(fp); |
1883 | return; | 1875 | return; |
1884 | } | 1876 | } |
@@ -1904,7 +1896,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, | |||
1904 | fc_exch_recv_req(lp, mp, fp); | 1896 | fc_exch_recv_req(lp, mp, fp); |
1905 | break; | 1897 | break; |
1906 | default: | 1898 | default: |
1907 | FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp)); | 1899 | FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp)); |
1908 | fc_frame_free(fp); | 1900 | fc_frame_free(fp); |
1909 | break; | 1901 | break; |
1910 | } | 1902 | } |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index ad8b747837b0..e303e0d12c4b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -43,13 +43,9 @@ MODULE_AUTHOR("Open-FCoE.org"); | |||
43 | MODULE_DESCRIPTION("libfc"); | 43 | MODULE_DESCRIPTION("libfc"); |
44 | MODULE_LICENSE("GPL v2"); | 44 | MODULE_LICENSE("GPL v2"); |
45 | 45 | ||
46 | static int fc_fcp_debug; | 46 | unsigned int fc_debug_logging; |
47 | 47 | module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); | |
48 | #define FC_DEBUG_FCP(fmt...) \ | 48 | MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); |
49 | do { \ | ||
50 | if (fc_fcp_debug) \ | ||
51 | FC_DBG(fmt); \ | ||
52 | } while (0) | ||
53 | 49 | ||
54 | static struct kmem_cache *scsi_pkt_cachep; | 50 | static struct kmem_cache *scsi_pkt_cachep; |
55 | 51 | ||
@@ -347,8 +343,8 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
347 | if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && | 343 | if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && |
348 | fc_frame_crc_check(fp)) | 344 | fc_frame_crc_check(fp)) |
349 | goto crc_err; | 345 | goto crc_err; |
350 | FC_DEBUG_FCP("data received past end. len %zx offset %zx " | 346 | FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " |
351 | "data_len %x\n", len, offset, fsp->data_len); | 347 | "data_len %x\n", len, offset, fsp->data_len); |
352 | fc_fcp_retry_cmd(fsp); | 348 | fc_fcp_retry_cmd(fsp); |
353 | return; | 349 | return; |
354 | } | 350 | } |
@@ -411,7 +407,8 @@ crc_err: | |||
411 | stats->ErrorFrames++; | 407 | stats->ErrorFrames++; |
412 | /* FIXME - per cpu count, not total count! */ | 408 | /* FIXME - per cpu count, not total count! */ |
413 | if (stats->InvalidCRCCount++ < 5) | 409 | if (stats->InvalidCRCCount++ < 5) |
414 | printk(KERN_WARNING "CRC error on data frame for port (%6x)\n", | 410 | printk(KERN_WARNING "libfc: CRC error on data " |
411 | "frame for port (%6x)\n", | ||
415 | fc_host_port_id(lp->host)); | 412 | fc_host_port_id(lp->host)); |
416 | /* | 413 | /* |
417 | * Assume the frame is total garbage. | 414 | * Assume the frame is total garbage. |
@@ -475,14 +472,14 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
475 | WARN_ON(seq_blen <= 0); | 472 | WARN_ON(seq_blen <= 0); |
476 | if (unlikely(offset + seq_blen > fsp->data_len)) { | 473 | if (unlikely(offset + seq_blen > fsp->data_len)) { |
477 | /* this should never happen */ | 474 | /* this should never happen */ |
478 | FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n", | 475 | FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx " |
479 | seq_blen, offset); | 476 | "offset %zx\n", seq_blen, offset); |
480 | fc_fcp_send_abort(fsp); | 477 | fc_fcp_send_abort(fsp); |
481 | return 0; | 478 | return 0; |
482 | } else if (offset != fsp->xfer_len) { | 479 | } else if (offset != fsp->xfer_len) { |
483 | /* Out of Order Data Request - no problem, but unexpected. */ | 480 | /* Out of Order Data Request - no problem, but unexpected. */ |
484 | FC_DEBUG_FCP("xfer-ready non-contiguous. " | 481 | FC_FCP_DBG(fsp, "xfer-ready non-contiguous. " |
485 | "seq_blen %zx offset %zx\n", seq_blen, offset); | 482 | "seq_blen %zx offset %zx\n", seq_blen, offset); |
486 | } | 483 | } |
487 | 484 | ||
488 | /* | 485 | /* |
@@ -493,7 +490,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | |||
493 | t_blen = fsp->max_payload; | 490 | t_blen = fsp->max_payload; |
494 | if (lp->seq_offload) { | 491 | if (lp->seq_offload) { |
495 | t_blen = min(seq_blen, (size_t)lp->lso_max); | 492 | t_blen = min(seq_blen, (size_t)lp->lso_max); |
496 | FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", | 493 | FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", |
497 | fsp, seq_blen, lp->lso_max, t_blen); | 494 | fsp, seq_blen, lp->lso_max, t_blen); |
498 | } | 495 | } |
499 | 496 | ||
@@ -694,7 +691,7 @@ static void fc_fcp_reduce_can_queue(struct fc_lport *lp) | |||
694 | if (!can_queue) | 691 | if (!can_queue) |
695 | can_queue = 1; | 692 | can_queue = 1; |
696 | lp->host->can_queue = can_queue; | 693 | lp->host->can_queue = can_queue; |
697 | shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n" | 694 | shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n" |
698 | "Reducing can_queue to %d.\n", can_queue); | 695 | "Reducing can_queue to %d.\n", can_queue); |
699 | done: | 696 | done: |
700 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 697 | spin_unlock_irqrestore(lp->host->host_lock, flags); |
@@ -768,7 +765,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
768 | 765 | ||
769 | fc_fcp_resp(fsp, fp); | 766 | fc_fcp_resp(fsp, fp); |
770 | } else { | 767 | } else { |
771 | FC_DBG("unexpected frame. r_ctl %x\n", r_ctl); | 768 | FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl); |
772 | } | 769 | } |
773 | unlock: | 770 | unlock: |
774 | fc_fcp_unlock_pkt(fsp); | 771 | fc_fcp_unlock_pkt(fsp); |
@@ -877,17 +874,17 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
877 | return; | 874 | return; |
878 | } | 875 | } |
879 | fsp->status_code = FC_DATA_OVRRUN; | 876 | fsp->status_code = FC_DATA_OVRRUN; |
880 | FC_DBG("tgt %6x xfer len %zx greater than expected len %x. " | 877 | FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, " |
881 | "data len %x\n", | 878 | "len %x, data len %x\n", |
882 | fsp->rport->port_id, | 879 | fsp->rport->port_id, |
883 | fsp->xfer_len, expected_len, fsp->data_len); | 880 | fsp->xfer_len, expected_len, fsp->data_len); |
884 | } | 881 | } |
885 | fc_fcp_complete_locked(fsp); | 882 | fc_fcp_complete_locked(fsp); |
886 | return; | 883 | return; |
887 | 884 | ||
888 | len_err: | 885 | len_err: |
889 | FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n", | 886 | FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u " |
890 | flags, fr_len(fp), respl, snsl); | 887 | "snsl %u\n", flags, fr_len(fp), respl, snsl); |
891 | err: | 888 | err: |
892 | fsp->status_code = FC_ERROR; | 889 | fsp->status_code = FC_ERROR; |
893 | fc_fcp_complete_locked(fsp); | 890 | fc_fcp_complete_locked(fsp); |
@@ -1107,13 +1104,11 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1107 | if (fc_fcp_lock_pkt(fsp)) | 1104 | if (fc_fcp_lock_pkt(fsp)) |
1108 | return; | 1105 | return; |
1109 | 1106 | ||
1110 | switch (error) { | 1107 | if (error == -FC_EX_CLOSED) { |
1111 | case -FC_EX_CLOSED: | ||
1112 | fc_fcp_retry_cmd(fsp); | 1108 | fc_fcp_retry_cmd(fsp); |
1113 | goto unlock; | 1109 | goto unlock; |
1114 | default: | ||
1115 | FC_DBG("unknown error %ld\n", PTR_ERR(fp)); | ||
1116 | } | 1110 | } |
1111 | |||
1117 | /* | 1112 | /* |
1118 | * clear abort pending, because the lower layer | 1113 | * clear abort pending, because the lower layer |
1119 | * decided to force completion. | 1114 | * decided to force completion. |
@@ -1145,10 +1140,10 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp) | |||
1145 | fsp->wait_for_comp = 0; | 1140 | fsp->wait_for_comp = 0; |
1146 | 1141 | ||
1147 | if (!rc) { | 1142 | if (!rc) { |
1148 | FC_DBG("target abort cmd failed\n"); | 1143 | FC_FCP_DBG(fsp, "target abort cmd failed\n"); |
1149 | rc = FAILED; | 1144 | rc = FAILED; |
1150 | } else if (fsp->state & FC_SRB_ABORTED) { | 1145 | } else if (fsp->state & FC_SRB_ABORTED) { |
1151 | FC_DBG("target abort cmd passed\n"); | 1146 | FC_FCP_DBG(fsp, "target abort cmd passed\n"); |
1152 | rc = SUCCESS; | 1147 | rc = SUCCESS; |
1153 | fc_fcp_complete_locked(fsp); | 1148 | fc_fcp_complete_locked(fsp); |
1154 | } | 1149 | } |
@@ -1213,7 +1208,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
1213 | spin_unlock_bh(&fsp->scsi_pkt_lock); | 1208 | spin_unlock_bh(&fsp->scsi_pkt_lock); |
1214 | 1209 | ||
1215 | if (!rc) { | 1210 | if (!rc) { |
1216 | FC_DBG("lun reset failed\n"); | 1211 | FC_SCSI_DBG(lp, "lun reset failed\n"); |
1217 | return FAILED; | 1212 | return FAILED; |
1218 | } | 1213 | } |
1219 | 1214 | ||
@@ -1221,7 +1216,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | |||
1221 | if (fsp->cdb_status != FCP_TMF_CMPL) | 1216 | if (fsp->cdb_status != FCP_TMF_CMPL) |
1222 | return FAILED; | 1217 | return FAILED; |
1223 | 1218 | ||
1224 | FC_DBG("lun reset to lun %u completed\n", lun); | 1219 | FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun); |
1225 | fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); | 1220 | fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED); |
1226 | return SUCCESS; | 1221 | return SUCCESS; |
1227 | } | 1222 | } |
@@ -1388,13 +1383,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1388 | rjt = fc_frame_payload_get(fp, sizeof(*rjt)); | 1383 | rjt = fc_frame_payload_get(fp, sizeof(*rjt)); |
1389 | switch (rjt->er_reason) { | 1384 | switch (rjt->er_reason) { |
1390 | default: | 1385 | default: |
1391 | FC_DEBUG_FCP("device %x unexpected REC reject " | 1386 | FC_FCP_DBG(fsp, "device %x unexpected REC reject " |
1392 | "reason %d expl %d\n", | 1387 | "reason %d expl %d\n", |
1393 | fsp->rport->port_id, rjt->er_reason, | 1388 | fsp->rport->port_id, rjt->er_reason, |
1394 | rjt->er_explan); | 1389 | rjt->er_explan); |
1395 | /* fall through */ | 1390 | /* fall through */ |
1396 | case ELS_RJT_UNSUP: | 1391 | case ELS_RJT_UNSUP: |
1397 | FC_DEBUG_FCP("device does not support REC\n"); | 1392 | FC_FCP_DBG(fsp, "device does not support REC\n"); |
1398 | rp = fsp->rport->dd_data; | 1393 | rp = fsp->rport->dd_data; |
1399 | /* | 1394 | /* |
1400 | * if we do not spport RECs or got some bogus | 1395 | * if we do not spport RECs or got some bogus |
@@ -1514,8 +1509,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1514 | break; | 1509 | break; |
1515 | 1510 | ||
1516 | default: | 1511 | default: |
1517 | FC_DBG("REC %p fid %x error unexpected error %d\n", | 1512 | FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n", |
1518 | fsp, fsp->rport->port_id, error); | 1513 | fsp, fsp->rport->port_id, error); |
1519 | fsp->status_code = FC_CMD_PLOGO; | 1514 | fsp->status_code = FC_CMD_PLOGO; |
1520 | /* fall through */ | 1515 | /* fall through */ |
1521 | 1516 | ||
@@ -1524,9 +1519,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1524 | * Assume REC or LS_ACC was lost. | 1519 | * Assume REC or LS_ACC was lost. |
1525 | * The exchange manager will have aborted REC, so retry. | 1520 | * The exchange manager will have aborted REC, so retry. |
1526 | */ | 1521 | */ |
1527 | FC_DBG("REC fid %x error error %d retry %d/%d\n", | 1522 | FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n", |
1528 | fsp->rport->port_id, error, fsp->recov_retry, | 1523 | fsp->rport->port_id, error, fsp->recov_retry, |
1529 | FC_MAX_RECOV_RETRY); | 1524 | FC_MAX_RECOV_RETRY); |
1530 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) | 1525 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) |
1531 | fc_fcp_rec(fsp); | 1526 | fc_fcp_rec(fsp); |
1532 | else | 1527 | else |
@@ -2011,9 +2006,11 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) | |||
2011 | if (lp->state != LPORT_ST_READY) | 2006 | if (lp->state != LPORT_ST_READY) |
2012 | return rc; | 2007 | return rc; |
2013 | 2008 | ||
2009 | FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id); | ||
2010 | |||
2014 | fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); | 2011 | fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO); |
2015 | if (fsp == NULL) { | 2012 | if (fsp == NULL) { |
2016 | FC_DBG("could not allocate scsi_pkt\n"); | 2013 | printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); |
2017 | sc_cmd->result = DID_NO_CONNECT << 16; | 2014 | sc_cmd->result = DID_NO_CONNECT << 16; |
2018 | goto out; | 2015 | goto out; |
2019 | } | 2016 | } |
@@ -2048,17 +2045,21 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) | |||
2048 | struct fc_lport *lp = shost_priv(shost); | 2045 | struct fc_lport *lp = shost_priv(shost); |
2049 | unsigned long wait_tmo; | 2046 | unsigned long wait_tmo; |
2050 | 2047 | ||
2048 | FC_SCSI_DBG(lp, "Resetting host\n"); | ||
2049 | |||
2051 | lp->tt.lport_reset(lp); | 2050 | lp->tt.lport_reset(lp); |
2052 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; | 2051 | wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; |
2053 | while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) | 2052 | while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo)) |
2054 | msleep(1000); | 2053 | msleep(1000); |
2055 | 2054 | ||
2056 | if (fc_fcp_lport_queue_ready(lp)) { | 2055 | if (fc_fcp_lport_queue_ready(lp)) { |
2057 | shost_printk(KERN_INFO, shost, "Host reset succeeded.\n"); | 2056 | shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " |
2057 | "on port (%6x)\n", fc_host_port_id(lp->host)); | ||
2058 | return SUCCESS; | 2058 | return SUCCESS; |
2059 | } else { | 2059 | } else { |
2060 | shost_printk(KERN_INFO, shost, "Host reset failed. " | 2060 | shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " |
2061 | "lport not ready.\n"); | 2061 | "port (%6x) is not ready.\n", |
2062 | fc_host_port_id(lp->host)); | ||
2062 | return FAILED; | 2063 | return FAILED; |
2063 | } | 2064 | } |
2064 | } | 2065 | } |
@@ -2117,7 +2118,8 @@ void fc_fcp_destroy(struct fc_lport *lp) | |||
2117 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); | 2118 | struct fc_fcp_internal *si = fc_get_scsi_internal(lp); |
2118 | 2119 | ||
2119 | if (!list_empty(&si->scsi_pkt_queue)) | 2120 | if (!list_empty(&si->scsi_pkt_queue)) |
2120 | printk(KERN_ERR "Leaked scsi packets.\n"); | 2121 | printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " |
2122 | "port (%6x)\n", fc_host_port_id(lp->host)); | ||
2121 | 2123 | ||
2122 | mempool_destroy(si->scsi_pkt_pool); | 2124 | mempool_destroy(si->scsi_pkt_pool); |
2123 | kfree(si); | 2125 | kfree(si); |
@@ -2166,7 +2168,8 @@ static int __init libfc_init(void) | |||
2166 | sizeof(struct fc_fcp_pkt), | 2168 | sizeof(struct fc_fcp_pkt), |
2167 | 0, SLAB_HWCACHE_ALIGN, NULL); | 2169 | 0, SLAB_HWCACHE_ALIGN, NULL); |
2168 | if (scsi_pkt_cachep == NULL) { | 2170 | if (scsi_pkt_cachep == NULL) { |
2169 | FC_DBG("Unable to allocate SRB cache...module load failed!"); | 2171 | printk(KERN_ERR "libfc: Unable to allocate SRB cache, " |
2172 | "module load failed!"); | ||
2170 | return -ENOMEM; | 2173 | return -ENOMEM; |
2171 | } | 2174 | } |
2172 | 2175 | ||
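
Throughout fc_fcp.c the old FC_DBG()/FC_DEBUG_FCP() calls become FC_FCP_DBG(), which takes the fc_fcp_pkt so each message carries per-packet context, while FC_SCSI_DBG() takes the lport for error-handling paths that have no packet in hand. The macro definitions themselves are not part of this file's diff; a minimal sketch of how such a per-packet macro could be gated on a module-wide logging flag is shown below, where the fc_debug_logging parameter and the FC_FCP_LOGGING bit are assumptions for illustration, not the committed names.

    #include <linux/kernel.h>

    /* Hypothetical sketch -- the real definitions live in the libfc headers. */
    extern unsigned int fc_debug_logging;          /* assumed module parameter */
    #define FC_FCP_LOGGING  0x04                   /* assumed bit for FCP traffic */

    #define FC_FCP_DBG(pkt, fmt, args...)                                   \
            do {                                                            \
                    if (fc_debug_logging & FC_FCP_LOGGING)                  \
                            printk(KERN_INFO "host%u: fcp: %6x: " fmt,      \
                                   (pkt)->lp->host->host_no,                \
                                   (pkt)->rport->port_id, ##args);          \
            } while (0)
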
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e0c247724d2b..745fa5555d6a 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -101,14 +101,6 @@ | |||
101 | 101 | ||
102 | #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ | 102 | #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/ |
103 | 103 | ||
104 | static int fc_lport_debug; | ||
105 | |||
106 | #define FC_DEBUG_LPORT(fmt...) \ | ||
107 | do { \ | ||
108 | if (fc_lport_debug) \ | ||
109 | FC_DBG(fmt); \ | ||
110 | } while (0) | ||
111 | |||
112 | static void fc_lport_error(struct fc_lport *, struct fc_frame *); | 104 | static void fc_lport_error(struct fc_lport *, struct fc_frame *); |
113 | 105 | ||
114 | static void fc_lport_enter_reset(struct fc_lport *); | 106 | static void fc_lport_enter_reset(struct fc_lport *); |
@@ -151,8 +143,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
151 | struct fc_rport *rport, | 143 | struct fc_rport *rport, |
152 | enum fc_rport_event event) | 144 | enum fc_rport_event event) |
153 | { | 145 | { |
154 | FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event, | 146 | FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, |
155 | rport->port_id); | 147 | rport->port_id); |
156 | 148 | ||
157 | switch (event) { | 149 | switch (event) { |
158 | case RPORT_EV_CREATED: | 150 | case RPORT_EV_CREATED: |
@@ -162,19 +154,19 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
162 | lport->dns_rp = rport; | 154 | lport->dns_rp = rport; |
163 | fc_lport_enter_rpn_id(lport); | 155 | fc_lport_enter_rpn_id(lport); |
164 | } else { | 156 | } else { |
165 | FC_DEBUG_LPORT("Received an CREATED event on " | 157 | FC_LPORT_DBG(lport, "Received an CREATED event " |
166 | "port (%6x) for the directory " | 158 | "on port (%6x) for the directory " |
167 | "server, but the lport is not " | 159 | "server, but the lport is not " |
168 | "in the DNS state, it's in the " | 160 | "in the DNS state, it's in the " |
169 | "%d state", rport->port_id, | 161 | "%d state", rport->port_id, |
170 | lport->state); | 162 | lport->state); |
171 | lport->tt.rport_logoff(rport); | 163 | lport->tt.rport_logoff(rport); |
172 | } | 164 | } |
173 | mutex_unlock(&lport->lp_mutex); | 165 | mutex_unlock(&lport->lp_mutex); |
174 | } else | 166 | } else |
175 | FC_DEBUG_LPORT("Received an event for port (%6x) " | 167 | FC_LPORT_DBG(lport, "Received an event for port (%6x) " |
176 | "which is not the directory server\n", | 168 | "which is not the directory server\n", |
177 | rport->port_id); | 169 | rport->port_id); |
178 | break; | 170 | break; |
179 | case RPORT_EV_LOGO: | 171 | case RPORT_EV_LOGO: |
180 | case RPORT_EV_FAILED: | 172 | case RPORT_EV_FAILED: |
@@ -185,9 +177,9 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
185 | mutex_unlock(&lport->lp_mutex); | 177 | mutex_unlock(&lport->lp_mutex); |
186 | 178 | ||
187 | } else | 179 | } else |
188 | FC_DEBUG_LPORT("Received an event for port (%6x) " | 180 | FC_LPORT_DBG(lport, "Received an event for port (%6x) " |
189 | "which is not the directory server\n", | 181 | "which is not the directory server\n", |
190 | rport->port_id); | 182 | rport->port_id); |
191 | break; | 183 | break; |
192 | case RPORT_EV_NONE: | 184 | case RPORT_EV_NONE: |
193 | break; | 185 | break; |
@@ -363,8 +355,8 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) | |||
363 | static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, | 355 | static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, |
364 | struct fc_lport *lport) | 356 | struct fc_lport *lport) |
365 | { | 357 | { |
366 | FC_DEBUG_LPORT("Received RLIR request while in state %s\n", | 358 | FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", |
367 | fc_lport_state(lport)); | 359 | fc_lport_state(lport)); |
368 | 360 | ||
369 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); | 361 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); |
370 | fc_frame_free(fp); | 362 | fc_frame_free(fp); |
@@ -389,8 +381,8 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
389 | void *dp; | 381 | void *dp; |
390 | u32 f_ctl; | 382 | u32 f_ctl; |
391 | 383 | ||
392 | FC_DEBUG_LPORT("Received RLIR request while in state %s\n", | 384 | FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", |
393 | fc_lport_state(lport)); | 385 | fc_lport_state(lport)); |
394 | 386 | ||
395 | len = fr_len(in_fp) - sizeof(struct fc_frame_header); | 387 | len = fr_len(in_fp) - sizeof(struct fc_frame_header); |
396 | pp = fc_frame_payload_get(in_fp, len); | 388 | pp = fc_frame_payload_get(in_fp, len); |
@@ -437,8 +429,8 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
437 | size_t len; | 429 | size_t len; |
438 | u32 f_ctl; | 430 | u32 f_ctl; |
439 | 431 | ||
440 | FC_DEBUG_LPORT("Received RNID request while in state %s\n", | 432 | FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", |
441 | fc_lport_state(lport)); | 433 | fc_lport_state(lport)); |
442 | 434 | ||
443 | req = fc_frame_payload_get(in_fp, sizeof(*req)); | 435 | req = fc_frame_payload_get(in_fp, sizeof(*req)); |
444 | if (!req) { | 436 | if (!req) { |
@@ -498,8 +490,8 @@ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
498 | size_t len; | 490 | size_t len; |
499 | u32 f_ctl; | 491 | u32 f_ctl; |
500 | 492 | ||
501 | FC_DEBUG_LPORT("Received ADISC request while in state %s\n", | 493 | FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n", |
502 | fc_lport_state(lport)); | 494 | fc_lport_state(lport)); |
503 | 495 | ||
504 | req = fc_frame_payload_get(in_fp, sizeof(*req)); | 496 | req = fc_frame_payload_get(in_fp, sizeof(*req)); |
505 | if (!req) { | 497 | if (!req) { |
@@ -574,8 +566,8 @@ EXPORT_SYMBOL(fc_fabric_login); | |||
574 | */ | 566 | */ |
575 | void fc_linkup(struct fc_lport *lport) | 567 | void fc_linkup(struct fc_lport *lport) |
576 | { | 568 | { |
577 | FC_DEBUG_LPORT("Link is up for port (%6x)\n", | 569 | printk(KERN_INFO "libfc: Link up on port (%6x)\n", |
578 | fc_host_port_id(lport->host)); | 570 | fc_host_port_id(lport->host)); |
579 | 571 | ||
580 | mutex_lock(&lport->lp_mutex); | 572 | mutex_lock(&lport->lp_mutex); |
581 | if (!lport->link_up) { | 573 | if (!lport->link_up) { |
@@ -595,8 +587,8 @@ EXPORT_SYMBOL(fc_linkup); | |||
595 | void fc_linkdown(struct fc_lport *lport) | 587 | void fc_linkdown(struct fc_lport *lport) |
596 | { | 588 | { |
597 | mutex_lock(&lport->lp_mutex); | 589 | mutex_lock(&lport->lp_mutex); |
598 | FC_DEBUG_LPORT("Link is down for port (%6x)\n", | 590 | printk(KERN_INFO "libfc: Link down on port (%6x)\n", |
599 | fc_host_port_id(lport->host)); | 591 | fc_host_port_id(lport->host)); |
600 | 592 | ||
601 | if (lport->link_up) { | 593 | if (lport->link_up) { |
602 | lport->link_up = 0; | 594 | lport->link_up = 0; |
@@ -701,12 +693,11 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | |||
701 | { | 693 | { |
702 | switch (event) { | 694 | switch (event) { |
703 | case DISC_EV_SUCCESS: | 695 | case DISC_EV_SUCCESS: |
704 | FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n", | 696 | FC_LPORT_DBG(lport, "Discovery succeeded\n"); |
705 | fc_host_port_id(lport->host)); | ||
706 | break; | 697 | break; |
707 | case DISC_EV_FAILED: | 698 | case DISC_EV_FAILED: |
708 | FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n", | 699 | printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n", |
709 | fc_host_port_id(lport->host)); | 700 | fc_host_port_id(lport->host)); |
710 | mutex_lock(&lport->lp_mutex); | 701 | mutex_lock(&lport->lp_mutex); |
711 | fc_lport_enter_reset(lport); | 702 | fc_lport_enter_reset(lport); |
712 | mutex_unlock(&lport->lp_mutex); | 703 | mutex_unlock(&lport->lp_mutex); |
@@ -726,8 +717,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | |||
726 | */ | 717 | */ |
727 | static void fc_lport_enter_ready(struct fc_lport *lport) | 718 | static void fc_lport_enter_ready(struct fc_lport *lport) |
728 | { | 719 | { |
729 | FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n", | 720 | FC_LPORT_DBG(lport, "Entered READY from state %s\n", |
730 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 721 | fc_lport_state(lport)); |
731 | 722 | ||
732 | fc_lport_state_enter(lport, LPORT_ST_READY); | 723 | fc_lport_state_enter(lport, LPORT_ST_READY); |
733 | 724 | ||
@@ -762,8 +753,8 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | |||
762 | u32 local_fid; | 753 | u32 local_fid; |
763 | u32 f_ctl; | 754 | u32 f_ctl; |
764 | 755 | ||
765 | FC_DEBUG_LPORT("Received FLOGI request while in state %s\n", | 756 | FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", |
766 | fc_lport_state(lport)); | 757 | fc_lport_state(lport)); |
767 | 758 | ||
768 | fh = fc_frame_header_get(rx_fp); | 759 | fh = fc_frame_header_get(rx_fp); |
769 | remote_fid = ntoh24(fh->fh_s_id); | 760 | remote_fid = ntoh24(fh->fh_s_id); |
@@ -772,12 +763,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | |||
772 | goto out; | 763 | goto out; |
773 | remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); | 764 | remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); |
774 | if (remote_wwpn == lport->wwpn) { | 765 | if (remote_wwpn == lport->wwpn) { |
775 | FC_DBG("FLOGI from port with same WWPN %llx " | 766 | printk(KERN_WARNING "libfc: Received FLOGI from port " |
776 | "possible configuration error\n", | 767 | "with same WWPN %llx\n", remote_wwpn); |
777 | (unsigned long long)remote_wwpn); | ||
778 | goto out; | 768 | goto out; |
779 | } | 769 | } |
780 | FC_DBG("FLOGI from port WWPN %llx\n", (unsigned long long)remote_wwpn); | 770 | FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); |
781 | 771 | ||
782 | /* | 772 | /* |
783 | * XXX what is the right thing to do for FIDs? | 773 | * XXX what is the right thing to do for FIDs? |
@@ -909,7 +899,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, | |||
909 | } | 899 | } |
910 | } | 900 | } |
911 | } else { | 901 | } else { |
912 | FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp)); | 902 | FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", |
903 | fr_eof(fp)); | ||
913 | fc_frame_free(fp); | 904 | fc_frame_free(fp); |
914 | } | 905 | } |
915 | mutex_unlock(&lport->lp_mutex); | 906 | mutex_unlock(&lport->lp_mutex); |
@@ -947,8 +938,8 @@ EXPORT_SYMBOL(fc_lport_reset); | |||
947 | */ | 938 | */ |
948 | static void fc_lport_enter_reset(struct fc_lport *lport) | 939 | static void fc_lport_enter_reset(struct fc_lport *lport) |
949 | { | 940 | { |
950 | FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n", | 941 | FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", |
951 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 942 | fc_lport_state(lport)); |
952 | 943 | ||
953 | fc_lport_state_enter(lport, LPORT_ST_RESET); | 944 | fc_lport_state_enter(lport, LPORT_ST_RESET); |
954 | 945 | ||
@@ -982,9 +973,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport) | |||
982 | static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) | 973 | static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) |
983 | { | 974 | { |
984 | unsigned long delay = 0; | 975 | unsigned long delay = 0; |
985 | FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n", | 976 | FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", |
986 | PTR_ERR(fp), fc_lport_state(lport), | 977 | PTR_ERR(fp), fc_lport_state(lport), |
987 | lport->retry_count); | 978 | lport->retry_count); |
988 | 979 | ||
989 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { | 980 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { |
990 | /* | 981 | /* |
@@ -1040,11 +1031,11 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1040 | 1031 | ||
1041 | mutex_lock(&lport->lp_mutex); | 1032 | mutex_lock(&lport->lp_mutex); |
1042 | 1033 | ||
1043 | FC_DEBUG_LPORT("Received a RFT_ID response\n"); | 1034 | FC_LPORT_DBG(lport, "Received a RFT_ID response\n"); |
1044 | 1035 | ||
1045 | if (lport->state != LPORT_ST_RFT_ID) { | 1036 | if (lport->state != LPORT_ST_RFT_ID) { |
1046 | FC_DBG("Received a RFT_ID response, but in state %s\n", | 1037 | FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " |
1047 | fc_lport_state(lport)); | 1038 | "%s\n", fc_lport_state(lport)); |
1048 | if (IS_ERR(fp)) | 1039 | if (IS_ERR(fp)) |
1049 | goto err; | 1040 | goto err; |
1050 | goto out; | 1041 | goto out; |
@@ -1094,11 +1085,11 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1094 | 1085 | ||
1095 | mutex_lock(&lport->lp_mutex); | 1086 | mutex_lock(&lport->lp_mutex); |
1096 | 1087 | ||
1097 | FC_DEBUG_LPORT("Received a RPN_ID response\n"); | 1088 | FC_LPORT_DBG(lport, "Received a RPN_ID response\n"); |
1098 | 1089 | ||
1099 | if (lport->state != LPORT_ST_RPN_ID) { | 1090 | if (lport->state != LPORT_ST_RPN_ID) { |
1100 | FC_DBG("Received a RPN_ID response, but in state %s\n", | 1091 | FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " |
1101 | fc_lport_state(lport)); | 1092 | "%s\n", fc_lport_state(lport)); |
1102 | if (IS_ERR(fp)) | 1093 | if (IS_ERR(fp)) |
1103 | goto err; | 1094 | goto err; |
1104 | goto out; | 1095 | goto out; |
@@ -1146,11 +1137,11 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1146 | 1137 | ||
1147 | mutex_lock(&lport->lp_mutex); | 1138 | mutex_lock(&lport->lp_mutex); |
1148 | 1139 | ||
1149 | FC_DEBUG_LPORT("Received a SCR response\n"); | 1140 | FC_LPORT_DBG(lport, "Received a SCR response\n"); |
1150 | 1141 | ||
1151 | if (lport->state != LPORT_ST_SCR) { | 1142 | if (lport->state != LPORT_ST_SCR) { |
1152 | FC_DBG("Received a SCR response, but in state %s\n", | 1143 | FC_LPORT_DBG(lport, "Received a SCR response, but in state " |
1153 | fc_lport_state(lport)); | 1144 | "%s\n", fc_lport_state(lport)); |
1154 | if (IS_ERR(fp)) | 1145 | if (IS_ERR(fp)) |
1155 | goto err; | 1146 | goto err; |
1156 | goto out; | 1147 | goto out; |
@@ -1184,8 +1175,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport) | |||
1184 | { | 1175 | { |
1185 | struct fc_frame *fp; | 1176 | struct fc_frame *fp; |
1186 | 1177 | ||
1187 | FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n", | 1178 | FC_LPORT_DBG(lport, "Entered SCR state from %s state\n", |
1188 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1179 | fc_lport_state(lport)); |
1189 | 1180 | ||
1190 | fc_lport_state_enter(lport, LPORT_ST_SCR); | 1181 | fc_lport_state_enter(lport, LPORT_ST_SCR); |
1191 | 1182 | ||
@@ -1213,8 +1204,8 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) | |||
1213 | struct fc_ns_fts *lps; | 1204 | struct fc_ns_fts *lps; |
1214 | int i; | 1205 | int i; |
1215 | 1206 | ||
1216 | FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n", | 1207 | FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n", |
1217 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1208 | fc_lport_state(lport)); |
1218 | 1209 | ||
1219 | fc_lport_state_enter(lport, LPORT_ST_RFT_ID); | 1210 | fc_lport_state_enter(lport, LPORT_ST_RFT_ID); |
1220 | 1211 | ||
@@ -1253,8 +1244,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) | |||
1253 | { | 1244 | { |
1254 | struct fc_frame *fp; | 1245 | struct fc_frame *fp; |
1255 | 1246 | ||
1256 | FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n", | 1247 | FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n", |
1257 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1248 | fc_lport_state(lport)); |
1258 | 1249 | ||
1259 | fc_lport_state_enter(lport, LPORT_ST_RPN_ID); | 1250 | fc_lport_state_enter(lport, LPORT_ST_RPN_ID); |
1260 | 1251 | ||
@@ -1294,8 +1285,8 @@ static void fc_lport_enter_dns(struct fc_lport *lport) | |||
1294 | dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; | 1285 | dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; |
1295 | dp.lp = lport; | 1286 | dp.lp = lport; |
1296 | 1287 | ||
1297 | FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n", | 1288 | FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", |
1298 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1289 | fc_lport_state(lport)); |
1299 | 1290 | ||
1300 | fc_lport_state_enter(lport, LPORT_ST_DNS); | 1291 | fc_lport_state_enter(lport, LPORT_ST_DNS); |
1301 | 1292 | ||
@@ -1374,11 +1365,11 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1374 | 1365 | ||
1375 | mutex_lock(&lport->lp_mutex); | 1366 | mutex_lock(&lport->lp_mutex); |
1376 | 1367 | ||
1377 | FC_DEBUG_LPORT("Received a LOGO response\n"); | 1368 | FC_LPORT_DBG(lport, "Received a LOGO response\n"); |
1378 | 1369 | ||
1379 | if (lport->state != LPORT_ST_LOGO) { | 1370 | if (lport->state != LPORT_ST_LOGO) { |
1380 | FC_DBG("Received a LOGO response, but in state %s\n", | 1371 | FC_LPORT_DBG(lport, "Received a LOGO response, but in state " |
1381 | fc_lport_state(lport)); | 1372 | "%s\n", fc_lport_state(lport)); |
1382 | if (IS_ERR(fp)) | 1373 | if (IS_ERR(fp)) |
1383 | goto err; | 1374 | goto err; |
1384 | goto out; | 1375 | goto out; |
@@ -1413,8 +1404,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport) | |||
1413 | struct fc_frame *fp; | 1404 | struct fc_frame *fp; |
1414 | struct fc_els_logo *logo; | 1405 | struct fc_els_logo *logo; |
1415 | 1406 | ||
1416 | FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n", | 1407 | FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", |
1417 | fc_host_port_id(lport->host), fc_lport_state(lport)); | 1408 | fc_lport_state(lport)); |
1418 | 1409 | ||
1419 | fc_lport_state_enter(lport, LPORT_ST_LOGO); | 1410 | fc_lport_state_enter(lport, LPORT_ST_LOGO); |
1420 | 1411 | ||
@@ -1456,11 +1447,11 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1456 | 1447 | ||
1457 | mutex_lock(&lport->lp_mutex); | 1448 | mutex_lock(&lport->lp_mutex); |
1458 | 1449 | ||
1459 | FC_DEBUG_LPORT("Received a FLOGI response\n"); | 1450 | FC_LPORT_DBG(lport, "Received a FLOGI response\n"); |
1460 | 1451 | ||
1461 | if (lport->state != LPORT_ST_FLOGI) { | 1452 | if (lport->state != LPORT_ST_FLOGI) { |
1462 | FC_DBG("Received a FLOGI response, but in state %s\n", | 1453 | FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " |
1463 | fc_lport_state(lport)); | 1454 | "%s\n", fc_lport_state(lport)); |
1464 | if (IS_ERR(fp)) | 1455 | if (IS_ERR(fp)) |
1465 | goto err; | 1456 | goto err; |
1466 | goto out; | 1457 | goto out; |
@@ -1475,7 +1466,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1475 | did = ntoh24(fh->fh_d_id); | 1466 | did = ntoh24(fh->fh_d_id); |
1476 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { | 1467 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { |
1477 | 1468 | ||
1478 | FC_DEBUG_LPORT("Assigned fid %x\n", did); | 1469 | printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n", |
1470 | did); | ||
1479 | fc_host_port_id(lport->host) = did; | 1471 | fc_host_port_id(lport->host) = did; |
1480 | 1472 | ||
1481 | flp = fc_frame_payload_get(fp, sizeof(*flp)); | 1473 | flp = fc_frame_payload_get(fp, sizeof(*flp)); |
@@ -1494,7 +1486,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1494 | if (e_d_tov > lport->e_d_tov) | 1486 | if (e_d_tov > lport->e_d_tov) |
1495 | lport->e_d_tov = e_d_tov; | 1487 | lport->e_d_tov = e_d_tov; |
1496 | lport->r_a_tov = 2 * e_d_tov; | 1488 | lport->r_a_tov = 2 * e_d_tov; |
1497 | FC_DBG("Point-to-Point mode\n"); | 1489 | printk(KERN_INFO "libfc: Port (%6x) entered " |
1490 | "point to point mode\n", did); | ||
1498 | fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), | 1491 | fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), |
1499 | get_unaligned_be64( | 1492 | get_unaligned_be64( |
1500 | &flp->fl_wwpn), | 1493 | &flp->fl_wwpn), |
@@ -1517,7 +1510,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1517 | } | 1510 | } |
1518 | } | 1511 | } |
1519 | } else { | 1512 | } else { |
1520 | FC_DBG("bad FLOGI response\n"); | 1513 | FC_LPORT_DBG(lport, "Bad FLOGI response\n"); |
1521 | } | 1514 | } |
1522 | 1515 | ||
1523 | out: | 1516 | out: |
@@ -1537,7 +1530,8 @@ void fc_lport_enter_flogi(struct fc_lport *lport) | |||
1537 | { | 1530 | { |
1538 | struct fc_frame *fp; | 1531 | struct fc_frame *fp; |
1539 | 1532 | ||
1540 | FC_DEBUG_LPORT("Processing FLOGI state\n"); | 1533 | FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", |
1534 | fc_lport_state(lport)); | ||
1541 | 1535 | ||
1542 | fc_lport_state_enter(lport, LPORT_ST_FLOGI); | 1536 | fc_lport_state_enter(lport, LPORT_ST_FLOGI); |
1543 | 1537 | ||
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 7bfbff7e0efb..7162385f52eb 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -55,14 +55,6 @@ | |||
55 | #include <scsi/libfc.h> | 55 | #include <scsi/libfc.h> |
56 | #include <scsi/fc_encode.h> | 56 | #include <scsi/fc_encode.h> |
57 | 57 | ||
58 | static int fc_rport_debug; | ||
59 | |||
60 | #define FC_DEBUG_RPORT(fmt...) \ | ||
61 | do { \ | ||
62 | if (fc_rport_debug) \ | ||
63 | FC_DBG(fmt); \ | ||
64 | } while (0) | ||
65 | |||
66 | struct workqueue_struct *rport_event_queue; | 58 | struct workqueue_struct *rport_event_queue; |
67 | 59 | ||
68 | static void fc_rport_enter_plogi(struct fc_rport *); | 60 | static void fc_rport_enter_plogi(struct fc_rport *); |
@@ -97,7 +89,7 @@ static const char *fc_rport_state_names[] = { | |||
97 | static void fc_rport_rogue_destroy(struct device *dev) | 89 | static void fc_rport_rogue_destroy(struct device *dev) |
98 | { | 90 | { |
99 | struct fc_rport *rport = dev_to_rport(dev); | 91 | struct fc_rport *rport = dev_to_rport(dev); |
100 | FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id); | 92 | FC_RPORT_DBG(rport, "Destroying rogue rport\n"); |
101 | kfree(rport); | 93 | kfree(rport); |
102 | } | 94 | } |
103 | 95 | ||
@@ -263,8 +255,8 @@ static void fc_rport_work(struct work_struct *work) | |||
263 | 255 | ||
264 | fc_rport_state_enter(new_rport, RPORT_ST_READY); | 256 | fc_rport_state_enter(new_rport, RPORT_ST_READY); |
265 | } else { | 257 | } else { |
266 | FC_DBG("Failed to create the rport for port " | 258 | printk(KERN_WARNING "libfc: Failed to allocate " |
267 | "(%6x).\n", ids.port_id); | 259 | " memory for rport (%6x)\n", ids.port_id); |
268 | event = RPORT_EV_FAILED; | 260 | event = RPORT_EV_FAILED; |
269 | } | 261 | } |
270 | if (rport->port_id != FC_FID_DIR_SERV) | 262 | if (rport->port_id != FC_FID_DIR_SERV) |
@@ -309,7 +301,7 @@ int fc_rport_login(struct fc_rport *rport) | |||
309 | 301 | ||
310 | mutex_lock(&rdata->rp_mutex); | 302 | mutex_lock(&rdata->rp_mutex); |
311 | 303 | ||
312 | FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id); | 304 | FC_RPORT_DBG(rport, "Login to port\n"); |
313 | 305 | ||
314 | fc_rport_enter_plogi(rport); | 306 | fc_rport_enter_plogi(rport); |
315 | 307 | ||
@@ -329,16 +321,13 @@ int fc_rport_login(struct fc_rport *rport) | |||
329 | int fc_rport_logoff(struct fc_rport *rport) | 321 | int fc_rport_logoff(struct fc_rport *rport) |
330 | { | 322 | { |
331 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 323 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
332 | struct fc_lport *lport = rdata->local_port; | ||
333 | 324 | ||
334 | mutex_lock(&rdata->rp_mutex); | 325 | mutex_lock(&rdata->rp_mutex); |
335 | 326 | ||
336 | FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); | 327 | FC_RPORT_DBG(rport, "Remove port\n"); |
337 | 328 | ||
338 | if (rdata->rp_state == RPORT_ST_NONE) { | 329 | if (rdata->rp_state == RPORT_ST_NONE) { |
339 | FC_DEBUG_RPORT("(%6x): Port (%6x) in NONE state," | 330 | FC_RPORT_DBG(rport, "Port in NONE state, not removing\n"); |
340 | " not removing", fc_host_port_id(lport->host), | ||
341 | rport->port_id); | ||
342 | mutex_unlock(&rdata->rp_mutex); | 331 | mutex_unlock(&rdata->rp_mutex); |
343 | goto out; | 332 | goto out; |
344 | } | 333 | } |
@@ -379,7 +368,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport) | |||
379 | 368 | ||
380 | fc_rport_state_enter(rport, RPORT_ST_READY); | 369 | fc_rport_state_enter(rport, RPORT_ST_READY); |
381 | 370 | ||
382 | FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id); | 371 | FC_RPORT_DBG(rport, "Port is Ready\n"); |
383 | 372 | ||
384 | rdata->event = RPORT_EV_CREATED; | 373 | rdata->event = RPORT_EV_CREATED; |
385 | queue_work(rport_event_queue, &rdata->event_work); | 374 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -436,8 +425,8 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) | |||
436 | { | 425 | { |
437 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 426 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
438 | 427 | ||
439 | FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", | 428 | FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n", |
440 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); | 429 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); |
441 | 430 | ||
442 | switch (rdata->rp_state) { | 431 | switch (rdata->rp_state) { |
443 | case RPORT_ST_PLOGI: | 432 | case RPORT_ST_PLOGI: |
@@ -479,8 +468,8 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) | |||
479 | return fc_rport_error(rport, fp); | 468 | return fc_rport_error(rport, fp); |
480 | 469 | ||
481 | if (rdata->retries < rdata->local_port->max_rport_retry_count) { | 470 | if (rdata->retries < rdata->local_port->max_rport_retry_count) { |
482 | FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", | 471 | FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n", |
483 | PTR_ERR(fp), fc_rport_state(rport)); | 472 | PTR_ERR(fp), fc_rport_state(rport)); |
484 | rdata->retries++; | 473 | rdata->retries++; |
485 | /* no additional delay on exchange timeouts */ | 474 | /* no additional delay on exchange timeouts */ |
486 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) | 475 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) |
@@ -517,12 +506,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
517 | 506 | ||
518 | mutex_lock(&rdata->rp_mutex); | 507 | mutex_lock(&rdata->rp_mutex); |
519 | 508 | ||
520 | FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n", | 509 | FC_RPORT_DBG(rport, "Received a PLOGI response\n"); |
521 | rport->port_id); | ||
522 | 510 | ||
523 | if (rdata->rp_state != RPORT_ST_PLOGI) { | 511 | if (rdata->rp_state != RPORT_ST_PLOGI) { |
524 | FC_DBG("Received a PLOGI response, but in state %s\n", | 512 | FC_RPORT_DBG(rport, "Received a PLOGI response, but in state " |
525 | fc_rport_state(rport)); | 513 | "%s\n", fc_rport_state(rport)); |
526 | if (IS_ERR(fp)) | 514 | if (IS_ERR(fp)) |
527 | goto err; | 515 | goto err; |
528 | goto out; | 516 | goto out; |
@@ -583,8 +571,8 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) | |||
583 | struct fc_lport *lport = rdata->local_port; | 571 | struct fc_lport *lport = rdata->local_port; |
584 | struct fc_frame *fp; | 572 | struct fc_frame *fp; |
585 | 573 | ||
586 | FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n", | 574 | FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n", |
587 | rport->port_id, fc_rport_state(rport)); | 575 | fc_rport_state(rport)); |
588 | 576 | ||
589 | fc_rport_state_enter(rport, RPORT_ST_PLOGI); | 577 | fc_rport_state_enter(rport, RPORT_ST_PLOGI); |
590 | 578 | ||
@@ -628,12 +616,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
628 | 616 | ||
629 | mutex_lock(&rdata->rp_mutex); | 617 | mutex_lock(&rdata->rp_mutex); |
630 | 618 | ||
631 | FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n", | 619 | FC_RPORT_DBG(rport, "Received a PRLI response\n"); |
632 | rport->port_id); | ||
633 | 620 | ||
634 | if (rdata->rp_state != RPORT_ST_PRLI) { | 621 | if (rdata->rp_state != RPORT_ST_PRLI) { |
635 | FC_DBG("Received a PRLI response, but in state %s\n", | 622 | FC_RPORT_DBG(rport, "Received a PRLI response, but in state " |
636 | fc_rport_state(rport)); | 623 | "%s\n", fc_rport_state(rport)); |
637 | if (IS_ERR(fp)) | 624 | if (IS_ERR(fp)) |
638 | goto err; | 625 | goto err; |
639 | goto out; | 626 | goto out; |
@@ -663,7 +650,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
663 | fc_rport_enter_rtv(rport); | 650 | fc_rport_enter_rtv(rport); |
664 | 651 | ||
665 | } else { | 652 | } else { |
666 | FC_DBG("Bad ELS response\n"); | 653 | FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); |
667 | rdata->event = RPORT_EV_FAILED; | 654 | rdata->event = RPORT_EV_FAILED; |
668 | fc_rport_state_enter(rport, RPORT_ST_NONE); | 655 | fc_rport_state_enter(rport, RPORT_ST_NONE); |
669 | queue_work(rport_event_queue, &rdata->event_work); | 656 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -695,12 +682,11 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
695 | 682 | ||
696 | mutex_lock(&rdata->rp_mutex); | 683 | mutex_lock(&rdata->rp_mutex); |
697 | 684 | ||
698 | FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n", | 685 | FC_RPORT_DBG(rport, "Received a LOGO response\n"); |
699 | rport->port_id); | ||
700 | 686 | ||
701 | if (rdata->rp_state != RPORT_ST_LOGO) { | 687 | if (rdata->rp_state != RPORT_ST_LOGO) { |
702 | FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n", | 688 | FC_RPORT_DBG(rport, "Received a LOGO response, but in state " |
703 | fc_rport_state(rport)); | 689 | "%s\n", fc_rport_state(rport)); |
704 | if (IS_ERR(fp)) | 690 | if (IS_ERR(fp)) |
705 | goto err; | 691 | goto err; |
706 | goto out; | 692 | goto out; |
@@ -715,7 +701,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
715 | if (op == ELS_LS_ACC) { | 701 | if (op == ELS_LS_ACC) { |
716 | fc_rport_enter_rtv(rport); | 702 | fc_rport_enter_rtv(rport); |
717 | } else { | 703 | } else { |
718 | FC_DBG("Bad ELS response\n"); | 704 | FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); |
719 | rdata->event = RPORT_EV_LOGO; | 705 | rdata->event = RPORT_EV_LOGO; |
720 | fc_rport_state_enter(rport, RPORT_ST_NONE); | 706 | fc_rport_state_enter(rport, RPORT_ST_NONE); |
721 | queue_work(rport_event_queue, &rdata->event_work); | 707 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -745,8 +731,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport) | |||
745 | } *pp; | 731 | } *pp; |
746 | struct fc_frame *fp; | 732 | struct fc_frame *fp; |
747 | 733 | ||
748 | FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n", | 734 | FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n", |
749 | rport->port_id, fc_rport_state(rport)); | 735 | fc_rport_state(rport)); |
750 | 736 | ||
751 | fc_rport_state_enter(rport, RPORT_ST_PRLI); | 737 | fc_rport_state_enter(rport, RPORT_ST_PRLI); |
752 | 738 | ||
@@ -784,12 +770,11 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
784 | 770 | ||
785 | mutex_lock(&rdata->rp_mutex); | 771 | mutex_lock(&rdata->rp_mutex); |
786 | 772 | ||
787 | FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n", | 773 | FC_RPORT_DBG(rport, "Received a RTV response\n"); |
788 | rport->port_id); | ||
789 | 774 | ||
790 | if (rdata->rp_state != RPORT_ST_RTV) { | 775 | if (rdata->rp_state != RPORT_ST_RTV) { |
791 | FC_DBG("Received a RTV response, but in state %s\n", | 776 | FC_RPORT_DBG(rport, "Received a RTV response, but in state " |
792 | fc_rport_state(rport)); | 777 | "%s\n", fc_rport_state(rport)); |
793 | if (IS_ERR(fp)) | 778 | if (IS_ERR(fp)) |
794 | goto err; | 779 | goto err; |
795 | goto out; | 780 | goto out; |
@@ -844,8 +829,8 @@ static void fc_rport_enter_rtv(struct fc_rport *rport) | |||
844 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 829 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
845 | struct fc_lport *lport = rdata->local_port; | 830 | struct fc_lport *lport = rdata->local_port; |
846 | 831 | ||
847 | FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n", | 832 | FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n", |
848 | rport->port_id, fc_rport_state(rport)); | 833 | fc_rport_state(rport)); |
849 | 834 | ||
850 | fc_rport_state_enter(rport, RPORT_ST_RTV); | 835 | fc_rport_state_enter(rport, RPORT_ST_RTV); |
851 | 836 | ||
@@ -875,8 +860,8 @@ static void fc_rport_enter_logo(struct fc_rport *rport) | |||
875 | struct fc_lport *lport = rdata->local_port; | 860 | struct fc_lport *lport = rdata->local_port; |
876 | struct fc_frame *fp; | 861 | struct fc_frame *fp; |
877 | 862 | ||
878 | FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n", | 863 | FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n", |
879 | rport->port_id, fc_rport_state(rport)); | 864 | fc_rport_state(rport)); |
880 | 865 | ||
881 | fc_rport_state_enter(rport, RPORT_ST_LOGO); | 866 | fc_rport_state_enter(rport, RPORT_ST_LOGO); |
882 | 867 | ||
@@ -983,14 +968,13 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
983 | 968 | ||
984 | fh = fc_frame_header_get(fp); | 969 | fh = fc_frame_header_get(fp); |
985 | 970 | ||
986 | FC_DEBUG_RPORT("Received PLOGI request from port (%6x) " | 971 | FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n", |
987 | "while in state %s\n", ntoh24(fh->fh_s_id), | 972 | fc_rport_state(rport)); |
988 | fc_rport_state(rport)); | ||
989 | 973 | ||
990 | sid = ntoh24(fh->fh_s_id); | 974 | sid = ntoh24(fh->fh_s_id); |
991 | pl = fc_frame_payload_get(fp, sizeof(*pl)); | 975 | pl = fc_frame_payload_get(fp, sizeof(*pl)); |
992 | if (!pl) { | 976 | if (!pl) { |
993 | FC_DBG("incoming PLOGI from %x too short\n", sid); | 977 | FC_RPORT_DBG(rport, "Received PLOGI too short\n"); |
994 | WARN_ON(1); | 978 | WARN_ON(1); |
995 | /* XXX TBD: send reject? */ | 979 | /* XXX TBD: send reject? */ |
996 | fc_frame_free(fp); | 980 | fc_frame_free(fp); |
@@ -1012,26 +996,26 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
1012 | */ | 996 | */ |
1013 | switch (rdata->rp_state) { | 997 | switch (rdata->rp_state) { |
1014 | case RPORT_ST_INIT: | 998 | case RPORT_ST_INIT: |
1015 | FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT " | 999 | FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT " |
1016 | "- reject\n", sid, (unsigned long long)wwpn); | 1000 | "- reject\n", (unsigned long long)wwpn); |
1017 | reject = ELS_RJT_UNSUP; | 1001 | reject = ELS_RJT_UNSUP; |
1018 | break; | 1002 | break; |
1019 | case RPORT_ST_PLOGI: | 1003 | case RPORT_ST_PLOGI: |
1020 | FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n", | 1004 | FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n", |
1021 | sid, rdata->rp_state); | 1005 | rdata->rp_state); |
1022 | if (wwpn < lport->wwpn) | 1006 | if (wwpn < lport->wwpn) |
1023 | reject = ELS_RJT_INPROG; | 1007 | reject = ELS_RJT_INPROG; |
1024 | break; | 1008 | break; |
1025 | case RPORT_ST_PRLI: | 1009 | case RPORT_ST_PRLI: |
1026 | case RPORT_ST_READY: | 1010 | case RPORT_ST_READY: |
1027 | FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d " | 1011 | FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d " |
1028 | "- ignored for now\n", sid, rdata->rp_state); | 1012 | "- ignored for now\n", rdata->rp_state); |
1029 | /* XXX TBD - should reset */ | 1013 | /* XXX TBD - should reset */ |
1030 | break; | 1014 | break; |
1031 | case RPORT_ST_NONE: | 1015 | case RPORT_ST_NONE: |
1032 | default: | 1016 | default: |
1033 | FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected " | 1017 | FC_RPORT_DBG(rport, "Received PLOGI in unexpected " |
1034 | "state %d\n", sid, rdata->rp_state); | 1018 | "state %d\n", rdata->rp_state); |
1035 | fc_frame_free(fp); | 1019 | fc_frame_free(fp); |
1036 | return; | 1020 | return; |
1037 | break; | 1021 | break; |
@@ -1115,9 +1099,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, | |||
1115 | 1099 | ||
1116 | fh = fc_frame_header_get(rx_fp); | 1100 | fh = fc_frame_header_get(rx_fp); |
1117 | 1101 | ||
1118 | FC_DEBUG_RPORT("Received PRLI request from port (%6x) " | 1102 | FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n", |
1119 | "while in state %s\n", ntoh24(fh->fh_s_id), | 1103 | fc_rport_state(rport)); |
1120 | fc_rport_state(rport)); | ||
1121 | 1104 | ||
1122 | switch (rdata->rp_state) { | 1105 | switch (rdata->rp_state) { |
1123 | case RPORT_ST_PRLI: | 1106 | case RPORT_ST_PRLI: |
@@ -1252,9 +1235,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1252 | 1235 | ||
1253 | fh = fc_frame_header_get(fp); | 1236 | fh = fc_frame_header_get(fp); |
1254 | 1237 | ||
1255 | FC_DEBUG_RPORT("Received PRLO request from port (%6x) " | 1238 | FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n", |
1256 | "while in state %s\n", ntoh24(fh->fh_s_id), | 1239 | fc_rport_state(rport)); |
1257 | fc_rport_state(rport)); | ||
1258 | 1240 | ||
1259 | if (rdata->rp_state == RPORT_ST_NONE) { | 1241 | if (rdata->rp_state == RPORT_ST_NONE) { |
1260 | fc_frame_free(fp); | 1242 | fc_frame_free(fp); |
@@ -1286,9 +1268,8 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1286 | 1268 | ||
1287 | fh = fc_frame_header_get(fp); | 1269 | fh = fc_frame_header_get(fp); |
1288 | 1270 | ||
1289 | FC_DEBUG_RPORT("Received LOGO request from port (%6x) " | 1271 | FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n", |
1290 | "while in state %s\n", ntoh24(fh->fh_s_id), | 1272 | fc_rport_state(rport)); |
1291 | fc_rport_state(rport)); | ||
1292 | 1273 | ||
1293 | if (rdata->rp_state == RPORT_ST_NONE) { | 1274 | if (rdata->rp_state == RPORT_ST_NONE) { |
1294 | fc_frame_free(fp); | 1275 | fc_frame_free(fp); |
@@ -1308,7 +1289,6 @@ static void fc_rport_flush_queue(void) | |||
1308 | flush_workqueue(rport_event_queue); | 1289 | flush_workqueue(rport_event_queue); |
1309 | } | 1290 | } |
1310 | 1291 | ||
1311 | |||
1312 | int fc_rport_init(struct fc_lport *lport) | 1292 | int fc_rport_init(struct fc_lport *lport) |
1313 | { | 1293 | { |
1314 | if (!lport->tt.rport_create) | 1294 | if (!lport->tt.rport_create) |
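
fc_rport.c gets the same treatment: the file-local fc_rport_debug flag and the FC_DEBUG_RPORT() wrapper are removed, and FC_RPORT_DBG() takes the rport itself, which is why the call sites above no longer pass rport->port_id (or the lport id) in their format strings. A sketch of how such a macro could derive that context on its own follows; as in the fc_fcp.c note, the flag and bit names are assumptions.

    /* Hypothetical sketch, mirroring the FC_FCP_DBG example earlier. */
    #define FC_RPORT_LOGGING  0x08                 /* assumed bit for rport state */

    #define FC_RPORT_DBG(rport, fmt, args...)                               \
            do {                                                            \
                    struct fc_rport_libfc_priv *_rd = (rport)->dd_data;     \
                    if (fc_debug_logging & FC_RPORT_LOGGING)                \
                            printk(KERN_INFO "host%u: rport %6x: " fmt,     \
                                   _rd->local_port->host->host_no,          \
                                   (rport)->port_id, ##args);               \
            } while (0)
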
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 59908aead531..716cc344c5df 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -38,15 +38,30 @@ | |||
38 | #include <scsi/scsi_transport_iscsi.h> | 38 | #include <scsi/scsi_transport_iscsi.h> |
39 | #include <scsi/libiscsi.h> | 39 | #include <scsi/libiscsi.h> |
40 | 40 | ||
41 | static int iscsi_dbg_lib; | 41 | static int iscsi_dbg_lib_conn; |
42 | module_param_named(debug_libiscsi, iscsi_dbg_lib, int, S_IRUGO | S_IWUSR); | 42 | module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int, |
43 | MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. " | 43 | S_IRUGO | S_IWUSR); |
44 | "Set to 1 to turn on, and zero to turn off. Default " | 44 | MODULE_PARM_DESC(debug_libiscsi_conn, |
45 | "is off."); | 45 | "Turn on debugging for connections in libiscsi module. " |
46 | "Set to 1 to turn on, and zero to turn off. Default is off."); | ||
47 | |||
48 | static int iscsi_dbg_lib_session; | ||
49 | module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int, | ||
50 | S_IRUGO | S_IWUSR); | ||
51 | MODULE_PARM_DESC(debug_libiscsi_session, | ||
52 | "Turn on debugging for sessions in libiscsi module. " | ||
53 | "Set to 1 to turn on, and zero to turn off. Default is off."); | ||
54 | |||
55 | static int iscsi_dbg_lib_eh; | ||
56 | module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int, | ||
57 | S_IRUGO | S_IWUSR); | ||
58 | MODULE_PARM_DESC(debug_libiscsi_eh, | ||
59 | "Turn on debugging for error handling in libiscsi module. " | ||
60 | "Set to 1 to turn on, and zero to turn off. Default is off."); | ||
46 | 61 | ||
47 | #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ | 62 | #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ |
48 | do { \ | 63 | do { \ |
49 | if (iscsi_dbg_lib) \ | 64 | if (iscsi_dbg_lib_conn) \ |
50 | iscsi_conn_printk(KERN_INFO, _conn, \ | 65 | iscsi_conn_printk(KERN_INFO, _conn, \ |
51 | "%s " dbg_fmt, \ | 66 | "%s " dbg_fmt, \ |
52 | __func__, ##arg); \ | 67 | __func__, ##arg); \ |
@@ -54,7 +69,15 @@ MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. " | |||
54 | 69 | ||
55 | #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ | 70 | #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ |
56 | do { \ | 71 | do { \ |
57 | if (iscsi_dbg_lib) \ | 72 | if (iscsi_dbg_lib_session) \ |
73 | iscsi_session_printk(KERN_INFO, _session, \ | ||
74 | "%s " dbg_fmt, \ | ||
75 | __func__, ##arg); \ | ||
76 | } while (0); | ||
77 | |||
78 | #define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \ | ||
79 | do { \ | ||
80 | if (iscsi_dbg_lib_eh) \ | ||
58 | iscsi_session_printk(KERN_INFO, _session, \ | 81 | iscsi_session_printk(KERN_INFO, _session, \ |
59 | "%s " dbg_fmt, \ | 82 | "%s " dbg_fmt, \ |
60 | __func__, ##arg); \ | 83 | __func__, ##arg); \ |
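
The single debug_libiscsi switch is split into three knobs (debug_libiscsi_conn, debug_libiscsi_session, debug_libiscsi_eh); because they are declared with S_IRUGO | S_IWUSR they can be flipped at runtime through /sys/module/libiscsi/parameters/ as well as at load time. Error-handling paths now log through the new ISCSI_DBG_EH() macro so they can be traced without enabling the much noisier connection and session output. A hypothetical caller, just to show which knob gates the message:

    /* Illustrative only -- not a function from the patch. */
    static void example_eh_trace(struct iscsi_session *session, int rc)
    {
            ISCSI_DBG_EH(session, "error-handler step finished, rc %d\n", rc);
    }
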
@@ -954,6 +977,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
954 | task = iscsi_itt_to_ctask(conn, hdr->itt); | 977 | task = iscsi_itt_to_ctask(conn, hdr->itt); |
955 | if (!task) | 978 | if (!task) |
956 | return ISCSI_ERR_BAD_ITT; | 979 | return ISCSI_ERR_BAD_ITT; |
980 | task->last_xfer = jiffies; | ||
957 | break; | 981 | break; |
958 | case ISCSI_OP_R2T: | 982 | case ISCSI_OP_R2T: |
959 | /* | 983 | /* |
@@ -1192,10 +1216,12 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) | |||
1192 | spin_unlock_bh(&conn->session->lock); | 1216 | spin_unlock_bh(&conn->session->lock); |
1193 | rc = conn->session->tt->xmit_task(task); | 1217 | rc = conn->session->tt->xmit_task(task); |
1194 | spin_lock_bh(&conn->session->lock); | 1218 | spin_lock_bh(&conn->session->lock); |
1195 | __iscsi_put_task(task); | 1219 | if (!rc) { |
1196 | if (!rc) | ||
1197 | /* done with this task */ | 1220 | /* done with this task */ |
1221 | task->last_xfer = jiffies; | ||
1198 | conn->task = NULL; | 1222 | conn->task = NULL; |
1223 | } | ||
1224 | __iscsi_put_task(task); | ||
1199 | return rc; | 1225 | return rc; |
1200 | } | 1226 | } |
1201 | 1227 | ||
@@ -1361,6 +1387,9 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, | |||
1361 | task->state = ISCSI_TASK_PENDING; | 1387 | task->state = ISCSI_TASK_PENDING; |
1362 | task->conn = conn; | 1388 | task->conn = conn; |
1363 | task->sc = sc; | 1389 | task->sc = sc; |
1390 | task->have_checked_conn = false; | ||
1391 | task->last_timeout = jiffies; | ||
1392 | task->last_xfer = jiffies; | ||
1364 | INIT_LIST_HEAD(&task->running); | 1393 | INIT_LIST_HEAD(&task->running); |
1365 | return task; | 1394 | return task; |
1366 | } | 1395 | } |
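
The initialisation above uses three per-task fields that this commit adds to struct iscsi_task in the iSCSI headers, which lie outside this section; their assumed shape, inferred from how the code here reads and writes them, is roughly:

    /* Assumed additions to struct iscsi_task (header diff not shown here). */
    struct iscsi_task {
            /* ... existing fields ... */
            unsigned long   last_xfer;          /* jiffies of the last send/recv for this task */
            unsigned long   last_timeout;       /* jiffies when the command timeout handler last ran */
            bool            have_checked_conn;  /* a nopout has already probed the connection */
            /* ... */
    };
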
@@ -1555,10 +1584,10 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc) | |||
1555 | spin_lock_bh(&session->lock); | 1584 | spin_lock_bh(&session->lock); |
1556 | if (session->state == ISCSI_STATE_TERMINATE) { | 1585 | if (session->state == ISCSI_STATE_TERMINATE) { |
1557 | failed: | 1586 | failed: |
1558 | iscsi_session_printk(KERN_INFO, session, | 1587 | ISCSI_DBG_EH(session, |
1559 | "failing target reset: Could not log " | 1588 | "failing target reset: Could not log back into " |
1560 | "back into target [age %d]\n", | 1589 | "target [age %d]\n", |
1561 | session->age); | 1590 | session->age); |
1562 | spin_unlock_bh(&session->lock); | 1591 | spin_unlock_bh(&session->lock); |
1563 | mutex_unlock(&session->eh_mutex); | 1592 | mutex_unlock(&session->eh_mutex); |
1564 | return FAILED; | 1593 | return FAILED; |
@@ -1572,7 +1601,7 @@ failed: | |||
1572 | */ | 1601 | */ |
1573 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1602 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1574 | 1603 | ||
1575 | ISCSI_DBG_SESSION(session, "wait for relogin\n"); | 1604 | ISCSI_DBG_EH(session, "wait for relogin\n"); |
1576 | wait_event_interruptible(conn->ehwait, | 1605 | wait_event_interruptible(conn->ehwait, |
1577 | session->state == ISCSI_STATE_TERMINATE || | 1606 | session->state == ISCSI_STATE_TERMINATE || |
1578 | session->state == ISCSI_STATE_LOGGED_IN || | 1607 | session->state == ISCSI_STATE_LOGGED_IN || |
@@ -1582,10 +1611,10 @@ failed: | |||
1582 | 1611 | ||
1583 | mutex_lock(&session->eh_mutex); | 1612 | mutex_lock(&session->eh_mutex); |
1584 | spin_lock_bh(&session->lock); | 1613 | spin_lock_bh(&session->lock); |
1585 | if (session->state == ISCSI_STATE_LOGGED_IN) | 1614 | if (session->state == ISCSI_STATE_LOGGED_IN) { |
1586 | iscsi_session_printk(KERN_INFO, session, | 1615 | ISCSI_DBG_EH(session, |
1587 | "target reset succeeded\n"); | 1616 | "target reset succeeded\n"); |
1588 | else | 1617 | } else |
1589 | goto failed; | 1618 | goto failed; |
1590 | spin_unlock_bh(&session->lock); | 1619 | spin_unlock_bh(&session->lock); |
1591 | mutex_unlock(&session->eh_mutex); | 1620 | mutex_unlock(&session->eh_mutex); |
@@ -1601,7 +1630,7 @@ static void iscsi_tmf_timedout(unsigned long data) | |||
1601 | spin_lock(&session->lock); | 1630 | spin_lock(&session->lock); |
1602 | if (conn->tmf_state == TMF_QUEUED) { | 1631 | if (conn->tmf_state == TMF_QUEUED) { |
1603 | conn->tmf_state = TMF_TIMEDOUT; | 1632 | conn->tmf_state = TMF_TIMEDOUT; |
1604 | ISCSI_DBG_SESSION(session, "tmf timedout\n"); | 1633 | ISCSI_DBG_EH(session, "tmf timedout\n"); |
1605 | /* unblock eh_abort() */ | 1634 | /* unblock eh_abort() */ |
1606 | wake_up(&conn->ehwait); | 1635 | wake_up(&conn->ehwait); |
1607 | } | 1636 | } |
@@ -1621,7 +1650,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, | |||
1621 | spin_unlock_bh(&session->lock); | 1650 | spin_unlock_bh(&session->lock); |
1622 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1651 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1623 | spin_lock_bh(&session->lock); | 1652 | spin_lock_bh(&session->lock); |
1624 | ISCSI_DBG_SESSION(session, "tmf exec failure\n"); | 1653 | ISCSI_DBG_EH(session, "tmf exec failure\n"); |
1625 | return -EPERM; | 1654 | return -EPERM; |
1626 | } | 1655 | } |
1627 | conn->tmfcmd_pdus_cnt++; | 1656 | conn->tmfcmd_pdus_cnt++; |
@@ -1629,7 +1658,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, | |||
1629 | conn->tmf_timer.function = iscsi_tmf_timedout; | 1658 | conn->tmf_timer.function = iscsi_tmf_timedout; |
1630 | conn->tmf_timer.data = (unsigned long)conn; | 1659 | conn->tmf_timer.data = (unsigned long)conn; |
1631 | add_timer(&conn->tmf_timer); | 1660 | add_timer(&conn->tmf_timer); |
1632 | ISCSI_DBG_SESSION(session, "tmf set timeout\n"); | 1661 | ISCSI_DBG_EH(session, "tmf set timeout\n"); |
1633 | 1662 | ||
1634 | spin_unlock_bh(&session->lock); | 1663 | spin_unlock_bh(&session->lock); |
1635 | mutex_unlock(&session->eh_mutex); | 1664 | mutex_unlock(&session->eh_mutex); |
@@ -1716,17 +1745,18 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) | |||
1716 | return 0; | 1745 | return 0; |
1717 | } | 1746 | } |
1718 | 1747 | ||
1719 | static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | 1748 | static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) |
1720 | { | 1749 | { |
1750 | enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; | ||
1751 | struct iscsi_task *task = NULL; | ||
1721 | struct iscsi_cls_session *cls_session; | 1752 | struct iscsi_cls_session *cls_session; |
1722 | struct iscsi_session *session; | 1753 | struct iscsi_session *session; |
1723 | struct iscsi_conn *conn; | 1754 | struct iscsi_conn *conn; |
1724 | enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; | ||
1725 | 1755 | ||
1726 | cls_session = starget_to_session(scsi_target(scmd->device)); | 1756 | cls_session = starget_to_session(scsi_target(sc->device)); |
1727 | session = cls_session->dd_data; | 1757 | session = cls_session->dd_data; |
1728 | 1758 | ||
1729 | ISCSI_DBG_SESSION(session, "scsi cmd %p timedout\n", scmd); | 1759 | ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); |
1730 | 1760 | ||
1731 | spin_lock(&session->lock); | 1761 | spin_lock(&session->lock); |
1732 | if (session->state != ISCSI_STATE_LOGGED_IN) { | 1762 | if (session->state != ISCSI_STATE_LOGGED_IN) { |
@@ -1745,6 +1775,26 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | |||
1745 | goto done; | 1775 | goto done; |
1746 | } | 1776 | } |
1747 | 1777 | ||
1778 | task = (struct iscsi_task *)sc->SCp.ptr; | ||
1779 | if (!task) | ||
1780 | goto done; | ||
1781 | /* | ||
1782 | * If we have sent (at least queued to the network layer) a pdu or | ||
1783 | * recvd one for the task since the last timeout ask for | ||
1784 | * more time. If on the next timeout we have not made progress | ||
1785 | * we can check if it is the task or connection when we send the | ||
1786 | * nop as a ping. | ||
1787 | */ | ||
1788 | if (time_after_eq(task->last_xfer, task->last_timeout)) { | ||
1789 | ISCSI_DBG_EH(session, "Command making progress. Asking " | ||
1790 | "scsi-ml for more time to complete. " | ||
1791 | "Last data recv at %lu. Last timeout was at " | ||
1792 | "%lu\n.", task->last_xfer, task->last_timeout); | ||
1793 | task->have_checked_conn = false; | ||
1794 | rc = BLK_EH_RESET_TIMER; | ||
1795 | goto done; | ||
1796 | } | ||
1797 | |||
1748 | if (!conn->recv_timeout && !conn->ping_timeout) | 1798 | if (!conn->recv_timeout && !conn->ping_timeout) |
1749 | goto done; | 1799 | goto done; |
1750 | /* | 1800 | /* |
@@ -1755,23 +1805,32 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | |||
1755 | rc = BLK_EH_RESET_TIMER; | 1805 | rc = BLK_EH_RESET_TIMER; |
1756 | goto done; | 1806 | goto done; |
1757 | } | 1807 | } |
1808 | |||
1809 | /* Assumes nop timeout is shorter than scsi cmd timeout */ | ||
1810 | if (task->have_checked_conn) | ||
1811 | goto done; | ||
1812 | |||
1758 | /* | 1813 | /* |
1759 | * if we are about to check the transport then give the command | 1814 | * Checking the transport already or nop from a cmd timeout still |
1760 | * more time | 1815 | * running |
1761 | */ | 1816 | */ |
1762 | if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), | 1817 | if (conn->ping_task) { |
1763 | jiffies)) { | 1818 | task->have_checked_conn = true; |
1764 | rc = BLK_EH_RESET_TIMER; | 1819 | rc = BLK_EH_RESET_TIMER; |
1765 | goto done; | 1820 | goto done; |
1766 | } | 1821 | } |
1767 | 1822 | ||
1768 | /* if in the middle of checking the transport then give us more time */ | 1823 | /* Make sure there is a transport check done */ |
1769 | if (conn->ping_task) | 1824 | iscsi_send_nopout(conn, NULL); |
1770 | rc = BLK_EH_RESET_TIMER; | 1825 | task->have_checked_conn = true; |
1826 | rc = BLK_EH_RESET_TIMER; | ||
1827 | |||
1771 | done: | 1828 | done: |
1829 | if (task) | ||
1830 | task->last_timeout = jiffies; | ||
1772 | spin_unlock(&session->lock); | 1831 | spin_unlock(&session->lock); |
1773 | ISCSI_DBG_SESSION(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? | 1832 | ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? |
1774 | "timer reset" : "nh"); | 1833 | "timer reset" : "nh"); |
1775 | return rc; | 1834 | return rc; |
1776 | } | 1835 | } |
1777 | 1836 | ||
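The reworked iscsi_eh_cmd_timed_out() above reduces to a small decision tree: if the task has moved data since the last timeout, ask the midlayer for more time; otherwise issue (or wait on) one NOP-Out probe per timeout period before letting error handling escalate. Below is a minimal, runnable sketch of that decision logic with the jiffies bookkeeping replaced by plain counters; all names are illustrative and the early exits on recv/ping timeouts are trimmed.

    #include <stdbool.h>
    #include <stdio.h>

    enum eh_verdict { EH_NOT_HANDLED, EH_RESET_TIMER };

    struct demo_task {
        unsigned long last_xfer;     /* last PDU sent or received for the task */
        unsigned long last_timeout;  /* last time the timeout handler ran */
        bool have_checked_conn;      /* already probed the connection? */
    };

    struct demo_conn {
        bool ping_outstanding;       /* a NOP-Out is already in flight */
    };

    /* Mirrors the order of checks in the patched timeout handler. */
    static enum eh_verdict cmd_timed_out(struct demo_task *t, struct demo_conn *c,
                                         unsigned long now)
    {
        enum eh_verdict rc = EH_NOT_HANDLED;

        if (t->last_xfer >= t->last_timeout) {     /* kernel uses time_after_eq() */
            t->have_checked_conn = false;          /* progress: just wait longer */
            rc = EH_RESET_TIMER;
        } else if (!t->have_checked_conn) {
            if (!c->ping_outstanding)
                c->ping_outstanding = true;        /* real code sends a NOP-Out */
            t->have_checked_conn = true;
            rc = EH_RESET_TIMER;
        }
        t->last_timeout = now;                     /* stamp every handler run */
        return rc;
    }

    int main(void)
    {
        struct demo_task t = { .last_xfer = 90, .last_timeout = 50 };
        struct demo_conn c = { .ping_outstanding = false };

        printf("%d\n", cmd_timed_out(&t, &c, 100));   /* 1: progress seen */
        printf("%d\n", cmd_timed_out(&t, &c, 150));   /* 1: probe sent, wait */
        printf("%d\n", cmd_timed_out(&t, &c, 200));   /* 0: probe already done */
        return 0;
    }

Run as-is this prints 1, 1, 0: the timer is reset while progress is seen, reset once more while the connection probe is outstanding, and only then is the command handed back to the SCSI error handler.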
@@ -1841,7 +1900,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1841 | cls_session = starget_to_session(scsi_target(sc->device)); | 1900 | cls_session = starget_to_session(scsi_target(sc->device)); |
1842 | session = cls_session->dd_data; | 1901 | session = cls_session->dd_data; |
1843 | 1902 | ||
1844 | ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc); | 1903 | ISCSI_DBG_EH(session, "aborting sc %p\n", sc); |
1845 | 1904 | ||
1846 | mutex_lock(&session->eh_mutex); | 1905 | mutex_lock(&session->eh_mutex); |
1847 | spin_lock_bh(&session->lock); | 1906 | spin_lock_bh(&session->lock); |
@@ -1850,8 +1909,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1850 | * got the command. | 1909 | * got the command. |
1851 | */ | 1910 | */ |
1852 | if (!sc->SCp.ptr) { | 1911 | if (!sc->SCp.ptr) { |
1853 | ISCSI_DBG_SESSION(session, "sc never reached iscsi layer or " | 1912 | ISCSI_DBG_EH(session, "sc never reached iscsi layer or " |
1854 | "it completed.\n"); | 1913 | "it completed.\n"); |
1855 | spin_unlock_bh(&session->lock); | 1914 | spin_unlock_bh(&session->lock); |
1856 | mutex_unlock(&session->eh_mutex); | 1915 | mutex_unlock(&session->eh_mutex); |
1857 | return SUCCESS; | 1916 | return SUCCESS; |
@@ -1865,7 +1924,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1865 | sc->SCp.phase != session->age) { | 1924 | sc->SCp.phase != session->age) { |
1866 | spin_unlock_bh(&session->lock); | 1925 | spin_unlock_bh(&session->lock); |
1867 | mutex_unlock(&session->eh_mutex); | 1926 | mutex_unlock(&session->eh_mutex); |
1868 | ISCSI_DBG_SESSION(session, "failing abort due to dropped " | 1927 | ISCSI_DBG_EH(session, "failing abort due to dropped " |
1869 | "session.\n"); | 1928 | "session.\n"); |
1870 | return FAILED; | 1929 | return FAILED; |
1871 | } | 1930 | } |
@@ -1875,13 +1934,12 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1875 | age = session->age; | 1934 | age = session->age; |
1876 | 1935 | ||
1877 | task = (struct iscsi_task *)sc->SCp.ptr; | 1936 | task = (struct iscsi_task *)sc->SCp.ptr; |
1878 | ISCSI_DBG_SESSION(session, "aborting [sc %p itt 0x%x]\n", | 1937 | ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", |
1879 | sc, task->itt); | 1938 | sc, task->itt); |
1880 | 1939 | ||
1881 | /* task completed before time out */ | 1940 | /* task completed before time out */ |
1882 | if (!task->sc) { | 1941 | if (!task->sc) { |
1883 | ISCSI_DBG_SESSION(session, "sc completed while abort in " | 1942 | ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); |
1884 | "progress\n"); | ||
1885 | goto success; | 1943 | goto success; |
1886 | } | 1944 | } |
1887 | 1945 | ||
@@ -1930,8 +1988,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1930 | if (!sc->SCp.ptr) { | 1988 | if (!sc->SCp.ptr) { |
1931 | conn->tmf_state = TMF_INITIAL; | 1989 | conn->tmf_state = TMF_INITIAL; |
1932 | /* task completed before tmf abort response */ | 1990 | /* task completed before tmf abort response */ |
1933 | ISCSI_DBG_SESSION(session, "sc completed while abort " | 1991 | ISCSI_DBG_EH(session, "sc completed while abort in " |
1934 | "in progress\n"); | 1992 | "progress\n"); |
1935 | goto success; | 1993 | goto success; |
1936 | } | 1994 | } |
1937 | /* fall through */ | 1995 | /* fall through */ |
@@ -1943,16 +2001,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1943 | success: | 2001 | success: |
1944 | spin_unlock_bh(&session->lock); | 2002 | spin_unlock_bh(&session->lock); |
1945 | success_unlocked: | 2003 | success_unlocked: |
1946 | ISCSI_DBG_SESSION(session, "abort success [sc %p itt 0x%x]\n", | 2004 | ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", |
1947 | sc, task->itt); | 2005 | sc, task->itt); |
1948 | mutex_unlock(&session->eh_mutex); | 2006 | mutex_unlock(&session->eh_mutex); |
1949 | return SUCCESS; | 2007 | return SUCCESS; |
1950 | 2008 | ||
1951 | failed: | 2009 | failed: |
1952 | spin_unlock_bh(&session->lock); | 2010 | spin_unlock_bh(&session->lock); |
1953 | failed_unlocked: | 2011 | failed_unlocked: |
1954 | ISCSI_DBG_SESSION(session, "abort failed [sc %p itt 0x%x]\n", sc, | 2012 | ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, |
1955 | task ? task->itt : 0); | 2013 | task ? task->itt : 0); |
1956 | mutex_unlock(&session->eh_mutex); | 2014 | mutex_unlock(&session->eh_mutex); |
1957 | return FAILED; | 2015 | return FAILED; |
1958 | } | 2016 | } |
@@ -1979,8 +2037,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
1979 | cls_session = starget_to_session(scsi_target(sc->device)); | 2037 | cls_session = starget_to_session(scsi_target(sc->device)); |
1980 | session = cls_session->dd_data; | 2038 | session = cls_session->dd_data; |
1981 | 2039 | ||
1982 | ISCSI_DBG_SESSION(session, "LU Reset [sc %p lun %u]\n", | 2040 | ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun); |
1983 | sc, sc->device->lun); | ||
1984 | 2041 | ||
1985 | mutex_lock(&session->eh_mutex); | 2042 | mutex_lock(&session->eh_mutex); |
1986 | spin_lock_bh(&session->lock); | 2043 | spin_lock_bh(&session->lock); |
@@ -2034,8 +2091,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
2034 | unlock: | 2091 | unlock: |
2035 | spin_unlock_bh(&session->lock); | 2092 | spin_unlock_bh(&session->lock); |
2036 | done: | 2093 | done: |
2037 | ISCSI_DBG_SESSION(session, "dev reset result = %s\n", | 2094 | ISCSI_DBG_EH(session, "dev reset result = %s\n", |
2038 | rc == SUCCESS ? "SUCCESS" : "FAILED"); | 2095 | rc == SUCCESS ? "SUCCESS" : "FAILED"); |
2039 | mutex_unlock(&session->eh_mutex); | 2096 | mutex_unlock(&session->eh_mutex); |
2040 | return rc; | 2097 | return rc; |
2041 | } | 2098 | } |
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 2bc07090321d..2e0746d70303 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -686,6 +686,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
686 | "offset=%d, datalen=%d)\n", | 686 | "offset=%d, datalen=%d)\n", |
687 | tcp_task->data_offset, | 687 | tcp_task->data_offset, |
688 | tcp_conn->in.datalen); | 688 | tcp_conn->in.datalen); |
689 | task->last_xfer = jiffies; | ||
689 | rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, | 690 | rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, |
690 | sdb->table.sgl, | 691 | sdb->table.sgl, |
691 | sdb->table.nents, | 692 | sdb->table.nents, |
@@ -713,9 +714,10 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
713 | rc = ISCSI_ERR_BAD_ITT; | 714 | rc = ISCSI_ERR_BAD_ITT; |
714 | else if (ahslen) | 715 | else if (ahslen) |
715 | rc = ISCSI_ERR_AHSLEN; | 716 | rc = ISCSI_ERR_AHSLEN; |
716 | else if (task->sc->sc_data_direction == DMA_TO_DEVICE) | 717 | else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { |
718 | task->last_xfer = jiffies; | ||
717 | rc = iscsi_tcp_r2t_rsp(conn, task); | 719 | rc = iscsi_tcp_r2t_rsp(conn, task); |
718 | else | 720 | } else |
719 | rc = ISCSI_ERR_PROTO; | 721 | rc = ISCSI_ERR_PROTO; |
720 | spin_unlock(&conn->session->lock); | 722 | spin_unlock(&conn->session->lock); |
721 | break; | 723 | break; |
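The two libiscsi_tcp.c hunks are the producer side of the check above: task->last_xfer is stamped with jiffies whenever Data-In payload or an R2T arrives, and the timeout handler compares it against last_timeout with time_after_eq() rather than a plain >=. The distinction matters once jiffies wraps; a small runnable illustration (the helper mimics time_after_eq(), it is not the kernel macro):

    #include <stdio.h>

    /* Signed-difference compare, like the kernel's time_after_eq(), stays
     * correct across counter wraparound where a plain >= does not. */
    static int after_eq(unsigned long a, unsigned long b)
    {
        return (long)(a - b) >= 0;
    }

    int main(void)
    {
        unsigned long last_timeout = (unsigned long)-100;  /* just before wrap */
        unsigned long last_xfer = 50;                      /* just after wrap  */

        printf("plain >= : %d\n", last_xfer >= last_timeout ? 1 : 0); /* 0: misses progress */
        printf("after_eq : %d\n", after_eq(last_xfer, last_timeout)); /* 1: progress seen  */
        return 0;
    }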
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 4a990f4da4ea..cca8e4ab0372 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -216,7 +216,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha) | |||
216 | 216 | ||
217 | static int | 217 | static int |
218 | qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, | 218 | qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, |
219 | uint16_t ram_words, void **nxt) | 219 | uint32_t ram_words, void **nxt) |
220 | { | 220 | { |
221 | int rval; | 221 | int rval; |
222 | uint32_t cnt, stat, timer, words, idx; | 222 | uint32_t cnt, stat, timer, words, idx; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 262026129325..f2ce8e3cc91b 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -2301,7 +2301,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
2301 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; | 2301 | static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; |
2302 | char *link_speed; | 2302 | char *link_speed; |
2303 | int rval; | 2303 | int rval; |
2304 | uint16_t mb[6]; | 2304 | uint16_t mb[4]; |
2305 | struct qla_hw_data *ha = vha->hw; | 2305 | struct qla_hw_data *ha = vha->hw; |
2306 | 2306 | ||
2307 | if (!IS_IIDMA_CAPABLE(ha)) | 2307 | if (!IS_IIDMA_CAPABLE(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 451ece0760b0..fe69f3057671 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -1267,17 +1267,22 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) | |||
1267 | 1267 | ||
1268 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; | 1268 | mcp->mb[0] = MBC_GET_FIRMWARE_STATE; |
1269 | mcp->out_mb = MBX_0; | 1269 | mcp->out_mb = MBX_0; |
1270 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 1270 | if (IS_FWI2_CAPABLE(vha->hw)) |
1271 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
1272 | else | ||
1273 | mcp->in_mb = MBX_1|MBX_0; | ||
1271 | mcp->tov = MBX_TOV_SECONDS; | 1274 | mcp->tov = MBX_TOV_SECONDS; |
1272 | mcp->flags = 0; | 1275 | mcp->flags = 0; |
1273 | rval = qla2x00_mailbox_command(vha, mcp); | 1276 | rval = qla2x00_mailbox_command(vha, mcp); |
1274 | 1277 | ||
1275 | /* Return firmware states. */ | 1278 | /* Return firmware states. */ |
1276 | states[0] = mcp->mb[1]; | 1279 | states[0] = mcp->mb[1]; |
1277 | states[1] = mcp->mb[2]; | 1280 | if (IS_FWI2_CAPABLE(vha->hw)) { |
1278 | states[2] = mcp->mb[3]; | 1281 | states[1] = mcp->mb[2]; |
1279 | states[3] = mcp->mb[4]; | 1282 | states[2] = mcp->mb[3]; |
1280 | states[4] = mcp->mb[5]; | 1283 | states[3] = mcp->mb[4]; |
1284 | states[4] = mcp->mb[5]; | ||
1285 | } | ||
1281 | 1286 | ||
1282 | if (rval != QLA_SUCCESS) { | 1287 | if (rval != QLA_SUCCESS) { |
1283 | /*EMPTY*/ | 1288 | /*EMPTY*/ |
@@ -2697,10 +2702,13 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
2697 | mcp->mb[0] = MBC_PORT_PARAMS; | 2702 | mcp->mb[0] = MBC_PORT_PARAMS; |
2698 | mcp->mb[1] = loop_id; | 2703 | mcp->mb[1] = loop_id; |
2699 | mcp->mb[2] = BIT_0; | 2704 | mcp->mb[2] = BIT_0; |
2700 | mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); | 2705 | if (IS_QLA81XX(vha->hw)) |
2701 | mcp->mb[4] = mcp->mb[5] = 0; | 2706 | mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); |
2702 | mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | 2707 | else |
2703 | mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; | 2708 | mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); |
2709 | mcp->mb[9] = vha->vp_idx; | ||
2710 | mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; | ||
2711 | mcp->in_mb = MBX_3|MBX_1|MBX_0; | ||
2704 | mcp->tov = MBX_TOV_SECONDS; | 2712 | mcp->tov = MBX_TOV_SECONDS; |
2705 | mcp->flags = 0; | 2713 | mcp->flags = 0; |
2706 | rval = qla2x00_mailbox_command(vha, mcp); | 2714 | rval = qla2x00_mailbox_command(vha, mcp); |
@@ -2710,8 +2718,6 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, | |||
2710 | mb[0] = mcp->mb[0]; | 2718 | mb[0] = mcp->mb[0]; |
2711 | mb[1] = mcp->mb[1]; | 2719 | mb[1] = mcp->mb[1]; |
2712 | mb[3] = mcp->mb[3]; | 2720 | mb[3] = mcp->mb[3]; |
2713 | mb[4] = mcp->mb[4]; | ||
2714 | mb[5] = mcp->mb[5]; | ||
2715 | } | 2721 | } |
2716 | 2722 | ||
2717 | if (rval != QLA_SUCCESS) { | 2723 | if (rval != QLA_SUCCESS) { |
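Both qla_mbx.c changes are about which mailbox registers a given chip generation actually implements: non-FWI2 ISPs only report firmware state in mb[1], and only QLA81xx parts understand the extended iIDMA speed bits (plus the vp_idx passed in mb[9]). A tiny runnable sketch of how the in_mb register mask is built per generation; MBX() below is a stand-in for the driver's MBX_n constants.

    #include <stdbool.h>
    #include <stdio.h>

    #define MBX(n) (1u << (n))

    /* Which mailbox registers GET_FIRMWARE_STATE is asked to return. */
    static unsigned fw_state_in_mb(bool fwi2_capable)
    {
        if (fwi2_capable)
            return MBX(5) | MBX(4) | MBX(3) | MBX(2) | MBX(1) | MBX(0);
        return MBX(1) | MBX(0);   /* legacy ISPs: state lives in mb[1] only */
    }

    int main(void)
    {
        printf("FWI2 mask:   0x%02x\n", fw_state_in_mb(true));   /* 0x3f */
        printf("legacy mask: 0x%02x\n", fw_state_in_mb(false));  /* 0x03 */
        return 0;
    }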
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dcf011679c8b..f0396e79b6fa 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1663,7 +1663,7 @@ skip_pio: | |||
1663 | /* queue 0 uses two msix vectors */ | 1663 | /* queue 0 uses two msix vectors */ |
1664 | if (ql2xmultique_tag) { | 1664 | if (ql2xmultique_tag) { |
1665 | cpus = num_online_cpus(); | 1665 | cpus = num_online_cpus(); |
1666 | ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ? | 1666 | ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? |
1667 | (cpus + 1) : (ha->msix_count - 1); | 1667 | (cpus + 1) : (ha->msix_count - 1); |
1668 | ha->max_req_queues = 2; | 1668 | ha->max_req_queues = 2; |
1669 | } else if (ql2xmaxqueues > 1) { | 1669 | } else if (ql2xmaxqueues > 1) { |
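The qla_os.c one-liner fixes response-queue sizing when ql2xmultique_tag is set: the old expression treated any non-zero difference between MSI-X vectors and CPUs as "plenty of vectors", so a vector deficit still requested cpus + 1 queues. A short runnable comparison of the two expressions with made-up numbers:

    #include <stdio.h>

    /* Old: truthiness of (msix_count - 1 - cpus); new: an explicit '>' compare. */
    static int old_max_rsp_queues(int msix_count, int cpus)
    {
        return (msix_count - 1 - cpus) ? (cpus + 1) : (msix_count - 1);
    }

    static int new_max_rsp_queues(int msix_count, int cpus)
    {
        return (msix_count - 1 > cpus) ? (cpus + 1) : (msix_count - 1);
    }

    int main(void)
    {
        /* 8 vectors, 16 CPUs: only 7 queues fit, but the old code picked 17. */
        printf("old=%d new=%d\n", old_max_rsp_queues(8, 16),
               new_max_rsp_queues(8, 16));
        /* 32 vectors, 16 CPUs: both agree on cpus + 1. */
        printf("old=%d new=%d\n", old_max_rsp_queues(32, 16),
               new_max_rsp_queues(32, 16));
        return 0;
    }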
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index b63feaf43126..84369705a9ad 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.01-k3" | 10 | #define QLA2XXX_VERSION "8.03.01-k4" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 41a21772df12..fb9af207d61d 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -101,6 +101,8 @@ static const char * scsi_debug_version_date = "20070104"; | |||
101 | #define DEF_DIF 0 | 101 | #define DEF_DIF 0 |
102 | #define DEF_GUARD 0 | 102 | #define DEF_GUARD 0 |
103 | #define DEF_ATO 1 | 103 | #define DEF_ATO 1 |
104 | #define DEF_PHYSBLK_EXP 0 | ||
105 | #define DEF_LOWEST_ALIGNED 0 | ||
104 | 106 | ||
105 | /* bit mask values for scsi_debug_opts */ | 107 | /* bit mask values for scsi_debug_opts */ |
106 | #define SCSI_DEBUG_OPT_NOISE 1 | 108 | #define SCSI_DEBUG_OPT_NOISE 1 |
@@ -156,6 +158,8 @@ static int scsi_debug_dix = DEF_DIX; | |||
156 | static int scsi_debug_dif = DEF_DIF; | 158 | static int scsi_debug_dif = DEF_DIF; |
157 | static int scsi_debug_guard = DEF_GUARD; | 159 | static int scsi_debug_guard = DEF_GUARD; |
158 | static int scsi_debug_ato = DEF_ATO; | 160 | static int scsi_debug_ato = DEF_ATO; |
161 | static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; | ||
162 | static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; | ||
159 | 163 | ||
160 | static int scsi_debug_cmnd_count = 0; | 164 | static int scsi_debug_cmnd_count = 0; |
161 | 165 | ||
@@ -657,7 +661,12 @@ static unsigned char vpdb0_data[] = { | |||
657 | 661 | ||
658 | static int inquiry_evpd_b0(unsigned char * arr) | 662 | static int inquiry_evpd_b0(unsigned char * arr) |
659 | { | 663 | { |
664 | unsigned int gran; | ||
665 | |||
660 | memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); | 666 | memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); |
667 | gran = 1 << scsi_debug_physblk_exp; | ||
668 | arr[2] = (gran >> 8) & 0xff; | ||
669 | arr[3] = gran & 0xff; | ||
661 | if (sdebug_store_sectors > 0x400) { | 670 | if (sdebug_store_sectors > 0x400) { |
662 | arr[4] = (sdebug_store_sectors >> 24) & 0xff; | 671 | arr[4] = (sdebug_store_sectors >> 24) & 0xff; |
663 | arr[5] = (sdebug_store_sectors >> 16) & 0xff; | 672 | arr[5] = (sdebug_store_sectors >> 16) & 0xff; |
@@ -945,6 +954,9 @@ static int resp_readcap16(struct scsi_cmnd * scp, | |||
945 | arr[9] = (scsi_debug_sector_size >> 16) & 0xff; | 954 | arr[9] = (scsi_debug_sector_size >> 16) & 0xff; |
946 | arr[10] = (scsi_debug_sector_size >> 8) & 0xff; | 955 | arr[10] = (scsi_debug_sector_size >> 8) & 0xff; |
947 | arr[11] = scsi_debug_sector_size & 0xff; | 956 | arr[11] = scsi_debug_sector_size & 0xff; |
957 | arr[13] = scsi_debug_physblk_exp & 0xf; | ||
958 | arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; | ||
959 | arr[15] = scsi_debug_lowest_aligned & 0xff; | ||
948 | 960 | ||
949 | if (scsi_debug_dif) { | 961 | if (scsi_debug_dif) { |
950 | arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ | 962 | arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ |
@@ -2380,6 +2392,8 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO); | |||
2380 | module_param_named(dif, scsi_debug_dif, int, S_IRUGO); | 2392 | module_param_named(dif, scsi_debug_dif, int, S_IRUGO); |
2381 | module_param_named(guard, scsi_debug_guard, int, S_IRUGO); | 2393 | module_param_named(guard, scsi_debug_guard, int, S_IRUGO); |
2382 | module_param_named(ato, scsi_debug_ato, int, S_IRUGO); | 2394 | module_param_named(ato, scsi_debug_ato, int, S_IRUGO); |
2395 | module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); | ||
2396 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); | ||
2383 | 2397 | ||
2384 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); | 2398 | MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); |
2385 | MODULE_DESCRIPTION("SCSI debug adapter driver"); | 2399 | MODULE_DESCRIPTION("SCSI debug adapter driver"); |
@@ -2401,7 +2415,9 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); | |||
2401 | MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); | 2415 | MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); |
2402 | MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); | 2416 | MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); |
2403 | MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); | 2417 | MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); |
2404 | MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)"); | 2418 | MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); |
2419 | MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); | ||
2420 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); | ||
2405 | MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); | 2421 | MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); |
2406 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); | 2422 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); |
2407 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); | 2423 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); |
@@ -2874,6 +2890,18 @@ static int __init scsi_debug_init(void) | |||
2874 | return -EINVAL; | 2890 | return -EINVAL; |
2875 | } | 2891 | } |
2876 | 2892 | ||
2893 | if (scsi_debug_physblk_exp > 15) { | ||
2894 | printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n", | ||
2895 | scsi_debug_physblk_exp); | ||
2896 | return -EINVAL; | ||
2897 | } | ||
2898 | |||
2899 | if (scsi_debug_lowest_aligned > 0x3fff) { | ||
2900 | printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n", | ||
2901 | scsi_debug_lowest_aligned); | ||
2902 | return -EINVAL; | ||
2903 | } | ||
2904 | |||
2877 | if (scsi_debug_dev_size_mb < 1) | 2905 | if (scsi_debug_dev_size_mb < 1) |
2878 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ | 2906 | scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ |
2879 | sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; | 2907 | sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; |
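With the new physblk_exp and lowest_aligned module parameters, scsi_debug advertises a physical-block topology: VPD page B0 reports the granularity as 1 << physblk_exp blocks, and READ CAPACITY(16) packs the exponent into the low nibble of byte 13 and the 14-bit lowest aligned LBA into bytes 14-15. A runnable sketch of that byte packing, with the same bounds the init code enforces (the helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack the physical-block fields the way the patched scsi_debug does. */
    static int pack_readcap16_blocks(uint8_t *arr, unsigned physblk_exp,
                                     unsigned lowest_aligned)
    {
        if (physblk_exp > 15 || lowest_aligned > 0x3fff)
            return -1;                    /* same limits as scsi_debug_init */

        arr[13] = physblk_exp & 0xf;
        arr[14] = (lowest_aligned >> 8) & 0x3f;
        arr[15] = lowest_aligned & 0xff;
        return 0;
    }

    int main(void)
    {
        uint8_t arr[32] = { 0 };

        /* 4 KiB physical blocks on a 512-byte logical device, offset by 1 LBA:
         * exponent 3 (2^3 logical per physical), lowest aligned LBA 1. */
        if (pack_readcap16_blocks(arr, 3, 1) == 0)
            printf("byte13=0x%02x byte14=0x%02x byte15=0x%02x\n",
                   arr[13], arr[14], arr[15]);

        /* The VPD B0 granularity field reports 1 << exponent logical blocks. */
        printf("granularity=%u logical blocks\n", 1u << 3);
        return 0;
    }

Loading the module with physblk_exp=3 lowest_aligned=1 would produce exactly these bytes in the READ CAPACITY(16) response.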
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 8821df9a277b..93c2622cb969 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -24,6 +24,13 @@ struct scsi_dev_info_list { | |||
24 | unsigned compatible; /* for use with scsi_static_device_list entries */ | 24 | unsigned compatible; /* for use with scsi_static_device_list entries */ |
25 | }; | 25 | }; |
26 | 26 | ||
27 | struct scsi_dev_info_list_table { | ||
28 | struct list_head node; /* our node for being on the master list */ | ||
29 | struct list_head scsi_dev_info_list; /* head of dev info list */ | ||
30 | const char *name; /* name of list for /proc (NULL for global) */ | ||
31 | int key; /* unique numeric identifier */ | ||
32 | }; | ||
33 | |||
27 | 34 | ||
28 | static const char spaces[] = " "; /* 16 of them */ | 35 | static const char spaces[] = " "; /* 16 of them */ |
29 | static unsigned scsi_default_dev_flags; | 36 | static unsigned scsi_default_dev_flags; |
@@ -247,6 +254,22 @@ static struct { | |||
247 | { NULL, NULL, NULL, 0 }, | 254 | { NULL, NULL, NULL, 0 }, |
248 | }; | 255 | }; |
249 | 256 | ||
257 | static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key) | ||
258 | { | ||
259 | struct scsi_dev_info_list_table *devinfo_table; | ||
260 | int found = 0; | ||
261 | |||
262 | list_for_each_entry(devinfo_table, &scsi_dev_info_list, node) | ||
263 | if (devinfo_table->key == key) { | ||
264 | found = 1; | ||
265 | break; | ||
266 | } | ||
267 | if (!found) | ||
268 | return ERR_PTR(-EINVAL); | ||
269 | |||
270 | return devinfo_table; | ||
271 | } | ||
272 | |||
250 | /* | 273 | /* |
251 | * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into | 274 | * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into |
252 | * devinfo vendor and model strings. | 275 | * devinfo vendor and model strings. |
@@ -296,7 +319,38 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, | |||
296 | static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, | 319 | static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, |
297 | char *strflags, int flags) | 320 | char *strflags, int flags) |
298 | { | 321 | { |
322 | return scsi_dev_info_list_add_keyed(compatible, vendor, model, | ||
323 | strflags, flags, | ||
324 | SCSI_DEVINFO_GLOBAL); | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * scsi_dev_info_list_add_keyed - add one dev_info list entry. | ||
329 | * @compatible: if true, null terminate short strings. Otherwise space pad. | ||
330 | * @vendor: vendor string | ||
331 | * @model: model (product) string | ||
332 | * @strflags: integer string | ||
333 | * @flags: if strflags NULL, use this flag value | ||
334 | * @key: specify list to use | ||
335 | * | ||
336 | * Description: | ||
337 | * Create and add one dev_info entry for @vendor, @model, | ||
338 | * @strflags or @flag in list specified by @key. If @compatible, | ||
339 | * add to the tail of the list, do not space pad, and set | ||
340 | * devinfo->compatible. The scsi_static_device_list entries are | ||
341 | * added with @compatible 1 and @strflags NULL. | ||
342 | * | ||
343 | * Returns: 0 OK, -error on failure. | ||
344 | **/ | ||
345 | int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, | ||
346 | char *strflags, int flags, int key) | ||
347 | { | ||
299 | struct scsi_dev_info_list *devinfo; | 348 | struct scsi_dev_info_list *devinfo; |
349 | struct scsi_dev_info_list_table *devinfo_table = | ||
350 | scsi_devinfo_lookup_by_key(key); | ||
351 | |||
352 | if (IS_ERR(devinfo_table)) | ||
353 | return PTR_ERR(devinfo_table); | ||
300 | 354 | ||
301 | devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); | 355 | devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); |
302 | if (!devinfo) { | 356 | if (!devinfo) { |
@@ -317,12 +371,15 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, | |||
317 | devinfo->compatible = compatible; | 371 | devinfo->compatible = compatible; |
318 | 372 | ||
319 | if (compatible) | 373 | if (compatible) |
320 | list_add_tail(&devinfo->dev_info_list, &scsi_dev_info_list); | 374 | list_add_tail(&devinfo->dev_info_list, |
375 | &devinfo_table->scsi_dev_info_list); | ||
321 | else | 376 | else |
322 | list_add(&devinfo->dev_info_list, &scsi_dev_info_list); | 377 | list_add(&devinfo->dev_info_list, |
378 | &devinfo_table->scsi_dev_info_list); | ||
323 | 379 | ||
324 | return 0; | 380 | return 0; |
325 | } | 381 | } |
382 | EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); | ||
326 | 383 | ||
327 | /** | 384 | /** |
328 | * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. | 385 | * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. |
@@ -382,22 +439,48 @@ static int scsi_dev_info_list_add_str(char *dev_list) | |||
382 | * @model: model name | 439 | * @model: model name |
383 | * | 440 | * |
384 | * Description: | 441 | * Description: |
385 | * Search the scsi_dev_info_list for an entry matching @vendor and | 442 | * Search the global scsi_dev_info_list (specified by list zero) |
386 | * @model, if found, return the matching flags value, else return | 443 | * for an entry matching @vendor and @model, if found, return the |
387 | * the host or global default settings. Called during scan time. | 444 | * matching flags value, else return the host or global default |
445 | * settings. Called during scan time. | ||
388 | **/ | 446 | **/ |
389 | int scsi_get_device_flags(struct scsi_device *sdev, | 447 | int scsi_get_device_flags(struct scsi_device *sdev, |
390 | const unsigned char *vendor, | 448 | const unsigned char *vendor, |
391 | const unsigned char *model) | 449 | const unsigned char *model) |
392 | { | 450 | { |
451 | return scsi_get_device_flags_keyed(sdev, vendor, model, | ||
452 | SCSI_DEVINFO_GLOBAL); | ||
453 | } | ||
454 | |||
455 | |||
456 | /** | ||
457 | * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list. | ||
458 | * @sdev: &scsi_device to get flags for | ||
459 | * @vendor: vendor name | ||
460 | * @model: model name | ||
461 | * @key: list to look up | ||
462 | * | ||
463 | * Description: | ||
464 | * Search the scsi_dev_info_list specified by @key for an entry | ||
465 | * matching @vendor and @model, if found, return the matching | ||
466 | * flags value, else return the host or global default settings. | ||
467 | * Called during scan time. | ||
468 | **/ | ||
469 | int scsi_get_device_flags_keyed(struct scsi_device *sdev, | ||
470 | const unsigned char *vendor, | ||
471 | const unsigned char *model, | ||
472 | int key) | ||
473 | { | ||
393 | struct scsi_dev_info_list *devinfo; | 474 | struct scsi_dev_info_list *devinfo; |
394 | unsigned int bflags; | 475 | struct scsi_dev_info_list_table *devinfo_table; |
476 | |||
477 | devinfo_table = scsi_devinfo_lookup_by_key(key); | ||
395 | 478 | ||
396 | bflags = sdev->sdev_bflags; | 479 | if (IS_ERR(devinfo_table)) |
397 | if (!bflags) | 480 | return PTR_ERR(devinfo_table); |
398 | bflags = scsi_default_dev_flags; | ||
399 | 481 | ||
400 | list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) { | 482 | list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, |
483 | dev_info_list) { | ||
401 | if (devinfo->compatible) { | 484 | if (devinfo->compatible) { |
402 | /* | 485 | /* |
403 | * Behave like the older version of get_device_flags. | 486 | * Behave like the older version of get_device_flags. |
@@ -447,32 +530,89 @@ int scsi_get_device_flags(struct scsi_device *sdev, | |||
447 | return devinfo->flags; | 530 | return devinfo->flags; |
448 | } | 531 | } |
449 | } | 532 | } |
450 | return bflags; | 533 | /* nothing found, return nothing */ |
534 | if (key != SCSI_DEVINFO_GLOBAL) | ||
535 | return 0; | ||
536 | |||
537 | /* except for the global list, where we have an exception */ | ||
538 | if (sdev->sdev_bflags) | ||
539 | return sdev->sdev_bflags; | ||
540 | |||
541 | return scsi_default_dev_flags; | ||
451 | } | 542 | } |
543 | EXPORT_SYMBOL(scsi_get_device_flags_keyed); | ||
452 | 544 | ||
453 | #ifdef CONFIG_SCSI_PROC_FS | 545 | #ifdef CONFIG_SCSI_PROC_FS |
546 | struct double_list { | ||
547 | struct list_head *top; | ||
548 | struct list_head *bottom; | ||
549 | }; | ||
550 | |||
454 | static int devinfo_seq_show(struct seq_file *m, void *v) | 551 | static int devinfo_seq_show(struct seq_file *m, void *v) |
455 | { | 552 | { |
553 | struct double_list *dl = v; | ||
554 | struct scsi_dev_info_list_table *devinfo_table = | ||
555 | list_entry(dl->top, struct scsi_dev_info_list_table, node); | ||
456 | struct scsi_dev_info_list *devinfo = | 556 | struct scsi_dev_info_list *devinfo = |
457 | list_entry(v, struct scsi_dev_info_list, dev_info_list); | 557 | list_entry(dl->bottom, struct scsi_dev_info_list, |
558 | dev_info_list); | ||
559 | |||
560 | if (devinfo_table->scsi_dev_info_list.next == dl->bottom && | ||
561 | devinfo_table->name) | ||
562 | seq_printf(m, "[%s]:\n", devinfo_table->name); | ||
458 | 563 | ||
459 | seq_printf(m, "'%.8s' '%.16s' 0x%x\n", | 564 | seq_printf(m, "'%.8s' '%.16s' 0x%x\n", |
460 | devinfo->vendor, devinfo->model, devinfo->flags); | 565 | devinfo->vendor, devinfo->model, devinfo->flags); |
461 | return 0; | 566 | return 0; |
462 | } | 567 | } |
463 | 568 | ||
464 | static void * devinfo_seq_start(struct seq_file *m, loff_t *pos) | 569 | static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos) |
465 | { | 570 | { |
466 | return seq_list_start(&scsi_dev_info_list, *pos); | 571 | struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL); |
572 | loff_t pos = *ppos; | ||
573 | |||
574 | if (!dl) | ||
575 | return NULL; | ||
576 | |||
577 | list_for_each(dl->top, &scsi_dev_info_list) { | ||
578 | struct scsi_dev_info_list_table *devinfo_table = | ||
579 | list_entry(dl->top, struct scsi_dev_info_list_table, | ||
580 | node); | ||
581 | list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list) | ||
582 | if (pos-- == 0) | ||
583 | return dl; | ||
584 | } | ||
585 | |||
586 | kfree(dl); | ||
587 | return NULL; | ||
467 | } | 588 | } |
468 | 589 | ||
469 | static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos) | 590 | static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos) |
470 | { | 591 | { |
471 | return seq_list_next(v, &scsi_dev_info_list, pos); | 592 | struct double_list *dl = v; |
593 | struct scsi_dev_info_list_table *devinfo_table = | ||
594 | list_entry(dl->top, struct scsi_dev_info_list_table, node); | ||
595 | |||
596 | ++*ppos; | ||
597 | dl->bottom = dl->bottom->next; | ||
598 | while (&devinfo_table->scsi_dev_info_list == dl->bottom) { | ||
599 | dl->top = dl->top->next; | ||
600 | if (dl->top == &scsi_dev_info_list) { | ||
601 | kfree(dl); | ||
602 | return NULL; | ||
603 | } | ||
604 | devinfo_table = list_entry(dl->top, | ||
605 | struct scsi_dev_info_list_table, | ||
606 | node); | ||
607 | dl->bottom = devinfo_table->scsi_dev_info_list.next; | ||
608 | } | ||
609 | |||
610 | return dl; | ||
472 | } | 611 | } |
473 | 612 | ||
474 | static void devinfo_seq_stop(struct seq_file *m, void *v) | 613 | static void devinfo_seq_stop(struct seq_file *m, void *v) |
475 | { | 614 | { |
615 | kfree(v); | ||
476 | } | 616 | } |
477 | 617 | ||
478 | static const struct seq_operations scsi_devinfo_seq_ops = { | 618 | static const struct seq_operations scsi_devinfo_seq_ops = { |
@@ -549,19 +689,78 @@ MODULE_PARM_DESC(default_dev_flags, | |||
549 | **/ | 689 | **/ |
550 | void scsi_exit_devinfo(void) | 690 | void scsi_exit_devinfo(void) |
551 | { | 691 | { |
552 | struct list_head *lh, *lh_next; | ||
553 | struct scsi_dev_info_list *devinfo; | ||
554 | |||
555 | #ifdef CONFIG_SCSI_PROC_FS | 692 | #ifdef CONFIG_SCSI_PROC_FS |
556 | remove_proc_entry("scsi/device_info", NULL); | 693 | remove_proc_entry("scsi/device_info", NULL); |
557 | #endif | 694 | #endif |
558 | 695 | ||
559 | list_for_each_safe(lh, lh_next, &scsi_dev_info_list) { | 696 | scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL); |
697 | } | ||
698 | |||
699 | /** | ||
700 | * scsi_dev_info_add_list - add a new devinfo list | ||
701 | * @key: key of the list to add | ||
702 | * @name: Name of the list to add (for /proc/scsi/device_info) | ||
703 | * | ||
704 | * Adds the requested list, returns zero on success, -EEXIST if the | ||
705 | * key is already registered to a list, or other error on failure. | ||
706 | */ | ||
707 | int scsi_dev_info_add_list(int key, const char *name) | ||
708 | { | ||
709 | struct scsi_dev_info_list_table *devinfo_table = | ||
710 | scsi_devinfo_lookup_by_key(key); | ||
711 | |||
712 | if (!IS_ERR(devinfo_table)) | ||
713 | /* list already exists */ | ||
714 | return -EEXIST; | ||
715 | |||
716 | devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL); | ||
717 | |||
718 | if (!devinfo_table) | ||
719 | return -ENOMEM; | ||
720 | |||
721 | INIT_LIST_HEAD(&devinfo_table->node); | ||
722 | INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list); | ||
723 | devinfo_table->name = name; | ||
724 | devinfo_table->key = key; | ||
725 | list_add_tail(&devinfo_table->node, &scsi_dev_info_list); | ||
726 | |||
727 | return 0; | ||
728 | } | ||
729 | EXPORT_SYMBOL(scsi_dev_info_add_list); | ||
730 | |||
731 | /** | ||
732 | * scsi_dev_info_remove_list - destroy an added devinfo list | ||
733 | * @key: key of the list to destroy | ||
734 | * | ||
735 | * Iterates over the entire list first, freeing all the values, then | ||
736 | * frees the list itself. Returns 0 on success or -EINVAL if the key | ||
737 | * can't be found. | ||
738 | */ | ||
739 | int scsi_dev_info_remove_list(int key) | ||
740 | { | ||
741 | struct list_head *lh, *lh_next; | ||
742 | struct scsi_dev_info_list_table *devinfo_table = | ||
743 | scsi_devinfo_lookup_by_key(key); | ||
744 | |||
745 | if (IS_ERR(devinfo_table)) | ||
746 | /* no such list */ | ||
747 | return -EINVAL; | ||
748 | |||
749 | /* remove from the master list */ | ||
750 | list_del(&devinfo_table->node); | ||
751 | |||
752 | list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) { | ||
753 | struct scsi_dev_info_list *devinfo; | ||
754 | |||
560 | devinfo = list_entry(lh, struct scsi_dev_info_list, | 755 | devinfo = list_entry(lh, struct scsi_dev_info_list, |
561 | dev_info_list); | 756 | dev_info_list); |
562 | kfree(devinfo); | 757 | kfree(devinfo); |
563 | } | 758 | } |
759 | kfree(devinfo_table); | ||
760 | |||
761 | return 0; | ||
564 | } | 762 | } |
763 | EXPORT_SYMBOL(scsi_dev_info_remove_list); | ||
565 | 764 | ||
566 | /** | 765 | /** |
567 | * scsi_init_devinfo - set up the dynamic device list. | 766 | * scsi_init_devinfo - set up the dynamic device list. |
@@ -577,10 +776,14 @@ int __init scsi_init_devinfo(void) | |||
577 | #endif | 776 | #endif |
578 | int error, i; | 777 | int error, i; |
579 | 778 | ||
580 | error = scsi_dev_info_list_add_str(scsi_dev_flags); | 779 | error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL); |
581 | if (error) | 780 | if (error) |
582 | return error; | 781 | return error; |
583 | 782 | ||
783 | error = scsi_dev_info_list_add_str(scsi_dev_flags); | ||
784 | if (error) | ||
785 | goto out; | ||
786 | |||
584 | for (i = 0; scsi_static_device_list[i].vendor; i++) { | 787 | for (i = 0; scsi_static_device_list[i].vendor; i++) { |
585 | error = scsi_dev_info_list_add(1 /* compatibile */, | 788 | error = scsi_dev_info_list_add(1 /* compatibile */, |
586 | scsi_static_device_list[i].vendor, | 789 | scsi_static_device_list[i].vendor, |
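Taken together, the scsi_devinfo.c changes replace the single global blacklist with keyed lists that a transport class can create, populate, query and tear down on its own; the existing global list simply becomes key SCSI_DEVINFO_GLOBAL. The sketch below shows the expected call sequence from a transport module, modelled on the SPI usage later in this diff; it is illustrative only (the flag value and strings are made up) and would need the usual scsi_priv.h / scsi_device.h includes to build inside drivers/scsi.

    /* Illustrative use of the keyed devinfo API from a transport class. */
    static int __init my_transport_init(void)
    {
        int err;

        err = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, "My Transport Class");
        if (err)
            return err;                  /* -EEXIST if the key is taken */

        /* compatible=1 keeps the legacy vendor/model matching rules */
        return scsi_dev_info_list_add_keyed(1, "ACME", "TAPE-X", NULL,
                                            0x1 /* private quirk flag */,
                                            SCSI_DEVINFO_SPI);
    }

    static void my_transport_configure(struct scsi_device *sdev)
    {
        /* INQUIRY bytes 8-15 hold the vendor, 16-31 the model */
        int bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
                                                 &sdev->inquiry[16],
                                                 SCSI_DEVINFO_SPI);
        if (bflags & 0x1)
            ;    /* apply the quirk for this device */
    }

    static void __exit my_transport_exit(void)
    {
        scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);   /* frees every entry */
    }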
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 30f3275e119e..f3c40898fc7d 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1207,6 +1207,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1207 | ret = scsi_setup_blk_pc_cmnd(sdev, req); | 1207 | ret = scsi_setup_blk_pc_cmnd(sdev, req); |
1208 | return scsi_prep_return(q, req, ret); | 1208 | return scsi_prep_return(q, req, ret); |
1209 | } | 1209 | } |
1210 | EXPORT_SYMBOL(scsi_prep_fn); | ||
1210 | 1211 | ||
1211 | /* | 1212 | /* |
1212 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else | 1213 | * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else |
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index fbc83bebdd8e..021e503c8c44 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -39,9 +39,25 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | /* scsi_devinfo.c */ | 41 | /* scsi_devinfo.c */ |
42 | |||
43 | /* list of keys for the lists */ | ||
44 | enum { | ||
45 | SCSI_DEVINFO_GLOBAL = 0, | ||
46 | SCSI_DEVINFO_SPI, | ||
47 | }; | ||
48 | |||
42 | extern int scsi_get_device_flags(struct scsi_device *sdev, | 49 | extern int scsi_get_device_flags(struct scsi_device *sdev, |
43 | const unsigned char *vendor, | 50 | const unsigned char *vendor, |
44 | const unsigned char *model); | 51 | const unsigned char *model); |
52 | extern int scsi_get_device_flags_keyed(struct scsi_device *sdev, | ||
53 | const unsigned char *vendor, | ||
54 | const unsigned char *model, int key); | ||
55 | extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, | ||
56 | char *model, char *strflags, | ||
57 | int flags, int key); | ||
58 | extern int scsi_dev_info_add_list(int key, const char *name); | ||
59 | extern int scsi_dev_info_remove_list(int key); | ||
60 | |||
45 | extern int __init scsi_init_devinfo(void); | 61 | extern int __init scsi_init_devinfo(void); |
46 | extern void scsi_exit_devinfo(void); | 62 | extern void scsi_exit_devinfo(void); |
47 | 63 | ||
@@ -71,7 +87,6 @@ extern int scsi_init_queue(void); | |||
71 | extern void scsi_exit_queue(void); | 87 | extern void scsi_exit_queue(void); |
72 | struct request_queue; | 88 | struct request_queue; |
73 | struct request; | 89 | struct request; |
74 | extern int scsi_prep_fn(struct request_queue *, struct request *); | ||
75 | extern struct kmem_cache *scsi_sdb_cache; | 90 | extern struct kmem_cache *scsi_sdb_cache; |
76 | 91 | ||
77 | /* scsi_proc.c */ | 92 | /* scsi_proc.c */ |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index fa4711d12744..91482f2dcc50 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -420,29 +420,12 @@ static int scsi_bus_resume(struct device * dev) | |||
420 | return err; | 420 | return err; |
421 | } | 421 | } |
422 | 422 | ||
423 | static int scsi_bus_remove(struct device *dev) | ||
424 | { | ||
425 | struct device_driver *drv = dev->driver; | ||
426 | struct scsi_device *sdev = to_scsi_device(dev); | ||
427 | int err = 0; | ||
428 | |||
429 | /* reset the prep_fn back to the default since the | ||
430 | * driver may have altered it and it's being removed */ | ||
431 | blk_queue_prep_rq(sdev->request_queue, scsi_prep_fn); | ||
432 | |||
433 | if (drv && drv->remove) | ||
434 | err = drv->remove(dev); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | struct bus_type scsi_bus_type = { | 423 | struct bus_type scsi_bus_type = { |
440 | .name = "scsi", | 424 | .name = "scsi", |
441 | .match = scsi_bus_match, | 425 | .match = scsi_bus_match, |
442 | .uevent = scsi_bus_uevent, | 426 | .uevent = scsi_bus_uevent, |
443 | .suspend = scsi_bus_suspend, | 427 | .suspend = scsi_bus_suspend, |
444 | .resume = scsi_bus_resume, | 428 | .resume = scsi_bus_resume, |
445 | .remove = scsi_bus_remove, | ||
446 | }; | 429 | }; |
447 | EXPORT_SYMBOL_GPL(scsi_bus_type); | 430 | EXPORT_SYMBOL_GPL(scsi_bus_type); |
448 | 431 | ||
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 3f64d93b6c8b..2eee9e6e4fe8 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3397,7 +3397,6 @@ fc_destroy_bsgjob(struct fc_bsg_job *job) | |||
3397 | kfree(job); | 3397 | kfree(job); |
3398 | } | 3398 | } |
3399 | 3399 | ||
3400 | |||
3401 | /** | 3400 | /** |
3402 | * fc_bsg_jobdone - completion routine for bsg requests that the LLD has | 3401 | * fc_bsg_jobdone - completion routine for bsg requests that the LLD has |
3403 | * completed | 3402 | * completed |
@@ -3408,15 +3407,10 @@ fc_bsg_jobdone(struct fc_bsg_job *job) | |||
3408 | { | 3407 | { |
3409 | struct request *req = job->req; | 3408 | struct request *req = job->req; |
3410 | struct request *rsp = req->next_rq; | 3409 | struct request *rsp = req->next_rq; |
3411 | unsigned long flags; | ||
3412 | int err; | 3410 | int err; |
3413 | 3411 | ||
3414 | spin_lock_irqsave(&job->job_lock, flags); | ||
3415 | job->state_flags |= FC_RQST_STATE_DONE; | ||
3416 | job->ref_cnt--; | ||
3417 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3418 | |||
3419 | err = job->req->errors = job->reply->result; | 3412 | err = job->req->errors = job->reply->result; |
3413 | |||
3420 | if (err < 0) | 3414 | if (err < 0) |
3421 | /* we're only returning the result field in the reply */ | 3415 | /* we're only returning the result field in the reply */ |
3422 | job->req->sense_len = sizeof(uint32_t); | 3416 | job->req->sense_len = sizeof(uint32_t); |
@@ -3433,13 +3427,27 @@ fc_bsg_jobdone(struct fc_bsg_job *job) | |||
3433 | rsp->resid_len -= min(job->reply->reply_payload_rcv_len, | 3427 | rsp->resid_len -= min(job->reply->reply_payload_rcv_len, |
3434 | rsp->resid_len); | 3428 | rsp->resid_len); |
3435 | } | 3429 | } |
3430 | blk_complete_request(req); | ||
3431 | } | ||
3436 | 3432 | ||
3437 | blk_end_request_all(req, err); | 3433 | /** |
3434 | * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests | ||
3435 | * @req: BSG request that holds the job to be destroyed | ||
3436 | */ | ||
3437 | static void fc_bsg_softirq_done(struct request *rq) | ||
3438 | { | ||
3439 | struct fc_bsg_job *job = rq->special; | ||
3440 | unsigned long flags; | ||
3438 | 3441 | ||
3442 | spin_lock_irqsave(&job->job_lock, flags); | ||
3443 | job->state_flags |= FC_RQST_STATE_DONE; | ||
3444 | job->ref_cnt--; | ||
3445 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3446 | |||
3447 | blk_end_request_all(rq, rq->errors); | ||
3439 | fc_destroy_bsgjob(job); | 3448 | fc_destroy_bsgjob(job); |
3440 | } | 3449 | } |
3441 | 3450 | ||
3442 | |||
3443 | /** | 3451 | /** |
3444 | * fc_bsg_job_timeout - handler for when a bsg request timesout | 3452 | * fc_bsg_job_timeout - handler for when a bsg request timesout |
3445 | * @req: request that timed out | 3453 | * @req: request that timed out |
@@ -3471,19 +3479,13 @@ fc_bsg_job_timeout(struct request *req) | |||
3471 | "abort failed with status %d\n", err); | 3479 | "abort failed with status %d\n", err); |
3472 | } | 3480 | } |
3473 | 3481 | ||
3474 | if (!done) { | ||
3475 | spin_lock_irqsave(&job->job_lock, flags); | ||
3476 | job->ref_cnt--; | ||
3477 | spin_unlock_irqrestore(&job->job_lock, flags); | ||
3478 | fc_destroy_bsgjob(job); | ||
3479 | } | ||
3480 | |||
3481 | /* the blk_end_sync_io() doesn't check the error */ | 3482 | /* the blk_end_sync_io() doesn't check the error */ |
3482 | return BLK_EH_HANDLED; | 3483 | if (done) |
3484 | return BLK_EH_NOT_HANDLED; | ||
3485 | else | ||
3486 | return BLK_EH_HANDLED; | ||
3483 | } | 3487 | } |
3484 | 3488 | ||
3485 | |||
3486 | |||
3487 | static int | 3489 | static int |
3488 | fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req) | 3490 | fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req) |
3489 | { | 3491 | { |
@@ -3859,7 +3861,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) | |||
3859 | struct fc_internal *i = to_fc_internal(shost->transportt); | 3861 | struct fc_internal *i = to_fc_internal(shost->transportt); |
3860 | struct request_queue *q; | 3862 | struct request_queue *q; |
3861 | int err; | 3863 | int err; |
3862 | char bsg_name[BUS_ID_SIZE]; /*20*/ | 3864 | char bsg_name[20]; |
3863 | 3865 | ||
3864 | fc_host->rqst_q = NULL; | 3866 | fc_host->rqst_q = NULL; |
3865 | 3867 | ||
@@ -3879,6 +3881,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) | |||
3879 | 3881 | ||
3880 | q->queuedata = shost; | 3882 | q->queuedata = shost; |
3881 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | 3883 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); |
3884 | blk_queue_softirq_done(q, fc_bsg_softirq_done); | ||
3882 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); | 3885 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); |
3883 | blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); | 3886 | blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); |
3884 | 3887 | ||
@@ -3924,6 +3927,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) | |||
3924 | 3927 | ||
3925 | q->queuedata = rport; | 3928 | q->queuedata = rport; |
3926 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | 3929 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); |
3930 | blk_queue_softirq_done(q, fc_bsg_softirq_done); | ||
3927 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); | 3931 | blk_queue_rq_timed_out(q, fc_bsg_job_timeout); |
3928 | blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); | 3932 | blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); |
3929 | 3933 | ||
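The scsi_transport_fc.c rework splits bsg completion in two: the LLD-facing fc_bsg_jobdone() now only records the result and calls blk_complete_request(), while the new softirq handler, registered on both the host and rport queues with blk_queue_softirq_done(), drops the job reference and destroys it. A hedged outline of the resulting flow (only the quoted function names come from the patch):

    /*
     * LLD completes the request
     *   -> fc_bsg_jobdone(job)
     *        job->req->errors = job->reply->result; fix up sense_len/resid_len
     *        blk_complete_request(req)           -- defer teardown to softirq
     *   -> fc_bsg_softirq_done(rq)               -- set via blk_queue_softirq_done()
     *        mark FC_RQST_STATE_DONE and drop job->ref_cnt under job_lock
     *        blk_end_request_all(rq, rq->errors)
     *        fc_destroy_bsgjob(job)
     *
     * Timeout path: fc_bsg_job_timeout() no longer frees the job itself; it
     * returns BLK_EH_NOT_HANDLED when the LLD already completed the request
     * and BLK_EH_HANDLED otherwise, letting the block layer drive completion.
     */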
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f3e664628d7a..783e33c65eb7 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -692,6 +692,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) | |||
692 | "Too many iscsi targets. Max " | 692 | "Too many iscsi targets. Max " |
693 | "number of targets is %d.\n", | 693 | "number of targets is %d.\n", |
694 | ISCSI_MAX_TARGET - 1); | 694 | ISCSI_MAX_TARGET - 1); |
695 | err = -EOVERFLOW; | ||
695 | goto release_host; | 696 | goto release_host; |
696 | } | 697 | } |
697 | } | 698 | } |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index d606452297cf..0895d3c71b03 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -173,9 +173,9 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, | |||
173 | ret = handler(shost, rphy, req); | 173 | ret = handler(shost, rphy, req); |
174 | req->errors = ret; | 174 | req->errors = ret; |
175 | 175 | ||
176 | spin_lock_irq(q->queue_lock); | 176 | blk_end_request_all(req, ret); |
177 | 177 | ||
178 | req->end_io(req, ret); | 178 | spin_lock_irq(q->queue_lock); |
179 | } | 179 | } |
180 | } | 180 | } |
181 | 181 | ||
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 654a34fb04cb..c25bd9a34e02 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -46,6 +46,22 @@ | |||
46 | #define DV_RETRIES 3 /* should only need at most | 46 | #define DV_RETRIES 3 /* should only need at most |
47 | * two cc/ua clears */ | 47 | * two cc/ua clears */ |
48 | 48 | ||
49 | /* Our blacklist flags */ | ||
50 | enum { | ||
51 | SPI_BLIST_NOIUS = 0x1, | ||
52 | }; | ||
53 | |||
54 | /* blacklist table, modelled on scsi_devinfo.c */ | ||
55 | static struct { | ||
56 | char *vendor; | ||
57 | char *model; | ||
58 | unsigned flags; | ||
59 | } spi_static_device_list[] __initdata = { | ||
60 | {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, | ||
61 | {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, | ||
62 | {NULL, NULL, 0} | ||
63 | }; | ||
64 | |||
49 | /* Private data accessors (keep these out of the header file) */ | 65 | /* Private data accessors (keep these out of the header file) */ |
50 | #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) | 66 | #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) |
51 | #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) | 67 | #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) |
@@ -207,6 +223,9 @@ static int spi_device_configure(struct transport_container *tc, | |||
207 | { | 223 | { |
208 | struct scsi_device *sdev = to_scsi_device(dev); | 224 | struct scsi_device *sdev = to_scsi_device(dev); |
209 | struct scsi_target *starget = sdev->sdev_target; | 225 | struct scsi_target *starget = sdev->sdev_target; |
226 | unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], | ||
227 | &sdev->inquiry[16], | ||
228 | SCSI_DEVINFO_SPI); | ||
210 | 229 | ||
211 | /* Populate the target capability fields with the values | 230 | /* Populate the target capability fields with the values |
212 | * gleaned from the device inquiry */ | 231 | * gleaned from the device inquiry */ |
@@ -216,6 +235,10 @@ static int spi_device_configure(struct transport_container *tc, | |||
216 | spi_support_dt(starget) = scsi_device_dt(sdev); | 235 | spi_support_dt(starget) = scsi_device_dt(sdev); |
217 | spi_support_dt_only(starget) = scsi_device_dt_only(sdev); | 236 | spi_support_dt_only(starget) = scsi_device_dt_only(sdev); |
218 | spi_support_ius(starget) = scsi_device_ius(sdev); | 237 | spi_support_ius(starget) = scsi_device_ius(sdev); |
238 | if (bflags & SPI_BLIST_NOIUS) { | ||
239 | dev_info(dev, "Information Units disabled by blacklist\n"); | ||
240 | spi_support_ius(starget) = 0; | ||
241 | } | ||
219 | spi_support_qas(starget) = scsi_device_qas(sdev); | 242 | spi_support_qas(starget) = scsi_device_qas(sdev); |
220 | 243 | ||
221 | return 0; | 244 | return 0; |
@@ -833,7 +856,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
833 | return; | 856 | return; |
834 | } | 857 | } |
835 | 858 | ||
836 | if (!scsi_device_wide(sdev)) { | 859 | if (!spi_support_wide(starget)) { |
837 | spi_max_width(starget) = 0; | 860 | spi_max_width(starget) = 0; |
838 | max_width = 0; | 861 | max_width = 0; |
839 | } | 862 | } |
@@ -860,7 +883,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
860 | return; | 883 | return; |
861 | 884 | ||
862 | /* device can't handle synchronous */ | 885 | /* device can't handle synchronous */ |
863 | if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) | 886 | if (!spi_support_sync(starget) && !spi_support_dt(starget)) |
864 | return; | 887 | return; |
865 | 888 | ||
866 | /* len == -1 is the signal that we need to ascertain the | 889 | /* len == -1 is the signal that we need to ascertain the |
@@ -876,13 +899,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
876 | 899 | ||
877 | /* try QAS requests; this should be harmless to set if the | 900 | /* try QAS requests; this should be harmless to set if the |
878 | * target supports it */ | 901 | * target supports it */ |
879 | if (scsi_device_qas(sdev) && spi_max_qas(starget)) { | 902 | if (spi_support_qas(starget) && spi_max_qas(starget)) { |
880 | DV_SET(qas, 1); | 903 | DV_SET(qas, 1); |
881 | } else { | 904 | } else { |
882 | DV_SET(qas, 0); | 905 | DV_SET(qas, 0); |
883 | } | 906 | } |
884 | 907 | ||
885 | if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) { | 908 | if (spi_support_ius(starget) && spi_max_iu(starget) && |
909 | min_period < 9) { | ||
886 | /* This u320 (or u640). Set IU transfers */ | 910 | /* This u320 (or u640). Set IU transfers */ |
887 | DV_SET(iu, 1); | 911 | DV_SET(iu, 1); |
888 | /* Then set the optional parameters */ | 912 | /* Then set the optional parameters */ |
@@ -902,7 +926,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) | |||
902 | i->f->get_signalling(shost); | 926 | i->f->get_signalling(shost); |
903 | if (spi_signalling(shost) == SPI_SIGNAL_SE || | 927 | if (spi_signalling(shost) == SPI_SIGNAL_SE || |
904 | spi_signalling(shost) == SPI_SIGNAL_HVD || | 928 | spi_signalling(shost) == SPI_SIGNAL_HVD || |
905 | !scsi_device_dt(sdev)) { | 929 | !spi_support_dt(starget)) { |
906 | DV_SET(dt, 0); | 930 | DV_SET(dt, 0); |
907 | } else { | 931 | } else { |
908 | DV_SET(dt, 1); | 932 | DV_SET(dt, 1); |
@@ -1523,7 +1547,21 @@ EXPORT_SYMBOL(spi_release_transport); | |||
1523 | 1547 | ||
1524 | static __init int spi_transport_init(void) | 1548 | static __init int spi_transport_init(void) |
1525 | { | 1549 | { |
1526 | int error = transport_class_register(&spi_transport_class); | 1550 | int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, |
1551 | "SCSI Parallel Transport Class"); | ||
1552 | if (!error) { | ||
1553 | int i; | ||
1554 | |||
1555 | for (i = 0; spi_static_device_list[i].vendor; i++) | ||
1556 | scsi_dev_info_list_add_keyed(1, /* compatible */ | ||
1557 | spi_static_device_list[i].vendor, | ||
1558 | spi_static_device_list[i].model, | ||
1559 | NULL, | ||
1560 | spi_static_device_list[i].flags, | ||
1561 | SCSI_DEVINFO_SPI); | ||
1562 | } | ||
1563 | |||
1564 | error = transport_class_register(&spi_transport_class); | ||
1527 | if (error) | 1565 | if (error) |
1528 | return error; | 1566 | return error; |
1529 | error = anon_transport_class_register(&spi_device_class); | 1567 | error = anon_transport_class_register(&spi_device_class); |
@@ -1535,6 +1573,7 @@ static void __exit spi_transport_exit(void) | |||
1535 | transport_class_unregister(&spi_transport_class); | 1573 | transport_class_unregister(&spi_transport_class); |
1536 | anon_transport_class_unregister(&spi_device_class); | 1574 | anon_transport_class_unregister(&spi_device_class); |
1537 | transport_class_unregister(&spi_host_class); | 1575 | transport_class_unregister(&spi_host_class); |
1576 | scsi_dev_info_remove_list(SCSI_DEVINFO_SPI); | ||
1538 | } | 1577 | } |
1539 | 1578 | ||
1540 | MODULE_AUTHOR("Martin Hicks"); | 1579 | MODULE_AUTHOR("Martin Hicks"); |
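scsi_transport_spi.c is the first consumer of the keyed lists: spi_transport_init() registers SCSI_DEVINFO_SPI seeded from spi_static_device_list, and spi_device_configure() looks the device up by its INQUIRY vendor/model so SPI_BLIST_NOIUS can veto information units on the two listed tape drives regardless of what the device claims. A small runnable sketch of just the flag check; the table and prefix matching here stand in for the real devinfo lookup, which also handles space padding and the compatible rules.

    #include <stdio.h>
    #include <string.h>

    #define SPI_BLIST_NOIUS 0x1

    /* Stand-in for the keyed devinfo lookup: match vendor/model prefixes. */
    static const struct { const char *vendor, *model; unsigned flags; } blist[] = {
        { "HP",  "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
        { "IBM", "ULTRIUM-TD3",    SPI_BLIST_NOIUS },
        { NULL, NULL, 0 }
    };

    static unsigned lookup_flags(const char *vendor, const char *model)
    {
        for (int i = 0; blist[i].vendor; i++)
            if (!strncmp(vendor, blist[i].vendor, strlen(blist[i].vendor)) &&
                !strncmp(model, blist[i].model, strlen(blist[i].model)))
                return blist[i].flags;
        return 0;
    }

    int main(void)
    {
        int support_ius = 1;   /* what the INQUIRY data claimed */

        if (lookup_flags("HP      ", "Ultrium 3-SCSI  ") & SPI_BLIST_NOIUS)
            support_ius = 0;   /* blacklist overrides the device's claim */

        printf("IU support after blacklist: %d\n", support_ius);
        return 0;
    }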
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 878b17a9af30..5616cd780ff3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1307,6 +1307,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, | |||
1307 | int sense_valid = 0; | 1307 | int sense_valid = 0; |
1308 | int the_result; | 1308 | int the_result; |
1309 | int retries = 3; | 1309 | int retries = 3; |
1310 | unsigned int alignment; | ||
1310 | unsigned long long lba; | 1311 | unsigned long long lba; |
1311 | unsigned sector_size; | 1312 | unsigned sector_size; |
1312 | 1313 | ||
@@ -1358,6 +1359,16 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, | |||
1358 | return -EOVERFLOW; | 1359 | return -EOVERFLOW; |
1359 | } | 1360 | } |
1360 | 1361 | ||
1362 | /* Logical blocks per physical block exponent */ | ||
1363 | sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size; | ||
1364 | |||
1365 | /* Lowest aligned logical block */ | ||
1366 | alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; | ||
1367 | blk_queue_alignment_offset(sdp->request_queue, alignment); | ||
1368 | if (alignment && sdkp->first_scan) | ||
1369 | sd_printk(KERN_NOTICE, sdkp, | ||
1370 | "physical block alignment offset: %u\n", alignment); | ||
1371 | |||
1361 | sdkp->capacity = lba + 1; | 1372 | sdkp->capacity = lba + 1; |
1362 | return sector_size; | 1373 | return sector_size; |
1363 | } | 1374 | } |
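sd.c consumes the same READ CAPACITY(16) bytes that scsi_debug now fills in: the exponent in byte 13 scales the logical sector size up to a physical block size, and the 14-bit lowest-aligned-LBA field becomes a byte offset fed to blk_queue_alignment_offset(). A runnable decode of the example buffer from the scsi_debug sketch above, assuming 512-byte logical sectors:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t buffer[32] = { 0 };
        unsigned sector_size = 512;

        buffer[13] = 0x03;               /* 2^3 logical blocks per physical block */
        buffer[15] = 0x01;               /* lowest aligned LBA = 1 */

        unsigned hw_sector_size = (1u << (buffer[13] & 0xf)) * sector_size;
        unsigned alignment = (((buffer[14] & 0x3f) << 8) | buffer[15]) * sector_size;

        /* 4096-byte physical blocks whose boundaries fall 512 bytes into the
         * logical address space, so LBAs 1, 9, 17, ... start physical blocks. */
        printf("physical block size: %u bytes\n", hw_sector_size);
        printf("alignment offset:    %u bytes\n", alignment);
        return 0;
    }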
@@ -1409,6 +1420,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, | |||
1409 | } | 1420 | } |
1410 | 1421 | ||
1411 | sdkp->capacity = lba + 1; | 1422 | sdkp->capacity = lba + 1; |
1423 | sdkp->hw_sector_size = sector_size; | ||
1412 | return sector_size; | 1424 | return sector_size; |
1413 | } | 1425 | } |
1414 | 1426 | ||
@@ -1521,11 +1533,17 @@ got_data: | |||
1521 | string_get_size(sz, STRING_UNITS_10, cap_str_10, | 1533 | string_get_size(sz, STRING_UNITS_10, cap_str_10, |
1522 | sizeof(cap_str_10)); | 1534 | sizeof(cap_str_10)); |
1523 | 1535 | ||
1524 | if (sdkp->first_scan || old_capacity != sdkp->capacity) | 1536 | if (sdkp->first_scan || old_capacity != sdkp->capacity) { |
1525 | sd_printk(KERN_NOTICE, sdkp, | 1537 | sd_printk(KERN_NOTICE, sdkp, |
1526 | "%llu %d-byte hardware sectors: (%s/%s)\n", | 1538 | "%llu %d-byte logical blocks: (%s/%s)\n", |
1527 | (unsigned long long)sdkp->capacity, | 1539 | (unsigned long long)sdkp->capacity, |
1528 | sector_size, cap_str_10, cap_str_2); | 1540 | sector_size, cap_str_10, cap_str_2); |
1541 | |||
1542 | if (sdkp->hw_sector_size != sector_size) | ||
1543 | sd_printk(KERN_NOTICE, sdkp, | ||
1544 | "%u-byte physical blocks\n", | ||
1545 | sdkp->hw_sector_size); | ||
1546 | } | ||
1529 | } | 1547 | } |
1530 | 1548 | ||
1531 | /* Rescale capacity to 512-byte units */ | 1549 | /* Rescale capacity to 512-byte units */ |
@@ -1538,6 +1556,7 @@ got_data: | |||
1538 | else if (sector_size == 256) | 1556 | else if (sector_size == 256) |
1539 | sdkp->capacity >>= 1; | 1557 | sdkp->capacity >>= 1; |
1540 | 1558 | ||
1559 | blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size); | ||
1541 | sdkp->device->sector_size = sector_size; | 1560 | sdkp->device->sector_size = sector_size; |
1542 | } | 1561 | } |
1543 | 1562 | ||
@@ -1776,6 +1795,52 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) | |||
1776 | } | 1795 | } |
1777 | 1796 | ||
1778 | /** | 1797 | /** |
1798 | * sd_read_block_limits - Query disk device for preferred I/O sizes. | ||
1799 | * @sdkp: disk to query | ||
1800 | */ | ||
1801 | static void sd_read_block_limits(struct scsi_disk *sdkp) | ||
1802 | { | ||
1803 | unsigned int sector_sz = sdkp->device->sector_size; | ||
1804 | char *buffer; | ||
1805 | |||
1806 | /* Block Limits VPD */ | ||
1807 | buffer = scsi_get_vpd_page(sdkp->device, 0xb0); | ||
1808 | |||
1809 | if (buffer == NULL) | ||
1810 | return; | ||
1811 | |||
1812 | blk_queue_io_min(sdkp->disk->queue, | ||
1813 | get_unaligned_be16(&buffer[6]) * sector_sz); | ||
1814 | blk_queue_io_opt(sdkp->disk->queue, | ||
1815 | get_unaligned_be32(&buffer[12]) * sector_sz); | ||
1816 | |||
1817 | kfree(buffer); | ||
1818 | } | ||
1819 | |||
1820 | /** | ||
1821 | * sd_read_block_characteristics - Query block dev. characteristics | ||
1822 | * @sdkp: disk to query | ||
1823 | */ | ||
1824 | static void sd_read_block_characteristics(struct scsi_disk *sdkp) | ||
1825 | { | ||
1826 | char *buffer; | ||
1827 | u16 rot; | ||
1828 | |||
1829 | /* Block Device Characteristics VPD */ | ||
1830 | buffer = scsi_get_vpd_page(sdkp->device, 0xb1); | ||
1831 | |||
1832 | if (buffer == NULL) | ||
1833 | return; | ||
1834 | |||
1835 | rot = get_unaligned_be16(&buffer[4]); | ||
1836 | |||
1837 | if (rot == 1) | ||
1838 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue); | ||
1839 | |||
1840 | kfree(buffer); | ||
1841 | } | ||
1842 | |||
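sd_read_block_limits() and sd_read_block_characteristics() above pull three values out of two VPD pages: the optimal transfer granularity (bytes 6-7 of page 0xb0), the optimal transfer length (bytes 12-15 of page 0xb0), and the medium rotation rate (bytes 4-5 of page 0xb1, where a value of 1 means a non-rotational device). A userspace sketch of the same byte-level decoding, with example page contents invented for illustration:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

static uint32_t get_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

int main(void)
{
	uint32_t sector_sz = 512;

	/* Block Limits page (0xb0): granularity 8 blocks, optimal
	 * transfer length 128 blocks. */
	uint8_t b0[64] = { 0 };
	b0[7] = 8;
	b0[15] = 128;

	/* Block Device Characteristics page (0xb1): rotation rate 1,
	 * i.e. a solid-state (non-rotational) device. */
	uint8_t b1[64] = { 0 };
	b1[5] = 1;

	uint32_t io_min = get_be16(&b0[6]) * sector_sz;
	uint32_t io_opt = get_be32(&b0[12]) * sector_sz;
	uint16_t rot = get_be16(&b1[4]);

	printf("io_min %u bytes, io_opt %u bytes, %s\n", io_min, io_opt,
	       rot == 1 ? "non-rotational" : "rotational or unreported");
	return 0;
}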
1843 | /** | ||
1779 | * sd_revalidate_disk - called the first time a new disk is seen, | 1844 | * sd_revalidate_disk - called the first time a new disk is seen, |
1780 | * performs disk spin up, read_capacity, etc. | 1845 | * performs disk spin up, read_capacity, etc. |
1781 | * @disk: struct gendisk we care about | 1846 | * @disk: struct gendisk we care about |
@@ -1812,6 +1877,8 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
1812 | */ | 1877 | */ |
1813 | if (sdkp->media_present) { | 1878 | if (sdkp->media_present) { |
1814 | sd_read_capacity(sdkp, buffer); | 1879 | sd_read_capacity(sdkp, buffer); |
1880 | sd_read_block_limits(sdkp); | ||
1881 | sd_read_block_characteristics(sdkp); | ||
1815 | sd_read_write_protect_flag(sdkp, buffer); | 1882 | sd_read_write_protect_flag(sdkp, buffer); |
1816 | sd_read_cache_type(sdkp, buffer); | 1883 | sd_read_cache_type(sdkp, buffer); |
1817 | sd_read_app_tag_own(sdkp, buffer); | 1884 | sd_read_app_tag_own(sdkp, buffer); |
@@ -1934,6 +2001,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie) | |||
1934 | add_disk(gd); | 2001 | add_disk(gd); |
1935 | sd_dif_config_host(sdkp); | 2002 | sd_dif_config_host(sdkp); |
1936 | 2003 | ||
2004 | sd_revalidate_disk(gd); | ||
2005 | |||
1937 | sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", | 2006 | sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", |
1938 | sdp->removable ? "removable " : ""); | 2007 | sdp->removable ? "removable " : ""); |
1939 | } | 2008 | } |
@@ -2054,6 +2123,7 @@ static int sd_remove(struct device *dev) | |||
2054 | 2123 | ||
2055 | async_synchronize_full(); | 2124 | async_synchronize_full(); |
2056 | sdkp = dev_get_drvdata(dev); | 2125 | sdkp = dev_get_drvdata(dev); |
2126 | blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); | ||
2057 | device_del(&sdkp->dev); | 2127 | device_del(&sdkp->dev); |
2058 | del_gendisk(sdkp->disk); | 2128 | del_gendisk(sdkp->disk); |
2059 | sd_shutdown(dev); | 2129 | sd_shutdown(dev); |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 708778cf5f06..8474b5bad3fe 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
@@ -45,6 +45,7 @@ struct scsi_disk { | |||
45 | unsigned int openers; /* protected by BKL for now, yuck */ | 45 | unsigned int openers; /* protected by BKL for now, yuck */ |
46 | sector_t capacity; /* size in 512-byte sectors */ | 46 | sector_t capacity; /* size in 512-byte sectors */ |
47 | u32 index; | 47 | u32 index; |
48 | unsigned short hw_sector_size; | ||
48 | u8 media_present; | 49 | u8 media_present; |
49 | u8 write_prot; | 50 | u8 write_prot; |
50 | u8 protection_type;/* Data Integrity Field */ | 51 | u8 protection_type;/* Data Integrity Field */ |
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index cd350dfc1216..cce0fe4c8a3b 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
@@ -881,6 +881,7 @@ static int sr_remove(struct device *dev) | |||
881 | { | 881 | { |
882 | struct scsi_cd *cd = dev_get_drvdata(dev); | 882 | struct scsi_cd *cd = dev_get_drvdata(dev); |
883 | 883 | ||
884 | blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); | ||
884 | del_gendisk(cd->disk); | 885 | del_gendisk(cd->disk); |
885 | 886 | ||
886 | mutex_lock(&sr_ref_mutex); | 887 | mutex_lock(&sr_ref_mutex); |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 69ad4945c936..297deb817a5d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -2321,8 +2321,9 @@ static void sym_int_par (struct sym_hcb *np, u_short sist) | |||
2321 | int phase = cmd & 7; | 2321 | int phase = cmd & 7; |
2322 | struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); | 2322 | struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); |
2323 | 2323 | ||
2324 | printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", | 2324 | if (printk_ratelimit()) |
2325 | sym_name(np), hsts, dbc, sbcl); | 2325 | printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", |
2326 | sym_name(np), hsts, dbc, sbcl); | ||
2326 | 2327 | ||
2327 | /* | 2328 | /* |
2328 | * Check that the chip is connected to the SCSI BUS. | 2329 | * Check that the chip is connected to the SCSI BUS. |
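The sym53c8xx change above wraps a parity-error message in printk_ratelimit() so a flood of identical errors cannot saturate the log. The kernel helper allows a limited burst of messages per interval; a rough userspace sketch of the same gating idea follows (the interval and burst numbers are illustrative, not the kernel's exact defaults).

#include <stdio.h>
#include <time.h>

/* Minimal printk_ratelimit()-style gate: allow up to RL_BURST messages,
 * then stay quiet until RL_INTERVAL seconds have passed. */
#define RL_INTERVAL 5
#define RL_BURST    10

static int ratelimit_ok(void)
{
	static time_t window_start;
	static int used;
	time_t now = time(NULL);

	if (now - window_start >= RL_INTERVAL) {
		window_start = now;
		used = 0;
	}
	return used++ < RL_BURST;
}

int main(void)
{
	int i;

	/* Only the first RL_BURST of these make it to the log. */
	for (i = 0; i < 100; i++)
		if (ratelimit_ok())
			fprintf(stderr, "SCSI parity error detected (%d)\n", i);
	return 0;
}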
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index e371a9c15341..a07015d646dd 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -398,8 +398,7 @@ static int sbs_init(struct pci_dev *dev) | |||
398 | { | 398 | { |
399 | u8 __iomem *p; | 399 | u8 __iomem *p; |
400 | 400 | ||
401 | p = ioremap_nocache(pci_resource_start(dev, 0), | 401 | p = pci_ioremap_bar(dev, 0); |
402 | pci_resource_len(dev, 0)); | ||
403 | 402 | ||
404 | if (p == NULL) | 403 | if (p == NULL) |
405 | return -ENOMEM; | 404 | return -ENOMEM; |
@@ -423,8 +422,7 @@ static void __devexit sbs_exit(struct pci_dev *dev) | |||
423 | { | 422 | { |
424 | u8 __iomem *p; | 423 | u8 __iomem *p; |
425 | 424 | ||
426 | p = ioremap_nocache(pci_resource_start(dev, 0), | 425 | p = pci_ioremap_bar(dev, 0); |
427 | pci_resource_len(dev, 0)); | ||
428 | /* FIXME: What if resource_len < OCT_REG_CR_OFF */ | 426 | /* FIXME: What if resource_len < OCT_REG_CR_OFF */ |
429 | if (p != NULL) | 427 | if (p != NULL) |
430 | writeb(0, p + OCT_REG_CR_OFF); | 428 | writeb(0, p + OCT_REG_CR_OFF); |
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 9f2891c2c4a2..cd1b6a45bb82 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c | |||
@@ -1548,8 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev, | |||
1548 | goto probe_exit1; | 1548 | goto probe_exit1; |
1549 | } | 1549 | } |
1550 | 1550 | ||
1551 | icom_adapter->base_addr = ioremap(icom_adapter->base_addr_pci, | 1551 | icom_adapter->base_addr = pci_ioremap_bar(dev, 0); |
1552 | pci_resource_len(dev, 0)); | ||
1553 | 1552 | ||
1554 | if (!icom_adapter->base_addr) | 1553 | if (!icom_adapter->base_addr) |
1555 | goto probe_exit1; | 1554 | goto probe_exit1; |
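Both serial conversions above (8250_pci.c and icom.c) replace an explicit ioremap of pci_resource_start()/pci_resource_len() with pci_ioremap_bar(dev, 0). Conceptually the helper is just that pair plus a check that the BAR really is a memory resource; the sketch below shows the rough shape and is not the kernel's verbatim source. Note that for icom this also switches to an uncached mapping, where the old code used plain ioremap().

/* Rough shape of pci_ioremap_bar(): refuse non-memory BARs, then map
 * the whole BAR uncached. */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return NULL;

	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}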
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index 107ce2e187b8..00f4577d2f7f 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c | |||
@@ -467,7 +467,7 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd) | |||
467 | printk(KERN_INFO "jsm: linemap is full, added device failed\n"); | 467 | printk(KERN_INFO "jsm: linemap is full, added device failed\n"); |
468 | continue; | 468 | continue; |
469 | } else | 469 | } else |
470 | set_bit((int)line, linemap); | 470 | set_bit(line, linemap); |
471 | brd->channels[i]->uart_port.line = line; | 471 | brd->channels[i]->uart_port.line = line; |
472 | if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) | 472 | if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) |
473 | printk(KERN_INFO "jsm: add device failed\n"); | 473 | printk(KERN_INFO "jsm: add device failed\n"); |
@@ -503,7 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd) | |||
503 | 503 | ||
504 | ch = brd->channels[i]; | 504 | ch = brd->channels[i]; |
505 | 505 | ||
506 | clear_bit((int)(ch->uart_port.line), linemap); | 506 | clear_bit(ch->uart_port.line, linemap); |
507 | uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); | 507 | uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); |
508 | } | 508 | } |
509 | 509 | ||
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c index 7313c2edcb83..54dd16d66a4b 100644 --- a/drivers/serial/serial_txx9.c +++ b/drivers/serial/serial_txx9.c | |||
@@ -461,6 +461,94 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state) | |||
461 | spin_unlock_irqrestore(&up->port.lock, flags); | 461 | spin_unlock_irqrestore(&up->port.lock, flags); |
462 | } | 462 | } |
463 | 463 | ||
464 | #if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL) | ||
465 | /* | ||
466 | * Wait for transmitter & holding register to empty | ||
467 | */ | ||
468 | static void wait_for_xmitr(struct uart_txx9_port *up) | ||
469 | { | ||
470 | unsigned int tmout = 10000; | ||
471 | |||
472 | /* Wait up to 10ms for the character(s) to be sent. */ | ||
473 | while (--tmout && | ||
474 | !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS)) | ||
475 | udelay(1); | ||
476 | |||
477 | /* Wait up to 1s for flow control if necessary */ | ||
478 | if (up->port.flags & UPF_CONS_FLOW) { | ||
479 | tmout = 1000000; | ||
480 | while (--tmout && | ||
481 | (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS)) | ||
482 | udelay(1); | ||
483 | } | ||
484 | } | ||
485 | #endif | ||
486 | |||
487 | #ifdef CONFIG_CONSOLE_POLL | ||
488 | /* | ||
489 | * Console polling routines for writing and reading from the uart while | ||
490 | * in an interrupt or debug context. | ||
491 | */ | ||
492 | |||
493 | static int serial_txx9_get_poll_char(struct uart_port *port) | ||
494 | { | ||
495 | unsigned int ier; | ||
496 | unsigned char c; | ||
497 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | ||
498 | |||
499 | /* | ||
500 | * First save the IER then disable the interrupts | ||
501 | */ | ||
502 | ier = sio_in(up, TXX9_SIDICR); | ||
503 | sio_out(up, TXX9_SIDICR, 0); | ||
504 | |||
505 | while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID) | ||
506 | ; | ||
507 | |||
508 | c = sio_in(up, TXX9_SIRFIFO); | ||
509 | |||
510 | /* | ||
511 | * Finally, clear RX interrupt status | ||
512 | * and restore the IER | ||
513 | */ | ||
514 | sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS); | ||
515 | sio_out(up, TXX9_SIDICR, ier); | ||
516 | return c; | ||
517 | } | ||
518 | |||
519 | |||
520 | static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) | ||
521 | { | ||
522 | unsigned int ier; | ||
523 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | ||
524 | |||
525 | /* | ||
526 | * First save the IER then disable the interrupts | ||
527 | */ | ||
528 | ier = sio_in(up, TXX9_SIDICR); | ||
529 | sio_out(up, TXX9_SIDICR, 0); | ||
530 | |||
531 | wait_for_xmitr(up); | ||
532 | /* | ||
533 | * Send the character out. | ||
534 | * If a LF, also do CR... | ||
535 | */ | ||
536 | sio_out(up, TXX9_SITFIFO, c); | ||
537 | if (c == 10) { | ||
538 | wait_for_xmitr(up); | ||
539 | sio_out(up, TXX9_SITFIFO, 13); | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * Finally, wait for transmitter to become empty | ||
544 | * and restore the IER | ||
545 | */ | ||
546 | wait_for_xmitr(up); | ||
547 | sio_out(up, TXX9_SIDICR, ier); | ||
548 | } | ||
549 | |||
550 | #endif /* CONFIG_CONSOLE_POLL */ | ||
551 | |||
464 | static int serial_txx9_startup(struct uart_port *port) | 552 | static int serial_txx9_startup(struct uart_port *port) |
465 | { | 553 | { |
466 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | 554 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; |
@@ -781,6 +869,10 @@ static struct uart_ops serial_txx9_pops = { | |||
781 | .release_port = serial_txx9_release_port, | 869 | .release_port = serial_txx9_release_port, |
782 | .request_port = serial_txx9_request_port, | 870 | .request_port = serial_txx9_request_port, |
783 | .config_port = serial_txx9_config_port, | 871 | .config_port = serial_txx9_config_port, |
872 | #ifdef CONFIG_CONSOLE_POLL | ||
873 | .poll_get_char = serial_txx9_get_poll_char, | ||
874 | .poll_put_char = serial_txx9_put_poll_char, | ||
875 | #endif | ||
784 | }; | 876 | }; |
785 | 877 | ||
786 | static struct uart_txx9_port serial_txx9_ports[UART_NR]; | 878 | static struct uart_txx9_port serial_txx9_ports[UART_NR]; |
@@ -803,27 +895,6 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv, | |||
803 | 895 | ||
804 | #ifdef CONFIG_SERIAL_TXX9_CONSOLE | 896 | #ifdef CONFIG_SERIAL_TXX9_CONSOLE |
805 | 897 | ||
806 | /* | ||
807 | * Wait for transmitter & holding register to empty | ||
808 | */ | ||
809 | static inline void wait_for_xmitr(struct uart_txx9_port *up) | ||
810 | { | ||
811 | unsigned int tmout = 10000; | ||
812 | |||
813 | /* Wait up to 10ms for the character(s) to be sent. */ | ||
814 | while (--tmout && | ||
815 | !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS)) | ||
816 | udelay(1); | ||
817 | |||
818 | /* Wait up to 1s for flow control if necessary */ | ||
819 | if (up->port.flags & UPF_CONS_FLOW) { | ||
820 | tmout = 1000000; | ||
821 | while (--tmout && | ||
822 | (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS)) | ||
823 | udelay(1); | ||
824 | } | ||
825 | } | ||
826 | |||
827 | static void serial_txx9_console_putchar(struct uart_port *port, int ch) | 898 | static void serial_txx9_console_putchar(struct uart_port *port, int ch) |
828 | { | 899 | { |
829 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | 900 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; |
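The serial_txx9 hunks above move wait_for_xmitr() out of the console-only #ifdef so the new CONFIG_CONSOLE_POLL hooks (poll_get_char/poll_put_char, used by kgdb-style debuggers) can share it. Its core is a bounded busy-wait: spin on a status bit with a one microsecond delay per iteration and give up after a fixed budget instead of hanging. A small userspace sketch of that pattern, with a simulated status register standing in for TXX9_SICISR:

#include <stdio.h>

#define STAT_TX_IDLE 0x01

static unsigned int fake_status;

/* Pretend the transmitter drains after a few polls. */
static unsigned int read_status(void)
{
	static int polls;

	if (++polls > 3)
		fake_status |= STAT_TX_IDLE;
	return fake_status;
}

/* Bounded busy-wait: poll a status bit up to 'tmout' times instead of
 * hanging forever, mirroring wait_for_xmitr()'s 10000 * udelay(1)
 * budget. Returns nonzero if the bit came up in time. */
static int wait_for_tx_idle(unsigned int tmout)
{
	while (--tmout && !(read_status() & STAT_TX_IDLE))
		;	/* a real driver would udelay(1) here */
	return tmout != 0;
}

int main(void)
{
	if (wait_for_tx_idle(10000))
		puts("transmitter idle, safe to write the FIFO");
	else
		puts("timed out waiting for the transmitter");
	return 0;
}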
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile index 3c839e37d37f..c0a583cc2227 100644 --- a/drivers/staging/octeon/Makefile +++ b/drivers/staging/octeon/Makefile | |||
@@ -12,7 +12,6 @@ | |||
12 | obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o | 12 | obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o |
13 | 13 | ||
14 | octeon-ethernet-objs := ethernet.o | 14 | octeon-ethernet-objs := ethernet.o |
15 | octeon-ethernet-objs += ethernet-common.o | ||
16 | octeon-ethernet-objs += ethernet-mdio.o | 15 | octeon-ethernet-objs += ethernet-mdio.o |
17 | octeon-ethernet-objs += ethernet-mem.o | 16 | octeon-ethernet-objs += ethernet-mem.o |
18 | octeon-ethernet-objs += ethernet-proc.o | 17 | octeon-ethernet-objs += ethernet-proc.o |
diff --git a/drivers/staging/octeon/ethernet-common.c b/drivers/staging/octeon/ethernet-common.c deleted file mode 100644 index 3e6f5b8cc63d..000000000000 --- a/drivers/staging/octeon/ethernet-common.c +++ /dev/null | |||
@@ -1,328 +0,0 @@ | |||
1 | /********************************************************************** | ||
2 | * Author: Cavium Networks | ||
3 | * | ||
4 | * Contact: support@caviumnetworks.com | ||
5 | * This file is part of the OCTEON SDK | ||
6 | * | ||
7 | * Copyright (c) 2003-2007 Cavium Networks | ||
8 | * | ||
9 | * This file is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License, Version 2, as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This file is distributed in the hope that it will be useful, but | ||
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | ||
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | ||
16 | * NONINFRINGEMENT. See the GNU General Public License for more | ||
17 | * details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this file; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * or visit http://www.gnu.org/licenses/. | ||
23 | * | ||
24 | * This file may also be available under a different license from Cavium. | ||
25 | * Contact Cavium Networks for more information | ||
26 | **********************************************************************/ | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/mii.h> | ||
29 | #include <net/dst.h> | ||
30 | |||
31 | #include <asm/atomic.h> | ||
32 | #include <asm/octeon/octeon.h> | ||
33 | |||
34 | #include "ethernet-defines.h" | ||
35 | #include "ethernet-tx.h" | ||
36 | #include "ethernet-mdio.h" | ||
37 | #include "ethernet-util.h" | ||
38 | #include "octeon-ethernet.h" | ||
39 | #include "ethernet-common.h" | ||
40 | |||
41 | #include "cvmx-pip.h" | ||
42 | #include "cvmx-pko.h" | ||
43 | #include "cvmx-fau.h" | ||
44 | #include "cvmx-helper.h" | ||
45 | |||
46 | #include "cvmx-gmxx-defs.h" | ||
47 | |||
48 | /** | ||
49 | * Get the low level ethernet statistics | ||
50 | * | ||
51 | * @dev: Device to get the statistics from | ||
52 | * Returns Pointer to the statistics | ||
53 | */ | ||
54 | static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) | ||
55 | { | ||
56 | cvmx_pip_port_status_t rx_status; | ||
57 | cvmx_pko_port_status_t tx_status; | ||
58 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
59 | |||
60 | if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { | ||
61 | if (octeon_is_simulation()) { | ||
62 | /* The simulator doesn't support statistics */ | ||
63 | memset(&rx_status, 0, sizeof(rx_status)); | ||
64 | memset(&tx_status, 0, sizeof(tx_status)); | ||
65 | } else { | ||
66 | cvmx_pip_get_port_status(priv->port, 1, &rx_status); | ||
67 | cvmx_pko_get_port_status(priv->port, 1, &tx_status); | ||
68 | } | ||
69 | |||
70 | priv->stats.rx_packets += rx_status.inb_packets; | ||
71 | priv->stats.tx_packets += tx_status.packets; | ||
72 | priv->stats.rx_bytes += rx_status.inb_octets; | ||
73 | priv->stats.tx_bytes += tx_status.octets; | ||
74 | priv->stats.multicast += rx_status.multicast_packets; | ||
75 | priv->stats.rx_crc_errors += rx_status.inb_errors; | ||
76 | priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; | ||
77 | |||
78 | /* | ||
79 | * The drop counter must be incremented atomically | ||
80 | * since the RX tasklet also increments it. | ||
81 | */ | ||
82 | #ifdef CONFIG_64BIT | ||
83 | atomic64_add(rx_status.dropped_packets, | ||
84 | (atomic64_t *)&priv->stats.rx_dropped); | ||
85 | #else | ||
86 | atomic_add(rx_status.dropped_packets, | ||
87 | (atomic_t *)&priv->stats.rx_dropped); | ||
88 | #endif | ||
89 | } | ||
90 | |||
91 | return &priv->stats; | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * Set the multicast list. Currently unimplemented. | ||
96 | * | ||
97 | * @dev: Device to work on | ||
98 | */ | ||
99 | static void cvm_oct_common_set_multicast_list(struct net_device *dev) | ||
100 | { | ||
101 | union cvmx_gmxx_prtx_cfg gmx_cfg; | ||
102 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
103 | int interface = INTERFACE(priv->port); | ||
104 | int index = INDEX(priv->port); | ||
105 | |||
106 | if ((interface < 2) | ||
107 | && (cvmx_helper_interface_get_mode(interface) != | ||
108 | CVMX_HELPER_INTERFACE_MODE_SPI)) { | ||
109 | union cvmx_gmxx_rxx_adr_ctl control; | ||
110 | control.u64 = 0; | ||
111 | control.s.bcst = 1; /* Allow broadcast MAC addresses */ | ||
112 | |||
113 | if (dev->mc_list || (dev->flags & IFF_ALLMULTI) || | ||
114 | (dev->flags & IFF_PROMISC)) | ||
115 | /* Force accept multicast packets */ | ||
116 | control.s.mcst = 2; | ||
117 | else | ||
118 | /* Force reject multicat packets */ | ||
119 | control.s.mcst = 1; | ||
120 | |||
121 | if (dev->flags & IFF_PROMISC) | ||
122 | /* | ||
123 | * Reject matches if promisc. Since CAM is | ||
124 | * shut off, should accept everything. | ||
125 | */ | ||
126 | control.s.cam_mode = 0; | ||
127 | else | ||
128 | /* Filter packets based on the CAM */ | ||
129 | control.s.cam_mode = 1; | ||
130 | |||
131 | gmx_cfg.u64 = | ||
132 | cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); | ||
133 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
134 | gmx_cfg.u64 & ~1ull); | ||
135 | |||
136 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), | ||
137 | control.u64); | ||
138 | if (dev->flags & IFF_PROMISC) | ||
139 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN | ||
140 | (index, interface), 0); | ||
141 | else | ||
142 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN | ||
143 | (index, interface), 1); | ||
144 | |||
145 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
146 | gmx_cfg.u64); | ||
147 | } | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * Set the hardware MAC address for a device | ||
152 | * | ||
153 | * @dev: Device to change the MAC address for | ||
154 | * @addr: Address structure to change it too. MAC address is addr + 2. | ||
155 | * Returns Zero on success | ||
156 | */ | ||
157 | static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) | ||
158 | { | ||
159 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
160 | union cvmx_gmxx_prtx_cfg gmx_cfg; | ||
161 | int interface = INTERFACE(priv->port); | ||
162 | int index = INDEX(priv->port); | ||
163 | |||
164 | memcpy(dev->dev_addr, addr + 2, 6); | ||
165 | |||
166 | if ((interface < 2) | ||
167 | && (cvmx_helper_interface_get_mode(interface) != | ||
168 | CVMX_HELPER_INTERFACE_MODE_SPI)) { | ||
169 | int i; | ||
170 | uint8_t *ptr = addr; | ||
171 | uint64_t mac = 0; | ||
172 | for (i = 0; i < 6; i++) | ||
173 | mac = (mac << 8) | (uint64_t) (ptr[i + 2]); | ||
174 | |||
175 | gmx_cfg.u64 = | ||
176 | cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); | ||
177 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
178 | gmx_cfg.u64 & ~1ull); | ||
179 | |||
180 | cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac); | ||
181 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), | ||
182 | ptr[2]); | ||
183 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), | ||
184 | ptr[3]); | ||
185 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), | ||
186 | ptr[4]); | ||
187 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), | ||
188 | ptr[5]); | ||
189 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), | ||
190 | ptr[6]); | ||
191 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), | ||
192 | ptr[7]); | ||
193 | cvm_oct_common_set_multicast_list(dev); | ||
194 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
195 | gmx_cfg.u64); | ||
196 | } | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | /** | ||
201 | * Change the link MTU. Unimplemented | ||
202 | * | ||
203 | * @dev: Device to change | ||
204 | * @new_mtu: The new MTU | ||
205 | * | ||
206 | * Returns Zero on success | ||
207 | */ | ||
208 | static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) | ||
209 | { | ||
210 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
211 | int interface = INTERFACE(priv->port); | ||
212 | int index = INDEX(priv->port); | ||
213 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
214 | int vlan_bytes = 4; | ||
215 | #else | ||
216 | int vlan_bytes = 0; | ||
217 | #endif | ||
218 | |||
219 | /* | ||
220 | * Limit the MTU to make sure the ethernet packets are between | ||
221 | * 64 bytes and 65535 bytes. | ||
222 | */ | ||
223 | if ((new_mtu + 14 + 4 + vlan_bytes < 64) | ||
224 | || (new_mtu + 14 + 4 + vlan_bytes > 65392)) { | ||
225 | pr_err("MTU must be between %d and %d.\n", | ||
226 | 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | dev->mtu = new_mtu; | ||
230 | |||
231 | if ((interface < 2) | ||
232 | && (cvmx_helper_interface_get_mode(interface) != | ||
233 | CVMX_HELPER_INTERFACE_MODE_SPI)) { | ||
234 | /* Add ethernet header and FCS, and VLAN if configured. */ | ||
235 | int max_packet = new_mtu + 14 + 4 + vlan_bytes; | ||
236 | |||
237 | if (OCTEON_IS_MODEL(OCTEON_CN3XXX) | ||
238 | || OCTEON_IS_MODEL(OCTEON_CN58XX)) { | ||
239 | /* Signal errors on packets larger than the MTU */ | ||
240 | cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), | ||
241 | max_packet); | ||
242 | } else { | ||
243 | /* | ||
244 | * Set the hardware to truncate packets larger | ||
245 | * than the MTU and smaller the 64 bytes. | ||
246 | */ | ||
247 | union cvmx_pip_frm_len_chkx frm_len_chk; | ||
248 | frm_len_chk.u64 = 0; | ||
249 | frm_len_chk.s.minlen = 64; | ||
250 | frm_len_chk.s.maxlen = max_packet; | ||
251 | cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface), | ||
252 | frm_len_chk.u64); | ||
253 | } | ||
254 | /* | ||
255 | * Set the hardware to truncate packets larger than | ||
256 | * the MTU. The jabber register must be set to a | ||
257 | * multiple of 8 bytes, so round up. | ||
258 | */ | ||
259 | cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface), | ||
260 | (max_packet + 7) & ~7u); | ||
261 | } | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * Per network device initialization | ||
267 | * | ||
268 | * @dev: Device to initialize | ||
269 | * Returns Zero on success | ||
270 | */ | ||
271 | int cvm_oct_common_init(struct net_device *dev) | ||
272 | { | ||
273 | static int count; | ||
274 | char mac[8] = { 0x00, 0x00, | ||
275 | octeon_bootinfo->mac_addr_base[0], | ||
276 | octeon_bootinfo->mac_addr_base[1], | ||
277 | octeon_bootinfo->mac_addr_base[2], | ||
278 | octeon_bootinfo->mac_addr_base[3], | ||
279 | octeon_bootinfo->mac_addr_base[4], | ||
280 | octeon_bootinfo->mac_addr_base[5] + count | ||
281 | }; | ||
282 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
283 | |||
284 | /* | ||
285 | * Force the interface to use the POW send if always_use_pow | ||
286 | * was specified or it is in the pow send list. | ||
287 | */ | ||
288 | if ((pow_send_group != -1) | ||
289 | && (always_use_pow || strstr(pow_send_list, dev->name))) | ||
290 | priv->queue = -1; | ||
291 | |||
292 | if (priv->queue != -1) { | ||
293 | dev->hard_start_xmit = cvm_oct_xmit; | ||
294 | if (USE_HW_TCPUDP_CHECKSUM) | ||
295 | dev->features |= NETIF_F_IP_CSUM; | ||
296 | } else | ||
297 | dev->hard_start_xmit = cvm_oct_xmit_pow; | ||
298 | count++; | ||
299 | |||
300 | dev->get_stats = cvm_oct_common_get_stats; | ||
301 | dev->set_mac_address = cvm_oct_common_set_mac_address; | ||
302 | dev->set_multicast_list = cvm_oct_common_set_multicast_list; | ||
303 | dev->change_mtu = cvm_oct_common_change_mtu; | ||
304 | dev->do_ioctl = cvm_oct_ioctl; | ||
305 | /* We do our own locking, Linux doesn't need to */ | ||
306 | dev->features |= NETIF_F_LLTX; | ||
307 | SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); | ||
308 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
309 | dev->poll_controller = cvm_oct_poll_controller; | ||
310 | #endif | ||
311 | |||
312 | cvm_oct_mdio_setup_device(dev); | ||
313 | dev->set_mac_address(dev, mac); | ||
314 | dev->change_mtu(dev, dev->mtu); | ||
315 | |||
316 | /* | ||
317 | * Zero out stats for port so we won't mistakenly show | ||
318 | * counters from the bootloader. | ||
319 | */ | ||
320 | memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats)); | ||
321 | |||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | void cvm_oct_common_uninit(struct net_device *dev) | ||
326 | { | ||
327 | /* Currently nothing to do */ | ||
328 | } | ||
diff --git a/drivers/staging/octeon/ethernet-common.h b/drivers/staging/octeon/ethernet-common.h deleted file mode 100644 index 2bd9cd76a398..000000000000 --- a/drivers/staging/octeon/ethernet-common.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /********************************************************************* | ||
2 | * Author: Cavium Networks | ||
3 | * | ||
4 | * Contact: support@caviumnetworks.com | ||
5 | * This file is part of the OCTEON SDK | ||
6 | * | ||
7 | * Copyright (c) 2003-2007 Cavium Networks | ||
8 | * | ||
9 | * This file is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License, Version 2, as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This file is distributed in the hope that it will be useful, but | ||
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | ||
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | ||
16 | * NONINFRINGEMENT. See the GNU General Public License for more | ||
17 | * details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this file; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * or visit http://www.gnu.org/licenses/. | ||
23 | * | ||
24 | * This file may also be available under a different license from Cavium. | ||
25 | * Contact Cavium Networks for more information | ||
26 | *********************************************************************/ | ||
27 | |||
28 | int cvm_oct_common_init(struct net_device *dev); | ||
29 | void cvm_oct_common_uninit(struct net_device *dev); | ||
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h index 8f7374e7664c..f13131b03c33 100644 --- a/drivers/staging/octeon/ethernet-defines.h +++ b/drivers/staging/octeon/ethernet-defines.h | |||
@@ -117,6 +117,8 @@ | |||
117 | 117 | ||
118 | /* Maximum number of packets to process per interrupt. */ | 118 | /* Maximum number of packets to process per interrupt. */ |
119 | #define MAX_RX_PACKETS 120 | 119 | #define MAX_RX_PACKETS 120 |
120 | /* Maximum number of SKBs to try to free per xmit packet. */ | ||
121 | #define MAX_SKB_TO_FREE 10 | ||
120 | #define MAX_OUT_QUEUE_DEPTH 1000 | 122 | #define MAX_OUT_QUEUE_DEPTH 1000 |
121 | 123 | ||
122 | #ifndef CONFIG_SMP | 124 | #ifndef CONFIG_SMP |
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index 8579f1670d1e..8704133fe127 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include "ethernet-defines.h" | 34 | #include "ethernet-defines.h" |
35 | #include "octeon-ethernet.h" | 35 | #include "octeon-ethernet.h" |
36 | #include "ethernet-common.h" | ||
37 | #include "ethernet-util.h" | 36 | #include "ethernet-util.h" |
38 | 37 | ||
39 | #include "cvmx-helper.h" | 38 | #include "cvmx-helper.h" |
@@ -265,7 +264,7 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id) | |||
265 | return return_status; | 264 | return return_status; |
266 | } | 265 | } |
267 | 266 | ||
268 | static int cvm_oct_rgmii_open(struct net_device *dev) | 267 | int cvm_oct_rgmii_open(struct net_device *dev) |
269 | { | 268 | { |
270 | union cvmx_gmxx_prtx_cfg gmx_cfg; | 269 | union cvmx_gmxx_prtx_cfg gmx_cfg; |
271 | struct octeon_ethernet *priv = netdev_priv(dev); | 270 | struct octeon_ethernet *priv = netdev_priv(dev); |
@@ -286,7 +285,7 @@ static int cvm_oct_rgmii_open(struct net_device *dev) | |||
286 | return 0; | 285 | return 0; |
287 | } | 286 | } |
288 | 287 | ||
289 | static int cvm_oct_rgmii_stop(struct net_device *dev) | 288 | int cvm_oct_rgmii_stop(struct net_device *dev) |
290 | { | 289 | { |
291 | union cvmx_gmxx_prtx_cfg gmx_cfg; | 290 | union cvmx_gmxx_prtx_cfg gmx_cfg; |
292 | struct octeon_ethernet *priv = netdev_priv(dev); | 291 | struct octeon_ethernet *priv = netdev_priv(dev); |
@@ -305,9 +304,7 @@ int cvm_oct_rgmii_init(struct net_device *dev) | |||
305 | int r; | 304 | int r; |
306 | 305 | ||
307 | cvm_oct_common_init(dev); | 306 | cvm_oct_common_init(dev); |
308 | dev->open = cvm_oct_rgmii_open; | 307 | dev->netdev_ops->ndo_stop(dev); |
309 | dev->stop = cvm_oct_rgmii_stop; | ||
310 | dev->stop(dev); | ||
311 | 308 | ||
312 | /* | 309 | /* |
313 | * Due to GMX errata in CN3XXX series chips, it is necessary | 310 | * Due to GMX errata in CN3XXX series chips, it is necessary |
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c index 58fa39c1d675..2b54996bd85d 100644 --- a/drivers/staging/octeon/ethernet-sgmii.c +++ b/drivers/staging/octeon/ethernet-sgmii.c | |||
@@ -34,13 +34,12 @@ | |||
34 | #include "ethernet-defines.h" | 34 | #include "ethernet-defines.h" |
35 | #include "octeon-ethernet.h" | 35 | #include "octeon-ethernet.h" |
36 | #include "ethernet-util.h" | 36 | #include "ethernet-util.h" |
37 | #include "ethernet-common.h" | ||
38 | 37 | ||
39 | #include "cvmx-helper.h" | 38 | #include "cvmx-helper.h" |
40 | 39 | ||
41 | #include "cvmx-gmxx-defs.h" | 40 | #include "cvmx-gmxx-defs.h" |
42 | 41 | ||
43 | static int cvm_oct_sgmii_open(struct net_device *dev) | 42 | int cvm_oct_sgmii_open(struct net_device *dev) |
44 | { | 43 | { |
45 | union cvmx_gmxx_prtx_cfg gmx_cfg; | 44 | union cvmx_gmxx_prtx_cfg gmx_cfg; |
46 | struct octeon_ethernet *priv = netdev_priv(dev); | 45 | struct octeon_ethernet *priv = netdev_priv(dev); |
@@ -61,7 +60,7 @@ static int cvm_oct_sgmii_open(struct net_device *dev) | |||
61 | return 0; | 60 | return 0; |
62 | } | 61 | } |
63 | 62 | ||
64 | static int cvm_oct_sgmii_stop(struct net_device *dev) | 63 | int cvm_oct_sgmii_stop(struct net_device *dev) |
65 | { | 64 | { |
66 | union cvmx_gmxx_prtx_cfg gmx_cfg; | 65 | union cvmx_gmxx_prtx_cfg gmx_cfg; |
67 | struct octeon_ethernet *priv = netdev_priv(dev); | 66 | struct octeon_ethernet *priv = netdev_priv(dev); |
@@ -113,9 +112,7 @@ int cvm_oct_sgmii_init(struct net_device *dev) | |||
113 | { | 112 | { |
114 | struct octeon_ethernet *priv = netdev_priv(dev); | 113 | struct octeon_ethernet *priv = netdev_priv(dev); |
115 | cvm_oct_common_init(dev); | 114 | cvm_oct_common_init(dev); |
116 | dev->open = cvm_oct_sgmii_open; | 115 | dev->netdev_ops->ndo_stop(dev); |
117 | dev->stop = cvm_oct_sgmii_stop; | ||
118 | dev->stop(dev); | ||
119 | if (!octeon_is_simulation()) | 116 | if (!octeon_is_simulation()) |
120 | priv->poll = cvm_oct_sgmii_poll; | 117 | priv->poll = cvm_oct_sgmii_poll; |
121 | 118 | ||
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c index e0971bbe4ddc..66190b0cb68f 100644 --- a/drivers/staging/octeon/ethernet-spi.c +++ b/drivers/staging/octeon/ethernet-spi.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include "ethernet-defines.h" | 34 | #include "ethernet-defines.h" |
35 | #include "octeon-ethernet.h" | 35 | #include "octeon-ethernet.h" |
36 | #include "ethernet-common.h" | ||
37 | #include "ethernet-util.h" | 36 | #include "ethernet-util.h" |
38 | 37 | ||
39 | #include "cvmx-spi.h" | 38 | #include "cvmx-spi.h" |
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 77b7122c8fdb..81a851390f1b 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | #include "ethernet-defines.h" | 48 | #include "ethernet-defines.h" |
49 | #include "octeon-ethernet.h" | 49 | #include "octeon-ethernet.h" |
50 | #include "ethernet-tx.h" | ||
50 | #include "ethernet-util.h" | 51 | #include "ethernet-util.h" |
51 | 52 | ||
52 | #include "cvmx-wqe.h" | 53 | #include "cvmx-wqe.h" |
@@ -82,8 +83,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) | |||
82 | uint64_t old_scratch2; | 83 | uint64_t old_scratch2; |
83 | int dropped; | 84 | int dropped; |
84 | int qos; | 85 | int qos; |
86 | int queue_it_up; | ||
85 | struct octeon_ethernet *priv = netdev_priv(dev); | 87 | struct octeon_ethernet *priv = netdev_priv(dev); |
86 | int32_t in_use; | 88 | int32_t skb_to_free; |
89 | int32_t undo; | ||
87 | int32_t buffers_to_free; | 90 | int32_t buffers_to_free; |
88 | #if REUSE_SKBUFFS_WITHOUT_FREE | 91 | #if REUSE_SKBUFFS_WITHOUT_FREE |
89 | unsigned char *fpa_head; | 92 | unsigned char *fpa_head; |
@@ -120,15 +123,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) | |||
120 | old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); | 123 | old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); |
121 | 124 | ||
122 | /* | 125 | /* |
123 | * Assume we're going to be able t osend this | 126 | * Fetch and increment the number of packets to be |
124 | * packet. Fetch and increment the number of pending | 127 | * freed. |
125 | * packets for output. | ||
126 | */ | 128 | */ |
127 | cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8, | 129 | cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8, |
128 | FAU_NUM_PACKET_BUFFERS_TO_FREE, | 130 | FAU_NUM_PACKET_BUFFERS_TO_FREE, |
129 | 0); | 131 | 0); |
130 | cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, | 132 | cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, |
131 | priv->fau + qos * 4, 1); | 133 | priv->fau + qos * 4, |
134 | MAX_SKB_TO_FREE); | ||
132 | } | 135 | } |
133 | 136 | ||
134 | /* | 137 | /* |
@@ -253,10 +256,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) | |||
253 | 256 | ||
254 | /* | 257 | /* |
255 | * The skbuff will be reused without ever being freed. We must | 258 | * The skbuff will be reused without ever being freed. We must |
256 | * cleanup a bunch of Linux stuff. | 259 | * cleanup a bunch of core things. |
257 | */ | 260 | */ |
258 | dst_release(skb->dst); | 261 | dst_release(skb_dst(skb)); |
259 | skb->dst = NULL; | 262 | skb_dst_set(skb, NULL); |
260 | #ifdef CONFIG_XFRM | 263 | #ifdef CONFIG_XFRM |
261 | secpath_put(skb->sp); | 264 | secpath_put(skb->sp); |
262 | skb->sp = NULL; | 265 | skb->sp = NULL; |
@@ -286,16 +289,30 @@ dont_put_skbuff_in_hw: | |||
286 | if (USE_ASYNC_IOBDMA) { | 289 | if (USE_ASYNC_IOBDMA) { |
287 | /* Get the number of skbuffs in use by the hardware */ | 290 | /* Get the number of skbuffs in use by the hardware */ |
288 | CVMX_SYNCIOBDMA; | 291 | CVMX_SYNCIOBDMA; |
289 | in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH); | 292 | skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); |
290 | buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); | 293 | buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); |
291 | } else { | 294 | } else { |
292 | /* Get the number of skbuffs in use by the hardware */ | 295 | /* Get the number of skbuffs in use by the hardware */ |
293 | in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1); | 296 | skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, |
297 | MAX_SKB_TO_FREE); | ||
294 | buffers_to_free = | 298 | buffers_to_free = |
295 | cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); | 299 | cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); |
296 | } | 300 | } |
297 | 301 | ||
298 | /* | 302 | /* |
303 | * We try to claim MAX_SKB_TO_FREE buffers. If there were not | ||
304 | * that many available, we have to un-claim (undo) any that | ||
305 | * were in excess. If skb_to_free is positive we will free | ||
306 | * that many buffers. | ||
307 | */ | ||
308 | undo = skb_to_free > 0 ? | ||
309 | MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; | ||
310 | if (undo > 0) | ||
311 | cvmx_fau_atomic_add32(priv->fau+qos*4, -undo); | ||
312 | skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? | ||
313 | MAX_SKB_TO_FREE : -skb_to_free; | ||
314 | |||
315 | /* | ||
299 | * If we're sending faster than the receive can free them then | 316 | * If we're sending faster than the receive can free them then |
300 | * don't do the HW free. | 317 | * don't do the HW free. |
301 | */ | 318 | */ |
@@ -330,38 +347,31 @@ dont_put_skbuff_in_hw: | |||
330 | cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); | 347 | cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); |
331 | } | 348 | } |
332 | 349 | ||
350 | queue_it_up = 0; | ||
333 | if (unlikely(dropped)) { | 351 | if (unlikely(dropped)) { |
334 | dev_kfree_skb_any(skb); | 352 | dev_kfree_skb_any(skb); |
335 | cvmx_fau_atomic_add32(priv->fau + qos * 4, -1); | ||
336 | priv->stats.tx_dropped++; | 353 | priv->stats.tx_dropped++; |
337 | } else { | 354 | } else { |
338 | if (USE_SKBUFFS_IN_HW) { | 355 | if (USE_SKBUFFS_IN_HW) { |
339 | /* Put this packet on the queue to be freed later */ | 356 | /* Put this packet on the queue to be freed later */ |
340 | if (pko_command.s.dontfree) | 357 | if (pko_command.s.dontfree) |
341 | skb_queue_tail(&priv->tx_free_list[qos], skb); | 358 | queue_it_up = 1; |
342 | else { | 359 | else |
343 | cvmx_fau_atomic_add32 | 360 | cvmx_fau_atomic_add32 |
344 | (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); | 361 | (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); |
345 | cvmx_fau_atomic_add32(priv->fau + qos * 4, -1); | ||
346 | } | ||
347 | } else { | 362 | } else { |
348 | /* Put this packet on the queue to be freed later */ | 363 | /* Put this packet on the queue to be freed later */ |
349 | skb_queue_tail(&priv->tx_free_list[qos], skb); | 364 | queue_it_up = 1; |
350 | } | 365 | } |
351 | } | 366 | } |
352 | 367 | ||
353 | /* Free skbuffs not in use by the hardware, possibly two at a time */ | 368 | if (queue_it_up) { |
354 | if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) { | ||
355 | spin_lock(&priv->tx_free_list[qos].lock); | 369 | spin_lock(&priv->tx_free_list[qos].lock); |
356 | /* | 370 | __skb_queue_tail(&priv->tx_free_list[qos], skb); |
357 | * Check again now that we have the lock. It might | 371 | cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0); |
358 | * have changed. | ||
359 | */ | ||
360 | if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) | ||
361 | dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); | ||
362 | if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) | ||
363 | dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); | ||
364 | spin_unlock(&priv->tx_free_list[qos].lock); | 372 | spin_unlock(&priv->tx_free_list[qos].lock); |
373 | } else { | ||
374 | cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); | ||
365 | } | 375 | } |
366 | 376 | ||
367 | return 0; | 377 | return 0; |
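The trickiest part of the ethernet-tx.c rework is the claim-and-undo arithmetic around the per-queue FAU counter: the counter goes negative as transmitted skbs become eligible for freeing, the sender optimistically adds MAX_SKB_TO_FREE to claim that many, and then gives back whatever part of the claim was not actually available. A userspace sketch of the same arithmetic, using a C11 atomic in place of the FAU register; the starting value and the helper name are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

#define MAX_SKB_TO_FREE 10

/* Pending-free counter, used the way the FAU register is: a value of
 * -N means N transmitted skbs are waiting to be freed. */
static atomic_int pending = -3;

/* Claim up to MAX_SKB_TO_FREE pending skbs, mirroring cvm_oct_xmit():
 * a non-positive return means there was nothing to free. */
static int claim_skbs_to_free(void)
{
	int old = atomic_fetch_add(&pending, MAX_SKB_TO_FREE);
	int undo = old > 0 ? MAX_SKB_TO_FREE : old + MAX_SKB_TO_FREE;

	/* Give back the part of the claim that was not available. */
	if (undo > 0)
		atomic_fetch_add(&pending, -undo);

	return -old > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -old;
}

int main(void)
{
	int n = claim_skbs_to_free();

	if (n > 0)
		printf("free %d skbs, counter now %d\n",
		       n, atomic_load(&pending));
	else
		printf("nothing to free, counter now %d\n",
		       atomic_load(&pending));
	return 0;
}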
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h index 5106236fe981..c0bebf750bc0 100644 --- a/drivers/staging/octeon/ethernet-tx.h +++ b/drivers/staging/octeon/ethernet-tx.h | |||
@@ -30,3 +30,28 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); | |||
30 | int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, | 30 | int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, |
31 | int do_free, int qos); | 31 | int do_free, int qos); |
32 | void cvm_oct_tx_shutdown(struct net_device *dev); | 32 | void cvm_oct_tx_shutdown(struct net_device *dev); |
33 | |||
34 | /** | ||
35 | * Free dead transmit skbs. | ||
36 | * | ||
37 | * @priv: The driver data | ||
38 | * @skb_to_free: The number of SKBs to free (free none if negative). | ||
39 | * @qos: The queue to free from. | ||
40 | * @take_lock: If true, acquire the skb list lock. | ||
41 | */ | ||
42 | static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv, | ||
43 | int skb_to_free, | ||
44 | int qos, int take_lock) | ||
45 | { | ||
46 | /* Free skbuffs not in use by the hardware. */ | ||
47 | if (skb_to_free > 0) { | ||
48 | if (take_lock) | ||
49 | spin_lock(&priv->tx_free_list[qos].lock); | ||
50 | while (skb_to_free > 0) { | ||
51 | dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); | ||
52 | skb_to_free--; | ||
53 | } | ||
54 | if (take_lock) | ||
55 | spin_unlock(&priv->tx_free_list[qos].lock); | ||
56 | } | ||
57 | } | ||
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c index f08eb32e04fc..0c2e7cc40f35 100644 --- a/drivers/staging/octeon/ethernet-xaui.c +++ b/drivers/staging/octeon/ethernet-xaui.c | |||
@@ -33,14 +33,13 @@ | |||
33 | 33 | ||
34 | #include "ethernet-defines.h" | 34 | #include "ethernet-defines.h" |
35 | #include "octeon-ethernet.h" | 35 | #include "octeon-ethernet.h" |
36 | #include "ethernet-common.h" | ||
37 | #include "ethernet-util.h" | 36 | #include "ethernet-util.h" |
38 | 37 | ||
39 | #include "cvmx-helper.h" | 38 | #include "cvmx-helper.h" |
40 | 39 | ||
41 | #include "cvmx-gmxx-defs.h" | 40 | #include "cvmx-gmxx-defs.h" |
42 | 41 | ||
43 | static int cvm_oct_xaui_open(struct net_device *dev) | 42 | int cvm_oct_xaui_open(struct net_device *dev) |
44 | { | 43 | { |
45 | union cvmx_gmxx_prtx_cfg gmx_cfg; | 44 | union cvmx_gmxx_prtx_cfg gmx_cfg; |
46 | struct octeon_ethernet *priv = netdev_priv(dev); | 45 | struct octeon_ethernet *priv = netdev_priv(dev); |
@@ -60,7 +59,7 @@ static int cvm_oct_xaui_open(struct net_device *dev) | |||
60 | return 0; | 59 | return 0; |
61 | } | 60 | } |
62 | 61 | ||
63 | static int cvm_oct_xaui_stop(struct net_device *dev) | 62 | int cvm_oct_xaui_stop(struct net_device *dev) |
64 | { | 63 | { |
65 | union cvmx_gmxx_prtx_cfg gmx_cfg; | 64 | union cvmx_gmxx_prtx_cfg gmx_cfg; |
66 | struct octeon_ethernet *priv = netdev_priv(dev); | 65 | struct octeon_ethernet *priv = netdev_priv(dev); |
@@ -112,9 +111,7 @@ int cvm_oct_xaui_init(struct net_device *dev) | |||
112 | { | 111 | { |
113 | struct octeon_ethernet *priv = netdev_priv(dev); | 112 | struct octeon_ethernet *priv = netdev_priv(dev); |
114 | cvm_oct_common_init(dev); | 113 | cvm_oct_common_init(dev); |
115 | dev->open = cvm_oct_xaui_open; | 114 | dev->netdev_ops->ndo_stop(dev); |
116 | dev->stop = cvm_oct_xaui_stop; | ||
117 | dev->stop(dev); | ||
118 | if (!octeon_is_simulation()) | 115 | if (!octeon_is_simulation()) |
119 | priv->poll = cvm_oct_xaui_poll; | 116 | priv->poll = cvm_oct_xaui_poll; |
120 | 117 | ||
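The rgmii/sgmii/xaui init functions above no longer assign dev->open and dev->stop directly; with the net_device_ops conversion they call the stop hook through dev->netdev_ops instead, and the ops table itself is built in the common ethernet.c code (not visible in these hunks). A minimal sketch of that pattern follows; the field names are the real struct net_device_ops members of this kernel era, while the exact table this patch installs is an assumption.

/* Sketch of the net_device_ops pattern: one static ops table per
 * interface type, installed on the device instead of per-field
 * function pointers. */
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_rgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_get_stats		= cvm_oct_common_get_stats,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
};

/* During probe, the common init code would then do something like:
 *	dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
 */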
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index e8ef9e0b791f..b8479517dce2 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c | |||
@@ -37,13 +37,14 @@ | |||
37 | #include <asm/octeon/octeon.h> | 37 | #include <asm/octeon/octeon.h> |
38 | 38 | ||
39 | #include "ethernet-defines.h" | 39 | #include "ethernet-defines.h" |
40 | #include "octeon-ethernet.h" | ||
40 | #include "ethernet-mem.h" | 41 | #include "ethernet-mem.h" |
41 | #include "ethernet-rx.h" | 42 | #include "ethernet-rx.h" |
42 | #include "ethernet-tx.h" | 43 | #include "ethernet-tx.h" |
44 | #include "ethernet-mdio.h" | ||
43 | #include "ethernet-util.h" | 45 | #include "ethernet-util.h" |
44 | #include "ethernet-proc.h" | 46 | #include "ethernet-proc.h" |
45 | #include "ethernet-common.h" | 47 | |
46 | #include "octeon-ethernet.h" | ||
47 | 48 | ||
48 | #include "cvmx-pip.h" | 49 | #include "cvmx-pip.h" |
49 | #include "cvmx-pko.h" | 50 | #include "cvmx-pko.h" |
@@ -51,6 +52,7 @@ | |||
51 | #include "cvmx-ipd.h" | 52 | #include "cvmx-ipd.h" |
52 | #include "cvmx-helper.h" | 53 | #include "cvmx-helper.h" |
53 | 54 | ||
55 | #include "cvmx-gmxx-defs.h" | ||
54 | #include "cvmx-smix-defs.h" | 56 | #include "cvmx-smix-defs.h" |
55 | 57 | ||
56 | #if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \ | 58 | #if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \ |
@@ -129,53 +131,55 @@ extern struct semaphore mdio_sem; | |||
129 | */ | 131 | */ |
130 | static void cvm_do_timer(unsigned long arg) | 132 | static void cvm_do_timer(unsigned long arg) |
131 | { | 133 | { |
134 | int32_t skb_to_free, undo; | ||
135 | int queues_per_port; | ||
136 | int qos; | ||
137 | struct octeon_ethernet *priv; | ||
132 | static int port; | 138 | static int port; |
133 | if (port < CVMX_PIP_NUM_INPUT_PORTS) { | ||
134 | if (cvm_oct_device[port]) { | ||
135 | int queues_per_port; | ||
136 | int qos; | ||
137 | struct octeon_ethernet *priv = | ||
138 | netdev_priv(cvm_oct_device[port]); | ||
139 | if (priv->poll) { | ||
140 | /* skip polling if we don't get the lock */ | ||
141 | if (!down_trylock(&mdio_sem)) { | ||
142 | priv->poll(cvm_oct_device[port]); | ||
143 | up(&mdio_sem); | ||
144 | } | ||
145 | } | ||
146 | 139 | ||
147 | queues_per_port = cvmx_pko_get_num_queues(port); | 140 | if (port >= CVMX_PIP_NUM_INPUT_PORTS) { |
148 | /* Drain any pending packets in the free list */ | 141 | /* |
149 | for (qos = 0; qos < queues_per_port; qos++) { | 142 | * All ports have been polled. Start the next |
150 | if (skb_queue_len(&priv->tx_free_list[qos])) { | 143 | * iteration through the ports in one second. |
151 | spin_lock(&priv->tx_free_list[qos]. | 144 | */ |
152 | lock); | ||
153 | while (skb_queue_len | ||
154 | (&priv->tx_free_list[qos]) > | ||
155 | cvmx_fau_fetch_and_add32(priv-> | ||
156 | fau + | ||
157 | qos * 4, | ||
158 | 0)) | ||
159 | dev_kfree_skb(__skb_dequeue | ||
160 | (&priv-> | ||
161 | tx_free_list | ||
162 | [qos])); | ||
163 | spin_unlock(&priv->tx_free_list[qos]. | ||
164 | lock); | ||
165 | } | ||
166 | } | ||
167 | cvm_oct_device[port]->get_stats(cvm_oct_device[port]); | ||
168 | } | ||
169 | port++; | ||
170 | /* Poll the next port in a 50th of a second. | ||
171 | This spreads the polling of ports out a little bit */ | ||
172 | mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); | ||
173 | } else { | ||
174 | port = 0; | 145 | port = 0; |
175 | /* All ports have been polled. Start the next iteration through | ||
176 | the ports in one second */ | ||
177 | mod_timer(&cvm_oct_poll_timer, jiffies + HZ); | 146 | mod_timer(&cvm_oct_poll_timer, jiffies + HZ); |
147 | return; | ||
148 | } | ||
149 | if (!cvm_oct_device[port]) | ||
150 | goto out; | ||
151 | |||
152 | priv = netdev_priv(cvm_oct_device[port]); | ||
153 | if (priv->poll) { | ||
154 | /* skip polling if we don't get the lock */ | ||
155 | if (!down_trylock(&mdio_sem)) { | ||
156 | priv->poll(cvm_oct_device[port]); | ||
157 | up(&mdio_sem); | ||
158 | } | ||
178 | } | 159 | } |
160 | |||
161 | queues_per_port = cvmx_pko_get_num_queues(port); | ||
162 | /* Drain any pending packets in the free list */ | ||
163 | for (qos = 0; qos < queues_per_port; qos++) { | ||
164 | if (skb_queue_len(&priv->tx_free_list[qos]) == 0) | ||
165 | continue; | ||
166 | skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, | ||
167 | MAX_SKB_TO_FREE); | ||
168 | undo = skb_to_free > 0 ? | ||
169 | MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; | ||
170 | if (undo > 0) | ||
171 | cvmx_fau_atomic_add32(priv->fau+qos*4, -undo); | ||
172 | skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? | ||
173 | MAX_SKB_TO_FREE : -skb_to_free; | ||
174 | cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); | ||
175 | } | ||
176 | cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]); | ||
177 | |||
178 | out: | ||
179 | port++; | ||
180 | /* Poll the next port in a 50th of a second. | ||
181 | This spreads the polling of ports out a little bit */ | ||
182 | mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); | ||
179 | } | 183 | } |
180 | 184 | ||
181 | /** | 185 | /** |
@@ -246,6 +250,362 @@ int cvm_oct_free_work(void *work_queue_entry) | |||
246 | EXPORT_SYMBOL(cvm_oct_free_work); | 250 | EXPORT_SYMBOL(cvm_oct_free_work); |
247 | 251 | ||
248 | /** | 252 | /** |
253 | * Get the low level ethernet statistics | ||
254 | * | ||
255 | * @dev: Device to get the statistics from | ||
256 | * Returns Pointer to the statistics | ||
257 | */ | ||
258 | static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) | ||
259 | { | ||
260 | cvmx_pip_port_status_t rx_status; | ||
261 | cvmx_pko_port_status_t tx_status; | ||
262 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
263 | |||
264 | if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { | ||
265 | if (octeon_is_simulation()) { | ||
266 | /* The simulator doesn't support statistics */ | ||
267 | memset(&rx_status, 0, sizeof(rx_status)); | ||
268 | memset(&tx_status, 0, sizeof(tx_status)); | ||
269 | } else { | ||
270 | cvmx_pip_get_port_status(priv->port, 1, &rx_status); | ||
271 | cvmx_pko_get_port_status(priv->port, 1, &tx_status); | ||
272 | } | ||
273 | |||
274 | priv->stats.rx_packets += rx_status.inb_packets; | ||
275 | priv->stats.tx_packets += tx_status.packets; | ||
276 | priv->stats.rx_bytes += rx_status.inb_octets; | ||
277 | priv->stats.tx_bytes += tx_status.octets; | ||
278 | priv->stats.multicast += rx_status.multicast_packets; | ||
279 | priv->stats.rx_crc_errors += rx_status.inb_errors; | ||
280 | priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; | ||
281 | |||
282 | /* | ||
283 | * The drop counter must be incremented atomically | ||
284 | * since the RX tasklet also increments it. | ||
285 | */ | ||
286 | #ifdef CONFIG_64BIT | ||
287 | atomic64_add(rx_status.dropped_packets, | ||
288 | (atomic64_t *)&priv->stats.rx_dropped); | ||
289 | #else | ||
290 | atomic_add(rx_status.dropped_packets, | ||
291 | (atomic_t *)&priv->stats.rx_dropped); | ||
292 | #endif | ||
293 | } | ||
294 | |||
295 | return &priv->stats; | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * Change the link MTU. | ||
300 | * | ||
301 | * @dev: Device to change | ||
302 | * @new_mtu: The new MTU | ||
303 | * | ||
304 | * Returns Zero on success | ||
305 | */ | ||
306 | static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) | ||
307 | { | ||
308 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
309 | int interface = INTERFACE(priv->port); | ||
310 | int index = INDEX(priv->port); | ||
311 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
312 | int vlan_bytes = 4; | ||
313 | #else | ||
314 | int vlan_bytes = 0; | ||
315 | #endif | ||
316 | |||
317 | /* | ||
318 | * Limit the MTU to make sure the ethernet packets are between | ||
319 | * 64 bytes and 65535 bytes. | ||
320 | */ | ||
321 | if ((new_mtu + 14 + 4 + vlan_bytes < 64) | ||
322 | || (new_mtu + 14 + 4 + vlan_bytes > 65392)) { | ||
323 | pr_err("MTU must be between %d and %d.\n", | ||
324 | 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes); | ||
325 | return -EINVAL; | ||
326 | } | ||
327 | dev->mtu = new_mtu; | ||
328 | |||
329 | if ((interface < 2) | ||
330 | && (cvmx_helper_interface_get_mode(interface) != | ||
331 | CVMX_HELPER_INTERFACE_MODE_SPI)) { | ||
332 | /* Add ethernet header and FCS, and VLAN if configured. */ | ||
333 | int max_packet = new_mtu + 14 + 4 + vlan_bytes; | ||
334 | |||
335 | if (OCTEON_IS_MODEL(OCTEON_CN3XXX) | ||
336 | || OCTEON_IS_MODEL(OCTEON_CN58XX)) { | ||
337 | /* Signal errors on packets larger than the MTU */ | ||
338 | cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), | ||
339 | max_packet); | ||
340 | } else { | ||
341 | /* | ||
342 | * Set the hardware to truncate packets larger | ||
343 | * than the MTU and smaller than 64 bytes. | ||
344 | */ | ||
345 | union cvmx_pip_frm_len_chkx frm_len_chk; | ||
346 | frm_len_chk.u64 = 0; | ||
347 | frm_len_chk.s.minlen = 64; | ||
348 | frm_len_chk.s.maxlen = max_packet; | ||
349 | cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface), | ||
350 | frm_len_chk.u64); | ||
351 | } | ||
352 | /* | ||
353 | * Set the hardware to truncate packets larger than | ||
354 | * the MTU. The jabber register must be set to a | ||
355 | * multiple of 8 bytes, so round up. | ||
356 | */ | ||
357 | cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface), | ||
358 | (max_packet + 7) & ~7u); | ||
359 | } | ||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * Set the multicast list. | ||
365 | * | ||
366 | * @dev: Device to work on | ||
367 | */ | ||
368 | static void cvm_oct_common_set_multicast_list(struct net_device *dev) | ||
369 | { | ||
370 | union cvmx_gmxx_prtx_cfg gmx_cfg; | ||
371 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
372 | int interface = INTERFACE(priv->port); | ||
373 | int index = INDEX(priv->port); | ||
374 | |||
375 | if ((interface < 2) | ||
376 | && (cvmx_helper_interface_get_mode(interface) != | ||
377 | CVMX_HELPER_INTERFACE_MODE_SPI)) { | ||
378 | union cvmx_gmxx_rxx_adr_ctl control; | ||
379 | control.u64 = 0; | ||
380 | control.s.bcst = 1; /* Allow broadcast MAC addresses */ | ||
381 | |||
382 | if (dev->mc_list || (dev->flags & IFF_ALLMULTI) || | ||
383 | (dev->flags & IFF_PROMISC)) | ||
384 | /* Force accept multicast packets */ | ||
385 | control.s.mcst = 2; | ||
386 | else | ||
387 | /* Force reject multicast packets */ | ||
388 | control.s.mcst = 1; | ||
389 | |||
390 | if (dev->flags & IFF_PROMISC) | ||
391 | /* | ||
392 | * Reject matches if promisc. Since CAM is | ||
393 | * shut off, should accept everything. | ||
394 | */ | ||
395 | control.s.cam_mode = 0; | ||
396 | else | ||
397 | /* Filter packets based on the CAM */ | ||
398 | control.s.cam_mode = 1; | ||
399 | |||
400 | gmx_cfg.u64 = | ||
401 | cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); | ||
402 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
403 | gmx_cfg.u64 & ~1ull); | ||
404 | |||
405 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), | ||
406 | control.u64); | ||
407 | if (dev->flags & IFF_PROMISC) | ||
408 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN | ||
409 | (index, interface), 0); | ||
410 | else | ||
411 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN | ||
412 | (index, interface), 1); | ||
413 | |||
414 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
415 | gmx_cfg.u64); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | /** | ||
420 | * Set the hardware MAC address for a device | ||
421 | * | ||
422 | * @dev: Device to change the MAC address for | ||
423 | * @addr: Address structure to change it to. MAC address is addr + 2. | ||
424 | * Returns Zero on success | ||
425 | */ | ||
426 | static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) | ||
427 | { | ||
428 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
429 | union cvmx_gmxx_prtx_cfg gmx_cfg; | ||
430 | int interface = INTERFACE(priv->port); | ||
431 | int index = INDEX(priv->port); | ||
432 | |||
433 | memcpy(dev->dev_addr, addr + 2, 6); | ||
434 | |||
435 | if ((interface < 2) | ||
436 | && (cvmx_helper_interface_get_mode(interface) != | ||
437 | CVMX_HELPER_INTERFACE_MODE_SPI)) { | ||
438 | int i; | ||
439 | uint8_t *ptr = addr; | ||
440 | uint64_t mac = 0; | ||
441 | for (i = 0; i < 6; i++) | ||
442 | mac = (mac << 8) | (uint64_t) (ptr[i + 2]); | ||
443 | |||
444 | gmx_cfg.u64 = | ||
445 | cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); | ||
446 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
447 | gmx_cfg.u64 & ~1ull); | ||
448 | |||
449 | cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac); | ||
450 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), | ||
451 | ptr[2]); | ||
452 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), | ||
453 | ptr[3]); | ||
454 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), | ||
455 | ptr[4]); | ||
456 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), | ||
457 | ptr[5]); | ||
458 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), | ||
459 | ptr[6]); | ||
460 | cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), | ||
461 | ptr[7]); | ||
462 | cvm_oct_common_set_multicast_list(dev); | ||
463 | cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), | ||
464 | gmx_cfg.u64); | ||
465 | } | ||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * Per network device initialization | ||
471 | * | ||
472 | * @dev: Device to initialize | ||
473 | * Returns Zero on success | ||
474 | */ | ||
475 | int cvm_oct_common_init(struct net_device *dev) | ||
476 | { | ||
477 | static int count; | ||
478 | char mac[8] = { 0x00, 0x00, | ||
479 | octeon_bootinfo->mac_addr_base[0], | ||
480 | octeon_bootinfo->mac_addr_base[1], | ||
481 | octeon_bootinfo->mac_addr_base[2], | ||
482 | octeon_bootinfo->mac_addr_base[3], | ||
483 | octeon_bootinfo->mac_addr_base[4], | ||
484 | octeon_bootinfo->mac_addr_base[5] + count | ||
485 | }; | ||
486 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
487 | |||
488 | /* | ||
489 | * Force the interface to use the POW send if always_use_pow | ||
490 | * was specified or it is in the pow send list. | ||
491 | */ | ||
492 | if ((pow_send_group != -1) | ||
493 | && (always_use_pow || strstr(pow_send_list, dev->name))) | ||
494 | priv->queue = -1; | ||
495 | |||
496 | if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM) | ||
497 | dev->features |= NETIF_F_IP_CSUM; | ||
498 | |||
499 | count++; | ||
500 | |||
501 | /* We do our own locking, Linux doesn't need to */ | ||
502 | dev->features |= NETIF_F_LLTX; | ||
503 | SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); | ||
504 | |||
505 | cvm_oct_mdio_setup_device(dev); | ||
506 | dev->netdev_ops->ndo_set_mac_address(dev, mac); | ||
507 | dev->netdev_ops->ndo_change_mtu(dev, dev->mtu); | ||
508 | |||
509 | /* | ||
510 | * Zero out stats for port so we won't mistakenly show | ||
511 | * counters from the bootloader. | ||
512 | */ | ||
513 | memset(dev->netdev_ops->ndo_get_stats(dev), 0, | ||
514 | sizeof(struct net_device_stats)); | ||
515 | |||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | void cvm_oct_common_uninit(struct net_device *dev) | ||
520 | { | ||
521 | /* Currently nothing to do */ | ||
522 | } | ||
523 | |||
524 | static const struct net_device_ops cvm_oct_npi_netdev_ops = { | ||
525 | .ndo_init = cvm_oct_common_init, | ||
526 | .ndo_uninit = cvm_oct_common_uninit, | ||
527 | .ndo_start_xmit = cvm_oct_xmit, | ||
528 | .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, | ||
529 | .ndo_set_mac_address = cvm_oct_common_set_mac_address, | ||
530 | .ndo_do_ioctl = cvm_oct_ioctl, | ||
531 | .ndo_change_mtu = cvm_oct_common_change_mtu, | ||
532 | .ndo_get_stats = cvm_oct_common_get_stats, | ||
533 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
534 | .ndo_poll_controller = cvm_oct_poll_controller, | ||
535 | #endif | ||
536 | }; | ||
537 | static const struct net_device_ops cvm_oct_xaui_netdev_ops = { | ||
538 | .ndo_init = cvm_oct_xaui_init, | ||
539 | .ndo_uninit = cvm_oct_xaui_uninit, | ||
540 | .ndo_open = cvm_oct_xaui_open, | ||
541 | .ndo_stop = cvm_oct_xaui_stop, | ||
542 | .ndo_start_xmit = cvm_oct_xmit, | ||
543 | .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, | ||
544 | .ndo_set_mac_address = cvm_oct_common_set_mac_address, | ||
545 | .ndo_do_ioctl = cvm_oct_ioctl, | ||
546 | .ndo_change_mtu = cvm_oct_common_change_mtu, | ||
547 | .ndo_get_stats = cvm_oct_common_get_stats, | ||
548 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
549 | .ndo_poll_controller = cvm_oct_poll_controller, | ||
550 | #endif | ||
551 | }; | ||
552 | static const struct net_device_ops cvm_oct_sgmii_netdev_ops = { | ||
553 | .ndo_init = cvm_oct_sgmii_init, | ||
554 | .ndo_uninit = cvm_oct_sgmii_uninit, | ||
555 | .ndo_open = cvm_oct_sgmii_open, | ||
556 | .ndo_stop = cvm_oct_sgmii_stop, | ||
557 | .ndo_start_xmit = cvm_oct_xmit, | ||
558 | .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, | ||
559 | .ndo_set_mac_address = cvm_oct_common_set_mac_address, | ||
560 | .ndo_do_ioctl = cvm_oct_ioctl, | ||
561 | .ndo_change_mtu = cvm_oct_common_change_mtu, | ||
562 | .ndo_get_stats = cvm_oct_common_get_stats, | ||
563 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
564 | .ndo_poll_controller = cvm_oct_poll_controller, | ||
565 | #endif | ||
566 | }; | ||
567 | static const struct net_device_ops cvm_oct_spi_netdev_ops = { | ||
568 | .ndo_init = cvm_oct_spi_init, | ||
569 | .ndo_uninit = cvm_oct_spi_uninit, | ||
570 | .ndo_start_xmit = cvm_oct_xmit, | ||
571 | .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, | ||
572 | .ndo_set_mac_address = cvm_oct_common_set_mac_address, | ||
573 | .ndo_do_ioctl = cvm_oct_ioctl, | ||
574 | .ndo_change_mtu = cvm_oct_common_change_mtu, | ||
575 | .ndo_get_stats = cvm_oct_common_get_stats, | ||
576 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
577 | .ndo_poll_controller = cvm_oct_poll_controller, | ||
578 | #endif | ||
579 | }; | ||
580 | static const struct net_device_ops cvm_oct_rgmii_netdev_ops = { | ||
581 | .ndo_init = cvm_oct_rgmii_init, | ||
582 | .ndo_uninit = cvm_oct_rgmii_uninit, | ||
583 | .ndo_open = cvm_oct_rgmii_open, | ||
584 | .ndo_stop = cvm_oct_rgmii_stop, | ||
585 | .ndo_start_xmit = cvm_oct_xmit, | ||
586 | .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, | ||
587 | .ndo_set_mac_address = cvm_oct_common_set_mac_address, | ||
588 | .ndo_do_ioctl = cvm_oct_ioctl, | ||
589 | .ndo_change_mtu = cvm_oct_common_change_mtu, | ||
590 | .ndo_get_stats = cvm_oct_common_get_stats, | ||
591 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
592 | .ndo_poll_controller = cvm_oct_poll_controller, | ||
593 | #endif | ||
594 | }; | ||
595 | static const struct net_device_ops cvm_oct_pow_netdev_ops = { | ||
596 | .ndo_init = cvm_oct_common_init, | ||
597 | .ndo_start_xmit = cvm_oct_xmit_pow, | ||
598 | .ndo_set_multicast_list = cvm_oct_common_set_multicast_list, | ||
599 | .ndo_set_mac_address = cvm_oct_common_set_mac_address, | ||
600 | .ndo_do_ioctl = cvm_oct_ioctl, | ||
601 | .ndo_change_mtu = cvm_oct_common_change_mtu, | ||
602 | .ndo_get_stats = cvm_oct_common_get_stats, | ||
603 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
604 | .ndo_poll_controller = cvm_oct_poll_controller, | ||
605 | #endif | ||
606 | }; | ||
607 | |||
608 | /** | ||
249 | * Module/driver initialization. Creates the Linux network | 609 | * Module/driver initialization. Creates the Linux network |
250 | * devices. | 610 | * devices. |
251 | * | 611 | * |
@@ -303,7 +663,7 @@ static int __init cvm_oct_init_module(void) | |||
303 | struct octeon_ethernet *priv = netdev_priv(dev); | 663 | struct octeon_ethernet *priv = netdev_priv(dev); |
304 | memset(priv, 0, sizeof(struct octeon_ethernet)); | 664 | memset(priv, 0, sizeof(struct octeon_ethernet)); |
305 | 665 | ||
306 | dev->init = cvm_oct_common_init; | 666 | dev->netdev_ops = &cvm_oct_pow_netdev_ops; |
307 | priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; | 667 | priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; |
308 | priv->port = CVMX_PIP_NUM_INPUT_PORTS; | 668 | priv->port = CVMX_PIP_NUM_INPUT_PORTS; |
309 | priv->queue = -1; | 669 | priv->queue = -1; |
@@ -372,44 +732,38 @@ static int __init cvm_oct_init_module(void) | |||
372 | break; | 732 | break; |
373 | 733 | ||
374 | case CVMX_HELPER_INTERFACE_MODE_NPI: | 734 | case CVMX_HELPER_INTERFACE_MODE_NPI: |
375 | dev->init = cvm_oct_common_init; | 735 | dev->netdev_ops = &cvm_oct_npi_netdev_ops; |
376 | dev->uninit = cvm_oct_common_uninit; | ||
377 | strcpy(dev->name, "npi%d"); | 736 | strcpy(dev->name, "npi%d"); |
378 | break; | 737 | break; |
379 | 738 | ||
380 | case CVMX_HELPER_INTERFACE_MODE_XAUI: | 739 | case CVMX_HELPER_INTERFACE_MODE_XAUI: |
381 | dev->init = cvm_oct_xaui_init; | 740 | dev->netdev_ops = &cvm_oct_xaui_netdev_ops; |
382 | dev->uninit = cvm_oct_xaui_uninit; | ||
383 | strcpy(dev->name, "xaui%d"); | 741 | strcpy(dev->name, "xaui%d"); |
384 | break; | 742 | break; |
385 | 743 | ||
386 | case CVMX_HELPER_INTERFACE_MODE_LOOP: | 744 | case CVMX_HELPER_INTERFACE_MODE_LOOP: |
387 | dev->init = cvm_oct_common_init; | 745 | dev->netdev_ops = &cvm_oct_npi_netdev_ops; |
388 | dev->uninit = cvm_oct_common_uninit; | ||
389 | strcpy(dev->name, "loop%d"); | 746 | strcpy(dev->name, "loop%d"); |
390 | break; | 747 | break; |
391 | 748 | ||
392 | case CVMX_HELPER_INTERFACE_MODE_SGMII: | 749 | case CVMX_HELPER_INTERFACE_MODE_SGMII: |
393 | dev->init = cvm_oct_sgmii_init; | 750 | dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; |
394 | dev->uninit = cvm_oct_sgmii_uninit; | ||
395 | strcpy(dev->name, "eth%d"); | 751 | strcpy(dev->name, "eth%d"); |
396 | break; | 752 | break; |
397 | 753 | ||
398 | case CVMX_HELPER_INTERFACE_MODE_SPI: | 754 | case CVMX_HELPER_INTERFACE_MODE_SPI: |
399 | dev->init = cvm_oct_spi_init; | 755 | dev->netdev_ops = &cvm_oct_spi_netdev_ops; |
400 | dev->uninit = cvm_oct_spi_uninit; | ||
401 | strcpy(dev->name, "spi%d"); | 756 | strcpy(dev->name, "spi%d"); |
402 | break; | 757 | break; |
403 | 758 | ||
404 | case CVMX_HELPER_INTERFACE_MODE_RGMII: | 759 | case CVMX_HELPER_INTERFACE_MODE_RGMII: |
405 | case CVMX_HELPER_INTERFACE_MODE_GMII: | 760 | case CVMX_HELPER_INTERFACE_MODE_GMII: |
406 | dev->init = cvm_oct_rgmii_init; | 761 | dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; |
407 | dev->uninit = cvm_oct_rgmii_uninit; | ||
408 | strcpy(dev->name, "eth%d"); | 762 | strcpy(dev->name, "eth%d"); |
409 | break; | 763 | break; |
410 | } | 764 | } |
411 | 765 | ||
412 | if (!dev->init) { | 766 | if (!dev->netdev_ops) { |
413 | kfree(dev); | 767 | kfree(dev); |
414 | } else if (register_netdev(dev) < 0) { | 768 | } else if (register_netdev(dev) < 0) { |
415 | pr_err("Failed to register ethernet device " | 769 | pr_err("Failed to register ethernet device " |
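As a side note on the MTU check in cvm_oct_common_change_mtu() above: the 64 and 65392 byte limits are wire-frame sizes, so the usable MTU range shrinks by the L2 overhead (14-byte Ethernet header, 4-byte FCS, optional 4-byte VLAN tag). A minimal stand-alone sketch of that arithmetic, with hypothetical helper names and not part of this patch:

/* Sketch only: the MTU bounds implied by the 64..65392 byte frame limits. */
static int example_min_mtu(int vlan_bytes)	/* hypothetical helper */
{
	const int overhead = 14 /* Ethernet header */ + 4 /* FCS */ + vlan_bytes;

	return 64 - overhead;		/* 46 bytes, or 42 with a VLAN tag */
}

static int example_max_mtu(int vlan_bytes)	/* hypothetical helper */
{
	const int overhead = 14 + 4 + vlan_bytes;

	return 65392 - overhead;	/* 65374 bytes, or 65370 with a VLAN tag */
}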
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index b3199076ef5e..3aef9878fc0a 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h | |||
@@ -111,12 +111,23 @@ static inline int cvm_oct_transmit(struct net_device *dev, | |||
111 | 111 | ||
112 | extern int cvm_oct_rgmii_init(struct net_device *dev); | 112 | extern int cvm_oct_rgmii_init(struct net_device *dev); |
113 | extern void cvm_oct_rgmii_uninit(struct net_device *dev); | 113 | extern void cvm_oct_rgmii_uninit(struct net_device *dev); |
114 | extern int cvm_oct_rgmii_open(struct net_device *dev); | ||
115 | extern int cvm_oct_rgmii_stop(struct net_device *dev); | ||
116 | |||
114 | extern int cvm_oct_sgmii_init(struct net_device *dev); | 117 | extern int cvm_oct_sgmii_init(struct net_device *dev); |
115 | extern void cvm_oct_sgmii_uninit(struct net_device *dev); | 118 | extern void cvm_oct_sgmii_uninit(struct net_device *dev); |
119 | extern int cvm_oct_sgmii_open(struct net_device *dev); | ||
120 | extern int cvm_oct_sgmii_stop(struct net_device *dev); | ||
121 | |||
116 | extern int cvm_oct_spi_init(struct net_device *dev); | 122 | extern int cvm_oct_spi_init(struct net_device *dev); |
117 | extern void cvm_oct_spi_uninit(struct net_device *dev); | 123 | extern void cvm_oct_spi_uninit(struct net_device *dev); |
118 | extern int cvm_oct_xaui_init(struct net_device *dev); | 124 | extern int cvm_oct_xaui_init(struct net_device *dev); |
119 | extern void cvm_oct_xaui_uninit(struct net_device *dev); | 125 | extern void cvm_oct_xaui_uninit(struct net_device *dev); |
126 | extern int cvm_oct_xaui_open(struct net_device *dev); | ||
127 | extern int cvm_oct_xaui_stop(struct net_device *dev); | ||
128 | |||
129 | extern int cvm_oct_common_init(struct net_device *dev); | ||
130 | extern void cvm_oct_common_uninit(struct net_device *dev); | ||
120 | 131 | ||
121 | extern int always_use_pow; | 132 | extern int always_use_pow; |
122 | extern int pow_send_group; | 133 | extern int pow_send_group; |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 38bfdb0f6660..3f1045993474 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -550,7 +550,7 @@ static void acm_waker(struct work_struct *waker) | |||
550 | static int acm_tty_open(struct tty_struct *tty, struct file *filp) | 550 | static int acm_tty_open(struct tty_struct *tty, struct file *filp) |
551 | { | 551 | { |
552 | struct acm *acm; | 552 | struct acm *acm; |
553 | int rv = -EINVAL; | 553 | int rv = -ENODEV; |
554 | int i; | 554 | int i; |
555 | dbg("Entering acm_tty_open."); | 555 | dbg("Entering acm_tty_open."); |
556 | 556 | ||
@@ -677,7 +677,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) | |||
677 | 677 | ||
678 | /* Perform the closing process and see if we need to do the hardware | 678 | /* Perform the closing process and see if we need to do the hardware |
679 | shutdown */ | 679 | shutdown */ |
680 | if (tty_port_close_start(&acm->port, tty, filp) == 0) | 680 | if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0) |
681 | return; | 681 | return; |
682 | acm_port_down(acm, 0); | 682 | acm_port_down(acm, 0); |
683 | tty_port_close_end(&acm->port, tty); | 683 | tty_port_close_end(&acm->port, tty); |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index d595aa5586a7..a84216464ca0 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -333,6 +333,9 @@ static void serial_close(struct tty_struct *tty, struct file *filp) | |||
333 | { | 333 | { |
334 | struct usb_serial_port *port = tty->driver_data; | 334 | struct usb_serial_port *port = tty->driver_data; |
335 | 335 | ||
336 | if (!port) | ||
337 | return; | ||
338 | |||
336 | dbg("%s - port %d", __func__, port->number); | 339 | dbg("%s - port %d", __func__, port->number); |
337 | 340 | ||
338 | 341 | ||
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 559f8784acf3..9052bcb4f528 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c | |||
@@ -501,7 +501,7 @@ int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header, | |||
501 | int result = -ENOANO; | 501 | int result = -ENOANO; |
502 | struct uwb_rceb *rceb = *header; | 502 | struct uwb_rceb *rceb = *header; |
503 | int event = le16_to_cpu(rceb->wEvent); | 503 | int event = le16_to_cpu(rceb->wEvent); |
504 | size_t event_size; | 504 | ssize_t event_size; |
505 | size_t core_size, offset; | 505 | size_t core_size, offset; |
506 | 506 | ||
507 | if (rceb->bEventType != UWB_RC_CET_GENERAL) | 507 | if (rceb->bEventType != UWB_RC_CET_GENERAL) |
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c index cd2035768b47..86a853b84119 100644 --- a/drivers/uwb/wlp/txrx.c +++ b/drivers/uwb/wlp/txrx.c | |||
@@ -326,7 +326,7 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
326 | int result = -EINVAL; | 326 | int result = -EINVAL; |
327 | struct ethhdr *eth_hdr = (void *) skb->data; | 327 | struct ethhdr *eth_hdr = (void *) skb->data; |
328 | 328 | ||
329 | if (is_broadcast_ether_addr(eth_hdr->h_dest)) { | 329 | if (is_multicast_ether_addr(eth_hdr->h_dest)) { |
330 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); | 330 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); |
331 | if (result < 0) { | 331 | if (result < 0) { |
332 | if (printk_ratelimit()) | 332 | if (printk_ratelimit()) |
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c index 135ae18bfce8..eef2bb298d9f 100644 --- a/drivers/video/nvidia/nv_setup.c +++ b/drivers/video/nvidia/nv_setup.c | |||
@@ -543,8 +543,7 @@ int NVCommonSetup(struct fb_info *info) | |||
543 | } else if (analog_on_B) { | 543 | } else if (analog_on_B) { |
544 | CRTCnumber = outputBfromCRTC; | 544 | CRTCnumber = outputBfromCRTC; |
545 | FlatPanel = 0; | 545 | FlatPanel = 0; |
546 | printk("nvidiafb: CRTC %i" | 546 | printk("nvidiafb: CRTC %i appears to have a " |
547 | "appears to have a " | ||
548 | "CRT attached\n", CRTCnumber); | 547 | "CRT attached\n", CRTCnumber); |
549 | } else if (slaved_on_A) { | 548 | } else if (slaved_on_A) { |
550 | CRTCnumber = 0; | 549 | CRTCnumber = 0; |
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c index 1f09d4e4144c..59f708efe25f 100644 --- a/drivers/w1/slaves/w1_ds2760.c +++ b/drivers/w1/slaves/w1_ds2760.c | |||
@@ -68,6 +68,34 @@ int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count) | |||
68 | return w1_ds2760_io(dev, buf, addr, count, 1); | 68 | return w1_ds2760_io(dev, buf, addr, count, 1); |
69 | } | 69 | } |
70 | 70 | ||
71 | static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd) | ||
72 | { | ||
73 | struct w1_slave *sl = container_of(dev, struct w1_slave, dev); | ||
74 | |||
75 | if (!dev) | ||
76 | return -EINVAL; | ||
77 | |||
78 | mutex_lock(&sl->master->mutex); | ||
79 | |||
80 | if (w1_reset_select_slave(sl) == 0) { | ||
81 | w1_write_8(sl->master, cmd); | ||
82 | w1_write_8(sl->master, addr); | ||
83 | } | ||
84 | |||
85 | mutex_unlock(&sl->master->mutex); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | int w1_ds2760_store_eeprom(struct device *dev, int addr) | ||
90 | { | ||
91 | return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA); | ||
92 | } | ||
93 | |||
94 | int w1_ds2760_recall_eeprom(struct device *dev, int addr) | ||
95 | { | ||
96 | return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA); | ||
97 | } | ||
98 | |||
71 | static ssize_t w1_ds2760_read_bin(struct kobject *kobj, | 99 | static ssize_t w1_ds2760_read_bin(struct kobject *kobj, |
72 | struct bin_attribute *bin_attr, | 100 | struct bin_attribute *bin_attr, |
73 | char *buf, loff_t off, size_t count) | 101 | char *buf, loff_t off, size_t count) |
@@ -200,6 +228,8 @@ static void __exit w1_ds2760_exit(void) | |||
200 | 228 | ||
201 | EXPORT_SYMBOL(w1_ds2760_read); | 229 | EXPORT_SYMBOL(w1_ds2760_read); |
202 | EXPORT_SYMBOL(w1_ds2760_write); | 230 | EXPORT_SYMBOL(w1_ds2760_write); |
231 | EXPORT_SYMBOL(w1_ds2760_store_eeprom); | ||
232 | EXPORT_SYMBOL(w1_ds2760_recall_eeprom); | ||
203 | 233 | ||
204 | module_init(w1_ds2760_init); | 234 | module_init(w1_ds2760_init); |
205 | module_exit(w1_ds2760_exit); | 235 | module_exit(w1_ds2760_exit); |
diff --git a/drivers/w1/slaves/w1_ds2760.h b/drivers/w1/slaves/w1_ds2760.h index f1302429cb02..58e774141568 100644 --- a/drivers/w1/slaves/w1_ds2760.h +++ b/drivers/w1/slaves/w1_ds2760.h | |||
@@ -25,6 +25,10 @@ | |||
25 | 25 | ||
26 | #define DS2760_PROTECTION_REG 0x00 | 26 | #define DS2760_PROTECTION_REG 0x00 |
27 | #define DS2760_STATUS_REG 0x01 | 27 | #define DS2760_STATUS_REG 0x01 |
28 | #define DS2760_STATUS_IE (1 << 2) | ||
29 | #define DS2760_STATUS_SWEN (1 << 3) | ||
30 | #define DS2760_STATUS_RNAOP (1 << 4) | ||
31 | #define DS2760_STATUS_PMOD (1 << 5) | ||
28 | #define DS2760_EEPROM_REG 0x07 | 32 | #define DS2760_EEPROM_REG 0x07 |
29 | #define DS2760_SPECIAL_FEATURE_REG 0x08 | 33 | #define DS2760_SPECIAL_FEATURE_REG 0x08 |
30 | #define DS2760_VOLTAGE_MSB 0x0c | 34 | #define DS2760_VOLTAGE_MSB 0x0c |
@@ -38,6 +42,7 @@ | |||
38 | #define DS2760_EEPROM_BLOCK0 0x20 | 42 | #define DS2760_EEPROM_BLOCK0 0x20 |
39 | #define DS2760_ACTIVE_FULL 0x20 | 43 | #define DS2760_ACTIVE_FULL 0x20 |
40 | #define DS2760_EEPROM_BLOCK1 0x30 | 44 | #define DS2760_EEPROM_BLOCK1 0x30 |
45 | #define DS2760_STATUS_WRITE_REG 0x31 | ||
41 | #define DS2760_RATED_CAPACITY 0x32 | 46 | #define DS2760_RATED_CAPACITY 0x32 |
42 | #define DS2760_CURRENT_OFFSET_BIAS 0x33 | 47 | #define DS2760_CURRENT_OFFSET_BIAS 0x33 |
43 | #define DS2760_ACTIVE_EMPTY 0x3b | 48 | #define DS2760_ACTIVE_EMPTY 0x3b |
@@ -46,5 +51,7 @@ extern int w1_ds2760_read(struct device *dev, char *buf, int addr, | |||
46 | size_t count); | 51 | size_t count); |
47 | extern int w1_ds2760_write(struct device *dev, char *buf, int addr, | 52 | extern int w1_ds2760_write(struct device *dev, char *buf, int addr, |
48 | size_t count); | 53 | size_t count); |
54 | extern int w1_ds2760_store_eeprom(struct device *dev, int addr); | ||
55 | extern int w1_ds2760_recall_eeprom(struct device *dev, int addr); | ||
49 | 56 | ||
50 | #endif /* !__w1_ds2760_h__ */ | 57 | #endif /* !__w1_ds2760_h__ */ |
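The new status-bit definitions and the exported EEPROM helpers are meant to be combined by a consumer such as the ds2760 battery driver. A hedged sketch of one plausible use (hypothetical function, error handling omitted, not code from this patch):

#include <linux/device.h>
#include "w1_ds2760.h"

/* Sketch only: set the sleep-mode (PMOD) bit by rewriting the status
 * shadow at DS2760_STATUS_WRITE_REG and committing EEPROM block 1 with
 * the helpers exported above. 'dev' is the DS2760 w1 slave device. */
static void example_enable_sleep_mode(struct device *dev)
{
	char status;

	w1_ds2760_read(dev, &status, DS2760_STATUS_REG, 1);
	if (!(status & DS2760_STATUS_PMOD)) {
		status |= DS2760_STATUS_PMOD;
		w1_ds2760_write(dev, &status, DS2760_STATUS_WRITE_REG, 1);
		w1_ds2760_store_eeprom(dev, DS2760_EEPROM_BLOCK1);
		w1_ds2760_recall_eeprom(dev, DS2760_EEPROM_BLOCK1);
	}
}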
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index b166f2852a64..b1ccc04f3c9a 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -240,6 +240,32 @@ config ORION_WATCHDOG | |||
240 | To compile this driver as a module, choose M here: the | 240 | To compile this driver as a module, choose M here: the |
241 | module will be called orion_wdt. | 241 | module will be called orion_wdt. |
242 | 242 | ||
243 | config COH901327_WATCHDOG | ||
244 | bool "ST-Ericsson COH 901 327 watchdog" | ||
245 | depends on ARCH_U300 | ||
246 | default y if MACH_U300 | ||
247 | help | ||
248 | Say Y here to include Watchdog timer support for the | ||
249 | watchdog embedded into the ST-Ericsson U300 series platforms. | ||
250 | This watchdog is used to reset the system and thus cannot be | ||
251 | compiled as a module. | ||
252 | |||
253 | config TWL4030_WATCHDOG | ||
254 | tristate "TWL4030 Watchdog" | ||
255 | depends on TWL4030_CORE | ||
256 | help | ||
257 | Support for TI TWL4030 watchdog. Say 'Y' here to enable the | ||
258 | watchdog timer support for TWL4030 chips. | ||
259 | |||
260 | config STMP3XXX_WATCHDOG | ||
261 | tristate "Freescale STMP3XXX watchdog" | ||
262 | depends on ARCH_STMP3XXX | ||
263 | help | ||
264 | Say Y here to include support for the watchdog timer | ||
265 | for the Sigmatel STMP37XX/378X SoC. | ||
266 | To compile this driver as a module, choose M here: the | ||
267 | module will be called stmp3xxx_wdt. | ||
268 | |||
243 | # AVR32 Architecture | 269 | # AVR32 Architecture |
244 | 270 | ||
245 | config AT32AP700X_WDT | 271 | config AT32AP700X_WDT |
@@ -703,6 +729,12 @@ config SBC_EPX_C3_WATCHDOG | |||
703 | 729 | ||
704 | # MIPS Architecture | 730 | # MIPS Architecture |
705 | 731 | ||
732 | config BCM47XX_WDT | ||
733 | tristate "Broadcom BCM47xx Watchdog Timer" | ||
734 | depends on BCM47XX | ||
735 | help | ||
735 | Hardware driver for the Broadcom BCM47xx Watchdog Timer. | ||
737 | |||
706 | config RC32434_WDT | 738 | config RC32434_WDT |
707 | tristate "IDT RC32434 SoC Watchdog Timer" | 739 | tristate "IDT RC32434 SoC Watchdog Timer" |
708 | depends on MIKROTIK_RB532 | 740 | depends on MIKROTIK_RB532 |
@@ -729,6 +761,15 @@ config WDT_MTX1 | |||
729 | Hardware driver for the MTX-1 boards. This is a watchdog timer that | 761 | Hardware driver for the MTX-1 boards. This is a watchdog timer that |
730 | will reboot the machine after a 100 second timer has expired. | 762 | will reboot the machine after a 100 second timer has expired. |
731 | 763 | ||
764 | config PNX833X_WDT | ||
765 | tristate "PNX833x Hardware Watchdog" | ||
766 | depends on SOC_PNX8335 | ||
767 | help | ||
768 | Hardware driver for the PNX833x's watchdog. This is a | ||
769 | watchdog timer that will reboot the machine after a programmable | ||
770 | timer has expired and no process has written to /dev/watchdog during | ||
771 | that time. | ||
772 | |||
732 | config WDT_RM9K_GPI | 773 | config WDT_RM9K_GPI |
733 | tristate "RM9000/GPI hardware watchdog" | 774 | tristate "RM9000/GPI hardware watchdog" |
734 | depends on CPU_RM9000 | 775 | depends on CPU_RM9000 |
@@ -966,24 +1007,16 @@ config WDTPCI | |||
966 | ---help--- | 1007 | ---help--- |
967 | If you have a PCI-WDT500/501 watchdog board, say Y here, otherwise N. | 1008 | If you have a PCI-WDT500/501 watchdog board, say Y here, otherwise N. |
968 | 1009 | ||
969 | To compile this driver as a module, choose M here: the | 1010 | If you have a PCI-WDT501 watchdog board then you can enable the |
970 | module will be called wdt_pci. | 1011 | temperature sensor by setting the type parameter to 501. |
971 | |||
972 | config WDT_501_PCI | ||
973 | bool "PCI-WDT501 features" | ||
974 | depends on WDTPCI | ||
975 | help | ||
976 | Saying Y here and creating a character special file /dev/temperature | ||
977 | with major number 10 and minor number 131 ("man mknod") will give | ||
978 | you a thermometer inside your computer: reading from | ||
979 | /dev/temperature yields one byte, the temperature in degrees | ||
980 | Fahrenheit. This works only if you have a PCI-WDT501 watchdog board | ||
981 | installed. | ||
982 | 1012 | ||
983 | If you want to enable the Fan Tachometer on the PCI-WDT501, then you | 1013 | If you want to enable the Fan Tachometer on the PCI-WDT501, then you |
984 | can do this via the tachometer parameter. Only do this if you have a | 1014 | can do this via the tachometer parameter. Only do this if you have a |
985 | fan tachometer actually set up. | 1015 | fan tachometer actually set up. |
986 | 1016 | ||
1017 | To compile this driver as a module, choose M here: the | ||
1018 | module will be called wdt_pci. | ||
1019 | |||
987 | # | 1020 | # |
988 | # USB-based Watchdog Cards | 1021 | # USB-based Watchdog Cards |
989 | # | 1022 | # |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index c3afa14d5be1..3d774294a2b7 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -28,6 +28,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o | |||
28 | obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o | 28 | obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o |
29 | obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o | 29 | obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o |
30 | obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o | 30 | obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o |
31 | obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o | ||
31 | obj-$(CONFIG_21285_WATCHDOG) += wdt285.o | 32 | obj-$(CONFIG_21285_WATCHDOG) += wdt285.o |
32 | obj-$(CONFIG_977_WATCHDOG) += wdt977.o | 33 | obj-$(CONFIG_977_WATCHDOG) += wdt977.o |
33 | obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o | 34 | obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o |
@@ -41,6 +42,8 @@ obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o | |||
41 | obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o | 42 | obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o |
42 | obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o | 43 | obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o |
43 | obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o | 44 | obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o |
45 | obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o | ||
46 | obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o | ||
44 | 47 | ||
45 | # AVR32 Architecture | 48 | # AVR32 Architecture |
46 | obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o | 49 | obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o |
@@ -98,9 +101,11 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o | |||
98 | # M68KNOMMU Architecture | 101 | # M68KNOMMU Architecture |
99 | 102 | ||
100 | # MIPS Architecture | 103 | # MIPS Architecture |
104 | obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o | ||
101 | obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o | 105 | obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o |
102 | obj-$(CONFIG_INDYDOG) += indydog.o | 106 | obj-$(CONFIG_INDYDOG) += indydog.o |
103 | obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o | 107 | obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o |
108 | obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o | ||
104 | obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o | 109 | obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o |
105 | obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o | 110 | obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o |
106 | obj-$(CONFIG_AR7_WDT) += ar7_wdt.o | 111 | obj-$(CONFIG_AR7_WDT) += ar7_wdt.o |
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c new file mode 100644 index 000000000000..5c7011cda6a6 --- /dev/null +++ b/drivers/watchdog/bcm47xx_wdt.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /* | ||
2 | * Watchdog driver for Broadcom BCM47XX | ||
3 | * | ||
4 | * Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs> | ||
5 | * Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/bitops.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/miscdevice.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/moduleparam.h> | ||
21 | #include <linux/reboot.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/watchdog.h> | ||
25 | #include <linux/timer.h> | ||
26 | #include <linux/jiffies.h> | ||
27 | #include <linux/ssb/ssb_embedded.h> | ||
28 | #include <asm/mach-bcm47xx/bcm47xx.h> | ||
29 | |||
30 | #define DRV_NAME "bcm47xx_wdt" | ||
31 | |||
32 | #define WDT_DEFAULT_TIME 30 /* seconds */ | ||
33 | #define WDT_MAX_TIME 255 /* seconds */ | ||
34 | |||
35 | static int wdt_time = WDT_DEFAULT_TIME; | ||
36 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
37 | |||
38 | module_param(wdt_time, int, 0); | ||
39 | MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default=" | ||
40 | __MODULE_STRING(WDT_DEFAULT_TIME) ")"); | ||
41 | |||
42 | #ifdef CONFIG_WATCHDOG_NOWAYOUT | ||
43 | module_param(nowayout, int, 0); | ||
44 | MODULE_PARM_DESC(nowayout, | ||
45 | "Watchdog cannot be stopped once started (default=" | ||
46 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | ||
47 | #endif | ||
48 | |||
49 | static unsigned long bcm47xx_wdt_busy; | ||
50 | static char expect_release; | ||
51 | static struct timer_list wdt_timer; | ||
52 | static atomic_t ticks; | ||
53 | |||
54 | static inline void bcm47xx_wdt_hw_start(void) | ||
55 | { | ||
56 | /* this is 2.5s on a 100 MHz clock and 2s on a 133 MHz clock */ | ||
57 | ssb_watchdog_timer_set(&ssb_bcm47xx, 0xfffffff); | ||
58 | } | ||
59 | |||
60 | static inline int bcm47xx_wdt_hw_stop(void) | ||
61 | { | ||
62 | return ssb_watchdog_timer_set(&ssb_bcm47xx, 0); | ||
63 | } | ||
64 | |||
65 | static void bcm47xx_timer_tick(unsigned long unused) | ||
66 | { | ||
67 | if (!atomic_dec_and_test(&ticks)) { | ||
68 | bcm47xx_wdt_hw_start(); | ||
69 | mod_timer(&wdt_timer, jiffies + HZ); | ||
70 | } else { | ||
71 | printk(KERN_CRIT DRV_NAME ": Watchdog will fire soon!!!\n"); | ||
72 | } | ||
73 | } | ||
74 | |||
75 | static inline void bcm47xx_wdt_pet(void) | ||
76 | { | ||
77 | atomic_set(&ticks, wdt_time); | ||
78 | } | ||
79 | |||
80 | static void bcm47xx_wdt_start(void) | ||
81 | { | ||
82 | bcm47xx_wdt_pet(); | ||
83 | bcm47xx_timer_tick(0); | ||
84 | } | ||
85 | |||
86 | static void bcm47xx_wdt_pause(void) | ||
87 | { | ||
88 | del_timer_sync(&wdt_timer); | ||
89 | bcm47xx_wdt_hw_stop(); | ||
90 | } | ||
91 | |||
92 | static void bcm47xx_wdt_stop(void) | ||
93 | { | ||
94 | bcm47xx_wdt_pause(); | ||
95 | } | ||
96 | |||
97 | static int bcm47xx_wdt_settimeout(int new_time) | ||
98 | { | ||
99 | if ((new_time <= 0) || (new_time > WDT_MAX_TIME)) | ||
100 | return -EINVAL; | ||
101 | |||
102 | wdt_time = new_time; | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static int bcm47xx_wdt_open(struct inode *inode, struct file *file) | ||
107 | { | ||
108 | if (test_and_set_bit(0, &bcm47xx_wdt_busy)) | ||
109 | return -EBUSY; | ||
110 | |||
111 | bcm47xx_wdt_start(); | ||
112 | return nonseekable_open(inode, file); | ||
113 | } | ||
114 | |||
115 | static int bcm47xx_wdt_release(struct inode *inode, struct file *file) | ||
116 | { | ||
117 | if (expect_release == 42) { | ||
118 | bcm47xx_wdt_stop(); | ||
119 | } else { | ||
120 | printk(KERN_CRIT DRV_NAME | ||
121 | ": Unexpected close, not stopping watchdog!\n"); | ||
122 | bcm47xx_wdt_start(); | ||
123 | } | ||
124 | |||
125 | clear_bit(0, &bcm47xx_wdt_busy); | ||
126 | expect_release = 0; | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data, | ||
131 | size_t len, loff_t *ppos) | ||
132 | { | ||
133 | if (len) { | ||
134 | if (!nowayout) { | ||
135 | size_t i; | ||
136 | |||
137 | expect_release = 0; | ||
138 | |||
139 | for (i = 0; i != len; i++) { | ||
140 | char c; | ||
141 | if (get_user(c, data + i)) | ||
142 | return -EFAULT; | ||
143 | if (c == 'V') | ||
144 | expect_release = 42; | ||
145 | } | ||
146 | } | ||
147 | bcm47xx_wdt_pet(); | ||
148 | } | ||
149 | return len; | ||
150 | } | ||
151 | |||
152 | static struct watchdog_info bcm47xx_wdt_info = { | ||
153 | .identity = DRV_NAME, | ||
154 | .options = WDIOF_SETTIMEOUT | | ||
155 | WDIOF_KEEPALIVEPING | | ||
156 | WDIOF_MAGICCLOSE, | ||
157 | }; | ||
158 | |||
159 | static long bcm47xx_wdt_ioctl(struct file *file, | ||
160 | unsigned int cmd, unsigned long arg) | ||
161 | { | ||
162 | void __user *argp = (void __user *)arg; | ||
163 | int __user *p = argp; | ||
164 | int new_value, retval = -EINVAL; | ||
165 | |||
166 | switch (cmd) { | ||
167 | case WDIOC_GETSUPPORT: | ||
168 | return copy_to_user(argp, &bcm47xx_wdt_info, | ||
169 | sizeof(bcm47xx_wdt_info)) ? -EFAULT : 0; | ||
170 | |||
171 | case WDIOC_GETSTATUS: | ||
172 | case WDIOC_GETBOOTSTATUS: | ||
173 | return put_user(0, p); | ||
174 | |||
175 | case WDIOC_SETOPTIONS: | ||
176 | if (get_user(new_value, p)) | ||
177 | return -EFAULT; | ||
178 | |||
179 | if (new_value & WDIOS_DISABLECARD) { | ||
180 | bcm47xx_wdt_stop(); | ||
181 | retval = 0; | ||
182 | } | ||
183 | |||
184 | if (new_value & WDIOS_ENABLECARD) { | ||
185 | bcm47xx_wdt_start(); | ||
186 | retval = 0; | ||
187 | } | ||
188 | |||
189 | return retval; | ||
190 | |||
191 | case WDIOC_KEEPALIVE: | ||
192 | bcm47xx_wdt_pet(); | ||
193 | return 0; | ||
194 | |||
195 | case WDIOC_SETTIMEOUT: | ||
196 | if (get_user(new_value, p)) | ||
197 | return -EFAULT; | ||
198 | |||
199 | if (bcm47xx_wdt_settimeout(new_value)) | ||
200 | return -EINVAL; | ||
201 | |||
202 | bcm47xx_wdt_pet(); /* fall through to return the new timeout */ | ||
203 | |||
204 | case WDIOC_GETTIMEOUT: | ||
205 | return put_user(wdt_time, p); | ||
206 | |||
207 | default: | ||
208 | return -ENOTTY; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static int bcm47xx_wdt_notify_sys(struct notifier_block *this, | ||
213 | unsigned long code, void *unused) | ||
214 | { | ||
215 | if (code == SYS_DOWN || code == SYS_HALT) | ||
216 | bcm47xx_wdt_stop(); | ||
217 | return NOTIFY_DONE; | ||
218 | } | ||
219 | |||
220 | static const struct file_operations bcm47xx_wdt_fops = { | ||
221 | .owner = THIS_MODULE, | ||
222 | .llseek = no_llseek, | ||
223 | .unlocked_ioctl = bcm47xx_wdt_ioctl, | ||
224 | .open = bcm47xx_wdt_open, | ||
225 | .release = bcm47xx_wdt_release, | ||
226 | .write = bcm47xx_wdt_write, | ||
227 | }; | ||
228 | |||
229 | static struct miscdevice bcm47xx_wdt_miscdev = { | ||
230 | .minor = WATCHDOG_MINOR, | ||
231 | .name = "watchdog", | ||
232 | .fops = &bcm47xx_wdt_fops, | ||
233 | }; | ||
234 | |||
235 | static struct notifier_block bcm47xx_wdt_notifier = { | ||
236 | .notifier_call = bcm47xx_wdt_notify_sys, | ||
237 | }; | ||
238 | |||
239 | static int __init bcm47xx_wdt_init(void) | ||
240 | { | ||
241 | int ret; | ||
242 | |||
243 | if (bcm47xx_wdt_hw_stop() < 0) | ||
244 | return -ENODEV; | ||
245 | |||
246 | setup_timer(&wdt_timer, bcm47xx_timer_tick, 0L); | ||
247 | |||
248 | if (bcm47xx_wdt_settimeout(wdt_time)) { | ||
249 | bcm47xx_wdt_settimeout(WDT_DEFAULT_TIME); | ||
250 | printk(KERN_INFO DRV_NAME ": " | ||
251 | "wdt_time value must be 0 < wdt_time < %d, using %d\n", | ||
252 | (WDT_MAX_TIME + 1), wdt_time); | ||
253 | } | ||
254 | |||
255 | ret = register_reboot_notifier(&bcm47xx_wdt_notifier); | ||
256 | if (ret) | ||
257 | return ret; | ||
258 | |||
259 | ret = misc_register(&bcm47xx_wdt_miscdev); | ||
260 | if (ret) { | ||
261 | unregister_reboot_notifier(&bcm47xx_wdt_notifier); | ||
262 | return ret; | ||
263 | } | ||
264 | |||
265 | printk(KERN_INFO "BCM47xx Watchdog Timer enabled (%d seconds%s)\n", | ||
266 | wdt_time, nowayout ? ", nowayout" : ""); | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static void __exit bcm47xx_wdt_exit(void) | ||
271 | { | ||
272 | if (!nowayout) | ||
273 | bcm47xx_wdt_stop(); | ||
274 | |||
275 | misc_deregister(&bcm47xx_wdt_miscdev); | ||
276 | |||
277 | unregister_reboot_notifier(&bcm47xx_wdt_notifier); | ||
278 | } | ||
279 | |||
280 | module_init(bcm47xx_wdt_init); | ||
281 | module_exit(bcm47xx_wdt_exit); | ||
282 | |||
283 | MODULE_AUTHOR("Aleksandar Radovanovic"); | ||
284 | MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx"); | ||
285 | MODULE_LICENSE("GPL"); | ||
286 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
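For context, a minimal userspace sketch of how the /dev/watchdog misc device registered above is typically driven; the ioctls map onto bcm47xx_wdt_ioctl() and the 'V' write arms the magic-close path checked in bcm47xx_wdt_release(). Illustrative only, not part of this patch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;	/* seconds; this driver accepts 1..255 */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* also pets the dog */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* periodic keepalive */
	write(fd, "V", 1);	/* magic close: sets expect_release = 42 */
	close(fd);		/* watchdog is stopped cleanly */
	return 0;
}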
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c new file mode 100644 index 000000000000..fecb307d28e9 --- /dev/null +++ b/drivers/watchdog/coh901327_wdt.c | |||
@@ -0,0 +1,537 @@ | |||
1 | /* | ||
2 | * coh901327_wdt.c | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 ST-Ericsson AB | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | * Watchdog driver for the ST-Ericsson AB COH 901 327 IP core | ||
7 | * Author: Linus Walleij <linus.walleij@stericsson.com> | ||
8 | */ | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/watchdog.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/pm.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/bitops.h> | ||
19 | #include <linux/uaccess.h> | ||
20 | #include <linux/clk.h> | ||
21 | |||
22 | #define DRV_NAME "WDOG COH 901 327" | ||
23 | |||
24 | /* | ||
25 | * COH 901 327 register definitions | ||
26 | */ | ||
27 | |||
28 | /* WDOG_FEED Register 32bit (-/W) */ | ||
29 | #define U300_WDOG_FR 0x00 | ||
30 | #define U300_WDOG_FR_FEED_RESTART_TIMER 0xFEEDU | ||
31 | /* WDOG_TIMEOUT Register 32bit (R/W) */ | ||
32 | #define U300_WDOG_TR 0x04 | ||
33 | #define U300_WDOG_TR_TIMEOUT_MASK 0x7FFFU | ||
34 | /* WDOG_DISABLE1 Register 32bit (-/W) */ | ||
35 | #define U300_WDOG_D1R 0x08 | ||
36 | #define U300_WDOG_D1R_DISABLE1_DISABLE_TIMER 0x2BADU | ||
37 | /* WDOG_DISABLE2 Register 32bit (R/W) */ | ||
38 | #define U300_WDOG_D2R 0x0C | ||
39 | #define U300_WDOG_D2R_DISABLE2_DISABLE_TIMER 0xCAFEU | ||
40 | #define U300_WDOG_D2R_DISABLE_STATUS_DISABLED 0xDABEU | ||
41 | #define U300_WDOG_D2R_DISABLE_STATUS_ENABLED 0x0000U | ||
42 | /* WDOG_STATUS Register 32bit (R/W) */ | ||
43 | #define U300_WDOG_SR 0x10 | ||
44 | #define U300_WDOG_SR_STATUS_TIMED_OUT 0xCFE8U | ||
45 | #define U300_WDOG_SR_STATUS_NORMAL 0x0000U | ||
46 | #define U300_WDOG_SR_RESET_STATUS_RESET 0xE8B4U | ||
47 | /* WDOG_COUNT Register 32bit (R/-) */ | ||
48 | #define U300_WDOG_CR 0x14 | ||
49 | #define U300_WDOG_CR_VALID_IND 0x8000U | ||
50 | #define U300_WDOG_CR_VALID_STABLE 0x0000U | ||
51 | #define U300_WDOG_CR_COUNT_VALUE_MASK 0x7FFFU | ||
52 | /* WDOG_JTAGOVR Register 32bit (R/W) */ | ||
53 | #define U300_WDOG_JOR 0x18 | ||
54 | #define U300_WDOG_JOR_JTAG_MODE_IND 0x0002U | ||
55 | #define U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE 0x0001U | ||
56 | /* WDOG_RESTART Register 32bit (-/W) */ | ||
57 | #define U300_WDOG_RR 0x1C | ||
58 | #define U300_WDOG_RR_RESTART_VALUE_RESUME 0xACEDU | ||
59 | /* WDOG_IRQ_EVENT Register 32bit (R/W) */ | ||
60 | #define U300_WDOG_IER 0x20 | ||
61 | #define U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND 0x0001U | ||
62 | #define U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE 0x0001U | ||
63 | /* WDOG_IRQ_MASK Register 32bit (R/W) */ | ||
64 | #define U300_WDOG_IMR 0x24 | ||
65 | #define U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE 0x0001U | ||
66 | /* WDOG_IRQ_FORCE Register 32bit (R/W) */ | ||
67 | #define U300_WDOG_IFR 0x28 | ||
68 | #define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U | ||
69 | |||
70 | /* Default timeout in seconds = 1 minute */ | ||
71 | static int margin = 60; | ||
72 | static resource_size_t phybase; | ||
73 | static resource_size_t physize; | ||
74 | static int irq; | ||
75 | static void __iomem *virtbase; | ||
76 | static unsigned long coh901327_users; | ||
77 | static unsigned long boot_status; | ||
78 | static u16 wdogenablestore; | ||
79 | static u16 irqmaskstore; | ||
80 | static struct device *parent; | ||
81 | |||
82 | /* | ||
83 | * The watchdog block is of course always clocked, the | ||
84 | * clk_enable()/clk_disable() calls are mainly for performing reference | ||
85 | * counting higher up in the clock hierarchy. | ||
86 | */ | ||
87 | static struct clk *clk; | ||
88 | |||
89 | /* | ||
90 | * Enabling and disabling functions. | ||
91 | */ | ||
92 | static void coh901327_enable(u16 timeout) | ||
93 | { | ||
94 | u16 val; | ||
95 | |||
96 | clk_enable(clk); | ||
97 | /* Restart timer if it is disabled */ | ||
98 | val = readw(virtbase + U300_WDOG_D2R); | ||
99 | if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED) | ||
100 | writew(U300_WDOG_RR_RESTART_VALUE_RESUME, | ||
101 | virtbase + U300_WDOG_RR); | ||
102 | /* Acknowledge any pending interrupt so it doesn't just fire off */ | ||
103 | writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, | ||
104 | virtbase + U300_WDOG_IER); | ||
105 | /* Enable the watchdog interrupt */ | ||
106 | writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR); | ||
107 | /* Activate the watchdog timer */ | ||
108 | writew(timeout, virtbase + U300_WDOG_TR); | ||
109 | /* Start the watchdog timer */ | ||
110 | writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR); | ||
111 | /* | ||
112 | * Extra read so that this change propagates to the watchdog. | ||
113 | */ | ||
114 | (void) readw(virtbase + U300_WDOG_CR); | ||
115 | val = readw(virtbase + U300_WDOG_D2R); | ||
116 | clk_disable(clk); | ||
117 | if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED) | ||
118 | dev_err(parent, | ||
119 | "%s(): watchdog not enabled! D2R value %04x\n", | ||
120 | __func__, val); | ||
121 | } | ||
122 | |||
123 | static void coh901327_disable(void) | ||
124 | { | ||
125 | u16 val; | ||
126 | |||
127 | clk_enable(clk); | ||
128 | /* Disable the watchdog interrupt if it is active */ | ||
129 | writew(0x0000U, virtbase + U300_WDOG_IMR); | ||
130 | /* If the watchdog is currently enabled, attempt to disable it */ | ||
131 | val = readw(virtbase + U300_WDOG_D2R); | ||
132 | if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) { | ||
133 | writew(U300_WDOG_D1R_DISABLE1_DISABLE_TIMER, | ||
134 | virtbase + U300_WDOG_D1R); | ||
135 | writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER, | ||
136 | virtbase + U300_WDOG_D2R); | ||
137 | /* Write this twice (else problems occur) */ | ||
138 | writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER, | ||
139 | virtbase + U300_WDOG_D2R); | ||
140 | } | ||
141 | val = readw(virtbase + U300_WDOG_D2R); | ||
142 | clk_disable(clk); | ||
143 | if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) | ||
144 | dev_err(parent, | ||
145 | "%s(): watchdog not disabled! D2R value %04x\n", | ||
146 | __func__, val); | ||
147 | } | ||
148 | |||
149 | static void coh901327_start(void) | ||
150 | { | ||
151 | coh901327_enable(margin * 100); | ||
152 | } | ||
153 | |||
154 | static void coh901327_keepalive(void) | ||
155 | { | ||
156 | clk_enable(clk); | ||
157 | /* Feed the watchdog */ | ||
158 | writew(U300_WDOG_FR_FEED_RESTART_TIMER, | ||
159 | virtbase + U300_WDOG_FR); | ||
160 | clk_disable(clk); | ||
161 | } | ||
162 | |||
163 | static int coh901327_settimeout(int time) | ||
164 | { | ||
165 | /* | ||
166 | * Max margin is 327 s, since the timeout | ||
167 | * register counts 10 ms ticks and is at most | ||
168 | * 0x7FFF ticks = 327670 ms ~= 327 s. | ||
169 | */ | ||
170 | if (time <= 0 || time > 327) | ||
171 | return -EINVAL; | ||
172 | |||
173 | margin = time; | ||
174 | clk_enable(clk); | ||
175 | /* Set new timeout value */ | ||
176 | writew(margin * 100, virtbase + U300_WDOG_TR); | ||
177 | /* Feed the dog */ | ||
178 | writew(U300_WDOG_FR_FEED_RESTART_TIMER, | ||
179 | virtbase + U300_WDOG_FR); | ||
180 | clk_disable(clk); | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * This interrupt occurs 10 ms before the watchdog WILL bark. | ||
186 | */ | ||
187 | static irqreturn_t coh901327_interrupt(int irq, void *data) | ||
188 | { | ||
189 | u16 val; | ||
190 | |||
191 | /* | ||
192 | * Ack IRQ? If this occurs we're FUBAR anyway, so | ||
193 | * just acknowledge, disable the interrupt and await the imminent end. | ||
194 | * If you at some point need a host of callbacks to be called | ||
195 | * when the system is about to watchdog-reset, add them here! | ||
196 | * | ||
197 | * NOTE: on future versions of this IP-block, it will be possible | ||
198 | * to prevent a watchdog reset by feeding the watchdog at this | ||
199 | * point. | ||
200 | */ | ||
201 | clk_enable(clk); | ||
202 | val = readw(virtbase + U300_WDOG_IER); | ||
203 | if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND) | ||
204 | writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, | ||
205 | virtbase + U300_WDOG_IER); | ||
206 | writew(0x0000U, virtbase + U300_WDOG_IMR); | ||
207 | clk_disable(clk); | ||
208 | dev_crit(parent, "watchdog is barking!\n"); | ||
209 | return IRQ_HANDLED; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Allow only one user (daemon) to open the watchdog | ||
214 | */ | ||
215 | static int coh901327_open(struct inode *inode, struct file *file) | ||
216 | { | ||
217 | if (test_and_set_bit(1, &coh901327_users)) | ||
218 | return -EBUSY; | ||
219 | coh901327_start(); | ||
220 | return nonseekable_open(inode, file); | ||
221 | } | ||
222 | |||
223 | static int coh901327_release(struct inode *inode, struct file *file) | ||
224 | { | ||
225 | clear_bit(1, &coh901327_users); | ||
226 | coh901327_disable(); | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static ssize_t coh901327_write(struct file *file, const char __user *data, | ||
231 | size_t len, loff_t *ppos) | ||
232 | { | ||
233 | if (len) | ||
234 | coh901327_keepalive(); | ||
235 | return len; | ||
236 | } | ||
237 | |||
238 | static long coh901327_ioctl(struct file *file, unsigned int cmd, | ||
239 | unsigned long arg) | ||
240 | { | ||
241 | int ret = -ENOTTY; | ||
242 | u16 val; | ||
243 | int time; | ||
244 | int new_options; | ||
245 | union { | ||
246 | struct watchdog_info __user *ident; | ||
247 | int __user *i; | ||
248 | } uarg; | ||
249 | static struct watchdog_info ident = { | ||
250 | .options = WDIOF_CARDRESET | | ||
251 | WDIOF_SETTIMEOUT | | ||
252 | WDIOF_KEEPALIVEPING, | ||
253 | .identity = "COH 901 327 Watchdog", | ||
254 | .firmware_version = 1, | ||
255 | }; | ||
256 | uarg.i = (int __user *)arg; | ||
257 | |||
258 | switch (cmd) { | ||
259 | case WDIOC_GETSUPPORT: | ||
260 | ret = copy_to_user(uarg.ident, &ident, | ||
261 | sizeof(ident)) ? -EFAULT : 0; | ||
262 | break; | ||
263 | |||
264 | case WDIOC_GETSTATUS: | ||
265 | ret = put_user(0, uarg.i); | ||
266 | break; | ||
267 | |||
268 | case WDIOC_GETBOOTSTATUS: | ||
269 | ret = put_user(boot_status, uarg.i); | ||
270 | break; | ||
271 | |||
272 | case WDIOC_SETOPTIONS: | ||
273 | ret = get_user(new_options, uarg.i); | ||
274 | if (ret) | ||
275 | break; | ||
276 | if (new_options & WDIOS_DISABLECARD) | ||
277 | coh901327_disable(); | ||
278 | if (new_options & WDIOS_ENABLECARD) | ||
279 | coh901327_start(); | ||
280 | ret = 0; | ||
281 | break; | ||
282 | |||
283 | case WDIOC_KEEPALIVE: | ||
284 | coh901327_keepalive(); | ||
285 | ret = 0; | ||
286 | break; | ||
287 | |||
288 | case WDIOC_SETTIMEOUT: | ||
289 | ret = get_user(time, uarg.i); | ||
290 | if (ret) | ||
291 | break; | ||
292 | |||
293 | ret = coh901327_settimeout(time); | ||
294 | if (ret) | ||
295 | break; | ||
296 | /* Then fall through to return set value */ | ||
297 | |||
298 | case WDIOC_GETTIMEOUT: | ||
299 | ret = put_user(margin, uarg.i); | ||
300 | break; | ||
301 | |||
302 | case WDIOC_GETTIMELEFT: | ||
303 | clk_enable(clk); | ||
304 | /* Read repeatedly until the value is stable! */ | ||
305 | val = readw(virtbase + U300_WDOG_CR); | ||
306 | while (val & U300_WDOG_CR_VALID_IND) | ||
307 | val = readw(virtbase + U300_WDOG_CR); | ||
308 | val &= U300_WDOG_CR_COUNT_VALUE_MASK; | ||
309 | clk_disable(clk); | ||
310 | if (val != 0) | ||
311 | val /= 100; | ||
312 | ret = put_user(val, uarg.i); | ||
313 | break; | ||
314 | } | ||
315 | return ret; | ||
316 | } | ||
317 | |||
318 | static const struct file_operations coh901327_fops = { | ||
319 | .owner = THIS_MODULE, | ||
320 | .llseek = no_llseek, | ||
321 | .write = coh901327_write, | ||
322 | .unlocked_ioctl = coh901327_ioctl, | ||
323 | .open = coh901327_open, | ||
324 | .release = coh901327_release, | ||
325 | }; | ||
326 | |||
327 | static struct miscdevice coh901327_miscdev = { | ||
328 | .minor = WATCHDOG_MINOR, | ||
329 | .name = "watchdog", | ||
330 | .fops = &coh901327_fops, | ||
331 | }; | ||
332 | |||
333 | static int __exit coh901327_remove(struct platform_device *pdev) | ||
334 | { | ||
335 | misc_deregister(&coh901327_miscdev); | ||
336 | coh901327_disable(); | ||
337 | free_irq(irq, pdev); | ||
338 | clk_put(clk); | ||
339 | iounmap(virtbase); | ||
340 | release_mem_region(phybase, physize); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | |||
345 | static int __init coh901327_probe(struct platform_device *pdev) | ||
346 | { | ||
347 | int ret; | ||
348 | u16 val; | ||
349 | struct resource *res; | ||
350 | |||
351 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
352 | if (!res) | ||
353 | return -ENOENT; | ||
354 | |||
355 | parent = &pdev->dev; | ||
356 | physize = resource_size(res); | ||
357 | phybase = res->start; | ||
358 | |||
359 | if (request_mem_region(phybase, physize, DRV_NAME) == NULL) { | ||
360 | ret = -EBUSY; | ||
361 | goto out; | ||
362 | } | ||
363 | |||
364 | virtbase = ioremap(phybase, physize); | ||
365 | if (!virtbase) { | ||
366 | ret = -ENOMEM; | ||
367 | goto out_no_remap; | ||
368 | } | ||
369 | |||
370 | clk = clk_get(&pdev->dev, NULL); | ||
371 | if (IS_ERR(clk)) { | ||
372 | ret = PTR_ERR(clk); | ||
373 | dev_err(&pdev->dev, "could not get clock\n"); | ||
374 | goto out_no_clk; | ||
375 | } | ||
376 | ret = clk_enable(clk); | ||
377 | if (ret) { | ||
378 | dev_err(&pdev->dev, "could not enable clock\n"); | ||
379 | goto out_no_clk_enable; | ||
380 | } | ||
381 | |||
382 | val = readw(virtbase + U300_WDOG_SR); | ||
383 | switch (val) { | ||
384 | case U300_WDOG_SR_STATUS_TIMED_OUT: | ||
385 | dev_info(&pdev->dev, | ||
386 | "watchdog timed out since last chip reset!\n"); | ||
387 | boot_status = WDIOF_CARDRESET; | ||
388 | /* Status will be cleared below */ | ||
389 | break; | ||
390 | case U300_WDOG_SR_STATUS_NORMAL: | ||
391 | dev_info(&pdev->dev, | ||
392 | "in normal status, no timeouts have occurred.\n"); | ||
393 | break; | ||
394 | default: | ||
395 | dev_info(&pdev->dev, | ||
396 | "contains an illegal status code (%08x)\n", val); | ||
397 | break; | ||
398 | } | ||
399 | |||
400 | val = readw(virtbase + U300_WDOG_D2R); | ||
401 | switch (val) { | ||
402 | case U300_WDOG_D2R_DISABLE_STATUS_DISABLED: | ||
403 | dev_info(&pdev->dev, "currently disabled.\n"); | ||
404 | break; | ||
405 | case U300_WDOG_D2R_DISABLE_STATUS_ENABLED: | ||
406 | dev_info(&pdev->dev, | ||
407 | "currently enabled! (disabling it now)\n"); | ||
408 | coh901327_disable(); | ||
409 | break; | ||
410 | default: | ||
411 | dev_err(&pdev->dev, | ||
412 | "contains an illegal enable/disable code (%08x)\n", | ||
413 | val); | ||
414 | break; | ||
415 | } | ||
416 | |||
417 | /* Reset the watchdog */ | ||
418 | writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR); | ||
419 | |||
420 | irq = platform_get_irq(pdev, 0); | ||
421 | if (request_irq(irq, coh901327_interrupt, IRQF_DISABLED, | ||
422 | DRV_NAME " Bark", pdev)) { | ||
423 | ret = -EIO; | ||
424 | goto out_no_irq; | ||
425 | } | ||
426 | |||
427 | clk_disable(clk); | ||
428 | |||
429 | ret = misc_register(&coh901327_miscdev); | ||
430 | if (ret == 0) | ||
431 | dev_info(&pdev->dev, | ||
432 | "initialized. timer margin=%d sec\n", margin); | ||
433 | else | ||
434 | goto out_no_wdog; | ||
435 | |||
436 | return 0; | ||
437 | |||
438 | out_no_wdog: | ||
439 | free_irq(irq, pdev); | ||
440 | out_no_irq: | ||
441 | clk_disable(clk); | ||
442 | out_no_clk_enable: | ||
443 | clk_put(clk); | ||
444 | out_no_clk: | ||
445 | iounmap(virtbase); | ||
446 | out_no_remap: | ||
447 | release_mem_region(phybase, physize); | ||
448 | out: | ||
449 | return ret; | ||
450 | } | ||
451 | |||
452 | #ifdef CONFIG_PM | ||
453 | static int coh901327_suspend(struct platform_device *pdev, pm_message_t state) | ||
454 | { | ||
455 | irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U; | ||
456 | wdogenablestore = readw(virtbase + U300_WDOG_D2R); | ||
457 | /* If watchdog is on, disable it here and now */ | ||
458 | if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) | ||
459 | coh901327_disable(); | ||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | static int coh901327_resume(struct platform_device *pdev) | ||
464 | { | ||
465 | /* Restore the watchdog interrupt */ | ||
466 | writew(irqmaskstore, virtbase + U300_WDOG_IMR); | ||
467 | if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) { | ||
468 | /* Restart the watchdog timer */ | ||
469 | writew(U300_WDOG_RR_RESTART_VALUE_RESUME, | ||
470 | virtbase + U300_WDOG_RR); | ||
471 | writew(U300_WDOG_FR_FEED_RESTART_TIMER, | ||
472 | virtbase + U300_WDOG_FR); | ||
473 | } | ||
474 | return 0; | ||
475 | } | ||
476 | #else | ||
477 | #define coh901327_suspend NULL | ||
478 | #define coh901327_resume NULL | ||
479 | #endif | ||
480 | |||
481 | /* | ||
482 | * Mistreating the watchdog is the only way to perform a software reset of the | ||
483 | * system on EMP platforms. So we implement this and export a symbol for it. | ||
484 | */ | ||
485 | void coh901327_watchdog_reset(void) | ||
486 | { | ||
487 | /* Enable even if on JTAG too */ | ||
488 | writew(U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE, | ||
489 | virtbase + U300_WDOG_JOR); | ||
490 | /* | ||
491 | * Timeout = 5s, we have to wait for the watchdog reset to | ||
492 | * actually take place: the watchdog will be reloaded with the | ||
493 | * default value immediately, so we HAVE to reboot and get back | ||
494 | * into the kernel in 30s, or the device will reboot again! | ||
495 | * The boot loader will typically deactivate the watchdog, so we | ||
496 | * need to give the boot loader enough time to reach the point | ||
497 | * where it deactivates the watchdog before the watchdog fires again. | ||
498 | * | ||
499 | * NOTE: on future versions of the watchdog, this restriction is | ||
500 | * gone: the watchdog will be reloaded with a default value (1 min) | ||
501 | * instead of last value, and you can conveniently set the watchdog | ||
502 | * timeout to 10ms (value = 1) without any problems. | ||
503 | */ | ||
504 | coh901327_enable(500); | ||
505 | /* Return and await doom */ | ||
506 | } | ||
507 | |||
508 | static struct platform_driver coh901327_driver = { | ||
509 | .driver = { | ||
510 | .owner = THIS_MODULE, | ||
511 | .name = "coh901327_wdog", | ||
512 | }, | ||
513 | .remove = __exit_p(coh901327_remove), | ||
514 | .suspend = coh901327_suspend, | ||
515 | .resume = coh901327_resume, | ||
516 | }; | ||
517 | |||
518 | static int __init coh901327_init(void) | ||
519 | { | ||
520 | return platform_driver_probe(&coh901327_driver, coh901327_probe); | ||
521 | } | ||
522 | module_init(coh901327_init); | ||
523 | |||
524 | static void __exit coh901327_exit(void) | ||
525 | { | ||
526 | platform_driver_unregister(&coh901327_driver); | ||
527 | } | ||
528 | module_exit(coh901327_exit); | ||
529 | |||
530 | MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); | ||
531 | MODULE_DESCRIPTION("COH 901 327 Watchdog"); | ||
532 | |||
533 | module_param(margin, int, 0); | ||
534 | MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)"); | ||
535 | |||
536 | MODULE_LICENSE("GPL"); | ||
537 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
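Note on the coh901327_watchdog_reset() symbol exported above: a minimal sketch of how a platform restart hook might use it. The hook name and surrounding code are illustrative assumptions, not part of the driver; only the exported function comes from the listing.

    #include <asm/processor.h>	/* cpu_relax() */

    extern void coh901327_watchdog_reset(void);

    /* Hypothetical U300/EMP restart hook */
    static void u300_restart(char mode, const char *cmd)
    {
            coh901327_watchdog_reset();
            /* The reset fires once the programmed timeout expires. */
            while (1)
                    cpu_relax();
    }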
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index c0b9169ba5d5..a6c5674c78e6 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -120,7 +120,8 @@ static int nowayout = WATCHDOG_NOWAYOUT; | |||
120 | static char expect_release; | 120 | static char expect_release; |
121 | static unsigned long hpwdt_is_open; | 121 | static unsigned long hpwdt_is_open; |
122 | static unsigned int allow_kdump; | 122 | static unsigned int allow_kdump; |
123 | static int hpwdt_nmi_sourcing; | 123 | static unsigned int hpwdt_nmi_sourcing; |
124 | static unsigned int priority; /* hpwdt at end of die_notify list */ | ||
124 | 125 | ||
125 | static void __iomem *pci_mem_addr; /* the PCI-memory address */ | 126 | static void __iomem *pci_mem_addr; /* the PCI-memory address */ |
126 | static unsigned long __iomem *hpwdt_timer_reg; | 127 | static unsigned long __iomem *hpwdt_timer_reg; |
@@ -623,7 +624,7 @@ static struct miscdevice hpwdt_miscdev = { | |||
623 | 624 | ||
624 | static struct notifier_block die_notifier = { | 625 | static struct notifier_block die_notifier = { |
625 | .notifier_call = hpwdt_pretimeout, | 626 | .notifier_call = hpwdt_pretimeout, |
626 | .priority = 0x7FFFFFFF, | 627 | .priority = 0, |
627 | }; | 628 | }; |
628 | 629 | ||
629 | /* | 630 | /* |
@@ -641,7 +642,8 @@ static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev) | |||
641 | hpwdt_nmi_sourcing = 1; | 642 | hpwdt_nmi_sourcing = 1; |
642 | else | 643 | else |
643 | dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this " | 644 | dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this " |
644 | "functionality you must reboot with nmi_watchdog=0.\n"); | 645 | "functionality you must reboot with nmi_watchdog=0 " |
646 | "and load the hpwdt driver with priority=1.\n"); | ||
645 | } | 647 | } |
646 | #else | 648 | #else |
647 | static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev) | 649 | static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev) |
@@ -714,6 +716,14 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, | |||
714 | cmn_regs.u1.rah = 0x0D; | 716 | cmn_regs.u1.rah = 0x0D; |
715 | cmn_regs.u1.ral = 0x02; | 717 | cmn_regs.u1.ral = 0x02; |
716 | 718 | ||
719 | /* | ||
720 | * If the priority is set to 1, then we will be put first on the | ||
721 | * die notify list to handle a critical NMI. The default is to | ||
722 | * be last so other users of the NMI signal can function. | ||
723 | */ | ||
724 | if (priority) | ||
725 | die_notifier.priority = 0x7FFFFFFF; | ||
726 | |||
717 | retval = register_die_notifier(&die_notifier); | 727 | retval = register_die_notifier(&die_notifier); |
718 | if (retval != 0) { | 728 | if (retval != 0) { |
719 | dev_warn(&dev->dev, | 729 | dev_warn(&dev->dev, |
@@ -733,9 +743,11 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, | |||
733 | printk(KERN_INFO | 743 | printk(KERN_INFO |
734 | "hp Watchdog Timer Driver: %s" | 744 | "hp Watchdog Timer Driver: %s" |
735 | ", timer margin: %d seconds (nowayout=%d)" | 745 | ", timer margin: %d seconds (nowayout=%d)" |
736 | ", allow kernel dump: %s (default = 0/OFF).\n", | 746 | ", allow kernel dump: %s (default = 0/OFF)" |
747 | ", priority: %s (default = 0/LAST).\n", | ||
737 | HPWDT_VERSION, soft_margin, nowayout, | 748 | HPWDT_VERSION, soft_margin, nowayout, |
738 | (allow_kdump == 0) ? "OFF" : "ON"); | 749 | (allow_kdump == 0) ? "OFF" : "ON", |
750 | (priority == 0) ? "LAST" : "FIRST"); | ||
739 | 751 | ||
740 | return 0; | 752 | return 0; |
741 | 753 | ||
@@ -798,5 +810,9 @@ module_param(nowayout, int, 0); | |||
798 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" | 810 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" |
799 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | 811 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); |
800 | 812 | ||
813 | module_param(priority, int, 0); | ||
814 | MODULE_PARM_DESC(priority, "The hpwdt driver handles NMIs first or last" | ||
815 | " (default = 0/Last)\n"); | ||
816 | |||
801 | module_init(hpwdt_init); | 817 | module_init(hpwdt_init); |
802 | module_exit(hpwdt_cleanup); | 818 | module_exit(hpwdt_cleanup); |
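Note on the new priority parameter above: die notifier callbacks run in descending priority order, so 0x7FFFFFFF places hpwdt first and the new default of 0 places it last. A minimal sketch of another NMI consumer that would now run ahead of hpwdt unless the module is loaded with priority=1 (the names here are illustrative):

    #include <linux/kdebug.h>
    #include <linux/notifier.h>

    static int other_nmi_user(struct notifier_block *nb, unsigned long val,
                              void *data)
    {
            /* Inspect the NMI, then let the rest of the chain run. */
            return NOTIFY_DONE;
    }

    static struct notifier_block other_nmi_nb = {
            .notifier_call  = other_nmi_user,
            .priority       = 1,    /* above hpwdt's new default of 0 */
    };

    static int __init other_nmi_init(void)
    {
            return register_die_notifier(&other_nmi_nb);
    }

As the warning text above states, NMI sourcing additionally requires booting with nmi_watchdog=0 and loading the hpwdt driver with priority=1.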
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index f2713851aaab..3ed571a2ab18 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c | |||
@@ -159,6 +159,7 @@ static int omap_wdt_open(struct inode *inode, struct file *file) | |||
159 | file->private_data = (void *) wdev; | 159 | file->private_data = (void *) wdev; |
160 | 160 | ||
161 | omap_wdt_set_timeout(wdev); | 161 | omap_wdt_set_timeout(wdev); |
162 | omap_wdt_ping(wdev); /* trigger loading of new timeout value */ | ||
162 | omap_wdt_enable(wdev); | 163 | omap_wdt_enable(wdev); |
163 | 164 | ||
164 | return nonseekable_open(inode, file); | 165 | return nonseekable_open(inode, file); |
@@ -313,6 +314,9 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev) | |||
313 | 314 | ||
314 | platform_set_drvdata(pdev, wdev); | 315 | platform_set_drvdata(pdev, wdev); |
315 | 316 | ||
317 | clk_enable(wdev->ick); | ||
318 | clk_enable(wdev->fck); | ||
319 | |||
316 | omap_wdt_disable(wdev); | 320 | omap_wdt_disable(wdev); |
317 | omap_wdt_adjust_timeout(timer_margin); | 321 | omap_wdt_adjust_timeout(timer_margin); |
318 | 322 | ||
@@ -332,6 +336,9 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev) | |||
332 | /* autogate OCP interface clock */ | 336 | /* autogate OCP interface clock */ |
333 | __raw_writel(0x01, wdev->base + OMAP_WATCHDOG_SYS_CONFIG); | 337 | __raw_writel(0x01, wdev->base + OMAP_WATCHDOG_SYS_CONFIG); |
334 | 338 | ||
339 | clk_disable(wdev->ick); | ||
340 | clk_disable(wdev->fck); | ||
341 | |||
335 | omap_wdt_dev = pdev; | 342 | omap_wdt_dev = pdev; |
336 | 343 | ||
337 | return 0; | 344 | return 0; |
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c new file mode 100644 index 000000000000..538ec2c05197 --- /dev/null +++ b/drivers/watchdog/pnx833x_wdt.c | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * PNX833x Hardware Watchdog Driver | ||
3 | * Copyright 2008 NXP Semiconductors | ||
4 | * Daniel Laird <daniel.j.laird@nxp.com> | ||
5 | * Andre McCurdy <andre.mccurdy@nxp.com> | ||
6 | * | ||
7 | * Heavily based upon - IndyDog 0.3 | ||
8 | * A Hardware Watchdog Device for SGI IP22 | ||
9 | * | ||
10 | * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, All Rights Reserved. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * based on softdog.c by Alan Cox <alan@redhat.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/moduleparam.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/miscdevice.h> | ||
27 | #include <linux/watchdog.h> | ||
28 | #include <linux/notifier.h> | ||
29 | #include <linux/reboot.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <asm/mach-pnx833x/pnx833x.h> | ||
32 | |||
33 | #define PFX "pnx833x: " | ||
34 | #define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */ | ||
35 | #define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68 MHz. */ | ||
36 | |||
37 | /** CONFIG block */ | ||
38 | #define PNX833X_CONFIG (0x07000U) | ||
39 | #define PNX833X_CONFIG_CPU_WATCHDOG (0x54) | ||
40 | #define PNX833X_CONFIG_CPU_WATCHDOG_COMPARE (0x58) | ||
41 | #define PNX833X_CONFIG_CPU_COUNTERS_CONTROL (0x1c) | ||
42 | |||
43 | /** RESET block */ | ||
44 | #define PNX833X_RESET (0x08000U) | ||
45 | #define PNX833X_RESET_CONFIG (0x08) | ||
46 | |||
47 | static unsigned long pnx833x_wdt_alive; /* bitmap for test_and_set_bit() */ | ||
48 | |||
49 | /* Default timeout expressed as ticks of the 68 MHz watchdog clock. */ | ||
50 | static int pnx833x_wdt_timeout = (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY); | ||
51 | module_param(pnx833x_wdt_timeout, int, 0); | ||
52 | MODULE_PARM_DESC(pnx833x_wdt_timeout, "Watchdog timeout in 68 MHz clock ticks, default=" | ||
53 | __MODULE_STRING(pnx833x_wdt_timeout) " (30 seconds)."); | ||
54 | |||
55 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
56 | module_param(nowayout, int, 0); | ||
57 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" | ||
58 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | ||
59 | |||
60 | static int start_enabled = 1; | ||
61 | module_param(start_enabled, int, 0); | ||
62 | MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion " | ||
63 | "(default=" __MODULE_STRING(start_enabled) ")"); | ||
64 | |||
65 | static void pnx833x_wdt_start(void) | ||
66 | { | ||
67 | /* Enable watchdog causing reset. */ | ||
68 | PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) |= 0x1; | ||
69 | /* Set timeout.*/ | ||
70 | PNX833X_REG(PNX833X_CONFIG + | ||
71 | PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout; | ||
72 | /* Enable watchdog. */ | ||
73 | PNX833X_REG(PNX833X_CONFIG + | ||
74 | PNX833X_CONFIG_CPU_COUNTERS_CONTROL) |= 0x1; | ||
75 | |||
76 | printk(KERN_INFO PFX "Started watchdog timer.\n"); | ||
77 | } | ||
78 | |||
79 | static void pnx833x_wdt_stop(void) | ||
80 | { | ||
81 | /* Disable watchdog causing reset. */ | ||
82 | PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) &= 0xFFFFFFFE; | ||
83 | /* Disable watchdog.*/ | ||
84 | PNX833X_REG(PNX833X_CONFIG + | ||
85 | PNX833X_CONFIG_CPU_COUNTERS_CONTROL) &= 0xFFFFFFFE; | ||
86 | |||
87 | printk(KERN_INFO PFX "Stopped watchdog timer.\n"); | ||
88 | } | ||
89 | |||
90 | static void pnx833x_wdt_ping(void) | ||
91 | { | ||
92 | PNX833X_REG(PNX833X_CONFIG + | ||
93 | PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Allow only one person to hold it open | ||
98 | */ | ||
99 | static int pnx833x_wdt_open(struct inode *inode, struct file *file) | ||
100 | { | ||
101 | if (test_and_set_bit(0, &pnx833x_wdt_alive)) | ||
102 | return -EBUSY; | ||
103 | |||
104 | if (nowayout) | ||
105 | __module_get(THIS_MODULE); | ||
106 | |||
107 | /* Activate timer */ | ||
108 | if (!start_enabled) | ||
109 | pnx833x_wdt_start(); | ||
110 | |||
111 | pnx833x_wdt_ping(); | ||
112 | |||
113 | printk(KERN_INFO "Started watchdog timer.\n"); | ||
114 | |||
115 | return nonseekable_open(inode, file); | ||
116 | } | ||
117 | |||
118 | static int pnx833x_wdt_release(struct inode *inode, struct file *file) | ||
119 | { | ||
120 | /* Shut off the timer. | ||
121 | * Lock it in if it's a module and we defined ...NOWAYOUT */ | ||
122 | if (!nowayout) | ||
123 | pnx833x_wdt_stop(); /* Turn the WDT off */ | ||
124 | |||
125 | clear_bit(0, &pnx833x_wdt_alive); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static ssize_t pnx833x_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) | ||
130 | { | ||
131 | /* Refresh the timer. */ | ||
132 | if (len) | ||
133 | pnx833x_wdt_ping(); | ||
134 | |||
135 | return len; | ||
136 | } | ||
137 | |||
138 | static long pnx833x_wdt_ioctl(struct file *file, unsigned int cmd, | ||
139 | unsigned long arg) | ||
140 | { | ||
141 | int options, new_timeout = 0; | ||
142 | uint32_t timeout, timeout_left = 0; | ||
143 | |||
144 | static struct watchdog_info ident = { | ||
145 | .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, | ||
146 | .firmware_version = 0, | ||
147 | .identity = "Hardware Watchdog for PNX833x", | ||
148 | }; | ||
149 | |||
150 | switch (cmd) { | ||
151 | default: | ||
152 | return -ENOTTY; | ||
153 | |||
154 | case WDIOC_GETSUPPORT: | ||
155 | if (copy_to_user((struct watchdog_info *)arg, | ||
156 | &ident, sizeof(ident))) | ||
157 | return -EFAULT; | ||
158 | return 0; | ||
159 | |||
160 | case WDIOC_GETSTATUS: | ||
161 | case WDIOC_GETBOOTSTATUS: | ||
162 | return put_user(0, (int *)arg); | ||
163 | |||
164 | case WDIOC_SETOPTIONS: | ||
165 | if (get_user(options, (int *)arg)) | ||
166 | return -EFAULT; | ||
167 | |||
168 | if (options & WDIOS_DISABLECARD) | ||
169 | pnx833x_wdt_stop(); | ||
170 | |||
171 | if (options & WDIOS_ENABLECARD) | ||
172 | pnx833x_wdt_start(); | ||
173 | |||
174 | return 0; | ||
175 | |||
176 | case WDIOC_KEEPALIVE: | ||
177 | pnx833x_wdt_ping(); | ||
178 | return 0; | ||
179 | |||
180 | case WDIOC_SETTIMEOUT: | ||
181 | { | ||
182 | if (get_user(new_timeout, (int *)arg)) | ||
183 | return -EFAULT; | ||
184 | |||
185 | pnx833x_wdt_timeout = new_timeout; | ||
186 | PNX833X_REG(PNX833X_CONFIG + | ||
187 | PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = new_timeout; | ||
188 | return put_user(new_timeout, (int *)arg); | ||
189 | } | ||
190 | |||
191 | case WDIOC_GETTIMEOUT: | ||
192 | timeout = PNX833X_REG(PNX833X_CONFIG + | ||
193 | PNX833X_CONFIG_CPU_WATCHDOG_COMPARE); | ||
194 | return put_user(timeout, (int *)arg); | ||
195 | |||
196 | case WDIOC_GETTIMELEFT: | ||
197 | timeout_left = PNX833X_REG(PNX833X_CONFIG + | ||
198 | PNX833X_CONFIG_CPU_WATCHDOG); | ||
199 | return put_user(timeout_left, (int *)arg); | ||
200 | |||
201 | } | ||
202 | } | ||
203 | |||
204 | static int pnx833x_wdt_notify_sys(struct notifier_block *this, | ||
205 | unsigned long code, void *unused) | ||
206 | { | ||
207 | if (code == SYS_DOWN || code == SYS_HALT) | ||
208 | pnx833x_wdt_stop(); /* Turn the WDT off */ | ||
209 | |||
210 | return NOTIFY_DONE; | ||
211 | } | ||
212 | |||
213 | static const struct file_operations pnx833x_wdt_fops = { | ||
214 | .owner = THIS_MODULE, | ||
215 | .llseek = no_llseek, | ||
216 | .write = pnx833x_wdt_write, | ||
217 | .unlocked_ioctl = pnx833x_wdt_ioctl, | ||
218 | .open = pnx833x_wdt_open, | ||
219 | .release = pnx833x_wdt_release, | ||
220 | }; | ||
221 | |||
222 | static struct miscdevice pnx833x_wdt_miscdev = { | ||
223 | .minor = WATCHDOG_MINOR, | ||
224 | .name = "watchdog", | ||
225 | .fops = &pnx833x_wdt_fops, | ||
226 | }; | ||
227 | |||
228 | static struct notifier_block pnx833x_wdt_notifier = { | ||
229 | .notifier_call = pnx833x_wdt_notify_sys, | ||
230 | }; | ||
231 | |||
232 | static char banner[] __initdata = | ||
233 | KERN_INFO PFX "Hardware Watchdog Timer for PNX833x: Version 0.1\n"; | ||
234 | |||
235 | static int __init watchdog_init(void) | ||
236 | { | ||
237 | int ret, cause; | ||
238 | |||
239 | /* Let's check the reason for the reset. */ | ||
240 | cause = PNX833X_REG(PNX833X_RESET); | ||
241 | /* If bit 31 is set, the watchdog was the cause of the reset. */ | ||
242 | if (cause & 0x80000000) { | ||
243 | printk(KERN_INFO PFX "The system was previously reset due to " | ||
244 | "the watchdog firing - please investigate...\n"); | ||
245 | } | ||
246 | |||
247 | ret = register_reboot_notifier(&pnx833x_wdt_notifier); | ||
248 | if (ret) { | ||
249 | printk(KERN_ERR PFX | ||
250 | "cannot register reboot notifier (err=%d)\n", ret); | ||
251 | return ret; | ||
252 | } | ||
253 | |||
254 | ret = misc_register(&pnx833x_wdt_miscdev); | ||
255 | if (ret) { | ||
256 | printk(KERN_ERR PFX | ||
257 | "cannot register miscdev on minor=%d (err=%d)\n", | ||
258 | WATCHDOG_MINOR, ret); | ||
259 | unregister_reboot_notifier(&pnx833x_wdt_notifier); | ||
260 | return ret; | ||
261 | } | ||
262 | |||
263 | printk(banner); | ||
264 | if (start_enabled) | ||
265 | pnx833x_wdt_start(); | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static void __exit watchdog_exit(void) | ||
271 | { | ||
272 | misc_deregister(&pnx833x_wdt_miscdev); | ||
273 | unregister_reboot_notifier(&pnx833x_wdt_notifier); | ||
274 | } | ||
275 | |||
276 | module_init(watchdog_init); | ||
277 | module_exit(watchdog_exit); | ||
278 | |||
279 | MODULE_AUTHOR("Daniel Laird/Andre McCurdy"); | ||
280 | MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x"); | ||
281 | MODULE_LICENSE("GPL"); | ||
282 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
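A quick check on the default above (an observation, not part of the submitted file): the compare register holds a raw tick count, so the 30-second default works out to 30 x 68,000,000 = 2,040,000,000 counts. That still fits in the signed int used for pnx833x_wdt_timeout, whose range caps the usable timeout at roughly INT_MAX / 68 MHz, i.e. about 31.5 seconds.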
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c new file mode 100644 index 000000000000..5dd952681f32 --- /dev/null +++ b/drivers/watchdog/stmp3xxx_wdt.c | |||
@@ -0,0 +1,296 @@ | |||
1 | /* | ||
2 | * Watchdog driver for Freescale STMP37XX/STMP378X | ||
3 | * | ||
4 | * Author: Vitaly Wool <vital@embeddedalley.com> | ||
5 | * | ||
6 | * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. | ||
7 | * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. | ||
8 | */ | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/watchdog.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/uaccess.h> | ||
17 | |||
18 | #include <mach/platform.h> | ||
19 | #include <mach/regs-rtc.h> | ||
20 | |||
21 | #define DEFAULT_HEARTBEAT 19 | ||
22 | #define MAX_HEARTBEAT (0x10000000 >> 6) | ||
23 | |||
24 | /* missing bitmask in headers */ | ||
25 | #define BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER 0x80000000 | ||
26 | |||
27 | #define WDT_IN_USE 0 | ||
28 | #define WDT_OK_TO_CLOSE 1 | ||
29 | |||
30 | #define WDOG_COUNTER_RATE 1000 /* 1 kHz clock */ | ||
31 | |||
32 | static DEFINE_SPINLOCK(stmp3xxx_wdt_io_lock); | ||
33 | static unsigned long wdt_status; | ||
34 | static const int nowayout = WATCHDOG_NOWAYOUT; | ||
35 | static int heartbeat = DEFAULT_HEARTBEAT; | ||
36 | static unsigned long boot_status; | ||
37 | |||
38 | static void wdt_enable(u32 value) | ||
39 | { | ||
40 | spin_lock(&stmp3xxx_wdt_io_lock); | ||
41 | __raw_writel(value, REGS_RTC_BASE + HW_RTC_WATCHDOG); | ||
42 | stmp3xxx_setl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL); | ||
43 | stmp3xxx_setl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER, | ||
44 | REGS_RTC_BASE + HW_RTC_PERSISTENT1); | ||
45 | spin_unlock(&stmp3xxx_wdt_io_lock); | ||
46 | } | ||
47 | |||
48 | static void wdt_disable(void) | ||
49 | { | ||
50 | spin_lock(&stmp3xxx_wdt_io_lock); | ||
51 | stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER, | ||
52 | REGS_RTC_BASE + HW_RTC_PERSISTENT1); | ||
53 | stmp3xxx_clearl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL); | ||
54 | spin_unlock(&stmp3xxx_wdt_io_lock); | ||
55 | } | ||
56 | |||
57 | static void wdt_ping(void) | ||
58 | { | ||
59 | wdt_enable(heartbeat * WDOG_COUNTER_RATE); | ||
60 | } | ||
61 | |||
62 | static int stmp3xxx_wdt_open(struct inode *inode, struct file *file) | ||
63 | { | ||
64 | if (test_and_set_bit(WDT_IN_USE, &wdt_status)) | ||
65 | return -EBUSY; | ||
66 | |||
67 | clear_bit(WDT_OK_TO_CLOSE, &wdt_status); | ||
68 | wdt_ping(); | ||
69 | |||
70 | return nonseekable_open(inode, file); | ||
71 | } | ||
72 | |||
73 | static ssize_t stmp3xxx_wdt_write(struct file *file, const char __user *data, | ||
74 | size_t len, loff_t *ppos) | ||
75 | { | ||
76 | if (len) { | ||
77 | if (!nowayout) { | ||
78 | size_t i; | ||
79 | |||
80 | clear_bit(WDT_OK_TO_CLOSE, &wdt_status); | ||
81 | |||
82 | for (i = 0; i != len; i++) { | ||
83 | char c; | ||
84 | |||
85 | if (get_user(c, data + i)) | ||
86 | return -EFAULT; | ||
87 | if (c == 'V') | ||
88 | set_bit(WDT_OK_TO_CLOSE, &wdt_status); | ||
89 | } | ||
90 | } | ||
91 | wdt_ping(); | ||
92 | } | ||
93 | |||
94 | return len; | ||
95 | } | ||
96 | |||
97 | static struct watchdog_info ident = { | ||
98 | .options = WDIOF_CARDRESET | | ||
99 | WDIOF_MAGICCLOSE | | ||
100 | WDIOF_SETTIMEOUT | | ||
101 | WDIOF_KEEPALIVEPING, | ||
102 | .identity = "STMP3XXX Watchdog", | ||
103 | }; | ||
104 | |||
105 | static long stmp3xxx_wdt_ioctl(struct file *file, unsigned int cmd, | ||
106 | unsigned long arg) | ||
107 | { | ||
108 | void __user *argp = (void __user *)arg; | ||
109 | int __user *p = argp; | ||
110 | int new_heartbeat, opts; | ||
111 | int ret = -ENOTTY; | ||
112 | |||
113 | switch (cmd) { | ||
114 | case WDIOC_GETSUPPORT: | ||
115 | ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; | ||
116 | break; | ||
117 | |||
118 | case WDIOC_GETSTATUS: | ||
119 | ret = put_user(0, p); | ||
120 | break; | ||
121 | |||
122 | case WDIOC_GETBOOTSTATUS: | ||
123 | ret = put_user(boot_status, p); | ||
124 | break; | ||
125 | |||
126 | case WDIOC_SETOPTIONS: | ||
127 | if (get_user(opts, p)) { | ||
128 | ret = -EFAULT; | ||
129 | break; | ||
130 | } | ||
131 | if (opts & WDIOS_DISABLECARD) | ||
132 | wdt_disable(); | ||
133 | else if (opts & WDIOS_ENABLECARD) | ||
134 | wdt_ping(); | ||
135 | else { | ||
136 | pr_debug("%s: unknown option 0x%x\n", __func__, opts); | ||
137 | ret = -EINVAL; | ||
138 | break; | ||
139 | } | ||
140 | ret = 0; | ||
141 | break; | ||
142 | |||
143 | case WDIOC_KEEPALIVE: | ||
144 | wdt_ping(); | ||
145 | ret = 0; | ||
146 | break; | ||
147 | |||
148 | case WDIOC_SETTIMEOUT: | ||
149 | if (get_user(new_heartbeat, p)) { | ||
150 | ret = -EFAULT; | ||
151 | break; | ||
152 | } | ||
153 | if (new_heartbeat <= 0 || new_heartbeat > MAX_HEARTBEAT) { | ||
154 | ret = -EINVAL; | ||
155 | break; | ||
156 | } | ||
157 | |||
158 | heartbeat = new_heartbeat; | ||
159 | wdt_ping(); | ||
160 | /* Fall through */ | ||
161 | |||
162 | case WDIOC_GETTIMEOUT: | ||
163 | ret = put_user(heartbeat, p); | ||
164 | break; | ||
165 | } | ||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | static int stmp3xxx_wdt_release(struct inode *inode, struct file *file) | ||
170 | { | ||
171 | int ret = 0; | ||
172 | |||
173 | if (!nowayout) { | ||
174 | if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) { | ||
175 | wdt_ping(); | ||
176 | pr_debug("%s: Device closed unexpectdly\n", __func__); | ||
177 | ret = -EINVAL; | ||
178 | } else { | ||
179 | wdt_disable(); | ||
180 | clear_bit(WDT_OK_TO_CLOSE, &wdt_status); | ||
181 | } | ||
182 | } | ||
183 | clear_bit(WDT_IN_USE, &wdt_status); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static const struct file_operations stmp3xxx_wdt_fops = { | ||
189 | .owner = THIS_MODULE, | ||
190 | .llseek = no_llseek, | ||
191 | .write = stmp3xxx_wdt_write, | ||
192 | .unlocked_ioctl = stmp3xxx_wdt_ioctl, | ||
193 | .open = stmp3xxx_wdt_open, | ||
194 | .release = stmp3xxx_wdt_release, | ||
195 | }; | ||
196 | |||
197 | static struct miscdevice stmp3xxx_wdt_miscdev = { | ||
198 | .minor = WATCHDOG_MINOR, | ||
199 | .name = "watchdog", | ||
200 | .fops = &stmp3xxx_wdt_fops, | ||
201 | }; | ||
202 | |||
203 | static int __devinit stmp3xxx_wdt_probe(struct platform_device *pdev) | ||
204 | { | ||
205 | int ret = 0; | ||
206 | |||
207 | if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) | ||
208 | heartbeat = DEFAULT_HEARTBEAT; | ||
209 | |||
210 | boot_status = __raw_readl(REGS_RTC_BASE + HW_RTC_PERSISTENT1) & | ||
211 | BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER; | ||
212 | boot_status = boot_status ? WDIOF_CARDRESET : 0; | ||
213 | stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER, | ||
214 | REGS_RTC_BASE + HW_RTC_PERSISTENT1); | ||
215 | wdt_disable(); /* disable for now */ | ||
216 | |||
217 | ret = misc_register(&stmp3xxx_wdt_miscdev); | ||
218 | if (ret < 0) { | ||
219 | dev_err(&pdev->dev, "cannot register misc device\n"); | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | printk(KERN_INFO "stmp3xxx watchdog: initialized, heartbeat %d sec\n", | ||
224 | heartbeat); | ||
225 | |||
226 | return ret; | ||
227 | } | ||
228 | |||
229 | static int __devexit stmp3xxx_wdt_remove(struct platform_device *pdev) | ||
230 | { | ||
231 | misc_deregister(&stmp3xxx_wdt_miscdev); | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | #ifdef CONFIG_PM | ||
236 | static int wdt_suspended; | ||
237 | static u32 wdt_saved_time; | ||
238 | |||
239 | static int stmp3xxx_wdt_suspend(struct platform_device *pdev, | ||
240 | pm_message_t state) | ||
241 | { | ||
242 | if (__raw_readl(REGS_RTC_BASE + HW_RTC_CTRL) & | ||
243 | BM_RTC_CTRL_WATCHDOGEN) { | ||
244 | wdt_suspended = 1; | ||
245 | wdt_saved_time = __raw_readl(REGS_RTC_BASE + HW_RTC_WATCHDOG); | ||
246 | wdt_disable(); | ||
247 | } | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | static int stmp3xxx_wdt_resume(struct platform_device *pdev) | ||
252 | { | ||
253 | if (wdt_suspended) { | ||
254 | wdt_enable(wdt_saved_time); | ||
255 | wdt_suspended = 0; | ||
256 | } | ||
257 | return 0; | ||
258 | } | ||
259 | #else | ||
260 | #define stmp3xxx_wdt_suspend NULL | ||
261 | #define stmp3xxx_wdt_resume NULL | ||
262 | #endif | ||
263 | |||
264 | static struct platform_driver platform_wdt_driver = { | ||
265 | .driver = { | ||
266 | .name = "stmp3xxx_wdt", | ||
267 | }, | ||
268 | .probe = stmp3xxx_wdt_probe, | ||
269 | .remove = __devexit_p(stmp3xxx_wdt_remove), | ||
270 | .suspend = stmp3xxx_wdt_suspend, | ||
271 | .resume = stmp3xxx_wdt_resume, | ||
272 | }; | ||
273 | |||
274 | static int __init stmp3xxx_wdt_init(void) | ||
275 | { | ||
276 | return platform_driver_register(&platform_wdt_driver); | ||
277 | } | ||
278 | |||
279 | static void __exit stmp3xxx_wdt_exit(void) | ||
280 | { | ||
281 | return platform_driver_unregister(&platform_wdt_driver); | ||
282 | } | ||
283 | |||
284 | module_init(stmp3xxx_wdt_init); | ||
285 | module_exit(stmp3xxx_wdt_exit); | ||
286 | |||
287 | MODULE_DESCRIPTION("STMP3XXX Watchdog Driver"); | ||
288 | MODULE_LICENSE("GPL"); | ||
289 | |||
290 | module_param(heartbeat, int, 0); | ||
291 | MODULE_PARM_DESC(heartbeat, | ||
292 | "Watchdog heartbeat period in seconds from 1 to " | ||
293 | __MODULE_STRING(MAX_HEARTBEAT) ", default " | ||
294 | __MODULE_STRING(DEFAULT_HEARTBEAT)); | ||
295 | |||
296 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
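For reference, a minimal userspace client for the /dev/watchdog interface these drivers register. This is generic watchdog API usage rather than code from the patches; the stmp3xxx driver above advertises SETTIMEOUT, KEEPALIVE and magic close, so all three calls apply to it.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int timeout = 19;       /* seconds; the stmp3xxx default heartbeat */
            int fd = open("/dev/watchdog", O_WRONLY);

            if (fd < 0) {
                    perror("open /dev/watchdog");
                    return 1;
            }
            ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* negotiate the heartbeat */
            ioctl(fd, WDIOC_KEEPALIVE, 0);          /* ping once */
            write(fd, "V", 1);      /* magic close: allow the stop on close */
            close(fd);
            return 0;
    }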
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c new file mode 100644 index 000000000000..cb46556f2973 --- /dev/null +++ b/drivers/watchdog/twl4030_wdt.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Copyright (C) Nokia Corporation | ||
3 | * | ||
4 | * Written by Timo Kokkonen <timo.t.kokkonen at nokia.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/watchdog.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/miscdevice.h> | ||
28 | #include <linux/uaccess.h> | ||
29 | #include <linux/i2c/twl4030.h> | ||
30 | |||
31 | #define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3 | ||
32 | |||
33 | #define TWL4030_WDT_STATE_OPEN 0x1 | ||
34 | #define TWL4030_WDT_STATE_ACTIVE 0x8 | ||
35 | |||
36 | static struct platform_device *twl4030_wdt_dev; | ||
37 | |||
38 | struct twl4030_wdt { | ||
39 | struct miscdevice miscdev; | ||
40 | int timer_margin; | ||
41 | unsigned long state; | ||
42 | }; | ||
43 | |||
44 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
45 | module_param(nowayout, int, 0); | ||
46 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " | ||
47 | "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | ||
48 | |||
49 | static int twl4030_wdt_write(unsigned char val) | ||
50 | { | ||
51 | return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val, | ||
52 | TWL4030_WATCHDOG_CFG_REG_OFFS); | ||
53 | } | ||
54 | |||
55 | static int twl4030_wdt_enable(struct twl4030_wdt *wdt) | ||
56 | { | ||
57 | return twl4030_wdt_write(wdt->timer_margin + 1); | ||
58 | } | ||
59 | |||
60 | static int twl4030_wdt_disable(struct twl4030_wdt *wdt) | ||
61 | { | ||
62 | return twl4030_wdt_write(0); | ||
63 | } | ||
64 | |||
65 | static int twl4030_wdt_set_timeout(struct twl4030_wdt *wdt, int timeout) | ||
66 | { | ||
67 | if (timeout < 0 || timeout > 30) { | ||
68 | dev_warn(wdt->miscdev.parent, | ||
69 | "Timeout can only be in the range [0-30] seconds"); | ||
70 | return -EINVAL; | ||
71 | } | ||
72 | wdt->timer_margin = timeout; | ||
73 | return twl4030_wdt_enable(wdt); | ||
74 | } | ||
75 | |||
76 | static ssize_t twl4030_wdt_write_fop(struct file *file, | ||
77 | const char __user *data, size_t len, loff_t *ppos) | ||
78 | { | ||
79 | struct twl4030_wdt *wdt = file->private_data; | ||
80 | |||
81 | if (len) | ||
82 | twl4030_wdt_enable(wdt); | ||
83 | |||
84 | return len; | ||
85 | } | ||
86 | |||
87 | static long twl4030_wdt_ioctl(struct file *file, | ||
88 | unsigned int cmd, unsigned long arg) | ||
89 | { | ||
90 | void __user *argp = (void __user *)arg; | ||
91 | int __user *p = argp; | ||
92 | int new_margin; | ||
93 | struct twl4030_wdt *wdt = file->private_data; | ||
94 | |||
95 | static const struct watchdog_info twl4030_wd_ident = { | ||
96 | .identity = "TWL4030 Watchdog", | ||
97 | .options = WDIOF_SETTIMEOUT, | ||
98 | .firmware_version = 0, | ||
99 | }; | ||
100 | |||
101 | switch (cmd) { | ||
102 | case WDIOC_GETSUPPORT: | ||
103 | return copy_to_user(argp, &twl4030_wd_ident, | ||
104 | sizeof(twl4030_wd_ident)) ? -EFAULT : 0; | ||
105 | |||
106 | case WDIOC_GETSTATUS: | ||
107 | case WDIOC_GETBOOTSTATUS: | ||
108 | return put_user(0, p); | ||
109 | |||
110 | case WDIOC_KEEPALIVE: | ||
111 | twl4030_wdt_enable(wdt); | ||
112 | break; | ||
113 | |||
114 | case WDIOC_SETTIMEOUT: | ||
115 | if (get_user(new_margin, p)) | ||
116 | return -EFAULT; | ||
117 | if (twl4030_wdt_set_timeout(wdt, new_margin)) | ||
118 | return -EINVAL; | ||
119 | return put_user(wdt->timer_margin, p); | ||
120 | |||
121 | case WDIOC_GETTIMEOUT: | ||
122 | return put_user(wdt->timer_margin, p); | ||
123 | |||
124 | default: | ||
125 | return -ENOTTY; | ||
126 | } | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int twl4030_wdt_open(struct inode *inode, struct file *file) | ||
132 | { | ||
133 | struct twl4030_wdt *wdt = platform_get_drvdata(twl4030_wdt_dev); | ||
134 | |||
135 | /* /dev/watchdog can only be opened once */ | ||
136 | if (test_and_set_bit(0, &wdt->state)) | ||
137 | return -EBUSY; | ||
138 | |||
139 | wdt->state |= TWL4030_WDT_STATE_ACTIVE; | ||
140 | file->private_data = (void *) wdt; | ||
141 | |||
142 | twl4030_wdt_enable(wdt); | ||
143 | return nonseekable_open(inode, file); | ||
144 | } | ||
145 | |||
146 | static int twl4030_wdt_release(struct inode *inode, struct file *file) | ||
147 | { | ||
148 | struct twl4030_wdt *wdt = file->private_data; | ||
149 | if (nowayout) { | ||
150 | dev_alert(wdt->miscdev.parent, | ||
151 | "Unexpected close, watchdog still running!\n"); | ||
152 | twl4030_wdt_enable(wdt); | ||
153 | } else { | ||
154 | if (twl4030_wdt_disable(wdt)) | ||
155 | return -EFAULT; | ||
156 | wdt->state &= ~TWL4030_WDT_STATE_ACTIVE; | ||
157 | } | ||
158 | |||
159 | clear_bit(0, &wdt->state); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static const struct file_operations twl4030_wdt_fops = { | ||
164 | .owner = THIS_MODULE, | ||
165 | .llseek = no_llseek, | ||
166 | .open = twl4030_wdt_open, | ||
167 | .release = twl4030_wdt_release, | ||
168 | .unlocked_ioctl = twl4030_wdt_ioctl, | ||
169 | .write = twl4030_wdt_write_fop, | ||
170 | }; | ||
171 | |||
172 | static int __devinit twl4030_wdt_probe(struct platform_device *pdev) | ||
173 | { | ||
174 | int ret = 0; | ||
175 | struct twl4030_wdt *wdt; | ||
176 | |||
177 | wdt = kzalloc(sizeof(struct twl4030_wdt), GFP_KERNEL); | ||
178 | if (!wdt) | ||
179 | return -ENOMEM; | ||
180 | |||
181 | wdt->state = 0; | ||
182 | wdt->timer_margin = 30; | ||
183 | wdt->miscdev.parent = &pdev->dev; | ||
184 | wdt->miscdev.fops = &twl4030_wdt_fops; | ||
185 | wdt->miscdev.minor = WATCHDOG_MINOR; | ||
186 | wdt->miscdev.name = "watchdog"; | ||
187 | |||
188 | platform_set_drvdata(pdev, wdt); | ||
189 | |||
190 | twl4030_wdt_dev = pdev; | ||
191 | |||
192 | ret = misc_register(&wdt->miscdev); | ||
193 | if (ret) { | ||
194 | dev_err(wdt->miscdev.parent, | ||
195 | "Failed to register misc device\n"); | ||
196 | platform_set_drvdata(pdev, NULL); | ||
197 | kfree(wdt); | ||
198 | twl4030_wdt_dev = NULL; | ||
199 | return ret; | ||
200 | } | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int __devexit twl4030_wdt_remove(struct platform_device *pdev) | ||
205 | { | ||
206 | struct twl4030_wdt *wdt = platform_get_drvdata(pdev); | ||
207 | |||
208 | if (wdt->state & TWL4030_WDT_STATE_ACTIVE) | ||
209 | if (twl4030_wdt_disable(wdt)) | ||
210 | return -EFAULT; | ||
211 | |||
212 | wdt->state &= ~TWL4030_WDT_STATE_ACTIVE; | ||
213 | misc_deregister(&wdt->miscdev); | ||
214 | |||
215 | platform_set_drvdata(pdev, NULL); | ||
216 | kfree(wdt); | ||
217 | twl4030_wdt_dev = NULL; | ||
218 | |||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | #ifdef CONFIG_PM | ||
223 | static int twl4030_wdt_suspend(struct platform_device *pdev, pm_message_t state) | ||
224 | { | ||
225 | struct twl4030_wdt *wdt = platform_get_drvdata(pdev); | ||
226 | if (wdt->state & TWL4030_WDT_STATE_ACTIVE) | ||
227 | return twl4030_wdt_disable(wdt); | ||
228 | |||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | static int twl4030_wdt_resume(struct platform_device *pdev) | ||
233 | { | ||
234 | struct twl4030_wdt *wdt = platform_get_drvdata(pdev); | ||
235 | if (wdt->state & TWL4030_WDT_STATE_ACTIVE) | ||
236 | return twl4030_wdt_enable(wdt); | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | #else | ||
241 | #define twl4030_wdt_suspend NULL | ||
242 | #define twl4030_wdt_resume NULL | ||
243 | #endif | ||
244 | |||
245 | static struct platform_driver twl4030_wdt_driver = { | ||
246 | .probe = twl4030_wdt_probe, | ||
247 | .remove = __devexit_p(twl4030_wdt_remove), | ||
248 | .suspend = twl4030_wdt_suspend, | ||
249 | .resume = twl4030_wdt_resume, | ||
250 | .driver = { | ||
251 | .owner = THIS_MODULE, | ||
252 | .name = "twl4030_wdt", | ||
253 | }, | ||
254 | }; | ||
255 | |||
256 | static int __init twl4030_wdt_init(void) | ||
257 | { | ||
258 | return platform_driver_register(&twl4030_wdt_driver); | ||
259 | } | ||
260 | module_init(twl4030_wdt_init); | ||
261 | |||
262 | static void __exit twl4030_wdt_exit(void) | ||
263 | { | ||
264 | platform_driver_unregister(&twl4030_wdt_driver); | ||
265 | } | ||
266 | module_exit(twl4030_wdt_exit); | ||
267 | |||
268 | MODULE_AUTHOR("Nokia Corporation"); | ||
269 | MODULE_LICENSE("GPL"); | ||
270 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
271 | MODULE_ALIAS("platform:twl4030_wdt"); | ||
272 | |||
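Worth noting from the listing above, as a small worked example: the enable path writes timer_margin + 1 to the PM receiver's watchdog configuration register, so the default 30-second margin writes the value 31, while writing 0 stops the watchdog, which is exactly what twl4030_wdt_disable() relies on.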
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c index a4fe7a38d9b0..3bde56bce63a 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c | |||
@@ -218,16 +218,14 @@ static void wdrtas_timer_keepalive(void) | |||
218 | */ | 218 | */ |
219 | static int wdrtas_get_temperature(void) | 219 | static int wdrtas_get_temperature(void) |
220 | { | 220 | { |
221 | long result; | 221 | int result; |
222 | int temperature = 0; | 222 | int temperature = 0; |
223 | 223 | ||
224 | result = rtas_call(wdrtas_token_get_sensor_state, 2, 2, | 224 | result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature); |
225 | (void *)__pa(&temperature), | ||
226 | WDRTAS_THERMAL_SENSOR, 0); | ||
227 | 225 | ||
228 | if (result < 0) | 226 | if (result < 0) |
229 | printk(KERN_WARNING "wdrtas: reading the thermal sensor " | 227 | printk(KERN_WARNING "wdrtas: reading the thermal sensor " |
230 | "faild: %li\n", result); | 228 | "failed: %i\n", result); |
231 | else | 229 | else |
232 | temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */ | 230 | temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */ |
233 | 231 | ||
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index c45839a4a34d..7a1bdc7c95a9 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Industrial Computer Source PCI-WDT500/501 driver | 2 | * Industrial Computer Source PCI-WDT500/501 driver |
3 | * | 3 | * |
4 | * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>, | 4 | * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>, |
5 | * All Rights Reserved. | 5 | * All Rights Reserved. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -99,14 +99,16 @@ MODULE_PARM_DESC(nowayout, | |||
99 | "Watchdog cannot be stopped once started (default=" | 99 | "Watchdog cannot be stopped once started (default=" |
100 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | 100 | __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); |
101 | 101 | ||
102 | #ifdef CONFIG_WDT_501_PCI | ||
103 | /* Support for the Fan Tachometer on the PCI-WDT501 */ | 102 | /* Support for the Fan Tachometer on the PCI-WDT501 */ |
104 | static int tachometer; | 103 | static int tachometer; |
105 | |||
106 | module_param(tachometer, int, 0); | 104 | module_param(tachometer, int, 0); |
107 | MODULE_PARM_DESC(tachometer, | 105 | MODULE_PARM_DESC(tachometer, |
108 | "PCI-WDT501 Fan Tachometer support (0=disable, default=0)"); | 106 | "PCI-WDT501 Fan Tachometer support (0=disable, default=0)"); |
109 | #endif /* CONFIG_WDT_501_PCI */ | 107 | |
108 | static int type = 500; | ||
109 | module_param(type, int, 0); | ||
110 | MODULE_PARM_DESC(type, | ||
111 | "PCI-WDT501 Card type (500 or 501 , default=500)"); | ||
110 | 112 | ||
111 | /* | 113 | /* |
112 | * Programming support | 114 | * Programming support |
@@ -266,22 +268,21 @@ static int wdtpci_get_status(int *status) | |||
266 | *status |= WDIOF_EXTERN1; | 268 | *status |= WDIOF_EXTERN1; |
267 | if (new_status & WDC_SR_ISII1) | 269 | if (new_status & WDC_SR_ISII1) |
268 | *status |= WDIOF_EXTERN2; | 270 | *status |= WDIOF_EXTERN2; |
269 | #ifdef CONFIG_WDT_501_PCI | 271 | if (type == 501) { |
270 | if (!(new_status & WDC_SR_TGOOD)) | 272 | if (!(new_status & WDC_SR_TGOOD)) |
271 | *status |= WDIOF_OVERHEAT; | 273 | *status |= WDIOF_OVERHEAT; |
272 | if (!(new_status & WDC_SR_PSUOVER)) | 274 | if (!(new_status & WDC_SR_PSUOVER)) |
273 | *status |= WDIOF_POWEROVER; | 275 | *status |= WDIOF_POWEROVER; |
274 | if (!(new_status & WDC_SR_PSUUNDR)) | 276 | if (!(new_status & WDC_SR_PSUUNDR)) |
275 | *status |= WDIOF_POWERUNDER; | 277 | *status |= WDIOF_POWERUNDER; |
276 | if (tachometer) { | 278 | if (tachometer) { |
277 | if (!(new_status & WDC_SR_FANGOOD)) | 279 | if (!(new_status & WDC_SR_FANGOOD)) |
278 | *status |= WDIOF_FANFAULT; | 280 | *status |= WDIOF_FANFAULT; |
281 | } | ||
279 | } | 282 | } |
280 | #endif /* CONFIG_WDT_501_PCI */ | ||
281 | return 0; | 283 | return 0; |
282 | } | 284 | } |
283 | 285 | ||
284 | #ifdef CONFIG_WDT_501_PCI | ||
285 | /** | 286 | /** |
286 | * wdtpci_get_temperature: | 287 | * wdtpci_get_temperature: |
287 | * | 288 | * |
@@ -300,7 +301,6 @@ static int wdtpci_get_temperature(int *temperature) | |||
300 | *temperature = (c * 11 / 15) + 7; | 301 | *temperature = (c * 11 / 15) + 7; |
301 | return 0; | 302 | return 0; |
302 | } | 303 | } |
303 | #endif /* CONFIG_WDT_501_PCI */ | ||
304 | 304 | ||
305 | /** | 305 | /** |
306 | * wdtpci_interrupt: | 306 | * wdtpci_interrupt: |
@@ -327,22 +327,22 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id) | |||
327 | 327 | ||
328 | printk(KERN_CRIT PFX "status %d\n", status); | 328 | printk(KERN_CRIT PFX "status %d\n", status); |
329 | 329 | ||
330 | #ifdef CONFIG_WDT_501_PCI | 330 | if (type == 501) { |
331 | if (!(status & WDC_SR_TGOOD)) { | 331 | if (!(status & WDC_SR_TGOOD)) { |
332 | u8 alarm = inb(WDT_RT); | 332 | printk(KERN_CRIT PFX "Overheat alarm.(%d)\n", |
333 | printk(KERN_CRIT PFX "Overheat alarm.(%d)\n", alarm); | 333 | inb(WDT_RT)); |
334 | udelay(8); | 334 | udelay(8); |
335 | } | 335 | } |
336 | if (!(status & WDC_SR_PSUOVER)) | 336 | if (!(status & WDC_SR_PSUOVER)) |
337 | printk(KERN_CRIT PFX "PSU over voltage.\n"); | 337 | printk(KERN_CRIT PFX "PSU over voltage.\n"); |
338 | if (!(status & WDC_SR_PSUUNDR)) | 338 | if (!(status & WDC_SR_PSUUNDR)) |
339 | printk(KERN_CRIT PFX "PSU under voltage.\n"); | 339 | printk(KERN_CRIT PFX "PSU under voltage.\n"); |
340 | if (tachometer) { | 340 | if (tachometer) { |
341 | if (!(status & WDC_SR_FANGOOD)) | 341 | if (!(status & WDC_SR_FANGOOD)) |
342 | printk(KERN_CRIT PFX "Possible fan fault.\n"); | 342 | printk(KERN_CRIT PFX "Possible fan fault.\n"); |
343 | } | ||
343 | } | 344 | } |
344 | #endif /* CONFIG_WDT_501_PCI */ | 345 | if (!(status & WDC_SR_WCCR)) { |
345 | if (!(status&WDC_SR_WCCR)) { | ||
346 | #ifdef SOFTWARE_REBOOT | 346 | #ifdef SOFTWARE_REBOOT |
347 | #ifdef ONLY_TESTING | 347 | #ifdef ONLY_TESTING |
348 | printk(KERN_CRIT PFX "Would Reboot.\n"); | 348 | printk(KERN_CRIT PFX "Would Reboot.\n"); |
@@ -371,12 +371,13 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id) | |||
371 | */ | 371 | */ |
372 | 372 | ||
373 | static ssize_t wdtpci_write(struct file *file, const char __user *buf, | 373 | static ssize_t wdtpci_write(struct file *file, const char __user *buf, |
374 | size_t count, loff_t *ppos) | 374 | size_t count, loff_t *ppos) |
375 | { | 375 | { |
376 | if (count) { | 376 | if (count) { |
377 | if (!nowayout) { | 377 | if (!nowayout) { |
378 | size_t i; | 378 | size_t i; |
379 | 379 | ||
380 | /* In case it was set long ago */ | ||
380 | expect_close = 0; | 381 | expect_close = 0; |
381 | 382 | ||
382 | for (i = 0; i != count; i++) { | 383 | for (i = 0; i != count; i++) { |
@@ -406,10 +407,10 @@ static ssize_t wdtpci_write(struct file *file, const char __user *buf, | |||
406 | static long wdtpci_ioctl(struct file *file, unsigned int cmd, | 407 | static long wdtpci_ioctl(struct file *file, unsigned int cmd, |
407 | unsigned long arg) | 408 | unsigned long arg) |
408 | { | 409 | { |
409 | int new_heartbeat; | ||
410 | int status; | ||
411 | void __user *argp = (void __user *)arg; | 410 | void __user *argp = (void __user *)arg; |
412 | int __user *p = argp; | 411 | int __user *p = argp; |
412 | int new_heartbeat; | ||
413 | int status; | ||
413 | 414 | ||
414 | static struct watchdog_info ident = { | 415 | static struct watchdog_info ident = { |
415 | .options = WDIOF_SETTIMEOUT| | 416 | .options = WDIOF_SETTIMEOUT| |
@@ -421,11 +422,12 @@ static long wdtpci_ioctl(struct file *file, unsigned int cmd, | |||
421 | 422 | ||
422 | /* Add options according to the card we have */ | 423 | /* Add options according to the card we have */ |
423 | ident.options |= (WDIOF_EXTERN1|WDIOF_EXTERN2); | 424 | ident.options |= (WDIOF_EXTERN1|WDIOF_EXTERN2); |
424 | #ifdef CONFIG_WDT_501_PCI | 425 | if (type == 501) { |
425 | ident.options |= (WDIOF_OVERHEAT|WDIOF_POWERUNDER|WDIOF_POWEROVER); | 426 | ident.options |= (WDIOF_OVERHEAT|WDIOF_POWERUNDER| |
426 | if (tachometer) | 427 | WDIOF_POWEROVER); |
427 | ident.options |= WDIOF_FANFAULT; | 428 | if (tachometer) |
428 | #endif /* CONFIG_WDT_501_PCI */ | 429 | ident.options |= WDIOF_FANFAULT; |
430 | } | ||
429 | 431 | ||
430 | switch (cmd) { | 432 | switch (cmd) { |
431 | case WDIOC_GETSUPPORT: | 433 | case WDIOC_GETSUPPORT: |
@@ -503,7 +505,6 @@ static int wdtpci_release(struct inode *inode, struct file *file) | |||
503 | return 0; | 505 | return 0; |
504 | } | 506 | } |
505 | 507 | ||
506 | #ifdef CONFIG_WDT_501_PCI | ||
507 | /** | 508 | /** |
508 | * wdtpci_temp_read: | 509 | * wdtpci_temp_read: |
509 | * @file: file handle to the watchdog board | 510 | * @file: file handle to the watchdog board |
@@ -554,7 +555,6 @@ static int wdtpci_temp_release(struct inode *inode, struct file *file) | |||
554 | { | 555 | { |
555 | return 0; | 556 | return 0; |
556 | } | 557 | } |
557 | #endif /* CONFIG_WDT_501_PCI */ | ||
558 | 558 | ||
559 | /** | 559 | /** |
560 | * notify_sys: | 560 | * notify_sys: |
@@ -596,7 +596,6 @@ static struct miscdevice wdtpci_miscdev = { | |||
596 | .fops = &wdtpci_fops, | 596 | .fops = &wdtpci_fops, |
597 | }; | 597 | }; |
598 | 598 | ||
599 | #ifdef CONFIG_WDT_501_PCI | ||
600 | static const struct file_operations wdtpci_temp_fops = { | 599 | static const struct file_operations wdtpci_temp_fops = { |
601 | .owner = THIS_MODULE, | 600 | .owner = THIS_MODULE, |
602 | .llseek = no_llseek, | 601 | .llseek = no_llseek, |
@@ -610,7 +609,6 @@ static struct miscdevice temp_miscdev = { | |||
610 | .name = "temperature", | 609 | .name = "temperature", |
611 | .fops = &wdtpci_temp_fops, | 610 | .fops = &wdtpci_temp_fops, |
612 | }; | 611 | }; |
613 | #endif /* CONFIG_WDT_501_PCI */ | ||
614 | 612 | ||
615 | /* | 613 | /* |
616 | * The WDT card needs to learn about soft shutdowns in order to | 614 | * The WDT card needs to learn about soft shutdowns in order to |
@@ -633,6 +631,11 @@ static int __devinit wdtpci_init_one(struct pci_dev *dev, | |||
633 | return -ENODEV; | 631 | return -ENODEV; |
634 | } | 632 | } |
635 | 633 | ||
634 | if (type != 500 && type != 501) { | ||
635 | printk(KERN_ERR PFX "unknown card type '%d'.\n", type); | ||
636 | return -ENODEV; | ||
637 | } | ||
638 | |||
636 | if (pci_enable_device(dev)) { | 639 | if (pci_enable_device(dev)) { |
637 | printk(KERN_ERR PFX "Not possible to enable PCI Device\n"); | 640 | printk(KERN_ERR PFX "Not possible to enable PCI Device\n"); |
638 | return -ENODEV; | 641 | return -ENODEV; |
@@ -678,15 +681,15 @@ static int __devinit wdtpci_init_one(struct pci_dev *dev, | |||
678 | goto out_irq; | 681 | goto out_irq; |
679 | } | 682 | } |
680 | 683 | ||
681 | #ifdef CONFIG_WDT_501_PCI | 684 | if (type == 501) { |
682 | ret = misc_register(&temp_miscdev); | 685 | ret = misc_register(&temp_miscdev); |
683 | if (ret) { | 686 | if (ret) { |
684 | printk(KERN_ERR PFX | 687 | printk(KERN_ERR PFX |
685 | "cannot register miscdev on minor=%d (err=%d)\n", | 688 | "cannot register miscdev on minor=%d (err=%d)\n", |
686 | TEMP_MINOR, ret); | 689 | TEMP_MINOR, ret); |
687 | goto out_rbt; | 690 | goto out_rbt; |
691 | } | ||
688 | } | 692 | } |
689 | #endif /* CONFIG_WDT_501_PCI */ | ||
690 | 693 | ||
691 | ret = misc_register(&wdtpci_miscdev); | 694 | ret = misc_register(&wdtpci_miscdev); |
692 | if (ret) { | 695 | if (ret) { |
@@ -698,20 +701,18 @@ static int __devinit wdtpci_init_one(struct pci_dev *dev, | |||
698 | 701 | ||
699 | printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n", | 702 | printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n", |
700 | heartbeat, nowayout); | 703 | heartbeat, nowayout); |
701 | #ifdef CONFIG_WDT_501_PCI | 704 | if (type == 501) |
702 | printk(KERN_INFO "wdt: Fan Tachometer is %s\n", | 705 | printk(KERN_INFO "wdt: Fan Tachometer is %s\n", |
703 | (tachometer ? "Enabled" : "Disabled")); | 706 | (tachometer ? "Enabled" : "Disabled")); |
704 | #endif /* CONFIG_WDT_501_PCI */ | ||
705 | 707 | ||
706 | ret = 0; | 708 | ret = 0; |
707 | out: | 709 | out: |
708 | return ret; | 710 | return ret; |
709 | 711 | ||
710 | out_misc: | 712 | out_misc: |
711 | #ifdef CONFIG_WDT_501_PCI | 713 | if (type == 501) |
712 | misc_deregister(&temp_miscdev); | 714 | misc_deregister(&temp_miscdev); |
713 | out_rbt: | 715 | out_rbt: |
714 | #endif /* CONFIG_WDT_501_PCI */ | ||
715 | unregister_reboot_notifier(&wdtpci_notifier); | 716 | unregister_reboot_notifier(&wdtpci_notifier); |
716 | out_irq: | 717 | out_irq: |
717 | free_irq(irq, &wdtpci_miscdev); | 718 | free_irq(irq, &wdtpci_miscdev); |
@@ -728,9 +729,8 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev) | |||
728 | /* here we assume only one device will ever have | 729 | /* here we assume only one device will ever have |
729 | * been picked up and registered by probe function */ | 730 | * been picked up and registered by probe function */ |
730 | misc_deregister(&wdtpci_miscdev); | 731 | misc_deregister(&wdtpci_miscdev); |
731 | #ifdef CONFIG_WDT_501_PCI | 732 | if (type == 501) |
732 | misc_deregister(&temp_miscdev); | 733 | misc_deregister(&temp_miscdev); |
733 | #endif /* CONFIG_WDT_501_PCI */ | ||
734 | unregister_reboot_notifier(&wdtpci_notifier); | 734 | unregister_reboot_notifier(&wdtpci_notifier); |
735 | free_irq(irq, &wdtpci_miscdev); | 735 | free_irq(irq, &wdtpci_miscdev); |
736 | release_region(io, 16); | 736 | release_region(io, 16); |
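With the CONFIG_WDT_501_PCI option folded into the new type parameter, the card variant is now chosen at load time rather than at build time; for example, a WDT501 with fan monitoring could be loaded as: modprobe wdt_pci type=501 tachometer=1 (an illustrative invocation using the two parameters handled above).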